Docker container images are available in the akiyamalab/megadock
repository on Docker Hub.
| Required Tools | GPU | CPU | Notes |
|---|---|---|---|
| Docker | x | x | |
| nvidia-docker | x | | version 2.0 or later |
# CPU single node (OpenMP parallelization)
docker run akiyamalab/megadock:cpu megadock -R data/1gcq_r.pdb -L data/1gcq_l.pdb -o data/1gcq_r-1gcq_l.out
# GPU single node (GPU parallelization)
docker run --runtime=nvidia akiyamalab/megadock:gpu megadock-gpu -R data/1gcq_r.pdb -L data/1gcq_l.pdb -o data/1gcq_r-1gcq_l.out
# on ${REPOSITORY_ROOT} dir
docker build . -f Dockerfiles/cpu/Dockerfile -t akiyamalab/megadock:cpu
docker run akiyamalab/megadock:cpu megadock -R data/1gcq_r.pdb -L data/1gcq_l.pdb
# (optional) start an interactive shell
docker run -it akiyamalab/megadock:cpu
# (optional) run with your own PDB files (${DATA_PATH} = absolute path of your PDB data directory)
docker run -v ${DATA_PATH}:/opt/MEGADOCK/data akiyamalab/megadock:cpu megadock -R data/${RECEPTOR}.pdb -L data/${LIGAND}.pdb
nvidia-docker (version 2.0 or later) is required.
# on ${REPOSITORY_ROOT} dir
docker build . -f Dockerfiles/gpu/Dockerfile -t akiyamalab/megadock:gpu
docker run -it --runtime=nvidia akiyamalab/megadock:gpu megadock-gpu -R data/1gcq_r.pdb -L data/1gcq_l.pdb
# (optional) start an interactive shell
docker run -it --runtime=nvidia akiyamalab/megadock:gpu
# (optional) run with your own PDB files (${DATA_PATH} = absolute path of your PDB data directory)
docker run -it --runtime=nvidia -v ${DATA_PATH}:/opt/MEGADOCK/data akiyamalab/megadock:gpu megadock-gpu -R data/${RECEPTOR}.pdb -L data/${LIGAND}.pdb