first add
This commit is contained in:
57
docker/Dockerfile
Normal file
57
docker/Dockerfile
Normal file
@@ -0,0 +1,57 @@
# Copyright 2024 DeepMind Technologies Limited
#
# AlphaFold 3 source code is licensed under CC BY-NC-SA 4.0. To view a copy of
# this license, visit https://creativecommons.org/licenses/by-nc-sa/4.0/
#
# To request access to the AlphaFold 3 model parameters, follow the process set
# out at https://github.com/google-deepmind/alphafold3. You may only use these
# if received directly from Google. Use is subject to terms of use available at
# https://github.com/google-deepmind/alphafold3/blob/main/WEIGHTS_TERMS_OF_USE.md

FROM nvidia/cuda:12.6.0-base-ubuntu22.04

# Some RUN statements are combined together to make Docker build run faster.
# Get latest package listing and install the build/runtime prerequisites:
# - git is required for pyproject.toml toolchain's use of CMakeLists.txt.
# - gcc/g++/make/zlib1g-dev are needed to compile HMMER (and native Python
#   extensions) below; the `-base` CUDA image ships no compiler toolchain.
# apt-get (rather than apt) is used because apt's CLI is not script-stable.
# `update` and `install` share one layer so the package index is never stale,
# and the index cache is removed in the same layer to keep the image small.
RUN apt-get update --quiet \
    && apt-get install --yes --quiet \
        g++ \
        gcc \
        git \
        make \
        software-properties-common \
        wget \
        zlib1g-dev \
    && rm -rf /var/lib/apt/lists/*

# Get apt repository of specific Python versions. Then install Python. Tell APT
# this isn't an interactive TTY to avoid timezone prompt when installing.
# DEBIAN_FRONTEND is set per-command (not via ENV) so it does not leak into the
# runtime environment. add-apt-repository refreshes the package index itself.
RUN add-apt-repository --yes ppa:deadsnakes/ppa \
    && DEBIAN_FRONTEND=noninteractive apt-get install --yes --quiet \
        python3-pip \
        python3.11 \
        python3.11-dev \
        python3.11-venv \
    && rm -rf /var/lib/apt/lists/*

# Create a virtual environment and put it (plus HMMER, installed below) first
# on PATH so unversioned `python3`/`pip3` resolve to the 3.11 venv.
RUN python3.11 -m venv /alphafold3_venv
ENV PATH="/hmmer/bin:/alphafold3_venv/bin:$PATH"

# Install HMMER. Do so before copying the source code, so that docker can cache
# the image layer containing HMMER.
# Steps are chained with `&&` (the original used `;`) so any failed step aborts
# the build instead of silently producing an image with a broken HMMER.
RUN mkdir /hmmer_build /hmmer \
    && wget --quiet http://eddylab.org/software/hmmer/hmmer-3.4.tar.gz \
        --directory-prefix /hmmer_build \
    && (cd /hmmer_build && tar zxf hmmer-3.4.tar.gz && rm hmmer-3.4.tar.gz) \
    && (cd /hmmer_build/hmmer-3.4 && ./configure --prefix /hmmer) \
    && (cd /hmmer_build/hmmer-3.4 && make -j8) \
    && (cd /hmmer_build/hmmer-3.4 && make install) \
    && (cd /hmmer_build/hmmer-3.4/easel && make install) \
    && rm -R /hmmer_build

# Copy the AlphaFold 3 source code from the local machine to the container and
# set the working directory to there.
COPY . /app/alphafold
WORKDIR /app/alphafold

# Install the Python dependencies AlphaFold 3 needs. --no-cache-dir keeps the
# pip download cache out of the image layers.
RUN pip3 install --no-cache-dir -r dev-requirements.txt
RUN pip3 install --no-cache-dir --no-deps .
# Build chemical components database (this binary was installed by pip).
RUN build_data

# To work around a known XLA issue causing the compilation time to greatly
# increase, the following environment variable setting XLA flags must be enabled
# when running AlphaFold 3:
ENV XLA_FLAGS="--xla_gpu_enable_triton_gemm=false"
# Memory settings used for folding up to 5,120 tokens on A100 80 GB.
ENV XLA_PYTHON_CLIENT_PREALLOCATE=true
ENV XLA_CLIENT_MEM_FRACTION=0.95

CMD ["python3", "run_alphafold.py"]
13
docker/README.md
Normal file
13
docker/README.md
Normal file
@@ -0,0 +1,13 @@
Build the AlphaFold 3 container image from the repository root:

```shell
docker build -t alphafold3 -f docker/Dockerfile .
```
||||
Then run AlphaFold 3 inside the container. Note that boolean flags must use the
`--flag=value` form (`--run_data_pipeline true` would be parsed as a positional
argument):

```shell
python run_alphafold.py \
    --db_dir=/app/alphafold/alphafold3_database \
    --json_path=/app/alphafold/alphafold_input.json \
    --model_dir=/app/alphafold/models \
    --output_dir=/app/alphafold/af_output \
    --run_data_pipeline=true \
    --run_inference=true
```
||||
67
docker/docker-compose.yml
Normal file
67
docker/docker-compose.yml
Normal file
# Compose service definition for running AlphaFold 3.
# NOTE: the top-level `version:` key is obsolete in Compose V2 and has been
# removed; Compose infers the schema from the file contents.

services:
  alphafold3:
    build:
      context: .
      dockerfile: Dockerfile
      # NOTE(review): the Dockerfile declares no ARG instructions, so the build
      # args previously passed here (PYTHON_VERSION, TAG_VERSION) were ignored.
      # Kept commented out for reference until the Dockerfile accepts them.
      # args:
      #   PYTHON_VERSION: "3.10"
      #   TAG_VERSION: "12.4.1"
    # env_file:
    #   - .env
    # Host paths for the genetic databases, model parameters and input JSON,
    # mounted to the locations run_alphafold.py expects inside the container.
    volumes:
      - /mnt/d/alphafold3_database:/app/alphafold/alphafold3_database
      - /mnt/d/models:/app/alphafold/models
      - /mnt/d/alphafold_input.json:/app/alphafold/alphafold_input.json
    container_name: alphafold3
    pull_policy: if_not_present
    # Unlimited locked memory, as commonly required for pinned GPU buffers.
    ulimits:
      memlock:
        soft: -1
        hard: -1
    restart: unless-stopped
    image: cford38/alphafold3:latest
    # SECURITY NOTE(review): `privileged: true` grants full host access and
    # already implies every capability, so the `cap_add` entries below are
    # redundant. GPU access is already provided by the `deploy.resources`
    # reservation; consider removing these three settings.
    privileged: true
    cap_add:
      - ALL
      - CAP_SYS_PTRACE
    # Large shared memory segment; size to the host's available RAM.
    shm_size: '32gb'
    ipc: host
    # ports:
    #   - 3228:2222
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
    network_mode: host
    # networks:
    #   - network_finetune
    # Keep the container alive so jobs can be started with `docker exec`.
    command: ["tail", "-f", "/dev/null"]
    # Reserve all NVIDIA GPUs for this service.
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

# networks:
#   network_finetune:
#     name: network_finetune
2
docker/dockerignore
Normal file
2
docker/dockerignore
Normal file
# Keep the Docker build context minimal: neither this ignore file nor the
# Dockerfile itself is needed by `COPY . /app/alphafold` inside the image.
# NOTE(review): for Docker to pick this file up it must be named `.dockerignore`
# (with a leading dot) in the build-context root — confirm the on-disk name.
dockerignore
Dockerfile
||||
Reference in New Issue
Block a user