# syntax=docker/dockerfile:1
# NOTE: Building this image requires Docker >= 23.0 (BuildKit heredoc support).
#
# For reference:
# - https://docs.docker.com/build/dockerfile/frontend/#stable-channel
#
# NOTE(review): this file was recovered from a paste that stripped everything
# between "<" and ">" (heredoc openers, #include headers, several build steps).
# Lost spans are reconstructed below and marked with "TODO(review)" — confirm
# each against the original before building.

ARG TAG_VERSION="12.4.1"
FROM nvidia/cuda:${TAG_VERSION}-cudnn-devel-ubuntu22.04

# Optional build-time proxy settings (passed via --build-arg).
# NOTE(review): these leak into the runtime environment via ENV; consider
# dropping the ENV lines if the proxy is build-only.
ARG HTTP_PROXY
ARG HTTPS_PROXY
ENV http_proxy=${HTTP_PROXY}
ENV https_proxy=${HTTPS_PROXY}

ARG DEBIAN_FRONTEND="noninteractive"
ENV DEBIAN_FRONTEND=${DEBIAN_FRONTEND}

# NOTE(review): a default root password baked into the image is insecure;
# prefer key-only auth. Kept for backward compatibility with existing callers.
ARG ROOT_PASSWD="root"
ENV ROOT_PASSWD=${ROOT_PASSWD}
ENV SSH_PORT=2222

WORKDIR /root

# -o pipefail so piped commands (chpasswd, grep below) fail the build on error.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# base tools + sshd configuration for in-container SSH (e.g. MPI over ssh)
RUN <<EOT
set -eux
# TODO(review): the original package-install lines were stripped from the paste;
# openssh-server (and basic tools) must exist before the sshd_config edits below.
apt-get update
apt-get install -y --no-install-recommends \
    ca-certificates curl git openssh-server wget
mkdir -p ~/.ssh
# TODO(review): the original ~/.ssh/config content was stripped; a typical
# cluster-internal config is assumed here.
cat <<EOF > ~/.ssh/config
Host *
    StrictHostKeyChecking no
    UserKnownHostsFile /dev/null
EOF
cp /etc/ssh/sshd_config /etc/ssh/sshd_config.bak
sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/' /etc/ssh/sshd_config
sed -i 's/#PubkeyAuthentication yes/PubkeyAuthentication yes/' /etc/ssh/sshd_config
sed -i 's/^\(\s*\)GSSAPIAuthentication yes/\1GSSAPIAuthentication no/' /etc/ssh/ssh_config
sed -i "s/^#Port 22/Port ${SSH_PORT}/" /etc/ssh/sshd_config
# sudo removed: this RUN already executes as root (and sudo may not be installed).
sed -i "s/# Port 22/Port ${SSH_PORT}/" /etc/ssh/ssh_config
# <<< y auto-answers the overwrite prompt if a key already exists.
ssh-keygen -t rsa -b 4096 -f /root/.ssh/id_rsa -N "" <<< y
cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys2
chmod 600 /root/.ssh/authorized_keys
chmod 600 /root/.ssh/authorized_keys2
# -p so a cached/partial rebuild does not fail if the dir already exists.
mkdir -p /var/run/sshd
echo "root:${ROOT_PASSWD}" | chpasswd
mkdir -p ~/.pip
# timezone
apt-get install -y --no-install-recommends tzdata
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo "Asia/Shanghai" > /etc/timezone
# install pixi
# curl -fsSL https://pixi.sh/install.sh | bash
rm -rf /var/lib/apt/lists/*
EOT

ENV CUDA_HOME=/usr/local/cuda
ENV PATH=/opt/modules/bin:$PATH
ENV LIBRARY_PATH=/opt/modules/lib:$LIBRARY_PATH

COPY ./file/modules-5.4.0.tar.gz /root
## install Environment Modules to manage environment variables,
## downloaded from https://modules.sourceforge.net/
# usage: https://nscc.mrzhenggang.com/user-manual/config-env-with-module
RUN <<EOT
set -eux
# TODO(review): the extract/configure/make lines were stripped; the standard
# Environment Modules build into /opt/modules is assumed.
tar -xzf modules-5.4.0.tar.gz
cd modules-5.4.0
./configure --prefix=/opt/modules
make -j$(nproc)
make install
cd /root
rm -rf modules-5.4.0 modules-5.4.0.tar.gz
echo "source /opt/modules/init/profile.sh" >> /etc/profile
echo "source /opt/modules/init/profile.sh" >> ~/.bashrc
# /opt/modules/bin/modulecmd
EOT

ARG FFTW_VERSION="3.3.10"
ENV FFTW_VERSION=${FFTW_VERSION}
ENV PATH=/usr/local/fftw:$PATH

# Install FFTW
RUN <<EOT
set -eux
# TODO(review): the FFTW download/build lines were stripped; the standard
# shared+threaded build into /usr/local/fftw is assumed.
wget http://www.fftw.org/fftw-${FFTW_VERSION}.tar.gz
tar -xzf fftw-${FFTW_VERSION}.tar.gz
cd fftw-${FFTW_VERSION}
./configure --prefix=/usr/local/fftw --enable-shared --enable-threads
make -j$(nproc)
make install
cd /root
rm -rf fftw-${FFTW_VERSION} fftw-${FFTW_VERSION}.tar.gz
EOT

# Install UCX (high-performance communication layer used by Open MPI)
RUN <<EOT
set -eux
# TODO(review): the UCX download/configure/make lines were stripped; only the
# verification commands below survived the paste.
# Bandwidth test between two hosts:
# ucx_perftest <server-ip> -t bw -p <port> -n <iterations>
# Inspect UCX configuration:
# ucx_read_profile
# Check UCX-backed MPI processes:
# mpirun -np 2 -mca pml ucx -x UCX_NET_DEVICES=mlx5_0:1 ./your_mpi_program
# CUDA support check
ucx_info -c
ucx_info -d
# ompi_info | grep ucx
EOT

# Install Open MPI
ENV MPI_HOME=/usr/local/openmpi
ENV PATH=${MPI_HOME}/bin:/usr/bin:$PATH
ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${MPI_HOME}/lib:/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH
ENV LIBRARY_PATH=/usr/local/cuda/lib64:${LIBRARY_PATH}
ENV CPATH=/usr/local/cuda/include:${MPI_HOME}/include:${CUDA_HOME}/include:$CPATH
# export C_INCLUDE_PATH=/usr/local/cuda/include:$C_INCLUDE_PATH
# export LIBRARY_PATH=/usr/local/cuda/lib64:$LIBRARY_PATH
# export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH

# Smoke-test CUDA-aware MPI: compile a minimal MPI+CUDA program.
RUN <<EOT
set -eux
# TODO(review): the #include header names were stripped from the paste;
# <mpi.h>, <stdio.h> and <cuda_runtime.h> are the obvious reconstruction.
cat << EOF > ./test_mpi_cuda.cu
#include <mpi.h>
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void hello_cuda() {
    printf("Hello from CUDA kernel! \nThread id: %d\n", threadIdx.x);
}

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    printf("Hello from MPI process %d!\n", rank);
    // Launch CUDA kernel
    hello_cuda<<<1, 10>>>();
    cudaDeviceSynchronize();  // Wait for the CUDA kernel to finish
    MPI_Finalize();
    return 0;
}
EOF
nvcc -o test_mpi_cuda test_mpi_cuda.cu -I${CUDA_HOME}/include -I${MPI_HOME}/include -L${MPI_HOME}/lib -lcudart -lmpi
# Running needs a GPU, so it stays commented out at build time:
# mpirun --allow-run-as-root -np 2 ./test_mpi_cuda
ompi_info | grep "MPI extensions"
EOT

# Install PLUMED
ARG PLUMED_VERSION="2.9.2"
ENV PLUMED_VERSION=${PLUMED_VERSION}
ENV LD_LIBRARY_PATH=/usr/local/plumed/lib:$LD_LIBRARY_PATH
ENV PATH=/usr/local/plumed:/usr/local/plumed/bin:$PATH
RUN <<EOT
set -eux
# TODO(review): the PLUMED download/configure/make lines were stripped; the
# standard build into /usr/local/plumed is assumed.
wget https://github.com/plumed/plumed2/releases/download/v${PLUMED_VERSION}/plumed-${PLUMED_VERSION}.tgz
tar -xzf plumed-${PLUMED_VERSION}.tgz
cd plumed-${PLUMED_VERSION}
./configure --prefix=/usr/local/plumed
make -j$(nproc)
make install
cd /root
rm -rf plumed-${PLUMED_VERSION} plumed-${PLUMED_VERSION}.tgz
echo "source /usr/local/plumed/sourceme.sh" >> /root/.bashrc
EOT

# PyAutoFEP install; supported version: GROMACS 2021.7
# (GROMACS 2022.5 / 2023 hit "excluded atom distance beyond cut-off" issues;
# 2021.7 keeps simulations stable and free-energy estimates convergent.)
# https://github.com/luancarvalhomartins/PyAutoFEP/blob/master/docs/Manual.pdf
# https://github.com/luancarvalhomartins/PyAutoFEP/tree/master/docs/tutorial01
ENV CPLUS_INCLUDE_PATH=/usr/include/openbabel3
ENV LIBRARY_PATH=/usr/lib:/usr/local/lib:${LIBRARY_PATH}
RUN <<EOT
set -eux
# FIX: "openbabel>3.0.0" must be quoted — unquoted, the ">" is shell
# redirection and pip would receive the bare name "openbabel".
pip install "rdkit>2.0,<3.0" alchemlyb==0.6.0 pymbar==3.0.5 "openbabel>3.0.0" matplotlib numpy biopython mdanalysis pytest packaging
# or openbabel use 2.4.1
git clone https://github.com/luancarvalhomartins/PyAutoFEP.git
EOT

COPY file/Amber24.tar.bz2 file/AmberTools24.tar.bz2 /root
COPY file/l_HPCKit_p_2024.2.1.79_offline.sh file/l_onemkl_p_2024.2.2.17_offline.sh /root
COPY file/boost_1_86_0.tar.gz /root
ENV DOWNLOAD_MINICONDA="False"

# install HPCKit and oneMKL
RUN <<EOT
set -eux
# TODO(review): the silent-installer invocations were stripped; the standard
# Intel offline-installer CLI (accept EULA, silent mode) is assumed.
sh ./l_HPCKit_p_2024.2.1.79_offline.sh -a --silent --eula accept --install-dir /opt/intel
sh ./l_onemkl_p_2024.2.2.17_offline.sh -a --silent --eula accept --install-dir /opt/intel/onemkl
# echo "source /opt/intel/setvars.sh" >> /etc/profile
# echo "source /opt/intel/onemkl/setvars.sh" >> /etc/profile
echo "source /opt/intel/setvars.sh" >> ~/.bashrc
echo "source /opt/intel/onemkl/setvars.sh" >> ~/.bashrc
mkdir -p /opt/modulefiles/intel
chmod +x /opt/intel/setvars.sh
chmod +x /opt/intel/onemkl/setvars.sh
# curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
# echo "source $HOME/.cargo/env" >> ~/.bashrc
# cargo install modenv
EOT

# install ambertools (Boost first, then unpack the Amber sources)
ENV MODULEPATH=/opt/modulefiles/boost:$MODULEPATH
RUN <<EOT
set -eux
# TODO(review): the Boost extract/bootstrap lines were stripped; the standard
# bootstrap into /opt/boost (matching the modulefile below) is assumed.
tar -xzf boost_1_86_0.tar.gz
cd boost_1_86_0
./bootstrap.sh --prefix=/opt/boost
# echo "using mpi ;" >> project-config.jam
# # echo "using mpi : /opt/intel/mpi/2021.13/bin/mpicxx ;" >> project-config.jam
./b2 -j$(nproc) --layout=tagged link=static,shared threading=multi install
mkdir -p /opt/modulefiles/boost
# use modulefile to load boost; commands:
# module load boost/1.86.0-openmpi-5.1.0a1 | module list | module avail
cat << EOF > /opt/modulefiles/boost/1.86.0-openmpi-5.1.0a1
#%Module1.0
set prefix /opt/boost
# 设置库路径和头文件路径,方便编译器找到 Boost
prepend-path LD_LIBRARY_PATH \$prefix/lib
prepend-path CPATH \$prefix/include
prepend-path LIBRARY_PATH \$prefix/lib
prepend-path PATH \$prefix/bin
EOF
cd /root
# Unpack Amber24
tar -xjvf Amber24.tar.bz2
# Unpack AmberTools24
tar -xjvf AmberTools24.tar.bz2
# Clean up the extracted .tar.bz2 archives (optional)
# rm Amber24.tar.bz2 AmberTools24.tar.bz2
EOT

# Documents the SSH contract (does not publish the port by itself).
EXPOSE ${SSH_PORT}

# TODO(review): the source ended with a truncated "RUN <" — one final RUN
# instruction was lost entirely in the paste and could not be reconstructed.