Update: add test Dockerfile

This commit is contained in:
mm644706215
2025-01-18 17:49:21 +08:00
parent 582a5043e2
commit 4766780404
3 changed files with 411 additions and 1 deletion

View File

@@ -298,7 +298,7 @@ FROM nvidia/cuda:${TAG_VERSION}
ENV CUDA_HOME=/usr/local/cuda
ENV PATH=$CUDA_HOME/bin:$PATH
ENV LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH
ENV LD_LIBRARY_PATH=$CUDA_HOME/lib64:/usr/local/plumed/lib:$LD_LIBRARY_PATH
ENV CPATH=$CUDA_HOME/include:$CPATH
ENV LIBRARY_PATH=$CUDA_HOME/lib64:$LIBRARY_PATH
ARG SSH_PORT=2222

View File

@@ -0,0 +1,363 @@
# syntax=docker/dockerfile:1.3-labs
# NOTE: Building this image requires Docker version >= 23.0.
# Stage 1: Build environment
ARG TAG_VERSION="12.4.1-cudnn-devel-ubuntu22.04"
FROM nvidia/cuda:${TAG_VERSION} AS build-env
# CUDA toolchain paths used by every compile step below.
ENV CUDA_HOME=/usr/local/cuda
ENV PATH=$CUDA_HOME/bin:$PATH
ENV LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH
ENV CPATH=$CUDA_HOME/include:$CPATH
ENV LIBRARY_PATH=$CUDA_HOME/lib64:$LIBRARY_PATH
ARG HTTP_PROXY
ARG HTTPS_PROXY
# NOTE(review): baking the proxy into ENV leaks it into the runtime image
# and `docker history`; prefer build-time-only usage.
ENV http_proxy=${HTTP_PROXY}
ENV https_proxy=${HTTPS_PROXY}
ARG DEBIAN_FRONTEND="noninteractive"
# NOTE(review): DEBIAN_FRONTEND persists into the runtime environment here;
# consider setting it per-RUN instead of via ENV.
ENV DEBIAN_FRONTEND=${DEBIAN_FRONTEND}
ARG ROOT_PASSWD="root"
# WARNING(review): ARG/ENV values are visible in `docker history`; never pass
# a real password this way (use a BuildKit secret mount instead).
ENV ROOT_PASSWD=${ROOT_PASSWD}
# Port sshd listens on (consumed by the SSH setup step and EXPOSE).
ENV SSH_PORT=2222
WORKDIR /root
SHELL ["/bin/bash", "-c"]
# Replace sources.list with the Aliyun mirror (faster from CN networks).
RUN <<EOT
#!/bin/bash
# Fail the build if any step fails (original script ignored errors).
set -e
# Keep a backup so the stock Ubuntu mirror can be restored if needed.
cp /etc/apt/sources.list /etc/apt/sources.list_bak
cat << EOF > /etc/apt/sources.list
deb https://mirrors.aliyun.com/ubuntu/ jammy main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ jammy main restricted universe multiverse
deb https://mirrors.aliyun.com/ubuntu/ jammy-security main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ jammy-security main restricted universe multiverse
deb https://mirrors.aliyun.com/ubuntu/ jammy-updates main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ jammy-updates main restricted universe multiverse
deb https://mirrors.aliyun.com/ubuntu/ jammy-backports main restricted universe multiverse
deb-src https://mirrors.aliyun.com/ubuntu/ jammy-backports main restricted universe multiverse
EOF
EOT
# Install base tools for compilation, in one layer.
# NOTE: apt lists are intentionally NOT removed here — a later layer installs
# tzdata and relies on them; everything is purged in the final cleanup layer.
RUN <<EOT
#!/bin/bash
# Fail the build if any step fails (original script ignored errors).
set -e
# Use apt-get (stable CLI) instead of `apt`, which warns in scripts (DL3027).
apt-get update && apt-get install -y --no-install-recommends \
  build-essential \
  autotools-dev \
  libaio-dev \
  git \
  libnuma-dev \
  python3-pip \
  python3-venv \
  python3-setuptools \
  python3-dev \
  tcl \
  libtcl8.6 \
  swig \
  libfftw3-dev \
  libfftw3-bin \
  libfftw3-doc \
  libibverbs-dev \
  rdma-core \
  gdb \
  valgrind \
  autoconf \
  automake \
  libtool \
  flex \
  gfortran \
  libfuse3-dev \
  pkg-config \
  cmake \
  bzip2 \
  tar \
  curl \
  wget \
  jq
# nvitop installed in an isolated venv via pipx.
pip install pipx
pipx install nvitop
pipx ensurepath
# Only affects this RUN layer; kept so the pipx PATH update is exercised.
. ~/.bashrc
EOT
# Sanity check: wget must be available for the source downloads below.
RUN echo "Check wget version" && wget --version
# Install Environment Modules 5.4.0 for per-package environment management.
COPY ./file/modules-5.4.0.tar.gz /root
RUN <<EOT
#!/bin/bash
# Fail the build if any step fails (original script ignored errors).
set -e
tar zxvf modules-5.4.0.tar.gz
cd modules-5.4.0
./configure --prefix=/opt/modules --bindir=/opt/modules/bin --libdir=/opt/modules/lib --disable-libtclenvmodules
make -j$(nproc)
make install
# Make the `module` command available in login and interactive shells.
echo "source /opt/modules/init/profile.sh" >> /etc/profile
echo "source /opt/modules/init/profile.sh" >> ~/.bashrc
# Remove the build tree in the SAME layer — deleting it in a later layer
# would not shrink this one.
cd /root
rm -rf modules-5.4.0
EOT
# Install FFTW (single-precision, SIMD-enabled, shared libs).
ARG FFTW_VERSION="3.3.10"
ENV FFTW_VERSION=${FFTW_VERSION}
ENV PATH=/usr/local/fftw:$PATH
RUN <<EOT
#!/bin/bash
# Fail the build if any step fails (original script ignored errors).
set -e
# NOTE(review): plain-HTTP download without checksum verification — consider
# HTTPS plus a sha256 check for supply-chain safety.
wget http://www.fftw.org/fftw-${FFTW_VERSION}.tar.gz
tar zxvf fftw-${FFTW_VERSION}.tar.gz
cd fftw-${FFTW_VERSION}
./configure --prefix=/usr/local/fftw --enable-sse2 --enable-avx --enable-float --enable-avx2 --enable-shared
make -j$(nproc)
make install
# Clean the source tree in the same layer to keep it small.
cd /root
rm -rf fftw-${FFTW_VERSION} fftw-${FFTW_VERSION}.tar.gz
EOT
# Install UCX (communication framework used by OpenMPI below).
ENV UCX_PREFIX=/usr/local/ucx
ENV PATH=$UCX_PREFIX/bin:$PATH
ENV LD_LIBRARY_PATH=$UCX_PREFIX/lib:$LD_LIBRARY_PATH
ENV CPATH=$UCX_PREFIX/include:$CPATH
ENV LIBRARY_PATH=$UCX_PREFIX/lib:$LIBRARY_PATH
RUN <<EOT
#!/bin/bash
# Fail the build if any step fails (original script ignored errors).
set -e
git clone https://github.com/openucx/ucx.git
cd ucx
# NOTE(review): building from an unpinned `master` is not reproducible;
# consider checking out a release tag instead.
git checkout master
./autogen.sh
mkdir build
cd build
../contrib/configure-release --prefix=${UCX_PREFIX} --with-cuda=${CUDA_HOME} --with-mlx5 --with-avx --with-rc --with-ud --with-dc --with-dm --with-verbs
make -j$(nproc)
make install
EOT
# Install OpenMPI 4.1.6 with UCX and CUDA support.
ENV MPI_HOME=/usr/local/openmpi
ENV PATH=${MPI_HOME}/bin:/usr/bin:$PATH
ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${MPI_HOME}/lib:/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH
ENV LIBRARY_PATH=/usr/local/cuda/lib64:${LIBRARY_PATH}
ENV CPATH=/usr/local/cuda/include:${MPI_HOME}/include:${CUDA_HOME}/include:$CPATH
RUN <<EOT
#!/bin/bash
# Fail the build if any step fails (original script ignored errors).
set -e
wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.6.tar.bz2
tar -xf openmpi-4.1.6.tar.bz2
cd openmpi-4.1.6
# git clone --recursive https://github.com/open-mpi/ompi.git
# cd ompi
# git checkout main
# Release tarballs ship a pre-generated ./configure, so the original
# `./autogen.pl` step was unnecessary (and fragile under set -e) — removed.
mkdir build
cd build
../configure FC=gfortran PYTHON=/usr/bin/python3 --with-cuda=/usr/local/cuda --with-cuda-libdir=/usr/local/cuda/lib64 --enable-python-bindings --enable-mpirun-prefix-by-default --prefix=${MPI_HOME} --with-ucx=${UCX_PREFIX} --enable-mca-dso=btl-smcuda,rcache-rgpusm,rcache-gpusm,accelerator-cuda --enable-mca-no-build=btl-uct --without-hcoll
make -j$(nproc)
make install
EOT
# Install PLUMED (free-energy plugin, later patched into GROMACS).
ARG PLUMED_VERSION="2.9.2"
ENV PLUMED_VERSION=${PLUMED_VERSION}
ENV LD_LIBRARY_PATH=/usr/local/plumed/lib:$LD_LIBRARY_PATH
ENV PATH=/usr/local/plumed:/usr/local/plumed/bin:$PATH
RUN <<EOT
#!/bin/bash
# Fail the build if any step fails (original script ignored errors).
set -e
# -f makes curl fail on HTTP errors instead of saving an HTML error page.
curl -fL -o plumed-${PLUMED_VERSION}.tar.gz https://github.com/plumed/plumed2/releases/download/v${PLUMED_VERSION}/plumed-${PLUMED_VERSION}.tgz
tar zxvf plumed-${PLUMED_VERSION}.tar.gz
cd plumed-${PLUMED_VERSION}
./configure --prefix=/usr/local/plumed
make -j$(nproc)
make install
EOT
# Install GROMACS patched with PLUMED support.
ARG GROMACS_VERSION="2021.7"
ENV GROMACS_VERSION=${GROMACS_VERSION}
ENV GROMACS_HOME=/usr/local/gromacs-${GROMACS_VERSION}-plumed-${PLUMED_VERSION}
ENV PATH=$GROMACS_HOME/bin:$PATH
# Target CUDA compute capabilities (7.5 / 8.6 / 8.9).
ARG CUDA_ARCH="75;86;89"
ENV CUDA_ARCH=${CUDA_ARCH}
RUN <<EOT
#!/bin/bash
# Fail the build if any step fails (original script ignored errors).
set -e
wget -c https://ftp.gromacs.org/gromacs/gromacs-${GROMACS_VERSION}.tar.gz
tar zxvf gromacs-${GROMACS_VERSION}.tar.gz
cd gromacs-${GROMACS_VERSION}
# Apply the PLUMED patch in-place (plumed-patch comes from the step above).
plumed-patch -p -e gromacs-${GROMACS_VERSION}
mkdir build
cd build
cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local/gromacs-${GROMACS_VERSION}-plumed-${PLUMED_VERSION} \
  -DGMX_BUILD_OWN_FFTW=ON \
  -DREGRESSIONTEST_DOWNLOAD=ON \
  -DGMX_GPU=CUDA \
  -DGMX_CUDA_TARGET_COMPUTE="${CUDA_ARCH}" \
  -DGMX_CUDA_TARGET_SM="${CUDA_ARCH}" \
  -DGMX_MPI=ON
make -j$(nproc)
make install
# Make gmx available in interactive shells.
echo "source /usr/local/gromacs-${GROMACS_VERSION}-plumed-${PLUMED_VERSION}/bin/GMXRC.bash" >> /root/.bashrc
EOT
# Install AmberTools and Amber (sources are only extracted here).
# FIX: COPY with multiple sources requires the destination to be a directory
# ending in '/'; the original `/root` fails the build.
COPY file/Amber24.tar.bz2 file/AmberTools24.tar.bz2 /root/
RUN <<EOT
#!/bin/bash
# Fail the build if extraction fails (original script ignored errors).
set -e
tar -xjvf Amber24.tar.bz2
tar -xjvf AmberTools24.tar.bz2
EOT
# Install Boost 1.86.0 (with Boost.MPI built against the local OpenMPI).
COPY file/boost_1_86_0.tar.gz /root
ENV MODULEPATH=/opt/modulefiles/boost:$MODULEPATH
RUN <<EOT
#!/bin/bash
# Fail the build if any step fails (original script ignored errors).
set -e
tar zxvf boost_1_86_0.tar.gz
cd boost_1_86_0
./bootstrap.sh --prefix=/opt/boost --with-libraries=all --with-toolset=gcc
# Enable Boost.MPI against the OpenMPI built earlier in this stage.
echo "using mpi : /usr/local/openmpi/bin/mpicxx ;" >> project-config.jam
./b2 -j$(nproc) --layout=tagged link=static,shared threading=multi install
mkdir -p /opt/modulefiles/boost
# NOTE(review): the modulefile name says openmpi-5.1.0a1, but this image
# builds OpenMPI 4.1.6 — confirm and rename if appropriate.
cat << EOF > /opt/modulefiles/boost/1.86.0-openmpi-5.1.0a1
#%Module1.0
set prefix /opt/boost
prepend-path LD_LIBRARY_PATH \$prefix/lib
prepend-path CPATH \$prefix/include
prepend-path LIBRARY_PATH \$prefix/lib
prepend-path PATH \$prefix/bin
EOF
EOT
# Install Intel HPC Toolkit and oneMKL (silent offline installers).
# FIX: COPY with multiple sources requires the destination to be a directory
# ending in '/'; the original `/root` fails the build.
COPY file/l_HPCKit_p_2024.2.1.79_offline.sh file/l_onemkl_p_2024.2.2.17_offline.sh /root/
RUN <<EOT
#!/bin/bash
# Fail the build if any installer fails (original script ignored errors).
set -e
chmod +x l_HPCKit_p_2024.2.1.79_offline.sh
./l_HPCKit_p_2024.2.1.79_offline.sh -a --silent --eula accept --install-dir /opt/intel
chmod +x l_onemkl_p_2024.2.2.17_offline.sh
./l_onemkl_p_2024.2.2.17_offline.sh -a --silent --eula accept --install-dir /opt/intel/onemkl
# Source the Intel environments in interactive shells.
echo "source /opt/intel/setvars.sh" >> ~/.bashrc
echo "source /opt/intel/onemkl/setvars.sh" >> ~/.bashrc
EOT
# SSH setup: password root login on ${SSH_PORT} with a local keypair.
# WARNING(review): PermitRootLogin + password auth is insecure; acceptable
# only for isolated dev/test containers.
RUN <<EOT
#!/bin/bash
# Fail the build if any step fails (original script ignored errors).
set -e
# -p: do not fail if the directory already exists; restrict perms for sshd.
mkdir -p ~/.ssh
chmod 700 ~/.ssh
printf "Host * \n ForwardAgent yes\nHost *\n StrictHostKeyChecking no" > ~/.ssh/config
cp /etc/ssh/sshd_config /etc/ssh/sshd_config.bak
sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/' /etc/ssh/sshd_config
sed -i 's/#PubkeyAuthentication yes/PubkeyAuthentication yes/' /etc/ssh/sshd_config
sed -i 's/^\(\s*\)GSSAPIAuthentication yes/\1GSSAPIAuthentication no/' /etc/ssh/ssh_config
sed -i "s/^#Port 22/Port ${SSH_PORT}/" /etc/ssh/sshd_config
# <<< y answers the overwrite prompt if a key already exists.
ssh-keygen -t rsa -b 4096 -f /root/.ssh/id_rsa -N "" <<< y
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 /root/.ssh/authorized_keys
mkdir -p /var/run/sshd
echo "root:${ROOT_PASSWD}" | chpasswd
EOT
# Timezone setup (Asia/Shanghai).
RUN <<EOT
#!/bin/bash
# Fail the build if any step fails (original script ignored errors).
set -e
# FIX: refresh lists in the same layer — the original relied on lists left
# over from an earlier layer, which is fragile if that layer changes.
apt-get update
apt-get install -y --no-install-recommends tzdata
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo "Asia/Shanghai" > /etc/timezone
EOT
# Install micromamba silently and create the PyAutoFEP conda environment.
WORKDIR /root
ENV BIN_FOLDER=/usr/local/bin
ENV INIT_YES=yes
ENV CONDA_FORGE_YES=yes
ENV PREFIX_LOCATION=/opt/micromamba
RUN <<EOT
#!/bin/bash
# FIX: pipefail so a failed curl download cannot be masked by the bash
# consumer of the pipe; -f makes curl fail on HTTP errors.
set -eo pipefail
# Download and install micromamba
curl -fL https://micro.mamba.pm/install.sh | bash -s -- --yes --prefix=${PREFIX_LOCATION} --bin-dir=${BIN_FOLDER}
# Initialize micromamba for bash
${BIN_FOLDER}/micromamba shell init --shell=bash --root-prefix=${PREFIX_LOCATION}
# Activate micromamba
source /root/.bashrc
# Create a conda environment for PyAutoFEP
${BIN_FOLDER}/micromamba create -n PyAutoFEP -c conda-forge -y rdkit openbabel matplotlib networkx pip
${BIN_FOLDER}/micromamba run -n PyAutoFEP python -m pip install pymbar alchemlyb==0.6.0 matplotlib biopython mdanalysis
EOT
# Install PyAutoFEP (tools are run straight from the repository checkout).
# A single command needs no heredoc; the RUN fails if the clone fails.
RUN git clone https://github.com/luancarvalhomartins/PyAutoFEP.git
# Final cleanup.
# NOTE(review): removals in this separate layer do NOT shrink the layers that
# created these files — enabling the multi-stage runtime image (commented out
# below) is the real size reduction.
RUN <<EOT
#!/bin/bash
set -e
apt-get clean
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
rm -rf /root/modules-5.4.0 /root/fftw-${FFTW_VERSION} /root/ucx /root/ompi /root/plumed-${PLUMED_VERSION} /root/gromacs-${GROMACS_VERSION}
# FIX: also remove the extracted OpenMPI and Boost trees plus all leftover
# source tarballs the original list missed.
rm -rf /root/openmpi-4.1.6 /root/boost_1_86_0 /root/*.tar.gz /root/*.tar.bz2 /root/*.tgz
rm -rf /root/Amber24.tar.bz2 /root/AmberTools24.tar.bz2 /root/boost_1_86_0.tar.gz
EOT
# Stage 2: Runtime image
# FROM nvidia/cuda:${TAG_VERSION}
# ENV CUDA_HOME=/usr/local/cuda
# ENV PATH=$CUDA_HOME/bin:$PATH
# ENV LD_LIBRARY_PATH=$CUDA_HOME/lib64:/usr/local/plumed/lib:$LD_LIBRARY_PATH
# ENV CPATH=$CUDA_HOME/include:$CPATH
# ENV LIBRARY_PATH=$CUDA_HOME/lib64:$LIBRARY_PATH
# ARG SSH_PORT=2222
# WORKDIR /root
# SHELL ["/bin/bash", "-c"]
# ARG GROMACS_VERSION="2021.7"
# ARG PLUMED_VERSION="2.9.2"
# # Copy necessary files from build stage
# COPY --from=build-env /usr/local/fftw /usr/local/fftw
# COPY --from=build-env /usr/local/ucx /usr/local/ucx
# COPY --from=build-env /usr/local/openmpi /usr/local/openmpi
# COPY --from=build-env /usr/local/plumed /usr/local/plumed
# COPY --from=build-env /usr/local/gromacs-${GROMACS_VERSION}-plumed-${PLUMED_VERSION} /usr/local/gromacs-${GROMACS_VERSION}-plumed-${PLUMED_VERSION}
# COPY --from=build-env /opt/micromamba /opt/micromamba
# COPY --from=build-env /usr/local/bin/micromamba /usr/local/bin/micromamba
# COPY --from=build-env /root/.bashrc /root/.bashrc
# # COPY --from=build-env /root/.condarc /root/.condarc # Copy micromamba config file
# # COPY --from=build-env /root/.mambarc /root/.mambarc # Copy micromamba config file (if exists)
# COPY --from=build-env /opt/intel /opt/intel
# COPY --from=build-env /opt/intel/onemkl /opt/intel/onemkl
# COPY --from=build-env /opt/boost /opt/boost
# COPY --from=build-env /opt/modulefiles /opt/modulefiles
# COPY --from=build-env /root/.ssh /root/.ssh
# # COPY --from=build-env /etc/ssh/sshd_config /etc/ssh/sshd_config
# COPY --from=build-env /var/run/sshd /var/run/sshd
# # Set up micromamba environment
# RUN <<EOT
# #!/bin/bash
# # Add micromamba to PATH
# echo 'export PATH=/usr/local/bin:$PATH' >> /root/.bashrc
# # Initialize micromamba for bash
# eval "$(micromamba shell hook --shell )"
# /usr/local/bin/micromamba shell init --shell=bash --root-prefix=/opt/micromamba
# # Activate micromamba
# source /root/.bashrc
# micromamba self-update
# micromamba config append channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
# micromamba config append channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/
# micromamba config append channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch/
# micromamba config append channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch/linux-64/
# micromamba config append channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/r
# micromamba config append channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/msys2
# micromamba config append channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free
# micromamba config append channels https://mirrors.bfsu.edu.cn/anaconda/cloud/conda-forge/
# micromamba config append channels https://mirrors.bfsu.edu.cn/anaconda/cloud/bioconda/
# # Activate the PyAutoFEP environment
# micromamba activate PyAutoFEP
# mkdir -p ~/.pip
# cat << EOF > ~/.pip/pip.conf
# [global]
# index-url = https://mirrors.aliyun.com/pypi/simple/
# [install]
# trusted-host=mirrors.aliyun.com
# EOF
# EOT
# SSH setup
# EXPOSE is documentation only — publish the port at run time (-p / compose
# `ports`). ${SSH_PORT} resolves to the ENV value (2222) set above.
EXPOSE ${SSH_PORT}
# Run sshd in the foreground as PID 1 (exec form, so it receives SIGTERM).
CMD ["/usr/sbin/sshd", "-D"]

47
docker-compose_amber_staget.yml Executable file
View File

@@ -0,0 +1,47 @@
# Compose service for the GROMACS/Amber build image.
# FIX: the pasted file had lost all indentation, making it invalid YAML;
# reconstructed per the Compose schema with the same keys and values.
version: '3.8'
services:
  gromacs:
    build:
      context: .
      dockerfile: Dockerfile.gromacs_amber_staget
      args:
        CACHEBUST: 1
        TAG_VERSION: "12.4.1-cudnn-devel-ubuntu22.04"
        PLUMED_VERSION: "2.9.2"
        FFTW_VERSION: "3.3.10"
        GROMACS_VERSION: "2021.7"
        BUILDKIT_INLINE_CACHE: 1
    # env_file:
    #   - .env
    volumes:
      - ./data:/data
    container_name: gromacs_amber_staget
    pull_policy: if_not_present
    # Unlimited locked memory, needed by RDMA/CUDA workloads.
    ulimits:
      memlock:
        soft: -1
        hard: -1
    restart: unless-stopped
    image: hotwa/gromacs:ambert
    # NOTE(review): privileged already grants every capability, so the
    # cap_add list below is redundant; consider dropping one of the two.
    privileged: true
    cap_add:
      - ALL
      - CAP_SYS_PTRACE
    shm_size: '16gb'
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
      - TMPDIR=/var/tmp
    ports:
      - "53322:2222"
    # network_mode: host
    # command: ["/usr/sbin/sshd", "-D"]
    # Keep the container alive; sshd can be started manually instead.
    command: ["tail", "-f", "/dev/null"]
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]