- 适用于在 5090 组成的 HPC 上配置带有 nequip 模型的 lammps
- 配置环境: bash env_create.sh
- 安装 allegro: sbatch build_allegro.sh
- 安装 lammps: sbatch build_lammps.sh
- 测试: sbatch test.sh
Last active
December 11, 2025 13:23
-
-
Save bio-punk/d9cecb418bc376fa2b69ad1ddc83b591 to your computer and use it in GitHub Desktop.
lammps with nequip in blackwell #x86 #build #lammps #blackwell
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash
#SBATCH --gpus=1
# NOTE(review): the original had a bare "#SBATCH" line here — add the
# partition/account directives your cluster requires.
#
# Install the allegro Python package into the conda environment, on a
# compute node. Assumes env_create.sh has already created the environment
# and cloned the sources.
set -eo pipefail  # -u omitted: conda's activate scripts reference unset vars

# Proxy: forward login-node port 7897 so the compute node has internet access.
LOGIN_NODE=ln08
ssh -CfNg -L 7897:127.0.0.1:7897 "${LOGIN_NODE}" \
  || echo "warning: ssh tunnel setup failed (tunnel may already exist)" >&2
export https_proxy=http://127.0.0.1:7897
export http_proxy=http://127.0.0.1:7897
git config --global http.proxy http://127.0.0.1:7897
git config --global https.proxy http://127.0.0.1:7897

CONDA_ENV_NAME=nequip_dev251211
PYTHON_VER=3.11
LAMMPS_VER=stable_22Jul2025_update2
# Manually edit: working directory holding all sources.
ALL_PREFIX=
# Fail immediately with a clear message instead of cd-ing into "/allegro".
: "${ALL_PREFIX:?edit ALL_PREFIX in this script before running}"
LAMMPS_SRC=${ALL_PREFIX}/lammps_${LAMMPS_VER}
ALLEGRO_SRC=${ALL_PREFIX}/allegro
PAIR_NEQUIP_ALLEGRO_SRC=${ALL_PREFIX}/pair_nequip_allegro

# Mirrors. Only PIP_INDEX_URL is used below; ALI_MIRROR and the *_GIT vars
# are kept for parity with the sibling scripts.
ALI_MIRROR="https://mirrors.aliyun.com/pytorch-wheels/cu128/"
export PIP_INDEX_URL=https://mirrors.cernet.edu.cn/pypi/web/simple
LAMMPS_GIT=https://github.com/lammps/lammps.git
ALLEGRO_GIT=https://github.com/mir-group/allegro.git
PAIR_NEQUIP_ALLEGRO_GIT=https://github.com/mir-group/pair_nequip_allegro.git

# Manually edit: path to the conda installation.
source /path/to/conda/etc/profile.d/conda.sh
conda activate "${CONDA_ENV_NAME}"

module load cuda/12.8 cudnn/8.9.6.50_cuda12
# Fail early with a clear message if the modules did not export these.
export CUDNN_ROOT=${CUDNN_HOME:?cudnn module did not set CUDNN_HOME}
export CUDA_ROOT=${CUDA_HOME:?cuda module did not set CUDA_HOME}

cd "${ALLEGRO_SRC}"
pip install -v "numpy<2" .
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash
#SBATCH --gpus=1
# NOTE(review): the original had a bare "#SBATCH" line here — add the
# partition/account directives your cluster requires.
#
# Patch LAMMPS with pair_nequip_allegro and build it with Kokkos/CUDA for
# Blackwell (sm_120) GPUs, then install into the conda prefix.
set -eo pipefail  # -u omitted: conda's activate scripts reference unset vars

# --- user-tunable variables -----------------------------------------------
CONDA_ENV_NAME=nequip_dev251211
PYTHON_VER=3.11
LAMMPS_VER=stable_22Jul2025_update2
PAIR_NEQUIP_ALLEGRO_VER=v0.7.0
# Manually edit: working directory holding all sources.
ALL_PREFIX=
: "${ALL_PREFIX:?edit ALL_PREFIX in this script before running}"

# Source directories.
LAMMPS_SRC=${ALL_PREFIX}/lammps_${LAMMPS_VER}
ALLEGRO_SRC=${ALL_PREFIX}/allegro
PAIR_NEQUIP_ALLEGRO_SRC=${ALL_PREFIX}/pair_nequip_allegro_${PAIR_NEQUIP_ALLEGRO_VER}

# Login node used as the proxy endpoint.
LOGIN_NODE=ln08
# Proxy: forward login-node port 7897 so the compute node has internet access.
ssh -CfNg -L 7897:127.0.0.1:7897 "${LOGIN_NODE}" \
  || echo "warning: ssh tunnel setup failed (tunnel may already exist)" >&2
export https_proxy=http://127.0.0.1:7897
export http_proxy=http://127.0.0.1:7897
git config --global http.proxy http://127.0.0.1:7897
git config --global https.proxy http://127.0.0.1:7897

# Mirrors. ALI_MIRROR, ALLEGRO_SRC and the *_GIT vars are kept for parity
# with the sibling scripts; they are not used below.
ALI_MIRROR="https://mirrors.aliyun.com/pytorch-wheels/cu128/"
export PIP_INDEX_URL=https://mirrors.cernet.edu.cn/pypi/web/simple
LAMMPS_GIT=https://github.com/lammps/lammps.git
ALLEGRO_GIT=https://github.com/mir-group/allegro.git
PAIR_NEQUIP_ALLEGRO_GIT=https://github.com/mir-group/pair_nequip_allegro.git

# Conda environment.
# Manually edit: path to the conda installation.
source /path/to/conda/etc/profile.d/conda.sh
conda activate "${CONDA_ENV_NAME}"

# CUDA environment.
module load cuda/12.8 cudnn/8.9.6.50_cuda12
export CUDNN_ROOT=/data/apps/cudnn/8.9.6.50_cuda12
export CUDA_ROOT=${CUDA_HOME:?cuda module did not set CUDA_HOME}
unset CMAKE_PREFIX_PATH
export TORCH_CUDA_ARCH_LIST="10.0;12.0"
export USE_CUDNN=1

# Make nvvm (cicc) visible to the toolchain.
export PATH=$CUDA_HOME/nvvm/bin:$PATH
export LD_LIBRARY_PATH=$CUDA_HOME/nvvm/lib64:$LD_LIBRARY_PATH
export LIBRARY_PATH=$CUDA_HOME/nvvm/lib64:$LIBRARY_PATH
export CPATH=$CUDA_HOME/nvvm/include:$CPATH

# Force the system GCC toolchain for the OpenMPI compiler wrappers.
export CC=/usr/bin/x86_64-linux-gnu-gcc
export CXX=/usr/bin/x86_64-linux-gnu-g++
export FC=/usr/bin/x86_64-linux-gnu-gfortran
export OMPI_CC=$CC
export OMPI_CXX=$CXX
export OMPI_FC=$FC

# Install the pre-edited nvcc_wrapper (prepared by env_create.sh in the
# submission directory) into the Kokkos tree.
cp nvcc_wrapper "${LAMMPS_SRC}/lib/kokkos/bin/nvcc_wrapper"

# Patch LAMMPS with the nequip/allegro pair style.
cd "${PAIR_NEQUIP_ALLEGRO_SRC}"
chmod +x patch_lammps.sh
./patch_lammps.sh "${LAMMPS_SRC}"

# Per-job build directory.
cd "${LAMMPS_SRC}"
mkdir -p "build_${SLURM_JOB_ID}"
cd "build_${SLURM_JOB_ID}"

# Dump the environment into the job log for debugging.
export

# BUG FIX (review): the original checked $? only after `make` yet printed
# "CMake configuration failed." — a cmake failure went undetected and the
# message was attached to the wrong step. Each step now has its own check.
cmake \
  -D CMAKE_BUILD_TYPE=Debug \
  -D CMAKE_CXX_STANDARD=17 \
  -D CMAKE_CUDA_STANDARD=17 \
  -D CMAKE_CXX_COMPILER=$LAMMPS_SRC/lib/kokkos/bin/nvcc_wrapper \
  -D BUILD_MPI=ON \
  -D PKG_KOKKOS=ON \
  -D PKG_GPU=ON \
  -D CUDA_ARCH_LIST=12.0 \
  -D CUDAToolkit_ROOT=$CUDA_HOME \
  -D CUDA_TOOLKIT_ROOT_DIR=$CUDA_HOME \
  -D FFT=KISS \
  -D GPU_API=cuda \
  -D CMAKE_CUDA_ARCHITECTURES="120" \
  -D GPU_ARCH=sm_120 \
  -D Kokkos_ENABLE_CUDA=ON \
  -D Kokkos_ARCH_AMDAVX=ON \
  -D Kokkos_ARCH_BLACKWELL120=ON \
  -D Kokkos_ENABLE_CUDA_LAMBDA=ON \
  -D MKL_INCLUDE_DIR="$CONDA_PREFIX/include" \
  -D CMAKE_PREFIX_PATH="$CUDA_HOME;$CONDA_PREFIX/lib/python$PYTHON_VER/site-packages/torch/share/cmake" \
  -D CMAKE_LIBRARY_PATH=$CUDA_HOME/lib64/stubs \
  -D CMAKE_MPI_C_COMPILER=mpicc \
  -D CMAKE_MPI_CXX_COMPILER=mpicxx \
  -D CMAKE_INSTALL_PREFIX=$CONDA_PREFIX \
  -D Kokkos_ENABLE_OPENMP=yes \
  -D BUILD_OMP=ON \
  -D PKG_OPENMP=ON \
  -D NEQUIP_AOT_COMPILE=ON \
  "$LAMMPS_SRC/cmake" \
  || { echo "CMake configuration failed." >&2; exit 1; }
# Extra cmake debugging flags, if needed:
# -D CMAKE_VERBOSE_MAKEFILE=ON -D CMAKE_MESSAGE_LOG_LEVEL=DEBUG --trace-expand \
echo "CMake configuration succeeded."

make -j16 VERBOSE=1 || { echo "Build failed." >&2; exit 1; }

echo "install path: $CONDA_PREFIX"
make install
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash
# Create the conda environment, install PyTorch (cu128 wheels), and clone the
# LAMMPS / allegro / pair_nequip_allegro sources. Run where network access is
# available (e.g. the login node).
set -x
set -eo pipefail  # -u omitted: conda's activate scripts reference unset vars

CONDA_ENV_NAME=nequip_dev251211
PYTHON_VER=3.11
LAMMPS_VER=stable_22Jul2025_update2
PAIR_NEQUIP_ALLEGRO_VER=v0.7.0
# Manually edit: working directory holding all sources.
ALL_PREFIX=
: "${ALL_PREFIX:?edit ALL_PREFIX in this script before running}"
LAMMPS_SRC=${ALL_PREFIX}/lammps_${LAMMPS_VER}
ALLEGRO_SRC=${ALL_PREFIX}/allegro
PAIR_NEQUIP_ALLEGRO_SRC=${ALL_PREFIX}/pair_nequip_allegro_${PAIR_NEQUIP_ALLEGRO_VER}

# Mirror configuration.
ALI_MIRROR="https://mirrors.aliyun.com/pytorch-wheels/cu128/"
export PIP_INDEX_URL=https://mirrors.cernet.edu.cn/pypi/web/simple
LAMMPS_GIT=https://github.com/lammps/lammps.git
ALLEGRO_GIT=https://github.com/mir-group/allegro.git
PAIR_NEQUIP_ALLEGRO_GIT=https://github.com/mir-group/pair_nequip_allegro.git

# Manually edit: path to the conda installation.
source /path/to/conda/etc/profile.d/conda.sh
module load cuda/12.8 cudnn/8.9.6.50_cuda12

conda create -n "${CONDA_ENV_NAME}" python="${PYTHON_VER}" openmpi "libblas=*=*_mkl" "cmake<=3.29.4" -y
conda activate "${CONDA_ENV_NAME}"

pip install -f "${ALI_MIRROR}" \
  "numpy<2" \
  torch==2.8.0+cu128 torchvision torchaudio

# Sanity checks: report the torch C++ ABI flag and the cmake prefix that the
# LAMMPS build will link against.
python -c "import torch; print(torch._C._GLIBCXX_USE_CXX11_ABI)"
python -c 'import torch;print(torch.utils.cmake_prefix_path)'

git config --global http.proxy http://127.0.0.1:7897
git config --global https.proxy http://127.0.0.1:7897
git clone -b "${LAMMPS_VER}" "${LAMMPS_GIT}" "${LAMMPS_SRC}"
git clone "${ALLEGRO_GIT}" "${ALLEGRO_SRC}"
git clone -b "${PAIR_NEQUIP_ALLEGRO_VER}" "${PAIR_NEQUIP_ALLEGRO_GIT}" "${PAIR_NEQUIP_ALLEGRO_SRC}"

# Copy nvcc_wrapper out for manual editing; build_lammps.sh copies it back:
# cp nvcc_wrapper ${LAMMPS_SRC}/lib/kokkos/bin/nvcc_wrapper
# Edits to apply to ./nvcc_wrapper before building:
# default_arch="sm_120"
# host_compiler='/usr/bin/x86_64-linux-gnu-g++'
# cuda_args="$cuda_args --expt-extended-lambda"
cp "${LAMMPS_SRC}/lib/kokkos/bin/nvcc_wrapper" .

cp "${CONDA_PREFIX}/lib/python${PYTHON_VER}/site-packages/torch/share/cmake/Caffe2/public/cuda.cmake" .
# Not needed when torch >= 2.8.0:
# cp ./cuda.cmake ${CONDA_PREFIX}/lib/python3.11/site-packages/torch/share/cmake/Caffe2/public/cuda.cmake
# Edit line ~172 so nvtx3 is found:
# set(USE_SYSTEM_NVTX ON)
# Edit line ~175 so nvtx3 is found:
# find_path(nvtx3_dir NAMES nvtx3 PATHS "/data/apps/cuda/12.8/include")
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash
#SBATCH --gpus=1
# NOTE(review): the original had a bare "#SBATCH" line here — add the
# partition/account directives your cluster requires.
#
# Smoke test: run the installed lmp binary under mpirun to verify the build
# loads (prints the LAMMPS help text and exits).
set -eo pipefail  # -u omitted: conda's activate scripts reference unset vars

CONDA_ENV_NAME=nequip_dev251211
PYTHON_VER=3.11
# Manually edit: path to the conda installation.
source /path/to/conda/etc/profile.d/conda.sh
conda activate "${CONDA_ENV_NAME}"
# Make libtorch visible to lmp; ${LD_LIBRARY_PATH:-} tolerates an unset var.
export LD_LIBRARY_PATH=${CONDA_PREFIX}/lib/python${PYTHON_VER}/site-packages/torch/lib:${LD_LIBRARY_PATH:-}
module load cuda/12.8 cudnn/8.9.6.50_cuda12
mpirun -n 1 lmp -h
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment