Skip to content

Instantly share code, notes, and snippets.

@bio-punk
Last active January 12, 2026 04:46
Show Gist options
  • Select an option

  • Save bio-punk/921e7dfb0419e5cca74e2a6a889ea544 to your computer and use it in GitHub Desktop.

Select an option

Save bio-punk/921e7dfb0419e5cca74e2a6a889ea544 to your computer and use it in GitHub Desktop.
lammps+deepmd+phonopy #lammps #deepmd #build

builtin.cmake

# Hard-code the LAMMPS version number (YYYYMMDD, stable_22Jul2025) so the
# deepmd builtin.cmake does not need to detect it from the source tree.
set(LAMMPS_VERSION_NUMBER 20250722)

nvcc_wrapper

# Defaults for the patched Kokkos nvcc_wrapper: target sm_120 (matches the
# Blackwell/sm_120 flags in build_lammps.sh), use the distro g++ as the host
# compiler, and take `nvcc` from PATH.
default_arch="sm_120" host_compiler=/usr/bin/x86_64-linux-gnu-g++ nvcc_compiler=nvcc

  1. env_create.sh 创建环境

  2. build_deepmd.sh 编译安装deepmd-kit,再通过uv安装dpgen

  3. build_lammps.sh

#!/bin/bash
#SBATCH --gpus=1
#SBATCH
# build_deepmd.sh: build and install the deepmd-kit Python interface, then the
# C++ interface (consumed later by build_lammps.sh), then install dpgen.
# Run as a SLURM batch job after env_create.sh has created the conda env.
set -euo pipefail
export all_prefix=/data/run01/scvi905/dev260110
export conda_env_name=phonopy_lammps_dev260110
CLIENT_NODE=ln08
LAMMPS_TAG=stable_22Jul2025_update2
LAMMPS_GIT=https://github.com/lammps/lammps.git
DEEPMD_TAG=v3.1.1 # 3.1.1 matches TensorFlow 2.19
DEEPMD_GIT=https://github.com/deepmodeling/deepmd-kit.git
py_ver=3.12
conda_prefix=/data/apps/miniforge/25.3.0-3
LAMMPS_SRC=${all_prefix}/lammps_${LAMMPS_TAG}
DEEPMD_SRC=${all_prefix}/deepmd_${DEEPMD_TAG}
install_prefix=${all_prefix}/install
# Proxy setup (enable when the compute node has no direct internet access):
# ssh -CfNg -L 7897:127.0.0.1:7897 ${CLIENT_NODE}
# export http_proxy=http://127.0.0.1:7897
# export https_proxy=http://127.0.0.1:7897
module load cuda/12.8
source "${conda_prefix}/etc/profile.d/conda.sh"
conda activate "${conda_env_name}"
# --- Build the Python interface ---
cd "$DEEPMD_SRC"
export PIP_INDEX_URL=https://mirrors.bfsu.edu.cn/pypi/web/simple
if ! DP_VARIANT=cuda \
     CUDAToolkit_ROOT=$CUDA_HOME \
     DP_ENABLE_TENSORFLOW=1 \
     DP_ENABLE_PYTORCH=1 \
     DP_ENABLE_NATIVE_OPTIMIZATION=1 \
     pip install -v . "numpy<2"; then
  echo "Build failed" >&2
  exit 1
fi
echo "Build Python interface succeeded"
# --- Build the C++ interface ---
cd "$DEEPMD_SRC/source"
# Unique build dir per job; fall back to "manual" for interactive runs.
build_dir=build_${SLURM_JOB_ID:-manual}
mkdir -p "$build_dir"
cd "$build_dir"
unset CMAKE_PREFIX_PATH
export CUDA_ROOT=${CUDA_HOME}
# Pin the system GNU toolchain rather than whatever conda puts first in PATH.
CC=$(command -v x86_64-linux-gnu-gcc)
CXX=$(command -v x86_64-linux-gnu-g++)
export CC CXX
cmake \
  -D CMAKE_BUILD_TYPE=Debug \
  -D CMAKE_INSTALL_PREFIX="$install_prefix" \
  -D CMAKE_CXX_STANDARD=17 \
  -D CMAKE_CXX_STANDARD_REQUIRED=ON \
  -D CMAKE_CUDA_STANDARD=17 \
  -D CMAKE_CUDA_STANDARD_REQUIRED=ON \
  -D CMAKE_CUDA_COMPILER="$(command -v nvcc)" \
  -D MPI_CXX_COMPILER="$(command -v mpicxx)" \
  -D CMAKE_CXX_COMPILER="$CXX" \
  -D ENABLE_TENSORFLOW=OFF \
  -D ENABLE_PYTORCH=ON \
  -D USE_CUDA_TOOLKIT=ON \
  -D CUDAToolkit_ROOT="${CUDA_HOME}" \
  -D USE_TF_PYTHON_LIBS=OFF \
  -D USE_PT_PYTHON_LIBS=ON \
  -D ENABLE_NATIVE_OPTIMIZATION=1 \
  ..
# VERBOSE=1 (uppercase) is what CMake-generated makefiles honour; the original
# lowercase `verbose=1` was silently ignored. Stop before installing dpgen if
# the build failed (the original fell through to `uv pip install dpgen`).
if ! make VERBOSE=1 -j8; then
  echo "Build failed" >&2
  exit 1
fi
echo "Build succeeded"
make install
uv pip install dpgen
#!/bin/bash
#SBATCH --gpus=1
#SBATCH
# build_lammps.sh: build LAMMPS (KOKKOS + GPU + USER-NEP + deepmd) against the
# deepmd C++ libraries installed by build_deepmd.sh. Run as a SLURM batch job.
set -euo pipefail
export all_prefix=/data/run01/scvi905/dev260110
export conda_env_name=phonopy_lammps_dev260110
CLIENT_NODE=ln08
LAMMPS_TAG=stable_22Jul2025_update2
LAMMPS_GIT=https://github.com/lammps/lammps.git
DEEPMD_TAG=v3.1.1 # 3.1.1 matches TensorFlow 2.19
DEEPMD_GIT=https://github.com/deepmodeling/deepmd-kit.git
PYTHON_VER=3.12
conda_prefix=/data/apps/miniforge/25.3.0-3
LAMMPS_SRC=${all_prefix}/lammps_${LAMMPS_TAG}
DEEPMD_SRC=${all_prefix}/deepmd_${DEEPMD_TAG}
install_prefix=${all_prefix}/install
# Proxy setup (enable when the compute node has no direct internet access):
# ssh -CfNg -L 7897:127.0.0.1:7897 ${CLIENT_NODE}
# export http_proxy=http://127.0.0.1:7897
# export https_proxy=http://127.0.0.1:7897
module load cuda/12.8
source "${conda_prefix}/etc/profile.d/conda.sh"
conda activate "${conda_env_name}"
# Expose the installed deepmd C++ libraries to the compiler and loader.
# ${VAR:-} guards keep `set -u` from aborting when a path var starts unset.
export PATH=${install_prefix}/bin:$PATH
export LD_LIBRARY_PATH=${install_prefix}/lib:${LD_LIBRARY_PATH:-}
export LIBRARY_PATH=${install_prefix}/lib:${LIBRARY_PATH:-}
export CPATH=${install_prefix}/include:${CPATH:-}
# Copy the NEP package into the LAMMPS tree.
# NOTE(review): relative path — assumes the job's cwd contains the NEP_CPU
# checkout made by the download script; confirm the submit directory.
cd NEP_CPU
cp src/* interface/lammps/USER-NEP
cp -r interface/lammps/USER-NEP "${LAMMPS_SRC}/src"
cp interface/lammps/USER-NEP.cmake "${LAMMPS_SRC}/cmake/Modules/Packages/"
# One-time-only patches (run once before the first build):
# cd ${LAMMPS_SRC}/cmake
# sed -i '/foreach(PKG_WITH_INCL / s/)/ USER-NEP)/' CMakeLists.txt
# sed -i '/set(STANDARD_PACKAGES/,/)/ s/)/ \n USER-NEP)/' CMakeLists.txt
# Hook deepmd into the LAMMPS cmake build.
# One-time-only (run once before the first build):
# echo "include(${DEEPMD_SRC}/source/lmp/builtin.cmake)" >> ${LAMMPS_SRC}/cmake/CMakeLists.txt
# Replace builtin.cmake with the patched copy that hard-codes the deepmd
# version-number detection (see builtin.cmake in this gist).
rm -f -- "${DEEPMD_SRC}/source/lmp/builtin.cmake"
cp "${all_prefix}/builtin.cmake" "${DEEPMD_SRC}/source/lmp/builtin.cmake"
# Install the patched NVCC wrapper used as CMAKE_CXX_COMPILER below.
cp "${all_prefix}/nvcc_wrapper" "${LAMMPS_SRC}/lib/kokkos/bin/nvcc_wrapper"
# Make cicc (and the rest of nvvm) visible to the toolchain.
export CUDA_ROOT=$CUDA_HOME
export LD_LIBRARY_PATH=${CUDA_HOME}/nvvm/lib64:$LD_LIBRARY_PATH
export LIBRARY_PATH=${CUDA_HOME}/nvvm/lib64:$LIBRARY_PATH
export CPATH=${CUDA_HOME}/nvvm/include:$CPATH
# Pin the system GNU toolchain (also for OpenMPI's compiler wrappers).
export CC=/usr/bin/x86_64-linux-gnu-gcc
export CXX=/usr/bin/x86_64-linux-gnu-g++
export FC=/usr/bin/x86_64-linux-gnu-gfortran
export OMPI_CC=$CC
export OMPI_CXX=$CXX
export OMPI_FC=$FC
unset CMAKE_PREFIX_PATH
# PyTorch CUDA arch 12.0 — matches CMAKE_CUDA_ARCHITECTURES/GPU_ARCH below.
export TORCH_CUDA_ARCH_LIST="12.0"
# Dump the environment into the job log for debugging.
export
# Configure and build.
cd "${LAMMPS_SRC}"
# Unique build dir per job; fall back to "manual" for interactive runs.
build_dir=build_${SLURM_JOB_ID:-manual}
mkdir -p "$build_dir"
cd "$build_dir"
# LAMMPS_VERSION_NUMBER fixed: the original 202507223 had a stray trailing
# digit; 20250722 matches builtin.cmake and the stable_22Jul2025 tag.
cmake \
  -D LAMMPS_VERSION_NUMBER=20250722 \
  -D CMAKE_BUILD_TYPE=Debug \
  -D CMAKE_CXX_STANDARD=17 \
  -D CMAKE_CUDA_STANDARD=17 \
  -D CMAKE_CXX_COMPILER="$LAMMPS_SRC/lib/kokkos/bin/nvcc_wrapper" \
  -D BUILD_MPI=ON \
  -D PKG_KOKKOS=ON \
  -D PKG_GPU=ON \
  -D CUDA_ARCH_LIST=12.0 \
  -D CUDAToolkit_ROOT="$CUDA_HOME" \
  -D CUDA_TOOLKIT_ROOT_DIR="$CUDA_HOME" \
  -D FFT=KISS \
  -D GPU_API=cuda \
  -D CMAKE_CUDA_ARCHITECTURES="120" \
  -D GPU_ARCH=sm_120 \
  -D Kokkos_ENABLE_CUDA=ON \
  -D Kokkos_ARCH_BLACKWELL120=ON \
  -D Kokkos_ENABLE_CUDA_LAMBDA=ON \
  -D Kokkos_ENABLE_OPENMP=ON \
  -D MKL_INCLUDE_DIR="$CONDA_PREFIX/include" \
  -D CMAKE_PREFIX_PATH="$CUDA_HOME;$CONDA_PREFIX/lib/python$PYTHON_VER/site-packages/torch/share/cmake;${install_prefix}" \
  -D CMAKE_LIBRARY_PATH="$CUDA_HOME/lib64/stubs" \
  -D CMAKE_MPI_C_COMPILER=mpicc \
  -D CMAKE_MPI_CXX_COMPILER=mpicxx \
  -D CMAKE_INSTALL_PREFIX="$install_prefix" \
  -D BUILD_OMP=ON \
  -D PKG_OPENMP=ON \
  -D BUILD_SHARED_LIBS=yes \
  -D PKG_USER-NEP=on \
  -D LAMMPS_EXCEPTIONS=ON \
  -D PKG_PYTHON=ON \
  "$LAMMPS_SRC/cmake"
# The original checked $? after make, but under `set -e` a failed make exits
# the script before the check — guard the command itself instead.
if ! make -j8 VERBOSE=1; then
  echo "LAMMPS build failed" >&2
  exit 1
fi
echo "LAMMPS build succeeded"
make install
echo "LAMMPS installed to $install_prefix"
#!/bin/bash
# Fetch the LAMMPS, deepmd-kit and NEP_CPU sources through the local proxy,
# archive them for offline transfer, and stage Kokkos' nvcc_wrapper so it can
# be patched and copied back by build_lammps.sh.
set -euo pipefail
LAMMPS_TAG=stable_22Jul2025_update2
LAMMPS_GIT=https://github.com/lammps/lammps.git
# DEEPMD_TAG=v3.0.3 # 3.0.3 matches TensorFlow 2.18 — superseded by v3.1.1 below
DEEPMD_TAG=v3.1.1 # 3.1.1 matches TensorFlow 2.19
DEEPMD_GIT=https://github.com/deepmodeling/deepmd-kit.git
LAMMPS_SRC=./lammps_${LAMMPS_TAG}
DEEPMD_SRC=./deepmd_${DEEPMD_TAG}
# NOTE(review): writes the proxy into the user's global git config and leaves
# it there — run `git config --global --unset http.proxy` afterwards if needed.
git config --global http.proxy http://127.0.0.1:7897
git config --global https.proxy http://127.0.0.1:7897
git clone -b "${LAMMPS_TAG}" --depth=1 "${LAMMPS_GIT}" "${LAMMPS_SRC}"
git clone -b "${DEEPMD_TAG}" "${DEEPMD_GIT}" "${DEEPMD_SRC}"
git clone https://github.com/brucefan1983/NEP_CPU.git NEP_CPU
tar -zcf "${LAMMPS_SRC}.tar.gz" "${LAMMPS_SRC}"
tar -zcf "${DEEPMD_SRC}.tar.gz" "${DEEPMD_SRC}"
tar -zcf NEP_CPU.tar.gz NEP_CPU
cp "${LAMMPS_SRC}/lib/kokkos/bin/nvcc_wrapper" .
#!/bin/bash
# env_create.sh: create the conda environment (Python 3.12, OpenMPI, MKL-backed
# BLAS, pinned cmake/numpy, phonolammps) and install CUDA 12.8 PyTorch wheels
# plus TensorFlow 2.19 into it.
set -euo pipefail
export all_prefix=/data/run01/scvi905/dev260110
export conda_env_name=phonopy_lammps_dev260110
py_ver=3.12
conda_prefix=/data/apps/miniforge/25.3.0-3
source "${conda_prefix}/etc/profile.d/conda.sh"
# MKL BLAS selection via the libblas metapackage, see:
# https://conda-forge.org/docs/maintainer/knowledge_base/#switching-blas-implementation
conda create -n "${conda_env_name}" -c conda-forge \
  python="${py_ver}" \
  mpi4py \
  openmpi \
  "libblas=*=*_mkl" \
  "cmake<=3.28.2" \
  "numpy<2" \
  phonolammps \
  -y
conda activate "${conda_env_name}"
export PIP_INDEX_URL=https://mirrors.bfsu.edu.cn/pypi/web/simple
ALI_MIRROR=https://mirrors.aliyun.com/pytorch-wheels/cu128/
pip install uv
# The local torch wheel below must already be present in the cwd; fetch it
# once (with proxy/internet access) via:
# wget $ALI_MIRROR/torch-2.8.0+cu128-cp312-cp312-manylinux_2_28_x86_64.whl
uv pip install --extra-index-url "${ALI_MIRROR}" --index-strategy unsafe-best-match \
  "numpy<2" \
  ./torch-2.8.0+cu128-cp312-cp312-manylinux_2_28_x86_64.whl torchvision torchaudio \
  tensorflow==2.19
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment