5.16. LAMMPS
The following versions are available.
version | module | execution queue
---|---|---
29 AUG 2024 | /work/app/LAMMPS/lammps-29Aug2024_cpu/src/lmp_mpi | P_030 TP_002 MP_001 CP_001 DP_002 S_001 CS_001
Attention
Execute the following command in advance.
module load oneapi/2025.0.1
version | module | queue
---|---|---
29 AUG 2024 | /work/app/LAMMPS/lammps-29Aug2024_gpu/build/lmp_gpu | A_002 CA_001 DA_002
Attention
Execute the following command in advance.
module load oneapi/2025.0.1
module load cuda/12.8
Job Submission Script
・Large-scale Parallel Computing Server
#!/bin/sh
# PBS job-script template for the CPU build of LAMMPS on the
# Large-scale Parallel Computing Server. Replace the placeholders
# (nodes, queue, jobname, MPI task counts, file names) with real values.
#PBS -l select=nodes
#PBS -q queue
#PBS -N jobname
# Load the oneAPI environment required by this build (see the
# "Attention" note above).
module load oneapi/2025.0.1
# Run from the directory the job was submitted from.
cd ${PBS_O_WORKDIR}
mpirun [ -np MPI total tasks ] [ -ppn MPI tasks per node ] /work/app/LAMMPS/lammps-29Aug2024_cpu/src/lmp_mpi < input file > output file 2> error file
・Accelerator server
#!/bin/sh
# PBS job-script template for the GPU build of LAMMPS on the
# Accelerator server. Replace the placeholders (CPU/GPU counts,
# jobname, MPI task counts, file names) with real values.
#PBS -l select=1[:ncpus=number of CPUs][:ngpus=number of GPUs]
#PBS -q CA_001
#PBS -N jobname
# Load the oneAPI and CUDA environments required by this build
# (see the "Attention" note above).
module load oneapi/2025.0.1
module load cuda/12.8
# Run from the directory the job was submitted from.
cd ${PBS_O_WORKDIR}
mpirun [ -np MPI total tasks ] [ -ppn MPI tasks per node ] /work/app/LAMMPS/lammps-29Aug2024_gpu/build/lmp_gpu -sf gpu -pk gpu MPI tasks per node < input file > output file 2> error file
Example
・Large-scale Parallel Computing Server
#!/bin/sh
# Example: LAMMPS (CPU build) on 1 node of the Large-scale Parallel
# Computing Server, 112 MPI tasks with all 112 placed on that node.
#PBS -l select=1
#PBS -q P_030
#PBS -N lammps
# Runtime environment for the CPU build.
module load oneapi/2025.0.1
# Run from the submission directory.
cd ${PBS_O_WORKDIR}
mpirun -np 112 -ppn 112 /work/app/LAMMPS/lammps-29Aug2024_cpu/src/lmp_mpi < in.lj > lammps.out 2> lammps.err
・Accelerator server
#!/bin/sh
# Example: LAMMPS (GPU build) on the Accelerator server with 2 CPUs
# and 2 GPUs. Two MPI tasks are launched on the node; "-sf gpu"
# enables the GPU-accelerated styles and the trailing "2" in
# "-pk gpu 2" matches the per-node task/GPU count (see the template
# above).
#PBS -l select=1:ncpus=2:ngpus=2
#PBS -q CA_001
#PBS -N lammps
# Runtime environment for the GPU build.
module load oneapi/2025.0.1
module load cuda/12.8
# Run from the submission directory.
cd ${PBS_O_WORKDIR}
mpirun -np 2 -ppn 2 /work/app/LAMMPS/lammps-29Aug2024_gpu/build/lmp_gpu -sf gpu -pk gpu 2 < in.lj > lammps.out 2> lammps.err