5.19. GROMACS

The following version is available.

Large-scale Parallel Computing Server

version: 2025.4
module: /work/app/GROMACS/gromacs-2025.4_cpu/bin/GMXRC.bash
execution queue: P_030, TP_002, MP_001, CP_001, DP_002, S_001, CS_001

Attention
Execute the following command in advance.

module load oneapi/2025.0.1

Accelerator server

version: 2025.4
module: /work/app/GROMACS/gromacs-2025.4_gpu/bin/GMXRC.bash
execution queue: A_002, CA_001, DA_002

Attention
Execute the following commands in advance.

module load oneapi/2025.0.1
module load cuda/12.8
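
Before submitting a batch job, the environment can be checked interactively. The lines below are a minimal sketch (not part of this manual) that loads the required modules, sources the GPU build's GMXRC.bash and prints the GROMACS version:

module load oneapi/2025.0.1
module load cuda/12.8
source /work/app/GROMACS/gromacs-2025.4_gpu/bin/GMXRC.bash
# confirm gmx_mpi is on PATH and show the build configuration
gmx_mpi --version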

  • Job Submission Script

・Large-scale Parallel Computing Server

#!/bin/sh
#PBS -l select=number of nodes
#PBS -q queue name
#PBS -N jobname

module -s load oneapi/2025.0.1

cd ${PBS_O_WORKDIR}

source /work/app/GROMACS/gromacs-2025.4_cpu/bin/GMXRC.bash
mpirun [ -np total number of MPI tasks ] [ -ppn MPI tasks per node ] -hostfile $PBS_NODEFILE gmx_mpi mdrun -s input file > output file 2> error file
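
The script is submitted with the standard PBS commands. A usage sketch, assuming the script was saved under the hypothetical name gromacs_cpu.sh:

# submit the job and check its state in the queue
qsub gromacs_cpu.sh
qstat -u $USER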

・Accelerator server

#!/bin/sh
#PBS -l select=1[:ncpus=number of CPUs][:ngpus=number of GPUs][:mem=amount of memory]
#PBS -q CA_001
#PBS -N jobname

module -s load oneapi/2025.0.1
module -s load cuda/12.8

cd ${PBS_O_WORKDIR}

source /work/app/GROMACS/gromacs-2025.4_gpu/bin/GMXRC.bash
mpirun [ -np total number of MPI tasks ] [ -ppn MPI tasks per node ] -hostfile $PBS_NODEFILE gmx_mpi mdrun -s input file > output file 2> error file
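
By default, mdrun decides automatically which tasks to offload to the GPUs. Offload can also be requested explicitly with mdrun options such as -nb gpu. The line below is an illustrative sketch that forces the non-bonded calculation onto the GPUs; it is not a tuned recommendation for this system:

mpirun -np 2 -ppn 2 -hostfile $PBS_NODEFILE gmx_mpi mdrun -s input.tpr -nb gpu > gromacs.out 2> gromacs.err
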
  • Example

・Large-scale Parallel Computing Server

#!/bin/sh
#PBS -l select=1
#PBS -q P_030
#PBS -N gromacs

module -s load oneapi/2025.0.1

cd ${PBS_O_WORKDIR}

source /work/app/GROMACS/gromacs-2025.4_cpu/bin/GMXRC.bash
mpirun -np 112 -ppn 112 -hostfile $PBS_NODEFILE gmx_mpi mdrun -s input.tpr > gromacs.out 2> gromacs.err
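
The run input file input.tpr used above is generated beforehand with gmx grompp. A minimal sketch, assuming hypothetical input files md.mdp (run parameters), conf.gro (coordinates) and topol.top (topology):

source /work/app/GROMACS/gromacs-2025.4_cpu/bin/GMXRC.bash
# combine parameters, coordinates and topology into the run input file
gmx_mpi grompp -f md.mdp -c conf.gro -p topol.top -o input.tpr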

・Accelerator server

#!/bin/sh
#PBS -l select=1:ncpus=2:ngpus=2:mem=32gb
#PBS -q CA_001
#PBS -N gromacs

module -s load oneapi/2025.0.1
module -s load cuda/12.8

cd ${PBS_O_WORKDIR}

source /work/app/GROMACS/gromacs-2025.4_gpu/bin/GMXRC.bash
mpirun -np 2 -ppn 2 -hostfile $PBS_NODEFILE gmx_mpi mdrun -s input.tpr > gromacs.out 2> gromacs.err
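
If a job reaches the queue's time limit before the simulation finishes, the run can be continued from the checkpoint file that mdrun writes (state.cpt by default). A sketch of the restart line, reusing the settings of the accelerator example above:

mpirun -np 2 -ppn 2 -hostfile $PBS_NODEFILE gmx_mpi mdrun -s input.tpr -cpi state.cpt > gromacs.out 2> gromacs.err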