Supercomputer Tips

Linux/Ubuntu general

sed

  • How to change the first occurrence of a word on each line of a text file
$ sed   -i   's/oldstring/newstring/'  myfile.txt
  • How to change all occurrences of a word in a text file
$ sed   -i   's/oldstring/newstring/g'  myfile.txt
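  • How to change only the very first occurrence in the whole file; the 0,/regex/ address range below is a GNU sed extension, not portable to BSD sed
$ sed   -i   '0,/oldstring/ s/oldstring/newstring/'  myfile.txt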


UHHPC

Slurm scripts

  • Serial submission
#!/bin/bash
### Name for your job
#SBATCH -J example    
### Number of cores requested per task
#SBATCH -c 1 
### Number of nodes to spread total cores across
#SBATCH -N 1 
### Minimum memory per CPU in MB
#SBATCH --mem-per-cpu 640 
### Runtime in minutes. 72 hours is current max
#SBATCH -t 30   
### Partition to submit to
#SBATCH -p community.q 
### Standard err goes to this file
#SBATCH -e example-%j.err 
### Standard out goes to this file
#SBATCH -o example-%j.out
### your email
#SBATCH --mail-user yourid@gmail.com 
### which job events trigger an email (ALL = begin, end, fail, etc.)
#SBATCH --mail-type ALL 
### if you want to use modules or ENV vars
source ~/.bash_profile 
### to load a module, use this
module load intel/ics  

### simple bash commands for tests
pwd
ls -laF
whoami
hostname 
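
  • To submit and monitor the job (assuming the script above is saved as example.slurm; the filename is arbitrary)
$ sbatch example.slurm
$ squeue -u $USER
$ scancel JOBID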

  • Parallel submission
#!/bin/bash
###  walltime, abbreviated by -t
#SBATCH --time=1:00:00 
### number of cluster nodes, abbreviated by -N
#SBATCH --nodes=1 
### stdout names, using the job number (%j) & the first node (%N)
#SBATCH -o slurm-%j-%N.out
### Standard err goes to this file
#SBATCH -e slurm-%j-%N.err 
### Number of MPI tasks, abbreviated by -n
#SBATCH --ntasks=20 
### partition, abbreviated by -p
#SBATCH --partition=community.q 
### load appropriate modules if necessary
source ~/.bash_profile
module load intel/ics
### set environment variables necessary for MPI
export OMP_NUM_THREADS=1
export I_MPI_FABRICS=tmi
export I_MPI_PMI_LIBRARY=/opt/local/slurm/default/lib64/libpmi.so
### run the program
mpirun -n 20 my_mpi_program > my_program.out
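### Note: hard-coding 20 duplicates --ntasks above. Slurm exports the
### requested task count as SLURM_NTASKS, so an equivalent invocation
### that keeps the two in sync would be:
### mpirun -n $SLURM_NTASKS my_mpi_program > my_program.out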

Stampede2 at TACC

  • Serial submission
    • OpenFOAM
#!/bin/bash
#SBATCH -J cavityPrl         # job name
#SBATCH -o cavityPrl.o%j     # output file name (%j expands to jobID)
#SBATCH -e cavityPrl.e%j     # error file name
#SBATCH -N 1    # total number of nodes
#SBATCH -n 1    # total number of mpi tasks requested
#SBATCH -p development       # queue (partition): development or normal
#SBATCH -t 00:30:00          # run time (hh:mm:ss)  
# SBATCH --mail-user=youremail@gmail.com
# SBATCH --mail-type=begin   # email me when the job starts
# SBATCH --mail-type=end     # email me when the job finishes

export IROOT=/opt/intel/compilers_and_libraries_2017.4.196/linux
export MPI_ROOT=$IROOT/mpi/intel64
source $HOME/OpenFOAM/OpenFOAM-4.1.0/etc/bashrc
. $WM_PROJECT_DIR/bin/tools/RunFunctions

application=$(getApplication)
runApplication blockMesh
runApplication checkMesh
runApplication $application
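
  • runApplication writes each step's output to log.<application> in the case directory, so a quick way to check the results afterwards is, e.g.:
$ tail log.blockMesh
$ tail log.checkMesh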
  • Parallel submission
    • OpenFOAM
#!/bin/bash
#SBATCH -J cavityPrl       # job name
#SBATCH -o cavityPrl.o%j   # output file name (%j expands to jobID)
#SBATCH -e cavityPrl.e%j   # error file name
#SBATCH -N 1    # total number of nodes
#SBATCH -n 4    # total number of mpi tasks requested (max = 68x4 = 272)
#SBATCH -p development     # queue (partition): development or normal
#SBATCH -t 00:30:00        # run time (hh:mm:ss) 
# SBATCH --mail-user=youremail@gmail.com 
# SBATCH --mail-type=begin # email me when the job starts
# SBATCH --mail-type=end   # email me when the job finishes

export IROOT=/opt/intel/compilers_and_libraries_2017.4.196/linux
export MPI_ROOT=$IROOT/mpi/intel64
source $HOME/OpenFOAM/OpenFOAM-4.1.0/etc/bashrc
. $WM_PROJECT_DIR/bin/tools/RunFunctions

application=$(getApplication)
runApplication blockMesh
runApplication checkMesh
runApplication decomposePar
runParallel $application
runApplication reconstructPar
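
  • decomposePar reads system/decomposeParDict, whose numberOfSubdomains must match the -n value above (4 here). A minimal sketch of that dict, using the scotch method so no explicit processor layout is needed:
FoamFile
{
    version     2.0;
    format      ascii;
    class       dictionary;
    object      decomposeParDict;
}

numberOfSubdomains  4;

method              scotch;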