├── GridEngine
│   ├── adf-mpi.sub
│   ├── amber-11-ompi-iqtc04.sub
│   ├── amber10-mpi.sub
│   ├── crystal-09-mpi-iqtc01.sub
│   ├── crystal-09-mpi-iqtc02.sub
│   ├── crystal-09-mpi-iqtc03.sub
│   ├── crystal-09-mpi-iqtc04.sub
│   ├── gamess-2010r2-ompi-iqtc01.sub
│   ├── gamess-2010r2-ompi-iqtc02.sub
│   ├── gamess-2010r2-ompi-iqtc03.sub
│   ├── gamess-2010r2-ompi-iqtc04.sub
│   ├── gamess-2010r2-smp-iqtc01.sub
│   ├── gamess-2010r2-smp-iqtc02.sub
│   ├── gamess-2010r2-smp-iqtc03.sub
│   ├── gamess-2010r2-smp-iqtc04.sub
│   ├── gamess-smp.sub
│   ├── gamess_uk-7.0-ompi-iqtc01.sub
│   ├── gamess_uk-7.0-ompi-iqtc02.sub
│   ├── gamess_uk-7.0-ompi-iqtc03.sub
│   ├── gamess_uk-7.0-ompi-iqtc04.sub
│   ├── gamess_uk-7.0-smp-iqtc01.sub
│   ├── gamess_uk-7.0-smp-iqtc02.sub
│   ├── gamess_uk-7.0-smp-iqtc03.sub
│   ├── gamess_uk-7.0-smp-iqtc04.sub
│   ├── gaussian-03-linda-iqtc01.sub
│   ├── gaussian-03-smp-iqtc01.sub
│   ├── gaussian-03-smp-iqtc02.sub
│   ├── gaussian-03-smp-iqtc03.sub
│   ├── gaussian-03-smp-iqtc04.sub
│   ├── gaussian-09-linda-iqtc01.sub
│   ├── gaussian-09-linda-iqtc02.sub
│   ├── gaussian-09-linda-iqtc03.sub
│   ├── gaussian-09-linda-iqtc04.sub
│   ├── gaussian-09-smp-iqtc01.sub
│   ├── gaussian-09-smp-iqtc02.sub
│   ├── gaussian-09-smp-iqtc03.sub
│   ├── gaussian-09-smp-iqtc04.sub
│   ├── gaussian-linda.sub
│   ├── gaussian-serial.sub
│   ├── gaussian-smp.sub
│   ├── gromacs-3.3.1-ompi-iqtc04.sub
│   ├── gromacs-mpi.sub
│   ├── gromacs-ompi-iqtc02.sub
│   ├── gromacs-ompi-iqtc04.sub
│   ├── lammps-mpi.sub
│   ├── molcas.sub
│   ├── molcas_7.4-ompi-iqtc04.sub
│   ├── molcas_7.4-serial-iqtc01.sub
│   ├── molcas_7.4-serial-iqtc02.sub
│   ├── molcas_7.4-serial-iqtc03.sub
│   ├── molcas_7.4-serial-iqtc04.sub
│   ├── nwchem-ompi-iqtc04.sub
│   ├── orca-ompi-iqtc04.sub
│   ├── orca_ompi_scratch.sub
│   ├── orca_ompi_work.sub
│   ├── siesta-3.0-ompi-iqtc02.sub
│   ├── siesta-3.0-ompi-iqtc02_tcsh.sub
│   ├── siesta-3.0-ompi-iqtc04.sub
│   ├── siesta-mpi.sub
│   ├── stress_x86-64.sub
│   ├── stress_x86.sub
│   ├── vasp-4.6-ompi-iqtc01.sub
│   ├── vasp-4.6-ompi-iqtc02.sub
│   ├── vasp-4.6-ompi-iqtc03.sub
│   ├── vasp-4.6-ompi-iqtc04.sub
│   ├── vasp-5.2-ompi-iqtc01.sub
│   ├── vasp-5.2-ompi-iqtc02.sub
│   ├── vasp-5.2-ompi-iqtc03.sub
│   ├── vasp-5.2-ompi-iqtc04.sub
│   └── vasp-mpi.sub
├── LSF
│   ├── ior.lsf
│   ├── iozone.lsf
│   └── mdtest.lsf
├── README.md
└── Slurm
    ├── GATK_multithreads.sl
    ├── Intel-Trace-Analyser-and-Collector-instrumented.sl
    ├── Intel-Trace-Analyser-and-Collector.sl
    ├── OpenFOAM-parallel.sl
    ├── OpenFOAM-serial.sl
    ├── OpenSees.sl
    ├── QuantumESPRESSO-hybrid.sl
    ├── QuantumESPRESSO-mpi.sl
    ├── R-parallel-mpi.r
    ├── R-parallel-mpi.sl
    ├── R-template.sl
    ├── VASP-iomkl.sl
    ├── VTune-MPI.sl
    ├── VTune-OpenMP.sl
    ├── VTune-serial.sl
    ├── abaqus.sl
    ├── ansys-cfx.sl
    ├── ansys-fluent-requeue.sl
    ├── ansys-fluent.sl
    ├── array-io.sl
    ├── array-mpi.sl
    ├── array.sl
    ├── array_builder.sh
    ├── array_multi_parameters.sl
    ├── blast+_array.sl
    ├── blast+_array_multithreads.sl
    ├── checkpoint-blcr.sl
    ├── cuda-mpi.sl
    ├── cuda.sl
    ├── easybuild.sl
    ├── flexpart.sl
    ├── gaussian.sl
    ├── gromacs-cuda-mpi.sl
    ├── gromacs-mpi.sl
    ├── gromacs-openmp.sl
    ├── hello_world-array.sl
    ├── hello_world.sl
    ├── helloworld_multi
    ├── hybrid.sl
    ├── intel-mpi.sl
    ├── intel-mpitune.sl
    ├── lammps-hybrid.sl
    ├── launcher.sl
    ├── matlab.sl
    ├── mb.sl
    ├── migrate-benchmark-serial.sl
    ├── migrate-benchmark-tuned.sl
    ├── migrate-benchmark.sl
    ├── migrate-mpi.sl
    ├── migrate-profile.sl
    ├── migrate-traces.sl
    ├── mpi-all.sl
    ├── mpi.sl
    ├── multi-mpi.conf
    ├── multi-prog-mpi.sl
    ├── multi-prog.sl
    ├── multi.conf
    ├── multistage.sl
    ├── namd-cuda.sl
    ├── namd-mpi.sl
    ├── nextflow.config
    ├── node_stress.sl
    ├── octave.sl
    ├── open-mpi.sl
    ├── openmp.sl
    ├── orca-mpi.sl
    ├── orca-smp.sl
    ├── params.dat
    ├── platform-mpi.sl
    ├── post-processing.sl
    ├── pre-processing.sl
    ├── profile.sl
    ├── serial.sl
    ├── slurm_setup_abaqus-env.sh
    ├── slurm_setup_cfx-env.sh
    ├── slurm_setup_cfx-env2.sh
    ├── slurm_setup_cfx-env3.sh
    ├── slurm_setup_fluent.sh
    ├── star-ccm+-platform.sl
    ├── stress.sl
    ├── supermagic.sl
    └── trace.conf
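
The scripts are grouped by batch scheduler: the GridEngine *.sub files are submitted with qsub, the LSF *.lsf files with bsub, and the Slurm *.sl files with sbatch. A minimal sketch of the submission commands (the queue, parallel-environment and module names inside each script are site-specific and normally need to be adapted first):

    qsub GridEngine/gaussian-09-smp-iqtc04.sub    # GridEngine / SGE
    bsub < LSF/ior.lsf                            # LSF reads the job script on stdin
    sbatch Slurm/serial.sl                        # Slurm
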
/GridEngine/adf-mpi.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Script to submit ADF calculations with MPI
3 | #$ -o jobfile.out
4 | #$ -e jobfile.err
5 | #$ -N adf-test
6 |
7 | #$ -S /bin/bash
8 | #$ -V
9 | #$ -cwd
10 | #$ -m e
11 | #$ -notify
12 | #$ -M jblasco@fbg.ub.es
13 | #$ -l h_rt=10:00:00
14 | #$ -l h_vmem=1G
15 | #$ -pe mpi 4
16 | #$ -R y
17 |
18 | module load adf
19 |
20 | SDIR=$(pwd)
21 | export P4_RSHCOMMAND=ssh
22 |
23 | export SCM_MACHINEFILE=$SDIR/scm.machines
24 | export SCM_TMPDIR=$TMPDIR
25 | export SCM_USETMPDIR=yes
26 | export NSCM=$NSLOTS
27 |
28 | rm -f $SCM_MACHINEFILE
29 | for line in `cat $TMPDIR/machines`; do
30 | node=`echo $line | cut -f1 -d ":"`
31 | slots=`echo $line | cut -s -f2 -d ":"`
32 | ssh $node mkdir -p $SCM_TMPDIR
33 | if [[ $slots -le 0 ]]; then
34 | slots=1
35 | fi
36 | j=0
37 | while [ $j -lt $slots ]; do
38 | echo $node >> $SCM_MACHINEFILE
39 | j=$(($j + 1))
40 | done
41 | done
42 |
43 | # Run your executable:
44 | $ADFBIN/adf -n $NSLOTS << EOR
45 | Title WATER Geometry Optimization with Delocalized Coordinates
46 |
47 | Atoms
48 | O 0.000000 0.000000 0.000000
49 | H 0.000000 -0.689440 -0.578509
50 | H 0.000000  0.689440 -0.578509
51 | End
52 |
53 | Basis
54 | Type TZ2P
55 | Core Small
56 | End
57 |
58 | Geometry
59 | Optim Deloc
60 | End
61 |
62 | End Input
63 | EOR
--------------------------------------------------------------------------------
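
The loop above turns the PE machine file written by GridEngine ($TMPDIR/machines, one host[:slots] entry per line) into the flat host list that SCM_MACHINEFILE expects, with one line per slot. A more compact equivalent is sketched below, under the assumption that the machine file keeps that host[:slots] format; the per-node "ssh ... mkdir -p $SCM_TMPDIR" step would still be needed:

    # Expand host[:slots] entries into one hostname per slot
    awk -F: '{n = $2; if (n < 1) n = 1; for (i = 0; i < n; i++) print $1}' \
        $TMPDIR/machines > $SCM_MACHINEFILE
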
/GridEngine/amber-11-ompi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N TEST-ncores
7 | # (2) Resources requested
8 | #$ -pe omp* ncores
9 | # Queue
10 | #$ -q iqtc04.q
11 | # (3) Output files
12 | #$ -cwd
13 | #$ -o amber-TEST-ncores_BINAMBER.out
14 | #$ -e amber-TEST-ncores_BINAMBER.err
15 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
16 | ##$ -m e
17 | ##$ -M yourmail@ub.edu
18 | #$ -S /bin/bash
19 | ###########################################
20 | ## User environment.
21 | ###########################################
22 | ## Load the modules needed
23 | #. /etc/profile.d/modules.sh
24 | module load amber/11_OpenMPI_1.4.2_intel_11.1.072
25 | ##########################################
26 | # Copying files needed
27 | ##########################################
28 | # We copy the inputs to the directory where the jobs will run
29 | cd $TMPDIR
30 | #mkdir -p /work/jblasco/TEST-ncores-BINAMBER
31 | #cd /work/jblasco/TEST-ncores-BINAMBER
32 | cp -r $HOME/bench/AMBER/TEST/input/* .
33 | ##########################################
34 | # Run the job
35 | ##########################################
36 | export OMP_NUM_THREADS=1
37 | ulimit -l unlimited
38 |
39 |
40 | A1=ispfv_trxm1_04_din
41 | A2=ispfv_trxm1
42 | A3=ispfv_trxm1_03_din
43 | A4=rst
44 |
45 | mpirun -np $NSLOTS pmemd \
46 | -i $SGE_O_WORKDIR/$A1.in \
47 | -o $TMPDIR/$A1.out \
48 | -p $SGE_O_WORKDIR/$A2.top \
49 | -c $SGE_O_WORKDIR/$A3.$A4 \
50 | -ref $SGE_O_WORKDIR/$A3.$A4 \
51 | -r $TMPDIR/$A1.rst \
52 | -e $TMPDIR/$A1.mden \
53 | -x $TMPDIR/$A1.mdcrd \
54 | -v $TMPDIR/$A1.mdvel \
55 | -l $TMPDIR/$A1.logfile \
56 | -inf $TMPDIR/$A1.inf
57 |
58 | ##########################################
59 | # Copy the results to our home directory
60 | ##########################################
61 | mkdir -p $HOME/bench/AMBER/TEST/OUT/amber
62 | cp -r $TMPDIR $HOME/bench/AMBER/TEST/OUT/amber/
63 | #cp -r /work/jblasco/TEST-ncores-BINAMBER $HOME/bench/AMBER/TEST/OUT/amber/
64 | ##########################################
65 | # Temps dels resultats
66 | ##########################################
67 | TEMPS=$(cat $A1.out | grep "Master Total wall time" | awk '{print $6}')
68 | echo "$NSLOTS $TEMPS" >> $HOME/bench2/AMBER/benchmark-Nehalem-DP-TEST-BINAMBER.dat
69 | #cd
70 | #rm -fr /work/jblasco/TEST-ncores-BINAMBER
71 |
72 |
--------------------------------------------------------------------------------
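
This script is a template rather than a ready-to-run job: "ncores" and "BINAMBER" are placeholders that appear in the job name, the -pe request, the output file names and the benchmark .dat file, and are meant to be substituted before submission. A hedged example of stamping out a concrete copy with sed (the values 8 and pmemd are only an illustration):

    sed -e 's/ncores/8/g' -e 's/BINAMBER/pmemd/g' \
        amber-11-ompi-iqtc04.sub > amber-11-8-pmemd.sub
    qsub amber-11-8-pmemd.sub
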
/GridEngine/amber10-mpi.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job (for identification)
6 | #$ -N AMBER-10-prova01
7 | # (2) Requested resources
8 | #$ -l h_rt=0:50:0
9 | #$ -l mem_free=4.0G
10 | #$ -pe mpi 16
11 | # (3) Output files
12 | #$ -cwd
13 | #$ -o amber01.out
14 | #$ -e amber01.err
15 | # (4) Send an email when the job finishes.
16 | #$ -m e
17 | #$ -M jblasco@fbg.ub.es
18 | ##########################################
19 | # Entorn d.usuari
20 | ##########################################
21 | # Es carreguen els moduls a utilitzar
22 | . /etc/profile
23 | module load amber
24 | module load openmpi
25 | ##########################################
26 | # Data transfer
27 | ##########################################
28 | # Copy the input data to the directory where the job will run.
29 | cd $TMPDIR
30 | export Project=amber_mpi_16
31 | export Input=$Project
32 | cp -pr $HOME/path/amb/els/inputs/ $Input
33 | ##########################################
34 | # Calculation
35 | ##########################################
36 | # Run the calculation.
37 | export OMP_NUM_THREADS=1
38 | #mpirun -np $NSLOTS $pe_machines sander.MPI -O -i in.md -c crd.md.23 -o cytosine.out
39 | mpirun -np $NSLOTS sander_mpi -O -i in.md -c crd.md.23 -o cytosine.out
40 | ##########################################
41 | # Transfer the results back
42 | ##########################################
43 | cp -pr $Input $HOME/path/a/on/guardar/els/outputs/
44 |
--------------------------------------------------------------------------------
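
Note that the parallel executable name depends on the AMBER build: this script calls sander_mpi, while other installations ship sander.MPI or pmemd.MPI (as in the commented-out mpirun line). A quick way to check what a loaded amber module actually provides, assuming the module sets AMBERHOME as AMBER installations normally do:

    module load amber
    ls $AMBERHOME/bin | grep -i -E 'sander|pmemd'
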
/GridEngine/crystal-09-mpi-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N crystal09-MPI
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe mpi 16
10 | # Queue
11 | #$ -q iqtc01.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o crystal09-mpi.out
17 | #$ -e crystal09-mpi.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load crystal/09
27 | ##########################################
28 | # Run the job
29 | #########################################
30 | runmpi.iqtc INPUT $NSLOTS
31 |
32 | ##########################################
33 | # Copy the results to our home directory
34 | ##########################################
35 | mkdir -p $HOME/TESTS/crystal09/tests/out
36 | cp -r $TMPDIR/* $HOME/TESTS/crystal09/tests/out/
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/GridEngine/crystal-09-mpi-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N crystal09-MPI
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe mpi 16
10 | # Queue
11 | #$ -q iqtc02.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o crystal09-mpi.out
17 | #$ -e crystal09-mpi.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load crystal/09
27 | ##########################################
28 | # Run the job
29 | #########################################
30 | runmpi.iqtc INPUT $NSLOTS
31 |
32 | ##########################################
33 | # Copy the results to our home directory
34 | ##########################################
35 | mkdir -p $HOME/TESTS/crystal09/tests/out
36 | cp -r $TMPDIR/* $HOME/TESTS/crystal09/tests/out/
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/GridEngine/crystal-09-mpi-iqtc03.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N crystal09-MPI
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe mpi 16
10 | # Queue
11 | #$ -q iqtc03.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o crystal09-mpi.out
17 | #$ -e crystal09-mpi.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load crystal/09
27 | ##########################################
28 | # Run the job
29 | #########################################
30 | runmpi.iqtc INPUT $NSLOTS
31 |
32 | ##########################################
33 | # Copy the results to our home directory
34 | ##########################################
35 | mkdir -p $HOME/TESTS/crystal09/tests/out
36 | cp -r $TMPDIR/* $HOME/TESTS/crystal09/tests/out/
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/GridEngine/crystal-09-mpi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N crystal09-MPI
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe omp* 12
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o crystal09-mpi.out
17 | #$ -e crystal09-mpi.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load crystal/09
27 | ##########################################
28 | # Run the job
29 | #########################################
30 | runmpi.iqtc INPUT $NSLOTS
31 |
32 | ##########################################
33 | # Copy the results to our home directory
34 | ##########################################
35 | mkdir -p $HOME/TESTS/crystal09/tests/out
36 | cp -r $TMPDIR/* $HOME/TESTS/crystal09/tests/out/
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
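
The four crystal-09 scripts above are identical except for the queue (-q iqtcNN.q) and the parallel environment/slot count; the runmpi.iqtc wrapper only needs the input name and $NSLOTS. Since qsub command-line options override the embedded #$ directives, a single copy can also be retargeted at submit time, for example:

    # Same script, different queue/PE chosen on the command line
    qsub -q iqtc02.q -pe mpi 16 crystal-09-mpi-iqtc01.sub
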
/GridEngine/gamess-2010r2-ompi-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N GAMESS-prova01
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe ompi 8
10 | # Queue
11 | #$ -q iqtc01.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gamess01.out
17 | #$ -e gamess01.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamess/2010r2_intel_ompi-1.3.3
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/bench/GAMESS/XRQTC.Gamess_MCQDPT/input/XRQTC.Gamess_MCQDPT.inp .
33 | ##########################################
34 | # Run the job
35 | ##########################################
36 | rungms XRQTC.Gamess_MCQDPT.inp 00 $NSLOTS >& XRQTC.Gamess_MCQDPT.log
37 | ##########################################
38 | # Copy the results to our home directory
39 | ##########################################
40 | cp -r $TMPDIR $HOME/tests.out
41 |
--------------------------------------------------------------------------------
/GridEngine/gamess-2010r2-ompi-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N GAMESS-prova01
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe ompi 8
10 | # Queue
11 | #$ -q iqtc02.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gamess01.out
17 | #$ -e gamess01.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamess/2010r2_ompi-1.3
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/bench/GAMESS/XRQTC.Gamess_MCQDPT/input/XRQTC.Gamess_MCQDPT.inp .
33 | ##########################################
34 | # Run the job
35 | ##########################################
36 | rungms XRQTC.Gamess_MCQDPT.inp 00 $NSLOTS >& XRQTC.Gamess_MCQDPT.log
37 | ##########################################
38 | # Copy the results to our home directory
39 | ##########################################
40 | cp -r $TMPDIR $HOME/tests.out
41 |
--------------------------------------------------------------------------------
/GridEngine/gamess-2010r2-ompi-iqtc03.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N GAMESS-prova01
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe ompi 8
10 | # Queue
11 | #$ -q iqtc03.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gamess01.out
17 | #$ -e gamess01.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamess/2010r2_ompi-1.3
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/bench/GAMESS/XRQTC.Gamess_MCQDPT/input/XRQTC.Gamess_MCQDPT.inp .
33 | ##########################################
34 | # Run the job
35 | ##########################################
36 | rungms XRQTC.Gamess_MCQDPT.inp 00 $NSLOTS >& XRQTC.Gamess_MCQDPT.log
37 | ##########################################
38 | # Copy the results to our home directory
39 | ##########################################
40 | cp -r $TMPDIR $HOME/tests.out
41 |
--------------------------------------------------------------------------------
/GridEngine/gamess-2010r2-ompi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N GAMESS-prova01
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe omp* 12
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gamess01.out
17 | #$ -e gamess01.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamess/2010r2_intel11.1_mkl11.1_ompi-1.4.2
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/bench/GAMESS/XRQTC.Gamess_MCQDPT/input/XRQTC.Gamess_MCQDPT.inp .
33 | ##########################################
34 | # Run the job
35 | ##########################################
36 | rungms XRQTC.Gamess_MCQDPT.inp 00 $NSLOTS >& XRQTC.Gamess_MCQDPT.log
37 | ##########################################
38 | # Copy the results to our home directory
39 | ##########################################
40 | cp -r $TMPDIR $HOME/tests.out
41 |
--------------------------------------------------------------------------------
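
In all of these GAMESS scripts, rungms takes the input file, the GAMESS version tag (here 00) and the number of compute processes ($NSLOTS, as granted by the parallel environment), and ">&" sends both stdout and stderr to the .log file. A quick sanity check on the copied-back log after the job has finished (GAMESS prints this marker on a clean run):

    grep "TERMINATED NORMALLY" XRQTC.Gamess_MCQDPT.log
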
/GridEngine/gamess-2010r2-smp-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N GAMESS-prova01
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 1
10 | # Queue
11 | #$ -q iqtc01.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gamess01.out
17 | #$ -e gamess01.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamess/2010r2_intel
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/bench/GAMESS/XRQTC.Gamess_MCQDPT/input/XRQTC.Gamess_MCQDPT.inp .
33 | ##########################################
34 | # Run the job
35 | ##########################################
36 | rungms XRQTC.Gamess_MCQDPT.inp 00 $NSLOTS >& XRQTC.Gamess_MCQDPT.log
37 | ##########################################
38 | # Copy the results to our home directory
39 | ##########################################
40 | cp -r $TMPDIR $HOME/tests.out
41 |
--------------------------------------------------------------------------------
/GridEngine/gamess-2010r2-smp-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N GAMESS-prova01
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 1
10 | # Queue
11 | #$ -q iqtc02.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gamess01.out
17 | #$ -e gamess01.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamess/2010r2
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/bench/GAMESS/XRQTC.Gamess_MCQDPT/input/XRQTC.Gamess_MCQDPT.inp .
33 | ##########################################
34 | # Run the job
35 | ##########################################
36 | rungms XRQTC.Gamess_MCQDPT.inp 00 $NSLOTS >& XRQTC.Gamess_MCQDPT.log
37 | ##########################################
38 | # Copy the results to our home directory
39 | ##########################################
40 | cp -r $TMPDIR $HOME/tests.out
41 |
--------------------------------------------------------------------------------
/GridEngine/gamess-2010r2-smp-iqtc03.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N GAMESS-prova01
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 1
10 | # Queue
11 | #$ -q iqtc03.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gamess01.out
17 | #$ -e gamess01.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamess/2010r2
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/bench/GAMESS/XRQTC.Gamess_MCQDPT/input/XRQTC.Gamess_MCQDPT.inp .
33 | ##########################################
34 | # Run the job
35 | ##########################################
36 | rungms XRQTC.Gamess_MCQDPT.inp 00 $NSLOTS >& XRQTC.Gamess_MCQDPT.log
37 | ##########################################
38 | # Copy the results to our home directory
39 | ##########################################
40 | cp -r $TMPDIR $HOME/tests.out
41 |
--------------------------------------------------------------------------------
/GridEngine/gamess-2010r2-smp-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N GAMESS-prova01
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 1
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gamess01.out
17 | #$ -e gamess01.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamess/2010r2_intel11.1_mkl11.1
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/bench/GAMESS/XRQTC.Gamess_MCQDPT/input/XRQTC.Gamess_MCQDPT.inp .
33 | ##########################################
34 | # Run the job
35 | ##########################################
36 | rungms XRQTC.Gamess_MCQDPT.inp 00 $NSLOTS >& XRQTC.Gamess_MCQDPT.log
37 | ##########################################
38 | # Copy the results to our home directory
39 | ##########################################
40 | cp -r $TMPDIR $HOME/tests.out
41 |
--------------------------------------------------------------------------------
/GridEngine/gamess-smp.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Script to submit GAMESS calculations
3 | # with 8 processors on the same machine
4 | # - Jordi Blasco 12/08/07 -
5 | ##########################################
6 | # SGE options and parameters
7 | ##########################################
8 | # (1) Name of the job (for identification)
9 | #$ -N GAMESS-prova01
10 | # (2) Requested resources
11 | #$ -l h_rt=0:50:0
12 | #$ -l mem_free=16.0G
13 | #$ -pe smp 8
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gamess01.out
17 | #$ -e gamess01.err
18 | # (4) Send an email when the job finishes.
19 | #$ -m e
20 | #$ -M jblasco@fbg.ub.es
21 | ##########################################
22 | # User environment
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile
26 | module load gamess
27 | ##########################################
28 | # Data transfer
29 | ##########################################
30 | # Copy the input data to the directory where the job will run.
31 | cd $TMPDIR
32 | export Project=gamess_smp_8
33 | export Input=$Project
34 | cp -pr $HOME/path/amb/els/inputs/ $Input
35 | ##########################################
36 | # Calculation
37 | ##########################################
38 | # Run the calculation.
39 | rungms JOB.inp 00 $NSLOTS >& JOB.log
40 | ##########################################
41 | # Transfer the results back
42 | ##########################################
43 | cp -pr $Input $HOME/path/a/on/guardar/els/outputs/
44 |
--------------------------------------------------------------------------------
/GridEngine/gamess_uk-7.0-ompi-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #$ -pe ompi 4
3 | #$ -cwd
4 | #$ -V
5 | #$ -o Si8O13.log
6 | #$ -S /bin/bash
7 | #$ -q iqtc01.q
8 | #
9 | source /opt/modules/init/bash
10 | module load gamess-uk/7.0_intel10.1_ompi1.3.3
11 | jobname=Si8O13
12 | root=Si8O13
13 | executable=/aplic/gamess-uk_ompi_intel/bin/gamess-uk
14 | jobmode=openmpi
15 | flist="-k ftn058=Si8O13.pun -k ed3"
16 | nprocs=4
17 | nnodes=4
18 | jobtime=120
19 | scratchtopdir=.
20 | libdir=/aplic/gamess-uk_ompi_intel/lib
21 | tempdir=/tmp
22 | homedir=/home/rporcel/subscripts/gamessuk/Si8O13
23 | stdout=0
24 | listing=TEST.out
25 | datain=1
26 | procspernode=1
27 | project=none
28 | RUNGAMESS_DIR=/aplic/gamess-uk_ompi_intel/rungamess
29 | PROG=rungamess
30 | queueargs=""
31 | queue=iqtc01.q
32 | debug=0
33 | jobworkdir=
34 | export jobname
35 | export root
36 | export executable
37 | export jobmode
38 | export flist
39 | export nprocs
40 | export nnodes
41 | export jobtime
42 | export scratchtopdir
43 | export libdir
44 | export tempdir
45 | export homedir
46 | export stdout
47 | export listing
48 | export datain
49 | export procs_per_node
50 | export project
51 | export RUNGAMESS_DIR
52 | export PROG
53 | export queueargs
54 | export queue
55 | export debug
56 | export jobworkdir
57 | #if test -f /home/g4stefan/.profile
58 | #then
59 | #. /home/g4stefan/.profile
60 | #fi
61 | if test ${GAMESS_LIB:=unset} = unset
62 | then
63 | GAMESS_LIB=/aplic/gamess-uk_ompi_intel/lib
64 | export GAMESS_LIB
65 | fi
66 | if test ${GAMESS_SCR:=unset} = unset
67 | then
68 | GAMESS_SCR=.
69 | export GAMESS_SCR
70 | fi
71 | if test ${GAMESS_WORK:=unset} = unset
72 | then
73 | GAMESS_WORK=
74 | export GAMESS_WORK
75 | fi
76 | if test ${GAMESS_TMP:=unset} = unset
77 | then
78 | GAMESS_TMP=/tmp
79 | export GAMESS_TMP
80 | fi
81 | if test ${GAMESS_PAREXE:=unset} = unset
82 | then
83 | GAMESS_PAREXE=/aplic/gamess-uk_ompi_intel/bin/gamess-uk
84 | export GAMESS_PAREXE
85 | fi
86 | if test ${GAMESS_EXE:=unset} = unset
87 | then
88 | GAMESS_EXE=/aplic/gamess-uk_ompi_intel/bin/gamess-uk
89 | export GAMESS_EXE
90 | fi
91 | $RUNGAMESS_DIR/rg_exe.$jobmode > /home/rporcel/subscripts/gamessuk/Si8O13/Si8O13.out
92 |
--------------------------------------------------------------------------------
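
The long run of "if test ${VAR:=unset} = unset" blocks in this and the following GAMESS-UK scripts simply gives each GAMESS_* variable a default when it is not already set. The same defaulting can be written with the shell's assign-default expansion; a sketch using two of the variables above:

    # Equivalent to the if/test blocks: assign only when unset or empty, then export
    : ${GAMESS_LIB:=/aplic/gamess-uk_ompi_intel/lib}
    : ${GAMESS_TMP:=/tmp}
    export GAMESS_LIB GAMESS_TMP
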
/GridEngine/gamess_uk-7.0-ompi-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #$ -pe ompi 4
3 | #$ -cwd
4 | #$ -V
5 | #$ -o Si8O13.log
6 | #$ -S /bin/bash
7 | #$ -q iqtc02.q
8 | #
9 | source /opt/modules/init/bash
10 | module load gamessuk/7.0_ompi
11 | jobname=Si8O13
12 | root=Si8O13
13 | executable=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
14 | jobmode=openmpi
15 | flist="-k ftn058=Si8O13.pun -k ed3"
16 | nprocs=4
17 | nnodes=4
18 | jobtime=120
19 | scratchtopdir=.
20 | libdir=/aplic/gamess-uk/7.0_ompi/lib
21 | tempdir=/tmp
22 | homedir=/home/rporcel/subscripts/gamessuk/Si8O13
23 | stdout=0
24 | listing=TEST.out
25 | datain=1
26 | procspernode=1
27 | project=none
28 | RUNGAMESS_DIR=/aplic/gamess-uk/7.0_ompi/rungamess
29 | PROG=rungamess
30 | queueargs=""
31 | queue=iqtc02.q
32 | debug=0
33 | jobworkdir=
34 | export jobname
35 | export root
36 | export executable
37 | export jobmode
38 | export flist
39 | export nprocs
40 | export nnodes
41 | export jobtime
42 | export scratchtopdir
43 | export libdir
44 | export tempdir
45 | export homedir
46 | export stdout
47 | export listing
48 | export datain
49 | export procs_per_node
50 | export project
51 | export RUNGAMESS_DIR
52 | export PROG
53 | export queueargs
54 | export queue
55 | export debug
56 | export jobworkdir
57 | #if test -f /home/g4stefan/.profile
58 | #then
59 | #. /home/g4stefan/.profile
60 | #fi
61 | if test ${GAMESS_LIB:=unset} = unset
62 | then
63 | GAMESS_LIB=/aplic/gamess-uk/7.0_ompi/lib
64 | export GAMESS_LIB
65 | fi
66 | if test ${GAMESS_SCR:=unset} = unset
67 | then
68 | GAMESS_SCR=.
69 | export GAMESS_SCR
70 | fi
71 | if test ${GAMESS_WORK:=unset} = unset
72 | then
73 | GAMESS_WORK=
74 | export GAMESS_WORK
75 | fi
76 | if test ${GAMESS_TMP:=unset} = unset
77 | then
78 | GAMESS_TMP=/tmp
79 | export GAMESS_TMP
80 | fi
81 | if test ${GAMESS_PAREXE:=unset} = unset
82 | then
83 | GAMESS_PAREXE=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
84 | export GAMESS_PAREXE
85 | fi
86 | if test ${GAMESS_EXE:=unset} = unset
87 | then
88 | GAMESS_EXE=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
89 | export GAMESS_EXE
90 | fi
91 | $RUNGAMESS_DIR/rg_exe.$jobmode > /home/rporcel/subscripts/gamessuk/Si8O13/Si8O13.out
92 |
--------------------------------------------------------------------------------
/GridEngine/gamess_uk-7.0-ompi-iqtc03.sub:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #$ -pe ompi 4
3 | #$ -cwd
4 | #$ -V
5 | #$ -o Si8O13.log
6 | #$ -S /bin/bash
7 | #$ -q iqtc03.q
8 | #
9 | source /opt/modules/init/bash
10 | module load gamessuk/7.0_ompi
11 | jobname=Si8O13
12 | root=Si8O13
13 | executable=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
14 | jobmode=openmpi
15 | flist="-k ftn058=Si8O13.pun -k ed3"
16 | nprocs=4
17 | nnodes=4
18 | jobtime=120
19 | scratchtopdir=.
20 | libdir=/aplic/gamess-uk/7.0_ompi/lib
21 | tempdir=/tmp
22 | homedir=/home/rporcel/subscripts/gamessuk/Si8O13
23 | stdout=0
24 | listing=TEST.out
25 | datain=1
26 | procspernode=1
27 | project=none
28 | RUNGAMESS_DIR=/aplic/gamess-uk/7.0_ompi/rungamess
29 | PROG=rungamess
30 | queueargs=""
31 | queue=iqtc03.q
32 | debug=0
33 | jobworkdir=
34 | export jobname
35 | export root
36 | export executable
37 | export jobmode
38 | export flist
39 | export nprocs
40 | export nnodes
41 | export jobtime
42 | export scratchtopdir
43 | export libdir
44 | export tempdir
45 | export homedir
46 | export stdout
47 | export listing
48 | export datain
49 | export procs_per_node
50 | export project
51 | export RUNGAMESS_DIR
52 | export PROG
53 | export queueargs
54 | export queue
55 | export debug
56 | export jobworkdir
57 | #if test -f /home/g4stefan/.profile
58 | #then
59 | #. /home/g4stefan/.profile
60 | #fi
61 | if test ${GAMESS_LIB:=unset} = unset
62 | then
63 | GAMESS_LIB=/aplic/gamess-uk/7.0_ompi/lib
64 | export GAMESS_LIB
65 | fi
66 | if test ${GAMESS_SCR:=unset} = unset
67 | then
68 | GAMESS_SCR=.
69 | export GAMESS_SCR
70 | fi
71 | if test ${GAMESS_WORK:=unset} = unset
72 | then
73 | GAMESS_WORK=
74 | export GAMESS_WORK
75 | fi
76 | if test ${GAMESS_TMP:=unset} = unset
77 | then
78 | GAMESS_TMP=/tmp
79 | export GAMESS_TMP
80 | fi
81 | if test ${GAMESS_PAREXE:=unset} = unset
82 | then
83 | GAMESS_PAREXE=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
84 | export GAMESS_PAREXE
85 | fi
86 | if test ${GAMESS_EXE:=unset} = unset
87 | then
88 | GAMESS_EXE=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
89 | export GAMESS_EXE
90 | fi
91 | $RUNGAMESS_DIR/rg_exe.$jobmode > /home/rporcel/subscripts/gamessuk/Si8O13/Si8O13.out
92 |
--------------------------------------------------------------------------------
/GridEngine/gamess_uk-7.0-ompi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #$ -pe omp* 4
3 | #$ -cwd
4 | #$ -V
5 | #$ -o Si8O13.log
6 | #$ -S /bin/bash
7 | #$ -q iqtc04.q
8 | #
9 | source /opt/modules/init/bash
10 | module load gamess-uk/7.0_intel10.1_mkl10.1_ompi1.4.2
11 | jobname=Si8O13
12 | root=Si8O13
13 | executable=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/bin/gamess-uk
14 | jobmode=openmpi
15 | flist="-k ftn058=Si8O13.pun -k ed3"
16 | nprocs=4
17 | nnodes=4
18 | jobtime=120
19 | scratchtopdir=.
20 | libdir=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/lib
21 | tempdir=/tmp
22 | homedir=/home/rporcel/subscripts/gamessuk/Si8O13
23 | stdout=0
24 | listing=TEST.out
25 | datain=1
26 | procspernode=1
27 | project=none
28 | RUNGAMESS_DIR=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/rungamess
29 | PROG=rungamess
30 | queueargs=""
31 | queue=iqtc04.q
32 | debug=0
33 | jobworkdir=
34 | export jobname
35 | export root
36 | export executable
37 | export jobmode
38 | export flist
39 | export nprocs
40 | export nnodes
41 | export jobtime
42 | export scratchtopdir
43 | export libdir
44 | export tempdir
45 | export homedir
46 | export stdout
47 | export listing
48 | export datain
49 | export procs_per_node
50 | export project
51 | export RUNGAMESS_DIR
52 | export PROG
53 | export queueargs
54 | export queue
55 | export debug
56 | export jobworkdir
57 | #if test -f /home/g4stefan/.profile
58 | #then
59 | #. /home/g4stefan/.profile
60 | #fi
61 | if test ${GAMESS_LIB:=unset} = unset
62 | then
63 | GAMESS_LIB=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/lib
64 | export GAMESS_LIB
65 | fi
66 | if test ${GAMESS_SCR:=unset} = unset
67 | then
68 | GAMESS_SCR=.
69 | export GAMESS_SCR
70 | fi
71 | if test ${GAMESS_WORK:=unset} = unset
72 | then
73 | GAMESS_WORK=
74 | export GAMESS_WORK
75 | fi
76 | if test ${GAMESS_TMP:=unset} = unset
77 | then
78 | GAMESS_TMP=/tmp
79 | export GAMESS_TMP
80 | fi
81 | if test ${GAMESS_PAREXE:=unset} = unset
82 | then
83 | GAMESS_PAREXE=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/bin/gamess-uk
84 | export GAMESS_PAREXE
85 | fi
86 | if test ${GAMESS_EXE:=unset} = unset
87 | then
88 | GAMESS_EXE=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/bin/gamess-uk
89 | export GAMESS_EXE
90 | fi
91 | $RUNGAMESS_DIR/rg_exe.$jobmode > /home/rporcel/subscripts/gamessuk/Si8O13/Si8O13.out
92 |
--------------------------------------------------------------------------------
/GridEngine/gamess_uk-7.0-smp-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N gamess-uk
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 4
10 | # Queue
11 | #$ -q iqtc01.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o Si8O13.out
17 | #$ -e Si8O13.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamess-uk/7.0_intel10.1_ompi1.3.3
27 | ##########################################
28 | # GAMESS-UK cruft
29 | ##########################################
30 |
31 | jobname=Si8O13
32 | root=Si8O13
33 | executable=/aplic/gamess-uk_ompi_intel/bin/gamess-uk
34 | jobmode=openmpi
35 | flist="-k ftn058=Si8O13.pun -k ed3"
36 | nprocs=$NSLOTS
37 | nnodes=$NSLOTS
38 | jobtime=120
39 | scratchtopdir=.
40 | libdir=/aplic/gamess-uk_ompi_intel/lib
41 | tempdir=/tmp
42 | homedir=/home/rporcel/subscripts/gamessuk/tst
43 | stdout=0
44 | listing=TEST.out
45 | datain=1
46 | procspernode=1
47 | project=none
48 | RUNGAMESS_DIR=/aplic/gamess-uk_ompi_intel/rungamess
49 | PROG=rungamess
50 | queueargs=""
51 | queue=iqtc01.q
52 | debug=0
53 | jobworkdir=
54 | export jobname
55 | export root
56 | export executable
57 | export jobmode
58 | export flist
59 | export nprocs
60 | export nnodes
61 | export jobtime
62 | export scratchtopdir
63 | export libdir
64 | export tempdir
65 | export homedir
66 | export stdout
67 | export listing
68 | export datain
69 | export procs_per_node
70 | export project
71 | export RUNGAMESS_DIR
72 | export PROG
73 | export queueargs
74 | export queue
75 | export debug
76 | export jobworkdir
77 | if test ${GAMESS_LIB:=unset} = unset
78 | then
79 | GAMESS_LIB=/aplic/gamess-uk_ompi_intel/lib
80 | export GAMESS_LIB
81 | fi
82 | if test ${GAMESS_SCR:=unset} = unset
83 | then
84 | GAMESS_SCR=.
85 | export GAMESS_SCR
86 | fi
87 | if test ${GAMESS_WORK:=unset} = unset
88 | then
89 | GAMESS_WORK=
90 | export GAMESS_WORK
91 | fi
92 | if test ${GAMESS_TMP:=unset} = unset
93 | then
94 | GAMESS_TMP=/tmp
95 | export GAMESS_TMP
96 | fi
97 | if test ${GAMESS_PAREXE:=unset} = unset
98 | then
99 | GAMESS_PAREXE=/aplic/gamess-uk_ompi_intel/bin/gamess-uk
100 | export GAMESS_PAREXE
101 | fi
102 | if test ${GAMESS_EXE:=unset} = unset
103 | then
104 | GAMESS_EXE=/aplic/gamess-uk_ompi_intel/bin/gamess-uk
105 | export GAMESS_EXE
106 | fi
107 |
108 | export GAMESS_SCR=$TMPDIR
109 | cp $homedir/$jobname.in $TMPDIR
110 |
111 | $RUNGAMESS_DIR/rg_exe.$jobmode > $homedir/$jobname.out
112 |
113 |
--------------------------------------------------------------------------------
/GridEngine/gamess_uk-7.0-smp-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N gamess-uk
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 8
10 | # Queue
11 | #$ -q iqtc02.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o Si8O13.out
17 | #$ -e Si8O13.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamessuk/7.0_ompi
27 | ##########################################
28 | # GAMESS-UK cruft
29 | ##########################################
30 |
31 | jobname=Si8O13
32 | root=Si8O13
33 | executable=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
34 | jobmode=openmpi
35 | flist="-k ftn058=Si8O13.pun -k ed3"
36 | nprocs=$NSLOTS
37 | nnodes=$NSLOTS
38 | jobtime=120
39 | scratchtopdir=.
40 | libdir=/aplic/gamess-uk/7.0_ompi/lib
41 | tempdir=/tmp
42 | homedir=/home/rporcel/subscripts/gamessuk/tst
43 | stdout=0
44 | listing=TEST.out
45 | datain=1
46 | procspernode=1
47 | project=none
48 | RUNGAMESS_DIR=/aplic/gamess-uk/7.0_ompi/rungamess
49 | PROG=rungamess
50 | queueargs=""
51 | queue=iqtc02.q
52 | debug=0
53 | jobworkdir=
54 | export jobname
55 | export root
56 | export executable
57 | export jobmode
58 | export flist
59 | export nprocs
60 | export nnodes
61 | export jobtime
62 | export scratchtopdir
63 | export libdir
64 | export tempdir
65 | export homedir
66 | export stdout
67 | export listing
68 | export datain
69 | export procs_per_node
70 | export project
71 | export RUNGAMESS_DIR
72 | export PROG
73 | export queueargs
74 | export queue
75 | export debug
76 | export jobworkdir
77 | if test ${GAMESS_LIB:=unset} = unset
78 | then
79 | GAMESS_LIB=/aplic/gamess-uk/7.0_ompi/lib
80 | export GAMESS_LIB
81 | fi
82 | if test ${GAMESS_SCR:=unset} = unset
83 | then
84 | GAMESS_SCR=.
85 | export GAMESS_SCR
86 | fi
87 | if test ${GAMESS_WORK:=unset} = unset
88 | then
89 | GAMESS_WORK=
90 | export GAMESS_WORK
91 | fi
92 | if test ${GAMESS_TMP:=unset} = unset
93 | then
94 | GAMESS_TMP=/tmp
95 | export GAMESS_TMP
96 | fi
97 | if test ${GAMESS_PAREXE:=unset} = unset
98 | then
99 | GAMESS_PAREXE=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
100 | export GAMESS_PAREXE
101 | fi
102 | if test ${GAMESS_EXE:=unset} = unset
103 | then
104 | GAMESS_EXE=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
105 | export GAMESS_EXE
106 | fi
107 |
108 | export GAMESS_SCR=$TMPDIR
109 | cp $homedir/$jobname.in $TMPDIR
110 |
111 | $RUNGAMESS_DIR/rg_exe.$jobmode > $homedir/$jobname.out
112 |
--------------------------------------------------------------------------------
/GridEngine/gamess_uk-7.0-smp-iqtc03.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N gamess-uk
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 8
10 | # Queue
11 | #$ -q iqtc03.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o Si8O13.out
17 | #$ -e Si8O13.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamessuk/7.0_ompi
27 | ##########################################
28 | # GAMESS-UK cruft
29 | ##########################################
30 |
31 | jobname=Si8O13
32 | root=Si8O13
33 | executable=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
34 | jobmode=openmpi
35 | flist="-k ftn058=Si8O13.pun -k ed3"
36 | nprocs=$NSLOTS
37 | nnodes=$NSLOTS
38 | jobtime=120
39 | scratchtopdir=.
40 | libdir=/aplic/gamess-uk/7.0_ompi/lib
41 | tempdir=/tmp
42 | homedir=/home/rporcel/subscripts/gamessuk/tst
43 | stdout=0
44 | listing=TEST.out
45 | datain=1
46 | procspernode=1
47 | project=none
48 | RUNGAMESS_DIR=/aplic/gamess-uk/7.0_ompi/rungamess
49 | PROG=rungamess
50 | queueargs=""
51 | queue=iqtc03.q
52 | debug=0
53 | jobworkdir=
54 | export jobname
55 | export root
56 | export executable
57 | export jobmode
58 | export flist
59 | export nprocs
60 | export nnodes
61 | export jobtime
62 | export scratchtopdir
63 | export libdir
64 | export tempdir
65 | export homedir
66 | export stdout
67 | export listing
68 | export datain
69 | export procs_per_node
70 | export project
71 | export RUNGAMESS_DIR
72 | export PROG
73 | export queueargs
74 | export queue
75 | export debug
76 | export jobworkdir
77 | if test ${GAMESS_LIB:=unset} = unset
78 | then
79 | GAMESS_LIB=/aplic/gamess-uk/7.0_ompi/lib
80 | export GAMESS_LIB
81 | fi
82 | if test ${GAMESS_SCR:=unset} = unset
83 | then
84 | GAMESS_SCR=.
85 | export GAMESS_SCR
86 | fi
87 | if test ${GAMESS_WORK:=unset} = unset
88 | then
89 | GAMESS_WORK=
90 | export GAMESS_WORK
91 | fi
92 | if test ${GAMESS_TMP:=unset} = unset
93 | then
94 | GAMESS_TMP=/tmp
95 | export GAMESS_TMP
96 | fi
97 | if test ${GAMESS_PAREXE:=unset} = unset
98 | then
99 | GAMESS_PAREXE=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
100 | export GAMESS_PAREXE
101 | fi
102 | if test ${GAMESS_EXE:=unset} = unset
103 | then
104 | GAMESS_EXE=/aplic/gamess-uk/7.0_ompi/bin/gamess-uk
105 | export GAMESS_EXE
106 | fi
107 |
108 | export GAMESS_SCR=$TMPDIR
109 | cp $homedir/$jobname.in $TMPDIR
110 |
111 | $RUNGAMESS_DIR/rg_exe.$jobmode > $homedir/$jobname.out
112 |
113 |
--------------------------------------------------------------------------------
/GridEngine/gamess_uk-7.0-smp-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N gamess-uk
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 12
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o Si8O13.out
17 | #$ -e Si8O13.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gamess-uk/7.0_intel10.1_mkl10.1_ompi1.4.2
27 | ##########################################
28 | # GAMESS-UK cruft
29 | ##########################################
30 |
31 | jobname=Si8O13
32 | root=Si8O13
33 | executable=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/bin/gamess-uk
34 | jobmode=openmpi
35 | flist="-k ftn058=Si8O13.pun -k ed3"
36 | nprocs=$NSLOTS
37 | nnodes=$NSLOTS
38 | jobtime=120
39 | scratchtopdir=.
40 | libdir=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/lib
41 | tempdir=/tmp
42 | homedir=/home/rporcel/subscripts/gamessuk/tst
43 | stdout=0
44 | listing=TEST.out
45 | datain=1
46 | procspernode=1
47 | project=none
48 | RUNGAMESS_DIR=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/rungamess
49 | PROG=rungamess
50 | queueargs=""
51 | queue=iqtc04.q
52 | debug=0
53 | jobworkdir=
54 | export jobname
55 | export root
56 | export executable
57 | export jobmode
58 | export flist
59 | export nprocs
60 | export nnodes
61 | export jobtime
62 | export scratchtopdir
63 | export libdir
64 | export tempdir
65 | export homedir
66 | export stdout
67 | export listing
68 | export datain
69 | export procs_per_node
70 | export project
71 | export RUNGAMESS_DIR
72 | export PROG
73 | export queueargs
74 | export queue
75 | export debug
76 | export jobworkdir
77 | if test ${GAMESS_LIB:=unset} = unset
78 | then
79 | GAMESS_LIB=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/lib
80 | export GAMESS_LIB
81 | fi
82 | if test ${GAMESS_SCR:=unset} = unset
83 | then
84 | GAMESS_SCR=.
85 | export GAMESS_SCR
86 | fi
87 | if test ${GAMESS_WORK:=unset} = unset
88 | then
89 | GAMESS_WORK=
90 | export GAMESS_WORK
91 | fi
92 | if test ${GAMESS_TMP:=unset} = unset
93 | then
94 | GAMESS_TMP=/tmp
95 | export GAMESS_TMP
96 | fi
97 | if test ${GAMESS_PAREXE:=unset} = unset
98 | then
99 | GAMESS_PAREXE=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/bin/gamess-uk
100 | export GAMESS_PAREXE
101 | fi
102 | if test ${GAMESS_EXE:=unset} = unset
103 | then
104 | GAMESS_EXE=/aplic/gamess-uk/gamess-uk-7.0_intel10.1_mkl10.1_ompi1.4.2/bin/gamess-uk
105 | export GAMESS_EXE
106 | fi
107 |
108 | export GAMESS_SCR=$TMPDIR
109 | cp $homedir/$jobname.in $TMPDIR
110 |
111 | $RUNGAMESS_DIR/rg_exe.$jobmode > $homedir/$jobname.out
112 |
113 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-03-linda-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe linda 8
10 | # Queue
11 | #$ -q iqtc01.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | module load gaussian/g03d02
26 | ##########################################
27 | # Copying files needed
28 | ##########################################
29 | # We copy the inputs to the directory where the jobs will run
30 | cd $TMPDIR
31 | cp -r $HOME/proves/h2o_opt.dat .
32 |
33 | # Es carreguen algunes variables pel Linda
34 |
35 | setenv GAUSS_SCRDIR $TMPDIR
36 | setenv NODES \"`cat $TMPDIR/tsnet.nodes`\"
37 | setenv GAUSS_LFLAGS "-v -nodelist ${NODES} -mp 4"
38 |
39 | ##########################################
40 | # Run the job
41 | ##########################################
42 | # We run gaussian g03l
43 | g03l < ./h2o_opt.dat > h2ol_opt.log
44 | ##########################################
45 | # Copy the results to our home directory
46 | ##########################################
47 | mkdir $HOME/proves/resultatl
48 | cp -r . $HOME/proves/resultatl/
49 |
50 |
--------------------------------------------------------------------------------
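
For the Linda run, GAUSS_LFLAGS hands Gaussian the node list harvested from the parallel environment ($TMPDIR/tsnet.nodes) together with "-mp 4", i.e. 4 shared-memory workers per Linda node; with 4-core nodes the 8 requested slots would typically correspond to 2 Linda nodes. The Gaussian input also has to ask for a matching layout in its Link 0 section; h2o_opt.dat is not shown in the repository, so the following header lines are only an illustrative sketch:

    %NProcLinda=2
    %NProcShared=4
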
/GridEngine/gaussian-03-smp-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 4
10 | # Queue
11 | #$ -q iqtc01.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | module load gaussian/g03d02
26 | ##########################################
27 | # Copying files needed
28 | ##########################################
29 | # We copy the inputs to the directory where the jobs will run
30 | cd $TMPDIR
31 | cp -r $HOME/proves/h2o_opt.dat .
32 |
33 | # Set some variables for gaussian
34 |
35 | setenv GAUSS_SCRDIR $TMPDIR
36 |
37 | ##########################################
38 | # Run the job
39 | ##########################################
40 | # We run gaussian g03
41 | g03 < ./h2o_opt.dat > h2ol_opt.log
42 | ##########################################
43 | # Copy the results to our home directory
44 | ##########################################
45 | mkdir $HOME/proves/resultatl
46 | cp -r . $HOME/proves/resultatl/
47 |
48 |
--------------------------------------------------------------------------------
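
For the shared-memory runs, the -pe smp request only reserves the cores; the number Gaussian actually uses is set in the input file's Link 0 section, which should be kept in step with the PE request. Since h2o_opt.dat is not part of the repository, the lines below are only an illustrative sketch matching the "-pe smp 4" request above:

    %NProcShared=4
    %Mem=2GB
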
/GridEngine/gaussian-03-smp-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 8
10 | # Queue
11 | #$ -q iqtc02.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | module load gaussian/g03d02
26 | ##########################################
27 | # Copying files needed
28 | ##########################################
29 | # We copy the inputs to the directory where the jobs will run
30 | cd $TMPDIR
31 | cp -r $HOME/proves/h2o_opt.dat .
32 |
33 | # Set some variables for gaussian
34 |
35 | setenv GAUSS_SCRDIR $TMPDIR
36 |
37 | ##########################################
38 | # Run the job
39 | ##########################################
40 | # We run gaussian g03
41 | g03 < ./h2o_opt.dat > h2ol_opt.log
42 | ##########################################
43 | # Copy the results to our home directory
44 | ##########################################
45 | mkdir $HOME/proves/resultatl
46 | cp -r . $HOME/proves/resultatl/
47 |
48 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-03-smp-iqtc03.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 8
10 | # Queue
11 | #$ -q iqtc03.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | module load gaussian/g03d02
26 | ##########################################
27 | # Copying files needed
28 | ##########################################
29 | # We copy the inputs to the directory where the jobs will run
30 | cd $TMPDIR
31 | cp -r $HOME/proves/h2o_opt.dat .
32 |
33 | # Set some variables for gaussian
34 |
35 | setenv GAUSS_SCRDIR $TMPDIR
36 |
37 | ##########################################
38 | # Run the job
39 | ##########################################
40 | # We run gaussian g03
41 | g03 < ./h2o_opt.dat > h2ol_opt.log
42 | ##########################################
43 | # Copy the results to our home directory
44 | ##########################################
45 | mkdir $HOME/proves/resultatl
46 | cp -r . $HOME/proves/resultatl/
47 |
48 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-03-smp-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 12
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | source /etc/profile.d/modules.csh
26 | module load gaussian/g03d02
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/proves/h2o_opt.dat .
33 |
34 | # Set some variables for gaussian
35 |
36 | setenv GAUSS_SCRDIR $TMPDIR
37 |
38 | ##########################################
39 | # Run the job
40 | ##########################################
41 | # We run gaussian g03
42 | g03 < ./h2o_opt.dat > h2ol_opt.log
43 | ##########################################
44 | # Copy the results to our home directory
45 | ##########################################
46 | mkdir $HOME/proves/resultatl
47 | cp -r . $HOME/proves/resultatl/
48 |
49 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-09-linda-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe linda 8
10 | # Queue
11 | #$ -q iqtc01.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | module load gaussian/g09b01
26 | ##########################################
27 | # Copying files needed
28 | ##########################################
29 | # We copy the inputs to the directory where the jobs will run
30 | cd $TMPDIR
31 | cp -r $HOME/proves/h2o_opt.dat .
32 |
33 | # Set some variables for Linda
34 |
35 | setenv GAUSS_SCRDIR $TMPDIR
36 | setenv NODES \"`cat $TMPDIR/tsnet.nodes`\"
37 | setenv GAUSS_LFLAGS "-v -nodelist ${NODES} -mp 4"
38 |
39 | ##########################################
40 | # Run the job
41 | ##########################################
42 | # We run gaussian g09
43 | g09 < ./h2o_opt.dat > h2ol_opt.log
44 | ##########################################
45 | # Copy the results to our home directory
46 | ##########################################
47 | mkdir $HOME/proves/resultatl
48 | cp -r . $HOME/proves/resultatl/
49 |
50 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-09-linda-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe linda 16
10 | # Queue
11 | #$ -q iqtc02.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | module load gaussian/g09b01
26 | ##########################################
27 | # Copying files needed
28 | ##########################################
29 | # We copy the inputs to the directory where the jobs will run
30 | cd $TMPDIR
31 | cp -r $HOME/proves/h2o_opt.dat .
32 |
33 | # Set some variables for Linda
34 |
35 | setenv GAUSS_SCRDIR $TMPDIR
36 | setenv NODES \"`cat $TMPDIR/tsnet.nodes`\"
37 | setenv GAUSS_LFLAGS "-v -nodelist ${NODES} -mp 8"
38 |
39 | ##########################################
40 | # Run the job
41 | ##########################################
42 | # We run gaussian g09
43 | g09 < ./h2o_opt.dat > h2ol_opt.log
44 | ##########################################
45 | # Copy the results to our home directory
46 | ##########################################
47 | mkdir $HOME/proves/resultatl
48 | cp -r . $HOME/proves/resultatl/
49 |
50 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-09-linda-iqtc03.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe linda 16
10 | # Queue
11 | #$ -q iqtc03.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | module load gaussian/g09b01
26 | ##########################################
27 | # Copying files needed
28 | ##########################################
29 | # We copy the inputs to the directory where the jobs will run
30 | cd $TMPDIR
31 | cp -r $HOME/proves/h2o_opt.dat .
32 |
33 | # Set some variables for Linda
34 |
35 | setenv GAUSS_SCRDIR $TMPDIR
36 | setenv NODES \"`cat $TMPDIR/tsnet.nodes`\"
37 | setenv GAUSS_LFLAGS "-v -nodelist ${NODES} -mp 8"
38 |
39 | ##########################################
40 | # Run the job
41 | ##########################################
42 | # We run gaussian g09
43 | g09 < ./h2o_opt.dat > h2ol_opt.log
44 | ##########################################
45 | # Copy the results to our home directory
46 | ##########################################
47 | mkdir $HOME/proves/resultatl
48 | cp -r . $HOME/proves/resultatl/
49 |
50 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-09-linda-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe linda 24
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | source /etc/profile.d/modules.csh
26 | module load gaussian/g09b01
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/proves/h2o_opt.dat .
33 |
34 | # Set some variables for Linda
35 |
36 | setenv GAUSS_SCRDIR $TMPDIR
37 | setenv NODES \"`cat $TMPDIR/tsnet.nodes | uniq`\"
38 | setenv GAUSS_LFLAGS "-v -nodelist ${NODES} -mp 12"
39 |
40 | ##########################################
41 | # Run the job
42 | ##########################################
43 | # We run gaussian g09
44 | g09 < ./h2o_opt.dat > h2ol_opt.log
45 | ##########################################
46 | # Copy the results to our home directory
47 | ##########################################
48 | mkdir $HOME/proves/resultatl
49 | cp -r . $HOME/proves/resultatl/
50 |
51 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-09-smp-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 4
10 | # Queue
11 | #$ -q iqtc01.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | module load gaussian/g09b01
26 | ##########################################
27 | # Copying files needed
28 | ##########################################
29 | # We copy the inputs to the directory where the jobs will run
30 | cd $TMPDIR
31 | cp -r $HOME/proves/h2o_opt.dat .
32 |
33 | # Set some variables for gaussian
34 |
35 | setenv GAUSS_SCRDIR $TMPDIR
36 |
37 | ##########################################
38 | # Run the job
39 | ##########################################
40 | # We run gaussian g09
41 | g09 < ./h2o_opt.dat > h2ol_opt.log
42 | ##########################################
43 | # Copy the results to our home directory
44 | ##########################################
45 | mkdir $HOME/proves/resultatl
46 | cp -r . $HOME/proves/resultatl/
47 |
48 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-09-smp-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 8
10 | # Queue
11 | #$ -q iqtc02.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | module load gaussian/g09b01
26 | ##########################################
27 | # Copying files needed
28 | ##########################################
29 | # We copy the inputs to the directory where the jobs will run
30 | cd $TMPDIR
31 | cp -r $HOME/proves/h2o_opt.dat .
32 |
33 | # Set some variables for gaussian
34 |
35 | setenv GAUSS_SCRDIR $TMPDIR
36 |
37 | ##########################################
38 | # Run the job
39 | ##########################################
40 | # We run gaussian g09
41 | g09 < ./h2o_opt.dat > h2ol_opt.log
42 | ##########################################
43 | # Copy the results to our home directory
44 | ##########################################
45 | mkdir $HOME/proves/resultatl
46 | cp -r . $HOME/proves/resultatl/
47 |
48 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-09-smp-iqtc03.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 8
10 | # Queue
11 | #$ -q iqtc03.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | module load gaussian/g09b01
26 | ##########################################
27 | # Copying files needed
28 | ##########################################
29 | # We copy the inputs to the directory where the jobs will run
30 | cd $TMPDIR
31 | cp -r $HOME/proves/h2o_opt.dat .
32 |
33 | # Set some variables for gaussian
34 |
35 | setenv GAUSS_SCRDIR $TMPDIR
36 |
37 | ##########################################
38 | # Run the job
39 | ##########################################
40 | # We run gaussian g09
41 | g09 < ./h2o_opt.dat > h2ol_opt.log
42 | ##########################################
43 | # Copy the results to our home directory
44 | ##########################################
45 | mkdir $HOME/proves/resultatl
46 | cp -r . $HOME/proves/resultatl/
47 |
48 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-09-smp-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/csh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_gaus
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp 12
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/csh
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o testl_gaus.out
17 | #$ -e testl_gaus.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | source /etc/profile.d/modules.csh
26 | module load gaussian/g09b01
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/proves/h2o_opt.dat .
33 |
34 | # Set some variables for gaussian
35 |
36 | setenv GAUSS_SCRDIR $TMPDIR
37 |
38 | ##########################################
39 | # Run the job
40 | ##########################################
41 | # We run gaussian g09
42 | g09 < ./h2o_opt.dat > h2ol_opt.log
43 | ##########################################
44 | # Copy the results to our home directory
45 | ##########################################
46 | mkdir $HOME/proves/resultatl
47 | cp -r . $HOME/proves/resultatl/
48 |
49 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-linda.sub:
--------------------------------------------------------------------------------
1 |
2 | #!/bin/bash
3 | ##########################################
4 | # SGE options and parameters
5 | ##########################################
6 | # (1) Name of the job (for identification)
7 | #$ -N gaussian-linda-prova01
8 | # (2) Requested resources
9 | #$ -l h_rt=0:50:0
10 | #$ -l h_vmem=8G
11 | #$ -pe linda 4
12 | # (3) Output files
13 | #$ -cwd
14 | #$ -o gaussian-linda.out
15 | #$ -e gaussian-linda.err
16 | # (4) Send an email when the job ends.
17 | #$ -m e
18 | #$ -M jblasco@fbg.ub.es
19 | ##########################################
20 | # User environment
21 | ##########################################
22 | # Load the modules needed
23 | . /etc/profile
24 | module load gaussian
25 | ##########################################
26 | # Data transfer
27 | ##########################################
28 | # Copy the data to the directory where the calculations will run.
29 | cd $TMPDIR
30 | export Project=gaussian-linda
31 | export Input=$Project
32 | cp -pr $HOME/path/to/the/input.com $Input
33 | # Set some variables for Linda
34 | export GAUSS_SCRDIR=$TMPDIR
35 | export NODES=\"`cat $TMPDIR/machines`\"
36 | export GAUSS_LFLAGS="-v -nodelist ${NODES}"
37 | ##########################################
38 | # Calculation
39 | ##########################################
40 | # We run gaussian g03
41 | g03l g03lindatest.com > g03lindatest.log
42 | ##########################################
43 | # Transfer of the results
44 | ##########################################
45 | cp -pr $Input $HOME/path/to/save/the/outputs/
46 |
47 |
48 | Three parameters must be included:
49 | * '''nproc''' : number of processors to use within each node
50 | * '''NProcLinda''' : number of nodes to use
51 | * '''mem''' : amount of memory needed to perform the calculation
52 |
53 | The Gaussian input for submitting with the Linda PE has the form:
54 |
55 | %nproc=2
56 | %NProcLinda=4
57 | %mem=400MB
58 | %chk=t_cp
59 | #p b3lyp/cc-pvtz counterpoise=3 opt=z-matrix optcyc=999
60 |
61 | t
62 |
63 | 0 1
64 | O 1
65 | O 1 oo 2
66 | O 2 oo 1 60.0 3
67 | H 1 oh 3 hoo 2 0.0 0 1
68 | H 2 oh 1 hoo 3 0.0 0 2
69 | H 3 oh 2 hoo 1 0.0 0 3
70 | H 1 ho 2 oho 3 d1 0 1
71 | H 2 ho 3 oho 1 d2 0 2
72 | H 3 ho 1 oho 2 d2 0 3
73 |
74 | oo 2.7873
75 | oh 0.9771
76 | hoo 78.2005
77 | ho 0.9609
78 | oho 113.8022
79 | d1 112.335
80 | d2 -113.7495
81 |
82 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-serial.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job (for identification)
6 | #$ -N gaussian-serial-prova01
7 | # (2) Requested resources
8 | #$ -l h_rt=0:50:0
9 | #$ -l h_vmem=6G
10 | # (3) Output files
11 | #$ -cwd
12 | #$ -o gaussian-serial.out
13 | #$ -e gaussian-serial.err
14 | # (4) Send an email when the job ends.
15 | #$ -m e
16 | #$ -M jblasco@fbg.ub.es
17 | ##########################################
18 | # User environment
19 | ##########################################
20 | # Load the modules needed
21 | module load gaussian
22 | ##########################################
23 | # Data transfer
24 | ##########################################
25 | # Copy the data to the directory where the calculations will run.
26 | cd $TMPDIR
27 | export Project=gaussian-serial
28 | export Input=$Project
29 | cp -pr $HOME/path/to/the/input.com $Input
30 | ##########################################
31 | # Calculation
32 | ##########################################
33 | # We run gaussian g03
34 | g03 < ./test001.com > test001.log
35 | ##########################################
36 | # Transfer of the results
37 | ##########################################
38 | cp -pr $Input $HOME/path/to/save/the/outputs/
39 |
--------------------------------------------------------------------------------
/GridEngine/gaussian-smp.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job (for identification)
6 | #$ -N gaussian-smp-prova01
7 | # (2) Requested resources
8 | #$ -l h_rt=0:50:0
9 | #$ -l h_vmem=8G
10 | #$ -pe smp 4
11 | # (3) Output files
12 | #$ -cwd
13 | #$ -o gaussian-smp.out
14 | #$ -e gaussian-smp.err
15 | # (4) Send an email when the job ends.
16 | #$ -m e
17 | #$ -M jblasco@fbg.ub.es
18 | ##########################################
19 | # User environment
20 | ##########################################
21 | # Load the modules needed
22 | module load gaussian
23 | ##########################################
24 | # Data transfer
25 | ##########################################
26 | # Copy the data to the directory where the calculations will run.
27 | cd $TMPDIR
28 | export Project=gaussian-smp
29 | export Input=$Project
30 | cp -pr $HOME/path/to/the/input.com $Input
31 | ##########################################
32 | # Calculation
33 | ##########################################
34 | # We run gaussian g03
35 | g03 < ./test001.com > test001.log
36 | ##########################################
37 | # Transfer of the results
38 | ##########################################
39 | cp -pr $Input $HOME/path/to/save/the/outputs/
40 |
41 | To submit a Gaussian calculation in parallel you must:
42 | Add to the Gaussian input the number of processors to use, %NPROCS=4
43 | Add to the Gaussian input the amount of memory to use, %MEM=8GB
44 | Add the Parallel Environment smp so that it parallelises on the same machine (shared memory); a minimal input header is sketched below.
45 | Gaussian does not usually scale beyond 8 processors (and, depending on the type of calculation, even fewer), which is why we recommend using SMP (a single node) instead of Linda (several nodes).
46 |
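47 | As a minimal sketch (the route section, checkpoint name and file names are illustrative placeholders, not taken from this repository), the top of a matching Gaussian input could look like:
48 |
49 | %NPROCS=4
50 | %MEM=8GB
51 | %chk=test001.chk
52 | #p b3lyp/6-31g(d) opt
53 |
54 | The %NPROCS value should match the -pe smp request, and %MEM should fit within the h_vmem limit requested from SGE.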
--------------------------------------------------------------------------------
/GridEngine/gromacs-3.3.1-ompi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N XRQTC.Gromacs_9LDT-72
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe omp* 12
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gromacs-XRQTC.Gromacs_9LDT-72.out
17 | #$ -e gromacs-XRQTC.Gromacs_9LDT-72.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load Gromacs/3.3.1_ics-11.1.072_fftw3.2.2_ompi-1.4.2
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | mkdir -p gromacs-ompi-XRQTC.Gromacs_9LDT-72
33 | cd gromacs-ompi-XRQTC.Gromacs_9LDT-72
34 | cp $HOME/bench/GROMACS/XRQTC.Gromacs_9LDT/* .
35 | ##########################################
36 | # Run the job
37 | ##########################################
38 | export OMP_NUM_THREADS=1
39 | ulimit -s unlimited
40 | grompp_d -f full_vdw.mdp -c 9LDT-pt-md-3.gro -p 9LDT-bu.top -o 9LDT-bu.tpr
41 | mpirun -np $NSLOTS mdrun_mpi -v -s 9LDT-bu.tpr -o 9LDT-bu.trr > mdrun.out
42 | ##########################################
43 | # Copy the results to our home directory
44 | ##########################################
45 | mkdir -p $HOME/bench/GROMACS/XRQTC.Gromacs_9LDT/OUT/gromacs
46 | cp -r $TMPDIR/* $HOME/bench/GROMACS/XRQTC.Gromacs_9LDT/OUT/gromacs/
47 | ##########################################
49 | # Timing of the results
49 | ##########################################
50 | TEMPS=$(cat md.log | grep Time: | awk '{print $3}')
51 | echo "$NSLOTS $TEMPS" >> $HOME/bench/GROMACS/XRQTC.Gromacs_9LDT/benchmark-ompi-XRQTC.Gromacs_9LDT.dat
52 |
--------------------------------------------------------------------------------
/GridEngine/gromacs-mpi.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Script to submit Gromacs calculations
3 | # with 16 processors using OpenMPI
4 | # Recommended to submit it to 2 nodes of 8 cores
5 | # - Jordi Blasco 12/08/09 -
6 | ##########################################
7 | # SGE options and parameters
8 | ##########################################
9 | # (1) Name of the job (for identification)
10 | #$ -N GROMACS-OpenMPI-16
11 | # (2) Requested resources
12 | #$ -pe ompi 16
13 | # (3) Output files
14 | #$ -cwd
15 | #$ -o gromacs01.out
16 | #$ -e gromacs01.err
17 | # (4) Send an email when the job ends.
18 | #$ -m e
19 | #$ -M jblasco@fbg.ub.es
20 | ##########################################
21 | # User environment
22 | ##########################################
23 | # Load the modules needed
24 | . /etc/profile
25 | module load gromacs/4.0.5_ompi_fftw-3.2.1
26 | ##########################################
27 | # Data transfer
28 | ##########################################
29 | # Copy the data to the directory where the calculations will run.
30 | cd $TMPDIR
31 | cp -pr /$WORK/jblasco/d.dppc/* .
32 | ##########################################
33 | # Calculation
34 | ##########################################
35 | # An output directory is created for the results.
36 | export OMP_NUM_THREADS=1
37 | grompp -v &> grompp.out
38 | mpirun -np $NSLOTS mdrun_mpi -v &> mdrun.out
39 | ##########################################
40 | # Transfer of the results
41 | ##########################################
42 | cp -pr mdrun.out /$WORK/jblasco/d.dppc/
--------------------------------------------------------------------------------
/GridEngine/gromacs-ompi-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N GROMACS-OpenMPI-8
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe ompi 8
10 | # Queue
11 | #$ -q iqtc02.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gromacs01-8.out
17 | #$ -e gromacs01-8.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load gromacs/4.0.5_ompfftw-3.2.1
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | rm gromacs01-8.out
32 | rm gromacs01-8.err
33 | cd $TMPDIR
34 | cp /work/jblasco/d.dppc/* .
35 | ##########################################
36 | # Run the job
37 | ##########################################
38 | export OMP_NUM_THREADS=1
39 | grompp -v &> grompp.out
40 | mpirun -np $NSLOTS mdrun_mpi -v &> mdrun.out
41 | ##########################################
42 | # Copy the results to our home directory
43 | ##########################################
44 | mkdir -p /work/jblasco/bench/gromacs-8
45 | cp -r * /work/jblasco/bench/gromacs-8/
46 |
--------------------------------------------------------------------------------
/GridEngine/gromacs-ompi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N XRQTC.Gromacs_9LDT-72
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe omp* 12
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o gromacs-XRQTC.Gromacs_9LDT-72.out
17 | #$ -e gromacs-XRQTC.Gromacs_9LDT-72.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load Gromacs/4.0.7_ics-11.1.072_fftw3.2.2_ompi-1.4.2
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | mkdir -p gromacs-ompi-XRQTC.Gromacs_9LDT-72
33 | cd gromacs-ompi-XRQTC.Gromacs_9LDT-72
34 | cp $HOME/bench/GROMACS/XRQTC.Gromacs_9LDT/* .
35 | ##########################################
36 | # Run the job
37 | ##########################################
38 | export OMP_NUM_THREADS=1
39 | ulimit -s unlimited
40 | grompp_d -f full_vdw.mdp -c 9LDT-pt-md-3.gro -p 9LDT-bu.top -o 9LDT-bu.tpr
41 | mpirun -np $NSLOTS mdrun_mpi -v -s 9LDT-bu.tpr -o 9LDT-bu.trr > mdrun.out
42 | ##########################################
43 | # Copy the results to our home directory
44 | ##########################################
45 | mkdir -p $HOME/bench/GROMACS/XRQTC.Gromacs_9LDT/OUT/gromacs
46 | cp -r $TMPDIR/* $HOME/bench/GROMACS/XRQTC.Gromacs_9LDT/OUT/gromacs/
47 | ##########################################
48 | # Timing of the results
49 | ##########################################
50 | TEMPS=$(cat md.log | grep Time: | awk '{print $3}')
51 | echo "$NSLOTS $TEMPS" >> $HOME/bench/GROMACS/XRQTC.Gromacs_9LDT/benchmark-ompi-XRQTC.Gromacs_9LDT.dat
52 |
--------------------------------------------------------------------------------
/GridEngine/lammps-mpi.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job (for identification)
6 | #$ -N LAMMPS-prova01
7 | # (2) Requested resources
8 | #$ -l h_rt=0:50:0
9 | #$ -l mem_free=16.0G
10 | #$ -l num_proc=1
11 | #$ -pe mpi 16
12 | # (3) Output files
13 | #$ -cwd
14 | #$ -o lammps01.out
15 | #$ -e lammps01.err
16 | # (4) Send an email when the job ends.
17 | #$ -m e
18 | #$ -M jblasco@fbg.ub.es
19 | ##########################################
20 | # User environment
21 | ##########################################
22 | # Load the modules needed
23 | . /etc/profile
24 | module load lammps
25 | ##########################################
26 | # Data transfer
27 | ##########################################
28 | # Copy the data to the directory where the calculations will run.
29 | cd $TMPDIR
30 | export Project=lammps_mpi_16
31 | export Input=$Project
32 | cp -pr $HOME/path/to/the/inputs/ $Input
33 | ##########################################
34 | # Calculation
35 | ##########################################
36 | # An output directory is created for the results.
37 | export OMP_NUM_THREADS=1
38 | mpirun -np $NSLOTS lmp_mpi < in.test > out.txt
39 | ##########################################
40 | # Transfer of the results
41 | ##########################################
42 | cp -pr $Input $HOME/path/to/save/the/outputs/
43 |
--------------------------------------------------------------------------------
/GridEngine/molcas.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Script to submit Molcas calculations
3 | # with 4 processors on the same machine
4 | # - Jordi Blasco 12/08/07 -
5 | ##########################################
6 | # SGE options and parameters
7 | ##########################################
8 | # (1) Name of the job (for identification)
9 | #$ -N Molcas-prova01
10 | # (2) Requested resources
11 | #$ -l h_rt=0:50:0
12 | #$ -l mem_free=4.0G
13 | #$ -pe smp 4
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o molcas01.out
17 | #$ -e molcas01.err
18 | # (4) Send an email when the job ends.
19 | #$ -m e
20 | #$ -M jblasco@fbg.ub.es
21 | ##########################################
22 | # User environment
23 | ##########################################
24 | # Load the modules needed
25 | module load molcas
26 | ##########################################
27 | # Data transfer
28 | ##########################################
29 | # Copy the data to the directory where the calculations will run.
30 | cd $TMPDIR
31 | export Project=Jobname
32 | export Input="$Project".input
33 | export Output="$Project".out
34 | cp -pr $HOME/path/to/the/input/file.input $Input
35 | ##########################################
36 | # Calculation
37 | ##########################################
38 | # An output directory is created for the results.
39 | export OMP_NUM_THREADS=$NSLOTS
40 | export CPUS=4
41 | molcas "$Input" >>"$Output"
42 |
43 | ##########################################
44 | # Transfer of the results
45 | ##########################################
46 | cp -pr $Output $HOME/path/to/save/the/output/
47 |
--------------------------------------------------------------------------------
/GridEngine/molcas_7.4-ompi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N MOLCAS-mpi-ncores
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe omp* 12
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o molcas-mpi-TEST-ncores.out
17 | #$ -e molcas-mpi-TEST-ncores.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load molcas/7.4_patch045_serial_ics-11.1.072_mkl-11.1.072_ompi-1.4.2
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp -r $HOME/bench/MOLCAS/XRQTC.Molcas_caspt2/* .
33 | ##########################################
34 | # Run the job
35 | ##########################################
36 | export OMP_NUM_THREADS=1
37 | export CPUS=$NSLOTS
38 | #export MOLCASMEM=47104
39 | molcas "MOLCAS.inputmol" >> "MOLCAS.out"
40 | ##########################################
41 | # Copy the results to our home directory
42 | ##########################################
43 | mkdir -p $HOME/bench/MOLCAS/OUT/molcas
44 | #cp -r * $HOME/bench/MOLCAS/OUT/molcas
45 | cp -r $TMPDIR $HOME/bench/MOLCAS/OUT/molcas/
46 |
--------------------------------------------------------------------------------
/GridEngine/molcas_7.4-serial-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/tcsh
2 |
3 | #16/12/09
4 | #NOTE: Molcas 7.2 gives results that differ from version 7.4.
5 | #This is because the new version uses a level shift of 0.5 instead of
6 | #the level shift of 1 used by version 7.2. If a level shift of 0.5 is
7 | #used in version 7.2, the results agree fairly well with the
8 | #latest version.
9 |
10 | #-------------------------------------------------------------------
11 | # SGE parameters
12 | # ------------------------------------------------------------------
13 |
14 | #$ -N NOM_SCRIPT
15 | #$ -pe mpi 1
16 | #$ -cwd
17 | #$ -S /bin/tcsh
18 | #$ -o molcas.out
19 | #$ -e molcas.err
20 | #$ -m e
21 | #$ -M yourmail@ub.edu
22 | #$ -q iqtc01.q
23 |
24 | #---------------------------------------------------------------------
25 | # Load the modules
26 | #---------------------------------------------------------------------
27 | source /opt/Modules/3.2.3/init/tcsh
28 | module load molcas/molcas74_patch045_serial_pgi10.0_blasmolcas
29 |
30 | #---------------------------------------------------------------------
31 | # DATA TO BE MODIFIED BY THE USER
32 | # Prepare the environment
33 | #---------------------------------------------------------------------
34 |
35 | setenv CurrDir /home/jingles/tests/molcas/7.4/Albert
36 | setenv Project Ps
37 | setenv MOLCASMEM 4000
38 | setenv MOLCASDISK 12000
39 | setenv Title Titol_projecte
40 | setenv TempDir $TMPDIR
41 | setenv WorkDir $TempDir/$Project.Work
42 | setenv MOLCAS_SAVE $WorkDir
43 |
44 | #--------------------------------------------------------------------
45 | # Prepare the directories
46 | #--------------------------------------------------------------------
47 | mkdir -p $WorkDir
48 | mkdir -p $TempDir
49 |
50 | cp -r $CurrDir/* $WorkDir
51 | cd $WorkDir
52 |
53 | #---------------------------------------------------------------------
54 | #
55 | # Start executing molcas job qsub -q iqtc01.q script
56 | #
57 | #---------------------------------------------------------------------
58 |
59 | setenv OMP_NUM_THREADS $NSLOTS
60 | set Infile=$WorkDir/$Project.input
61 | set Outfile=$WorkDir/$Project.out
62 | molcas $Infile >> $Outfile
63 |
64 | #---------------------------------------------------------------------
65 | #
66 | # Copy the result
67 | #
68 | #---------------------------------------------------------------------
69 |
70 | cp $Outfile $CurrDir
71 |
72 | #--------------------------------------------------------------------
73 | #
74 | # Clean up the scratch
75 | #
76 | #--------------------------------------------------------------------
77 |
78 | cd -
79 | rm -rf $TempDir
80 |
81 | exit
82 |
--------------------------------------------------------------------------------
/GridEngine/molcas_7.4-serial-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/tcsh
2 |
3 | #16/12/09
4 | #NOTE: Molcas 7.2 gives results that differ from version 7.4.
5 | #This is because the new version uses a level shift of 0.5 instead of
6 | #the level shift of 1 used by version 7.2. If a level shift of 0.5 is
7 | #used in version 7.2, the results agree fairly well with the
8 | #latest version.
9 |
10 | #-------------------------------------------------------------------
11 | # SGE parameters
12 | # ------------------------------------------------------------------
13 |
14 | #$ -N NOM_SCRIPT
15 | #$ -pe mpi 1
16 | #$ -cwd
17 | #$ -S /bin/tcsh
18 | #$ -o molcas.out
19 | #$ -e molcas.err
20 | #$ -m e
21 | #$ -M yourmail@ub.edu
22 | #$ -q iqtc02.q
23 |
24 | #---------------------------------------------------------------------
25 | # Load the modules
26 | #---------------------------------------------------------------------
27 | source /opt/modules/init/tcsh
28 | module load molcas/molcas74_patch045_serial_intel9.1_mkl9.1
29 |
30 | #---------------------------------------------------------------------
31 | # DATA TO BE MODIFIED BY THE USER
32 | # Prepare the environment
33 | #---------------------------------------------------------------------
34 |
35 | setenv CurrDir /home/jingles/tests/molcas/7.4/Albert
36 | setenv Project Ps
37 | setenv MOLCASMEM 4000
38 | setenv MOLCASDISK 12000
39 | setenv Title Titol_projecte
40 | setenv TempDir $TMPDIR #$TMP/$Project
41 | # Note: the $TMP variable is already set by the system itself and contains the path /scratch/$USER
42 | setenv WorkDir $TempDir/$Project.Work
43 | setenv MOLCAS_SAVE $WorkDir
44 |
45 | #--------------------------------------------------------------------
46 | # Prepare the directories
47 | #--------------------------------------------------------------------
48 | mkdir -p $WorkDir
49 | mkdir -p $TempDir
50 |
51 | cp -r $CurrDir/* $WorkDir
52 | cd $WorkDir
53 |
54 | #---------------------------------------------------------------------
55 | #
56 | # Start executing molcas job qsub -q iqtc01.q script
57 | #
58 | #---------------------------------------------------------------------
59 |
60 | setenv OMP_NUM_THREADS $NSLOTS
61 | set Infile=$WorkDir/$Project.input
62 | set Outfile=$WorkDir/$Project.out
63 | molcas $Infile >> $Outfile
64 |
65 | #---------------------------------------------------------------------
66 | #
67 | # Copy the result
68 | #
69 | #---------------------------------------------------------------------
70 |
71 | cp $Outfile $CurrDir
72 |
73 | #--------------------------------------------------------------------
74 | #
75 | # Clean up the scratch
76 | #
77 | #--------------------------------------------------------------------
78 |
79 | cd -
80 | rm -rf $TempDir
81 |
82 | exit
83 |
--------------------------------------------------------------------------------
/GridEngine/molcas_7.4-serial-iqtc03.sub:
--------------------------------------------------------------------------------
1 | #!/bin/tcsh
2 |
3 | #16/12/09
4 | #NOTE: Molcas 7.2 gives results that differ from version 7.4.
5 | #This is because the new version uses a level shift of 0.5 instead of
6 | #the level shift of 1 used by version 7.2. If a level shift of 0.5 is
7 | #used in version 7.2, the results agree fairly well with the
8 | #latest version.
9 |
10 | #-------------------------------------------------------------------
11 | # SGE parameters
12 | # ------------------------------------------------------------------
13 |
14 | #$ -N NOM_SCRIPT
15 | #$ -pe mpi 1
16 | #$ -cwd
17 | #$ -S /bin/tcsh
18 | #$ -o molcas.out
19 | #$ -e molcas.err
20 | #$ -m e
21 | #$ -M yourmail@ub.edu
22 | #$ -q iqtc03.q
23 |
24 | #---------------------------------------------------------------------
25 | # Load the modules
26 | #---------------------------------------------------------------------
27 | source /opt/modules/init/tcsh
28 | module load molcas/molcas74_patch045_serial_intel9.1_mkl9.1
29 |
30 | #---------------------------------------------------------------------
31 | # DATA TO BE MODIFIED BY THE USER
32 | # Prepare the environment
33 | #---------------------------------------------------------------------
34 |
35 | setenv CurrDir /home/jingles/tests/molcas/7.4/Albert
36 | setenv Project Ps
37 | setenv MOLCASMEM 4000
38 | setenv MOLCASDISK 12000
39 | setenv Title Titol_projecte
40 | setenv TempDir $TMPDIR #$TMP/$Project
41 | # Note: the $TMP variable is already set by the system itself and contains the path /scratch/$USER
42 | setenv WorkDir $TempDir/$Project.Work
43 | setenv MOLCAS_SAVE $WorkDir
44 |
45 | #--------------------------------------------------------------------
46 | # Prepare the directories
47 | #--------------------------------------------------------------------
48 | mkdir -p $WorkDir
49 | mkdir -p $TempDir
50 |
51 | cp -r $CurrDir/* $WorkDir
52 | cd $WorkDir
53 |
54 | #---------------------------------------------------------------------
55 | #
56 | # Start executing molcas job qsub -q iqtc01.q script
57 | #
58 | #---------------------------------------------------------------------
59 |
60 | setenv OMP_NUM_THREADS $NSLOTS
61 | set Infile=$WorkDir/$Project.input
62 | set Outfile=$WorkDir/$Project.out
63 | molcas $Infile >> $Outfile
64 |
65 | #---------------------------------------------------------------------
66 | #
67 | # Copy the result
68 | #
69 | #---------------------------------------------------------------------
70 |
71 | cp $Outfile $CurrDir
72 |
73 | #--------------------------------------------------------------------
74 | #
75 | # Clean up the scratch
76 | #
77 | #--------------------------------------------------------------------
78 |
79 | cd -
80 | rm -rf $TempDir
81 |
82 | exit
83 |
--------------------------------------------------------------------------------
/GridEngine/molcas_7.4-serial-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/tcsh
2 |
3 | #16/12/09
4 | #NOTE: Molcas 7.2 gives results that differ from version 7.4.
5 | #This is because the new version uses a level shift of 0.5 instead of
6 | #the level shift of 1 used by version 7.2. If a level shift of 0.5 is
7 | #used in version 7.2, the results agree fairly well with the
8 | #latest version.
9 |
10 | #-------------------------------------------------------------------
11 | # SGE parameters
12 | # ------------------------------------------------------------------
13 |
14 | #$ -N NOM_SCRIPT
15 | #$ -pe smp 1
16 | #$ -cwd
17 | #$ -S /bin/tcsh
18 | #$ -o molcas.out
19 | #$ -e molcas.err
20 | #$ -m e
21 | #$ -M yourmail@ub.edu
22 | #$ -q iqtc04.q
23 |
24 | #---------------------------------------------------------------------
25 | # Load the modules
26 | #---------------------------------------------------------------------
27 | source /opt/modules/init/tcsh
28 | module load molcas/7.4_patch045_serial_intel10.1_mkl10.1
29 |
30 | #---------------------------------------------------------------------
31 | # DATA TO BE MODIFIED BY THE USER
32 | # Prepare the environment
33 | #---------------------------------------------------------------------
34 |
35 | setenv CurrDir /home/jingles/tests/molcas/7.4/Albert
36 | setenv Project Ps
37 | setenv MOLCASMEM 4000
38 | setenv MOLCASDISK 12000
39 | setenv Title Titol_projecte
40 | setenv TempDir $TMPDIR #$TMP/$Project
41 | # Note: the $TMP variable is already set by the system itself and contains the path /scratch/$USER
42 | setenv WorkDir $TempDir/$Project.Work
43 | setenv MOLCAS_SAVE $WorkDir
44 |
45 | #--------------------------------------------------------------------
46 | # Prepare the directories
47 | #--------------------------------------------------------------------
48 | mkdir -p $WorkDir
49 | mkdir -p $TempDir
50 |
51 | cp -r $CurrDir/* $WorkDir
52 | cd $WorkDir
53 |
54 | #---------------------------------------------------------------------
55 | #
56 | # Start executing molcas job qsub -q iqtc01.q script
57 | #
58 | #---------------------------------------------------------------------
59 |
60 | setenv OMP_NUM_THREADS $NSLOTS
61 | set Infile=$WorkDir/$Project.input
62 | set Outfile=$WorkDir/$Project.out
63 | molcas $Infile >> $Outfile
64 |
65 | #---------------------------------------------------------------------
66 | #
67 | # Copy the result
68 | #
69 | #---------------------------------------------------------------------
70 |
71 | cp $Outfile $CurrDir
72 |
73 | #--------------------------------------------------------------------
74 | #
75 | # Clean up the scratch
76 | #
77 | #--------------------------------------------------------------------
78 |
79 | cd -
80 | rm -rf $TempDir
81 |
82 | exit
83 |
--------------------------------------------------------------------------------
/GridEngine/nwchem-ompi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_nwchem_iqtc04
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe omp* 4
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o iqtc04-4.out
17 | #$ -e iqtc04-4.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load nwchem/5.1.1_ics-11.1.072_ompi-1.4.2
27 | INPUT=input_m5_2x_Rqm3.nw
28 | output=$INPUT.log
29 | ##########################################
30 | # Copying files needed
31 | ##########################################
32 | # We copy the inputs to the directory where the jobs will run
33 |
34 | cd $TMPDIR
35 | cp -r $HOME/bench/NWCHEM/$INPUT .
36 | ##########################################
37 | # Run the job
38 | ##########################################
39 | export OMP_NUM_THREADS=1
40 |
41 | echo "INICI"
42 | date
43 | mpirun -np $NSLOTS nwchem $INPUT > $output
44 | echo "FI"
45 | date
46 | ##########################################
47 | # Copy the results to our home directory
48 | ##########################################
49 | mkdir -p $HOME/bench/NWCHEM/OUT_iqtc04
50 | cp -r $TMPDIR $HOME/bench/NWCHEM/OUT_iqtc04
51 |
52 |
--------------------------------------------------------------------------------
/GridEngine/orca-ompi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N XRQTC.Orca_B3LYP-12
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe omp* 12
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o orca-XRQTC.Orca_B3LYP-12.out
17 | #$ -e orca-XRQTC.Orca_B3LYP-12.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load orca
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | cd $TMPDIR
32 | cp $HOME/bench/Orca/XRQTC.Orca_B3LYP/input/* .
33 |
34 | # --------------> ATTENTION <-------------------
35 | # You must put "% pal nprocs $NSLOTS" inside the input!
36 | # The following lines modify the input according to the PE
37 | mv XRQTC.Orca_B3LYP.inp XRQTC.Orca_B3LYP.inp.1
38 | echo "% pal nprocs $NSLOTS
39 | end" > XRQTC.Orca_B3LYP.inp.0
40 | cat XRQTC.Orca_B3LYP.inp.0 XRQTC.Orca_B3LYP.inp.1 > XRQTC.Orca_B3LYP.inp
41 |
42 |
43 | rm XRQTC.Orca_B3LYP.inp.*
44 | ##########################################
45 | # Run the job
46 | ##########################################
47 | export P4_RSHCOMMAND=ssh
48 | export OMP_NUM_THREADS=1
49 | /aplic/ORCA/2.7.0b/ompi-1.4.2/orca XRQTC.Orca_B3LYP.inp > XRQTC.Orca_B3LYP.out
50 |
51 | ##########################################
52 | # Copy the results to our home directory
53 | ##########################################
54 | mkdir -p $HOME/bench/Orca/XRQTC.Orca_B3LYP/OUT/orca
55 | cp -r $TMPDIR $HOME/bench/Orca/XRQTC.Orca_B3LYP/OUT/orca/
56 | ##########################################
57 | # Timing of the results
58 | ##########################################
59 | TEMPS=$(cat XRQTC.Orca_B3LYP.out | grep Time: | awk '{print$3}')
60 | echo "$NSLOTS $TEMPS" >> $HOME/bench/Orca/XRQTC.Orca_B3LYP/benchmark-ompi-XRQTC.Orca_B3LYP.dat
61 |
62 |
--------------------------------------------------------------------------------
/GridEngine/orca_ompi_scratch.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ####################################################
3 | # To use this script execute the following instruction
4 | #
5 | # qsub orca_ompi_scratch.sub name_of_input (without extension)
6 | #
7 | #
8 | ####################################################
9 | #$ -S /bin/bash
10 | #
11 | # name of job
12 | #$ -N orca_ompi ### job name
13 | #$ -e orca_ompi.err ### sge error file
14 | #$ -o orca_ompi.out ### sge output file
15 | ####
16 | #### Select the cluster where you want to run your job iqtc01.q, iqtc02.q & iqtc03.q ####
17 | #$ -q iqtc01.q
18 | #$ -cwd
19 | #$ -m e
20 | ####
21 | #### Change this mail to receive an alert mail when the job finishes ########
22 | #$ -M my@mail.com
23 | #
24 | #
25 | ##### Remember that in iqtc01.q the maximum is 4 cores/node and in iqtc02.q & iqtc03.q it is 8 cores/node
26 | #$ -pe ompi 4
27 |
28 |
29 | source /etc/profile
30 | . /etc/profile.d/modules.sh
31 |
32 | cwd=$PWD
33 |
34 | module load orca/ompi_r1730
35 |
36 | cp $1.inp $TMPDIR/
37 | cd $TMPDIR
38 |
39 | /aplic/orca/orca_amd64_exe_r1730/orca $1.inp > $1.out
40 |
41 | mkdir -p $cwd/$1_out
42 | cp -r * $cwd/$1_out/
43 |
44 |
--------------------------------------------------------------------------------
/GridEngine/orca_ompi_work.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ####################################################
3 | # To use this script execute the following instruction
4 | #
5 | # qsub orca_ompi_work.sub name_of_input (without extension)
6 | #
7 | #
8 | ####################################################
9 | #$ -S /bin/bash
10 | #
11 | # name of job
12 | #$ -N orca_ompi ### job name flag
13 | #$ -e orca_ompi.err ### sge error file
14 | #$ -o orca_ompi.out ### sge output file
15 | ####
16 | #### Select the cluster where you want to run your job iqtc01.q, iqtc02.q & iqtc03.q ####
17 | #$ -q iqtc01.q
18 | #$ -cwd
19 | #$ -m e
20 | ####
21 | #### Change this mail to receive an alert mail when the job finishes ########
22 | #$ -M my@mail.com
23 | #
24 | #
25 | ##### Remember that in iqtc01.q the maximum is 4 cores/node and in iqtc02.q & iqtc03.q it is 8 cores/node
26 | #$ -pe ompi 4
27 |
28 |
29 | source /etc/profile
30 | . /etc/profile.d/modules.sh
31 |
32 | cwd=$PWD
33 |
34 | module load orca/ompi_r1730
35 |
36 | cp $1.inp $TMPDIR/
37 |
38 | mkdir $WORK/$1_DIR
39 | cd $WORK/$1_DIR
40 | cp -r $TMPDIR/* .
41 |
42 | /aplic/orca/orca_amd64_exe_r1730/orca $1.inp > $1.out
43 |
44 | mkdir -p $cwd/$1_out
45 | cp -r * $cwd/$1_out/
46 |
47 | cd $cwd/$1_out/
48 | rm -r $WORK/$1_DIR
49 |
--------------------------------------------------------------------------------
/GridEngine/siesta-3.0-ompi-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_siesta_iqtc02
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe ompi 4
10 | # Queue
11 | #$ -q iqtc02.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o siesta_iqtc02-4.out
17 | #$ -e siesta_iqtc02-4.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load siesta/3.0-rc2_paralel_ompi
27 |
28 | INPUT=SnO2FC.fdf
29 | OUTPUT=$INPUT.log
30 | ##########################################
31 | # Copying files needed
32 | ##########################################
33 | # We copy the inputs to the directory where the jobs will run
34 |
35 | cp -r * $TMPDIR/
36 | cd $TMPDIR
37 | #cp -r $HOME/bench/SIESTA/XRQTC.SIESTA_SnO2_FCfullBZ/input/* .
38 | ##########################################
39 | # Run the job
40 | ##########################################
41 | export OMP_NUM_THREADS=1
42 |
43 | mpirun -np $NSLOTS siesta < $INPUT > $OUTPUT
44 | ##########################################
45 | # Copy the results to our home directory
46 | ##########################################
47 | mkdir -p $HOME/bench/SIESTA/XRQTC.SIESTA_SnO2_FCfullBZ/OUT_iqtc02
48 | cp -r $TMPDIR $HOME/bench/SIESTA/XRQTC.SIESTA_SnO2_FCfullBZ/OUT_iqtc02
49 |
50 |
--------------------------------------------------------------------------------
/GridEngine/siesta-3.0-ompi-iqtc02_tcsh.sub:
--------------------------------------------------------------------------------
1 | #!/bin/tcsh
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_siesta_iqtc02
7 | # (2) Requested resources
8 | #$ -pe ompi 4
9 | # Queue
10 | #$ -q iqtc02.q
11 | # Shell
12 | #$ -S /bin/tcsh
13 | # (3) Output files
14 | #$ -cwd
15 | #$ -o siesta_iqtc02-4.out
16 | #$ -e siesta_iqtc02-4.err
17 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
18 | ##$ -m e
19 | ##$ -M yourmail@ub.edu
20 | #$ -S /bin/tcsh
21 | ##########################################
22 | # User environment
23 | ##########################################
24 | # Load the modules needed
25 | source /etc/profile.d/modules.csh
26 | module load siesta/3.0-rc2_paralel_ompi
27 |
28 | set INPUT=SnO2FC.fdf
29 | set OUTPUT=$INPUT.log
30 | ##########################################
31 | # Copying files needed
32 | ##########################################
33 | # We copy the inputs to the directory where the jobs will run
34 |
35 | cp -r * $TMPDIR/
36 | cd $TMPDIR
37 | #cp -r $HOME/bench/SIESTA/XRQTC.SIESTA_SnO2_FCfullBZ/input/* .
38 | ##########################################
39 | # Run the job
40 | ##########################################
41 | setenv OMP_NUM_THREADS 1
42 |
43 | mpirun -np $NSLOTS siesta < $INPUT > $OUTPUT
44 | ##########################################
45 | # Copy the results to our home directory
46 | ##########################################
47 | mkdir -p $HOME/bench/SIESTA/XRQTC.SIESTA_SnO2_FCfullBZ/OUT_iqtc02
48 | cp -r $TMPDIR $HOME/bench/SIESTA/XRQTC.SIESTA_SnO2_FCfullBZ/OUT_iqtc02
49 |
50 |
--------------------------------------------------------------------------------
/GridEngine/siesta-3.0-ompi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N test_siesta_iqtc04
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe omp* 8
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o siesta_iqtc04-8.out
17 | #$ -e siesta_iqtc04-8.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu.com
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load siesta/3.0-rc2_paralel_ompi
27 | INPUT=SnO2FC.fdf
28 | output=$INPUT.log
29 | ##########################################
30 | # Copying files needed
31 | ##########################################
32 | # We copy the inputs to the directory where the jobs will run
33 |
34 | cd $TMPDIR
35 | cp -r $HOME/bench/SIESTA/XRQTC.SIESTA_SnO2_FCfullBZ/input/* .
36 | ##########################################
37 | # Run the job
38 | ##########################################
39 | export OMP_NUM_THREADS=1
40 |
41 | echo "START"
42 | date
43 | mpirun -np $NSLOTS siesta < $INPUT > $output
44 | echo "END"
45 | date
46 |
47 | ##########################################
48 | # Copy the results to our home directory
49 | ##########################################
50 | mkdir -p $HOME/bench/SIESTA/XRQTC.SIESTA_SnO2_FCfullBZ/OUT_iqtc04
51 | cp -r $TMPDIR $HOME/bench/SIESTA/XRQTC.SIESTA_SnO2_FCfullBZ/OUT_iqtc04
52 |
53 |
--------------------------------------------------------------------------------
/GridEngine/siesta-mpi.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job (for identification)
6 | #$ -N siesta-prova01
7 | # (2) Requested resources
8 | #$ -l h_rt=0:50:0
9 | #$ -l mem_free=2.0G
10 | #$ -l num_proc=1
11 | #$ -pe mpi 4
12 | # (3) Output files
13 | #$ -cwd
14 | #$ -o siesta01.out
15 | #$ -e siesta01.err
16 | # (4) Send an email when the job finishes.
17 | #$ -m e
18 | #$ -M jblasco@fbg.ub.es
19 | ##########################################
20 | # User environment
21 | ##########################################
22 | # Load the modules to be used
23 | . /etc/profile
24 | module load siesta
25 | ##########################################
26 | # Data transfer
27 | ##########################################
28 | # Copy the data to the directory where the calculations will run.
29 | cd $TMPDIR
30 | export Project=siesta_mpi_4
31 | export Input=$Project
32 | cp -pr $HOME/path/to/your/inputs/ $Input
33 | ##########################################
34 | # Calculation
35 | ##########################################
36 | # Create an output directory for the results.
37 | export OMP_NUM_THREADS=1
38 | mpirun -np $NSLOTS siesta_mpi < input.fdf > output.out
39 | ##########################################
40 | # Transfer of the results
41 | ##########################################
42 | cp -pr $Input $HOME/path/to/store/the/outputs/
43 |
--------------------------------------------------------------------------------
/GridEngine/stress_x86-64.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N STRESS-ncores
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp ncores
10 | # Shell
11 | #$ -S /bin/bash
12 | # (3) Output files
13 | #$ -cwd
14 | #$ -o $HOME/stress-out/stress-$HOSTNAME.out
15 | #$ -e $HOME/stress-out/stress-$HOSTNAME.err
16 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
17 | ##$ -m e
18 | ##$ -M yourmail@ub.edu
19 | ##########################################
20 | # User environment.
21 | ##########################################
22 | # Load the modules needed
23 | . /etc/profile.d/modules.sh
24 | module load stress/1.0.4
25 | ##########################################
26 | # Copying files needed
27 | ##########################################
28 | # We copy the inputs to the directory where the jobs will run
29 | cd $TMPDIR
30 | ##########################################
31 | # Run the job
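   | # Illustrative note: the next two lines size the stress run from the node itself.
   | #   disk = 1/100 of the /scratch filesystem size reported by df (GB value)
   | #   mem  = 1/20 of the currently free memory, in MB (from /proc/meminfo)
   | # These values feed the 20 VM workers (-m 20) and 100 disk workers (-d 100) below.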
32 | disk=$(df -hP /scratch | tail -1 | gawk '{DF=$2; if( DF ~ /G/){gsub("G","",DF); print DF/100}}')
33 | mem=$(cat /proc/meminfo | grep MemFree | gawk '{print $2/1024/20}')
34 | stress_x86-64 --cpu $NSLOTS -m 20 --vm-bytes $mem -d 100 --hdd-bytes $disk
35 | ##########################################
36 | # Dump dmesg to check for possible system errors
37 | dmesg > $HOME/stress-out/dmesg-$HOSTNAME.out
38 |
--------------------------------------------------------------------------------
/GridEngine/stress_x86.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N STRESS-ncores
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe smp ncores
10 | # Shell
11 | #$ -S /bin/bash
12 | # (3) Output files
13 | #$ -cwd
14 | #$ -o $HOME/stress-out/stress-$HOSTNAME.out
15 | #$ -e $HOME/stress-out/stress-$HOSTNAME.err
16 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
17 | ##$ -m e
18 | ##$ -M yourmail@ub.edu
19 | ##########################################
20 | # User environment.
21 | ##########################################
22 | # Load the modules needed
23 | . /etc/profile.d/modules.sh
24 | module load stress/1.0.4
25 | ##########################################
26 | # Copying files needed
27 | ##########################################
28 | # We copy the inputs to the directory where the jobs will run
29 | cd $TMPDIR
30 | ##########################################
31 | # Run the job
32 | stress-x86
33 | ##########################################
34 |
--------------------------------------------------------------------------------
/GridEngine/vasp-4.6-ompi-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N VASP-4.6-OMPI
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe ompi 16
10 | # Queue
11 | #$ -q iqtc01.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o vasp-ompi.out
17 | #$ -e vasp-ompi.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load openmpi/1.3.3_intel_11.1.059
27 | module load vasp/4.6.28_openmpi_intel
28 | ##########################################
29 | # Copying files needed
30 | ##########################################
31 | # We copy the inputs to the directory where the jobs will run
32 | echo "Got $NSLOTS processors."
33 | cd $WORKDIR
34 | cp -r /home/jblasco/TESTS/vasp-4.6/tests/test-hg/* .
35 | ##########################################
36 | # Run the job
37 | ##########################################
38 |
39 | startdir=$PWD
40 | export OMP_NUM_THREADS=1
41 |
42 | # -------------> WARNING <-------------------
43 | # Check what binary fits your needs
44 | # vasp MPI parallel, charge density and wavefunction complex
45 | # vasp_gamma MPI parallel, gamma-point only (-DwNGZhalf)
46 | # vasp_vtst MPI parallel, charge density + wavefunction complex + VASP TST Tools
47 | # -------------------------------------------
48 |
49 | mpirun -np $NSLOTS /aplic/vasp/4.6.28_ompi_intel/vasp
50 |
51 | ##########################################
52 | # Copy the results to our home directory
53 | ##########################################
54 | mkdir -p $HOME/TESTS/vasp-4.6/tests/out
55 | cp -r * $HOME/TESTS/vasp-4.6/tests/out/
56 |
--------------------------------------------------------------------------------
/GridEngine/vasp-4.6-ompi-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N VASP-4.6-OMPI
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe ompi 16
10 | # Queue
11 | #$ -q iqtc02.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o vasp-ompi.out
17 | #$ -e vasp-ompi.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load vasp/4.6
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | echo "Got $NSLOTS processors."
32 | cd $WORKDIR
33 | cp -r $HOME/TESTS/vasp-4.6/tests/test-hg/* .
34 | ##########################################
35 | # Run the job
36 | ##########################################
37 |
38 | startdir=$PWD
39 | export OMP_NUM_THREADS=1
40 | export MPICH_PROCESS_GROUP=no
41 | mpirun -np $NSLOTS /aplic/VASP/4.6_intel_11.0.074_openmpi/vasp.4.6/vasp
42 |
43 | ##########################################
44 | # Copy the results to our home directory
45 | ##########################################
46 | mkdir -p $HOME/TESTS/vasp-4.6/tests/out
47 | cp -r * $HOME/TESTS/vasp-4.6/tests/out/
48 |
--------------------------------------------------------------------------------
/GridEngine/vasp-4.6-ompi-iqtc03.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N VASP-4.6-OMPI
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe ompi 16
10 | # Queue
11 | #$ -q iqtc03.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o vasp-ompi.out
17 | #$ -e vasp-ompi.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load vasp/4.6
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | echo "Got $NSLOTS processors."
32 | cd $WORKDIR
33 | cp -r /home/jblasco/TESTS/vasp-4.6/tests/test-hg/* .
34 | ##########################################
35 | # Run the job
36 | ##########################################
37 |
38 | startdir=$PWD
39 | export OMP_NUM_THREADS=1
40 | export MPICH_PROCESS_GROUP=no
41 | mpirun -np $NSLOTS /aplic/VASP/4.6_intel_11.0.074_openmpi/vasp.4.6/vasp
42 |
43 | ##########################################
44 | # Copy the results to our home directory
45 | ##########################################
46 | mkdir -p $HOME/TESTS/vasp-4.6/tests/out
47 | cp -r * $HOME/TESTS/vasp-4.6/tests/out/
48 |
--------------------------------------------------------------------------------
/GridEngine/vasp-4.6-ompi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N XRQTC.VASP_ceria-surface-96
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | # !!!WARNING!!!!
10 | # IF YOU'RE GOING TO USE 12 OR LESS CORES PLEASE USE THE SMP SCRIPT!!!
11 | #$ -pe omp* 24
12 | # Queue
13 | #$ -q iqtc04.q
14 | # Shell
15 | #$ -S /bin/bash
16 | # (3) Output files
17 | #$ -cwd
18 | #$ -o vasp-XRQTC.VASP_ceria-surface-96.out
19 | #$ -e vasp-XRQTC.VASP_ceria-surface-96.err
20 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
21 | ##$ -m e
22 | ##$ -M yourmail@ub.edu
23 | ##########################################
24 | # User environment.
25 | ##########################################
26 | # Load the modules needed
27 | . /etc/profile.d/modules.sh
28 | module load VASP/4.6.36_ics-11.1.072_ompi-1.4.2
29 | ##########################################
30 | # Copying files needed
31 | ##########################################
32 | # We copy the inputs to the directory where the jobs will run
33 | #cd $TMPDIR
34 | mkdir -p /work/jblasco/vasp-XRQTC.VASP_ceria-surface-96
35 | cd /work/jblasco/vasp-XRQTC.VASP_ceria-surface-96
36 | cp -r $HOME/bench/VASP/XRQTC.VASP_ceria-surface/input/* .
37 | ##########################################
38 | # Run the job
39 | ##########################################
40 | export OMP_NUM_THREADS=1
41 | ulimit -l unlimited
42 |
43 | # -------------> WARNING <-------------------
44 | # Check what binary fits your needs
45 | # vasp MPI parallel, charge density and wavefunction complex
46 | # vasp_cd MPI parallel, charge density: half grid mode (-DNGZhalf)
47 | # vasp_gamma MPI parallel, gamma-point only (-DwNGZhalf)
48 | # vasp_vtst MPI parallel, charge density + wavefunction complex + VASP TST Tools
49 | # vasp_vtst_cd MPI parallel, charge density: half grid mode (-DNGZhalf) + VASP TST Tools
50 | # vasp_vtst_gamma MPI parallel, gamma-point only (-DwNGZhalf) + VASP TST Tools
51 | # -------------------------------------------
52 |
53 | mpirun -np $NSLOTS vasp_cd
54 |
55 | ##########################################
56 | # Copy the results to our home directory
57 | ##########################################
58 | mkdir -p $HOME/bench/VASP/XRQTC.VASP_ceria-surface/OUT/vasp
59 | #cp -r $TMPDIR $HOME/bench/VASP/XRQTC.VASP_ceria-surface/OUT/vasp/
60 | cp -r /work/jblasco/vasp-XRQTC.VASP_ceria-surface-96 $HOME/bench/VASP/XRQTC.VASP_ceria-surface/OUT/vasp/
61 | rm -fr /work/jblasco/vasp-XRQTC.VASP_ceria-surface-96
62 | ##########################################
63 | # Timing of the results
64 | ##########################################
65 | TEMPS=$(cat OUTCAR | grep "Total CPU time used" | awk '{print $(NF)}')
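   | # Illustrative note: this grabs the last field of the "Total CPU time used" line
   | # in OUTCAR, i.e. the total CPU time in seconds.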
66 | echo "$NSLOTS $TEMPS" >> $HOME/bench/VASP/XRQTC.VASP_ceria-surface/benchmark-Nehalem-DP-XRQTC.VASP_ceria-surface.dat
67 |
68 |
--------------------------------------------------------------------------------
/GridEngine/vasp-5.2-ompi-iqtc01.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N VASP-5.2-OpenMPI
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe ompi 8
10 | # Queue
11 | #$ -q iqtc01.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o vasp-ompi.out
17 | #$ -e vasp-ompi.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load vasp/5.2_intel11.1_openmpi1.3.3
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | echo "Got $NSLOTS processors."
32 | cd $TMPDIR
33 | cp -r /home/jblasco/TESTS/vasp5.2/tests/test-hg/* .
34 | ##########################################
35 | # Run the job
36 | ##########################################
37 | export OMP_NUM_THREADS=1
38 | mpirun -np $NSLOTS vasp
39 |
40 | ##########################################
41 | # Copy the results to our home directory
42 | ##########################################
43 | mkdir -p $HOME/TESTS/vasp5.2/tests/out
44 | cp -r * $HOME/TESTS/vasp5.2/tests/out/
45 |
--------------------------------------------------------------------------------
/GridEngine/vasp-5.2-ompi-iqtc02.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N VASP-5.2-OpenMPI
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe ompi 16
10 | # Queue
11 | #$ -q iqtc02.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o vasp-ompi.out
17 | #$ -e vasp-ompi.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load vasp/5.2
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | echo "Got $NSLOTS processors."
32 | cd $TMPDIR
33 | cp -r /home/jblasco/TESTS/vasp5.2/tests/test-hg/* .
34 | ##########################################
35 | # Run the job
36 | ##########################################
37 | export OMP_NUM_THREADS=1
38 | mpirun -np $NSLOTS vasp
39 |
40 | ##########################################
41 | # Copy the results to our home directory
42 | ##########################################
43 | mkdir -p $HOME/TESTS/vasp5.2/tests/out
44 | cp -r * $HOME/TESTS/vasp5.2/tests/out/
45 |
--------------------------------------------------------------------------------
/GridEngine/vasp-5.2-ompi-iqtc03.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N VASP-5.2-OpenMPI
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe ompi 16
10 | # Queue
11 | #$ -q iqtc03.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o vasp-ompi.out
17 | #$ -e vasp-ompi.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load vasp/5.2
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | echo "Got $NSLOTS processors."
32 | cd $TMPDIR
33 | cp -r /home/jblasco/TESTS/vasp5.2/tests/test-hg/* .
34 | ##########################################
35 | # Run the job
36 | ##########################################
37 | export OMP_NUM_THREADS=1
38 | mpirun -np $NSLOTS vasp
39 |
40 | ##########################################
41 | # Copy the results to our home directory
42 | ##########################################
43 | mkdir -p $HOME/TESTS/vasp5.2/tests/out
44 | cp -r * $HOME/TESTS/vasp5.2/tests/out/
45 |
--------------------------------------------------------------------------------
/GridEngine/vasp-5.2-ompi-iqtc04.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job
6 | #$ -N XRQTC.VASP_ceria-surface-96
7 | # (2) Requested resources
8 | # Parallel Environment and number of cores
9 | #$ -pe omp* 24
10 | # Queue
11 | #$ -q iqtc04.q
12 | # Shell
13 | #$ -S /bin/bash
14 | # (3) Output files
15 | #$ -cwd
16 | #$ -o vasp-XRQTC.VASP_ceria-surface-96.out
17 | #$ -e vasp-XRQTC.VASP_ceria-surface-96.err
18 | # (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
19 | ##$ -m e
20 | ##$ -M yourmail@ub.edu
21 | ##########################################
22 | # User environment.
23 | ##########################################
24 | # Load the modules needed
25 | . /etc/profile.d/modules.sh
26 | module load VASP/5.2.12_ics-11.1.072_ompi-1.4.2
27 | ##########################################
28 | # Copying files needed
29 | ##########################################
30 | # We copy the inputs to the directory where the jobs will run
31 | #cd $TMPDIR
32 | mkdir -p /work/jblasco/vasp-XRQTC.VASP_ceria-surface-96
33 | cd /work/jblasco/vasp-XRQTC.VASP_ceria-surface-96
34 | cp -r $HOME/bench/VASP/XRQTC.VASP_ceria-surface/input/* .
35 | ##########################################
36 | # Run the job
37 | ##########################################
38 | export OMP_NUM_THREADS=1
39 | ulimit -l unlimited
40 |
41 | # -------------> WARNING <-------------------
42 | # Check what binary fits your needs
43 | # vasp MPI parallel, charge density and wavefunction complex
44 | # vasp_cd MPI parallel, charge density: half grid mode (-DNGZhalf)
45 | # vasp_gamma MPI parallel, gamma-point only (-DwNGZhalf)
46 | # vasp_vtst MPI parallel, charge density + wavefunction complex + VASP TST Tools
47 | # vasp_vtst_cd MPI parallel, charge density: half grid mode (-DNGZhalf) + VASP TST Tools
48 | # vasp_vtst_gamma MPI parallel, gamma-point only (-DwNGZhalf) + VASP TST Tools
49 | # -------------------------------------------
50 |
51 | mpirun -np $NSLOTS vasp_cd
52 |
53 | ##########################################
54 | # Copy the results to our home directory
55 | ##########################################
56 | mkdir -p $HOME/bench/VASP/XRQTC.VASP_ceria-surface/OUT/vasp
57 | #cp -r $TMPDIR $HOME/bench/VASP/XRQTC.VASP_ceria-surface/OUT/vasp/
58 | cp -r /work/jblasco/vasp-XRQTC.VASP_ceria-surface-96 $HOME/bench/VASP/XRQTC.VASP_ceria-surface/OUT/vasp/
59 | rm -fr /work/jblasco/vasp-XRQTC.VASP_ceria-surface-96
60 | ##########################################
61 | # Timing of the results
62 | ##########################################
63 | TEMPS=$(cat OUTCAR | grep "Total CPU time used" | awk '{print $(NF)}')
64 | echo "$NSLOTS $TEMPS" >> $HOME/bench/VASP/XRQTC.VASP_ceria-surface/benchmark-Nehalem-DP-XRQTC.VASP_ceria-surface.dat
65 |
66 |
--------------------------------------------------------------------------------
/GridEngine/vasp-mpi.sub:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##########################################
3 | # SGE options and parameters
4 | ##########################################
5 | # (1) Name of the job (for identification)
6 | #$ -N VASP-5.2-prova01
7 | # (2) Requested resources
8 | #$ -l h_rt=0:50:0
9 | #$ -l mem_free=4.0G
10 | #$ -pe mpi 16
11 | # (3) Output files
12 | #$ -cwd
13 | #$ -o vasp01.out
14 | #$ -e vasp01.err
15 | # (4) Send an email when the job finishes.
16 | #$ -m e
17 | #$ -M jblasco@fbg.ub.es
18 | ##########################################
19 | # User environment
20 | ##########################################
21 | # Load the modules to be used
22 | module load vasp/5.2
23 | module load openmpi
24 | ##########################################
25 | # Data transfer
26 | ##########################################
27 | # Copy the data to the directory where the calculations will run.
28 | cd $TMPDIR
29 | export Project=vasp_mpi_16
30 | export Input=$Project
31 | cp -pr $HOME/path/to/your/inputs/ $Input
32 | ##########################################
33 | # Calculation
34 | ##########################################
35 | # Create an output directory for the results.
36 | export OMP_NUM_THREADS=1
37 | mpirun -np $NSLOTS vasp
38 |
39 | ##########################################
40 | # Transfer of the results
41 | ##########################################
42 | cp -pr $Input $HOME/path/to/store/the/outputs/
43 |
--------------------------------------------------------------------------------
/LSF/ior.lsf:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #BSUB -J IOR
3 | #BSUB -W 2:00
4 | #BSUB -n 8
5 | #BSUB -e %J.err
6 | #BSUB -o %J.out
7 | #BSUB -m "hostA hostD hostB"
8 | module load openmpi/4.0.2
9 |
10 | # Options
11 | NUM_PROCS=8
12 | #BLOCK_SIZE=$((3*${RAM_SIZE_PER_STORAGE_SERVER}*${NUM_STORAGE_SERVERS}/${NUM_PROCS}))
13 | BLOCK_SIZE=4000
14 | WORKDIR=$HOME/workdir/ior
15 | mkdir -p $WORKDIR
16 | cd $WORKDIR
17 |
18 | #Multi-stream Throughput Benchmark
19 | mpirun --map-by node -np ${NUM_PROCS} ~/IOR/IOR -wr -i5 -t2m -b ${BLOCK_SIZE} -g -F -e \
20 |     -o $WORKDIR/ior_multi_stream_throughput.txt
21 |
22 | #Shared File Throughput Benchmark
23 | mpirun --map-by node -np ${NUM_PROCS} ~/IOR/IOR -wr -i5 -t1200k -b ${BLOCK_SIZE} -g -e \
24 |     -o $WORKDIR/ior_share_file_throughput.txt
25 |
26 | # IOPS Benchmark
27 | mpirun --map-by node -np ${NUM_PROCS} ~/IOR/IOR -w -i5 -t4k -b ${BLOCK_SIZE} -F -z -g \
28 |     -o $WORKDIR/ior_iops.txt
29 |
--------------------------------------------------------------------------------
/LSF/iozone.lsf:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #BSUB -J IOZone
3 | #BSUB -W 2:00
4 | #BSUB -n 1
5 | #BSUB -e %J.err
6 | #BSUB -o %J.out
7 | #BSUB -m "hostA hostD hostB"
8 | module load openmpi/4.0.2
9 | WORKDIR=$HOME/workdir/iozone
10 | mkdir -p $WORKDIR
11 | cd $WORKDIR
12 | ~/iozone -Rac -g 16G > $WORKDIR/iozone.log
13 |
--------------------------------------------------------------------------------
/LSF/mdtest.lsf:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #BSUB -J MDTest
3 | #BSUB -W 2:00
4 | #BSUB -n 1
5 | #BSUB -e %J.err
6 | #BSUB -o %J.out
7 | #BSUB -m "hostA hostD hostB"
8 | module load openmpi/4.0.2
9 |
10 | # Options
11 | #
12 | # The value for the number of processes ${NUM_PROCS} depends on the number of clients to test and the number of processes per client.
13 | # The number of directories can be calculated as ${NUM_DIRS} = (parameter -b ^ parameter -z).
14 | # The total number of files should always be higher than 1,000,000, so ${FILES_PER_DIR} is calculated as:
15 |
16 | NUM_DIRS=18
17 | NUM_PROCS=1
18 | FILES_PER_DIR=$((1000000 / ${NUM_DIRS} / ${NUM_PROCS}))
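   | # With the values above this is 1000000 / 18 / 1 = 55555 files per directory.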
19 | WORKDIR=$HOME/workdir/mdtest
20 | mkdir -p $WORKDIR
21 | cd ~/mdtest
22 | # mpirun --map-by node -np 10 mdtest -C -T -r -F -d $HOME/workdir/mdtest -i 3 -I ${FILES_PER_DIR} -z 2 -b 8 -L -u
23 | ./mdtest -b 18 -z 2 -i 1 -I ${FILES_PER_DIR} -d $WORKDIR
24 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | High Performance Computing Knowledge Portal
2 | ====================================================================
3 | This Git project contains a collection of useful submit scripts
4 | for the most popular applications in HPC.
5 |
6 | Please send us feedback and suggestions.
7 |
8 | Thanks!
9 |
10 | Learn more about HPC Knowledge Portal @ http://www.hpckp.org
11 |
12 | Learn more about HPCNow! @ http://www.hpcnow.com
13 |
--------------------------------------------------------------------------------
/Slurm/GATK_multithreads.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # GATK SubmitScript
3 | #################################################################
4 | #SBATCH -J GATK_SHM_THREADS
5 | #SBATCH -A hpcnow # Project Account
6 | #SBATCH --time=08:00:00 # Walltime
7 | #SBATCH --mem-per-cpu=4G # memory/cpu
8 | #SBATCH --cpus-per-task=4 # 4 OpenMP Threads
9 | #################################################################
10 | ### Load the Environment
11 | module load GATK/3.1-1
12 | #################################################################
13 | ### The files will be allocated in the /dev/shm FS
14 | cd $SHM_DIR
15 | cp -r /sNow/test/GATK/examples/* .
16 | #################################################################
17 | ### Run the Parallel Program
18 | ### More information about multithreading capabilities here:
19 | ### http://gatkforums.broadinstitute.org/discussion/1975/how-can-i-use-parallelism-to-make-gatk-tools-run-faster
20 | MEM=$(($SLURM_CPUS_PER_TASK*$SLURM_MEM_PER_CPU))
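   | # Illustrative example: with --cpus-per-task=4 and --mem-per-cpu=4G, Slurm exports
   | # SLURM_MEM_PER_CPU=4096 (MB), so MEM=16384 and java runs with -Xmx16384m.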
21 | srun java -Xmx${MEM}m -Djava.io.tmpdir=$SHM_DIR -jar $GATK -R exampleFASTA.fasta -I exampleBAM.bam -T CountReads
22 | # Multi-threading options
23 | # -nt / --num_threads controls the number of data threads sent to the processor
24 | # -nct / --num_cpu_threads_per_data_thread controls the number of CPU threads allocated to each data thread
25 | #srun java -Xmx${MEM}m -Djava.io.tmpdir=$SHM_DIR -jar $GATK -R exampleFASTA.fasta -I exampleBAM.bam -T CountReads -nct $SLURM_CPUS_PER_TASK
26 | #srun java -Xmx${MEM}m -Djava.io.tmpdir=$SHM_DIR -jar $GATK -R exampleFASTA.fasta -I exampleBAM.bam -T CountReads -nt $SLURM_CPUS_PER_TASK
27 | #################################################################
28 | ### Transferring the results to the project directory
29 | #mkdir -p $HOME/OUT/GATK
30 | #cp -pr *out $HOME/OUT/GATK/
31 |
--------------------------------------------------------------------------------
/Slurm/Intel-Trace-Analyser-and-Collector-instrumented.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J ITAC
3 | #SBATCH -A hpcnow
4 | #SBATCH --time=6:00:00
5 | #SBATCH --mem-per-cpu=6144
6 | #SBATCH --ntasks=48
7 | #SBATCH --nodes=2
8 | ml 2021
9 | ml intel/2021a
10 | ml VTune/2021.6.0
11 | ml itac/2021.5.0
12 | source itacvars.sh impi5
13 | ######################################################################################
14 | export VT_PCTRACE=4
15 | export VT_CONFIG=/sNow/SubmitScripts/slurm/trace.conf
16 | # The code needs to be compiled with the -tcollect option in order to be instrumented, e.g.:
17 | # mpiifort -g -O3 -xhost -ip -tcollect yourcode.f -o yourcode_traces $VT_ADD_LIBS
18 | srun /path/to/instrumented/code [my apps options]
19 |
--------------------------------------------------------------------------------
/Slurm/Intel-Trace-Analyser-and-Collector.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J ITAC
3 | #SBATCH -A hpcnow
4 | #SBATCH --time=6:00:00
5 | #SBATCH --mem-per-cpu=6144
6 | #SBATCH --ntasks=48
7 | #SBATCH --nodes=2
8 | ml 2021
9 | ml intel/2021a
10 | ml VTune/2021.6.0
11 | ml itac/2021.5.0
12 | source itacvars.sh impi5
13 | ######################################################################################
14 | unset I_MPI_PMI_LIBRARY
15 | mpiexec.hydra -trace /path/to/instrumented/code [my apps options]
16 |
--------------------------------------------------------------------------------
/Slurm/OpenFOAM-parallel.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J OpenFOAM
3 | #SBATCH -A hpcnow # Project account
4 | #SBATCH -D /sNow/tests/OpenFOAM/test01
5 | #SBATCH --time=06:00:00 # Walltime
6 | #SBATCH --mem-per-cpu=4G # memory/cpu
7 | #SBATCH --ntasks=256 # MPI processes
8 |
9 | module load OpenFOAM/2.3.0-ictce-5.4.0
10 | source $FOAM_BASH
11 |
12 | srun interFoam -parallel
13 |
14 |
--------------------------------------------------------------------------------
/Slurm/OpenFOAM-serial.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J OpenFOAM
3 | #SBATCH -A hpcnow # Project account
4 | #SBATCH -D /projects/hpcnow/planejet
5 | #SBATCH --time=06:00:00 # Walltime
6 | #SBATCH --mem-per-cpu=4G # memory/cpu
7 |
8 | module load OpenFOAM/2.3.0-ictce-5.4.0
9 | source $FOAM_BASH
10 |
11 | srun blockMesh
12 | srun simpleFoam
13 |
14 |
--------------------------------------------------------------------------------
/Slurm/OpenSees.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J example
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=00:20:00 # Walltime
5 | #SBATCH --ntasks=48 # number of tasks
6 | #SBATCH --mem-per-cpu=2G # memory/cpu
7 | #SBATCH --workdir=/sNow/test/OpenSees
8 | #SBATCH -C wm
9 | module load OpenSees
10 | srun OpenSeesMP ./example.tcl
11 |
--------------------------------------------------------------------------------
/Slurm/QuantumESPRESSO-hybrid.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J QuantumESPRESSO-Hybrid
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --ntasks=64 # number of tasks
5 | #SBATCH --mem-per-cpu=4G # memory/cpu
6 | #SBATCH --cpus-per-task=8 # 8 OpenMP Threads
7 | #SBATCH --time=2:00:00
8 |
9 | # Load the user environment
10 | ml QuantumESPRESSO/5.0.2-intel-2015a
11 | ### The files will be allocated in the shared FS
12 | cp -pr /sNow/test/QuantumESPRESSO/Benchmarks/GRIR443/* $SCRATCH_DIR/
13 | cd $SCRATCH_DIR
14 | ### how to run executables (not all of them are parallel)
15 | PARA_PREFIX=srun
16 | PARA_POSTFIX="-ntg 8"
17 | ### available flags:
18 | # -nimage n number of images (or -nimages)
19 | # -npool n number of pools (or -npools)
20 | # -nband n number of band groups (or -nb, -nbgrp, -nband_group )
21 | # -ntask_groups n number of task groups (or -ntg)
22 | # -ndiag n number of processors for linear algebra (or -nproc_ortho, -northo, -nproc_diag)
23 | PW_COMMAND="$PARA_PREFIX pw.x $PARA_POSTFIX"
24 | PP_COMMAND="$PARA_PREFIX pp.x $PARA_POSTFIX"
25 | PLOTRHO_COMMAND="$PARA_PREFIX plotrho.x"
26 | BANDS_COMMAND="$PARA_PREFIX bands.x $PARA_POSTFIX"
27 | PLOTBAND_COMMAND="$PARA_PREFIX plotband.x"
28 | ### Run the Parallel Program
29 | $PW_COMMAND -in grir443.in
30 | ### Transfer output files back to the project folder
31 | #cp -pr $SCRATCH_DIR/ /your_project/folder/
32 |
--------------------------------------------------------------------------------
/Slurm/QuantumESPRESSO-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J QuantumESPRESSO-MPI
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --ntasks=512 # number of tasks
5 | #SBATCH --mem-per-cpu=4G # memory/cpu
6 | #SBATCH --time=2:00:00
7 |
8 | # Load the user environment
9 | ml QuantumESPRESSO/5.0.2-intel-2015a
10 | ### The files will be allocated in the shared FS
11 | cp -pr /sNow/test/QuantumESPRESSO/Benchmarks/GRIR443/* $SCRATCH_DIR/
12 | cd $SCRATCH_DIR
13 | ### how to run executables (not all of them are parallel)
14 | PARA_PREFIX=srun
15 | PARA_POSTFIX=
16 | ### available flags:
17 | # -nimage n number of images (or -nimages)
18 | # -npool n number of pools (or -npools)
19 | # -nband n number of band groups (or -nb, -nbgrp, -nband_group )
20 | # -ntask_groups n number of task groups (or -ntg)
21 | # -ndiag n number of processors for linear algebra (or -nproc_ortho, -northo, -nproc_diag)
22 | PW_COMMAND="$PARA_PREFIX pw.x $PARA_POSTFIX"
23 | PP_COMMAND="$PARA_PREFIX pp.x $PARA_POSTFIX"
24 | PLOTRHO_COMMAND="$PARA_PREFIX plotrho.x"
25 | BANDS_COMMAND="$PARA_PREFIX bands.x $PARA_POSTFIX"
26 | PLOTBAND_COMMAND="$PARA_PREFIX plotband.x"
27 | ### Run the Parallel Program
28 | $PW_COMMAND -in grir443.in
29 | ### Transfer output files back to the project folder
30 | #cp -pr $SCRATCH_DIR/ /your_project/folder/
31 |
--------------------------------------------------------------------------------
/Slurm/R-parallel-mpi.r:
--------------------------------------------------------------------------------
1 | library(Rmpi)
2 |
3 | id <- mpi.comm.rank(comm = 0)
4 | np <- mpi.comm.size(comm = 0)
5 | hostname <- mpi.get.processor.name()
6 |
7 | msg <- sprintf("Hello world from process %03d of %03d, on host %s\n",
8 | id, np, hostname)
9 | cat(msg)
10 |
11 | mpi.barrier(comm = 0)
12 | mpi.finalize()
13 |
--------------------------------------------------------------------------------
/Slurm/R-parallel-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -A hpcnow
3 | #SBATCH --ntasks=24
4 | #SBATCH --time=00:10:00
5 | #SBATCH --mem-per-cpu=2G
6 | #SBATCH --job-name=TestJob
7 |
8 | module load R/3.0.3-goolf-1.5.14
9 | mpirun Rscript R-parallel-mpi.r
10 |
--------------------------------------------------------------------------------
/Slurm/R-template.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -A hpcnow
3 | #SBATCH --time=7-00:00:00
4 | #SBATCH --mem-per-cpu=6G
5 | #SBATCH -J TestJob
6 |
7 | module load R/3.0.3-goolf-1.5.14
8 |
9 | srun Rscript ./PMT.R
10 |
--------------------------------------------------------------------------------
/Slurm/VASP-iomkl.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Scalability tests
3 | # for i in {4..12}; do sbatch --ntasks=$(($i*$i)) --nodes=$((($i*$i)/12))-$((1+($i*$i)/12)) -C wm /sNow/SubmitScripts/slurm/VASP-iomkl.sl ; done
4 | #SBATCH -J VASPBench
5 | #SBATCH -A hpcnow
6 | #SBATCH --time=1:59:00
7 | ##SBATCH --ntasks=96
8 | #SBATCH --mem-per-cpu=8G
9 | ml VASP/5.3.5-iomkl-4.6.13
10 | cd $SCRATCH_DIR
11 | #
12 | # Available tests are : NeSI.VASP_BandsandDOS NeSI.VASP_SurfOpt XRQTC.VASP_ceria-surface XRQTC.VASP_DFT
13 | #
14 | TEST=KristaGSteenbergen-MD-bulk_melting
15 | cp -pr /sNow/test/VASP/$TEST/input/* .
16 | SQRTCORES=$(echo "sqrt($SLURM_NPROCS)"|bc)
17 | sed -i "s/SQRTCORES/$SQRTCORES/g" INCAR
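   | # Illustrative example: a run submitted with --ntasks=144 gives SQRTCORES=12, so
   | # every SQRTCORES placeholder in INCAR is replaced by 12.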
18 | ##########################################################################
19 | ### Run the Parallel Program
20 | # -------------> WARNING <-------------------
21 | # Check what binary fits your needs
22 | # vasp MPI parallel, charge density and wavefunction complex
23 | # vasp_cd MPI parallel, charge density: half grid mode (-DNGZhalf)
24 | # vasp_md_cd MPI parallel, charge density: half grid mode (-DNGZhalf), MD
25 | # vasp_gamma MPI parallel, gamma-point only (-DwNGZhalf)
26 | # vasp_md_gamma MPI parallel, gamma-point only (-DwNGZhalf), MD
27 | # vasp_vtst MPI parallel, charge density + wavefunction complex + VASP TST Tools
28 | # vasp_vtst_cd MPI parallel, charge density: half grid mode (-DNGZhalf) + VASP TST Tools
29 | # vasp_vtst_gamma MPI parallel, gamma-point only (-DwNGZhalf) + VASP TST Tools
30 | # -------------------------------------------
31 | export OMP_NUM_THREADS=1
32 | VASPBIN=vasp_md_cd
33 | srun $VASPBIN
34 | ##########################################################################
35 | ### Transferring the results to the output directory
36 | mkdir -p /sNow/test/VASP/OUT/$TEST
37 | rm WAVECAR CHG*
38 | cp -pr $SCRATCH_DIR /sNow/test/VASP/OUT/$TEST/
39 | TEMPS=$(cat OUTCAR | grep "Total CPU time used" | awk '{print $(NF)}')
40 | echo "$SLURM_NPROCS $TEMPS" >> /sNow/test/VASP/benchmark-$TEST-$VASPBIN-5.3.5-iomkl-4.6.13-$LMOD_SYSTEM_NAME.log
41 |
--------------------------------------------------------------------------------
/Slurm/VTune-MPI.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J VTuneMPI
3 | #SBATCH -A hpcnow
4 | #SBATCH --time=1:00:00
5 | #SBATCH --mem-per-cpu=8G
6 | #SBATCH -C ib
7 | #SBATCH --ntasks=48
8 | #SBATCH --nodes=2
9 | ml 2021
10 | ml intel/2021a
11 | ml VTune/2021.6.0
12 | ml itac/2021.5.0
13 |
14 | # Some sites may require unsetting the following variable and using mpiexec.hydra instead of srun.
15 | #unset I_MPI_PMI_LIBRARY
16 | # The following line uses the GTOOL option to instruct Intel MPI to collect hotspots metrics with VTune only on MPI rank 10.
17 | export I_MPI_GTOOL="vtune -collect hotspots -r result:10"
18 | srun my_app [my_app_options]
19 |
20 | # To analyze a single MPI process with VTune Profiler, specify the relevant command in the corresponding argument set. The example below runs 47 MPI tasks, one of which is analysed with VTune:
21 | #mpiexec.hydra -n 47 ./my_app : -n 1 vtune -c hotspots -r results_dir -- ./my_app
22 |
--------------------------------------------------------------------------------
/Slurm/VTune-OpenMP.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J VTuneOpenMP
3 | #SBATCH -A hpcnow
4 | #SBATCH --time=1:00:00
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH -C ib
7 | #SBATCH --cpus-per-task=24
8 | ml 2021
9 | ml intel/2021a
10 | ml VTune/2021.6.0
11 |
12 | srun vtune -r my_profile_dir -c hotspots my_app [my_app_options]
13 |
--------------------------------------------------------------------------------
/Slurm/VTune-serial.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J VTuneSerial
3 | #SBATCH -A hpcnow
4 | #SBATCH --time=1:00:00
5 | #SBATCH --mem-per-cpu=4G
6 | #SBATCH -C ib
7 | #SBATCH --cpus-per-task=1
8 | ml 2021
9 | ml intel/2021a
10 | ml VTune/2021.6.0
11 |
12 | srun vtune -r my_profile_dir -c hotspots my_app [my_app_options]
13 |
--------------------------------------------------------------------------------
/Slurm/abaqus.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Abaqus SubmitScript
3 | # Optimized for running a parallel job
4 | ######################################################
5 | #SBATCH -J Abaqus_TEST
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --time=08:00:00 # Walltime
8 | #SBATCH --ntasks=24 # number of tasks
9 | #SBATCH --mem-per-cpu=1G # memory/cpu
10 | ######################################################
11 | ### Load the Environment
12 | module load Abaqus
13 | ######################################################
14 | ### The files will be allocated in the shared FS
15 | #cp myinput.inp $SCRATCH_DIR
16 | #cd $SCRATCH_DIR
17 | source /sNow/SubmitScripts/slurm/slurm_setup_abaqus-env.sh
18 | ######################################################
19 | ### Run the Parallel Program
20 | abaqus job=test input=myinput.inp cpus=$SLURM_NTASKS -verbose 3 standard_parallel=all mp_mode=mpi interactive
21 | ######################################################
22 | ### Transferring the results to the home directory ($HOME)
23 | #mkdir -p $HOME/OUT/abaqus
24 | #cp -pr $SCRATCH_DIR $HOME/OUT/abaqus
25 |
--------------------------------------------------------------------------------
/Slurm/ansys-cfx.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # ANSYS_CFX SubmitScript
3 | # Optimized for running a parallel job
4 | ######################################################
5 | #SBATCH -J ANSYS_CFX_TEST
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --time=01:00:00 # Walltime
8 | #SBATCH --ntasks=16 # number of tasks
9 | #SBATCH --mem-per-cpu=7G # memory/cpu
10 | ######################################################
11 |
12 | ### Load the Environment
13 | module load ANSYS/15.0
14 |
15 | source /sNow/SubmitScripts/slurm/slurm_setup_cfx-env2.sh
16 |
17 | # Executable and input file.
18 | defname=example.def
19 |
20 | cfx5solve -batch -single -def $defname -par -par-dist $CFX_HOSTLIST
21 |
--------------------------------------------------------------------------------
/Slurm/ansys-fluent-requeue.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # ANSYS_FLUENT SubmitScript
3 | # Optimized for running a parallel job
4 | ################################################################
5 | #SBATCH -J FLUENT_REQUEUE
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --time=01:03:00 # Walltime
8 | #SBATCH --ntasks=48 # number of tasks
9 | #SBATCH --mem-per-cpu=4G # memory/cpu
10 | #SBATCH -C sb
11 | #SBATCH -p requeue
12 | ################################################################
13 | ### Load the Environment
14 | module load ANSYS/15.0
15 | cd /sNow/test/ansys/fluent/case3/input/
16 | ### Creating the hostfile
17 | srun hostname > FLUENT_HOSTFILE
18 | ### Run Fluent Model
19 | fluent -v3ddp -g -t $SLURM_NTASKS -mpi=pcmpi -cnf=FLUENT_HOSTFILE -pinfiniband -ssh << EOFluentInput > stats-$SLURM_NTASKS-$SLURM_JOBID.out
20 | rc a16.cas
21 | rd fluent-1-12000.dat
22 | /solve/dual-time-iterate 10 10
23 | /file/write-case-data a16-$SLURM_NTASKS-$SLURM_JOBID.out
24 | exit
25 | EOFluentInput
26 | ################################################################
27 |
--------------------------------------------------------------------------------
/Slurm/ansys-fluent.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # ANSYS_FLUENT SubmitScript
3 | # Optimized for running a parallel job
4 | ######################################################
5 | #SBATCH -J ANSYS_FLUENT_TEST
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --time=01:03:00 # Walltime
8 | #SBATCH --ntasks=24 # number of tasks
9 | #SBATCH --mem-per-cpu=4G # memory/cpu
10 | #SBATCH --nodes=1-2
11 | ######################################################
12 | ### Load the Environment
13 | module load ANSYS/15.0
14 |
15 | rm FLUENT_HOSTFILE
16 | srun hostname > FLUENT_HOSTFILE
17 | cat FLUENT_HOSTFILE
18 |
19 | #########################################################
20 | ## Run the Parallel Program
21 | # The following is a generic command. To make it work, decide whether you want
22 | # to run the 2d, 2ddp, 3d or 3ddp solver and use ONLY one of the options
23 | # shown in the <> below:
24 | # fluent <2d|2ddp|3d|3ddp> -g -t2
25 | fluent -v3ddp -g -t$SLURM_NTASKS -mpi=pcmpi -cnf=FLUENT_HOSTFILE -pib -ssh << EOFluentInput > output1.stat.dat
26 | rcd testCase.cas
27 | /solve/dual-time-iterate 201
28 | /file/write-case-data cdfiner.out.cas
29 | exit
30 | EOFluentInput
31 | ######################################################
32 | ### Transferring the results to the home directory ($HOME)
33 |
--------------------------------------------------------------------------------
/Slurm/array-io.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J JobArray
3 | #SBATCH --time=01:00:00 # Walltime
4 | #SBATCH -A hpcnow # Project Account
5 | #SBATCH --ntasks=1 # number of tasks
6 | #SBATCH --mem-per-cpu=8G # memory/cpu
7 | #SBATCH --cpus-per-task=4 # 4 OpenMP Threads
8 | #SBATCH --array=1-1000 # Array definition
9 | #SBATCH --gres=io # IO intensive jobs
10 | srun sleep $SLURM_ARRAY_TASK_ID
11 | srun echo running on $(hostname)
12 |
--------------------------------------------------------------------------------
/Slurm/array-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J JobArray
3 | #SBATCH --time=01:00:00 # Walltime
4 | #SBATCH -A hpcnow # Project Account
5 | #SBATCH --ntasks=2 # number of tasks
6 | #SBATCH --mem-per-cpu=8G # memory/cpu
7 | #SBATCH --cpus-per-task=2        # 2 OpenMP Threads
8 | #SBATCH --array=20-180 # Array definition
9 | srun sleep $SLURM_ARRAY_TASK_ID
10 | srun echo running on $(hostname)
11 | #srun stress --cpu 1 --vm-bytes 128M --timeout 10s
12 | module load intel/2015a
13 | srun /sNow/utils/bin/pi_mpi
14 |
--------------------------------------------------------------------------------
/Slurm/array.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J JobArray
3 | #SBATCH --time=01:00:00 # Walltime
4 | #SBATCH -A hpcnow # Project Account
5 | #SBATCH --ntasks=1 # number of tasks
6 | #SBATCH --mem-per-cpu=8G # memory/cpu
7 | #SBATCH --cpus-per-task=2        # 2 OpenMP Threads
8 | #SBATCH --array=100-120 # Array definition
9 | srun sleep $SLURM_ARRAY_TASK_ID
10 | srun echo running on $(hostname)
11 |
--------------------------------------------------------------------------------
/Slurm/array_builder.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | for o in $(seq 2 2 2); do
3 | for po in $(seq 2.5 0.5 10.000); do
4 | for osc in $(seq 2 .25 2.00); do
5 | for freq in $(seq .6 .2 .800); do
6 | echo "${osc} ${freq} ${po} 1500 ${o}" >> params.dat
7 | done
8 | done
9 | done
10 | done
11 |
--------------------------------------------------------------------------------
/Slurm/array_multi_parameters.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J JobArray
3 | #SBATCH --time=01:00:00 # Walltime
4 | #SBATCH -A hpcnow # Project Account
5 | #SBATCH --mem-per-cpu=1G # memory/cpu
6 | #SBATCH --array=1-1000 # Array definition
7 | #SBATCH -n 64
8 |
9 | # If you need to explore several parameters or to use several input files
10 | # you can create a mapping file like params.dat and use a single array
11 | # job description file to run all the simulations.
12 | # The script array_builder.sh is a simple example that generates the params.dat file
13 |
14 | PARAMETERS=$(sed -n ${SLURM_ARRAY_TASK_ID}p params.dat)
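   | # Illustrative example (hypothetical values): if the SLURM_ARRAY_TASK_ID-th line of
   | # params.dat were "2 .6 2.5 1500 2" (the field order written by array_builder.sh),
   | # this task would effectively run:  your_binary 2 .6 2.5 1500 2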
15 |
16 | #srun your_binary $PARAMETERS
17 | srun -n 1 sleep 120
18 |
--------------------------------------------------------------------------------
/Slurm/blast+_array.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # NCBI-BLAST+ SubmitScript
3 | # Optimized for running 500 array jobs using a single core each.
4 | #################################################################
5 | #SBATCH -J BLAST-SHM
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --time=08:00:00 # Walltime
8 | #SBATCH --mem-per-cpu=4G # memory/cpu
9 | #SBATCH --cpus-per-task=1 # 1 OpenMP Threads
10 | #SBATCH --array=1-500 # Array definition
11 | #################################################################
12 | ### Load the Environment
13 | module load BLAST/2.2.29-intel-2015a
14 | #################################################################
15 | ### The files will be allocated in the /dev/shm FS
16 | echo $SHM_DIR
17 | mkdir $SHM_DIR/{queries,results}
18 | QUERY=frag_$(printf "%03d\n" $SLURM_ARRAY_TASK_ID)
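   | # Illustrative example: SLURM_ARRAY_TASK_ID=7 gives QUERY=frag_007.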
19 | cp -r /sNow/test/blast/queries/$QUERY $SHM_DIR/queries/
20 | cd $SHM_DIR
21 | #################################################################
22 | ### Run the Parallel Program
23 | echo $SLURM_ARRAY_TASK_ID
24 | srun blastn -query queries/$QUERY \
25 | -db nt -task blastn -num_alignments 5 \
26 | -num_descriptions 5 -evalue 0.0001 \
27 | -out results/$QUERY.out
28 | #################################################################
29 | ### Transferring the results to the project directory
30 | mkdir -p $HOME/OUT/BLAST
31 | cp -pr results/$QUERY.out $HOME/OUT/BLAST/
32 |
--------------------------------------------------------------------------------
/Slurm/blast+_array_multithreads.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # NCBI-BLAST+ SubmitScript
3 | # Optimized for running 1000 array jobs using 4 threads each.
4 | #################################################################
5 | #SBATCH -J BLAST_SHM_THREADS
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --time=08:00:00 # Walltime
8 | #SBATCH --mem-per-cpu=4G # memory/cpu
9 | #SBATCH --cpus-per-task=4 # 4 OpenMP Threads
10 | #SBATCH --array=1-1000 # Array definition
11 | #################################################################
12 | ### Load the Environment
13 | module load BLAST/2.2.29-intel-2015a
14 | #################################################################
15 | ### The files will be allocated in the /dev/shm FS
16 | echo $SHM_DIR
17 | mkdir $SHM_DIR/{queries,results}
18 | QUERY=frag_$(printf "%03d\n" $SLURM_ARRAY_TASK_ID)
19 | cp -r /sNow/test/blast/queries/$QUERY $SHM_DIR/queries/
20 | cd $SHM_DIR
21 | #################################################################
22 | ### Run the Parallel Program
23 | echo $SLURM_ARRAY_TASK_ID
24 | srun blastn -query queries/$QUERY \
25 | -db nt -task blastn -num_alignments 5 \
26 | -num_threads $SLURM_CPUS_PER_TASK -num_descriptions 5 -evalue 0.0001 \
27 | -out results/$QUERY.out
28 | #################################################################
29 | ### Transferring the results to the project directory
30 | mkdir -p $HOME/OUT/BLAST
31 | cp -pr results/$QUERY.out $HOME/OUT/BLAST/
32 |
--------------------------------------------------------------------------------
/Slurm/checkpoint-blcr.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J OpenMP_JOB
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=01:05:00 # Walltime
5 | #SBATCH --mem-per-cpu=6G # memory/cpu
6 | #SBATCH --cpus-per-task=12 # 12 OpenMP Threads
7 | export SLURM_CHECKPOINT=12:00:00
8 | export SLURM_CHECKPOINT_DIR=$CHK_DIR/$SLURM_JOB_NAME
9 | #export SLURM_RESTART_DIR=$SLURM_CHECKPOINT_DIR
10 |
11 | mkdir $SLURM_CHECKPOINT_DIR
12 |
13 | cd $SCRATCH_DIR
14 |
15 | srun_cr sleep 3600
16 | #srun_cr stress --vm 1 --vm-bytes 128M --timeout 3600s
17 |
--------------------------------------------------------------------------------
/Slurm/cuda-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J GPU_JOB
3 | #SBATCH --time=01:00:00 # Walltime
4 | #SBATCH -A hpcnow # Project Account
5 | #SBATCH --ntasks=4 # number of tasks
6 | #SBATCH --ntasks-per-node=2 # number of tasks per node
7 | #SBATCH --mem-per-cpu=8G # memory/cpu
8 | #SBATCH --cpus-per-task=4 # 4 OpenMP Threads
9 | #SBATCH --gres=gpu:2 # GPUs per node
10 | srun binary_cuda_mpi
11 |
--------------------------------------------------------------------------------
/Slurm/cuda.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # CUDA SubmitScript
3 | ##########################################################################
4 | #SBATCH -J CUDA_JOB
5 | #SBATCH --time=01:00:00 # Walltime
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --mem-per-cpu=2G # memory/cpu
8 | #SBATCH --cpus-per-task=2 # 2 OpenMP Threads
9 | #SBATCH --ntasks=1
10 | #SBATCH --gres=gpu:2
11 | ##########################################################################
12 | ### Load the Environment Modules
13 | module load CUDA/5.5
14 | ##########################################################################
15 | ### The files will be allocated in the shared FS ($SCRATCH_DIR)
16 | cd $SCRATCH_DIR
17 | ##########################################################################
18 | ### Run the Parallel Program
19 | srun cuda_binary
20 |
--------------------------------------------------------------------------------
/Slurm/easybuild.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ######################################################
3 | #SBATCH -J EasyBuild
4 | #SBATCH -A hpcnow
5 | #SBATCH --time=06:00:00
6 | #SBATCH --mem-per-cpu=4G
7 | #SBATCH --cpus-per-task=4
8 | #SBATCH -D /sNow/easybuild/jobs
9 | #SBATCH --uid=5674
10 | ######################################################
11 | ## Call the Easy Build
12 | srun eb GROMACS-4.6.5-ictce-5.5.0-mt.eb --try-toolchain=ictce,5.4.0 --robot --force
13 | srun eb WRF-3.4-goalf-1.1.0-no-OFED-dmpar.eb -r
14 | ######################################################
15 | ## Update the LMOD cache
16 | /sNow/apps/lmod/utils/BuildSystemCacheFile/createSystemCacheFile.sh
17 |
--------------------------------------------------------------------------------
/Slurm/flexpart.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # FLEXPART-WRF SubmitScript
3 | ######################################################
4 | #SBATCH -J FLEXPART-WRF
5 | #SBATCH -A hpcnow # Project Account
6 | #SBATCH --time=00:10:00 # Walltime
7 | #SBATCH --mem-per-cpu=1G # memory/cpu
8 | ######################################################
9 | ### Load the Environment
10 | module load flexpart-wrf/2006-intel-2015a
11 | ######################################################
12 | ### The files will be allocated in the shared FS
13 | cd /sNow/src/flexpart-wrf/run.example2
14 | ######################################################
15 | ### Run the Parallel Program
16 | srun flexpart_wrf
17 |
--------------------------------------------------------------------------------
/Slurm/gaussian.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Gaussian SubmitScript
3 | # Optimized to run a parallel job on 8 cores
4 | ######################################################
5 | #SBATCH -J Gaussian
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --time=01:00:00 # Walltime
8 | #SBATCH --cpus-per-task=8 # 8 OpenMP Threads
9 | #SBATCH --mem-per-cpu=8G # memory/cpu
10 | ######################################################
11 | ### Load the Environment Modules for Gaussian
12 | module load Gaussian/D.01
13 | ######################################################
14 | ### Transferring the data to the local disk ($SCRATCH_DIR)
15 | cd $SCRATCH_DIR
16 | cp /sNow/test/Gaussian/test0324.com .
17 | export GAUSS_SCRDIR=$SCRATCH_DIR
18 | ######################################################
19 | ### Run the Parallel Program
20 | srun g09 < ./test0324.com > test0324.out
21 | ######################################################
22 | ### Transferring the results to the project directory
23 | cp -pr $SCRATCH_DIR $HOME/OUT/gaussian/
24 |
--------------------------------------------------------------------------------
/Slurm/gromacs-cuda-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Gromacs MPI+OpenMP+CUDA SubmitScript
3 | ##########################################################################
4 | #SBATCH -J GROMACS_JOB
5 | #SBATCH --time=01:00:00 # Walltime
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --mem-per-cpu=8G # memory/cpu
8 | #SBATCH --ntasks=4 # number of tasks
9 | #SBATCH --ntasks-per-node=2 # number of tasks per node
10 | #SBATCH --cpus-per-task=4 # 4 OpenMP Threads
11 | #SBATCH --gres=gpu:2 # GPUs per node
12 | ##########################################################################
13 | ### Load the Environment Modules for Gromacs 4.5.4
14 | module load GROMACS/4.5.4
15 | ##########################################################################
16 | ### Transferring the data to the local disk ($SCRATCH_DIR)
17 | cd $SCRATCH_DIR
18 | cp $HOME/Gromacs_9LDT/input/* .
19 | ##########################################################################
20 | ### Run the Parallel Program
21 | srun -n 1 grompp -f full_vdw.mdp -c 9LDT-pt-md-3.gro -p 9LDT-bu.top -o 9LDT-bu.tpr
22 | srun mdrun_mpi -v -s 9LDT-bu.tpr -o 9LDT-bu.trr > mdrun.out
23 | ##########################################################################
24 | ### Transferring the results to the home directory ($HOME)
25 | cp -pr $SCRATCH_DIR $HOME/results/
26 |
--------------------------------------------------------------------------------
/Slurm/gromacs-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Gromacs MPI SubmitScript
3 | ##########################################################################
4 | #SBATCH -J GROMACS_JOB
5 | #SBATCH --time=01:00:00 # Walltime
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --mem-per-cpu=2G # memory/cpu
8 | #SBATCH --ntasks=24 # number of tasks
9 | ##########################################################################
10 | ### Load the Environment Modules for Gromacs 4.5.4
11 | module load GROMACS/4.5.4
12 | ##########################################################################
13 | ### Transferring the data to the local disk ($SCRATCH_DIR)
14 | cd $SCRATCH_DIR
15 | cp $HOME/Gromacs_9LDT/input/* .
16 | ##########################################################################
17 | ### Run the Parallel Program
18 | srun -n 1 grompp -f full_vdw.mdp -c 9LDT-pt-md-3.gro -p 9LDT-bu.top -o 9LDT-bu.tpr
19 | srun mdrun_mpi -v -s 9LDT-bu.tpr -o 9LDT-bu.trr > mdrun.out
20 | ##########################################################################
21 | ### Transferring the results to the home directory ($HOME)
22 | cp -pr $SCRATCH_DIR $HOME/results/
23 |
--------------------------------------------------------------------------------
/Slurm/gromacs-openmp.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Gromacs OpenMP SubmitScript
3 | ##########################################################################
4 | #SBATCH -J GROMACS_JOB
5 | #SBATCH --time=01:00:00 # Walltime
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --mem-per-cpu=2G # memory/cpu
8 | #SBATCH --cpus-per-task=12     # 12 OpenMP Threads
9 | ##########################################################################
10 | ### Load the Environment Modules for Gromacs 4.5.4
11 | module load GROMACS/4.5.4
12 | ##########################################################################
13 | ### Transferring the data to the local disk ($SCRATCH_DIR)
14 | cd $SCRATCH_DIR
15 | cp $HOME/Gromacs_9LDT/input/* .
16 | ##########################################################################
17 | ### Run the Parallel Program
18 | srun grompp -f full_vdw.mdp -c 9LDT-pt-md-3.gro -p 9LDT-bu.top -o 9LDT-bu.tpr
19 | srun mdrun_mpi -v -s 9LDT-bu.tpr -o 9LDT-bu.trr > mdrun.out
20 | ##########################################################################
21 | ### Transferring the results to the home directory ($HOME)
22 | cp -pr $SCRATCH_DIR $HOME/results/
23 |
--------------------------------------------------------------------------------
/Slurm/hello_world-array.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ######################################################
3 | #SBATCH -J JobName
4 | ##SBATCH -A # Project Account
5 | #SBATCH --time=00:10:00 # Walltime
6 | #SBATCH --mem-per-cpu=1G # memory/cpu
7 | #SBATCH -a 1-96 # array of 96 jobs
8 | #SBATCH -n 1 # 1 core means serial
9 | ######################################################
10 | ### Run the Program
11 | srun echo "hello from SLURM_ARRAY_TASK_ID : $SLURM_ARRAY_TASK_ID"
12 | srun sleep 10
13 |
--------------------------------------------------------------------------------
/Slurm/hello_world.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ######################################################
3 | #SBATCH -J JobName
4 | ##SBATCH -A      # Project Account
5 | #SBATCH --time=00:10:00 # Walltime
6 | #SBATCH --mem-per-cpu=1G # memory/cpu
7 | #SBATCH -n 1 # 1 core means serial
8 | #SBATCH -o serial.out # OPTIONAL
9 | #SBATCH -e serial.err # OPTIONAL
10 | ######################################################
11 | ### Run the Program
12 | srun echo "hello"
13 |
--------------------------------------------------------------------------------
/Slurm/hybrid.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J Hybrid_JOB
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=01:00:00 # Walltime
5 | #SBATCH --ntasks=4 # number of tasks
6 | #SBATCH --mem-per-cpu=4G # memory/cpu
7 | #SBATCH --cpus-per-task=8 # 8 OpenMP Threads
8 | module load intel/intel-2015a
9 | srun /sNow/utils/bin/pi_hybrid
10 |
--------------------------------------------------------------------------------
/Slurm/intel-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J MPI_JOB
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=10:00:00 # Walltime
5 | #SBATCH --ntasks=16 # number of tasks
6 | #SBATCH --mem-per-cpu=1G # memory/cpu
7 | module load intel/2015a
8 | srun /sNow/utils/bin/pi_mpi
9 |
--------------------------------------------------------------------------------
/Slurm/intel-mpitune.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J MPITUNE
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=1:00:00 # Walltime
5 | ##SBATCH --ntasks=48 # number of tasks
6 | #SBATCH --mem-per-cpu=2048 # memory/cpu (in MB)
7 |
8 | ml VTune/2015_update2
9 | ml supermagic/20130104-intel-2015a
10 | ml itac/9.0.3.051
11 | source itacvars.sh impi5
12 | unset I_MPI_PMI_LIBRARY
13 | export I_MPI_FABRICS=shm:dapl
14 |
15 | mpitune -a \"mpiexec.hydra -n 16 supermagic -a -m 2M -w $SCRATCH_DIR/ -n 10 \" -of tune.conf
16 |
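The tuning run writes its results to tune.conf. As a hedged sketch, a later Intel MPI run could reuse the tuned settings via the -tune option of mpiexec.hydra, assuming the same fabric and process count:

    mpiexec.hydra -tune ./tune.conf -n 16 supermagic -a -m 2M -w $SCRATCH_DIR/ -n 10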
--------------------------------------------------------------------------------
/Slurm/lammps-hybrid.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # LAMMPS SubmitScript
3 | # Optimized to run a parallel job on 1024 cores (128 MPI tasks x 8 OpenMP threads)
4 | ######################################################
5 | #SBATCH -J LAMMPS
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --time=00:30:00 # Walltime
8 | #SBATCH --mem-per-cpu=4G # memory/cpu
9 | #SBATCH --ntasks=128 # Number of tasks
10 | #SBATCH --cpus-per-task=8 # Number of OpenMP threads
11 | ######################################################
12 | ### Load the Environment
13 | module load lammps
14 | ######################################################
15 | ### The files will be allocated in the shared FS
16 | cd $SCRATCH_DIR
17 | cp -pr /sNow/test/LAMMPS/* .
18 | ######################################################
19 | ### Run the Parallel Program
20 | #Lennard Jones Benchmark input parameters: Weak Scaling
21 | srun lmp_mpi -var x 10 -var y 40 -var z 40 -in in.lj
22 | ######################################################
23 | ### Transferring the results to the home directory
24 | cp -pr $SCRATCH_DIR $HOME/OUT/lammps/
25 |
--------------------------------------------------------------------------------
/Slurm/launcher.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J Launcher
3 | #SBATCH --time=01:00:00 # Walltime
4 | #SBATCH -A hpcnow # Project Account
5 | #SBATCH --nodes=2 # 2 nodes
6 | #SBATCH --ntasks=128 # number of tasks
7 | #SBATCH --mem-per-cpu=6G # memory/cpu
8 | #SBATCH -o Parametric.%j.out
9 | #SBATCH -e Parametric.%j.err
10 | #
11 | # Simple SLURM script for submitting multiple serial
12 | # jobs (e.g. parametric studies) using a script wrapper
13 | # to launch the jobs.
14 | #
15 | # To use, build the launcher executable and your
16 | # serial application(s) and place them in your WORKDIR
17 | # directory. Then, edit the LAUNCHER_JOB_FILE to specify
18 | # each executable per process.
19 | #------------------------------------------------------
20 |
21 | # Load user environment
22 | module load launcher
23 | export LAUNCHER_RMI=SLURM
24 | export LAUNCHER_WORKDIR=/home/easybuild/launcher_test
25 | export LAUNCHER_JOB_FILE=helloworld_multi
26 |
27 | # Run the job file
28 | paramrun
29 |
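The LAUNCHER_JOB_FILE (here helloworld_multi) is expected to hold one independent shell command per line, which the launcher spreads over the allocated tasks. A minimal sketch of such a file, with my_serial_app as a placeholder:

    echo "hello from case 1"
    echo "hello from case 2"
    ./my_serial_app input_001.dat > output_001.log
    ./my_serial_app input_002.dat > output_002.log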
--------------------------------------------------------------------------------
/Slurm/matlab.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J Serial_Job
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=00:10:00 # Walltime
5 | #SBATCH --mem-per-cpu=4G # memory/cpu
6 | #SBATCH --cpus-per-task=5
7 | module load MATLAB/R2012b
8 | srun matlab -nodesktop -nosplash -r myLu
9 |
--------------------------------------------------------------------------------
/Slurm/mb.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J nex
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=2:00:00 # Walltime
5 | #SBATCH --mem-per-cpu=4G # memory/cpu
6 | #SBATCH --ntasks=6
7 |
8 | module load MrBayes/3.1.2-ictce-5.4.0
9 | srun mb k86_07.nex
10 |
--------------------------------------------------------------------------------
/Slurm/migrate-benchmark-serial.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # MIGRATE Benchmark SubmitScript
3 | # Benchmarks several MIGRATE builds (serial binary)
4 | # for j in {2..4} ; do sbatch --ntasks=$((16*$j)) --ntasks-per-node=16 --array=1-100 /sNow/SubmitScripts/slurm/migrate-benchmark.sl ; done
5 | ######################################################
6 | #SBATCH -J MIGRATE_TEST
7 | #SBATCH -A hpcnow
8 | #SBATCH --time=08:00:00
9 | ##SBATCH --ntasks-per-node=24
10 | #SBATCH --mem-per-cpu=60000
11 | ######################################################
12 | ######################################################
13 | ### The files will be allocated in the shared FS
14 | cp -pr /sNow/test/migrate/parmfile* $SCRATCH_DIR/
15 | cp -pr /sNow/test/migrate/infile $SCRATCH_DIR/
16 | ######################################################
17 | ### Run the Parallel Program
18 | for i in 3.4.4-ictce-5.4.0 3.6.6-ictce-5.4.0 3.4.4-goolf-1.4.10 3.6.6-goolf-1.4.10 3.4.4-goolf-1.5.14 3.6.6-goolf-1.5.14
19 | do
20 | echo "version $i"
21 | ml MIGRATE/$i
22 | cd $SCRATCH_DIR
23 | mkdir $i
24 | cp parmfile* $i/
25 | cp infile $i/
26 | cd $i
27 | echo "version $i"
28 | /usr/bin/time -f "real %e" -a -o benchmark-$LMOD_SYSTEM_NAME-$i-$SLURM_NTASKS.dat srun migrate-n parmfile.short -nomenu
29 | ml purge
30 | done
31 | ######################################################
32 | ### Transferring the results to the home directory ($HOME)
33 | mkdir -p $HOME/OUT/migrate
34 | cp -pr $SCRATCH_DIR $HOME/OUT/migrate
35 |
--------------------------------------------------------------------------------
/Slurm/migrate-benchmark-tuned.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # MIGRATE Benchmark SubmitScript
3 | # Benchmarks several MIGRATE builds with tuned Intel MPI settings
4 | # for j in {2..4} ; do sbatch --ntasks=$((16*$j)) --ntasks-per-node=16 --array=1-100 /sNow/SubmitScripts/slurm/migrate-benchmark.sl ; done
5 | ######################################################
6 | #SBATCH -J MIGRATE_TEST
7 | #SBATCH -A hpcnow
8 | #SBATCH --time=01:00:00
9 | ##SBATCH --ntasks-per-node=16
10 | #SBATCH --mem-per-cpu=4G
11 | ##SBATCH -C sb
12 | #SBATCH -x compute-bigmem-[001,003-004],compute-chem-001
13 | ######################################################
14 | ######################################################
15 | ### The files will be allocated in the shared FS
16 | cp -pr /sNow/test/migrate/parmfile* $SCRATCH_DIR/
17 | cp -pr /sNow/test/migrate/infile $SCRATCH_DIR/
18 | ######################################################
19 | ### Run the Parallel Program
20 | #for i in 3.4.4-ictce-5.4.0 3.6.6-ictce-5.4.0
21 | for i in 3.4.4-ictce-5.4.0
22 | do
23 | export I_MPI_FABRICS="shm:dapl"
24 | #export I_MPI_FALLBACK=disable
25 | #export I_MPI_DAPL_UD=enable
26 | #export I_MPI_ADJUST_COLLECTIVES="bcast:0;reduce:2"
27 | #export I_MPI_DAPL_SCALABLE_PROGRESS=1
28 | #export I_MPI_WAIT_MODE=enable
29 | export I_MPI_PIN_PROCESSOR_LIST="grain=cache2,shift=sock"
30 | #export I_MPI_WAIT_MODE=enable
31 | #export I_MPI_SHM_BYPASS=enable
32 | #export I_MPI_INTRANODE_EAGER_THRESHOLD=262144
33 | #export I_MPI_SHM_CACHE_BYPASS_THRESHOLDS=,[,,]
34 | #export I_MPI_SHM_CACHE_BYPASS_THRESHOLDS=16384,16384,-1,16384,-1,16384
35 | echo "version $i"
36 | ml MIGRATE/$i
37 | #cd $SCRATCH_DIR
38 | cd $SCRATCH_DIR
39 | mkdir -p $i
40 | cp parmfile* $i/
41 | cp infile $i/
42 | cd $i
43 | echo "version $i"
44 | /usr/bin/time -f "real %e" -a -o /home/jbla572/OUT/migrate/benchmark-95r1-tuned-$LMOD_SYSTEM_NAME-$i-$SLURM_NTASKS.dat srun migrate-n-mpi parmfile.short -nomenu
45 | ml purge
46 | done
47 | ######################################################
48 | ### Transferring the results to the home directory ($HOME)
49 | #mkdir -p $HOME/OUT/migrate
50 | #cp -pr $SCRATCH_DIR $HOME/OUT/migrate
51 |
--------------------------------------------------------------------------------
/Slurm/migrate-benchmark.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # MIGRATE Benchmark SubmitScript
3 | # Benchmarks several MIGRATE builds across toolchains
4 | # for j in {2..4} ; do sbatch --ntasks=$((16*$j)) --ntasks-per-node=16 --array=1-100 /sNow/SubmitScripts/slurm/migrate-benchmark.sl ; done
5 | ######################################################
6 | #SBATCH -J MIGRATE_TEST
7 | #SBATCH -A hpcnow
8 | #SBATCH --time=08:00:00
9 | ##SBATCH --ntasks-per-node=24
10 | #SBATCH --mem-per-cpu=2048
11 | ##SBATCH -C ib
12 | #SBATCH -x compute-bigmem-[001,003-004],compute-chem-001
13 | ######################################################
14 | ######################################################
15 | ### The files will be allocated in the shared FS
16 | cp -pr /sNow/test/migrate/parmfile* $SCRATCH_DIR/
17 | cp -pr /sNow/test/migrate/infile $SCRATCH_DIR/
18 | ######################################################
19 | ### Run the Parallel Program
20 | for i in 3.4.4-ictce-5.4.0 3.6.6-ictce-5.4.0 3.4.4-goolf-1.4.10 3.6.6-goolf-1.4.10 3.4.4-goolf-1.5.14 3.6.6-goolf-1.5.14
21 | #for i in 3.4.4-goolf-1.4.10 3.6.6-goolf-1.4.10
22 | #for i in 3.4.4-goolf-1.5.14 3.6.6-goolf-1.5.14
23 | #export I_MPI_FABRICS=shm:dapl
24 | #export I_MPI_ADJUST_COLLECTIVES = bcast:0;reduce:2
25 | #export I_MPI_DAPL_SCALABLE_PROGRESS=1
26 | #export I_MPI_PIN_PROCESSOR_LIST='grain=cache2,shift=sock'
27 | #for i in 3.4.4-ictce-5.4.0
28 | do
29 | echo "version $i"
30 | ml MIGRATE/$i
31 | cd $SCRATCH_DIR
32 | mkdir $i
33 | cp parmfile* $i/
34 | cp infile $i/
35 | cd $i
36 | echo "version $i"
37 | /usr/bin/time -f "real %e" -a -o /home/jbla572/OUT/migrate/benchmark-10r-$LMOD_SYSTEM_NAME-$i-$SLURM_NTASKS.dat srun migrate-n-mpi parmfile.short -nomenu
38 | ml purge
39 | done
40 | ######################################################
41 | ### Transferring the results to the home directory ($HOME)
42 | #mkdir -p $HOME/OUT/migrate
43 | #cp -pr $SCRATCH_DIR $HOME/OUT/migrate
44 |
--------------------------------------------------------------------------------
/Slurm/migrate-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # MIGRATE Benchmark SubmitScript
3 | # Optimized to run a parallel job on 96 cores
4 | ######################################################
5 | #SBATCH -J MIGRATE_TEST
6 | #SBATCH -A hpcnow
7 | #SBATCH --time=08:00:00
8 | #SBATCH --ntasks=96          # total MPI tasks (96 cores, as per the header above)
9 | #SBATCH --ntasks-per-node=16
10 | #SBATCH --mem-per-cpu=4G
11 | ######################################################
12 | ######################################################
13 | ### The files will be allocated in the shared FS
14 | cp -pr /sNow/test/migrate/parmfile.* $SCRATCH_DIR/
15 | cp -pr /sNow/test/migrate/infile $SCRATCH_DIR/
16 | ######################################################
17 | ### Run the Parallel Program
18 | for i in 3.4.4-goolf-1.4.10 3.4.4-ictce-5.4.0 3.6.6-goolf-1.4.10 3.6.6-ictce-5.4.0
19 | do
20 | echo "version $i"
21 | cd $SCRATCH_DIR
22 | mkdir $i
23 | cp parmfile.* $i/
24 | cp infile $i/
25 | cd $i
26 | time srun migrate-n-mpi parmfile -nomenu
27 | done
28 | sstat -j $SLURM_JOBID -o JobID,MaxVMSize,AveVMSize,MinCPU,AveCPU,NTasks
29 | ######################################################
30 | ### Transferring the results to the home directory ($HOME)
31 | mkdir -p $HOME/OUT/migrate
32 | cp -pr $SCRATCH_DIR $HOME/OUT/migrate
33 |
--------------------------------------------------------------------------------
/Slurm/migrate-profile.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # MIGRATE Benchmark SubmitScript
3 | # Profiles several MIGRATE builds with VTune
4 | ######################################################
5 | #SBATCH -J MIGRATE_Profile
6 | #SBATCH -A hpcnow
7 | #SBATCH --time=01:00:00
8 | #SBATCH --ntasks-per-node=16
9 | #SBATCH --mem-per-cpu=4G
10 | #SBATCH -C sb
11 | ######################################################
12 | ######################################################
13 | ### The files will be allocated in the shared FS
14 | cp -pr /sNow/test/migrate/parmfile* $SCRATCH_DIR/
15 | cp -pr /sNow/test/migrate/infile $SCRATCH_DIR/
16 | ######################################################
17 | ### Run the Parallel Program
18 | for i in 3.4.4-ictce-5.4.0 3.6.6-ictce-5.4.0
19 | do
20 | #export I_MPI_FABRICS=shm:dapl
21 | #export I_MPI_ADJUST_COLLECTIVES = bcast:0;reduce:2
22 | #export I_MPI_DAPL_SCALABLE_PROGRESS=1
23 | #export I_MPI_STATS=3
24 | #export I_MPI_STATS_SCOPE=coll
25 | echo "version $i"
26 | ml MIGRATE/$i
27 | ml VTune/2013_update8
28 | ml itac/8.1.4.045
29 | source itacvars.sh impi4
30 | #export VT_PCTRACE=4
31 | #export VT_CONFIG=/sNow/SubmitScripts/slurm/trace.conf
32 | cd $SCRATCH_DIR
33 | mkdir $i
34 | cp parmfile* $i/
35 | cp infile $i/
36 | cd $i
37 | #srun migrate-n-mpi parmfile.short -nomenu
38 | unset I_MPI_PMI_LIBRARY
39 | srun amplxe-cl -collect hotspots -- migrate-n-mpi parmfile.short -nomenu
40 | ml purge
41 | done
42 | sstat -j $SLURM_JOBID -o JobID,MaxVMSize,AveVMSize,MinCPU,AveCPU,NTasks
43 | ######################################################
44 | ### Transferring the results to the home directory ($HOME)
45 | mkdir -p $HOME/OUT/migrate-profile
46 | cp -pr $SCRATCH_DIR $HOME/OUT/migrate-profile/
47 |
--------------------------------------------------------------------------------
/Slurm/migrate-traces.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # MIGRATE Benchmark SubmitScript
3 | # Collects ITAC traces for several MIGRATE builds
4 | ######################################################
5 | #SBATCH -J MIGRATE_traces
6 | #SBATCH -A hpcnow
7 | #SBATCH --time=08:00:00
8 | ##SBATCH --ntasks-per-node=16
9 | #SBATCH --mem-per-cpu=4G
10 | ##SBATCH -C sb
11 | ######################################################
12 | ######################################################
13 | ### The files will be allocated in the shared FS
14 | cp -pr /sNow/test/migrate/parmfile* $SCRATCH_DIR/
15 | cp -pr /sNow/test/migrate/infile $SCRATCH_DIR/
16 | ######################################################
17 | ### Run the Parallel Program
18 | mkdir -p $HOME/OUT/migrate-traces/tuned-c
19 | #for i in 3.4.4-ictce-5.4.0 3.6.6-ictce-5.4.0 3.4.4-goolf-1.4.10 3.6.6-goolf-1.4.10
20 | for i in 3.4.4-ictce-5.4.0 3.6.6-ictce-5.4.0
21 | do
22 | export I_MPI_FABRICS=shm:dapl
23 | #export I_MPI_ADJUST_COLLECTIVES = bcast:0;reduce:2
24 | export I_MPI_FALLBACK=disable
25 | export I_MPI_DAPL_UD=enable
26 | export I_MPI_DAPL_SCALABLE_PROGRESS=1
27 | export I_MPI_PIN_PROCESSOR_LIST='grain=cache2,shift=sock'
28 | export I_MPI_WAIT_MODE=enable
29 | export I_MPI_SHM_BYPASS=enable
30 | export I_MPI_INTRANODE_EAGER_THRESHOLD=262144
31 | #export I_MPI_SHM_CACHE_BYPASS_THRESHOLDS=,[,,]
32 | export I_MPI_SHM_CACHE_BYPASS_THRESHOLDS=16384,16384,-1,16384,-1,16384
33 | #export I_MPI_STATS=3
34 | #export I_MPI_STATS_SCOPE=coll
35 | echo "version $i"
36 | ml MIGRATE/$i
37 | ml VTune/2013_update8
38 | ml itac/8.1.4.045
39 | source itacvars.sh impi4
40 | #export VT_PCTRACE=4
41 | #export VT_CONFIG=/sNow/SubmitScripts/slurm/trace.conf
42 | cd $SCRATCH_DIR
43 | mkdir $i
44 | cp parmfile* $i/
45 | cp infile $i/
46 | cd $i
47 | #srun migrate-n-mpi parmfile.short -nomenu
48 | unset I_MPI_PMI_LIBRARY
49 | mpiexec.hydra -trace migrate-n-mpi parmfile.short -nomenu
50 | #mpiexec.hydra -trace migrate-n-mpi parmfile.short -nomenu
51 | #mpirun itcpin --run -- migrate-n-mpi parmfile.short -nomenu
52 | ml purge
53 | cp -pr $SCRATCH_DIR $HOME/OUT/migrate-traces/tuned-c/$i-$SLURM_NTASKS
54 | done
55 | #sstat -j $SLURM_JOBID -o JobID,MaxVMSize,AveVMSize,MinCPU,AveCPU,NTasks
56 | ######################################################
57 | ### Transferring the results to the home directory ($HOME)
58 |
--------------------------------------------------------------------------------
/Slurm/mpi-all.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J MPI_JOB
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=1:00:00 # Walltime
5 | #SBATCH --nodes=200
6 | #SBATCH --tasks-per-node=16
7 | #SBATCH --mem-per-cpu=1G # memory/cpu
8 | module load intel/ics-2013
9 | srun hostname
10 | srun /sNow/training/slurm/bin/pi_mpi
11 |
--------------------------------------------------------------------------------
/Slurm/mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J MPI_JOB
3 | #SBATCH -A hpcnow
4 | #SBATCH --time=00:15:00
5 | #SBATCH --ntasks=32
6 | #SBATCH --mem-per-cpu=1G
7 | module load ictce/5.4.0
8 | srun hostname
9 | srun /sNow/training/slurm/bin/pi_mpi
10 |
--------------------------------------------------------------------------------
/Slurm/multi-mpi.conf:
--------------------------------------------------------------------------------
1 | 0 echo 'I am the Master'
2 | 1 sleep 120
3 | 2-3 printenv SLURM_PROCID
4 | 4-47 hostname
5 |
--------------------------------------------------------------------------------
/Slurm/multi-prog-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J MasterSlave
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=01:00:00 # Walltime
5 | #SBATCH --ntasks=48 # number of tasks
6 | #SBATCH --mem-per-cpu=1G # memory/cpu
7 | # Script path: /sNow/SubmitScripts/slurm/multi-prog-mpi.sl
8 | srun --multi-prog multi-mpi.conf
9 |
--------------------------------------------------------------------------------
/Slurm/multi-prog.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J MasterSlave
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=01:00:00 # Walltime
5 | #SBATCH --ntasks=48 # number of tasks
6 | #SBATCH --mem-per-cpu=1G # memory/cpu
7 | srun --multi-prog multi.conf
8 |
--------------------------------------------------------------------------------
/Slurm/multi.conf:
--------------------------------------------------------------------------------
1 | 0 echo 'I am the Master'
2 | 1 sleep 120
3 | 2-3 printenv SLURM_PROCID
4 | 4-47 hostname
5 |
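srun's --multi-prog configuration also accepts the %t (task ID) and %o (offset within the rank range) placeholders, which helps when every rank in a range needs its own input. A minimal sketch, with ./worker and its input files as placeholders:

    # multi-args.conf (hypothetical):
    #   0     echo 'I am the Master'
    #   1-47  ./worker input_%o.dat
    srun --multi-prog multi-args.conf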
--------------------------------------------------------------------------------
/Slurm/multistage.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J OpenMP_JOB
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=01:00:00 # Walltime
5 | #SBATCH --mem-per-cpu=8G # memory/cpu
6 | #SBATCH --cpus-per-task=8 # 8 OpenMP Threads
7 | srun openmp_binary stage1.dat
8 | srun openmp_binary stage2.dat
9 | srun openmp_binary stage3.dat
10 | srun openmp_binary stage4.dat
11 | srun openmp_binary stage5.dat
12 | srun openmp_binary stage6.dat
13 | srun openmp_binary stage7.dat
14 | srun openmp_binary stage8.dat
15 |
--------------------------------------------------------------------------------
/Slurm/namd-cuda.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # NAMD CUDA SubmitScript
3 | ##########################################################################
4 | #SBATCH -J NAMD
5 | #SBATCH --time=01:00:00 # Walltime
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --mem-per-cpu=2G # memory/cpu
8 | #SBATCH --cpus-per-task=2 # 2 OpenMP Threads
9 | #SBATCH --gres=gpu:2 # GPUs per node
10 | ##########################################################################
11 | ### Load the Environment Modules for NAMD
12 | module load NAMD/2.9
13 | ##########################################################################
14 | ### The files will be allocated in the shared FS ($SCRATCH_DIR)
15 | cp -pr /sNow/test/NAMD/apoa1/* $SCRATCH_DIR
16 | cd $SCRATCH_DIR
17 | ##########################################################################
18 | ### Run the Parallel Program
19 | export OMP_NUM_THREADS=1
20 | srun namd2 apoa1.namd
21 | ##########################################################################
22 | ### Transferring the results to the home directory ($HOME)
23 | cp -pr $SCRATCH_DIR $HOME/OUT/namd
24 |
--------------------------------------------------------------------------------
/Slurm/namd-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # NAMD MPI SubmitScript
3 | ##########################################################################
4 | #SBATCH -J NAMD
5 | #SBATCH --time=01:00:00 # Walltime
6 | #SBATCH -A hpcnow # Project Account
7 | #SBATCH --mem-per-cpu=2G # memory/cpu
8 | #SBATCH --ntasks=24 # number of tasks
9 | ##########################################################################
10 | ### Load the Environment Modules for NAMD 2.9
11 | module load NAMD/2.9
12 | ##########################################################################
13 | ### The files will be allocated in the shared FS ($SCRATCH_DIR)
14 | cp -pr /sNow/test/NAMD/apoa1/* $SCRATCH_DIR
15 | cd $SCRATCH_DIR
16 | ##########################################################################
17 | ### Run the Parallel Program
18 | export OMP_NUM_THREADS=1
19 | srun namd2 apoa1.namd
20 | ##########################################################################
21 | ### Transferring the results to the home directory ($HOME)
22 | cp -pr $SCRATCH_DIR $HOME/OUT/namd
23 |
--------------------------------------------------------------------------------
/Slurm/nextflow.config:
--------------------------------------------------------------------------------
1 | // Process-scope settings: executor selection and per-task Slurm options
2 | process {
3 |     executor = 'slurm'
4 |     clusterOptions = '-p short -t 00:30:00 -C ilk'
5 | }
6 | // Executor-scope settings: queue polling and submission behaviour
7 | executor {
8 |     queueSize = 15
9 |     pollInterval = '5 min'
10 |     dumpInterval = '6 min'
11 |     queueStatInterval = '5 min'
12 |     exitReadTimeout = '13 min'
13 |     killBatchSize = 30
14 |     submitRateLimit = '20 min'
15 | }
16 |
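Nextflow reads ./nextflow.config automatically when launched from the same directory; it can also be passed explicitly. A usage sketch, with main.nf as a placeholder pipeline script:

    nextflow run main.nf -c nextflow.config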
--------------------------------------------------------------------------------
/Slurm/node_stress.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ######################################################
3 | #SBATCH -J STRESS_TEST
4 | #SBATCH -A hpcnow # Project Account
5 | #SBATCH --time=12:00:00 # Walltime
6 | #SBATCH --mem-per-cpu=4G # memory/cpu
7 | #SBATCH --cpus-per-task=12 # 12 OpenMP Threads
8 | #SBATCH --nodes=1 # number nodes
9 | ######################################################
10 | ### Load the Environment
11 | ######################################################
12 | cd $SHM_DIR
13 | #srun stress -v --cpu 12 --vm 12 --vm-bytes 4096M --timeout 1800s
14 | srun stress -v --cpu 12
15 | ######################################################
16 |
--------------------------------------------------------------------------------
/Slurm/octave.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J Serial_Job
3 | #SBATCH --job-name=octave
4 | #SBATCH -A hpcnow # Project Account
5 | #SBATCH --time=00:10:00 # Walltime
6 | #SBATCH --mem-per-cpu=2G # memory/cpu
7 | #SBATCH --mail-user=b.verleye@auckland.ac.nz
8 | #SBATCH --mail-type=ALL
9 | module load Octave
10 | octave 'myTest.m'
11 |
--------------------------------------------------------------------------------
/Slurm/open-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J MPI_JOB
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=10:00:00 # Walltime
5 | #SBATCH --ntasks=96 # number of tasks
6 | #SBATCH --mem-per-cpu=1G # memory/cpu
7 | ml load OpenMPI/1.6.5-GCC-4.8.2
8 | srun /sNow/training/slurm/bin/pi_openmpi-1.6.5-GCC-4.8.2
9 |
--------------------------------------------------------------------------------
/Slurm/openmp.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J OpenMP_JOB
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=01:00:00 # Walltime
5 | #SBATCH --mem-per-cpu=8G # memory/cpu
6 | #SBATCH --cpus-per-task=8 # 8 OpenMP Threads
7 | srun openmp_binary
8 |
--------------------------------------------------------------------------------
/Slurm/orca-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J ORCA
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=01:00:00 # Walltime
5 | #SBATCH --mem-per-cpu=8G # memory/cpu
6 | #SBATCH --ntasks=48 # number of tasks
7 |
8 | # Load the user environment
9 | ml ORCA/3_0_2-linux_x86-64
10 | export P4_RSHCOMMAND=ssh
11 | export OMP_NUM_THREADS=1
12 | ### The files will be allocated in the shared FS
13 | cd $SCRATCH_DIR
14 | cp /sNow/test/benchmarks/Orca/XRQTC.Orca_localCCSD/input/* .
15 | # --------------> WARNING <-------------------
16 | # The number of CPUs needs to be specified in
17 | # the input file : % pal nprocs $SLURM_NTASKS
18 | mv XRQTC.Orca_localCCSD.inp XRQTC.Orca_localCCSD.inp.1
19 | echo "% pal nprocs $SLURM_NTASKS
20 | end" > XRQTC.Orca_localCCSD.inp.0
21 | cat XRQTC.Orca_localCCSD.inp.0 XRQTC.Orca_localCCSD.inp.1 > XRQTC.Orca_localCCSD.inp
22 | rm XRQTC.Orca_localCCSD.inp.*
23 | ##########################################
24 | # Run the job
25 | ##########################################
26 | srun orca XRQTC.Orca_localCCSD.inp > XRQTC.Orca_localCCSD.out
27 | ##########################################
28 | # Copy the results to our home directory
29 | ##########################################
30 | mkdir -p /sNow/test/benchmarks/Orca/XRQTC.Orca_localCCSD/OUT/orca
31 | cp -r $SCRATCH_DIR /sNow/test/benchmarks/Orca/XRQTC.Orca_localCCSD/OUT/orca/
32 | ##########################################
33 | # Record the elapsed time reported by ORCA
34 | ##########################################
35 | TEMPS=$(grep "Time:" XRQTC.Orca_localCCSD.out | awk '{print $3}')
36 | echo "$SLURM_NTASKS $TEMPS" >> /sNow/test/benchmarks/Orca/XRQTC.Orca_localCCSD/benchmark-ompi-XRQTC.Orca_localCCSD.dat
37 |
--------------------------------------------------------------------------------
/Slurm/orca-smp.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J ORCA
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=01:00:00 # Walltime
5 | #SBATCH --mem-per-cpu=8G # memory/cpu
6 | #SBATCH --cpus-per-task=8 # 8 OpenMP Threads
7 |
8 | # Load the user environment
9 | ml ORCA/3_0_2-linux_x86-64
10 | export P4_RSHCOMMAND=ssh
11 | export OMP_NUM_THREADS=1
12 | ### The files will be allocated in the shared FS
13 | cd $TMP_DIR
14 | cp /sNow/test/benchmarks/Orca/XRQTC.Orca_B3LYP/input/* .
15 | # --------------> WARNING <-------------------
16 | # The number of CPUs needs to be specified in
17 | # the input file : % pal nprocs $SLURM_CPUS_PER_TASK
18 | mv XRQTC.Orca_B3LYP.inp XRQTC.Orca_B3LYP.inp.1
19 | echo "% pal nprocs $NSLOTS
20 | end" > XRQTC.Orca_B3LYP.inp.0
21 | cat XRQTC.Orca_B3LYP.inp.0 XRQTC.Orca_B3LYP.inp.1 > XRQTC.Orca_B3LYP.inp
22 | rm XRQTC.Orca_B3LYP.inp.*
23 | ##########################################
24 | # Run the job
25 | ##########################################
26 | srun orca XRQTC.Orca_B3LYP.inp > XRQTC.Orca_B3LYP.out
27 | ##########################################
28 | # Copy the results to our home directory
29 | ##########################################
30 | mkdir -p /sNow/test/benchmarks/Orca/XRQTC.Orca_B3LYP/OUT/orca
31 | cp -r $TMP_DIR /sNow/test/benchmarks/Orca/XRQTC.Orca_B3LYP/OUT/orca/
32 | ##########################################
33 | # Record the elapsed time reported by ORCA
34 | ##########################################
35 | TEMPS=$(grep "Time:" XRQTC.Orca_B3LYP.out | awk '{print $3}')
36 | echo "$SLURM_CPUS_PER_TASK $TEMPS" >> /sNow/test/benchmarks/Orca/XRQTC.Orca_B3LYP/benchmark-ompi-XRQTC.Orca_B3LYP.dat
37 |
--------------------------------------------------------------------------------
/Slurm/params.dat:
--------------------------------------------------------------------------------
1 | 2.00 0.6 10 1500 2
2 | 2.00 0.8 10 1500 2
3 | 2.00 0.6 2.5 1500 2
4 | 2.00 0.8 2.5 1500 2
5 | 2.00 0.6 3.0 1500 2
6 | 2.00 0.8 3.0 1500 2
7 | 2.00 0.6 3.5 1500 2
8 | 2.00 0.8 3.5 1500 2
9 | 2.00 0.6 4.0 1500 2
10 | 2.00 0.8 4.0 1500 2
11 | 2.00 0.6 4.5 1500 2
12 | 2.00 0.8 4.5 1500 2
13 | 2.00 0.6 5.0 1500 2
14 | 2.00 0.8 5.0 1500 2
15 | 2.00 0.6 5.5 1500 2
16 | 2.00 0.8 5.5 1500 2
17 | 2.00 0.6 6.0 1500 2
18 | 2.00 0.8 6.0 1500 2
19 | 2.00 0.6 6.5 1500 2
20 | 2.00 0.8 6.5 1500 2
21 | 2.00 0.6 7.0 1500 2
22 | 2.00 0.8 7.0 1500 2
23 | 2.00 0.6 7.5 1500 2
24 | 2.00 0.8 7.5 1500 2
25 | 2.00 0.6 8.0 1500 2
26 | 2.00 0.8 8.0 1500 2
27 | 2.00 0.6 8.5 1500 2
28 | 2.00 0.8 8.5 1500 2
29 | 2.00 0.6 9.0 1500 2
30 | 2.00 0.8 9.0 1500 2
31 | 2.00 0.6 9.5 1500 2
32 | 2.00 0.8 9.5 1500 2
33 | 2.00 0.6 10.0 1500 2
34 | 2.00 0.8 10.0 1500 2
35 | 2.00 0.6 2.5 1500 2
36 | 2.00 0.8 2.5 1500 2
37 | 2.00 0.6 3.0 1500 2
38 | 2.00 0.8 3.0 1500 2
39 | 2.00 0.6 3.5 1500 2
40 | 2.00 0.8 3.5 1500 2
41 | 2.00 0.6 4.0 1500 2
42 | 2.00 0.8 4.0 1500 2
43 | 2.00 0.6 4.5 1500 2
44 | 2.00 0.8 4.5 1500 2
45 | 2.00 0.6 5.0 1500 2
46 | 2.00 0.8 5.0 1500 2
47 | 2.00 0.6 5.5 1500 2
48 | 2.00 0.8 5.5 1500 2
49 | 2.00 0.6 6.0 1500 2
50 | 2.00 0.8 6.0 1500 2
51 | 2.00 0.6 6.5 1500 2
52 | 2.00 0.8 6.5 1500 2
53 | 2.00 0.6 7.0 1500 2
54 | 2.00 0.8 7.0 1500 2
55 | 2.00 0.6 7.5 1500 2
56 | 2.00 0.8 7.5 1500 2
57 | 2.00 0.6 8.0 1500 2
58 | 2.00 0.8 8.0 1500 2
59 | 2.00 0.6 8.5 1500 2
60 | 2.00 0.8 8.5 1500 2
61 | 2.00 0.6 9.0 1500 2
62 | 2.00 0.8 9.0 1500 2
63 | 2.00 0.6 9.5 1500 2
64 | 2.00 0.8 9.5 1500 2
65 | 2.00 0.6 10.0 1500 2
66 | 2.00 0.8 10.0 1500 2
67 |
--------------------------------------------------------------------------------
/Slurm/platform-mpi.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J MPI_JOB
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=10:00:00 # Walltime
5 | #SBATCH --ntasks=14 # number of tasks
6 | #SBATCH --mem-per-cpu=1G # memory/cpu
7 | module load Abaqus/6.13
8 | mpirun -srun uname
9 | #mpirun -srun pi_platform-mpi-08.3.0.0
10 |
--------------------------------------------------------------------------------
/Slurm/post-processing.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J Post-ProcessingJob
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=01:00:00 # Walltime
5 | #SBATCH --mem-per-cpu=8G # memory/cpu
6 | srun echo "Post-Processing done."
7 |
--------------------------------------------------------------------------------
/Slurm/pre-processing.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J Pre-ProcessingJob
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=01:00:00 # Walltime
5 | #SBATCH --mem-per-cpu=8G # memory/cpu
6 | srun echo "Pre-Processing done. Run the job dependencies"
7 |
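The pre- and post-processing stages above are meant to be chained with Slurm job dependencies. A minimal sketch using sbatch --parsable to capture the job IDs, with mpi.sl standing in for the main computation:

    PRE=$(sbatch --parsable pre-processing.sl)
    MAIN=$(sbatch --parsable --dependency=afterok:$PRE mpi.sl)
    sbatch --dependency=afterok:$MAIN post-processing.sl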
--------------------------------------------------------------------------------
/Slurm/profile.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J Slurm-Profiler
3 | #SBATCH -A hpcnow
4 | ##SBATCH --time=24:00:00 # Walltime
5 | #SBATCH --time=00:15:00 # Walltime
6 | #SBATCH --mem-per-cpu=8G # memory/cpu
7 | #SBATCH --cpus-per-task=4 # 4 OpenMP Threads
8 | srun --profile=task stress --vm 4 --vm-bytes 2GB --timeout 900s
9 |
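srun --profile=task writes per-node HDF5 profiles (this assumes the cluster has acct_gather_profile/hdf5 configured); after the job they can be merged with Slurm's sh5util, e.g.:

    sh5util -j <jobid> -o profile_<jobid>.h5   # replace <jobid> with the numeric job ID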
--------------------------------------------------------------------------------
/Slurm/serial.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J Serial_Job
3 | #SBATCH -A hpcnow # Project Account
4 | #SBATCH --time=01:00:00 # Walltime
5 | #SBATCH --mem-per-cpu=8G # memory/cpu
6 | srun sleep 30
7 | srun echo "My first serial Slurm job"
8 |
--------------------------------------------------------------------------------
/Slurm/slurm_setup_abaqus-env.sh:
--------------------------------------------------------------------------------
1 | # This script will create the User Environment to run Abaqus under Slurm
2 | # Created by Jordi Blasco
3 | # Previous releases of Abaqus used HP-MPI, but the current release (6.13) uses Platform/IBM MPI, which is not fully integrated with srun.
4 | # According to the Slurm website, HP-MPI only needs the -srun flag, so the following lines of code could be avoided:
5 | # http://slurm.schedmd.com/mpi_guide.html#hp_mpi
6 | envFile=abaqus_v6.env
7 | if [[ -f $envFile ]]; then
8 | rm $envFile
9 | fi
10 | echo "import os">>$envFile
11 | srun hostname -s | sort > slurm.hosts
12 | mp_host_list="["
13 | for i in $(cat slurm.hosts) ; do
14 | mp_host_list="${mp_host_list}['$i', 1],"
15 | done
16 | #mp_host_list=`echo ${mp_host_list} | sed -e "s/,$//" | sed -e "s/\-p/\-ib/g"`
17 | mp_host_list=`echo ${mp_host_list}`
18 | mp_host_list="${mp_host_list}]"
19 | export mp_host_list
20 | #echo "mp_host_list=${mp_host_list}" >>$envFile
21 | echo "mp_host_list=${mp_host_list}" >>$envFile
22 | echo "max_history_requests=0" >> $envFile
23 | echo "mp_rsh_command = 'ssh -x -n -l %U %H %C'" >> $envFile
24 | alias mpirun='mpirun -srun'
25 | #echo "mp_mpirun_path = {HP:'/sNow/apps/Abaqus/6.13/installation/6.13-2/code/bin/SMAExternal/pmpi-8.3/bin/mpirun -srun'}" >> $envFile
26 | #echo "mp_rsh_command = 'ssh -x -n -l %U %H %C'" >> $envFile
27 |
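A hedged sketch of how this helper might be used from an Abaqus submit script: source it in the job's working directory so the generated abaqus_v6.env is picked up, then start the solver (job and input names are placeholders):

    module load Abaqus/6.13
    source ./slurm_setup_abaqus-env.sh
    abaqus job=my_model input=my_model.inp cpus=$SLURM_NTASKS mp_mode=mpi interactive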
--------------------------------------------------------------------------------
/Slurm/slurm_setup_cfx-env.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # This script will create the User Environment to run ANSYS CFX under Slurm
3 | # Created by Jordi Blasco , Bart Verleye
4 | export CFX5RSH=ssh
5 | export MPI_REMSH=ssh
6 | export MPI_IC_ORDER="udapl:ibv:tcp"
7 | #export MPI_CPU_AFFINITY=SLURM
8 |
9 | envFile=ansys.env
10 | rm $envFile
11 | srun hostname -s | sort > slurm.hosts
12 | for i in $(cat slurm.hosts) ; do
13 | mp_host_list="${mp_host_list}$i,"
14 | done
15 | mp_host_list=`echo ${mp_host_list} | sed -e "s/,$//"`
16 | export mp_host_list
17 | echo "${mp_host_list}" >>$envFile
18 | unset mp_host_list
19 |
--------------------------------------------------------------------------------
/Slurm/slurm_setup_cfx-env2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # This script will create the User Environment to run ANSYS CFX under Slurm
3 | # Created by Jordi Blasco , Bart Verleye
4 | export CFX5RSH=ssh
5 | export MPI_REMSH=ssh
6 | export MPI_IC_ORDER="udapl:ibv:tcp"
7 | #export MPI_CPU_AFFINITY=SLURM
8 |
9 | CFX_HOSTLIST_TMP=`srun hostname -s | sort `
10 | CFX_HOSTLIST_TMP=`echo $CFX_HOSTLIST_TMP | sed -e 's/ /,/g'`
11 | export CFX_HOSTLIST=$CFX_HOSTLIST_TMP
12 |
--------------------------------------------------------------------------------
/Slurm/slurm_setup_cfx-env3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # This script will create the User Environment to run ANSYS CFX under Slurm
3 | # Created by Jordi Blasco , Bart Verleye
4 | #export CFX5RSH=ssh
5 | #export MPI_REMSH=ssh
6 | #export MPI_IC_ORDER="udapl:ibv:tcp"
7 | #export MPI_CPU_AFFINITY=SLURM
8 |
9 | CFX_HOSTLIST_TMP=`srun hostname -s | sort `
10 | CFX_HOSTLIST_TMP=`echo $CFX_HOSTLIST_TMP | sed -e 's/ /,/g'`
11 | export CFX_HOSTLIST=$CFX_HOSTLIST_TMP
12 |
--------------------------------------------------------------------------------
/Slurm/slurm_setup_fluent.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | export MPI_IC_ORDER="udapl:ibv:tcp"
3 | HOSTSFILE=.hostlist-job$SLURM_JOB_ID
4 | if [ "$SLURM_PROCID" == "0" ]; then
5 | srun hostname -f > $HOSTSFILE
6 | fi
7 |
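A hedged sketch of using the generated host list from an ANSYS Fluent submit script (solver version and journal file are placeholders):

    source ./slurm_setup_fluent.sh
    fluent 3ddp -g -t$SLURM_NTASKS -cnf=$HOSTSFILE -i my_case.jou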
--------------------------------------------------------------------------------
/Slurm/star-ccm+-platform.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # STAR-CCM+ SubmitScript
3 | # Optimized to run a parallel job with Platform MPI
4 | ######################################################
5 | #SBATCH -J STAR-CCM+PMPI
6 | #SBATCH -A hpcnow
7 | #SBATCH -D /sNow/test/Star-CCM+/case06/input
8 | #SBATCH --time=06:00:00
9 | #SBATCH --mem-per-cpu=2048
10 | #SBATCH --nodes=10
11 | #SBATCH --ntasks=160
12 | ######################################################
13 |
14 | export SIM_FILE=Test7.1-Fn-0.45-layers1.5-timestep15.sim
15 | export BATCH_FILE=Run.java
16 | export PATH=$PATH:/projects/hpcnow/STAR-CCM+9.04.009-R8/star/bin
17 |
18 | cp -pr * $SCRATCH_DIR
19 | cd $SCRATCH_DIR
20 |
21 | echo "JOB_ID:" $SLURM_JOB_ID
22 | echo '#!/bin/bash' > "star-connect-$SLURM_JOB_ID"
23 | echo "/projects/hpcnow/STAR-CCM+9.04.009-R8/star/bin/starccm+ -host $SLURMD_NODENAME -port 47827 &" >> "star-connect-$SLURM_JOB_ID"
24 | chmod +x "star-connect-$SLURM_JOB_ID"
25 |
26 | # Build node list
27 | srun hostname -s | sort >slurm.hosts
28 |
29 | # Start Star-CCM+ server in batch mode
30 | echo "Starting Star-CCM+"
31 | echo
32 |
33 | #starccm+ -power -rsh ssh -licpath 1999@flex.cd-adapco.com -np $SLURM_NPROCS -cpubind off -podkey $PODKEY -machinefile slurm.hosts -batch $BATCH_FILE $SIM_FILE
34 | starccm+ -power -collab -np $SLURM_NPROCS -licpath $LICENSE -podkey $PODKEY -cpubind off -machinefile slurm.hosts -time -mppflags "-srun" -fabric UDAPL -batch $BATCH_FILE -rsh ssh $SIM_FILE
35 | # Where:
36 | # -collab is necessary to avoid user name checking. The Java macro file ($BATCH_FILE) is optional.
37 | # $SIM_FILE is the STAR-CCM+ simulation file.
38 |
39 | rm slurm.hosts
40 | rm "star-connect-$SLURM_JOB_ID"
41 |
42 | mv Resistance_Data.csv "${SIM_FILE}.csv"
43 |
44 | cp -pr $SCRATCH_DIR /sNow/test/Star-CCM+/case06/output/
45 |
46 |
--------------------------------------------------------------------------------
/Slurm/stress.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J STRESS
3 | #SBATCH -A hpcnow
4 | #SBATCH --time=1:05:00
5 | #SBATCH --mem-per-cpu=4G
6 | ##SBATCH --cpus-per-task=12
7 | #srun stress --cpu 12 --vm-bytes 2GB --timeout 3600s
8 | cd $SCRATCH_DIR
9 | srun stress --cpu 1 -d 10 --timeout 3600s
10 |
--------------------------------------------------------------------------------
/Slurm/supermagic.sl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH -J MPI_SuperMagic
3 | #SBATCH -A hpcnow
4 | #SBATCH --time=02:00:00
5 | #SBATCH --mem-per-cpu=7800M
6 | #SBATCH --ntasks=1024
7 | #
8 | # options:
9 | # [-a|--all] run all tests in suite
10 | # [-h|--help] display this message
11 | # [-m|--msg-size x[B,k,M,G]] change message size
12 | # [-M|--file-size B[B,k,M,G]] change file size (per rank)
13 | # [-n|--n-iters X] run X iterations of a test suite
14 | # [-q|--quiet] run in quiet mode
15 | # [-s|--stat /a/path] add /a/path to stat list
16 | # [-t|--with-tests t1[,t2,tn]] run tests in requested order
17 | # [-w|--write /a/path] add /a/path to IO tests
18 | # [-V|--verbose] display verbose output
19 | #
20 |
21 | for i in 20130104-goolf-1.5.14 20130104-ictce-5.4.0 20130104-iomkl-4.6.13 20130104-iomkl-6.5.4 20130104-foss-2015a 20130104-intel-2015a
22 | do
23 | module load supermagic/$i
24 | echo "Tool Chain $i"
25 | srun supermagic -a -m 2M -w $SCRATCH_DIR/ -n 10 > supermagic-$i-$SLURM_JOB_ID.dat
26 | module purge
27 | done
28 |
--------------------------------------------------------------------------------
/Slurm/trace.conf:
--------------------------------------------------------------------------------
1 | LOGFILE-FORMAT STFSINGLE
2 | ACTIVITY MPI ON
3 | STATE MPI:* ON
4 | PROCESS 0:N OFF
5 | PROCESS 0:3 ON
6 | TIME-WINDOWS 0:300000l
7 |
--------------------------------------------------------------------------------