├── .gitignore ├── Amber ├── ala.in ├── ala.rst7 ├── ala.top ├── run_falcon.sh ├── run_tinkercliffs.sh └── tleap.in ├── AmberTools ├── README ├── min.in ├── run_minimize_cpu.sh └── test.leap ├── Profiler ├── amd-uprof │ ├── amd-uprof-example.slurm │ └── my_app.c └── vtune │ ├── my_app.c │ └── vtune-example.slurm ├── README.md ├── VASP ├── INCAR ├── KPOINTS ├── POSACR ├── POTCAR └── submit_tinker_vasp.sh ├── abaqus └── heattransfermanifold.sh ├── abinit ├── 01h.pspgth ├── abinit_tinkercliffs_rome.sh ├── tbase1_1.in └── tbase1_x.files ├── alphafold2 ├── Melanogaster_GR28BD_tetramer.fasta └── run_tinkercliffs_A100.sh ├── alphafold3 ├── fold_input.json └── run_tinkercliffs_A100.sh ├── ansys ├── example.slurm ├── inlet.cas ├── inlet.dat ├── run-injet.jou └── tc-fluent.sh ├── apptainer ├── 1.4.0 │ ├── H2O_GW100_def2-QZVP.inp │ ├── README.md │ └── common.sh └── README.md ├── cellranger ├── 9.0.1 │ ├── README.md │ ├── common.sh │ └── run_count_1kpbmcs.png └── README.md ├── cuda_bandwidth ├── cuda_bandwidth_infer_t4.sh └── cuda_bandwidth_tinkercliffs_a100.sh ├── cuquantum ├── cuquantum_22.07_tinkercliffs_a100.sh ├── cuquantum_22.07_tinkercliffs_dgx.sh ├── cuquantum_23.06_tinkercliffs_a100.sh ├── cuquantum_23.06_tinkercliffs_dgx.sh ├── cuquantum_tinkercliffs_a100.sh └── cuquantum_tinkercliffs_dgx.sh ├── dalton ├── H2O_cc-pVDZ_nosym.mol ├── cc2dc_energy.dal ├── cc2dc_energy.mol ├── dalton_nompi_tinkercliffs_rome.sh ├── dalton_tinkercliffs_rome.sh └── dft_rspexci_nosym.dal ├── espresso ├── README ├── environment_variables ├── espresso_tinkercliffs_rome.sh ├── pseudo │ ├── Al.pz-vbc.UPF │ ├── Cu.pz-d-rrkjus.UPF │ ├── Ni.pz-nd-rrkjus.UPF │ └── Si.pz-vbc.UPF └── run_example ├── gaussian ├── g09_input.txt └── gaussian_tinkercliffs_rome.sh ├── gmt └── gmt_tinkercliffs_rome.sh ├── go ├── README.md ├── common.sh └── main.go ├── gromacs ├── 1aki.pdb ├── README ├── energy_input1.txt ├── energy_input2.txt ├── energy_input3.txt ├── energy_input4.txt ├── genion_input.txt ├── gromacs_falcon_gpu.sh ├── gromacs_tinkercliffs_owl_cpu.sh ├── gyrate_input.txt ├── ions.mdp ├── md.mdp ├── minim.mdp ├── npt.mdp ├── nvt.mdp ├── pdb2gmx_input.txt ├── plumed.dat └── trjconv_input.txt ├── gurobi └── 12.0.1 │ └── serial │ ├── README.md │ └── code01 │ ├── coins.sol.valid │ ├── gurobi.log │ ├── run.01 │ └── sbatch.01.slurm ├── hdf5 ├── hdf5_test.c └── hdf5_tinkercliffs_rome.sh ├── hpl ├── hybrid │ ├── HPL.dat │ ├── hpl_hybrid.sh │ └── hpl_setup.sh └── mpi │ ├── HPL.dat │ ├── hpl_mpi.sh │ └── hpl_setup.sh ├── julia ├── 1.10.4 │ ├── README.md │ ├── helloworld.jl │ └── helloworld.sh └── 1.11.3 │ ├── parallel │ ├── README.md │ └── code01 │ │ ├── README.make-env.01.falcon.l40s.tex │ │ ├── run.01 │ │ ├── run.delete.me │ │ ├── sample-output-previous-execution │ │ ├── gpu.24618.log │ │ ├── slurm.julia.01.gpu.24618.err │ │ └── slurm.julia.01.gpu.24618.out │ │ ├── sbatch.01.slurm │ │ └── src01.jl │ └── serial │ ├── README.md │ └── code01 │ ├── output.01.06.out.valid │ ├── run.01.06 │ ├── sbatch.01.06.slurm │ └── src.01.06.jl ├── lammps ├── in.lj ├── input.in ├── lammps_tinkercliffs_rome.sh ├── submit_tinker_hybrid_parallel.sh ├── submit_tinker_parallel.sh └── system.data ├── matlab ├── matlab_owl.sh ├── matlab_tinkercliffs_rome.sh ├── other │ ├── matlab_owl.sh │ ├── matlab_tinkercliffs_rome.sh │ ├── prime_batch_local.m │ ├── prime_fun.m │ └── prime_script.m ├── parallel │ ├── README.md │ ├── code01 │ │ ├── code02.m │ │ ├── previous-execution-output │ │ │ ├── 255.gpu.log │ │ │ ├── iostat-stderr.txt │ │ │ ├── iostat-stdout.txt │ │ │ ├── 
mpstat-stderr.txt │ │ │ ├── mpstat-stdout.txt │ │ │ ├── run.delete.me │ │ │ ├── slurm.matlab.02.gpu.255.err │ │ │ ├── slurm.matlab.02.gpu.255.out │ │ │ ├── vmstat-stderr.txt │ │ │ └── vmstat-stdout.txt │ │ ├── run.02 │ │ └── sbatch.02.slurm │ ├── code02 │ │ ├── code04.m │ │ ├── mat.out.valid │ │ ├── previous-execution-output │ │ │ ├── gpu.259.log │ │ │ ├── gpu.262.log │ │ │ ├── iostat-stderr.txt │ │ │ ├── iostat-stdout.txt │ │ │ ├── mat.out │ │ │ ├── mpstat-stderr.txt │ │ │ ├── mpstat-stdout.txt │ │ │ ├── slurm.matlab.04.cpu.259.err │ │ │ ├── slurm.matlab.04.cpu.259.out │ │ │ ├── slurm.matlab.04.cpu.262.err │ │ │ ├── slurm.matlab.04.cpu.262.out │ │ │ ├── vmstat-stderr.txt │ │ │ └── vmstat-stdout.txt │ │ ├── run.04 │ │ ├── run.delete.me │ │ └── sbatch.04.slurm │ └── code03 │ │ ├── code02b.m │ │ ├── mat.02b.out.valid │ │ ├── previous-execution-results │ │ ├── 24820.gpu.log │ │ ├── iostat-stderr.txt │ │ ├── iostat-stdout.txt │ │ ├── mat.02b.out │ │ ├── mpstat-stderr.txt │ │ ├── mpstat-stdout.txt │ │ ├── slurm.matlab.02.gpu.24820.err │ │ ├── slurm.matlab.02.gpu.24820.out │ │ ├── vmstat-stderr.txt │ │ └── vmstat-stdout.txt │ │ ├── run.02b │ │ ├── run.delete.me │ │ └── sbatch.02b.slurm ├── prime_batch_local.m ├── prime_fun.m ├── prime_script.m └── serial │ ├── README.md │ ├── code01 │ ├── code04.m │ ├── mat.out.valid │ ├── previous-execution-results │ │ ├── iostat-stderr.txt │ │ ├── iostat-stdout.txt │ │ ├── mat.out │ │ ├── mpstat-stderr.txt │ │ ├── mpstat-stdout.txt │ │ ├── slurm.matlab.04.cpu.263.err │ │ ├── slurm.matlab.04.cpu.263.out │ │ ├── vmstat-stderr.txt │ │ └── vmstat-stdout.txt │ ├── run.04 │ ├── run.delete.me │ └── sbatch.04.slurm │ └── code02 │ ├── code04.m │ ├── mat.out.valid │ ├── previous-execution-results │ ├── iostat-stderr.txt │ ├── iostat-stdout.txt │ ├── mat.out │ ├── mpstat-stderr.txt │ ├── mpstat-stdout.txt │ ├── slurm.matlab.04.cpu.96099.err │ ├── slurm.matlab.04.cpu.96099.out │ ├── vmstat-stderr.txt │ └── vmstat-stdout.txt │ ├── run.04 │ ├── run.delete.me │ └── sbatch.04.slurm ├── mpi ├── mpi_quad.c ├── mpihello.c └── tc-mpihello.sh ├── mpi4py ├── hello_mpi.py └── mpi4py_tinkercliffs_rome.sh ├── namd ├── namd_tinkercliffs_rome.sh ├── par_all22_prot.inp ├── par_all27_prot_lipid.inp ├── submit_tinker.sh ├── tiny.namd ├── tiny.pdb ├── tiny.psf ├── ubq_wb.pdb ├── ubq_wb.psf └── ubq_wb_eq.conf ├── netcdf ├── netcdf_test.c └── netcdf_tinkercliffs_rome.sh ├── openblas ├── openblas_infer.sh ├── openblas_test_c.c └── openblas_tinkercliffs_rome.sh ├── openfoam ├── 12-foss-2023a │ ├── README.md │ ├── blockMeshDict │ ├── decomposeParDict │ └── openfoam12_tinkercliffs_rome.sh ├── README.md ├── desktop.pdf └── v2406-foss-2023a │ ├── README.md │ ├── blockMeshDict │ ├── decomposeParDict │ └── openfoam_ex.sh ├── openmm ├── HelloArgonInC.c ├── Makefile ├── openmm_infer.sh └── openmm_tinkercliffs_a100.sh ├── openmolcas ├── openmolcas_tinkercliffs_rome.sh ├── water.input └── water.xyz ├── openmpi ├── openmpi_nvhpc_example.slurm ├── vector_add_mpi └── vector_add_mpi.cu ├── p7zip └── example.slurm ├── parallel ├── 20240722 │ ├── README.md │ └── code01 │ │ ├── gnu-parallel.277.err │ │ ├── gnu-parallel.277.out │ │ ├── mcpi_collect.R │ │ ├── mcpi_run.R │ │ └── parallel_mcpi.sh ├── mcpi_collect.R ├── mcpi_run.R └── parallel_mcpi.sh ├── paraview ├── pvcone.png ├── pvcone.py └── pvcone.sh ├── python ├── miniconda │ ├── example.slurm │ └── numpy_compute.py ├── seaborn │ ├── example.slurm │ └── seaborn_plot.py └── statsmodels │ ├── 0.14.4 │ └── code01 │ │ ├── previous-execution-results │ │ ├── 
sm.01.out │ │ ├── statsmodels.273.err │ │ └── statsmodels.273.out │ │ ├── sbatch.01.slurm │ │ ├── sm.01.out.valid │ │ └── sm.01.py │ └── README.md ├── r ├── 4.4.2 │ ├── parallel_02 │ │ └── README.md │ └── serial │ │ ├── README.md │ │ ├── code01 │ │ ├── output.owl.dev_q.txt.valid │ │ ├── previous-execution-results │ │ │ ├── output.owl.dev_q.96108.txt │ │ │ ├── r.serial.owl.02.96108.err │ │ │ └── r.serial.owl.02.96108.out │ │ ├── sbatch.02.slurm │ │ └── test_args.02.R │ │ └── code02 │ │ ├── output.tc.normal_q.txt.valid │ │ ├── previous-execution-results │ │ ├── output.tc.normal_q.267.txt │ │ ├── r.serial.tc.267.err │ │ └── r.serial.tc.267.out │ │ ├── sbatch.02.slurm │ │ └── test_args.02.R ├── mcpi_parallel.r ├── mcpi_parallel_cascades.sh ├── mh_parallel.r ├── mh_parallel_cascades.sh └── mh_parallel_tinkercliffs.sh ├── rclone └── rclone_example.slurm ├── scikit-bio ├── skbio.sh └── skbio_dist.py ├── scikit-learn └── script.sh ├── stream ├── stream.c ├── stream_infer.sh └── stream_tinkercliffs_rome.sh ├── su2 └── su2-tc-kmt.sh ├── tensorflow ├── beginner.py ├── tensorflow_infer_p100.sh ├── tensorflow_infer_t4.sh ├── tensorflow_infer_v100.sh └── tensorflow_tinkercliffs_a100.sh ├── tinker9 ├── t9-example-falcon-a30.sh └── t9-example-infer-v100.sh └── wrf ├── input_sounding ├── namelist.input └── wrf_tinkercliffs_rome.sh /.gitignore: -------------------------------------------------------------------------------- 1 | #ignore slurm files 2 | slurm-*.out 3 | 4 | #ignore objects 5 | *.o 6 | 7 | #ignore txt (typically output) files 8 | *.txt 9 | -------------------------------------------------------------------------------- /Amber/ala.in: -------------------------------------------------------------------------------- 1 | Production MD 2 | &cntrl 3 | imin=0, 4 | ntx=1, 5 | irest=0, 6 | ntc=2, ntf=2, 7 | ntb=0, 8 | igb=1, 9 | cut=999.0, 10 | ntt=3, gamma_ln=1.0, temp0=300.0, 11 | ntp=0, pres0=1.0, taup=2.0, 12 | nstlim=1000, dt=0.002, 13 | ntpr=100, ntwx=100, ntwr=500 14 | / 15 | -------------------------------------------------------------------------------- /Amber/ala.rst7: -------------------------------------------------------------------------------- 1 | ACE 2 | 22 3 | 2.0000010 1.0000000 -0.0000013 2.0000010 2.0900000 0.0000001 4 | 1.4862640 2.4538490 0.8898240 1.4862590 2.4538520 -0.8898200 5 | 3.4274200 2.6407950 -0.0000030 4.3905800 1.8774060 -0.0000066 6 | 3.5553754 3.9696488 -0.0000032 2.7331200 4.5561601 -0.0000013 7 | 4.8532621 4.6139253 -0.0000043 5.4075960 4.3155388 0.8898152 8 | 5.6613044 4.2208425 -1.2321480 5.1232615 4.5213630 -2.1312016 9 | 6.6304840 4.7189354 -1.2057907 5.8085401 3.1408723 -1.2413850 10 | 4.7126759 6.1294185 0.0000014 3.6006445 6.6527027 0.0000062 11 | 5.8460533 6.8348833 0.0000025 6.7370014 6.3591620 -0.0000004 12 | 5.8460551 8.2838837 0.0000061 4.8185761 8.6477349 0.0000104 13 | 6.3597984 8.6477313 0.8898282 6.3597900 8.6477353 -0.8898188 14 | -------------------------------------------------------------------------------- /Amber/run_falcon.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=amber_md_gpu 3 | #SBATCH --account=personal 4 | #SBATCH --partition=l40s_normal_q 5 | #SBATCH --gres=gpu:3 6 | #SBATCH --nodes=1 7 | #SBATCH --ntasks=3 8 | #SBATCH --cpus-per-task=4 9 | #SBATCH --time=00:05:00 10 | 11 | module reset 12 | module load Amber/24.0-foss-2023b-AmberTools-24.0-CUDA-12.4.0 13 | 14 | mpirun -np 3 pmemd.cuda.MPI -O -i ala.in -o ala.out -p ala.top -c ala.rst7 -r ala_prod.rst7 -x 
ala.nc 15 |
-------------------------------------------------------------------------------- /Amber/run_tinkercliffs.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --job-name=amber_md_gpu
3 | #SBATCH --account=personal
4 | #SBATCH --partition=a100_normal_q
5 | #SBATCH --gres=gpu:3
6 | #SBATCH --nodes=1
7 | #SBATCH --ntasks=3
8 | #SBATCH --cpus-per-task=4
9 | #SBATCH --time=00:05:00
10 |
11 | module reset
12 | module load Amber/24.0-foss-2023b-AmberTools-24.0-CUDA-12.4.0
13 |
14 | mpirun -np 3 pmemd.cuda.MPI -O -i ala.in -o ala.out -p ala.top -c ala.rst7 -r ala_prod.rst7 -x ala.nc
15 |
-------------------------------------------------------------------------------- /Amber/tleap.in: --------------------------------------------------------------------------------
1 | source leaprc.protein.ff14SB
2 | m = sequence { ACE ALA NME }
3 | saveamberparm m ala.top ala.rst7
4 | quit
5 |
-------------------------------------------------------------------------------- /AmberTools/README: --------------------------------------------------------------------------------
1 | First, create a minimal input system (a small peptide in a water box):
2 | test.leap builds an ACE-ALA-NME peptide solvated in a TIP3P water box.
3 | Running
4 | tleap -f test.leap
5 | will generate test.prmtop (parameter/topology file) and test.inpcrd (coordinates file).
6 |
7 | Second, create a simple input file for sander*:
8 | min.in
9 |
10 | Third, run sander by submitting the Slurm script:
11 | sbatch run_minimize_cpu.sh
12 |
13 | ====================================================================================================================
14 | * sander, which stands for Simulated Annealing with Numerical DErivatives of Restraints, is one of the core molecular
15 | dynamics (MD) engines in Amber. It’s a CPU-based program used for energy minimization, molecular dynamics simulations,
16 | NMR refinement and restraints, thermodynamic integration, and free energy calculations.
17 |
-------------------------------------------------------------------------------- /AmberTools/min.in: --------------------------------------------------------------------------------
1 | # min.in
2 | Minimize
3 | &cntrl
4 | imin=1, ! Do energy minimization
5 | maxcyc=500, ! Maximum number of minimization cycles
6 | ncyc=250, ! Switch from steepest descent to conjugate gradient after 250 steps
7 | cut=8.0, ! Nonbonded cutoff (Angstroms)
8 | &end
9 |
-------------------------------------------------------------------------------- /AmberTools/run_minimize_cpu.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #SBATCH --account=personal
3 | #SBATCH --job-name=amber_cpu_test
4 | #SBATCH --output=amber_cpu_test.out
5 | #SBATCH --ntasks-per-node=1
6 | #SBATCH --cpus-per-task=4
7 | #SBATCH --time=00:10:00
8 | #SBATCH --partition=normal_q
9 |
10 | module reset
11 | module load AmberTools
12 |
13 | sander -O -i min.in -o min.out -p test.prmtop -c test.inpcrd -r min.rst
14 |
-------------------------------------------------------------------------------- /AmberTools/test.leap: --------------------------------------------------------------------------------
1 | # test.leap
2 | source leaprc.protein.ff14SB
3 | source leaprc.water.tip3p
4 | model = sequence { ACE ALA NME }
5 | solvatebox model TIP3PBOX 10.0
6 | saveamberparm model test.prmtop test.inpcrd
7 | quit
8 |
-------------------------------------------------------------------------------- /Profiler/amd-uprof/my_app.c: --------------------------------------------------------------------------------
1 | // my_app.c: Simple matrix multiplication
2 | #include <stdio.h>
3 | #include <stdlib.h>
4 | #include <time.h>
5 |
6 | #define N 512
7 |
8 | int main() {
9 | static float A[N][N], B[N][N], C[N][N];
10 | int i, j, k;
11 |
12 | // Initialize matrices A and B with random values
13 | srand(time(NULL));
14 | for (i = 0; i < N; i++)
15 | for (j = 0; j < N; j++) {
16 | A[i][j] = rand() % 100;
17 | B[i][j] = rand() % 100;
18 | }
19 |
20 | // Multiply A and B to get C
21 | for (i = 0; i < N; i++)
22 | for (j = 0; j < N; j++) {
23 | C[i][j] = 0;
24 | for (k = 0; k < N; k++)
25 | C[i][j] += A[i][k] * B[k][j];
26 | }
27 |
28 | // Print one element to prevent compiler optimization
29 | printf("C[100][100] = %.2f\n", C[100][100]);
30 | return 0;
31 | }
32 |
-------------------------------------------------------------------------------- /Profiler/vtune/my_app.c: --------------------------------------------------------------------------------
1 | // my_app.c: Simple matrix multiplication
2 | #include <stdio.h>
3 | #include <stdlib.h>
4 | #include <time.h>
5 |
6 | #define N 512
7 |
8 | int main() {
9 | static float A[N][N], B[N][N], C[N][N];
10 | int i, j, k;
11 |
12 | // Initialize matrices A and B with random values
13 | srand(time(NULL));
14 | for (i = 0; i < N; i++)
15 | for (j = 0; j < N; j++) {
16 | A[i][j] = rand() % 100;
17 | B[i][j] = rand() % 100;
18 | }
19 |
20 | // Multiply A and B to get C
21 | for (i = 0; i < N; i++)
22 | for (j = 0; j < N; j++) {
23 | C[i][j] = 0;
24 | for (k = 0; k < N; k++)
25 | C[i][j] += A[i][k] * B[k][j];
26 | }
27 |
28 | // Print one element to prevent compiler optimization
29 | printf("C[100][100] = %.2f\n", C[100][100]);
30 | return 0;
31 | }
32 |
-------------------------------------------------------------------------------- /Profiler/vtune/vtune-example.slurm: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ######################## start of slurm options #########################################
3 | ########################################
4 | # Job Identification & Runtime #
5 | ########################################
6 | #SBATCH --job-name=vtune-profile # Job name
7 | #SBATCH --account=personal # Account name
8 | #SBATCH --time=01:00:00 # Time limit (HH:MM:SS)
9 | #SBATCH --partition=normal_q # Partition name
10 | #SBATCH --output=job_output_%j.out # Standard output file (%j
= job ID) 11 | #SBATCH --error=job_error_%j.err # Standard error file 12 | ####################################### 13 | # CPU and Node Configuration # 14 | ####################################### 15 | #SBATCH --nodes=1 # Number of nodes 16 | #SBATCH --ntasks-per-node=1 # Number of tasks per node 17 | #SBATCH --cpus-per-task=8 # Number of CPUs for profiling 18 | #SBATCH --mem=8G # Memory per node 19 | ########################## end of slurm options ######################################### 20 | 21 | # Description: This SLURM job script profiles an application using Intel VTune Profiler CLI 22 | # and generates a performance report. 23 | 24 | ######################################################################################### 25 | # Load VTune module 26 | module reset 27 | module load VTune 28 | 29 | # Define profiling parameters 30 | APP_PATH="my_app" # Path to the binary 31 | RESULTS_DIR="vtune_results_${SLURM_JOB_ID}" # Unique output directory 32 | ANALYSIS_TYPE="hotspots" # Change to "memory-access", "threading", etc. 33 | 34 | # Define paths 35 | SRC="my_app.c" 36 | BIN="my_app" 37 | 38 | # Compile the C program (optional if precompiled) 39 | gcc -O2 -o "$BIN" "$SRC" 40 | 41 | # Create results directory 42 | mkdir -p "$RESULTS_DIR" 43 | 44 | # Run VTune profiling 45 | vtune -collect $ANALYSIS_TYPE \ 46 | -result-dir "$RESULTS_DIR" \ 47 | "$APP_PATH" > "$RESULTS_DIR/vtune_output.log" 2>&1 48 | 49 | # Summarize results 50 | vtune -report summary -result-dir "$RESULTS_DIR" > "$RESULTS_DIR/summary.txt" 51 | 52 | # Completion log 53 | echo "VTune profiling completed at $(date)" 54 | echo "Results saved in $RESULTS_DIR" 55 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## examples 2 | Usage examples for computing resources offered by Advanced Research Computing at Virginia Tech (https://www.arc.vt.edu). All examples can be run with 3 | `sbatch -Ayourallocation software_system.sh` 4 | where 5 | - `yourallocation` is the allocation associated with your research project or class 6 | - `software_system.sh` is the submission script (e.g., `openblas_tinkercliffs_rome.sh`) 7 | 8 | Most examples here were put together by former ARC Computational Scientist John Burkardt (see, e.g., https://people.sc.fsu.edu/~jburkardt/) and later updated to the Slurm scheduler and newer clusters. 9 | 10 | If you are an ARC user and would like a new example for a particular software package, contact us: https://arc.vt.edu/help 11 | -------------------------------------------------------------------------------- /VASP/INCAR: -------------------------------------------------------------------------------- 1 | PREC = Low 2 | IBRION = 2 3 | POTIM=.080 4 | NSW = 0 5 | IALGO = 38 6 | ISMEAR = 0 7 | SIGMA = 0.01 8 | LREAL = .TRUE 9 | LWAVE = .TRUE. 10 | LCHARG = .TRUE. 11 | EDIFFG= .0001 12 | LVTOT = .TRUE. 13 | 14 | -------------------------------------------------------------------------------- /VASP/KPOINTS: -------------------------------------------------------------------------------- 1 | Automatic Mesh 2 | 0 3 | Monkhorst-Pack 4 | 1 1 1 5 | 0. 0. 0. 
6 | -------------------------------------------------------------------------------- /VASP/POSACR: -------------------------------------------------------------------------------- 1 | Si Bulk (Primitive Cell) 2 | 5.43 3 | 0.000000 2.715000 2.715000 4 | 2.715000 0.000000 2.715000 5 | 2.715000 2.715000 0.000000 6 | Si 7 | 2 8 | Direct 9 | 0.000000 0.000000 0.000000 10 | 0.250000 0.250000 0.250000 11 | -------------------------------------------------------------------------------- /VASP/submit_tinker_vasp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 3 | #SBATCH --ntasks-per-node=4 4 | #SBATCH --account=personal 5 | #SBATCH --partition=normal_q 6 | #SBATCH --time=00:15:00 7 | # 8 | module reset 9 | module load VASP 10 | module list 11 | # 12 | echo "VASP_TINKERCLIFFS ROME: Normal beginning of execution." 13 | # 14 | # Instead of the command 15 | # 16 | # mpirun -np 4 vasp 17 | # 18 | # we use the following command, which sets the stacksize to "unlimited": 19 | # 20 | mpirun -np 4 /bin/bash -c "ulimit -s unlimited; vasp_std" 21 | if [ $? -ne 0 ]; then 22 | echo "VASP_TINKERCLIFFS ROME: Run error!" 23 | exit 1 24 | fi 25 | # 26 | echo "VASP_TINKERCLIFFS ROME: Normal end of execution." 27 | exit 0 28 | -------------------------------------------------------------------------------- /abaqus/heattransfermanifold.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 3 | #SBATCH --partition=normal_q 4 | #SBATCH --constraint=amd 5 | #SBATCH --ntasks-per-node=16 6 | #SBATCH --cpus-per-task=1 7 | #SBATCH --time=1:00:00 8 | ## Replace "personal" with the name of your slurm account 9 | #SBATCH --account=personal 10 | ## This job is intended for CPU nodes on either the Tinkercliffs or Owl clusters 11 | ## normal_q has multiple types of CPU nodes with differing features like "amd", "intel", or "avx512". 12 | ## We specify which node types to select with the "--constraint=" option as above 13 | 14 | module reset 15 | module load ABAQUS/2024 16 | 17 | # /scratch/ is preferred place for staging and running jobs 18 | cd $SLURM_SUBMIT_DIR 19 | echo "working in `pwd`" 20 | 21 | #echo "Current license availability is:" 22 | #abaqus licensing lmdiag -n 23 | 24 | echo "Unsetting SLURM_GTIDS=$SLURM_GTIDS to prevent error" 25 | unset SLURM_GTIDS 26 | 27 | # Input files are provided as part of the ABAQUS installation. Documentation on the example is available here: 28 | # https://docs.software.vt.edu/abaqusv2024/English/?show=SIMACAEEXARefMap/simaexa-c-heattransmanifold.htm 29 | echo "Fetching job files from Abaqus installation" 30 | abaqus fetch job=heattransfermanifold* 31 | 32 | echo "Running with cpus=$SLURM_NTASKS" 33 | abaqus analysis job=heattransfermanifold cpus=$SLURM_NTASKS interactive 34 | abaqus analysis job=heattransfermanifold_cavity cpus=$SLURM_NTASKS interactive 35 | abaqus analysis job=heattransfermanifold_cavity_parallel cpus=$SLURM_NTASKS interactive -------------------------------------------------------------------------------- /abinit/01h.pspgth: -------------------------------------------------------------------------------- 1 | Goedecker-Teter-Hutter Wed May 8 14:27:44 EDT 1996 2 | 1 1 960508 zatom,zion,pspdat 3 | 2 1 0 0 2001 0. 
pspcod,pspxc,lmax,lloc,mmax,r2well 4 | 0.2000000 -4.0663326 0.6778322 0 0 rloc, c1, c2, c3, c4 5 | 0 0 0 rs, h1s, h2s 6 | 0 0 rp, h1p 7 | 1.36 .2 0.0 rcutoff, rloc 8 | 9 | -------------------------------------------------------------------------------- /abinit/abinit_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=1 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load ABINIT 11 | module list 12 | # 13 | echo "ABINIT_TINKERCLIFFS ROME: Normal beginning of execution." 14 | # 15 | abinit < tbase1_x.files > abinit_tinkercliffs_rome.txt 16 | if [ $? -ne 0 ]; then 17 | echo "ABINIT_TINKERCLIFFS ROME: Run error!" 18 | exit 1 19 | fi 20 | # 21 | echo "ABINIT_TINKERCLIFFS ROME: Normal end of execution." 22 | exit 0 23 | -------------------------------------------------------------------------------- /abinit/tbase1_x.files: -------------------------------------------------------------------------------- 1 | tbase1_1.in 2 | tbase1_x.out 3 | tbase1_xi 4 | tbase1_xo 5 | tbase1_x 6 | 01h.pspgth 7 | -------------------------------------------------------------------------------- /alphafold2/Melanogaster_GR28BD_tetramer.fasta: -------------------------------------------------------------------------------- 1 | >sp|Q9VM08|GR28B_DROME Putative gustatory receptor 28b OS=Drosophila melanogaster GN=Gr28b 2 | MSFYFCEIFKPRDAFGAEQTLLLYTYLLGLTPFRLRGQAGERQFHLSKIGYLNAFLQLSF 3 | FSYCFLAALIEQQSIVGYFFKSEISQMGDSLQKFIGMTGMSILFLCSSIRVRLLIHIWDR 4 | ISYIDDRFLNLGVCFNYPAIMRLRLLQIFLINGVQLGYLISSNWMLLGNDVRPIYTAIVA 5 | FYVPQIFLLSIVMLFNATLHRLWQHFTVLNQVLKNLAHQWDTRSLKAVNQKQRSLQCLDS 6 | FSMYTIVTKDPAEIIQESMEIHHLICEAAATANKYFTYQLLTIISIAFLIIVFDAYYVLE 7 | TLLGKSKRESKFKTVEFVTFFSCQMILYLIAIISIVEGSNRAIKKSEKTGGIVHSLLNKT 8 | KSAEVKEKLQQFSMQLMHLKINFTAAGLFNIDRTLYFTISGALTTYLIILLQFTSNSPNN 9 | GYGNGSSCCETFNNMTNHTL 10 | >sp|Q9VM08|GR28B_DROME Putative gustatory receptor 28b OS=Drosophila melanogaster GN=Gr28b 11 | MSFYFCEIFKPRDAFGAEQTLLLYTYLLGLTPFRLRGQAGERQFHLSKIGYLNAFLQLSF 12 | FSYCFLAALIEQQSIVGYFFKSEISQMGDSLQKFIGMTGMSILFLCSSIRVRLLIHIWDR 13 | ISYIDDRFLNLGVCFNYPAIMRLRLLQIFLINGVQLGYLISSNWMLLGNDVRPIYTAIVA 14 | FYVPQIFLLSIVMLFNATLHRLWQHFTVLNQVLKNLAHQWDTRSLKAVNQKQRSLQCLDS 15 | FSMYTIVTKDPAEIIQESMEIHHLICEAAATANKYFTYQLLTIISIAFLIIVFDAYYVLE 16 | TLLGKSKRESKFKTVEFVTFFSCQMILYLIAIISIVEGSNRAIKKSEKTGGIVHSLLNKT 17 | KSAEVKEKLQQFSMQLMHLKINFTAAGLFNIDRTLYFTISGALTTYLIILLQFTSNSPNN 18 | GYGNGSSCCETFNNMTNHTL 19 | >sp|Q9VM08|GR28B_DROME Putative gustatory receptor 28b OS=Drosophila melanogaster GN=Gr28b 20 | MSFYFCEIFKPRDAFGAEQTLLLYTYLLGLTPFRLRGQAGERQFHLSKIGYLNAFLQLSF 21 | FSYCFLAALIEQQSIVGYFFKSEISQMGDSLQKFIGMTGMSILFLCSSIRVRLLIHIWDR 22 | ISYIDDRFLNLGVCFNYPAIMRLRLLQIFLINGVQLGYLISSNWMLLGNDVRPIYTAIVA 23 | FYVPQIFLLSIVMLFNATLHRLWQHFTVLNQVLKNLAHQWDTRSLKAVNQKQRSLQCLDS 24 | FSMYTIVTKDPAEIIQESMEIHHLICEAAATANKYFTYQLLTIISIAFLIIVFDAYYVLE 25 | TLLGKSKRESKFKTVEFVTFFSCQMILYLIAIISIVEGSNRAIKKSEKTGGIVHSLLNKT 26 | KSAEVKEKLQQFSMQLMHLKINFTAAGLFNIDRTLYFTISGALTTYLIILLQFTSNSPNN 27 | GYGNGSSCCETFNNMTNHTL 28 | >sp|Q9VM08|GR28B_DROME Putative gustatory receptor 28b OS=Drosophila melanogaster GN=Gr28b 29 | MSFYFCEIFKPRDAFGAEQTLLLYTYLLGLTPFRLRGQAGERQFHLSKIGYLNAFLQLSF 30 | FSYCFLAALIEQQSIVGYFFKSEISQMGDSLQKFIGMTGMSILFLCSSIRVRLLIHIWDR 31 | ISYIDDRFLNLGVCFNYPAIMRLRLLQIFLINGVQLGYLISSNWMLLGNDVRPIYTAIVA 32 | FYVPQIFLLSIVMLFNATLHRLWQHFTVLNQVLKNLAHQWDTRSLKAVNQKQRSLQCLDS 33 | FSMYTIVTKDPAEIIQESMEIHHLICEAAATANKYFTYQLLTIISIAFLIIVFDAYYVLE 34 | 
TLLGKSKRESKFKTVEFVTFFSCQMILYLIAIISIVEGSNRAIKKSEKTGGIVHSLLNKT 35 | KSAEVKEKLQQFSMQLMHLKINFTAAGLFNIDRTLYFTISGALTTYLIILLQFTSNSPNN 36 | GYGNGSSCCETFNNMTNHTL 37 | -------------------------------------------------------------------------------- /alphafold2/run_tinkercliffs_A100.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account= 3 | #SBATCH --partition=a100_normal_q 4 | #SBATCH --nodes=1 5 | #SBATCH --gres=gpu:1 6 | #SBATCH --ntasks-per-node=16 7 | #SBATCH --cpus-per-task=1 8 | #SBATCH --time=1-00:00:00 9 | 10 | echo "AlphaFold2 job launched on `hostname`" 11 | 12 | module reset 13 | module load AlphaFold/2.3.2-foss-2023a-CUDA-12.1.1 14 | 15 | cd $SLURM_SUBMIT_DIR 16 | echo "Working directory is `pwd`" 17 | 18 | INPUTFASTA="Melanogaster_GR28BD_tetramer.fasta" 19 | 20 | echo "Checking for existence of input fasta file: $INPUTFASTA" 21 | if [[ -a "./$INPUTFASTA" ]] 22 | then 23 | echo "Input fasta found, continuing... " 24 | else 25 | echo "Fasta file not found in current directory, exiting ..." 26 | exit 1; 27 | fi 28 | 29 | mkdir -p ./output 30 | 31 | gpumon () 32 | { 33 | nvidia-smi --query-gpu=timestamp,pci.bus_id,temperature.gpu,utilization.gpu,utilization.memory,memory.used --format=csv -lms 100 | grep --color=auto -v " 0 %, 0 %" 34 | } 35 | 36 | echo "Logging gpu utilization in the background to $SLURM_JOBID.gpu.log" 37 | gpumon > $SLURM_JOBID.gpu.log & 38 | 39 | export ALPHAFOLD_HHBLITS_N_CPU=$SLURM_NTASKS 40 | export ALPHAFOLD_JACKHMMER_N_CPU=$SLURM_NTASKS 41 | echo "HHBLITS CPUS=$ALPHAFOLD_HHBLITS_N_CPU and HMMR CPUS=$ALPHAFOLD_JACKHMMER_N_CPU" 42 | 43 | TMPDIR=$TMPNVME 44 | echo "TMPDIR set to $TMPDIR" 45 | alphafold --model_preset=multimer \ 46 | --test_tmpdir=$TMPNVME \ 47 | --fasta_paths=./$INPUTFASTA \ 48 | --output_dir=./output \ 49 | --max_template_date=3000-01-01 50 | -------------------------------------------------------------------------------- /alphafold3/fold_input.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "2PV7", 3 | "sequences": [ 4 | { 5 | "protein": { 6 | "id": ["A", "B"], 7 | "sequence": "GMRESYANENQFGFKTINSDIHKIVIVGGYGKLGGLFARYLRASGYPISILDREDWAVAESILANADVVIVSVPINLTLETIERLKPYLTENMLLADLTSVKREPLAKMLEVHTGAVLGLHPMFGADIASMAKQVVVRCDGRFPERYEWLLEQIQIWGAKIYQTNATEHDHNMTYIQALRHFSTFANGLHLSKQPINLANLLALSSPIYRLELAMIGRLFAQDAELYADIIMDKSENLAVIETLKQTYDEALTFFENNDRQGFIDAFHKVRDWFGDYSEQFLKESRQLLQQANDLKQG" 8 | } 9 | } 10 | ], 11 | "modelSeeds": [1], 12 | "dialect": "alphafold3", 13 | "version": 1 14 | } -------------------------------------------------------------------------------- /alphafold3/run_tinkercliffs_A100.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account= 3 | #SBATCH --partition=a100_normal_q 4 | #SBATCH --nodes=1 5 | #SBATCH --gres=gpu:1 6 | #SBATCH --ntasks-per-node=16 7 | #SBATCH --cpus-per-task=1 8 | #SBATCH --time=1-00:00:00 9 | 10 | echo "AlphaFold3 job launched on `hostname`" 11 | 12 | module reset 13 | module load AlphaFold/3.0.1 14 | 15 | ## Copy example input JSON file to the expected location 16 | cp fold_input.json $HOME/AlphaFold3/af_input/fold_input.json 17 | 18 | alphafold3 19 | 20 | echo "Results available at $HOME/AlphaFold3/af_input" -------------------------------------------------------------------------------- /ansys/example.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ######################## 
start of slurm options ######################################### 3 | ######################################## 4 | # Job Identification & Runtime # 5 | ######################################## 6 | #SBATCH --job-name=ansys-fluent-example # Job name 7 | #SBATCH --account=personal # Account name 8 | #SBATCH --time=01:00:00 # Time limit (HH:MM:SS) 9 | #SBATCH --partition=normal_q # Partition name 10 | #SBATCH --output=job_output_%j.out # Standard output file (%j = job ID) 11 | #SBATCH --error=job_error_%j.err # Standard error file 12 | ####################################### 13 | # CPU and Node Configuration # 14 | ####################################### 15 | #SBATCH --nodes=1 # Number of nodes 16 | #SBATCH --ntasks-per-node=1 # Number of tasks (processes) per node 17 | #SBATCH --cpus-per-task=8 # Number of CPUs per task (threading) 18 | #SBATCH --mem=64G # Memory per node 19 | ########################## end of slurm options ######################################### 20 | 21 | # Description: SLURM job script to run ANSYS Fluent using a journal file 22 | # Usage: sbatch example.slurm 23 | 24 | ######################################################################################### 25 | 26 | # Reset module system and load ANSYS 27 | module reset 28 | module load ANSYS 29 | 30 | # Echo job info 31 | echo "Running ANSYS Fluent with $SLURM_CPUS_PER_TASK CPUs" 32 | 33 | # Run Fluent in batch mode with the specified journal file 34 | fluent 2d -g -t$SLURM_CPUS_PER_TASK -i run-injet.jou 35 | -------------------------------------------------------------------------------- /ansys/inlet.cas: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/ansys/inlet.cas -------------------------------------------------------------------------------- /ansys/inlet.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/ansys/inlet.dat -------------------------------------------------------------------------------- /ansys/run-injet.jou: -------------------------------------------------------------------------------- 1 | /file/read-case inlet.cas 2 | /file/read-data inlet.dat 3 | /solve/iterate 100 4 | /file/write-case-data results_inlet 5 | /exit 6 | yes 7 | -------------------------------------------------------------------------------- /ansys/tc-fluent.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ## tc-fluent.sh - demo Ansys Fluent batch job for Tinkercliffs 3 | ## Usage: set input file below, then "sbatch tc-fluent.sh" 4 | #SBATCH --time=02:00:00 5 | #SBATCH --nodes=1 6 | #SBATCH --ntasks-per-node=128 7 | #SBATCH --cpus-per-task=1 8 | #SBATCH --partition=normal_q 9 | #SBATCH --account= 10 | 11 | 12 | module reset 13 | module load ANSYS/22.1 14 | 15 | cd $SLURM_SUBMIT_DIR 16 | 17 | NODEFILE="$(pwd)/slurmhosts.$SLURM_JOB_ID.txt" 18 | srun hostname -s &> $NODEFILE 19 | 20 | ansysFile="input.jou" 21 | 22 | fluent 3ddp -g -t $SLURM_NTASKS -cnf=$NODEFILE -mpi=intel -i $ansysFile > out_wNodeList.txt 23 | 24 | echo "Normal end of execution." 
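Note that `tc-fluent.sh` reads a journal file named `input.jou`, which is not included in this directory. The snippet below is a minimal, hypothetical sketch of one way to generate it before submitting; `my_case.cas`, `my_case.dat`, and `my_results` are placeholder names (the script launches the 3D double-precision solver, so a 3D case is assumed), and the journal commands simply mirror `run-injet.jou`.
```
# Hypothetical helper: write the input.jou that tc-fluent.sh expects.
# Replace the placeholder case/data/result names with your own files.
cat > input.jou <<'EOF'
/file/read-case my_case.cas
/file/read-data my_case.dat
/solve/iterate 100
/file/write-case-data my_results
/exit
yes
EOF
```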
-------------------------------------------------------------------------------- /apptainer/1.4.0/H2O_GW100_def2-QZVP.inp: -------------------------------------------------------------------------------- 1 | &FORCE_EVAL 2 | METHOD Quickstep 3 | &DFT 4 | BASIS_SET_FILE_NAME BASIS_def2_QZVP_RI_ALL 5 | POTENTIAL_FILE_NAME POTENTIAL 6 | &MGRID 7 | CUTOFF 400 8 | REL_CUTOFF 50 9 | &END MGRID 10 | &QS 11 | ! all electron calculation since GW100 is all-electron test 12 | METHOD GAPW 13 | &END QS 14 | &POISSON 15 | PERIODIC NONE 16 | PSOLVER MT 17 | &END 18 | &SCF 19 | EPS_SCF 1.0E-6 20 | SCF_GUESS ATOMIC 21 | MAX_SCF 200 22 | &END SCF 23 | &XC 24 | &XC_FUNCTIONAL PBE 25 | &END XC_FUNCTIONAL 26 | ! GW is part of the WF_CORRELATION section 27 | &WF_CORRELATION 28 | &RI_RPA 29 | ! use 100 points to perform the frequency integration in GW 30 | QUADRATURE_POINTS 100 31 | &GW 32 | ! compute the G0W0@PBE energy of HOMO-9, HOMO-8, ... , HOMO-1, HOMO 33 | CORR_OCC 10 34 | ! compute the G0W0@PBE energy of LUMO, LUMO+1, ... , LUMO+20 35 | CORR_VIRT 20 36 | &END GW 37 | &END RI_RPA 38 | &END 39 | &END XC 40 | &END DFT 41 | &SUBSYS 42 | &CELL 43 | ABC 10.0 10.0 10.0 44 | PERIODIC NONE 45 | &END CELL 46 | &COORD 47 | O 0.0000 0.0000 0.0000 48 | H 0.7571 0.0000 0.5861 49 | H -0.7571 0.0000 0.5861 50 | &END COORD 51 | &TOPOLOGY 52 | &CENTER_COORDINATES 53 | &END 54 | &END TOPOLOGY 55 | &KIND H 56 | ! def2-QZVP is the basis which has been used in the GW100 paper 57 | BASIS_SET def2-QZVP 58 | ! just use a very large RI basis to ensure excellent convergence with respect to the RI basis 59 | BASIS_SET RI_AUX RI-5Z 60 | POTENTIAL ALL 61 | &END KIND 62 | &KIND O 63 | BASIS_SET def2-QZVP 64 | BASIS_SET RI_AUX RI-5Z 65 | POTENTIAL ALL 66 | &END KIND 67 | &END SUBSYS 68 | &END FORCE_EVAL 69 | &GLOBAL 70 | RUN_TYPE ENERGY 71 | PROJECT ALL_ELEC 72 | PRINT_LEVEL MEDIUM 73 | PREFERRED_DIAG_LIBRARY ScaLAPACK 74 | &END GLOBAL 75 | 76 | -------------------------------------------------------------------------------- /apptainer/1.4.0/README.md: -------------------------------------------------------------------------------- 1 | # Apptainer 2 | 3 | This example uses a container with the CP2K software and runs a simple case from https://github.com/cp2k/cp2k-examples/blob/master/gw/1_H2O_GW100/H2O_GW100_def2-QZVP.inp. 4 | -------------------------------------------------------------------------------- /apptainer/1.4.0/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=personal 3 | #SBATCH --partition=normal_q 4 | #SBATCH --nodes=1 5 | #SBATCH --ntasks-per-node=1 6 | #SBATCH --cpus-per-task=1 7 | #SBATCH --output=output.log 8 | #SBATCH --time=0-00:30:00 9 | 10 | module load apptainer 11 | CTNR=/common/containers/cp2k_latest.sif 12 | apptainer run $CTNR cp2k -i H2O_GW100_def2-QZVP.inp 13 | -------------------------------------------------------------------------------- /apptainer/README.md: -------------------------------------------------------------------------------- 1 | # Apptainer 2 | 3 | Apptainer (formerly known as singularity) is a containerization software that works well on HPC systems. 
4 | 5 | Image files can be pulled from various container repositories including docker: 6 | ``` 7 | apptainer pull docker://cp2k/cp2k:2025.1_openmpi_generic_psmp 8 | ``` 9 | 10 | Please see the apptainer documentation for more details: https://apptainer.org/docs/user/main/ 11 | -------------------------------------------------------------------------------- /cellranger/9.0.1/README.md: -------------------------------------------------------------------------------- 1 | # CellRanger 2 | 3 | This example follows the 10XGenomics tutorial for a simple gene expression analysis: https://www.10xgenomics.com/support/cn/software/cell-ranger/8.0/tutorials/cr-tutorial-ct 4 | 5 | ![run_count_1kpbmcs](run_count_1kpbmcs.png) -------------------------------------------------------------------------------- /cellranger/9.0.1/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=personal 3 | #SBATCH --partition=normal_q 4 | #SBATCH --nodes=1 5 | #SBATCH --ntasks-per-node=1 6 | #SBATCH --cpus-per-task=1 7 | #SBATCH --output=output.log 8 | #SBATCH --time=0-00:30:00 9 | 10 | module load CellRanger 11 | cellranger count --id=run_count_1kpbmcs \ 12 | --fastqs=/common/data/cellranger/fastqs/pbmc_1k_v3_fastqs \ 13 | --sample=pbmc_1k_v3 \ 14 | --transcriptome=/common/data/cellranger/references/refdata-gex-GRCh38-2024-A \ 15 | --create-bam false 16 | -------------------------------------------------------------------------------- /cellranger/9.0.1/run_count_1kpbmcs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/cellranger/9.0.1/run_count_1kpbmcs.png -------------------------------------------------------------------------------- /cellranger/README.md: -------------------------------------------------------------------------------- 1 | # CellRanger 2 | 3 | CellRanger is a free software by 10X Genomics for processing single cell sequencing data (e.g. gene expression analysis). The results can be transfered and viewed on a personal workstation. 4 | 5 | For convenience, sample datasets and reference genomes are provided in /common/data/cellranger. -------------------------------------------------------------------------------- /cuda_bandwidth/cuda_bandwidth_infer_t4.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=32 --gres=gpu:1 5 | #SBATCH -p t4_dev_q 6 | # 7 | 8 | #Load CUDA module 9 | module reset 10 | module load CUDA/11.1.1-GCC-10.2.0 11 | # 12 | 13 | #Run CUDA's bandwidth test 14 | $EBROOTCUDA/extras/demo_suite/bandwidthTest --memory=pinned --mode=quick --dtoh 15 | 16 | -------------------------------------------------------------------------------- /cuda_bandwidth/cuda_bandwidth_tinkercliffs_a100.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=1 --gres=gpu:1 5 | #SBATCH -p a100_dev_q 6 | # 7 | 8 | #Load CUDA module 9 | module reset 10 | module load cuda11.2/toolkit 11 | module load CUDA/11.1.1-GCC-10.2.0 12 | # 13 | 14 | #Run CUDA's bandwidth test 15 | $EBROOTCUDA/extras/demo_suite/bandwidthTest --memory=pinned --mode=quick --dtoh 16 | 17 | -------------------------------------------------------------------------------- /cuquantum/cuquantum_22.07_tinkercliffs_a100.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=cuquantum 3 | #SBATCH --partition=a100_normal_q 4 | #SBATCH --nodes=1 5 | #SBATCH --gres=gpu:1 6 | #SBATCH --cpus-per-task=8 7 | #SBATCH --account= 8 | ## This requests 1 node from the a100_normal_q partition, 1 gpu on that node, and 8 cores which provides 256GB memory 9 | 10 | module load containers/apptainer 11 | 12 | #Ensure shell variable USER is set correctly and that /globalscratch directory is set up. 13 | USER=`whoami` 14 | 15 | #Set which container sif to use 16 | # This container was created on 8/23/2022 from Nvidia's docker registry using the following command: 17 | # "apptainer pull --dir /localscratch/brownm12/ cuquantum-appliance_22.07-cirq.sif docker://nvcr.io/nvidia/cuquantum-appliance_22.07-cirq" 18 | CTNR=/global/arcsingularity/cuquantum-appliance_22.07-cirq.sif 19 | 20 | #Set bind options to map directories into the container 21 | BOPTS="--bind /home/$USER,/projects" 22 | [[ -d /globalscratch/$USER ]] && BOPTS="$BOPTS,/globalscratch/$USER" 23 | 24 | cd $SLURM_SUBMIT_DIR 25 | 26 | # Run the three examples included with cuQuantum and sends output to files in the submission directory 27 | # "exec" runs the requested commands inside the container and then exits 28 | # "--nv" turns on singularity's nvidia GPU support which maps libraries and variables into the container 29 | # BOPTS and CTNR are expanded to values set above 30 | # /workspace/examples is a directory inside the container supplied by Nvidia 31 | 32 | echo "Running three examples in the Nvidia cuQuantum container" 33 | echo "jobid: $SLURM_JOBID, working directory: `pwd`" 34 | apptainer exec --nv $BOPTS $CTNR python /workspace/examples/ghz.py --nqubits 20 --nsamples 10000 --ngpus 1 > ghz_out.$SLURM_JOBID.txt 35 | echo "Ran GHZ example, output written to ghz_out.$SLURM_JOBID.txt" 36 | apptainer exec --nv $BOPTS $CTNR python /workspace/examples/hidden_shift.py --nqubits 20 --nsamples 100000 --ngpus 1 > hidden_shift_out.$SLURM_JOBID.txt 37 | echo "Ran Hidden-Shift example, output written to hidden_shift_out.$SLURM_JOBID.txt" 38 | apptainer exec --nv $BOPTS $CTNR python /workspace/examples/simon.py --nbits 15 --ngpus 1 > simon_out.$SLURM_JOBID.txt 39 | echo "Ran Simon example, output written to simon_out.$SLURM_JOBID.txt" 40 | -------------------------------------------------------------------------------- /cuquantum/cuquantum_22.07_tinkercliffs_dgx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=cuquantum 3 | #SBATCH --partition=dgx_normal_q 4 | #SBATCH --nodes=1 5 | #SBATCH --gres=gpu:1 6 | #SBATCH --cpus-per-task=8 7 | #SBATCH --account= 8 | ## This requests 1 node from the dgx_normal_q partition, 1 gpu on that node, and 8 cores which provides 256GB memory 9 | 10 | module load containers/apptainer 11 | 12 | #Ensure shell variable USER is set correctly and that /globalscratch directory is set up. 
13 | USER=`whoami` 14 | 15 | #Set which container sif to use 16 | # This container was created on 8/23/2022 from Nvidia's docker registry using the following command: 17 | # "singularity pull --dir /localscratch/brownm12/ cuquantum-appliance_22.07-cirq.sif docker://nvcr.io/nvidia/cuquantum-appliance_22.07-cirq" 18 | CTNR=/global/arcsingularity/cuquantum-appliance_22.07-cirq.sif 19 | 20 | #Set bind options to map directories into the container 21 | BOPTS="--bind /home/$USER,/projects" 22 | [[ -d /globalscratch/$USER ]] && BOPTS="$BOPTS,/globalscratch/$USER" 23 | 24 | cd $SLURM_SUBMIT_DIR 25 | 26 | # Run the three examples included with cuQuantum and sends output to files in the submission directory 27 | # "exec" runs the requested commands inside the container and then exits 28 | # "--nv" turns on singularity's nvidia GPU support which maps libraries and variables into the container 29 | # BOPTS and CTNR are expanded to values set above 30 | # /workspace/examples is a directory inside the container supplied by Nvidia 31 | 32 | echo "Running three examples in the Nvidia cuQuantum container" 33 | echo "jobid: $SLURM_JOBID, working directory: `pwd`" 34 | apptainer exec --nv $BOPTS $CTNR python /workspace/examples/ghz.py --nqubits 20 --nsamples 10000 --ngpus 1 > ghz_out.$SLURM_JOBID.txt 35 | echo "Ran GHZ example, output written to ghz_out.$SLURM_JOBID.txt" 36 | apptainer exec --nv $BOPTS $CTNR python /workspace/examples/hidden_shift.py --nqubits 20 --nsamples 100000 --ngpus 1 > hidden_shift_out.$SLURM_JOBID.txt 37 | echo "Ran Hidden-Shift example, output written to hidden_shift_out.$SLURM_JOBID.txt" 38 | apptainer exec --nv $BOPTS $CTNR python /workspace/examples/simon.py --nbits 15 --ngpus 1 > simon_out.$SLURM_JOBID.txt 39 | echo "Ran Simon example, output written to simon_out.$SLURM_JOBID.txt" 40 | -------------------------------------------------------------------------------- /dalton/H2O_cc-pVDZ_nosym.mol: -------------------------------------------------------------------------------- 1 | BASIS 2 | cc-pVDZ 3 | H2O 4 | 5 | 2 0 6 | 8. 1 7 | O 0.0 0.0000000000 0.0 8 | 1. 
2 9 | H1 1.430 0.0 1.1 10 | H2 -1.430 0.0 1.1 11 | -------------------------------------------------------------------------------- /dalton/cc2dc_energy.dal: -------------------------------------------------------------------------------- 1 | **DALTON INPUT 2 | .RUN WAVEFUNCTION 3 | **INTEGRALS 4 | .DIPLEN 5 | .NUCPOT 6 | .NELFLD 7 | .THETA 8 | .SECMOM 9 | *ONEINT 10 | .SOLVENT 11 | 10 12 | **WAVE FUNCTIONS 13 | .CC 14 | *SCF INPUT 15 | .THRESH 16 | 1.0D-11 17 | *CC INP 18 | .CC2 19 | .THRLEQ 20 | 1.0D-9 21 | .THRENR 22 | 1.0D-9 23 | .MAX IT 24 | 90 25 | .MXLRV 26 | 180 27 | *CCSLV 28 | .SOLVAT 29 | 1 30 | 10 4.00 78.54 1.778 31 | .ETOLSL 32 | 1.0D-7 33 | .TTOLSL 34 | 1.0D-7 35 | .LTOLSL 36 | 1.0D-7 37 | .MXSLIT 38 | 200 39 | *CCFOP 40 | .DIPMOM 41 | .QUADRU 42 | .SECMOM 43 | .NONREL 44 | **END OF DALTON INPUT 45 | -------------------------------------------------------------------------------- /dalton/cc2dc_energy.mol: -------------------------------------------------------------------------------- 1 | BASIS 2 | cc-pVDZ 3 | H2O i H2O(DC) 4 | ------------------------ 5 | 2 0 1 1.00D-12 6 | 8.0 1 7 | O 0.000000 0.000000 0.000000 8 | 1.0 2 9 | H -0.756799 0.000000 0.586007 10 | H 0.756799 0.000000 0.586007 11 | -------------------------------------------------------------------------------- /dalton/dalton_nompi_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #SBATCH --nodes=1 4 | #SBATCH --ntasks-per-node=4 5 | #SBATCH --time=00:30:00 6 | 7 | #load modules 8 | module reset 9 | module load Dalton/2020-iomkl-2019b-nompi 10 | 11 | # Setting the variables: 12 | daltoninp=cc2dc_energy.dal 13 | daltonmol=cc2dc_energy.mol 14 | 15 | #Use local scratch for temporary directory 16 | export DALTON_TMPDIR=$TMPDIR 17 | 18 | echo "$(date): Starting run" 19 | 20 | echo "Running the example: INPUT=${daltoninp} - Molecule=${daltonmol}" 21 | 22 | dalton -omp ${SLURM_NTASKS} -dal ${daltoninp} -mol ${daltonmol} 23 | 24 | echo "$(date): Program finished with exit code $?" 25 | -------------------------------------------------------------------------------- /dalton/dalton_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #SBATCH --nodes=1 4 | #SBATCH --ntasks-per-node=4 5 | #SBATCH --time=00:30:00 6 | 7 | #load modules 8 | module reset 9 | module load Dalton 10 | 11 | # Setting the variables: 12 | daltoninp=dft_rspexci_nosym.dal 13 | daltonmol=H2O_cc-pVDZ_nosym.mol 14 | 15 | #Use local scratch for temporary directory 16 | export DALTON_TMPDIR=$TMPDIR 17 | 18 | echo "$(date): Starting run" 19 | 20 | echo "Running the example: INPUT=${daltoninp} - Molecule=${daltonmol}" 21 | 22 | dalton -N ${SLURM_NTASKS} -dal ${daltoninp} -mol ${daltonmol} 23 | 24 | echo "$(date): Program finished with exit code $?" 25 | -------------------------------------------------------------------------------- /dalton/dft_rspexci_nosym.dal: -------------------------------------------------------------------------------- 1 | **DALTON INPUT 2 | .RUN RESPONSE 3 | **INTEGRALS 4 | .PROPRINT 5 | **WAVE FUNCTIONS 6 | .DFT 7 | B3LYP 8 | **RESPONSE 9 | *LINEAR 10 | .SINGLE RESIDUE 11 | .ROOTS 12 | 3 13 | **END OF DALTON INPUT 14 | -------------------------------------------------------------------------------- /espresso/README: -------------------------------------------------------------------------------- 1 | The example here is taken from example01 from the Quantum Espresso source tarball. 
2 | Note that we set Espresso's TMP_DIR variable to $TMPDIR, which on ARC systems 3 | points to the local hard drive. One might also try $TMPFS (memory) for a faster 4 | calculation, depending on how much temporary storage is required (note that putting 5 | TMP_DIR in memory will eat up memory assigned to the job). 6 | -------------------------------------------------------------------------------- /espresso/espresso_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:10:00 4 | #SBATCH -N1 --ntasks-per-node=4 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load QuantumESPRESSO 11 | # 12 | echo "ESPRESSO_TINKERCLIFFS ROME: Normal beginning of execution." 13 | # 14 | ./run_example 15 | if [ $? -ne 0 ]; then 16 | echo "ESPRESSO_TINKERCLIFFS ROME: Run error!" 17 | exit 1 18 | fi 19 | # 20 | echo "ESPRESSO_TINKERCLIFFS ROME: Normal end of execution." 21 | exit 0 22 | 23 | -------------------------------------------------------------------------------- /gaussian/g09_input.txt: -------------------------------------------------------------------------------- 1 | %nproc=4 2 | #P TEST STO-3G COMPLEX pop=full scf=conventional 3 | 4 | Gaussian Test Job 01 5 | SINGLET DELTA STO-3G//STO-3G DIOXYGEN 6 | 7 | 0 1 8 | O 9 | O 1 R 10 | 11 | R 1.220 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /gaussian/gaussian_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=8 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load gaussian 11 | module list 12 | 13 | # Set Gaussian's scratch directory to local disk 14 | # Could also try tmpfs ($TMPFS) to provide fastest performance 15 | # But will consume memory 16 | export GAUSS_SCRDIR=$TMPFS 17 | # 18 | echo "GAUSSIAN_TINKERCLIFFS ROME: Normal beginning of execution." 19 | # 20 | # Run. 21 | # 22 | g09 < g09_input.txt > gaussian_tinkercliffs_rome.txt 23 | if [ $? -ne 0 ]; then 24 | echo "GAUSSIAN_TINKERCLIFFS ROME: Run error!" 25 | exit 1 26 | fi 27 | # 28 | echo "GAUSSIAN_TINKERCLIFFS ROME: Normal end of execution." 29 | exit 0 30 | -------------------------------------------------------------------------------- /gmt/gmt_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=1 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load GMT 11 | module list 12 | # 13 | echo "GMT_TINKERCLIFFS ROME: Normal beginning of execution." 14 | # 15 | gmt pscoast -R-130/-70/24/52 -JB-100/35/33/45/6i -Ba -B+t"Conic Projection" -N1/thickest -N2/thinnest -A500 -Ggray -Wthinnest -P > GMT_tut_4.ps 16 | if [ $? -ne 0 ]; then 17 | echo "GMT_TINKERCLIFFS ROME: Run error!" 18 | exit 1 19 | fi 20 | # 21 | gmt pscoast -Rg -JG280/30/6i -Bag -Dc -A5000 -Gwhite -SDarkTurquoise -P > GMT_tut_5.ps 22 | if [ $? -ne 0 ]; then 23 | echo "GMT_TINKERCLIFFS ROME: Run error!" 24 | exit 1 25 | fi 26 | # 27 | ls *.ps 28 | # 29 | echo "GMT_TINKERCLIFFS ROME: Normal end of execution." 
30 | exit 0 31 | -------------------------------------------------------------------------------- /go/README.md: -------------------------------------------------------------------------------- 1 | # Golang 2 | 3 | This example is a basic demonstration of using go to run a standalone main program. 4 | 5 | For more details on the go language, please see the documentation: https://go.dev/ -------------------------------------------------------------------------------- /go/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=personal 3 | #SBATCH --partition=normal_q 4 | #SBATCH --nodes=1 5 | #SBATCH --ntasks-per-node=1 6 | #SBATCH --cpus-per-task=1 7 | #SBATCH --output=output.log 8 | #SBATCH --time=0-00:30:00 9 | 10 | module load Go/1.23.6 11 | go run main.go -------------------------------------------------------------------------------- /go/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | func main() { 4 | println("Hello, World!") 5 | } -------------------------------------------------------------------------------- /gromacs/README: -------------------------------------------------------------------------------- 1 | This tutorial is based on the Lysozyme in Water example from Justin Lemkul in the VT Department of Biochemistry. See http://www.mdtutorials.com for a detailed explanation of each step of the example. 2 | 3 | The new 2024 version supports PLUMED 2.9.2 integration which supports umbrella sampling, metadynamics, steered MD, path collective variable, etc. 4 | 5 | See `gromacs_tinkercliffs_owl_cpu.sh` for a Slurm example script that can run on the CPU nodes of the TinkerCliffs and Owl clusters. 6 | 7 | See `gromacs_falcon_gpu.sh` for a Slurm example script that can run on the GPU nodes of the Falcon cluster. 
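For orientation, the other files in this directory (`1aki.pdb`, the `*.mdp` files, and the `*_input.txt` redirection files) cover the standard steps of that tutorial. The sketch below outlines the command sequence those inputs support; it is only a rough guide, not the exact contents of the Slurm scripts above, and it assumes a GROMACS module is already loaded so that `gmx` is on the PATH.
```
# Rough sketch of the lysozyme-in-water steps (assumes "gmx" is available;
# on the clusters you would run these inside one of the Slurm scripts above).
gmx pdb2gmx -f 1aki.pdb -o processed.gro -water spce < pdb2gmx_input.txt
gmx editconf -f processed.gro -o newbox.gro -c -d 1.0 -bt cubic
gmx solvate -cp newbox.gro -cs spc216.gro -o solv.gro -p topol.top
gmx grompp -f ions.mdp -c solv.gro -p topol.top -o ions.tpr
gmx genion -s ions.tpr -o solv_ions.gro -p topol.top -pname NA -nname CL -neutral < genion_input.txt
gmx grompp -f minim.mdp -c solv_ions.gro -p topol.top -o em.tpr
gmx mdrun -deffnm em                          # energy minimization
gmx energy -f em.edr -o potential.xvg < energy_input1.txt
gmx grompp -f nvt.mdp -c em.gro -r em.gro -p topol.top -o nvt.tpr
gmx mdrun -deffnm nvt                         # NVT equilibration
gmx grompp -f npt.mdp -c nvt.gro -r nvt.gro -t nvt.cpt -p topol.top -o npt.tpr
gmx mdrun -deffnm npt                         # NPT equilibration
gmx grompp -f md.mdp -c npt.gro -t npt.cpt -p topol.top -o md_0_1.tpr
gmx mdrun -deffnm md_0_1                      # production MD
```
The `*_input.txt` files supply the interactive menu selections (force field, solvent group, energy terms) so these steps can run unattended inside a batch job.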
-------------------------------------------------------------------------------- /gromacs/energy_input1.txt: -------------------------------------------------------------------------------- 1 | 10 0 2 | 3 | -------------------------------------------------------------------------------- /gromacs/energy_input2.txt: -------------------------------------------------------------------------------- 1 | 15 0 2 | 3 | -------------------------------------------------------------------------------- /gromacs/energy_input3.txt: -------------------------------------------------------------------------------- 1 | 16 0 2 | 3 | -------------------------------------------------------------------------------- /gromacs/energy_input4.txt: -------------------------------------------------------------------------------- 1 | 22 0 2 | 3 | -------------------------------------------------------------------------------- /gromacs/genion_input.txt: -------------------------------------------------------------------------------- 1 | 13 2 | 3 | -------------------------------------------------------------------------------- /gromacs/gyrate_input.txt: -------------------------------------------------------------------------------- 1 | 4 2 | 3 | -------------------------------------------------------------------------------- /gromacs/ions.mdp: -------------------------------------------------------------------------------- 1 | ; ions.mdp - used as input into grompp to generate ions.tpr 2 | ; Parameters describing what to do, when to stop and what to save 3 | integrator = steep ; Algorithm (steep = steepest descent minimization) 4 | emtol = 1000.0 ; Stop minimization when the maximum force < 1000.0 kJ/mol/nm 5 | emstep = 0.01 ; Energy step size 6 | nsteps = 50000 ; Maximum number of (minimization) steps to perform 7 | 8 | ; Parameters describing how to find the neighbors of each atom and how to calculate the interactions 9 | nstlist = 1 ; Frequency to update the neighbor list and long range forces 10 | cutoff-scheme = Verlet 11 | ns_type = grid ; Method to determine neighbor list (simple, grid) 12 | coulombtype = PME ; Treatment of long range electrostatic interactions 13 | rcoulomb = 1.0 ; Short-range electrostatic cut-off 14 | rvdw = 1.0 ; Short-range Van der Waals cut-off 15 | pbc = xyz ; Periodic Boundary Conditions (yes/no) 16 | -------------------------------------------------------------------------------- /gromacs/minim.mdp: -------------------------------------------------------------------------------- 1 | ; minim.mdp - used as input into grompp to generate em.tpr 2 | integrator = steep ; Algorithm (steep = steepest descent minimization) 3 | emtol = 1000.0 ; Stop minimization when the maximum force < 1000.0 kJ/mol/nm 4 | emstep = 0.01 ; Energy step size 5 | nsteps = 50000 ; Maximum number of (minimization) steps to perform 6 | 7 | ; Parameters describing how to find the neighbors of each atom and how to calculate the interactions 8 | nstlist = 1 ; Frequency to update the neighbor list and long range forces 9 | cutoff-scheme = Verlet 10 | ns_type = grid ; Method to determine neighbor list (simple, grid) 11 | coulombtype = PME ; Treatment of long range electrostatic interactions 12 | rcoulomb = 1.0 ; Short-range electrostatic cut-off 13 | rvdw = 1.0 ; Short-range Van der Waals cut-off 14 | pbc = xyz ; Periodic Boundary Conditions (yes/no) 15 | -------------------------------------------------------------------------------- /gromacs/npt.mdp: 
-------------------------------------------------------------------------------- 1 | title = OPLS Lysozyme NPT equilibration 2 | define = -DPOSRES ; position restrain the protein 3 | ; Run parameters 4 | integrator = md ; leap-frog integrator 5 | nsteps = 50000 ; 2 * 50000 = 100 ps 6 | dt = 0.002 ; 2 fs 7 | ; Output control 8 | nstxout = 500 ; save coordinates every 1.0 ps 9 | nstvout = 500 ; save velocities every 1.0 ps 10 | nstenergy = 500 ; save energies every 1.0 ps 11 | nstlog = 500 ; update log file every 1.0 ps 12 | ; Bond parameters 13 | continuation = yes ; Restarting after NVT 14 | constraint_algorithm = lincs ; holonomic constraints 15 | constraints = all-bonds ; all bonds (even heavy atom-H bonds) constrained 16 | lincs_iter = 1 ; accuracy of LINCS 17 | lincs_order = 4 ; also related to accuracy 18 | ; Neighborsearching 19 | cutoff-scheme = Verlet 20 | ns_type = grid ; search neighboring grid cells 21 | nstlist = 10 ; 20 fs, largely irrelevant with Verlet scheme 22 | rcoulomb = 1.0 ; short-range electrostatic cutoff (in nm) 23 | rvdw = 1.0 ; short-range van der Waals cutoff (in nm) 24 | ; Electrostatics 25 | coulombtype = PME ; Particle Mesh Ewald for long-range electrostatics 26 | pme_order = 4 ; cubic interpolation 27 | fourierspacing = 0.16 ; grid spacing for FFT 28 | ; Temperature coupling is on 29 | tcoupl = V-rescale ; modified Berendsen thermostat 30 | tc-grps = Protein Non-Protein ; two coupling groups - more accurate 31 | tau_t = 0.1 0.1 ; time constant, in ps 32 | ref_t = 300 300 ; reference temperature, one for each group, in K 33 | ; Pressure coupling is on 34 | pcoupl = Parrinello-Rahman ; Pressure coupling on in NPT 35 | pcoupltype = isotropic ; uniform scaling of box vectors 36 | tau_p = 2.0 ; time constant, in ps 37 | ref_p = 1.0 ; reference pressure, in bar 38 | compressibility = 4.5e-5 ; isothermal compressibility of water, bar^-1 39 | refcoord_scaling = com 40 | ; Periodic boundary conditions 41 | pbc = xyz ; 3-D PBC 42 | ; Dispersion correction 43 | DispCorr = EnerPres ; account for cut-off vdW scheme 44 | ; Velocity generation 45 | gen_vel = no ; Velocity generation is off 46 | -------------------------------------------------------------------------------- /gromacs/nvt.mdp: -------------------------------------------------------------------------------- 1 | title = OPLS Lysozyme NVT equilibration 2 | define = -DPOSRES ; position restrain the protein 3 | ; Run parameters 4 | integrator = md ; leap-frog integrator 5 | nsteps = 50000 ; 2 * 50000 = 100 ps 6 | dt = 0.002 ; 2 fs 7 | ; Output control 8 | nstxout = 500 ; save coordinates every 1.0 ps 9 | nstvout = 500 ; save velocities every 1.0 ps 10 | nstenergy = 500 ; save energies every 1.0 ps 11 | nstlog = 500 ; update log file every 1.0 ps 12 | ; Bond parameters 13 | continuation = no ; first dynamics run 14 | constraint_algorithm = lincs ; holonomic constraints 15 | constraints = all-bonds ; all bonds (even heavy atom-H bonds) constrained 16 | lincs_iter = 1 ; accuracy of LINCS 17 | lincs_order = 4 ; also related to accuracy 18 | ; Neighborsearching 19 | cutoff-scheme = Verlet 20 | ns_type = grid ; search neighboring grid cells 21 | nstlist = 10 ; 20 fs, largely irrelevant with Verlet 22 | rcoulomb = 1.0 ; short-range electrostatic cutoff (in nm) 23 | rvdw = 1.0 ; short-range van der Waals cutoff (in nm) 24 | ; Electrostatics 25 | coulombtype = PME ; Particle Mesh Ewald for long-range electrostatics 26 | pme_order = 4 ; cubic interpolation 27 | fourierspacing = 0.16 ; grid spacing for FFT 28 | ; Temperature 
coupling is on 29 | tcoupl = V-rescale ; modified Berendsen thermostat 30 | tc-grps = Protein Non-Protein ; two coupling groups - more accurate 31 | tau_t = 0.1 0.1 ; time constant, in ps 32 | ref_t = 300 300 ; reference temperature, one for each group, in K 33 | ; Pressure coupling is off 34 | pcoupl = no ; no pressure coupling in NVT 35 | ; Periodic boundary conditions 36 | pbc = xyz ; 3-D PBC 37 | ; Dispersion correction 38 | DispCorr = EnerPres ; account for cut-off vdW scheme 39 | ; Velocity generation 40 | gen_vel = yes ; assign velocities from Maxwell distribution 41 | gen_temp = 300 ; temperature for Maxwell distribution 42 | gen_seed = -1 ; generate a random seed 43 | -------------------------------------------------------------------------------- /gromacs/pdb2gmx_input.txt: -------------------------------------------------------------------------------- 1 | 15 2 | 3 | -------------------------------------------------------------------------------- /gromacs/plumed.dat: -------------------------------------------------------------------------------- 1 | # plumed.dat 2 | gyr: GYRATION ATOMS=1-100 3 | PRINT ARG=gyr FILE=plumed_output.dat STRIDE=100 4 | -------------------------------------------------------------------------------- /gromacs/trjconv_input.txt: -------------------------------------------------------------------------------- 1 | 0 2 | 3 | -------------------------------------------------------------------------------- /gurobi/12.0.1/serial/code01/coins.sol.valid: -------------------------------------------------------------------------------- 1 | # Objective value = 113.45 2 | Pennies 0 3 | Nickels 0 4 | Dimes 2 5 | Quarters 53 6 | Dollars 100 7 | Cu 999.8 8 | Ni 46.9 9 | Zi 50 10 | Mn 30 11 | -------------------------------------------------------------------------------- /gurobi/12.0.1/serial/code01/run.01: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Reset modules and load Gurobi. 4 | ## Uncomment these two lines to run this code right from the command line. 5 | # module reset 6 | # module load Gurobi/12.0.1-GCCcore-13.3.0 7 | 8 | 9 | # Run code. 
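# (Added note) gurobi_cl is Gurobi's command-line optimizer: the call below reads the
# coins.lp example model that ships with the Gurobi installation under
# $GUROBI_HOME/examples/data/ and writes the solution to coins.sol, which can be
# compared against coins.sol.valid in this directory.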
10 | gurobi_cl ResultFile=coins.sol $GUROBI_HOME/examples/data/coins.lp 11 | -------------------------------------------------------------------------------- /gurobi/12.0.1/serial/code01/sbatch.01.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ######################## start of slurm options ######################################### 3 | ######################################## 4 | # Job Identification & Runtime # 5 | ######################################## 6 | #SBATCH --job-name=gurobi # Job name 7 | #SBATCH --account=arcadm # Account name 8 | #SBATCH --time=01:00:00 # Time limit (HH:MM:SS) 9 | #SBATCH --partition=normal_q # Partition name 10 | #SBATCH --output=gurobi.%j.out # Standard output file (%j = job ID) 11 | #SBATCH --error=gurobi.%j.err # Standard error file 12 | ####################################### 13 | # CPU and Node Configuration # 14 | ####################################### 15 | #SBATCH --nodes=1 # Number of nodes 16 | #SBATCH --ntasks-per-node=1 # Number of tasks (processes) per node 17 | #SBATCH --cpus-per-task=1 # Number of CPUs per task (threading) 18 | ## #SBATCH --mem=64G # Memory per node 19 | ########################## end of slurm options ######################################### 20 | 21 | ######################################################################################### 22 | # Reset module system. 23 | module reset 24 | 25 | # Load Gurobi. 26 | module load Gurobi/12.0.1-GCCcore-13.3.0 27 | 28 | # Run gurobi script 29 | sh run.01 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /hdf5/hdf5_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=1 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load HDF5 11 | # 12 | echo "HDF5_TINKERCLIFFS ROME: Normal beginning of execution." 13 | # 14 | h5pcc -c hdf5_test.c 15 | if [ $? -ne 0 ]; then 16 | echo "HDF5_TINKERCLIFFS ROME: Compile error." 17 | exit 1 18 | fi 19 | # 20 | h5pcc -o hdf5_test hdf5_test.o 21 | if [ $? -ne 0 ]; then 22 | echo "HDF5_TINKERCLIFFS ROME: Load error." 23 | exit 1 24 | fi 25 | rm hdf5_test.o 26 | # 27 | ./hdf5_test > hdf5_tinkercliffs_rome.txt 28 | if [ $? -ne 0 ]; then 29 | echo "HDF5_TINKERCLIFFS ROME: Run error." 30 | exit 1 31 | fi 32 | rm hdf5_test 33 | # 34 | echo "HDF5_TINKERCLIFFS ROME: Normal end of execution." 35 | exit 0 36 | -------------------------------------------------------------------------------- /hpl/hybrid/HPL.dat: -------------------------------------------------------------------------------- 1 | HPLinpack benchmark input file 2 | Innovative Computing Laboratory, University of Tennessee 3 | HPL.out output file name (if any) 4 | 6 device out (6=stdout,7=stderr,file) 5 | 1 # of problems sizes (N) 6 | 36844 Ns 7 | 1 # of NBs 8 | 244 NBs 9 | 0 PMAP process mapping (0=Row-,1=Column-major) 10 | 1 # of process grids (P x Q) 11 | 8 Ps 12 | 8 Qs 13 | 16.0 threshold 14 | 1 # of panel fact 15 | 2 PFACTs (0=left, 1=Crout, 2=Right) 16 | 1 # of recursive stopping criterium 17 | 4 NBMINs (>= 1) 18 | 1 # of panels in recursion 19 | 2 NDIVs 20 | 1 # of recursive panel fact. 
21 | 1 RFACTs (0=left, 1=Crout, 2=Right) 22 | 1 # of broadcast 23 | 1 BCASTs (0=1rg,1=1rM,2=2rg,3=2rM,4=Lng,5=LnM) 24 | 1 # of lookahead depth 25 | 1 DEPTHs (>=0) 26 | 2 SWAP (0=bin-exch,1=long,2=mix) 27 | 64 swapping threshold 28 | 0 L1 in (0=transposed,1=no-transposed) form 29 | 0 U in (0=transposed,1=no-transposed) form 30 | 1 Equilibration (0=no,1=yes) 31 | 8 memory alignment in double (> 0) 32 | ##### This line (no. 32) is ignored (it serves as a separator). ###### 33 | 0 Number of additional problem sizes for PTRANS 34 | 1200 10000 30000 values of N 35 | 0 number of additional blocking sizes for PTRANS 36 | 40 9 8 13 13 20 16 32 64 values of NB 37 | 38 | -------------------------------------------------------------------------------- /hpl/hybrid/hpl_hybrid.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Run HPL in hybrid MPI+OpenMP model 3 | 4 | #Adjust runtime and number of nodes as needed 5 | #Doubling nodes increases runtime by ~sqrt(2) 6 | #SBATCH -t 2:00:00 7 | #SBATCH -N 1 8 | 9 | #SBATCH --ntasks-per-node=32 10 | #SBATCH --cpus-per-task=4 11 | #SBATCH -p normal_q 12 | 13 | #variables 14 | hplnb=244 #block size 15 | pctmem=85 #% of memory (max performance but long runtime for ~85) 16 | gbpercore=2 #gb ram per core 17 | 18 | #setup run directory so that jobs running at the same time don't collide 19 | rundir=runs/$SLURM_JOBID 20 | mkdir -p $rundir 21 | cp HPL.dat hpl_setup.sh $rundir/ 22 | cd $rundir 23 | 24 | #print some key variables 25 | env | egrep "SLURM_NNODES|SLURM_NTASKS|SLURM_CPUS_PER_TASK" 26 | echo "job is running on:" 27 | scontrol show hostnames $SLURM_NODELIST 28 | 29 | #### HYBRID (MPI+OPENMP, 1 mpi process/l3 cache) #### 30 | 31 | #setup HPL.dat 32 | export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK 33 | np=$SLURM_NTASKS 34 | ./hpl_setup.sh -np $np -nb $hplnb -pm $pctmem -m $(( $OMP_NUM_THREADS*$gbpercore )) 35 | 36 | #get cpu mask for groups of 4 cores 37 | mask=""; imask="0xF" 38 | for i in $( seq 32 ); do 39 | mask="${mask}${imask}," 40 | imask="${imask}0" 41 | done 42 | mask=$( echo $mask | sed 's/,$//' ) 43 | echo "cpu mask is: $mask" 44 | 45 | #intel 46 | module reset; module unload gcc; module load HPL/2.3-intel-2019b 47 | echo "LOG: intel | mpi+omp | launch with mpirun" 48 | mpirun -n $np -genv I_MPI_PIN_PROCESSOR_LIST="$( seq -s , 0 4 127 )" -genv I_MPI_PIN_DOMAIN=omp -genv OMP_NUM_THREADS=4 -genv OMP_PROC_BIND=TRUE -genv OMP_PLACES=cores xhpl 49 | echo "LOG: intel | mpi+omp | launch with srun" 50 | srun -n $np --cpu-bind=mask_cpu=$mask xhpl 51 | 52 | #gcc 53 | module reset; module unload gcc; module load HPL/2.3-foss-2020a 54 | #these break openblas for some reason - make sure they're unset 55 | unset OMP_PROC_BIND; unset OMP_PLACES 56 | echo "LOG: gcc | mpi+omp | launch with mpirun" 57 | mpirun -np $np --map-by ppr:1:L3cache --bind-to l3cache -x OMP_NUM_THREADS=4 xhpl 58 | echo "LOG: gcc | mpi+omp | launch with srun" 59 | srun -n $np --cpu-bind=mask_cpu=$mask xhpl 60 | 61 | -------------------------------------------------------------------------------- /hpl/mpi/HPL.dat: -------------------------------------------------------------------------------- 1 | HPLinpack benchmark input file 2 | Innovative Computing Laboratory, University of Tennessee 3 | HPL.out output file name (if any) 4 | 6 device out (6=stdout,7=stderr,file) 5 | 1 # of problems sizes (N) 6 | 41236 Ns 7 | 1 # of NBs 8 | 244 NBs 9 | 0 PMAP process mapping (0=Row-,1=Column-major) 10 | 1 # of process grids (P x Q) 11 | 8 Ps 12 | 16 Qs 13 | 
16.0 threshold 14 | 1 # of panel fact 15 | 2 PFACTs (0=left, 1=Crout, 2=Right) 16 | 1 # of recursive stopping criterium 17 | 4 NBMINs (>= 1) 18 | 1 # of panels in recursion 19 | 2 NDIVs 20 | 1 # of recursive panel fact. 21 | 1 RFACTs (0=left, 1=Crout, 2=Right) 22 | 1 # of broadcast 23 | 1 BCASTs (0=1rg,1=1rM,2=2rg,3=2rM,4=Lng,5=LnM) 24 | 1 # of lookahead depth 25 | 1 DEPTHs (>=0) 26 | 2 SWAP (0=bin-exch,1=long,2=mix) 27 | 64 swapping threshold 28 | 0 L1 in (0=transposed,1=no-transposed) form 29 | 0 U in (0=transposed,1=no-transposed) form 30 | 1 Equilibration (0=no,1=yes) 31 | 8 memory alignment in double (> 0) 32 | ##### This line (no. 32) is ignored (it serves as a separator). ###### 33 | 0 Number of additional problem sizes for PTRANS 34 | 1200 10000 30000 values of N 35 | 0 number of additional blocking sizes for PTRANS 36 | 40 9 8 13 13 20 16 32 64 values of NB 37 | 38 | -------------------------------------------------------------------------------- /julia/1.10.4/README.md: -------------------------------------------------------------------------------- 1 | ## example: Hello World 2 | To run the "hello world" example, 3 | 1. download the files `helloworld.jl` and `helloworld.sh` 4 | 2. run the command `sbatch helloworld.sh` to jubmit the batch script as a job 5 | 3. when the job completes, inspect the Slurm output file to verify success. -------------------------------------------------------------------------------- /julia/1.10.4/helloworld.jl: -------------------------------------------------------------------------------- 1 | print("Hello World\n") -------------------------------------------------------------------------------- /julia/1.10.4/helloworld.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ######################################################################################### 3 | #SBATCH --account=personal # edit this to specify another Slurm account if desired 4 | #SBATCH --partition=normal_q 5 | #SBATCH --time=0-0:10:00 # 10 minutes - more than enough for this helloworld job 6 | #SBATCH --ntasks-per-node=1 7 | #SBATCH --cpus-per-task=4 8 | #SBATCH --mem=1G # this minimal example needs very little memory 9 | ########################## end of slurm options ######################################### 10 | # Description: Slurm batch job to run a "hello world" code written in Julia 11 | # Usage: 12 | # sbatch helloworld.sh 13 | ######################################################################################### 14 | date #print the date and time in the output file 15 | echo "Running a Julia \"Hello World\" example" 16 | 17 | cd $SLURM_SUBMIT_DIR # change to the current working directory when the job was submitted 18 | 19 | module reset 20 | module load Julia/1.10.4-linux-x86_64 21 | 22 | echo "Julia installation info:" 23 | which julia # print the location of the julia executable 24 | julia -v # ask the julia executable to print its version 25 | 26 | echo "... running..." 27 | 28 | julia helloworld.jl 29 | 30 | date 31 | echo "complete!" 32 | -------------------------------------------------------------------------------- /julia/1.11.3/parallel/README.md: -------------------------------------------------------------------------------- 1 | # Julia Parallel Code 2 | 3 | Version of Julia: Julia 1.11.3 4 | 5 | ## Codes 6 | 7 | 8 | ### code01 9 | 10 | This is a gpu-based code. 
11 | 12 | Comes from: /projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-julia/falcon/l40s/test10 13 | 14 | Simple vector addition. 15 | 16 | This example is meant to be run on falcon and on an L40S gpu node. 17 | If you run on some other cluster and/or some other compute node type, 18 | then you have to make the virtual environment (instructions below) in 19 | that (cluster, compute node) pair. 20 | And you will have to modify the path to the resulting VE in the julia 21 | source code src.01.jl. 22 | 23 | 24 | ---------------------- 25 | To set up to run. 26 | 27 | module load Julia/1.11.3-linux-x86_64 28 | 29 | We need a virtual environment. 30 | 31 | You need only build this virtual environment once. 32 | 33 | The directions are in the code01 directory, in file: README.make-env.01.falcon.l40s.tex 34 | 35 | Execute this first. If you put the VE (virtual environment) in a different directory, 36 | you will need to change the directory in the src.01.jl julia source code file, at the 37 | top. 38 | 39 | 40 | 41 | ---------------------- 42 | To run code. 43 | 44 | 45 | Tested on fal052, the l40s_normal_q 46 | 47 | Launch with slurm: sbatch sbatch.01.slurm 48 | 49 | Launch on compute node: sh run.01 50 | 51 | No output; too big. 52 | 53 | But there are performance data gathered to ensure gpu is running. 54 | 55 | Various tests done with a version of this code to ensure correctness. 56 | 57 | 58 | 59 | ---------------------------- 60 | Background. 61 | 62 | There is a lot of stuff in the sbatch script. 63 | It is worth wading through this, for a user, because it is close to a 64 | "real" sbatch slurm script. 65 | 66 | 67 | -------------------------------------------------------------------------------- /julia/1.11.3/parallel/code01/run.01: -------------------------------------------------------------------------------- 1 | julia src01.jl 2 | -------------------------------------------------------------------------------- /julia/1.11.3/parallel/code01/run.delete.me: -------------------------------------------------------------------------------- 1 | rm *.txt 2 | rm slurm.julia.01.gpu.* 3 | rm gpu.*.log 4 | -------------------------------------------------------------------------------- /julia/1.11.3/parallel/code01/sample-output-previous-execution/gpu.24618.log: -------------------------------------------------------------------------------- 1 | timestamp, name, pci.bus_id, driver_version, temperature.gpu, utilization.gpu [%], utilization.memory [%], memory.total [MiB], memory.free [MiB], memory.used [MiB] 2 | 2025/05/15 14:16:25.958, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 33, 0 %, 0 %, 46068 MiB, 45021 MiB, 465 MiB 3 | 2025/05/15 14:16:28.968, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 33, 0 %, 0 %, 46068 MiB, 45021 MiB, 465 MiB 4 | 2025/05/15 14:16:31.977, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 33, 0 %, 0 %, 46068 MiB, 45021 MiB, 465 MiB 5 | 2025/05/15 14:16:34.986, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 33, 0 %, 0 %, 46068 MiB, 44559 MiB, 926 MiB 6 | 2025/05/15 14:16:37.995, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 33, 0 %, 0 %, 46068 MiB, 44559 MiB, 926 MiB 7 | 2025/05/15 14:16:41.004, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 33, 0 %, 0 %, 46068 MiB, 44559 MiB, 926 MiB 8 | 2025/05/15 14:16:44.013, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 34, 69 %, 0 %, 46068 MiB, 44527 MiB, 958 MiB 9 | 2025/05/15 14:16:47.028, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 34, 71 %, 0 %, 46068 MiB, 44015 MiB, 1470 MiB 10 | 2025/05/15 14:16:50.036, NVIDIA L40S, 00000000:4A:00.0, 
565.57.01, 34, 71 %, 0 %, 46068 MiB, 43535 MiB, 1950 MiB 11 | 2025/05/15 14:16:53.049, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 34, 68 %, 0 %, 46068 MiB, 43055 MiB, 2430 MiB 12 | 2025/05/15 14:16:56.058, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 34, 65 %, 0 %, 46068 MiB, 42575 MiB, 2910 MiB 13 | 2025/05/15 14:16:59.066, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 34, 65 %, 0 %, 46068 MiB, 42095 MiB, 3390 MiB 14 | 2025/05/15 14:17:02.074, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 34, 67 %, 0 %, 46068 MiB, 41615 MiB, 3870 MiB 15 | 2025/05/15 14:17:05.087, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 34, 65 %, 0 %, 46068 MiB, 41167 MiB, 4318 MiB 16 | 2025/05/15 14:17:08.101, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 34, 70 %, 0 %, 46068 MiB, 40687 MiB, 4798 MiB 17 | -------------------------------------------------------------------------------- /julia/1.11.3/parallel/code01/sample-output-previous-execution/slurm.julia.01.gpu.24618.err: -------------------------------------------------------------------------------- 1 | Resetting modules to system default. Reseting $MODULEPATH back to system default. All extra directories will be removed from $MODULEPATH. 2 | Activating project at `~/env-julia/falcon/benchmarking/BenchmarkTools` 3 | -------------------------------------------------------------------------------- /julia/1.11.3/parallel/code01/sample-output-previous-execution/slurm.julia.01.gpu.24618.out: -------------------------------------------------------------------------------- 1 | ------------ 2 | Set of cores job running on: 3 | 4 | JobId=24618 JobName=matgpu 5 | UserId=ckuhlman(1344122) GroupId=ckuhlman(1344122) MCS_label=N/A 6 | Priority=1211 Nice=0 Account=arcadm QOS=normal 7 | JobState=RUNNING Reason=None Dependency=(null) 8 | Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0 9 | DerivedExitCode=0:0 10 | RunTime=00:00:01 TimeLimit=01:00:00 TimeMin=N/A 11 | SubmitTime=2025-05-15T14:16:24 EligibleTime=2025-05-15T14:16:24 12 | AccrueTime=2025-05-15T14:16:24 13 | StartTime=2025-05-15T14:16:24 EndTime=2025-05-15T15:16:24 Deadline=N/A 14 | SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-05-15T14:16:24 Scheduler=Main 15 | Partition=l40s_normal_q AllocNode:Sid=fal052:267766 16 | ReqNodeList=(null) ExcNodeList=(null) 17 | NodeList=fal052 18 | BatchHost=fal052 19 | NumNodes=1 NumCPUs=1 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:* 20 | ReqTRES=cpu=1,mem=7648M,node=1,gres/gpu=1 21 | AllocTRES=cpu=1,mem=7648M,node=1,gres/gpu=1,gres/gpu:l40s=1 22 | Socks/Node=* NtasksPerN:B:S:C=1:0:*:* CoreSpec=* 23 | JOB_GRES=gpu:l40s:1 24 | Nodes=fal052 CPU_IDs=0 Mem=7648 GRES=gpu:l40s:1(IDX:0) 25 | MinCPUsNode=1 MinMemoryCPU=7648M MinTmpDiskNode=0 26 | Features=(null) DelayBoot=00:00:00 27 | Reservation=HPCMaintTesting 28 | OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null) 29 | Command=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-julia/falcon/l40s/test10/sbatch.01.slurm 30 | WorkDir=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-julia/falcon/l40s/test10 31 | StdErr=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-julia/falcon/l40s/test10/slurm.julia.01.gpu.24618.err 32 | StdIn=/dev/null 33 | StdOut=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-julia/falcon/l40s/test10/slurm.julia.01.gpu.24618.out 34 | Power= 35 | TresPerNode=gres:gpu:1 36 | 37 | 38 | 39 | 40 | 41 | 42 | Start file and monitoring of GPU. 
43 | 44 | 45 | 46 | ------------ 47 | Running IOSTAT 48 | ------------ 49 | Running MPSTAT 50 | ------------ 51 | Running VMSTAT 52 | ------------ 53 | Running executable 54 | ------------ 55 | Executable done 56 | ------------ 57 | Killing IOSTAT 58 | ------------ 59 | Killing MPSTAT 60 | ------------ 61 | Killing VMSTAT 62 | -------------------------------------------------------------------------------- /julia/1.11.3/parallel/code01/sbatch.01.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #SBATCH -J matgpu 4 | 5 | 6 | ## Wall time. 7 | #SBATCH --time=0-01:00:00 8 | 9 | ## Account to "charge" to/run against. 10 | #SBATCH --account=arcadm 11 | 12 | ## Partition/queue. 13 | ## #SBATCH --partition=normal_q 14 | ## #SBATCH --partition=largemem_q 15 | #SBATCH --partition=l40s_normal_q 16 | 17 | ### This requests 1 node, 1 core. 1 gpu. 18 | #SBATCH --nodes=1 19 | #SBATCH --ntasks-per-node=1 20 | #SBATCH --cpus-per-task=1 21 | #SBATCH --gres=gpu:1 22 | 23 | 24 | ## Reservation. 25 | #SBATCH --reservation=HPCMaintTesting 26 | 27 | ## Use the compute node only for this job, and use all memory on this node. 28 | ## #SBATCH --exclusive 29 | ## #SBATCH --mem=500G 30 | 31 | ## Slurm output and error files. 32 | #SBATCH -o slurm.julia.01.gpu.%j.out 33 | #SBATCH -e slurm.julia.01.gpu.%j.err 34 | 35 | 36 | ## Load modules, if any. 37 | module reset 38 | module load Julia/1.11.3-linux-x86_64 39 | 40 | ## Load virtual environments, if any. 41 | ## source activate ~/env/falcon/l40s_normal_q/py312_osu_gpu_timing 42 | 43 | # Set up 44 | 45 | ## Get the core number for job and other job details. 46 | echo " ------------" 47 | echo "Set of cores job running on: " 48 | echo " " 49 | scontrol show job -d $SLURM_JOB_ID 50 | echo " " 51 | echo " " 52 | 53 | ## Monitor the GPU. 54 | echo " " 55 | echo " " 56 | echo "Start file and monitoring of GPU." 57 | nvidia-smi --query-gpu=timestamp,name,pci.bus_id,driver_version,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used --format=csv -l 3 > gpu.$SLURM_JOBID.log & 58 | echo " " 59 | echo " " 60 | 61 | echo " " 62 | echo " ------------" 63 | echo "Running IOSTAT" 64 | 65 | iostat 2 >iostat-stdout.txt 2>iostat-stderr.txt & 66 | 67 | echo " ------------" 68 | echo "Running MPSTAT" 69 | 70 | mpstat -P ALL 2 >mpstat-stdout.txt 2>mpstat-stderr.txt & 71 | 72 | echo " ------------" 73 | echo "Running VMSTAT" 74 | 75 | vmstat 2 >vmstat-stdout.txt 2>vmstat-stderr.txt & 76 | 77 | echo " ------------" 78 | echo "Running executable" 79 | 80 | # Code to execute. 81 | sh ./run.01 82 | 83 | echo " ------------" 84 | echo "Executable done" 85 | 86 | echo " ------------" 87 | echo "Killing IOSTAT" 88 | kill %1 89 | 90 | echo " ------------" 91 | echo "Killing MPSTAT" 92 | kill %2 93 | 94 | echo " ------------" 95 | echo "Killing VMSTAT" 96 | kill %3 97 | 98 | -------------------------------------------------------------------------------- /julia/1.11.3/parallel/code01/src01.jl: -------------------------------------------------------------------------------- 1 | 2 | # ----------------- 3 | # Packages and environments. 4 | using Pkg 5 | Pkg.activate("/home/ckuhlman/env-julia/falcon/benchmarking/BenchmarkTools/.") 6 | using BenchmarkTools 7 | using CUDA 8 | 9 | # ----------------- 10 | # Constants. 11 | ## Too long. 12 | # outerLoop=100000 13 | outerLoop=500 14 | exponent=20 15 | 16 | # ----------------- 17 | # Functions. 18 | 19 | # Do addition. 
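# (Added note) bench_gpu1! launches this kernel with @cuda and no explicit launch
# configuration, so it runs as 1 block of 1 thread and the loop executes serially on
# the device; per the README, the goal is sustained GPU activity for the monitoring
# logs rather than peak throughput. A parallel version would typically index with
# threadIdx().x and stride by blockDim().x.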
20 | function gpu_add1!(y, x) 21 | for i = 1:length(y) 22 | @inbounds y[i] += x[i] 23 | end 24 | return nothing 25 | end 26 | 27 | 28 | # Do performance eval call. 29 | function bench_gpu1!(y, x) 30 | CUDA.@sync begin 31 | @cuda gpu_add1!(y, x) 32 | end 33 | end 34 | 35 | 36 | # ---------------- 37 | # Commands. 38 | 39 | 40 | for itime in 1:outerLoop 41 | 42 | # Initialize. 43 | N = 2^exponent 44 | x_d = CUDA.fill(1.0f0, N) # a vector stored on the GPU filled with 1.0 (Float32) 45 | y_d = CUDA.fill(2.0f0, N) # a vector stored on the GPU filled with 2.0 (Float32) 46 | 47 | # Compute. 48 | # @btime bench_gpu1!($y_d, $x_d) 49 | ## bench_gpu1!($y_d, $x_d) 50 | bench_gpu1!(y_d, x_d) 51 | 52 | end 53 | 54 | 55 | -------------------------------------------------------------------------------- /julia/1.11.3/serial/README.md: -------------------------------------------------------------------------------- 1 | # Julia Serial Code 2 | 3 | Julia version: Julia 1.11.3 4 | 5 | ## Codes 6 | 7 | 8 | ### code01 9 | 10 | Comes from: /projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-julia/owl/dev_q/test01 11 | 12 | Simple print of material. 13 | 14 | ---------------------------- 15 | To set up to run interactively. 16 | 17 | module load Julia/1.11.3-linux-x86_64 18 | 19 | 20 | ---------------------------- 21 | To run code. 22 | 23 | Tested on owl083, the dev_q 24 | 25 | Launch with slurm: sbatch sbatch.01.06.slurm 26 | 27 | Launch on compute node: sh run.01.06 28 | 29 | Diff output: diff output.01.06.out output.01.06.out.valid 30 | 31 | 32 | ---------------------------- 33 | Background. 34 | 35 | There is a lot of stuff in the sbatch script. 36 | It is worth wading through this, for a user, because it is close to a 37 | "real" sbatch slurm script. 38 | 39 | -------------------------------------------------------------------------------- /julia/1.11.3/serial/code01/output.01.06.out.valid: -------------------------------------------------------------------------------- 1 | CLA 4: output.01.04.out 2 | CLA 3: 18 3 | CLA 2: 16 4 | CLA 1: 14 5 | Julia also has argparse 6 | friendString01: Hello, friend01 7 | friendString02: Hello, friend02 8 | arg3String: Hello, 18 9 | -------------------------------------------------------------------------------- /julia/1.11.3/serial/code01/run.01.06: -------------------------------------------------------------------------------- 1 | 2 | 3 | mycode="src.01.06.jl" 4 | args=" 14 16 18 output.01.06.out" 5 | 6 | ## Invocation. Julia. 7 | julia ${mycode} $args 8 | 9 | -------------------------------------------------------------------------------- /julia/1.11.3/serial/code01/src.01.06.jl: -------------------------------------------------------------------------------- 1 | # Code from: 2 | # https://stackoverflow.com/questions/21056991/access-command-line-arguments-in-julia 3 | 4 | # Must have at least three CLAs. 5 | 6 | # Command line arguments (CLAs) are automatically in variable ARGS. 7 | # 8 | 9 | 10 | # Output file. 
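# (Added note) ARGS is 1-based and contains only the user-supplied arguments, so this
# script needs four of them; run.01.06 passes "14 16 18 output.01.06.out", and ARGS[4]
# below is used as the output file name.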
11 | outfile = string(ARGS[4]) 12 | 13 | open(outfile, "w") do fh 14 | write(fh, "CLA 4: " * string(ARGS[4]) * "\n" ) 15 | write(fh, "CLA 3: " * string(ARGS[3]) * "\n" ) 16 | write(fh, "CLA 2: " * string(ARGS[2]) * "\n" ) 17 | write(fh, "CLA 1: " * string(ARGS[1]) * "\n" ) 18 | write(fh, "Julia also has argparse" * "\n" ) 19 | end 20 | 21 | 22 | -------------------------------------------------------------------------------- /lammps/in.lj: -------------------------------------------------------------------------------- 1 | # 3d Lennard-Jones melt 2 | 3 | variable x index 1 4 | variable y index 1 5 | variable z index 1 6 | 7 | variable xx equal 20*$x 8 | variable yy equal 20*$y 9 | variable zz equal 20*$z 10 | 11 | units lj 12 | atom_style atomic 13 | 14 | lattice fcc 0.8442 15 | region box block 0 ${xx} 0 ${yy} 0 ${zz} 16 | create_box 1 box 17 | create_atoms 1 box 18 | mass 1 1.0 19 | 20 | velocity all create 1.44 87287 loop geom 21 | 22 | pair_style lj/cut 2.5 23 | pair_coeff 1 1 1.0 1.0 2.5 24 | 25 | neighbor 0.3 bin 26 | neigh_modify delay 0 every 20 check no 27 | 28 | fix 1 all nve 29 | 30 | run 100 31 | 32 | -------------------------------------------------------------------------------- /lammps/input.in: -------------------------------------------------------------------------------- 1 | units lj 2 | 3 | atom_style atomic 4 | 5 | neighbor 0.3 bin 6 | 7 | neigh_modify delay 0 every 20 check no 8 | 9 | pair_style lj/cut 2.5 10 | 11 | read_data system.data 12 | 13 | pair_coeff 1 1 1.0 1.0 2.5 14 | 15 | variable t index 100 16 | 17 | fix 1 all nve 18 | 19 | thermo 100 20 | 21 | run $t 22 | -------------------------------------------------------------------------------- /lammps/lammps_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=48 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load LAMMPS 11 | # 12 | echo "LAMMPS_TINKERCLIFFS ROME: Normal beginning of execution." 13 | # 14 | mpirun -np $SLURM_NTASKS lmp < in.lj > lammps_tinkercliffs_rome.txt 15 | if [ $? -ne 0 ]; then 16 | echo "LAMMPS_TINKERCLIFFS ROME: Run error!" 17 | exit 1 18 | fi 19 | # 20 | echo "LAMMPS_TINKERCLIFFS ROME: Normal end of execution." 21 | exit 0 22 | -------------------------------------------------------------------------------- /lammps/submit_tinker_hybrid_parallel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 3 | #SBATCH --ntasks-per-node=16 # Adjusted to fit the hybrid setup 4 | #SBATCH --cpus-per-task=4 # Using 4 OpenMP threads per MPI task 5 | #SBATCH --account=personal 6 | #SBATCH --partition=normal_q 7 | #SBATCH --time=00:15:00 8 | 9 | module reset 10 | module load LAMMPS/29Aug2024-foss-2023b-kokkos 11 | 12 | export OMP_NUM_THREADS=4 # Set OpenMP threads 13 | 14 | # Run LAMMPS with optimized mpirun settings and OMP suffix 15 | 16 | echo "LAMMPS_TINKERCLIFFS ROME: Normal beginning of execution." 17 | 18 | 19 | mpirun --map-by ppr:1:L3cache --bind-to l3cache -x OMP_NUM_THREADS lmp -sf omp -pk omp $OMP_NUM_THREADS < input.in > output.txt 20 | 21 | if [ $? -ne 0 ]; then 22 | echo "LAMMPS_TINKERCLIFFS ROME: Run error!" 23 | exit 1 24 | fi 25 | # 26 | echo "LAMMPS_TINKERCLIFFS ROME: Normal end of execution." 
27 | exit 0 28 | 29 | -------------------------------------------------------------------------------- /lammps/submit_tinker_parallel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 3 | #SBATCH --ntasks-per-node=16 4 | #SBATCH --account=personal 5 | #SBATCH --partition=normal_q 6 | #SBATCH --time=00:15:00 7 | # 8 | module reset 9 | module load LAMMPS/29Aug2024-foss-2023b-kokkos 10 | # 11 | echo "LAMMPS_TINKERCLIFFS ROME: Normal beginning of execution." 12 | # 13 | mpirun -np $SLURM_NTASKS lmp < input.in > output.txt 14 | 15 | if [ $? -ne 0 ]; then 16 | echo "LAMMPS_TINKERCLIFFS ROME: Run error!" 17 | exit 1 18 | fi 19 | # 20 | echo "LAMMPS_TINKERCLIFFS ROME: Normal end of execution." 21 | exit 0 22 | -------------------------------------------------------------------------------- /matlab/matlab_owl.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:10:00 4 | #SBATCH -N1 --ntasks-per-node=16 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load MATLAB 11 | 12 | ## Start MATLAB and call the script 13 | matlab -batch prime_batch_local 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /matlab/matlab_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:10:00 4 | #SBATCH -N1 --ntasks-per-node=16 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load MATLAB 11 | 12 | ## Start MATLAB and call the script 13 | matlab -batch prime_batch_local 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /matlab/other/matlab_owl.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:10:00 4 | #SBATCH -N1 --ntasks-per-node=16 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load MATLAB 11 | 12 | ## Start MATLAB and call the script 13 | matlab -batch prime_batch_local 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /matlab/other/matlab_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:10:00 4 | #SBATCH -N1 --ntasks-per-node=16 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load MATLAB 11 | 12 | ## Start MATLAB and call the script 13 | matlab -batch prime_batch_local 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /matlab/other/prime_batch_local.m: -------------------------------------------------------------------------------- 1 | %% PRIME_BATCH_LOCAL uses the BATCH command to run the PRIME code locally. 2 | % 3 | % Discussion: 4 | % 5 | % The PRIME code is a function, so first we must write a script 6 | % called PRIME_SCRIPT that runs the function. 7 | % 8 | % Licensing: 9 | % 10 | % This code is distributed under the GNU LGPL license. 11 | % 12 | % Modified: 13 | % 14 | % 12 July 2016 by Justin Krometis 15 | % 16 | % Author: 17 | % 18 | % John Burkardt 19 | % 20 | clear 21 | 22 | fprintf ( 1, '\n' ); 23 | fprintf ( 1, 'PRIME_BATCH_LOCAL\n' ); 24 | fprintf ( 1, ' Run PRIME_SCRIPT locally.\n' ); 25 | % 26 | % BATCH defines the job and sends it for execution. 
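% (Added note: 'Pool', 15 asks the batch job to open a parallel pool of 15 workers for
% the parfor loop in prime_fun, in addition to the worker that runs the script itself;
% the Slurm scripts request 16 tasks per node, presumably to match.)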
27 | % 28 | my_job = batch ( 'prime_script', 'Profile', 'local' , 'Pool', 15); 29 | % 30 | % WAIT pauses the MATLAB session til the job completes. 31 | % 32 | wait ( my_job ); 33 | % 34 | % DIARY displays any messages printed during execution. 35 | % 36 | diary ( my_job ); 37 | % 38 | % LOAD makes the script's workspace available. 39 | % 40 | % total = total number of primes. 41 | % 42 | load ( my_job ); 43 | 44 | fprintf ( 1, '\n' ); 45 | fprintf ( 1, ' Total number of primes = %d\n', total ); 46 | % 47 | % These commands clean up data about the job we no longer need. 48 | % 49 | delete ( my_job ); %Use delete() for R2012a or later 50 | 51 | fprintf ( 1, '\n' ); 52 | fprintf ( 1, 'PRIME_BATCH_LOCAL\n' ); 53 | fprintf ( 1, ' Normal end of execution.\n' ); 54 | -------------------------------------------------------------------------------- /matlab/other/prime_fun.m: -------------------------------------------------------------------------------- 1 | function total = prime_fun ( n ) 2 | 3 | %*****************************************************************************80 4 | % 5 | %% PRIME_FUN returns the number of primes between 1 and N. 6 | % 7 | % Discussion: 8 | % 9 | % A naive algorithm is used. 10 | % 11 | % Mathematica can return the number of primes less than or equal to N 12 | % by the command PrimePi[N]. 13 | % 14 | % N TOTAL 15 | % 16 | % 1 0 17 | % 10 4 18 | % 100 25 19 | % 1,000 168 20 | % 10,000 1,229 21 | % 100,000 9,592 22 | % 1,000,000 78,498 23 | % 10,000,000 664,579 24 | % 100,000,000 5,761,455 25 | % 1,000,000,000 50,847,534 26 | % 27 | % Licensing: 28 | % 29 | % This code is distributed under the GNU LGPL license. 30 | % 31 | % Modified: 32 | % 33 | % 22 April 2009 34 | % 35 | % Author: 36 | % 37 | % John Burkardt 38 | % 39 | % Input, integer N, the maximum number to check. 40 | % 41 | % Output, integer TOTAL, the number of prime numbers up to N. 42 | % 43 | total = 0; 44 | 45 | parfor i = 2 : n 46 | 47 | prime = 1; 48 | 49 | for j = 2 : sqrt ( i ) 50 | if ( mod ( i, j ) == 0 ) 51 | prime = 0; 52 | break 53 | end 54 | end 55 | 56 | total = total + prime; 57 | 58 | end 59 | 60 | return 61 | end 62 | -------------------------------------------------------------------------------- /matlab/other/prime_script.m: -------------------------------------------------------------------------------- 1 | %% PRIME_SCRIPT is a script to call PRIME_FUN. 2 | % 3 | % Discussion: 4 | % 5 | % The BATCH command runs scripts, not functions. So we have to write 6 | % this short script if we want to work with BATCH! 7 | % 8 | % Licensing: 9 | % 10 | % This code is distributed under the GNU LGPL license. 11 | % 12 | % Modified: 13 | % 14 | % 27 March 2010 15 | % 16 | % Author: 17 | % 18 | % John Burkardt 19 | % 20 | n = 100000000; 21 | 22 | fprintf ( 1, '\n' ); 23 | fprintf ( 1, 'PRIME_SCRIPT\n' ); 24 | fprintf ( 1, ' Count prime numbers from 1 to %d\n', n ); 25 | 26 | total = prime_fun ( n ); 27 | -------------------------------------------------------------------------------- /matlab/parallel/code01/code02.m: -------------------------------------------------------------------------------- 1 | % ## Switches for invoking matlab. 
2 | % ## https://stackoverflow.com/questions/8981168/running-a-matlab-program-with-arguments 3 | % ## Example: matlab -nodisplay -nosplash -r progName args 4 | 5 | % ## Invoke code02 arrayLength numIterations 6 | 7 | % function A = code02(varargin) 8 | % arrayLength = varargin{1} 9 | % numIterations = varargin{2} 10 | 11 | function aa = code02(arrayLength, numIterations) 12 | 13 | fprintf('arrayLength: \n'); 14 | disp (arrayLength); 15 | fprintf('numIterations: \n'); 16 | disp (numIterations); 17 | 18 | % N = 200000; 19 | N = arrayLength; 20 | r = gpuArray.linspace(0,4,N); 21 | x = rand(1,N,"gpuArray"); 22 | 23 | % numIterations = 1000; 24 | for n=1:numIterations 25 | x = r.*x.*(1-x); 26 | end 27 | 28 | % plot(r,x,'.',MarkerSize=1) 29 | % xlabel("Growth Rate") 30 | % ylabel("Population") 31 | 32 | % Return argument. 33 | aa="done"; 34 | end 35 | -------------------------------------------------------------------------------- /matlab/parallel/code01/previous-execution-output/iostat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/parallel/code01/previous-execution-output/iostat-stderr.txt -------------------------------------------------------------------------------- /matlab/parallel/code01/previous-execution-output/mpstat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/parallel/code01/previous-execution-output/mpstat-stderr.txt -------------------------------------------------------------------------------- /matlab/parallel/code01/previous-execution-output/run.delete.me: -------------------------------------------------------------------------------- 1 | rm *.txt 2 | rm slurm.matlab.02.gpu.* 3 | rm *.gpu.log 4 | -------------------------------------------------------------------------------- /matlab/parallel/code01/previous-execution-output/slurm.matlab.02.gpu.255.err: -------------------------------------------------------------------------------- 1 | Resetting modules to system default. Reseting $MODULEPATH back to system default. All extra directories will be removed from $MODULEPATH. 2 | -------------------------------------------------------------------------------- /matlab/parallel/code01/previous-execution-output/vmstat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/parallel/code01/previous-execution-output/vmstat-stderr.txt -------------------------------------------------------------------------------- /matlab/parallel/code01/run.02: -------------------------------------------------------------------------------- 1 | 2 | ## ------- Parameters; CLAs. 3 | 4 | ## For t4_normal_q 5 | ## (same as for l40s_normal_q ) 6 | ## arrayLength=2000000 7 | ## numIterations=1000000 8 | arrayLength=2000000 9 | numIterations=1000000 10 | 11 | ## Code name. 12 | mycode="code02" 13 | 14 | ## Invocation. Matlab syntax in double-quotes. 15 | matlab -nodisplay -nosplash -r "bogus = ${mycode}(${arrayLength}, ${numIterations})" 16 | 17 | 18 | ## If you do not have this command, 19 | ## then you will stay in the matlab 20 | ## environment. 
21 | # exit 0 22 | # quit(0,"force") 23 | # quit 24 | -------------------------------------------------------------------------------- /matlab/parallel/code01/sbatch.02.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #SBATCH -J matgpu 4 | 5 | 6 | ## Wall time. 7 | #SBATCH --time=0-01:00:00 8 | 9 | ## Account to "charge" to/run against. 10 | #SBATCH --account=arcadm 11 | 12 | ## Partition/queue. 13 | ## #SBATCH --partition=normal_q 14 | ## #SBATCH --partition=largemem_q 15 | #SBATCH --partition=a100_normal_q 16 | 17 | ### This requests 1 node, 1 core. 1 gpu. 18 | #SBATCH --nodes=1 19 | #SBATCH --ntasks-per-node=1 20 | #SBATCH --cpus-per-task=1 21 | #SBATCH --gres=gpu:1 22 | 23 | 24 | ## Reservation. 25 | #### #SBATCH --reservation=HPCMaintTesting 26 | 27 | ## Use the compute node only for this job, and use all memory on this node. 28 | ## #SBATCH --exclusive 29 | ## #SBATCH --mem=500G 30 | 31 | ## Slurm output and error files. 32 | #SBATCH -o slurm.matlab.02.gpu.%j.out 33 | #SBATCH -e slurm.matlab.02.gpu.%j.err 34 | 35 | 36 | ## Load modules, if any. 37 | module reset 38 | module load MATLAB/R2024b 39 | 40 | ## Load virtual environments, if any. 41 | ## source activate ~/env/falcon/l40s_normal_q/py312_osu_gpu_timing 42 | 43 | # Set up 44 | 45 | ## Get the core number for job and other job details. 46 | echo " ------------" 47 | echo "Set of cores job running on: " 48 | echo " " 49 | scontrol show job -d $SLURM_JOB_ID 50 | echo " " 51 | echo " " 52 | 53 | ## Monitor the GPU. 54 | echo " " 55 | echo " " 56 | echo "Start file and monitoring of GPU." 57 | nvidia-smi --query-gpu=timestamp,name,pci.bus_id,driver_version,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used --format=csv -l 3 > $SLURM_JOBID.gpu.log & 58 | echo " " 59 | echo " " 60 | 61 | echo " " 62 | echo " ------------" 63 | echo "Running IOSTAT" 64 | 65 | iostat 2 >iostat-stdout.txt 2>iostat-stderr.txt & 66 | 67 | echo " ------------" 68 | echo "Running MPSTAT" 69 | 70 | mpstat -P ALL 2 >mpstat-stdout.txt 2>mpstat-stderr.txt & 71 | 72 | echo " ------------" 73 | echo "Running VMSTAT" 74 | 75 | vmstat 2 >vmstat-stdout.txt 2>vmstat-stderr.txt & 76 | 77 | echo " ------------" 78 | echo "Running executable" 79 | 80 | # Code to execute. 81 | sh ./run.02 82 | 83 | echo " ------------" 84 | echo "Executable done" 85 | 86 | echo " ------------" 87 | echo "Killing IOSTAT" 88 | kill %1 89 | 90 | echo " ------------" 91 | echo "Killing MPSTAT" 92 | kill %2 93 | 94 | echo " ------------" 95 | echo "Killing VMSTAT" 96 | kill %3 97 | 98 | -------------------------------------------------------------------------------- /matlab/parallel/code02/code04.m: -------------------------------------------------------------------------------- 1 | % ## Switches for invoking matlab. 2 | % ## https://stackoverflow.com/questions/8981168/running-a-matlab-program-with-arguments 3 | % ## Example: matlab -nodisplay -nosplash -r progName args 4 | 5 | % ## Invoke code02 arrayLength numIterations 6 | 7 | % function A = code02(varargin) 8 | % arrayLength = varargin{1} 9 | % numIterations = varargin{2} 10 | 11 | % This function is now for CPU, not GPU. 12 | % Slight modifications below 13 | % to set up r and x. 14 | % function aa = code04(arrayLength, numIterations, outfile) 15 | function aa = code04(arrayLength, numIterations) 16 | 17 | % Put output file name in code; have problems passing it in. 
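% (Added note: the problem is likely that run.04 would expand the file name unquoted
% inside the -r string, so MATLAB parses mat.out as an expression instead of a char
% array; quoting it in the invocation, e.g. code04(N, iters, 'mat.out'), would be one
% way around this.)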
18 | outfile = "mat.out" 19 | 20 | fprintf('arrayLength: \n'); 21 | disp (arrayLength); 22 | fprintf('numIterations: \n'); 23 | disp (numIterations); 24 | fprintf('outfile: \n'); 25 | disp (outfile); 26 | 27 | % This is code for CPU computations. 28 | % % N = 200000; 29 | % N = arrayLength; 30 | % r = linspace(1,100,N); 31 | % % x = rand(1,N); 32 | % x = linspace(1,100,N); 33 | % x = transpose(x); 34 | 35 | % This is code for gpu computations. 36 | % N = 200000; 37 | N = arrayLength; 38 | r = gpuArray.linspace(0,4,N); 39 | x = rand(1,N,"gpuArray"); 40 | 41 | % numIterations = 1000; 42 | for n=1:numIterations 43 | x = r.*x.*(1-x); 44 | end 45 | 46 | % For CPU computations. 47 | % % Write x to file. 48 | % fid = fopen(outfile,'w'); 49 | % fprintf(fid,'%f\n',x); 50 | % fclose(fid); 51 | 52 | 53 | % plot(r,x,'.',MarkerSize=1) 54 | % xlabel("Growth Rate") 55 | % ylabel("Population") 56 | 57 | % Return argument. 58 | aa="done"; 59 | end 60 | -------------------------------------------------------------------------------- /matlab/parallel/code02/mat.out.valid: -------------------------------------------------------------------------------- 1 | 0.000000 2 | -132.000000 3 | -506.000000 4 | -1122.000000 5 | -1980.000000 6 | -3080.000000 7 | -4422.000000 8 | -6006.000000 9 | -7832.000000 10 | -9900.000000 11 | 0.000000 12 | -1584.000000 13 | -6072.000000 14 | -13464.000000 15 | -23760.000000 16 | -36960.000000 17 | -53064.000000 18 | -72072.000000 19 | -93984.000000 20 | -118800.000000 21 | 0.000000 22 | -3036.000000 23 | -11638.000000 24 | -25806.000000 25 | -45540.000000 26 | -70840.000000 27 | -101706.000000 28 | -138138.000000 29 | -180136.000000 30 | -227700.000000 31 | 0.000000 32 | -4488.000000 33 | -17204.000000 34 | -38148.000000 35 | -67320.000000 36 | -104720.000000 37 | -150348.000000 38 | -204204.000000 39 | -266288.000000 40 | -336600.000000 41 | 0.000000 42 | -5940.000000 43 | -22770.000000 44 | -50490.000000 45 | -89100.000000 46 | -138600.000000 47 | -198990.000000 48 | -270270.000000 49 | -352440.000000 50 | -445500.000000 51 | 0.000000 52 | -7392.000000 53 | -28336.000000 54 | -62832.000000 55 | -110880.000000 56 | -172480.000000 57 | -247632.000000 58 | -336336.000000 59 | -438592.000000 60 | -554400.000000 61 | 0.000000 62 | -8844.000000 63 | -33902.000000 64 | -75174.000000 65 | -132660.000000 66 | -206360.000000 67 | -296274.000000 68 | -402402.000000 69 | -524744.000000 70 | -663300.000000 71 | 0.000000 72 | -10296.000000 73 | -39468.000000 74 | -87516.000000 75 | -154440.000000 76 | -240240.000000 77 | -344916.000000 78 | -468468.000000 79 | -610896.000000 80 | -772200.000000 81 | 0.000000 82 | -11748.000000 83 | -45034.000000 84 | -99858.000000 85 | -176220.000000 86 | -274120.000000 87 | -393558.000000 88 | -534534.000000 89 | -697048.000000 90 | -881100.000000 91 | 0.000000 92 | -13200.000000 93 | -50600.000000 94 | -112200.000000 95 | -198000.000000 96 | -308000.000000 97 | -442200.000000 98 | -600600.000000 99 | -783200.000000 100 | -990000.000000 101 | -------------------------------------------------------------------------------- /matlab/parallel/code02/previous-execution-output/gpu.259.log: -------------------------------------------------------------------------------- 1 | timestamp, name, pci.bus_id, driver_version, temperature.gpu, utilization.gpu [%], utilization.memory [%], memory.total [MiB], memory.free [MiB], memory.used [MiB] 2 | 2025/05/16 10:16:43.879, NVIDIA H200, 00000000:19:00.0, 565.57.01, 33, 0 %, 0 %, 143771 MiB, 143183 MiB, 1 MiB 3 | 2025/05/16 
10:16:46.894, NVIDIA H200, 00000000:19:00.0, 565.57.01, 33, 0 %, 0 %, 143771 MiB, 143183 MiB, 1 MiB 4 | 2025/05/16 10:16:49.906, NVIDIA H200, 00000000:19:00.0, 565.57.01, 33, 0 %, 0 %, 143771 MiB, 143183 MiB, 1 MiB 5 | 2025/05/16 10:16:52.920, NVIDIA H200, 00000000:19:00.0, 565.57.01, 33, 0 %, 0 %, 143771 MiB, 143183 MiB, 1 MiB 6 | 2025/05/16 10:16:55.931, NVIDIA H200, 00000000:19:00.0, 565.57.01, 33, 0 %, 0 %, 143771 MiB, 143183 MiB, 1 MiB 7 | 2025/05/16 10:16:58.942, NVIDIA H200, 00000000:19:00.0, 565.57.01, 33, 0 %, 0 %, 143771 MiB, 143183 MiB, 1 MiB 8 | 2025/05/16 10:17:01.952, NVIDIA H200, 00000000:19:00.0, 565.57.01, 33, 0 %, 0 %, 143771 MiB, 143183 MiB, 1 MiB 9 | -------------------------------------------------------------------------------- /matlab/parallel/code02/previous-execution-output/gpu.262.log: -------------------------------------------------------------------------------- 1 | timestamp, name, pci.bus_id, driver_version, temperature.gpu, utilization.gpu [%], utilization.memory [%], memory.total [MiB], memory.free [MiB], memory.used [MiB] 2 | 2025/05/16 10:30:07.241, NVIDIA H200, 00000000:19:00.0, 565.57.01, 34, 0 %, 0 %, 143771 MiB, 143183 MiB, 1 MiB 3 | 2025/05/16 10:30:10.251, NVIDIA H200, 00000000:19:00.0, 565.57.01, 34, 0 %, 0 %, 143771 MiB, 143183 MiB, 1 MiB 4 | 2025/05/16 10:30:13.273, NVIDIA H200, 00000000:19:00.0, 565.57.01, 34, 0 %, 0 %, 143771 MiB, 143183 MiB, 1 MiB 5 | 2025/05/16 10:30:16.288, NVIDIA H200, 00000000:19:00.0, 565.57.01, 34, 0 %, 0 %, 143771 MiB, 143183 MiB, 1 MiB 6 | 2025/05/16 10:30:19.299, NVIDIA H200, 00000000:19:00.0, 565.57.01, 34, 0 %, 0 %, 143771 MiB, 143180 MiB, 4 MiB 7 | 2025/05/16 10:30:22.309, NVIDIA H200, 00000000:19:00.0, 565.57.01, 38, 45 %, 43 %, 143771 MiB, 142556 MiB, 628 MiB 8 | 2025/05/16 10:30:25.320, NVIDIA H200, 00000000:19:00.0, 565.57.01, 38, 25 %, 24 %, 143771 MiB, 142556 MiB, 628 MiB 9 | 2025/05/16 10:30:28.331, NVIDIA H200, 00000000:19:00.0, 565.57.01, 39, 17 %, 16 %, 143771 MiB, 142556 MiB, 628 MiB 10 | 2025/05/16 10:30:31.344, NVIDIA H200, 00000000:19:00.0, 565.57.01, 41, 50 %, 47 %, 143771 MiB, 142556 MiB, 628 MiB 11 | 2025/05/16 10:30:34.355, NVIDIA H200, 00000000:19:00.0, 565.57.01, 37, 0 %, 0 %, 143771 MiB, 142746 MiB, 438 MiB 12 | -------------------------------------------------------------------------------- /matlab/parallel/code02/previous-execution-output/iostat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/parallel/code02/previous-execution-output/iostat-stderr.txt -------------------------------------------------------------------------------- /matlab/parallel/code02/previous-execution-output/mat.out: -------------------------------------------------------------------------------- 1 | 0.000000 2 | -132.000000 3 | -506.000000 4 | -1122.000000 5 | -1980.000000 6 | -3080.000000 7 | -4422.000000 8 | -6006.000000 9 | -7832.000000 10 | -9900.000000 11 | 0.000000 12 | -1584.000000 13 | -6072.000000 14 | -13464.000000 15 | -23760.000000 16 | -36960.000000 17 | -53064.000000 18 | -72072.000000 19 | -93984.000000 20 | -118800.000000 21 | 0.000000 22 | -3036.000000 23 | -11638.000000 24 | -25806.000000 25 | -45540.000000 26 | -70840.000000 27 | -101706.000000 28 | -138138.000000 29 | -180136.000000 30 | -227700.000000 31 | 0.000000 32 | -4488.000000 33 | -17204.000000 34 | -38148.000000 35 | -67320.000000 36 | -104720.000000 37 | -150348.000000 38 | 
-204204.000000 39 | -266288.000000 40 | -336600.000000 41 | 0.000000 42 | -5940.000000 43 | -22770.000000 44 | -50490.000000 45 | -89100.000000 46 | -138600.000000 47 | -198990.000000 48 | -270270.000000 49 | -352440.000000 50 | -445500.000000 51 | 0.000000 52 | -7392.000000 53 | -28336.000000 54 | -62832.000000 55 | -110880.000000 56 | -172480.000000 57 | -247632.000000 58 | -336336.000000 59 | -438592.000000 60 | -554400.000000 61 | 0.000000 62 | -8844.000000 63 | -33902.000000 64 | -75174.000000 65 | -132660.000000 66 | -206360.000000 67 | -296274.000000 68 | -402402.000000 69 | -524744.000000 70 | -663300.000000 71 | 0.000000 72 | -10296.000000 73 | -39468.000000 74 | -87516.000000 75 | -154440.000000 76 | -240240.000000 77 | -344916.000000 78 | -468468.000000 79 | -610896.000000 80 | -772200.000000 81 | 0.000000 82 | -11748.000000 83 | -45034.000000 84 | -99858.000000 85 | -176220.000000 86 | -274120.000000 87 | -393558.000000 88 | -534534.000000 89 | -697048.000000 90 | -881100.000000 91 | 0.000000 92 | -13200.000000 93 | -50600.000000 94 | -112200.000000 95 | -198000.000000 96 | -308000.000000 97 | -442200.000000 98 | -600600.000000 99 | -783200.000000 100 | -990000.000000 101 | -------------------------------------------------------------------------------- /matlab/parallel/code02/previous-execution-output/mpstat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/parallel/code02/previous-execution-output/mpstat-stderr.txt -------------------------------------------------------------------------------- /matlab/parallel/code02/previous-execution-output/slurm.matlab.04.cpu.259.err: -------------------------------------------------------------------------------- 1 | Resetting modules to system default. Reseting $MODULEPATH back to system default. All extra directories will be removed from $MODULEPATH. 2 | -------------------------------------------------------------------------------- /matlab/parallel/code02/previous-execution-output/slurm.matlab.04.cpu.262.err: -------------------------------------------------------------------------------- 1 | Resetting modules to system default. Reseting $MODULEPATH back to system default. All extra directories will be removed from $MODULEPATH. 
2 | -------------------------------------------------------------------------------- /matlab/parallel/code02/previous-execution-output/vmstat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/parallel/code02/previous-execution-output/vmstat-stderr.txt -------------------------------------------------------------------------------- /matlab/parallel/code02/previous-execution-output/vmstat-stdout.txt: -------------------------------------------------------------------------------- 1 | procs -----------memory---------- ---swap-- -----io---- -system-- ------cpu----- 2 | r b swpd free buff cache si so bi bo in cs us sy id wa st 3 | 2 0 0 2100595456 10848 2980376 0 0 0 0 0 0 0 0 100 0 0 4 | 1 0 0 2100502528 10848 2980400 0 0 0 0 7056 10793 0 0 99 0 0 5 | 2 0 0 2100399616 10848 2981664 0 0 0 4 10734 18295 1 0 99 0 0 6 | 3 0 0 2100336896 10848 2983076 0 0 0 4 8377 15045 1 0 99 0 0 7 | 6 0 0 2100086272 10848 2983624 0 0 0 0 12745 22340 1 0 99 0 0 8 | 1 0 0 2099812480 10848 2983672 0 0 0 0 4815 10079 1 0 99 0 0 9 | 1 1 0 2099641600 10848 3016440 0 0 1196 0 8888 12012 1 0 98 0 0 10 | 0 0 0 2099299968 10848 3149436 0 0 12686 0 25389 30704 0 0 98 1 0 11 | 2 0 0 2099283840 10848 3159984 0 0 0 22 22664 30097 1 0 99 0 0 12 | 3 0 0 2099044992 10848 3159992 0 0 0 0 16565 20728 1 0 98 0 0 13 | 2 0 0 2099045376 10848 3160004 0 0 0 2 22374 28522 1 0 98 0 0 14 | 1 0 0 2099036800 10848 3161392 0 0 0 0 9982 13653 1 0 98 0 0 15 | 1 0 0 2099037824 10848 3161444 0 0 0 184 2096 1526 2 0 98 0 0 16 | 1 0 0 2099024512 10848 3161836 0 0 0 0 2194 3551 0 0 99 0 0 17 | -------------------------------------------------------------------------------- /matlab/parallel/code02/run.04: -------------------------------------------------------------------------------- 1 | 2 | ## ------- Parameters; CLAs. 3 | 4 | ## For Owl normal_q 5 | ## (same as for l40s_normal_q ) 6 | arrayLength=2000000 7 | numIterations=1000000 8 | ## arrayLength=20000000 9 | ## numIterations=10000000 10 | 11 | outfile=mat.out 12 | 13 | ## Code name. 14 | mycode="code04" 15 | 16 | ## Invocation. Matlab syntax in double-quotes. 17 | ## matlab -nodisplay -nosplash -r "bogus = ${mycode}(${arrayLength}, ${numIterations}, ${outfile})" 18 | matlab -nodisplay -nosplash -r "bogus = ${mycode}(${arrayLength}, ${numIterations})" 19 | 20 | 21 | ## If you do not have this command, 22 | ## then you will stay in the matlab 23 | ## environment. 24 | # exit 0 25 | # quit(0,"force") 26 | # quit 27 | -------------------------------------------------------------------------------- /matlab/parallel/code02/run.delete.me: -------------------------------------------------------------------------------- 1 | rm *.txt 2 | rm slurm.matlab.* 3 | rm gpu.*.log 4 | -------------------------------------------------------------------------------- /matlab/parallel/code03/code02b.m: -------------------------------------------------------------------------------- 1 | % ## Switches for invoking matlab. 
2 | % ## https://stackoverflow.com/questions/8981168/running-a-matlab-program-with-arguments 3 | % ## Example: matlab -nodisplay -nosplash -r progName args 4 | 5 | % ## Invoke code02b arrayLength numIterations 6 | 7 | % function A = code02b(varargin) 8 | % arrayLength = varargin{1} 9 | % numIterations = varargin{2} 10 | 11 | function aa = code02b(arrayLength, numIterations) 12 | 13 | outfile="mat.02b.out"; 14 | 15 | fprintf('arrayLength: \n'); 16 | disp (arrayLength); 17 | fprintf('numIterations: \n'); 18 | disp (numIterations); 19 | fprintf('outfile: \n'); 20 | disp (outfile); 21 | 22 | % N = 200000; 23 | N = arrayLength; 24 | r = gpuArray.linspace(1,100,N); 25 | % x = rand(1,N,"gpuArray"); 26 | x = gpuArray.linspace(1,100,N); 27 | x = transpose(x); 28 | 29 | % numIterations = 1000; 30 | for n=1:numIterations 31 | x = r.*x.*(1-x); 32 | end 33 | 34 | % Write x to file. 35 | fid = fopen(outfile,'w'); 36 | fprintf(fid,'%f\n',x); 37 | fclose(fid); 38 | 39 | 40 | % plot(r,x,'.',MarkerSize=1) 41 | % xlabel("Growth Rate") 42 | % ylabel("Population") 43 | 44 | % Return argument. 45 | aa="done"; 46 | end 47 | -------------------------------------------------------------------------------- /matlab/parallel/code03/mat.02b.out.valid: -------------------------------------------------------------------------------- 1 | 0.000000 2 | -132.000000 3 | -506.000000 4 | -1122.000000 5 | -1980.000000 6 | -3080.000000 7 | -4422.000000 8 | -6006.000000 9 | -7832.000000 10 | -9900.000000 11 | 0.000000 12 | -1584.000000 13 | -6072.000000 14 | -13464.000000 15 | -23760.000000 16 | -36960.000000 17 | -53064.000000 18 | -72072.000000 19 | -93984.000000 20 | -118800.000000 21 | 0.000000 22 | -3036.000000 23 | -11638.000000 24 | -25806.000000 25 | -45540.000000 26 | -70840.000000 27 | -101706.000000 28 | -138138.000000 29 | -180136.000000 30 | -227700.000000 31 | 0.000000 32 | -4488.000000 33 | -17204.000000 34 | -38148.000000 35 | -67320.000000 36 | -104720.000000 37 | -150348.000000 38 | -204204.000000 39 | -266288.000000 40 | -336600.000000 41 | 0.000000 42 | -5940.000000 43 | -22770.000000 44 | -50490.000000 45 | -89100.000000 46 | -138600.000000 47 | -198990.000000 48 | -270270.000000 49 | -352440.000000 50 | -445500.000000 51 | 0.000000 52 | -7392.000000 53 | -28336.000000 54 | -62832.000000 55 | -110880.000000 56 | -172480.000000 57 | -247632.000000 58 | -336336.000000 59 | -438592.000000 60 | -554400.000000 61 | 0.000000 62 | -8844.000000 63 | -33902.000000 64 | -75174.000000 65 | -132660.000000 66 | -206360.000000 67 | -296274.000000 68 | -402402.000000 69 | -524744.000000 70 | -663300.000000 71 | 0.000000 72 | -10296.000000 73 | -39468.000000 74 | -87516.000000 75 | -154440.000000 76 | -240240.000000 77 | -344916.000000 78 | -468468.000000 79 | -610896.000000 80 | -772200.000000 81 | 0.000000 82 | -11748.000000 83 | -45034.000000 84 | -99858.000000 85 | -176220.000000 86 | -274120.000000 87 | -393558.000000 88 | -534534.000000 89 | -697048.000000 90 | -881100.000000 91 | 0.000000 92 | -13200.000000 93 | -50600.000000 94 | -112200.000000 95 | -198000.000000 96 | -308000.000000 97 | -442200.000000 98 | -600600.000000 99 | -783200.000000 100 | -990000.000000 101 | -------------------------------------------------------------------------------- /matlab/parallel/code03/previous-execution-results/24820.gpu.log: -------------------------------------------------------------------------------- 1 | timestamp, name, pci.bus_id, driver_version, temperature.gpu, utilization.gpu [%], utilization.memory [%], memory.total 
[MiB], memory.free [MiB], memory.used [MiB] 2 | 2025/05/16 11:10:37.014, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 26, 0 %, 0 %, 46068 MiB, 45485 MiB, 1 MiB 3 | 2025/05/16 11:10:40.024, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 26, 0 %, 0 %, 46068 MiB, 45485 MiB, 1 MiB 4 | 2025/05/16 11:10:43.034, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 26, 0 %, 0 %, 46068 MiB, 45485 MiB, 1 MiB 5 | 2025/05/16 11:10:46.042, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 26, 0 %, 0 %, 46068 MiB, 45485 MiB, 1 MiB 6 | 2025/05/16 11:10:49.053, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 26, 0 %, 0 %, 46068 MiB, 45485 MiB, 1 MiB 7 | 2025/05/16 11:10:52.062, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 28, 0 %, 0 %, 46068 MiB, 45051 MiB, 435 MiB 8 | 2025/05/16 11:10:55.071, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 28, 0 %, 0 %, 46068 MiB, 45051 MiB, 435 MiB 9 | 2025/05/16 11:10:58.079, NVIDIA L40S, 00000000:4A:00.0, 565.57.01, 28, 0 %, 0 %, 46068 MiB, 45051 MiB, 435 MiB 10 | -------------------------------------------------------------------------------- /matlab/parallel/code03/previous-execution-results/iostat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/parallel/code03/previous-execution-results/iostat-stderr.txt -------------------------------------------------------------------------------- /matlab/parallel/code03/previous-execution-results/mat.02b.out: -------------------------------------------------------------------------------- 1 | 0.000000 2 | -132.000000 3 | -506.000000 4 | -1122.000000 5 | -1980.000000 6 | -3080.000000 7 | -4422.000000 8 | -6006.000000 9 | -7832.000000 10 | -9900.000000 11 | 0.000000 12 | -1584.000000 13 | -6072.000000 14 | -13464.000000 15 | -23760.000000 16 | -36960.000000 17 | -53064.000000 18 | -72072.000000 19 | -93984.000000 20 | -118800.000000 21 | 0.000000 22 | -3036.000000 23 | -11638.000000 24 | -25806.000000 25 | -45540.000000 26 | -70840.000000 27 | -101706.000000 28 | -138138.000000 29 | -180136.000000 30 | -227700.000000 31 | 0.000000 32 | -4488.000000 33 | -17204.000000 34 | -38148.000000 35 | -67320.000000 36 | -104720.000000 37 | -150348.000000 38 | -204204.000000 39 | -266288.000000 40 | -336600.000000 41 | 0.000000 42 | -5940.000000 43 | -22770.000000 44 | -50490.000000 45 | -89100.000000 46 | -138600.000000 47 | -198990.000000 48 | -270270.000000 49 | -352440.000000 50 | -445500.000000 51 | 0.000000 52 | -7392.000000 53 | -28336.000000 54 | -62832.000000 55 | -110880.000000 56 | -172480.000000 57 | -247632.000000 58 | -336336.000000 59 | -438592.000000 60 | -554400.000000 61 | 0.000000 62 | -8844.000000 63 | -33902.000000 64 | -75174.000000 65 | -132660.000000 66 | -206360.000000 67 | -296274.000000 68 | -402402.000000 69 | -524744.000000 70 | -663300.000000 71 | 0.000000 72 | -10296.000000 73 | -39468.000000 74 | -87516.000000 75 | -154440.000000 76 | -240240.000000 77 | -344916.000000 78 | -468468.000000 79 | -610896.000000 80 | -772200.000000 81 | 0.000000 82 | -11748.000000 83 | -45034.000000 84 | -99858.000000 85 | -176220.000000 86 | -274120.000000 87 | -393558.000000 88 | -534534.000000 89 | -697048.000000 90 | -881100.000000 91 | 0.000000 92 | -13200.000000 93 | -50600.000000 94 | -112200.000000 95 | -198000.000000 96 | -308000.000000 97 | -442200.000000 98 | -600600.000000 99 | -783200.000000 100 | -990000.000000 101 | -------------------------------------------------------------------------------- 
/matlab/parallel/code03/previous-execution-results/mpstat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/parallel/code03/previous-execution-results/mpstat-stderr.txt -------------------------------------------------------------------------------- /matlab/parallel/code03/previous-execution-results/slurm.matlab.02.gpu.24820.err: -------------------------------------------------------------------------------- 1 | Resetting modules to system default. Reseting $MODULEPATH back to system default. All extra directories will be removed from $MODULEPATH. 2 | -------------------------------------------------------------------------------- /matlab/parallel/code03/previous-execution-results/vmstat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/parallel/code03/previous-execution-results/vmstat-stderr.txt -------------------------------------------------------------------------------- /matlab/parallel/code03/previous-execution-results/vmstat-stdout.txt: -------------------------------------------------------------------------------- 1 | procs -----------memory---------- ---swap-- -----io---- -system-- ------cpu----- 2 | r b swpd free buff cache si so bi bo in cs us sy id wa st 3 | 4 0 0 292119360 3144 230230400 0 0 0 0 0 0 0 0 100 0 0 4 | 1 0 0 292027264 3144 230230416 0 0 0 0 6794 10541 1 0 99 0 0 5 | 1 0 0 291922720 3144 230231120 0 0 0 0 12561 19919 0 0 99 0 0 6 | 1 0 0 291868384 3144 230233440 0 0 0 44 6337 8354 1 0 99 0 0 7 | 2 0 0 291608672 3144 230235152 0 0 0 0 11264 20924 1 0 98 0 0 8 | 1 0 0 291346496 3144 230235344 0 0 0 2 8822 16414 1 0 99 0 0 9 | 1 0 0 291216736 3144 230235424 0 0 0 0 4365 7409 1 0 99 0 0 10 | 1 0 0 291009280 3144 230243648 0 0 0 0 17763 28551 0 0 99 0 0 11 | 0 0 0 291037440 3144 230243776 0 0 0 0 15050 26234 0 0 100 0 0 12 | 1 0 0 291037792 3144 230243792 0 0 0 0 12686 22660 0 0 100 0 0 13 | 2 0 0 290798176 3144 230243792 0 0 0 2 9867 16974 0 0 99 0 0 14 | 0 0 0 290798176 3144 230243808 0 0 0 0 12259 21735 0 0 100 0 0 15 | -------------------------------------------------------------------------------- /matlab/parallel/code03/run.02b: -------------------------------------------------------------------------------- 1 | 2 | ## ------- Parameters; CLAs. 3 | 4 | ## For t4_normal_q 5 | ## (same as for l40s_normal_q ) 6 | # arrayLength=20000 7 | # numIterations=100000 8 | arrayLength=10 9 | numIterations=1 10 | 11 | ## Code name. 12 | mycode="code02b" 13 | 14 | ## Invocation. Matlab syntax in double-quotes. 15 | matlab -nodisplay -nosplash -r "bogus = ${mycode}(${arrayLength}, ${numIterations})" 16 | 17 | 18 | ## If you do not have this command, 19 | ## then you will stay in the matlab 20 | ## environment. 
21 | # exit 0 22 | # quit(0,"force") 23 | # quit 24 | -------------------------------------------------------------------------------- /matlab/parallel/code03/run.delete.me: -------------------------------------------------------------------------------- 1 | rm *.txt 2 | rm slurm.matlab.02.gpu.* 3 | rm *.gpu.log 4 | -------------------------------------------------------------------------------- /matlab/parallel/code03/sbatch.02b.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #SBATCH -J matgpu 4 | 5 | 6 | ## Wall time. 7 | #SBATCH --time=0-01:00:00 8 | 9 | ## Account to "charge" to/run against. 10 | #SBATCH --account=arcadm 11 | 12 | ## Partition/queue. 13 | ## #SBATCH --partition=normal_q 14 | ## #SBATCH --partition=largemem_q 15 | #SBATCH --partition=l40s_normal_q 16 | 17 | ### This requests 1 node, 1 core. 1 gpu. 18 | #SBATCH --nodes=1 19 | #SBATCH --ntasks-per-node=1 20 | #SBATCH --cpus-per-task=1 21 | #SBATCH --gres=gpu:1 22 | 23 | 24 | ## Reservation. 25 | #SBATCH --reservation=HPCMaintTesting 26 | 27 | ## Use the compute node only for this job, and use all memory on this node. 28 | ## #SBATCH --exclusive 29 | ## #SBATCH --mem=500G 30 | 31 | ## Slurm output and error files. 32 | #SBATCH -o slurm.matlab.02.gpu.%j.out 33 | #SBATCH -e slurm.matlab.02.gpu.%j.err 34 | 35 | 36 | ## Load modules, if any. 37 | module reset 38 | module load MATLAB/R2024b 39 | 40 | ## Load virtual environments, if any. 41 | ## source activate ~/env/falcon/l40s_normal_q/py312_osu_gpu_timing 42 | 43 | # Set up 44 | 45 | ## Get the core number for job and other job details. 46 | echo " ------------" 47 | echo "Set of cores job running on: " 48 | echo " " 49 | scontrol show job -d $SLURM_JOB_ID 50 | echo " " 51 | echo " " 52 | 53 | ## Monitor the GPU. 54 | echo " " 55 | echo " " 56 | echo "Start file and monitoring of GPU." 57 | nvidia-smi --query-gpu=timestamp,name,pci.bus_id,driver_version,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used --format=csv -l 3 > $SLURM_JOBID.gpu.log & 58 | echo " " 59 | echo " " 60 | 61 | echo " " 62 | echo " ------------" 63 | echo "Running IOSTAT" 64 | 65 | iostat 2 >iostat-stdout.txt 2>iostat-stderr.txt & 66 | 67 | echo " ------------" 68 | echo "Running MPSTAT" 69 | 70 | mpstat -P ALL 2 >mpstat-stdout.txt 2>mpstat-stderr.txt & 71 | 72 | echo " ------------" 73 | echo "Running VMSTAT" 74 | 75 | vmstat 2 >vmstat-stdout.txt 2>vmstat-stderr.txt & 76 | 77 | echo " ------------" 78 | echo "Running executable" 79 | 80 | # Code to execute. 81 | sh ./run.02b 82 | 83 | echo " ------------" 84 | echo "Executable done" 85 | 86 | echo " ------------" 87 | echo "Killing IOSTAT" 88 | kill %1 89 | 90 | echo " ------------" 91 | echo "Killing MPSTAT" 92 | kill %2 93 | 94 | echo " ------------" 95 | echo "Killing VMSTAT" 96 | kill %3 97 | 98 | -------------------------------------------------------------------------------- /matlab/prime_batch_local.m: -------------------------------------------------------------------------------- 1 | %% PRIME_BATCH_LOCAL uses the BATCH command to run the PRIME code locally. 2 | % 3 | % Discussion: 4 | % 5 | % The PRIME code is a function, so first we must write a script 6 | % called PRIME_SCRIPT that runs the function. 7 | % 8 | % Licensing: 9 | % 10 | % This code is distributed under the GNU LGPL license. 
11 | % 12 | % Modified: 13 | % 14 | % 12 July 2016 by Justin Krometis 15 | % 16 | % Author: 17 | % 18 | % John Burkardt 19 | % 20 | clear 21 | 22 | fprintf ( 1, '\n' ); 23 | fprintf ( 1, 'PRIME_BATCH_LOCAL\n' ); 24 | fprintf ( 1, ' Run PRIME_SCRIPT locally.\n' ); 25 | % 26 | % BATCH defines the job and sends it for execution. 27 | % 28 | my_job = batch ( 'prime_script', 'Profile', 'local' , 'Pool', 15); 29 | % 30 | % WAIT pauses the MATLAB session til the job completes. 31 | % 32 | wait ( my_job ); 33 | % 34 | % DIARY displays any messages printed during execution. 35 | % 36 | diary ( my_job ); 37 | % 38 | % LOAD makes the script's workspace available. 39 | % 40 | % total = total number of primes. 41 | % 42 | load ( my_job ); 43 | 44 | fprintf ( 1, '\n' ); 45 | fprintf ( 1, ' Total number of primes = %d\n', total ); 46 | % 47 | % These commands clean up data about the job we no longer need. 48 | % 49 | delete ( my_job ); %Use delete() for R2012a or later 50 | 51 | fprintf ( 1, '\n' ); 52 | fprintf ( 1, 'PRIME_BATCH_LOCAL\n' ); 53 | fprintf ( 1, ' Normal end of execution.\n' ); 54 | -------------------------------------------------------------------------------- /matlab/prime_fun.m: -------------------------------------------------------------------------------- 1 | function total = prime_fun ( n ) 2 | 3 | %*****************************************************************************80 4 | % 5 | %% PRIME_FUN returns the number of primes between 1 and N. 6 | % 7 | % Discussion: 8 | % 9 | % A naive algorithm is used. 10 | % 11 | % Mathematica can return the number of primes less than or equal to N 12 | % by the command PrimePi[N]. 13 | % 14 | % N TOTAL 15 | % 16 | % 1 0 17 | % 10 4 18 | % 100 25 19 | % 1,000 168 20 | % 10,000 1,229 21 | % 100,000 9,592 22 | % 1,000,000 78,498 23 | % 10,000,000 664,579 24 | % 100,000,000 5,761,455 25 | % 1,000,000,000 50,847,534 26 | % 27 | % Licensing: 28 | % 29 | % This code is distributed under the GNU LGPL license. 30 | % 31 | % Modified: 32 | % 33 | % 22 April 2009 34 | % 35 | % Author: 36 | % 37 | % John Burkardt 38 | % 39 | % Input, integer N, the maximum number to check. 40 | % 41 | % Output, integer TOTAL, the number of prime numbers up to N. 42 | % 43 | total = 0; 44 | 45 | parfor i = 2 : n 46 | 47 | prime = 1; 48 | 49 | for j = 2 : sqrt ( i ) 50 | if ( mod ( i, j ) == 0 ) 51 | prime = 0; 52 | break 53 | end 54 | end 55 | 56 | total = total + prime; 57 | 58 | end 59 | 60 | return 61 | end 62 | -------------------------------------------------------------------------------- /matlab/prime_script.m: -------------------------------------------------------------------------------- 1 | %% PRIME_SCRIPT is a script to call PRIME_FUN. 2 | % 3 | % Discussion: 4 | % 5 | % The BATCH command runs scripts, not functions. So we have to write 6 | % this short script if we want to work with BATCH! 7 | % 8 | % Licensing: 9 | % 10 | % This code is distributed under the GNU LGPL license. 11 | % 12 | % Modified: 13 | % 14 | % 27 March 2010 15 | % 16 | % Author: 17 | % 18 | % John Burkardt 19 | % 20 | n = 100000000; 21 | 22 | fprintf ( 1, '\n' ); 23 | fprintf ( 1, 'PRIME_SCRIPT\n' ); 24 | fprintf ( 1, ' Count prime numbers from 1 to %d\n', n ); 25 | 26 | total = prime_fun ( n ); 27 | -------------------------------------------------------------------------------- /matlab/serial/code01/code04.m: -------------------------------------------------------------------------------- 1 | % ## Switches for invoking matlab. 
2 | % ## https://stackoverflow.com/questions/8981168/running-a-matlab-program-with-arguments 3 | % ## Example: matlab -nodisplay -nosplash -r progName args 4 | 5 | % ## Invoke code02 arrayLength numIterations 6 | 7 | % function A = code02(varargin) 8 | % arrayLength = varargin{1} 9 | % numIterations = varargin{2} 10 | 11 | % This function is now for CPU, not GPU. 12 | % Slight modifications below 13 | % to set up r and x. 14 | % function aa = code04(arrayLength, numIterations, outfile) 15 | function aa = code04(arrayLength, numIterations) 16 | 17 | % Put output file name in code; have problems passing it in. 18 | outfile = "mat.out"; 19 | 20 | fprintf('arrayLength: \n'); 21 | disp (arrayLength); 22 | fprintf('numIterations: \n'); 23 | disp (numIterations); 24 | fprintf('outfile: \n'); 25 | disp (outfile); 26 | 27 | % N = 200000; 28 | N = arrayLength; 29 | r = linspace(1,100,N); 30 | % x = rand(1,N); 31 | x = linspace(1,100,N); 32 | x = transpose(x); 33 | 34 | % numIterations = 1000; 35 | for n=1:numIterations 36 | x = r.*x.*(1-x); 37 | end 38 | 39 | % Write x to file. 40 | fid = fopen(outfile,'w'); 41 | fprintf(fid,'%f\n',x); 42 | fclose(fid); 43 | 44 | 45 | % plot(r,x,'.',MarkerSize=1) 46 | % xlabel("Growth Rate") 47 | % ylabel("Population") 48 | 49 | % Return argument. 50 | aa="done"; 51 | end 52 | -------------------------------------------------------------------------------- /matlab/serial/code01/mat.out.valid: -------------------------------------------------------------------------------- 1 | 0.000000 2 | -132.000000 3 | -506.000000 4 | -1122.000000 5 | -1980.000000 6 | -3080.000000 7 | -4422.000000 8 | -6006.000000 9 | -7832.000000 10 | -9900.000000 11 | 0.000000 12 | -1584.000000 13 | -6072.000000 14 | -13464.000000 15 | -23760.000000 16 | -36960.000000 17 | -53064.000000 18 | -72072.000000 19 | -93984.000000 20 | -118800.000000 21 | 0.000000 22 | -3036.000000 23 | -11638.000000 24 | -25806.000000 25 | -45540.000000 26 | -70840.000000 27 | -101706.000000 28 | -138138.000000 29 | -180136.000000 30 | -227700.000000 31 | 0.000000 32 | -4488.000000 33 | -17204.000000 34 | -38148.000000 35 | -67320.000000 36 | -104720.000000 37 | -150348.000000 38 | -204204.000000 39 | -266288.000000 40 | -336600.000000 41 | 0.000000 42 | -5940.000000 43 | -22770.000000 44 | -50490.000000 45 | -89100.000000 46 | -138600.000000 47 | -198990.000000 48 | -270270.000000 49 | -352440.000000 50 | -445500.000000 51 | 0.000000 52 | -7392.000000 53 | -28336.000000 54 | -62832.000000 55 | -110880.000000 56 | -172480.000000 57 | -247632.000000 58 | -336336.000000 59 | -438592.000000 60 | -554400.000000 61 | 0.000000 62 | -8844.000000 63 | -33902.000000 64 | -75174.000000 65 | -132660.000000 66 | -206360.000000 67 | -296274.000000 68 | -402402.000000 69 | -524744.000000 70 | -663300.000000 71 | 0.000000 72 | -10296.000000 73 | -39468.000000 74 | -87516.000000 75 | -154440.000000 76 | -240240.000000 77 | -344916.000000 78 | -468468.000000 79 | -610896.000000 80 | -772200.000000 81 | 0.000000 82 | -11748.000000 83 | -45034.000000 84 | -99858.000000 85 | -176220.000000 86 | -274120.000000 87 | -393558.000000 88 | -534534.000000 89 | -697048.000000 90 | -881100.000000 91 | 0.000000 92 | -13200.000000 93 | -50600.000000 94 | -112200.000000 95 | -198000.000000 96 | -308000.000000 97 | -442200.000000 98 | -600600.000000 99 | -783200.000000 100 | -990000.000000 101 | -------------------------------------------------------------------------------- /matlab/serial/code01/previous-execution-results/iostat-stderr.txt: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/serial/code01/previous-execution-results/iostat-stderr.txt -------------------------------------------------------------------------------- /matlab/serial/code01/previous-execution-results/mat.out: -------------------------------------------------------------------------------- 1 | 0.000000 2 | -132.000000 3 | -506.000000 4 | -1122.000000 5 | -1980.000000 6 | -3080.000000 7 | -4422.000000 8 | -6006.000000 9 | -7832.000000 10 | -9900.000000 11 | 0.000000 12 | -1584.000000 13 | -6072.000000 14 | -13464.000000 15 | -23760.000000 16 | -36960.000000 17 | -53064.000000 18 | -72072.000000 19 | -93984.000000 20 | -118800.000000 21 | 0.000000 22 | -3036.000000 23 | -11638.000000 24 | -25806.000000 25 | -45540.000000 26 | -70840.000000 27 | -101706.000000 28 | -138138.000000 29 | -180136.000000 30 | -227700.000000 31 | 0.000000 32 | -4488.000000 33 | -17204.000000 34 | -38148.000000 35 | -67320.000000 36 | -104720.000000 37 | -150348.000000 38 | -204204.000000 39 | -266288.000000 40 | -336600.000000 41 | 0.000000 42 | -5940.000000 43 | -22770.000000 44 | -50490.000000 45 | -89100.000000 46 | -138600.000000 47 | -198990.000000 48 | -270270.000000 49 | -352440.000000 50 | -445500.000000 51 | 0.000000 52 | -7392.000000 53 | -28336.000000 54 | -62832.000000 55 | -110880.000000 56 | -172480.000000 57 | -247632.000000 58 | -336336.000000 59 | -438592.000000 60 | -554400.000000 61 | 0.000000 62 | -8844.000000 63 | -33902.000000 64 | -75174.000000 65 | -132660.000000 66 | -206360.000000 67 | -296274.000000 68 | -402402.000000 69 | -524744.000000 70 | -663300.000000 71 | 0.000000 72 | -10296.000000 73 | -39468.000000 74 | -87516.000000 75 | -154440.000000 76 | -240240.000000 77 | -344916.000000 78 | -468468.000000 79 | -610896.000000 80 | -772200.000000 81 | 0.000000 82 | -11748.000000 83 | -45034.000000 84 | -99858.000000 85 | -176220.000000 86 | -274120.000000 87 | -393558.000000 88 | -534534.000000 89 | -697048.000000 90 | -881100.000000 91 | 0.000000 92 | -13200.000000 93 | -50600.000000 94 | -112200.000000 95 | -198000.000000 96 | -308000.000000 97 | -442200.000000 98 | -600600.000000 99 | -783200.000000 100 | -990000.000000 101 | -------------------------------------------------------------------------------- /matlab/serial/code01/previous-execution-results/mpstat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/serial/code01/previous-execution-results/mpstat-stderr.txt -------------------------------------------------------------------------------- /matlab/serial/code01/previous-execution-results/slurm.matlab.04.cpu.263.err: -------------------------------------------------------------------------------- 1 | Resetting modules to system default. Reseting $MODULEPATH back to system default. All extra directories will be removed from $MODULEPATH. 
2 | -------------------------------------------------------------------------------- /matlab/serial/code01/previous-execution-results/vmstat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/serial/code01/previous-execution-results/vmstat-stderr.txt -------------------------------------------------------------------------------- /matlab/serial/code01/run.04: -------------------------------------------------------------------------------- 1 | 2 | ## ------- Parameters; CLAs. 3 | 4 | ## For Owl normal_q 5 | ## (same as for l40s_normal_q ) 6 | ## arrayLength=2000000 7 | ## numIterations=1000000 8 | ## arrayLength=20000000 9 | ## numIterations=10000000 10 | arrayLength=10 11 | numIterations=1 12 | 13 | outfile=mat.out 14 | 15 | ## Code name. 16 | mycode="code04" 17 | 18 | ## Invocation. Matlab syntax in double-quotes. 19 | ## matlab -nodisplay -nosplash -r "bogus = ${mycode}(${arrayLength}, ${numIterations}, ${outfile})" 20 | matlab -nodisplay -nosplash -r "bogus = ${mycode}(${arrayLength}, ${numIterations})" 21 | 22 | 23 | ## If you do not have this command, 24 | ## then you will stay in the matlab 25 | ## environment. 26 | # exit 0 27 | # quit(0,"force") 28 | # quit 29 | -------------------------------------------------------------------------------- /matlab/serial/code01/run.delete.me: -------------------------------------------------------------------------------- 1 | rm *.txt 2 | rm slurm.matlab.* 3 | rm *.gpu.log 4 | -------------------------------------------------------------------------------- /matlab/serial/code01/sbatch.04.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #SBATCH -J matcpu 4 | 5 | 6 | ## Wall time. 7 | #SBATCH --time=0-01:00:00 8 | 9 | ## Account to "charge" to/run against. 10 | #SBATCH --account=arcadm 11 | 12 | ## Partition/queue. 13 | #SBATCH --partition=normal_q 14 | ## #SBATCH --partition=largemem_q 15 | ## #SBATCH --partition=v100_normal_q 16 | 17 | ### This requests 1 node, 1 core. 1 gpu. 18 | #SBATCH --nodes=1 19 | #SBATCH --ntasks-per-node=1 20 | #SBATCH --cpus-per-task=1 21 | ## #SBATCH --gres=gpu:1 22 | 23 | 24 | ## Reservation. 25 | ## #SBATCH --reservation=HPCMaintTesting 26 | 27 | ## Use the compute node only for this job, and use all memory on this node. 28 | ## #SBATCH --exclusive 29 | ## #SBATCH --mem=500G 30 | 31 | ## Slurm output and error files. 32 | #SBATCH -o slurm.matlab.04.cpu.%j.out 33 | #SBATCH -e slurm.matlab.04.cpu.%j.err 34 | 35 | 36 | ## Load modules, if any. 37 | module reset 38 | module load MATLAB/R2024b 39 | 40 | ## Load virtual environments, if any. 41 | ## source activate ~/env/falcon/l40s_normal_q/py312_osu_gpu_timing 42 | 43 | # Set up 44 | 45 | ## Get the core number for job and other job details. 46 | echo " ------------" 47 | echo "Set of cores job running on: " 48 | echo " " 49 | scontrol show job -d $SLURM_JOB_ID 50 | echo " " 51 | echo " " 52 | 53 | ## Monitor the GPU. 54 | ## echo " " 55 | ## echo " " 56 | ## echo "Start file and monitoring of GPU." 
57 | ## nvidia-smi --query-gpu=timestamp,name,pci.bus_id,driver_version,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used --format=csv -l 3 > $SLURM_JOBID.gpu.log & 58 | ## echo " " 59 | ## echo " " 60 | 61 | echo " " 62 | echo " ------------" 63 | echo "Running IOSTAT" 64 | 65 | iostat 2 >iostat-stdout.txt 2>iostat-stderr.txt & 66 | 67 | echo " ------------" 68 | echo "Running MPSTAT" 69 | 70 | mpstat -P ALL 2 >mpstat-stdout.txt 2>mpstat-stderr.txt & 71 | 72 | echo " ------------" 73 | echo "Running VMSTAT" 74 | 75 | vmstat 2 >vmstat-stdout.txt 2>vmstat-stderr.txt & 76 | 77 | echo " ------------" 78 | echo "Running executable" 79 | 80 | # Code to execute. 81 | sh ./run.04 82 | 83 | echo " ------------" 84 | echo "Executable done" 85 | 86 | echo " ------------" 87 | echo "Killing IOSTAT" 88 | kill %1 89 | 90 | echo " ------------" 91 | echo "Killing MPSTAT" 92 | kill %2 93 | 94 | echo " ------------" 95 | echo "Killing VMSTAT" 96 | kill %3 97 | 98 | -------------------------------------------------------------------------------- /matlab/serial/code02/code04.m: -------------------------------------------------------------------------------- 1 | % ## Switches for invoking matlab. 2 | % ## https://stackoverflow.com/questions/8981168/running-a-matlab-program-with-arguments 3 | % ## Example: matlab -nodisplay -nosplash -r progName args 4 | 5 | % ## Invoke code02 arrayLength numIterations 6 | 7 | % function A = code02(varargin) 8 | % arrayLength = varargin{1} 9 | % numIterations = varargin{2} 10 | 11 | % This function is now for CPU, not GPU. 12 | % Slight modifications below 13 | % to set up r and x. 14 | % function aa = code04(arrayLength, numIterations, outfile) 15 | function aa = code04(arrayLength, numIterations) 16 | 17 | % Put output file name in code; have problems passing it in. 18 | outfile = "mat.out" 19 | 20 | fprintf('arrayLength: \n'); 21 | disp (arrayLength); 22 | fprintf('numIterations: \n'); 23 | disp (numIterations); 24 | fprintf('outfile: \n'); 25 | disp (outfile); 26 | 27 | % N = 200000; 28 | N = arrayLength; 29 | r = linspace(1,100,N); 30 | % x = rand(1,N); 31 | x = linspace(1,100,N); 32 | x = transpose(x); 33 | 34 | % numIterations = 1000; 35 | for n=1:numIterations 36 | x = r.*x.*(1-x); 37 | end 38 | 39 | % Write x to file. 40 | fid = fopen(outfile,'w'); 41 | fprintf(fid,'%f\n',x); 42 | fclose(fid); 43 | 44 | 45 | % plot(r,x,'.',MarkerSize=1) 46 | % xlabel("Growth Rate") 47 | % ylabel("Population") 48 | 49 | % Return argument. 
50 | aa="done"; 51 | end 52 | -------------------------------------------------------------------------------- /matlab/serial/code02/mat.out.valid: -------------------------------------------------------------------------------- 1 | 0.000000 2 | -132.000000 3 | -506.000000 4 | -1122.000000 5 | -1980.000000 6 | -3080.000000 7 | -4422.000000 8 | -6006.000000 9 | -7832.000000 10 | -9900.000000 11 | 0.000000 12 | -1584.000000 13 | -6072.000000 14 | -13464.000000 15 | -23760.000000 16 | -36960.000000 17 | -53064.000000 18 | -72072.000000 19 | -93984.000000 20 | -118800.000000 21 | 0.000000 22 | -3036.000000 23 | -11638.000000 24 | -25806.000000 25 | -45540.000000 26 | -70840.000000 27 | -101706.000000 28 | -138138.000000 29 | -180136.000000 30 | -227700.000000 31 | 0.000000 32 | -4488.000000 33 | -17204.000000 34 | -38148.000000 35 | -67320.000000 36 | -104720.000000 37 | -150348.000000 38 | -204204.000000 39 | -266288.000000 40 | -336600.000000 41 | 0.000000 42 | -5940.000000 43 | -22770.000000 44 | -50490.000000 45 | -89100.000000 46 | -138600.000000 47 | -198990.000000 48 | -270270.000000 49 | -352440.000000 50 | -445500.000000 51 | 0.000000 52 | -7392.000000 53 | -28336.000000 54 | -62832.000000 55 | -110880.000000 56 | -172480.000000 57 | -247632.000000 58 | -336336.000000 59 | -438592.000000 60 | -554400.000000 61 | 0.000000 62 | -8844.000000 63 | -33902.000000 64 | -75174.000000 65 | -132660.000000 66 | -206360.000000 67 | -296274.000000 68 | -402402.000000 69 | -524744.000000 70 | -663300.000000 71 | 0.000000 72 | -10296.000000 73 | -39468.000000 74 | -87516.000000 75 | -154440.000000 76 | -240240.000000 77 | -344916.000000 78 | -468468.000000 79 | -610896.000000 80 | -772200.000000 81 | 0.000000 82 | -11748.000000 83 | -45034.000000 84 | -99858.000000 85 | -176220.000000 86 | -274120.000000 87 | -393558.000000 88 | -534534.000000 89 | -697048.000000 90 | -881100.000000 91 | 0.000000 92 | -13200.000000 93 | -50600.000000 94 | -112200.000000 95 | -198000.000000 96 | -308000.000000 97 | -442200.000000 98 | -600600.000000 99 | -783200.000000 100 | -990000.000000 101 | -------------------------------------------------------------------------------- /matlab/serial/code02/previous-execution-results/iostat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/serial/code02/previous-execution-results/iostat-stderr.txt -------------------------------------------------------------------------------- /matlab/serial/code02/previous-execution-results/mat.out: -------------------------------------------------------------------------------- 1 | 0.000000 2 | -132.000000 3 | -506.000000 4 | -1122.000000 5 | -1980.000000 6 | -3080.000000 7 | -4422.000000 8 | -6006.000000 9 | -7832.000000 10 | -9900.000000 11 | 0.000000 12 | -1584.000000 13 | -6072.000000 14 | -13464.000000 15 | -23760.000000 16 | -36960.000000 17 | -53064.000000 18 | -72072.000000 19 | -93984.000000 20 | -118800.000000 21 | 0.000000 22 | -3036.000000 23 | -11638.000000 24 | -25806.000000 25 | -45540.000000 26 | -70840.000000 27 | -101706.000000 28 | -138138.000000 29 | -180136.000000 30 | -227700.000000 31 | 0.000000 32 | -4488.000000 33 | -17204.000000 34 | -38148.000000 35 | -67320.000000 36 | -104720.000000 37 | -150348.000000 38 | -204204.000000 39 | -266288.000000 40 | -336600.000000 41 | 0.000000 42 | -5940.000000 43 | -22770.000000 44 | -50490.000000 45 | -89100.000000 46 | 
-138600.000000 47 | -198990.000000 48 | -270270.000000 49 | -352440.000000 50 | -445500.000000 51 | 0.000000 52 | -7392.000000 53 | -28336.000000 54 | -62832.000000 55 | -110880.000000 56 | -172480.000000 57 | -247632.000000 58 | -336336.000000 59 | -438592.000000 60 | -554400.000000 61 | 0.000000 62 | -8844.000000 63 | -33902.000000 64 | -75174.000000 65 | -132660.000000 66 | -206360.000000 67 | -296274.000000 68 | -402402.000000 69 | -524744.000000 70 | -663300.000000 71 | 0.000000 72 | -10296.000000 73 | -39468.000000 74 | -87516.000000 75 | -154440.000000 76 | -240240.000000 77 | -344916.000000 78 | -468468.000000 79 | -610896.000000 80 | -772200.000000 81 | 0.000000 82 | -11748.000000 83 | -45034.000000 84 | -99858.000000 85 | -176220.000000 86 | -274120.000000 87 | -393558.000000 88 | -534534.000000 89 | -697048.000000 90 | -881100.000000 91 | 0.000000 92 | -13200.000000 93 | -50600.000000 94 | -112200.000000 95 | -198000.000000 96 | -308000.000000 97 | -442200.000000 98 | -600600.000000 99 | -783200.000000 100 | -990000.000000 101 | -------------------------------------------------------------------------------- /matlab/serial/code02/previous-execution-results/mpstat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/serial/code02/previous-execution-results/mpstat-stderr.txt -------------------------------------------------------------------------------- /matlab/serial/code02/previous-execution-results/slurm.matlab.04.cpu.96099.err: -------------------------------------------------------------------------------- 1 | Resetting modules to system default. Reseting $MODULEPATH back to system default. All extra directories will be removed from $MODULEPATH. 2 | -------------------------------------------------------------------------------- /matlab/serial/code02/previous-execution-results/vmstat-stderr.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/matlab/serial/code02/previous-execution-results/vmstat-stderr.txt -------------------------------------------------------------------------------- /matlab/serial/code02/run.04: -------------------------------------------------------------------------------- 1 | 2 | ## ------- Parameters; CLAs. 3 | 4 | ## For Owl normal_q 5 | ## (same as for l40s_normal_q ) 6 | ## arrayLength=2000000 7 | ## numIterations=1000000 8 | ## arrayLength=20000000 9 | ## numIterations=10000000 10 | arrayLength=10 11 | numIterations=1 12 | 13 | outfile=mat.out 14 | 15 | ## Code name. 16 | mycode="code04" 17 | 18 | ## Invocation. Matlab syntax in double-quotes. 19 | ## matlab -nodisplay -nosplash -r "bogus = ${mycode}(${arrayLength}, ${numIterations}, ${outfile})" 20 | matlab -nodisplay -nosplash -r "bogus = ${mycode}(${arrayLength}, ${numIterations})" 21 | 22 | 23 | ## If you do not have this command, 24 | ## then you will stay in the matlab 25 | ## environment. 
26 | # exit 0 27 | # quit(0,"force") 28 | # quit 29 | -------------------------------------------------------------------------------- /matlab/serial/code02/run.delete.me: -------------------------------------------------------------------------------- 1 | rm *.txt 2 | rm slurm.matlab.* 3 | rm *.gpu.log 4 | -------------------------------------------------------------------------------- /matlab/serial/code02/sbatch.04.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #SBATCH -J matcpu 4 | 5 | 6 | ## Wall time. 7 | #SBATCH --time=0-01:00:00 8 | 9 | ## Account to "charge" to/run against. 10 | #SBATCH --account=arcadm 11 | 12 | ## Partition/queue. 13 | #SBATCH --partition=interactive_q 14 | ## #SBATCH --partition=largemem_q 15 | ## #SBATCH --partition=v100_normal_q 16 | 17 | ### This requests 1 node, 1 core. 1 gpu. 18 | #SBATCH --nodes=1 19 | #SBATCH --ntasks-per-node=1 20 | #SBATCH --cpus-per-task=1 21 | ## #SBATCH --gres=gpu:1 22 | 23 | 24 | ## Reservation. 25 | #SBATCH --reservation=HPCMaintTesting 26 | 27 | ## Use the compute node only for this job, and use all memory on this node. 28 | ## #SBATCH --exclusive 29 | ## #SBATCH --mem=500G 30 | 31 | ## Slurm output and error files. 32 | #SBATCH -o slurm.matlab.04.cpu.%j.out 33 | #SBATCH -e slurm.matlab.04.cpu.%j.err 34 | 35 | 36 | ## Load modules, if any. 37 | module reset 38 | module load MATLAB/R2024b 39 | 40 | ## Load virtual environments, if any. 41 | ## source activate ~/env/falcon/l40s_normal_q/py312_osu_gpu_timing 42 | 43 | # Set up 44 | 45 | ## Get the core number for job and other job details. 46 | echo " ------------" 47 | echo "Set of cores job running on: " 48 | echo " " 49 | scontrol show job -d $SLURM_JOB_ID 50 | echo " " 51 | echo " " 52 | 53 | ## Monitor the GPU. 54 | ## echo " " 55 | ## echo " " 56 | ## echo "Start file and monitoring of GPU." 57 | ## nvidia-smi --query-gpu=timestamp,name,pci.bus_id,driver_version,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used --format=csv -l 3 > $SLURM_JOBID.gpu.log & 58 | ## echo " " 59 | ## echo " " 60 | 61 | echo " " 62 | echo " ------------" 63 | echo "Running IOSTAT" 64 | 65 | iostat 2 >iostat-stdout.txt 2>iostat-stderr.txt & 66 | 67 | echo " ------------" 68 | echo "Running MPSTAT" 69 | 70 | mpstat -P ALL 2 >mpstat-stdout.txt 2>mpstat-stderr.txt & 71 | 72 | echo " ------------" 73 | echo "Running VMSTAT" 74 | 75 | vmstat 2 >vmstat-stdout.txt 2>vmstat-stderr.txt & 76 | 77 | echo " ------------" 78 | echo "Running executable" 79 | 80 | # Code to execute. 
81 | sh ./run.04 82 | 83 | echo " ------------" 84 | echo "Executable done" 85 | 86 | echo " ------------" 87 | echo "Killing IOSTAT" 88 | kill %1 89 | 90 | echo " ------------" 91 | echo "Killing MPSTAT" 92 | kill %2 93 | 94 | echo " ------------" 95 | echo "Killing VMSTAT" 96 | kill %3 97 | 98 | -------------------------------------------------------------------------------- /mpi/mpihello.c: -------------------------------------------------------------------------------- 1 | //Hello world program: 2 | #include <mpi.h> 3 | #include <stdio.h> 4 | 5 | int main(int argc, char** argv) { 6 | // Initialize the MPI environment 7 | MPI_Init(NULL, NULL); 8 | 9 | // Get the number of processes 10 | int world_size; 11 | MPI_Comm_size(MPI_COMM_WORLD, &world_size); 12 | 13 | // Get the rank of the process 14 | int world_rank; 15 | MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); 16 | 17 | // Get the name of the processor 18 | char processor_name[MPI_MAX_PROCESSOR_NAME]; 19 | int name_len; 20 | MPI_Get_processor_name(processor_name, &name_len); 21 | 22 | // Print off a hello world message 23 | printf("Hello world from processor %s, rank %d out of %d processors\n", 24 | processor_name, world_rank, world_size); 25 | 26 | // Finalize the MPI environment. 27 | MPI_Finalize(); 28 | } 29 | -------------------------------------------------------------------------------- /mpi/tc-mpihello.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## Translation from PBS to SLURM can be found at 4 | ## https://slurm.schedmd.com/rosetta.pdf 5 | 6 | #SBATCH --partition=normal_q # queue type 7 | #SBATCH --nodes=2 # this requests 2 nodes 8 | #SBATCH --ntasks-per-node=64 # this requests 128 cores across 2 nodes 9 | #SBATCH --time=0-0:05:00 # wall clock time [days-hh:mm:ss] 10 | #SBATCH --account=personal 11 | ##SBATCH --export=NONE # this makes sure the compute environment is clean 12 | 13 | 14 | echo "Starting hello world mpi job" 15 | 16 | ##printenv 17 | # Navigate to the directory where the submit script was executed in 18 | export RUNDIR=$SLURM_SUBMIT_DIR 19 | cd $RUNDIR 20 | 21 | module reset 22 | module load foss 23 | 24 | echo "loaded modules are:" 25 | module list 26 | 27 | echo "compiling mpihello.c" 28 | mpicc mpihello.c -o tc-mpihello.x 29 | 30 | # Store the nodes which are being used for reference 31 | echo $SLURM_JOB_NODELIST | uniq > node_list.log 32 | 33 | # Store the date and time the job is being run for reference 34 | echo "Job begin: " `date` 35 | 36 | # Run the job 37 | #OMPI_MCA_btl_openib_allow_ib=true mpirun -np 64 $EXEC 38 | mpirun -np $SLURM_NTASKS ./tc-mpihello.x 39 | 40 | echo "Job end: " `date` 41 | 42 | exit 43 | -------------------------------------------------------------------------------- /mpi4py/hello_mpi.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from mpi4py import MPI 3 | 4 | comm = MPI.COMM_WORLD 5 | 6 | id = comm.Get_rank() 7 | 8 | p = comm.Get_size() 9 | 10 | if ( id == 0 ): 11 | print("") 12 | print("HELLO_MPI:") 13 | print(" P",id,": There are ",p," MPI processes running.") 14 | 15 | print(" P",id,": Hello, world!") 16 | -------------------------------------------------------------------------------- /mpi4py/mpi4py_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #!
/bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=4 5 | #SBATCH -p dev_q 6 | # 7 | 8 | #load modules 9 | module reset 10 | module load mpi4py 11 | 12 | #run mpi4py hello world 13 | mpirun -np $SLURM_NTASKS python hello_mpi.py 14 | -------------------------------------------------------------------------------- /namd/namd_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -N1 --ntasks-per-node=32 4 | #SBATCH -t 00:30:00 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load NAMD 11 | # 12 | echo "NAMD_TINKERCLIFFS ROME: Normal beginning of execution." 13 | # 14 | # We need the following files in this directory: 15 | # 16 | # par_all27_prot_lipid.inp 17 | # ubq_wb.pdb 18 | # ubq_wb.psf 19 | # ubq_wb_eq.conf 20 | # 21 | ls -la 22 | # 23 | # Run the program with $SLURM_NTASKS MPI processes. 24 | # 25 | # charmrun runs srun -c 2 for some reason. seems like it's 26 | # better to just call srun directly 27 | #charmrun namd2 +p$SLURM_NTASKS ubq_wb_eq.conf > namd_tinkercliffs_rome.txt 28 | srun namd2 ./ubq_wb_eq.conf > namd_tinkercliffs_rome.txt 29 | if [ $? -ne 0 ]; then 30 | echo "NAMD_TINKERCLIFFS ROME: Run error." 31 | exit 1 32 | fi 33 | # 34 | echo "NAMD_TINKERCLIFFS ROME: Normal end of execution." 35 | exit 0 36 | -------------------------------------------------------------------------------- /namd/submit_tinker.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | #SBATCH -N1 --ntasks-per-node=32 3 | #SBATCH -t 00:30:00 4 | #SBATCH --account=personal 5 | #SBATCH -p normal_q 6 | 7 | 8 | # Reset modules and load NAMD 9 | module reset 10 | module load NAMD/3.0 11 | 12 | echo "NAMD_TINKERCLIFFS ROME: Normal beginning of execution." 13 | 14 | # List files in current directory 15 | ls -la 16 | 17 | srun namd3 ./tiny.namd > tiny.txt 18 | if [ $? -ne 0 ]; then 19 | echo "NAMD_TINKERCLIFFS ROME: Run error." 20 | exit 1 21 | fi 22 | 23 | echo "NAMD_TINKERCLIFFS ROME: Normal end of execution." 24 | exit 0 25 | 26 | -------------------------------------------------------------------------------- /namd/tiny.namd: -------------------------------------------------------------------------------- 1 | structure tiny.psf 2 | coordinates tiny.pdb 3 | 4 | paraTypeCharmm on 5 | parameters par_all22_prot.inp 6 | temperature 300 7 | seed 785639 8 | 9 | exclude scaled1-4 10 | 1-4scaling 1.0 11 | cutoff 12. 12 | switching on 13 | switchdist 10. 14 | pairlistdist 13.5 15 | margin 0.001 16 | 17 | timestep 1.0 18 | nonbondedFreq 1 19 | fullElectFrequency 4 20 | stepspercycle 20 21 | 22 | cellBasisVector1 16.8 0. 0. 23 | cellBasisVector2 0. 16.8 0. 24 | cellBasisVector3 0. 0 16.8 25 | cellOrigin 0. 0. 0. 26 | 27 | PME on 28 | PMEGridSizeX 20 29 | PMEGridSizeY 20 30 | PMEGridSizeZ 20 31 | 32 | outputName /tmp/tiny_out 33 | binaryoutput no 34 | wrapwater yes 35 | 36 | outputEnergies 100 37 | outputTiming 100 38 | 39 | numsteps 500 40 | 41 | -------------------------------------------------------------------------------- /netcdf/netcdf_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=1 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load netCDF 11 | module list 12 | # 13 | echo "NETCDF_TINKERCLIFFS ROME: Normal beginning of execution." 
14 | # 15 | gcc -c -I$EBROOTNETCDF/include netcdf_test.c 16 | if [ $? -ne 0 ]; then 17 | echo "NETCDF_TINKERCLIFFS ROME: Compile error!" 18 | exit 1 19 | fi 20 | # 21 | gcc -o netcdf_test netcdf_test.o -L$EBROOTNETCDF/lib64 -lnetcdf 22 | if [ $? -ne 0 ]; then 23 | echo "NETCDF_TINKERCLIFFS ROME: Load error!" 24 | exit 1 25 | fi 26 | rm netcdf_test.o 27 | # 28 | ./netcdf_test > netcdf_tinkercliffs_rome.txt 29 | if [ $? -ne 0 ]; then 30 | echo "NETCDF_TINKERCLIFFS ROME: Run error!" 31 | exit 1 32 | fi 33 | rm netcdf_test 34 | # 35 | echo "NETCDF_TINKERCLIFFS ROME: Normal end of execution." 36 | exit 0 37 | -------------------------------------------------------------------------------- /openblas/openblas_infer.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 05:00 4 | #SBATCH -N 1 --ntasks-per-node=4 5 | #SBATCH -p t4_dev_q 6 | # 7 | # 8 | module reset 9 | module load foss 10 | 11 | export OMP_NUM_THREADS=$SLURM_NTASKS 12 | # 13 | echo "OPENBLAS_INFER: Normal beginning of execution." 14 | # 15 | gcc -c -I$EBROOTOPENBLAS/include openblas_test_c.c 16 | if [ $? -ne 0 ]; then 17 | echo "OPENBLAS_INFER: Compile error!" 18 | exit 1 19 | fi 20 | # 21 | gcc -o openblas_test_c openblas_test_c.o -L$EBROOTOPENBLAS/lib -lopenblas 22 | if [ $? -ne 0 ]; then 23 | echo "OPENBLAS_INFER: Load error!" 24 | exit 1 25 | fi 26 | # 27 | ./openblas_test_c > openblas_infer.txt 28 | if [ $? -ne 0 ]; then 29 | echo "OPENBLAS_INFER: Run error!" 30 | exit 1 31 | fi 32 | # 33 | rm openblas_test_c.o 34 | rm openblas_test_c 35 | # 36 | echo "OPENBLAS_INFER: Normal end of execution." 37 | exit 0 38 | -------------------------------------------------------------------------------- /openblas/openblas_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 05:00 4 | #SBATCH -N 1 --ntasks-per-node=4 5 | #SBATCH -p dev_q 6 | # 7 | # 8 | module reset 9 | module load foss 10 | 11 | export OMP_NUM_THREADS=$SLURM_NTASKS 12 | # 13 | echo "OPENBLAS_TINKERCLIFFS: Normal beginning of execution." 14 | # 15 | gcc -c -I$OPENBLAS_DIR/include openblas_test_c.c 16 | if [ $? -ne 0 ]; then 17 | echo "OPENBLAS_TINKERCLIFFS: Compile error!" 18 | exit 1 19 | fi 20 | # 21 | gcc -o openblas_test_c openblas_test_c.o -L$OPENBLAS_LIB -lopenblas 22 | if [ $? -ne 0 ]; then 23 | echo "OPENBLAS_TINKERCLIFFS: Load error!" 24 | exit 1 25 | fi 26 | # 27 | ./openblas_test_c > openblas_tinkercliffs_rome.txt 28 | if [ $? -ne 0 ]; then 29 | echo "OPENBLAS_TINKERCLIFFS: Run error!" 30 | exit 1 31 | fi 32 | # 33 | rm openblas_test_c.o 34 | rm openblas_test_c 35 | # 36 | echo "OPENBLAS_TINKERCLIFFS: Normal end of execution." 37 | exit 0 38 | -------------------------------------------------------------------------------- /openfoam/12-foss-2023a/README.md: -------------------------------------------------------------------------------- 1 | OpenFOAM 12 2 | 3 | This is a simple incompressible Fluid example provided with the installation. The necessary data is copied and the blockMeshDict and decomposeParDict files for parallelization are provided here. This example is designed to run with 4 processes in parallel; thus the job requests 4 CPU processors (aka “cores” or “chips”). 4 | 5 | Alternatively, this example can be run in an interactive job. See the attached documentation `desktop.pdf` for running this example with the Desktop App on the owl cluster. 
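A minimal batch-mode sketch, assuming the script in this directory (`openfoam12_tinkercliffs_rome.sh`) is used as-is and that the account and partition it requests are valid for your allocation:

    sbatch openfoam12_tinkercliffs_rome.sh   # submit the 4-process batch job
    squeue -u $USER                          # confirm the job is queued or running
    tail -f output.log                       # follow blockMesh/decomposePar/solver output

The script loads the OpenFOAM module, sources $FOAM_BASH, copies the pitzDaily tutorial case, builds and decomposes the mesh, and then launches the parallel solver; `output.log` is the file named in its `#SBATCH --output` directive.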
6 | -------------------------------------------------------------------------------- /openfoam/12-foss-2023a/decomposeParDict: -------------------------------------------------------------------------------- 1 | /*---------------------------------------------------------------------------*\ 2 | | ========= | | 3 | | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox | 4 | | \\ / O peration | Version: 1.3 | 5 | | \\ / A nd | Web: http://www.openfoam.org | 6 | | \\/ M anipulation | | 7 | \*---------------------------------------------------------------------------*/ 8 | 9 | FoamFile 10 | { 11 | version 2.0; 12 | format ascii; 13 | 14 | root ""; 15 | case ""; 16 | instance ""; 17 | local ""; 18 | 19 | class dictionary; 20 | object decomposeParDict; 21 | } 22 | 23 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // 24 | 25 | 26 | numberOfSubdomains 4; 27 | 28 | method simple; 29 | 30 | simpleCoeffs 31 | { 32 | n (1 4 1); 33 | delta 0.001; 34 | } 35 | 36 | hierarchicalCoeffs 37 | { 38 | n (1 1 1); 39 | delta 0.001; 40 | order xyz; 41 | } 42 | 43 | metisCoeffs 44 | { 45 | processorWeights 46 | ( 47 | 1 48 | 1 49 | 1 50 | ); 51 | } 52 | 53 | manualCoeffs 54 | { 55 | dataFile ""; 56 | } 57 | 58 | distributed no; 59 | 60 | roots 61 | ( 62 | ); 63 | 64 | 65 | // ************************************************************************* // -------------------------------------------------------------------------------- /openfoam/12-foss-2023a/openfoam12_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=personal 3 | #SBATCH --partition=normal_q 4 | #SBATCH --nodes=1 5 | #SBATCH --ntasks-per-node=4 6 | #SBATCH --cpus-per-task=1 7 | #SBATCH --output=output.log 8 | #SBATCH --time=0-00:30:00 9 | 10 | 11 | module load OpenFOAM/12-foss-2023a 12 | 13 | # source provided bash script to find executables 14 | # FOAM_BASH is defined upon loading module 15 | . $FOAM_BASH 16 | 17 | # Copy example from official tutorial 18 | # Reference: https://southernmethodistuniversity.github.io/hpc_docs/examples/openfoam/index.html 19 | my_example="pitzDaily" 20 | # Remove if already exists 21 | [[ -d $my_example ]] && rm -Rf $my_example 22 | # Example subdirectory (e.g. incompressible simple example) 23 | my_tutorial="tutorials/incompressibleFluid/$my_example" 24 | # Copy to working directory 25 | cp -r $EBROOTOPENFOAM/OpenFOAM-12/$my_tutorial . 26 | # Copy dictionaries for parallel execution 27 | # Reference: https://develop.openfoam.com/Development/openfoam/-/blob/master/tutorials/incompressible/simpleFoam/pitzDaily/system/blockMeshDict 28 | # Reference: https://openfoamwiki.net/index.php/DecomposePar 29 | cp /home/$USER/MyTests/foam12/blockMeshDict $my_example/system/blockMeshDict 30 | cp /home/$USER/MyTests/foam12/decomposeParDict $my_example/system/decomposeParDict 31 | 32 | cd $my_example 33 | 34 | # run the example 35 | blockMesh 36 | decomposePar 37 | mpirun -np $SLURM_NTASKS foamRun -solver incompressibleFluid -parallel 38 | -------------------------------------------------------------------------------- /openfoam/README.md: -------------------------------------------------------------------------------- 1 | OpenFOAM 2 | 3 | Multiple versions of OpenFOAM are available on ARC clusters. 4 | 5 | The examples here are provided with the installation. Different versions have different working environments and input definitions. 
The parallelization and mesh-blocking setup for OpenFOAM must be well designed according to the specific version requirements. 6 | 7 | OpenFOAM has a unique way of setting up the working environment. Upon loading the module, $FOAM_BASH is defined and must be sourced. Please see OpenFOAM documentation for proper configuration. 8 | 9 | As a numerical integration ("simulation") software, the most efficient way to use this software is in batch mode. Alternatively, jobs can be run interactively. See the attached documentation `desktop.pdf` for running this example with the Desktop App on the owl cluster. 10 | -------------------------------------------------------------------------------- /openfoam/desktop.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/openfoam/desktop.pdf -------------------------------------------------------------------------------- /openfoam/v2406-foss-2023a/README.md: -------------------------------------------------------------------------------- 1 | OpenFOAM v2406 2 | 3 | This is a simple incompressible Fluid example provided with the installation. The necessary data is copied and the blockMeshDict and decomposeParDict files for parallelization are provided here. This example is designed to run with 4 processes in parallel; thus the job requests 4 CPU processors (aka “cores” or “chips”). 4 | 5 | Alternatively, this example can be run in an interactive job. See the attached documentation `desktop.pdf` for running this example with the Desktop App on the owl cluster. 6 | -------------------------------------------------------------------------------- /openfoam/v2406-foss-2023a/decomposeParDict: -------------------------------------------------------------------------------- 1 | /*---------------------------------------------------------------------------*\ 2 | | ========= | | 3 | | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox | 4 | | \\ / O peration | Version: 1.3 | 5 | | \\ / A nd | Web: http://www.openfoam.org | 6 | | \\/ M anipulation | | 7 | \*---------------------------------------------------------------------------*/ 8 | 9 | FoamFile 10 | { 11 | version 2.0; 12 | format ascii; 13 | 14 | root ""; 15 | case ""; 16 | instance ""; 17 | local ""; 18 | 19 | class dictionary; 20 | object decomposeParDict; 21 | } 22 | 23 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // 24 | 25 | 26 | numberOfSubdomains 4; 27 | 28 | method simple; 29 | 30 | simpleCoeffs 31 | { 32 | n (1 4 1); 33 | delta 0.001; 34 | } 35 | 36 | hierarchicalCoeffs 37 | { 38 | n (1 1 1); 39 | delta 0.001; 40 | order xyz; 41 | } 42 | 43 | metisCoeffs 44 | { 45 | processorWeights 46 | ( 47 | 1 48 | 1 49 | 1 50 | ); 51 | } 52 | 53 | manualCoeffs 54 | { 55 | dataFile ""; 56 | } 57 | 58 | distributed no; 59 | 60 | roots 61 | ( 62 | ); 63 | 64 | 65 | // ************************************************************************* // -------------------------------------------------------------------------------- /openfoam/v2406-foss-2023a/openfoam_ex.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=personal 3 | #SBATCH --partition=normal_q 4 | #SBATCH --nodes=1 5 | #SBATCH --ntasks-per-node=4 6 | #SBATCH --cpus-per-task=1 7 | #SBATCH --output=output.log 8 | #SBATCH --time=0-00:30:00 9 | 10 | 11 | module load OpenFOAM/v2406-foss-2023a 12 | 13 | # source provided
bash script to find executables 14 | # FOAM_BASH is defined upon loading module 15 | . $FOAM_BASH 16 | 17 | # Copy example from official tutorial 18 | # Reference: https://southernmethodistuniversity.github.io/hpc_docs/examples/openfoam/index.html 19 | my_example="pitzDaily" 20 | # Remove if already exists 21 | [[ -d $my_example ]] && rm -Rf $my_example 22 | # Example subdirectory (e.g. incompressible simple example) 23 | my_tutorial="tutorials/incompressible/simpleFoam/$my_example" 24 | # Copy to working directory 25 | cp -r $EBROOTOPENFOAM/OpenFOAM-v2406/$my_tutorial . 26 | # Copy dictionaries for parallel execution 27 | # Reference: https://develop.openfoam.com/Development/openfoam/-/blob/master/tutorials/incompressible/simpleFoam/pitzDaily/system/blockMeshDict 28 | # Reference: https://openfoamwiki.net/index.php/DecomposePar 29 | cp /home/$USER/MyTests/OpenFOAM-v2406/blockMeshDict $my_example/system/blockMeshDict 30 | cp /home/$USER/MyTests/OpenFOAM-v2406/decomposeParDict $my_example/system/decomposeParDict 31 | 32 | cd $my_example 33 | 34 | # run the example 35 | blockMesh 36 | decomposePar 37 | mpirun -np 4 simpleFoam -parallel 38 | reconstructPar -verbose 39 | -------------------------------------------------------------------------------- /openmm/openmm_infer.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=1 --gres=gpu:1 5 | #SBATCH -p t4_dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load OpenMM/7.5.0-fosscuda-2020b-Python-3.8.6 11 | # 12 | 13 | 14 | #Run OpenMM's HelloArgonInC example 15 | # This example and the Makefile were copied from OpenMM's 16 | # examples directory $EBROOTOPENMM/examples 17 | # Note that we adjusted the value of OpenMM_INSTALL_DIR in the 18 | # Makefile to point to the module's directory $(EBROOTOPENMM) 19 | make HelloArgonInC 20 | ./HelloArgonInC 21 | -------------------------------------------------------------------------------- /openmm/openmm_tinkercliffs_a100.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=1 --gres=gpu:1 5 | #SBATCH -p a100_dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load OpenMM/7.5.1-fosscuda-2020b 11 | # 12 | 13 | 14 | #Run OpenMM's HelloArgonInC example 15 | # This example and the Makefile were copied from OpenMM's 16 | # examples directory $EBROOTOPENMM/examples 17 | # Note that we adjusted the value of OpenMM_INSTALL_DIR in the 18 | # Makefile to point to the module's directory $(EBROOTOPENMM) 19 | make HelloArgonInC 20 | ./HelloArgonInC 21 | -------------------------------------------------------------------------------- /openmolcas/openmolcas_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # 3 | #SBATCH -t 00:05:00 4 | #SBATCH -N1 --ntasks-per-node=8 5 | #SBATCH -p dev_q 6 | # 7 | 8 | # 9 | module reset 10 | module load OpenMolcas 11 | module list 12 | 13 | # 14 | echo "OPENMOLCAS_TINKERCLIFFS ROME: Normal beginning of execution." 15 | # 16 | # Run. 17 | # 18 | pymolcas water.input -f 19 | if [ $? -ne 0 ]; then 20 | echo "OPENMOLCAS_TINKERCLIFFS ROME: Run error!" 21 | exit 1 22 | fi 23 | # 24 | echo "OPENMOLCAS_TINKERCLIFFS ROME: Normal end of execution." 
25 | exit 0 26 | -------------------------------------------------------------------------------- /openmolcas/water.input: -------------------------------------------------------------------------------- 1 | &GATEWAY 2 | coord=water.xyz 3 | basis=sto-3g 4 | &SEWARD 5 | &SCF 6 | -------------------------------------------------------------------------------- /openmolcas/water.xyz: -------------------------------------------------------------------------------- 1 | 3 2 | Angstrom 3 | O 0.000000 0.000000 0.000000 4 | H 0.758602 0.000000 0.504284 5 | H 0.758602 0.000000 -0.504284 6 | -------------------------------------------------------------------------------- /openmpi/vector_add_mpi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/openmpi/vector_add_mpi -------------------------------------------------------------------------------- /openmpi/vector_add_mpi.cu: -------------------------------------------------------------------------------- 1 | #include <stdio.h> 2 | #include <stdlib.h> 3 | #include <mpi.h> 4 | 5 | #define N 1024 6 | 7 | __global__ void vector_add(float *a, float *b, float *c, int n) { 8 | int i = blockIdx.x * blockDim.x + threadIdx.x; 9 | if (i < n) 10 | c[i] = a[i] + b[i]; 11 | } 12 | 13 | void check_cuda(cudaError_t err, const char *msg) { 14 | if (err != cudaSuccess) { 15 | fprintf(stderr, "CUDA Error: %s: %s\n", msg, cudaGetErrorString(err)); 16 | MPI_Abort(MPI_COMM_WORLD, -1); 17 | } 18 | } 19 | 20 | int main(int argc, char **argv) { 21 | MPI_Init(&argc, &argv); 22 | int rank, size; 23 | MPI_Comm_rank(MPI_COMM_WORLD, &rank); 24 | MPI_Comm_size(MPI_COMM_WORLD, &size); 25 | 26 | // Set device by rank 27 | check_cuda(cudaSetDevice(rank), "cudaSetDevice"); 28 | 29 | float *a, *b, *c; 30 | float *d_a, *d_b, *d_c; 31 | 32 | // Allocate host memory 33 | a = (float*)malloc(N * sizeof(float)); 34 | b = (float*)malloc(N * sizeof(float)); 35 | c = (float*)malloc(N * sizeof(float)); 36 | 37 | // Initialize input vectors 38 | for (int i = 0; i < N; ++i) { 39 | a[i] = rank + 1.0f; 40 | b[i] = i * 1.0f; 41 | } 42 | 43 | // Allocate device memory 44 | check_cuda(cudaMalloc(&d_a, N * sizeof(float)), "cudaMalloc d_a"); 45 | check_cuda(cudaMalloc(&d_b, N * sizeof(float)), "cudaMalloc d_b"); 46 | check_cuda(cudaMalloc(&d_c, N * sizeof(float)), "cudaMalloc d_c"); 47 | 48 | // Copy to device 49 | check_cuda(cudaMemcpy(d_a, a, N * sizeof(float), cudaMemcpyHostToDevice), "Memcpy a"); 50 | check_cuda(cudaMemcpy(d_b, b, N * sizeof(float), cudaMemcpyHostToDevice), "Memcpy b"); 51 | 52 | // Launch kernel 53 | vector_add<<<(N + 255)/256, 256>>>(d_a, d_b, d_c, N); 54 | 55 | // Copy result back 56 | check_cuda(cudaMemcpy(c, d_c, N * sizeof(float), cudaMemcpyDeviceToHost), "Memcpy c"); 57 | 58 | // Print a few elements from each rank 59 | printf("Rank %d: c[0]=%f, c[N-1]=%f\n", rank, c[0], c[N-1]); 60 | 61 | // Clean up 62 | free(a); free(b); free(c); 63 | cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); 64 | 65 | MPI_Finalize(); 66 | return 0; 67 | } 68 | -------------------------------------------------------------------------------- /p7zip/example.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ######################## start of slurm options ######################################### 3 | ######################################## 4 | # Job Identification & Runtime # 5 | ######################################## 6 | #SBATCH
--job-name=p7zip-example # Job name 7 | #SBATCH --account=personal # Account name 8 | #SBATCH --time=01:00:00 # Time limit (HH:MM:SS) 9 | #SBATCH --partition=normal_q # Partition name 10 | #SBATCH --output=job_output_%j.out # Standard output file (%j = job ID) 11 | #SBATCH --error=job_error_%j.err # Standard error file 12 | ####################################### 13 | # CPU and Node Configuration # 14 | ####################################### 15 | #SBATCH --nodes=1 # Number of nodes 16 | #SBATCH --ntasks-per-node=1 # Number of tasks (processes) per node 17 | #SBATCH --cpus-per-task=8 # Number of CPUs per task (threading) 18 | #SBATCH --mem=64G # Memory per node 19 | ########################## end of slurm options ######################################### 20 | 21 | # Description: This is an example slurm batch job script to zip and unzip example text file using p7zip 22 | # Usage: To run this script, run the command 'sbatch example.slurm' 23 | 24 | ######################################################################################### 25 | 26 | # Load required module 27 | module reset 28 | module load p7zip 29 | 30 | # Create a sample folder and file for demonstration 31 | mkdir -p example 32 | echo "This is a sample file." > example/sample.txt 33 | 34 | # Compress the folder into a .7z archive 35 | 7z a example.7z example/ 36 | 37 | # Extract the archive to verify contents 38 | 7z x example.7z -oexample_unzipped/ 39 | 40 | # List contents of the unzipped directory 41 | ls -R example_unzipped/ 42 | -------------------------------------------------------------------------------- /parallel/20240722/README.md: -------------------------------------------------------------------------------- 1 | # GNU parallel 2 | 3 | Version of parallel: 20240722 4 | 5 | ## Codes 6 | 7 | 8 | ------------------------------------------ 9 | ------------------------------------------ 10 | ### code01 11 | 12 | This is a parallel code. 13 | 14 | modules: 15 | 16 | 1. parallel/20240722-GCCcore-13.3.0 17 | 2. R/4.4.2-gfbf-2024a 18 | 19 | Comes from: /projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-gnu-parallel/tc/test01 20 | 21 | Cluster: TC. 22 | 23 | Partition: normal_q 24 | 25 | Compute node: tc285 26 | 27 | The example comes directly from the code already in 28 | this repo. 29 | We just updated the modules. 30 | 31 | 32 | ---------------------- 33 | To run code. 34 | 35 | Type: 36 | 37 | sbatch parallel_mcpi.sh 38 | 39 | 40 | The output file is in the slurm-generated output file. 41 | 42 | **There could be very minor discrepancies due to rounding.** 43 | 44 | 45 | -------------------------------------------------------------------------------- /parallel/20240722/code01/gnu-parallel.277.err: -------------------------------------------------------------------------------- 1 | Resetting modules to system default. Reseting $MODULEPATH back to system default. All extra directories will be removed from $MODULEPATH. 
2 | Setting R_LIBS_USER to: /home/ckuhlman/R/tinkercliffs-rome/4.4.2 3 | -------------------------------------------------------------------------------- /parallel/20240722/code01/gnu-parallel.277.out: -------------------------------------------------------------------------------- 1 | Fri May 16 04:23:31 PM EDT 2025: Starting mcpi 2 | ran mcpi 32 times with results: 3 | mean = 3.141506 4 | min = 3.140576 5 | max = 3.142883 6 | error = 2.747924e-05 7 | Fri May 16 04:24:30 PM EDT 2025: Finished mcpi 8 | -------------------------------------------------------------------------------- /parallel/20240722/code01/mcpi_collect.R: -------------------------------------------------------------------------------- 1 | ## find the spam saved outputs in the CWD 2 | files <- list.files("./", pattern="mcpi_[0-9]*.RData") 3 | 4 | ## empty initialization 5 | pi.sum = 0.0 6 | pi.max = 0.0 7 | pi.min = 0.0 8 | 9 | ## loop over found RData files 10 | for (i in 1:length(files)) 11 | { 12 | ## read file i and print to screen 13 | load(files[i]) 14 | 15 | #add to sum 16 | pi.sum = pi.sum + pi.approx 17 | 18 | #set min/max 19 | if (i == 1) { 20 | pi.max = pi.approx 21 | pi.min = pi.approx 22 | } 23 | if (pi.max < pi.approx) { 24 | pi.max = pi.approx 25 | } 26 | if (pi.min > pi.approx) { 27 | pi.min = pi.approx 28 | } 29 | } 30 | 31 | pi.mean = pi.sum / length(files) 32 | pi.err = abs(pi - pi.mean)/pi 33 | 34 | cat('ran mcpi ',length(files),' times with results:\n') 35 | cat(' mean = ',pi.mean,'\n') 36 | cat(' min = ',pi.min,'\n') 37 | cat(' max = ',pi.max,'\n') 38 | cat(' error = ',pi.err,'\n') 39 | 40 | -------------------------------------------------------------------------------- /parallel/20240722/code01/mcpi_run.R: -------------------------------------------------------------------------------- 1 | #Script to calculate pi using Monte Carlo 2 | 3 | ## read in the command line arguments 4 | ## run with: R CMD BATCH '--args seed=0 reps=5' spam_mc.R 5 | args <- commandArgs(TRUE) 6 | if(length(args) > 0) 7 | for(i in 1:length(args)) 8 | eval(parse(text=args[[i]])) 9 | 10 | ## print seed 11 | cat("seed is ", seed, "\n", sep="") 12 | set.seed(seed) 13 | 14 | 15 | #Function to approximate pi 16 | mcpi <- function(n.pts) { 17 | #generate n.pts (x,y) points in the unit square 18 | m = matrix(runif(2*n.pts),n.pts,2) 19 | #determine if they are in the unit circle 20 | #in.ucir <- function(x) { as.integer(norm(as.matrix(x),"F") <= 1) } 21 | in.ucir <- function(x) { as.integer((x[1]^2 + x[2]^2) <= 1) } 22 | in.cir = apply(m, 1, in.ucir ) 23 | #return the proportion of points in the unit circle * 4 24 | return (4*sum(in.cir)/n.pts) 25 | } 26 | 27 | #problem size (number of points) 28 | n.pts <- 5000000 29 | 30 | cat('running serial version with n.pts = ',n.pts,'\n') 31 | 32 | tm.st = Sys.time() 33 | 34 | pi.approx <- mcpi(n.pts) 35 | cat(' pi estimate = ',pi.approx,'\n') 36 | 37 | pi.err = abs(pi - pi.approx)/pi 38 | cat(' relative error = ',pi.err,'\n') 39 | 40 | tm.tot = as.numeric(Sys.time() - tm.st, units="secs") 41 | cat(' time used = ',tm.tot,'\n') 42 | 43 | ## save results to a file 44 | save(pi.approx, file=paste("mcpi_", seed, ".RData", sep="")) 45 | -------------------------------------------------------------------------------- /parallel/20240722/code01/parallel_mcpi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Slurm submission script for running a batch of R scripts in parallel 4 | #using GNU parallel 5 | 6 | # Job specs. 
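# Note: --account=arcadm below was used for the original test runs on ARC systems; replace it with your own Slurm account before submitting.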
7 | #SBATCH --job-name=gnu-parallel 8 | #SBATCH --account=arcadm 9 | #SBATCH --time=01:00:00 10 | #SBATCH --partition=normal_q 11 | #SBATCH --output=gnu-parallel.%j.out 12 | #SBATCH --error=gnu-parallel.%j.err 13 | 14 | #SBATCH --nodes=1 15 | #SBATCH --ntasks-per-node=16 16 | #SBATCH --cpus-per-task=1 17 | 18 | 19 | # Load modules. 20 | module reset 21 | module load parallel/20240722-GCCcore-13.3.0 22 | module load R/4.4.2-gfbf-2024a 23 | 24 | # #set r libraries (if you need custom libraries) 25 | # export R_LIBS="$HOME/R/4.0.2-foss-2020a/tinkercliffs/lib:$R_LIBS" 26 | 27 | # Number of r processes to run in all. 28 | ncopies=32 29 | 30 | # Processes to run at a time. 31 | nparallel=$SLURM_NTASKS 32 | 33 | echo "$( date ): Starting mcpi" 34 | 35 | #parallel will be doing something like the following bash loop, 36 | #but with load balancing and other nice features 37 | # for i in $( seq 1 $ncopies ); do 38 | # Rscript mcpi_run.R seed=$i > mcpi_run_${i}.Rout & 39 | # done 40 | # wait 41 | 42 | #parallel version 43 | seq 1 $ncopies | parallel -j$nparallel "Rscript mcpi_run.R seed={} > mcpi_run_{}.Rout" 44 | 45 | #Collect results and print some statistics 46 | Rscript mcpi_collect.R 47 | 48 | echo "$( date ): Finished mcpi" 49 | -------------------------------------------------------------------------------- /parallel/mcpi_collect.R: -------------------------------------------------------------------------------- 1 | ## find the spam saved outputs in the CWD 2 | files <- list.files("./", pattern="mcpi_[0-9]*.RData") 3 | 4 | ## empty initialization 5 | pi.sum = 0.0 6 | pi.max = 0.0 7 | pi.min = 0.0 8 | 9 | ## loop over found RData files 10 | for (i in 1:length(files)) 11 | { 12 | ## read file i and print to screen 13 | load(files[i]) 14 | 15 | #add to sum 16 | pi.sum = pi.sum + pi.approx 17 | 18 | #set min/max 19 | if (i == 1) { 20 | pi.max = pi.approx 21 | pi.min = pi.approx 22 | } 23 | if (pi.max < pi.approx) { 24 | pi.max = pi.approx 25 | } 26 | if (pi.min > pi.approx) { 27 | pi.min = pi.approx 28 | } 29 | } 30 | 31 | pi.mean = pi.sum / length(files) 32 | pi.err = abs(pi - pi.mean)/pi 33 | 34 | cat('ran mcpi ',length(files),' times with results:\n') 35 | cat(' mean = ',pi.mean,'\n') 36 | cat(' min = ',pi.min,'\n') 37 | cat(' max = ',pi.max,'\n') 38 | cat(' error = ',pi.err,'\n') 39 | 40 | -------------------------------------------------------------------------------- /parallel/mcpi_run.R: -------------------------------------------------------------------------------- 1 | #Script to calculate pi using Monte Carlo 2 | 3 | ## read in the command line arguments 4 | ## run with: R CMD BATCH '--args seed=0 reps=5' spam_mc.R 5 | args <- commandArgs(TRUE) 6 | if(length(args) > 0) 7 | for(i in 1:length(args)) 8 | eval(parse(text=args[[i]])) 9 | 10 | ## print seed 11 | cat("seed is ", seed, "\n", sep="") 12 | set.seed(seed) 13 | 14 | 15 | #Function to approximate pi 16 | mcpi <- function(n.pts) { 17 | #generate n.pts (x,y) points in the unit square 18 | m = matrix(runif(2*n.pts),n.pts,2) 19 | #determine if they are in the unit circle 20 | #in.ucir <- function(x) { as.integer(norm(as.matrix(x),"F") <= 1) } 21 | in.ucir <- function(x) { as.integer((x[1]^2 + x[2]^2) <= 1) } 22 | in.cir = apply(m, 1, in.ucir ) 23 | #return the proportion of points in the unit circle * 4 24 | return (4*sum(in.cir)/n.pts) 25 | } 26 | 27 | #problem size (number of points) 28 | n.pts <- 5000000 29 | 30 | cat('running serial version with n.pts = ',n.pts,'\n') 31 | 32 | tm.st = Sys.time() 33 | 34 | pi.approx <- mcpi(n.pts) 35 
| cat(' pi estimate = ',pi.approx,'\n') 36 | 37 | pi.err = abs(pi - pi.approx)/pi 38 | cat(' relative error = ',pi.err,'\n') 39 | 40 | tm.tot = as.numeric(Sys.time() - tm.st, units="secs") 41 | cat(' time used = ',tm.tot,'\n') 42 | 43 | ## save results to a file 44 | save(pi.approx, file=paste("mcpi_", seed, ".RData", sep="")) 45 | -------------------------------------------------------------------------------- /parallel/parallel_mcpi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Slurm submission script for running a batch of R scripts in parallel 3 | #using GNU parallel 4 | 5 | #SBATCH -N 1 6 | #SBATCH --ntasks-per-node=16 7 | #SBATCH -t 0:05:00 8 | #SBATCH -p normal_q 9 | 10 | 11 | #load modules 12 | module reset 13 | module load parallel/20200522-GCCcore-9.3.0 14 | module load R/4.0.2-foss-2020a 15 | 16 | # #set r libraries (if you need custom libraries) 17 | # export R_LIBS="$HOME/R/4.0.2-foss-2020a/tinkercliffs/lib:$R_LIBS" 18 | 19 | #number of r processes to run in all 20 | ncopies=32 21 | 22 | #processes to run at a time 23 | nparallel=$SLURM_NTASKS 24 | 25 | echo "$( date ): Starting mcpi" 26 | 27 | #parallel will be doing something like the following bash loop, 28 | #but with load balancing and other nice features 29 | # for i in $( seq 1 $ncopies ); do 30 | # Rscript mcpi_run.R seed=$i > mcpi_run_${i}.Rout & 31 | # done 32 | # wait 33 | 34 | #parallel version 35 | seq 1 $ncopies | parallel -j$nparallel "Rscript mcpi_run.R seed={} > mcpi_run_{}.Rout" 36 | 37 | #Collect results and print some statistics 38 | Rscript mcpi_collect.R 39 | 40 | echo "$( date ): Finished mcpi" 41 | -------------------------------------------------------------------------------- /paraview/pvcone.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdvancedResearchComputing/examples/6ee2bdbf26cf1811d2436b355717bcf046e8e0a7/paraview/pvcone.png -------------------------------------------------------------------------------- /paraview/pvcone.py: -------------------------------------------------------------------------------- 1 | ## pvcone.py 2 | from paraview.simple import * 3 | Cone() 4 | Show() 5 | SaveScreenshot("pvcone.png") 6 | -------------------------------------------------------------------------------- /paraview/pvcone.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #SBATCH -p dev_q #l40s_dev_q # partition to run in 4 | #SBATCH -N 1 # Total number of nodes requested 5 | #SBATCH -n 1 # Total number of processes to run 6 | ##SBATCH --gres=gpu:1 # Total number of gpus requested 7 | #SBATCH -t 00:10:00 # Run time (hh:mm:ss) - 10 mins 8 | #SBATCH -J pvbatch 9 | #SBATCH -o pvbatch.out 10 | #SBATCH -e pvbatch.err 11 | 12 | #module load ParaView/5.13.2-foss-2023a-CUDA-12.1.1 13 | module load ParaView/5.11.2-foss-2023a 14 | 15 | # You might need to change your working directory, to the path of your data and Python script. Uncomment the following command and change the path accordingly. 
16 | 17 | #cd $SCRATCH/data 18 | 19 | # run pvbatch one or more times (sequentially) 20 | 21 | mpirun pvbatch pvcone.py 22 | -------------------------------------------------------------------------------- /python/miniconda/example.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ######################## start of slurm options ######################################### 3 | ######################################## 4 | # Job Identification & Runtime # 5 | ######################################## 6 | #SBATCH --job-name=miniconda-example # Job name 7 | #SBATCH --account=personal # Account name 8 | #SBATCH --time=01:00:00 # Time limit (HH:MM:SS) 9 | #SBATCH --partition=normal_q # Partition name 10 | #SBATCH --output=job_output_%j.out # Standard output file (%j = job ID) 11 | #SBATCH --error=job_error_%j.err # Standard error file 12 | ####################################### 13 | # CPU and Node Configuration # 14 | ####################################### 15 | #SBATCH --nodes=1 # Number of nodes 16 | #SBATCH --ntasks-per-node=1 # Number of tasks (processes) per node 17 | #SBATCH --cpus-per-task=8 # Number of CPUs per task (threading) 18 | #SBATCH --mem=64G # Memory per node 19 | ########################## end of slurm options ######################################### 20 | 21 | # Description: SLURM job script to create conda env, install numpy, and run Python code 22 | # Usage: sbatch example.slurm 23 | 24 | ######################################################################################### 25 | # Resets module system (recommended) 26 | module reset 27 | 28 | # Load Miniconda 29 | module load Miniconda3 30 | 31 | # Define environment name 32 | ENV_NAME="numpy_env" 33 | 34 | # Create environment if not already present 35 | if ! 
conda env list | grep -q "$ENV_NAME"; then 36 | echo "Creating conda environment: $ENV_NAME" 37 | conda create -y -n "$ENV_NAME" python=3.10 38 | fi 39 | 40 | # Activate environment 41 | source activate "$ENV_NAME" 42 | 43 | # Check if conda environment is loaded correctly 44 | echo "You are using python of this conda environment:" 45 | which python 46 | 47 | # Install required package 48 | pip install --upgrade pip 49 | pip install numpy 50 | 51 | # Run Python script 52 | python numpy_compute.py 53 | -------------------------------------------------------------------------------- /python/miniconda/numpy_compute.py: -------------------------------------------------------------------------------- 1 | # numpy_compute.py 2 | 3 | import numpy as np 4 | 5 | # Create two random matrices 6 | A = np.random.rand(1000, 1000) 7 | B = np.random.rand(1000, 1000) 8 | 9 | # Perform matrix multiplication 10 | C = np.matmul(A, B) 11 | 12 | # Print summary statistics 13 | print("Matrix multiplication complete.") 14 | print(f"Result matrix shape: {C.shape}") 15 | print(f"Mean of result: {C.mean():.4f}, Std Dev: {C.std():.4f}") 16 | -------------------------------------------------------------------------------- /python/seaborn/example.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ######################## start of slurm options ######################################### 3 | ######################################## 4 | # Job Identification & Runtime # 5 | ######################################## 6 | #SBATCH --job-name=seaborn-example # Job name 7 | #SBATCH --account=personal # Account name 8 | #SBATCH --time=01:00:00 # Time limit (HH:MM:SS) 9 | #SBATCH --partition=normal_q # Partition name 10 | #SBATCH --output=job_output_%j.out # Standard output file (%j = job ID) 11 | #SBATCH --error=job_error_%j.err # Standard error file 12 | ####################################### 13 | # CPU and Node Configuration # 14 | ####################################### 15 | #SBATCH --nodes=1 # Number of nodes 16 | #SBATCH --ntasks-per-node=1 # Number of tasks (processes) per node 17 | #SBATCH --cpus-per-task=8 # Number of CPUs per task (threading) 18 | #SBATCH --mem=64G # Memory per node 19 | ########################## end of slurm options ######################################### 20 | 21 | # Description: This is an example slurm batch job script to run seaborn in python 22 | # Usage: To run this script, run the command 'sbatch example.slurm' 23 | 24 | ######################################################################################### 25 | # Resets module system (recommended) 26 | module reset 27 | 28 | # Load Seaborn 29 | module load Seaborn 30 | 31 | # Run the Python script 32 | python seaborn_plot.py 33 | -------------------------------------------------------------------------------- /python/seaborn/seaborn_plot.py: -------------------------------------------------------------------------------- 1 | # seaborn_plot.py 2 | 3 | import seaborn as sns 4 | import matplotlib.pyplot as plt 5 | 6 | # Load example dataset 7 | df = sns.load_dataset("penguins") 8 | 9 | # Create a simple seaborn scatter plot 10 | sns.set(style="whitegrid") 11 | plot = sns.scatterplot(data=df, x="bill_length_mm", y="bill_depth_mm", hue="species") 12 | 13 | # Save the plot 14 | plt.title("Penguin Bill Dimensions") 15 | plt.savefig("penguin_plot.png") 16 | print("Plot saved as penguin_plot.png") 17 | -------------------------------------------------------------------------------- 
/python/statsmodels/0.14.4/code01/previous-execution-results/sm.01.out: -------------------------------------------------------------------------------- 1 | OLS Regression Results 2 | ============================================================================== 3 | Dep. Variable: y R-squared: 1.000 4 | Model: OLS Adj. R-squared: 1.000 5 | Method: Least Squares F-statistic: 4.020e+06 6 | Date: Fri, 16 May 2025 Prob (F-statistic): 2.83e-239 7 | Time: 14:49:08 Log-Likelihood: -146.51 8 | No. Observations: 100 AIC: 299.0 9 | Df Residuals: 97 BIC: 306.8 10 | Df Model: 2 11 | Covariance Type: nonrobust 12 | ============================================================================== 13 | coef std err t P>|t| [0.025 0.975] 14 | ------------------------------------------------------------------------------ 15 | const 1.3423 0.313 4.292 0.000 0.722 1.963 16 | x1 -0.0402 0.145 -0.278 0.781 -0.327 0.247 17 | x2 10.0103 0.014 715.745 0.000 9.982 10.038 18 | ============================================================================== 19 | Omnibus: 2.042 Durbin-Watson: 2.274 20 | Prob(Omnibus): 0.360 Jarque-Bera (JB): 1.875 21 | Skew: 0.234 Prob(JB): 0.392 22 | Kurtosis: 2.519 Cond. No. 144. 23 | ============================================================================== 24 | 25 | Notes: 26 | [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. 27 | 28 | 29 | Parameters: 30 | [ 1.34233516 -0.04024948 10.01025357] 31 | 32 | 33 | R2: 34 | 0.9999879365025871 35 | -------------------------------------------------------------------------------- /python/statsmodels/0.14.4/code01/previous-execution-results/statsmodels.273.err: -------------------------------------------------------------------------------- 1 | Resetting modules to system default. Reseting $MODULEPATH back to system default. All extra directories will be removed from $MODULEPATH. 
2 | -------------------------------------------------------------------------------- /python/statsmodels/0.14.4/code01/previous-execution-results/statsmodels.273.out: -------------------------------------------------------------------------------- 1 | You are using python of this conda environment: 2 | /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/bin/python 3 | Requirement already satisfied: pip in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (25.1.1) 4 | Requirement already satisfied: statsmodels==0.14.4 in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (0.14.4) 5 | Requirement already satisfied: numpy<3,>=1.22.3 in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (from statsmodels==0.14.4) (2.2.5) 6 | Requirement already satisfied: scipy!=1.9.2,>=1.8 in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (from statsmodels==0.14.4) (1.15.3) 7 | Requirement already satisfied: pandas!=2.1.0,>=1.4 in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (from statsmodels==0.14.4) (2.2.3) 8 | Requirement already satisfied: patsy>=0.5.6 in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (from statsmodels==0.14.4) (1.0.1) 9 | Requirement already satisfied: packaging>=21.3 in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (from statsmodels==0.14.4) (25.0) 10 | Requirement already satisfied: python-dateutil>=2.8.2 in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (from pandas!=2.1.0,>=1.4->statsmodels==0.14.4) (2.9.0.post0) 11 | Requirement already satisfied: pytz>=2020.1 in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (from pandas!=2.1.0,>=1.4->statsmodels==0.14.4) (2025.2) 12 | Requirement already satisfied: tzdata>=2022.7 in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (from pandas!=2.1.0,>=1.4->statsmodels==0.14.4) (2025.2) 13 | Requirement already satisfied: six>=1.5 in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (from python-dateutil>=2.8.2->pandas!=2.1.0,>=1.4->statsmodels==0.14.4) (1.17.0) 14 | Requirement already satisfied: numpy in /apps/common/software/Miniconda3/24.7.1-0/envs/statsmodels_env/lib/python3.10/site-packages (2.2.5) 15 | execution time (s): 0.008250236511230469 16 | -------------------------------------------------------------------------------- /python/statsmodels/0.14.4/code01/sm.01.out.valid: -------------------------------------------------------------------------------- 1 | OLS Regression Results 2 | ============================================================================== 3 | Dep. Variable: y R-squared: 1.000 4 | Model: OLS Adj. R-squared: 1.000 5 | Method: Least Squares F-statistic: 4.020e+06 6 | Date: Fri, 16 May 2025 Prob (F-statistic): 2.83e-239 7 | Time: 14:46:22 Log-Likelihood: -146.51 8 | No. 
Observations: 100 AIC: 299.0 9 | Df Residuals: 97 BIC: 306.8 10 | Df Model: 2 11 | Covariance Type: nonrobust 12 | ============================================================================== 13 | coef std err t P>|t| [0.025 0.975] 14 | ------------------------------------------------------------------------------ 15 | const 1.3423 0.313 4.292 0.000 0.722 1.963 16 | x1 -0.0402 0.145 -0.278 0.781 -0.327 0.247 17 | x2 10.0103 0.014 715.745 0.000 9.982 10.038 18 | ============================================================================== 19 | Omnibus: 2.042 Durbin-Watson: 2.274 20 | Prob(Omnibus): 0.360 Jarque-Bera (JB): 1.875 21 | Skew: 0.234 Prob(JB): 0.392 22 | Kurtosis: 2.519 Cond. No. 144. 23 | ============================================================================== 24 | 25 | Notes: 26 | [1] Standard Errors assume that the covariance matrix of the errors is correctly specified. 27 | 28 | 29 | Parameters: 30 | [ 1.34233516 -0.04024948 10.01025357] 31 | 32 | 33 | R2: 34 | 0.9999879365025871 35 | -------------------------------------------------------------------------------- /python/statsmodels/0.14.4/code01/sm.01.py: -------------------------------------------------------------------------------- 1 | # import matplotlib.pyplot as plt 2 | # import pandas as pd 3 | import numpy as np 4 | import statsmodels.api as sm 5 | import argparse 6 | import time 7 | 8 | 9 | begin_time = time.time() 10 | 11 | 12 | # Define the parser. 13 | parser = argparse.ArgumentParser(description='For output filename.') 14 | 15 | # Declare an argument (`--algo`), saying that the 16 | # corresponding value should be stored in the `algo` 17 | # field, and using a default value if the argument 18 | # isn't given 19 | parser.add_argument('--outfile', action="store", dest='outfile', type=str, default=0) 20 | 21 | # Now, parse the command line arguments and store the 22 | # values in the `args` variable 23 | args = parser.parse_args() 24 | 25 | # Individual arguments can be accessed as attributes. 26 | outfile=args.outfile 27 | 28 | # Seed for reproducibility. 29 | np.random.seed(9876789) 30 | 31 | 32 | # Generate the data. 33 | nsample = 100 34 | x = np.linspace(0, 10, 100) 35 | X = np.column_stack((x, x ** 2)) 36 | beta = np.array([1, 0.1, 10]) 37 | e = np.random.normal(size=nsample) 38 | 39 | # Add the intercept for the model. 40 | X = sm.add_constant(X) 41 | y = np.dot(X, beta) + e 42 | 43 | # Open output file. 44 | # Write to file. 45 | fhout=open(outfile,"w") 46 | 47 | 48 | # Compute model. 49 | model = sm.OLS(y, X) 50 | results = model.fit() 51 | ## print(results.summary()) 52 | fhout.write( str( results.summary() ) ) 53 | fhout.write("\n") 54 | fhout.write(" \n") 55 | fhout.write(" \n") 56 | 57 | 58 | # Output results. 59 | ## print("Parameters: ", results.params) 60 | ## print("R2: ", results.rsquared) 61 | fhout.write("Parameters: \n") 62 | fhout.write( str(results.params) ) 63 | fhout.write("\n") 64 | fhout.write(" \n") 65 | fhout.write(" \n") 66 | fhout.write("R2: \n") 67 | fhout.write( str(results.rsquared) ) 68 | fhout.write("\n") 69 | 70 | # Close output file. 
71 | fhout.close() 72 | 73 | 74 | end_time = time.time() 75 | 76 | delta_time = end_time - begin_time 77 | 78 | print(" execution time (s): ",delta_time) 79 | 80 | -------------------------------------------------------------------------------- /python/statsmodels/README.md: -------------------------------------------------------------------------------- 1 | # Python Package statsmodels Serial Code 2 | 3 | Version of python statsmodels: 0.14.4 4 | 5 | ## Codes 6 | 7 | 8 | ------------------------------------------ 9 | ------------------------------------------ 10 | ### code01 11 | 12 | This is a serial code. 13 | 14 | Comes from: /projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-statsmodels/tc/test01 15 | 16 | Cluster: TC. 17 | 18 | Partition: normal_q 19 | 20 | Compute node: tc285 21 | 22 | 23 | ---------------------- 24 | To set up to run. 25 | 26 | Setup is in the sbatch slurm script. 27 | A virtual environment is created. 28 | 29 | 30 | ---------------------- 31 | To run code. 32 | 33 | 34 | Tested on TC cluster, compute node tc285, on standard CPU nodes. 35 | 36 | Launch with slurm: sbatch sbatch.01.slurm 37 | 38 | Diff output: diff sm.01.out sm.01.out.valid 39 | 40 | **There could be very minor discrepancies due to rounding.** 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /r/4.4.2/parallel_02/README.md: -------------------------------------------------------------------------------- 1 | # R Parallel 2 | 3 | ## Note 4 | 5 | I could not find any straightforward way to implement an R code using GPUs. 6 | All methods that I encountered have been deprecated and are no longer 7 | supported in places like CRAN. 8 | 9 | If you know of an approach, please contact: 10 | 11 | - chris kuhlman, ckuhlman@vt.edu 12 | - ARC (e.g., via "help ticket" at arc.vt.edu/help) 13 | -------------------------------------------------------------------------------- /r/4.4.2/serial/README.md: -------------------------------------------------------------------------------- 1 | # R Serial Code 2 | 3 | Version of R: module load R/4.4.2-gfbf-2024a 4 | 5 | ## Codes 6 | 7 | 8 | ------------------------------------------ 9 | ------------------------------------------ 10 | ### code01 11 | 12 | This is a serial code. 13 | 14 | Comes from: /projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-r/owl/standard/test01 15 | 16 | Cluster: Owl. 17 | 18 | Partition: dev_q 19 | 20 | Compute node: owl083 21 | 22 | 23 | ---------------------- 24 | To set up to run. 25 | 26 | For interactive jobs, need: 27 | module load R/4.4.2-gfbf-2024a 28 | 29 | 30 | ---------------------- 31 | To run code. 32 | 33 | 34 | Tested on Owl cluster, compute node owl083, on standard CPU nodes. 35 | 36 | Launch with slurm: sbatch sbatch.02.slurm 37 | 38 | Diff output: diff output.owl.dev_q..txt output.owl.dev_q.txt.valid 39 | 40 | 41 | 42 | ---------------------------- 43 | Background. 44 | 45 | There is a lot of stuff in the sbatch script. 46 | It is worth wading through this, for a user, because it is close to a 47 | "real" sbatch slurm script. 48 | 49 | 50 | ------------------------------------------ 51 | ------------------------------------------ 52 | ### code02 53 | 54 | This is a serial code. 55 | 56 | Comes from: /projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-r/tc/test01 57 | 58 | Cluster: TC. 59 | 60 | Partition: normal_q 61 | 62 | Compute node: tc285 63 | 64 | 65 | ---------------------- 66 | To set up to run.
67 | 68 | For interactive jobs, need: 69 | module load module load R/4.4.2-gfbf-2024a 70 | 71 | 72 | ---------------------- 73 | To run code. 74 | 75 | 76 | Tested on TC cluster, compute node tc285, on CPU node. 77 | 78 | Launch with slurm: sbatch sbatch.02.slurm 79 | 80 | Diff output: diff output.tc.normal_q..txt output.tc.normal_q.txt.valid 81 | 82 | **The output from this diff should be that one line is different. 83 | The _*.valid_ has SLURM_JOB_ID of 266 and your new job will have 84 | a different number.** 85 | 86 | 87 | 88 | ---------------------------- 89 | Background. 90 | 91 | There is a lot of stuff in the sbatch script. 92 | It is worth wading through this, for a user, because it is close to a 93 | "real" sbatch slurm script. 94 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code01/output.owl.dev_q.txt.valid: -------------------------------------------------------------------------------- 1 | param1 2 | param2 3 | param3 4 | owl.dev_q 5 | 88919 6 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code01/previous-execution-results/output.owl.dev_q.96108.txt: -------------------------------------------------------------------------------- 1 | param1 2 | param2 3 | param3 4 | owl.dev_q 5 | 96108 6 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code01/previous-execution-results/r.serial.owl.02.96108.err: -------------------------------------------------------------------------------- 1 | Resetting modules to system default. Reseting $MODULEPATH back to system default. All extra directories will be removed from $MODULEPATH. 2 | Setting R_LIBS_USER to: /home/ckuhlman/R/owl-genoa/4.4.2 3 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code01/previous-execution-results/r.serial.owl.02.96108.out: -------------------------------------------------------------------------------- 1 | date: Fri May 16 11:33:16 AM EDT 2025 2 | hostname: owl083 3 | 4 | 5 | Checking job details for CPU_IDs... 
6 | JobId=96108 JobName=rserial.02 7 | UserId=ckuhlman(1344122) GroupId=ckuhlman(1344122) MCS_label=N/A 8 | Priority=1111 Nice=0 Account=arcadm QOS=normal 9 | JobState=RUNNING Reason=None Dependency=(null) 10 | Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0 11 | DerivedExitCode=0:0 12 | RunTime=00:00:01 TimeLimit=00:10:00 TimeMin=N/A 13 | SubmitTime=2025-05-16T11:33:14 EligibleTime=2025-05-16T11:33:15 14 | AccrueTime=2025-05-16T11:33:15 15 | StartTime=2025-05-16T11:33:15 EndTime=2025-05-16T11:43:15 Deadline=N/A 16 | SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-05-16T11:33:15 Scheduler=Main 17 | Partition=dev_q AllocNode:Sid=owl083:251996 18 | ReqNodeList=(null) ExcNodeList=(null) 19 | NodeList=owl083 20 | BatchHost=owl083 21 | NumNodes=1 NumCPUs=1 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:* 22 | ReqTRES=cpu=1,mem=7656M,node=1 23 | AllocTRES=cpu=1,mem=7656M,node=1 24 | Socks/Node=* NtasksPerN:B:S:C=1:0:*:* CoreSpec=* 25 | JOB_GRES=(null) 26 | Nodes=owl083 CPU_IDs=0 Mem=7656 GRES= 27 | MinCPUsNode=1 MinMemoryCPU=7656M MinTmpDiskNode=0 28 | Features=(null) DelayBoot=00:00:00 29 | Reservation=HPCMaintTesting 30 | OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null) 31 | Command=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-r/owl/standard/test01/sbatch.02.slurm 32 | WorkDir=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-r/owl/standard/test01 33 | StdErr=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-r/owl/standard/test01/r.serial.owl.02.96108.err 34 | StdIn=/dev/null 35 | StdOut=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-r/owl/standard/test01/r.serial.owl.02.96108.out 36 | Power= 37 | 38 | 39 | 40 | [1] "Guten tag, from owl083 ... Arguments: param1 param2 param3 owl.dev_q 96108" 41 | [1] "param1" 42 | [1] "param2" 43 | [1] "param3" 44 | [1] "owl.dev_q" 45 | [1] "96108" 46 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code01/sbatch.02.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | #SBATCH -J rserial.02 5 | 6 | ## Account. 7 | #SBATCH --account arcadm 8 | 9 | ## Time. 10 | #SBATCH --time 00:10:00 11 | 12 | ## Reservation. 13 | #SBATCH --reservation=HPCMaintTesting 14 | 15 | ## Partition. 16 | ## #SBATCH --partition normal_q 17 | #SBATCH --partition dev_q 18 | 19 | ## Num nodes, tasks, cores. 20 | #SBATCH --nodes=1 21 | #SBATCH --ntasks-per-node=1 22 | #SBATCH --cpus-per-task=1 23 | 24 | ## Slurm output and error files. 25 | ## Always put "%j" into the output (-o) and error (-e) files. 26 | ## %j is the unique job ID assigned by slurm and provides 27 | ## traceability. 28 | #SBATCH -o r.serial.owl.02.%j.out 29 | #SBATCH -e r.serial.owl.02.%j.err 30 | 31 | ## Get familiar with global variables whose values 32 | ## are provided by Slurm, e.g., ${SLURM_JOB_ID}. 33 | echo "date: `date`" 34 | echo "hostname: $HOSTNAME"; echo 35 | echo -e "\nChecking job details for CPU_IDs..." 36 | ## The switch "--details" gives the IDs of the cores of a 37 | ## compute node that are assigned to this job. 38 | scontrol show job --details $SLURM_JOB_ID 39 | 40 | echo " " 41 | 42 | ## Load all required modules. 43 | module purge 44 | module reset 45 | module load R/4.4.2-gfbf-2024a 46 | 47 | param4="owl.dev_q" 48 | param5=${SLURM_JOB_ID} 49 | ## param5="output.owl.dev_q.JOB_ID.txt" 50 | 51 | 52 | ## Invocation of the R code. 
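## The first three arguments are simple placeholder strings that get echoed back; ${param4} (a partition label) and ${param5} (the Slurm job ID) are read by test_args.02.R to build the output filename.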
53 | Rscript test_args.02.R param1 param2 param3 ${param4} ${param5} 54 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code01/test_args.02.R: -------------------------------------------------------------------------------- 1 | # Load the stringr package 2 | library(stringr) 3 | 4 | args = commandArgs(trailingOnly=TRUE) 5 | 6 | fmt_str = paste("Guten tag, from ", Sys.info()["nodename"], "... Arguments: ") 7 | 8 | # Write to stdout the CLAs. 9 | for (a in args) {fmt_str <- paste(fmt_str, " ", a)} 10 | print(fmt_str) 11 | 12 | # Generate the output filename. 13 | jobid=args[4] 14 | arrayid=args[5] 15 | outName<-str_c("output", jobid, arrayid, "txt", sep=".") 16 | 17 | # Write to file. 18 | fhout<-file(outName,"w") 19 | for (a in args) { 20 | print(a) 21 | writeLines(a,fhout) 22 | } 23 | close(fhout) 24 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code02/output.tc.normal_q.txt.valid: -------------------------------------------------------------------------------- 1 | param1 2 | param2 3 | param3 4 | tc.normal_q 5 | 266 6 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code02/previous-execution-results/output.tc.normal_q.267.txt: -------------------------------------------------------------------------------- 1 | param1 2 | param2 3 | param3 4 | tc.normal_q 5 | 267 6 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code02/previous-execution-results/r.serial.tc.267.err: -------------------------------------------------------------------------------- 1 | Resetting modules to system default. Reseting $MODULEPATH back to system default. All extra directories will be removed from $MODULEPATH. 2 | Setting R_LIBS_USER to: /home/ckuhlman/R/tinkercliffs-rome/4.4.2 3 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code02/previous-execution-results/r.serial.tc.267.out: -------------------------------------------------------------------------------- 1 | date: Fri May 16 12:00:00 PM EDT 2025 2 | hostname: tc285 3 | 4 | 5 | Checking job details for CPU_IDs... 
6 | JobId=267 JobName=rserial.02 7 | UserId=ckuhlman(1344122) GroupId=ckuhlman(1344122) MCS_label=N/A 8 | Priority=5002 Nice=0 Account=arcadm QOS=tc_normal_base 9 | JobState=RUNNING Reason=None Dependency=(null) 10 | Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0 11 | DerivedExitCode=0:0 12 | RunTime=00:00:01 TimeLimit=00:10:00 TimeMin=N/A 13 | SubmitTime=2025-05-16T11:59:59 EligibleTime=2025-05-16T11:59:59 14 | AccrueTime=2025-05-16T11:59:59 15 | StartTime=2025-05-16T11:59:59 EndTime=2025-05-16T12:09:59 Deadline=N/A 16 | SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-05-16T11:59:59 Scheduler=Main 17 | Partition=normal_q AllocNode:Sid=tc285:256825 18 | ReqNodeList=(null) ExcNodeList=(null) 19 | NodeList=tc285 20 | BatchHost=tc285 21 | NumNodes=1 NumCPUs=1 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:* 22 | ReqTRES=cpu=1,mem=1944M,node=1,billing=1 23 | AllocTRES=cpu=1,mem=1944M,node=1,billing=1 24 | Socks/Node=* NtasksPerN:B:S:C=1:0:*:* CoreSpec=* 25 | JOB_GRES=(null) 26 | Nodes=tc285 CPU_IDs=0 Mem=1944 GRES= 27 | MinCPUsNode=1 MinMemoryCPU=1944M MinTmpDiskNode=0 28 | Features=(null) DelayBoot=00:00:00 29 | OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null) 30 | Command=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-r/tc/test01/sbatch.02.slurm 31 | WorkDir=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-r/tc/test01 32 | StdErr=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-r/tc/test01/r.serial.tc.267.err 33 | StdIn=/dev/null 34 | StdOut=/projects/kuhlman-project-storage/system-maint/y2025/2025-05/test-r/tc/test01/r.serial.tc.267.out 35 | Power= 36 | 37 | 38 | 39 | [1] "Guten tag, from tc285 ... Arguments: param1 param2 param3 tc.normal_q 267" 40 | [1] "param1" 41 | [1] "param2" 42 | [1] "param3" 43 | [1] "tc.normal_q" 44 | [1] "267" 45 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code02/sbatch.02.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | #SBATCH -J rserial.02 5 | 6 | ## Account. 7 | #SBATCH --account arcadm 8 | 9 | ## Time. 10 | #SBATCH --time 00:10:00 11 | 12 | ## Reservation. 13 | ## #SBATCH --reservation=HPCMaintTesting 14 | 15 | ## Partition. 16 | #SBATCH --partition normal_q 17 | ## #SBATCH --partition dev_q 18 | 19 | ## Num nodes, tasks, cores. 20 | #SBATCH --nodes=1 21 | #SBATCH --ntasks-per-node=1 22 | #SBATCH --cpus-per-task=1 23 | 24 | ## Slurm output and error files. 25 | ## Always put "%j" into the output (-o) and error (-e) files. 26 | ## %j is the unique job ID assigned by slurm and provides 27 | ## traceability. 28 | #SBATCH -o r.serial.tc.%j.out 29 | #SBATCH -e r.serial.tc.%j.err 30 | 31 | ## Get familiar with global variables whose values 32 | ## are provided by Slurm, e.g., ${SLURM_JOB_ID}. 33 | echo "date: `date`" 34 | echo "hostname: $HOSTNAME"; echo 35 | echo -e "\nChecking job details for CPU_IDs..." 36 | ## The switch "--details" gives the IDs of the cores of a 37 | ## compute node that are assigned to this job. 38 | scontrol show job --details $SLURM_JOB_ID 39 | 40 | echo " " 41 | 42 | ## Load all required modules. 43 | module purge 44 | module reset 45 | module load R/4.4.2-gfbf-2024a 46 | 47 | param4="tc.normal_q" 48 | param5=${SLURM_JOB_ID} 49 | 50 | 51 | ## Invocation of the R code. 
52 | Rscript test_args.02.R param1 param2 param3 ${param4} ${param5} 53 | -------------------------------------------------------------------------------- /r/4.4.2/serial/code02/test_args.02.R: -------------------------------------------------------------------------------- 1 | # Load the stringr package 2 | library(stringr) 3 | 4 | args = commandArgs(trailingOnly=TRUE) 5 | 6 | fmt_str = paste("Guten tag, from ", Sys.info()["nodename"], "... Arguments: ") 7 | 8 | # Write to stdout the CLAs. 9 | for (a in args) {fmt_str <- paste(fmt_str, " ", a)} 10 | print(fmt_str) 11 | 12 | # Generate the output filename. 13 | jobid=args[4] 14 | arrayid=args[5] 15 | outName<-str_c("output", jobid, arrayid, "txt", sep=".") 16 | 17 | # Write to file. 18 | fhout<-file(outName,"w") 19 | for (a in args) { 20 | print(a) 21 | writeLines(a,fhout) 22 | } 23 | close(fhout) 24 | -------------------------------------------------------------------------------- /r/mcpi_parallel.r: -------------------------------------------------------------------------------- 1 | # File name: mcpi_parallel.r 2 | # Description: Calculate pi using Monte Carlo (parallel) 3 | # Run: Rscript mcpi_parallel.r 4 | 5 | #Load the parallel, Rmpi, and random number packages 6 | library(parallel) 7 | library(Rmpi) 8 | library(rlecuyer) 9 | 10 | #problem size (number of points) 11 | n.pts <- 5e7 12 | 13 | #cluster size (from Slurm environment variable) 14 | ncores = as.numeric(Sys.getenv("SLURM_NTASKS")) 15 | 16 | tm.tot.st = Sys.time() 17 | 18 | #function to determine if a point is in the unit circle 19 | in.ucir <- function(x) { as.integer((x[1]^2 + x[2]^2) <= 1) } 20 | 21 | n.pts.cl <- ceiling(n.pts / ncores) # Strong scaling 22 | m = matrix(runif(2*n.pts),n.pts,2) 23 | # n.pts.cl <- n.pts # Weak scaling 24 | # m = matrix(runif(2*ncores*n.pts.cl),ncores*n.pts.cl,2) 25 | cat('running parallel version with n.pts = ',n.pts,' and ncores = ',ncores,' (',n.pts.cl,' pts per core) \n') 26 | 27 | #start up and initialize the cluster 28 | cl <- makeCluster(ncores, type = 'MPI') 29 | clusterSetRNGStream(cl, NULL) 30 | 31 | tm.comp.st = Sys.time() 32 | 33 | cir = parRapply(cl, m, in.ucir ) 34 | 35 | #return the proportion of points in the unit circle * 4 36 | pi.approx = 4*mean(cir) 37 | 38 | tm.comp = as.numeric(Sys.time() - tm.comp.st, units="secs") 39 | 40 | cat(' pi estimate = ',pi.approx,'\n') 41 | 42 | pi.err = abs(pi - pi.approx)/pi 43 | cat(' relative error = ',pi.err,'\n') 44 | 45 | tm.tot = as.numeric(Sys.time() - tm.tot.st, units="secs") 46 | cat(' computational time = ',tm.comp,'\n') 47 | cat(' total time = ',tm.tot,'\n') 48 | 49 | 50 | #stop the cluster 51 | mpi.exit() 52 | stopCluster(cl) 53 | -------------------------------------------------------------------------------- /r/mcpi_parallel_cascades.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Sample submission script for Pi Monte Carlo using the parallel and Rmpi packages 4 | 5 | #SBATCH -t 00:10:00 6 | #SBATCH -N 1 --ntasks-per-node=8 7 | #SBATCH -p dev_q 8 | 9 | # Add modules 10 | module purge 11 | module load intel/18.2 openmpi/4.0.1 R/3.6.1 R-parallel/3.6.1 12 | 13 | # Run R 14 | #Run as a script; this works but the .r file must be changed so the number 15 | #of child processes is 1 less than the number of cores allocated 16 | #Rscript mcpi_parallel.r 17 | 18 | #Start with MPI 19 | # --oversubscribe so we can start as many child processes as cores 20 | mpirun -np 1 --oversubscribe Rscript mcpi_parallel.r 21 | 22 | exit; 23 | 
-------------------------------------------------------------------------------- /r/mh_parallel_cascades.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Sample submission script for parallel Metropolis-Hastings 4 | # This example uses the parallel package only without Rmpi, 5 | # so it only worked within a node. Rmpi can be enabled by 6 | # uncommenting a few lines in the R script and below 7 | 8 | #SBATCH -t 00:10:00 9 | #SBATCH -N 1 --ntasks-per-node=8 10 | #SBATCH -p dev_q 11 | 12 | # Add modules 13 | module purge 14 | module load intel/18.2 openmpi/4.0.1 R/3.6.1 R-parallel/3.6.1 15 | 16 | # Run R 17 | Rscript mh_parallel.r 18 | 19 | #Uncomment to start with MPI 20 | # --oversubscribe so we can start as many child processes as cores 21 | #mpirun -np 1 --oversubscribe Rscript mh_parallel.r 22 | 23 | -------------------------------------------------------------------------------- /r/mh_parallel_tinkercliffs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Sample submission script for parallel Metropolis-Hastings 4 | # This example uses the parallel package only without Rmpi, 5 | # so it only worked within a node. Rmpi can be enabled by 6 | # uncommenting a few lines in the R script and below 7 | 8 | #SBATCH -t 00:10:00 9 | #SBATCH -N 1 --ntasks-per-node=8 10 | #SBATCH -p dev_q 11 | 12 | # Add modules 13 | module reset 14 | module load R 15 | 16 | # Run R 17 | Rscript mh_parallel.r 18 | 19 | #Uncomment to start with MPI 20 | # --oversubscribe so we can start as many child processes as cores 21 | #mpirun -np 1 --oversubscribe Rscript mh_parallel.r 22 | 23 | -------------------------------------------------------------------------------- /scikit-bio/skbio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --account=personal 3 | #SBATCH --job-name=skbio_distance 4 | #SBATCH --nodes=1 5 | #SBATCH --ntasks=1 6 | #SBATCH --time=00:05:00 7 | #SBATCH --partition=normal_q 8 | 9 | module reset 10 | module load Miniforge3/24.1.2-0 11 | module load scikit-bio/0.5.7-foss-2022a 12 | 13 | # Activate your virtual environment if needed 14 | #source activate path/to/venv/bin/activate 15 | 16 | # Run the test 17 | python skbio_dist.py 18 | -------------------------------------------------------------------------------- /scikit-bio/skbio_dist.py: -------------------------------------------------------------------------------- 1 | # test_skbio.py 2 | # Tests scikit-bio by running a simple PERMANOVA (non-parametric statistical test) on a toy distance matrix. 
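# The 3x3 matrix below is symmetric with a zero diagonal, as DistanceMatrix requires; the grouping puts samples A and B in Group1 and C in Group2, and permutations=99 keeps the test quick.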
3 | 4 | from skbio.stats.distance import DistanceMatrix 5 | from skbio.stats.distance import permanova 6 | import numpy as np 7 | 8 | # Create a small distance matrix 9 | ids = ['A', 'B', 'C'] 10 | data = np.array([ 11 | [0.0, 0.5, 0.75], 12 | [0.5, 0.0, 0.25], 13 | [0.75, 0.25, 0.0] 14 | ]) 15 | 16 | dm = DistanceMatrix(data, ids) 17 | 18 | # Perform a simple PERMANOVA (Permutational Multivariate Analysis of Variance) 19 | grouping = ['Group1', 'Group1', 'Group2'] 20 | 21 | result = permanova(dm, grouping, permutations=99) 22 | 23 | print(result) 24 | -------------------------------------------------------------------------------- /scikit-learn/script.sh: -------------------------------------------------------------------------------- 1 | #SBATCH 2 | -------------------------------------------------------------------------------- /stream/stream_infer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Run the Stream memory bandwidth benchmark (https://www.cs.virginia.edu/stream/) 3 | 4 | #SBATCH -t 0:05:00 5 | #SBATCH -N 1 6 | #SBATCH --exclusive 7 | #SBATCH -p t4_dev_q 8 | 9 | arrsize="1500M" #array size 10 | 11 | module reset 12 | module load intel 13 | 14 | #set some variables 15 | exename="stream.$arrsize" 16 | arrsizefull="$( echo $arrsize | sed 's/M/000000/' )" 17 | icc -o $exename stream.c -DSTATIC -DNTIMES=10 -DSTREAM_ARRAY_SIZE=$arrsizefull \ 18 | -mcmodel=large -shared-intel -Ofast -qopenmp -ffreestanding -qopt-streaming-stores always 19 | 20 | #run 21 | export OMP_PROC_BIND=true 22 | export OMP_NUM_THREADS=32 23 | #export OMP_PLACES="$( seq -s },{ 0 4 127 | sed -e 's/\(.*\)/\{\1\}/' )" 24 | echo "running stream benchmark on host $( hostname ) with $OMP_NUM_THREADS threads..." 25 | echo "using $arrsize array size..." 26 | module list 27 | ./$exename 28 | 29 | -------------------------------------------------------------------------------- /stream/stream_tinkercliffs_rome.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Run the Stream memory bandwidth benchmark (https://www.cs.virginia.edu/stream/) 3 | 4 | #SBATCH -t 0:05:00 5 | #SBATCH -N 1 6 | #SBATCH --exclusive 7 | #SBATCH -p dev_q 8 | 9 | arrsize="2500M" #array size 10 | 11 | module reset 12 | module load intel 13 | 14 | #set some variables 15 | exename="stream.$arrsize" 16 | arrsizefull="$( echo $arrsize | sed 's/M/000000/' )" 17 | icc -o $exename stream.c -DSTATIC -DNTIMES=10 -DSTREAM_ARRAY_SIZE=$arrsizefull \ 18 | -mcmodel=large -shared-intel -Ofast -qopenmp -ffreestanding -qopt-streaming-stores always 19 | 20 | #run 21 | export OMP_PROC_BIND=true 22 | export OMP_NUM_THREADS=32 23 | export OMP_PLACES="$( seq -s },{ 0 4 127 | sed -e 's/\(.*\)/\{\1\}/' )" 24 | echo "running stream benchmark on host $( hostname ) with $OMP_NUM_THREADS threads..." 25 | echo "using $arrsize array size..." 
26 | module list 27 | ./$exename 28 | 29 | -------------------------------------------------------------------------------- /su2/su2-tc-kmt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This example was runs SU2's published Incompressible Flow tutorial on Composition-Dependent model for Species Transport equations 3 | # https://su2code.github.io/tutorials/Inc_Species_Transport_Composition_Dependent_Model/ 4 | # It demonstrates running an SU2 solver which has been compiled with MPI for distributed parallelism 5 | # On Tinkercliffs normal_q, it takes about 16.5 minutes using 4 nodes and 32 tasks-per-node 6 | # 7 | # Usage: to run this example, 8 | # 1. copy the contents of this script into a file on an ARC system named su2-tc-kmt.sh 9 | # 2. edit the script to reference your Slurm account and adjust nodes/tasks as desired 10 | # 3. submit the job with the command "sbatch su2-tc-kmt.sh" 11 | # ---------------------------------------------------------------------------------------- 12 | # 13 | #SBATCH --account= 14 | #SBATCH --partition=normal_q 15 | #SBATCH --time=0-0:30:00 16 | #SBATCH --nodes=4 17 | #SBATCH --ntasks-per-node=32 18 | #SBATCH --cpus-per-task=1 19 | 20 | module reset 21 | module load tinkercliffs-rome/su2 22 | 23 | # We will use it, so make sure the USERNAME variable is set correctly 24 | if [ "$USER" != `id -un` ]; then 25 | USER=`id -un` 26 | fi 27 | 28 | # Make a directory on /scratch for the test if it doesn't already exist 29 | WORKDIR="/scratch/${USER}/su2-tut" 30 | if [ ! -d "$WORKDIR" ]; then 31 | mkdir -p $WORKDIR 32 | fi 33 | 34 | # Copy the tutorial directory 35 | cp -r $SU2_DIR/Tutorials/incompressible_flow/Inc_Species_Transport_Composition_Dependent_Model/ $WORKDIR 36 | cd $WORKDIR/Inc_Species_Transport_Composition_Dependent_Model/ 37 | 38 | mpirun -n $SLURM_NTASKS SU2_CFD kenics_mixer_tutorial.cfg -------------------------------------------------------------------------------- /tensorflow/beginner.py: -------------------------------------------------------------------------------- 1 | # Beginner example extracted from 2 | # https://github.com/tensorflow/docs/blob/master/site/en/tutorials/quickstart/beginner.ipynb 3 | # using 4 | # jupyter nbconvert --to script beginner.ipynb 5 | 6 | import tensorflow as tf 7 | 8 | mnist = tf.keras.datasets.mnist 9 | 10 | (x_train, y_train), (x_test, y_test) = mnist.load_data() 11 | x_train, x_test = x_train / 255.0, x_test / 255.0 12 | 13 | model = tf.keras.models.Sequential([ 14 | tf.keras.layers.Flatten(input_shape=(28, 28)), 15 | tf.keras.layers.Dense(128, activation='relu'), 16 | tf.keras.layers.Dropout(0.2), 17 | tf.keras.layers.Dense(10) 18 | ]) 19 | 20 | predictions = model(x_train[:1]).numpy() 21 | predictions 22 | 23 | tf.nn.softmax(predictions).numpy() 24 | 25 | loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) 26 | 27 | loss_fn(y_train[:1], predictions).numpy() 28 | 29 | model.compile(optimizer='adam', 30 | loss=loss_fn, 31 | metrics=['accuracy']) 32 | 33 | model.fit(x_train, y_train, epochs=5) 34 | 35 | model.evaluate(x_test, y_test, verbose=2) 36 | 37 | probability_model = tf.keras.Sequential([ 38 | model, 39 | tf.keras.layers.Softmax() 40 | ]) 41 | 42 | probability_model(x_test[:5]) 43 | -------------------------------------------------------------------------------- /tensorflow/tensorflow_infer_p100.sh: -------------------------------------------------------------------------------- 1 | #! 

--------------------------------------------------------------------------------
/tensorflow/tensorflow_infer_p100.sh:
--------------------------------------------------------------------------------
#! /bin/bash
#
#SBATCH -t 00:10:00
#SBATCH -N1 --ntasks-per-node=1
#SBATCH --gres gpu:1
#SBATCH -p p100_dev_q
#

#Load modules
module reset
module load TensorFlow

#Run beginner tutorial
echo "TENSORFLOW_INFER_P100: Normal beginning of execution."
python beginner.py
echo "TENSORFLOW_INFER_P100: Normal end of execution."

--------------------------------------------------------------------------------
/tensorflow/tensorflow_infer_t4.sh:
--------------------------------------------------------------------------------
#! /bin/bash
#
#SBATCH -t 00:10:00
#SBATCH -N1 --ntasks-per-node=1
#SBATCH --gres gpu:1
#SBATCH -p t4_dev_q
#

#Load modules
module reset
module load TensorFlow

#Run beginner tutorial
echo "TENSORFLOW_INFER_T4: Normal beginning of execution."
python beginner.py
echo "TENSORFLOW_INFER_T4: Normal end of execution."

--------------------------------------------------------------------------------
/tensorflow/tensorflow_infer_v100.sh:
--------------------------------------------------------------------------------
#! /bin/bash
#
#SBATCH -t 00:10:00
#SBATCH -N1 --ntasks-per-node=1
#SBATCH --gres gpu:1
#SBATCH -p v100_dev_q
#

#Load modules
module reset
module load TensorFlow

#Run beginner tutorial
echo "TENSORFLOW_INFER_V100: Normal beginning of execution."
python beginner.py
echo "TENSORFLOW_INFER_V100: Normal end of execution."

--------------------------------------------------------------------------------
/tensorflow/tensorflow_tinkercliffs_a100.sh:
--------------------------------------------------------------------------------
#! /bin/bash
#
#SBATCH -t 00:10:00
#SBATCH -N1 --ntasks-per-node=1
#SBATCH --gres gpu:1
#SBATCH -p a100_dev_q
#

#Load modules
module reset
module load cuda11.2/toolkit #hopefully will be added to defaults soon
module load TensorFlow

#Run beginner tutorial
echo "TENSORFLOW_TINKERCLIFFS_A100: Normal beginning of execution."
python beginner.py
echo "TENSORFLOW_TINKERCLIFFS_A100: Normal end of execution."

--------------------------------------------------------------------------------
/tinker9/t9-example-falcon-a30.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#SBATCH --account=
#SBATCH --partition=a30_normal_q
#SBATCH --nodes=1
#SBATCH --gres=gpu:1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=4
#SBATCH --time=0-1:00:00

# Each node type has different modules available. Resetting makes the appropriate stack available
module reset
module load falcon-sapphirerapids/tinker9/1.4.0-nvhpc-23.7

# We will use it below, so make sure the USER variable is set correctly
if [ "$USER" != `id -un` ]; then
  USER=`id -un`
fi

# Make a directory on /scratch for the test if it doesn't already exist
# /scratch is preferred over /home or /projects for staging and running jobs
WORKDIR="/scratch/${USER}/t9-example"
if [ ! -d "$WORKDIR" ]; then
  mkdir -p $WORKDIR
fi
cd $WORKDIR

# Tinker9 installation has example files. Copying them to the working directory
cp -vr $EBROOTTINKER9/tinker9/{example,params} .
cd example

# Diagnostic output
echo "working in `pwd`"
ls -l

# Start background process to log GPU utilization to a file
# This example should run at about 97% GPU utilization and <1GB of device memory while running
/apps/useful_scripts/bin/gpumon > $SLURM_SUBMIT_DIR/job-${SLURM_JOB_ID}-gpu.log &

tinker9 info
# Run the example. Expected runtime for this example is 1 minute on an A30 GPU
echo "-------- Starting tinker9: `date` -------"
tinker9 dynamic dhfr2.xyz 5000 2 1 2 298
echo "------- tinker9 has exited: `date` --------"
--------------------------------------------------------------------------------
/tinker9/t9-example-infer-v100.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#SBATCH --account=
#SBATCH --partition=v100_normal_q
#SBATCH --nodes=1
#SBATCH --gres=gpu:1
#SBATCH --ntasks-per-node=12
#SBATCH --cpus-per-task=1
#SBATCH --time=0-1:00:00

# Each node type has different modules available. Resetting makes the appropriate stack available
module reset
module load infer-skylake_v100/tinker9/1.4.0-nvhpc-21.11

# V100 nodes have local SSD drives which are much faster than HOME or PROJECTS
# Using this scratch storage as the working directory for the job
cd $TMPSSD

# Tinker9 installation has example files. Copying them to the working directory
cp -vr $EBROOTTINKER9/tinker9/{example,params} .
cd example

# Diagnostic output
echo "working in `pwd`"
ls -l

# Start background process to log GPU utilization to a file
/apps/useful_scripts/bin/gpumon > $SLURM_SUBMIT_DIR/job-${SLURM_JOB_ID}-gpu.log &

tinker9 info
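# For reference, the positional arguments to "tinker9 dynamic" below follow the
# standard Tinker "dynamic" prompts (verify against the Tinker9 docs for your
# version): coordinate file, number of MD steps (5000), time step in fs (2),
# time between trajectory dumps in ps (1), ensemble (2 = canonical/NVT), and
# target temperature in K (298).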
# Run the example. Expected runtime for this example is 1 minute on a V100 GPU
echo "-------- Starting tinker9: `date` -------"
tinker9 dynamic dhfr2.xyz 5000 2 1 2 298
echo "------- tinker9 has exited: `date` --------"

# Copy result files from local scratch (TMPSSD) back to job directory
# Local scratch is erased when the job ends
cp $TMPSSD/example/dhfr2.arc $SLURM_SUBMIT_DIR

--------------------------------------------------------------------------------
/wrf/input_sounding:
--------------------------------------------------------------------------------
1015.10 298.1718 18.20000
132.0000 299.1500 17.60000 0.0000000E+00 0.0000000E+00
583.0000 300.5175 15.30000 0.0000000E+00 0.0000000E+00
1054.000 301.8932 13.00000 0.0000000E+00 0.0000000E+00
1547.000 304.2399 11.00000 0.0000000E+00 0.0000000E+00
2063.000 306.6724 8.400000 0.0000000E+00 0.0000000E+00
2609.000 309.3341 7.100000 0.0000000E+00 0.0000000E+00
3182.000 311.9428 5.800000 0.0000000E+00 0.0000000E+00
3792.000 314.6532 4.600000 0.0000000E+00 0.0000000E+00
4442.000 317.6435 3.600000 0.0000000E+00 0.0000000E+00
5138.000 321.0051 3.200000 0.0000000E+00 0.0000000E+00
5888.000 324.4946 2.100000 0.0000000E+00 0.0000000E+00
6703.000 328.1210 1.400000 0.0000000E+00 0.0000000E+00
7595.000 331.8052 0.0000000E+00 0.0000000E+00 0.0000000E+00
8581.000 335.1144 0.0000000E+00 0.0000000E+00 0.0000000E+00
9682.000 338.3430 0.0000000E+00 0.0000000E+00 0.0000000E+00
10935.00 341.4132 0.0000000E+00 0.0000000E+00 0.0000000E+00
12396.00 345.0262 0.0000000E+00 0.0000000E+00 0.0000000E+00
13236.00 348.0688 0.0000000E+00 0.0000000E+00 0.0000000E+00
14177.00 353.2416 0.0000000E+00 0.0000000E+00 0.0000000E+00
15260.00 363.7825 0.0000000E+00 0.0000000E+00 0.0000000E+00
16568.00 385.1966 0.0000000E+00 0.0000000E+00 0.0000000E+00
17883.00 418.1352 0.0000000E+00 0.0000000E+00 0.0000000E+00
19620.00 467.0863 0.0000000E+00 0.0000000E+00 0.0000000E+00
20743.00 499.7953 0.0000000E+00 0.0000000E+00 0.0000000E+00
22139.00 540.9318 0.0000000E+00 0.0000000E+00 0.0000000E+00
23971.00 596.1987 0.0000000E+00 0.0000000E+00 0.0000000E+00
30000.00 778.0787 0.0000000E+00 0.0000000E+00 0.0000000E+00

--------------------------------------------------------------------------------
/wrf/wrf_tinkercliffs_rome.sh:
--------------------------------------------------------------------------------
#! /bin/bash
#Example script for running WRF on ARC systems
# - Input files input_sounding and namelist.input copied from
#   test/em_tropical_cyclone in the WRF install directory
# - namelist.input edited to simulate 1 day rather than 6 to
#   reduce runtime
#
#SBATCH -t 00:30:00
#SBATCH -N1 --ntasks-per-node=4
#SBATCH -p dev_q
#

#load modules
module reset
module load WRF/4.2.2-foss-2020b-dmpar

#symlink to required input data (mimic run_me_first.csh from example)
ln -sf "$EBROOTWRF/WRF-4.2.2/run/LANDUSE.TBL" .
ln -sf "$EBROOTWRF/WRF-4.2.2/run/RRTM_DATA" .

#run ideal.exe to generate input files
ideal.exe

#move the outputs from ideal.exe so they're not overwritten by wrf
mv rsl.error.0000 ideal_rsl.error.0000
mv rsl.out.0000 ideal_rsl.out.0000

#run WRF
mpirun -np $SLURM_NTASKS wrf.exe

--------------------------------------------------------------------------------