├── machine_specific_scripts ├── ANL_polaris │ ├── polaris_launch.sh │ ├── polaris_submit.sh │ ├── polaris_compile.sh │ └── polaris_run.sh ├── ORNL_crusher_and_frontier │ ├── compile_crusher_mpi.sh │ ├── crusher_run.sh │ ├── CMakePresets.json │ └── CMakeLists.txt └── ANL_sunspot │ ├── sunspot_build.sh │ ├── scaling_launch.sh │ └── scaling_run.pbs ├── LICENSE └── README.md /machine_specific_scripts/ANL_polaris/polaris_launch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | num_gpus=4 3 | gpu=$((${num_gpus} - 1 - ${PMI_LOCAL_RANK} % ${num_gpus})) 4 | echo $gpu 5 | OMP_DEFAULT_DEVICE=$gpu openmc --event -s 4 -i 1500000 -n ${1} 6 | -------------------------------------------------------------------------------- /machine_specific_scripts/ANL_polaris/polaris_submit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #PBS -N openmc_scaling 4 | #PBS -l select=1 5 | #PBS -l walltime=0:20:00 6 | #PBS -l filesystems=home 7 | #PBS -q prod 8 | #PBS -A CSC249ADSE08 9 | 10 | # Adjust select=n line above to indicate how many 11 | # nodes you want to run on (n=# of nodes) 12 | 13 | cd /path/to/problem 14 | ./polaris_run.sh 15 | -------------------------------------------------------------------------------- /machine_specific_scripts/ORNL_crusher_and_frontier/compile_crusher_mpi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | module use /gpfs/alpine/csc404/proj-shared/openmc/Modules/modulefiles 3 | module load llvm/current 4 | 5 | rm -rf build 6 | mkdir build 7 | cd build 8 | cmake --preset=llvm_mi250x_mpi -DCMAKE_INSTALL_PREFIX=./install -Doptimize=on -Ddevice_printf=off -Ddebug=on -Dhip_thrust_sort=on .. 
9 | make VERBOSE=1 install 10 | -------------------------------------------------------------------------------- /machine_specific_scripts/ANL_polaris/polaris_compile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | module use /home/jtramm/Modules/modulefiles 3 | module load cmake 4 | module load mpiwrappers/cray-mpich-llvm 5 | module load cudatoolkit-standalone 6 | export HDF5_ROOT=/home/jtramm/hdf5/hdf5_install 7 | 8 | module load llvm/release-16.0.0 9 | 10 | rm -rf build 11 | mkdir build 12 | cd build 13 | cmake --preset=llvm_a100_mpi -DCMAKE_INSTALL_PREFIX=./install -Doptimize=on -Ddevice_printf=off -Ddebug=on -Dcuda_thrust_sort=on .. 14 | make VERBOSE=1 install 15 | -------------------------------------------------------------------------------- /machine_specific_scripts/ORNL_crusher_and_frontier/crusher_run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -A CSC404_crusher 3 | #SBATCH -J openmc_scaling 4 | #SBATCH -t 00:20:00 5 | #SBATCH -p batch 6 | #SBATCH -N 24 7 | 8 | NNODES=${SLURM_NNODES} 9 | NPARTICLES_PER_NODE=320000000 10 | NPARTICLES=$(( $NNODES * $NPARTICLES_PER_NODE )) 11 | NRANKS=$(( $NNODES * 16 )) 12 | 13 | module reset 14 | module use /gpfs/alpine/csc404/proj-shared/openmc/Modules/modulefiles 15 | module load llvm/current 16 | module load openmc/experimental 17 | 18 | cd /gpfs/alpine/csc404/proj-shared/openmc/core-fom-depleted 19 | 20 | srun -n ${NRANKS} -c2 --ntasks-per-gpu=2 --gpu-bind=closest openmc --event -n $NPARTICLES -i 1000000 -s 2 21 | 22 | #NPARTICLES=80000000 23 | #srun -n4 -c2 --ntasks-per-gpu=2 --gpu-bind=closest openmc --event -n $NPARTICLES -i 1000000 -s 2 24 | -------------------------------------------------------------------------------- /machine_specific_scripts/ANL_sunspot/sunspot_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # It is 
assumed that this script will be run in the top level of the OpenMC source directory (.../openmc/.) 4 | # It will install OpenMC into the .../openmc/build/install directory 5 | 6 | module load spack 7 | module load cmake 8 | module load cray-hdf5/1.12.2.1 9 | 10 | export IGC_ForceOCLSIMDWidth=16 11 | export OMP_TARGET_OFFLOAD=MANDATORY 12 | unset LIBOMPTARGET_LEVEL_ZERO_COMMAND_BATCH 13 | export LIBOMPTARGET_LEVEL_ZERO_USE_IMMEDIATE_COMMAND_LIST=1 14 | export CFESingleSliceDispatchCCSMode=1 15 | export LIBOMPTARGET_DEVICES=SUBSUBDEVICE 16 | 17 | rm -rf build 18 | mkdir build 19 | cd build 20 | icpx --version 21 | cmake --preset=spirv_aot -Dsycl_sort=on -Ddevice_printf=off -Ddebug=off -DCMAKE_INSTALL_PREFIX=./install -Doptimize=on .. 22 | make VERBOSE=1 install 23 | -------------------------------------------------------------------------------- /machine_specific_scripts/ANL_polaris/polaris_run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | module use /home/jtramm/Modules/modulefiles 4 | module load cmake 5 | module load mpiwrappers/cray-mpich-llvm 6 | module load cudatoolkit-standalone 7 | export HDF5_ROOT=/home/jtramm/hdf5/hdf5_install 8 | module load llvm/release-16.0.0 9 | module load openmc/experimental 10 | 11 | NNODES=`wc -l < $PBS_NODEFILE` 12 | NRANKS_PER_NODE=16 13 | NRANKS=$(( $NRANKS_PER_NODE * $NNODES )) 14 | NPARTICLES_PER_RANK=5000000 15 | 16 | mpiexec -n ${NNODES} --ppn 1 ./enable_mps_polaris.sh 17 | 18 | NPARTICLES=$(( $NRANKS * $NPARTICLES_PER_RANK )) 19 | 20 | #mpiexec -n ${NRANKS} --ppn ${NRANKS_PER_NODE} -d 1 /home/jtramm/core-fom-depleted/polaris_launch.sh ${NPARTICLES} 21 | mpiexec -n ${NRANKS} --ppn ${NRANKS_PER_NODE} -d 4 /home/jtramm/core-fom-depleted/polaris_launch.sh ${NPARTICLES} 22 | 23 | #openmc --event -s 1 -i 1750000 -n 10000000 24 | #mpiexec -n ${NRANKS} --ppn ${NRANKS_PER_NODE} -d 4 /home/jtramm/core-fom-depleted/polaris_launch.sh ${NPARTICLES} 25 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 John Tramm 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /machine_specific_scripts/ANL_sunspot/scaling_launch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Load command line arguments 4 | NPARTICLES=${1} 5 | NRANKSPERTILE=${2} 6 | 7 | # As we are running multiple ranks per tile, we need to manually specify 8 | # the affinity of each MPI rank to assign them to tiles 9 | # The below logic assigns ranks to fill up each tile fully and then 10 | # each card fully before moving onto the next card. 
E.g.: 11 | # Ranks 0-3: PVC 0, Tile 0 12 | # Ranks 4-7: PVC 0, Tile 1 13 | # Ranks 8-11: PVC 1, Tile 0 14 | 15 | CARD=0 16 | TILE=0 17 | LOCAL_RANK=${PALS_LOCAL_RANKID} 18 | NRANKSPERCARD=$(( $NRANKSPERTILE * 2 )) 19 | 20 | CARD=$(( $LOCAL_RANK / $NRANKSPERCARD )) 21 | CARD_RANK=$(( $LOCAL_RANK - $CARD * $NRANKSPERCARD)) 22 | TILE=$(( $CARD_RANK / $NRANKSPERTILE )) 23 | 24 | export ZEX_NUMBER_OF_CCS=0:4 25 | 26 | # Assign 8 MPI ranks for each card 27 | export ZE_AFFINITY_MASK=${CARD}.${TILE} 28 | 29 | echo "Rank ID $LOCAL_RANK will run on GPU $CARD tile $TILE. ZE_AFFINITY_MASK = $ZE_AFFINITY_MASK. ZEX_NUMBER_OF_CCS = $ZEX_NUMBER_OF_CCS" 30 | 31 | # Debugging Run 32 | #if [ $PMIX_RANK -eq 623 ] 33 | #then 34 | #LIBOMPTARGET_DEBUG=1 gdb-oneapi -ex=r --args openmc --event -s 1 -i 775000 -n ${1} 35 | # gdb-oneapi -ex=r --args openmc --event -s 1 -i 775000 -n ${1} 36 | #else 37 | # openmc --event -s 1 -i 775000 -n ${1} 38 | #fi 39 | 40 | # Regular Run 41 | openmc --event -s 2 -i 1000000 -n ${NPARTICLES} --no-sort-non-fissionable-xs 42 | 43 | # Device Profiling Run 44 | #onetrace -d -v openmc --event -s 2 -i 775000 -n ${1} &> onetrace_${LOCAL_RANK}.txt 45 | 46 | # Host API profiling Run 47 | #onetrace -h openmc --event -s 2 -i 775000 -n ${1} &> host_onetrace_${LOCAL_RANK}.txt 48 | 49 | # All Debug 50 | #export FI_CXI_DEFAULT_CQ_SIZE=131072 51 | #export FI_CXI_OVFLOW_BUF_SIZE=8388608 52 | #export FI_CXI_CQ_FILL_PERCENT=20 53 | 54 | #gdb-oneapi -ex=r --args openmc --event -s 1 -i 775000 -n ${1} 55 | #gdb-oneapi -ex=r --args openmc -s 2 -i 775000 -n ${1} 56 | #openmc -s 2 -i 775000 -n ${1} 57 | -------------------------------------------------------------------------------- /machine_specific_scripts/ANL_sunspot/scaling_run.pbs: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #PBS -l select=1:system=sunspot,place=scatter 4 | #PBS -A Aurora_deployment 5 | #PBS -q debug 6 | #PBS -l walltime=00:15:00 7 | #PBS -N 
openmc_scaling 8 | 9 | ############################################################### 10 | # SETTINGS 11 | ############################################################### 12 | 13 | # To control the total number of nodes, adjust the "select=1" 14 | # part above to specify total number of sunspot nodes 15 | 16 | # A single node on Sunspot is composed of six PVC GPUs, each featuring two NUMA "tiles". 17 | # To use the full resources of each Sunspot node, we want to run on all 12 tiles. 18 | NTILES=12 19 | 20 | # On PVC, it is optimal to run 4 MPI ranks for each PVC tile 21 | NRANKSPERTILE=4 22 | 23 | # Number of particles to run per MPI rank 24 | NPARTICLESPERRANK=10000000 25 | 26 | # NOTE: you need to run this script from the directory 27 | # where an OpenMC problem exists, with accompanying: 28 | # settings.xml 29 | # materials.xml 30 | # geometry.xml 31 | # tallies.xml (not always required for all problems) 32 | 33 | # Load your compiled OpenMC into your environment. The below 34 | # method will load a pre-installed version of OpenMC on 35 | # sunspot. 
36 | module use /home/jtramm/Modules/modulefiles/ 37 | module load openmc/working 38 | 39 | ############################################################### 40 | # SETTINGS END 41 | ############################################################### 42 | 43 | export OMP_TARGET_OFFLOAD=MANDATORY 44 | export IGC_ForceOCLSIMDWidth=16 # Does appear to have a minor affect (about 3%) on 1 rank/tile 45 | unset LIBOMPTARGET_LEVEL_ZERO_COMMAND_BATCH 46 | export LIBOMPTARGET_LEVEL_ZERO_USE_IMMEDIATE_COMMAND_LIST=1 47 | export CFESingleSliceDispatchCCSMode=1 48 | export LIBOMPTARGET_DEVICES=SUBSUBDEVICE 49 | 50 | export TZ='/usr/share/zoneinfo/US/Central' 51 | export OMP_PROC_BIND=spread 52 | export OMP_NUM_THREADS=1 53 | unset OMP_PLACES 54 | 55 | ulimit -c 0 56 | 57 | echo Jobid: $PBS_JOBID 58 | echo Running on host `hostname` 59 | echo Running on nodes `cat $PBS_NODEFILE` 60 | 61 | NNODES=`wc -l < $PBS_NODEFILE` 62 | NRANKS=$(( NTILES * NRANKSPERTILE )) # Number of MPI ranks per node 63 | NDEPTH=1 # Number of hardware threads per rank, spacing between MPI ranks on a node 64 | NTHREADS=$OMP_NUM_THREADS # Number of OMP threads per rank, given to OMP_NUM_THREADS 65 | 66 | NTOTRANKS=$(( NNODES * NRANKS )) 67 | 68 | echo "NUM_NODES=${NNODES} TOTAL_RANKS=${NTOTRANKS} RANKS_PER_NODE=${NRANKS} THREADS_PER_RANK=${OMP_NUM_THREADS}" 69 | echo "OMP_PROC_BIND=$OMP_PROC_BIND OMP_PLACES=$OMP_PLACES" 70 | 71 | NPARTICLES=$(( $NTOTRANKS * $NPARTICLESPERRANK )) 72 | 73 | #mpiexec -np ${NTOTRANKS} -ppn ${NRANKS} -d ${NDEPTH} --cpu-bind depth -envall /home/jtramm/core-fom-depleted/scaling_launch.sh ${NPARTICLES} 74 | #mpiexec -np ${NTOTRANKS} -ppn ${NRANKS} -d ${NDEPTH} --cpu-bind=verbose,list:2:3:4:5:6:7:8:9:10:11:12:13:14:15:16:17:18:19:20:21:22:23:24:25:57:58:59:60:61:62:63:64:65:66:67:68:69:70:71:72:73:74:75:76:77:78:79:80 -envall /home/jtramm/core-fom-depleted/scaling_launch.sh ${NPARTICLES} 75 | mpiexec -np ${NTOTRANKS} -ppn ${NRANKS} -d ${NDEPTH} 
--cpu-bind=verbose,list:2:3:4:5:6:7:8:9:10:11:12:13:14:15:16:17:18:19:20:21:22:23:24:25:57:58:59:60:61:62:63:64:65:66:67:68:69:70:71:72:73:74:75:76:77:78:79:80 -envall ./scaling_launch.sh ${NPARTICLES} ${NRANKSPERTILE} 76 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## What the `build_openmc.sh` script does 2 | 3 | This script is capable of downloading the OpenMC source, a set of 4 | OpenMC data files required to run simulations, and a small repository 5 | of benchmarks for testing the OpenMP offloading capabilities of OpenMC on GPU. 6 | The script is also capable of compiling OpenMC and running a few different simulation 7 | problems, some with validation capabilities to ensure that it is working. 8 | 9 | Specific instructions for using this script are given below. 10 | 11 | ## Step 1 12 | 13 | You need to edit the preamble of the script to solve for OpenMC's dependencies: 14 | 15 | - HDF5 16 | - CMake version 3.22 or newer 17 | - An OpenMP offloading compiler 18 | - LLVM Clang version 16 RC1 or newer (see build_llvm.sh section below for instructions on how to compile/install this if needed) 19 | - Intel OneAPI SDK version 2023.1.0 (2023.x.0.20221013) or newer 20 | - Other compilers (GCC, AMD AOMP, HPE/Cray, IBM, Nvidia NVHPC) have bugs preventing OpenMC from working with them currently. 21 | - For running on NVIDIA GPUs, CUDA SDK 11.0 or newer 22 | - For running on AMD GPUs, rocm 5.4 or newer 23 | 24 | The script is self-documenting in where/how to edit the script to declare your specific dependency solutions. By default, the script 25 | will assume you have HDF5 and CMake installations through spack. 
If you need 26 | more help or info regarding these dependencies, see OpenMC's main installation 27 | documentation: https://docs.openmc.org/en/stable/usersguide/install.html#prerequisites 28 | 29 | ## Step 2 30 | 31 | If you are running the script for the first time and wish to download/compile/install 32 | everything from scratch, then you should run the script as: 33 | 34 | ``` 35 | ./build_openmc.sh all 36 | ``` 37 | 38 | By default, this will download/install everything in the directory where the script 39 | was run from. 40 | 41 | ## Step 3 42 | 43 | If step 2 completes and validation passes, then you can begin testing 44 | a larger, more realistic problem via: 45 | 46 | ``` 47 | ./build_openmc.sh performance 48 | ``` 49 | 50 | Which will run the Hoogenboom-Martin "large" depleted fuel reactor benchmark. 51 | 52 | 53 | ## Additional Options 54 | 55 | Command line options: 56 | - `all`: Does all basic steps (download + compile + validate) 57 | - `download`: only downloads data files 58 | - `compile`: only compiles (deletes old build and install first) 59 | - `small`: runs a small test problem 60 | - `validate`: runs a small test problem and checks for correctness 61 | - `performance`: runs a large test problem and reports performance 62 | 63 | Note that you can also optionally pass a second argument specifying 64 | a unique name for the openmc install. This allows for multiple installs 65 | to exist at once, for instance if testing the performance of different 66 | versions of OpenMC or different versions of compilers. 67 | 68 | 69 | For those interested in varying the compiler commands passed to 70 | cmake, it is recommended to directly edit the CMakePresets.json file 71 | to alter an existing preset to your needs or to add another preset 72 | (perhaps one that inherits from an existing one). 
73 | 74 | Alternatively, you can also overwrite the CMakePresets.json preset 75 | via definition of the following optional variables: 76 | 77 | - `OPENMC_CXX_FLAGS` 78 | 79 | - `OPENMC_LD_FLAGS` 80 | 81 | For instance, one may wish to run the script as: 82 | 83 | `OPENMC_CXX_FLAGS="-mllvm -scalar-evolution-infer-max-trip-count-from-memory-access=1" OPENMC_LD_FLAGS="-fuse-ld=lld" ./build_openmc.sh compile experimental_flags` 84 | 85 | However, it is important to note that these flags are not being appended to the preset flags, rather, they are overwriting them. Thus, in the above example, 86 | one would lose the standard offloading flags for that architecture. You would need to locate the desired preset you wanted to append to in CMakePresets.json 87 | and be sure to include those commands in your definitions. For example, a full redefinition for llvm native with the new flags appended might look like: 88 | 89 | `OPENMC_CXX_FLAGS="-Dgsl_CONFIG_CONTRACT_CHECKING_OFF -Wno-tautological-constant-compare -Wno-openmp-mapping -fopenmp -fopenmp-cuda-mode --offload-arch=native -mllvm -scalar-evolution-infer-max-trip-count-from-memory-access=1" OPENMC_LD_FLAGS="-fuse-ld=lld" ./build_openmc.sh compile experimental_flags` 90 | 91 | ## What the `build_llvm.sh` script does 92 | 93 | In the event that you do not have LLVM Clang installed (or your install 94 | was not built with OpenMP offloading support), this script will allow you to 95 | compile LLVM from source with the needed build options. 96 | 97 | The script contains some notes regarding a few areas you'll need to edit 98 | to update install locations for your system. 99 | 100 | The script also contains info on what environment variables need to be set 101 | to add LLVM to your environment, along with an example modulefile. 102 | 103 | ## Additional machine-specific scripts 104 | 105 | There are also scripts available in this repository for compiling on several specific supercomputers. 
These are provided to make it easier for people to know which modules to load on these systems, and to know how to launch OpenMC in its optimal configuration at scale. 106 | -------------------------------------------------------------------------------- /machine_specific_scripts/ORNL_crusher_and_frontier/CMakePresets.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 3, 3 | "configurePresets": [ 4 | { 5 | "name": "base", 6 | "generator": "Unix Makefiles", 7 | "cacheVariables": { 8 | "CMAKE_CXX_FLAGS": "$env{PRESET_CXX_FLAGS} $env{INTEL_CXX_FLAGS} $env{LLVM_CXX_FLAGS} $env{COMMON_CXX_FLAGS}" 9 | }, 10 | "environment": { 11 | "COMMON_CXX_FLAGS": "-Dgsl_CONFIG_CONTRACT_CHECKING_OFF -Wno-tautological-constant-compare -Wno-openmp-mapping" 12 | } 13 | }, 14 | { 15 | "name": "unity", 16 | "inherits": ["base"], 17 | "cacheVariables": { 18 | "CMAKE_UNITY_BUILD": "ON", 19 | "CMAKE_UNITY_BUILD_MODE": "BATCH", 20 | "CMAKE_UNITY_BUILD_BATCH_SIZE": "1000" 21 | } 22 | }, 23 | { 24 | "name": "intel", 25 | "inherits": ["unity"], 26 | "cacheVariables": { 27 | "CMAKE_C_COMPILER": "mpicc", 28 | "CMAKE_CXX_COMPILER": "mpic++" 29 | }, 30 | "environment": { 31 | "INTEL_CXX_FLAGS": "-fiopenmp" 32 | } 33 | }, 34 | { 35 | "name": "llvm", 36 | "inherits": ["unity"], 37 | "cacheVariables": { 38 | "CMAKE_C_COMPILER": "clang", 39 | "CMAKE_CXX_COMPILER": "clang++" 40 | }, 41 | "environment": { 42 | "LLVM_CXX_FLAGS": "-fopenmp -fopenmp-cuda-mode" 43 | } 44 | }, 45 | { 46 | "name": "spirv", 47 | "inherits": ["intel"], 48 | "displayName": "Intel GPUs", 49 | "environment": { 50 | "PRESET_CXX_FLAGS": "-fopenmp-targets=spir64 -mllvm -vpo-paropt-atomic-free-reduction=false" 51 | } 52 | }, 53 | { 54 | "name": "spirv_aot", 55 | "inherits": ["intel"], 56 | "displayName": "Intel GPUs w/Ahead of Time (AOT) Compilation", 57 | "environment": { 58 | "PRESET_CXX_FLAGS": "-fopenmp-targets=spir64_gen -mllvm -indvars-widen-indvars=false 
-Xopenmp-target-backend \"-device 12.60.7\"" 59 | } 60 | }, 61 | { 62 | "name": "spirv_aot_no_workarounds", 63 | "inherits": ["intel"], 64 | "displayName": "Intel GPUs w/Ahead of Time (AOT) Compilation", 65 | "environment": { 66 | "PRESET_CXX_FLAGS": "-fopenmp-targets=spir64_gen -Xopenmp-target-backend \"-device 12.60.7\"" 67 | } 68 | }, 69 | { 70 | "name": "llvm_v100", 71 | "inherits": ["llvm"], 72 | "displayName": "LLVM Clang V100", 73 | "environment": { 74 | "PRESET_CXX_FLAGS": "-fopenmp-targets=nvptx64 -Xopenmp-target -march=sm_70" 75 | } 76 | }, 77 | { 78 | "name": "llvm_v100_mpi", 79 | "inherits": ["llvm"], 80 | "displayName": "LLVM Clang V100", 81 | "cacheVariables": { 82 | "CMAKE_C_COMPILER": "mpicc", 83 | "CMAKE_CXX_COMPILER": "mpic++" 84 | }, 85 | "environment": { 86 | "PRESET_CXX_FLAGS": "-fopenmp-targets=nvptx64 -Xopenmp-target -march=sm_70" 87 | } 88 | }, 89 | { 90 | "name": "llvm_a100", 91 | "inherits": ["llvm"], 92 | "displayName": "LLVM Clang A100", 93 | "environment": { 94 | "PRESET_CXX_FLAGS": "-fopenmp-targets=nvptx64 -Xopenmp-target -march=sm_80" 95 | } 96 | }, 97 | { 98 | "name": "llvm_a100_mpi", 99 | "inherits": ["llvm_a100"], 100 | "displayName": "LLVM Clang A100", 101 | "cacheVariables": { 102 | "CMAKE_C_COMPILER": "mpicc", 103 | "CMAKE_CXX_COMPILER": "mpic++" 104 | } 105 | }, 106 | { 107 | "name": "llvm_a100_lto", 108 | "inherits": ["llvm_a100"], 109 | "displayName": "LLVM Clang A100", 110 | "environment": { 111 | "PRESET_CXX_FLAGS": "-foffload-lto" 112 | } 113 | }, 114 | { 115 | "name": "llvm_mi100", 116 | "inherits": ["llvm"], 117 | "displayName": "LLVM Clang MI100", 118 | "environment": { 119 | "PRESET_CXX_FLAGS": "-fopenmp-targets=amdgcn-amd-amdhsa -Xopenmp-target=amdgcn-amd-amdhsa -march=gfx908" 120 | } 121 | }, 122 | { 123 | "name": "llvm_mi250", 124 | "inherits": ["llvm"], 125 | "displayName": "LLVM Clang MI250 and MI250X", 126 | "environment": { 127 | "PRESET_CXX_FLAGS": "-fopenmp-targets=amdgcn-amd-amdhsa 
-Xopenmp-target=amdgcn-amd-amdhsa -march=gfx90a" 128 | } 129 | }, 130 | { 131 | "name": "llvm_mi100_mpi", 132 | "inherits": ["llvm"], 133 | "displayName": "LLVM Clang MI100", 134 | "environment": { 135 | "PRESET_CXX_FLAGS": "-fopenmp-targets=amdgcn-amd-amdhsa -Xopenmp-target=amdgcn-amd-amdhsa -march=gfx908" 136 | } 137 | }, 138 | { 139 | "name": "llvm_mi250x_mpi", 140 | "inherits": ["llvm"], 141 | "displayName": "LLVM Clang MI250X", 142 | "environment": { 143 | "PRESET_CXX_FLAGS": "-fopenmp-targets=amdgcn-amd-amdhsa -Xopenmp-target=amdgcn-amd-amdhsa -march=gfx90a -I/opt/cray/pe/mpich/8.1.17/ofi/amd/5.0/include -L/opt/cray/pe/mpich/8.1.17/ofi/amd/5.0/lib -lmpi -L/opt/cray/pe/mpich/8.1.17/gtl/lib -lmpi_gtl_hsa" 144 | } 145 | }, 146 | { 147 | "name": "nvhpc_v100", 148 | "inherits": ["base"], 149 | "displayName": "NVIDIA NVHPC V100", 150 | "cacheVariables": { 151 | "CMAKE_C_COMPILER": "nvc", 152 | "CMAKE_CXX_COMPILER": "nvc++" 153 | }, 154 | "environment": { 155 | "PRESET_CXX_FLAGS": "-mp=gpu -Minfo=mp -gpu=cc70" 156 | } 157 | }, 158 | { 159 | "name": "nvhpc_a100", 160 | "inherits": ["base"], 161 | "displayName": "NVIDIA NVHPC A100", 162 | "cacheVariables": { 163 | "CMAKE_C_COMPILER": "nvc", 164 | "CMAKE_CXX_COMPILER": "nvc++" 165 | }, 166 | "environment": { 167 | "PRESET_CXX_FLAGS": "-mp=gpu -Minfo=mp -gpu=cc80" 168 | } 169 | } 170 | ] 171 | } 172 | -------------------------------------------------------------------------------- /machine_specific_scripts/ORNL_crusher_and_frontier/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.22 FATAL_ERROR) 2 | project(openmc C CXX) 3 | 4 | # Set version numbers 5 | set(OPENMC_VERSION_MAJOR 0) 6 | set(OPENMC_VERSION_MINOR 13) 7 | set(OPENMC_VERSION_RELEASE 0) 8 | set(OPENMC_VERSION ${OPENMC_VERSION_MAJOR}.${OPENMC_VERSION_MINOR}.${OPENMC_VERSION_RELEASE}) 9 | configure_file(include/openmc/version.h.in "${CMAKE_BINARY_DIR}/include/openmc/version.h" 
@ONLY) 10 | 11 | # Setup output directories 12 | set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) 13 | set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) 14 | set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) 15 | 16 | # Set module path 17 | set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules) 18 | 19 | # Allow user to specify _ROOT variables 20 | if (NOT (CMAKE_VERSION VERSION_LESS 3.12)) 21 | cmake_policy(SET CMP0074 NEW) 22 | endif() 23 | 24 | #=============================================================================== 25 | # Command line options 26 | #=============================================================================== 27 | 28 | option(profile "Compile with profiling flags" OFF) 29 | option(debug "Compile with debug flags" OFF) 30 | option(optimize "Turn on all compiler optimization flags" OFF) 31 | option(coverage "Compile with coverage analysis flags" OFF) 32 | option(dagmc "Enable support for DAGMC (CAD) geometry" OFF) 33 | option(new_w "Use Ben Forget's Rational Fraction Approximation of Faddeeva W(z)" ON) 34 | option(device_history "Enable history-based transport on device" OFF) 35 | 36 | option(device_printf "Enable printf statements on device" ON) 37 | option(disable_xs_cache "Disable Micro XS cache" ON) 38 | option(cuda_thrust_sort "Enable on-device sorting via CUDA Thrust (NVIDIA devices only)" OFF) 39 | option(hip_thrust_sort "Enable on-device sorting via HIP Thrust (AMD devices only)" OFF) 40 | option(sycl_sort "Enable on-device sorting via SYCL OneAPI DPL (Intel devices only)" OFF) 41 | 42 | #=============================================================================== 43 | # MPI for distributed-memory parallelism 44 | #=============================================================================== 45 | 46 | set(MPI_ENABLED TRUE) 47 | 48 | #set(MPI_ENABLED FALSE) 49 | #if(${CMAKE_CXX_COMPILER} MATCHES "(mpi[^/]*|CC)$") 50 | # message(STATUS "Detected MPI wrapper: ${CMAKE_CXX_COMPILER}") 51 | 
# set(MPI_ENABLED TRUE) 52 | #endif() 53 | 54 | #=============================================================================== 55 | # DAGMC Geometry Support - need DAGMC/MOAB 56 | #=============================================================================== 57 | if(dagmc) 58 | find_package(DAGMC REQUIRED PATH_SUFFIXES lib/cmake) 59 | endif() 60 | 61 | #=============================================================================== 62 | # Check for submodules perhaps already on system 63 | #=============================================================================== 64 | 65 | # If not found, we just pull appropriate versions from github and build them. 66 | find_package(fmt QUIET NO_SYSTEM_ENVIRONMENT_PATH) 67 | if(fmt_FOUND) 68 | message(STATUS "Found fmt: ${fmt_DIR} (version ${fmt_VERSION})") 69 | else() 70 | message(STATUS "Did not find fmt, will use submodule instead") 71 | endif() 72 | find_package(pugixml QUIET NO_SYSTEM_ENVIRONMENT_PATH) 73 | if(pugixml_FOUND) 74 | message(STATUS "Found pugixml: ${pugixml_DIR}") 75 | else() 76 | message(STATUS "Did not find pugixml, will use submodule instead") 77 | endif() 78 | 79 | #=============================================================================== 80 | # HDF5 for binary output 81 | #=============================================================================== 82 | 83 | # Unfortunately FindHDF5.cmake will always prefer a serial HDF5 installation 84 | # over a parallel installation if both appear on the user's PATH. To get around 85 | # this, we check for the environment variable HDF5_ROOT and if it exists, use it 86 | # to check whether its a parallel version. 
87 | 88 | if(NOT DEFINED HDF5_PREFER_PARALLEL) 89 | if(DEFINED ENV{HDF5_ROOT} AND EXISTS $ENV{HDF5_ROOT}/bin/h5pcc) 90 | set(HDF5_PREFER_PARALLEL TRUE) 91 | else() 92 | set(HDF5_PREFER_PARALLEL FALSE) 93 | endif() 94 | endif() 95 | 96 | find_package(HDF5 REQUIRED COMPONENTS C HL) 97 | if(HDF5_IS_PARALLEL) 98 | if(NOT MPI_ENABLED) 99 | message(FATAL_ERROR "Parallel HDF5 was detected, but the detected compiler,\ 100 | ${CMAKE_CXX_COMPILER}, does not support MPI. An MPI-capable compiler must \ 101 | be used with parallel HDF5.") 102 | endif() 103 | message(STATUS "Using parallel HDF5") 104 | endif() 105 | 106 | # Version 1.12 of HDF5 deprecates the H5Oget_info_by_idx() interface. 107 | # Thus, we give these flags to allow usage of the old interface in newer 108 | # versions of HDF5. 109 | if(NOT (${HDF5_VERSION} VERSION_LESS 1.12.0)) 110 | list(APPEND cxxflags -DH5Oget_info_by_idx_vers=1 -DH5O_info_t_vers=1) 111 | endif() 112 | 113 | #=============================================================================== 114 | # Set compile/link flags based on which compiler is being used 115 | #=============================================================================== 116 | 117 | # Skip for Visual Studio which has its own configurations through GUI 118 | if(NOT MSVC) 119 | 120 | set(CMAKE_POSITION_INDEPENDENT_CODE ON) 121 | 122 | list(APPEND cxxflags -O2) 123 | if(debug) 124 | list(APPEND cxxflags -gline-tables-only) 125 | endif() 126 | if(profile) 127 | list(APPEND cxxflags -g -fno-omit-frame-pointer) 128 | endif() 129 | if(optimize) 130 | list(REMOVE_ITEM cxxflags -O2) 131 | list(APPEND cxxflags -O3) 132 | endif() 133 | if(coverage) 134 | list(APPEND cxxflags --coverage) 135 | list(APPEND ldflags --coverage) 136 | endif() 137 | if(sycl_sort) 138 | list(APPEND cxxflags -fsycl -D_PSTL_PAR_BACKEND_SERIAL=1 -DPSTL_USE_PARALLEL_POLICIES=0 -D_GLIBCXX_USE_TBB_PAR_BACKEND=0) 139 | list(APPEND ldflags -fsycl -D_PSTL_PAR_BACKEND_SERIAL=1 -DPSTL_USE_PARALLEL_POLICIES=0 
-D_GLIBCXX_USE_TBB_PAR_BACKEND=0) 140 | endif() 141 | 142 | # Show flags being used 143 | message(STATUS "OpenMC C++ flags: ${cxxflags}") 144 | message(STATUS "OpenMC Linker flags: ${ldflags}") 145 | 146 | endif() 147 | 148 | #=============================================================================== 149 | # Update git submodules as needed 150 | #=============================================================================== 151 | 152 | find_package(Git) 153 | if(GIT_FOUND AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git") 154 | option(GIT_SUBMODULE "Check submodules during build" ON) 155 | if(GIT_SUBMODULE) 156 | message(STATUS "Submodule update") 157 | execute_process(COMMAND ${GIT_EXECUTABLE} submodule update --init --recursive 158 | WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} 159 | RESULT_VARIABLE GIT_SUBMOD_RESULT) 160 | if(NOT GIT_SUBMOD_RESULT EQUAL 0) 161 | message(FATAL_ERROR "git submodule update --init failed with \ 162 | ${GIT_SUBMOD_RESULT}, please checkout submodules") 163 | endif() 164 | endif() 165 | endif() 166 | 167 | # Check to see if submodules exist (by checking one) 168 | if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/vendor/pugixml/CMakeLists.txt") 169 | message(FATAL_ERROR "The git submodules were not downloaded! GIT_SUBMODULE was \ 170 | turned off or failed. 
Please update submodules and try again.") 171 | endif() 172 | 173 | #=============================================================================== 174 | # pugixml library 175 | #=============================================================================== 176 | 177 | if (NOT pugixml_FOUND) 178 | add_subdirectory(vendor/pugixml) 179 | set_target_properties(pugixml PROPERTIES CXX_STANDARD 14 CXX_EXTENSIONS OFF) 180 | endif() 181 | 182 | #=============================================================================== 183 | # {fmt} library 184 | #=============================================================================== 185 | 186 | if (NOT fmt_FOUND) 187 | set(FMT_INSTALL ON CACHE BOOL "Generate the install target.") 188 | add_subdirectory(vendor/fmt) 189 | endif() 190 | 191 | #=============================================================================== 192 | # xtensor header-only library 193 | #=============================================================================== 194 | 195 | 196 | # CMake 3.13+ will complain about policy CMP0079 unless it is set explicitly 197 | if (NOT (CMAKE_VERSION VERSION_LESS 3.13)) 198 | cmake_policy(SET CMP0079 NEW) 199 | endif() 200 | 201 | add_subdirectory(vendor/xtl) 202 | set(xtl_DIR ${CMAKE_CURRENT_BINARY_DIR}/vendor/xtl) 203 | set(XTENSOR_DISABLE_EXCEPTIONS 1) 204 | add_subdirectory(vendor/xtensor) 205 | 206 | #=============================================================================== 207 | # GSL header-only library 208 | #=============================================================================== 209 | 210 | set(GSL_LITE_OPT_INSTALL_COMPAT_HEADER ON CACHE BOOL 211 | "Install MS-GSL compatibility header ") 212 | add_subdirectory(vendor/gsl-lite) 213 | 214 | # Make sure contract violations throw exceptions 215 | #target_compile_definitions(gsl-lite-v1 INTERFACE GSL_THROW_ON_CONTRACT_VIOLATION) 216 | target_compile_definitions(gsl-lite-v1 INTERFACE gsl_CONFIG_ALLOWS_NONSTRICT_SPAN_COMPARISON=1) 217 | 218 | 
#===============================================================================
# CUDA Thrust sorting library
#===============================================================================

# Location of OpenMC's public headers; consumed by the thrust sort subprojects
set(OPENMC_HEADER_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include)
if(cuda_thrust_sort)
  add_subdirectory(cuda_thrust_sort)
endif()
if(hip_thrust_sort)
  add_subdirectory(hip_thrust_sort)
endif()

#===============================================================================
# RPATH information
#===============================================================================

# Provide install directory variables as defined by GNU coding standards
include(GNUInstallDirs)

# This block of code ensures that dynamic libraries can be found via the RPATH
# whether the executable is the original one from the build directory or the
# installed one in CMAKE_INSTALL_PREFIX. Ref:
# https://gitlab.kitware.com/cmake/community/wikis/doc/cmake/RPATH-handling

# use, i.e. don't skip the full RPATH for the build tree
set(CMAKE_SKIP_BUILD_RPATH FALSE)

# when building, don't use the install RPATH already
# (but later on when installing)
set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)

# add the automatically determined parts of the RPATH
# which point to directories outside the build tree to the install RPATH
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)

# the RPATH to be used when installing, but only if it's not a system directory
list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES "${CMAKE_INSTALL_FULL_LIBDIR}" isSystemDir)
if("${isSystemDir}" STREQUAL "-1")
  set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_FULL_LIBDIR}")
endif()

#===============================================================================
# faddeeva library (currently disabled; see also the commented references to
# faddeeva in the link/install sections below)
#===============================================================================

#add_library(faddeeva STATIC vendor/faddeeva/Faddeeva.cc)
#target_include_directories(faddeeva
#  PUBLIC
#    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/vendor>
#    $<INSTALL_INTERFACE:include>
#)
##target_compile_options(faddeeva PRIVATE ${cxxflags})

#===============================================================================
# libopenmc
#===============================================================================

# NOTE: sycl_sort.cpp must be first due to poor namespace usage in the OneAPI
# DPL library (they have a namespace called "data" that clashes with OpenMC when
# using a unity build).

# Source files for the core OpenMC library (order preserved; sycl_sort.cpp
# must stay first -- see the NOTE above this list)
list(APPEND libopenmc_SOURCES
  src/sycl_sort.cpp
  src/bank.cpp
  src/boundary_condition.cpp
  src/bremsstrahlung.cpp
  src/dagmc.cpp
  src/cell.cpp
  src/cmfd_solver.cpp
  src/cross_sections.cpp
  src/device_alloc.cpp
  src/distribution.cpp
  src/distribution_angle.cpp
  src/distribution_energy.cpp
  src/distribution_multi.cpp
  src/distribution_spatial.cpp
  src/eigenvalue.cpp
  src/endf.cpp
  src/endf_flat.cpp
  src/error.cpp
  src/event.cpp
  src/initialize.cpp
  src/finalize.cpp
  src/geometry.cpp
  src/geometry_aux.cpp
  src/hdf5_interface.cpp
  src/lattice.cpp
  src/material.cpp
  src/math_functions.cpp
  src/mesh.cpp
  src/message_passing.cpp
  src/mgxs.cpp
  src/mgxs_interface.cpp
  src/neighbor_list.cpp
  src/nuclide.cpp
  src/output.cpp
  src/particle.cpp
  src/particle_restart.cpp
  src/photon.cpp
  src/physics.cpp
  src/physics_common.cpp
  src/physics_mg.cpp
  src/plot.cpp
  src/position.cpp
  src/progress_bar.cpp
  src/random_lcg.cpp
  src/reaction.cpp
  src/reaction_product.cpp
  src/scattdata.cpp
  src/secondary_correlated.cpp
  src/secondary_kalbach.cpp
  src/secondary_nbody.cpp
  src/secondary_thermal.cpp
  src/secondary_uncorrelated.cpp
  src/secondary_flat.cpp
  src/serialize.cpp
  src/settings.cpp
  src/simulation.cpp
  src/source.cpp
  src/state_point.cpp
  src/string_utils.cpp
  src/summary.cpp
  src/surface.cpp
  src/tallies/derivative.cpp
  src/tallies/filter.cpp
  src/tallies/filter_azimuthal.cpp
  src/tallies/filter_cellborn.cpp
  src/tallies/filter_cellfrom.cpp
  src/tallies/filter_cell.cpp
  src/tallies/filter_cell_instance.cpp
  src/tallies/filter_delayedgroup.cpp
  src/tallies/filter_distribcell.cpp
  src/tallies/filter_energyfunc.cpp
  src/tallies/filter_energy.cpp
  src/tallies/filter_legendre.cpp
  src/tallies/filter_material.cpp
  src/tallies/filter_mesh.cpp
  src/tallies/filter_meshsurface.cpp
  src/tallies/filter_mu.cpp
  src/tallies/filter_particle.cpp
  src/tallies/filter_polar.cpp
  src/tallies/filter_sph_harm.cpp
  src/tallies/filter_sptl_legendre.cpp
  src/tallies/filter_surface.cpp
  src/tallies/filter_universe.cpp
  src/tallies/filter_zernike.cpp
  src/tallies/tally.cpp
  src/tallies/tally_scoring.cpp
  src/tallies/trigger.cpp
  src/timer.cpp
  src/thermal.cpp
  src/track_output.cpp
  src/urr.cpp
  src/volume_calc.cpp
  src/wmp.cpp
  src/xml_interface.cpp
  src/xsdata.cpp)


# For Visual Studio compilers
if(MSVC)
  # Use static library (otherwise explicit symbol portings are needed)
  add_library(libopenmc STATIC ${libopenmc_SOURCES})

  # To use the shared HDF5 libraries on Windows, the H5_BUILT_AS_DYNAMIC_LIB
  # compile definition must be specified. (Leading -D is unnecessary here;
  # target_compile_definitions takes bare definition names.)
  target_compile_definitions(libopenmc PRIVATE H5_BUILT_AS_DYNAMIC_LIB)
else()
  add_library(libopenmc SHARED ${libopenmc_SOURCES})
endif()

# Avoid VS error lnk1149: output filename matches input filename
if(NOT MSVC)
  set_target_properties(libopenmc PROPERTIES OUTPUT_NAME openmc)
endif()

# Public headers: exported via BUILD_INTERFACE for in-tree consumers and
# INSTALL_INTERFACE for installed packages; HDF5 headers are needed publicly
target_include_directories(libopenmc
  PUBLIC
    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
    $<INSTALL_INTERFACE:include>
    ${HDF5_INCLUDE_DIRS}
)

# Set compile flags
target_compile_options(libopenmc PRIVATE ${cxxflags})

# Add include directory for configured version file
target_include_directories(libopenmc PRIVATE ${CMAKE_BINARY_DIR}/include)

if(HDF5_IS_PARALLEL)
  target_compile_definitions(libopenmc PRIVATE PHDF5)
endif()
if(MPI_ENABLED)
  # PUBLIC so that consumers compiling against OpenMC headers see OPENMC_MPI
  target_compile_definitions(libopenmc PUBLIC OPENMC_MPI)
endif()

# Set git SHA1 hash as a compile definition
if(GIT_FOUND)
  execute_process(COMMAND ${GIT_EXECUTABLE} rev-parse HEAD
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
    RESULT_VARIABLE GIT_SHA1_SUCCESS
    OUTPUT_VARIABLE GIT_SHA1
    ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
  if(GIT_SHA1_SUCCESS EQUAL 0)
    target_compile_definitions(libopenmc PRIVATE GIT_SHA1="${GIT_SHA1}")
  endif()
endif()


# target_link_libraries treats any arguments starting with - but not -l as
# linker flags. Thus, we can pass both linker flags and libraries together.
# Link libraries and flags. PUBLIC on libopenmc so transitive usage
# requirements propagate to the openmc executable (matches the propagation
# behavior of the legacy keyword-less signature this replaces).
target_link_libraries(libopenmc PUBLIC ${ldflags} ${HDF5_LIBRARIES} ${HDF5_HL_LIBRARIES}
  pugixml xtensor gsl-lite-v1 fmt::fmt)
#pugixml faddeeva xtensor gsl-lite-v1 fmt::fmt)

# Optional feature toggles: each cache option adds a compile definition and,
# where needed, an extra link dependency
if(dagmc)
  target_compile_definitions(libopenmc PRIVATE DAGMC)
  target_link_libraries(libopenmc PUBLIC dagmc-shared uwuw-shared)
endif()

if(cuda_thrust_sort OR hip_thrust_sort)
  target_compile_definitions(libopenmc PRIVATE CUDA_THRUST_SORT)
  target_link_libraries(libopenmc PUBLIC openmc_thrust_sort)
endif()

if(sycl_sort)
  target_compile_definitions(libopenmc PRIVATE SYCL_SORT)
endif()

if(new_w)
  target_compile_definitions(libopenmc PRIVATE NEW_FADDEEVA)
endif()

if(device_history)
  target_compile_definitions(libopenmc PRIVATE DEVICE_HISTORY)
endif()

if(device_printf)
  target_compile_definitions(libopenmc PRIVATE DEVICE_PRINTF)
endif()

if(disable_xs_cache)
  target_compile_definitions(libopenmc PRIVATE NO_MICRO_XS_CACHE)
endif()

#===============================================================================
# openmc executable
#===============================================================================
add_executable(openmc src/main.cpp)
target_compile_options(openmc PRIVATE ${cxxflags})
target_include_directories(openmc PRIVATE ${CMAKE_BINARY_DIR}/include)
target_link_libraries(openmc PRIVATE libopenmc)

# Ensure C++17 standard is used (required for Intel OneDPL on-device sorting)
set_target_properties(
  #openmc libopenmc faddeeva
  openmc libopenmc
  PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)

#===============================================================================
# Python package
#===============================================================================

# After each build, copy the shared library into the Python package so
# openmc.lib can dlopen it without an install step
add_custom_command(TARGET libopenmc POST_BUILD
  COMMAND ${CMAKE_COMMAND} -E copy
    $<TARGET_FILE:libopenmc>
    ${CMAKE_CURRENT_SOURCE_DIR}/openmc/lib/$<TARGET_FILE_NAME:libopenmc>
  COMMENT "Copying libopenmc to Python module directory"
  VERBATIM)

#===============================================================================
# Install executable, scripts, manpage, license
#===============================================================================

configure_file(cmake/OpenMCConfig.cmake.in "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/OpenMCConfig.cmake" @ONLY)
configure_file(cmake/OpenMCConfigVersion.cmake.in "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/OpenMCConfigVersion.cmake" @ONLY)

set(INSTALL_CONFIGDIR ${CMAKE_INSTALL_LIBDIR}/cmake/OpenMC)
# The thrust sort helper library is only installed when it was built
#install(TARGETS openmc libopenmc faddeeva
if(cuda_thrust_sort OR hip_thrust_sort)
  install(TARGETS openmc libopenmc openmc_thrust_sort
    EXPORT openmc-targets
    RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
    ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
  )
else()
  install(TARGETS openmc libopenmc
    EXPORT openmc-targets
    RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
    ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
  )
endif()
install(EXPORT openmc-targets
  FILE OpenMCTargets.cmake
  NAMESPACE OpenMC::
  DESTINATION ${INSTALL_CONFIGDIR})

install(DIRECTORY src/relaxng DESTINATION ${CMAKE_INSTALL_DATADIR}/openmc)
install(FILES
  "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/OpenMCConfig.cmake"
  "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/OpenMCConfigVersion.cmake"
  DESTINATION ${INSTALL_CONFIGDIR})
install(FILES man/man1/openmc.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1)
install(FILES LICENSE DESTINATION "${CMAKE_INSTALL_DOCDIR}" RENAME copyright)
install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
# Install the generated version header alongside OpenMC's other public headers
install(FILES "${CMAKE_BINARY_DIR}/include/openmc/version.h"
  DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/openmc)

# Copy headers for vendored dependencies (note that all except faddeeva are handled
# separately since they are managed by CMake)
#install(DIRECTORY vendor/faddeeva DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})