├── VERSION.txt ├── examples ├── propti_desc_stat_01 │ ├── input_analyser.py │ ├── propti.pickle.init │ ├── propti.pickle.finished │ ├── propti_db.csv │ ├── input.py │ └── tga_analysis_01.fds ├── propti_plot_best_sim_exp_01 │ ├── input_analyser.py │ ├── propti.pickle.init │ ├── propti.pickle.finished │ ├── propti_db.csv │ ├── input.py │ └── tga_analysis_01.fds ├── propti_analyser_01 │ ├── propti.pickle.init │ ├── propti.pickle.finished │ ├── propti_db.csv │ ├── input.py │ └── tga_analysis_01.fds ├── cone_02 │ ├── cone_template.fds │ ├── input.py │ └── experimental_data.csv ├── tga_analysis_01 │ ├── input.py │ └── tga_analysis_01.fds ├── tga_analysis_02 │ ├── OriginalFiles │ │ ├── tga_5K.fds │ │ ├── tga_10K.fds │ │ └── tga_15K.fds │ ├── tga_analysis_02.fds │ └── input.py └── cone_01 │ ├── input.py │ ├── SimpleConeLaunchTest_Alu_BestParaSet_Toast.fds │ └── SimpleConeLaunchTest_ISO_BestParaSet_Toast.fds ├── requirements.txt ├── .dockerignore ├── resources ├── jureca │ ├── propti.png │ ├── HelpJURECA.txt │ ├── jureca.modules │ ├── propti-chain.sh │ ├── fds.jureca.sh │ ├── propti-chain-part.job │ └── propti.svg └── CERN_HPC │ └── CERN_HPC_slurm.sh ├── .gitignore ├── LICENSE ├── docker ├── Dockerfile.nightly ├── Dockerfile └── readme.md ├── .github └── workflows │ ├── update.yaml │ ├── update.nightly.yaml │ ├── deploy.nightly.yaml │ └── deploy.yaml ├── propti ├── __init__.py ├── propti_pre_processing.py ├── basic_functions.py ├── spotpy_wrapper.py ├── propti_post_processing.py └── fitness_methods.py ├── propti_run.py ├── propti_sense.py ├── propti_prepare.py └── README.md /VERSION.txt: -------------------------------------------------------------------------------- 1 | PROPTI version.McTestcase -------------------------------------------------------------------------------- /examples/propti_desc_stat_01/input_analyser.py: -------------------------------------------------------------------------------- 1 | 'pearson_coeff' 
-------------------------------------------------------------------------------- /examples/propti_plot_best_sim_exp_01/input_analyser.py: -------------------------------------------------------------------------------- 1 | 'pearson_coeff' -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | matplotlib 3 | scipy 4 | pandas 5 | spotpy 6 | mpi4py 7 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .* 2 | *.md 3 | *.txt 4 | examples 5 | docker 6 | !VERSION.txt 7 | !.git/refs/heads/master -------------------------------------------------------------------------------- /resources/jureca/propti.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FireDynamics/propti/HEAD/resources/jureca/propti.png -------------------------------------------------------------------------------- /examples/propti_analyser_01/propti.pickle.init: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FireDynamics/propti/HEAD/examples/propti_analyser_01/propti.pickle.init -------------------------------------------------------------------------------- /examples/propti_desc_stat_01/propti.pickle.init: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FireDynamics/propti/HEAD/examples/propti_desc_stat_01/propti.pickle.init -------------------------------------------------------------------------------- /examples/propti_analyser_01/propti.pickle.finished: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/FireDynamics/propti/HEAD/examples/propti_analyser_01/propti.pickle.finished -------------------------------------------------------------------------------- /examples/propti_desc_stat_01/propti.pickle.finished: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FireDynamics/propti/HEAD/examples/propti_desc_stat_01/propti.pickle.finished -------------------------------------------------------------------------------- /examples/propti_plot_best_sim_exp_01/propti.pickle.init: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FireDynamics/propti/HEAD/examples/propti_plot_best_sim_exp_01/propti.pickle.init -------------------------------------------------------------------------------- /examples/propti_plot_best_sim_exp_01/propti.pickle.finished: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FireDynamics/propti/HEAD/examples/propti_plot_best_sim_exp_01/propti.pickle.finished -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.pyo 3 | .idea/codeStyleSettings.xml 4 | .idea/misc.xml 5 | .idea/vcs.xml 6 | .idea/dictionaries/thehnen.xml 7 | .idea/modules.xml 8 | .idea/workspace.xml 9 | .idea/propti.iml -------------------------------------------------------------------------------- /resources/jureca/HelpJURECA.txt: -------------------------------------------------------------------------------- 1 | Help for usage of JURECA 2 | 3 | SLURM batch system: 4 | http://www.fz-juelich.de/SharedDocs/Downloads/IAS/JSC/EN/JURECA/jureca_batch_system_manual.pdf?__blob=publicationFile -------------------------------------------------------------------------------- /resources/jureca/jureca.modules: 
-------------------------------------------------------------------------------- 1 | ml use /usr/local/software/jureca/OtherStages 2 | ml Stages/2018b 3 | 4 | module use -a ~arnold1/modules_fire/ 5 | ml spotpy/1.5.13-py3.6.6_IntelCompiler_2019.0_ParaStationMPI_5.2.1 6 | -------------------------------------------------------------------------------- /resources/CERN_HPC/CERN_HPC_slurm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --nodes=1 3 | #SBATCH --ntasks=3 4 | #SBATCH --mail-type=ALL 5 | #SBATCH --cpus-per-task=2 6 | #SBATCH --time=0-02 7 | #SBATCH -p batch-short 8 | 9 | 10 | cd $SLURM_SUBMIT_DIR 11 | 12 | pwd 13 | 14 | printenv &> user.env-$SLURM_JOB_ID 15 | 16 | which python3 17 | 18 | which fds653_serial 19 | 20 | #module load mpi/mpich/3.2/gcc-6.3.1 21 | module load mpi/mpich/3.2.1 22 | 23 | export PATH=$PATH:/hpcscratch/user/username/path/to/FDS 24 | mpirun python3 /hpcscratch/user/username/path/to/propti/propti/propti_run.py . &> log.spotpy_mpi 25 | 26 | 27 | -------------------------------------------------------------------------------- /resources/jureca/propti-chain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash ­x 2 | # submit a chain of jobs with dependency 3 | 4 | # number of jobs to submit 5 | NO_OF_JOBS=XX 6 | 7 | # define jobscript 8 | JOB_SCRIPT=fds-chain-part-modified.job 9 | 10 | echo "sbatch ${JOB_SCRIPT}" 11 | JOBID=$(sbatch ${JOB_SCRIPT} 2>&1 | awk '{print $(NF)}') 12 | 13 | # Launch the next job, after the previous one has been finished successfully. 
14 | I=0 15 | while [ ${I} -le ${NO_OF_JOBS} ]; do 16 | echo "sbatch -d afterok:${JOBID} ${JOB_SCRIPT}" 17 | JOBID=$(sbatch -d afterok:${JOBID} ${JOB_SCRIPT} 2>&1 | awk '{print $(NF)}') 18 | let I=${I}+1 19 | done 20 | -------------------------------------------------------------------------------- /resources/jureca/fds.jureca.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/sh 2 | 3 | env -i bash -l -c " 4 | echo 'running FDS' 5 | 6 | echo 'loading modules' 7 | module use -a ~arnold1/modules_fire/ 8 | module load FDS/6.7.4-IntelComp2019.0_ParaStationMPI_5.2.1 9 | 10 | which fds 11 | 12 | if [ \"$#\" -eq 0 ]; then 13 | echo | OMP_NUM_THREADS=1 fds 14 | else 15 | OMP_NUM_THREADS=1 fds $1 16 | wct=\`grep 'Total Elapsed Wall Clock Time' *.out | cut -d: -f2\` 17 | hostname=`hostname` 18 | dir=${PWD##*} 19 | date=\`date\` 20 | echo \"`date`; `hostname`; \`grep 'Total Elapsed Wall Clock Time' *.out | cut -d: -f2\`; `pwd` \" > wct.csv 21 | fi 22 | " 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 inverseproblemsinfiremodeling 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /examples/cone_02/cone_template.fds: -------------------------------------------------------------------------------- 1 | &HEAD CHID='#CHID#', TITLE='Example from FDS user guide' / 2 | 3 | &MESH IJK=3,3,3, XB=-0.15,0.15,-0.15,0.15,0.0,0.3 / 4 | 5 | &TIME T_END=600., WALL_INCREMENT=1, DT=0.05 / 6 | 7 | &MISC SOLID_PHASE_ONLY=.TRUE. / 8 | 9 | &SPEC ID='METHANE' / 10 | &MATL ID='BLACKPMMA' 11 | ABSORPTION_COEFFICIENT=2700. 12 | N_REACTIONS=1 13 | A(1) = 8.5E12 14 | E(1) = 188000 15 | EMISSIVITY=#EMISSIVITY# 16 | DENSITY=#DENSITY# 17 | SPEC_ID='METHANE' 18 | NU_SPEC=1. 19 | HEAT_OF_REACTION=870. 20 | CONDUCTIVITY = #CONDUCTIVITY# 21 | SPECIFIC_HEAT = #SPECIFIC_HEAT# / 22 | 23 | &SURF ID='PMMA SLAB' 24 | COLOR='BLACK' 25 | BACKING='INSULATED' 26 | MATL_ID='BLACKPMMA' 27 | THICKNESS=0.0085 28 | EXTERNAL_FLUX=50 / 29 | 30 | &VENT XB=-0.05,0.05,-0.05,0.05,0.0,0.0, SURF_ID = 'PMMA SLAB' / 31 | 32 | &DUMP DT_DEVC=5. 
/ 33 | 34 | &DEVC XYZ=0.0,0.0,0.0, IOR=3, QUANTITY='WALL TEMPERATURE', ID='temp' / 35 | 36 | &DEVC XYZ=0.0,0.0,0.0, IOR=3, QUANTITY='MASS FLUX', SPEC_ID='METHANE', ID='MF' / &DEVC XYZ=0.0,0.0,0.0, IOR=3, QUANTITY='WALL THICKNESS', ID='thick' / 37 | 38 | &TAIL / -------------------------------------------------------------------------------- /examples/propti_analyser_01/propti_db.csv: -------------------------------------------------------------------------------- 1 | like1,parrtc01,parrrc01,parrtc02,parrrc02,simulation_0,chain 2 | 3.209546693766574,278.485477459408,0.002683834572369881,494.25210668233166,0.0013888877862282308,1.0,0.0 3 | 3.399302789280479,264.0545992516876,0.004861303282235932,334.90563761829515,0.0034005935275503323,1.0,0.0 4 | 2.5039757079800298,314.88835783046466,0.00788597569175583,337.39532707348246,0.008161171847868132,1.0,0.0 5 | 3.6192188285071367,289.6178164428413,0.0038180911818259593,375.768328464159,0.009115418567930515,1.0,0.0 6 | 3.994589491480186,208.92789102391367,0.006821840434951581,489.2182508466174,0.0014408386011559758,1.0,0.0 7 | 2.917380190635498,254.7120093183821,0.0010629421588660357,447.15543324440245,0.004171193967805327,1.0,0.0 8 | 4.365819043686002,355.4459524400206,0.008514555927736162,480.0425087136034,0.004178025249871386,1.0,0.0 9 | 4.366567592756199,232.74965445976255,0.00950208027150265,523.7173466201759,0.0011982513478988085,1.0,0.0 10 | 3.129144905581259,353.1764128369194,0.003159760403928445,502.27659183042806,0.0024204242269118266,1.0,0.0 11 | 3.0149817342735306,246.3742952052502,0.001072150038062642,418.11778400665935,0.003599528937827717,1.0,0.0 12 | -------------------------------------------------------------------------------- /examples/propti_desc_stat_01/propti_db.csv: -------------------------------------------------------------------------------- 1 | like1,parrtc01,parrrc01,parrtc02,parrrc02,simulation_0,chain 2 | 
3.186085581005752,298.66246059710613,0.009967956266346525,336.1455251845624,0.0030998967698113045,1.0,0.0 3 | 3.0981746773200727,362.16855387331094,0.0016093572833183905,485.3792944970136,0.006987981350207484,1.0,0.0 4 | 3.015002486720121,340.60657394396605,0.0031630040536680623,306.40569686703253,0.007098186730047772,1.0,0.0 5 | 2.2914902600008467,334.495189602499,0.004236352428223365,441.72519615779765,0.003262989520328451,1.0,0.0 6 | 4.06869302904421,264.003153370627,0.00495311090528692,515.191782121342,0.006693266519207433,1.0,0.0 7 | 2.3470750705090038,332.20103321322915,0.0011332733173137935,322.7202833023535,0.006721094275148063,1.0,0.0 8 | 2.826310827212377,312.994053345496,0.0022361629886376976,378.21710772021277,0.008794266885957056,1.0,0.0 9 | 3.6641185411173742,298.27955505501603,0.009697626506790648,405.15248963999534,0.009499317533299648,1.0,0.0 10 | 3.7828068927322245,227.339806915961,0.005825230000114715,420.04288370364674,0.005863109589648466,1.0,0.0 11 | 2.8212703980031093,308.5771213453069,0.00960342866819313,591.3936930483835,0.008496619339296763,1.0,0.0 12 | -------------------------------------------------------------------------------- /examples/propti_plot_best_sim_exp_01/propti_db.csv: -------------------------------------------------------------------------------- 1 | like1,parrtc01,parrrc01,parrtc02,parrrc02,simulation_0,chain 2 | 3.844631046676181,334.7513812953995,0.00827969171197122,406.6182475985039,0.003362944816020538,1.0,0.0 3 | 3.867087463365662,277.1721623827514,0.005843268517661063,573.9501954423542,0.0018766736810617024,1.0,0.0 4 | 4.282719271649019,272.2304878685822,0.007701613247873529,529.587225202336,0.00486635230471907,1.0,0.0 5 | 3.5866793021547294,285.8791513669977,0.003232332332024565,555.5516345008225,0.007466805993263811,1.0,0.0 6 | 4.619911535148377,211.9809179663303,0.008946579782350503,579.4934202417057,0.007209865453343702,1.0,0.0 7 | 
3.9514646015747097,232.93933606188733,0.0063051532033440725,329.73245684107366,0.008743672515839348,1.0,0.0 8 | 3.8067367327736887,333.25385873824507,0.007299548689332602,381.26322256063065,0.007382670391212174,1.0,0.0 9 | 4.319707338680415,245.16949454707043,0.006434297447170571,386.5404620671424,0.007749110171764587,1.0,0.0 10 | 4.112199897420039,244.20036808226348,0.006766113592149648,561.3414299660872,0.0016393433060387407,1.0,0.0 11 | 2.9749331831325296,324.31187153332996,0.008537213867730285,399.4525685763077,0.0011849991548271333,1.0,0.0 12 | -------------------------------------------------------------------------------- /docker/Dockerfile.nightly: -------------------------------------------------------------------------------- 1 | # FDS version 2 | ARG FDS_VERSION 3 | 4 | # FDS base image 5 | FROM ghcr.io/openbcl/fds-nightly:${FDS_VERSION} 6 | 7 | # Enable mpiexec.openmpi run as root 8 | ENV OMPI_ALLOW_RUN_AS_ROOT=1 9 | ENV OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1 10 | 11 | # install app dependencies 12 | RUN apt-get update && apt-get install -y libopenmpi-dev python3-pip python3-venv 13 | 14 | # create python venv for propti 15 | RUN python3 -m venv /opt/venv/propti 16 | 17 | # append python venv for propti to path 18 | ENV PATH="/opt/venv/propti/bin:$PATH" 19 | 20 | # install python dependencies 21 | RUN pip install matplotlib mpi4py numpy pandas scipy spotpy 22 | 23 | # copy propti 24 | COPY ./ /root/propti/ 25 | 26 | # create wrapper script, fix permissions and create symlinks 27 | RUN echo '/usr/bin/env bash -l -c "$*"' > /root/propti/wrap.sh && \ 28 | chmod +x /root/propti/wrap.sh && \ 29 | chmod +x /root/propti/*.py && \ 30 | rm -rf /root/propti/docker && \ 31 | ln -s /root/propti/wrap.sh /bin/wrap && \ 32 | ln -s /root/propti/propti_analyse.py /bin/propti_analyse && \ 33 | ln -s /root/propti/propti_prepare.py /bin/propti_prepare && \ 34 | ln -s /root/propti/propti_run.py /bin/propti_run && \ 35 | ln -s /root/propti/propti_sense.py /bin/propti_sense 
-------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # FDS version (default: latest), e.g. 6.9.1 2 | ARG FDS_VERSION=latest 3 | 4 | # FDS base image 5 | FROM ghcr.io/openbcl/fds:${FDS_VERSION} 6 | 7 | # Enable mpiexec.openmpi run as root 8 | ENV OMPI_ALLOW_RUN_AS_ROOT=1 9 | ENV OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1 10 | 11 | # install app dependencies 12 | RUN apt-get update && apt-get install -y libopenmpi-dev python3-pip python3-venv 13 | 14 | # create python venv for propti 15 | RUN python3 -m venv /opt/venv/propti 16 | 17 | # append python venv for propti to path 18 | ENV PATH="/opt/venv/propti/bin:$PATH" 19 | 20 | # install python dependencies 21 | RUN pip install matplotlib mpi4py numpy pandas scipy spotpy 22 | 23 | # copy propti 24 | COPY ./ /root/propti/ 25 | 26 | # create wrapper script, fix permissions and create symlinks 27 | RUN echo '/usr/bin/env bash -l -c "$*"' > /root/propti/wrap.sh && \ 28 | chmod +x /root/propti/wrap.sh && \ 29 | chmod +x /root/propti/*.py && \ 30 | rm -rf /root/propti/docker && \ 31 | ln -s /root/propti/wrap.sh /bin/wrap && \ 32 | ln -s /root/propti/propti_analyse.py /bin/propti_analyse && \ 33 | ln -s /root/propti/propti_prepare.py /bin/propti_prepare && \ 34 | ln -s /root/propti/propti_run.py /bin/propti_run && \ 35 | ln -s /root/propti/propti_sense.py /bin/propti_sense -------------------------------------------------------------------------------- /.github/workflows/update.yaml: -------------------------------------------------------------------------------- 1 | name: Update All Propti Images 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: [master] 6 | paths: 7 | - '**.py' 8 | - '.dockerignore' 9 | - 'docker/Dockerfile' 10 | env: 11 | ORG: firedynamics 12 | IMAGE_NAME: propti 13 | jobs: 14 | trigger-updates: 15 | runs-on: ubuntu-latest 16 | permissions: 17 | packages: read 18 | actions: write 
19 | steps: 20 | - name: Extract available docker image versions and trigger build 21 | continue-on-error: true 22 | run: | 23 | REGEX=[0-9]+\.[0-9]+\.[0-9]+ 24 | VERSIONS=$(curl --silent "https://api.github.com/users/${{ env.ORG }}/packages/container/${{ env.IMAGE_NAME }}/versions" --stderr - \ 25 | --header "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" | \ 26 | grep -E "[[:space:]]+\"${REGEX}\"" | grep -oEi ${REGEX}) 27 | for VERSION in ${VERSIONS}; do 28 | curl -L -X POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \ 29 | -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ 30 | https://api.github.com/repos/${{ github.repository }}/actions/workflows/deploy.yaml/dispatches \ 31 | -d "{\"ref\":\"master\",\"inputs\": {\"tag\": \"${VERSION}\"}}" 32 | done -------------------------------------------------------------------------------- /.github/workflows/update.nightly.yaml: -------------------------------------------------------------------------------- 1 | name: Update All Propti Nightly Images 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: [master] 6 | paths: 7 | - '**.py' 8 | - '.dockerignore' 9 | - 'docker/Dockerfile.nightly' 10 | env: 11 | ORG: firedynamics 12 | IMAGE_NAME: propti-nightly 13 | jobs: 14 | trigger-updates: 15 | runs-on: ubuntu-latest 16 | permissions: 17 | packages: read 18 | actions: write 19 | steps: 20 | - name: Extract available docker image versions and trigger build 21 | continue-on-error: true 22 | run: | 23 | REGEX=[0-9]+\.[0-9]+\.[0-9]+ 24 | VERSIONS=$(curl --silent "https://api.github.com/users/${{ env.ORG }}/packages/container/${{ env.IMAGE_NAME }}/versions" --stderr - \ 25 | --header "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" | \ 26 | grep -E "[[:space:]]+\"${REGEX}\"" | grep -oEi ${REGEX}) 27 | for VERSION in ${VERSIONS}; do 28 | curl -L -X POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \ 29 | -H "Authorization: Bearer ${{ 
secrets.GITHUB_TOKEN }}" \ 30 | https://api.github.com/repos/${{ github.repository }}/actions/workflows/deploy.nightly.yaml/dispatches \ 31 | -d "{\"ref\":\"master\",\"inputs\": {\"tag\": \"${VERSION}\"}}" 32 | done -------------------------------------------------------------------------------- /.github/workflows/deploy.nightly.yaml: -------------------------------------------------------------------------------- 1 | name: Deploy Propti Nightly 2 | on: 3 | repository_dispatch: 4 | workflow_dispatch: 5 | inputs: 6 | tag: 7 | description: 'FDS version' 8 | required: true 9 | type: string 10 | env: 11 | REGISTRY: ghcr.io 12 | ORG: firedynamics 13 | IMAGE_NAME: propti-nightly 14 | jobs: 15 | build-and-push-image: 16 | runs-on: ubuntu-latest 17 | permissions: 18 | contents: read 19 | packages: write 20 | steps: 21 | - name: Checkout repository 22 | uses: actions/checkout@v4 23 | with: 24 | ref: ${{ github.ref }} 25 | - name: Log in to the Container registry 26 | uses: docker/login-action@v3 27 | with: 28 | registry: ${{ env.REGISTRY }} 29 | username: ${{ github.actor }} 30 | password: ${{ secrets.GITHUB_TOKEN }} 31 | - name: Extract metadata (tags, labels) for Docker 32 | id: meta 33 | uses: docker/metadata-action@v5 34 | with: 35 | images: ${{ env.REGISTRY }}/${{ env.ORG }}/${{ env.IMAGE_NAME }} 36 | - name: Set up Docker Buildx 37 | uses: docker/setup-buildx-action@v3 38 | - name: Build and push Docker image 39 | uses: docker/build-push-action@v5 40 | with: 41 | context: . 
42 | provenance: false 43 | file: docker/Dockerfile.nightly 44 | build-args: | 45 | FDS_VERSION=${{ inputs.tag }} 46 | push: true 47 | tags: ${{ env.REGISTRY }}/${{ env.ORG }}/${{ env.IMAGE_NAME }}:${{ inputs.tag }} 48 | labels: ${{ steps.meta.outputs.labels }} -------------------------------------------------------------------------------- /resources/jureca/propti-chain-part.job: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Provide job name to be displayed in the queue overview 4 | #SBATCH --job-name propti-test 5 | 6 | # Set desired number of nodes. 7 | # JURECA provides 24 cores per node. 8 | # The first 24 jobs per node get one physical core each. If more 9 | # jobs are scheduled per node, hyperthreading is invoked. 10 | #SBATCH --nodes 2 11 | #SBATCH --ntasks 18 12 | 13 | # Set number of cores that are to be used per task. For example when 14 | # multiple cores are to be used per mesh, to speed up the calculation. 15 | #SBATCH --cpus-per-task 3 16 | #SBATCH --output %j.out 17 | #SBATCH --error %j.err 18 | 19 | # Wall clock time is 24 h on JURECA 20 | # Set time constrains for the job execution, by the following format: 21 | # days-hours:minutes:seconds - example 5-12:35:42 22 | #SBATCH --time=05:30:00 23 | 24 | # This signal is used to control the relaunch process of the simulation. 25 | # It is needed when e.g. large FDS simulations are conducted, that would 26 | # have longer execution times then the 24 h possible. 27 | # Not necessary for propti. 
28 | ## SBATCH --signal=B:SIGUSR1@600 29 | 30 | 31 | cd $SLURM_SUBMIT_DIR 32 | 33 | 34 | # load necessary modules 35 | ml use /usr/local/software/jureca/OtherStages 36 | ml Stages/2018b 37 | 38 | module use -a ~arnold1/modules_fire/ 39 | ml spotpy/1.5.13-py3.6.6_IntelCompiler_2019.0_ParaStationMPI_5.2.1 40 | 41 | 42 | pwd 43 | 44 | printenv &> user.env-$SLURM_JOB_ID 45 | 46 | which python3 47 | 48 | # Add an extra line to the propti_db, to be able to destinguish between restarts (e.g. job crash). 49 | # These markers are also looked for by the '--clean_db' function of the post processing scripts. 50 | if [ -f propti_db.csv ]; then 51 | echo "#Restart#" >> propti_db.csv 52 | fi 53 | 54 | # Run PROPTI. Also, create a log file to save the output generated. 55 | # Assumption that PROPTI is stoed two layers above this script. 56 | # CHECK PATH FOR YOUR CASE! 57 | 58 | srun --export=ALL python3.6 ../propti/propti_run.py . &>>log.spotpy_mpi 59 | -------------------------------------------------------------------------------- /propti/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | ######### 4 | # LOGGING 5 | # set up logging to file - see previous section for more details 6 | 7 | # get MPI rank for individual log files 8 | import mpi4py 9 | mpi4py.rc.recv_mprobe = False 10 | 11 | from mpi4py import MPI 12 | my_rank = MPI.COMM_WORLD.Get_rank() 13 | 14 | logging.basicConfig(level=logging.DEBUG, 15 | format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', 16 | datefmt='%m-%d %H:%M', 17 | filename='propti.{:03d}.log'.format(my_rank), 18 | filemode='w') 19 | 20 | # define a Handler which writes INFO messages or higher to the sys.stderr 21 | console = logging.StreamHandler() 22 | console.setLevel(logging.INFO) 23 | 24 | # set a format which is simpler for console use 25 | formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s') 26 | 27 | # tell the handler to use this format 28 | 
console.setFormatter(formatter) 29 | 30 | # add the handler to the root logger 31 | logging.getLogger('').addHandler(console) 32 | 33 | ######## 34 | # PROPTI AND SPOTPY 35 | 36 | from .spotpy_wrapper import run_optimisation, create_input_file 37 | from .data_structures import Parameter, ParameterSet, \ 38 | SimulationSetupSet, SimulationSetup, Relation, DataSource, \ 39 | OptimiserProperties, Version 40 | from .basic_functions import run_simulations 41 | from .propti_post_processing import run_best_para 42 | 43 | from .propti_monitor import plot_scatter, plot_scatter2, \ 44 | plot_para_vs_fitness, plot_box_rmse 45 | from .propti_post_processing import run_best_para, plot_hist, \ 46 | calc_pearson_coefficient, collect_best_para_multi, plot_best_sim_exp 47 | from .propti_pre_processing import interpolate_lists 48 | 49 | from .fitness_methods import FitnessMethodRMSE, FitnessMethodInterface, \ 50 | FitnessMethodThreshold, FitnessMethodRangeRMSE, FitnessMethodBandRMSE, \ 51 | FitnessMethodIntegrate 52 | 53 | 54 | ########### 55 | # CONSTANTS 56 | 57 | # TODO: respect this variable in scripts 58 | pickle_prefix = 'propti.pickle' 59 | -------------------------------------------------------------------------------- /examples/tga_analysis_01/input.py: -------------------------------------------------------------------------------- 1 | # define variable 'ops': optimisation parameter set 2 | # define variable 'setups': simulation setup set 3 | # define variable 'optimiser': properties for the optimiser 4 | 5 | # import just for IDE convenience 6 | import propti as pr 7 | 8 | # fix the chid 9 | CHID = 'TGA_analysis_01' 10 | TEND = 9360 11 | 12 | # define the optimisation parameter 13 | op1 = pr.Parameter(name='ref_temp_comp_01', 14 | place_holder='rtc01', 15 | min_value=200, max_value=400) 16 | op2 = pr.Parameter(name='ref_rate_comp_01', 17 | place_holder='rrc01', 18 | min_value=0.001, max_value=0.01) 19 | op3 = pr.Parameter(name='ref_temp_comp_02', 20 | 
place_holder='rtc02', 21 | min_value=300, max_value=600) 22 | op4 = pr.Parameter(name='ref_rate_comp_02', 23 | place_holder='rrc02', 24 | min_value=0.001, max_value=0.01) 25 | 26 | ops = pr.ParameterSet(params=[op1, op2, op3, op4]) 27 | 28 | # define general model parameter, including optimisation parameter 29 | mps0 = pr.ParameterSet(params=[op1, op2, op3, op4]) 30 | mps0.append(pr.Parameter(name='heating rate', place_holder='hr', value=10)) 31 | mps0.append(pr.Parameter(name='chid', place_holder='CHID', value=CHID)) 32 | 33 | # define empty simulation setup set 34 | setups = pr.SimulationSetupSet() 35 | 36 | # define model-experiment data relation 37 | r = pr.Relation() 38 | r.model.file_name = "{}_tga.csv".format(CHID) 39 | r.model.label_x = 'Time' 40 | r.model.label_y = 'Total MLR' 41 | r.model.header_line = 1 42 | r.experiment.file_name = "tga_experimental_data.csv" 43 | r.experiment.label_x = 'Time' 44 | r.experiment.label_y = 'MassLossRate' 45 | r.experiment.header_line = 0 46 | r.fitness_method=pr.FitnessMethodRMSE(n_points=100) 47 | 48 | # create simulation setup object 49 | template_file = "tga_analysis_01.fds" 50 | s = pr.SimulationSetup(name='tga_analysis_01', 51 | work_dir='tga_analysis_run_01', 52 | model_template=template_file, 53 | model_parameter=mps0, 54 | model_executable='fds', 55 | relations=r) 56 | 57 | # append above object to simulation setup set 58 | setups.append(s) 59 | 60 | # use default values for optimiser 61 | optimiser = pr.OptimiserProperties(algorithm='sceua', 62 | repetitions=10) 63 | -------------------------------------------------------------------------------- /examples/cone_02/input.py: -------------------------------------------------------------------------------- 1 | # define variable 'ops': optimisation parameter set 2 | # define variable 'setups': simulation setup set 3 | # define variable 'optimiser': properties for the optimiser 4 | 5 | # import just for IDE convenience 6 | import propti as pr 7 | 8 | # fix the chid 9 
| CHID = 'CONE' 10 | 11 | # define the optimisation parameter 12 | op1 = pr.Parameter(name='density', place_holder='DENSITY', 13 | min_value=1e2, max_value=1e4) 14 | op2 = pr.Parameter(name='emissivity', place_holder='EMISSIVITY', 15 | min_value=0.01, max_value=1) 16 | op3 = pr.Parameter(name='conductivity', place_holder='CONDUCTIVITY', 17 | min_value=0.01, max_value=1) 18 | op4 = pr.Parameter(name='specific_heat', place_holder='SPECIFIC_HEAT', 19 | min_value=0.01, max_value=10) 20 | ops = pr.ParameterSet(params=[op1, op2, op3, op4]) 21 | 22 | # define general model parameter, including optimisation parameter 23 | mps = pr.ParameterSet(params=[op1, op2, op3, op4]) 24 | mps.append(pr.Parameter(name='chid', place_holder='CHID', value=CHID)) 25 | 26 | # define empty simulation setup set 27 | setups = pr.SimulationSetupSet() 28 | 29 | # define model-experiment data relation 30 | r1 = pr.Relation() 31 | r1.model.file_name = "{}_devc.csv".format(CHID) 32 | r1.model.label_x = 'Time' 33 | r1.model.label_y = 'temp' 34 | r1.model.header_line = 1 35 | r1.experiment.file_name = "experimental_data.csv" 36 | r1.experiment.label_x = 'time' 37 | r1.experiment.label_y = 'temp' 38 | r1.experiment.header_line = 0 39 | r1.fitness_method=pr.FitnessMethodRMSE(n_points=100) 40 | 41 | r2 = pr.Relation() 42 | r2.model.file_name = "{}_devc.csv".format(CHID) 43 | r2.model.label_x = 'Time' 44 | r2.model.label_y = 'temp' 45 | r2.model.header_line = 1 46 | r2.experiment = None 47 | r2.fitness_method=pr.FitnessMethodThreshold("upper", threshold_target_value=90, threshold_value=400) 48 | 49 | # create simulation setup object 50 | template_file = "cone_template.fds" 51 | s = pr.SimulationSetup(name='cone_pmma', 52 | work_dir='cone_pmma', 53 | model_template=template_file, 54 | model_parameter=mps, 55 | model_executable='fds', 56 | relations=[r1,r2]) 57 | 58 | setups.append(s) 59 | 60 | # use default values for optimiser 61 | optimiser = pr.OptimiserProperties(algorithm='sceua', 62 | 
repetitions=10) 63 | -------------------------------------------------------------------------------- /examples/propti_analyser_01/input.py: -------------------------------------------------------------------------------- 1 | # define variable 'ops': optimisation parameter set 2 | # define variable 'setups': simulation setup set 3 | # define variable 'optimiser': properties for the optimiser 4 | 5 | # import just for IDE convenience 6 | import propti as pr 7 | 8 | # fix the chid 9 | CHID = 'TGA_analysis_01' 10 | TEND = 9360 11 | 12 | 13 | # use default values for optimiser 14 | optimiser = pr.OptimiserProperties(algorithm='sceua', 15 | repetitions=10) 16 | #ngs=4, 17 | 18 | # define the optimisation parameter 19 | op1 = pr.Parameter(name='ref_temp_comp_01', 20 | place_holder='rtc01', 21 | min_value=200, max_value=400) 22 | op2 = pr.Parameter(name='ref_rate_comp_01', 23 | place_holder='rrc01', 24 | min_value=0.001, max_value=0.01) 25 | op3 = pr.Parameter(name='ref_temp_comp_02', 26 | place_holder='rtc02', 27 | min_value=300, max_value=600) 28 | op4 = pr.Parameter(name='ref_rate_comp_02', 29 | place_holder='rrc02', 30 | min_value=0.001, max_value=0.01) 31 | 32 | ops = pr.ParameterSet(params=[op1, op2, op3, op4]) 33 | 34 | # define general model parameter, including optimisation parameter 35 | mps0 = pr.ParameterSet(params=[op1, op2, op3, op4]) 36 | mps0.append(pr.Parameter(name='heating rate', place_holder='hr', value=10)) 37 | mps0.append(pr.Parameter(name='chid', place_holder='CHID', value=CHID)) 38 | 39 | # define empty simulation setup set 40 | setups = pr.SimulationSetupSet() 41 | 42 | # define model-experiment data relation 43 | r = pr.Relation() 44 | r.model.file_name = "{}_tga.csv".format(CHID) 45 | r.model.label_x = 'Time' 46 | r.model.label_y = 'Total MLR' 47 | r.model.header_line = 1 48 | r.experiment.file_name = "tga_experimental_data.csv" 49 | r.experiment.label_x = 'Time' 50 | r.experiment.label_y = 'MassLossRate' 51 | r.experiment.header_line = 0 52 | 
r.fitness_method=pr.FitnessMethodRMSE(n_points=200) 53 | 54 | # create simulation setup object 55 | template_file = "tga_analysis_01.fds" 56 | s = pr.SimulationSetup(name='tga_analysis_01', 57 | work_dir='tga_analysis_run_01', 58 | model_template=template_file, 59 | model_parameter=mps0, 60 | model_executable='fds', 61 | relations=r) 62 | 63 | # append above object to simulation setup set 64 | setups.append(s) 65 | 66 | -------------------------------------------------------------------------------- /examples/propti_desc_stat_01/input.py: -------------------------------------------------------------------------------- 1 | # define variable 'ops': optimisation parameter set 2 | # define variable 'setups': simulation setup set 3 | # define variable 'optimiser': properties for the optimiser 4 | 5 | # import just for IDE convenience 6 | import propti as pr 7 | 8 | # fix the chid 9 | CHID = 'TGA_analysis_01' 10 | TEND = 9360 11 | 12 | 13 | # use default values for optimiser 14 | optimiser = pr.OptimiserProperties(algorithm='sceua', 15 | repetitions=10) 16 | #ngs=4, 17 | 18 | # define the optimisation parameter 19 | op1 = pr.Parameter(name='ref_temp_comp_01', 20 | place_holder='rtc01', 21 | min_value=200, max_value=400) 22 | op2 = pr.Parameter(name='ref_rate_comp_01', 23 | place_holder='rrc01', 24 | min_value=0.001, max_value=0.01) 25 | op3 = pr.Parameter(name='ref_temp_comp_02', 26 | place_holder='rtc02', 27 | min_value=300, max_value=600) 28 | op4 = pr.Parameter(name='ref_rate_comp_02', 29 | place_holder='rrc02', 30 | min_value=0.001, max_value=0.01) 31 | 32 | ops = pr.ParameterSet(params=[op1, op2, op3, op4]) 33 | 34 | # define general model parameter, including optimisation parameter 35 | mps0 = pr.ParameterSet(params=[op1, op2, op3, op4]) 36 | mps0.append(pr.Parameter(name='heating rate', place_holder='hr', value=10)) 37 | mps0.append(pr.Parameter(name='chid', place_holder='CHID', value=CHID)) 38 | 39 | # define empty simulation setup set 40 | setups = 
pr.SimulationSetupSet() 41 | 42 | # define model-experiment data relation 43 | r = pr.Relation() 44 | r.model.file_name = "{}_tga.csv".format(CHID) 45 | r.model.label_x = 'Time' 46 | r.model.label_y = 'Total MLR' 47 | r.model.header_line = 1 48 | r.experiment.file_name = "tga_experimental_data.csv" 49 | r.experiment.label_x = 'Time' 50 | r.experiment.label_y = 'MassLossRate' 51 | r.experiment.header_line = 0 52 | r.fitness_method=pr.FitnessMethodRMSE(n_points=100) 53 | 54 | # create simulation setup object 55 | template_file = "tga_analysis_01.fds" 56 | s = pr.SimulationSetup(name='tga_analysis_01', 57 | work_dir='tga_analysis_run_01', 58 | model_template=template_file, 59 | model_parameter=mps0, 60 | model_executable='fds', 61 | relations=r) 62 | 63 | # append above object to simulation setup set 64 | setups.append(s) 65 | 66 | -------------------------------------------------------------------------------- /examples/propti_plot_best_sim_exp_01/input.py: -------------------------------------------------------------------------------- 1 | # define variable 'ops': optimisation parameter set 2 | # define variable 'setups': simulation setup set 3 | # define variable 'optimiser': properties for the optimiser 4 | 5 | # import just for IDE convenience 6 | import propti as pr 7 | 8 | # fix the chid 9 | CHID = 'TGA_analysis_01' 10 | TEND = 9360 11 | 12 | 13 | # use default values for optimiser 14 | optimiser = pr.OptimiserProperties(algorithm='sceua', 15 | repetitions=10) 16 | #ngs=4, 17 | 18 | # define the optimisation parameter 19 | op1 = pr.Parameter(name='ref_temp_comp_01', 20 | place_holder='rtc01', 21 | min_value=200, max_value=400) 22 | op2 = pr.Parameter(name='ref_rate_comp_01', 23 | place_holder='rrc01', 24 | min_value=0.001, max_value=0.01) 25 | op3 = pr.Parameter(name='ref_temp_comp_02', 26 | place_holder='rtc02', 27 | min_value=300, max_value=600) 28 | op4 = pr.Parameter(name='ref_rate_comp_02', 29 | place_holder='rrc02', 30 | min_value=0.001, max_value=0.01) 
31 | 32 | ops = pr.ParameterSet(params=[op1, op2, op3, op4]) 33 | 34 | # define general model parameter, including optimisation parameter 35 | mps0 = pr.ParameterSet(params=[op1, op2, op3, op4]) 36 | mps0.append(pr.Parameter(name='heating rate', place_holder='hr', value=10)) 37 | mps0.append(pr.Parameter(name='chid', place_holder='CHID', value=CHID)) 38 | 39 | # define empty simulation setup set 40 | setups = pr.SimulationSetupSet() 41 | 42 | # define model-experiment data relation 43 | r = pr.Relation() 44 | r.model.file_name = "{}_tga.csv".format(CHID) 45 | r.model.label_x = 'Time' 46 | r.model.label_y = 'Total MLR' 47 | r.model.header_line = 1 48 | r.experiment.file_name = "tga_experimental_data.csv" 49 | r.experiment.label_x = 'Time' 50 | r.experiment.label_y = 'MassLossRate' 51 | r.experiment.header_line = 0 52 | r.fitness_method=pr.FitnessMethodRMSE(n_points=100) 53 | 54 | # create simulation setup object 55 | template_file = "tga_analysis_01.fds" 56 | s = pr.SimulationSetup(name='tga_analysis_01', 57 | work_dir='tga_analysis_run_01', 58 | model_template=template_file, 59 | model_parameter=mps0, 60 | model_executable='fds', 61 | relations=r) 62 | 63 | # append above object to simulation setup set 64 | setups.append(s) 65 | 66 | -------------------------------------------------------------------------------- /propti_run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import sys 3 | import os 4 | import numpy as np 5 | import copy 6 | import pandas as pd 7 | import shutil as sh 8 | import pickle 9 | import propti as pr 10 | import logging 11 | import argparse 12 | 13 | import mpi4py 14 | mpi4py.rc.recv_mprobe = False 15 | 16 | from mpi4py import MPI 17 | comm = MPI.COMM_WORLD 18 | print('Starting PROPTI on MPI rank {} out of {} ranks.'.format(comm.Get_rank(), 19 | comm.Get_size())) 20 | 21 | 22 | parser = argparse.ArgumentParser() 23 | parser.add_argument("root_dir", type=str, 24 | 
help="optimisation root directory") 25 | cmdl_args = parser.parse_args() 26 | 27 | setups = None # type: pr.SimulationSetupSet 28 | ops = None # type: pr.ParameterSet 29 | optimiser = None # type: pr.OptimiserProperties 30 | 31 | in_file = open('propti.pickle.init', 'rb') 32 | ver, setups, ops, optimiser = pickle.load(in_file) 33 | in_file.close() 34 | 35 | # Check if PROPTI version is same as in pickle file 36 | # else upgrade files to new values using defaults. 37 | dict_of_upgrades = {} 38 | if ver.ver_propti != pr.Version(setups[0]).ver_propti: 39 | temp = setups.upgrade() 40 | dict_of_upgrades["setups"] = temp 41 | temp = ops.upgrade() 42 | dict_of_upgrades["ops"] = temp 43 | temp = optimiser.upgrade() 44 | dict_of_upgrades["optimiser"] = temp 45 | ver = pr.Version(setups[0]) 46 | logging.warning("Pickle init file is old. Upgrading...") 47 | logging.warning("Optimization run with defaults for missing parameters.") 48 | logging.warning("Following data was upgraded: " + str(dict_of_upgrades)) # TODO: pPRINT? 49 | # Create new pickle file 50 | out_file = open('new_propti.pickle.init', 'wb') 51 | pickle.dump((ver, setups, ops, optimiser), out_file) 52 | out_file.close() 53 | 54 | if ver.flag_propti != 0: 55 | logging.warning("No git. 
PROPTI version is represented as a hash.") 56 | if ops is None: 57 | logging.critical("Optimisation parameter are not defined.") 58 | if setups is None: 59 | logging.critical("Simulation setups are not defined.") 60 | if optimiser is None: 61 | logging.critical("Optimiser properties are not defined.") 62 | 63 | print(ver, setups, ops, optimiser) 64 | 65 | res = pr.run_optimisation(ops, setups, optimiser) 66 | 67 | print(ops) 68 | 69 | out_file = open('propti.pickle.finished', 'wb') 70 | pickle.dump((ver, setups, ops, optimiser), out_file) 71 | out_file.close() 72 | -------------------------------------------------------------------------------- /docker/readme.md: -------------------------------------------------------------------------------- 1 | ## Run propti interactive 2 | ```bash 3 | # Linux 4 | docker run -it -v $(pwd):/workdir ghcr.io/FireDynamics/propti 5 | 6 | # Windows PowerShell 7 | docker run -it -v ${pwd}:/workdir ghcr.io/FireDynamics/propti 8 | 9 | # Windows Command Prompt 10 | docker run -it -v %cd%:/workdir ghcr.io/FireDynamics/propti 11 | ``` 12 | 13 | You can now use propti with the following commands: 14 | - `propti_analyse <...arguments>` 15 | - `propti_prepare <input file>` 16 | - `propti_run .` 17 | - `propti_sense .` 18 | 19 | *You can display the help for each command by using the `-h` argument (e.g. `propti_analyse -h`).* 20 | 21 | If you would like to run the `propti_run` command in parallel via mpi, you can start it as follows: 22 | ```bash 23 | mpiexec.openmpi -n <number of processes> propti_run . 
24 | ``` 25 | 26 | ## Run propti non-interactive 27 | ```bash 28 | # Linux 29 | docker run --rm -v $(pwd):/workdir ghcr.io/FireDynamics/propti {propti_analyse|propti_prepare|propti_run|propti_sense} <...arguments> 30 | 31 | # Windows PowerShell 32 | docker run --rm -v ${pwd}:/workdir ghcr.io/FireDynamics/propti {propti_analyse|propti_prepare|propti_run|propti_sense} <...arguments> 33 | 34 | # Windows Command Prompt 35 | docker run --rm -v %cd%:/workdir ghcr.io/FireDynamics/propti {propti_analyse|propti_prepare|propti_run|propti_sense} <...arguments> 36 | ``` 37 | 38 | ## Additional information 39 | This docker image is based on the FDS image [ghcr.io/openbcl/fds](https://github.com/openbcl/fds-dockerfiles/pkgs/container/fds). 40 | There you will find all information on options, problems, errors and their solutions when operating FDS in Docker containers. 41 | 42 | ## How to build the docker image by yourself 43 | 1. Clone this repository. 44 | 1. Navigate with your shell to this folder. 45 | 1. Choose between building the image with the latest FDS version or with a specific one. You can find all available versions (tags) [here](https://github.com/openbcl/fds-dockerfiles/pkgs/container/fds/versions). 46 | For compatibility reasons, the FDS version should be at least 6.7.4. 47 | 48 | ```bash 49 | # build command: propti image with latest FDS version 50 | docker build -t propti -f Dockerfile .. 51 | 52 | # build command: propti image with specific FDS version (e.g. 6.9.1) 53 | docker build --build-arg="FDS_VERSION=6.9.1" -t propti -f Dockerfile .. 54 | ``` 55 | 56 | To run your docker image of propti use the `docker run` commands described above and replace the package name `ghcr.io/FireDynamics/propti` with `propti`. 
-------------------------------------------------------------------------------- /propti_sense.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import numpy as np 3 | import propti as pr 4 | 5 | import pickle 6 | 7 | import propti as pr 8 | 9 | import logging 10 | 11 | import argparse 12 | parser = argparse.ArgumentParser() 13 | parser.add_argument("root_dir", type=str, 14 | help="optimisation root directory") 15 | cmdl_args = parser.parse_args() 16 | 17 | setups = None # type: pr.SimulationSetupSet 18 | ops = None # type: pr.ParameterSet 19 | optimiser = None # type: pr.OptimiserProperties 20 | 21 | in_file = open('propti.pickle.init', 'rb') 22 | ver, setups, ops, optimiser = pickle.load(in_file) 23 | in_file.close() 24 | # Check if everything is in order 25 | if ops is None: 26 | logging.critical("optimisation parameter are not defined") 27 | if setups is None: 28 | logging.critical("simulation setups are not defined") 29 | if optimiser is None: 30 | logging.critical("optimiser properties are not defined") 31 | 32 | # Optimiser to perform FAST sensitivity analysis. 33 | 34 | # Use the same data from optimiser for sensitivity analysis 35 | num_subproc = optimiser.num_subprocesses 36 | mpi_bool = optimiser.mpi 37 | backup = optimiser.backup_every 38 | 39 | # Compute number of repetitions required 40 | k = len(ops) # total number of optimization params 41 | (M, d) = 3, 2 # M = inference factor, d = freq. step 42 | rep = (1 + 4*(M**2)*(1+(k-2)*d))*k 43 | 44 | sensitivity = pr.OptimiserProperties('fast', 45 | repetitions=rep, 46 | backup_every=backup, 47 | db_name="propti_sensitivity_db", 48 | db_type="csv", 49 | num_subprocesses=num_subproc, 50 | mpi=mpi_bool, 51 | optimization_direction="minimize") 52 | 53 | 54 | print(ver, setups, ops, sensitivity) 55 | ''' 56 | Not writing any data to pickle file since there is only 57 | one method to analyse sensitivity. It makes sense to Wiki this instead. 
58 | ''' 59 | # Write sensitivity pickle file 60 | # out_file = open('propti_sensitivity.pickle.init', 'wb') 61 | # pickle.dump((setups, ops, optimiser), out_file) 62 | # out_file.close() 63 | 64 | # Run optimization. 65 | # Output of run_optimisation will be None. 66 | # What are some good parameters that can be printed ? 67 | res = pr.run_optimisation(ops, setups, sensitivity) 68 | # print(res) # output of res is None. 69 | -------------------------------------------------------------------------------- /examples/tga_analysis_02/OriginalFiles/tga_5K.fds: -------------------------------------------------------------------------------- 1 | &HEAD CHID='tga_5K', TITLE='TGA tutorial' / 2 | 3 | &MESH IJK=3,1,4, XB=-2,2,-0.5,0.5,0,1 / 4 | 5 | &TIME T_END=60. / 6 | 7 | &REAC FUEL='CELLULOSE', C=6, H=10, O=5, SOOT_YIELD=0.015 / 8 | &SPEC ID='WATER VAPOR' / 9 | 10 | &VENT XB=-1,1,-0.5,0.5,0.0,0.0, SURF_ID='SAMPLE' / 11 | 12 | &SURF ID = 'SAMPLE' 13 | TGA_ANALYSIS = .TRUE. 14 | COLOR = 'RED' 15 | THICKNESS = 0.01 16 | MATL_ID(1,1) = 'component 1' 17 | MATL_ID(1,2) = 'water' 18 | MATL_MASS_FRACTION(1,1) = 0.90 19 | MATL_MASS_FRACTION(1,2) = 0.10 / 20 | 21 | 22 | 23 | Original values for material component 1 24 | have been: 25 | REFERENCE_TEMPERATURE = 315. 26 | REFERENCE_RATE = 0.0056 27 | 28 | &MATL ID = 'component 1' 29 | EMISSIVITY = 1.0 30 | DENSITY = 500. 31 | CONDUCTIVITY = 0.20 32 | SPECIFIC_HEAT = 1.0 33 | N_REACTIONS = 1 34 | REFERENCE_TEMPERATURE = 315. 35 | REFERENCE_RATE = 0.0056 36 | HEATING_RATE = 5 37 | NU_SPEC = 0.60 38 | SPEC_ID = 'CELLULOSE' 39 | NU_MATL = 0.40 40 | MATL_ID = 'component 2' 41 | HEAT_OF_REACTION = 1000. / 42 | 43 | 44 | Original values for material component 2 45 | have been: 46 | REFERENCE_TEMPERATURE = 430. 47 | REFERENCE_RATE = 0.0075 48 | 49 | 50 | &MATL ID = 'component 2' 51 | EMISSIVITY = 1.0 52 | DENSITY = 500. 53 | CONDUCTIVITY = 0.20 54 | SPECIFIC_HEAT = 1.0 55 | N_REACTIONS = 1 56 | REFERENCE_TEMPERATURE = 430. 
57 | REFERENCE_RATE = 0.0075 58 | HEATING_RATE = 5 59 | NU_SPEC = 0.85 60 | SPEC_ID = 'CELLULOSE' 61 | NU_MATL = 0.15 62 | MATL_ID = 'residue' 63 | HEAT_OF_REACTION = 1000. / 64 | 65 | &MATL ID = 'water' 66 | DENSITY = 1000. 67 | CONDUCTIVITY = 0.1 68 | SPECIFIC_HEAT= 4.184 69 | N_REACTIONS = 1 70 | REFERENCE_TEMPERATURE = 100. 71 | REFERENCE_RATE = 0.0016 72 | HEATING_RATE = 5 73 | NU_SPEC = 1.0 74 | SPEC_ID = 'WATER VAPOR' 75 | HEAT_OF_REACTION= 2500. / 76 | 77 | &MATL ID = 'residue' 78 | DENSITY = 200. 79 | CONDUCTIVITY = 0.20 80 | SPECIFIC_HEAT = 1.0 / 81 | 82 | &TAIL / 83 | -------------------------------------------------------------------------------- /examples/tga_analysis_02/OriginalFiles/tga_10K.fds: -------------------------------------------------------------------------------- 1 | &HEAD CHID='tga_10K', TITLE='TGA tutorial' / 2 | 3 | &MESH IJK=3,1,4, XB=-2,2,-0.5,0.5,0,1 / 4 | 5 | &TIME T_END=60. / 6 | 7 | &REAC FUEL='CELLULOSE', C=6, H=10, O=5, SOOT_YIELD=0.015 / 8 | &SPEC ID='WATER VAPOR' / 9 | 10 | &VENT XB=-1,1,-0.5,0.5,0.0,0.0, SURF_ID='SAMPLE' / 11 | 12 | &SURF ID = 'SAMPLE' 13 | TGA_ANALYSIS = .TRUE. 14 | COLOR = 'RED' 15 | THICKNESS = 0.01 16 | MATL_ID(1,1) = 'component 1' 17 | MATL_ID(1,2) = 'water' 18 | MATL_MASS_FRACTION(1,1) = 0.90 19 | MATL_MASS_FRACTION(1,2) = 0.10 / 20 | 21 | 22 | 23 | Original values for material component 1 24 | have been: 25 | REFERENCE_TEMPERATURE = 315. 26 | REFERENCE_RATE = 0.0056 27 | 28 | &MATL ID = 'component 1' 29 | EMISSIVITY = 1.0 30 | DENSITY = 500. 31 | CONDUCTIVITY = 0.20 32 | SPECIFIC_HEAT = 1.0 33 | N_REACTIONS = 1 34 | REFERENCE_TEMPERATURE = 315. 35 | REFERENCE_RATE = 0.0056 36 | HEATING_RATE = 10 37 | NU_SPEC = 0.60 38 | SPEC_ID = 'CELLULOSE' 39 | NU_MATL = 0.40 40 | MATL_ID = 'component 2' 41 | HEAT_OF_REACTION = 1000. / 42 | 43 | 44 | Original values for material component 2 45 | have been: 46 | REFERENCE_TEMPERATURE = 430. 
47 | REFERENCE_RATE = 0.0075 48 | 49 | 50 | &MATL ID = 'component 2' 51 | EMISSIVITY = 1.0 52 | DENSITY = 500. 53 | CONDUCTIVITY = 0.20 54 | SPECIFIC_HEAT = 1.0 55 | N_REACTIONS = 1 56 | REFERENCE_TEMPERATURE = 430. 57 | REFERENCE_RATE = 0.0075 58 | HEATING_RATE = 10 59 | NU_SPEC = 0.85 60 | SPEC_ID = 'CELLULOSE' 61 | NU_MATL = 0.15 62 | MATL_ID = 'residue' 63 | HEAT_OF_REACTION = 1000. / 64 | 65 | &MATL ID = 'water' 66 | DENSITY = 1000. 67 | CONDUCTIVITY = 0.1 68 | SPECIFIC_HEAT= 4.184 69 | N_REACTIONS = 1 70 | REFERENCE_TEMPERATURE = 100. 71 | REFERENCE_RATE = 0.0016 72 | HEATING_RATE = 10 73 | NU_SPEC = 1.0 74 | SPEC_ID = 'WATER VAPOR' 75 | HEAT_OF_REACTION= 2500. / 76 | 77 | &MATL ID = 'residue' 78 | DENSITY = 200. 79 | CONDUCTIVITY = 0.20 80 | SPECIFIC_HEAT = 1.0 / 81 | 82 | &TAIL / 83 | -------------------------------------------------------------------------------- /examples/tga_analysis_01/tga_analysis_01.fds: -------------------------------------------------------------------------------- 1 | &HEAD CHID='#CHID#', TITLE='TGA tutorial' / 2 | 3 | &MESH IJK=3,1,4, XB=-2,2,-0.5,0.5,0,1 / 4 | 5 | &TIME T_END=60. / 6 | 7 | &REAC FUEL='CELLULOSE', C=6, H=10, O=5, SOOT_YIELD=0.015 / 8 | &SPEC ID='WATER VAPOR' / 9 | 10 | &VENT XB=-1,1,-0.5,0.5,0.0,0.0, SURF_ID='SAMPLE' / 11 | 12 | &SURF ID = 'SAMPLE' 13 | TGA_ANALYSIS = .TRUE. 14 | COLOR = 'RED' 15 | THICKNESS = 0.01 16 | MATL_ID(1,1) = 'component 1' 17 | MATL_ID(1,2) = 'water' 18 | MATL_MASS_FRACTION(1,1) = 0.90 19 | MATL_MASS_FRACTION(1,2) = 0.10 / 20 | 21 | 22 | 23 | Original values for material component 1 24 | have been: 25 | REFERENCE_TEMPERATURE = 315. 26 | REFERENCE_RATE = 0.0056 27 | 28 | &MATL ID = 'component 1' 29 | EMISSIVITY = 1.0 30 | DENSITY = 500. 
31 | CONDUCTIVITY = 0.20 32 | SPECIFIC_HEAT = 1.0 33 | N_REACTIONS = 1 34 | REFERENCE_TEMPERATURE = #rtc01# 35 | REFERENCE_RATE = #rrc01# 36 | HEATING_RATE = #hr# 37 | NU_SPEC = 0.60 38 | SPEC_ID = 'CELLULOSE' 39 | NU_MATL = 0.40 40 | MATL_ID = 'component 2' 41 | HEAT_OF_REACTION = 1000. / 42 | 43 | 44 | Original values for material component 2 45 | have been: 46 | REFERENCE_TEMPERATURE = 430. 47 | REFERENCE_RATE = 0.0075 48 | 49 | 50 | &MATL ID = 'component 2' 51 | EMISSIVITY = 1.0 52 | DENSITY = 500. 53 | CONDUCTIVITY = 0.20 54 | SPECIFIC_HEAT = 1.0 55 | N_REACTIONS = 1 56 | REFERENCE_TEMPERATURE = #rtc02# 57 | REFERENCE_RATE = #rrc02# 58 | HEATING_RATE = #hr# 59 | NU_SPEC = 0.85 60 | SPEC_ID = 'CELLULOSE' 61 | NU_MATL = 0.15 62 | MATL_ID = 'residue' 63 | HEAT_OF_REACTION = 1000. / 64 | 65 | &MATL ID = 'water' 66 | DENSITY = 1000. 67 | CONDUCTIVITY = 0.1 68 | SPECIFIC_HEAT= 4.184 69 | N_REACTIONS = 1 70 | REFERENCE_TEMPERATURE = 100. 71 | REFERENCE_RATE = 0.0016 72 | HEATING_RATE = #hr# 73 | NU_SPEC = 1.0 74 | SPEC_ID = 'WATER VAPOR' 75 | HEAT_OF_REACTION= 2500. / 76 | 77 | &MATL ID = 'residue' 78 | DENSITY = 200. 79 | CONDUCTIVITY = 0.20 80 | SPECIFIC_HEAT = 1.0 / 81 | 82 | &TAIL / 83 | -------------------------------------------------------------------------------- /examples/tga_analysis_02/OriginalFiles/tga_15K.fds: -------------------------------------------------------------------------------- 1 | &HEAD CHID='tga_15K', TITLE='TGA tutorial' / 2 | 3 | &MESH IJK=3,1,4, XB=-2,2,-0.5,0.5,0,1 / 4 | 5 | &TIME T_END=60. / 6 | 7 | &REAC FUEL='CELLULOSE', C=6, H=10, O=5, SOOT_YIELD=0.015 / 8 | &SPEC ID='WATER VAPOR' / 9 | 10 | &VENT XB=-1,1,-0.5,0.5,0.0,0.0, SURF_ID='SAMPLE' / 11 | 12 | 13 | &SURF ID = 'SAMPLE' 14 | TGA_ANALYSIS = .TRUE. 
15 | COLOR = 'RED' 16 | THICKNESS = 0.01 17 | MATL_ID(1,1) = 'component 1' 18 | MATL_ID(1,2) = 'water' 19 | MATL_MASS_FRACTION(1,1) = 0.90 20 | MATL_MASS_FRACTION(1,2) = 0.10 / 21 | 22 | 23 | 24 | Original values for material component 1 25 | have been: 26 | REFERENCE_TEMPERATURE = 315. 27 | REFERENCE_RATE = 0.0056 28 | 29 | &MATL ID = 'component 1' 30 | EMISSIVITY = 1.0 31 | DENSITY = 500. 32 | CONDUCTIVITY = 0.20 33 | SPECIFIC_HEAT = 1.0 34 | N_REACTIONS = 1 35 | REFERENCE_TEMPERATURE = 315. 36 | REFERENCE_RATE = 0.0056 37 | HEATING_RATE = 15 38 | NU_SPEC = 0.60 39 | SPEC_ID = 'CELLULOSE' 40 | NU_MATL = 0.40 41 | MATL_ID = 'component 2' 42 | HEAT_OF_REACTION = 1000. / 43 | 44 | 45 | Original values for material component 2 46 | have been: 47 | REFERENCE_TEMPERATURE = 430. 48 | REFERENCE_RATE = 0.0075 49 | 50 | 51 | &MATL ID = 'component 2' 52 | EMISSIVITY = 1.0 53 | DENSITY = 500. 54 | CONDUCTIVITY = 0.20 55 | SPECIFIC_HEAT = 1.0 56 | N_REACTIONS = 1 57 | REFERENCE_TEMPERATURE = 430. 58 | REFERENCE_RATE = 0.0075 59 | HEATING_RATE = 15 60 | NU_SPEC = 0.85 61 | SPEC_ID = 'CELLULOSE' 62 | NU_MATL = 0.15 63 | MATL_ID = 'residue' 64 | HEAT_OF_REACTION = 1000. / 65 | 66 | &MATL ID = 'water' 67 | DENSITY = 1000. 68 | CONDUCTIVITY = 0.1 69 | SPECIFIC_HEAT= 4.184 70 | N_REACTIONS = 1 71 | REFERENCE_TEMPERATURE = 100. 72 | REFERENCE_RATE = 0.0016 73 | HEATING_RATE = 15 74 | NU_SPEC = 1.0 75 | SPEC_ID = 'WATER VAPOR' 76 | HEAT_OF_REACTION= 2500. / 77 | 78 | &MATL ID = 'residue' 79 | DENSITY = 200. 80 | CONDUCTIVITY = 0.20 81 | SPECIFIC_HEAT = 1.0 / 82 | 83 | &TAIL / 84 | -------------------------------------------------------------------------------- /examples/propti_analyser_01/tga_analysis_01.fds: -------------------------------------------------------------------------------- 1 | &HEAD CHID='#CHID#', TITLE='TGA tutorial' / 2 | 3 | &MESH IJK=3,1,4, XB=-2,2,-0.5,0.5,0,1 / 4 | 5 | &TIME T_END=60. 
/ 6 | 7 | &REAC FUEL='CELLULOSE', C=6, H=10, O=5, SOOT_YIELD=0.015 / 8 | &SPEC ID='WATER VAPOR' / 9 | 10 | &VENT XB=-1,1,-0.5,0.5,0.0,0.0, SURF_ID='SAMPLE' / 11 | 12 | &SURF ID = 'SAMPLE' 13 | TGA_ANALYSIS = .TRUE. 14 | COLOR = 'RED' 15 | THICKNESS = 0.01 16 | MATL_ID(1,1) = 'component 1' 17 | MATL_ID(1,2) = 'water' 18 | MATL_MASS_FRACTION(1,1) = 0.90 19 | MATL_MASS_FRACTION(1,2) = 0.10 / 20 | 21 | 22 | 23 | Original values for material component 1 24 | have been: 25 | REFERENCE_TEMPERATURE = 315. 26 | REFERENCE_RATE = 0.0056 27 | 28 | &MATL ID = 'component 1' 29 | EMISSIVITY = 1.0 30 | DENSITY = 500. 31 | CONDUCTIVITY = 0.20 32 | SPECIFIC_HEAT = 1.0 33 | N_REACTIONS = 1 34 | REFERENCE_TEMPERATURE = #rtc01# 35 | REFERENCE_RATE = #rrc01# 36 | HEATING_RATE = #hr# 37 | NU_SPEC = 0.60 38 | SPEC_ID = 'CELLULOSE' 39 | NU_MATL = 0.40 40 | MATL_ID = 'component 2' 41 | HEAT_OF_REACTION = 1000. / 42 | 43 | 44 | Original values for material component 2 45 | have been: 46 | REFERENCE_TEMPERATURE = 430. 47 | REFERENCE_RATE = 0.0075 48 | 49 | 50 | &MATL ID = 'component 2' 51 | EMISSIVITY = 1.0 52 | DENSITY = 500. 53 | CONDUCTIVITY = 0.20 54 | SPECIFIC_HEAT = 1.0 55 | N_REACTIONS = 1 56 | REFERENCE_TEMPERATURE = #rtc02# 57 | REFERENCE_RATE = #rrc02# 58 | HEATING_RATE = #hr# 59 | NU_SPEC = 0.85 60 | SPEC_ID = 'CELLULOSE' 61 | NU_MATL = 0.15 62 | MATL_ID = 'residue' 63 | HEAT_OF_REACTION = 1000. / 64 | 65 | &MATL ID = 'water' 66 | DENSITY = 1000. 67 | CONDUCTIVITY = 0.1 68 | SPECIFIC_HEAT= 4.184 69 | N_REACTIONS = 1 70 | REFERENCE_TEMPERATURE = 100. 71 | REFERENCE_RATE = 0.0016 72 | HEATING_RATE = #hr# 73 | NU_SPEC = 1.0 74 | SPEC_ID = 'WATER VAPOR' 75 | HEAT_OF_REACTION= 2500. / 76 | 77 | &MATL ID = 'residue' 78 | DENSITY = 200. 
79 | CONDUCTIVITY = 0.20 80 | SPECIFIC_HEAT = 1.0 / 81 | 82 | &TAIL / 83 | -------------------------------------------------------------------------------- /examples/propti_desc_stat_01/tga_analysis_01.fds: -------------------------------------------------------------------------------- 1 | &HEAD CHID='#CHID#', TITLE='TGA tutorial' / 2 | 3 | &MESH IJK=3,1,4, XB=-2,2,-0.5,0.5,0,1 / 4 | 5 | &TIME T_END=60. / 6 | 7 | &REAC FUEL='CELLULOSE', C=6, H=10, O=5, SOOT_YIELD=0.015 / 8 | &SPEC ID='WATER VAPOR' / 9 | 10 | &VENT XB=-1,1,-0.5,0.5,0.0,0.0, SURF_ID='SAMPLE' / 11 | 12 | &SURF ID = 'SAMPLE' 13 | TGA_ANALYSIS = .TRUE. 14 | COLOR = 'RED' 15 | THICKNESS = 0.01 16 | MATL_ID(1,1) = 'component 1' 17 | MATL_ID(1,2) = 'water' 18 | MATL_MASS_FRACTION(1,1) = 0.90 19 | MATL_MASS_FRACTION(1,2) = 0.10 / 20 | 21 | 22 | 23 | Original values for material component 1 24 | have been: 25 | REFERENCE_TEMPERATURE = 315. 26 | REFERENCE_RATE = 0.0056 27 | 28 | &MATL ID = 'component 1' 29 | EMISSIVITY = 1.0 30 | DENSITY = 500. 31 | CONDUCTIVITY = 0.20 32 | SPECIFIC_HEAT = 1.0 33 | N_REACTIONS = 1 34 | REFERENCE_TEMPERATURE = #rtc01# 35 | REFERENCE_RATE = #rrc01# 36 | HEATING_RATE = #hr# 37 | NU_SPEC = 0.60 38 | SPEC_ID = 'CELLULOSE' 39 | NU_MATL = 0.40 40 | MATL_ID = 'component 2' 41 | HEAT_OF_REACTION = 1000. / 42 | 43 | 44 | Original values for material component 2 45 | have been: 46 | REFERENCE_TEMPERATURE = 430. 47 | REFERENCE_RATE = 0.0075 48 | 49 | 50 | &MATL ID = 'component 2' 51 | EMISSIVITY = 1.0 52 | DENSITY = 500. 53 | CONDUCTIVITY = 0.20 54 | SPECIFIC_HEAT = 1.0 55 | N_REACTIONS = 1 56 | REFERENCE_TEMPERATURE = #rtc02# 57 | REFERENCE_RATE = #rrc02# 58 | HEATING_RATE = #hr# 59 | NU_SPEC = 0.85 60 | SPEC_ID = 'CELLULOSE' 61 | NU_MATL = 0.15 62 | MATL_ID = 'residue' 63 | HEAT_OF_REACTION = 1000. / 64 | 65 | &MATL ID = 'water' 66 | DENSITY = 1000. 67 | CONDUCTIVITY = 0.1 68 | SPECIFIC_HEAT= 4.184 69 | N_REACTIONS = 1 70 | REFERENCE_TEMPERATURE = 100. 
71 | REFERENCE_RATE = 0.0016 72 | HEATING_RATE = #hr# 73 | NU_SPEC = 1.0 74 | SPEC_ID = 'WATER VAPOR' 75 | HEAT_OF_REACTION= 2500. / 76 | 77 | &MATL ID = 'residue' 78 | DENSITY = 200. 79 | CONDUCTIVITY = 0.20 80 | SPECIFIC_HEAT = 1.0 / 81 | 82 | &TAIL / 83 | -------------------------------------------------------------------------------- /examples/propti_plot_best_sim_exp_01/tga_analysis_01.fds: -------------------------------------------------------------------------------- 1 | &HEAD CHID='#CHID#', TITLE='TGA tutorial' / 2 | 3 | &MESH IJK=3,1,4, XB=-2,2,-0.5,0.5,0,1 / 4 | 5 | &TIME T_END=60. / 6 | 7 | &REAC FUEL='CELLULOSE', C=6, H=10, O=5, SOOT_YIELD=0.015 / 8 | &SPEC ID='WATER VAPOR' / 9 | 10 | &VENT XB=-1,1,-0.5,0.5,0.0,0.0, SURF_ID='SAMPLE' / 11 | 12 | &SURF ID = 'SAMPLE' 13 | TGA_ANALYSIS = .TRUE. 14 | COLOR = 'RED' 15 | THICKNESS = 0.01 16 | MATL_ID(1,1) = 'component 1' 17 | MATL_ID(1,2) = 'water' 18 | MATL_MASS_FRACTION(1,1) = 0.90 19 | MATL_MASS_FRACTION(1,2) = 0.10 / 20 | 21 | 22 | 23 | Original values for material component 1 24 | have been: 25 | REFERENCE_TEMPERATURE = 315. 26 | REFERENCE_RATE = 0.0056 27 | 28 | &MATL ID = 'component 1' 29 | EMISSIVITY = 1.0 30 | DENSITY = 500. 31 | CONDUCTIVITY = 0.20 32 | SPECIFIC_HEAT = 1.0 33 | N_REACTIONS = 1 34 | REFERENCE_TEMPERATURE = #rtc01# 35 | REFERENCE_RATE = #rrc01# 36 | HEATING_RATE = #hr# 37 | NU_SPEC = 0.60 38 | SPEC_ID = 'CELLULOSE' 39 | NU_MATL = 0.40 40 | MATL_ID = 'component 2' 41 | HEAT_OF_REACTION = 1000. / 42 | 43 | 44 | Original values for material component 2 45 | have been: 46 | REFERENCE_TEMPERATURE = 430. 47 | REFERENCE_RATE = 0.0075 48 | 49 | 50 | &MATL ID = 'component 2' 51 | EMISSIVITY = 1.0 52 | DENSITY = 500. 
53 | CONDUCTIVITY = 0.20 54 | SPECIFIC_HEAT = 1.0 55 | N_REACTIONS = 1 56 | REFERENCE_TEMPERATURE = #rtc02# 57 | REFERENCE_RATE = #rrc02# 58 | HEATING_RATE = #hr# 59 | NU_SPEC = 0.85 60 | SPEC_ID = 'CELLULOSE' 61 | NU_MATL = 0.15 62 | MATL_ID = 'residue' 63 | HEAT_OF_REACTION = 1000. / 64 | 65 | &MATL ID = 'water' 66 | DENSITY = 1000. 67 | CONDUCTIVITY = 0.1 68 | SPECIFIC_HEAT= 4.184 69 | N_REACTIONS = 1 70 | REFERENCE_TEMPERATURE = 100. 71 | REFERENCE_RATE = 0.0016 72 | HEATING_RATE = #hr# 73 | NU_SPEC = 1.0 74 | SPEC_ID = 'WATER VAPOR' 75 | HEAT_OF_REACTION= 2500. / 76 | 77 | &MATL ID = 'residue' 78 | DENSITY = 200. 79 | CONDUCTIVITY = 0.20 80 | SPECIFIC_HEAT = 1.0 / 81 | 82 | &TAIL / 83 | -------------------------------------------------------------------------------- /examples/tga_analysis_02/tga_analysis_02.fds: -------------------------------------------------------------------------------- 1 | &HEAD CHID='#CHID#', TITLE='TGA tutorial' / 2 | 3 | &MESH IJK=3,1,4, XB=-2,2,-0.5,0.5,0,1 / 4 | 5 | &TIME T_END=60. / 6 | 7 | &REAC FUEL='CELLULOSE', C=6, H=10, O=5, SOOT_YIELD=0.015 / 8 | &SPEC ID='WATER VAPOR' / 9 | 10 | &VENT XB=-1,1,-0.5,0.5,0.0,0.0, SURF_ID='SAMPLE' / 11 | 12 | &SURF ID = 'SAMPLE' 13 | TGA_ANALYSIS = .TRUE. 14 | TGA_HEATING_RATE = #hr# 15 | COLOR = 'RED' 16 | THICKNESS = 0.01 17 | MATL_ID(1,1) = 'component 1' 18 | MATL_ID(1,2) = 'water' 19 | MATL_MASS_FRACTION(1,1) = 0.90 20 | MATL_MASS_FRACTION(1,2) = 0.10 / 21 | 22 | 23 | 24 | Original values for material component 1 25 | have been: 26 | REFERENCE_TEMPERATURE = 315. 27 | REFERENCE_RATE = 0.0056 28 | 29 | &MATL ID = 'component 1' 30 | EMISSIVITY = 1.0 31 | DENSITY = 500. 32 | CONDUCTIVITY = 0.20 33 | SPECIFIC_HEAT = 1.0 34 | N_REACTIONS = 1 35 | REFERENCE_TEMPERATURE = #rtc01# 36 | REFERENCE_RATE = #rrc01# 37 | HEATING_RATE = #hr# 38 | NU_SPEC = 0.60 39 | SPEC_ID = 'CELLULOSE' 40 | NU_MATL = 0.40 41 | MATL_ID = 'component 2' 42 | HEAT_OF_REACTION = 1000. 
/ 43 | 44 | 45 | Original values for material component 2 46 | have been: 47 | REFERENCE_TEMPERATURE = 430. 48 | REFERENCE_RATE = 0.0075 49 | 50 | 51 | &MATL ID = 'component 2' 52 | EMISSIVITY = 1.0 53 | DENSITY = 500. 54 | CONDUCTIVITY = 0.20 55 | SPECIFIC_HEAT = 1.0 56 | N_REACTIONS = 1 57 | REFERENCE_TEMPERATURE = #rtc02# 58 | REFERENCE_RATE = #rrc02# 59 | HEATING_RATE = #hr# 60 | NU_SPEC = 0.85 61 | SPEC_ID = 'CELLULOSE' 62 | NU_MATL = 0.15 63 | MATL_ID = 'residue' 64 | HEAT_OF_REACTION = 1000. / 65 | 66 | &MATL ID = 'water' 67 | DENSITY = 1000. 68 | CONDUCTIVITY = 0.1 69 | SPECIFIC_HEAT= 4.184 70 | N_REACTIONS = 1 71 | REFERENCE_TEMPERATURE = 100. 72 | REFERENCE_RATE = 0.0016 73 | HEATING_RATE = #hr# 74 | NU_SPEC = 1.0 75 | SPEC_ID = 'WATER VAPOR' 76 | HEAT_OF_REACTION= 2500. / 77 | 78 | &MATL ID = 'residue' 79 | DENSITY = 200. 80 | CONDUCTIVITY = 0.20 81 | SPECIFIC_HEAT = 1.0 / 82 | 83 | &TAIL / 84 | -------------------------------------------------------------------------------- /examples/cone_01/input.py: -------------------------------------------------------------------------------- 1 | # define variable 'ops': optimisation parameter set 2 | # define variable 'setups': simulation setup set 3 | # define variable 'optimiser': properties for the optimiser 4 | 5 | # import just for IDE convenience 6 | import propti as pr 7 | 8 | # fix the chid 9 | CHID = 'CONE' 10 | 11 | # define the optimisation parameter 12 | op1 = pr.Parameter(name='thickness1', place_holder='thickness1', 13 | min_value=1e-3, max_value=1e-1) 14 | op2 = pr.Parameter(name='thickness2', place_holder='thickness2', 15 | min_value=1e-3, max_value=1e-1) 16 | ops = pr.ParameterSet(params=[op1, op2]) 17 | 18 | # define general model parameter, including optimisation parameter 19 | mps0 = pr.ParameterSet(params=[op1, op2]) 20 | mps0.append(pr.Parameter(name='heat flux', place_holder='exflux', value=75)) 21 | mps0.append(pr.Parameter(name='tend', place_holder='tend')) 22 | 
mps0.append(pr.Parameter(name='mesh_i', place_holder='i', value=3)) 23 | mps0.append(pr.Parameter(name='mesh_j', place_holder='j', value=3)) 24 | mps0.append(pr.Parameter(name='mesh_k', place_holder='k', value=4)) 25 | mps0.append(pr.Parameter(name='chid', place_holder='filename', value=CHID)) 26 | 27 | # define empty simulation setup set 28 | setups = pr.SimulationSetupSet() 29 | 30 | # loop over all 'iso' values 31 | for iso in ['Alu', 'ISO']: 32 | 33 | # define model-experiment data relation 34 | r = pr.Relation() 35 | r.model.file_name = "{}_hrr.csv".format(CHID) 36 | r.model.label_x = 'Time' 37 | r.model.label_y = 'MLR_TOTAL' 38 | r.model.header_line = 1 39 | r.experiment.file_name = "Data.csv" 40 | r.experiment.label_x = '# Time_{}_75'.format(iso) 41 | r.experiment.label_y = 'SG_{}_75'.format(iso) 42 | r.experiment.header_line = 0 43 | r.experiment.yfactor = 1e-3 44 | r.fitness_method=pr.FitnessMethodRMSE(n_points=100) 45 | 46 | # use above model prototype (mps0) as template 47 | mps = copy.deepcopy(mps0) 48 | 49 | TEND = 600 50 | # modify a single value of model parameter 51 | mps[3].value = TEND 52 | 53 | # create simulation setup object 54 | template_file = "SimpleConeLaunchTest_{}_BestParaSet_Toast.fds".format(iso) 55 | s = pr.SimulationSetup(name='cone_{}'.format(iso), 56 | work_dir='cone_{}'.format(iso), 57 | model_template=template_file, 58 | model_parameter=mps, 59 | model_executable='fds', 60 | relations=r) 61 | 62 | # append above object to simulation setup set 63 | setups.append(s) 64 | 65 | # use default values for optimiser 66 | optimiser = pr.OptimiserProperties() 67 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yaml: -------------------------------------------------------------------------------- 1 | name: Deploy Propti 2 | on: 3 | repository_dispatch: 4 | workflow_dispatch: 5 | inputs: 6 | tag: 7 | description: 'FDS version' 8 | required: true 9 | type: string 10 | env: 11 | 
REGISTRY: ghcr.io 12 | ORG: firedynamics 13 | IMAGE_NAME: propti 14 | jobs: 15 | build-and-push-image: 16 | runs-on: ubuntu-latest 17 | env: 18 | IS_LATEST: true 19 | permissions: 20 | contents: read 21 | packages: write 22 | steps: 23 | - name: Check compatibility (minimum FDS version should be 6.7.4) 24 | run: | 25 | if [ "${{ inputs.tag }}" == "`echo -e "6.7.3\n${{ inputs.tag }}" | sort -V | tail -n1`" ] 26 | then 27 | exit 0 28 | fi 29 | exit 1 30 | - name: Extract image name 31 | run: echo "BASE=${{ env.REGISTRY }}/${{ env.ORG }}/${{ env.IMAGE_NAME }}" >> $GITHUB_ENV 32 | - name: Checkout repository 33 | uses: actions/checkout@v4 34 | with: 35 | ref: ${{ github.ref }} 36 | - name: Log in to the Container registry 37 | uses: docker/login-action@v3 38 | with: 39 | registry: ${{ env.REGISTRY }} 40 | username: ${{ github.actor }} 41 | password: ${{ secrets.GITHUB_TOKEN }} 42 | - name: Extract metadata (tags, labels) for Docker 43 | id: meta 44 | uses: docker/metadata-action@v5 45 | with: 46 | images: ${BASE} 47 | - name: Check if new build version is latest 48 | continue-on-error: true 49 | run: | 50 | REGEX=[0-9]+\.[0-9]+\.[0-9]+ 51 | LATEST_VERSION=$(curl --silent "https://api.github.com/users/${{ env.ORG }}/packages/container/${{ env.IMAGE_NAME }}/versions" --stderr - \ 52 | --header "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" | \ 53 | grep -E "[[:space:]]+\"${REGEX}\"" | grep -oEi ${REGEX} | tr " " "\n" | sort -V | tail -n1) 54 | if [ "${{ inputs.tag }}" != "$LATEST_VERSION" ] && [ "${{ inputs.tag }}" == "`echo -e "$LATEST_VERSION\n${{ inputs.tag }}" | sort -V | head -n1`" ] 55 | then 56 | echo "IS_LATEST=false" >> $GITHUB_ENV 57 | fi 58 | - name: Set Tags 59 | run: | 60 | if [[ ${{ env.IS_LATEST }} == true ]] 61 | then 62 | echo "TAGS=${BASE}:${{ inputs.tag }},${BASE}:latest" >> $GITHUB_ENV 63 | else 64 | echo "TAGS=${BASE}:${{ inputs.tag }}" >> $GITHUB_ENV 65 | fi 66 | - name: Set up Docker Buildx 67 | uses: docker/setup-buildx-action@v3 68 | - name: 
#!/usr/bin/env python3
# Prepare a PROPTI optimisation run: read the user input file, create the
# work directories, copy model templates and experimental data, and store
# the resulting state in 'propti.pickle.init' for the subsequent run.
import sys
import os
import numpy as np
import copy
import pandas as pd
import shutil as sh
import pickle

import propti as pr

import logging

import argparse
parser = argparse.ArgumentParser()
parser.add_argument("input_file", type=str,
                    help="python input file containing parameters and "
                         "simulation setups")
parser.add_argument("--root_dir", type=str,
                    help="root directory for optimisation process", default='.')
parser.add_argument("--prepare_init_inputs",
                    help="prepare input files with initial values",
                    action="store_true")
cmdl_args = parser.parse_args()

# The exec'd input file is expected to (re)define these three names.
setups = None  # type: pr.SimulationSetupSet
ops = None  # type: pr.ParameterSet
optimiser = None  # type: pr.OptimiserProperties

input_file = cmdl_args.input_file

logging.info("reading input file: {}".format(input_file))
# context manager closes the file handle deterministically
with open(input_file) as f:
    exec(f.read(), globals())

# Abort early if the input file did not define the required objects.
# NOTE: these checks must run before pr.Version(setups[0]) below, which
# would otherwise raise a TypeError on a missing 'setups' before anything
# was logged.
fatal = False
if ops is None:
    logging.critical("optimisation parameters not defined")
    fatal = True
if setups is None:
    logging.critical("simulation setups not defined")
    fatal = True
if optimiser is None:
    logging.critical("optimiser properties not defined")
    fatal = True
if fatal:
    sys.exit(1)

# Check version numbers
ver = pr.Version(setups[0])

if ver.flag_propti == 1:
    logging.warning("Cannot determine PROPTI version.")
    logging.info("No version file present for PROPTI.")
if ver.flag_exec == 1:
    logging.critical("No executable present for optimization!")
    logging.critical("Cannot perform optimization process !")

input_file_directory = os.path.dirname(input_file)
logging.info("input file directory: {}".format(input_file_directory))


# TODO: put the following lines into a general function (basic_functions.py)?
for s in setups:

    cdir = os.path.join(cmdl_args.root_dir, s.work_dir)

    # create work directories
    if not os.path.exists(cdir):
        os.mkdir(cdir)

    # copy model template
    sh.copy(os.path.join(input_file_directory, s.model_template), cdir)

    s.model_template = os.path.join(cdir, os.path.basename(s.model_template))

    # copy all experimental data
    # TODO: Re-think the copy behaviour. If file is identical, just keep one
    #  instance?
    for r in s.relations:
        if r.experiment is not None:
            sh.copy(os.path.join(input_file_directory,
                                 r.experiment.file_name), cdir)
            r.experiment.file_name = os.path.join(
                cdir, os.path.basename(r.experiment.file_name))

# check for potential non-unique model input files
in_file_list = []
for s in setups:
    tpath = os.path.join(s.work_dir, s.model_input_file)
    logging.debug("check if {} is in {}".format(tpath, in_file_list))
    if tpath in in_file_list:
        logging.error("non unique module input file path: {}".format(tpath))
        sys.exit()
    in_file_list.append(tpath)

print(ver, setups, ops, optimiser)

# persist the prepared state for propti_run.py / the post-processing tools
with open('propti.pickle.init', 'wb') as out_file:
    pickle.dump((ver, setups, ops, optimiser), out_file)

if cmdl_args.prepare_init_inputs:
    logging.info("prepare input files with initial values")
    for s in setups:
        pr.create_input_file(s)
It provides a framework for inverse modelling (or optimisation) of parameters in computer simulation.
PROPTI is listed on ZENODO to get Digital Object Identifiers (DOI) and to allow for citations in scientific papers.
You can find the necessary information here: 26 | 27 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.1188756.svg)](https://doi.org/10.5281/zenodo.1188756) 28 | 29 | We have set up a project on ResearchGate: [PROPTI project](https://www.researchgate.net/project/PROPTI-An-Generalised-Inverse-Modelling-Framework) 30 | 31 | Corresponding publications can be found here: 32 | 33 | [PROPTI - A Generalised Inverse Modelling Framework](https://www.researchgate.net/publication/327655651_PROPTI_-_A_Generalised_Inverse_Modelling_Framework) 34 | 35 | [Application cases of inverse modelling with the PROPTI framework](https://doi.org/10.1016/j.firesaf.2019.102835) 36 | 37 | [Role of the Cost Function for Material Parameter Estimation](https://www.researchgate.net/publication/344217501_ROLE_OF_THE_COST_FUNCTION_FOR_MATERIAL_PARAMETER_ESTIMATION) 38 | -------------------------------------------------------------------------------- /propti/propti_pre_processing.py: -------------------------------------------------------------------------------- 1 | import re 2 | import os 3 | import sys 4 | import shutil as sh 5 | import logging 6 | 7 | import propti as pr 8 | 9 | import statistics as stat 10 | import numpy as np 11 | import pandas as pd 12 | from scipy import stats 13 | from scipy import interpolate 14 | import scipy.signal as sign 15 | from scipy.stats import norm 16 | import matplotlib as mpl 17 | mpl.use('pdf') 18 | 19 | import matplotlib.pyplot as plt 20 | 21 | 22 | setups = None # type: pr.SimulationSetupSet 23 | ops = None # type: pr.ParameterSet 24 | optimiser = None # type: pr.OptimiserProperties 25 | 26 | 27 | # This function takes a Pandas data frame and a list with header labels. 28 | # Based on the header labels it looks for the shortest column. Afterwards 29 | # it takes the smallest and largest values of the provided columns (per 30 | # line) and collects them one list, each. 
def calculate_min_mean_max_lists(data_frame, header_list):
    """Row-wise min/mean/max across the named columns of *data_frame*.

    :param data_frame: pandas DataFrame holding the data series
    :param header_list: column labels to compare against each other
    :return: tuple ``(min_len, list_min, list_mean, list_max)`` where
        ``min_len`` is the length of the shortest selected column and the
        three lists hold, for each of the first ``min_len`` rows, the
        smallest, mean and largest value across the selected columns
    """
    # Length of the shortest of the requested columns; rows beyond this
    # index are ignored for all columns.
    min_len = min(len(data_frame[label]) for label in header_list)

    list_min = []
    list_mean = []
    list_max = []

    # Walk the rows and aggregate across the selected columns.
    # (The original used 'column' for the row index and 'line' for the
    # column index — names are fixed here, behaviour is unchanged.)
    for row in range(min_len):
        row_values = [data_frame[label][row] for label in header_list]

        list_max.append(max(row_values))
        list_mean.append(np.mean(row_values))
        list_min.append(min(row_values))

    return min_len, list_min, list_mean, list_max


def savgol_filter(x_values, window_length=37, poly_order=3):
    """Smooth *x_values* with a Savitzky-Golay filter.

    :param x_values: 1-D sequence of samples to smooth
    :param window_length: filter window length (default 37, as before;
        must not exceed ``len(x_values)``)
    :param poly_order: polynomial order of the fit (default 3, as before)
    :return: numpy array of the smoothed values
    """
    return sign.savgol_filter(x_values,
                              window_length,
                              poly_order,
                              deriv=0,
                              delta=1.0,
                              axis=-1,
                              mode='interp',
                              cval=0.0)


def interpolate_lists(raw_lists, x_increment=1, window=21, poly_order=3,
                      new_data_file='proc_data',
                      plot_file_name='average_smooth_plot',
                      plot_title='Averaged and Sav-Gol smoothed',
                      x_label='x label',
                      y_label='y label'):
    """Interpolate several (x, y) series onto a common x-grid, average and
    smooth them, write the smoothed mean to CSV and plot raw vs. processed.

    :param raw_lists: sequence of ``[x_values, y_values]`` pairs
    :param x_increment: step width of the common x-grid
    :param window: Savitzky-Golay window length for the smoothing
    :param poly_order: Savitzky-Golay polynomial order
    :param new_data_file: base name of the CSV output ('.csv' is appended)
    :param plot_file_name: file name of the saved comparison plot
    :param plot_title: title of the comparison plot
    :param x_label: x-axis label of the plot
    :param y_label: y-axis label of the plot
    :return: list ``[x_new, y0, y1, ...]`` with the common grid followed by
        one interpolated y-series per input pair
    """
    series_indices = range(len(raw_lists))

    # The shortest x-range over all series limits the common grid, so
    # every series can be interpolated without extrapolating.
    x_max_collection = [max(raw_lists[i][0]) for i in series_indices]
    x_min = int(min(x_max_collection))
    print('max col: {}'.format(x_max_collection))
    print('x_min: {}'.format(int(x_min)))
    x_new = np.arange(0, x_min, x_increment)

    # Interpolate each data series onto the common x-grid.
    interpolated_data = [x_new]
    for i in series_indices:
        f = interpolate.interp1d(raw_lists[i][0], raw_lists[i][1])
        interpolated_data.append(f(x_new))

    # Per-grid-point mean and median over all series.
    data_mean = []
    data_median = []
    for i in range(len(x_new)):
        values = [interpolated_data[j + 1][i] for j in series_indices]
        data_mean.append(stat.mean(values))
        data_median.append(stat.median(values))

    # Smooth both aggregates with a Savitzky-Golay filter.
    data_smoothed = sign.savgol_filter(data_mean,
                                       window,
                                       poly_order)
    median_smoothed = sign.savgol_filter(data_median,
                                         window,
                                         poly_order)

    processed_data = [x_new, data_smoothed]
    processed_data1 = [x_new, median_smoothed]

    # Save the smoothed mean as CSV via a pandas DataFrame.
    proc1 = np.vstack((x_new, data_smoothed))
    proc2 = pd.DataFrame.from_records(proc1.transpose(),
                                      columns=['newx', 'newy']).set_index('newx')
    proc2.to_csv('{}.csv'.format(new_data_file))
    print(proc2.head())

    # Plot raw series (gray) against processed mean (black, solid) and
    # processed median (red, dashed).
    fig = plt.figure()
    plt.title(plot_title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    for i in series_indices:
        plt.plot(raw_lists[i][0], raw_lists[i][1],
                 color='gray', label='Raw')

    plt.plot(processed_data[0], processed_data[1],
             color='black', label='Processed mean')

    plt.plot(processed_data1[0], processed_data1[1],
             color='red', label='Processed median', linestyle='--')

    plt.grid()
    plt.legend(loc='best')
    plt.savefig(plot_file_name)
    plt.close(fig)

    return interpolated_data
17 | 7.5050000E+001,3.9375691E+002,6.8895582E-003 18 | 8.0050000E+001,3.9597303E+002,7.7107906E-003 19 | 8.5050000E+001,3.9768523E+002,8.3940230E-003 20 | 9.0050000E+001,3.9904381E+002,8.9674144E-003 21 | 9.5050000E+001,4.0014803E+002,9.4550552E-003 22 | 1.0005000E+002,4.0106827E+002,9.8762350E-003 23 | 1.0505000E+002,4.0185799E+002,1.0246406E-002 24 | 1.1005000E+002,4.0255903E+002,1.0578340E-002 25 | 1.1505000E+002,4.0320282E+002,1.0882963E-002 26 | 1.2005000E+002,4.0380961E+002,1.1169781E-002 27 | 1.2505000E+002,4.0438701E+002,1.1446909E-002 28 | 1.3005000E+002,4.0492920E+002,1.1720853E-002 29 | 1.3505000E+002,4.0541776E+002,1.1996132E-002 30 | 1.4005000E+002,4.0582483E+002,1.2274869E-002 31 | 1.4505000E+002,4.0611901E+002,1.2556472E-002 32 | 1.5005000E+002,4.0627346E+002,1.2837570E-002 33 | 1.5500000E+002,4.0627518E+002,1.3111075E-002 34 | 1.6000000E+002,4.0613144E+002,1.3371353E-002 35 | 1.6500000E+002,4.0586957E+002,1.3612688E-002 36 | 1.7000000E+002,4.0553782E+002,1.3828392E-002 37 | 1.7500000E+002,4.0519548E+002,1.4015923E-002 38 | 1.8000000E+002,4.0490384E+002,1.4175968E-002 39 | 1.8500000E+002,4.0471820E+002,1.4312370E-002 40 | 1.9000000E+002,4.0468276E+002,1.4431904E-002 41 | 1.9500000E+002,4.0482825E+002,1.4544153E-002 42 | 2.0000000E+002,4.0517101E+002,1.4661548E-002 43 | 2.0500000E+002,4.0571250E+002,1.4799365E-002 44 | 2.1000000E+002,4.0643805E+002,1.4975375E-002 45 | 2.1500000E+002,4.0731480E+002,1.5208773E-002 46 | 2.2000000E+002,4.0828899E+002,1.5518127E-002 47 | 2.2500000E+002,4.0928396E+002,1.5918398E-002 48 | 2.3000000E+002,4.1020103E+002,1.6417498E-002 49 | 2.3500000E+002,4.1092653E+002,1.7013227E-002 50 | 2.4000000E+002,4.1134770E+002,1.7691497E-002 51 | 2.4500000E+002,4.1137730E+002,1.8426802E-002 52 | 2.5000000E+002,4.0558459E+002,2.0242702E-002 53 | 2.5500000E+002,4.0990270E+002,2.4392503E-002 54 | 2.6000000E+002,4.1072691E+002,2.3371256E-002 55 | 2.6500000E+002,4.1109706E+002,2.2745386E-002 56 | 2.7000000E+002,4.1150012E+002,2.2567972E-002 
57 | 2.7500000E+002,4.1189492E+002,2.2598802E-002 58 | 2.8000000E+002,4.1227717E+002,2.2738779E-002 59 | 2.8500000E+002,4.1264482E+002,2.2955992E-002 60 | 2.9000000E+002,4.1297218E+002,2.3247828E-002 61 | 2.9500000E+002,4.1320983E+002,2.3621918E-002 62 | 3.0000000E+002,4.1330666E+002,2.4084727E-002 63 | 3.0500000E+002,4.1324234E+002,2.4634689E-002 64 | 3.1000000E+002,4.1305170E+002,2.5259823E-002 65 | 3.1500000E+002,4.1282370E+002,2.5939450E-002 66 | 3.2000000E+002,4.1267303E+002,2.6648297E-002 67 | 3.2500000E+002,4.1270062E+002,2.7361397E-002 68 | 3.3000000E+002,4.1296197E+002,2.8059204E-002 69 | 3.3500000E+002,4.1345068E+002,2.8732321E-002 70 | 3.4000000E+002,4.1409438E+002,2.9384296E-002 71 | 3.4500000E+002,4.1476006E+002,3.0030325E-002 72 | 3.5000000E+002,4.1082473E+002,3.0777523E-002 73 | 3.5500000E+002,4.1161608E+002,3.4147442E-002 74 | 3.6000000E+002,4.1282090E+002,3.3644375E-002 75 | 3.6500000E+002,4.1343087E+002,3.3148104E-002 76 | 3.7000000E+002,4.1391763E+002,3.2889512E-002 77 | 3.7500000E+002,4.1431882E+002,3.2754485E-002 78 | 3.8000000E+002,4.1466501E+002,3.2703387E-002 79 | 3.8500000E+002,4.1494991E+002,3.2725617E-002 80 | 3.9000000E+002,4.1151136E+002,3.2733237E-002 81 | 3.9500000E+002,4.1249363E+002,3.3318171E-002 82 | 4.0000000E+002,4.1338241E+002,3.3258401E-002 83 | 4.0500000E+002,4.1407100E+002,3.3254620E-002 84 | 4.1000000E+002,4.1463478E+002,3.3298248E-002 85 | 4.1500000E+002,4.1513105E+002,3.3376853E-002 86 | 4.2000000E+002,4.1558445E+002,3.3488855E-002 87 | 4.2500000E+002,4.1468544E+002,3.3568560E-002 88 | 4.3000000E+002,4.1279947E+002,3.4130044E-002 89 | 4.3500000E+002,4.1413312E+002,3.4082631E-002 90 | 4.4000000E+002,4.1522569E+002,3.4069475E-002 91 | 4.4500000E+002,4.1478070E+002,3.4008804E-002 92 | 4.5000000E+002,4.1554608E+002,3.4154011E-002 93 | 4.5500000E+002,4.1763870E+002,3.4060838E-002 94 | 4.6000000E+002,4.2003837E+002,3.3809097E-002 95 | 4.6500000E+002,4.2155611E+002,3.3158747E-002 96 | 4.7000000E+002,4.2673445E+002,3.1835081E-002 
97 | 4.7500000E+002,4.3322112E+002,2.7985583E-002 98 | 4.8000000E+002,4.4237782E+002,2.2000457E-002 99 | 4.8500000E+002,4.5173894E+002,1.2435374E-002 100 | 4.9000000E+002,4.5401308E+002,3.8755528E-003 101 | 4.9500000E+002,4.4198270E+002,7.6689526E-004 102 | 5.0000000E+002,4.1941387E+002,1.5515328E-004 103 | 5.0500000E+002,3.9967828E+002,4.7199421E-005 104 | 5.1000000E+002,3.8663536E+002,2.1940887E-005 105 | 5.1500000E+002,3.7799413E+002,1.3196090E-005 106 | 5.2000000E+002,3.7181383E+002,9.1446337E-006 107 | 5.2500000E+002,3.6708527E+002,6.8887197E-006 108 | 5.3000000E+002,3.6328687E+002,5.4755274E-006 109 | 5.3500000E+002,3.6012719E+002,4.5165943E-006 110 | 5.4000000E+002,3.5743017E+002,3.8275766E-006 111 | 5.4500000E+002,3.5508234E+002,3.3108392E-006 112 | 5.5000000E+002,3.5300670E+002,2.9102343E-006 113 | 5.5500000E+002,3.5114878E+002,2.5913473E-006 114 | 5.6000000E+002,3.4946872E+002,2.3319871E-006 115 | 5.6500000E+002,3.4793654E+002,2.1172411E-006 116 | 5.7000000E+002,3.4652914E+002,1.9367389E-006 117 | 5.7500000E+002,3.4522837E+002,1.7830576E-006 118 | 5.8000000E+002,3.4401972E+002,1.6507515E-006 119 | 5.8500000E+002,3.4289140E+002,1.5357395E-006 120 | 5.9000000E+002,3.4183374E+002,1.4349062E-006 121 | 5.9500000E+002,3.4083868E+002,1.3458337E-006 122 | 6.0000000E+002,3.3989945E+002,1.2666184E-006 123 | -------------------------------------------------------------------------------- /examples/tga_analysis_02/input.py: -------------------------------------------------------------------------------- 1 | # define variable 'ops': optimisation parameter set 2 | # define variable 'setups': simulation setup set 3 | # define variable 'optimiser': properties for the optimiser 4 | 5 | 6 | # Goal of this file is to provide an example on how to run propti while 7 | # taking multiple experiments into account. Thus, multiple simulation setups 8 | # need to be created. 9 | 10 | import numpy as np 11 | 12 | # Import just for IDE convenience. 
# Function to provide basic parameters for one simulation setup.
def create_mod_par_setup(para_set):
    """Assemble the model parameter set for heating-rate index *para_set*.

    Starts from the shared optimisation parameters and adds the heating
    rate (5, 10 or 15) plus an individual character ID so the simulation
    data of the different setups can be distinguished.
    """
    heating_rate = HeatingRatesTGA[para_set]
    model_params = pr.ParameterSet(params=set_of_parameters)

    # Heating rate for this particular setup.
    model_params.append(
        pr.Parameter(name='heating_rate_{}K'.format(heating_rate),
                     place_holder='hr',
                     value=heating_rate))

    # Unique character ID for this setup's simulation output.
    model_params.append(
        pr.Parameter(name='chid',
                     place_holder='CHID',
                     value='{}_{}K'.format(CHID, heating_rate)))

    return model_params
116 | ssetups = [] 117 | for i in range(len(HeatingRatesTGA)): 118 | sn = "{}_{}K_tga".format(CHID, str(HeatingRatesTGA[i])) 119 | s = pr.SimulationSetup(name=sn, 120 | work_dir=sn, 121 | model_template=template_file, 122 | model_parameter=model_parameter_setups[i], 123 | model_executable='fds', 124 | relations=r[i]) 125 | 126 | ssetups.append(s) 127 | 128 | 129 | # Initialise empty simulation setup sets. 130 | setups = pr.SimulationSetupSet() 131 | 132 | 133 | # Append above objects to simulation setup set. 134 | for i in range(len(HeatingRatesTGA)): 135 | setups.append(ssetups[i]) 136 | 137 | 138 | print('** setups generated') 139 | 140 | 141 | # Provide values for optimiser. 142 | optimiser = pr.OptimiserProperties(algorithm='sceua', 143 | repetitions=150, 144 | ngs=4, 145 | # Sub-processes would be used for 146 | # repetitions of an experiment. 147 | num_subprocesses=1, 148 | mpi=False) 149 | 150 | 151 | print('** input file processed') 152 | -------------------------------------------------------------------------------- /examples/cone_01/SimpleConeLaunchTest_Alu_BestParaSet_Toast.fds: -------------------------------------------------------------------------------- 1 | 2 | &HEAD CHID='#filename#', TITLE='Cone Calorimeter (NBSIR 82-2611), November 1992_ALU' / 3 | 4 | &MESH ID='MESH01', IJK=#i#,#j#,#k#, XB=-0.15,0.15,-0.15,0.15,0.0,0.4/ 5 | 6 | 7 | 8 | &TIME T_END=#tend#, WALL_INCREMENT=1, DT=1.0, / 9 | 10 | &MISC BNDF_DEFAULT=.FALSE., SOLID_PHASE_ONLY=.TRUE./ 11 | 12 | 13 | 14 | ***** Open Vents ******************** 15 | 16 | &VENT MB = 'XMIN',SURF_ID = 'OPEN'/ 17 | &VENT MB = 'XMAX',SURF_ID = 'OPEN'/ 18 | &VENT MB = 'YMIN',SURF_ID = 'OPEN'/ 19 | &VENT MB = 'YMAX',SURF_ID = 'OPEN'/ 20 | VENT MB = 'ZMIN',SURF_ID = 'OPEN'/ 21 | &VENT MB = 'ZMAX',SURF_ID = 'OPEN'/ 22 | 23 | 24 | 25 | ################## 26 | #### Reaction #### 27 | ################## 28 | 29 | ***** REAC Parameters ****************** 30 | 31 | &REAC FUEL = 'TOLUENE' 32 | SOOT_YIELD = 0.178 / 33 | 
34 | 35 | 36 | ################################ 37 | #### Materials and Surfaces #### 38 | ################################ 39 | 40 | ***** Surfaces ******************** 41 | 42 | Layer divide used to describe which layers generate fuel and on which side 43 | of the model the fuel gases are emitted. It is just the number of layers 44 | which should emit on a specific side. 45 | All layers shall emit to the top surface. 46 | 47 | Different surfaces are defined with different values for EXTERNAL_FLUX, to 48 | mimic the three different Cone Calorimetry measurements performed during 49 | CHRISTIFIRE Phase 1 (25 kW, 50 kW and 75 kW). 50 | 51 | &SURF ID = 'Cable219' 52 | EXTERNAL_FLUX=#exflux#, 53 | RGB =200,100,0, 54 | BACKING = 'EXPOSED', 55 | CELL_SIZE_FACTOR = 0.25, 56 | BURN_AWAY=.TRUE., 57 | THICKNESS(1:4) = #thickness1#, #thickness2#, #thickness1#, 0.025, 58 | HEAT_TRANSFER_COEFFICIENT = 0., 59 | MATL_ID(1,1) = 'Cable 219 Jacket Component A', 60 | MATL_ID(1,2) = 'Cable 219 Jacket Component B', 61 | MATL_MASS_FRACTION(1,1:2) = 0.35,0.65 , 62 | MATL_ID(2,1) = 'Cable 219 Insulation Component A', 63 | MATL_ID(2,2) = 'Cable 219 Insulation Component B', 64 | MATL_MASS_FRACTION(2,1:2) = 0.25,0.75, 65 | MATL_ID(3,1) = 'Cable 219 Jacket Component A', 66 | MATL_ID(3,2) = 'Cable 219 Jacket Component B', 67 | MATL_MASS_FRACTION(3,1:2) = 0.35,0.65 68 | MATL_ID(4,1:1) = 'BACKING', 69 | MATL_MASS_FRACTION(4,1:1) = 1.0/ 70 | 71 | 72 | 73 | ***** Materials ******************** 74 | 75 | &MATL ID = 'Cable 219 Jacket Component A' 76 | EMISSIVITY = 0.415155053407 77 | DENSITY = 948.295891575 78 | CONDUCTIVITY = 0.204072778663 79 | SPECIFIC_HEAT = 1.20664363566 80 | N_REACTIONS = 1 81 | A(1) = 1745.08192198 82 | E(1) = 50956.6593506 83 | N_S(1) = 1.24929166824 84 | NU_MATL(1,1) = 0.46 85 | MATL_ID(1,1) = 'ResidueJacket' 86 | NU_SPEC(1,1) = 0.54 87 | SPEC_ID(1,1) = 'TOLUENE' 88 | HEAT_OF_COMBUSTION = 29091.8418822 89 | HEAT_OF_REACTION(1) = 1348.76323363 / 90 | 91 | &MATL ID = 'Cable 
219 Jacket Component B' 92 | EMISSIVITY = 0.415155053407 93 | DENSITY = 948.295891575 94 | CONDUCTIVITY = 0.204072778663 95 | SPECIFIC_HEAT = 1.20664363566 96 | N_REACTIONS = 1 97 | A(1) = 4.15507348631e+14 98 | E(1) = 256110.737631 99 | N_S(1) = 1.95213014504 100 | NU_MATL(1,1) = 0.46 101 | MATL_ID(1,1) = 'ResidueJacket' 102 | NU_SPEC(1,1) = 0.54 103 | SPEC_ID(1,1) = 'TOLUENE' 104 | HEAT_OF_COMBUSTION = 29091.8418822 105 | HEAT_OF_REACTION(1) = 1348.76323363 / 106 | 107 | &MATL ID = 'ResidueJacket' 108 | EMISSIVITY = 0.802736884624 109 | DENSITY = 599.624933313 110 | CONDUCTIVITY = 0.197261299686 111 | SPECIFIC_HEAT = 1.02228105353 / 112 | 113 | 114 | 115 | &MATL ID = 'Cable 219 Insulation Component A' 116 | EMISSIVITY = 0.441464196011 117 | DENSITY = 1106.37735215 118 | CONDUCTIVITY = 0.198665140279 119 | SPECIFIC_HEAT = 1.50727683777 120 | N_REACTIONS = 1 121 | A(1) = 40.7945749748 122 | E(1) = 38992.2989777 123 | N_S(1) = 1.71554579991 124 | NU_MATL(1,1) = 0.49 125 | MATL_ID(1,1) = 'ResidueInsulator' 126 | NU_SPEC(1,1) = 0.51 127 | SPEC_ID(1,1) = 'TOLUENE' 128 | HEAT_OF_COMBUSTION = 40669.5835367 129 | HEAT_OF_REACTION(1) = 777.615793651 / 130 | 131 | &MATL ID = 'Cable 219 Insulation Component B' 132 | EMISSIVITY = 0.441464196011 133 | DENSITY = 1106.37735215 134 | CONDUCTIVITY = 0.198665140279 135 | SPECIFIC_HEAT = 1.50727683777 136 | N_REACTIONS = 1 137 | A(1) = 4.54116434444e+20 138 | E(1) = 240478.595147 139 | N_S(1) = 1.99178769428 140 | NU_MATL(1,1) = 0.49 141 | MATL_ID(1,1) = 'ResidueInsulator' 142 | NU_SPEC(1,1) = 0.51 143 | SPEC_ID(1,1) = 'TOLUENE' 144 | HEAT_OF_COMBUSTION = 40669.5835367 145 | HEAT_OF_REACTION(1) = 777.615793651 / 146 | 147 | &MATL ID = 'ResidueInsulator' 148 | EMISSIVITY = 0.539583203677 149 | DENSITY = 298.555727008 150 | CONDUCTIVITY = 0.196313437302 151 | SPECIFIC_HEAT = 0.933963731639 / 152 | 153 | 154 | 155 | 156 | &MATL ID = 'BACKING' 157 | DENSITY = 2698.9, 158 | CONDUCTIVITY = 0.235, 159 | SPECIFIC_HEAT = 0.897/ 160 | 161 | 
162 | 163 | ################## 164 | #### Geometry #### 165 | ################## 166 | 167 | *** Specimen *** 168 | 169 | &VENT SURF_ID='Cable219', XB=-0.05,0.05,-0.05,0.05,0.0,0.0/ Sample 170 | 171 | 172 | 173 | ################## 174 | #### Analysis #### 175 | ################## 176 | 177 | ***** Energy Release Rates ******************* 178 | 179 | &BNDF QUANTITY='WALL TEMPERATURE' / 180 | &BNDF QUANTITY='GAUGE HEAT FLUX' / 181 | 182 | 183 | &DEVC ID='heater temp', QUANTITY='WALL TEMPERATURE', XYZ=0.0,0.0,0.0, IOR=3/ 184 | &DEVC ID='sample temp', QUANTITY='WALL TEMPERATURE', XYZ=0.0,0.0,0.0, IOR=3/ 185 | 186 | &DEVC ID='net heat flux', QUANTITY='NET HEAT FLUX', XYZ=0.0,0.0,0.0, IOR=3/ 187 | &DEVC ID='gauge heat flux', QUANTITY='GAUGE HEAT FLUX', XYZ=0.0,0.0,0.0, IOR=3/ 188 | 189 | 190 | &PROF ID='wall_temp_prof', QUANTITY='TEMPERATURE', XYZ=0.0,-0.00,0.0, IOR=3/ 191 | &PROF ID='wall_temp_prof_fin', QUANTITY='TEMPERATURE', XYZ=0.0,-0.00,0.0, IOR=3, FORMAT_INDEX=2/ 192 | 193 | 194 | 195 | &SLCF QUANTITY='TEMPERATURE', PBX=0.0/ 196 | 197 | 198 | 199 | &SLCF QUANTITY='TEMPERATURE', PBY=0.0/ 200 | 201 | 202 | 203 | &SLCF QUANTITY='VELOCITY', VECTOR=.TRUE., PBX=0.0/ 204 | 205 | &SLCF QUANTITY='VELOCITY', VECTOR=.TRUE., PBY=0.0/ 206 | 207 | 208 | 209 | &TAIL / 210 | -------------------------------------------------------------------------------- /examples/cone_01/SimpleConeLaunchTest_ISO_BestParaSet_Toast.fds: -------------------------------------------------------------------------------- 1 | Simulation of the Cone Calorimeter by Vytenis Babrauskas (NBSIR 82-2611), November 1992. 2 | Testsimulation to estimate time to perform one single simulation over a time of 2400 s, on JSC computing facilities. 3 | Heater temperature maintained corresponding to irradiance level of ca. 50 kW/m2 at sample surface. 4 | Combustion simulated by vent at sample surface according to CHRISTIFIRE pase 1 cable no. 219. 
5 | 6 | 7 | &HEAD CHID='#filename#', TITLE='Cone Calorimeter (NBSIR 82-2611), November 1992' / 8 | 9 | &MESH ID='MESH01', IJK=#i#,#j#,#k#, XB=-0.15,0.15,-0.15,0.15,0.0,0.4/ 10 | 11 | 12 | 13 | &TIME T_END=#tend#, WALL_INCREMENT=1, DT=1.0, / 14 | 15 | &MISC BNDF_DEFAULT=.FALSE., SOLID_PHASE_ONLY=.TRUE./ 16 | 17 | 18 | 19 | ***** Open Vents ******************** 20 | 21 | &VENT MB = 'XMIN',SURF_ID = 'OPEN'/ 22 | &VENT MB = 'XMAX',SURF_ID = 'OPEN'/ 23 | &VENT MB = 'YMIN',SURF_ID = 'OPEN'/ 24 | &VENT MB = 'YMAX',SURF_ID = 'OPEN'/ 25 | VENT MB = 'ZMIN',SURF_ID = 'OPEN'/ 26 | &VENT MB = 'ZMAX',SURF_ID = 'OPEN'/ 27 | 28 | 29 | 30 | ################## 31 | #### Reaction #### 32 | ################## 33 | 34 | ***** REAC Parameters ****************** 35 | 36 | &REAC FUEL = 'TOLUENE' 37 | SOOT_YIELD = 0.178 / 38 | 39 | 40 | 41 | ################################ 42 | #### Materials and Surfaces #### 43 | ################################ 44 | 45 | ***** Surfaces ******************** 46 | 47 | Layer divide used to describe which layers generate fuel and on which side 48 | of the model the fuel gases are emitted. It is just the number of layers 49 | which should emit on a specific side. 50 | All layers shall emit to the top surface. 51 | 52 | Different surfaces are defined with different values for EXTERNAL_FLUX, to 53 | mimic the three different Cone Calorimetry measurements performed during 54 | CHRISTIFIRE Phase 1 (25 kW, 50 kW and 75 kW). 
55 | 56 | &SURF ID = 'Cable219' 57 | EXTERNAL_FLUX=#exflux#, 58 | RGB =200,100,0, 59 | BACKING = 'EXPOSED', 60 | CELL_SIZE_FACTOR = 0.25, 61 | BURN_AWAY=.TRUE., 62 | THICKNESS(1:4) = #thickness1#, #thickness2#, #thickness1#, 0.025 63 | HEAT_TRANSFER_COEFFICIENT = 0., 64 | MATL_ID(1,1) = 'Cable 219 Jacket Component A', 65 | MATL_ID(1,2) = 'Cable 219 Jacket Component B', 66 | MATL_MASS_FRACTION(1,1:2) = 0.35,0.65 , 67 | MATL_ID(2,1) = 'Cable 219 Insulation Component A', 68 | MATL_ID(2,2) = 'Cable 219 Insulation Component B', 69 | MATL_MASS_FRACTION(2,1:2) = 0.25,0.75, 70 | MATL_ID(3,1) = 'Cable 219 Jacket Component A', 71 | MATL_ID(3,2) = 'Cable 219 Jacket Component B', 72 | MATL_MASS_FRACTION(3,1:2) = 0.35,0.65 73 | MATL_ID(4,1:1) = 'BACKING', 74 | MATL_MASS_FRACTION(4,1:1) = 1.0/ 75 | 76 | 77 | 78 | ***** Materials ******************** 79 | 80 | &MATL ID = 'Cable 219 Jacket Component A' 81 | EMISSIVITY = 0.415155053407 82 | DENSITY = 948.295891575 83 | CONDUCTIVITY = 0.204072778663 84 | SPECIFIC_HEAT = 1.20664363566 85 | N_REACTIONS = 1 86 | A(1) = 1745.08192198 87 | E(1) = 50956.6593506 88 | N_S(1) = 1.24929166824 89 | NU_MATL(1,1) = 0.46 90 | MATL_ID(1,1) = 'ResidueJacket' 91 | NU_SPEC(1,1) = 0.54 92 | SPEC_ID(1,1) = 'TOLUENE' 93 | HEAT_OF_COMBUSTION = 29091.8418822 94 | HEAT_OF_REACTION(1) = 1348.76323363 / 95 | 96 | &MATL ID = 'Cable 219 Jacket Component B' 97 | EMISSIVITY = 0.415155053407 98 | DENSITY = 948.295891575 99 | CONDUCTIVITY = 0.204072778663 100 | SPECIFIC_HEAT = 1.20664363566 101 | N_REACTIONS = 1 102 | A(1) = 4.15507348631e+14 103 | E(1) = 256110.737631 104 | N_S(1) = 1.95213014504 105 | NU_MATL(1,1) = 0.46 106 | MATL_ID(1,1) = 'ResidueJacket' 107 | NU_SPEC(1,1) = 0.54 108 | SPEC_ID(1,1) = 'TOLUENE' 109 | HEAT_OF_COMBUSTION = 29091.8418822 110 | HEAT_OF_REACTION(1) = 1348.76323363 / 111 | 112 | &MATL ID = 'ResidueJacket' 113 | EMISSIVITY = 0.802736884624 114 | DENSITY = 599.624933313 115 | CONDUCTIVITY = 0.197261299686 116 | SPECIFIC_HEAT = 
1.02228105353 / 117 | 118 | 119 | 120 | &MATL ID = 'Cable 219 Insulation Component A' 121 | EMISSIVITY = 0.441464196011 122 | DENSITY = 1106.37735215 123 | CONDUCTIVITY = 0.198665140279 124 | SPECIFIC_HEAT = 1.50727683777 125 | N_REACTIONS = 1 126 | A(1) = 40.7945749748 127 | E(1) = 38992.2989777 128 | N_S(1) = 1.71554579991 129 | NU_MATL(1,1) = 0.49 130 | MATL_ID(1,1) = 'ResidueInsulator' 131 | NU_SPEC(1,1) = 0.51 132 | SPEC_ID(1,1) = 'TOLUENE' 133 | HEAT_OF_COMBUSTION = 40669.5835367 134 | HEAT_OF_REACTION(1) = 777.615793651 / 135 | 136 | &MATL ID = 'Cable 219 Insulation Component B' 137 | EMISSIVITY = 0.441464196011 138 | DENSITY = 1106.37735215 139 | CONDUCTIVITY = 0.198665140279 140 | SPECIFIC_HEAT = 1.50727683777 141 | N_REACTIONS = 1 142 | A(1) = 4.54116434444e+20 143 | E(1) = 240478.595147 144 | N_S(1) = 1.99178769428 145 | NU_MATL(1,1) = 0.49 146 | MATL_ID(1,1) = 'ResidueInsulator' 147 | NU_SPEC(1,1) = 0.51 148 | SPEC_ID(1,1) = 'TOLUENE' 149 | HEAT_OF_COMBUSTION = 40669.5835367 150 | HEAT_OF_REACTION(1) = 777.615793651 / 151 | 152 | &MATL ID = 'ResidueInsulator' 153 | EMISSIVITY = 0.539583203677 154 | DENSITY = 298.555727008 155 | CONDUCTIVITY = 0.196313437302 156 | SPECIFIC_HEAT = 0.933963731639 / 157 | 158 | 159 | 160 | The backing material data is taken from Anna Matala's article 161 | 'Pyrolysis modelling of PVC cable materials', Fire Safety Science -- 162 | Proceedings of the tenth International Symposium. 
pp 917-930, 2011 163 | 164 | &MATL ID = 'BACKING' 165 | EMISSIVITY = 1.0 166 | DENSITY = 800.0 167 | CONDUCTIVITY = 0.1 168 | SPECIFIC_HEAT = 1.0/ 169 | 170 | 171 | 172 | ################## 173 | #### Geometry #### 174 | ################## 175 | 176 | *** Specimen *** 177 | 178 | &VENT SURF_ID='Cable219', XB=-0.05,0.05,-0.05,0.05,0.0,0.0/ Sample 179 | 180 | 181 | 182 | ################## 183 | #### Analysis #### 184 | ################## 185 | 186 | ***** Energy Release Rates ******************* 187 | 188 | &BNDF QUANTITY='WALL TEMPERATURE' / 189 | &BNDF QUANTITY='GAUGE HEAT FLUX' / 190 | 191 | 192 | &DEVC ID='heater temp', QUANTITY='WALL TEMPERATURE', XYZ=0.0,0.0,0.0, IOR=3/ 193 | &DEVC ID='sample temp', QUANTITY='WALL TEMPERATURE', XYZ=0.0,0.0,0.0, IOR=3/ 194 | 195 | &DEVC ID='net heat flux', QUANTITY='NET HEAT FLUX', XYZ=0.0,0.0,0.0, IOR=3/ 196 | &DEVC ID='gauge heat flux', QUANTITY='GAUGE HEAT FLUX', XYZ=0.0,0.0,0.0, IOR=3/ 197 | 198 | 199 | &PROF ID='wall_temp_prof', QUANTITY='TEMPERATURE', XYZ=0.0,-0.00,0.0, IOR=3/ 200 | &PROF ID='wall_temp_prof_fin', QUANTITY='TEMPERATURE', XYZ=0.0,-0.00,0.0, IOR=3, FORMAT_INDEX=2/ 201 | 202 | 203 | 204 | &SLCF QUANTITY='TEMPERATURE', PBX=0.0/ 205 | 206 | 207 | 208 | &SLCF QUANTITY='TEMPERATURE', PBY=0.0/ 209 | 210 | 211 | 212 | &SLCF QUANTITY='VELOCITY', VECTOR=.TRUE., PBX=0.0/ 213 | 214 | &SLCF QUANTITY='VELOCITY', VECTOR=.TRUE., PBY=0.0/ 215 | 216 | 217 | 218 | &TAIL / 219 | -------------------------------------------------------------------------------- /propti/basic_functions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import os 4 | import tempfile 5 | import copy 6 | import sys 7 | import subprocess 8 | import logging 9 | import queue 10 | import threading 11 | import datetime 12 | import numpy as np 13 | 14 | from .data_structures import Parameter, ParameterSet, SimulationSetup, \ 15 | SimulationSetupSet, Relation 16 | 17 | 18 | 
##################### 19 | # INPUT FILE HANDLING 20 | 21 | def create_input_file(setup: SimulationSetup, work_dir='execution'): 22 | 23 | """ 24 | 25 | :param setup: specification of SimulationSetup on which to base the 26 | simulation run 27 | :param work_dir: flag to indicate if the regular execution of the function 28 | (in the sense of inverse modeling) is wanted or if only a simulation 29 | of the best parameter set is desired, range:['execution', 'best'] 30 | :return: Saves a file that is read by the simulation software as input file 31 | """ 32 | # 33 | # small test 34 | if work_dir == 'execution': 35 | wd = setup.execution_dir 36 | elif work_dir == 'best': 37 | wd = setup.best_dir 38 | # 39 | # 40 | 41 | # Log the set working directory 42 | logging.debug(wd) 43 | 44 | in_fn = setup.model_template 45 | template_content = read_template(in_fn) 46 | 47 | logging.debug(template_content) 48 | 49 | parameter_list = setup.model_parameter 50 | input_content = fill_place_holder(template_content, parameter_list) 51 | 52 | logging.debug(input_content) 53 | 54 | out_fn = os.path.join(wd, setup.model_input_file) 55 | 56 | write_input_file(input_content, out_fn) 57 | 58 | 59 | def write_input_file(content: str, filename: os.path): 60 | """ 61 | 62 | :param content: Information that shall be written into a file, expected 63 | to be string. 64 | :param filename: File name of the new file. 65 | :return: File written to specified location. 
66 | """ 67 | try: 68 | outfile = open(filename, 'w') 69 | except OSError as err: 70 | logging.error("error writing input file: {}".format(filename)) 71 | sys.exit() 72 | 73 | outfile.write(content) 74 | 75 | 76 | def fill_place_holder(tc: str, paras: ParameterSet) -> str: 77 | # TODO: check for place holder duplicates 78 | res = tc 79 | if paras is not None: 80 | for p in paras: 81 | 82 | if p.place_holder is None: 83 | continue 84 | 85 | if p.derived and not p.evaluated: 86 | logging.error("* Parameter not evaluated: ", p) 87 | sys.exit() 88 | 89 | if type(p.value) == float or \ 90 | type(p.value) == np.float64 or \ 91 | type(p.value) == np.float32: 92 | res = res.replace("#" + p.place_holder + "#", 93 | "{:.{}E}".format(p.value, 94 | p.output_float_precision)) 95 | else: 96 | res = res.replace("#" + p.place_holder + "#", str(p.value)) 97 | else: 98 | logging.warning("using empty parameter set for place holder filling") 99 | 100 | return res 101 | 102 | 103 | def read_template(filename: os.path) -> str: 104 | try: 105 | infile = open(filename, 'r') 106 | except OSError as err: 107 | logging.error("error reading template file: {}".format(filename)) 108 | sys.exit() 109 | 110 | content = infile.read() 111 | return content 112 | 113 | 114 | def test_read_replace_template(): 115 | wd = 'tmp' 116 | if not os.path.exists(wd): 117 | os.mkdir(wd) 118 | s = SimulationSetup("reader test", work_dir=wd) 119 | s.model_template = os.path.join('.', 'templates', 'basic_01.fds') 120 | s.model_input_file = "filled_basic_01.fds" 121 | 122 | p1 = Parameter("chid", place_holder="filename", value="toast_brot") 123 | p2 = Parameter("i", place_holder="i", value=42) 124 | p3 = Parameter("xmin", place_holder="xmin", value=1.463e-6) 125 | s.model_parameter.append(p1) 126 | s.model_parameter.append(p2) 127 | s.model_parameter.append(p3) 128 | 129 | create_input_file(s) 130 | 131 | 132 | def test_missing_template(): 133 | s = SimulationSetup("reader test") 134 | s.model_template = 
os.path.join('.', 'templates', 'notexists_basic_01.fds') 135 | create_input_file(s) 136 | 137 | 138 | ################# 139 | # MODEL EXECUTION 140 | 141 | 142 | def run_simulations(setups: SimulationSetupSet, 143 | num_subprocesses: int = 1, 144 | best_para_run: bool=False): 145 | """ 146 | Executes each given SimulationSetup. 147 | 148 | :param setups: set of simulation setups 149 | :param num_subprocesses: determines how many sub-processes are to be used 150 | to perform the calculation, should be more than or equal to 1, 151 | default: 1, range: [integers >= 1] 152 | :param best_para_run: flag to switch to simulating the best parameter set 153 | :return: None 154 | """ 155 | 156 | # Get system time now, for debug output. 157 | time_now = datetime.datetime.now() 158 | 159 | if num_subprocesses == 1: 160 | msg_serial = '* Serial model execution started, at: {}' 161 | logging.info(msg_serial.format(time_now)) 162 | for s in setups: 163 | logging.info('start execution of simulation setup: {}' 164 | .format(s.name)) 165 | run_simulation_serial(s, best_para_run) 166 | else: 167 | msg_multiprocess = '* Multi process execution started, at: {}' 168 | logging.info(msg_multiprocess.format(time_now)) 169 | run_simulation_mp(setups, num_subprocesses) 170 | 171 | 172 | def run_simulation_serial(setup: SimulationSetup, 173 | best_para_run: bool = False): 174 | 175 | # TODO: check return status of execution 176 | 177 | if best_para_run is False: 178 | new_dir = setup.execution_dir 179 | else: 180 | new_dir = setup.best_dir 181 | 182 | exec_file = setup.model_executable 183 | in_file = setup.model_input_file 184 | log_file = open(os.path.join(new_dir, "execution.log"), "w") 185 | 186 | cmd = 'cd {} && {} {}'.format(new_dir, exec_file, in_file) 187 | 188 | logging.debug("executing command: {}".format(cmd)) 189 | 190 | subprocess.check_call(cmd, shell=True, 191 | stdout=log_file, stderr=log_file) 192 | log_file.close() 193 | 194 | 195 | def run_simulation_mp(setups: 
SimulationSetupSet, num_threads:int = 1): 196 | 197 | def do_work(work_item: SimulationSetup): 198 | print("processing {}".format(work_item.name)) 199 | run_simulation_serial(work_item) 200 | 201 | def worker(): 202 | while True: 203 | work_item = q.get() 204 | if work_item is None: 205 | break 206 | do_work(work_item) 207 | q.task_done() 208 | 209 | q = queue.Queue() 210 | threads = [] 211 | for i in range(num_threads): 212 | t = threading.Thread(target=worker) 213 | t.start() 214 | threads.append(t) 215 | 216 | for item in setups: 217 | q.put(item) 218 | 219 | # block until all tasks are done 220 | q.join() 221 | 222 | # stop workers 223 | for i in range(num_threads): 224 | q.put(None) 225 | for t in threads: 226 | t.join() 227 | 228 | 229 | def test_execute_fds(): 230 | wd = 'tmp' 231 | if not os.path.exists(wd): 232 | os.mkdir(wd) 233 | s = SimulationSetup(name='exec test', work_dir=wd, model_executable='fds', 234 | model_input_file=os.path.join('..', 'templates', 235 | 'basic_02.fds')) 236 | run_simulation_serial(s) 237 | 238 | 239 | ########################### 240 | # ANALYSE SIMULATION OUTPUT 241 | 242 | def extract_simulation_data(setup: SimulationSetup): 243 | # TODO: this is not general, but specific for FDS, i.e. first 244 | # TODO: line contains units, second the quantities names 245 | 246 | msg = "* From 'extract_simulation_data': execution directory: {}" 247 | logging.debug(msg.format(setup.execution_dir)) 248 | 249 | if os.path.exists(os.path.join(setup.execution_dir, 'wct.csv')): 250 | wct_file = open(os.path.join(setup.execution_dir, 'wct.csv')) 251 | line = wct_file.readline() 252 | wct_file.close() 253 | logging.debug("WCT info: {}".format(line)) 254 | 255 | for r in setup.relations: 256 | msg = "* From 'extract_simulation_data': Relation ID: {}" 257 | logging.debug(msg.format(r.id_label)) 258 | r.read_data(setup.execution_dir) 259 | 260 | msg = "* From 'extract_simulation_data': Finished reading data." 
261 | logging.debug(msg) 262 | 263 | 264 | def map_data(x_def, x, y): 265 | return np.interp(x_def, x, y) 266 | 267 | 268 | def test_prepare_run_extract(): 269 | r1 = Relation() 270 | r1.model_x_label = "Time" 271 | r1.model_y_label = "VELO" 272 | r1.x_def = np.linspace(3.0, 8.5, 20) 273 | 274 | r2 = copy.deepcopy(r1) 275 | r2.model_y_label = "TEMP" 276 | 277 | relations = [r1, r2] 278 | 279 | paras = ParameterSet() 280 | paras.append(Parameter('ambient temperature', place_holder='TMPA')) 281 | paras.append(Parameter('density', place_holder='RHO')) 282 | 283 | s0 = SimulationSetup(name='ambient run', 284 | work_dir='setup', 285 | model_template=os.path.join('templates', 286 | 'template_basic_03.fds'), 287 | model_executable='fds', 288 | relations=relations, 289 | model_parameter=paras 290 | ) 291 | 292 | setups = SimulationSetupSet() 293 | isetup = 0 294 | for tmpa in [32.1, 36.7, 42.7, 44.1]: 295 | current_s = copy.deepcopy(s0) 296 | current_s.model_parameter[0].value = tmpa 297 | current_s.work_dir += '_{:02d}'.format(isetup) 298 | setups.append(current_s) 299 | isetup += 1 300 | 301 | print(setups) 302 | 303 | for s in setups: 304 | if not os.path.exists(s.work_dir): os.mkdir(s.work_dir) 305 | 306 | for s in setups: 307 | create_input_file(s) 308 | 309 | for s in setups: 310 | run_simulations(s) 311 | 312 | for s in setups: 313 | extract_simulation_data(s) 314 | for r in s.relations: 315 | print(r.x_def, r.model_y) 316 | 317 | 318 | def test_extract_data(): 319 | s = SimulationSetup('test read data') 320 | s.model_output_file = os.path.join('test_data', 'TEST_devc.csv') 321 | 322 | r1 = ['VELO', ["none", "none"]] 323 | r2 = ['TEMP', ["none", "none"]] 324 | 325 | s.relations = [r1, r2] 326 | 327 | res = extract_simulation_data(s) 328 | 329 | for r in res: 330 | print(r) 331 | 332 | 333 | ###### 334 | # MAIN 335 | 336 | # run tests if executed 337 | if __name__ == "__main__": 338 | # test_read_replace_template() 339 | # test_execute_fds() 340 | # 
test_missing_template() 341 | # test_extract_data() 342 | test_prepare_run_extract() 343 | pass 344 | -------------------------------------------------------------------------------- /propti/spotpy_wrapper.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | import os 4 | import shutil 5 | import numpy as np 6 | import tempfile 7 | import os.path 8 | from pathlib import Path 9 | import spotpy 10 | 11 | from .data_structures import Parameter, ParameterSet, SimulationSetup, \ 12 | SimulationSetupSet, Relation, OptimiserProperties 13 | 14 | from .basic_functions import create_input_file, run_simulations, \ 15 | extract_simulation_data 16 | 17 | 18 | #################### 19 | # SPOTPY SETUP CLASS 20 | 21 | class SpotpySetup(object): 22 | def __init__(self, 23 | params: ParameterSet, 24 | setups: SimulationSetupSet, 25 | optimiser: OptimiserProperties): 26 | 27 | self.setups = setups 28 | self.params = params 29 | self.optimiser = optimiser 30 | self.spotpy_parameter = [] 31 | self.j=0 32 | 33 | for p in params: 34 | logging.debug("Setup SPOTPY parameter: {}".format(p.name)) 35 | if p.distribution == 'uniform': 36 | 37 | optguess = None 38 | step = None 39 | if p.value is not None: 40 | optguess = p.value 41 | if p.max_increment is not None: 42 | step = p.max_increment 43 | 44 | cp = spotpy.parameter.Uniform(p.place_holder, 45 | p.min_value, p.max_value, 46 | step=step, 47 | optguess=optguess, 48 | minbound=p.min_value, 49 | maxbound=p.max_value) 50 | self.spotpy_parameter.append(cp) 51 | else: 52 | 53 | logging.error( 54 | '* Parameter distribution function unknown: {}'.format( 55 | p.distribution)) 56 | 57 | def parameters(self): 58 | return spotpy.parameter.generate(self.spotpy_parameter) 59 | 60 | def simulation(self, vector): 61 | logging.debug("* Current SPOTPY simulation vector: {}".format(vector)) 62 | 63 | # Copy SPOTPY parameter vector to parameter set. 
64 | for i in range(len(vector)): 65 | self.params[i].value = vector[i] 66 | 67 | # Update all simulation setup parameter sets. 68 | for s in self.setups: 69 | s.model_parameter.update(self.params) 70 | 71 | # Create run directories for all simulation setups. 72 | for s in self.setups: 73 | if s.execution_dir_prefix: 74 | tmp_dir_root = s.execution_dir_prefix 75 | else: 76 | tmp_dir_root = os.path.join(os.getcwd(), s.work_dir) 77 | s.execution_dir = tempfile.mkdtemp(prefix='rundir_', 78 | dir=tmp_dir_root) 79 | create_input_file(s) 80 | 81 | # Run all simulations. 82 | logging.debug("* Run all simulations.") 83 | run_simulations(self.setups, self.optimiser.num_subprocesses) 84 | 85 | # gather simulation data 86 | for s in self.setups: 87 | logging.debug("* Start data extraction.") 88 | extract_simulation_data(s) 89 | logging.debug("* Finished data extraction.") 90 | 91 | # Clean up temporary execution directories. 92 | for s in self.setups: 93 | logging.debug("* Clean up of temporary execution directories.") 94 | shutil.rmtree(s.execution_dir) 95 | 96 | # Initialise values needed to compute fitness. 97 | logging.debug("* Compute fitness values.") 98 | global_fitness_value = 0 99 | individual_fitness_values = list() 100 | 101 | # Compute fitness values. 102 | for s in self.setups: 103 | for r in s.relations: 104 | logging.debug("* Relation ID: {}.".format(r.id_label)) 105 | current_fitness = r.fitness_weight * r.compute_fitness() 106 | global_fitness_value += current_fitness 107 | individual_fitness_values.append(current_fitness) 108 | self.j+=1 109 | # first element of returned list is the global fitness value 110 | # note: in general this should be the simulation data, which is returned 111 | # due to our data structure, the passing of the fitness values, i.e. 
result 112 | # of the objective function, is most convenient approach here 113 | # last element of the list counts the number of executed simulations 114 | return [global_fitness_value] + individual_fitness_values + [self.j] 115 | 116 | def evaluation(self): 117 | logging.debug("* evaluation") 118 | for s in self.setups: 119 | for r in s.relations: 120 | r.read_data(wd='.', target='experiment') 121 | 122 | # Return dummy data. 123 | # TODO: reconsider returning proper values 124 | return [1] 125 | 126 | def objectivefunction(self, simulation, evaluation, params): 127 | 128 | # The simulation function does not return simulation data, 129 | # but directly the fitness values, just pass these values. 130 | fitness_value = simulation 131 | 132 | msg = "* From objectivefunction: fitness_value={}" 133 | logging.debug(msg.format(fitness_value)) 134 | 135 | return fitness_value 136 | 137 | 138 | def run_optimisation(params: ParameterSet, 139 | setups: SimulationSetupSet, 140 | opt: OptimiserProperties) -> ParameterSet: 141 | spot = SpotpySetup(params, setups, opt) 142 | # Check if a break file exists for restarting. 143 | break_file_name = Path('{}.break'.format(opt.db_name)) 144 | break_point = 'write' 145 | if break_file_name.is_file(): 146 | break_point = 'readandwrite' 147 | parallel = 'seq' 148 | if opt.mpi: 149 | parallel = 'mpi' 150 | if opt.algorithm == 'sceua': 151 | sampler = spotpy.algorithms.sceua(spot, 152 | dbname=opt.db_name, 153 | dbformat=opt.db_type, 154 | parallel=parallel, 155 | db_precision=np.float64, 156 | breakpoint=break_point, 157 | backup_every_rep=opt.backup_every) 158 | 159 | ngs = opt.ngs 160 | if not ngs: 161 | ngs = len(params) 162 | # Set amount of parameters as default for number of complexes 163 | # if not explicitly specified. 
164 | opt.ngs = ngs 165 | results = sampler.sample(opt.repetitions, ngs=ngs, 166 | max_loop_inc=opt.max_loop_inc) 167 | # results = sampler.sample(opt.repetitions, ngs=ngs) 168 | elif opt.algorithm == 'fscabc': 169 | sampler = spotpy.algorithms.fscabc(spot, 170 | dbname=opt.db_name, 171 | dbformat=opt.db_type, 172 | parallel=parallel, 173 | db_precision=np.float64, 174 | breakpoint=break_point, 175 | backup_every_rep=opt.backup_every) 176 | eb = opt.eb 177 | if not eb: 178 | eb = 48 179 | # Set amount of parameters as default for number of complexes 180 | # if not explicitly specified. 181 | opt.eb = eb 182 | results = sampler.sample(opt.repetitions, eb=eb) 183 | elif opt.algorithm == 'abc': 184 | sampler = spotpy.algorithms.abc(spot, 185 | dbname=opt.db_name, 186 | dbformat=opt.db_type, 187 | parallel=parallel, 188 | breakpoint=break_point, 189 | backup_every_rep=opt.backup_every) 190 | eb = opt.eb 191 | if not eb: 192 | eb = 48 193 | # Set amount of parameters as default for number of complexes 194 | # if not explicitly specified. 
195 | opt.eb = eb 196 | results = sampler.sample(opt.repetitions, eb=eb) 197 | elif opt.algorithm == 'mc': 198 | sampler = spotpy.algorithms.mc(spot, 199 | dbname=opt.db_name, 200 | dbformat=opt.db_type, 201 | parallel=parallel) 202 | results = sampler.sample(opt.repetitions) 203 | 204 | elif opt.algorithm == 'dream': 205 | sampler = spotpy.algorithms.dream(spot, 206 | dbname=opt.db_name, 207 | dbformat=opt.db_type, 208 | parallel=parallel) 209 | results = sampler.sample(opt.repetitions) 210 | 211 | elif opt.algorithm == 'demcz': 212 | sampler = spotpy.algorithms.demcz(spot, 213 | dbname=opt.db_name, 214 | dbformat=opt.db_type, 215 | alt_objfun=None, 216 | parallel=parallel) 217 | results = sampler.sample(opt.repetitions) 218 | elif opt.algorithm == 'mcmc': 219 | sampler = spotpy.algorithms.mcmc(spot, 220 | dbname=opt.db_name, 221 | dbformat=opt.db_type, 222 | alt_objfun=None, 223 | parallel=parallel) 224 | results = sampler.sample(opt.repetitions) 225 | elif opt.algorithm == 'mle': 226 | sampler = spotpy.algorithms.mle(spot, 227 | dbname=opt.db_name, 228 | dbformat=opt.db_type, 229 | parallel=parallel) 230 | # breakpoint=break_point, 231 | # backup_every_rep=opt.backup_every) 232 | results = sampler.sample(opt.repetitions) 233 | 234 | elif opt.algorithm == 'sa': 235 | sampler = spotpy.algorithms.sa(spot, 236 | dbname=opt.db_name, 237 | dbformat=opt.db_type, 238 | parallel=parallel) 239 | results = sampler.sample(opt.repetitions) 240 | elif opt.algorithm == 'rope': 241 | sampler = spotpy.algorithms.rope(spot, 242 | dbname=opt.db_name, 243 | dbformat=opt.db_type, 244 | parallel=parallel) 245 | results = sampler.sample(opt.repetitions) 246 | 247 | elif opt.algorithm == 'mc': 248 | sampler = spotpy.algorithms.mc(spot, 249 | dbname=opt.db_name, 250 | dbformat=opt.db_type, 251 | parallel=parallel) 252 | results = sampler.sample(opt.repetitions) 253 | 254 | elif opt.algorithm == 'fast': 255 | sampler = spotpy.algorithms.fast( 256 | spot, 257 | dbname=opt.db_name, 258 | 
dbformat='csv', 259 | parallel=parallel, 260 | breakpoint=break_point, 261 | backup_every_rep=opt.backup_every, 262 | optimization_direction=opt.optimization_direction) 263 | results = sampler.sample(opt.repetitions) 264 | else: 265 | return(print('No valid optimization algorithm selected')) 266 | 267 | if sampler.status.optimization_direction == 'minimize': 268 | pars = sampler.status.params_min 269 | elif sampler.status.optimization_direction == 'maximize': 270 | pars = sampler.status.params_max 271 | elif sampler.status.optimization_direction == 'grid': 272 | pars = sampler.status.params_max 273 | else: 274 | print("* Something went wrong: spotpy _algorithm 'optimization_direction' should be either 'minimize', 'maximize' or 'grid'. ") 275 | print(sampler.status) 276 | print("+++++++++\n") 277 | 278 | for i in range(len(params)): 279 | params[i].value = pars[i] 280 | for s in setups: 281 | s.model_parameter.update(params) 282 | return params 283 | 284 | 285 | def test_spotpy_setup(): 286 | p1 = Parameter("density", "RHO", min_value=1.0, max_value=2.4, 287 | distribution='uniform') 288 | p2 = Parameter("cp", place_holder="CP", min_value=4.0, max_value=7.2, 289 | distribution='uniform') 290 | 291 | ps = ParameterSet() 292 | ps.append(p1) 293 | ps.append(p2) 294 | 295 | spot = SpotpySetup(ps) 296 | 297 | for p in spot.parameter: 298 | print(p.name, p.rndargs) 299 | 300 | 301 | def test_spotpy_run(): 302 | p1 = Parameter("ambient temperature", place_holder="TMPA", min_value=0, 303 | max_value=100, 304 | distribution='uniform', value=0) 305 | 306 | ps = ParameterSet() 307 | ps.append(p1) 308 | 309 | r1 = Relation() 310 | r1.model[0].label_x = "Time" 311 | r1.model[0].label_y = "TEMP" 312 | r1.model[0].file_name = 'TEST_devc.csv' 313 | r1.model[0].header_line = 1 314 | 315 | r1.experiment[0].x = np.linspace(0, 10, 20) 316 | r1.experiment[0].y = np.ones_like(r1.experiment[0].x) * 42.1 317 | 318 | r1.x_def = np.linspace(3.0, 8.5, 3) 319 | relations = [r1] 320 | 321 | s0 
# -*- coding: utf-8 -*-
"""
Post-processing utilities for propti optimisation runs.

Provides helpers to re-run the best parameter set, plot comparisons of
experimental and simulated data, and to collect and analyse parameter
samples from the SPOTPY database file.

Created on Wed Nov 30 15:39:13 2016

@author: thehnen; based on a script from belt
"""

import re
import os
import sys
import shutil as sh
import logging
import subprocess

import numpy as np
import pandas as pd
import scipy.signal as sign
from scipy.stats import norm
from scipy import stats

# NOTE(review): `propti` is the surrounding project package; it is only
# needed here for the type comments below.  Guarded so the numeric
# utilities in this module stay importable in isolation -- TODO confirm
# no caller relies on an ImportError here.
try:
    import propti as pr
except ImportError:  # pragma: no cover - only outside the project
    pr = None

# matplotlib is imported lazily inside the plotting functions so this
# module can be imported on headless/batch nodes (e.g. HPC) without a
# display or a matplotlib installation.

setups = None  # type: pr.SimulationSetupSet
ops = None  # type: pr.ParameterSet
optimiser = None  # type: pr.OptimiserProperties


def run_best_para(setups_bp, ops_bp, optimiser_bp, pickle_object):
    """
    Run the simulations for the most recent best-parameter input files.

    The input files are expected to have been created beforehand by
    `--create_best_input` below
    `<pickle dir>/Analysis/Extractor/CurrentBestParameter/<repetition>`.

    :param setups_bp: SimulationSetupSet, one entry per simulation setup
    :param ops_bp: ParameterSet of the optimisation (printed only)
    :param optimiser_bp: OptimiserProperties (printed only)
    :param pickle_object: path to the propti pickle file; its directory
        is taken as the project root
    :return: None; simulations are executed in their work directories
    """

    print(setups_bp, ops_bp, optimiser_bp)

    # Find most recent model input files created by `--create_best_input`.
    root_dir = os.path.dirname(os.path.abspath(pickle_object))
    run_best_para_dir = os.path.join(root_dir, 'Analysis', 'Extractor',
                                     'CurrentBestParameter')
    print(run_best_para_dir)

    rep_dir_paths = [f.name for f in os.scandir(run_best_para_dir)
                     if f.is_dir()]
    rep_dir_paths.sort()
    print(rep_dir_paths)

    # The lexicographically last directory is taken as the most recent
    # repetition -- assumes sortable (e.g. zero-padded) directory names.
    current_rep = rep_dir_paths[-1]
    current_dir = os.path.join(run_best_para_dir, current_rep)
    print(current_dir)

    # Iterate over the sub-directories (simulation setups) and run the
    # simulations of the respective model input file.
    print("* Run best parameter simulations for {} SimulationSetups.".format(
        len(setups_bp)))
    print('-----')
    for s_id, s in enumerate(setups_bp):
        sim_setup_name = s.name
        print("  Setup {} ({}).".format(s_id, sim_setup_name))

        work_dir = os.path.join(current_dir, sim_setup_name)
        exec_file = s.model_executable
        in_file = "{}_rep{}.fds".format(sim_setup_name,
                                        current_rep.split('_')[-1])

        cmd = 'cd {} && {} {}'.format(work_dir, exec_file, in_file)

        print('  Run Best Parameter Simulation...')
        # Context manager closes the log file even when the simulation
        # fails (the original code leaked the handle on error).
        with open(os.path.join(work_dir, "execution.log"), "w") as log_file:
            subprocess.check_call(cmd, shell=True,
                                  stdout=log_file, stderr=log_file)
        print('  Done.')
        print('-----')

    print('* Best parameter simulations finished.')


def plot_template(exp_data, sim_data, legend_labels=None,
                  plot_labels=None, file_name='Plot name',
                  file_type='png', dpi_value=320, fontsize=13,
                  scaling=0.88, fig_size_x=6.5, fig_size_y=5.5, n_colors=10):
    """
    Plot pairs of experimental (dash-dotted) and simulated (solid) data
    series into one diagram and save it as an image file.

    :param exp_data: list of [x_values, y_values] pairs (experiment)
    :param sim_data: list of [x_values, y_values] pairs (simulation),
        same length and order as `exp_data`
    :param legend_labels: list of legend strings
    :param plot_labels: [x-label, y-label, title], all strings
    :param file_name: output file name, without extension
    :param file_type: image format of the saved figure, default 'png'
    :param dpi_value: resolution of the saved figure
    :param fontsize: font size used in the plot
    :param scaling: scale factor applied to the figure size
    :param n_colors: intended number of distinct colors (see note below)
    :return: None; figure is written to `file_name.file_type`
    """
    import matplotlib.pyplot as plt  # lazy: keep module import headless-safe

    if plot_labels is None:
        print('* Specify plot_labels=[x-label, y-label, title], '
              'all as string.')
        plot_labels = ['x-label', 'y-label', 'title']

    if legend_labels is None:
        print('* Specify legend_labels as list of strings.')
        legend_labels = ['dummy label']

    # Set font size and font type for plot.
    plt.rcParams.update({'font.size': fontsize})
    plt.rcParams.update({'font.family': 'serif'})

    # Prepare plotting of multiple plots in one diagram.
    multi_plot = plt.figure(figsize=(fig_size_x * scaling,
                                     fig_size_y * scaling))
    ax = multi_plot.add_subplot(111)

    # Set default color map to viridis.
    # https://www.youtube.com/watch?v=xAoljeRJ3lU&feature=youtu.be
    # NOTE(review): `colormap(i)` below is called with an integer, which
    # indexes the 256-entry lookup table directly rather than spreading
    # `n_colors` series over the full map -- TODO confirm whether
    # `colormap(i / max(n_colors - 1, 1))` was intended.
    colormap = plt.get_cmap('viridis')

    for i in range(len(exp_data)):
        # Experiment dash-dotted, simulation solid, same color per pair.
        ax.plot(exp_data[i][0],
                exp_data[i][1],
                linestyle='-.',
                color=colormap(i))

        ax.plot(sim_data[i][0],
                sim_data[i][1],
                linestyle='-',
                color=colormap(i))

    ax.legend(legend_labels)

    plt.xlabel(plot_labels[0])
    plt.ylabel(plot_labels[1])
    # Create plot title from file name.
    plt.title(plot_labels[2])
    plt.grid()

    plt.savefig(file_name + '.' + file_type,
                dpi=dpi_value)
    plt.close(multi_plot)
    print('Plot saved.')
    print('')


def plot_hist(data_label, data_frame, plot_title, file_name, file_path,
              bin_num=100, file_type='png',
              y_label=None):
    """
    Create a histogram plot for a given data series. Used to create
    histograms for each parameter over the whole inverse modelling
    process.

    :param data_label: label of the parameter (column label for pandas)
    :param data_frame: pandas data frame in which to look for the column
    :param plot_title: title of the plot
    :param file_name: name of the created file (None skips saving)
    :param file_path: path to the location where the file shall be written
    :param bin_num: number of bins for the histogram, default: 100
    :param file_type: file type of the file to be saved, default: 'png'
    :param y_label: label for the y-axis, default: data_label
    :return: None; histogram plot is saved as an image file
    """
    import matplotlib.pyplot as plt  # lazy: keep module import headless-safe

    # Prepare data for plot.
    x = data_frame[data_label]

    # Plot histogram of data points.
    plt.hist(x, bins=bin_num)

    # NOTE(review): axis labels look swapped for a histogram (counts are
    # usually on the y-axis); kept as-is to preserve existing figures --
    # TODO confirm the intended labelling.
    plt.xlabel('Individuals')
    if y_label is None:
        plt.ylabel(data_label)
    else:
        plt.ylabel(y_label)

    plt.title(plot_title)

    if file_name is not None:
        if file_path is not None:
            new_path = os.path.join(file_path,
                                    file_name + '_histogram.' + file_type)
        else:
            new_path = os.path.join(file_name + '_histogram.' + file_type)
        plt.savefig(new_path)
        plt.close()


# TODO: decouple the calculation of descriptive statistics
# TODO: create function to call specific calculation methods
# NOTE(review): a large commented-out draft of `descriptive_statistics`
# (mean/std/skewness/kurtosis/mode of a sample vs. a subset) was removed
# here; it referenced undefined names (`GenerationSize`, `reduced`, ...)
# and never executed.  See version control history if it is needed again.


def calc_pearson_coefficient(data_series):
    """
    Compute and print the Pearson correlation coefficient matrix.

    :param data_series: array-like of data rows, as accepted by
        `numpy.corrcoef` (each row one variable)
    :return: the correlation coefficient matrix (numpy array)
    """
    corr_mat = np.corrcoef(data_series)

    print('')
    print('----')
    print('Pearson coefficient matrix:')
    print(corr_mat)
    print('----')
    print('')

    return corr_mat


def collect_best_para_multi(data_file, label_list, distance=0.8e-4):
    """
    Collect all parameter sets whose fitness lies within a band around
    the best (minimal) fitness value.

    :param data_file: Assumed to be a CSV-file and containing the data
        base information provided by SPOTPY
    :param label_list: List of labels (string) which are indexing the
        columns of the shape:
        [fitness values, parameter_1, parameter_2, ..., parameter_n]
    :param distance: Half of the range in which to look for parameter
        sets around the best fitness value
    :return: para_collection: Pandas DataFrame with the collected
        parameter sets and their respective fitness values
    """

    # Read Pandas DataFrame and convert the content of one column into
    # a numpy array.
    fit_vals_raw = pd.read_csv(data_file, usecols=label_list)
    fit_vals = fit_vals_raw[label_list[0]].values

    # Find min value in the array.
    fit_min = min(fit_vals)

    # Calculate the range in which to collect the samples.
    upper = fit_min + distance
    lower = fit_min - distance

    # Collect indices and values of all rows inside [lower, upper].
    multi_fit = []
    row_indices = []
    for num_i in range(len(fit_vals)):
        new_element = []
        if lower <= fit_vals[num_i] <= upper:
            new_element.append(num_i)
            row_indices.append(num_i)
            new_element.append(fit_vals[num_i])
            multi_fit.append(new_element)

    print('-------------')
    print('Range around the best fitness value')
    print('----')
    print('Best fitness: {}'.format(fit_min))
    print('Distance: {}'.format(distance))
    print('Upper bound: {}'.format(upper))
    print('Lower bound: {}'.format(lower))
    print('')

    for i in range(len(multi_fit)):
        # Fitness column is label_list[0]; the original draft hard-coded
        # 'like1' here and crashed for any other fitness label.
        print(' ', fit_vals_raw.loc[multi_fit[i][0], label_list[0]])
        print(multi_fit[i])
    print('')
    print('-------------')

    # Create a Pandas DataFrame with the samples which are
    # within the range.
    para_collection = fit_vals_raw.loc[row_indices, label_list]

    return para_collection


def plot_best_sim_exp(setup_plot, pickle_object):
    """
    Plot the best-parameter simulation results against the experimental
    data of all relations of one SimulationSetup.

    :param setup_plot: SimulationSetup providing `best_dir` and
        `relations` (model/experiment file and column descriptions)
    :param pickle_object: path to the propti pickle file; its directory
        is taken as the project root
    :return: None; plot is produced via `plot_template`
    """

    root_dir = os.path.dirname(os.path.abspath(pickle_object))
    cdir = os.path.join(root_dir, setup_plot.best_dir)

    # Check if best parameter simulation directories exist.
    if not os.path.exists(cdir):
        print('* No directory of best parameter simulation found.')
        print('* Hint: Use run_best_para method for that simulation.')
        return

    # Show relation information.
    for r in setup_plot.relations:
        print(r)

    # Determine amount of relations to give every plot its own color
    # without duplicates.
    lr = len(setup_plot.relations)

    # Extract data from simulation and experiment to be plotted.
    model_data = []
    experimental_data = []
    for r in setup_plot.relations:

        mod_file = os.path.join(cdir, r.model.file_name)
        model_data_raw = pd.read_csv(mod_file,
                                     header=r.model.header_line,
                                     usecols=[r.model.label_x,
                                              r.model.label_y])

        experimental_data_raw = pd.read_csv(r.experiment.file_name,
                                            header=r.experiment.header_line,
                                            usecols=[r.experiment.label_x,
                                                     r.experiment.label_y])

        # Apply the per-relation linear transformations (factor/offset)
        # before plotting, for model and experiment alike.
        md_interm = [
            (model_data_raw[r.model.label_x]
             * r.model.xfactor + r.model.xoffset).tolist(),
            (model_data_raw[r.model.label_y]
             * r.model.yfactor + r.model.yoffset).tolist()]

        ed_interm = [
            (experimental_data_raw[r.experiment.label_x]
             * r.experiment.xfactor + r.experiment.xoffset).tolist(),
            (experimental_data_raw[r.experiment.label_y]
             * r.experiment.yfactor + r.experiment.yoffset).tolist()]
        model_data.append(md_interm)
        experimental_data.append(ed_interm)

    leg_lab = ['experiment', 'simulation']
    plot_template(experimental_data,
                  model_data,
                  legend_labels=leg_lab,
                  n_colors=lr)


def create_column_headers(col_infos):
    """
    Map column header information to strings, replacing None by "None".

    :param col_infos: iterable of header entries (strings or None)
    :return: list of header strings
    """

    new_column_data = list()

    for col_info in col_infos:
        if col_info is None:
            new_column_data.append("None")
        else:
            new_column_data.append(col_info)

    return new_column_data


def create_base_analysis_db(db_file_name,
                            new_file_name,
                            output_dir,
                            parameter_headers,
                            fitness_headers,
                            progress_headers,
                            new_file_type="csv"):
    """
    Create a database used for subsequent analysis procedures (work in
    progress).

    Collects the fitness column headers of the original `propti_db`
    file, derives normalised headers for the new database and reads the
    corresponding data.

    :param db_file_name: path of the original `propti_db` CSV file
    :param new_file_name: name of the new database file (no extension)
    :param output_dir: directory in which the new file shall be written
    :param parameter_headers: parameter column headers (TODO: unused)
    :param fitness_headers: fitness value column headers
    :param progress_headers: progress column headers (TODO: unused)
    :param new_file_type: file extension of the new database, default "csv"
    :return: None (writing of the new database is not implemented yet)
    """

    # Create lists of column headers to read the data from the `propti_db`.
    db_headers = list()        # from original database
    new_headers = list()       # for new database
    labels = list()            # human-readable labels
    parameter_units = list()   # unit information for parameter

    # Process the fitness information.
    for fit_id, fitness_header in enumerate(fitness_headers):
        # Collect headers of original database file.
        db_headers.append(fitness_header)
        # Create new fitness headers.
        new_headers.append("Fit_{:03d}".format(fit_id))

    # Add the different iteration labels.
    # TODO: progress/iteration info into pickle file - e.g. generations
    #       and complexes for sce.
    iteration_ids = ["chain"]

    # TODO: Adjust pickle to contain information about different fitness
    #       values and process `parameter_headers`/`progress_headers`.

    # Read data for the new database.  The original draft referenced an
    # undefined name `cols` here (NameError); read the collected
    # headers instead.
    db_content = pd.read_csv(db_file_name, usecols=db_headers)

    # Create file name to save the new database file.
    new_db_name = new_file_name + "." + new_file_type
    output = os.path.join(output_dir, new_db_name)

    return
+ new_file_type 519 | output = os.path.join(output_dir, new_db_name) 520 | 521 | return 522 | -------------------------------------------------------------------------------- /propti/fitness_methods.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import logging 3 | import numpy as np 4 | 5 | from mpi4py import MPI 6 | 7 | 8 | class FitnessMethodInterface: 9 | 10 | def __init__(self, scale_fitness=True): 11 | self.scale_fitness = scale_fitness 12 | 13 | def compute(self, x_e, y_e, y2_e, x_m, y_m): 14 | print("using undefined function") 15 | 16 | 17 | class FitnessMethodRMSE(FitnessMethodInterface): 18 | 19 | def __init__(self, n_points=None, x_def_range=None, scale_fitness=True, 20 | check_model_length=True, penalty=3.5, difference=0.05): 21 | #TODO set default of check_model_length to False 22 | """ 23 | Constructor, setting up basic parameters. 24 | 25 | :param n_points: number of evenly spaced data points, endpoint included 26 | :param x_def_range: 27 | :param scale_fitness: default=True 28 | :param check_model_length: flag to control if a check for premature 29 | model termination is to be conducted. 30 | :param penalty: pre-defined RMSE value that is high enough to nudge 31 | the optimiser away from this parameter set. 32 | :param difference: percentage that is used to calculate the lower limit. 33 | """ 34 | 35 | self.n_points = n_points 36 | self.x_def = None 37 | self.x_def_range = x_def_range 38 | self.scale_fitness = scale_fitness 39 | self.check_model_length = check_model_length 40 | self.penalty = penalty 41 | self.difference = difference 42 | 43 | FitnessMethodInterface.__init__(self, scale_fitness=scale_fitness) 44 | 45 | def compute(self, x_e, y_e, y2_e, x_m, y_m): 46 | """ 47 | Calculates the root mean squared error between two data series. 48 | 49 | This method calculates the root mean squared error (RMSE) between 50 | two data series. 
It can also scale the RMSE value based on different 51 | aspects of the experiment data. 52 | Furthermore, it can check if the end of the experiment and model 53 | x-values are close together in an effort to find data series as a 54 | result of premature model termination, e.g. numerical instabilites 55 | with FDS. This step is necessary in some cases, where the simulation 56 | crashes but still some data points are written. Primarily, because 57 | this method scales the x-range to map both data series to it, 58 | which allows proper comparison in the RMSE step, when no explicit 59 | x-range or x-def were provided. The RMSE values are then manually 60 | set to a high enough value (penalty) to nudge the optimiser away 61 | from this parameter set. 62 | 63 | :param x_e: x-values of the experiment data 64 | :param y_e: y-values of the experiment data 65 | :param y2_e: y-values of the experiment data 66 | :param x_m: x-values of the model data 67 | :param y_m: y-values of the model data 68 | :return: root mean squared error, possibly scaled 69 | """ 70 | 71 | msg = "* Compute FitnessMethodRMSE." 72 | logging.debug(msg) 73 | 74 | # Check for premature model termination. 75 | if self.check_model_length is True: 76 | # Determine max x-value. 77 | x_e_max = np.max(x_e) # x_e[-1] 78 | x_m_max = np.max(x_m) 79 | # Calculate lower limit. 80 | epsilon = self.difference * x_e_max 81 | threshold = x_e_max - epsilon 82 | 83 | # Check if the model x-values are below the lower limit. 84 | if x_m_max < threshold: 85 | # Award a penalty. 86 | rmse = self.penalty 87 | return rmse 88 | 89 | # Determine the length of the 90 | if self.x_def is None: 91 | if self.x_def_range is None: 92 | # Find minimum and maximum values that are in both data series. 93 | x_min = np.max([np.min(x_e), np.min(x_m)]) 94 | x_max = np.min([np.max(x_e), np.max(x_m)]) 95 | self.x_def_range = [x_min, x_max] 96 | 97 | # Create an equidistant interval over n_points, including the 98 | # endpoint. 
99 | self.x_def = np.linspace(self.x_def_range[0], 100 | self.x_def_range[1], 101 | self.n_points, 102 | endpoint=True) 103 | 104 | # Map both y data series on the same x-values, to allow for a 105 | # meaningful comparison. 106 | y_e_mapped = np.interp(self.x_def, x_e, y_e) 107 | y_m_mapped = np.interp(self.x_def, x_m, y_m) 108 | 109 | # Calculate the root mean squared error (RMSE). 110 | rmse = np.sqrt(((y_e_mapped - y_m_mapped) ** 2).mean()) 111 | 112 | # Scale the RMSE value. 113 | if self.scale_fitness == 'mean' or self.scale_fitness is True: 114 | # Return the RMSE scaled by the mean value of the data series. 115 | return rmse / np.abs(np.mean(y_e_mapped)) 116 | elif self.scale_fitness == 'minmax': 117 | return rmse / np.abs(np.max(y_e_mapped) - np.min(y_e_mapped)) 118 | elif self.scale_fitness == 'interquartile': 119 | # Return the RMSE as is. 120 | return rmse 121 | else: 122 | # Return the RMSE as is. 123 | return rmse 124 | 125 | 126 | class FitnessMethodRangeRMSE(FitnessMethodInterface): 127 | 128 | def __init__(self, n_points=None, x_def_range=None, y_relative_range=None, 129 | scale_fitness=True): 130 | self.n = n_points 131 | self.x_def = None 132 | self.x_def_range = x_def_range 133 | self.scale_fitness = scale_fitness 134 | if y_relative_range is None: 135 | self.y_relative_range = 0.05 136 | else: 137 | self.y_relative_range = abs(y_relative_range) 138 | FitnessMethodInterface.__init__(self, scale_fitness=scale_fitness) 139 | 140 | def compute(self, x_e, y_e, y2_e, x_m, y_m): 141 | """ 142 | 143 | compute x array on which the data sets shall be mapped to in order 144 | to compute the RMSE on the same definition range 145 | 146 | :param x_e: 147 | :param y_e: 148 | :param y2_e: 149 | :param x_m: 150 | :param y_m: 151 | :return: 152 | """ 153 | 154 | msg = "* Compute FitnessMethodRangeRMSE." 
155 | logging.debug(msg) 156 | 157 | if self.x_def is None: 158 | if self.x_def_range is None: 159 | x_min = np.max([np.min(x_e), np.min(x_m)]) 160 | x_max = np.min([np.max(x_e), np.max(x_m)]) 161 | self.x_def_range = [x_min, x_max] 162 | self.x_def = np.linspace(self.x_def_range[0], 163 | self.x_def_range[1], 164 | self.n, endpoint=True) 165 | 166 | y_e_mapped = np.interp(self.x_def, x_e, y_e) 167 | y_m_mapped = np.interp(self.x_def, x_m, y_m) 168 | y_rmse = np.zeros(y_e_mapped.shape) 169 | for i, value in enumerate(y_e_mapped): 170 | if (y_e_mapped[i]*(1-self.y_relative_range)) <= y_m_mapped[i] <=\ 171 | (y_e_mapped[i]*(1+self.y_relative_range)): 172 | y_rmse[i] = 0 173 | else: 174 | y_rmse[i] = (y_e_mapped[i] - y_m_mapped[i]) ** 2 175 | rmse = np.sqrt(y_rmse.mean()) 176 | if self.scale_fitness == 'mean' or self.scale_fitness is True: 177 | return rmse / np.abs(np.mean(y_e_mapped)) 178 | elif self.scale_fitness == 'minmax': 179 | return rmse / np.abs(np.max(y_e_mapped) - np.min(y_e_mapped)) 180 | elif self.scale_fitness == 'interquartile': 181 | return rmse 182 | else: 183 | return rmse 184 | 185 | 186 | class FitnessMethodBandRMSE(FitnessMethodInterface): 187 | 188 | def __init__(self, n_points=None, x_def_range=None, scale_fitness=True): 189 | self.n = n_points 190 | self.x_def = None 191 | self.x_def_range = x_def_range 192 | self.scale_fitness = scale_fitness 193 | FitnessMethodInterface.__init__(self, scale_fitness=scale_fitness) 194 | 195 | def compute(self, x_e, y_e, y2_e, x_m, y_m): 196 | """ 197 | 198 | compute x array on which the data sets shall be mapped to in order to 199 | compute the RMSE on the same definition range 200 | 201 | :param x_e: 202 | :param y_e: 203 | :param y2_e: 204 | :param x_m: 205 | :param y_m: 206 | :return: 207 | """ 208 | 209 | msg = "* Compute FitnessMethodBandRMSE." 
210 | logging.debug(msg) 211 | 212 | if self.x_def is None: 213 | if self.x_def_range is None: 214 | x_min = np.max([np.min(x_e), np.min(x_m)]) 215 | x_max = np.min([np.max(x_e), np.max(x_m)]) 216 | self.x_def_range = [x_min, x_max] 217 | self.x_def = np.linspace(self.x_def_range[0], 218 | self.x_def_range[1], 219 | self.n, endpoint=True) 220 | 221 | y_e_mapped = np.interp(self.x_def, x_e, y_e) 222 | y_e_mapped_b2 = np.interp(self.x_def, x_e, y2_e) 223 | y_m_mapped = np.interp(self.x_def, x_m, y_m) 224 | y_rmse = np.zeros(y_e_mapped.shape) 225 | for i, value in enumerate(y_e_mapped): 226 | if np.min((y_e_mapped[i], y_e_mapped_b2[i])) <= y_m_mapped[i] <= \ 227 | np.max((y_e_mapped[i], y_e_mapped_b2[i])): 228 | y_rmse[i] = 0 229 | else: 230 | y_rmse[i] = np.min((((y_e_mapped[i] - y_m_mapped[i]) ** 2), 231 | ((y_e_mapped_b2[i] - y_m_mapped[i]) ** 2))) 232 | rmse = np.sqrt(y_rmse.mean()) 233 | if self.scale_fitness == 'mean' or self.scale_fitness is True: 234 | return rmse / np.abs(np.mean(y_e_mapped)) 235 | elif self.scale_fitness == 'minmax': 236 | return rmse / np.abs(np.max(y_e_mapped) - np.min(y_e_mapped)) 237 | elif self.scale_fitness == 'interquartile': 238 | return rmse 239 | else: 240 | return rmse 241 | 242 | 243 | class FitnessMethodThreshold(FitnessMethodInterface): 244 | 245 | def __init__(self, threshold_type, threshold_target_value=None, 246 | threshold_value=None, threshold_range=None, 247 | scale_fitness=True): 248 | 249 | super().__init__(scale_fitness) 250 | 251 | threshold_types = ['upper', 'lower', 'range_minmax'] 252 | if threshold_type not in threshold_types: 253 | print("wrong threshold type, available types are:", threshold_types) 254 | # TODO handle this? 
255 | self.type = threshold_type 256 | self.threshold_target_value = threshold_target_value 257 | self.value = threshold_value 258 | self.range = threshold_range 259 | self.scale_fitness = scale_fitness 260 | 261 | def compute(self, x_e, y_e, y2_e, x_m, y_m): 262 | """ 263 | 264 | :param x_e: 265 | :param y_e: 266 | :param y2_e: 267 | :param x_m: 268 | :param y_m: 269 | :return: 270 | """ 271 | 272 | msg = "* Compute FitnessMethodThreshold." 273 | logging.debug(msg) 274 | 275 | x_e_threshold = None 276 | x_m_threshold = None 277 | if self.type == "upper" or self.type == "lower": 278 | # only needed for experimental data if no target value was specified 279 | if self.threshold_target_value is None: 280 | x_e_threshold = self.simple_threshold(self.type, 281 | self.value, 282 | x_e, 283 | y_e) 284 | else: 285 | x_e_threshold = self.threshold_target_value 286 | x_m_threshold = self.simple_threshold(self.type, 287 | self.value, 288 | x_m, 289 | y_m) 290 | 291 | if self.type == "range_minmax": 292 | # only needed for experimental data if no target value was specified 293 | if self.threshold_target_value is None: 294 | x_e_threshold_lower = self.simple_threshold("lower", 295 | self.range[0], 296 | x_e, y_e) 297 | x_e_threshold_upper = self.simple_threshold("upper", 298 | self.range[1], 299 | x_e, y_e) 300 | x_m_threshold_lower = self.simple_threshold("lower", 301 | self.range[0], 302 | x_m, y_m) 303 | x_m_threshold_upper = self.simple_threshold("upper", 304 | self.range[1], 305 | x_m, y_m) 306 | 307 | # check if target value was explicitly passed 308 | if self.threshold_target_value is not None: 309 | x_e_threshold = self.threshold_target_value 310 | else: 311 | # result is the smallest value in x when the range was left 312 | if x_e_threshold_lower is not None and \ 313 | x_e_threshold_upper is not None: 314 | x_e_threshold = np.min(x_e_threshold_lower, 315 | x_e_threshold_upper) 316 | if x_m_threshold_lower is not None and \ 317 | x_m_threshold_upper is not None: 318 | 
x_m_threshold = np.min(x_m_threshold_lower, x_m_threshold_upper) 319 | 320 | # check if the experimental data returns a valid threshold evaluation 321 | if x_e_threshold is None: 322 | print("ERROR: rethink your fitness method choice") 323 | logging.error("rethink your fitness method choice") 324 | sys.exit(1) 325 | 326 | # if the model data never reaches the threshold, 327 | # return maximal deviation w.r.t. the experimental value, 328 | # i.e. maximal model x-value minus experimental threshold position 329 | if x_m_threshold is None: 330 | x_m_max_distance = np.abs(np.max(x_m) - x_e_threshold) 331 | if self.scale_fitness: 332 | return np.abs(x_m_max_distance / x_e_threshold) 333 | else: 334 | return x_m_max_distance 335 | 336 | if self.scale_fitness: 337 | return np.abs((x_e_threshold - x_m_threshold) / x_e_threshold) 338 | 339 | return np.abs(x_e_threshold - x_m_threshold) 340 | 341 | def simple_threshold(self, t, v, x, y): 342 | """ 343 | 344 | :param t: 345 | :param v: 346 | :param x: 347 | :param y: 348 | :return: 349 | """ 350 | indices = None 351 | if t == "upper": 352 | indices = np.where(y > v) 353 | if t == "lower": 354 | indices = np.where(y < v) 355 | 356 | if len(indices[0]) > 0: 357 | result_index = indices[0][0] 358 | result_x = x[result_index] 359 | else: 360 | print("threshold was not reached") 361 | result_x = None 362 | 363 | return result_x 364 | 365 | 366 | class FitnessMethodIntegrate(FitnessMethodInterface): 367 | """ 368 | Integrate a data series and determine the distance to a target value. 369 | For instance to get the heat of combustion from MCC data. 370 | """ 371 | 372 | def __init__(self, n_points, x_def_range=None, scale_fitness=True, 373 | integrate_factor=1.0): 374 | """ 375 | Constructor. 376 | :param n_points: number of data points on which to interpolate the 377 | data series. 
378 | :param x_def_range: 379 | :param scale_fitness: 380 | :param integrate_factor: multiply the integration result, default: 1.0 381 | """ 382 | 383 | msg = "* From FitnessMethodIntegrate.__init__" 384 | logging.debug(msg) 385 | 386 | self.n = n_points 387 | self.x_def = None 388 | self.x_def_range = x_def_range 389 | self.scale_fitness = scale_fitness 390 | self.integrate_factor = integrate_factor 391 | FitnessMethodInterface.__init__(self, scale_fitness=scale_fitness) 392 | 393 | # TODO: implement parameter check in propti_prepare 394 | if self.n is None: 395 | msg = "* Note: 'n_points' is None, please choose a number!" 396 | logging.error(msg) 397 | # Is supposed to stop the whole MPI job, i.e. communicate 398 | # "upwards" to the main process that it shuts down. 399 | comm = MPI.COMM_WORLD 400 | comm.abort() 401 | # sys.exit() 402 | 403 | def compute(self, x_e, y_e, y2_e, x_m, y_m): 404 | """ 405 | Compute x array on which the data sets shall be mapped to, 406 | in order to compute the RMSE on the same definition range. 407 | """ 408 | 409 | msg = "* From FitnessMethodIntegrate.compute" 410 | logging.debug(msg) 411 | 412 | if self.x_def is None: 413 | msg = "* Note: 'x_def' is None." 414 | logging.debug(msg) 415 | if self.x_def_range is None: 416 | msg = "* Note: 'x_def_range' is None." 417 | logging.debug(msg) 418 | x_min = np.max([np.min(x_e), np.min(x_m)]) 419 | x_max = np.min([np.max(x_e), np.max(x_m)]) 420 | self.x_def_range = [x_min, x_max] 421 | msg = "* From 'compute': 'x_def_range' is now: {}." 422 | logging.debug(msg.format(self.x_def_range)) 423 | 424 | self.x_def = np.linspace(self.x_def_range[0], 425 | self.x_def_range[1], 426 | self.n, 427 | endpoint=True) 428 | msg = "* From 'compute': 'x_def' is now: {}." 429 | logging.debug(msg.format(self.x_def)) 430 | 431 | msg = "* Mapping data..." 432 | logging.debug(msg) 433 | 434 | # Map data series to the same definition range. 
435 | y_e_mapped = np.interp(self.x_def, x_e, y_e) 436 | y_m_mapped = np.interp(self.x_def, x_m, y_m) 437 | msg = "* FitnessMethodIntegrate.compute: y_e_mapped={}, y_m_mapped={}" 438 | logging.debug(msg.format(y_e_mapped, y_m_mapped)) 439 | 440 | # Integrate experiment and model data series. 441 | value_e = np.trapz(y_e_mapped, self.x_def) * self.integrate_factor 442 | value_m = np.trapz(y_m_mapped, self.x_def) * self.integrate_factor 443 | msg = "* FitnessMethodIntegrate.compute: value_e={}, value_m={}" 444 | logging.debug(msg.format(value_e, value_m)) 445 | 446 | # Compare experiment and model data. 447 | rmse = np.abs(value_e - value_m) 448 | 449 | msg = "* FitnessMethodIntegrate.compute: value_e={}, value_m={}, rmse={}" 450 | logging.debug(msg.format(value_e, value_m, rmse)) 451 | 452 | # Scale the fitness value, if required. 453 | # TODO: Find better way for scaling 454 | if self.scale_fitness is True: 455 | # return rmse / np.abs(np.mean(y_e_mapped)) 456 | return rmse / value_e 457 | # elif self.scale_fitness == 'minmax': 458 | # return rmse / np.abs(y_e_mapped[-1] - y_e_mapped[0]) 459 | # elif self.scale_fitness == 'interquartile': 460 | # return rmse 461 | else: 462 | return rmse 463 | -------------------------------------------------------------------------------- /resources/jureca/propti.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | propti 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | spotpy 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | FDS 109 | 110 | 111 | 112 | 113 | 114 | 
115 | 116 | returnsoptimi-zationresults 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | returns results of a single run 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | setup file 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | generates FDS Input based on chosenoptimization strategyuntil convergence 141 | 142 | 143 | 144 | 145 | 146 | 147 | simulation data 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | optimization data 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | setup fileFDS templateanalysis routinesmultiprocessorhandling 176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | simulation of best parameter set(s)analysis of optimization processplot of gathered data 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | 201 | 202 | 203 | --------------------------------------------------------------------------------