├── .github
│   └── ISSUE_TEMPLATE
│       ├── bug_report.md
│       └── feature_request.md
├── .gitignore
├── .pyup.yml
├── .travis.yml
├── LICENSE
├── README.md
├── RELEASE.md
├── demo
│   ├── distributions.ipynb
│   └── time_equivalence_vis.ipynb
├── profiler
│   └── mcs0.py
├── requirements.txt
├── setup.py
├── sfeprapy
│   ├── __init__.py
│   ├── cli
│   │   ├── __init__.py
│   │   └── __main__.py
│   ├── dists.py
│   ├── func
│   │   ├── __init__.py
│   │   ├── csv.py
│   │   ├── erf.py
│   │   └── xlsx.py
│   ├── input_parser.py
│   ├── mcs
│   │   └── __init__.py
│   ├── mcs0
│   │   ├── __init__.py
│   │   ├── calcs.py
│   │   ├── inputs.py
│   │   └── run.py
│   ├── mcs1
│   │   ├── __init__.py
│   │   ├── calcs.py
│   │   └── inputs.py
│   ├── mcs2
│   │   ├── __init__.py
│   │   ├── calc.py
│   │   └── inputs.py
│   └── project_info.py
└── test
    ├── __init__.py
    ├── car_park.py
    ├── test_mcs.py
    ├── test_mcs0.py
    ├── test_mcs1.py
    ├── test_mcs2.py
    └── test_misc.py
/.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Supply the problem definition file 16 | 2. State the OS spec and SfePrapy version 17 | 3. Provide the error code or message 18 | 19 | **Expected behavior** 20 | A clear and concise description of what you expected to happen. 21 | 22 | **Screenshots** 23 | If applicable, add screenshots to help explain your problem. 24 | 25 | **Additional context** 26 | Add any other context about the problem here. 27 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled python modules. 2 | *.pyc 3 | 4 | # Setuptools distribution folder and automatically generated files 5 | /dist/ 6 | /MANIFEST 7 | /build 8 | 9 | # Python egg metadata, regenerated from source files by setuptools.
10 | /*.egg-info 11 | 12 | # Python virtual environment 13 | /venv 14 | 15 | # Pycharm development files 16 | *.idea 17 | .idea* 18 | 19 | # Visual Studio Code development files 20 | /.vscode 21 | .DS_Store 22 | 23 | # Environment files 24 | *.env 25 | 26 | # Testing files 27 | /benchmarks 28 | 29 | # Python unittest files 30 | /.pytest_cache 31 | 32 | # Jupyter notebook temp files 33 | /.ipynb_checkpoints 34 | 35 | # Input templates, delete all test outputs 36 | /input_template/* 37 | !/input_template/config.json 38 | !/input_template/mc0* 39 | !/input_template/mc1* 40 | !/input_template/mcr1* 41 | !/input_template/benchmarks.csv 42 | 43 | # Dev files 44 | /sfeprapy/dev 45 | /sfeprapy_demo 46 | /.coverage 47 | /demo/media 48 | /demos/*.png 49 | /demo/fig 50 | /demo/mfput.log 51 | /demo/.ipynb_checkpoints -------------------------------------------------------------------------------- /.pyup.yml: -------------------------------------------------------------------------------- 1 | # configure updates globally 2 | # default: all 3 | # allowed: all, insecure, False 4 | update: all 5 | 6 | # configure dependency pinning globally 7 | # default: True 8 | # allowed: True, False 9 | pin: True 10 | 11 | # set the default branch 12 | # default: empty, the default branch on GitHub 13 | branch: dev 14 | 15 | # update schedule 16 | # default: empty 17 | # allowed: "every day", "every week", .. 18 | schedule: "every day" 19 | 20 | # search for requirement files 21 | # default: True 22 | # allowed: True, False 23 | search: True 24 | 25 | # Specify requirement files by hand, default is empty 26 | # default: empty 27 | # allowed: list 28 | requirements: empty 29 | 30 | # add a label to pull requests, default is not set 31 | # requires private repo permissions, even on public repos 32 | # default: empty 33 | label_prs: update 34 | 35 | # assign users to pull requests, default is not set 36 | # requires private repo permissions, even on public repos 37 | # default: empty 38 | assignees: ianf 39 | 40 | # configure the branch prefix the bot is using 41 | # default: pyup- 42 | branch_prefix: pyup/ 43 | 44 | # set a global prefix for PRs 45 | # default: empty 46 | pr_prefix: empty 47 | 48 | # allow to close stale PRs 49 | # default: True 50 | close_prs: True -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "3.7" 4 | - "3.8" 5 | - "3.9" 6 | install: 7 | - pip install -r requirements.txt 8 | - pip install --upgrade "git+https://github.com/fsepy/fsetools.git@dev" 9 | - pip install . 
10 | - pip install codecov 11 | - pip install pytest-cov 12 | script: 13 | - pytest --cov-report term --cov=sfeprapy/ 14 | after_success: 15 | - codecov 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 PYSFE 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SFEPRAPY 2 | [![GitHub version](https://badge.fury.io/gh/fsepy%2Fsfeprapy.svg)](https://github.com/fsepy/SfePrapy) 3 | [![Build Status](https://img.shields.io/travis/fsepy/SfePrapy.svg?branch=master&label=build%20(master)&style=flat)](https://travis-ci.org/fsepy/SfePrapy) 4 | [![Build Status](https://img.shields.io/travis/fsepy/SfePrapy.svg?branch=dev&label=build%20(dev)&style=flat)](https://travis-ci.org/fsepy/SfePrapy) 5 | [![codecov](https://codecov.io/gh/fsepy/SfePrapy/branch/dev/graph/badge.svg)](https://codecov.io/gh/fsepy/SfePrapy) 6 | 7 | Structural fire engineering (Sfe) probabilistic reliability assessment (Pra) Python (py) is a probabilistic analysis tool. It calculates the equivalent time of exposure to the ISO 834 standard fire, which can be used to assess the appropriate fire resistance rating for structural elements using reliability-based methods. 8 | 9 | `sfeprapy` is under continuous development and actively used in research and real engineering design problems. 10 | 11 | Legacy wiki can be found [here](https://github.com/fsepy/SfePrapy/wiki). 12 | 13 | Documentation (WIP) can be found [here](https://sfeprapy-doc.readthedocs.io/en/latest/index.html). 14 | 15 | A publication summarising the capabilities can be found [here](https://www.researchgate.net/publication/333202825_APPLICATION_OF_PYTHON_PROGRAMMING_LANGUAGE_IN_STRUCTURAL_FIRE_ENGINEERING_-_MONTE_CARLO_SIMULATION). 16 | 17 | ## Getting Started 18 | 19 | ### Installation 20 | 21 | Choose one of the following installation paths. 22 | 23 | [Python](https://www.python.org/downloads/) 3.7 or later is required. [Anaconda Distribution](https://www.anaconda.com/distribution/#download-section) is recommended for new starters; it includes Python and a few useful packages, including the package management tool pip (see below).
24 | 25 | [pip](https://pypi.org/) is a package management system for installing and updating Python packages. pip comes with Python, so you get pip simply by installing Python. On Ubuntu and Fedora Linux, you can simply use your system package manager to install the `python3-pip` package. [The Hitchhiker's Guide to Python](https://docs.python-guide.org/starting/installation/) provides some guidance on how to install Python on your system if it isn't installed already; you can also install Python directly from [python.org](https://www.python.org/getit/). You might want to [upgrade pip](https://pip.pypa.io/en/stable/installing/) before using it to install other programs. 26 | 27 | 1. to use `pip` to install from PyPI: 28 | 29 | [![Downloads](https://pepy.tech/badge/sfeprapy)](https://pepy.tech/project/sfeprapy) 30 | 31 | ```sh 32 | pip install --upgrade sfeprapy 33 | ``` 34 | 35 | 2. to use `pip` to install from GitHub (requires [git](https://git-scm.com/downloads)): 36 | 37 | *Note: installing `SfePrapy` via this route will include the latest commits/changes to the library.* 38 | 39 | ```sh 40 | pip install --upgrade "git+https://github.com/fsepy/SfePrapy.git@master" 41 | ``` 42 | 43 | 44 | ### Command line interface 45 | 46 | The `sfeprapy` command line interface (CLI) uses the current working directory to obtain and/or save files. 47 | 48 | #### To get help 49 | 50 | ```sh 51 | sfeprapy -h 52 | ``` 53 | 54 | #### To produce a `sfeprapy.mcs0` example input file 55 | 56 | ```sh 57 | sfeprapy mcs0 -e example_input.csv 58 | ``` 59 | 60 | #### To run a `sfeprapy.mcs0` simulation 61 | 62 | ```sh 63 | sfeprapy mcs0 -r -p 4 example_input.csv 64 | ``` 65 | 66 | `sfeprapy.mcs0` uses the [multiprocessing](https://docs.python.org/3.4/library/multiprocessing.html#module-multiprocessing) library to utilise the full potential of multi-core CPUs. The `-p 4` option specifies that 4 worker processes will be used to run the simulation; the default is 1.
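### Python interface

The simulation can also be driven directly from Python. Below is a minimal sketch adapted from the bundled `profiler/mcs0.py` script; the parameter values shown are illustrative only.

```python
from sfeprapy.mcs0 import MCS0, EXAMPLE_INPUT

# Start from the bundled example inputs and adjust parameters as required.
mcs_input = {'CASE_1': EXAMPLE_INPUT['CASE_1'].copy()}
mcs_input['CASE_1']['n_simulations'] = 1000  # illustrative value

# Set up and run the Monte Carlo simulation with 2 worker processes.
mcs = MCS0()
mcs.set_inputs_dict(mcs_input)
mcs.run(2, save=False)
```

`profiler/mcs0.py` also shows how a sampled input can be re-defined as a stochastic variable using the `key:dist`, `key:lbound` and `key:ubound` input convention.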
67 | 68 | ## License 69 | 70 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 71 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release 2 | 3 | Checklist before release of new versions: 4 | 5 | - Run tests in `sfeprapy.test`. 6 | - Version is up to date in `sfeprapy.__init__:__version__`. 7 | 8 | ## Known issues 9 | 10 | - Log normal distribution in `sfeprapy.func.mcs_gen` needs to be validated. 11 | - `sfeprapy.mcs0` currently does not capture column elements. 12 | - (WIP) `sfeprapy.mcs2`, similar to `sfeprapy.mcs0` but does not solve structural protection thickness for a predefined failure temperature, i.e. structural protection thickness is predefined. 13 | 14 | ## Version history 15 | 16 | ### xx/xx/2020 VERSION: 0.7.2 17 | 18 | - [ ] Added: 1D heat transfer module. 19 | - [x] Improved: `cli` mcs0 template can be saved as `.xlsx`. 20 | - [x] Fixed: `cli` mcs0 template blank lines when saved as `.csv`. 21 | 22 | ### 01/02/2020 VERSION: 0.7.0 23 | 24 | - New: GUI added to `sfeprapy.mcs0`, can be summoned in CLI by `sfeprapy mcs0 gui`. 25 | - Improved: various improvements, see repository commits. 26 | 27 | ### 28/10/2019 VERSION: 0.6.9 28 | 29 | - New: repository feature, codecov integration. 30 | - New: repository feature, travis integration. 31 | - Improved: CLI commands simplified and updated. 32 | - Improved: updated and added new test functions in the \test directory. 33 | 34 | ### 23/10/2019 VERSION: 0.6.8 35 | 36 | - New: use `sfeprapy` to trigger the recently refreshed CLI. Previously this was `python -m sfeprapy.mcs0`. 37 | - Improved: CLI, added figure and save template input file features. Use `sfeprapy -h` to find more information. 38 | - Deprecated: the `sfeprapy.mcs0` module can no longer be triggered directly as `python -m sfeprapy.mcs0` (i.e. code after `if __name__ == '__main__'` has been removed). 39 | - Deprecated: `sfeprapy.mcs0` GUI to select input file, use the CLI instead. 40 | 41 | ### 10/10/2019 VERSION: 0.6.7 42 | 43 | - New: `sfeprapy.mcs0` added exposure time dependent timber charring rate. 44 | - New: `sfeprapy.func.mcs_gen` added `ramp` input variable type for time dependent variables. The fill value should be a CSV-formatted string without headers, consisting of two columns: the first column is the time and the second column is the value at that time. 45 | - New: `sfeprapy.mcs0` added plotting feature; to activate, use `python -m sfeprapy.mcs0 {fp} mp2 fig`. 46 | - Deprecated: `sfeprapy.mc0`. This module is imported/combined into `sfeprapy.mcs0` at version 0.6. 47 | - Deprecated: `sfeprapy.mcs2`. This module is imported/combined into `sfeprapy.mcs0` at version 0.6.5. 48 | 49 | ### 12/09/2019 VERSION: 0.6.6 50 | 51 | - New: system arguments added to the `python -m sfeprapy.mcs0` command line call. For example, calling `python -m sfeprapy.mcs0 example_input.csv mp4` will run the problem definition file `example_input.csv` with 4 processes. 52 | 53 | ### 27/08/2019 VERSION: 0.6.5 54 | 55 | - New: `phi_teq` is added in `sfeprapy.mcs0` to include the Model Uncertainty factor, see [README](README) for details. 56 | - New: `sfeprapy.mcs0` implemented timber fuel contribution. 57 | - New: `timber_exposed_area` is added in `sfeprapy.mcs0`, see [README](README) for details. 58 | - New: `timber_charring_rate` is added in `sfeprapy.mcs0`, see [README](README) for details. 59 | - New: `timber_density` is added in `sfeprapy.mcs0`, see [README](README) for details. 60 | - New: `timber_hc` is added in `sfeprapy.mcs0`, see [README](README) for details. 61 | - New: `timber_solver_ilim` is added in `sfeprapy.mcs0`, see [README](README) for details. 62 | - New: `timber_solver_tol` is added in `sfeprapy.mcs0`, see [README](README) for details. 63 | - New: `sfeprapy.mcs0.test` automated tests for `sfeprapy.mcs0`. 64 | - Improved: `sfeprapy.mcs0` changed criteria for the parametric fire when `fire_mode` is set to 3. `opening_factor` should be within 0.01 and 0.2 (instead of 0.02 and 0.2) to comply with the UK NA to Eurocode 1991-1-2. 65 | 66 | ### 18/08/2019 VERSION: 0.6.4 67 | 68 | - Fixed: `window_open_fraction_permanent` in `sfeprapy.mcs0`. 69 | - Fixed: added `long_description_content_type` to setup.py to change the description type to markdown. 70 | - Improved: README.md updated to reflect the new `sfeprapy.mcs0`. 71 | 72 | ### 25/07/2019 VERSION: 0.6.1 73 | 74 | - New: `sfeprapy.func.mcs_gen` general purpose stochastic variable generator. 75 | - New: `sfeprapy.func_mcs_obj` general purpose Monte Carlo Simulation object. 76 | - New: `sfeprapy.mcs0` to implement `mcs_gen` and `mcs_obj` into the time equivalence calculation. 77 | - New: `sfeprapy.mcs0` when `beam_loc` is removed, the most onerous location will be calculated and used based on the specific fire curve. 78 | - Improved: `sfeprapy.mc1` MCS routine is converted to an object; MonteCarloCase and MonteCarlo classes are provided to substitute existing functions.
79 | - Fixed: `sfeprapy.mc1` the solver could fail to find convergence on protection thickness. 80 | - Fixed: `sfeprapy.dat.ec_3_1_2kyT` units fixed; degrees K were used where degrees C were required. 81 | 82 | ### 15/04/2019 VERSION: 0.5 83 | 84 | - New: `sfeprapy.pd6688.annex_b_equivalent_time_of_fire_exposure` PD 6688 equivalent time exposure calculation. A manual can be found in its docstring `annex_b_equivalent_time_of_fire_exposure.__doc__`. 85 | - New: (WIP) `sfeprapy.mc1` new equivalent time exposure procedure. 86 | - Improved: `sfeprapy.mc` optimised the temperature dependent steel heat capacity routine, resulting in 65% less simulation time. A tested case shows 32.8 seconds reduced to 16.7 seconds for 1000 simulations on an i7-7660U with 2 threads. 87 | - Fixed: `sfeprapy.mc.mc_inputs_generator.py:mc_inputs_generator` eliminated NaN values in sampled stochastic variables. It was discovered that extreme negative or positive values (i.e. ±inf) could be sampled even with predefined boundary limits; these extreme values are now replaced with the prescribed (user defined) limits (i.e. lbound and ubound). 88 | 89 | ### 31/03/2019 VERSION: 0.4 90 | 91 | - New: `sfeprapy.mc` figure size is customisable in the config.json file. 92 | - New: `sfeprapy.mc` figure x-axis limits (i.e. t_eq) are customisable in the config.json file. 93 | - Fixed: `sfeprapy.mc` final time equivalence 'stepping' issue, due to the tolerance of the solver, is now fixed by replacing the existing bisection solver with a secant solver. 94 | - Improved: `sfeprapy.mc` input parameter (in the master .csv file) `fire_mode` is added, replacing `fire_type_enforced`. 95 | - Improved: `sfeprapy.mc` simulation time improved. A tested case shows 256 seconds reduced to 26 seconds for 1000 simulations on an i7-3770k with 6 threads. 96 | - Improved: `sfeprapy.mc` refreshed data flow - master .csv input file -> individual case *.json file -> individual case MC *_in.csv file -> individual case result *_out.csv file -> *.png files. 97 | 98 | ### 11/03/2019 VERSION: 0.3 99 | 100 | - Improved: Updated the decision making for choosing between the travelling fire and the parametric fire, introduced q_td limit criteria, i.e. 50 <= q_td <= 1000. 101 | 102 | ### 11/03/2019 VERSION: 0.2 103 | 104 | - SfePrapy is now in the beta testing phase; 105 | - New: the Eurocode DIN Annex parametric fire curve is now available; force simulations to use only this fire curve by setting `fire_type_enforced` to 2 (see new input file template); and 106 | - New: feature to find the best fit distribution function - launch this tool with `python -m sfeprapy.dist_fit`. 107 | 108 | ### 07/03/2019 VERSION: 0.0.8 109 | 110 | - Improved: Able to combine individual time equivalence curves into one with corresponding `probability_weight` (see new input file template). 111 | 112 | ### 17/02/2019 VERSION: 0.0.7 113 | 114 | - New: An additional figure named `t_eq_merged.png` is produced to show the merged time equivalency of all cases. 115 | 116 | ### 22/01/2019 VERSION: 0.0.6 117 | 118 | - Fixed: Final time equivalence plot legend text is set to the case name. Previously '_out' was included. 119 | - Improved: The final time equivalence plot will be produced even without running any simulation, e.g. when all `is_live` are set to 0.
120 | 121 | ### 01/01/2019 VERSION: 0.0.5 122 | 123 | - New: More configuration parameters can be defined in a 'config.json' file located in the same folder as the selected input file; 124 | - New: Optional MC simulation calculation for each case (as in the input *.csv file); 125 | - New: beam_loc_z (beam element height), room_opening_permanent_ratio (permanent free window opening area); 126 | - Improved: Plot background is now transparent. 127 | 128 | ### 31/10/2018 VERSION: 0.0.4 129 | 130 | - Additional returned results from the Monte Carlo simulation tool `sfeprapy.time_equivalence_core.grouped_a_b`. The window opening factor `opening_factor` is now returned from the function. 131 | - `sfeprapy.time_equivalence.app` is now able to run a single simulation. When 'simulations=1' is defined, all distributed variables are disabled and the mean or mean(upper, lower) is used for stochastic parameters. 132 | - A new testing input file 'benchmark_file_1' is added for single simulation testing; all other parameters are identical to 'benchmark_file_0'. Benchmark files are moved to the validation folder, contained in the root directory. 133 | 134 | ### 21/08/2018 VERSION: 0.0.3 135 | 136 | - Updated code relating to the simulation output result \*.p and \*res.csv files. This fixes an issue where output fires did not align with the input / output index numbering. The new \*.p and \*res.csv files are sorted by time equivalence. The new output files are significantly larger than previous versions because more variables are passed in and out of the grouped_a_b calculation function `sfeprapy.time_equivalence_core.grouped_a_b()`. 137 | - Fire duration `fire_duration` is checked and reassigned if necessary so that the slowest travelling fire is able to travel the entire room `room_depth`. `fire_duration` defined in the input file will be the minimum fire duration. 138 | - Verification procedures are added for part of the project, including parametric fire testing, travelling fire testing and Eurocode protected steel heat transfer. 139 | 140 | ### 15/08/2018 VERSION: 0.0.2 141 | 142 | - A graphical folder select dialog is available; 143 | - Fixed an issue associated with `sfeprapy.time_equivalence.main_args()` where it would not ask for a new input folder directory when run more than once without re-importing the module; 144 | - Fixed the window opening fraction factor distribution. Previously the mean $\mu$ and standard deviation $\sigma$ were adopted based on $x$; however, the `scipy.stats.lognorm` module takes $\mu$ and $\sigma$ based on $\ln(x)$. This has been corrected; 145 | 146 | ### 04/08/2018 VERSION: 0.0.1 147 | 148 | - Renamed the package from `sfepy` to `sfeprapy` (Structural Fire Engineering Probabilistic Risk Assessment Python); 149 | - GitHub repository created; 150 | - Updated the progress bar appearance in `sfeprapy.time_equivalence.main_args()`; 151 | - Implemented a new window opening fraction distribution `window_open_fraction`; the linear distribution is now replaced by an inverse truncated log normal distribution; 152 | - Updated plot appearance; and 153 | - The project can now be installed through `pip install sfeprapy`.
154 | 155 | ### 02/01/2018 VERSION: 0.0.0 156 | 157 | - Implemented Latin hypercube sampling function, `pyDOE` external library is no longer required; 158 | - Boundary for `q_fd`, defined as `q_fd_ubound` and `q_fd_lbound` (upper and lower limit); 159 | - Now output plot for peak steel temperature according to input 'protection_thickness'; 160 | - Inputs arguments are packed in a pandas `DataFrame` object instead of a list; 161 | - Automatically generate fires inline with selected percentile `select_fires_teq` ±tolerance `select_fires_teq_tol` and save as .png and .csv. 162 | -------------------------------------------------------------------------------- /demo/distributions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": true, 7 | "pycharm": { 8 | "name": "#%% md\n" 9 | } 10 | }, 11 | "source": [ 12 | "Generates minimalistic plots for a variety of distribution types.\n", 13 | "\n", 14 | "15th April 2021, Yan Fu" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "metadata": { 21 | "pycharm": { 22 | "name": "#%%\n" 23 | } 24 | }, 25 | "outputs": [], 26 | "source": [ 27 | "import matplotlib.pyplot as plt\n", 28 | "import numpy as np\n", 29 | "\n", 30 | "from sfeprapy.mcs.mcs_gen_2 import InputParser\n", 31 | "plt.style.use('seaborn-white')" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 2, 37 | "metadata": { 38 | "pycharm": { 39 | "name": "#%%\n" 40 | } 41 | }, 42 | "outputs": [], 43 | "source": [ 44 | "# Sample defined distributions using `InputParser`, a thin layer built on top of scipy.stats module.\n", 45 | "\n", 46 | "dist_obj = InputParser()\n", 47 | "df_dists = dist_obj.inputs2samples(\n", 48 | " dist_params={\n", 49 | " 'Gumbel Type I': dict(\n", 50 | " dist = 'gumbel_r_',\n", 51 | " mean = 0,\n", 52 | " sd = 1,\n", 53 | " ubound=4,\n", 54 | " lbound=-4,\n", 55 | " ),\n", 56 | " 'Normal': dict(\n", 57 | " dist = 'norm_',\n", 58 | " mean=0,\n", 59 | " sd=1,\n", 60 | " ubound=4,\n", 61 | " lbound=-4,\n", 62 | " ),\n", 63 | " 'Uniform': dict(\n", 64 | " dist = 'uniform_',\n", 65 | " ubound=4,\n", 66 | " lbound=-4,\n", 67 | " ),\n", 68 | " 'Lognorm': dict(\n", 69 | " dist = 'lognorm_',\n", 70 | " mean=1,\n", 71 | " sd=1,\n", 72 | " ubound=4,\n", 73 | " lbound=-4,\n", 74 | " ),\n", 75 | " 'Complementary Lognorm': dict(\n", 76 | " dist='lognorm_mod_',\n", 77 | " ubound=1,\n", 78 | " lbound=0,\n", 79 | " mean=0.2,\n", 80 | " sd=0.2,\n", 81 | " )\n", 82 | " },\n", 83 | " num_samples = 10000\n", 84 | ")\n", 85 | "\n", 86 | "df_dists.drop('index', axis=1, inplace=True) # index column not used" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": 3, 92 | "metadata": { 93 | "pycharm": { 94 | "name": "#%%\n" 95 | } 96 | }, 97 | "outputs": [ 98 | { 99 | "data": { 100 | "text/plain": "
", 101 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZUAAABPCAYAAAAjiVjGAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAANZUlEQVR4nO3de0xTZx8H8C9t5dKCFyKI0zGYumaEqSiZcSmwZCxxQzdm3A0WwzKMsM14GQutDAUcAf/wEtmiMVnC8opxN8BXzDLclm2ZLixeN3ACczfr5JLBMoG11LbvH76nVmlR2tOec+r3808rx3N5fj3n+T3nnOc5J8zpdDpBREQkApXUG0BERKGDSYWIiETDpEJERKJhUiEiItEwqRARkWg03iZYLBa0t7cjLi4OarU6mNsUVHa7Hf39/UhNTUVkZKSoy2YM/cP4+Y8x9B9jODFek0p7ezvy8/N9XrDSNDQ0ID09XdRlMob+Yfz8xxj6jzGcGK9JJS4uzrWChIQEn1cgdz09PcjPz3eVV0yMoX8YP/8xhv5jDCfGa1IRTvMSEhIwe/Zsv1aiBIE4rWUMxVke4+f/MhlD/5fJGN4Zr0klGFIWpeGy+TIAYNbsWTh/+oyUm6No7rG0jFoRGR4RcjH1VEbh0/1v/k4PxDK9TZ81exZa/3skgFHzjMfe3Un43QP5m0va++uy+TJ0+3ZCt28nOru6MCU+HimL0qTcJMVyj+WodRS6fTtdlUao8FRG4dPbd1+mB2KZ3qZL9Ru5xzLU9hPyTvjdA/mby6ZLscPu4A5OJIGhkWFMiY9no45EIenlLyKSntCgA4DLRZsk3hpSOtmcqRARkfIxqRARkWiYVIiISDRMKgqXsigNU+LjMTQ8NGYab8ASUbAxqSic0EXQ4Rj7Ak/hBix71RFRsLD3FxFRCHMf6Do0PARdgNfHpEJEFMKEqxkA8E/+moCvT3aXv3gfgIhIuWSXVHgfgEg6QqOODTryVdCTitBbyVuPJSKSDh+XRP4K+j2VYF/fIyKi4JHd5S8iIlIuJhUiIhINkwoREYmGSYWIiETDwY8K5MsIWaGrKMDXxxJR4DCpKJAvPej4IiYiCgZe/iIiItHIOqlwdC+RNPi4JOUb77UYgSTrpMLRvUTS4OOSlG+812IEkqyTChERKQuTChERiYZJhYiIRMOkQkREomFSISK6ywSydx8HPxIR3WUCORiaZypENC6OWaGJ4JmKQvjyvC9vhEqCzwCjO8FH/CiHmPWErxRxpsKW0o2BTGIMZuKgUqLQJGY94StFnKmwpUQkDqElK1UrlkJf0M5UpHoODRHdINWjO+juEbSkwp2ZSPn4kFe6HUXcUyEieeD9OLodRdxTISIiz+TQ48sdkwoRkYL58ibYQFLc5S92LyaSHo9D8kZxZyrsXkwkPR6H5I3izlTuNoHsis3WJpFyyXWYBpOKzAWyKzZfGUtiYDdjach1mAaTChH5hd2MyZ2ikwpbSETywcupgSdc8pLjZS+BopMKW0hE8sHLqYEnhwdG3o7ien8RkfwJZy0A+IoFP8ltcOPthERSCbUdWIqdKNRiSNJy73LcuXot39/jB7kNbrydgCaVYFWOodZnXoqdKNRiSPIh7FtCcgHYcLkTSn1NQUCTihSVI1vcRPLk6ewFACyjVkSGR9y1x6t741uIBXC9IT7jP/sVcXbizmtSsdvtAICenh6fF65WqRA2+Pf1FanVCBv82/Xp/rfbTZ/IPCqEIbpmCwDg4qbNiE1IgHV0FBHh4QCAGQkzcKzlqGsbhfIJ5RXTRGL4+PIc9Pb0AgAsln8RHeS4uX+3WC2ITUgYEytvAhXDW+Mn1v50u+mBWKa36WqVKqj7oBBDqePmfpyOrDciescW1/EK4KZjVviuhOPYG+H49lSu4ZFhTN9VA+BGLITvwfit1CoVzGazaDEMczqdHrsQnDx5Evn5+X4tXEkaGhqQnp4u6jIZQ/8wfv5jDP3HGE6M16RisVjQ3t6OuLg4qNVqn1cgd3a7Hf39/UhNTUVkZKSoy2YM/cP4+Y8x9B9jODFekwoREdFEKXrwIxERyQuTChGRjPT29kq9CX7xOanYbDZUVVXBZDLhzTffREtLyx3P29jYiLa2Nq/TjUaj6/vBgwdRUlKC7OxslJSUoLu7e8Lb2tbWhsbGxgnPJyd1dXV45513AIhTnrq6OpjNZjE2TRHcy+spfvv27QMAFBQUoLW1NejbJ2ehuq9YLBZs3boVJpMJRUVFuHTpks/Lcq+z/LV79+4Jz2M2m1FXVyfaNvjD53EqH374IR599FFkZmYCAE6cOAGz2YympiasW7cORqMRzzzzDA4cOIDo6GjodDpMmjQJer0eAHDgwAEcPnwYTz/9NCIiItDc3Ayr1YqCgoKb1pOXl4e8vDwYjUbU1tbCZDKhsrISzc3N0Ov1KCsrQ25uLhwOBwoKClBbWwuVSoXExESsXr3a98jIUHt7Ozo7OwFcb82YTCbY7XYUFxejvr4edrsdqampOH78+JiY6/V6vP/++/j333/x4osvSlwS6VVUVODPP/9EV1cXKisr8dtvv+Hbb7/FwMAAEhMTYTKZoNVqcf/992Pu3LnYv38/nnjiCRw5cgRpaWn45ZdfoNfrYTabUVNTI3Vxguazzz7D8ePHYbFYYDQa0draiu7ubly8eBHFxcU4dOgQ5syZg+7ubuzYsQN79+7FP//8A4fDgfLycqxYsQJZWVm4evUqYmNj8dNPP2H+/Pm4cOECKioqEBsbG7SyNDQ0YNmyZVi6dCn6+vrQ2dmJvr4+fPzxxzcdV562s6KiAvPmzcPIyAg2btwIABgcHMSuXbug0WiQlpYGm82GU6dOYWhoCPfddx8GBwfx5JNPIioq6qb67uDBg4iLi0NXVxfKysrQ0dGBzz//HL/++it6e3vR09ODnTt3orCwEElJSbDZbDfVgQsWLBhTtsHBQVRXV0Or1cJgMODBBx/E7t27MW3aNIyMjCA9PR1nz56FzWbDypUroVKpxpR7vPokNzfXa1x9PlPp7OzEokWLMDAwAKPR6Grp3WrJkiUoKipCVFQUSkpKXGcozz77LLZt24bm5mZX4omOjsa5c+fGXe/jjz+Or776CufOncOCBQug1+tRWFiIgYEBfPPNN+jr60NkZCQ6Ojp8LZpsbdiwAXv27IHD4cCePXuwbds2GI1GHDp0CFarFaWlpUhOTvYY86lTp2L58uV44IEHcPLkSamLEnRhYWE3/dtqteKVV16BwWDAzz//DAAwGAxISUnBlStXkJmZifLycpw5c30wXmZmJlatWoUpU6Zgw4YNAIDXXnsNDocjuAWR2Jdffomqqiq89NJLaGlpQXt7O8rLy2EwGAAATqcTr7/+OpKSktDb24vBwUGUlZVh5syZuHDhAhITE1FSUgKr1YrCwkLo9XosW7YMGRkZuHjxYlDLcunSJVeFHB8fj4yMDHzyySdjjitP2zlp0iSsW7cOixcvxnfffQcAOHr0KGw2
G7RaLX744QcAQHZ2Np5//nncc889WLNmDU6dOjWmvnPfF//44w+kpKQgOzsbDz/8MJYuXYrh4WH09/cjJiYGVVVVY+pAT44ePYrVq1ejqqoKra2t+OCDD/DWW2/h1Vdfde2zjz32GNauXYu2tjaP5R6vPhmPz0ll7ty5OHHiBGJjY1FbW4vp06dDrVbj2rVrcDqdGB4eBgBotVqo1WqEh4dDrVZD6GwWHh6OsLAwaDQaOBwOFBcX4+WXX8acOXPGXW9WVhZaWlqQnJwMAK7ljY6OArh+8JeUlLh28lASHR2N5557Du+9956r3O6VWkxMDADPMW9qasKVK1ewcOFC3I0d/mJiYtDX1wfg+lne5MmTERUV5dr/3DmdTlcSEmLlHlv3z1uTVagT4iF82mw2ADfiIHRF1Wg0uHr1quvvQoyFOAKATqeDRqNx7afBTtDz5s1zNRp6e3tRX1/v8bgabztHR0eh0Whc8yxfvhwbN25EWtr1R/+7H4sqlQpOp9Njfee+Lwox27t3LyZPnozk5GQ4nU5X7G6tAz25dR8W6mV3kZGR0Gg0cDqdE65PxuPz5a8XXngBNTU1rnspKSkpiI+PR1dXF6qrqxERETHu/E1NTfj666+Rm5sLp9OJ8vJyjI6OorS0dNz51Go1pk6diqeeegoA0NHRge3bt2PGjBnIyMiAyWTC2bNnMX/+fF+LJmtZWVn49NNPsX79epSXl0OlUqGoqAjvvvvuuPPNnDkT33//PX788UdotVrodEp6mpD/VqxYgYqKCjQ0NODatWt45JFHvP5fg8GAiooKnDlzBkuWLAniVspXTU0NoqKiYDAYsGXLFjidTmzatAlhYWF4++23cf78eSxevPimeaKjozFt2jTXJWnh0rdcrFy5Elu3bsXhw4cxNDSE0tJSPPTQQ3d0XI2MjKC6uho2mw2bN2/GkSNHkJOTg8rKSjQ2NiInJwcWi8XjvHl5eePWd/feey8aGhqg0+lw7NgxdHd3Y2joxrtTbq0DBa2trfj9998RERGBN954A9u3b4dOp0NOTg6SkpJQU1MDrVaL8P+P6He3atWqCdUn41HcOJWPPvoIZrPZdR1TuNdCRMFXX1+Pv/76CyMjIygtLfVYYYUiKeudW+vAO3H69Gl88cUXGB4eRm5uLhYuXBiw7VNcUiEiIvniOBUiIhINkwoREYmGSYWIiETDpEJERKJhUiEiItEwqRARkWj+B0uEMtj6KzjqAAAAAElFTkSuQmCC\n" 102 | }, 103 | "metadata": {}, 104 | "output_type": "display_data" 105 | } 106 | ], 107 | "source": [ 108 | "# Plots\n", 109 | "\n", 110 | "fig, axes = plt.subplots(nrows=1, ncols=len(df_dists.columns), figsize=(1.15*len(df_dists.columns), 1.2))\n", 111 | "\n", 112 | "for i, k in enumerate(df_dists.columns):\n", 113 | " try:\n", 114 | " ax = axes[i]\n", 115 | " except TypeError:\n", 116 | " ax = axes\n", 117 | "\n", 118 | " data = np.array(df_dists[k].values)\n", 119 | " bins=np.linspace(data.min(), data.max(), 20)\n", 120 | " ax.hist(data, bins=20, alpha=0.9, histtype='bar', ec='k', color=(0, 164/255, 153/255))\n", 121 | "\n", 122 | " ax.get_yaxis().set_visible(False)\n", 123 | " ax.set_xticks([])\n", 124 | " ax.set_xlabel(k, fontsize='x-small')\n", 125 | "\n", 126 | "plt.tight_layout()\n", 127 | "plt.show()\n", 128 | "# fig.savefig('dists.png', dpi=300, bbox_inches='tight', pad_inches=0.015)" 129 | ] 130 | } 131 | ], 132 | "metadata": { 133 | "kernelspec": { 134 | "display_name": "Python 3", 135 | "language": "python", 136 | "name": "python3" 137 | }, 138 | "language_info": { 139 | "codemirror_mode": { 140 | "name": "ipython", 141 | "version": 3 142 | }, 143 | "file_extension": ".py", 144 | "mimetype": "text/x-python", 145 | "name": "python", 146 | "nbconvert_exporter": "python", 147 | "pygments_lexer": "ipython3", 148 | "version": "3.7.9" 149 | } 150 | }, 151 | "nbformat": 4, 152 | "nbformat_minor": 1 153 | } -------------------------------------------------------------------------------- /profiler/mcs0.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import cProfile 3 | 4 | 5 | def profile_standard_case(): 6 | from sfeprapy.mcs0 import MCS0, EXAMPLE_INPUT 7 | 8 | # increase the number of simulations so it gives sensible results 9 | mcs_input = dict() 10 | mcs_input['CASE_1'] = EXAMPLE_INPUT['CASE_1'].copy() 11 | for k in list(mcs_input.keys()): 12 | mcs_input[k]["phi_teq"] = 1 13 | mcs_input[k]["n_simulations"] = 10_000 14 | mcs_input[k]["timber_exposed_area"] = 0 15 | mcs_input[k].pop("beam_position_horizontal") 16 | mcs_input[k]["beam_position_horizontal:dist"] = "uniform_" 17 | mcs_input[k]["beam_position_horizontal:ubound"] = mcs_input[k]["room_depth"] * 0.9 18 | 
mcs_input[k]["beam_position_horizontal:lbound"] = mcs_input[k]["room_depth"] * 0.6 19 | 20 | # increase the number of threads so it runs faster 21 | mcs = MCS0() 22 | mcs.set_inputs_dict(mcs_input) 23 | mcs.run(4, save=False) 24 | 25 | 26 | if __name__ == "__main__": 27 | # Profile for the standard case input sfeprapy.mcs0.EXAMPLE_INPUT_CSV 28 | cProfile.run("profile_standard_case()", sort="cumtime") 29 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.17.1 2 | xlrd>=1.2.0 3 | openpyxl>=3.0.3 4 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | from codecs import open # To use a consistent encoding 5 | 6 | import setuptools 7 | import sfeprapy 8 | 9 | with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "README.md")) as f: 10 | long_description = f.read() 11 | 12 | try: 13 | with open("requirements.txt") as f: 14 | requirements = f.read().splitlines() 15 | except FileNotFoundError: 16 | with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "requirements.txt")) as f: 17 | requirements = f.read().splitlines() 18 | 19 | setuptools.setup( 20 | name="SFEPRAPY", 21 | version=sfeprapy.__version__, 22 | description="Structural Fire Engineering - Probabilistic Reliability Assessment (Equivalent Time Exposure)", 23 | author="Ian Fu", 24 | author_email="fuyans@gmail.com", 25 | url="https://github.com/fsepy/sfeprapy", 26 | download_url="https://github.com/fsepy/sfeprapy/archive/master.zip", 27 | keywords=["fire safety", "structural fire engineering"], 28 | classifiers=[ 29 | "Programming Language :: Python :: 3", 30 | "Development Status :: 4 - Beta", 31 | "Environment :: Other Environment", 32 | "Intended Audience :: Developers", 33 | "License :: OSI Approved :: MIT License", 34 | "Operating System :: OS Independent", 35 | "Topic :: Scientific/Engineering", 36 | ], 37 | long_description=long_description, 38 | long_description_content_type="text/markdown", 39 | packages=[ 40 | "sfeprapy", 41 | "sfeprapy.dists", 42 | "sfeprapy.mcs", 43 | "sfeprapy.mcs0", 44 | "sfeprapy.mcs1", 45 | "sfeprapy.mcs2", 46 | "sfeprapy.func", 47 | "sfeprapy.cli", 48 | ], 49 | install_requires=requirements, 50 | include_package_data=True, 51 | entry_points={"console_scripts": ["sfeprapy=sfeprapy.cli.__main__:main"]}, 52 | ) 53 | -------------------------------------------------------------------------------- /sfeprapy/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | 5 | # setup logger 6 | def get_logger(f_handler_fp: str = None, f_handler_level=logging.WARNING, c_handler_level=logging.INFO): 7 | logger_ = logging.getLogger('sfeprapy') 8 | 9 | c_handler = logging.StreamHandler() 10 | c_handler.setLevel(c_handler_level) 11 | c_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')) 12 | logger_.addHandler(c_handler) 13 | 14 | logger_.setLevel(logging.DEBUG) 15 | 16 | return logger_ 17 | 18 | 19 | logger = get_logger() 20 | 21 | # make root directory of this app which will be used 1. when running the app; 2. pyinstaller at compiling the app. 
22 | if os.path.exists(os.path.dirname(__file__)): 23 | # this path should be used when running the app as a Python package (non compiled) and/or pyinstaller at compiling 24 | # stage. 25 | __root_dir__ = os.path.realpath(os.path.dirname(__file__)) 26 | elif os.path.exists(os.path.dirname(os.path.dirname(__file__))): 27 | # the path will become invalid when the app run after compiled as the dirname `fsetoolsGUI` will disappear. 28 | # instead, the parent folder of the project dir will be used. 29 | __root_dir__ = os.path.realpath(os.path.dirname(os.path.dirname(__file__))) 30 | else: 31 | raise IsADirectoryError( 32 | f'Project root directory undefined: ' 33 | f'{os.path.dirname(__file__)} nor ' 34 | f'{os.path.dirname(os.path.dirname(__file__))}' 35 | ) 36 | 37 | """ 38 | VERSION IDENTIFICATION RULES DOCUMENTED IN PEP 440 ARE FOLLOWED. 39 | 40 | Version scheme 41 | ============== 42 | 43 | Distributions are identified by a public version identifier which supports all defined version comparison operations 44 | 45 | The version scheme is used both to describe the distribution version provided by a particular distribution archive, as 46 | well as to place constraints on the version of dependencies needed in order to build or run the software. 47 | 48 | Public version identifiers 49 | -------------------------- 50 | 51 | The canonical public version identifiers MUST comply with the following scheme: 52 | 53 | `[N!]N(.N)*[{a|b|rc}N][.postN][.devN]` 54 | 55 | Public version identifiers MUST NOT include leading or trailing whitespace. 56 | 57 | Public version identifiers MUST be unique within a given distribution. 58 | 59 | See also Appendix B : Parsing version strings with regular expressions which provides a regular expression to check 60 | strict conformance with the canonical format, as well as a more permissive regular expression accepting inputs that may 61 | require subsequent normalization. 62 | 63 | Public version identifiers are separated into up to five segments: 64 | 65 | - Epoch segment: N! 66 | - Release segment: N(.N)* 67 | - Pre-release segment: {a|b|rc}N 68 | - Post-release segment: .postN 69 | - Development release segment: .devN 70 | 71 | """ 72 | 73 | __version__ = "0.8.1" 74 | 75 | 76 | def _test_version_canonical(version=__version__): 77 | import re 78 | check = re.match( 79 | r"^([1-9][0-9]*!)?" 80 | r"(0|[1-9][0-9]*)" 81 | r"(\.(0|[1-9][0-9]*))*" 82 | r"((a|b|rc)(0|[1-9][0-9]*))?" 83 | r"(\.post(0|[1-9][0-9]*))?" 
84 | r"(\.dev(0|[1-9][0-9]*))?$", 85 | version, 86 | ) 87 | assert check is not None 88 | -------------------------------------------------------------------------------- /sfeprapy/cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fsepy/SFEPRAPY/02c2864fc4788cb42588c7d455c135389febb578/sfeprapy/cli/__init__.py -------------------------------------------------------------------------------- /sfeprapy/cli/__main__.py: -------------------------------------------------------------------------------- 1 | def main(): 2 | import argparse 3 | import os 4 | 5 | parser = argparse.ArgumentParser( 6 | description='SFEPRAPY - Structural Fire Engineering Probabilistic Reliability Analysis' 7 | ) 8 | subparsers = parser.add_subparsers(dest='sub_parser') 9 | 10 | p_mcs0 = subparsers.add_parser('mcs0', help='Monte Carlo simulation Type 0') 11 | p_mcs1 = subparsers.add_parser('mcs1', help='Monte Carlo simulation Type 1') 12 | p_mcs2 = subparsers.add_parser('mcs2', help='Monte Carlo simulation Type 2') 13 | for p_mcs in (p_mcs0, p_mcs1, p_mcs2): 14 | p_mcs.add_argument('-r', '--run', 15 | help='run Monte Carlo simulation from input file filepath', 16 | action='store_true', ) 17 | p_mcs.add_argument('-e', '--template', 18 | help='save example input file to filepath', 19 | action='store_true', ) 20 | p_mcs.add_argument('-p', '--processor', 21 | help='number of processors to run the simulation, use no more than available logical ' 22 | 'processors, default 1', 23 | default=1, 24 | type=int, 25 | metavar='Integer') 26 | p_mcs.add_argument('filepath', 27 | help=f'input file name (including extension).', 28 | type=str) 29 | 30 | p_distfit = subparsers.add_parser('distfit', help='distribution fit') 31 | p_distfit.add_argument('-t', '--type', 32 | help='an integer indicating data type\n' 33 | '0 (default) samples only, a single column data.\n' 34 | '1 PDF, two columns data containing x, y, without heading.\n' 35 | '2 CDF, two columns data containing x, y, without heading.', 36 | default=0, 37 | type=int, ) 38 | p_distfit.add_argument('-g', '--group', 39 | help='an integer indicating what distribution group to be used for fitting the data:\n' 40 | '0 fit to all available distributions.\n' 41 | '1 (default) fit to common distribution types.\n', 42 | default=1, 43 | type=int, ) 44 | p_distfit.add_argument('filepath', 45 | help=f'Input file name (including extension).', 46 | type=str) 47 | 48 | args = parser.parse_args() 49 | 50 | if args.sub_parser == 'mcs0': 51 | from sfeprapy.mcs0 import cli_main as mcs0 52 | from sfeprapy.mcs0 import EXAMPLE_INPUT_CSV 53 | from sfeprapy.mcs0 import EXAMPLE_INPUT_DF 54 | 55 | if args.template: 56 | if args.filepath.endswith('.xlsx'): 57 | EXAMPLE_INPUT_DF.to_excel(args.filepath) 58 | else: 59 | with open(args.filepath, "w+", encoding='utf-8') as f: 60 | f.write(EXAMPLE_INPUT_CSV) 61 | 62 | if args.run: 63 | mcs0(fp_mcs_in=os.path.realpath(args.filepath), n_threads=int(args.processor)) 64 | return 65 | 66 | if args.sub_parser == 'mcs1': 67 | from sfeprapy.mcs1 import cli_main as mcs1 68 | from sfeprapy.mcs1 import EXAMPLE_INPUT_CSV, EXAMPLE_INPUT_DF 69 | 70 | if args.template: 71 | if args.filepath.endswith('.xlsx'): 72 | EXAMPLE_INPUT_DF.to_excel(args.filepath) 73 | else: 74 | with open(args.filepath, "w+", encoding='utf-8') as f: 75 | f.write(EXAMPLE_INPUT_CSV) 76 | 77 | if args.run: 78 | mcs1(fp_mcs_in=os.path.realpath(args.filepath), n_threads=int(args.processor)) 79 | return 80 | 81 | if 
args.sub_parser == 'mcs2': 82 | from sfeprapy.mcs2 import cli_main as mcs2 83 | from sfeprapy.mcs2 import EXAMPLE_INPUT_CSV, EXAMPLE_INPUT_DF 84 | 85 | if args.template: 86 | if args.filepath.endswith('.xlsx'): 87 | EXAMPLE_INPUT_DF.to_excel(args.filepath) 88 | else: 89 | with open(args.filepath, "w+", encoding='utf-8') as f: 90 | f.write(EXAMPLE_INPUT_CSV) 91 | 92 | if args.run: 93 | mcs2(fp_mcs_in=os.path.realpath(args.filepath), n_threads=int(args.processor)) 94 | return 95 | 96 | # DEPRECIATED 9th May 2023 97 | # if args.sub_parser == 'distfit': 98 | # from sfeprapy.func.stats_dist_fit import auto_fit 99 | # auto_fit( 100 | # data_type=int(args.type), 101 | # distribution_list=int(args.group), 102 | # data=args.filepath, 103 | # ) 104 | # return 105 | 106 | 107 | if __name__ == '__main__': 108 | main() 109 | -------------------------------------------------------------------------------- /sfeprapy/dists.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from inspect import getfullargspec 3 | from typing import Union 4 | 5 | import numpy as np 6 | 7 | from sfeprapy.func.erf import erf, erfinv 8 | 9 | __all__ = ('Normal', 'Gumbel', 'Lognormal', 'Arcsine', 'Cauchy', 'HyperbolicSecant', 'HalfCauchy', 'Logistic', 10 | 'Uniform', 'DistFunc', 'Constant', 'LognormalMod', 'Discrete', 'Br187FuelLoadDensity', 'Br187HrrDensity') 11 | 12 | 13 | class DistFunc(ABC): 14 | def __init__(self, *args, **kwargs): 15 | self.args = args 16 | self.kwargs = kwargs 17 | if 'lbound' in kwargs and 'lim_1' not in kwargs: 18 | kwargs['lim_1'] = kwargs.pop('lbound') 19 | if 'ubound' in kwargs and 'lim_2' not in kwargs: 20 | kwargs['lim_2'] = kwargs.pop('ubound') 21 | 22 | def pdf(self, x: Union[int, float, np.ndarray]): 23 | args = list() 24 | for i, name in enumerate(getfullargspec(self._pdf).args[1:]): 25 | if name in self.kwargs: 26 | args.append(self.kwargs[name]) 27 | else: 28 | args.append(self.args[i]) 29 | return self._pdf(x, *args) 30 | 31 | def cdf(self, x: Union[int, float, np.ndarray]): 32 | args = list() 33 | for i, name in enumerate(getfullargspec(self._cdf).args[1:]): 34 | if name in self.kwargs: 35 | args.append(self.kwargs[name]) 36 | else: 37 | args.append(self.args[i]) 38 | return self._cdf(x, *args) 39 | 40 | def ppf(self, p: Union[int, float, np.ndarray]): 41 | args = list() 42 | for i, name in enumerate(getfullargspec(self._ppf).args[1:]): 43 | if name in self.kwargs: 44 | args.append(self.kwargs[name]) 45 | else: 46 | args.append(self.args[i]) 47 | return self._ppf(p, *args) 48 | 49 | def sampling(self, n: int, lim_1: float = None, lim_2: float = None, shuffle: bool = True): 50 | padding = 1. 
/ n 51 | 52 | if lim_1 is None: 53 | lim_1 = padding 54 | else: 55 | lim_1 = self.cdf(lim_1) # convert value bound to probability bound 56 | 57 | if lim_2 is None: 58 | lim_2 = 1 - padding 59 | else: 60 | lim_2 = self.cdf(lim_2) # convert value bound to probability bound 61 | 62 | samples = self.ppf(np.linspace(lim_1, lim_2, n)) # inverse-transform sampling: evenly spaced probabilities mapped through the PPF 63 | if shuffle: 64 | np.random.shuffle(samples) 65 | return samples 66 | 67 | @staticmethod 68 | @abstractmethod 69 | def _pdf(x: Union[int, float, np.ndarray], *args, **kwargs) -> Union[int, float, np.ndarray]: 70 | raise NotImplementedError 71 | 72 | @staticmethod 73 | @abstractmethod 74 | def _cdf(x: Union[int, float, np.ndarray], *args, **kwargs) -> Union[int, float, np.ndarray]: 75 | raise NotImplementedError 76 | 77 | @staticmethod 78 | @abstractmethod 79 | def _ppf(p: Union[int, float, np.ndarray], *args, **kwargs) -> Union[int, float, np.ndarray]: 80 | raise NotImplementedError 81 | 82 | 83 | def assert_func(r_, a_, tol=1e-1): 84 | assert abs(r_ - a_) < tol, ValueError(f'{r_} != {a_}') 85 | 86 | 87 | def test_erf(): 88 | try: 89 | from scipy.special import erf as erf_ 90 | except ImportError: 91 | raise ImportError('SciPy is required for testing') 92 | 93 | i = np.linspace(-2, 2, 11)[1:-1] 94 | a = erf_(i) 95 | b = erf(i) 96 | for _ in range(len(i)): 97 | print(f'{a[_]:7.5f} {b[_]:7.5f}') 98 | 99 | 100 | def test_erfinv(): 101 | try: 102 | from scipy.special import erfinv as erfinv_ 103 | except ImportError: 104 | raise ImportError('SciPy is required for testing') 105 | 106 | i = np.linspace(-1, 1, 11)[1:-1] 107 | a = erfinv_(i) 108 | b = erfinv(i) 109 | for _ in range(len(i)): 110 | print(f'{a[_]:7.5f} {b[_]:7.5f}') 111 | 112 | 113 | class Constant(DistFunc): 114 | def __init__(self, value: Union[int, float], *_, **__): 115 | self.value = value 116 | super().__init__(value) 117 | 118 | def sampling(self, n: int, lim_1: float = None, lim_2: float = None, shuffle: bool = True): 119 | return np.full((n,), self.value) 120 | 121 | @staticmethod 122 | def _pdf(): 123 | raise ValueError('PDF not available to Constant DistFunc') 124 | 125 | @staticmethod 126 | def _cdf(): 127 | raise ValueError('CDF not available to Constant DistFunc') 128 | 129 | @staticmethod 130 | def _ppf(): 131 | raise ValueError('PPF not available to Constant DistFunc') 132 | 133 |
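# `Discrete` (below) samples from a finite set of `values` with associated
# probabilities `weights`. Both parameters may be supplied as comma-separated
# strings, e.g. values='1, 2, 3' with weights='0.2, 0.5, 0.3'; the weights
# must sum to unity.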
134 | class Discrete(DistFunc): 135 | def __init__(self, values, weights, lbound=None, ubound=None): 136 | ''' 137 | `lbound` and `ubound` are accepted for interface compatibility but are not used. 138 | Args: 139 | values: 140 | weights: 141 | lbound: 142 | ubound: 143 | ''' 144 | if isinstance(values, str): 145 | assert ',' in values, f'`discrete_` distribution `values` parameter is not a comma-separated list.' 146 | values = [float(i.strip()) for i in values.split(',')] 147 | 148 | if isinstance(weights, str): 149 | assert ',' in weights, f'`discrete_`:`weights` is not a comma-separated list of numbers.' 150 | weights = [float(i.strip()) for i in weights.split(',')] 151 | 152 | assert len(values) == len( 153 | weights), f'Length of values ({len(values)}) and weights ({len(weights)}) do not match.' 154 | assert sum(weights) == 1., f'Sum of all weights should be unity, got {sum(weights)}.' 155 | 156 | super().__init__(values, weights) 157 | 158 | def sampling(self, n: int, lim_1: float = None, lim_2: float = None, shuffle: bool = True): 159 | samples = self._sampling(n, *self.args, **self.kwargs) 160 | if shuffle: 161 | np.random.shuffle(samples) 162 | return samples 163 | 164 | @staticmethod 165 | def _sampling(n, values, weights): 166 | weights = [int(round(i * n)) for i in weights] # expected sample count per value 167 | if (sum_sampled := sum(weights)) < n: 168 | for i in np.random.choice(np.arange(len(weights)), size=n - sum_sampled): # randomly top up the rounded counts so they total n 169 | weights[i] += 1 170 | elif sum_sampled > n: 171 | for i in np.random.choice(np.arange(len(weights)), size=sum_sampled - n): # randomly trim the rounded counts so they total n 172 | weights[i] -= 1 173 | weights = np.cumsum(weights) 174 | assert weights[-1] == n, f'Total of rounded weights does not match `n`.' 175 | samples = np.empty((n,), dtype=float) 176 | for i, v__ in enumerate(values): 177 | if i == 0: 178 | samples[0:weights[i]] = v__ 179 | else: 180 | samples[weights[i - 1]:weights[i]] = v__ 181 | return samples 182 | 183 | @staticmethod 184 | def _pdf(x, v, w): 185 | v = np.asarray(v) 186 | w = np.asarray(w) 187 | 188 | if np.sum(w) != 1.0: 189 | w = w / np.sum(w) 190 | 191 | pdf_dict = dict(zip(v, w)) 192 | 193 | # Check if x exists in v and return corresponding w 194 | return pdf_dict.get(x, 0) 195 | 196 | @staticmethod 197 | def _cdf(x, v, w): 198 | v = np.asarray(v) 199 | w = np.asarray(w) 200 | 201 | if np.sum(w) != 1.0: 202 | w = w / np.sum(w) 203 | 204 | cdf_dict = dict(zip(v, np.cumsum(w))) 205 | 206 | # If x is not in v, return 1 for values larger than max(v) and 0 for values smaller than min(v) 207 | if x not in cdf_dict: 208 | if x < min(v): 209 | return 0 210 | elif x > max(v): 211 | return 1 212 | 213 | return cdf_dict[x] 214 | 215 | @staticmethod 216 | def _ppf(x, v, w): 217 | v = np.asarray(v) 218 | w = np.asarray(w) 219 | 220 | if np.sum(w) != 1.0: 221 | w = w / np.sum(w) 222 | 223 | cdf = np.cumsum(w) 224 | 225 | # If x is not between 0 and 1, return None 226 | if x < 0 or x > 1: 227 | return None 228 | 229 | return v[np.argwhere(cdf >= x)[0][0]] 230 | 231 | 232 | class Gumbel(DistFunc): 233 | @staticmethod 234 | def _pdf(x, mean, sd): 235 | mean, sd = Gumbel._convert_params(mean, sd) 236 | z = (x - mean) / sd 237 | return (1 / sd) * np.exp(-(z + np.exp(-z))) 238 | 239 | @staticmethod 240 | def _cdf(x, mean, sd): 241 | mean, sd = Gumbel._convert_params(mean, sd) 242 | z = (x - mean) / sd 243 | return np.exp(-np.exp(-z)) 244 | 245 | @staticmethod 246 | def _ppf(q, mean, sd): 247 | mean, sd = Gumbel._convert_params(mean, sd) 248 | return mean - sd * np.log(-np.log(q)) 249 | 250 | @staticmethod 251 | def _convert_params(mean, sd): 252 | sd = sd * np.sqrt(6) / np.pi # scale parameter from standard deviation 253 | mean = mean - 0.57721566490153286060 * sd # location parameter; 0.5772... is the Euler-Mascheroni constant 254 | return mean, sd 255 | 256 | @staticmethod 257 | def test(): 258 | d = Gumbel(420, 126) 259 | assert_func(d.pdf(316.541), 0.00327648, tol=1e-3) 260 | assert_func(d.pdf(365.069), 0.00374402, tol=1e-3) 261 | assert_func(d.pdf(413.596), 0.00335019, tol=1e-3) 262 | assert_func(d.pdf(462.123), 0.00258223, tol=1e-3) 263 | assert_func(d.pdf(510.650), 0.00181710, tol=1e-3) 264 | assert_func(d.cdf(316.541), 0.20000, tol=1e-3) 265 | assert_func(d.cdf(365.069), 0.37453, tol=1e-3) 266 | assert_func(d.cdf(413.596), 0.54921, tol=1e-3) 267 | assert_func(d.cdf(462.123), 0.69372, tol=1e-3) 268 | assert_func(d.cdf(510.650), 0.80000, tol=1e-3) 269 | assert_func(d.ppf(0.20000), 316.541) 270 | assert_func(d.ppf(0.37453), 365.069) 271 | assert_func(d.ppf(0.54921), 413.596) 272 |
assert_func(d.ppf(0.69372), 462.123) 273 | assert_func(d.ppf(0.80000), 510.650) 274 | 275 | 276 | if __name__ == '__main__': 277 | Gumbel.test() 278 | 279 | 280 | class Normal(DistFunc): 281 | @staticmethod 282 | def _pdf(x, mean, sd): 283 | return (1 / (sd * np.sqrt(2 * np.pi))) * np.exp(-((x - mean) ** 2) / (2 * sd ** 2)) 284 | 285 | @staticmethod 286 | def _cdf(x, mean, sd): 287 | return 0.5 * (1 + erf((x - mean) / (np.sqrt(2) * sd))) 288 | 289 | @staticmethod 290 | def _ppf(p, mean, sd): 291 | return mean + sd * np.sqrt(2) * erfinv(2 * p - 1) 292 | 293 | @staticmethod 294 | def test(): 295 | d = Normal(420, 126) 296 | assert_func(d.pdf(313.956), 0.00222192, tol=1e-3) 297 | assert_func(d.pdf(366.978), 0.00289792, tol=1e-3) 298 | assert_func(d.pdf(420.000), 0.00316621, tol=1e-3) 299 | assert_func(d.pdf(473.022), 0.00289792, tol=1e-3) 300 | assert_func(d.pdf(526.044), 0.00222192, tol=1e-3) 301 | assert_func(d.cdf(313.956), 0.20000, tol=1e-3) 302 | assert_func(d.cdf(366.978), 0.33695, tol=1e-3) 303 | assert_func(d.cdf(420.000), 0.50000, tol=1e-3) 304 | assert_func(d.cdf(473.022), 0.66305, tol=1e-3) 305 | assert_func(d.cdf(526.044), 0.80000, tol=1e-3) 306 | assert_func(d.ppf(0.20000), 313.956) 307 | assert_func(d.ppf(0.33695), 366.978) 308 | assert_func(d.ppf(0.50000), 420.000) 309 | assert_func(d.ppf(0.66305), 473.022) 310 | assert_func(d.ppf(0.80000), 526.044) 311 | 312 | 313 | if __name__ == '__main__': 314 | Normal.test() 315 | 316 | 317 | class Lognormal(DistFunc): 318 | SQRT_2PI = np.sqrt(2 * np.pi) 319 | 320 | @staticmethod 321 | def _pdf(x, mean, sd): 322 | # Convert M and S to mean and sd of the underlying normal distribution 323 | mu = np.log(mean ** 2 / np.sqrt(mean ** 2 + sd ** 2)) 324 | sigma = np.sqrt(np.log(1 + sd ** 2 / mean ** 2)) 325 | 326 | # Probability Density Function 327 | return (1 / (x * sigma * np.sqrt(2 * np.pi))) * np.exp(-(np.log(x) - mu) ** 2 / (2 * sigma ** 2)) 328 | 329 | @staticmethod 330 | def _cdf(x, mean, sd): 331 | # Convert M and S to mean and sd of the underlying normal distribution 332 | mu = np.log(mean ** 2 / np.sqrt(mean ** 2 + sd ** 2)) 333 | sigma = np.sqrt(np.log(1 + sd ** 2 / mean ** 2)) 334 | 335 | # Cumulative Distribution Function 336 | return 0.5 + 0.5 * erf((np.log(x) - mu) / (sigma * np.sqrt(2))) 337 | 338 | @staticmethod 339 | def _ppf(q, mean, sd): 340 | # Convert M and S to mean and sd of the underlying normal distribution 341 | mu = np.log(mean ** 2 / np.sqrt(mean ** 2 + sd ** 2)) 342 | sigma = np.sqrt(np.log(1 + sd ** 2 / mean ** 2)) 343 | 344 | # Percent Point Function (Quantile Function) 345 | return np.exp(mu + sigma * np.sqrt(2) * erfinv(2 * q - 1)) 346 | 347 | @staticmethod 348 | def test(): 349 | d = Lognormal(420, 126) 350 | assert_func(d.pdf(314.222), 0.00303505, tol=1e-3) 351 | assert_func(d.pdf(364.425), 0.00352359, tol=1e-3) 352 | assert_func(d.pdf(414.628), 0.00326027, tol=1e-3) 353 | assert_func(d.pdf(464.831), 0.00259000, tol=1e-3) 354 | assert_func(d.pdf(515.034), 0.00185168, tol=1e-3) 355 | assert_func(d.cdf(314.222), 0.20000, tol=1e-3) 356 | assert_func(d.cdf(364.425), 0.36817, tol=1e-3) 357 | assert_func(d.cdf(414.628), 0.54099, tol=1e-3) 358 | assert_func(d.cdf(464.831), 0.68873, tol=1e-3) 359 | assert_func(d.cdf(515.034), 0.80000, tol=1e-3) 360 | assert_func(d.ppf(0.20000), 314.222) 361 | assert_func(d.ppf(0.36817), 364.425) 362 | assert_func(d.ppf(0.54099), 414.628) 363 | assert_func(d.ppf(0.68873), 464.831) 364 | assert_func(d.ppf(0.80000), 515.034) 365 | 366 | 367 | if __name__ == '__main__': 368 | 
Lognormal.test() 369 | 370 | 371 | class Br187HrrDensity(DistFunc): 372 | @staticmethod 373 | def _pdf(*_, **__): 374 | pass 375 | 376 | @staticmethod 377 | def _cdf(*_, **__): 378 | pass 379 | 380 | @staticmethod 381 | def _ppf(*_, **__): 382 | pass 383 | 384 | def sampling(self, n: int, lim_1: float = None, lim_2: float = None, shuffle: bool = True): 385 | a, b = 0.32, 0.57 386 | mean, sd = (a + b) / 2, (b - a) / (2 * np.sqrt(3)) 387 | samples_1 = Uniform(mean=mean, sd=sd).sampling(n, lim_1=lim_1, lim_2=lim_2, shuffle=shuffle) 388 | a, b = 0.15, 0.65 389 | mean, sd = (a + b) / 2, (b - a) / (2 * np.sqrt(3)) 390 | samples_2 = Uniform(mean=mean, sd=sd).sampling(n, lim_1=lim_1, lim_2=lim_2, shuffle=shuffle) 391 | samples = np.random.choice(np.append(samples_1, samples_2), n, replace=False) 392 | return samples 393 | 394 | 395 | class LognormalMod(Lognormal): 396 | def sampling(self, n: int, lim_1: float = None, lim_2: float = None, shuffle: bool = True): 397 | return 1 - super().sampling(n=n, lim_1=lim_1, lim_2=lim_2, shuffle=shuffle) 398 | 399 | 400 | class Arcsine(DistFunc): 401 | @staticmethod 402 | def _pdf(x, mean, sd): 403 | # Calculate a and b from the mean and standard deviation 404 | a = mean - np.sqrt(2) * sd 405 | b = mean + np.sqrt(2) * sd 406 | 407 | # Ensure that x is within the range [a, b] 408 | # if x < a or x > b: 409 | # raise ValueError("x must be within the range [a, b]") 410 | 411 | # Compute the PDF of the arcsine distribution 412 | pdf_value = 1 / (np.pi * np.sqrt((x - a) * (b - x))) 413 | 414 | return pdf_value 415 | 416 | @staticmethod 417 | def _cdf(x, mean, sd): 418 | # Calculate a and b from the mean and standard deviation 419 | a = mean - np.sqrt(2) * sd 420 | b = mean + np.sqrt(2) * sd 421 | 422 | # Ensure that x is within the range [a, b] 423 | # if x < a or x > b: 424 | # raise ValueError("x must be within the range [a, b]") 425 | 426 | # Standardize x 427 | x_std = (x - a) / (b - a) 428 | 429 | # Compute the CDF of the standardized arcsine distribution 430 | cdf_value = (2 / np.pi) * np.arcsin(np.sqrt(x_std)) 431 | 432 | # CDF of the generalized arcsine distribution 433 | return cdf_value 434 | 435 | @staticmethod 436 | def _ppf(p, mean, sd): 437 | # Calculate a and b from the mean and standard deviation 438 | a = mean - np.sqrt(2) * sd 439 | b = mean + np.sqrt(2) * sd 440 | 441 | # Compute the PPF of the arcsine distribution 442 | ppf_value = a + (b - a) * (np.sin(np.pi * p / 2)) ** 2 443 | 444 | return ppf_value 445 | 446 | @staticmethod 447 | def test(): 448 | d = Arcsine(420, 126) 449 | assert_func(d.pdf(275.84), 0.00303911, tol=1e-3) 450 | assert_func(d.pdf(347.92), 0.00195328, tol=1e-3) 451 | assert_func(d.pdf(420.00), 0.00178634, tol=1e-3) 452 | assert_func(d.pdf(492.08), 0.00195328, tol=1e-3) 453 | assert_func(d.pdf(564.16), 0.00303911, tol=1e-3) 454 | assert_func(d.cdf(250), 0.09689, tol=1e-3) 455 | assert_func(d.cdf(300), 0.26482, tol=1e-3) 456 | assert_func(d.cdf(400), 0.46420, tol=1e-3) 457 | assert_func(d.cdf(500), 0.64820, tol=1e-3) 458 | assert_func(d.cdf(550), 0.76027, tol=1e-3) 459 | assert_func(d.ppf(0.09689), 250) 460 | assert_func(d.ppf(0.26482), 300) 461 | assert_func(d.ppf(0.46420), 400) 462 | assert_func(d.ppf(0.64820), 500) 463 | assert_func(d.ppf(0.76027), 550) 464 | 465 | 466 | if __name__ == '__main__': 467 | Arcsine.test() 468 | 469 | 470 | class Cauchy(DistFunc): 471 | @staticmethod 472 | def _pdf(x, mean, sd): 473 | return 1 / (np.pi * sd * (1 + ((x - mean) / sd) ** 2)) 474 | 475 | @staticmethod 476 | def _cdf(x, mean, sd): 477 
| return 1 / np.pi * np.arctan((x - mean) / sd) + 0.5 478 | 479 | @staticmethod 480 | def _ppf(p, mean, sd): 481 | return mean + sd * np.tan(np.pi * (p - 0.5)) 482 | 483 | @staticmethod 484 | def test(): 485 | d = Cauchy(420, 126) 486 | assert_func(d.pdf(246.576), 0.00087280, tol=1e-3) 487 | assert_func(d.pdf(333.288), 0.00171434, tol=1e-3) 488 | assert_func(d.pdf(420.000), 0.00252627, tol=1e-3) 489 | assert_func(d.pdf(506.712), 0.00171434, tol=1e-3) 490 | assert_func(d.pdf(593.424), 0.00087280, tol=1e-3) 491 | assert_func(d.cdf(246.576), 0.20000, tol=1e-3) 492 | assert_func(d.cdf(333.288), 0.30814, tol=1e-3) 493 | assert_func(d.cdf(420.000), 0.50000, tol=1e-3) 494 | assert_func(d.cdf(506.712), 0.69186, tol=1e-3) 495 | assert_func(d.cdf(593.424), 0.80000, tol=1e-3) 496 | assert_func(d.ppf(0.20000), 246.576) 497 | assert_func(d.ppf(0.30814), 333.288) 498 | assert_func(d.ppf(0.50000), 420.000) 499 | assert_func(d.ppf(0.69186), 506.712) 500 | assert_func(d.ppf(0.80000), 593.424) 501 | 502 | 503 | if __name__ == '__main__': 504 | Cauchy.test() 505 | 506 | 507 | class HyperbolicSecant(DistFunc): 508 | @staticmethod 509 | def _pdf(x, sd, mean): 510 | return (1 / (2 * mean)) * (1 / np.cosh(np.pi / 2 * ((x - sd) / mean))) 511 | 512 | @staticmethod 513 | def _cdf(x, sd, mean): 514 | return (2 / np.pi) * np.arctan(np.exp(np.pi / 2 * ((x - sd) / mean))) 515 | 516 | @staticmethod 517 | def _ppf(p, sd, mean): 518 | # Check for valid input 519 | if p <= 0 or p >= 1: 520 | raise ValueError("p must be in (0, 1)") 521 | 522 | # Define the function we want to find the root of 523 | def f(x): 524 | return HyperbolicSecant._cdf(x, sd, mean) - p 525 | 526 | # Initial boundaries for bisection method 527 | lower, upper = sd - 10 * mean, sd + 10 * mean 528 | 529 | # Bisection method 530 | while upper - lower > 1e-6: # 1e-6 is the desired accuracy 531 | midpoint = (upper + lower) / 2 532 | if f(midpoint) > 0: # If the function at the midpoint is > 0, the root must be in the left interval 533 | upper = midpoint 534 | else: # Otherwise, the root must be in the right interval 535 | lower = midpoint 536 | 537 | return (upper + lower) / 2 538 | 539 | @staticmethod 540 | def test(): 541 | d = HyperbolicSecant(420, 126) 542 | 543 | assert_func(d.pdf(329.825), 0.00233248, tol=1e-3) 544 | assert_func(d.pdf(374.913), 0.00341451, tol=1e-3) 545 | assert_func(d.pdf(420.000), 0.00396825, tol=1e-3) 546 | assert_func(d.pdf(465.087), 0.00341451, tol=1e-3) 547 | assert_func(d.pdf(510.175), 0.00233248, tol=1e-3) 548 | assert_func(d.cdf(329.825), 0.20000, tol=1e-3) 549 | assert_func(d.cdf(374.913), 0.32982, tol=1e-3) 550 | assert_func(d.cdf(420.000), 0.50000, tol=1e-3) 551 | assert_func(d.cdf(465.087), 0.67018, tol=1e-3) 552 | assert_func(d.cdf(510.175), 0.80000, tol=1e-3) 553 | assert_func(d.ppf(0.20000), 329.825) 554 | assert_func(d.ppf(0.32982), 374.913) 555 | assert_func(d.ppf(0.50000), 420.000) 556 | assert_func(d.ppf(0.67018), 465.087) 557 | assert_func(d.ppf(0.80000), 510.175) 558 | 559 | 560 | if __name__ == '__main__': 561 | HyperbolicSecant.test() 562 | 563 | 564 | class HalfCauchy(DistFunc): 565 | 566 | @staticmethod 567 | def _pdf(x, mean, sd): 568 | if isinstance(x, (int, float)): 569 | if x < mean: 570 | return 0 571 | else: 572 | return (2 / (np.pi * sd)) / (1 + ((x - mean) / sd) ** 2) 573 | elif isinstance(x, np.ndarray): 574 | y = np.where(x < mean, 0, (2 / (np.pi * sd)) / (1 + ((x - mean) / sd) ** 2)) 575 | return y 576 | 577 | @staticmethod 578 | def _cdf(x, mean, sd): 579 | if isinstance(x, (int, float)): 580 | if 
x < mean: 581 | return 0 582 | else: 583 | return 2 / np.pi * np.arctan((x - mean) / sd) 584 | elif isinstance(x, np.ndarray): 585 | y = np.where(x < mean, 0, 2 / np.pi * np.arctan((x - mean) / sd)) 586 | return y 587 | 588 | @staticmethod 589 | def _ppf(q, mean, sd): 590 | """ 591 | Returns the value of the percent point function (also called inverse cumulative function) for half-Cauchy distribution. 592 | """ 593 | return mean + sd * np.tan(np.pi / 2 * q) 594 | 595 | @staticmethod 596 | def test(): 597 | d = HalfCauchy(420, 126) 598 | assert_func(d.pdf(460.940), 0.004570060, tol=1e-3) 599 | assert_func(d.pdf(547.652), 0.002493360, tol=1e-3) 600 | assert_func(d.pdf(634.364), 0.001297380, tol=1e-3) 601 | assert_func(d.pdf(721.076), 0.000753023, tol=1e-3) 602 | assert_func(d.pdf(807.788), 0.000482474, tol=1e-3) 603 | assert_func(d.cdf(460.940), 0.20000, tol=1e-3) 604 | assert_func(d.cdf(547.652), 0.50415, tol=1e-3) 605 | assert_func(d.cdf(634.364), 0.66171, tol=1e-3) 606 | assert_func(d.cdf(721.076), 0.74767, tol=1e-3) 607 | assert_func(d.cdf(807.788), 0.80000, tol=1e-3) 608 | assert_func(d.ppf(0.20000), 460.940) 609 | assert_func(d.ppf(0.50415), 547.652) 610 | assert_func(d.ppf(0.66171), 634.364) 611 | assert_func(d.ppf(0.74767), 721.076) 612 | assert_func(d.ppf(0.80000), 807.788) 613 | 614 | 615 | if __name__ == '__main__': 616 | HalfCauchy.test() 617 | 618 | 619 | class Logistic(DistFunc): 620 | @staticmethod 621 | def _pdf(x, mean, sd): 622 | # Convert sd to s 623 | s = sd * np.sqrt(3.) / np.pi 624 | 625 | # Probability Density Function 626 | return np.exp(-(x - mean) / s) / (s * (1 + np.exp(-(x - mean) / s)) ** 2) 627 | 628 | @staticmethod 629 | def _cdf(x, mean, sd): 630 | # Convert sd to s 631 | s = sd * np.sqrt(3.) / np.pi 632 | 633 | # Cumulative Distribution Function 634 | return 1 / (1 + np.exp(-(x - mean) / s)) 635 | 636 | @staticmethod 637 | def _ppf(q, mean, sd): 638 | # Convert sd to s 639 | s = sd * np.sqrt(3.) 
/ np.pi 640 | 641 | # Percent-Point Function (Quantile Function) 642 | return mean - s * np.log((1 / q) - 1) 643 | 644 | @staticmethod 645 | def test(): 646 | d = Logistic(420, 126) 647 | assert_func(d.pdf(323.698), 0.00230324, tol=1e-3) 648 | assert_func(d.pdf(371.849), 0.00319894, tol=1e-3) 649 | assert_func(d.pdf(420.000), 0.00359881, tol=1e-3) 650 | assert_func(d.pdf(468.151), 0.00319894, tol=1e-3) 651 | assert_func(d.pdf(516.302), 0.00230324, tol=1e-3) 652 | assert_func(d.cdf(323.698), 0.20000, tol=1e-3) 653 | assert_func(d.cdf(371.849), 0.33333, tol=1e-3) 654 | assert_func(d.cdf(420.000), 0.50000, tol=1e-3) 655 | assert_func(d.cdf(468.151), 0.66667, tol=1e-3) 656 | assert_func(d.cdf(516.302), 0.80000, tol=1e-3) 657 | assert_func(d.ppf(0.20000), 323.698) 658 | assert_func(d.ppf(0.33333), 371.849) 659 | assert_func(d.ppf(0.50000), 420.000) 660 | assert_func(d.ppf(0.66667), 468.151) 661 | assert_func(d.ppf(0.80000), 516.302) 662 | 663 | 664 | if __name__ == '__main__': 665 | Logistic.test() 666 | 667 | 668 | class Uniform(DistFunc): 669 | @staticmethod 670 | def _pdf(x, mean, sd): 671 | """Probability density function""" 672 | a = mean - np.sqrt(3) * sd 673 | b = mean + np.sqrt(3) * sd 674 | return np.where((x >= a) & (x <= b), 1 / (b - a), 0) 675 | 676 | @staticmethod 677 | def _cdf(x, mean, sd): 678 | """Cumulative distribution function""" 679 | a = mean - np.sqrt(3) * sd 680 | b = mean + np.sqrt(3) * sd 681 | return np.where(x < a, 0, np.where(x > b, 1, (x - a) / (b - a))) 682 | 683 | @staticmethod 684 | def _ppf(p, mean, sd): 685 | """Percent-point function (Inverse of cdf)""" 686 | # Ensure p is in [0, 1] 687 | p = np.clip(p, 0, 1) 688 | a = mean - np.sqrt(3) * sd 689 | b = mean + np.sqrt(3) * sd 690 | return a + p * (b - a) 691 | 692 | @staticmethod 693 | def test(): 694 | d = Uniform(420, 126) 695 | assert_func(d.pdf(289.057), 0.00229107, tol=1e-3) 696 | assert_func(d.pdf(354.528), 0.00229107, tol=1e-3) 697 | assert_func(d.pdf(420.000), 0.00229107, tol=1e-3) 698 | assert_func(d.pdf(485.472), 0.00229107, tol=1e-3) 699 | assert_func(d.pdf(550.943), 0.00229107, tol=1e-3) 700 | assert_func(d.cdf(289.057), 0.20000, tol=1e-3) 701 | assert_func(d.cdf(354.528), 0.35000, tol=1e-3) 702 | assert_func(d.cdf(420.000), 0.50000, tol=1e-3) 703 | assert_func(d.cdf(485.472), 0.65000, tol=1e-3) 704 | assert_func(d.cdf(550.943), 0.80000, tol=1e-3) 705 | assert_func(d.ppf(0.20000), 289.057) 706 | assert_func(d.ppf(0.35000), 354.528) 707 | assert_func(d.ppf(0.50000), 420.000) 708 | assert_func(d.ppf(0.65000), 485.472) 709 | assert_func(d.ppf(0.80000), 550.943) 710 | 711 | 712 | if __name__ == '__main__': 713 | Uniform.test() 714 | -------------------------------------------------------------------------------- /sfeprapy/func/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fsepy/SFEPRAPY/02c2864fc4788cb42588c7d455c135389febb578/sfeprapy/func/__init__.py -------------------------------------------------------------------------------- /sfeprapy/func/csv.py: -------------------------------------------------------------------------------- 1 | import csv 2 | from typing import List 3 | 4 | 5 | def csv_to_list_of_dicts(fp: str) -> List[dict]: 6 | """This is used to read csv file containing measurement data exported from Bluebeam. 
7 | 8 | :param fp: 9 | :return: 10 | """ 11 | with open(fp, newline='', encoding='utf-8-sig') as csvfile: 12 | # Read the CSV file 13 | reader = csv.reader(csvfile) 14 | 15 | # Get the headers (first row) 16 | headers = next(reader) 17 | 18 | # Initialize the result list 19 | result = [] 20 | 21 | # Iterate through the CSV file and create the list of dictionaries 22 | for row in reader: 23 | row_dict = {header: value for header, value in zip(headers, row)} 24 | result.append(row_dict) 25 | 26 | return result 27 | -------------------------------------------------------------------------------- /sfeprapy/func/erf.py: -------------------------------------------------------------------------------- 1 | # credit: https://github.com/dougthor42/PyErf 2 | # modified by Ian 2023, implemented numpy vectorisation 3 | 4 | import math 5 | from math import inf 6 | 7 | import numpy as np 8 | 9 | ROOT_2PI = math.sqrt(2 * np.pi) 10 | EXP_NEG2 = math.exp(-2) 11 | MAXVAL = 1e50 #: Inputs above this value are considered infinity. 12 | 13 | 14 | @np.vectorize 15 | def erf(x): 16 | """ 17 | Port of cephes ``ndtr.c`` ``erf`` function. 18 | 19 | See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtr.c 20 | """ 21 | T = [ 22 | 9.60497373987051638749E0, 23 | 9.00260197203842689217E1, 24 | 2.23200534594684319226E3, 25 | 7.00332514112805075473E3, 26 | 5.55923013010394962768E4, 27 | ] 28 | 29 | U = [ 30 | 3.35617141647503099647E1, 31 | 5.21357949780152679795E2, 32 | 4.59432382970980127987E3, 33 | 2.26290000613890934246E4, 34 | 4.92673942608635921086E4, 35 | ] 36 | 37 | # Shorcut special cases 38 | if x == 0: 39 | return 0 40 | if x >= MAXVAL: 41 | return 1 42 | if x <= -MAXVAL: 43 | return -1 44 | 45 | if abs(x) > 1: 46 | return 1 - erfc(x) 47 | 48 | z = x * x 49 | return x * _polevl(z, T, 4) / _p1evl(z, U, 5) 50 | 51 | 52 | @np.vectorize 53 | def erfc(a): 54 | """ 55 | Port of cephes ``ndtr.c`` ``erfc`` function. 56 | 57 | See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtr.c 58 | """ 59 | # approximation for abs(a) < 8 and abs(a) >= 1 60 | P = [ 61 | 2.46196981473530512524E-10, 62 | 5.64189564831068821977E-1, 63 | 7.46321056442269912687E0, 64 | 4.86371970985681366614E1, 65 | 1.96520832956077098242E2, 66 | 5.26445194995477358631E2, 67 | 9.34528527171957607540E2, 68 | 1.02755188689515710272E3, 69 | 5.57535335369399327526E2, 70 | ] 71 | 72 | Q = [ 73 | 1.32281951154744992508E1, 74 | 8.67072140885989742329E1, 75 | 3.54937778887819891062E2, 76 | 9.75708501743205489753E2, 77 | 1.82390916687909736289E3, 78 | 2.24633760818710981792E3, 79 | 1.65666309194161350182E3, 80 | 5.57535340817727675546E2, 81 | ] 82 | 83 | # approximation for abs(a) >= 8 84 | R = [ 85 | 5.64189583547755073984E-1, 86 | 1.27536670759978104416E0, 87 | 5.01905042251180477414E0, 88 | 6.16021097993053585195E0, 89 | 7.40974269950448939160E0, 90 | 2.97886665372100240670E0, 91 | ] 92 | 93 | S = [ 94 | 2.26052863220117276590E0, 95 | 9.39603524938001434673E0, 96 | 1.20489539808096656605E1, 97 | 1.70814450747565897222E1, 98 | 9.60896809063285878198E0, 99 | 3.36907645100081516050E0, 100 | ] 101 | 102 | # Shortcut special cases 103 | if a == 0: 104 | return 1 105 | if a >= MAXVAL: 106 | return 0 107 | if a <= -MAXVAL: 108 | return 2 109 | 110 | x = a 111 | if a < 0: 112 | x = -a 113 | 114 | # computationally cheaper to calculate erf for small values, I guess. 
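    # Note: erf() and erfc() delegate to one another on complementary ranges.
    # erf() handles |x| <= 1 directly and passes larger arguments to erfc();
    # erfc() passes |a| < 1 back to erf(). The mutual call below is therefore
    # at most one level deep and always terminates.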
115 | if x < 1: 116 | return 1 - erf(a) 117 | 118 | z = -a * a 119 | 120 | z = math.exp(z) 121 | 122 | if x < 8: 123 | p = _polevl(x, P, 8) 124 | q = _p1evl(x, Q, 8) 125 | else: 126 | p = _polevl(x, R, 5) 127 | q = _p1evl(x, S, 6) 128 | 129 | y = (z * p) / q 130 | 131 | if a < 0: 132 | y = 2 - y 133 | 134 | return y 135 | 136 | 137 | def _polevl(x, coefs, N): 138 | """ 139 | Port of cephes ``polevl.c``: evaluate polynomial 140 | 141 | See https://github.com/jeremybarnes/cephes/blob/master/cprob/polevl.c 142 | """ 143 | ans = 0 144 | power = len(coefs) - 1 145 | for coef in coefs: 146 | try: 147 | ans += coef * x ** power 148 | except OverflowError: 149 | pass 150 | power -= 1 151 | return ans 152 | 153 | 154 | def _p1evl(x, coefs, N): 155 | """ 156 | Port of cephes ``polevl.c``: evaluate polynomial, assuming coef[N] = 1 157 | 158 | See https://github.com/jeremybarnes/cephes/blob/master/cprob/polevl.c 159 | """ 160 | return _polevl(x, [1] + coefs, N) 161 | 162 | 163 | def _ndtri(y): 164 | """ 165 | Port of cephes ``ndtri.c``: inverse normal distribution function. 166 | 167 | See https://github.com/jeremybarnes/cephes/blob/master/cprob/ndtri.c 168 | """ 169 | # approximation for 0 <= abs(z - 0.5) <= 3/8 170 | P0 = [ 171 | -5.99633501014107895267E1, 172 | 9.80010754185999661536E1, 173 | -5.66762857469070293439E1, 174 | 1.39312609387279679503E1, 175 | -1.23916583867381258016E0, 176 | ] 177 | 178 | Q0 = [ 179 | 1.95448858338141759834E0, 180 | 4.67627912898881538453E0, 181 | 8.63602421390890590575E1, 182 | -2.25462687854119370527E2, 183 | 2.00260212380060660359E2, 184 | -8.20372256168333339912E1, 185 | 1.59056225126211695515E1, 186 | -1.18331621121330003142E0, 187 | ] 188 | 189 | # Approximation for interval z = sqrt(-2 log y ) between 2 and 8 190 | # i.e., y between exp(-2) = .135 and exp(-32) = 1.27e-14. 191 | P1 = [ 192 | 4.05544892305962419923E0, 193 | 3.15251094599893866154E1, 194 | 5.71628192246421288162E1, 195 | 4.40805073893200834700E1, 196 | 1.46849561928858024014E1, 197 | 2.18663306850790267539E0, 198 | -1.40256079171354495875E-1, 199 | -3.50424626827848203418E-2, 200 | -8.57456785154685413611E-4, 201 | ] 202 | 203 | Q1 = [ 204 | 1.57799883256466749731E1, 205 | 4.53907635128879210584E1, 206 | 4.13172038254672030440E1, 207 | 1.50425385692907503408E1, 208 | 2.50464946208309415979E0, 209 | -1.42182922854787788574E-1, 210 | -3.80806407691578277194E-2, 211 | -9.33259480895457427372E-4, 212 | ] 213 | 214 | # Approximation for interval z = sqrt(-2 log y ) between 8 and 64 215 | # i.e., y between exp(-32) = 1.27e-14 and exp(-2048) = 3.67e-890. 
216 | P2 = [ 217 | 3.23774891776946035970E0, 218 | 6.91522889068984211695E0, 219 | 3.93881025292474443415E0, 220 | 1.33303460815807542389E0, 221 | 2.01485389549179081538E-1, 222 | 1.23716634817820021358E-2, 223 | 3.01581553508235416007E-4, 224 | 2.65806974686737550832E-6, 225 | 6.23974539184983293730E-9, 226 | ] 227 | 228 | Q2 = [ 229 | 6.02427039364742014255E0, 230 | 3.67983563856160859403E0, 231 | 1.37702099489081330271E0, 232 | 2.16236993594496635890E-1, 233 | 1.34204006088543189037E-2, 234 | 3.28014464682127739104E-4, 235 | 2.89247864745380683936E-6, 236 | 6.79019408009981274425E-9, 237 | ] 238 | 239 | sign_flag = 1 240 | 241 | if y > (1 - EXP_NEG2): 242 | y = 1 - y 243 | sign_flag = 0 244 | 245 | # Shortcut case where we don't need high precision 246 | # between -0.135 and 0.135 247 | if y > EXP_NEG2: 248 | y -= 0.5 249 | y2 = y ** 2 250 | x = y + y * (y2 * _polevl(y2, P0, 4) / _p1evl(y2, Q0, 8)) 251 | x = x * ROOT_2PI 252 | return x 253 | 254 | x = math.sqrt(-2.0 * math.log(y)) 255 | x0 = x - math.log(x) / x 256 | 257 | z = 1.0 / x 258 | if x < 8.0: # y > exp(-32) = 1.2664165549e-14 259 | x1 = z * _polevl(z, P1, 8) / _p1evl(z, Q1, 8) 260 | else: 261 | x1 = z * _polevl(z, P2, 8) / _p1evl(z, Q2, 8) 262 | 263 | x = x0 - x1 264 | if sign_flag != 0: 265 | x = -x 266 | 267 | return x 268 | 269 | 270 | @np.vectorize 271 | def erfinv(z): 272 | """ 273 | Calculate the inverse error function at point ``z``. 274 | 275 | This is a direct port of the SciPy ``erfinv`` function, originally 276 | written in C. 277 | 278 | Parameters 279 | ---------- 280 | z : numeric 281 | 282 | Returns 283 | ------- 284 | float 285 | 286 | References 287 | ---------- 288 | + https://en.wikipedia.org/wiki/Error_function#Inverse_functions 289 | + http://functions.wolfram.com/GammaBetaErf/InverseErf/ 290 | 291 | Examples 292 | -------- 293 | >>> round(erfinv(0.1), 12) 294 | 0.088855990494 295 | >>> round(erfinv(0.5), 12) 296 | 0.476936276204 297 | >>> round(erfinv(-0.5), 12) 298 | -0.476936276204 299 | >>> round(erfinv(0.95), 12) 300 | 1.38590382435 301 | >>> round(erf(erfinv(0.3)), 3) 302 | 0.3 303 | >>> round(erfinv(erf(0.5)), 3) 304 | 0.5 305 | >>> erfinv(0) 306 | 0 307 | >>> erfinv(1) 308 | inf 309 | >>> erfinv(-1) 310 | -inf 311 | """ 312 | if abs(z) > 1: 313 | raise ValueError("`z` must be between -1 and 1 inclusive") 314 | 315 | # Shortcut special cases 316 | if z == 0: 317 | return 0 318 | if z == 1: 319 | return inf 320 | if z == -1: 321 | return -inf 322 | 323 | # otherwise calculate things. 
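    # The step below uses the identity erf(x) = 2 * ndtr(x * sqrt(2)) - 1, where
    # ndtr is the standard normal CDF; inverting it gives
    # erfinv(z) = ndtri((z + 1) / 2) / sqrt(2).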
324 | return _ndtri((z + 1) / 2.0) / math.sqrt(2) 325 | -------------------------------------------------------------------------------- /sfeprapy/func/xlsx.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | 3 | import openpyxl 4 | 5 | 6 | def xlsx_to_dict(fp: str) -> Dict[str, dict]: 7 | # Load the workbook 8 | wb = openpyxl.load_workbook(fp) 9 | 10 | # Select the first worksheet 11 | sheet = wb.active 12 | 13 | # Get the headers (first row) and the row headers (first column) 14 | headers = [cell.value for cell in sheet[1]] 15 | headers = headers[1:] 16 | row_headers = [cell.value for cell in sheet['A'][1:]] 17 | 18 | # Iterate through the sheet and create the nested dictionary 19 | result = {} 20 | for header, col_idx in zip(headers, range(2, len(headers) + 2)): 21 | col_values = {} 22 | for row_header, row_idx in zip(row_headers, range(2, len(row_headers) + 2)): 23 | col_values[row_header] = sheet.cell(row=row_idx, column=col_idx).value 24 | result[header] = col_values 25 | 26 | return result 27 | 28 | 29 | def dict_to_xlsx(data: Dict[str, dict], fp: str): 30 | """ 31 | 32 | :param data: 33 | :param fp: 34 | :return: 35 | 36 | data = { 37 | "Column1": {"Row1": "A1", "Row2": "A2", "Row3": "A3"}, 38 | "Column2": {"Row1": "B1", "Row2": "B2", "Row3": "B3"}, 39 | "Column3": {"Row1": "C1", "Row2": "C2", "Row3": "C3"}, 40 | } 41 | """ 42 | # check all nested dict 43 | row_headers = None 44 | for k, v in data.items(): 45 | if row_headers: 46 | if row_headers != v.keys(): 47 | missing_keys = list() 48 | missing_keys.extend([_ for _ in row_headers if _ not in v.keys()]) 49 | missing_keys.extend([_ for _ in v.keys() if _ not in row_headers]) 50 | raise IndexError(f'keys do not match between dicts in the list: {missing_keys}') 51 | row_headers = v.keys() 52 | row_headers = tuple(row_headers) 53 | 54 | # Create a new workbook and add a worksheet 55 | wb = openpyxl.Workbook() 56 | ws = wb.active 57 | 58 | # Write the column headers 59 | for col, column_name in enumerate(data.keys(), start=1): 60 | ws.cell(row=1, column=col + 1, value=column_name) 61 | 62 | # Write the row headers 63 | for row, row_name in enumerate(row_headers): 64 | ws.cell(row=row + 2, column=1, value=row_name) 65 | 66 | # Write the data to the worksheet 67 | for col, (column_name, data_) in enumerate(data.items(), start=1): 68 | for row, (row_name, value) in enumerate(data_.items(), start=2): 69 | ws.cell(row=row_headers.index(row_name) + 2, column=col + 1, value=value) 70 | 71 | # Save the workbook to an XLSX file 72 | wb.save(fp) 73 | -------------------------------------------------------------------------------- /sfeprapy/input_parser.py: -------------------------------------------------------------------------------- 1 | # Monte Carlo Simulation Multi-Process Implementation 2 | # Yan Fu, October 2017 3 | 4 | from io import StringIO 5 | from typing import Union, Any, Type 6 | 7 | import numpy as np 8 | 9 | import sfeprapy.dists as dists 10 | from sfeprapy.func.xlsx import dict_to_xlsx 11 | 12 | 13 | class TrueToScipy: 14 | """Converts 'normal' distribution parameters, e.g. normal, standard deviation etc., to Scipy recognisable 15 | parameters, e.g. loc, scale etc. 
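    For example, `gumbel_r_(mean=420, sd=126)` returns approximately `{'loc': 363.3, 'scale': 98.3}`,
    which can be passed directly to `scipy.stats.gumbel_r`; the numbers here are illustrative only.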
16 | """ 17 | 18 | @staticmethod 19 | def gumbel_r_(mean: float, sd: float, **_): 20 | # parameters Gumbel W&S 21 | alpha = 1.282 / sd 22 | u = mean - 0.5772 / alpha 23 | 24 | # parameters Gumbel scipy 25 | scale = 1 / alpha 26 | loc = u 27 | 28 | return dict(loc=loc, scale=scale) 29 | 30 | @staticmethod 31 | def lognorm_(mean: float, sd: float, **_): 32 | cov = sd / mean 33 | 34 | sigma_ln = np.sqrt(np.log(1 + cov ** 2)) 35 | miu_ln = np.log(mean) - 1 / 2 * sigma_ln ** 2 36 | 37 | s = sigma_ln 38 | loc = 0 39 | scale = np.exp(miu_ln) 40 | 41 | return dict(s=s, loc=loc, scale=scale) 42 | 43 | @staticmethod 44 | def lognorm_mod_(mean: float, sd: float, **_): 45 | return TrueToScipy.lognorm_(mean, sd, **_) 46 | 47 | @staticmethod 48 | def norm_(mean: float, sd: float, **_): 49 | loc = mean 50 | scale = sd 51 | 52 | return dict(loc=loc, scale=scale) 53 | 54 | @staticmethod 55 | def uniform_(ubound: float, lbound: float, **_): 56 | if lbound > ubound: 57 | lbound += ubound 58 | ubound = lbound - ubound 59 | lbound -= ubound 60 | 61 | loc = lbound 62 | scale = ubound - lbound 63 | 64 | return dict(loc=loc, scale=scale) 65 | 66 | 67 | class InputParser: 68 | """Converts """ 69 | 70 | def __init__(self, dist_params: dict, n: int): 71 | assert isinstance(dist_params, dict) 72 | assert isinstance(n, int) 73 | 74 | self.__n = n 75 | self.__in_raw = dist_params 76 | self.__in = InputParser.unflatten_dict(dist_params) 77 | 78 | def to_dict(self): 79 | n = self.__n 80 | dist_params = self.__in 81 | dict_out = dict() 82 | 83 | for k, v in dist_params.items(): 84 | if isinstance(v, float) or isinstance(v, int) or isinstance(v, float): 85 | dict_out[k] = np.full((n,), v, dtype=float) 86 | elif isinstance(v, str): 87 | dict_out[k] = np.full( 88 | (n,), v, dtype=np.dtype("U{:d}".format(len(v))) 89 | ) 90 | elif isinstance(v, np.ndarray) or isinstance(v, list): 91 | dict_out[k] = list(np.full((n, len(v)), v, dtype=float)) 92 | elif isinstance(v, dict): 93 | if "dist" in v: 94 | try: 95 | dict_out[k] = InputParser._sampling(v, n) 96 | except KeyError: 97 | raise KeyError(f"Missing parameters in input variable {k}.") 98 | elif "ramp" in v: 99 | s_ = StringIO(v["ramp"]) 100 | d_ = np.loadtxt(s_, delimiter=',') 101 | t_ = d_[:, 0] 102 | v_ = d_[:, 1] 103 | if all(v_ == v_[0]): 104 | f_interp = v_[0] 105 | else: 106 | def f_interp(x): 107 | return np.interp(x, t_, v_) 108 | dict_out[k] = np.full((n,), f_interp) 109 | else: 110 | raise ValueError(f"Unknown input data type for {k}. {v}.") 111 | elif v is None: 112 | dict_out[k] = np.full((n,), np.nan, dtype=float) 113 | else: 114 | raise TypeError(f"Unknown input data type for {k}.") 115 | 116 | dict_out["index"] = np.arange(0, n, 1) 117 | return dict_out 118 | 119 | def to_xlsx(self, fp: str): 120 | dict_to_xlsx({i: InputParser.flatten_dict(v) for i, v in self.to_dict().items()}, fp) 121 | 122 | @staticmethod 123 | def unflatten_dict(dict_in: dict) -> dict: 124 | """Invert flatten_dict. 
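        For example (mirroring the `flatten_dict` doctest below):
        >>> dict_in = {'a': 1, 'b:b1': 21, 'b:b2': 22}
        >>> assert InputParser.unflatten_dict(dict_in) == {'a': 1, 'b': {'b1': 21, 'b2': 22}}  # True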
125 | 126 | :param dict_in: 127 | :return dict_out: 128 | """ 129 | dict_out = dict() 130 | 131 | for k, v in dict_in.items(): 132 | InputParser.__unflatten_dict(k, v, dict_out) 133 | 134 | return dict_out 135 | 136 | @staticmethod 137 | def __unflatten_dict(k: str, v: Any, dict_out: dict): 138 | if ":" in k: 139 | k1, *k2 = k.split(':') 140 | if k1 not in dict_out: 141 | dict_out[k1] = dict() 142 | InputParser.__unflatten_dict(':'.join(k2), v, dict_out[k1]) 143 | else: 144 | dict_out[k] = v 145 | 146 | @staticmethod 147 | def flatten_dict(dict_in: dict) -> dict: 148 | dict_out = dict() 149 | InputParser.__flatten_dict(dict_in, dict_out) 150 | return dict_out 151 | 152 | @staticmethod 153 | def __flatten_dict(dict_in: dict, dict_out: dict, history: str = None): 154 | """Converts two levels dict to single level dict. Example input and output see _test_dict_flatten. 155 | >>> dict_in = { 156 | >>> 'a': 1, 157 | >>> 'b': {'b1': 21, 'b2': 22}, 158 | >>> 'c': {'c1': 31, 'c2': 32, 'c3': 33} 159 | >>> } 160 | >>> output = { 161 | >>> 'a': 1, 162 | >>> 'b:b1': 21, 163 | >>> 'b:b2': 22, 164 | >>> 'c:c1': 31, 165 | >>> 'c:c2': 32, 166 | >>> 'c:c3': 33, 167 | >>> } 168 | >>> assert InputParser.flatten_dict(dict_in) == output # True 169 | 170 | :param dict_in: Any two levels (or less) dict. 171 | :return dict_out: Single level dict. 172 | """ 173 | for k, v in dict_in.items(): 174 | if isinstance(v, dict): 175 | InputParser.__flatten_dict(v, dict_out=dict_out, history=k if history is None else f'{history}:{k}') 176 | else: 177 | dict_out[f'{k}' if history is None else f'{history}:{k}'] = v 178 | 179 | @staticmethod 180 | def _sampling(dist_params: dict, num_samples: int, randomise: bool = True) -> Union[float, np.ndarray]: 181 | """A reimplementation of _sampling_scipy but without scipy""" 182 | dist_name = ''.join(dist_params.pop('dist').replace('_', ' ').strip().title().split()) 183 | if dist_name == 'Norm': 184 | dist_name = 'Normal' 185 | elif dist_name == 'GumbelR': 186 | dist_name = 'Gumbel' 187 | elif dist_name == 'Uniform': 188 | if ( 189 | 'lbound' in dist_params and 'ubound' in dist_params and 190 | 'mean' not in dist_params and 'sd' not in dist_params 191 | ): 192 | a = dist_params.pop('lbound') 193 | b = dist_params.pop('ubound') 194 | mean = (a + b) / 2 195 | sd = (b - a) / (2 * np.sqrt(3)) 196 | dist_params['mean'] = mean 197 | dist_params['sd'] = sd 198 | elif dist_name == 'LognormMod': 199 | dist_name = 'LognormalMod' 200 | elif dist_name == 'Lognorm': 201 | dist_name = 'Lognormal' 202 | elif dist_name == 'Constant': 203 | if 'ubound' in dist_params and 'lbound' in dist_params: 204 | dist_params['value'] = (dist_params.pop('lbound') + dist_params.pop('ubound')) / 2. 205 | 206 | dist_cls: Type[dists.DistFunc] = getattr(dists, dist_name) 207 | dist_obj: dists.DistFunc = dist_cls(**dist_params) 208 | lim_1 = None if 'lbound' not in dist_params else dist_params['lbound'] 209 | lim_2 = None if 'ubound' not in dist_params else dist_params['ubound'] 210 | return dist_obj.sampling(n=num_samples, lim_1=lim_1, lim_2=lim_2, shuffle=randomise) 211 | 212 | @staticmethod 213 | def _sampling_scipy(dist_params: dict, num_samples: int, randomise: bool = True) -> Union[float, np.ndarray]: 214 | """Evacuate sampled values based on a defined distribution. This is build upon `scipy.stats` library. 215 | 216 | :param dist_params: Distribution inputs, required keys are distribution dependent, should be aligned with inputs 217 | required in the scipy.stats. 
Additional compulsory keys are:
218 |             `dist`: str, distribution type.
219 |         :param num_samples: Number of samples to be generated.
220 |         :param randomise: Whether to randomise the sampled values.
221 |         :return samples: Sampled values based upon `dist` in the range [`lbound`, `ubound`] with `num_samples` number
222 |             of values.
223 |         """
224 |         import scipy.stats as stats
225 | 
226 |         if dist_params['dist'] == 'discrete_':
227 |             v_ = dist_params['values']
228 |             if isinstance(v_, str):
229 |                 assert ',' in v_, f'`discrete_` distribution `values` parameter is not a list separated by comma.'
230 |                 v_ = [float(i.strip()) for i in v_.split(',')]
231 | 
232 |             w_ = dist_params['weights']
233 |             if isinstance(w_, str):
234 |                 assert ',' in w_, f'`discrete_`:`weights` is not a list of numbers separated by comma.'
235 |                 w_ = [float(i.strip()) for i in w_.split(',')]
236 | 
237 |             assert len(v_) == len(w_), f'Length of values ({len(v_)}) and weights ({len(w_)}) do not match.'
238 |             assert abs(sum(w_) - 1.) < 1e-9, f'Sum of all weights should be unity, got {sum(w_)}.'
239 | 
240 |             w_ = [int(round(i * num_samples)) for i in w_]
241 |             if (sum_sampled := sum(w_)) < num_samples:
242 |                 for i in np.random.choice(np.arange(len(w_)), size=num_samples - sum_sampled):
243 |                     w_[i] += 1
244 |             elif sum_sampled > num_samples:
245 |                 for i in np.random.choice(np.arange(len(w_)), size=sum_sampled - num_samples):
246 |                     w_[i] -= 1
247 |             w_ = np.cumsum(w_)
248 |             assert w_[-1] == num_samples, f'Total of the rounded weights does not match `num_samples`.'
249 |             samples = np.empty((num_samples,), dtype=float)
250 |             for i, v__ in enumerate(v_):
251 |                 if i == 0:
252 |                     samples[0:w_[i]] = v__
253 |                 else:
254 |                     samples[w_[i - 1]:w_[i]] = v__
255 | 
256 |             if randomise:
257 |                 np.random.shuffle(samples)
258 | 
259 |             return samples
260 | 
261 |         if dist_params['dist'] == 'constant_':
262 |             return np.full((num_samples,), (dist_params['lbound'] + dist_params['ubound']) / 2, dtype=float)
263 | 
264 |         # sample CDF points (y-axis value)
265 |         def generate_cfd_q(dist, dist_params_scipy, lbound, ubound, num_samples_=None):
266 |             num_samples_ = num_samples if num_samples_ is None else num_samples_
267 |             cfd_q_ = np.linspace(
268 |                 getattr(stats, dist).cdf(x=lbound, **dist_params_scipy),
269 |                 getattr(stats, dist).cdf(x=ubound, **dist_params_scipy),
270 |                 num_samples_,
271 |             )
272 |             samples_ = getattr(stats, dist).ppf(q=cfd_q_, **dist_params_scipy)
273 |             return samples_
274 | 
275 |         # convert true distribution parameters to scipy distribution parameters
276 |         try:
277 |             if dist_params['dist'] == 'lognorm_mod_':
278 |                 dist_params_scipy = getattr(TrueToScipy, 'lognorm_')(
279 |                     **dist_params
280 |                 )
281 |                 samples = generate_cfd_q(
282 |                     dist='lognorm', dist_params_scipy=dist_params_scipy, lbound=dist_params['lbound'],
283 |                     ubound=dist_params['ubound']
284 |                 )
285 |                 samples = 1 - samples
286 |             elif dist_params['dist'] == 'br187_fuel_load_density_':
287 |                 dist_params_list = list()
288 |                 dist_params_list.append(
289 |                     dict(dist='gumbel_r_', lbound=dist_params['lbound'], ubound=dist_params['ubound'], mean=780,
290 |                          sd=234))
291 |                 dist_params_list.append(
292 |                     dict(dist='gumbel_r_', lbound=dist_params['lbound'], ubound=dist_params['ubound'], mean=420,
293 |                          sd=126))
294 |                 samples_ = list()
295 |                 for dist_params in dist_params_list:
296 |                     dist_params_scipy = getattr(TrueToScipy, dist_params['dist'])(**dist_params)
297 |                     samples__ = generate_cfd_q(
298 |                         dist=dist_params['dist'].rstrip('_'), dist_params_scipy=dist_params_scipy,
299 |                         lbound=dist_params['lbound'],
ubound=dist_params['ubound'] 300 | ) 301 | samples_.append(samples__) 302 | samples = np.random.choice(np.append(*samples_), num_samples, replace=False) 303 | elif dist_params['dist'] == 'br187_hrr_density_': 304 | dist_params_list = list() 305 | dist_params_list.append(dict(dist='uniform_', lbound=0.32, ubound=0.57)) 306 | dist_params_list.append(dict(dist='uniform_', lbound=0.15, ubound=0.65)) 307 | samples_ = list() 308 | for dist_params in dist_params_list: 309 | dist_params_scipy = getattr(TrueToScipy, dist_params['dist'])(**dist_params) 310 | samples__ = generate_cfd_q( 311 | dist=dist_params['dist'].rstrip('_'), dist_params_scipy=dist_params_scipy, 312 | lbound=dist_params['lbound'], ubound=dist_params['ubound'] 313 | ) 314 | samples_.append(samples__) 315 | samples = np.random.choice(np.append(*samples_), num_samples, replace=False) 316 | else: 317 | dist_params_scipy = getattr(TrueToScipy, dist_params['dist'])(**dist_params) 318 | samples = generate_cfd_q( 319 | dist=dist_params['dist'].rstrip('_'), dist_params_scipy=dist_params_scipy, 320 | lbound=dist_params['lbound'], ubound=dist_params['ubound'] 321 | ) 322 | 323 | except Exception as e: 324 | try: 325 | samples = generate_cfd_q( 326 | dist=dist_params['dist'], dist_params_scipy=dist_params, lbound=dist_params['lbound'], 327 | ubound=dist_params['ubound'] 328 | ) 329 | except AttributeError: 330 | raise ValueError(f"Unknown distribution type {dist_params['dist']}, {e}") 331 | 332 | samples[samples == np.inf] = dist_params['ubound'] 333 | samples[samples == -np.inf] = dist_params['lbound'] 334 | 335 | if "permanent" in dist_params: 336 | samples += dist_params["permanent"] 337 | 338 | if randomise: 339 | np.random.shuffle(samples) 340 | 341 | return samples 342 | -------------------------------------------------------------------------------- /sfeprapy/mcs/__init__.py: -------------------------------------------------------------------------------- 1 | # Monte Carlo Simulation Multi-Process Implementation 2 | # Yan Fu, October 2017 3 | 4 | import multiprocessing as mp 5 | import random 6 | import shutil 7 | import time 8 | import zipfile 9 | from abc import ABC, abstractmethod 10 | from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed 11 | from inspect import getfullargspec 12 | from io import BytesIO, TextIOWrapper 13 | from os import path, remove, makedirs 14 | from typing import Callable, Optional, Dict 15 | 16 | import numpy as np 17 | 18 | from sfeprapy.input_parser import InputParser 19 | 20 | 21 | class MCSSingle(ABC): 22 | def __init__(self, name: str, n_simulations: int, sim_kwargs: dict, save_dir: Optional[str] = None): 23 | assert n_simulations > 0 24 | 25 | self.name: str = name # case name 26 | self.n_sim: int = n_simulations # number of simulation 27 | self.input = InputParser(sim_kwargs, n_simulations) 28 | self.save_dir: Optional[str] = save_dir 29 | 30 | # {'var1': [0, ...], 'var2': [2, 3, ...], ...} 31 | self.__output: Optional[np.ndarray] = None # {'res1': [...], 'res2': [...], ...} 32 | 33 | @property 34 | def input_keys(self) -> tuple[tuple, tuple]: 35 | """Placeholder method. The input value names from the Monte Carlo Simulation deterministic calculation 36 | routine. 
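        For a hypothetical worker `def worker(a, b, c=1)`, this property returns `(('a', 'b', 'c'), (1,))`:
        every argument name, plus the default values of the trailing optional arguments.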
37 |         :return: (args, defaults) where `args` is a tuple of all argument names and `defaults` is a tuple of the
38 |             default values of the trailing optional arguments (`None` if the worker has no optional arguments)
39 |         """
40 |         _ = getfullargspec(self.worker)
41 |         return tuple(_.args), _.defaults
42 | 
43 |     def run(self, p: Optional[ProcessPoolExecutor] = None, set_progress: Optional[Callable] = None, progress_0: int = 0,
44 |             save: bool = False, save_archive: bool = False):
45 |         # ======================
46 |         # prepare input iterable
47 |         # ======================
48 |         kwargs_from_input = self.input.to_dict()
49 |         keys_from_worker, defaults_from_worker = self.input_keys
50 |         n_keys_from_worker = len(keys_from_worker) - (0 if defaults_from_worker is None else len(defaults_from_worker))
51 |         keys_from_worker_required = keys_from_worker[:n_keys_from_worker]
52 |         keys_from_worker_optional = keys_from_worker[n_keys_from_worker:]
53 | 
54 |         # check if all the required arguments are provided in the sampled inputs
55 |         missing_args = list()
56 |         for k in keys_from_worker_required:
57 |             if k not in kwargs_from_input:
58 |                 missing_args.append(k)
59 |         if len(missing_args) > 0:
60 |             raise ValueError(f'Missing arguments: {missing_args}.')
61 | 
62 |         nested_args = list(kwargs_from_input[k] for k in keys_from_worker_required)
63 | 
64 |         for i, k in enumerate(keys_from_worker_optional):
65 |             if k in kwargs_from_input:
66 |                 nested_args.append(kwargs_from_input[k])
67 |             else:
68 |                 nested_args.append((defaults_from_worker[i] for __ in range(self.n_sim)))
69 | 
70 |         # ===============
71 |         # start processes
72 |         # ===============
73 |         output = list()
74 |         if p is None:
75 |             if set_progress is not None:
76 |                 for i, arg in enumerate(zip(*nested_args)):
78 |                     output.append(self.worker(*arg))
79 |                     set_progress(progress_0 + i)
80 |             else:
81 |                 for arg in zip(*nested_args):
82 |                     output.append(self.worker(*arg))
83 |         else:
84 |             futures = {p.submit(self.worker, *arg) for arg in zip(*nested_args)}
85 |             if set_progress is not None:
86 |                 for i, future in enumerate(as_completed(futures), start=1):
87 |                     output.append(future.result())
88 |                     set_progress(progress_0 + i)
89 |             else:
90 |                 for i, future in enumerate(as_completed(futures), start=1):
91 |                     output.append(future.result())
92 | 
93 |         self.__output = np.array(output)
94 | 
95 |         if save:
96 |             self.save_csv(dir_save=None, archive=save_archive)
97 | 
98 |         return self.__output
99 | 
100 |     @property
101 |     @abstractmethod
102 |     def worker(self) -> Callable:
103 |         """Placeholder method. The Monte Carlo Simulation deterministic calculation routine.
104 |         :return:
105 |         """
106 |         raise NotImplementedError('This method should be overridden by a child class')
107 | 
108 |     @property
109 |     @abstractmethod
110 |     def output_keys(self) -> tuple:
111 |         """Placeholder method. The returned value names from the Monte Carlo Simulation deterministic calculation
112 |         routine.
113 |         :return:
114 |         """
115 |         raise NotImplementedError('This method should be overridden by a child class')
116 | 
117 |     @property
118 |     def output(self):
119 |         return self.__output
120 | 
121 |     @output.setter
122 |     def output(self, d: np.ndarray):
123 |         self.__output = d
124 | 
125 |     @staticmethod
126 |     def make_pdf(data: np.ndarray, bin_width: float = 0.2) -> (np.ndarray, np.ndarray):
127 |         # Set all time equivalence to be no more than 5 hours (18000 seconds)
128 |         data[data >= 18000.] = 17999.999
129 | 
130 |         # Set all zero or negative time equivalence to a small positive value so these samples fall within the first bin
131 |         data[data <= 0] = 1e-3
132 | 
133 |         # [s] -> [min]
134 |         data /= 60.
135 | 
136 |         assert np.nanmax(data) < 300.
137 |         assert np.nanmin(data) > 0.
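        # Note: y_pdf computed below is the probability mass per bin (the values
        # sum to 1 across bins); divide by bin_width to convert it to a density.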
138 | 139 | edges = np.arange(0, 300 + bin_width, bin_width) 140 | x = (edges[1:] + edges[:-1]) / 2 # make x-axis values, i.e. time equivalence 141 | 142 | y_pdf = np.histogram(data, edges)[0] / len(data) 143 | 144 | return x, y_pdf 145 | 146 | @staticmethod 147 | def make_cdf(data: np.ndarray, bin_width: float = 0.2): 148 | x, y_pdf = MCSSingle.make_pdf(data=data, bin_width=bin_width) 149 | return x, np.cumsum(y_pdf) 150 | 151 | def save_csv(self, dir_save: Optional[str] = None, archive: bool = True): 152 | """Saves simulation output as a csv file, either in a folder (if `dir_name` is a folder) or in a zip file (if 153 | `dir_name` is a zip file path). `dir_name` should be cleaned properly before passing into this method.""" 154 | if dir_save is None: 155 | dir_save = self.save_dir 156 | assert dir_save 157 | assert path.exists(dir_save), f'Directory does not exist {dir_save}' 158 | assert self.__output is not None 159 | 160 | # create byte object representing the save data/results 161 | if isinstance(self.__output, np.ndarray): 162 | content = BytesIO() 163 | np.savetxt(content, self.__output, delimiter=",", header=','.join(self.output_keys), fmt='%g', comments='') 164 | content.seek(0) 165 | elif self.__output is None: 166 | raise ValueError('No results to save') 167 | else: 168 | raise ValueError(f'Unknown results data type {type(self.__output)}') 169 | 170 | # save result to file 171 | if archive: 172 | # in a zip file 173 | for i in range(40): 174 | try: 175 | with zipfile.ZipFile(dir_save, 'a', compression=zipfile.ZIP_DEFLATED) as f_zip: 176 | f_zip.writestr(f'{self.name}.csv', content.read(), compress_type=zipfile.ZIP_DEFLATED) 177 | return 178 | except Exception: 179 | time.sleep(random.randint(1, 5)) 180 | else: 181 | # in a folder 182 | with open(path.join(dir_save, f'{self.name}.csv'), 'wb+') as f: 183 | shutil.copyfileobj(content, f) 184 | return 185 | 186 | def load_output_from_file(self, fp: str): 187 | fp = path.realpath(fp) 188 | 189 | if zipfile.is_zipfile(fp): 190 | with zipfile.ZipFile(fp, 'r') as f_zip: 191 | with f_zip.open(f'{self.name}.csv') as f: 192 | self.__output = np.genfromtxt(TextIOWrapper(f), delimiter=',', skip_header=1, ) 193 | else: 194 | fp = path.join(fp, f'{self.name}.csv') 195 | self.__output = np.genfromtxt(fp, delimiter=',', skip_header=1, ) 196 | 197 | assert (self.n_sim, len(self.output_keys)) == tuple(self.__output.shape) 198 | 199 | 200 | class MCS(ABC): 201 | """An abstract class purposed to provide infrastructure in supporting time equivalence stochastic analysis utilising 202 | Monte Carlo simulation. 203 | 204 | To help to understand this class, a brief process of a MCS is outlined below: 205 | 206 | 1. stochastic problem definition (i.e. raw input parameters from the user). 207 | 2. sample deterministic parameters from the stochastic inputs 208 | 3. iterate the sampled parameters and run deterministic calculation 209 | 4. summarise results 210 | 211 | Following above, this object consists of four primary methods, each matching one of the above step: 212 | 213 | `MCS.mcs_inputs` 214 | A method to intake user inputs, restructure data so usable within MCS. 215 | `MCS.mcs_sampler` 216 | A method to sample deterministic parameters from the stochastic parameters, produces input to be used in 217 | the next step. 218 | `MCS.mcs_deterministic_calc` 219 | A method to carry out deterministic calculation. 220 | NOTE! This method needs to be re-defined in a child class. 221 | `MCS.mcs_post_per_case` 222 | A method to post-processing results. 
223 |         NOTE! This method needs to be re-defined in a child class.
224 |     """
225 | 
226 |     # DEFAULT_SAVE_FOLDER_NAME = "{}.pra"  #
227 | 
228 |     def __init__(self):
229 |         self.__in_fp: str = ''
230 |         self.__in_dict: Optional[dict] = None  # input parameters
231 |         self.__mp_pool: Optional['mp.pool.Pool'] = None
232 |         self.mcs_cases: Dict[str, MCSSingle] = dict()
233 | 
234 |     def get_save_dir(self):
235 |         return path.join(path.dirname(self.__in_fp), f'{path.splitext(path.basename(self.__in_fp))[0]}.out')
236 | 
237 |     def get_inputs_dict(self):
238 |         return self.__in_dict
239 | 
240 |     def get_inputs_file_path(self):
241 |         return self.__in_fp
242 | 
243 |     def set_inputs_dict(self, data: dict):
244 |         for case_name_, kwargs_ in data.items():
245 |             self.mcs_cases[case_name_] = self.new_mcs_case(
246 |                 name=case_name_, n_simulations=kwargs_['n_simulations'], sim_kwargs=kwargs_,
247 |                 save_dir=self.get_save_dir()
248 |             )
249 | 
250 |         self.__in_dict = data
251 | 
252 |     def set_inputs_file_path(self, fp):
253 |         fp = path.realpath(fp)
254 |         assert path.exists(fp)
255 | 
256 |         if fp.endswith(".xlsx"):
257 |             from openpyxl import load_workbook
258 |             # Get the values from the worksheet
259 |             rows = load_workbook(fp).active.values
260 | 
261 |             # Convert the rows to a dictionary
262 |             data = {}
263 |             keys = next(rows)
264 |             for k in keys[1:]:
265 |                 data[k] = dict(case_name=k)
266 |             for row in rows:
267 |                 for key_, row_ in zip(keys[1:], row[1:]):
268 |                     data[key_][row[0]] = row_
269 |             self.__in_fp = fp
270 |             self.__in_dict = data
271 | 
272 |         elif fp.endswith(".xls"):
273 |             from xlrd import open_workbook
274 | 
275 |             # Get the first worksheet
276 |             worksheet = open_workbook(fp).sheet_by_index(0)
277 | 
278 |             # Extract the headers from the first row
279 |             headers = [worksheet.cell_value(0, col) for col in range(worksheet.ncols)][1:]
280 | 
281 |             # Convert the rows to a dictionary
282 |             data = {}
283 |             for col_index, case_name_ in enumerate(headers):
284 |                 data_ = dict(case_name=case_name_)
285 |                 for row_index in range(1, worksheet.nrows):
286 |                     data_[worksheet.cell_value(row_index, 0)] = worksheet.cell_value(row_index, col_index + 1)
287 |                 data[case_name_] = data_
288 | 
289 |         elif fp.endswith(".csv"):
290 |             raise NotImplementedError()
291 |         else:
292 |             raise ValueError(f"Unknown input file format, {path.basename(fp)}")
293 | 
294 |         case_names = keys[1:] if fp.endswith(".xlsx") else headers
295 |         if len(case_names) != len(set(case_names)): raise ValueError(f'case_name not unique')
296 | 
297 |         self.__in_fp = fp
298 | 
299 |         self.set_inputs_dict(data)
300 | 
301 |         return data
302 | 
303 |     @property
304 |     @abstractmethod
305 |     def new_mcs_case(self) -> MCSSingle:
306 |         raise NotImplementedError('This method should be implemented by the child class.')
307 | 
308 |     def run(
309 |             self,
310 |             n_proc: int = 1,
311 |             set_progress: Optional[Callable] = None,
312 |             set_progress_max: Optional[Callable] = None,
313 |             save: bool = False,
314 |             save_archive: bool = False,
315 |             cases_to_run: Optional[list] = None,
316 |             concurrency_strategy: Optional[int] = 0,
317 |     ):
318 |         # check if all `cases_to_run` exist in `self.mcs_cases`
319 |         if cases_to_run:
320 |             undefined_case_name_by_user = list()
321 |             for case_name in cases_to_run:
322 |                 if case_name not in self.mcs_cases:
323 |                     undefined_case_name_by_user.append(case_name)
324 |             if undefined_case_name_by_user:
325 |                 raise ValueError(
326 |                     f'The following cases are specified to run but they do not exist: {undefined_case_name_by_user}. '
327 |                     f'Available case names: {self.mcs_cases.keys()}.'
328 | ) 329 | else: 330 | del undefined_case_name_by_user 331 | 332 | '''Concurrency Strategy 333 | 334 | Procedure description: 335 | 336 | 1. Go through each simulation case and to the following. 337 | 1.1 Task 1: Prepare arguments for simulation iterations. 338 | 1.2 Go through each simulation iteration in the simulation case, for each simulation iteration do the 339 | following. 340 | 1.2.1 Task 2: Perform the computation. 341 | 1.3 Write output to disk. 342 | 343 | Terminology: 344 | 345 | 1. Simulation iteration: This refers to a single run of the simulation. The computation-intensive task (task 346 | 2) happens here. 347 | 2. Simulation case: This refers to a set of simulation iterations. It involves multiple iterations of 348 | task 2 but also includes the preparation/sampling of inputs (task 1) and the output saving operation 349 | (task 3). 350 | 3. The E/W ratio, which reflects the balance of computational workloads across available processing units. 351 | 4. The overhead cost (O), which involves the expenses associated with managing multiple processes. 352 | 5. The blocking cost (B), which reflects the performance impact of I/O bound operations blocking CPU-bound 353 | tasks. 354 | 355 | In the application of this class, many simulation cases are typically present. Each simulation case consists 356 | of multiple simulation iterations. To improve computation performance, multiprocessing is used to leverage 357 | the multi-core nature of modern CPUs. There are two possible strategies to apply multiprocessing, 358 | based on the level of parallelism: 359 | 360 | Strategy 1: Case-level Parallelism 361 | 362 | In this strategy, each simulation case is allocated to a separate process. This strategy is beneficial 363 | when the ratio of executions (simulation iterations) to available workers (E/W) is significant. It can 364 | efficiently utilise CPU resources when execution times within each case are roughly equal. However, 365 | if there's significant variation in execution times within each case, some CPU cores might be 366 | underutilized if they finish their tasks much earlier than others. 367 | 368 | Strategy 2: Iteration-level Parallelism 369 | 370 | In this strategy, each simulation iteration is allocated to a separate process. This can provide a more 371 | balanced workload across processes, especially when there's high variability in the execution times of 372 | iterations. However, there's additional overhead involved in managing more processes, and the main 373 | process could become a bottleneck if it's blocked by I/O operations. 374 | 375 | Things to Consider: 376 | 377 | Workload Distribution and Balance: Workload balance is a critical aspect in multiprocessing. A skewed 378 | workload can lead to idle CPU resources when some processes finish much earlier than others. For better 379 | load balancing, dynamic task scheduling strategies might be required and this is done through the 380 | `ProcessPoolExecutor` but may raise cost in strategy 1. 381 | 382 | Overhead of Multiprocessing: While multiprocessing can speed up computation, it also introduces overhead 383 | when perform `submit()` in `ProcessPoolExecutor`. 384 | It's crucial to ensure that the gain from multiprocessing outweighs the overhead costs. Otherwise, 385 | the overall performance might be worse than a single-process implementation. 386 | 387 | I/O Bound Operations: Task 3 (saving output to disk) is I/O bound and could potentially block CPU-bound 388 | tasks. 
Consider moving the I/O operations to a separate process, or use asynchronous I/O if possible, 389 | to prevent blocking the CPU-bound tasks. 390 | 391 | Execution Time Variation: The estimation of E/W seems to be based on the assumption that each execution 392 | takes roughly the same amount of time. If the execution times vary significantly, this estimation might 393 | not accurately reflect the workload. 394 | 395 | Strategy Selection: 396 | 397 | To assist in this decision-making process, it can be useful to represent these factors in a cost function, 398 | where the total cost is the sum of the E/W ratio, overhead cost, and blocking cost: 399 | 400 | Cost = E/W + O + B 401 | 402 | The strategy with the lowest computed cost could be selected as the optimal strategy for a given situation. 403 | However, keep in mind that quantifying costs like O and B can be challenging, and they may vary depending on 404 | factors such as hardware characteristics, operating system behavior, and the specific nature of the tasks. 405 | 406 | As a temporary measure, an E/W ratio threshold of 1.5 is currently being used as the sole determinant of the 407 | strategy, with the understanding that this approach may not fully capture the complexities of the costs 408 | involved. Further research and testing are required to refine the cost function and its use in strategy 409 | selection. 410 | 411 | Moreover, it can be beneficial to experiment with different strategies and configurations, as well as to 412 | perform benchmark tests under a variety of conditions, to find the optimal solution for your particular 413 | scenario. Remember that this cost function and threshold might not be optimal for all scenarios and may need 414 | to be adjusted based on empirical data.''' 415 | 416 | if cases_to_run: 417 | n_case = sum([1 if k in cases_to_run else 0 for k, v in self.mcs_cases.items()]) 418 | n_sim = sum([v.n_sim if k in cases_to_run else 0 for k, v in self.mcs_cases.items()]) 419 | else: 420 | n_case = len(self.mcs_cases) 421 | n_sim = sum([v.n_sim for k, v in self.mcs_cases.items()]) 422 | 423 | if concurrency_strategy == 0: 424 | if 4 < n_proc < n_case: 425 | concurrency_strategy = 1 426 | else: 427 | concurrency_strategy = 2 428 | 429 | if save: 430 | self.save_init(archive=save_archive) 431 | 432 | progress_0 = None 433 | if n_proc == 0: 434 | for mcs_case_name, mcs_case in self.mcs_cases.items(): # Reuse the executor for 3 sets of tasks 435 | if cases_to_run and mcs_case_name not in cases_to_run: 436 | continue 437 | progress_0 = 0 if progress_0 is None else progress_0 + mcs_case.n_sim 438 | mcs_case.run() 439 | if save: 440 | mcs_case.save_csv(archive=save_archive) 441 | elif concurrency_strategy == 1: 442 | try: 443 | set_progress_max(n_case) 444 | except TypeError: 445 | pass 446 | 447 | output = [None] * len(self.mcs_cases) # Pre-allocate a list to hold results in order 448 | futures_to_case = {} # Map each future to its corresponding case name 449 | 450 | with ThreadPoolExecutor(max_workers=1) as t_save_output: 451 | with ProcessPoolExecutor(max_workers=min(n_proc, n_case)) as p_executor: 452 | # Submit all tasks and remember their order 453 | mcs_cases_keys = list(self.mcs_cases.keys()) # Get a list of all case names 454 | for i, case_name in enumerate(mcs_cases_keys): 455 | future = p_executor.submit(self.mcs_cases[case_name].run, save=False) 456 | futures_to_case[future] = case_name 457 | 458 | if set_progress is not None: 459 | # Wait for the futures to complete and collect their results in 
submission order 460 | for future in as_completed(futures_to_case): 461 | case_name = futures_to_case[future] 462 | index = mcs_cases_keys.index(case_name) # Find the index of the case based on its name 463 | result = future.result() 464 | output[index] = result 465 | self.mcs_cases[case_name].output = result # Directly access the case by its name 466 | if save: 467 | t_save_output.submit(self.mcs_cases[case_name].save_csv, None, save_archive) 468 | set_progress(index + 1) 469 | else: 470 | for future in as_completed(futures_to_case): 471 | case_name = futures_to_case[future] 472 | index = mcs_cases_keys.index(case_name) 473 | result = future.result() 474 | output[index] = result 475 | self.mcs_cases[case_name].output = result # Directly access the case by its name 476 | if save: 477 | t_save_output.submit(self.mcs_cases[case_name].save_csv, None, save_archive) 478 | 479 | elif concurrency_strategy == 2: 480 | try: 481 | set_progress_max(n_sim) 482 | except TypeError: 483 | pass 484 | 485 | with ThreadPoolExecutor(max_workers=1) as t_executor: 486 | with ProcessPoolExecutor(max_workers=n_proc) as p_executor: 487 | for mcs_case_name, mcs_case in self.mcs_cases.items(): # Reuse the executor for 3 sets of tasks 488 | if cases_to_run and mcs_case_name not in cases_to_run: 489 | continue 490 | progress_0 = 0 if progress_0 is None else progress_0 + mcs_case.n_sim 491 | mcs_case.run(p_executor, set_progress=set_progress, progress_0=progress_0) 492 | if save: 493 | t_executor.submit(mcs_case.save_csv, None, save_archive) 494 | else: 495 | raise NotImplementedError(f'Unknown `concurrency_strategy` {concurrency_strategy}.') 496 | 497 | def save_init(self, archive: bool): 498 | # clean existing files 499 | try: 500 | remove(self.get_save_dir()) 501 | except: 502 | pass 503 | try: 504 | shutil.rmtree(self.get_save_dir()) 505 | except: 506 | pass 507 | 508 | # create empty folder or zip 509 | if archive: 510 | with open(self.get_save_dir(), 'wb+') as f: 511 | f.write(b'PK\x05\x06\x00l\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') 512 | else: 513 | makedirs(self.get_save_dir()) 514 | 515 | def save_all(self, archive: bool = True): 516 | self.save_init(archive=archive) 517 | 518 | # write results 519 | for k, v in self.mcs_cases.items(): 520 | v.save_csv(archive=archive) 521 | return 522 | 523 | def load_from_file(self, fp_in: str, fp_out: str = None): 524 | self.__in_fp: str = path.realpath(fp_in) 525 | self.set_inputs_file_path(fp_in) # input parameters 526 | for name, mcs_case in self.mcs_cases.items(): 527 | mcs_case.load_output_from_file(fp_out) 528 | -------------------------------------------------------------------------------- /sfeprapy/mcs0/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ( 2 | 'EXAMPLE_INPUT', 3 | 'decide_fire', 'evaluate_fire_temperature', 'solve_time_equivalence_iso834', 'solve_protection_thickness', 4 | 'teq_main', 5 | 'MCS0', 'MCS0Single', 6 | ) 7 | 8 | import os 9 | from typing import Callable 10 | 11 | import numpy as np 12 | 13 | from .calcs import ( 14 | decide_fire, evaluate_fire_temperature, solve_time_equivalence_iso834, solve_protection_thickness, teq_main, 15 | ) 16 | from .inputs import EXAMPLE_INPUT 17 | from ..mcs import MCSSingle, MCS 18 | 19 | 20 | class MCS0Single(MCSSingle): 21 | OUTPUT_KEYS = ( 22 | 'index', 'beam_position_horizontal', 'fire_combustion_efficiency', 'fire_hrr_density', 'fire_nft_limit', 23 | 'fire_spread_speed', 'window_open_fraction', 'fire_load_density', 
'fire_type', 't1', 't2', 't3', 24 | 'solver_steel_temperature_solved', 'solver_time_critical_temp_solved', 'solver_protection_thickness', 25 | 'solver_iter_count', 'solver_time_equivalence_solved', 'timber_charring_rate', 'timber_exposed_duration', 26 | 'timber_solver_iter_count', 'timber_fire_load', 'timber_charred_depth', 'timber_charred_mass', 27 | 'timber_charred_volume', 28 | ) 29 | 30 | def __init__(self, name, n_simulations, sim_kwargs, save_dir): 31 | super().__init__(name=name, n_simulations=n_simulations, sim_kwargs=sim_kwargs, save_dir=save_dir) 32 | 33 | @property 34 | def worker(self) -> Callable: 35 | return teq_main 36 | 37 | def get_pdf(self, bin_width: float = 0.2) -> (np.ndarray, np.ndarray, np.ndarray): 38 | teq: np.ndarray = None 39 | for i in range(len(self.output_keys)): 40 | if self.output_keys[i] == 'solver_time_equivalence_solved': 41 | teq = self.output[:, i] 42 | return MCS0Single.make_pdf(teq, bin_width=bin_width) 43 | 44 | def get_cdf(self, bin_width: float = 0.2): 45 | x, y_pdf = self.get_pdf(bin_width=bin_width) 46 | return x, np.cumsum(y_pdf) 47 | 48 | @property 49 | def output_keys(self) -> tuple: 50 | return MCS0Single.OUTPUT_KEYS 51 | 52 | 53 | class MCS0(MCS): 54 | def __getitem__(self, item) -> MCS0Single: 55 | return self.mcs_cases[item] 56 | 57 | @property 58 | def new_mcs_case(self): 59 | return MCS0Single 60 | 61 | 62 | def cli_main(fp_mcs_in: str, n_threads: int = 1): 63 | fp_mcs_in = os.path.realpath(fp_mcs_in) 64 | 65 | mcs = MCS0() 66 | mcs.set_inputs_file_path(fp_mcs_in) 67 | mcs.run(n_proc=n_threads) 68 | mcs.save_all(True) 69 | -------------------------------------------------------------------------------- /sfeprapy/mcs0/calcs.py: -------------------------------------------------------------------------------- 1 | __all__ = ( 2 | 'decide_fire', 'evaluate_fire_temperature', 'solve_time_equivalence_iso834', 'solve_protection_thickness', 3 | 'teq_main' 4 | ) 5 | 6 | from random import random 7 | from typing import Union, Callable 8 | 9 | import numpy as np 10 | from fsetools.lib.fse_bs_en_1991_1_2_parametric_fire import temperature as _fire_param 11 | from fsetools.lib.fse_bs_en_1993_1_2_heat_transfer_c import ( 12 | protection_thickness_2 as _protection_thickness_2 13 | ) 14 | from fsetools.lib.fse_bs_en_1993_1_2_heat_transfer_c import temperature as _steel_temperature 15 | from fsetools.lib.fse_din_en_1991_1_2_parametric_fire import temperature as _fire_param_ger 16 | from fsetools.lib.fse_travelling_fire import temperature as fire_travelling 17 | 18 | 19 | def decide_fire( 20 | window_height: float, 21 | window_width: float, 22 | window_open_fraction: float, 23 | room_breadth: float, 24 | room_depth: float, 25 | room_height: float, 26 | fire_mode: int, 27 | fire_load_density: float, 28 | fire_combustion_efficiency: float, 29 | fire_hrr_density: float, 30 | fire_spread_speed: float, 31 | ) -> int: 32 | """Calculates equivalent time exposure for a protected steel element member in more realistic fire environment 33 | opposing to the standard fire curve ISO 834. 
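    Specifically, this function only decides which design fire applies for the given compartment and
    fire mode; the temperature evaluation itself is carried out separately in `evaluate_fire_temperature`.
    The returned integer is the selected fire type (see `fire_mode` below).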
34 | 35 | PARAMETERS: 36 | :param window_height: [m], weighted window opening height 37 | :param window_width: [m], total window opening width 38 | :param window_open_fraction: [-], a factor is multiplied with the given total window opening area 39 | :param room_breadth: [m], room breadth (shorter direction of the floor plan) 40 | :param room_depth: [m], room depth (longer direction of the floor plan) 41 | :param room_height: [m], room height from floor to soffit (structural), disregard any non fire resisting floors 42 | :param fire_hrr_density: [MW/m], fire maximum release rate per unit area 43 | :param fire_load_density: 44 | :param fire_combustion_efficiency: [-] 45 | :param fire_spread_speed: [m/s], TRAVELLING FIRE, fire spread speed 46 | :param fire_mode: 0 - parametric, 1 - travelling, 2 - ger parametric, 3 - (0 & 1), 4 (1 & 2) 47 | :return: 48 | EXAMPLE: 49 | """ 50 | 51 | # PERMEABLE AND INPUT CHECKS 52 | 53 | fire_load_density_deducted = fire_load_density * fire_combustion_efficiency 54 | 55 | # Total window opening area 56 | window_area = window_height * window_width * window_open_fraction 57 | 58 | # Room floor area 59 | room_floor_area = room_breadth * room_depth 60 | 61 | # Room internal surface area, total, including window openings 62 | room_total_area = (2 * room_floor_area) + ((room_breadth + room_depth) * 2 * room_height) 63 | 64 | # Fire load density related to the total surface area A_t 65 | fire_load_density_total = ( 66 | fire_load_density_deducted * room_floor_area / room_total_area 67 | ) 68 | 69 | # Opening factor 70 | opening_factor = window_area * np.sqrt(window_height) / room_total_area 71 | 72 | # Spread speed - Does the fire spread to involve the full compartment? 73 | fire_spread_entire_room_time = room_depth / fire_spread_speed 74 | burn_out_time = max([fire_load_density_deducted / fire_hrr_density, 900.0]) 75 | 76 | if fire_mode == 0 or fire_mode == 1 or fire_mode == 2: 77 | # enforced to selected fire, i.e. 
0 is ec parametric; 1 is travelling; and 2 is din ec parametric 78 | fire_type = fire_mode 79 | elif fire_mode == 3: 80 | # enforced to ec parametric + travelling 81 | if ( 82 | fire_spread_entire_room_time < burn_out_time 83 | and 0.01 < opening_factor <= 0.2 84 | and 50 <= fire_load_density_total <= 1000 85 | ): 86 | fire_type = 0 # parametric fire 87 | else: # Otherwise, it is a travelling fire 88 | fire_type = 1 # travelling fire 89 | elif fire_mode == 4: 90 | # enforced to german parametric + travelling 91 | # If fire spreads throughout compartment and ventilation is within EC limits = Parametric fire 92 | if ( 93 | fire_spread_entire_room_time < burn_out_time 94 | and 0.125 <= (window_area / room_floor_area) <= 0.5 95 | and 100 <= fire_load_density_total <= 1300 96 | ): 97 | fire_type = 2 # german parametric 98 | else: 99 | # Otherwise, it is a travelling fire 100 | fire_type = 1 # travelling fire 101 | else: 102 | raise ValueError("Unknown fire mode {fire_mode}.".format(fire_mode=fire_mode)) 103 | 104 | return fire_type 105 | 106 | 107 | def evaluate_fire_temperature( 108 | window_height: float, 109 | window_width: float, 110 | window_open_fraction: float, 111 | room_breadth: float, 112 | room_depth: float, 113 | room_height: float, 114 | room_wall_thermal_inertia: float, 115 | fire_tlim: float, 116 | fire_type: float, 117 | fire_time: np.ndarray, 118 | fire_nft_limit: float, 119 | fire_load_density: float, 120 | fire_combustion_efficiency: float, 121 | fire_hrr_density: float, 122 | fire_spread_speed: float, 123 | fire_t_alpha: float, 124 | fire_gamma_fi_q: float, 125 | beam_position_vertical: float, 126 | beam_position_horizontal: Union[np.ndarray, list, float] = -1.0, 127 | ) -> tuple: 128 | """Calculate temperature array of pre-defined fire type `fire_type`. 129 | 130 | PARAMETERS: 131 | :param window_height: [m], weighted window opening height 132 | :param window_width: [m], total window opening width 133 | :param window_open_fraction: [-], a factor is multiplied with the given total window opening area 134 | :param room_breadth: [m], room breadth (shorter direction of the floor plan) 135 | :param room_depth: [m], room depth (longer direction of the floor plan) 136 | :param room_height: [m], room height from floor to soffit (structural), disregard any non fire resisting floors 137 | :param room_wall_thermal_inertia: [J/m2/K/s0.5], thermal inertia of room lining material 138 | :param fire_tlim: [s], PARAMETRIC FIRE, see parametric fire function for details 139 | :param fire_type: [-], 140 | :param fire_time: [K], 141 | :param fire_load_density: 142 | :param fire_combustion_efficiency: 143 | :param fire_t_alpha: 144 | :param fire_gamma_fi_q: 145 | :param beam_position_vertical: 146 | :param fire_hrr_density: [MW/m2], fire maximum release rate per unit area 147 | :param fire_spread_speed: [m/s], TRAVELLING FIRE, fire spread speed 148 | :param beam_position_horizontal: [s], beam location, will be solved for the worst case if less than 0. 
149 | :param fire_nft_limit: [K], TRAVELLING FIRE, maximum temperature of near field temperature 150 | :return: 151 | EXAMPLE: 152 | """ 153 | 154 | fire_load_density_deducted = fire_load_density * fire_combustion_efficiency 155 | 156 | # Total window opening area 157 | window_area = window_height * window_width * window_open_fraction 158 | 159 | # Room floor area 160 | room_floor_area = room_breadth * room_depth 161 | 162 | # Room internal surface area, total, including window openings 163 | room_total_area = 2 * room_floor_area + (room_breadth + room_depth) * 2 * room_height 164 | 165 | if fire_type == 0: 166 | fire_temperature = _fire_param( 167 | t=fire_time, 168 | A_t=room_total_area, 169 | A_f=room_floor_area, 170 | A_v=window_area, 171 | h_eq=window_height, 172 | q_fd=fire_load_density_deducted * 1e6, 173 | lbd=room_wall_thermal_inertia ** 2, 174 | rho=1, 175 | c=1, 176 | t_lim=fire_tlim, 177 | T_0=20 + 273.15, 178 | ) 179 | t1, t2, t3 = np.nan, np.nan, np.nan 180 | 181 | elif fire_type == 1: 182 | kwargs_fire_1_travel = dict( 183 | t=fire_time, 184 | fire_load_density_MJm2=fire_load_density_deducted, 185 | fire_hrr_density_MWm2=fire_hrr_density, 186 | room_length_m=room_depth, 187 | room_width_m=room_breadth, 188 | fire_spread_rate_ms=fire_spread_speed, 189 | beam_location_height_m=beam_position_vertical, 190 | beam_location_length_m=beam_position_horizontal, 191 | fire_nft_limit_c=fire_nft_limit - 273.15, 192 | ) 193 | fire_temperature = fire_travelling(**kwargs_fire_1_travel) + 273.15 194 | 195 | t1 = min(room_depth / fire_spread_speed, fire_load_density_deducted / fire_hrr_density) 196 | t2 = max(room_depth / fire_spread_speed, fire_load_density_deducted / fire_hrr_density) 197 | t3 = t1 + t2 198 | 199 | elif fire_type == 2: 200 | o_ = dict(t_1=-1, t_2_x=-1, t_3_x=-1) 201 | fire_temperature = _fire_param_ger( 202 | t=fire_time, 203 | A_w=window_area, 204 | h_w=window_height, 205 | A_t=room_total_area, 206 | A_f=room_floor_area, 207 | t_alpha=fire_t_alpha, 208 | b=room_wall_thermal_inertia, 209 | q_x_d=fire_load_density_deducted * 1e6, 210 | gamma_fi_Q=fire_gamma_fi_q, 211 | outputs=o_ 212 | ) 213 | t1 = o_['t_1'] 214 | t2 = o_['t_2_x'] 215 | t3 = o_['t_3_x'] 216 | 217 | else: 218 | fire_temperature = np.nan 219 | t1, t2, t3 = np.nan, np.nan, np.nan 220 | 221 | return fire_temperature, beam_position_horizontal, t1, t2, t3 222 | 223 | 224 | def solve_time_equivalence_iso834( 225 | fire_time: np.ndarray, 226 | beam_cross_section_area: float, 227 | beam_rho: float, 228 | protection_k: float, 229 | protection_rho: float, 230 | protection_c: float, 231 | protection_protected_perimeter: float, 232 | solver_temperature_goal: float, 233 | solver_protection_thickness: float, 234 | phi_teq: float, 235 | ) -> float: 236 | """ 237 | Calculates equivalent time exposure for a protected steel element member in more realistic fire environment (i.e. travelling fire, parameteric fires) 238 | opposing to the standard fire curve ISO 834. 
239 | 
240 |     PARAMETERS:
241 |     :param beam_cross_section_area: [m2], the steel beam element cross section area
242 |     :param beam_rho: [kg/m3], steel beam element density
243 |     :param protection_k: [W/m/K], steel beam element protection material thermal conductivity
244 |     :param protection_rho: [kg/m3], steel beam element protection material density
245 |     :param protection_c: [J/kg/K], steel beam element protection material specific heat
246 |     :param protection_protected_perimeter: [m], steel beam element protection material perimeter
247 |     :param solver_temperature_goal: [K], steel beam element expected failure temperature
248 |     :param solver_protection_thickness: [m], steel section protection layer thickness
249 |     :param phi_teq: [-], model uncertainty factor
250 |     :return: [s], solved equivalent time of exposure to the ISO 834 fire, scaled by `phi_teq`
251 |     EXAMPLE:
252 |     """
253 | 
254 |     # ============================================
255 |     # GOAL SEEK TO MATCH STEEL FAILURE TEMPERATURE
256 |     # ============================================
257 | 
258 |     # EVALUATE THE ISO 834 STEEL TEMPERATURE HISTORY AT THE GIVEN (PREVIOUSLY SOLVED) PROTECTION THICKNESS
259 | 
260 |     # Solve equivalent time exposure in ISO 834
261 |     solver_d_p = solver_protection_thickness
262 | 
263 |     if -np.inf < solver_d_p < np.inf:
264 |         fire_temperature_iso834 = (345.0 * np.log10((fire_time / 60.0) * 8.0 + 1.0) + 20.0) + 273.15  # in [K]
265 |         steel_temperature = _steel_temperature(
266 |             fire_time=fire_time,
267 |             fire_temperature=fire_temperature_iso834,
268 |             beam_rho=beam_rho,
269 |             beam_cross_section_area=beam_cross_section_area,
270 |             protection_k=protection_k,
271 |             protection_rho=protection_rho,
272 |             protection_c=protection_c,
273 |             protection_thickness=solver_d_p,
274 |             protection_protected_perimeter=protection_protected_perimeter,
275 |         )
276 | 
277 |         # Check whether the steel temperature (when exposed to the ISO 834 fire) brackets `solver_temperature_goal`
278 |         if solver_temperature_goal < np.amin(steel_temperature):
279 |             # critical temperature is lower than exposed steel temperature
280 |             # this shouldn't be theoretically possible unless the given critical temperature is less than ambient
281 |             # temperature
282 |             solver_time_equivalence_solved = np.nan
283 |         elif solver_temperature_goal > np.amax(steel_temperature):
284 |             solver_time_equivalence_solved = np.inf
285 |         else:
286 |             # func_teq = interp1d(steel_temperature, fire_time, kind="linear", bounds_error=False, fill_value=-1)
287 |             # solver_time_equivalence_solved = func_teq(solver_temperature_goal)
288 |             solver_time_equivalence_solved = np.interp(solver_temperature_goal, steel_temperature, fire_time)
289 |         solver_time_equivalence_solved = solver_time_equivalence_solved * phi_teq
290 | 
291 |     elif solver_d_p == np.inf:
292 |         solver_time_equivalence_solved = np.inf
293 |     elif solver_d_p == -np.inf:
294 |         solver_time_equivalence_solved = -np.inf
295 |     elif np.isnan(solver_d_p):
296 |         solver_time_equivalence_solved = np.nan
297 |     else:
298 |         raise ValueError(f'This error should not occur, solver_d_p = {solver_d_p}')
299 | 
300 |     return solver_time_equivalence_solved
301 | 
302 | 
303 | def solve_protection_thickness(
304 |         fire_time: Union[list, np.ndarray],
305 |         fire_temperature: Union[list, np.ndarray],
306 |         beam_cross_section_area: float,
307 |         beam_rho: float,
308 |         protection_k: float,
309 |         protection_rho: float,
310 |         protection_c: float,
311 |         protection_protected_perimeter: float,
312 |         solver_temperature_goal: float,
313 |         solver_max_iter: int,
314 |         solver_thickness_ubound: float,
315 |         solver_thickness_lbound: float,
316 |         solver_tol: float,
317 |         *_,
318 |         **__,
319 | ) -> tuple:
320 |     """
321 |     Solves the protection layer thickness at which the peak temperature of a protected steel member,
322 |     exposed to the given fire, matches `solver_temperature_goal`.
323 | 
324 |     PARAMETERS:
325 |     :param fire_time: [s], time array
326 |     :param fire_temperature: [K], temperature array
327 |     :param beam_cross_section_area: [m2], the steel beam element cross section area
328 |     :param beam_rho: [kg/m3], steel beam element density
329 |     :param protection_k: [W/m/K], steel beam element protection material thermal conductivity
330 |     :param protection_rho: [kg/m3], steel beam element protection material density
331 |     :param protection_c: [J/kg/K], steel beam element protection material specific heat
332 |     :param protection_protected_perimeter: [m], steel beam element protection material perimeter
333 |     :param solver_temperature_goal: [K], steel beam element expected failure temperature
334 |     :param solver_max_iter: Maximum allowable iteration count for solving the protection thickness
335 |     :param solver_thickness_ubound: [m], protection layer thickness upper bound initial condition for the solver
336 |     :param solver_thickness_lbound: [m], protection layer thickness lower bound initial condition for the solver
337 |     :param solver_tol: [K], tolerance for solving the protection thickness
338 |     (additional positional and keyword arguments are accepted and ignored)
339 |     :return results:
340 |         A tuple containing the following items, in order.
341 |         solver_steel_temperature_solved: [K], peak steel temperature at the solved thickness (+/-inf when out of bounds)
342 |         solver_time_critical_temp_solved: [s], time at which the peak steel temperature occurs
343 |         solver_protection_thickness: [m], solved protection layer thickness
344 |         solver_iter_count: [-], number of iterations used by the solver
345 |         NaN values (other than the iteration count) indicate that no solution was found.
346 |     EXAMPLE:
347 |     """
348 | 
349 |     # ============================================
350 |     # GOAL SEEK TO MATCH STEEL FAILURE TEMPERATURE
351 |     # ============================================
352 | 
353 |     # MATCH PEAK STEEL TEMPERATURE BY ADJUSTING PROTECTION LAYER THICKNESS
354 | 
355 |     # Solve protection properties for `solver_temperature_goal`
356 |     # solver_d_p, solver_T_max_a, solver_t, solver_iter_count = _protection_thickness(
357 |     #     fire_time=fire_time,
358 |     #     fire_temperature=fire_temperature,
359 |     #     beam_rho=beam_rho,
360 |     #     beam_cross_section_area=beam_cross_section_area,
361 |     #     protection_k=protection_k,
362 |     #     protection_rho=protection_rho,
363 |     #     protection_c=protection_c,
364 |     #     protection_protected_perimeter=protection_protected_perimeter,
365 |     #     solver_temperature_goal=solver_temperature_goal,
366 |     #     solver_temperature_goal_tol=solver_tol,
367 |     #     solver_max_iter=solver_max_iter,
368 |     #     d_p_1=solver_thickness_lbound,
369 |     #     d_p_2=solver_thickness_ubound,
370 |     # )
371 |     # return solver_T_max_a, solver_t, solver_d_p, solver_iter_count
372 | 
373 |     solver_d_p, solver_T_max_a, solver_t, solver_iter_count, solver_status = _protection_thickness_2(
374 |         fire_time=fire_time,
375 |         fire_temperature=fire_temperature,
376 |         beam_rho=beam_rho,
377 |         beam_cross_section_area=beam_cross_section_area,
378 |         protection_k=protection_k,
379 |         protection_rho=protection_rho,
380 |         protection_c=protection_c,
381 |         protection_protected_perimeter=protection_protected_perimeter,
382 |         solver_temperature_goal=solver_temperature_goal,
383 |         solver_temperature_goal_tol=solver_tol,
384 |         solver_max_iter=solver_max_iter,
385 |         d_p_1=solver_thickness_lbound,
386 |         d_p_2=solver_thickness_ubound,
387 |         d_p_i=0.0025 + random() * 0.0025,  # randomised initial guess for the protection thickness
388 |     )
389 |     # print(solver_d_p, solver_T_max_a, solver_t, solver_iter_count, solver_status)
390 | 
391 |     if solver_status == 0:
392 |         return solver_T_max_a, solver_t, solver_d_p, solver_iter_count
393 |     elif solver_status == 1:
394 |         return -np.inf, solver_t, solver_d_p, solver_iter_count
395 |     elif solver_status == 2:
396 |         return np.inf, solver_t, solver_d_p, solver_iter_count
397 |     else:  # solver_status == 3, or any unexpected status: no convergence
398 |         return np.nan, np.nan, np.nan, solver_iter_count
399 | 
400 | 
401 | def teq_main(
402 |         index: int,
403 |         beam_cross_section_area: float,
404 |         beam_position_vertical: float,
405 |         beam_position_horizontal: float,
406 |         beam_rho: float,
407 |         fire_time_duration: float,
408 |         fire_time_step: float,
409 |         fire_combustion_efficiency: float,
410 |         fire_gamma_fi_q: float,
411 |         fire_hrr_density: float,
412 |         fire_load_density: float,
413 |         fire_mode: int,
414 |         fire_nft_limit: float,
415 |         fire_spread_speed: float,
416 |         fire_t_alpha: float,
417 |         fire_tlim: float,
418 |         protection_c: float,
419 |         protection_k: float,
420 |         protection_protected_perimeter: float,
421 |         protection_rho: float,
422 |         room_breadth: float,
423 |         room_depth: float,
424 |         room_height: float,
425 |         room_wall_thermal_inertia: float,
426 |         solver_temperature_goal: float,
427 |         solver_max_iter: int,
428 |         solver_thickness_lbound: float,
429 |         solver_thickness_ubound: float,
430 |         solver_tol: float,
431 |         window_height: float,
432 |         window_open_fraction: float,
433 |         window_width: float,
434 |         window_open_fraction_permanent: float,
435 |         phi_teq: float = 1.0,
436 |         timber_exposed_area: float = 0.,
437 |         timber_charred_depth=None,
438 |         timber_charring_rate=None,
439 |         timber_hc: float = None,
440 |         timber_density: float = None,
441 |         timber_depth: float = None,
442 |         timber_solver_tol: float = None,
443 |         timber_solver_ilim: float = None,
444 |         occupancy_type: str = None,
445 |         car_cluster_size: int = None,
446 | ) -> tuple:
447 |     # Ensure room_depth is the longer of (room_depth, room_breadth); swap in place if needed
448 |     if room_depth < room_breadth:
449 |         room_depth += room_breadth
450 |         room_breadth = room_depth - room_breadth
451 |         room_depth -= room_breadth
452 | 
453 |     # todo: wip for car park!!!
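    # The car-park branch below reshapes the compartment so that the travelling fire involves only the
    # parking bays assumed to burn. Its constants encode an assumed layout: 2.3 m wide bays in two
    # facing rows, with 4283 / 202 (approx. 21.2) m2 of gross floor area attributed to each bay,
    # figures that appear to be survey-derived; adjust them for a different car park. For example,
    # car_cluster_size = 3 means 4 burning cars, giving room_depth = 4 * 2.3 / 2 = 4.6 m and
    # room_floor_area = 4 * 21.2 = 84.8 m2 (room_breadth then follows as area / depth).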
454 | if occupancy_type == '__CAR_PARK__': 455 | fire_mode = 1 # force to travelling fire only 456 | # work out new room_depth_car based on how many cars are involved in fire 457 | if car_cluster_size is not None and car_cluster_size >= 0: 458 | car_cluster_size = int(car_cluster_size) + 1 459 | room_depth_original = float(room_depth) 460 | parking_bay_width = 2.3 461 | n_parking_bay_row = 2 462 | average_area_per_parking_bay = 4283 / 202 463 | 464 | room_depth = car_cluster_size * parking_bay_width / n_parking_bay_row 465 | room_floor_area = car_cluster_size * average_area_per_parking_bay 466 | room_breadth = room_floor_area / room_depth 467 | 468 | beam_position_horizontal = (beam_position_horizontal / room_depth_original) * room_depth 469 | 470 | window_open_fraction = ( 471 | window_open_fraction * (1 - window_open_fraction_permanent) + window_open_fraction_permanent 472 | ) 473 | 474 | # Fix ventilation opening size, so it doesn't exceed wall area 475 | if window_height > room_height: 476 | window_height = room_height 477 | 478 | # Calculate fire time, this is used for all fire curves in the calculation 479 | fire_time = np.arange(0, fire_time_duration + fire_time_step, fire_time_step) 480 | 481 | # Calculate ISO 834 fire temperature 482 | # fire_time_iso834 = fire_time 483 | # fire_temperature_iso834 = (345.0 * np.log10((fire_time / 60.0) * 8.0 + 1.0) + 20.0) + 273.15 # in [K] 484 | 485 | # initialise solver iteration count for timber fuel contribution 486 | timber_solver_iter_count = -1 487 | timber_exposed_duration = 0 # initial condition, timber exposed duration 488 | _fire_load_density_ = float(fire_load_density) # preserve original fire load density 489 | 490 | while True: 491 | timber_solver_iter_count += 1 492 | # the following `if` decide whether to calculate `timber_charred_depth_i` from `timber_charring_rate` or 493 | if ( 494 | timber_exposed_area is not None and 495 | timber_exposed_area > 0 and 496 | (timber_charred_depth is not None or timber_charring_rate is not None) 497 | ): 498 | if timber_charred_depth is None: 499 | # calculate from timber charring rate 500 | if isinstance(timber_charring_rate, (float, int)): 501 | timber_charring_rate_i = timber_charring_rate 502 | elif isinstance(timber_charring_rate, Callable): 503 | timber_charring_rate_i = timber_charring_rate(timber_exposed_duration) 504 | else: 505 | raise TypeError('`timber_charring_rate_i` is not numerical nor Callable type') 506 | timber_charring_rate_i *= 1. / 1000. # [mm/min] -> [m/min] 507 | timber_charring_rate_i *= 1. / 60. # [m/min] -> [m/s] 508 | timber_charred_depth_i = timber_charring_rate_i * timber_exposed_duration 509 | else: 510 | # calculate from timber charred depth 511 | if isinstance(timber_charred_depth, (float, int)): 512 | timber_charred_depth_i = timber_charred_depth 513 | elif isinstance(timber_charred_depth, Callable): 514 | timber_charred_depth_i = timber_charred_depth(timber_exposed_duration) 515 | else: 516 | raise TypeError('`timber_charring_rate_i` is not numerical nor Callable type') 517 | timber_charred_depth_i /= 1000. 
518 | 519 | # make sure the calculated charred depth does not exceed the available timber depth 520 | if timber_depth is not None: 521 | timber_charred_depth_i = min(timber_charred_depth_i, timber_depth) 522 | 523 | timber_charred_volume = timber_charred_depth_i * timber_exposed_area 524 | timber_charred_mass = timber_density * timber_charred_volume 525 | timber_fire_load = timber_charred_mass * timber_hc 526 | timber_fire_load_density = timber_fire_load / (room_breadth * room_depth) 527 | else: 528 | timber_charred_volume = np.nan 529 | timber_charred_depth_i = np.nan 530 | timber_charred_mass = np.nan 531 | timber_fire_load = np.nan 532 | timber_fire_load_density = np.nan 533 | 534 | if np.isnan(timber_fire_load_density): 535 | fire_load_density = _fire_load_density_ 536 | else: 537 | fire_load_density = _fire_load_density_ + timber_fire_load_density 538 | 539 | # To check what design fire to use 540 | fire_type = decide_fire( 541 | window_height=window_height, window_width=window_width, window_open_fraction=window_open_fraction, 542 | room_breadth=room_breadth, room_depth=room_depth, room_height=room_height, fire_mode=fire_mode, 543 | fire_load_density=fire_load_density, fire_combustion_efficiency=fire_combustion_efficiency, 544 | fire_hrr_density=fire_hrr_density, fire_spread_speed=fire_spread_speed 545 | ) 546 | 547 | # To calculate design fire temperature 548 | fire_temperature, beam_position_horizontal, t1, t2, t3 = evaluate_fire_temperature( 549 | window_height=window_height, window_width=window_width, window_open_fraction=window_open_fraction, 550 | room_breadth=room_breadth, room_depth=room_depth, room_height=room_height, 551 | room_wall_thermal_inertia=room_wall_thermal_inertia, fire_tlim=fire_tlim, fire_type=fire_type, 552 | fire_time=fire_time, fire_nft_limit=fire_nft_limit, fire_load_density=fire_load_density, 553 | fire_combustion_efficiency=fire_combustion_efficiency, fire_hrr_density=fire_hrr_density, 554 | fire_spread_speed=fire_spread_speed, fire_t_alpha=fire_t_alpha, fire_gamma_fi_q=fire_gamma_fi_q, 555 | beam_position_vertical=beam_position_vertical, beam_position_horizontal=beam_position_horizontal 556 | ) 557 | 558 | # To solve protection thickness at critical temperature 559 | # inputs.update(solve_protection_thickness(**inputs)) 560 | ( 561 | solver_steel_temperature_solved, solver_time_critical_temp_solved, solver_protection_thickness, 562 | solver_iter_count 563 | ) = solve_protection_thickness( 564 | fire_time=fire_time, fire_temperature=fire_temperature, beam_cross_section_area=beam_cross_section_area, 565 | beam_rho=beam_rho, protection_k=protection_k, protection_rho=protection_rho, protection_c=protection_c, 566 | protection_protected_perimeter=protection_protected_perimeter, 567 | solver_temperature_goal=solver_temperature_goal, solver_max_iter=solver_max_iter, 568 | solver_thickness_ubound=solver_thickness_ubound, solver_thickness_lbound=solver_thickness_lbound, 569 | solver_tol=solver_tol 570 | ) 571 | 572 | # To solve time equivalence in ISO 834 573 | solver_time_equivalence_solved = solve_time_equivalence_iso834( 574 | fire_time=fire_time, beam_cross_section_area=beam_cross_section_area, beam_rho=beam_rho, 575 | protection_k=protection_k, protection_rho=protection_rho, protection_c=protection_c, 576 | protection_protected_perimeter=protection_protected_perimeter, 577 | solver_temperature_goal=solver_temperature_goal, solver_protection_thickness=solver_protection_thickness, 578 | phi_teq=phi_teq 579 | ) 580 | 581 | # additional fuel contribution from 
timber
582 |         if timber_exposed_area is None or timber_exposed_area <= 0:  # no timber exposed
583 |             # Exit timber fuel contribution solver if:
584 |             # 1. no timber exposed
585 |             # 2. timber exposed area undefined
586 |             break
587 |         elif timber_solver_iter_count >= timber_solver_ilim:
588 |             solver_convergence_status = np.nan
589 |             solver_time_critical_temp_solved = np.nan
590 |             solver_time_equivalence_solved = np.nan
591 |             solver_steel_temperature_solved = np.nan
592 |             solver_protection_thickness = np.nan
593 |             solver_iter_count = np.nan
594 |             timber_exposed_duration = np.nan
595 |             break
596 |         elif not -np.inf < solver_protection_thickness < np.inf:
597 |             # no protection thickness solution
598 |             timber_exposed_duration = solver_protection_thickness
599 |             break
600 |         elif abs(timber_exposed_duration - solver_time_equivalence_solved) <= timber_solver_tol:
601 |             # convergence achieved
602 |             break
603 |         else:
604 |             timber_exposed_duration = solver_time_equivalence_solved
605 | 
606 |     timber_charring_rate = timber_charred_depth_i / timber_exposed_duration if timber_exposed_duration else 0
607 |     timber_exposed_duration = timber_exposed_duration
608 |     timber_solver_iter_count = timber_solver_iter_count
609 |     timber_fire_load = timber_fire_load
610 |     timber_charred_depth = timber_charred_depth_i
611 | 
612 |     return (
613 |         index, beam_position_horizontal, fire_combustion_efficiency, fire_hrr_density, fire_nft_limit,
614 |         fire_spread_speed, window_open_fraction, fire_load_density, fire_type, t1, t2, t3,
615 |         solver_steel_temperature_solved, solver_time_critical_temp_solved, solver_protection_thickness,
616 |         solver_iter_count, solver_time_equivalence_solved, timber_charring_rate, timber_exposed_duration,
617 |         timber_solver_iter_count, timber_fire_load, timber_charred_depth, timber_charred_mass, timber_charred_volume,
618 |     )
619 | 
--------------------------------------------------------------------------------
/sfeprapy/mcs0/inputs.py:
--------------------------------------------------------------------------------
1 | EXAMPLE_INPUT = {
2 |     'CASE_1': dict(
3 |         case_name="CASE_1",
4 |         n_simulations=2500,
5 |         fire_time_step=10,
6 |         fire_time_duration=18000,
7 |         fire_hrr_density=dict(dist="uniform_", lbound=0.25 - 0.001, ubound=0.25 + 0.001),
8 |         fire_load_density=dict(dist="gumbel_r_", lbound=10, ubound=1500, mean=420, sd=126),
9 |         fire_spread_speed=dict(dist="uniform_", lbound=0.0035, ubound=0.0190),
10 |         fire_nft_limit=dict(dist="norm_", lbound=623.15, ubound=1473.15, mean=1323.15, sd=93),
11 |         fire_combustion_efficiency=dict(dist="uniform_", lbound=0.8, ubound=1.0),
12 |         window_open_fraction=dict(dist="lognorm_mod_", ubound=0.9999, lbound=0.0001, mean=0.2, sd=0.2),
13 |         phi_teq=dict(dist="constant_", ubound=1, lbound=1, mean=0, sd=0),
14 |         beam_cross_section_area=0.017,
15 |         beam_position_horizontal=dict(dist="uniform_", lbound=0.6 * 31.25, ubound=0.9 * 31.25),
16 |         beam_position_vertical=3.1,
17 |         beam_rho=7850,
18 |         fire_mode=3,
19 |         fire_gamma_fi_q=1,
20 |         fire_t_alpha=300,
21 |         fire_tlim=0.333,
22 |         protection_c=1700,
23 |         protection_k=0.2,
24 |         protection_protected_perimeter=2.14,
25 |         protection_rho=800,
26 |         room_breadth=16,
27 |         room_depth=31.25,
28 |         room_height=3.1,
29 |         room_wall_thermal_inertia=720,
30 |         solver_temperature_goal=823.15,
31 |         solver_max_iter=20,
32 |         solver_thickness_lbound=0.0001,
33 |         solver_thickness_ubound=0.0400,
34 |         solver_tol=1.0,
35 |         window_height=2.8,
36 |         window_width=72,
37 |         window_open_fraction_permanent=0,
38 |         timber_exposed_area=0,
39 | 
timber_charring_rate=0.7, # mm/min 40 | timber_hc=13.2, # MJ/kg 41 | timber_density=400, # [kg/m3] 42 | timber_solver_ilim=20, 43 | timber_solver_tol=1, 44 | timber_charred_depth=None, 45 | timber_depth=None, 46 | occupancy_type=None, 47 | car_cluster_size=None, 48 | p1=3e-7, 49 | p2=0.1, 50 | p3=0.25, 51 | p4=0.09, 52 | general_room_floor_area=500, 53 | ), 54 | 'CASE_2_teq_phi': dict( 55 | case_name="CASE_2_teq_phi", 56 | n_simulations=2500, 57 | fire_time_step=10, 58 | fire_time_duration=18000, 59 | fire_hrr_density=dict(dist="uniform_", lbound=0.25 - 0.001, ubound=0.25 + 0.001), 60 | fire_load_density=dict(dist="gumbel_r_", lbound=10, ubound=1500, mean=420, sd=126), 61 | fire_spread_speed=dict(dist="uniform_", lbound=0.0035, ubound=0.0190), 62 | fire_nft_limit=dict(dist="norm_", lbound=623.15, ubound=1473.15, mean=1323.15, sd=93), 63 | fire_combustion_efficiency=dict(dist="uniform_", lbound=0.8, ubound=1.0), 64 | window_open_fraction=dict(dist="lognorm_mod_", ubound=0.9999, lbound=0.0001, mean=0.2, sd=0.2), 65 | phi_teq=dict(dist="lognorm_", ubound=3, lbound=0.00001, mean=1, sd=0.25), 66 | beam_cross_section_area=0.017, 67 | beam_position_horizontal=dict(dist="uniform_", lbound=0.6 * 31.25, ubound=0.9 * 31.25), 68 | beam_position_vertical=3.1, 69 | beam_rho=7850, 70 | fire_mode=3, 71 | fire_gamma_fi_q=1, 72 | fire_t_alpha=300, 73 | fire_tlim=0.333, 74 | protection_c=1700, 75 | protection_k=0.2, 76 | protection_protected_perimeter=2.14, 77 | protection_rho=800, 78 | room_breadth=16, 79 | room_depth=31.25, 80 | room_height=3.1, 81 | room_wall_thermal_inertia=720, 82 | solver_temperature_goal=823.15, 83 | solver_max_iter=20, 84 | solver_thickness_lbound=0.0001, 85 | solver_thickness_ubound=0.0400, 86 | solver_tol=1.0, 87 | window_height=2.8, 88 | window_width=72, 89 | window_open_fraction_permanent=0, 90 | timber_exposed_area=0, 91 | timber_charring_rate=0.7, # [mm/min] 92 | timber_hc=13.2, # [MJ/kg] 93 | timber_density=400, # [kg/m3] 94 | timber_solver_ilim=20, 95 | timber_solver_tol=1, 96 | timber_charred_depth=None, 97 | timber_depth=None, 98 | occupancy_type=None, 99 | car_cluster_size=None, 100 | p1=3e-7, 101 | p2=0.1, 102 | p3=0.25, 103 | p4=0.09, 104 | general_room_floor_area=500, 105 | ), 106 | 'CASE_3_timber': dict( 107 | case_name="CASE_3_timber", 108 | n_simulations=2500, 109 | fire_time_step=10, 110 | fire_time_duration=18000, 111 | fire_hrr_density=dict(dist="uniform_", lbound=0.25 - 0.001, ubound=0.25 + 0.001), 112 | fire_load_density=dict(dist="gumbel_r_", lbound=10, ubound=1500, mean=420, sd=126), 113 | fire_spread_speed=dict(dist="uniform_", lbound=0.0035, ubound=0.0190), 114 | fire_nft_limit=dict(dist="norm_", lbound=623.15, ubound=1473.15, mean=1323.15, sd=93), 115 | fire_combustion_efficiency=dict(dist="uniform_", lbound=0.8, ubound=1.0), 116 | window_open_fraction=dict(dist="lognorm_mod_", ubound=0.9999, lbound=0.0001, mean=0.2, sd=0.2), 117 | phi_teq=dict(dist="constant_", ubound=1, lbound=1, mean=0, sd=0), 118 | beam_cross_section_area=0.017, 119 | beam_position_horizontal=dict(dist="uniform_", lbound=0.6 * 31.25, ubound=0.9 * 31.25), 120 | beam_position_vertical=3.1, 121 | beam_rho=7850, 122 | fire_mode=3, 123 | fire_gamma_fi_q=1, 124 | fire_t_alpha=300, 125 | fire_tlim=0.333, 126 | protection_c=1700, 127 | protection_k=0.2, 128 | protection_protected_perimeter=2.14, 129 | protection_rho=800, 130 | room_breadth=16, 131 | room_depth=31.25, 132 | room_height=3.1, 133 | room_wall_thermal_inertia=720, 134 | solver_temperature_goal=823.15, 135 | solver_max_iter=20, 136 | 
solver_thickness_lbound=0.0001, 137 | solver_thickness_ubound=0.0400, 138 | solver_tol=1.0, 139 | window_height=2.8, 140 | window_width=72, 141 | window_open_fraction_permanent=0, 142 | timber_exposed_area=500., 143 | timber_charring_rate=0.7, # mm/min 144 | timber_hc=13.2, # MJ/kg 145 | timber_density=400, # [kg/m3] 146 | timber_solver_ilim=20, 147 | timber_solver_tol=1, 148 | timber_charred_depth=None, 149 | timber_depth=None, 150 | occupancy_type=None, 151 | car_cluster_size=None, 152 | p1=3e-7, 153 | p2=0.1, 154 | p3=0.25, 155 | p4=0.09, 156 | general_room_floor_area=500, 157 | ), 158 | } 159 | -------------------------------------------------------------------------------- /sfeprapy/mcs0/run.py: -------------------------------------------------------------------------------- 1 | import concurrent.futures 2 | import csv 3 | import json 4 | import multiprocessing as mp 5 | import pathlib 6 | from copy import deepcopy 7 | from typing import Dict, List, Tuple, Any, Optional 8 | 9 | import numpy as np 10 | from tqdm import tqdm 11 | 12 | from .calcs import teq_main as calcs_main 13 | from .. import logger 14 | 15 | 16 | def calculation_function(params: Dict[str, Any]) -> Tuple: 17 | """ 18 | Call the calculation function with appropriate parameters. 19 | 20 | Args: 21 | params: Dictionary of parameters (both stochastic and static) 22 | 23 | Returns: 24 | Tuple containing (index, result1, result2) 25 | """ 26 | # Create a copy to avoid modifying the input dictionary 27 | params_copy = deepcopy(params) 28 | 29 | # Extract the index before passing to calculation function 30 | index = params_copy.pop('index', -1) 31 | 32 | # Call the main calculation function and return results with index 33 | try: 34 | return (index, *calcs_main(**params_copy)) 35 | except Exception as e: 36 | logger.error(f"Calculation error with params {params_copy}: {str(e)}") 37 | raise 38 | 39 | 40 | def process_iteration(task_data: Dict[str, Any]) -> Tuple: 41 | """ 42 | Process a single row of data with shared parameters. 43 | 44 | Args: 45 | task_data: Dictionary containing combined parameters 46 | 47 | Returns: 48 | Tuple containing calculation results 49 | """ 50 | try: 51 | # Call the calculation function 52 | return calculation_function(task_data) 53 | except Exception as e: 54 | row_index = task_data.get('index', 'unknown') 55 | logger.error(f"Error processing row {row_index}: {str(e)}") 56 | # Return None values for results with the correct tuple length 57 | # Adjust the number of None values to match your actual result structure 58 | return (row_index, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan) 59 | 60 | 61 | def convert_numeric(value: str) -> Any: 62 | """ 63 | Convert string value to int or float if possible. 64 | 65 | Args: 66 | value: String value to convert 67 | 68 | Returns: 69 | Converted value (int, float, or original string) 70 | """ 71 | if not isinstance(value, str): 72 | return value 73 | 74 | value = value.strip() 75 | 76 | # Check for empty strings or null values 77 | if not value or value.lower() in ('null', 'nan', 'none'): 78 | return None 79 | 80 | try: 81 | # Try to convert to integer 82 | return int(value) 83 | except ValueError: 84 | try: 85 | # Try float conversion 86 | return float(value) 87 | except ValueError: 88 | # Return original if conversion fails 89 | return value 90 | 91 | 92 | def read_input_files(csv_path: pathlib.Path, json_path: pathlib.Path) -> List[Dict[str, Any]]: 93 | """ 94 | Read and combine input files into a list of parameter dictionaries. 
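    (Assumed file convention, inferred from `process_single_case`: the CSV holds one pre-sampled row
    of stochastic parameters per simulation, while the JSON holds the case-wide static parameters.)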
95 | 
96 |     Args:
97 |         csv_path: Path to CSV file with stochastic parameters
98 |         json_path: Path to JSON file with static parameters
99 | 
100 |     Returns:
101 |         List of combined parameter dictionaries
102 |     """
103 |     # Read static parameters
104 |     try:
105 |         with open(json_path, 'r') as jsonfile:
106 |             static_params = json.load(jsonfile)
107 |     except (json.JSONDecodeError, FileNotFoundError) as e:
108 |         logger.error(f"Error loading static parameters: {str(e)}")
109 |         raise
110 | 
111 |     # Read stochastic parameters
112 |     stochastic_params = []
113 |     try:
114 |         with open(csv_path, 'r', newline='') as csvfile:
115 |             reader = csv.DictReader(csvfile)
116 | 
117 |             # Validate required columns
118 |             required_cols = ['index']  # Add your essential columns here
119 |             missing_cols = [col for col in required_cols if col not in reader.fieldnames]
120 | 
121 |             if missing_cols:
122 |                 logger.error(f"Missing required columns in CSV: {missing_cols}")
123 |                 raise ValueError(f"Missing required columns: {missing_cols}")
124 | 
125 |             # Process each row
126 |             for i, row in enumerate(reader):
127 |                 # Add row number as index if not present
128 |                 if 'index' not in row or not row['index']:
129 |                     row['index'] = i
130 | 
131 |                 # Convert string values to appropriate types
132 |                 numeric_row = {field: convert_numeric(value) for field, value in row.items()}
133 |                 stochastic_params.append(numeric_row)
134 |     except FileNotFoundError as e:
135 |         logger.error(f"CSV file not found: {str(e)}")
136 |         raise
137 | 
138 |     # Combine parameters for processing
139 |     combined_params = [dict(**row, **static_params) for row in stochastic_params]
140 |     return combined_params
141 | 
142 | 
143 | def write_results_to_csv(results: List[Tuple], output_path: pathlib.Path, column_names: List[str]) -> None:
144 |     """
145 |     Write calculation results to CSV file.
146 | 
147 |     Args:
148 |         results: List of result tuples
149 |         output_path: Path to output CSV file
150 |         column_names: Names of columns for CSV header
151 |     """
152 |     # Sort results by index (first element of each tuple)
153 |     sorted_results = sorted(results, key=lambda x: x[0])
154 | 
155 |     try:
156 |         with open(output_path, 'w', newline='') as csvfile:
157 |             writer = csv.writer(csvfile)
158 |             writer.writerow(column_names)  # Write header
159 | 
160 |             # Write data rows, formatting numeric values to five decimal places
161 |             for result in sorted_results:
162 |                 formatted_result = list()
163 |                 for value in result:
164 |                     try:
165 |                         # Format the value as a float to the specified number of decimal places
166 |                         # (f-string formatting `:.5f`)
167 |                         formatted_value = f"{value:.5f}"
168 |                         formatted_result.append(formatted_value)
169 |                     except (TypeError, ValueError):
170 |                         # In case formatting fails (e.g. a string or None value),
171 |                         # append the original value and let csv.writer handle it
172 |                         formatted_result.append(value)
173 |                 writer.writerow(formatted_result)
174 |     except IOError as e:
175 |         logger.error(f"Error writing results to CSV: {str(e)}")
176 |         raise
177 | 
178 | 
179 | def process_single_case(case_dir: pathlib.Path, executor: concurrent.futures.ProcessPoolExecutor) -> None:
180 |     """
181 |     Process a single simulation case using the provided executor.
182 | 183 | Args: 184 | case_dir: Directory containing simulation case files 185 | executor: ProcessPoolExecutor to use for parallel processing 186 | """ 187 | case_name = case_dir.name 188 | logger.info(f"Processing simulation case: {case_name}") 189 | 190 | # Define file paths 191 | fp_in_stochastic = case_dir / f'{case_name}.csv' 192 | fp_in_static = case_dir / f'{case_name}.json' 193 | fp_out = case_dir / f'{case_name}_out.csv' 194 | 195 | # Read input files and combine parameters 196 | try: 197 | process_args = read_input_files(fp_in_stochastic, fp_in_static) 198 | except Exception as e: 199 | logger.error(f"Failed to read input files for case {case_name}: {str(e)}") 200 | return 201 | 202 | # Process using the provided ProcessPoolExecutor with batching for large datasets 203 | batch_size = 10000 # Adjust based on your memory constraints 204 | results = [] 205 | 206 | # Process in batches to avoid memory issues 207 | for i in range(0, len(process_args), batch_size): 208 | batch = process_args[i:i + batch_size] 209 | 210 | # Submit all tasks in batch and get futures 211 | futures = [executor.submit(process_iteration, arg | dict(dir_temp=case_dir)) for arg in batch] 212 | 213 | # Process results as they complete with tqdm 214 | for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures), 215 | desc=f"Batch {i // batch_size + 1}", ): 216 | try: 217 | result = future.result() 218 | results.append(result) 219 | except Exception as e: 220 | logger.error(f"Error in future: {str(e)}") 221 | 222 | # Define result column names - replace with appropriate names 223 | result_cols = ['index', 'q_inc', 't_ig_ftp', 'ftp', 't_ig_safir', 't_max_safir', 'T_max_safir', 't_d', ] 224 | 225 | # Save results to CSV 226 | try: 227 | write_results_to_csv(results, fp_out, result_cols) 228 | except Exception as e: 229 | logger.error(f"Failed to write results for case {case_name}: {str(e)}") 230 | 231 | 232 | def process_multiple_cases(case_dirs: List[pathlib.Path], n_proc: Optional[int] = 0) -> None: 233 | """ 234 | Process multiple simulation cases with a single process pool. 
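    Example (hypothetical case directories, for illustration only):
        process_multiple_cases([pathlib.Path('cases/CASE_1'), pathlib.Path('cases/CASE_2')], n_proc=4)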
235 | 236 | Args: 237 | case_dirs: List of directories containing simulation cases 238 | """ 239 | # Determine number of processes (leave one core free for the OS) 240 | num_cores = n_proc or max(1, mp.cpu_count() - 2) 241 | logger.info(f"Using {num_cores} worker processes") 242 | 243 | # Create a single ProcessPoolExecutor for all cases 244 | with concurrent.futures.ProcessPoolExecutor(max_workers=num_cores) as executor: 245 | for case_dir in case_dirs: 246 | if not case_dir.is_dir(): 247 | logger.warning(f"Skipping non-directory: {case_dir}") 248 | continue 249 | 250 | try: 251 | # Process each case with the same executor 252 | process_single_case(case_dir, executor) 253 | except Exception as e: 254 | logger.error(f"Failed to process case {case_dir.name}: {str(e)}") 255 | 256 | 257 | def process_single_case_2(case_dir: pathlib.Path) -> None: 258 | """Process a single simulation case in its entirety within one process.""" 259 | case_name = case_dir.name 260 | 261 | # Define file paths 262 | fp_in_stochastic = case_dir / f'{case_name}.csv' 263 | fp_in_static = case_dir / f'{case_name}.json' 264 | fp_out = case_dir / f'{case_name}_out.csv' 265 | 266 | try: 267 | # Read input files and combine parameters 268 | process_args = read_input_files(fp_in_stochastic, fp_in_static) 269 | 270 | # Process all rows sequentially within this process, without progress tracking 271 | results = [] 272 | for arg in process_args: 273 | # Add the directory path to the arguments 274 | arg_with_dir = arg | dict(dir_temp=case_dir) 275 | result = process_iteration(arg_with_dir) 276 | results.append(result) 277 | 278 | # Define result column names - replace with appropriate names 279 | result_cols = ['index', 'q_inc', 't_ig_ftp', 'ftp', 't_ig_safir', 't_max_safir', 'T_max_safir', 't_d'] 280 | 281 | # Save results to CSV 282 | write_results_to_csv(results, fp_out, result_cols) 283 | 284 | except Exception as e: 285 | logger.error(f"Failed to process case {case_name}: {str(e)}") 286 | 287 | 288 | def process_multiple_cases_2(case_dirs: List[pathlib.Path], n_proc: Optional[int] = None) -> None: 289 | """ 290 | Process multiple simulation cases in parallel with one dedicated process per case. 291 | 292 | Instead of processing rows of a case in parallel, this function processes each 293 | entire case in its own process. 294 | 295 | Args: 296 | case_dirs: List of directories containing simulation cases 297 | n_proc: Maximum number of concurrent processes to use. If None, will use available cores. 
298 | """ 299 | # Determine number of processes (leave some cores free for the OS) 300 | max_processes = n_proc or max(1, mp.cpu_count() - 2) 301 | 302 | # Filter valid directories 303 | valid_case_dirs = [case_dir for case_dir in case_dirs if case_dir.is_dir()] 304 | 305 | if not valid_case_dirs: 306 | logger.warning("No valid case directories found!") 307 | return 308 | 309 | if n_proc == 1: 310 | for case_dir in tqdm(valid_case_dirs): 311 | process_single_case_2(case_dir) 312 | return 313 | 314 | # Use ProcessPoolExecutor to run each case as a separate process 315 | with concurrent.futures.ProcessPoolExecutor(max_workers=max_processes) as executor: 316 | # Submit all cases to the executor 317 | futures = {executor.submit(process_single_case_2, case_dir): case_dir.name for case_dir in valid_case_dirs} 318 | 319 | # Process results as they complete 320 | for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures)): 321 | case_name = futures[future] 322 | try: 323 | # Get the result (None for the processing function) 324 | future.result() 325 | except Exception as e: 326 | logger.error(f"Error processing case {case_name}: {str(e)}") 327 | -------------------------------------------------------------------------------- /sfeprapy/mcs1/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ( 2 | 'MCS1Single', 'MCS1', 'cli_main', 3 | 4 | ) 5 | 6 | import os 7 | 8 | import numpy as np 9 | 10 | from .calcs import teq_main 11 | from .inputs import EXAMPLE_INPUT 12 | from ..mcs import MCSSingle 13 | from ..mcs0 import MCS0 14 | 15 | 16 | class MCS1Single(MCSSingle): 17 | OUTPUT_KEYS = ( 18 | 'index', 'beam_position_horizontal', 'fire_combustion_efficiency', 'fire_hrr_density', 'fire_nft_limit', 19 | 'fire_spread_speed', 'window_open_fraction', 'epsilon_q', 'fire_load_density', 'fire_type', 't1', 't2', 't3', 20 | 'solver_temperature_goal', 'solver_time_equivalence_solved', 'timber_charring_rate', 'timber_exposed_duration', 21 | 'timber_solver_iter_count', 'timber_fire_load', 'timber_charred_depth', 'timber_charred_mass', 22 | 'timber_charred_volume', 'T_max_t', 'k_y_theta_t', 23 | ) 24 | 25 | def __init__(self, name, n_simulations, sim_kwargs, save_dir): 26 | super().__init__(name=name, n_simulations=n_simulations, sim_kwargs=sim_kwargs, save_dir=save_dir) 27 | 28 | @property 29 | def worker(self): 30 | return teq_main 31 | 32 | def get_pdf(self, bin_width: float = 0.2) -> (np.ndarray, np.ndarray, np.ndarray): 33 | teq: np.ndarray = None 34 | for i in range(len(self.output_keys)): 35 | if self.output_keys[i] == 'solver_time_equivalence_solved': 36 | teq = self.output[:, i] 37 | 38 | return MCS1Single.make_pdf(teq, bin_width=bin_width) 39 | 40 | def get_cdf(self, bin_width: float = 0.2): 41 | x, y_pdf = self.get_pdf(bin_width=bin_width) 42 | 43 | return x, np.cumsum(y_pdf) 44 | 45 | @property 46 | def output_keys(self) -> tuple: 47 | return MCS1Single.OUTPUT_KEYS 48 | 49 | 50 | class MCS1(MCS0): 51 | @property 52 | def new_mcs_case(self): 53 | return MCS1Single 54 | 55 | 56 | def cli_main(fp_mcs_in: str, n_threads: int = 1): 57 | fp_mcs_in = os.path.realpath(fp_mcs_in) 58 | 59 | mcs = MCS1() 60 | mcs.inputs = fp_mcs_in 61 | mcs.n_threads = n_threads 62 | mcs.run() 63 | -------------------------------------------------------------------------------- /sfeprapy/mcs1/calcs.py: -------------------------------------------------------------------------------- 1 | __all__ = ( 2 | 'decide_fire', 'evaluate_fire_temperature', 
'solve_time_equivalence_iso834',
3 |     'teq_main'
4 | )
5 | 
6 | import logging
7 | from typing import Callable, Any
8 | 
9 | import numpy as np
10 | from fsetools.lib.fse_bs_en_1993_1_2_heat_transfer_c import temperature as _steel_temperature
11 | from fsetools.lib.fse_bs_en_1993_1_2_strength_reduction_factor import k_y_theta_prob
12 | 
13 | from ..mcs0 import decide_fire, evaluate_fire_temperature
14 | 
15 | logger = logging.getLogger('gui')
16 | 
17 | 
18 | def solve_time_equivalence_iso834(
19 |         fire_time: np.ndarray,
20 |         fire_temperature: np.ndarray,
21 |         beam_cross_section_area: float,
22 |         beam_rho: float,
23 |         protection_k: float,
24 |         protection_rho: float,
25 |         protection_c: float,
26 |         protection_protected_perimeter: float,
27 |         solver_protection_thickness: float,
28 |         phi_teq: float,
29 | ) -> tuple:
30 |     """
31 |     Calculates the equivalent time of exposure to the ISO 834 standard fire for a protected steel member
32 |     under a more realistic design fire (i.e. travelling or parametric), based on its peak steel temperature.
33 | 
34 |     PARAMETERS:
35 |     :param beam_cross_section_area: [m2], the steel beam element cross-section area
36 |     :param beam_rho: [kg/m3], steel beam element density
37 |     :param protection_k: [W/m/K], steel beam element protection material thermal conductivity
38 |     :param protection_rho: [kg/m3], steel beam element protection material density
39 |     :param protection_c: [J/kg/K], steel beam element protection material specific heat
40 |     :param protection_protected_perimeter: [m], steel beam element protection material perimeter
41 |     :param fire_time: [s], time array of the design fire
42 |     :param fire_temperature: [K], temperature array of the design fire
43 |     :param solver_protection_thickness: [m], prescribed (not solved) steel section protection layer thickness
44 |     :param phi_teq: [-], model uncertainty factor
45 |     :return steel_temperature: [K], steel temperature array under the design fire
46 |     :return solver_temperature_goal: [K], peak steel temperature reached under the design fire
47 |     :return solver_time_equivalence_solved: solved equivalent time exposure, scaled by `phi_teq`
48 |     EXAMPLE:
49 |     """
50 | 
51 |     steel_temperature = _steel_temperature(
52 |         fire_time=fire_time,
53 |         fire_temperature=fire_temperature,
54 |         beam_rho=beam_rho,
55 |         beam_cross_section_area=beam_cross_section_area,
56 |         protection_k=protection_k,
57 |         protection_rho=protection_rho,
58 |         protection_c=protection_c,
59 |         protection_thickness=solver_protection_thickness,
60 |         protection_protected_perimeter=protection_protected_perimeter,
61 |     )
62 |     solver_temperature_goal = max(steel_temperature)
63 | 
64 |     # Empirical fit: equivalent ISO 834 exposure as a function of the peak steel temperature reached
65 |     solver_time_equivalence_solved = 1.06011 * np.exp(0.00667416 * solver_temperature_goal) - 7.5
66 | 
67 |     return steel_temperature, solver_temperature_goal, solver_time_equivalence_solved * phi_teq
68 | 
69 | 
70 | def teq_main(
71 |         index: int,
72 |         beam_cross_section_area: float,
73 |         beam_position_vertical: float,
74 |         beam_position_horizontal: float,
75 |         beam_rho: float,
76 |         fire_time_duration: float,
77 |         fire_time_step: float,
78 |         fire_combustion_efficiency: float,
79 |         fire_gamma_fi_q: float,
80 |         fire_hrr_density: float,
81 |         fire_load_density: float,
82 |         fire_mode: int,
83 |         fire_nft_limit: float,
84 |         fire_spread_speed: float,
85 |         fire_t_alpha: float,
86 |         fire_tlim: float,
87 |         protection_c: float,
88 |         protection_k: float,
89 |         protection_protected_perimeter: float,
90 |         protection_rho: float,
91 |         protection_d_p: float,
92 |         room_breadth: float,
93 |         room_depth: float,
94 |         room_height: float,
95 |         room_wall_thermal_inertia: float,
96 |         # solver_temperature_goal: float,
97 |         # solver_max_iter: int,
98 |         # solver_thickness_lbound: float,
99 |         # solver_thickness_ubound: float,
100 |         # solver_tol: float,
101 |         window_height: float,
102 |         window_open_fraction: float,
103 |         window_width: float,
104 |         window_open_fraction_permanent: float,
105 |         epsilon_q: float,
106 |         t_k_y_theta: int,
107 |         phi_teq: float = 1.0,
108 |         timber_charring_rate=np.nan,
109 |         timber_charred_depth=np.nan,
110 |         timber_hc: float = np.nan,
111 |         timber_density: float = np.nan,
112 |         timber_exposed_area: float = np.nan,
113 |         timber_depth: float = np.nan,
114 |         timber_solver_tol: float = np.nan,
115 |         timber_solver_ilim: float = np.nan,
116 |         occupancy_type: str = np.nan,
117 |         car_cluster_size: int = np.nan,
118 | ) -> tuple:
119 |     # Ensure room_depth is the longer of (room_depth, room_breadth); swap in place if needed
120 |     if room_depth < room_breadth:
121 |         room_depth += room_breadth
122 |         room_breadth = room_depth - room_breadth
123 |         room_depth -= room_breadth
124 | 
125 |     # todo: wip for car park!!!
126 |     if occupancy_type == '__CAR_PARK__':
127 |         fire_mode = 1  # force to travelling fire only
128 |         # work out the new room depth based on how many cars are involved in the fire
129 |         if car_cluster_size is not None and not np.isnan(car_cluster_size) and car_cluster_size >= 0:
130 |             car_cluster_size = int(car_cluster_size) + 1
131 |             room_depth_original = float(room_depth)
132 |             parking_bay_width = 2.3
133 |             n_parking_bay_row = 2
134 |             average_area_per_parking_bay = 4283 / 202
135 | 
136 |             room_depth = car_cluster_size * parking_bay_width / n_parking_bay_row
137 |             room_floor_area = car_cluster_size * average_area_per_parking_bay
138 |             room_breadth = room_floor_area / room_depth
139 | 
140 |             beam_position_horizontal = (beam_position_horizontal / room_depth_original) * room_depth
141 | 
142 |     window_open_fraction = (
143 |             window_open_fraction * (1 - window_open_fraction_permanent) + window_open_fraction_permanent)
144 | 
145 |     # Fix ventilation opening size, so it doesn't exceed wall area
146 |     if window_height > room_height:
147 |         window_height = room_height
148 | 
149 |     # Calculate fire time, this is used for all fire curves in the calculation
150 |     fire_time = np.arange(0, fire_time_duration + fire_time_step, fire_time_step)
151 | 
152 |     # Calculate ISO 834 fire temperature
153 |     # fire_time_iso834 = fire_time
154 |     # fire_temperature_iso834 = (345.0 * np.log10((fire_time / 60.0) * 8.0 + 1.0) + 20.0) + 273.15  # in [K]
155 | 
156 |     # initialise solver iteration count for timber fuel contribution
157 |     timber_solver_iter_count = -1
158 |     timber_exposed_duration = 0  # initial condition, timber exposed duration
159 |     _fire_load_density_ = float(fire_load_density)  # preserve original fire load density
160 | 
161 |     while True:
162 |         timber_solver_iter_count += 1
163 |         # the following `if` decides whether to calculate `timber_charred_depth_i` from
164 |         # `timber_charring_rate` or from `timber_charred_depth`
165 |         if timber_charred_depth is None or (isinstance(timber_charred_depth, float) and np.isnan(timber_charred_depth)):
166 |             # calculate from timber charring rate
167 |             if isinstance(timber_charring_rate, (float, int)):
168 |                 timber_charring_rate_i = timber_charring_rate
169 |             elif isinstance(timber_charring_rate, Callable):
170 |                 timber_charring_rate_i = timber_charring_rate(timber_exposed_duration)
171 |             else:
172 |                 raise TypeError('`timber_charring_rate` is not numerical nor Callable type')
173 |             timber_charring_rate_i *= 1. / 1000.  # [mm/min] -> [m/min]
174 |             timber_charring_rate_i *= 1. / 60.  # [m/min] -> [m/s]
175 |             timber_charred_depth_i = timber_charring_rate_i * timber_exposed_duration
176 |         else:
177 |             # calculate from timber charred depth
178 |             if isinstance(timber_charred_depth, (float, int)):
179 |                 timber_charred_depth_i = timber_charred_depth
180 |             elif isinstance(timber_charred_depth, Callable):
181 |                 timber_charred_depth_i = timber_charred_depth(timber_exposed_duration)
182 |             else:
183 |                 raise TypeError('`timber_charred_depth` is not numerical nor Callable type')
184 |             timber_charred_depth_i /= 1000.
185 | 
186 |         # make sure the calculated charred depth does not exceed the available timber depth
187 |         if timber_depth is not None and not np.isnan(timber_depth):
188 |             timber_charred_depth_i = min(timber_charred_depth_i, timber_depth)
189 | 
190 |         timber_charred_volume = timber_charred_depth_i * timber_exposed_area
191 |         timber_charred_mass = timber_density * timber_charred_volume
192 |         timber_fire_load = timber_charred_mass * timber_hc
193 |         timber_fire_load_density = timber_fire_load / (room_breadth * room_depth)
194 | 
195 |         if np.isnan(timber_fire_load_density):
196 |             fire_load_density = _fire_load_density_
197 |         else:
198 |             fire_load_density = _fire_load_density_ + timber_fire_load_density
199 | 
200 |         # To check what design fire to use
201 |         fire_type = decide_fire(
202 |             window_height=window_height, window_width=window_width, window_open_fraction=window_open_fraction,
203 |             room_breadth=room_breadth, room_depth=room_depth, room_height=room_height, fire_mode=fire_mode,
204 |             fire_load_density=fire_load_density, fire_combustion_efficiency=fire_combustion_efficiency,
205 |             fire_hrr_density=fire_hrr_density, fire_spread_speed=fire_spread_speed
206 |         )
207 | 
208 |         # To calculate design fire temperature
209 |         fire_temperature, beam_position_horizontal, t1, t2, t3 = evaluate_fire_temperature(
210 |             window_height=window_height, window_width=window_width, window_open_fraction=window_open_fraction,
211 |             room_breadth=room_breadth, room_depth=room_depth, room_height=room_height,
212 |             room_wall_thermal_inertia=room_wall_thermal_inertia, fire_tlim=fire_tlim, fire_type=fire_type,
213 |             fire_time=fire_time, fire_nft_limit=fire_nft_limit, fire_load_density=fire_load_density,
214 |             fire_combustion_efficiency=fire_combustion_efficiency, fire_hrr_density=fire_hrr_density,
215 |             fire_spread_speed=fire_spread_speed, fire_t_alpha=fire_t_alpha, fire_gamma_fi_q=fire_gamma_fi_q,
216 |             beam_position_vertical=beam_position_vertical, beam_position_horizontal=beam_position_horizontal
217 |         )
218 | 
219 |         # To solve protection thickness at critical temperature
220 |         # inputs.update(solve_protection_thickness(**inputs))
221 | 
222 |         # To solve time equivalence in ISO 834
223 |         steel_temperature, solver_temperature_goal, solver_time_equivalence_solved = solve_time_equivalence_iso834(
224 |             fire_time=fire_time, fire_temperature=fire_temperature, beam_cross_section_area=beam_cross_section_area,
225 |             beam_rho=beam_rho, protection_k=protection_k, protection_rho=protection_rho, protection_c=protection_c,
226 |             protection_protected_perimeter=protection_protected_perimeter, solver_protection_thickness=protection_d_p,
227 |             phi_teq=phi_teq
228 |         )
229 | 
230 |         # additional fuel contribution from timber
231 |         if timber_exposed_area is None or np.isnan(timber_exposed_area) or timber_exposed_area <= 0:  # no timber exposed
232 |             # Exit timber fuel contribution solver if:
233 |             # 1. no timber exposed
234 |             # 2. timber exposed area undefined (None or NaN)
235 |             break
236 |         elif timber_solver_iter_count >= timber_solver_ilim:
237 |             solver_convergence_status = np.nan
238 |             solver_time_critical_temp_solved = np.nan
239 |             solver_time_equivalence_solved = np.nan
240 |             solver_steel_temperature_solved = np.nan
241 |             solver_protection_thickness = np.nan
242 |             solver_iter_count = np.nan
243 |             timber_exposed_duration = np.nan
244 |             break
245 |         elif not -np.inf <= protection_d_p <= np.inf:
246 |             # no protection thickness solution
247 |             timber_exposed_duration = protection_d_p
248 |             break
249 |         elif abs(timber_exposed_duration - solver_time_equivalence_solved) <= timber_solver_tol:
250 |             # convergence achieved
251 |             break
252 |         else:
253 |             timber_exposed_duration = solver_time_equivalence_solved
254 | 
255 |     timber_charring_rate = timber_charred_depth_i / timber_exposed_duration if timber_exposed_duration else 0
256 |     timber_exposed_duration = timber_exposed_duration
257 |     timber_solver_iter_count = timber_solver_iter_count
258 |     timber_fire_load = timber_fire_load
259 |     timber_charred_depth = timber_charred_depth_i
260 |     timber_charred_mass = timber_charred_mass
261 |     timber_charred_volume = timber_charred_volume
262 | 
263 |     t_k_y_theta = int(t_k_y_theta)
264 |     T_max_t = list()
265 |     k_y_theta_t = list()
266 |     for t_ in range(t_k_y_theta, int(fire_time_duration + t_k_y_theta / 2), t_k_y_theta):
267 |         T_max_t.append(np.max(steel_temperature[fire_time <= t_]))
268 | 
269 |     for T_max_ in T_max_t:  # one strength reduction factor per stored peak steel temperature
270 |         k_y_theta_t.append(k_y_theta_prob(T_max_, epsilon_q=epsilon_q))
271 | 
272 |     return (
273 |         index, beam_position_horizontal, fire_combustion_efficiency, fire_hrr_density, fire_nft_limit,
274 |         fire_spread_speed, window_open_fraction, epsilon_q, fire_load_density, fire_type, t1, t2, t3,
275 |         solver_temperature_goal, solver_time_equivalence_solved, timber_charring_rate, timber_exposed_duration,
276 |         timber_solver_iter_count, timber_fire_load, timber_charred_depth, timber_charred_mass, timber_charred_volume,
277 |         *T_max_t, *k_y_theta_t,
278 |     )
279 | 
--------------------------------------------------------------------------------
/sfeprapy/mcs1/inputs.py:
--------------------------------------------------------------------------------
1 | from ..mcs0 import EXAMPLE_INPUT as __EXAMPLE_INPUT
2 | 
3 | EXAMPLE_INPUT = {'CASE_1': __EXAMPLE_INPUT['CASE_1'].copy()}
4 | for i in (
5 |         'p1', 'p2', 'p3', 'p4', 'solver_temperature_goal', 'solver_max_iter', 'solver_thickness_lbound',
6 |         'solver_thickness_ubound', 'solver_tol'
7 | ):
8 |     EXAMPLE_INPUT['CASE_1'].pop(i)
9 | EXAMPLE_INPUT['CASE_1']['epsilon_q'] = dict(ubound=1 - 1e-9, lbound=1e-9, dist='uniform_')
10 | EXAMPLE_INPUT['CASE_1']['t_k_y_theta'] = 5 * 60.
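# Assumed rationale (inferred from `sfeprapy.mcs1.calcs`): mcs1 prescribes the protection thickness
# `protection_d_p` directly instead of goal-seeking one as mcs0 does, hence the solver_* keys are
# removed above; `epsilon_q` is sampled and passed through to `k_y_theta_prob`, and `t_k_y_theta`
# is the reporting interval, in seconds, at which peak steel temperatures and strength reduction
# factors are appended to each output row.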
11 | EXAMPLE_INPUT['CASE_1']['protection_d_p'] = 0.01
12 | 
13 | if __name__ == "__main__":
14 |     import pprint
15 | 
16 |     pprint.pprint(EXAMPLE_INPUT)
17 | 
-------------------------------------------------------------------------------- /sfeprapy/mcs2/__init__.py: --------------------------------------------------------------------------------
1 | import os
2 | 
3 | from .calc import teq_main
4 | from .inputs import EXAMPLE_INPUT
5 | from ..mcs0 import MCS0, MCS0Single
6 | 
7 | 
8 | class MCS2Single(MCS0Single):
9 |     OUTPUT_KEYS = (
10 |         'index', 'beam_position_horizontal', 'fire_combustion_efficiency', 'fire_hrr_density', 'fire_nft_limit',
11 |         'fire_spread_speed', 'window_open_fraction', 'fire_load_density', 'fire_type', 't1', 't2', 't3',
12 |         'solver_steel_temperature_solved', 'solver_time_critical_temp_solved', 'solver_protection_thickness',
13 |         'solver_iter_count', 'solver_time_equivalence_solved', 'timber_charring_rate', 'timber_exposed_duration',
14 |         'timber_solver_iter_count', 'timber_fire_load', 'timber_charred_depth', 'timber_charred_mass',
15 |         'timber_charred_volume',
16 | 
17 |         'room_depth', 'room_breadth', 'window_height', 'window_width', 'beam_position_vertical',
18 |     )
19 | 
20 |     @property
21 |     def worker(self):
22 |         return teq_main
23 | 
24 |     @property
25 |     def output_keys(self) -> tuple:
26 |         return MCS2Single.OUTPUT_KEYS
27 | 
28 | 
29 | class MCS2(MCS0):
30 |     @property
31 |     def new_mcs_case(self):
32 |         return MCS2Single
33 | 
34 | 
35 | def cli_main(fp_mcs_in: str, n_threads: int = 1):
36 |     fp_mcs_in = os.path.realpath(fp_mcs_in)
37 | 
38 |     mcs = MCS2()
39 |     mcs.inputs = fp_mcs_in
40 |     mcs.n_threads = n_threads
41 |     mcs.run()
42 | 
-------------------------------------------------------------------------------- /sfeprapy/mcs2/calc.py: --------------------------------------------------------------------------------
1 | from ..mcs0 import teq_main as teq_main_mcs0
2 | 
3 | 
4 | def teq_main(
5 |         index: int,
6 |         beam_cross_section_area: float,
7 |         beam_position_vertical: float,
8 |         # beam_position_horizontal,  # deprecated from mcs0
9 |         beam_position_horizontal_ratio: float,  # new relative to mcs0
10 |         beam_rho: float,
11 |         fire_time_duration: float,
12 |         fire_time_step: float,
13 |         fire_combustion_efficiency: float,
14 |         fire_gamma_fi_q: float,
15 |         fire_hrr_density: float,
16 |         fire_load_density: float,
17 |         fire_mode: int,
18 |         fire_nft_limit: float,
19 |         fire_spread_speed: float,
20 |         fire_t_alpha: float,
21 |         fire_tlim: float,
22 |         protection_c: float,
23 |         protection_k: float,
24 |         protection_protected_perimeter: float,
25 |         protection_rho: float,
26 |         # room_breadth: float,  # deprecated from mcs0
27 |         # room_depth: float,  # deprecated from mcs0
28 |         room_height: float,
29 |         room_wall_thermal_inertia: float,
30 |         room_floor_area: float,  # new relative to mcs0
31 |         room_breadth_depth_ratio: float,  # new relative to mcs0
32 |         solver_temperature_goal: float,
33 |         solver_max_iter: int,
34 |         solver_thickness_lbound: float,
35 |         solver_thickness_ubound: float,
36 |         solver_tol: float,
37 |         # window_width: float,  # deprecated from mcs0
38 |         window_open_fraction: float,
39 |         # window_height: float,  # deprecated from mcs0
40 |         window_open_fraction_permanent: float,
41 |         window_height_room_height_ratio: float,  # new relative to mcs0
42 |         window_area_floor_ratio: float,  # new relative to mcs0
43 |         phi_teq: float = 1.0,
44 |         timber_charring_rate=None,
45 |         timber_charred_depth=None,
46 |         timber_hc: float = None,
47 |         timber_density: float = None,
48 |         timber_exposed_area: float = None,
49 |         timber_depth: float = None,
50 |         timber_solver_tol: float = None,
51 |         timber_solver_ilim: float = None,
52 |         occupancy_type: str = None,
53 |         car_cluster_size: int = None,
54 | ) -> tuple:
55 |     # -----------------------------------------
56 |     # Calculate `room_breadth` and `room_depth`
57 |     # -----------------------------------------
58 |     # Solve the simultaneous equations:
59 |     #     room_depth * room_breadth = room_floor_area
60 |     #     room_breadth / room_depth = room_breadth_depth_ratio
61 |     # so that:
62 |     #     room_depth = (room_floor_area / room_breadth_depth_ratio) ** 0.5
63 |     assert 0 < room_breadth_depth_ratio <= 1.  # ensure the ratio is within (0, 1]
64 |     room_depth = (room_floor_area / room_breadth_depth_ratio) ** 0.5
65 |     room_breadth = room_breadth_depth_ratio * room_depth
66 |     # ensure the derived floor dimensions match the prescribed floor area
67 |     assert abs(room_depth * room_breadth - room_floor_area) < 1e-5
68 | 
69 |     # -----------------------------------------
70 |     # Calculate window opening width and height
71 |     # -----------------------------------------
72 |     assert 0 < window_height_room_height_ratio <= 1.  # ensure the ratio is within (0, 1]
73 |     window_height = window_height_room_height_ratio * room_height
74 |     window_width = room_floor_area * window_area_floor_ratio / window_height
75 | 
76 |     # -----------------------------------------------
77 |     # Calculate beam vertical and horizontal position
78 |     # -----------------------------------------------
79 |     beam_position_vertical = min(beam_position_vertical, room_height)
80 |     beam_position_horizontal = room_depth * beam_position_horizontal_ratio
81 | 
82 |     return (*teq_main_mcs0(
83 |         index=index,
84 |         beam_cross_section_area=beam_cross_section_area,
85 |         beam_position_vertical=beam_position_vertical,
86 |         beam_position_horizontal=beam_position_horizontal,
87 |         beam_rho=beam_rho,
88 |         fire_time_duration=fire_time_duration,
89 |         fire_time_step=fire_time_step,
90 |         fire_combustion_efficiency=fire_combustion_efficiency,
91 |         fire_gamma_fi_q=fire_gamma_fi_q,
92 |         fire_hrr_density=fire_hrr_density,
93 |         fire_load_density=fire_load_density,
94 |         fire_mode=fire_mode,
95 |         fire_nft_limit=fire_nft_limit,
96 |         fire_spread_speed=fire_spread_speed,
97 |         fire_t_alpha=fire_t_alpha,
98 |         fire_tlim=fire_tlim,
99 |         protection_c=protection_c,
100 |         protection_k=protection_k,
101 |         protection_protected_perimeter=protection_protected_perimeter,
102 |         protection_rho=protection_rho,
103 |         room_breadth=room_breadth,
104 |         room_depth=room_depth,
105 |         room_height=room_height,
106 |         room_wall_thermal_inertia=room_wall_thermal_inertia,
107 |         solver_temperature_goal=solver_temperature_goal,
108 |         solver_max_iter=solver_max_iter,
109 |         solver_thickness_lbound=solver_thickness_lbound,
110 |         solver_thickness_ubound=solver_thickness_ubound,
111 |         solver_tol=solver_tol,
112 |         window_height=window_height,
113 |         window_open_fraction=window_open_fraction,
114 |         window_width=window_width,
115 |         window_open_fraction_permanent=window_open_fraction_permanent,
116 |         phi_teq=phi_teq,
117 |         timber_charring_rate=timber_charring_rate,
118 |         timber_charred_depth=timber_charred_depth,
119 |         timber_hc=timber_hc,
120 |         timber_density=timber_density,
121 |         timber_exposed_area=timber_exposed_area,
122 |         timber_depth=timber_depth,
123 |         timber_solver_tol=timber_solver_tol,
124 |         timber_solver_ilim=timber_solver_ilim,
125 |         occupancy_type=occupancy_type,
126 |         car_cluster_size=car_cluster_size,
127 |     ),
128 |         room_depth, room_breadth, window_height, window_width, beam_position_vertical
129 |     )
130 | 
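The inversion above is easy to sanity-check by hand. A minimal standalone sketch with assumed example values (illustration only, not part of the package):

room_floor_area = 100.0  # [m2], assumed for illustration
room_breadth_depth_ratio = 0.5  # breadth / depth, must be within (0, 1]

room_depth = (room_floor_area / room_breadth_depth_ratio) ** 0.5  # 14.142 m
room_breadth = room_breadth_depth_ratio * room_depth  # 7.071 m
assert abs(room_breadth * room_depth - room_floor_area) < 1e-9  # floor area recovered
assert abs(room_breadth / room_depth - room_breadth_depth_ratio) < 1e-9  # ratio recovered

# window opening recovered from the two ratios, as in `teq_main` above
room_height = 3.0  # [m], assumed
window_height_room_height_ratio = 0.5
window_area_floor_ratio = 0.1
window_height = window_height_room_height_ratio * room_height  # 1.5 m
window_width = room_floor_area * window_area_floor_ratio / window_height  # 6.667 m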
-------------------------------------------------------------------------------- /sfeprapy/mcs2/inputs.py: --------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | from sfeprapy.mcs0 import EXAMPLE_INPUT as __EXAMPLE_INPUT
4 | 
5 | # Create the base case from `sfeprapy.mcs0`,
6 | # using "CASE_1" only
7 | EXAMPLE_INPUT = {'Residential': __EXAMPLE_INPUT['CASE_1'].copy()}
8 | # remove items which are no longer used in `sfeprapy.mcs2` (compared to `sfeprapy.mcs0`)
9 | for i in ['room_breadth', 'room_depth', 'window_width', 'window_height', 'p1', 'p2', 'p3', 'p4',
10 |           'general_room_floor_area', 'beam_position_horizontal']:
11 |     EXAMPLE_INPUT['Residential'].pop(i)
12 | # define the occupancy-specific cases below
13 | 
14 | # Residential
15 | EXAMPLE_INPUT['Residential'].update(dict(
16 |     case_name='Residential',
17 |     beam_position_horizontal_ratio=dict(dist='uniform_', lbound=0.6, ubound=0.9),
18 |     fire_load_density=dict(dist="gumbel_r_", lbound=10, ubound=1200, mean=780, sd=234),
19 |     fire_hrr_density=dict(dist="uniform_", lbound=0.32, ubound=0.57),
20 |     phi_teq=dict(dist='lognorm_', mean=1, sd=0.25, ubound=3, lbound=1e-4),
21 |     room_floor_area=dict(dist='uniform_', lbound=9., ubound=30.),
22 |     room_height=dict(dist='constant_', lbound=2.4, ubound=2.4),
23 |     room_breadth_depth_ratio=dict(dist='uniform_', lbound=0.4, ubound=0.6),  # todo
24 |     window_height_room_height_ratio=dict(dist='uniform_', lbound=0.3, ubound=0.9),
25 |     window_area_floor_ratio=dict(dist='uniform_', lbound=0.05, ubound=0.20),
26 | ))
27 | 
28 | # Office
29 | EXAMPLE_INPUT['Office'] = EXAMPLE_INPUT['Residential'].copy()
30 | EXAMPLE_INPUT['Office'].update(dict(
31 |     case_name='Office',
32 |     beam_position_horizontal_ratio=dict(dist='uniform_', lbound=0.6, ubound=0.9),
33 |     fire_load_density=dict(dist="gumbel_r_", lbound=10, ubound=1200, mean=420, sd=126),
34 |     fire_hrr_density=dict(dist="uniform_", lbound=0.15, ubound=0.65),
35 |     room_floor_area=dict(dist='uniform_', lbound=50., ubound=1000.),
36 |     room_height=dict(dist='uniform_', lbound=2.8, ubound=4.5),
37 |     room_breadth_depth_ratio=dict(dist='uniform_', lbound=0.4, ubound=0.6),  # todo
38 |     window_height_room_height_ratio=dict(dist='uniform_', lbound=0.3, ubound=0.9),
39 |     window_area_floor_ratio=dict(dist='uniform_', lbound=0.05, ubound=0.40),
40 |     phi_teq=dict(dist='lognorm_', mean=1, sd=0.25, ubound=3, lbound=1e-4),
41 | ))
42 | 
43 | # Retail
44 | 
45 | EXAMPLE_INPUT['Retail'] = EXAMPLE_INPUT['Residential'].copy()
46 | EXAMPLE_INPUT['Retail'].update(dict(
47 |     case_name='Retail',
48 |     beam_position_horizontal_ratio=dict(dist='uniform_', lbound=0.6, ubound=0.9),
49 |     fire_load_density=dict(dist="gumbel_r_", lbound=10., ubound=2000., mean=600., sd=180.),
50 |     fire_hrr_density=dict(dist="uniform_", lbound=0.27, ubound=1.0),
51 |     room_floor_area=dict(dist='uniform_', lbound=50., ubound=1000.),
52 |     room_height=dict(dist='uniform_', lbound=4.5, ubound=7.0),
53 |     room_breadth_depth_ratio=dict(dist='uniform_', lbound=0.4, ubound=0.6),  # todo
54 |     window_height_room_height_ratio=dict(dist='uniform_', lbound=0.5, ubound=1.0),
55 |     window_area_floor_ratio=dict(dist='uniform_', lbound=0.05, ubound=0.40),
56 |     phi_teq=dict(dist='lognorm_', mean=1, sd=0.25, ubound=3, lbound=1e-4),
57 | ))
58 | 
59 | if __name__ == "__main__":
60 |     import pprint
61 | 
62 |     pprint.pprint(EXAMPLE_INPUT)
63 | 
-------------------------------------------------------------------------------- /sfeprapy/project_info.py: --------------------------------------------------------------------------------
1 | import logging
2 | 
3 | logger = logging.getLogger('sfeprapy')
4 | c_handler = logging.StreamHandler()
5 | c_handler.setLevel(logging.DEBUG)
6 | c_handler.setFormatter(
7 |     logging.Formatter(fmt='{asctime} {levelname:8.8s} [{filename:15.15s}:{lineno:05d}] {message:s}', style='{'))
8 | logger.addHandler(c_handler)
9 | logger.setLevel(logging.DEBUG)
10 | 
11 | __version__ = "0.0.1"
12 | 
-------------------------------------------------------------------------------- /test/__init__.py: --------------------------------------------------------------------------------
-------------------------------------------------------------------------------- /test/car_park.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy import stats
3 | 
4 | # Peak heat release rate, Q_dot_max [kW]
5 | # Obtained from Table 5-4 in Mohd (2015)
6 | peak_heat_release_rate_stats = dict(
7 |     mini=dict(dist='weibull_min', c=5.19, scale=3809),
8 |     light=dict(dist='weibull_min', c=1.66, scale=5078),
9 |     compact=dict(dist='weibull_min', c=2.40, scale=4691),
10 |     medium=dict(dist='weibull_min', c=3.18, scale=7688),
11 |     heavy=dict(dist='weibull_min', c=4.25, scale=4588),
12 | )
13 | 
14 | # Fire growth coefficient, alpha_peak [kW/min**2]
15 | # Obtained from Table 5-4 in Mohd (2015)
16 | fire_growth_stats = dict(
17 |     mini=dict(dist='gamma', a=1.39, scale=11.86),
18 |     light=dict(dist='gamma', a=1.23, scale=14.78),
19 |     compact=dict(dist='gamma', a=1.18, scale=5.14),
20 |     medium=dict(dist='gamma', a=2.24, scale=2.75),
21 |     heavy=dict(dist='gamma', a=0.36, scale=159.18),
22 | )
23 | 
24 | # Fire decay coefficient, beta_exp [min**-1]
25 | # Obtained from Table 5-4 in Mohd (2015)
26 | fire_decay_stats = dict(
27 |     mini=dict(dist='weibull_min', c=0.93, scale=0.17),
28 |     light=dict(dist='weibull_min', c=1.21, scale=0.11),
29 |     compact=dict(dist='weibull_min', c=3.93, scale=0.08),
30 |     medium=dict(dist='weibull_min', c=1.38, scale=0.11),
31 |     heavy=dict(dist='weibull_min', c=2.51, scale=0.08),
32 | )
33 | 
34 | # Vehicle curb weight [kg]
35 | # Table 3-1, ANSI classification of vehicles by curb weight; scipy's `uniform` takes loc=min and scale=(max - min)
36 | car_curb_weight_stats = dict(
37 |     mini=dict(dist='uniform', loc=680, scale=906 - 680),
38 |     light=dict(dist='uniform', loc=907, scale=1134 - 907),
39 |     compact=dict(dist='uniform', loc=1135, scale=1360 - 1135),
40 |     medium=dict(dist='uniform', loc=1361, scale=1587 - 1361),
41 |     heavy=dict(dist='uniform', loc=1588, scale=1),  # the heavy class has no defined upper bound
42 | )
43 | 
44 | # Car width [m]
45 | # https://www.nimblefins.co.uk/cheap-car-insurance/average-car-dimensions
46 | car_width_stats = dict(
47 |     mini=dict(dist='uniform', loc=1.615, scale=1.670 - 1.615),  # city car
48 |     light=dict(dist='uniform', loc=1.735, scale=1.805 - 1.735),  # hatchback
49 |     compact=dict(dist='uniform', loc=1.735, scale=1.805 - 1.735),  # hatchback
50 |     medium=dict(dist='uniform', loc=1.825, scale=1.842 - 1.825),  # saloon
51 |     heavy=dict(dist='uniform', loc=1.900, scale=2.012 - 1.900),  # SUV
52 | )
53 | 
54 | 
55 | def car_fire_hrr(Q_dot_max: float, alpha_peak: float, beta_exp: float, t: np.ndarray):
56 |     """
57 |     :param Q_dot_max: peak heat release rate [kW]
58 |     :param alpha_peak: fire growth coefficient [kW/min**2]
59 |     :param beta_exp: fire decay coefficient [min**-1]
60 |     :param t: time array [s]
61 |     :return: heat release rate at each time step [kW]
62 |     """
63 |     t = t / 60.  # [s] -> [min]
64 | 
65 |     Q_dot = np.zeros_like(t)
66 |     t_max = (Q_dot_max / alpha_peak) ** 0.5  # time at which the growth phase reaches the peak HRR
67 |     t_end = t_max - np.log(50 / Q_dot_max) / beta_exp  # time at which the decay phase drops to 50 kW
68 |     Q_dot[t <= t_max] = alpha_peak * (t[t <= t_max] ** 2)
69 |     Q_dot[(t > t_max) & (t <= t_end)] = Q_dot_max * np.exp(beta_exp * (t_max - t[(t > t_max) & (t <= t_end)]))
70 | 
71 |     return Q_dot
72 | 
73 | 
74 | def _test_car_fire_hrr(t=np.arange(0, 3600)):
75 |     return car_fire_hrr(
76 |         Q_dot_max=4000,
77 |         alpha_peak=11,
78 |         beta_exp=0.17,
79 |         t=t
80 |     )
81 | 
82 | 
83 | def point_source_hrr(Q_dot, lambda_r, R):
84 |     q_dot_fl = Q_dot * lambda_r / (4 * np.pi * R ** 2)  # point source radiation model
85 |     return q_dot_fl
86 | 
87 | 
88 | def _test_point_source_hrr(Q_dot=_test_car_fire_hrr()):
89 |     return point_source_hrr(
90 |         Q_dot=Q_dot,
91 |         lambda_r=0.6,
92 |         R=1.2
93 |     )
94 | 
95 | 
96 | def flux_time_product(t, q_dot, q_dot_crit, n):
97 |     q_dot_ave = (q_dot[1:] + q_dot[:-1]) / 2  # average incident flux over each time step
98 |     ftp_i = (np.abs(q_dot_ave - q_dot_crit) ** n) * (t[1:] - t[:-1])  # FTP increment per time step
99 |     ftp = np.zeros_like(t, dtype=float)  # float output regardless of the dtype of t
100 |     ftp[1:] = np.cumsum(ftp_i)
101 |     return ftp
102 | 
103 | 
104 | def _test_flux_time_product(t=np.arange(0, 3600), q_dot=_test_point_source_hrr()):
105 |     ftp = flux_time_product(t=t, q_dot=q_dot, q_dot_crit=5.7, n=1.5)
106 |     import matplotlib.pyplot as plt
107 |     fig, ax = plt.subplots()
108 |     ax.plot(t, ftp)
109 |     fig.show()
110 |     return ftp
111 | 
112 | 
113 | def car_time_to_ignition(t, q_dot, q_dot_crit, n, ftp):
114 |     ftp_ = flux_time_product(t=t, q_dot=q_dot, q_dot_crit=q_dot_crit, n=n)
115 |     if np.any(ftp_ >= ftp):
116 |         t_ig = np.amin(t[ftp_ >= ftp])
117 |     else:
118 |         t_ig = 0  # no ignition within the analysed duration
119 |     return t_ig
120 | 
121 | 
122 | def _test_car_time_to_ignition():
123 |     t = np.arange(0, 3600)
124 |     q_dot_crit = 5.7
125 |     n = 1.5
126 |     Q_dot_max = 4000
127 |     alpha_peak = 11.
128 |     beta_exp = 0.17
129 |     lambda_r = 0.6
130 |     R = 1.5
131 |     ftp = 3258
132 | 
133 |     Q_dot = car_fire_hrr(Q_dot_max=Q_dot_max, alpha_peak=alpha_peak, beta_exp=beta_exp, t=t)
134 |     q_dot = point_source_hrr(Q_dot=Q_dot, lambda_r=lambda_r, R=R)
135 |     t_ig = car_time_to_ignition(t=t, q_dot=q_dot, q_dot_crit=q_dot_crit, n=n, ftp=ftp)
136 |     print(t_ig)
137 |     assert abs(t_ig - 437) <= 1.
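The `uniform` entries in the statistics dictionaries above are interpreted through `scipy.stats`, which parameterises a uniform distribution by `loc` (the minimum) and `scale` (the interval width, not the maximum). A quick standalone check, with an assumed car width range for illustration:

from scipy import stats

# scipy's uniform covers [loc, loc + scale]
dist = stats.uniform(loc=1.615, scale=1.670 - 1.615)  # assumed range [1.615, 1.670] m
samples = dist.rvs(size=10_000)
assert 1.615 <= samples.min() and samples.max() <= 1.670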
138 | 
139 | 
140 | def generate_rvs(n_samples: int, dist_param):
141 |     dist_type = dist_param.pop('dist')
142 |     try:
143 |         dist = getattr(stats, dist_type)(**dist_param)
144 |     except Exception as e:
145 |         raise TypeError(f'Failed to generate random variables {dist_type}, {dist_param}, {e}')
146 |     return dist.rvs(size=n_samples)
147 | 
148 | 
149 | if __name__ == '__main__':
150 |     import matplotlib.pyplot as plt
151 | 
152 |     n_samples = 1000
153 |     arr_parking_width = np.linspace(2.2, 3.2, 1000)
154 |     np.random.shuffle(arr_parking_width)
155 | 
156 |     # params = peak_heat_release_rate_stats['mini']
157 |     # values = generate_rvs(n_samples=1000, dist_param=params)
158 | 
159 |     car_type_probability = dict(mini=.09, light=.22, compact=.27, medium=.27, heavy=.15)
160 | 
161 |     # ==========
162 |     # Fire curve
163 |     # ==========
164 | 
165 |     # HRR curve parameters based upon car type statistics
166 |     arr_Q_dot_max = np.array([])
167 |     arr_alpha_peak = np.array([])
168 |     arr_beta_exp = np.array([])
169 |     arr_car_width = np.array([])
170 |     for car_type, prob in car_type_probability.items():
171 |         n_samples_i = int(prob * n_samples)
172 |         Q_dot_max = generate_rvs(n_samples=n_samples_i, dist_param=peak_heat_release_rate_stats[car_type].copy())
173 |         alpha_peak = generate_rvs(n_samples=n_samples_i, dist_param=fire_growth_stats[car_type].copy())
174 |         beta_exp = generate_rvs(n_samples=n_samples_i, dist_param=fire_decay_stats[car_type].copy())
175 |         car_width = generate_rvs(n_samples=n_samples_i, dist_param=car_width_stats[car_type].copy())
176 |         arr_Q_dot_max = np.hstack((arr_Q_dot_max, Q_dot_max))
177 |         arr_alpha_peak = np.hstack((arr_alpha_peak, alpha_peak))
178 |         arr_beta_exp = np.hstack((arr_beta_exp, beta_exp))
179 |         arr_car_width = np.hstack((arr_car_width, car_width))
180 | 
181 |     print([f'{i:.2f}' for i in arr_alpha_peak])
182 | 
183 |     # make fire curves, find ignition time
184 |     t = np.arange(0, 3600, 1)
185 |     arr_t_ig = list()
186 |     arr_spread_speed = list()
187 |     for i in range(len(arr_Q_dot_max)):
188 |         Q_dot_max = arr_Q_dot_max[i]
189 |         alpha_peak = arr_alpha_peak[i]
190 |         beta_exp = arr_beta_exp[i]
191 |         car_width = arr_car_width[i]
192 |         parking_width = arr_parking_width[i]
193 |         hrr = car_fire_hrr(Q_dot_max=Q_dot_max, alpha_peak=alpha_peak, beta_exp=beta_exp, t=t)
194 |         hrr_received = point_source_hrr(hrr, lambda_r=0.6, R=parking_width)
195 |         t_ig = car_time_to_ignition(t=t, q_dot=hrr_received, q_dot_crit=3.1, n=2, ftp=21862)  # page 207
196 |         arr_t_ig.append(t_ig)
197 |         if t_ig > 0:
198 |             arr_spread_speed.append(parking_width / t_ig)
199 | 
200 |     print(max(arr_t_ig), min(arr_t_ig), sum(arr_t_ig) / len(arr_t_ig))
201 |     print(max(arr_spread_speed), min(arr_spread_speed), sum(arr_spread_speed) / len(arr_spread_speed))
202 |     print(','.join([f'{i:.5f}' for i in arr_spread_speed]))
203 | 
204 |     # from sfeprapy.func.stats_dist_fit import fit
205 |     # fig, ax = plt.subplots()
206 |     #
207 |     # list_dist, list_params, list_sse = fit(arr_spread_speed, ax=ax, distribution_list=0)
208 |     #
209 |     # # FINDING THE BEST FIT
210 |     #
211 |     # list_dist = np.asarray(list_dist)[np.argsort(list_sse)]
212 |     # list_params = np.asarray(list_params)[np.argsort(list_sse)]
213 |     # list_sse = np.asarray(list_sse)[np.argsort(list_sse)]
214 |     #
215 |     # print(
216 |     #     "\n{:30.30}{}".format(
217 |     #         "Distribution (sorted)",
218 |     #         "Loss (Residual sum of squares) and distribution parameters",
219 |     #     )
220 |     # )
221 |     #
222 |     # for i, v in enumerate(list_dist):
223 |     #     dist_name = v.name
224 |     #     sse = list_sse[i]
225 |     #     dist_params = ", ".join(["{:10.2f}".format(j) for j in list_params[i]])
".join(["{:10.2f}".format(j) for j in list_params[i]]) 226 | # print(f"{dist_name:30.30}{sse:10.5E} - [{dist_params}]") 227 | # 228 | # dist_best = list_dist[0] 229 | # params_best = list_params[0] 230 | 231 | # PRODUCE FIGURES 232 | 233 | # ax_fitting.set_xlabel("Sample value") 234 | # ax_fitting.set_ylabel("PDF") 235 | # ax_fitting.legend().set_visible(True) 236 | # fig_fitting.savefig(os.path.join(dir_work, "distfit_fitting.png")) 237 | # 238 | # cdf_x_sampled = np.sort(samples) 239 | # cdf_y_sampled = np.linspace(0, 1, len(cdf_x_sampled)) 240 | # cdf_y_fitted = np.linspace(0, 1, 1000) 241 | # cdf_x_fitted = dist_best.ppf(cdf_y_fitted, *params_best) 242 | # fig.show() -------------------------------------------------------------------------------- /test/test_mcs.py: -------------------------------------------------------------------------------- 1 | from sfeprapy.mcs import * 2 | 3 | 4 | def test_input_parser_flatten(): 5 | x = dict(A=dict(a=0, b=1), B=dict(c=2, d=3)) 6 | y_expected = {"A:a": 0, "A:b": 1, "B:c": 2, "B:d": 3} 7 | y = InputParser.flatten_dict(x) 8 | print(y) 9 | assert y == y_expected 10 | 11 | 12 | def test_input_parser_flatten_v2(): 13 | x = dict(A=dict(a=0, b=1), B=dict(c=2, d=dict(e=3, f=4))) 14 | y_expected = {"A:a": 0, "A:b": 1, "B:c": 2, "B:d:e": 3, "B:d:f": 4} 15 | y = InputParser.flatten_dict(x) 16 | print(y) 17 | assert y == y_expected 18 | 19 | 20 | def test_input_parser_unflatten(): 21 | x = {"A:a": 0, "A:b": 1, "B:c": 2, "B:d": 3} 22 | y_expected = dict(A=dict(a=0, b=1), B=dict(c=2, d=3)) 23 | y = InputParser.unflatten_dict(x) 24 | assert y == y_expected, f'{y}!={y_expected}' 25 | 26 | 27 | def test_input_parser_unflatten_v2(): 28 | x = {"A:a": 0, "A:b": 1, "B:a": 2, "B:b": 3, "C:a": 4, "C:b:a": 5, "C:b:b": 6} 29 | y_expected = dict(A=dict(a=0, b=1), B=dict(a=2, b=3), C=dict(a=4, b=dict(a=5, b=6))) 30 | y = InputParser.unflatten_dict(x) 31 | assert y == y_expected, f'{y}!={y_expected}' 32 | 33 | 34 | def test_input_parser_sampling(): 35 | y = InputParser(dict(v=np.pi), 1000).to_dict() 36 | assert len(y["v"]) == 1000 37 | assert all([v == np.pi for v in y["v"]]) 38 | 39 | y = InputParser(dict(v="hello world."), 1000).to_dict() 40 | assert len(y["v"]) == 1000 41 | assert all([v == "hello world." 
42 | 
43 |     y = InputParser(dict(v=[0.0, 1.0, 2.0]), 1000).to_dict()
44 |     assert len(y["v"]) == 1000
45 |     assert all([all(v == np.array([0.0, 1.0, 2.0])) for v in y["v"]])
46 | 
47 |     y = InputParser(dict(v=dict(dist="uniform_", mean=4, sd=6)), 50000).to_dict()
48 |     mean = 4
49 |     sd = 6
50 |     a = mean - np.sqrt(3) * sd
51 |     b = mean + np.sqrt(3) * sd
52 |     assert len(y["v"]) == 50000
53 |     assert abs(np.max(y["v"]) - b) < 1e-3, f'{np.max(y["v"])} != {b}'
54 |     assert abs(np.min(y["v"]) - a) < 1e-3, f'{np.min(y["v"])} != {a}'
55 |     assert abs(np.mean(y["v"]) - (a + b) / 2) <= 1e-3
56 | 
57 |     y = InputParser(dict(v=dict(dist="norm_", ubound=5 + 1, lbound=5 - 1, mean=5, sd=1)), 1000).to_dict()
58 |     assert len(y["v"]) == 1000
59 |     assert np.max(y["v"]) == 6
60 |     assert np.min(y["v"]) == 4
61 |     assert abs(np.mean(y["v"]) - 5) <= 0.00001
62 | 
63 |     y = InputParser(dict(v=dict(dist="gumbel_r_", ubound=2000, lbound=50, mean=420, sd=126)), 1000).to_dict()
64 |     assert len(y["v"]) == 1000
65 |     assert abs(np.max(y["v"]) - 2000) <= 1
66 |     assert abs(np.min(y["v"]) - 50) <= 1
67 |     assert abs(np.mean(y["v"]) - 420) <= 1
68 | 
69 |     y = InputParser(dict(v=dict(dist="discrete_", values='1,2,3,4', weights='0.1,0.2,0.3,0.4')), 999).to_dict()
70 |     assert len(y["v"]) == 999
71 |     assert abs(len(y['v'][y['v'] == 1.]) - round(0.1 * 999)) <= 1.
72 |     assert abs(len(y['v'][y['v'] == 2.]) - round(0.2 * 999)) <= 1.
73 |     assert abs(len(y['v'][y['v'] == 3.]) - round(0.3 * 999)) <= 1.
74 |     assert abs(len(y['v'][y['v'] == 4.]) - round(0.4 * 999)) <= 1.
75 | 
76 |     InputParser(dict(
77 |         string="hello world",
78 |         number=10.,
79 |         dist_uniform_=dict(dist="uniform_", lbound=0., ubound=100.),
80 |         dist_gumbel_r_=dict(dist="gumbel_r_", lbound=10, ubound=1500, mean=420, sd=126),
81 |         dist_norm_=dict(dist="norm_", lbound=623.15, ubound=2023.15, mean=1323.15, sd=93),
82 |         dist_lognorm_mod_=dict(dist="lognorm_mod_", ubound=0.9999, lbound=0.0001, mean=0.2, sd=0.2),
83 |     ), n=1000).to_dict()
84 | 
85 | 
86 | if __name__ == '__main__':
87 |     test_input_parser_flatten()
88 |     test_input_parser_flatten_v2()
89 |     test_input_parser_unflatten()
90 |     test_input_parser_unflatten_v2()
91 |     test_input_parser_sampling()
92 | 
-------------------------------------------------------------------------------- /test/test_mcs0.py: --------------------------------------------------------------------------------
1 | from sfeprapy.input_parser import InputParser
2 | from sfeprapy.mcs0 import *
3 | 
4 | 
5 | def test_teq_phi():
6 |     import warnings
7 |     warnings.filterwarnings("ignore")
8 | 
9 |     input_param = dict(index=0, fire_time_step=1., fire_time_duration=5. * 60 * 60, beam_cross_section_area=0.017,
10 |                        beam_position_vertical=2.5, beam_position_horizontal=18, beam_rho=7850.,
11 |                        fire_combustion_efficiency=0.8,
12 |                        fire_gamma_fi_q=1, fire_hrr_density=0.25, fire_load_density=420, fire_mode=0,
13 |                        fire_nft_limit=1050,
14 |                        fire_spread_speed=0.01, fire_t_alpha=300, fire_tlim=0.333, protection_c=1700., protection_k=0.2,
15 |                        protection_protected_perimeter=2.14, protection_rho=800., room_breadth=16, room_depth=31.25,
16 |                        room_height=3,
17 |                        room_wall_thermal_inertia=720, solver_temperature_goal=620 + 273.15, solver_max_iter=200,
18 |                        solver_thickness_lbound=0.0001, solver_thickness_ubound=0.0500, solver_tol=0.01, window_height=2,
19 |                        window_open_fraction=0.8, window_width=72, window_open_fraction_permanent=0, phi_teq=0.1,
20 |                        timber_charring_rate=0.7, timber_exposed_area=0, timber_hc=400, timber_density=500,
21 |                        timber_solver_ilim=20,
22 |                        timber_solver_tol=1)
23 | 
24 |     input_param["phi_teq"] = 1.0
25 |     teq_10 = teq_main(**input_param)[16]
26 | 
27 |     input_param["phi_teq"] = 0.1
28 |     teq_01 = teq_main(**input_param)[16]
29 | 
30 |     print(f'Time equivalence at phi_teq=0.1: {teq_01:<8.3f}\n'
31 |           f'Time equivalence at phi_teq=1.0: {teq_10:<8.3f}\n'
32 |           f'Ratio between the above: {teq_10 / teq_01:<8.3f}\n')
33 | 
34 |     assert abs(teq_10 / teq_01 - 10) < 0.01
35 | 
36 | 
37 | def test_standard_case():
38 |     import numpy as np
39 |     import copy
40 |     from sfeprapy.mcs0 import EXAMPLE_INPUT
41 |     from tqdm import tqdm
42 | 
43 |     # increase the number of simulations so it gives sensible results
44 |     mcs_input = copy.deepcopy(EXAMPLE_INPUT)
45 |     mcs_input['CASE_1']['n_simulations'] = 10_000
46 |     mcs_input['CASE_2_teq_phi']['n_simulations'] = 10_000
47 |     mcs_input['CASE_3_timber']['n_simulations'] = 2_500
48 | 
49 |     mcs = MCS0()
50 |     mcs.set_inputs_dict({
51 |         'CASE_1': mcs_input.pop('CASE_1'),
52 |         'CASE_2_teq_phi': mcs_input.pop('CASE_2_teq_phi'),
53 |         'CASE_3_timber': mcs_input.pop('CASE_3_timber'),
54 |     })
55 |     pbar = tqdm()
56 |     mcs.run(1, set_progress=lambda _: pbar.update(1), set_progress_max=lambda _: setattr(pbar, 'total', _), save=True)
57 |     pbar.close()
58 | 
59 |     # 60 minutes based on Kirby et al.
60 |     x, y = mcs['CASE_1'].get_cdf()
61 |     print(np.amax(y[x < 60]))
62 |     assert abs(np.amax(y[x < 60]) - 0.8) <= 0.5
63 | 
64 |     # 63 minutes based on a test run on 16th Aug 2022
65 |     x, y = mcs['CASE_2_teq_phi'].get_cdf()
66 |     assert abs(np.amax(y[x < 64.5]) - 0.8) <= 0.5
67 | 
68 |     # 78 minutes based on a test run on 16th Aug 2022
69 |     x, y = mcs['CASE_3_timber'].get_cdf()
70 |     assert abs(np.amax(y[x < 81]) - 0.8) <= 0.5
71 | 
72 | 
73 | def test_file_input():
74 |     import tempfile
75 |     import time
76 |     import os
77 | 
78 |     from sfeprapy.func.xlsx import dict_to_xlsx
79 | 
80 |     # save input as .xlsx
81 |     with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as dir_work:
82 |         print('A temporary folder has been created:', dir_work)
83 | 
84 |         time.sleep(0.5)
85 |         fp_in = os.path.join(dir_work, 'input.xlsx')
86 |         dict_to_xlsx({k: InputParser.flatten_dict(v) for k, v in EXAMPLE_INPUT.items()}, fp_in)
87 |         print(f"A temporary input file has been created: {fp_in}")
88 |         time.sleep(0.5)
89 | 
90 |         mcs = MCS0()
91 |         mcs.set_inputs_file_path(fp_in)
92 |         mcs.run(2)
93 |         time.sleep(0.5)
94 |         mcs.save_all(False)
95 |         time.sleep(1)
96 |         mcs['CASE_1'].load_output_from_file(mcs.get_save_dir())
97 |         mcs.save_all(True)
98 |         time.sleep(0.5)
99 | 
100 | 
101 | def _test_performance():
102 |     from os import path
103 |     from sfeprapy.mcs0 import EXAMPLE_INPUT
104 |     from sfeprapy.func.xlsx import dict_to_xlsx
105 |     from tqdm import tqdm
106 |     import time
107 |     # increase the number of simulations so it gives sensible results
108 |     EXAMPLE_INPUT_ = EXAMPLE_INPUT['CASE_1'].copy()
109 |     EXAMPLE_INPUT_['n_simulations'] = 10_000
110 |     mcs_input = {'CASE_1': EXAMPLE_INPUT_,
111 |                  'CASE_2': EXAMPLE_INPUT_,
112 |                  'CASE_3': EXAMPLE_INPUT_,
113 |                  'CASE_4': EXAMPLE_INPUT_,
114 |                  'CASE_5': EXAMPLE_INPUT_,
115 |                  'CASE_6': EXAMPLE_INPUT_,
116 |                  'CASE_7': EXAMPLE_INPUT_,
117 |                  'CASE_8': EXAMPLE_INPUT_, }
118 | 
119 |     import tempfile
120 |     times = list()
121 |     with tempfile.TemporaryDirectory() as dir_work:
122 |         print(dir_work)
123 |         fp_input = path.join(dir_work, 'test.xlsx')
124 |         dict_to_xlsx({k: InputParser.flatten_dict(v) for k, v in mcs_input.items()}, fp_input)
125 | 
126 |         for i in range(1, 10, 1):
127 |             t0 = time.time()
128 |             mcs = MCS0()
129 |             mcs.set_inputs_file_path(fp_input)
130 |             pbar = tqdm(desc=f'{i:03d}')
131 |             mcs.run(i, lambda x: pbar.update(1), lambda x: setattr(pbar, 'total', x), True)
132 |             pbar.close()
133 |             times.append(time.time() - t0)
134 |     print(times)
135 |     from fsetools.etc import asciiplot
136 |     p = asciiplot.AsciiPlot(size=(45, 120))
137 |     p.plot(range(1, len(times) + 1, 1), times)
138 |     p.show()
139 | 
140 | 
141 | def multiprocessing_strategy_research():
142 |     from os import path
143 |     from sfeprapy.mcs0 import EXAMPLE_INPUT
144 |     from sfeprapy.func.xlsx import dict_to_xlsx
145 |     from itertools import product
146 |     import time
147 | 
148 |     mps = [1, 2]
149 |     n_cases = list(range(0, 25, 4))[1:]
150 |     n_sims = [1000, 2000, 3000]
151 |     n_workers = [1, 2, 3, 4, 5, 6]
152 | 
153 |     res = list()
154 |     iters = list(product(mps, n_cases, n_sims, n_workers))
155 |     print(len(iters))
156 |     for a, b, c, d in iters:
157 |         EXAMPLE_INPUT_ = EXAMPLE_INPUT['CASE_1'].copy()
158 |         EXAMPLE_INPUT_['n_simulations'] = c
159 |         mcs_input = {f'CASE_{i}': EXAMPLE_INPUT_ for i in range(b)}
160 | 
161 |         import tempfile
162 |         times = list()
163 |         with tempfile.TemporaryDirectory() as dir_work:
164 |             fp_input = path.join(dir_work, 'test.xlsx')
165 |             dict_to_xlsx({k: InputParser.flatten_dict(v) for k, v in mcs_input.items()}, fp_input)
166 | 
167 |             t0 = time.time()
168 |             mcs = MCS0()
169 |             mcs.set_inputs_file_path(fp_input)
170 |             mcs.run(d, save=True, save_archive=True, concurrency_strategy=a)
171 |             times.append(time.time() - t0)
172 |         res.append((a, b, c, d, times[-1]))
173 |         print(f'{res[-1]}')
174 | 
175 |     for i in res:
176 |         print(i)
177 | 
178 | 
179 | if __name__ == '__main__':
180 |     test_standard_case()
181 | 
-------------------------------------------------------------------------------- /test/test_mcs1.py: --------------------------------------------------------------------------------
1 | 
2 | 
3 | 
4 | def test_teq_phi():
5 |     from sfeprapy.mcs1.calcs import teq_main
6 | 
7 |     inputs = dict(
8 |         index=0,
9 |         fire_time_step=1.,
10 |         fire_time_duration=5. * 60 * 60,
11 |         beam_cross_section_area=0.017,
12 |         beam_position_vertical=2.5,
13 |         beam_position_horizontal=18,
14 |         beam_rho=7850.,
15 |         fire_combustion_efficiency=0.8,
16 |         fire_gamma_fi_q=1,
17 |         fire_hrr_density=0.25,
18 |         fire_load_density=420,
19 |         fire_mode=0,
20 |         fire_nft_limit=1050,
21 |         fire_spread_speed=0.01,
22 |         fire_t_alpha=300,
23 |         fire_tlim=0.333,
24 |         protection_c=1700.,
25 |         protection_k=0.2,
26 |         protection_protected_perimeter=2.14,
27 |         protection_rho=800.,
28 |         protection_d_p=0.01,
29 |         room_breadth=16,
30 |         room_depth=31.25,
31 |         room_height=3,
32 |         room_wall_thermal_inertia=720,
33 |         # solver_temperature_goal=620 + 273.15,
34 |         # solver_max_iter=200,
35 |         # solver_thickness_lbound=0.0001,
36 |         # solver_thickness_ubound=0.0500,
37 |         # solver_tol=0.01,
38 |         window_height=2,
39 |         window_open_fraction=0.8,
40 |         window_width=72,
41 |         window_open_fraction_permanent=0,
42 |         timber_charring_rate=0.7,
43 |         timber_exposed_area=0,
44 |         timber_hc=400,
45 |         timber_density=500,
46 |         timber_solver_ilim=20,
47 |         timber_solver_tol=1,
48 |         epsilon_q=0.5,
49 |         t_k_y_theta=3600,
50 |     )
51 | 
52 |     inputs["phi_teq"] = 1.0
53 |     teq_10 = teq_main(**inputs)[14]
54 | 
55 |     inputs["phi_teq"] = 0.1
56 |     teq_01 = teq_main(**inputs)[14]
57 | 
58 |     print(
59 |         f'Time equivalence at phi_teq=0.1: {teq_01:<8.3f}\n'
60 |         f'Time equivalence at phi_teq=1.0: {teq_10:<8.3f}\n'
61 |         f'Ratio between the above: {teq_10 / teq_01:<8.3f}\n'
62 |     )
63 | 
64 |     assert abs(teq_10 / teq_01 - 10) < 0.01
65 | 
66 | 
67 | def test_standard_case():
68 |     from sfeprapy.mcs1 import EXAMPLE_INPUT, MCS1
69 |     import numpy as np
70 |     import copy
71 | 
72 |     # increase the number of simulations so it gives sensible results
73 |     mcs_input = copy.deepcopy(EXAMPLE_INPUT)
74 |     mcs_input['CASE_1']['n_simulations'] = 10_000
75 | 
76 |     mcs = MCS1()
77 |     mcs.set_inputs_dict({
78 |         'CASE_1': mcs_input.pop('CASE_1'),
79 |     })
80 |     mcs.run(2)
81 | 
82 |     # 60 minutes based on Kirby et al.
83 |     x, y = mcs['CASE_1'].get_cdf()
84 |     assert abs(np.amax(y[x < 60]) - 0.2) > 0.
85 |     assert abs(np.amax(y[x < 60]) - 0.4) > 0.
86 |     assert abs(np.amax(y[x < 60]) - 0.6) > 0.
87 |     assert abs(np.amax(y[x < 60]) - 0.8) > 0.
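The CDF returned by `get_cdf` pairs sorted time-equivalence values with their cumulative probabilities, so a fire resistance rating for a target reliability can be read off by inverse interpolation. A minimal sketch with assumed arrays (in practice `x` and `y` come from a completed run):

import numpy as np

# x: time equivalence [min]; y: P(teq <= x); both assumed for illustration
x = np.array([15., 30., 45., 60., 75., 90.])
y = np.array([0.05, 0.30, 0.62, 0.81, 0.92, 0.97])

target_reliability = 0.8
rating = np.interp(target_reliability, y, x)  # inverse interpolation, ~59 min
print(f'fire resistance for {target_reliability:.0%} reliability: {rating:.1f} min')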
88 | 
89 | 
90 | def test_file_input():
91 |     import tempfile
92 |     import time
93 |     import os
94 |     import copy
95 | 
96 |     from sfeprapy.mcs1 import EXAMPLE_INPUT, MCS1
97 |     from sfeprapy.mcs import InputParser
98 |     from sfeprapy.func.xlsx import dict_to_xlsx
99 | 
100 |     # save input as .xlsx
101 |     with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as dir_work:
102 |         print('A temporary folder has been created:', dir_work)
103 | 
104 |         time.sleep(0.5)
105 |         fp_in = os.path.join(dir_work, 'input.xlsx')
106 |         dict_to_xlsx({k: InputParser.flatten_dict(v) for k, v in copy.deepcopy(EXAMPLE_INPUT).items()}, fp_in)
107 |         print(f"A temporary input file has been created: {fp_in}")
108 |         time.sleep(0.5)
109 | 
110 |         mcs = MCS1()
111 |         mcs.set_inputs_file_path(fp_in)
112 |         mcs.run(2)
113 |         time.sleep(0.5)
114 |         mcs.save_all(False)
115 |         time.sleep(0.5)
116 |         mcs.save_all(True)
117 |         time.sleep(0.5)
118 | 
119 | 
120 | if __name__ == '__main__':
121 |     test_teq_phi()
122 |     test_standard_case()
123 |     test_file_input()
124 | 
-------------------------------------------------------------------------------- /test/test_mcs2.py: --------------------------------------------------------------------------------
1 | def test_standard_case():
2 |     import copy
3 | 
4 |     import numpy as np
5 | 
6 |     from sfeprapy.mcs2 import EXAMPLE_INPUT
7 |     from sfeprapy.mcs2 import MCS2
8 | 
9 |     # increase the number of simulations so it gives sensible results
10 |     mcs_input = copy.deepcopy(EXAMPLE_INPUT)
11 |     mcs_input.pop('Residential')
12 |     mcs_input.pop('Retail')
13 |     for k in list(mcs_input.keys()):
14 |         mcs_input[k]['n_simulations'] = 50_000
15 | 
16 |     # increase the number of threads so it runs faster
17 |     mcs2 = MCS2()
18 |     mcs2.set_inputs_dict(mcs_input)
19 |     mcs2.run(4)
20 | 
21 |     x, y = mcs2['Office'].get_cdf()
22 | 
23 |     def func_teq(v):
24 |         return np.interp(v, x, y)
25 | 
26 |     for fire_rating in [30, 45, 60, 75, 90, 105, 120]:
27 |         print(f'{fire_rating:<8.0f} {func_teq(fire_rating):<.8f}')
28 | 
29 |     assert abs(func_teq(30) - 0.08871437) <= 5e-3
30 |     assert abs(func_teq(60) - 0.65500191) <= 5e-3
31 |     assert abs(func_teq(90) - 0.92701250) <= 5e-3
32 | 
33 | 
34 | if __name__ == '__main__':
35 |     test_standard_case()
36 | 
-------------------------------------------------------------------------------- /test/test_misc.py: --------------------------------------------------------------------------------
1 | from sfeprapy import _test_version_canonical
2 | 
3 | test_version_canonical = _test_version_canonical
4 | 
--------------------------------------------------------------------------------
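For reference, a minimal end-to-end sketch of driving the `mcs0` model programmatically, mirroring the API exercised in `test_mcs0.py` above; the sample size is an assumed value:

import copy

from sfeprapy.mcs0 import EXAMPLE_INPUT, MCS0

mcs_input = copy.deepcopy(EXAMPLE_INPUT)
mcs_input['CASE_1']['n_simulations'] = 10_000  # assumed, increase for smoother results

mcs = MCS0()
mcs.set_inputs_dict({'CASE_1': mcs_input['CASE_1']})
mcs.run(2)  # number of workers

x, y = mcs['CASE_1'].get_cdf()  # time equivalence CDF for the case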