├── src
└── asdm
│ ├── Inference
│ ├── __init__.py
│ ├── Payoff.py
│ ├── Probability.py
│ ├── Optimizer.py
│ └── MCMC.py
│ ├── simulator
│ ├── __init__.py
│ ├── __main__.py
│ ├── static
│ │ ├── css
│ │ │ └── main.css
│ │ └── js
│ │ │ └── main.js
│ ├── templates
│ │ └── index.html
│ └── app.py
│ ├── __init__.py
│ ├── utilities.py
│ └── cli.py
├── requirements.txt
├── media
└── asdm_simulator.png
├── tests
├── test_sdmodel.py
├── test_logisticbound.py
├── test_expbound.py
└── test_unary_parsing.py
├── LICENSE
├── .github
└── workflows
│ └── python-compatibility.yml
├── pyproject.toml
├── .gitignore
├── README.md
├── Documentation.md
└── demo
├── Model_goal_gap_array.stmx
└── Demo_SD_modelling.ipynb
/src/asdm/Inference/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/asdm/simulator/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
networkx
scipy
beautifulsoup4
lxml
pandas
matplotlib
numpy
Flask
--------------------------------------------------------------------------------
/media/asdm_simulator.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wzh1895/ASDM/HEAD/media/asdm_simulator.png
--------------------------------------------------------------------------------
/src/asdm/simulator/__main__.py:
--------------------------------------------------------------------------------
# src/asdm/simulator/__main__.py
# Enables `python -m asdm.simulator` by delegating to the Flask app's entry point.

from .app import main

if __name__ == "__main__":
    main()
7 |
--------------------------------------------------------------------------------
/src/asdm/__init__.py:
--------------------------------------------------------------------------------
from importlib.metadata import version, PackageNotFoundError

try:
    # Resolve the installed distribution's version at import time.
    __version__ = version("asdm")
except PackageNotFoundError:
    # Package is not installed (e.g. a source checkout), use a fallback
    __version__ = "unknown"

# Re-export the primary public API at the package top level.
from .asdm import sdmodel, Parser, Solver
10 |
--------------------------------------------------------------------------------
/tests/test_sdmodel.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from asdm import sdmodel
3 | from asdm.utilities import plot_time_series
4 |
class GoalGap(sdmodel):
    """Minimal goal-gap model: a stock adjusts toward a goal over a fixed time."""

    def __init__(self):
        # Python 3 zero-argument form of the original super(GoalGap, self).
        super().__init__()
        self.add_stock("Stock", 100, in_flows=['Flow'])
        self.add_aux("Goal", 20)
        self.add_aux("Adjustment_time", 5)
        self.add_aux("Gap", "Goal-Stock")
        self.add_flow("Flow", "Gap/Adjustment_time")
13 |
@pytest.fixture
def goal_gap_model():
    """Provide a fresh GoalGap model instance for each test."""
    return GoalGap()
17 |
def test_init(goal_gap_model):
    # Construction succeeding inside the fixture is the assertion here.
    pass
20 |
def test_simulation(goal_gap_model):
    # Smoke test: a 20-step simulation with dt=1 runs without raising.
    goal_gap_model.simulate(time=20, dt=1)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 ASDM Developers
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/.github/workflows/python-compatibility.yml:
--------------------------------------------------------------------------------
1 | name: Test Python Compatibility
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | - dev
8 | pull_request:
9 | branches:
10 | - main
11 | - dev
12 |
13 | jobs:
14 | test:
15 | runs-on: ${{ matrix.os }}
16 | strategy:
17 | matrix:
18 | os: [ubuntu-latest, windows-latest, macos-latest]
19 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
20 | fail-fast: false
21 |
22 | steps:
23 | - name: Check out code
24 | uses: actions/checkout@v3
25 |
26 | - name: Set up Python
27 | uses: actions/setup-python@v4
28 | with:
29 | python-version: ${{ matrix.python-version }}
30 |
31 | - name: Install dependencies
32 | run: |
33 | pip install build
34 |
35 | - name: Build and test package
36 | run: |
37 | python -m build
38 |
39 | - name: Locate and install the built package
40 | run: |
41 | # Locate the .whl file and install it
42 | python -c "import os, subprocess; files = [f for f in os.listdir('dist') if f.endswith('.whl')]; subprocess.run(['python', '-m', 'pip', 'install', 'dist/'+str(files[0])], check=True);"
43 |
44 | - name: Test the installed package
45 | run: |
46 | python -c "from asdm import sdmodel; print('Success:', sdmodel())"
47 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=61.0", "wheel"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "asdm"
7 | version = "0.7.0"
8 | description = "A Python package for System Dynamics Modeling"
9 | readme = {file = "README.md", content-type = "text/markdown"}
10 | license = {text = "MIT"}
11 | authors = [
12 | {name = "Wang Zhao", email = "wzh1895@outlook.com"},
13 | {name = "Matt Stammers"}
14 | ]
15 | classifiers = [
16 | "Programming Language :: Python :: 3",
17 | "License :: OSI Approved :: MIT License",
18 | "Operating System :: OS Independent"
19 | ]
20 | requires-python = ">=3.9"
21 | dependencies = [
22 | "numpy",
23 | "pandas",
24 | "matplotlib",
25 | "networkx",
26 | "lxml",
27 | "beautifulsoup4",
28 | "scipy",
29 | "Flask",
30 | ]
31 |
32 | [tool.setuptools.packages.find]
33 | where = ["src"]
34 |
35 | [tool.setuptools.package-data]
36 | "asdm.simulator" = [
37 | "templates/*.html",
38 | "static/js/*.js",
39 | "static/css/*.css"
40 | ]
41 |
42 | [project.scripts]
43 | asdm = "asdm.cli:main"
44 | "asdm.simulator" = "asdm.cli:main_legacy"
45 |
46 | [project.urls]
47 | Homepage = "https://github.com/wzh1895/ASDM"
48 | "Source Code" = "https://github.com/wzh1895/ASDM"
49 | "Matt Stammers's GitHub" = "https://github.com/MattStammers"
50 |
51 | [tool.pytest.ini_options]
52 | # Show all test results: passed, failed, skipped, etc.
53 | addopts = "-rA -v"
54 | testpaths = ["tests", "resources"]
55 |
--------------------------------------------------------------------------------
/src/asdm/simulator/static/css/main.css:
--------------------------------------------------------------------------------
1 | /* main.css */
2 |
3 | /* Basic styling for the body */
4 | body {
5 | font-family: Arial, sans-serif;
6 | margin: 2rem;
7 | }
8 |
9 | /* Bold "asdm" */
10 | .asdm {
11 | font-weight: bold;
12 | }
13 |
14 | /* "simulator" in red */
15 | .simulator {
16 | color: red;
17 | }
18 |
19 | /* Drag-and-drop area styling */
20 | #drop-area {
21 | width: 100%;
22 | height: 200px;
23 | border: 2px dashed #ccc;
24 | display: flex;
25 | justify-content: center;
26 | align-items: center;
27 | margin-bottom: 1rem;
28 | text-align: center; /* centre the text inside the area */
29 | }
30 |
31 | /* If you want a highlight effect on dragover */
32 | #drop-area.highlight {
33 | border-color: purple;
34 | }
35 |
36 | /* Table styling */
37 | table {
38 | border-collapse: collapse;
39 | width: 100%;
40 | margin-top: 2rem;
41 | }
42 | th, td {
43 | border: 1px solid #ccc;
44 | padding: 8px;
45 | }
46 | th {
47 | background-color: #f2f2f2;
48 | }
49 |
50 | /* Optional: styling for details summary */
51 | details summary {
52 | font-weight: bold;
53 | cursor: pointer;
54 | margin: 0.5em 0;
55 | }
56 |
57 | #varSelect {
58 | margin: 0.5em 0;
59 | font-size: 1em;
60 | }
61 |
62 | /* Plot container styling - ensure full width */
63 | #plot {
64 | width: 100% !important;
65 | min-height: 400px;
66 | box-sizing: border-box;
67 | }
68 |
69 | footer {
70 | text-align: center;
71 | margin-top: 2rem;
72 | font-size: 0.9em;
73 | color: #555;
74 | }
75 | footer a {
76 | color: #007bff;
77 | text-decoration: none;
78 | }
79 | footer a:hover {
80 | text-decoration: underline;
81 | }
--------------------------------------------------------------------------------
/tests/test_logisticbound.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Test script for LOGISTICBOUND function implementation
4 | """
5 |
import sys
import os

# Make src/ importable when this file is run directly from the repository.
# Bug fix: this file lives in tests/, so src/ is one directory up — the
# original inserted tests/src, which does not exist in the repo layout.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))

from asdm.asdm import Parser, Solver
11 |
def test_logisticbound():
    """Smoke-test LOGISTICBOUND: tokenise, parse, and evaluate sample expressions.

    NOTE(review): failures are caught and printed rather than raised, so this
    test cannot fail under pytest — confirm that is intended.
    """
    parser = Parser()
    solver = Solver()

    # Set up solver
    solver.name_space = {}
    solver.sim_specs = {'dt': 1}

    test_cases = [
        # Test basic functionality: LOGISTICBOUND(yfrom, yto, x, xmiddle, speed)
        "LOGISTICBOUND(1, 9, 5, 5, 1)",   # x at middle, should be around 5
        "LOGISTICBOUND(1, 9, 0, 5, 1)",   # x before middle, should be closer to 1
        "LOGISTICBOUND(1, 9, 10, 5, 1)",  # x after middle, should be closer to 9
        "LOGISTICBOUND(1, 9, 5, 5, 10)",  # higher speed, steeper curve
        "LOGISTICBOUND(0, 1, 0, 0, 1)",   # simple 0-1 transition
    ]

    print("Testing LOGISTICBOUND function...")
    print("-" * 60)

    for expr in test_cases:
        try:
            # Test tokenization
            tokens = parser.tokenise(expr)
            print(f"✓ Tokenized: {expr}")

            # Test parsing
            parsed = parser.parse(expr)
            print(f"✓ Parsed: {expr}")

            # Test evaluation
            result = solver.calculate_node('test_expr', parsed, 'root')
            print(f"✓ Result: {expr} = {result:.4f}")

        except Exception as e:
            print(f"✗ ERROR in {expr}: {e}")
            import traceback
            traceback.print_exc()

        print()
53 |
if __name__ == "__main__":
    # Allow running this test file directly without pytest.
    test_logisticbound()
56 |
--------------------------------------------------------------------------------
/tests/test_expbound.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Test script for EXPBOUND function implementation
4 | """
5 |
import sys
import os

# Make src/ importable when this file is run directly from the repository.
# Bug fix: this file lives in tests/, so src/ is one directory up — the
# original inserted tests/src, which does not exist in the repo layout.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))

from asdm.asdm import Parser, Solver
11 |
def test_expbound():
    """Smoke-test EXPBOUND: tokenise, parse, and evaluate sample expressions.

    NOTE(review): the except clause also swallows the assert below, so a
    FUNC-recognition failure is only printed, never raised — confirm intended.
    """
    parser = Parser()
    solver = Solver()

    # Set up solver
    solver.name_space = {}
    solver.sim_specs = {'dt': 1}

    test_cases = [
        # Test basic functionality: EXPBOUND(yfrom, yto, x, exponent, xstart, xfinish)
        ("EXPBOUND(1, 9, 5.5, 0, 1, 10)", "Linear transition (exp=0)"),
        ("EXPBOUND(1, 9, 3, 2, 1, 10)", "Positive exponent: slow start"),
        ("EXPBOUND(1, 9, 7, 2, 1, 10)", "Positive exponent: fast finish"),
        ("EXPBOUND(1, 9, 3, -2, 1, 10)", "Negative exponent: fast start"),
        ("EXPBOUND(1, 9, 7, -2, 1, 10)", "Negative exponent: slow finish"),
        ("EXPBOUND(0, 100, 5, 1, 0, 10)", "0-100 transition"),
        ("EXPBOUND(1, 9, 0.5, 2, 1, 10)", "Boundary test: x < xstart"),
        ("EXPBOUND(1, 9, 15, 2, 1, 10)", "Boundary test: x > xfinish"),
    ]

    print("Testing EXPBOUND function...")
    print("-" * 70)

    for expr, description in test_cases:
        try:
            # Test tokenization
            tokens = parser.tokenise(expr)
            assert tokens[0][0] == 'FUNC', f"EXPBOUND not recognized as FUNC: {tokens[0]}"

            # Test parsing
            parsed = parser.parse(expr)

            # Test evaluation
            result = solver.calculate_node('test_expr', parsed, 'root')
            print(f"✓ {description:30} | {expr:30} = {result:.4f}")

        except Exception as e:
            print(f"✗ {description:30} | {expr:30} = ERROR: {e}")

    print("-" * 70)
    print("EXPBOUND implementation complete!")
    print("\nFunction signature: EXPBOUND(yfrom, yto, x, exponent, xstart, xfinish)")
    print("- exponent = 0: Linear transition")
    print("- exponent > 0: Slow start, fast finish")
    print("- exponent < 0: Fast start, slow finish")
58 |
if __name__ == "__main__":
    # Allow running this test file directly without pytest.
    test_expbound()
61 |
--------------------------------------------------------------------------------
/src/asdm/Inference/Payoff.py:
--------------------------------------------------------------------------------
1 | # Likelihood payoffs for a pair of (observation, simulation) data points at one time point.
2 |
3 | import numpy as np
4 | from scipy.stats import poisson
5 |
6 |
# poisson
# payoff contribution based on the Poisson log-likelihood (up to an additive constant;
# note it may be positive for some inputs), see: https://www.vensim.com/documentation/payoffcomputation.html
def poisson_log_likelihood_payoff_contribution(observed, simulated, weight=1.0):
    """Poisson log-likelihood payoff contribution, up to an additive constant.

    Computes ``observed * ln(simulated) - simulated`` — the Poisson log-pmf
    without the ``-ln(observed!)`` term, which is constant w.r.t. the model.
    The result is scaled by *weight*.

    Inputs are clamped: negative observations become 0, and simulated
    frequencies of 1 or below are raised to 1 so the logarithm stays defined.
    """
    # The observed number of events must be a non-negative integer.
    observed = max(observed, 0)
    # The modelled frequency of events must be at least 1.
    simulated = max(simulated, 1)
    return weight * (observed * np.log(simulated) - simulated)
19 |
20 |
def poisson_log_likelihood(observed, simulated, weight=1.0):
    """Exact Poisson log-pmf of *observed* given mean *simulated*, scaled by *weight*.

    Negative observations are clamped to 0; simulated means of 1 or below are
    raised to 1 so the distribution is well defined.
    """
    observed = max(observed, 0)    # event counts cannot be negative
    simulated = max(simulated, 1)  # modelled frequency must be at least 1
    return poisson(simulated).logpmf(observed) * weight
31 |
32 |
def poisson_likelihood(observed, simulated, weight=1.0):
    """Poisson pmf of *observed* given mean *simulated*, scaled by *weight*.

    Negative observations are clamped to 0; simulated means of 1 or below are
    raised to 1 so the distribution is well defined.
    """
    observed = max(observed, 0)    # event counts cannot be negative
    simulated = max(simulated, 1)  # modelled frequency must be at least 1
    return poisson(simulated).pmf(observed) * weight
45 |
46 |
47 | # binomial
48 | # payoff contribution is always negative, see: https://www.vensim.com/documentation/payoffcomputation.html
def binomial_log_likelihood_payoff_contribution(observed, simulated, p=0.5, weight=1.0):
    """Binomial log-likelihood payoff contribution.

    Treats *simulated* as the number of trials and *observed* as the number of
    successes, each with success probability *p*; returns
    ``weight * ln(p**observed * (1-p)**(simulated-observed))``.

    The payoff contribution is always negative for 0 < p < 1, see:
    https://www.vensim.com/documentation/payoffcomputation.html

    Clamping: negative observations become 0, trial counts below 1 become 1,
    and observed successes cannot exceed the number of trials.
    """
    if observed < 0:
        observed = 0  # the observed number of events must be an integer >= 0

    if simulated < 1:  # the modelled number of trials must be an integer >= 1
        simulated = 1

    if observed > simulated:  # observed successes cannot exceed the number of trials
        observed = simulated

    # Sum of logs rather than log of a product: numerically stable for large
    # counts, where p**observed would underflow to 0.
    # Bug fix: the original computed the log-likelihood but ignored `weight`.
    return (observed * np.log(p) + (simulated - observed) * np.log(1 - p)) * weight
60 |
61 |
62 | # absolute error
63 | # payoff contribution is always negative, see: https://www.vensim.com/documentation/payoffcomputation.html
def absolute_error_payoff_contribution(observed, simulated, weight=1.0):
    """Negative squared weighted error between simulation and observation.

    The payoff contribution is always negative (or zero), see:
    https://www.vensim.com/documentation/payoffcomputation.html
    """
    error = (simulated - observed) * weight
    return -(error ** 2)
66 |
--------------------------------------------------------------------------------
/src/asdm/simulator/templates/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | asdm.simulator
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 | asdm.simulator
15 |
16 |
17 |
18 |
19 |
Drag & drop your model file here, or click to choose a file
20 |
21 |
22 |
23 |
24 |
25 | Simulation Results
26 |
27 |
28 |
29 |
30 |
31 | Download Results
32 |
35 |
36 |
37 |
38 |
39 | Visualise Results
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 | Error Logs
53 |
54 |
55 |
56 |
57 |
58 |
59 |
66 |
67 |
--------------------------------------------------------------------------------
/src/asdm/utilities.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 |
def plot_time_series(time_series_dict, separate_ax=True):
    """
    Plot named time series with matplotlib, annotating every point with its value.

    The function takes input like:

    time_series_dict = {
        'Hospitalise Rate': {
            'Observed': observed_hospitalise_rate,
            'Simulated': simulated_hospitalise_rate,
            'Errors': error_hospitalise_rate
        },
        'Recover Rate': {
            'Observed': observed_recover_rate,
            'Simulated': simulated_recover_rate,
            'Errors': error_recover_rate
        }
    }

    separate_ax: when True, each outer key gets its own subplot; otherwise all
    series share one axis and the legend label combines both key levels.
    """

    # The longest series drives the figure size (one 8-inch width unit per
    # 20 data points, height at 80% of width).
    max_value_length = 1
    for _, tss in time_series_dict.items():
        for _, ts in tss.items():
            if len(ts) > max_value_length:
                max_value_length = len(ts)
    # print(max_value_length)
    ax_width = (max(max_value_length, 20) // 20 * 8)
    ax_height = int(ax_width * 0.8)

    if separate_ax:

        n_of_vars = len(time_series_dict)

        # Cap the figure size so many/long series stay drawable.
        figsize_x = min(50, ax_width * n_of_vars)
        figsize_y = min(40, ax_height)

        fig = plt.figure(figsize=(figsize_x, figsize_y), dpi=100)

        index = 1

        # One subplot per outer key, side by side.
        for name, time_series in time_series_dict.items():
            ax = fig.add_subplot(1, n_of_vars, index)
            for nm, ts in time_series.items():
                line, = plt.plot(ts, label=nm, linestyle='--', marker='o')
                x, y = line.get_data()
                # Annotate every data point with its rounded value.
                for i, j in zip(x, y):
                    ax.annotate(round(j), xy=(i, j))
            ax.set_title(name)
            ax.legend()
            ax.set_xlabel('Time unit')
            ax.set_ylabel('Quantity / Time unit')
            index += 1
    else:

        fig = plt.figure(figsize=(min(50, ax_width), min(40, ax_height)))
        ax = fig.add_subplot(1, 1, 1)
        for name, time_series in time_series_dict.items():
            for nm, ts in time_series.items():
                # Combine inner and outer names so shared-axis labels stay unique.
                line, = plt.plot(ts, label=nm + ' ' + name, linestyle='--', marker='o')
                x, y = line.get_data()
                for i, j in zip(x, y):
                    ax.annotate(round(j), xy=(i, j))
        ax.legend()
        ax.set_xlabel('Time unit')
        ax.set_ylabel('Quantity / Time unit')

    plt.show()
69 |
70 |
if __name__ == "__main__":
    # Manual smoke test: plot a single short series when run directly.
    a = [106.49738145, 107.55297435, 118.88731299, 128.80812551, 149.87163052, 151.84484521, 184.13534706]
    plot_time_series({'a': {'aa': a}})
74 |
--------------------------------------------------------------------------------
/tests/test_unary_parsing.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Test script to verify unary minus/plus parsing implementation
4 | """
5 |
import sys
import os

# Add the src directory to the path so we can import asdm.
# Bug fix: this file lives in tests/, so src/ is one directory up — the
# original inserted tests/src, which does not exist in the repo layout.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))

from asdm.asdm import Parser, Solver
13 |
def test_unary_operators():
    """Parse and evaluate expressions with unary +/- and assert exact results."""
    parser = Parser()
    solver = Solver()

    test_cases = [
        # Expression, Expected Result, Description
        ("-5", -5, "Simple negative number"),
        ("+5", 5, "Simple positive number"),
        ("--5", 5, "Double negative"),
        ("+-5", -5, "Plus then minus"),
        ("-+5", -5, "Minus then plus"),
        ("1 - 2", -1, "Binary minus"),
        ("1 + -2", -1, "Binary plus with unary minus"),
        ("(-5)", -5, "Negative in parentheses"),
        ("-(5)", -5, "Unary minus applied to parentheses"),
        ("-(-5)", 5, "Unary minus applied to negative"),
        ("2 * -3", -6, "Multiplication with unary minus"),
        ("-2 * 3", -6, "Unary minus with multiplication"),
    ]

    print("Testing unary operator parsing...")
    print("-" * 50)

    for expr, expected, description in test_cases:
        # Parse the expression
        parsed = parser.parse(expr)

        # Create a simple namespace for evaluation
        solver.name_space = {}
        solver.sim_specs = {'dt': 1}

        # Evaluate the parsed expression (correct parameter order)
        result = solver.calculate_node('test_expr', parsed, 'root')

        # Check if result matches expected (using pytest assertion)
        assert abs(result - expected) < 1e-10, f"Expression '{expr}' returned {result}, expected {expected} ({description})"
        print(f"✓ {expr:12} = {result:6} ({description})")

    print("-" * 50)
    print("All tests passed! ✓")
55 |
def test_tokenization():
    """Assert the exact token stream produced for expressions with unary +/-."""
    parser = Parser()

    test_cases = [
        # Expression -> expected [TYPE, lexeme] token pairs
        ("-5", [['MINUS', '-'], ['NUMBER', '5']]),
        ("+5", [['PLUS', '+'], ['NUMBER', '5']]),
        ("1-2", [['NUMBER', '1'], ['MINUS', '-'], ['NUMBER', '2']]),
        ("1+-2", [['NUMBER', '1'], ['PLUS', '+'], ['MINUS', '-'], ['NUMBER', '2']]),
    ]

    print("\nTesting tokenization...")
    print("-" * 50)

    for expr, expected in test_cases:
        tokens = parser.tokenise(expr)
        assert tokens == expected, f"Expression '{expr}' tokenized as {tokens}, expected {expected}"
        print(f"✓ {expr:8} -> {tokens}")

    print("-" * 50)
    print("Tokenization tests passed! ✓")
77 |
if __name__ == "__main__":
    # Standalone runner: exit 0 on success, 1 on assertion or unexpected failure.
    try:
        test_tokenization()
        test_unary_operators()
        print("\n🎉 All tests passed!")
        sys.exit(0)
    except AssertionError as e:
        print(f"\n❌ Test failed: {e}")
        sys.exit(1)
    except Exception as e:
        print(f"\n❌ Unexpected error: {e}")
        sys.exit(1)
90 |
--------------------------------------------------------------------------------
/src/asdm/cli.py:
--------------------------------------------------------------------------------
1 | # src/asdm/cli.py
2 |
3 | """
4 | ASDM Command Line Interface
5 |
6 | Main entry point for the unified ASDM CLI tool.
7 | """
8 |
9 | import sys
10 | import argparse
11 | from asdm import __version__
12 |
13 |
def create_parser():
    """Create the main argument parser with subcommands.

    Returns an argparse.ArgumentParser exposing a ``--version`` flag and a
    ``simulator`` subcommand (optional positional model file, plus ``--host``
    and ``--port`` options).
    """
    parser = argparse.ArgumentParser(
        prog='asdm',
        description='ASDM - A Python package for System Dynamics Modeling',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  asdm simulator              Launch the web-based simulator
  asdm simulator --port 9000  Launch simulator on custom port
  asdm --version              Show version information

For more information: https://github.com/wzh1895/ASDM
"""
    )

    parser.add_argument(
        '--version',
        action='version',
        version=f'ASDM v{__version__}'
    )

    # Create subparsers for subcommands
    subparsers = parser.add_subparsers(
        title='Available commands',
        dest='command',
        help='Command to execute',
        metavar=''
    )

    # Add simulator subcommand
    simulator_parser = subparsers.add_parser(
        'simulator',
        help='Launch the ASDM web-based simulator',
        description='Start a local web server with an interactive System Dynamics model simulator.'
    )

    simulator_parser.add_argument(
        'model_file',
        nargs='?',
        default=None,
        help='Optional: Path to model file (.stmx or .xmile) to load and run automatically'
    )

    simulator_parser.add_argument(
        '--host',
        default='127.0.0.1',
        help='Host/IP address to bind to (default: 127.0.0.1)'
    )

    simulator_parser.add_argument(
        '--port',
        type=int,
        default=8080,
        help='Port to run the server on (default: 8080)'
    )

    # Future subcommands can be added here:
    # run_parser = subparsers.add_parser('run', help='Run a model from command line')
    # optimize_parser = subparsers.add_parser('optimize', help='Run model optimization')
    # etc.

    return parser
77 |
78 |
def cmd_simulator(args):
    """Handle the 'simulator' subcommand by launching the web app.

    The import is local so the CLI starts fast and only pulls in the web
    stack (Flask et al.) when the simulator is actually requested.
    """
    from asdm.simulator.app import run_simulator
    run_simulator(args.host, args.port, args.model_file)
83 |
84 |
def main():
    """Main entry point for the unified ASDM CLI."""
    parser = create_parser()
    args = parser.parse_args()

    # No subcommand given: show usage and signal failure.
    if args.command is None:
        parser.print_help()
        sys.exit(1)

    if args.command == 'simulator':
        cmd_simulator(args)
        return

    # Defensive fallback: unreachable with the current subparsers.
    parser.print_help()
    sys.exit(1)
102 |
103 |
def main_legacy():
    """
    Legacy entry point for 'asdm.simulator' command.
    Shows deprecation warning and redirects to new command.
    """
    # Removed the unused `import warnings` — the banner is plain prints so it
    # is visible even when the warnings machinery is filtered out.

    # Show deprecation warning
    print("=" * 70)
    print("⚠️ DEPRECATION WARNING")
    print("=" * 70)
    print("The command 'asdm.simulator' is deprecated and will be removed soon.")
    print("Please use the new command instead:")
    print()
    print("  New: asdm simulator")
    print("  Old: asdm.simulator (this command)")
    print()
    print("All functionality remains the same, just the command name changes.")
    print("=" * 70)
    print()

    # Run the simulator with the legacy entry point (local import keeps CLI
    # startup cheap when the simulator is not used).
    from asdm.simulator.app import main as legacy_main
    legacy_main()
128 |
129 |
if __name__ == '__main__':
    # Allow running this module directly (e.g. `python -m asdm.cli`).
    main()
132 |
133 |
--------------------------------------------------------------------------------
/src/asdm/Inference/Probability.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
class PosteriorDistApprox(object):
    """Log-space approximation of a posterior distribution.

    Accumulates log-priors and log-likelihoods for sampled parameter points
    and uses the running sum of log-joints as the normalisation term.
    """

    def __init__(self, observations, prior_func, likelihood_func):
        """
        observations: observations

        prior_func: function that yields the prior probability of x

        likelihood_func: function that yields the likelihood of y given x

        return: posterior probability of x

        """
        self.observations = observations
        self.samples = list()

        # Traces of everything seen so far.
        self.priors = list()
        self.likelihoods = list()
        self.joints = list()
        # Running sum of log-joints (log-space analogue of a product).
        self.aggregated_joints = 0

        self.prior_func = prior_func
        self.likelihood_func = likelihood_func

    # consider new \theta_i, influencing the total probability
    def update(self, sample_points):
        """Fold a list of sample points into the accumulated log-joints."""
        for point in sample_points:
            log_prior = self.prior_func(point)
            self.priors.append(log_prior)

            log_likelihood = self.likelihood_func(point, self.observations)
            self.likelihoods.append(log_likelihood)

            log_joint = log_prior + log_likelihood
            self.joints.append(log_joint)
            self.aggregated_joints += log_joint

        print("Bayesian update ready.")

    def get_posterior(self, sample_point):
        """Log-posterior of *sample_point*: its log-joint minus the aggregated log-joints."""
        log_joint = self.prior_func(sample_point) + self.likelihood_func(sample_point, self.observations)
        return log_joint - self.aggregated_joints
55 |
56 |
class PosteriorDist(object):
    """Posterior distribution accumulator in probability (non-log) space."""

    def __init__(self, observations, prior_func, likelihood_func):
        """
        observations: observations

        prior_func: function that yields the prior probability of x

        likelihood_func: function that yields the likelihood of y given x

        return: posterior probability of x
        """
        self.observations = observations
        self.samples = list()

        self.priors = list()
        self.likelihoods = list()
        self.joints = list()
        # Running product of joints; starts at the multiplicative identity.
        self.aggregated_joints = 1

        self.prior_func = prior_func
        self.likelihood_func = likelihood_func

    # consider new \theta_i, influencing the total probability
    def update(self, sample_points):  # sample_points needs to be a list of points
        """Fold a list of sample points into the accumulated joint product."""
        for sample_point in sample_points:
            prior = self.prior_func(sample_point)
            self.priors.append(prior)

            likelihood = self.likelihood_func(sample_point, self.observations)
            self.likelihoods.append(likelihood)

            joint = prior * likelihood
            self.joints.append(joint)

            self.aggregated_joints *= joint

        print("Bayesian update ready.")

    def get_posterior(self, sample_point):
        """Posterior of *sample_point*: its joint probability over the aggregated joints.

        Bug fix: the joint is ``prior * likelihood`` (matching ``update``); the
        original added them, which is only correct in log space (as done in
        PosteriorDistApprox).
        """
        prior = self.prior_func(sample_point)
        likelihood = self.likelihood_func(sample_point, self.observations)
        return (prior * likelihood) / self.aggregated_joints
107 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 |
164 | # OS
165 | .DS_Store
166 |
167 | # Customised
168 | resources/
169 | debug_tests/
170 | scripts/
171 |
172 | # IDE
173 | .vscode/
174 |
175 | # Cross-platform
176 | clear.bat
177 |
178 | # Data files
179 | *.csv
180 | *.xls
181 | *.xlsx
182 | *.isdb
183 | run_tests.sh
184 |
--------------------------------------------------------------------------------
/src/asdm/Inference/Optimizer.py:
--------------------------------------------------------------------------------
1 | from logging import exception
2 | from Payoff import poisson_log_likelihood_payoff_contribution, absolute_error_payoff_contribution
3 | from MCMC import LogLike, LogLikeWithGrad
4 | import pymc3 as pm
5 | import theano.tensor as tt
6 |
7 |
class Optimizer(object):
    """Calibrate an SD model's parameters against observed time series.

    Each candidate parameter vector proposed by a PyMC3 sampler is written
    into the model, the model is simulated, and a payoff (log-likelihood or
    error score) between the simulated and observed output series is
    returned to the sampler.
    """

    def __init__(
        self,
        model,
        parameters,
        output_vars,
        time_units,
        dt=1,
        payoff_type='Poisson',
        inference_method='MLE',
        ndraws=200,
        nburn=800,
        step_method='NUTS',
        cores=1,
        chains=1
    ):
        """
        Parameters
        ----------
        model:
            SD model exposing ``clear_last_run``, ``replace_element_equation``,
            ``simulate`` and ``get_element_simulation_result``.
        parameters (dict):
            Mapping of parameter name -> (lower, upper) prior bounds.
            Insertion order defines each parameter's position in the sampled
            vector (relies on ordered dicts, Python 3.7+).
        output_vars (dict):
            Mapping of observed variable name -> observed time series. The
            first observation is also used as the stock's initial value.
        time_units:
            Total simulated time span.
        dt:
            Simulation time step. Defaults to 1.
        payoff_type (str):
            'Poisson' or 'Squared error'. Anything else raises.
        inference_method (str):
            'MLE', 'MAP' or 'prior'.
        ndraws (int):
            Number of posterior draws to keep.
        nburn (int):
            Number of tuning (burn-in) draws to discard.
        step_method (str):
            'Metropolis', 'Slice', 'NUTS' or 'HamiltonianMC'; any other value
            lets PyMC3 choose a sampler automatically.
        cores (int):
            Number of worker processes used by ``pm.sample``.
        chains (int):
            Number of MCMC chains.

        Raises
        ------
        Exception: if ``payoff_type`` is not recognised.
        """
        self.model = model
        self.parameters = parameters
        self.output_vars = output_vars
        self.time_units = time_units
        self.dt = dt

        # NOTE(review): 'Squared error' is mapped to the *absolute* error
        # contribution -- confirm which metric is actually intended.
        if payoff_type == 'Squared error':
            self.payoff_function = absolute_error_payoff_contribution
        elif payoff_type == 'Poisson':
            self.payoff_function = poisson_log_likelihood_payoff_contribution
        else:
            raise Exception('Payoff function not specified')

        self.inference_method = inference_method
        self.ndraws = ndraws
        self.nburn = nburn

        self.step_method = step_method

        self.cores = cores
        self.chains = chains

    def calculate_payoff(self, params):
        """Return the payoff of one candidate parameter vector.

        Parameters
        ----------
        params: sequence of float
            Candidate values, ordered like ``self.parameters``.

        Raises
        ------
        NotImplementedError: for the 'MAP' and 'prior' inference methods,
            which require a joint log-prior that is not implemented.
        Exception: if ``self.inference_method`` is not recognised.
        """
        self.model.clear_last_run()

        # Write candidate values into the model. dict preserves insertion
        # order (Python 3.7+), so position i of params maps to the i-th name.
        for name, value in zip(self.parameters, params):
            self.model.replace_element_equation(name, value)

        # Initialise each observed stock to its first observation
        for ov, ts in self.output_vars.items():
            self.model.replace_element_equation(ov, ts[0])

        # Simulate the SD model using the candidate parameters
        self.model.simulate(simulation_time=self.time_units, dt=self.dt)

        # Integrate per-step payoff contributions over the whole run,
        # weighting contributions by 1/number-of-parameters.
        integral_payoff_over_simulation = 0
        for ov, ts in self.output_vars.items():
            sim = self.model.get_element_simulation_result(ov)
            for t in range(int(self.time_units / self.dt)):
                contribution = self.payoff_function(ts[t], sim[t], weight=1.0 / len(params))
                integral_payoff_over_simulation += contribution * self.dt

        # Whole payoff function
        if self.inference_method == 'MLE':
            payoff = integral_payoff_over_simulation
        elif self.inference_method == 'MAP':
            # The original code called an undefined query_joint_log_prior()
            # with undefined arguments, which raised a NameError at runtime.
            # Fail fast with a clear message until a joint prior exists.
            raise NotImplementedError('MAP inference requires a joint log-prior, which is not implemented')
        elif self.inference_method == 'prior':  # only sample the prior, no data included, for testing
            # Likewise depended on an undefined log_prior value.
            raise NotImplementedError('Prior-only sampling requires a joint log-prior, which is not implemented')
        else:
            raise Exception('Error: Estimate method {} not defined'.format(self.inference_method))

        return payoff

    def run(self):
        """Sample the posterior with PyMC3 and return the trace."""
        # Wrap the payoff so PyMC3 can treat it as a black-box likelihood
        # (with numerically estimated gradients for NUTS/HMC).
        logl = LogLikeWithGrad(self.calculate_payoff)

        with pm.Model() as model:
            # Uniform priors over the configured bounds
            params = list()
            for p, b in self.parameters.items():
                params.append(pm.Uniform(p, lower=b[0], upper=b[1]))

            theta = tt.as_tensor_variable(params)

            # Create custom distribution.
            # Don't use DensityDist - use Potential.
            # See: https://github.com/pymc-devs/pymc3/issues/4057#issuecomment-675589228
            pm.Potential('likelihood', logl(theta))

            # Choose a sampling method
            if self.step_method == 'Metropolis':
                self.step = pm.Metropolis()
            elif self.step_method == 'Slice':
                self.step = pm.Slice()
            elif self.step_method == 'NUTS':
                self.step = pm.NUTS()
            elif self.step_method == 'HamiltonianMC':
                self.step = pm.HamiltonianMC()
            else:
                # Previously self.step was left unset here, causing an
                # AttributeError below; step=None lets pm.sample choose.
                print('Warning: Sampling method not specified. Falling back.')
                self.step = None

        # Use trace to collect all accepted samples
        with model:
            trace = pm.sample(self.ndraws,
                              tune=self.nburn,
                              discard_tuned_samples=True,
                              step=self.step,
                              cores=self.cores,
                              chains=self.chains,
                              return_inferencedata=False)

        return trace
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # asdm
2 |
3 | ## **Agile System Dynamics Modelling**
4 |
5 | ASDM is a Python library that enables users to create and simulate System Dynamics (SD) models. It also supports SD models saved in the XMILE format, including advanced features such as arrays and conveyors. The support is being continuously improved.
6 |
7 | ### **ASDM's Contribution & Impact**
8 |
9 | Check out this presentation: [Project Care Home Demand](https://www.youtube.com/watch?v=tP1X38h8Ks4), given by **Sally Thompson**, Senior Healthcare Analyst at *The Strategy Unit* (part of NHS Midlands and Lancashire CSU). The presentation [highlights](https://www.youtube.com/watch?v=tP1X38h8Ks4&t=492s) the role of ASDM in developing an [**online SD model-based simulator**](https://connect.strategyunitwm.nhs.uk/care-home-demand/).
10 |
11 |
12 | ### **Library Structure**
13 | - `asdm/asdm.py` contains the main functionalities, including the lexer, parser, and interpreter.
14 | - `asdm/utilities.py` provides a data visualisation tool.
- `asdm/Inference/` consists of tools for model calibration.
16 | - `asdm/simulator/` provides a **web-based simulation interface** for easy model execution, result downloading, and visualisation.
17 |
18 | ---
19 | ## **Installation**
### **Install from PyPI**
21 | ```sh
22 | pip install asdm
23 | ```
24 | ASDM and its required dependencies will be automatically installed.
25 |
26 | ---
27 |
28 | ## **Basic Usage**
29 | To create a new SD model using ASDM:
30 | ```python
31 | from asdm import sdmodel
32 |
33 | model = sdmodel()
34 | ```
35 | `sdmodel` is the core class for System Dynamics models.
36 |
37 | Alternatively, you can load an SD model saved in XMILE format, including `.stmx` models:
38 | ```python
39 | model = sdmodel(from_xmile='example_model.stmx')
40 | ```
41 |
42 | Run the simulation:
43 | ```python
44 | model.simulate()
45 | ```
46 |
47 | Export simulation results:
48 | - As a **pandas DataFrame**:
49 | ```python
50 | result = model.export_simulation_result(format='df')
51 | ```
52 | - As a **Python dictionary**:
53 | ```python
54 | result = model.export_simulation_result(format='dict')
55 | ```
56 |
57 | ---
58 |
59 | ## **Web-Based Simulation Interface**
60 | ASDM now includes a **web-based simulation interface** that allows users to:
61 | - Upload `.stmx` or `.xmile` models for simulation.
62 | - Download simulation results as a **CSV file**.
63 | - Select variables and visualise them on an **interactive chart**.
64 |
65 | 
66 |
67 | ### **Quick Start**
68 | Run the ASDM web simulator with:
69 | ```sh
70 | asdm simulator
71 | ```
72 | By default, this starts a local server at `http://127.0.0.1:8080`. If port 8080 is unavailable, specify a different port, for example:
73 | ```sh
74 | asdm simulator --port 8081
75 | ```
76 | You can also bind to all network interfaces to allow access from others:
77 | ```sh
78 | asdm simulator --host 0.0.0.0
79 | ```
80 | Once started, the browser will automatically open the simulator page.
81 |
82 | You can also provide a model file directly to run it automatically:
83 | ```sh
84 | asdm simulator path/to/model.stmx
85 | ```
86 | This will launch the simulator and automatically run the specified model, displaying results immediately.
87 |
88 | ### **Features**
89 | - **Drag-and-drop file upload**: Upload your `.stmx` or `.xmile` model file.
90 | - **Simulation results in a table**: Automatically display after the model runs.
91 | - **CSV download**: You can download simulation results as a CSV file.
92 | - **Interactive charting**:
93 | - Select variables from a dropdown list.
94 | - Automatically detects the **time column name** (e.g., "Years", "Months", etc.).
95 | - Uses **Plotly.js** to generate interactive line charts.
96 |
97 | ---
98 |
99 | ## **Functionalities**
100 | Please refer to [Documentation](Documentation.md) for detailed function descriptions.
101 |
102 | ---
103 |
104 | ## **Tutorial Jupyter Notebooks**
105 | Jupyter Notebooks demonstrate ASDM's functionalities:
106 |
107 | ### **[SD Modelling](demo/Demo_SD_modelling.ipynb)**
108 | - Creating an SD model from scratch:
109 | - Adding **stocks, flows, auxiliaries**.
110 | - Support for **nonlinear** and **stochastic** functions.
111 | - Running simulations.
112 | - Exporting and examining simulation results.
113 | - Visualising results.
114 |
115 | ### **[Support for .stmx Models](demo/Demo_stmx_support.ipynb)**
116 | - Load and simulate `.stmx` models.
117 | - Support for **arrays**.
118 | - Modify equations and re-run simulations.
119 |
120 | More tutorial notebooks will be added.
121 | Feel free to contribute your own via **pull requests**—please ensure they do not contain sensitive data.
122 |
123 | ---
124 |
125 | ## **Licence**
126 | ASDM is open-source and released under the **MIT licence**.
127 |
128 | ---
129 |
130 | ## **Contributors**
131 | ### **Wang Zhao** (`main author`)
132 | - Postgraduate research student & research assistant at **University of Strathclyde, UK**.
133 | - Software engineer at **Newcastle Marine Services, UK**.
134 | - Speaker at multiple conferences on SD modelling.
135 | - Contact: [wang.zhao@strath.ac.uk](mailto:wang.zhao@strath.ac.uk); [wzh1895@outlook.com](mailto:wzh1895@outlook.com)
136 | - Conference talk: [Watch Here on YouTube](https://www.youtube.com/watch?v=I_0YpIKc3yI&t=2321s).
137 |
138 | ### **Matt Stammers** (`contributor`)
139 | - Consultant Gastroenterologist & open-source developer at **University Hospital Southampton, UK**.
140 | - Developed **Streamlit-powered web apps** using ASDM for healthcare modelling.
141 | - Part of the **Really Useful Models** initiative: [Learn More](https://opendatasaveslives.org/news/2022-01-05-really-useful-models).
142 | - GitHub: [Matt's Homepage](https://github.com/MattStammers).
143 |
144 | ---
145 |
--------------------------------------------------------------------------------
/Documentation.md:
--------------------------------------------------------------------------------
1 | # asdm
2 |
3 | ## Documentation
4 | `Version 25 Feb 2024`
5 |
**Note:** The documentation shown below relied heavily on OpenAI's `ChatGPT`, and may contain undiscovered errors. We recommend that you also refer to the source code of [asdm](asdm/asdm.py) for the exact usage of the functionalities. Please open an `issue` or contact me if you believe there is a bug. You are also welcome to open a `pull request` if you come up with a fix, a patch, or a new feature.
7 |
8 | ### Creation of SD Models
9 | ```
10 | def __init__(self, from_xmile=None):
11 | """
12 | Initializes the sdmodel instance, optionally loading a model from an XMILE file.
13 |
14 | Parameters:
15 | - from_xmile (str, optional): The file path to an XMILE model file. If provided,
16 | the class will attempt to load and parse the XMILE file to set up the model's
17 | initial configuration.
18 |
19 | Attributes initialized here include debug settings, simulation specifications,
20 | model components (stocks, flows, auxiliaries, etc.), and the simulation environment.
21 | Additionally, parsers and solvers for the model equations are set up.
22 | """
23 | ```
24 | ### Model Building Methods
25 | ```
26 | def add_stock(self, name, equation, non_negative=True, is_conveyor=False, in_flows=[], out_flows=[]):
27 | """
28 | Adds a stock variable to the model.
29 |
30 | Parameters:
31 | - name (str): The name of the stock variable.
32 | - equation: The equation defining the stock's behavior.
33 | - non_negative (bool, optional): Ensures that the stock value cannot be negative.
34 | Defaults to True.
35 | - is_conveyor (bool, optional): Specifies if the stock acts as a conveyor.
36 | Defaults to False.
37 | - in_flows (list, optional): A list of inflow variable names to the stock.
38 | - out_flows (list, optional): A list of outflow variable names from the stock.
39 |
40 | This method does not return any value.
41 | """
42 | ```
43 | ```
44 | def add_flow(self, name, equation, leak=None, non_negative=False):
45 | """
46 | Adds a flow variable to the model.
47 |
48 | Parameters:
49 | - name (str): The name of the flow variable.
50 | - equation: The equation defining the flow's behavior.
51 | - leak (optional): Specifies if the flow acts as a leakage. Defaults to None.
52 | - non_negative (bool, optional): Ensures that the flow value cannot be negative.
53 | Defaults to False.
54 |
55 | This method does not return any value.
56 | """
57 | ```
58 | ```
59 | def add_aux(self, name, equation):
60 | """
61 | Adds an auxiliary variable to the model.
62 |
63 | Parameters:
64 | - name (str): The name of the auxiliary variable.
65 | - equation: The equation defining the auxiliary variable's behavior.
66 |
67 | This method does not return any value.
68 | """
69 | ```
70 | ### Model Modification Methods
71 | ```
72 | def replace_element_equation(self, name, new_equation):
73 | """
74 | Replaces the equation of a specified model element (stock, flow, or auxiliary variable)
75 | with a new equation.
76 |
77 | Parameters:
78 | - name (str): The name of the model element (stock, flow, or auxiliary variable) whose
79 | equation is to be replaced.
80 | - new_equation: The new equation to replace the existing one. This can be a string
81 | representation of the equation or a numerical value. The type of `new_equation`
82 | must be either `str`, `int`, `float`, or a compatible numpy numeric type.
83 |
84 | This method updates the model's internal representation of the specified element's
85 | equation to the new equation provided.
86 |
87 | Raises:
88 | - Exception: If the new equation's type is unsupported or if the specified element
89 | name does not exist within the current model.
90 |
91 | This method does not return any value.
92 | """
93 | ```
94 | ```
95 | def overwrite_graph_function_points(self, name, new_xpts=None, new_xscale=None, new_ypts=None):
96 | """
97 | Overwrites the points or scale of a graph function associated with a model element
98 | (stock, flow, or auxiliary variable).
99 |
100 | This method is specifically useful for dynamically modifying the behavior of elements
101 | that are defined using graph functions within the simulation model.
102 |
103 | Parameters:
104 | - name (str): The name of the model element (stock, flow, or auxiliary variable) associated
105 | with the graph function to be modified.
106 | - new_xpts (list of float, optional): A new list of x points for the graph function. If None,
107 | the x points are not modified.
108 | - new_xscale (tuple of float, optional): A new x scale (min, max) for the graph function. If None,
109 | the x scale is not modified.
110 | - new_ypts (list of float, optional): A new list of y points for the graph function. If None,
111 | the y points are not modified.
112 |
113 | This method allows for dynamic adjustments to the graph functions used in the model,
114 | enabling scenarios such as sensitivity analysis or scenario testing.
115 |
116 | Raises:
117 | - Exception: If all input parameters (`new_xpts`, `new_xscale`, `new_ypts`) are None,
118 | indicating that there are no modifications to make.
119 |
120 | This method does not return any value but updates the graph function of the specified
121 | element with the new points or scale provided.
122 | """
123 | ```
124 | ### Simulation Methods
125 | ```
126 | def simulate(self, time=None, dt=None, dynamic=True, verbose=False, debug_against=None):
127 | """
128 | Runs the simulation of the model over the specified time.
129 |
130 | Parameters:
131 | - time (optional): The total time to simulate. If None, uses the simulation time
132 | specified in sim_specs.
133 | - dt (optional): The time step to use for the simulation. If None, uses the dt
134 | specified in sim_specs.
135 | - dynamic (bool, optional): If True, allows dynamic adjustment of simulation
136 | parameters. Defaults to True.
137 | - verbose (bool, optional): If True, prints detailed logs of the simulation process.
138 | Defaults to False.
139 | - debug_against (optional): Specifies a file or a flag for debugging purposes.
140 |
141 | This method updates the model's state based on the simulation results.
142 | """
143 | ```
144 | ### Result Management Methods
145 | ```
146 | def export_simulation_result(self, flatten=False, format='dict', to_csv=False, dt=False):
147 | """
148 | Exports the results of the simulation in the specified format.
149 |
150 | Parameters:
151 | - flatten (bool, optional): Only useful when the model uses arrays. Normally results of arrayed variables are stored as dictionaries like {dimension: value}. If True, flattens the result structure. Flattened result treats each dimension as a separate variable with name 'variable_dimension'. Defaults to False.
152 | - format (str, optional): The format of the output ('dict' or 'df' for DataFrame).
153 | Defaults to 'dict'.
154 | - to_csv (bool or str, optional): If True or a file path is provided, exports the
155 | results to a CSV file. Defaults to False.
156 | - dt (bool, optional): If True, includes the simulation time in the results.
157 | Defaults to False.
158 |
159 | Returns:
160 | - The simulation results in the specified format.
161 | """
162 | ```
163 | ```
164 | def display_results(self, variables=None):
165 | """
166 | Displays the simulation results for the specified variables using a line plot.
167 |
168 | Parameters:
169 | - variables (list or str, optional): The names of the variables to display. If None,
170 | displays results for all variables.
171 |
172 | This method does not return any value but shows a plot of the selected variables'
173 | values over time.
174 | """
175 | ```
--------------------------------------------------------------------------------
/src/asdm/Inference/MCMC.py:
--------------------------------------------------------------------------------
1 | # import theano.tensor as tt
2 | import pytensor.tensor as pt
3 | # import cython
4 | # cimport cython
5 | import numpy as np
6 | # cimport numpy as np
7 | import warnings
8 | from scipy.optimize import approx_fprime
9 |
10 |
class LogLike(pt.Op):

    """
    PyTensor Op wrapping a black-box log-likelihood callable.

    Adapted from https://docs.pymc.io/notebooks/blackbox_external_likelihood.html

    The Op receives a vector of parameter values (the parameters that
    define our model) and produces one scalar: the log-likelihood of
    those parameters.
    """

    itypes = [pt.dvector]  # input: a vector of parameter values
    otypes = [pt.dscalar]  # output: a single scalar (the log likelihood)

    def __init__(self, loglike):
        """Keep a reference to the wrapped log-likelihood callable."""
        self.likelihood = loglike

    def perform(self, node, inputs, outputs):
        """Evaluate the wrapped log-likelihood at the given parameter vector."""
        theta = inputs[0]  # the parameter vector
        outputs[0][0] = np.array(self.likelihood(theta))
39 |
40 |
def gradients(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1e-3,
              epsscale=0.5):
    """
    Calculate the partial derivatives of a function at a set of values. The
    derivatives are calculated using the central difference, using an iterative
    method to check that the values converge as step size decreases.

    Parameters
    ----------
    vals: array_like
        A set of values, that are passed to a function, at which to calculate
        the gradient of that function
    func:
        A function that takes in an array of values.
    releps: float, array_like, 1e-3
        The initial relative step size for calculating the derivative.
    abseps: float, array_like, None
        The initial absolute step size for calculating the derivative.
        This overrides `releps` if set.
    mineps: float, 1e-9
        The minimum relative step size at which to stop iterations if no
        convergence is achieved.
    reltol: float, 1e-3
        The relative tolerance within which two successive central-difference
        estimates must agree for the derivative to be accepted.
    epsscale: float, 0.5
        The factor by which releps is scaled in each iteration.

    Returns
    -------
    grads: array_like
        An array of gradients for each non-fixed value.
    """

    grads = np.zeros(len(vals))

    # maximum number of times the gradient estimate may change sign
    # before the iteration is abandoned
    flipflopmax = 10.

    # set the initial step size for each parameter
    if abseps is None:
        if isinstance(releps, float):
            eps = np.abs(vals)*releps
            eps[eps == 0.] = releps  # if any values are zero set eps to releps
            teps = releps*np.ones(len(vals))
        elif isinstance(releps, (list, np.ndarray)):
            if len(releps) != len(vals):
                raise ValueError("Problem with input relative step sizes")
            eps = np.multiply(np.abs(vals), releps)
            eps[eps == 0.] = np.array(releps)[eps == 0.]
            teps = releps
        else:
            raise RuntimeError("Relative step sizes are not a recognised type!")
    else:
        if isinstance(abseps, float):
            eps = abseps*np.ones(len(vals))
        elif isinstance(abseps, (list, np.ndarray)):
            if len(abseps) != len(vals):
                raise ValueError("Problem with input absolute step sizes")
            eps = np.array(abseps)
        else:
            raise RuntimeError("Absolute step sizes are not a recognised type!")
        teps = eps

    # for each value in vals calculate the gradient
    for i in range(len(vals)):
        # initial parameter steps
        leps = eps[i]
        cureps = teps[i]

        flipflop = 0

        # perturbed copies of the input for the central finite difference
        fvals = np.copy(vals)
        bvals = np.copy(vals)

        # central difference
        fvals[i] += 0.5*leps  # change forwards distance to half eps
        bvals[i] -= 0.5*leps  # change backwards distance to half eps
        cdiff = (func(fvals)-func(bvals))/leps

        while 1:
            fvals[i] -= 0.5*leps  # remove old step
            bvals[i] += 0.5*leps

            # shrink the step size by epsscale and re-estimate
            cureps *= epsscale
            if cureps < mineps or flipflop > flipflopmax:
                # if no convergence set flat derivative (TODO: check if there is a better thing to do instead)
                warnings.warn("Derivative calculation did not converge: setting flat derivative.")
                grads[i] = 0.
                break
            leps *= epsscale

            # central difference with the smaller step
            fvals[i] += 0.5*leps  # change forwards distance to half eps
            bvals[i] -= 0.5*leps  # change backwards distance to half eps
            cdiffnew = (func(fvals)-func(bvals))/leps

            if cdiffnew == cdiff:
                grads[i] = cdiff
                break

            # check whether previous diff and current diff are the same within reltol
            rat = (cdiff/cdiffnew)
            if np.isfinite(rat) and rat > 0.:
                # gradient has not changed sign
                if np.abs(1.-rat) < reltol:
                    grads[i] = cdiffnew
                    break
                else:
                    cdiff = cdiffnew
                    continue
            else:
                # sign flip (or non-finite ratio): keep iterating
                cdiff = cdiffnew
                flipflop += 1
                continue

    return grads
161 |
162 |
def gradients_scipy(vals, func, releps=1e-3):
    """Forward-difference gradient of ``func`` at ``vals`` using SciPy's approx_fprime."""
    return approx_fprime(vals, func, releps)
165 |
166 |
167 | # define a theano Op for our likelihood function
class LogLikeWithGrad(pt.Op):

    """
    PyTensor Op wrapping a black-box log-likelihood function, with gradient
    support so that gradient-based samplers (e.g. NUTS, HamiltonianMC) can
    be used. Gradients are estimated numerically by a companion
    LogLikeGradOp instance.
    """

    itypes = [pt.dvector]  # expects a vector of parameter values when called
    otypes = [pt.dscalar]  # outputs a single scalar value (the log likelihood)

    def __init__(self, loglike):
        """
        Parameters
        ----------
        loglike:
            The log-likelihood (or whatever) function we've defined. It must
            accept a single vector of parameter values.
        """

        # add inputs as class attributes
        self.likelihood = loglike

        # initialise the gradient Op (defined below in this module)
        self.logpgrad = LogLikeGradOp(self.likelihood)

    def perform(self, node, inputs, outputs):
        # the method that is used when calling the Op
        (theta,) = inputs  # this will contain my variables

        # call the log-likelihood function
        logl = self.likelihood(theta)

        outputs[0][0] = np.array(logl)  # output the log-likelihood

    def grad(self, inputs, g):
        # the method that calculates the gradients - it actually returns the
        # vector-Jacobian product - g[0] is a vector of parameter values
        (theta,) = inputs  # our parameters
        return [g[0] * self.logpgrad(theta)]
216 |
217 |
class LogLikeGradOp(pt.Op):

    """
    This Op will be called with a vector of values and also return a vector of
    values - the gradients in each dimension, estimated numerically with the
    iterative central-difference routine `gradients` defined in this module.
    """

    itypes = [pt.dvector]
    otypes = [pt.dvector]

    def __init__(self, loglike):
        """
        Parameters
        ----------
        loglike:
            The log-likelihood (or whatever) function we've defined. It must
            accept a single vector of parameter values.
        """

        # add inputs as class attributes
        self.likelihood = loglike

    def perform(self, node, inputs, outputs):
        (theta,) = inputs

        # define version of likelihood function to pass to derivative function
        def lnlike(values):
            return self.likelihood(values)

        # calculate gradients numerically (central differences)
        grads = gradients(theta, lnlike)

        outputs[0][0] = grads
264 |
--------------------------------------------------------------------------------
/src/asdm/simulator/app.py:
--------------------------------------------------------------------------------
1 | # src/asdm/simulator/app.py
2 |
3 | import os
4 | import sys
5 | import socket
6 | import tempfile
7 | import webbrowser
8 | import threading
9 | import logging
10 | import argparse
11 | import uuid
12 | import traceback
13 | import multiprocessing
14 | from flask import Flask, request, jsonify, make_response, render_template
15 | from werkzeug.utils import secure_filename
16 | from concurrent.futures import ProcessPoolExecutor
17 |
18 | from asdm import sdmodel
19 |
# Flask application and verbose logging setup
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] %(asctime)s - %(message)s')

# Process pool used to run model simulations off the request-handling
# thread, with one worker per CPU core.
executor = ProcessPoolExecutor(max_workers=multiprocessing.cpu_count())

# A simple dictionary mapping a unique ID -> CSV content.
# NOTE(review): entries are never evicted, so memory grows with every
# simulation until the process restarts.
DOWNLOAD_CACHE = {}

# Store pre-loaded model file path (if provided via CLI)
PRELOADED_MODEL_PATH = None
30 |
@app.route('/')
def index():
    """Serve the simulator page, passing along any model path pre-loaded via the CLI."""
    return render_template("index.html", preloaded_model=PRELOADED_MODEL_PATH)
34 |
@app.route('/simulate', methods=['POST'])
def simulate_model():
    """
    Endpoint to handle the simulation request:
    - Receives an uploaded file
    - Uses a process pool to run the simulation
    - Returns the JSON result + a link to download CSV
    - Includes an error log section for debugging
    """
    if 'model_file' not in request.files:
        logging.error("No file part in request.")
        return jsonify({'error': 'No file found', 'error_log': 'No file uploaded'}), 400

    file = request.files['model_file']
    if file.filename == '':
        logging.error("Filename is empty.")
        return jsonify({'error': 'Empty filename', 'error_log': 'Uploaded file has no name'}), 400

    filename = secure_filename(file.filename)
    # Log the sanitised filename; the previous f-string had no placeholder
    # and always logged a literal "(unknown)". Lazy %-args avoid building
    # the message unless the level is enabled.
    logging.info("Received file: %s", filename)

    # Create a temporary directory to hold the file
    with tempfile.TemporaryDirectory() as tmpdir:
        filepath = os.path.join(tmpdir, filename)
        file.save(filepath)
        logging.info("File saved at: %s", filepath)

        # Offload the simulation to the process pool
        future = executor.submit(run_simulation_and_csv, filepath)

        try:
            logging.debug("Starting simulation in a separate process...")
            # df_records: JSON-friendly rows; csv_data: CSV text;
            # time_col: name of the time column
            df_records, csv_data, time_col = future.result()
            logging.debug("Simulation completed successfully.")
            error_log = ""  # No errors if successful
        except Exception as e:
            logging.exception("Error during simulation:")
            error_log = traceback.format_exc()  # Capture full traceback
            return jsonify({'error': str(e), 'error_log': error_log}), 500

    # Store the CSV data in memory with a unique ID
    download_id = str(uuid.uuid4())
    DOWNLOAD_CACHE[download_id] = csv_data

    return jsonify({
        "data": df_records,
        "time_col": time_col,
        "download_url": f"/download_csv/{download_id}",
        "error_log": error_log  # Include error logs even if empty
    })
85 |
def run_simulation_and_csv(filepath):
    """
    Load the model at ``filepath``, simulate it, and return the results in
    two forms: JSON-friendly row records and CSV text, together with the
    name of the time column.
    """
    model = sdmodel(from_xmile=filepath)
    model.simulate()
    result_df = model.export_simulation_result(format='df')

    # One DataFrame, two serialisations: rows for the JSON response,
    # CSV text for the download endpoint.
    records = result_df.to_dict(orient='records')
    csv_text = result_df.to_csv(index=False)

    # Time-column label comes from the model's simulation specs
    # (e.g. "Years", "Months").
    time_col = model.sim_specs['time_units']

    return records, csv_text, time_col
103 |
@app.route('/download_csv/<download_id>')
def download_csv(download_id):
    """
    Serve the CSV file from memory when the user clicks "Download."

    The route previously read '/download_csv/' with no URL parameter, so
    Flask could never supply the ``download_id`` argument the view needs
    (requests would fail); the '<download_id>' placeholder matches the
    '/download_csv/{id}' URLs handed out by the simulate endpoints.

    Args:
        download_id (str): UUID key into DOWNLOAD_CACHE.

    Returns:
        A CSV attachment response, or a 404 when the ID is unknown.
    """
    csv_data = DOWNLOAD_CACHE.get(download_id)
    if not csv_data:
        return "File not found or expired", 404

    # Optional: remove from cache to prevent indefinite memory usage
    # DOWNLOAD_CACHE.pop(download_id, None)

    response = make_response(csv_data)
    response.headers["Content-Disposition"] = "attachment; filename=simulation_result.csv"
    response.headers["Content-Type"] = "text/csv"
    return response
120 |
@app.route('/simulate_preloaded', methods=['POST'])
def simulate_preloaded():
    """
    Simulate the model that was pre-loaded via the CLI.

    Runs the model from its original location so that relative paths it
    references (e.g. CSV dependencies) still resolve.
    """
    # Guard: a model must have been registered at startup and still exist on disk.
    if not PRELOADED_MODEL_PATH:
        logging.error("No pre-loaded model available.")
        return jsonify({'error': 'No pre-loaded model', 'error_log': 'No model was pre-loaded via CLI'}), 404

    if not os.path.exists(PRELOADED_MODEL_PATH):
        logging.error(f"Pre-loaded model file not found: {PRELOADED_MODEL_PATH}")
        return jsonify({'error': 'Pre-loaded model file not found', 'error_log': f'File does not exist: {PRELOADED_MODEL_PATH}'}), 404

    logging.info(f"Simulating pre-loaded model: {PRELOADED_MODEL_PATH}")

    # Run the simulation in the worker pool so the web server stays responsive.
    pending = executor.submit(run_simulation_from_path, PRELOADED_MODEL_PATH)

    try:
        logging.debug("Starting simulation in a separate process...")
        df_records, csv_data, time_col = pending.result()
        logging.debug("Simulation completed successfully.")
    except Exception as exc:
        logging.exception("Error during simulation:")
        # Ship the full traceback to the client for display in the error panel.
        return jsonify({'error': str(exc), 'error_log': traceback.format_exc()}), 500

    # Park the CSV in memory under a fresh ID so it can be downloaded later.
    token = str(uuid.uuid4())
    DOWNLOAD_CACHE[token] = csv_data

    return jsonify({
        "data": df_records,
        "time_col": time_col,
        "download_url": f"/download_csv/{token}",
        "error_log": ""  # empty on success, kept for a uniform response shape
    })
160 |
def run_simulation_from_path(filepath):
    """
    Run a simulation from the model's original file path.

    Temporarily changes the working directory to the model's directory so
    that relative paths inside the model (e.g. CSV data files) resolve.

    Args:
        filepath (str): Path to an XMILE/STMX model file.

    Returns:
        tuple: (df_records, csv_data, time_col) — same shape as
        run_simulation_and_csv.
    """
    # Get the directory containing the model
    model_dir = os.path.dirname(os.path.abspath(filepath))
    original_cwd = os.getcwd()

    try:
        # Change to model directory so relative paths work.
        # NOTE(review): os.chdir is process-wide; this is only safe because
        # the function runs inside a dedicated worker process of the pool —
        # confirm it is never called from the web-server process directly.
        os.chdir(model_dir)
        logging.info(f"Changed working directory to: {model_dir}")

        # Run simulation
        model = sdmodel(from_xmile=filepath)
        model.simulate()
        df = model.export_simulation_result(format='df')

        # Convert DataFrame to JSON-serialisable and CSV forms
        df_records = df.to_dict(orient='records')
        csv_data = df.to_csv(index=False)

        # Time column name from sim_specs, falling back to "Time" when the
        # model does not declare time units (previously a KeyError).
        time_col = model.sim_specs.get('time_units') or 'Time'

        return df_records, csv_data, time_col
    finally:
        # Always restore original working directory
        os.chdir(original_cwd)
        logging.info(f"Restored working directory to: {original_cwd}")
192 |
def open_browser(host, port):
    """Open the default web browser at the simulator's address."""
    url = f"http://{host}:{port}"
    webbrowser.open_new(url)
195 |
def is_port_in_use(port):
    """Return True if something is listening on *port* on localhost (cross-platform)."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with probe:
        # connect_ex returns 0 on a successful connection, an errno otherwise.
        return probe.connect_ex(('127.0.0.1', port)) == 0
200 |
def run_simulator(host="127.0.0.1", port=8080, model_file=None):
    """
    Core function to run the ASDM simulator web server.

    Args:
        host (str): Host/IP address to bind to
        port (int): Port to run the server on
        model_file (str): Optional path to a model file to load automatically
    """
    global PRELOADED_MODEL_PATH

    # Configure logging FIRST: previously basicConfig() ran after the
    # logging.info() calls below, so the root logger only had the
    # WARNING-level last-resort handler and the INFO pre-load messages
    # were silently dropped.
    logging.basicConfig(level=logging.INFO)

    # Validate and set pre-loaded model path if provided
    if model_file:
        abs_model_path = os.path.abspath(model_file)
        if not os.path.exists(abs_model_path):
            print(f"Error: Model file not found: {model_file}")
            sys.exit(1)

        # endswith accepts a tuple of suffixes — one call instead of an `or` chain.
        if not abs_model_path.endswith(('.stmx', '.xmile')):
            print(f"Error: Model file must be .stmx or .xmile format: {model_file}")
            sys.exit(1)

        PRELOADED_MODEL_PATH = abs_model_path
        logging.info(f"Pre-loading model: {abs_model_path}")

    # Refuse to start a second instance on the same port.
    if is_port_in_use(port):
        print(f"ASDM simulator is already running on port {port}. Exiting.")
        return

    logging.info(f"Starting ASDM simulator on {host}:{port} ...")

    # Open the browser shortly after the server starts accepting connections.
    threading.Timer(1, open_browser, [host, port]).start()

    app.run(debug=False, host=host, port=port)
237 |
238 |
def main():
    """Legacy entry point with argument parsing."""
    parser = argparse.ArgumentParser(description="Run the ASDM simulator web server.")
    parser.add_argument(
        "--host",
        default="127.0.0.1",
        help="Host/IP address to bind to (default: 127.0.0.1)",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=8080,
        help="Port to run the server on (default: 8080)",
    )
    opts = parser.parse_args()

    run_simulator(opts.host, opts.port)
249 |
--------------------------------------------------------------------------------
/src/asdm/simulator/static/js/main.js:
--------------------------------------------------------------------------------
// main.js
//
// Front-end for the ASDM simulator page: wires up drag-and-drop upload of a
// model file, posts it to the server, and renders the results table, the CSV
// download link, and the chart section.

// Grab DOM elements (IDs are defined in templates/index.html)
const dropArea = document.getElementById('drop-area');        // drag & drop target
const fileInput = document.getElementById('fileElem');        // file-picker input behind the drop area
const resultsContent = document.getElementById('results-content');  // results table container
const downloadLink = document.getElementById('download-link');      // anchor for the CSV download
const downloadSection = document.getElementById('download-section');

// New references for charting
const chartSection = document.getElementById('chart-section');
const varSelect = document.getElementById('varSelect');       // variable picker for the plot
const plotDiv = document.getElementById('plot');              // chart render target

// We'll store the simulation records in a global variable so the chart
// section can re-plot without re-fetching.
let globalRecords = null;
let globalTimeCol = null;
18 |
19 | // Prevent default drag behaviours on the page
// Stop the browser's default handling (e.g. opening the file) for every
// drag event, both on the drop zone and on the page body.
for (const eventName of ['dragenter', 'dragover', 'dragleave', 'drop']) {
    dropArea.addEventListener(eventName, preventDefaults, false);
    document.body.addEventListener(eventName, preventDefaults, false);
}

// Swallow the event entirely so default drop behaviour never runs.
function preventDefaults(e) {
    e.preventDefault();
    e.stopPropagation();
}
29 |
// Toggle a visual highlight while a file is dragged over the drop zone.
for (const eventName of ['dragenter', 'dragover']) {
    dropArea.addEventListener(eventName, highlight, false);
}
for (const eventName of ['dragleave', 'drop']) {
    dropArea.addEventListener(eventName, unhighlight, false);
}

function highlight(e) {
    dropArea.classList.add('highlight');
}

function unhighlight(e) {
    dropArea.classList.remove('highlight');
}
44 |
// Dropping a file, clicking the drop zone, or picking one via the file
// dialog all funnel into handleFiles().
dropArea.addEventListener('drop', handleDrop, false);
dropArea.addEventListener('click', () => fileInput.click());
fileInput.addEventListener('change', () => handleFiles(fileInput.files));

function handleDrop(e) {
    handleFiles(e.dataTransfer.files);
}
56 |
// Upload the first selected file to /simulate and render the response:
// the results table, the CSV download link, the chart section, and — on
// failure — the server-side traceback in the error panel.
//
// NOTE(review): the HTML fragments in the string literals below were
// stripped during extraction of this file; the <p>/<pre> tags are a
// reconstruction — confirm against the original main.js.
function handleFiles(files) {
    const file = files[0];
    if (!file) return;

    // Prepare form data
    let formData = new FormData();
    formData.append('model_file', file);

    // Send to server
    fetch('/simulate', {
        method: 'POST',
        body: formData
    })
    .then(res => res.json())
    .then(data => {
        console.log("Server response:", data);

        // Handle errors reported by the server
        if (data.error) {
            resultsContent.innerHTML = `<p>Error: ${data.error}</p>`;

            // Show full error logs (call stack)
            let errorLog = data.error_log ? `<pre>${data.error_log}</pre>` : "No additional error details.";
            document.getElementById('error-content').innerHTML = errorLog; // innerHTML preserves formatting
            document.getElementById('error-section').style.display = 'block';
            document.getElementById('error-section').open = true; // Unfold error section
            return;
        }

        // Store the results globally for the chart section
        globalRecords = data.data || [];
        globalTimeCol = data.time_col || "Time";

        // 1) Show the results section
        if (globalRecords.length > 0) {
            document.getElementById('results').style.display = 'block';
            displayResults(globalRecords);
        }

        // 2) Show the download section if there's a CSV file
        if (data?.download_url) {
            downloadLink.href = data.download_url;
            downloadSection.style.display = 'block';
            downloadSection.open = true;
        }

        // 3) Show the visualization section
        if (globalRecords.length > 0) {
            chartSection.style.display = 'block';
            chartSection.open = true;

            // Use setTimeout to ensure the DOM has updated before plotting
            setTimeout(() => {
                setupChartOptions(globalRecords, globalTimeCol);
            }, 100);
        }

        // Hide error logs left over from a previous failed run
        document.getElementById('error-section').style.display = 'none';
    })
    .catch(err => {
        console.error(err);
        resultsContent.innerHTML = '<p>Error occurred.</p>';

        // Show full error message for client-side issues
        let errorLog = err.stack ? `<pre>${err.stack}</pre>` : "Unexpected error occurred.";
        document.getElementById('error-content').innerHTML = errorLog;
        document.getElementById('error-section').style.display = 'block';
        document.getElementById('error-section').open = true; // Unfold error section
    });
}
128 |
// Render the simulation records as an HTML table inside #results-content.
//
// NOTE(review): the table markup in the original string literals was
// stripped during extraction (only "| ... |" residue remained); the
// <table>/<tr>/<th>/<td> structure below is a reconstruction — confirm
// against the original main.js.
function displayResults(records) {
    // If there's no 'data' or it's empty
    if (!records) {
        resultsContent.innerHTML = '<p>No data returned.</p>';
        return;
    }
    if (!records.length) {
        resultsContent.innerHTML = '<p>No data returned (empty array).</p>';
        return;
    }

    // Build a table: header row from the first record's keys, then one
    // row per record in the same column order.
    const headers = Object.keys(records[0]);
    let html = '<table><thead><tr>';
    headers.forEach(h => {
        html += `<th>${h}</th>`;
    });
    html += '</tr></thead><tbody>';

    records.forEach(row => {
        html += '<tr>';
        headers.forEach(h => {
            html += `<td>${row[h]}</td>`;
        });
        html += '</tr>';
    });
    html += '</tbody></table>';

    resultsContent.innerHTML = html;
}
159 |
160 | function setupChartOptions(records) {
161 | // Clear any existing