├── .github └── workflows │ ├── ci.yml │ └── ci_test_all.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── docs ├── MDF_function_specifications.json ├── MDF_function_specifications.md ├── MDF_function_specifications.yaml ├── MDF_specification.json ├── MDF_specification.yaml ├── README.md ├── contributors.py ├── generate.py ├── images │ ├── MDFgraph1.png │ └── MDFgraph2.png └── sphinx │ ├── Makefile │ ├── README.md │ ├── images │ ├── logo_dark_bg.png │ ├── logo_dark_bg_mono.png │ ├── logo_light_bg.png │ └── logo_light_bg_mono.png │ ├── make.bat │ ├── requirements.txt │ └── source │ ├── _static │ ├── pydata-custom.css │ └── rtd-custom.css │ ├── _templates │ ├── custom-class-template.rst │ └── custom-module-template.rst │ ├── api │ ├── Contributing.md │ ├── Contributors.md │ ├── Export.md │ ├── Installation.md │ ├── Introduction.md │ ├── MDF_function_specifications.md │ ├── MDFpaper.md │ ├── QuickStart.md │ ├── Specification.rst │ ├── export_format │ │ ├── ACT-R │ │ │ ├── ACT-R.md │ │ │ ├── actr.png │ │ │ ├── addition.json │ │ │ ├── addition.lisp │ │ │ ├── addition.png │ │ │ ├── addition.py │ │ │ ├── addition.yaml │ │ │ ├── count.json │ │ │ ├── count.lisp │ │ │ ├── count.png │ │ │ ├── count.py │ │ │ └── count.yaml │ │ ├── MDF │ │ │ ├── MDF.md │ │ │ └── images │ │ │ │ ├── abc_conditions.png │ │ │ │ ├── abcd.png │ │ │ │ ├── abcd_3.png │ │ │ │ ├── arrays.png │ │ │ │ ├── params_funcs.png │ │ │ │ ├── simple.png │ │ │ │ ├── simple_3.png │ │ │ │ ├── states.png │ │ │ │ ├── switched_rlc_circuit.png │ │ │ │ └── switched_rlc_plot.png │ │ ├── NeuroML │ │ │ ├── ABCD.1.mdf.png │ │ │ ├── ABCD.mdf.png │ │ │ ├── ABCD.nmllite.png │ │ │ ├── FN.gv.png │ │ │ ├── FNmulti.png │ │ │ ├── Izh.png │ │ │ ├── Izh_run.png │ │ │ ├── IzhikevichTest.gv.png │ │ │ ├── LEMS_SimABCD.png │ │ │ ├── LEMS_SimFN.png │ │ │ ├── LEMS_SimIzhikevichTest.png │ │ │ ├── LemsFNrun.png │ │ │ ├── MDFFNrun.multi.png │ │ │ ├── 
MDFFNrun.png │ │ │ └── NeuroML.md │ │ ├── ONNX │ │ │ ├── ONNX.md │ │ │ ├── ab.png │ │ │ ├── abc.png │ │ │ ├── simple_ab.py │ │ │ ├── simple_abc.py │ │ │ └── simple_abcd.py │ │ ├── PsyNeuLink │ │ │ └── PsyNeuLink.md │ │ ├── PyTorch │ │ │ ├── ABCD.svg │ │ │ ├── PyTorch.md │ │ │ ├── ab.png │ │ │ ├── abc.png │ │ │ ├── ddm.png │ │ │ ├── inception.png │ │ │ ├── inception.svg │ │ │ ├── mlp_pure_mdf.png │ │ │ ├── mlp_pure_mdf.results.png │ │ │ ├── simple_pytorch_to_mdf.1.png │ │ │ ├── simple_pytorch_to_mdf.png │ │ │ ├── simple_pytorch_to_mdf.svg │ │ │ └── simple_pytorch_to_mdf_torchviz.png │ │ ├── Quantum │ │ │ └── Quantum.md │ │ └── WebGME │ │ │ └── WebGME.md │ ├── modules.rst │ └── readme_link.rst │ ├── conf.py │ └── index.rst ├── examples ├── ACT-R │ ├── README.md │ ├── actr.png │ ├── addition.json │ ├── addition.lisp │ ├── addition.png │ ├── addition.py │ ├── addition.yaml │ ├── count.json │ ├── count.lisp │ ├── count.png │ ├── count.py │ └── count.yaml ├── MDF │ ├── ABCD.json │ ├── ABCD.yaml │ ├── Arrays.json │ ├── Arrays.yaml │ ├── FN.mdf.json │ ├── NewtonCoolingModel.json │ ├── NewtonCoolingModel.yaml │ ├── ParametersFunctions.json │ ├── ParametersFunctions.yaml │ ├── README.md │ ├── RNN │ │ ├── IAF_net.json │ │ ├── IAF_net.yaml │ │ ├── IAF_net2.json │ │ ├── IAF_net2.yaml │ │ ├── IAFs.json │ │ ├── IAFs.nmllite.json │ │ ├── IAFs.yaml │ │ ├── IaF.net.run.png │ │ ├── IaF.net2.run.png │ │ ├── IaF.run.png │ │ ├── LEMS_Simiaf_example.png │ │ ├── README.md │ │ ├── RNN.run.png │ │ ├── RNNs.json │ │ ├── RNNs.yaml │ │ ├── Simiaf_example.json │ │ ├── generate_iaf.py │ │ ├── generate_rnn.py │ │ ├── iaf.net.png │ │ ├── iaf.net2.png │ │ ├── iaf.png │ │ ├── regenerate.sh │ │ ├── rnn.png │ │ ├── rnn_pytorch.py │ │ └── utils.py │ ├── Simple.bson │ ├── Simple.json │ ├── Simple.yaml │ ├── States.json │ ├── States.yaml │ ├── SwitchedRLC_Circuit.json │ ├── SwitchedRLC_Circuit.yaml │ ├── abc_conditions.json │ ├── abc_conditions.py │ ├── abc_conditions.yaml │ ├── abcd.py │ ├── 
abcd_python.py │ ├── abcd_torch.py │ ├── arrays.py │ ├── conditions │ │ ├── Composite_mdf_condition.json │ │ ├── Composite_mdf_condition.yaml │ │ ├── README.md │ │ ├── composite_condition_example.py │ │ ├── everyNCalls.py │ │ ├── everyncalls_condition.json │ │ ├── everyncalls_condition.yaml │ │ ├── images │ │ │ ├── composite_example.png │ │ │ ├── everyncalls.png │ │ │ ├── threshold.png │ │ │ └── timeinterval.png │ │ ├── threshold.py │ │ ├── threshold_condition.json │ │ ├── threshold_condition.yaml │ │ ├── timeInterval.py │ │ ├── timeinterval_condition.json │ │ └── timeinterval_condition.yaml │ ├── images │ │ ├── Readme.md │ │ ├── abc_conditions.png │ │ ├── abcd.png │ │ ├── abcd_3.png │ │ ├── arrays.png │ │ ├── newton.png │ │ ├── newton_plot.png │ │ ├── params_funcs.png │ │ ├── simple.png │ │ ├── simple_3.png │ │ ├── states.png │ │ ├── switched_rlc_circuit.png │ │ └── switched_rlc_plot.png │ ├── newton.py │ ├── params_funcs.py │ ├── scaling.py │ ├── simple.py │ ├── states.py │ ├── switched_rlc.py │ └── translation │ │ ├── Readme.md │ │ ├── Translated_ABCD.json │ │ ├── Translated_Arrays.json │ │ ├── Translated_FN.mdf.json │ │ ├── Translated_Simple.json │ │ ├── Translated_States.json │ │ ├── Translated_abc_conditions.json │ │ ├── run_translated_ABCD.py │ │ ├── run_translated_Arrays.py │ │ ├── run_translated_FN.py │ │ ├── run_translated_abc_conditions.py │ │ ├── run_translated_simple.py │ │ ├── run_translated_states_json.py │ │ ├── translated_FN_stateful_vw_plot.jpg │ │ └── translated_levelratesineplot.jpg ├── ModECI_MDF.png ├── ModECI_MDF.svg ├── NeuroML │ ├── ABCD.1.mdf.png │ ├── ABCD.json │ ├── ABCD.mdf.json │ ├── ABCD.mdf.png │ ├── ABCD.mdf.yaml │ ├── ABCD.net.nml │ ├── ABCD.nmllite.png │ ├── ABCD.py │ ├── ABCD__lems.xml │ ├── FN.gv.png │ ├── FN.json │ ├── FN.mdf.json │ ├── FN.mdf.yaml │ ├── FN.net.nml │ ├── FN.py │ ├── FN_Definitions.xml │ ├── FN__lems.xml │ ├── FNmulti.png │ ├── FNrun.py │ ├── Izh.png │ ├── Izh_run.png │ ├── Izh_run.py │ ├── Izhikevich.py │ ├── 
IzhikevichTest.gv.png │ ├── IzhikevichTest.mdf.json │ ├── IzhikevichTest.mdf.yaml │ ├── IzhikevichTest.net.nml │ ├── IzhikevichTest.nmllite.yaml │ ├── LEMS_SimABCD.png │ ├── LEMS_SimABCD.xml │ ├── LEMS_SimFN.png │ ├── LEMS_SimFN.xml │ ├── LEMS_SimIzhikevichTest.png │ ├── LEMS_SimIzhikevichTest.xml │ ├── LemsFNrun.png │ ├── MDFFNrun.multi.png │ ├── MDFFNrun.png │ ├── PNL.xml │ ├── PyNN │ │ ├── Generate.py │ │ ├── HH.mdf.png │ │ ├── HH.mdf.yaml │ │ ├── HH.net.nml │ │ ├── InputWeights.mdf.png │ │ ├── InputWeights.mdf.yaml │ │ ├── InputWeights.net.nml │ │ ├── Net1.mdf.png │ │ ├── Net1.mdf.yaml │ │ ├── Net1.net.nml │ │ ├── OneCell.mdf.png │ │ ├── OneCell.mdf.yaml │ │ ├── OneCell.net.nml │ │ ├── RunInMDF.py │ │ ├── SimpleNet.mdf.png │ │ ├── SimpleNet.mdf.yaml │ │ ├── SimpleNet.net.nml │ │ ├── clean.sh │ │ └── regenerateAndTest.sh │ ├── README.md │ ├── SimABCD.json │ ├── SimFN.json │ ├── SimIzhikevichTest.yaml │ └── regenerateAndTest.sh ├── Newton_Law_of_Cooling.ipynb ├── ONNX │ ├── README.md │ ├── ab.json │ ├── ab.png │ ├── ab.yaml │ ├── ab_torch-jit-export-m2o.onnx │ ├── abc.json │ ├── abc.png │ ├── abc.yaml │ ├── abc_basic-mdf.json │ ├── abc_basic-mdf.yaml │ ├── abc_basic.py │ ├── abcd.json │ ├── abcd.yaml │ ├── convnet_onnx_example.ipynb │ ├── simple_ab.py │ ├── simple_abc.py │ └── simple_abcd.py ├── PsyNeuLink │ ├── README.md │ ├── SimpleBranching-conditional.json │ ├── SimpleBranching-conditional.py │ ├── SimpleBranching-conditional.reconstructed.py │ ├── SimpleBranching-timing.json │ ├── SimpleBranching-timing.py │ ├── SimpleBranching-timing.reconstructed.py │ ├── SimpleFN-conditional.json │ ├── SimpleFN-conditional.py │ ├── SimpleFN-conditional.reconstructed.py │ ├── SimpleFN-timing.json │ ├── SimpleFN-timing.py │ ├── SimpleFN-timing.reconstructed.py │ ├── SimpleFN.json │ ├── SimpleFN.py │ ├── SimpleFN.reconstructed.py │ ├── SimpleLinear-conditional.json │ ├── SimpleLinear-conditional.py │ ├── SimpleLinear-conditional.reconstructed.py │ ├── 
SimpleLinear-timing.json │ ├── SimpleLinear-timing.py │ ├── SimpleLinear-timing.reconstructed.py │ ├── generate_json_and_scripts.py │ ├── model_ABCD.json │ ├── model_ABCD.py │ ├── model_ABCD.reconstructed.py │ ├── model_nested_comp_with_scheduler.json │ ├── model_nested_comp_with_scheduler.py │ ├── model_nested_comp_with_scheduler.reconstructed.py │ ├── model_with_nested_graph.json │ ├── model_with_nested_graph.py │ ├── model_with_nested_graph.reconstructed.py │ ├── stroop_conflict_monitoring.json │ ├── stroop_conflict_monitoring.py │ └── stroop_conflict_monitoring.reconstructed.py ├── PyTorch │ ├── ABCD.svg │ ├── MDF_PyTorch │ │ ├── ABCD.onnx │ │ ├── ABCD_pytorch.py │ │ ├── Arrays.onnx │ │ ├── Arrays_pytorch.py │ │ ├── MDF_to_PyTorch.py │ │ ├── Simple.onnx │ │ ├── Simple_pytorch.py │ │ ├── translated_ABCD.onnx │ │ ├── translated_ABCD_pytorch.py │ │ ├── translated_Arrays.onnx │ │ ├── translated_Arrays_pytorch.py │ │ ├── translated_Simple.onnx │ │ └── translated_Simple_pytorch.py │ ├── MDF_PyTorch_Benchmark │ │ └── pytorch_and_mdf_speed_comparison.ipynb │ ├── PyTorch_MDF │ │ ├── benchmark_results.json │ │ ├── benchmark_script │ │ │ ├── __init__.py │ │ │ ├── benchmark.py │ │ │ ├── benchmark_results.json │ │ │ ├── readme.md │ │ │ └── squeezenet1_1_benchmark.json │ │ ├── convolution.json │ │ ├── convolution.py │ │ ├── example.onnx.png │ │ ├── example.png │ │ ├── imagenet_labels.txt │ │ ├── mnasNet1_3.json │ │ ├── mnasNet1_3.py │ │ ├── mobilenetv2.json │ │ ├── mobilenetv2.py │ │ ├── pytorch_example_images │ │ │ └── a │ │ │ │ ├── img1.jpeg │ │ │ │ ├── img2.jpeg │ │ │ │ ├── img3.jpeg │ │ │ │ ├── img4.jpeg │ │ │ │ └── img5.jpeg │ │ ├── pytorch_mdf_examples.ipynb │ │ ├── resNext.json │ │ ├── resNext.py │ │ ├── resnet.json │ │ ├── resnet.py │ │ ├── shufflenet_v2.json │ │ ├── shufflenet_v2.py │ │ ├── simple_Convolution.py │ │ ├── simple_convolution.json │ │ ├── simple_pytorch_to_mdf.png │ │ ├── squeezenet1_1.json │ │ ├── squeezenet1_1.py │ │ ├── vgg16.json │ │ ├── vgg16.py │ 
│ ├── vgg19.json │ │ └── vgg19.py │ ├── README.md │ ├── ddm.json │ ├── ddm.png │ ├── example_data │ │ ├── imgs.npy │ │ └── labels.npy │ ├── inception.json │ ├── inception.png │ ├── inception.py │ ├── inception.svg │ ├── mlp_pure_mdf.json │ ├── mlp_pure_mdf.png │ ├── mlp_pure_mdf.py │ ├── mlp_pure_mdf.results.png │ ├── mlp_pure_mdf.yaml │ ├── mod_torch_builtins.py │ ├── pytorch_ddm.py │ ├── regenerate.sh │ ├── run_translated_mlp_pure_mdf.py │ ├── simple_pytorch_to_mdf.1.png │ ├── simple_pytorch_to_mdf.json │ ├── simple_pytorch_to_mdf.png │ ├── simple_pytorch_to_mdf.py │ ├── simple_pytorch_to_mdf.svg │ ├── simple_pytorch_to_mdf_torchviz.png │ └── weights.h5 ├── Quantum │ └── README.md ├── README.md ├── SimpleExample.ipynb ├── TensorFlow │ ├── Keras │ │ ├── IRIS │ │ │ ├── README.md │ │ │ ├── keras_model.py │ │ │ ├── keras_to_MDF.1.png │ │ │ ├── keras_to_MDF.json │ │ │ ├── keras_to_MDF.png │ │ │ ├── keras_to_MDF.py │ │ │ ├── keras_to_MDF.yaml │ │ │ ├── layers_netron.png │ │ │ ├── model_on_iris_plot.png │ │ │ └── summary.png │ │ ├── MNIST │ │ │ ├── 3.png │ │ │ ├── README.md │ │ │ ├── keras_model.py │ │ │ ├── keras_to_MDF.1.png │ │ │ ├── keras_to_MDF.json │ │ │ ├── keras_to_MDF.png │ │ │ ├── keras_to_MDF.py │ │ │ ├── keras_to_MDF.yaml │ │ │ ├── layers_netron.png │ │ │ ├── model_plot.png │ │ │ └── summary.png │ │ ├── README.md │ │ └── regenerate.sh │ └── README.md └── WebGME │ ├── .gitignore │ ├── README.md │ ├── app.js │ ├── bin │ └── instance_converter │ ├── config │ ├── README.md │ ├── config.default.js │ ├── config.test.js │ ├── config.webgme.js │ └── index.js │ ├── examples │ ├── README.md │ ├── gme │ │ ├── ABCD.json │ │ ├── Inception.json │ │ └── Simple.json │ ├── inception_webgme.png │ └── mdf │ │ ├── ABCD.json │ │ └── Simple.json │ ├── mdf_meta.json │ ├── output.py │ ├── package-lock.json │ ├── package.json │ ├── spec_to_gme.js │ ├── src │ ├── common │ │ ├── README.md │ │ └── instance-converter.js │ ├── plugins │ │ └── ExportToMDFPython │ │ │ ├── 
ExportToMDFPython.js │ │ │ ├── metadata.json │ │ │ └── template.py.ejs │ ├── seeds │ │ └── MDF │ │ │ └── MDF.webgmex │ └── visualizers │ │ └── Visualizers.json │ ├── test │ └── globals.js │ └── webgme-setup.json ├── install_on_osbv2.sh ├── pyproject.toml ├── setup.cfg ├── src └── modeci_mdf │ ├── README.md │ ├── __init__.py │ ├── execution_engine.py │ ├── full_translator.py │ ├── functions │ ├── __init__.py │ ├── actr │ │ ├── __init__.py │ │ └── ccm │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── buffer.py │ │ │ ├── dm.py │ │ │ ├── logger.py │ │ │ ├── model.py │ │ │ ├── pattern.py │ │ │ └── scheduler.py │ ├── ddm.py │ ├── onnx.py │ └── standard.py │ ├── interfaces │ ├── __init__.py │ ├── actr │ │ ├── __init__.py │ │ └── importer.py │ ├── graphviz │ │ ├── __init__.py │ │ └── exporter.py │ ├── keras │ │ ├── __init__.py │ │ └── importer.py │ ├── neuroml │ │ ├── __init__.py │ │ ├── exporter.py │ │ └── syn_definitions.xml │ ├── onnx │ │ ├── __init__.py │ │ ├── exporter.py │ │ └── importer.py │ └── pytorch │ │ ├── __init__.py │ │ ├── builtins.py │ │ ├── exporter.py │ │ ├── importer.py │ │ └── mod_torch_builtins.py │ ├── mdf.py │ └── utils.py ├── test_all.sh └── tests ├── conftest.py ├── interfaces ├── onnx │ └── test_importer.py └── pytorch │ ├── conftest.py │ ├── test_export.py │ └── test_import.py ├── test_examples.py ├── test_execution.py ├── test_helpers.py ├── test_mdf_functions.py ├── test_model.py ├── test_onnx_functions.py ├── test_parameters.py └── test_scheduler.py /.github/workflows/ci_test_all.yml: -------------------------------------------------------------------------------- 1 | name: CI Test script 2 | 3 | on: 4 | push: 5 | branches: [ main, development, experimental, test*, nml* ] 6 | pull_request: 7 | branches: [ main, development, experimental, test*, nml* ] 8 | 9 | jobs: 10 | 11 | checks: 12 | name: Check Python ${{ matrix.python-version }} on ${{ matrix.runs-on }} 13 | runs-on: ${{ matrix.runs-on }} 14 | strategy: 15 | fail-fast: false 16 | 
matrix: 17 | python-version: [ "3.10"] 18 | runs-on: [ubuntu-latest] 19 | 20 | steps: 21 | - uses: actions/checkout@v4 22 | 23 | - uses: actions/setup-python@v5 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | 27 | - name: Install HDF5 for pytables on macos-14/latest 28 | if: ${{ matrix.runs-on == 'macos-latest' }} 29 | run: | 30 | brew install hdf5 31 | 32 | - name: Install graphviz 33 | uses: ts-graphviz/setup-graphviz@v2 34 | with: 35 | # Skip to run brew update command on macOS. 36 | macos-skip-brew-update: 'true' # default false 37 | 38 | - name: Run test script 39 | run: | 40 | export NEURON_HOME=$pythonLocation 41 | ./test_all.sh 42 | 43 | - name: Version info for installed packages 44 | run: | 45 | pip list 46 | 47 | 48 | 49 | 50 | - name: Final version info for optional installed packages 51 | run: | 52 | pip list 53 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | 3 | #- repo: https://github.com/psf/black 4 | # rev: 20.8b1 5 | # hooks: 6 | # - id: black 7 | 8 | - repo: https://github.com/pre-commit/pre-commit-hooks 9 | rev: v3.4.0 10 | hooks: 11 | - id: check-added-large-files 12 | args: ['--maxkb=3000'] 13 | - id: check-case-conflict 14 | - id: check-merge-conflict 15 | - id: check-symlinks 16 | - id: check-yaml 17 | - id: check-json 18 | - id: debug-statements 19 | - id: end-of-file-fixer 20 | - id: mixed-line-ending 21 | - id: requirements-txt-fixer 22 | - id: trailing-whitespace 23 | 24 | #- repo: https://github.com/PyCQA/isort 25 | # rev: 5.7.0 26 | # hooks: 27 | # - id: isort 28 | 29 | - repo: https://github.com/asottile/pyupgrade 30 | rev: v2.7.4 31 | hooks: 32 | - id: pyupgrade 33 | args: ["--py36-plus"] 34 | 35 | #- repo: https://github.com/pycqa/flake8 36 | # rev: 3.8.4 37 | # hooks: 38 | # - id: flake8 39 | # exclude: docs/conf.py 40 | # additional_dependencies: 
[flake8-bugbear, flake8-print] 41 | # 42 | #- repo: https://github.com/pre-commit/mirrors-mypy 43 | # rev: v0.800 44 | # hooks: 45 | # - id: mypy 46 | # files: src 47 | 48 | - repo: https://github.com/psf/black 49 | rev: 22.3.0 50 | hooks: 51 | - id: black 52 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Build documentation in the docs/ directory with Sphinx 9 | sphinx: 10 | configuration: docs/sphinx/source/conf.py 11 | 12 | # Just generate htmlzip version, as pdf failing due to inclusion of svg files (in badges) 13 | formats: 14 | - htmlzip 15 | 16 | python: 17 | install: 18 | - requirements: docs/sphinx/requirements.txt 19 | 20 | submodules: 21 | exclude: all 22 | -------------------------------------------------------------------------------- /docs/contributors.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import pandas as pd 3 | import json 4 | import textwrap 5 | from datetime import date 6 | 7 | 8 | url = "https://api.github.com/orgs/ModECI/repos" 9 | 10 | response = requests.get(url=url, auth=("", "")) 11 | 12 | json_data = response.json() 13 | 14 | df = pd.DataFrame(json_data) 15 | 16 | repo_name = df["name"] 17 | html_url = df["html_url"] 18 | repo_url = dict(zip(repo_name, html_url)) 19 | 20 | list_url = list(df["contributors_url"]) 21 | list_url.remove("https://api.github.com/repos/ModECI/PsyNeuLink/contributors") 22 | list_range = len(list_url) 23 | 24 | empty_list = [] 25 | for i in range(list_range): 26 | url = list_url[i] 27 | data = requests.get(url=url) 28 | empty_list.append(data.json()) 29 | 30 | con_json = [] 31 | for item in empty_list: 32 | for i in item: 33 | 
con_json.append(i) 34 | 35 | df1 = pd.DataFrame(con_json) 36 | 37 | per_info = list(df1["url"].unique()) 38 | len_per_info = len(per_info) 39 | 40 | empty_list = [] 41 | for i in range(len_per_info): 42 | url = per_info[i] 43 | data = requests.get(url=url) 44 | empty_list.append(data.json()) 45 | 46 | df2 = pd.DataFrame(empty_list) 47 | df2["name"] = df2["name"].fillna("") 48 | name = df2["name"] 49 | login = df2["login"] 50 | url_html = df2["html_url"] 51 | url_id = df2["id"] 52 | 53 | login_html = list(zip(name, login, url_html)) 54 | zip_dict = dict(zip(url_id, login_html)) 55 | 56 | 57 | if 49699333 in zip_dict: 58 | del zip_dict[49699333] 59 | 60 | file = "sphinx/source/api/Contributors.md" 61 | with open(file, "w") as f: 62 | print( 63 | textwrap.dedent( 64 | """\ 65 | (ModECI:contributors)= 66 | 67 | # ModECI contributors 68 | 69 | This page list names and Github profiles of contributors to the various ModECI repositories, listed in no particular order. 70 | This page is generated periodically, most recently on {}.""".format( 71 | date.today() 72 | ) 73 | ), 74 | file=f, 75 | ) 76 | 77 | print("", file=f) 78 | 79 | for key, val in zip_dict.items(): 80 | print("- {} ([@{}]({}))".format(val[0], val[1], val[2]), file=f) 81 | 82 | print( 83 | textwrap.dedent( 84 | """ 85 | ## Repositories 86 | 87 | """ 88 | ), 89 | file=f, 90 | ) 91 | 92 | for key, val in repo_url.items(): 93 | print(f"- [{key}]({val})", file=f) 94 | -------------------------------------------------------------------------------- /docs/images/MDFgraph1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/images/MDFgraph1.png -------------------------------------------------------------------------------- /docs/images/MDFgraph2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/images/MDFgraph2.png -------------------------------------------------------------------------------- /docs/sphinx/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/sphinx/README.md: -------------------------------------------------------------------------------- 1 | This is the directory for the Sphinx based documentation which gets generated at https://mdf.readthedocs.io. 2 | 3 | See https://github.com/ModECI/MDF/issues/357 for details. 
4 | -------------------------------------------------------------------------------- /docs/sphinx/images/logo_dark_bg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/images/logo_dark_bg.png -------------------------------------------------------------------------------- /docs/sphinx/images/logo_dark_bg_mono.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/images/logo_dark_bg_mono.png -------------------------------------------------------------------------------- /docs/sphinx/images/logo_light_bg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/images/logo_light_bg.png -------------------------------------------------------------------------------- /docs/sphinx/images/logo_light_bg_mono.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/images/logo_light_bg_mono.png -------------------------------------------------------------------------------- /docs/sphinx/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/sphinx/requirements.txt: -------------------------------------------------------------------------------- 1 | # Install all the dependencies 2 | -e .[optional] 3 | -------------------------------------------------------------------------------- /docs/sphinx/source/_static/pydata-custom.css: -------------------------------------------------------------------------------- 1 | /*Tweaks to the Pydata default CSS */ 2 | 3 | /*No yellow background highlight when targeted by summary tables */ 4 | /*dt:target { background-color: #f8f8f8; border: 1px solid black, }*/ 5 | dt:target { background: transparent;} 6 | /*More space between H1s and signatures in API reference*/ 7 | h1 { margin-bottom: 40px; } 8 | 9 | /*No line underneath summary table headings (clashes with line above first member)*/ 10 | p.rubric { border-bottom: 0px; } 11 | -------------------------------------------------------------------------------- /docs/sphinx/source/_static/rtd-custom.css: -------------------------------------------------------------------------------- 1 | /* Override nav bar color */ 2 | /*.wy-side-nav-search { 3 | background-color: #fbfbb6; 4 | } 5 | .wy-side-nav-search > a { 6 | color: #b2355c 7 | }*/ 8 | 9 | /* Override text bar color */ 10 | /*.caption-text { 11 | color: #b2355c; 12 | }*/ 13 | 14 | /* Override code signature colour */ 15 | /*.rst-content 
dl:not(.docutils) dt { 16 | background: #fbfbb6; 17 | color: #b2355c; 18 | border-top: solid 3px #b2355c; 19 | }*/ 20 | 21 | /* Override hyperlink colour */ 22 | /* a { 23 | color: #b2355c; 24 | }*/ 25 | 26 | /* Make content width wider*/ 27 | .wy-nav-content { 28 | max-width: 75% !important; 29 | } 30 | 31 | /* Word-wrap tables */ 32 | .wy-table-responsive table td, 33 | .wy-table-responsive table th { 34 | white-space: normal; 35 | } 36 | -------------------------------------------------------------------------------- /docs/sphinx/source/_templates/custom-class-template.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autoclass:: {{ objname }} 6 | :members: 7 | :show-inheritance: 8 | :special-members: __call__, __add__, __mul__ 9 | 10 | {% block methods %} 11 | {% if methods %} 12 | .. rubric:: {{ _('Methods') }} 13 | 14 | .. autosummary:: 15 | {% for item in methods %} 16 | {%- if not item.startswith('_') %} 17 | {%- if item not in inherited_members %} 18 | ~{{ name }}.{{ item }} 19 | {%- endif -%} 20 | {%- endif -%} 21 | {%- endfor %} 22 | {% endif %} 23 | {% endblock %} 24 | -------------------------------------------------------------------------------- /docs/sphinx/source/_templates/custom-module-template.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. automodule:: {{ fullname }} 4 | 5 | {% block attributes %} 6 | {% if attributes %} 7 | .. rubric:: Module attributes 8 | 9 | .. autosummary:: 10 | :toctree: 11 | {% for item in attributes %} 12 | {{ item }} 13 | {%- endfor %} 14 | {% endif %} 15 | {% endblock %} 16 | 17 | {% block functions %} 18 | 19 | {% if functions %} 20 | .. rubric:: {{ _('Functions') }} 21 | 22 | .. 
autosummary:: 23 | :toctree: 24 | {% for item in functions %} 25 | {%- if not item.startswith('_') %} 26 | {{ item }} 27 | {% endif %} 28 | {%- endfor %} 29 | {% endif %} 30 | {% endblock %} 31 | 32 | {% block classes %} 33 | {% if classes %} 34 | .. rubric:: {{ _('Classes') }} 35 | 36 | .. autosummary:: 37 | :toctree: 38 | :template: custom-class-template.rst 39 | {% for item in classes %} 40 | {{ item }} 41 | {%- endfor %} 42 | {% endif %} 43 | {% endblock %} 44 | 45 | {% block exceptions %} 46 | {% if exceptions %} 47 | .. rubric:: {{ _('Exceptions') }} 48 | 49 | .. autosummary:: 50 | :toctree: 51 | {% for item in exceptions %} 52 | {{ item }} 53 | {%- endfor %} 54 | {% endif %} 55 | {% endblock %} 56 | 57 | {% block modules %} 58 | {% if modules %} 59 | .. autosummary:: 60 | :toctree: 61 | :template: custom-module-template.rst 62 | :recursive: 63 | {% for item in modules %} 64 | {{ item }} 65 | {%- endfor %} 66 | {% endif %} 67 | {% endblock %} 68 | -------------------------------------------------------------------------------- /docs/sphinx/source/api/Contributors.md: -------------------------------------------------------------------------------- 1 | (ModECI:contributors)= 2 | 3 | # ModECI contributors 4 | 5 | This page list names and Github profiles of contributors to the various ModECI repositories, listed in no particular order. 6 | This page is generated periodically, most recently on 2023-03-06. 
7 | 8 | - Padraig Gleeson ([@pgleeson](https://github.com/pgleeson)) 9 | - David Turner ([@davidt0x](https://github.com/davidt0x)) 10 | - Katherine Mantel ([@kmantel](https://github.com/kmantel)) 11 | - Ivy ([@Ivy8127](https://github.com/Ivy8127)) 12 | - ([@mraunak](https://github.com/mraunak)) 13 | - Shanka Subhra Mondal ([@Shanka123](https://github.com/Shanka123)) 14 | - Onabajo Monsurat ([@Monsurat-Onabajo](https://github.com/Monsurat-Onabajo)) 15 | - Parikshit Singh Rathore ([@parikshit14](https://github.com/parikshit14)) 16 | - Patrick Stock ([@patrickstock](https://github.com/patrickstock)) 17 | - Jeremy Lee ([@jeremyrl7](https://github.com/jeremyrl7)) 18 | - Raghavendra Pradyumna Pothukuchi ([@rpradyumna](https://github.com/rpradyumna)) 19 | - Marble Kusanele Mpofu ([@kusanele](https://github.com/kusanele)) 20 | - Somya Agrawal ([@somyagr](https://github.com/somyagr)) 21 | - ([@jdcpni](https://github.com/jdcpni)) 22 | - Riya Saxena ([@29riyasaxena](https://github.com/29riyasaxena)) 23 | - ([@FatimaArshad-DS](https://github.com/FatimaArshad-DS)) 24 | - Megha Bose ([@Megha-Bose](https://github.com/Megha-Bose)) 25 | - Pranav Gokhale ([@singular-value](https://github.com/singular-value)) 26 | - ([@sakshikaushik717](https://github.com/sakshikaushik717)) 27 | - Esraa Abdelmaksoud ([@esraa-abdelmaksoud](https://github.com/esraa-abdelmaksoud)) 28 | - Shivani Rana ([@shivani6320](https://github.com/shivani6320)) 29 | - ([@vidhya-metacell](https://github.com/vidhya-metacell)) 30 | - ([@nicholwkprinceton](https://github.com/nicholwkprinceton)) 31 | - Matteo Cantarelli ([@tarelli](https://github.com/tarelli)) 32 | - Brian Broll ([@brollb](https://github.com/brollb)) 33 | 34 | ## Repositories 35 | 36 | 37 | - [MDF](https://github.com/ModECI/MDF) 38 | - [Website](https://github.com/ModECI/Website) 39 | - [MDFTests](https://github.com/ModECI/MDFTests) 40 | - [modelspec](https://github.com/ModECI/modelspec) 41 | - [PsyNeuLink](https://github.com/ModECI/PsyNeuLink) 42 | 
-------------------------------------------------------------------------------- /docs/sphinx/source/api/Export.md: -------------------------------------------------------------------------------- 1 | # Supported export and import formats 2 | 3 | This is the list of currently supported export/import formats which MDF supports 4 | 5 | - ACT-R [README](https://github.com/ModECI/MDF/tree/main/examples/ACT-R#readme) 6 | - NeuroML [README](https://github.com/ModECI/MDF/tree/main/examples/NeuroML#readme) 7 | - ONNX [README](https://github.com/ModECI/MDF/tree/main/examples/ONNX#readme) 8 | - PsyNeuLink [README](https://github.com/ModECI/MDF/tree/main/examples/PsyNeuLink#readme) 9 | - PyTorch [README](https://github.com/ModECI/MDF/tree/main/examples/PyTorch#readme) 10 | - Quantum [README](https://github.com/ModECI/MDF/tree/main/examples/Quantum#readme) 11 | - WebGME [README](https://github.com/ModECI/MDF/tree/main/examples/WebGME#readme) 12 | -------------------------------------------------------------------------------- /docs/sphinx/source/api/MDFpaper.md: -------------------------------------------------------------------------------- 1 | (ModECI:mdf_intro_paper)= 2 | 3 | # Paper introducing MDF 4 | 5 | The background to the ModECI project, the motivation for developing the Model Description Format, and the initial Python implementation of the language have been described in a NeuroView article in the Neuron journal: 6 | 7 | *Integrating model development across computational neuroscience, cognitive science, and machine learning* 8 | Padraig Gleeson, Sharon Crook, David Turner, Katherine Mantel, Mayank Raunak, Ted Willke and Jonathan D. Cohen, April 25, 2023 DOI: [https://doi.org/10.1016/j.neuron.2023.03.037](https://doi.org/10.1016/j.neuron.2023.03.037) 9 | 10 | *Neuroscience, cognitive science, and computer science are increasingly benefiting through their interactions. 
This could be accelerated by direct sharing of computational models across disparate modeling software used in each. We describe a Model Description Format designed to meet this challenge.* 11 | 12 | The paper will be freely downloadable from [here](https://www.cell.com/neuron/fulltext/S0896-6273(23)00261-1) in April 2024. If you do not have access to this via your institution, please [download the preprint of the paper here](https://github.com/ModECI/MDFpaper/blob/main/GleesonEtAl23_ModECI_NeuroView.pdf). 13 | -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/ACT-R/actr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/ACT-R/actr.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/ACT-R/addition.lisp: -------------------------------------------------------------------------------- 1 | 2 | (clear-all) 3 | 4 | (define-model addition 5 | 6 | (sgp :esc t :lf .05) 7 | 8 | (chunk-type number number next) 9 | (chunk-type add arg1 arg2 sum count) 10 | 11 | (add-dm 12 | (zero isa number number zero next one) 13 | (one isa number number one next two) 14 | (two isa number number two next three) 15 | (three isa number number three next four) 16 | (four isa number number four next five) 17 | (five isa number number five next six) 18 | (six isa number number six next seven) 19 | (seven isa number number seven next eight) 20 | (eight isa number number eight next nine) 21 | (nine isa number number nine next ten) 22 | (ten isa number number ten) 23 | (second-goal ISA add arg1 five arg2 two)) 24 | 25 | (P initialize-addition 26 | =goal> 27 | ISA add 28 | arg1 =num1 29 | arg2 =num2 30 | sum nil 31 | ==> 32 | =goal> 33 | ISA add 34 | sum =num1 35 | count zero 36 | +retrieval> 37 | ISA 
number 38 | number =num1 39 | ) 40 | 41 | (P terminate-addition 42 | =goal> 43 | ISA add 44 | count =num 45 | arg2 =num 46 | sum =answer 47 | =retrieval> 48 | ISA number 49 | number =answer 50 | ==> 51 | =goal> 52 | ISA add 53 | count nil 54 | !output! =answer 55 | ) 56 | 57 | (P increment-count 58 | =goal> 59 | ISA add 60 | sum =sum 61 | count =count 62 | =retrieval> 63 | ISA number 64 | number =count 65 | next =newcount 66 | ==> 67 | =goal> 68 | ISA add 69 | count =newcount 70 | +retrieval> 71 | ISA number 72 | number =sum 73 | ) 74 | 75 | (P increment-sum 76 | =goal> 77 | ISA add 78 | sum =sum 79 | count =count 80 | - arg2 =count 81 | =retrieval> 82 | ISA number 83 | number =sum 84 | next =newsum 85 | ==> 86 | =goal> 87 | ISA add 88 | sum =newsum 89 | +retrieval> 90 | ISA number 91 | number =count 92 | ) 93 | 94 | (goal-focus second-goal) 95 | ) 96 | -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/ACT-R/addition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/ACT-R/addition.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/ACT-R/addition.py: -------------------------------------------------------------------------------- 1 | """Create the MDF files for the addition example and run using the scheduler.""" 2 | 3 | import os 4 | from modeci_mdf.interfaces.actr import actr_to_mdf 5 | from modeci_mdf.execution_engine import EvaluableGraph 6 | from modeci_mdf.utils import load_mdf 7 | 8 | 9 | def main(): 10 | """Takes addition.lisp, converts to MDF, and runs using the scheduler.""" 11 | file_name = os.path.dirname(os.path.realpath(__file__)) + "/addition.lisp" 12 | print(file_name) 13 | mod = actr_to_mdf(file_name) 14 | mdf_graph = load_mdf(file_name[:-5] + 
".json").graphs[0] 15 | eg = EvaluableGraph(graph=mdf_graph, verbose=False) 16 | term = False 17 | goal = {} 18 | retrieval = {} 19 | while not term: 20 | eg.evaluate(initializer={"goal_input": goal, "dm_input": retrieval}) 21 | term = ( 22 | eg.enodes["check_termination"].evaluable_outputs["check_output"].curr_value 23 | ) 24 | goal = ( 25 | eg.enodes["fire_production"] 26 | .evaluable_outputs["fire_prod_output_to_goal"] 27 | .curr_value 28 | ) 29 | retrieval = ( 30 | eg.enodes["fire_production"] 31 | .evaluable_outputs["fire_prod_output_to_retrieval"] 32 | .curr_value 33 | ) 34 | print("Final Goal:") 35 | print(eg.enodes["goal_buffer"].evaluable_outputs["goal_output"].curr_value) 36 | 37 | 38 | if __name__ == "__main__": 39 | main() 40 | -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/ACT-R/count.lisp: -------------------------------------------------------------------------------- 1 | (clear-all) 2 | 3 | (define-model count 4 | 5 | (sgp :esc t :lf .05 :trace-detail high) 6 | 7 | 8 | (chunk-type number number next) 9 | (chunk-type count-from start end count) 10 | 11 | (add-dm 12 | (one ISA number number one next two) 13 | (two ISA number number two next three) 14 | (three ISA number number three next four) 15 | (four ISA number number four next five) 16 | (five ISA number number five) 17 | (first-goal ISA count-from start two end four)) 18 | 19 | (goal-focus first-goal) 20 | 21 | (p start 22 | =goal> 23 | ISA count-from 24 | start =num1 25 | count nil 26 | ==> 27 | =goal> 28 | ISA count-from 29 | count =num1 30 | +retrieval> 31 | ISA number 32 | number =num1 33 | ) 34 | 35 | (P increment 36 | =goal> 37 | ISA count-from 38 | count =num1 39 | - end =num1 40 | =retrieval> 41 | ISA number 42 | number =num1 43 | next =num2 44 | ==> 45 | =goal> 46 | ISA count-from 47 | count =num2 48 | +retrieval> 49 | ISA number 50 | number =num2 51 | !output! 
(=num1) 52 | ) 53 | 54 | (P stop 55 | =goal> 56 | ISA count-from 57 | count =num 58 | end =num 59 | =retrieval> 60 | ISA number 61 | number =num 62 | ==> 63 | -goal> 64 | !output! (=num) 65 | ) 66 | ) 67 | -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/ACT-R/count.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/ACT-R/count.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/ACT-R/count.py: -------------------------------------------------------------------------------- 1 | """Create the MDF files for the count example and run using the scheduler.""" 2 | import os 3 | from modeci_mdf.interfaces.actr import actr_to_mdf 4 | from modeci_mdf.execution_engine import EvaluableGraph 5 | from modeci_mdf.utils import load_mdf 6 | 7 | 8 | def main(): 9 | """Takes count.lisp, converts to MDF, and runs using the scheduler.""" 10 | file_name = os.path.dirname(os.path.realpath(__file__)) + "/count.lisp" 11 | print(file_name) 12 | mod = actr_to_mdf(file_name) 13 | mdf_graph = load_mdf(file_name[:-5] + ".json").graphs[0] 14 | eg = EvaluableGraph(graph=mdf_graph, verbose=False) 15 | term = False 16 | goal = {} 17 | retrieval = {} 18 | while not term: 19 | eg.evaluate(initializer={"goal_input": goal, "dm_input": retrieval}) 20 | term = ( 21 | eg.enodes["check_termination"].evaluable_outputs["check_output"].curr_value 22 | ) 23 | goal = ( 24 | eg.enodes["fire_production"] 25 | .evaluable_outputs["fire_prod_output_to_goal"] 26 | .curr_value 27 | ) 28 | retrieval = ( 29 | eg.enodes["fire_production"] 30 | .evaluable_outputs["fire_prod_output_to_retrieval"] 31 | .curr_value 32 | ) 33 | print("Final Goal:") 34 | print(eg.enodes["goal_buffer"].evaluable_outputs["goal_output"].curr_value) 35 
| 36 | 37 | if __name__ == "__main__": 38 | main() 39 | -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/MDF/images/abc_conditions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/MDF/images/abc_conditions.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/MDF/images/abcd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/MDF/images/abcd.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/MDF/images/abcd_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/MDF/images/abcd_3.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/MDF/images/arrays.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/MDF/images/arrays.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/MDF/images/params_funcs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/MDF/images/params_funcs.png 
-------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/MDF/images/simple.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/MDF/images/simple.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/MDF/images/simple_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/MDF/images/simple_3.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/MDF/images/states.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/MDF/images/states.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/MDF/images/switched_rlc_circuit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/MDF/images/switched_rlc_circuit.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/MDF/images/switched_rlc_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/MDF/images/switched_rlc_plot.png -------------------------------------------------------------------------------- 
/docs/sphinx/source/api/export_format/NeuroML/ABCD.1.mdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/ABCD.1.mdf.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/ABCD.mdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/ABCD.mdf.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/ABCD.nmllite.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/ABCD.nmllite.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/FN.gv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/FN.gv.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/FNmulti.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/FNmulti.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/Izh.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/Izh.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/Izh_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/Izh_run.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/IzhikevichTest.gv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/IzhikevichTest.gv.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/LEMS_SimABCD.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/LEMS_SimABCD.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/LEMS_SimFN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/LEMS_SimFN.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/LEMS_SimIzhikevichTest.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/LEMS_SimIzhikevichTest.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/LemsFNrun.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/LemsFNrun.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/MDFFNrun.multi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/MDFFNrun.multi.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/NeuroML/MDFFNrun.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/NeuroML/MDFFNrun.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/ONNX/ab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/ONNX/ab.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/ONNX/abc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/ONNX/abc.png 
-------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/ONNX/simple_abc.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file does three things: 3 | - It implements a simple PyTorch model. 4 | - Exports in to ONNX using a combination of tracing and scripting 5 | - Converts it to MDF 6 | """ 7 | import torch 8 | import onnx 9 | import os 10 | 11 | 12 | from modeci_mdf.interfaces.onnx import onnx_to_mdf 13 | 14 | 15 | class A(torch.nn.Module): 16 | def forward(self, x): 17 | return x + 1 18 | 19 | 20 | @torch.jit.script 21 | def loop_b(x, y): 22 | for i in range(int(y)): 23 | x = x / 10 24 | return x 25 | 26 | 27 | class B(torch.nn.Module): 28 | def forward(self, x, y): 29 | return loop_b(x, y) 30 | 31 | 32 | class C(torch.nn.Module): 33 | def forward(self, x): 34 | return x * 100 35 | 36 | 37 | class ABC(torch.nn.Module): 38 | def __init__(self): 39 | super().__init__() 40 | self.A = A() 41 | self.B = B() 42 | self.C = C() 43 | 44 | def forward(self, x, B_loop_count): 45 | 46 | # Run A 47 | y = self.A(x) 48 | 49 | # Run B (loop_count times) 50 | y = self.B(y, B_loop_count) 51 | 52 | # Run C 53 | y = self.C(y) 54 | 55 | return y 56 | 57 | 58 | def main(): 59 | 60 | model = ABC() 61 | dummy_input = torch.zeros(2, 3) 62 | loop_count = torch.tensor(5, dtype=torch.long) 63 | torch.onnx.export( 64 | model, 65 | (dummy_input, loop_count), 66 | "abc.onnx", 67 | verbose=True, 68 | input_names=["input", "B_loop_count"], 69 | opset_version=9, 70 | ) 71 | 72 | # Load it back in using ONNX package 73 | onnx_model = onnx.load("abc.onnx") 74 | onnx.checker.check_model(onnx_model) 75 | 76 | mdf_model = onnx_to_mdf(onnx_model) 77 | 78 | mdf_model.to_json_file("abc.json") 79 | mdf_model.to_yaml_file("abc.yaml") 80 | mdf_model.to_graph_image( 81 | engine="dot", 82 | output_format="png", 83 | view_on_render=False, 84 | level=3, 85 | filename_root="abc", 86 | 
only_warn_on_fail=( 87 | os.name == "nt" 88 | ), # Makes sure test of this doesn't fail on Windows on GitHub Actions 89 | ) 90 | 91 | 92 | if __name__ == "__main__": 93 | main() 94 | -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/PyTorch/ab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/PyTorch/ab.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/PyTorch/abc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/PyTorch/abc.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/PyTorch/ddm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/PyTorch/ddm.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/PyTorch/inception.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/PyTorch/inception.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/PyTorch/mlp_pure_mdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/PyTorch/mlp_pure_mdf.png 
-------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/PyTorch/mlp_pure_mdf.results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/PyTorch/mlp_pure_mdf.results.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/PyTorch/simple_pytorch_to_mdf.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/PyTorch/simple_pytorch_to_mdf.1.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/PyTorch/simple_pytorch_to_mdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/PyTorch/simple_pytorch_to_mdf.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/PyTorch/simple_pytorch_to_mdf_torchviz.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/docs/sphinx/source/api/export_format/PyTorch/simple_pytorch_to_mdf_torchviz.png -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/Quantum/Quantum.md: -------------------------------------------------------------------------------- 1 | # Interactions between MDF and Quantum computing technologies 2 | 3 | Starting summer 2021, we will develop tools for interfacing between MDF and quantum computers. 
This interface is motivated by expectations that quantum hardware will provide speedups for solving Ising-type MDF problems. We will address both gate- and annealing- based quantum computers: 4 | * for gate-based quantum computers, we will bridge from MDF to [OpenQASM](https://github.com/Qiskit/openqasm), the leading quantum Intermediate Representation. 5 | * for annealing-based quantum computers, we will target platforms such as [D-Wave Ocean](https://docs.ocean.dwavesys.com/en/stable/). 6 | 7 | Our work will be agnostic to the exact quantum algorithm/solver used, though we will provide sample implementations using Variational Quantum Eigensolver ([VQE](https://www.nature.com/articles/ncomms5213)) and [Quantum Approximate Optimization Algorithm](https://arxiv.org/abs/1411.4028). 8 | 9 | As a first step, we have begun developing implementations targeting quantum hardware for the key computations in several cognitive models as listed below. Next, we will extend MDF so that quantum implementations such as the ones we develop, can be expressed in it. 
10 | 11 | | Tasks | Models | Key computations | Quantum algorithms | 12 | |-------------------------------|----------------------------|------------------------|----------------------------------------------------| 13 | | Two alternative forced choice | Quantum walk | Evolution, Projection | Unitary evolution, Hamiltonian simulation | 14 | | Multiple alternative models | Potential wells | Eigenstates and values | Variational methods (e.g., subspace and deflation) | 15 | | Bistable perception | Quantum walk | Evolution, projection | Unitary evolution, Hamiltonian simulation | 16 | | Control | Leaky Competing Integrator | Optimization | Quantum annealing | 17 | | Parameter estimation | Data fitting | Optimization | Quantum annealing | 18 | -------------------------------------------------------------------------------- /docs/sphinx/source/api/export_format/WebGME/WebGME.md: -------------------------------------------------------------------------------- 1 | # MDF in WebGME 2 | This contains a tool for converting the 3 | [MDF specification](https://github.com/ModECI/MDF/blob/documentation/docs/MDF_specification.json) into JSON 4 | compatible with [JSON importer](https://github.com/deepforge-dev/webgme-json-importer/tree/master/src/common). 5 | This allows us to programmatically create a metamodel and, as a result, use WebGME as a design environment for MDF. 6 | 7 | ## Quick Start 8 | 9 | ### Starting WebGME app 10 | First, install the mdf_gme following: 11 | - [NodeJS](https://nodejs.org/en/) (LTS recommended) 12 | - [MongoDB](https://www.mongodb.com/) 13 | 14 | Second, start mongodb locally by running the `mongod` executable in your mongodb installation 15 | (you may need to create a `data` directory or set `--dbpath`). 16 | 17 | Then, run `webgme start` from the project root to start . Finally, navigate to `http://localhost:8888` to start using 18 | mdf_gme! 19 | 20 | ### Loading the spec into WebGME 21 | First, install dependencies with `npm install`. 
Then convert the MDF specification using 22 | ``` 23 | node spec_to_gme.js path/to/MDF/spec.json 24 | ``` 25 | 26 | Finally, import the JSON into WebGME just like the 27 | [examples](https://github.com/deepforge-dev/webgme-json-importer/tree/master/examples) (suffixed with "\_meta")! 28 | 29 | ### Loading instances to and from WebGME importable JSON and MDF 30 | ``` 31 | node bin/instance_converter path/to/MDForGME/instance.json 32 | ``` 33 | -------------------------------------------------------------------------------- /docs/sphinx/source/api/modules.rst: -------------------------------------------------------------------------------- 1 | .. 2 | DO NOT DELETE THIS FILE! It contains the all-important `.. autosummary::` directive with `:recursive:` option, without 3 | which API documentation wouldn't get extracted from docstrings by the `sphinx.ext.autosummary` engine. It is hidden 4 | (not declared in any toctree) to remove an unnecessary intermediate page; index.rst instead points directly to the 5 | package page. DO NOT REMOVE THIS FILE! 6 | 7 | .. autosummary:: 8 | :toctree: _autosummary 9 | :template: custom-module-template.rst 10 | :recursive: 11 | 12 | modeci_mdf 13 | -------------------------------------------------------------------------------- /docs/sphinx/source/api/readme_link.rst: -------------------------------------------------------------------------------- 1 | ----------- 2 | Readme File 3 | ----------- 4 | 5 | .. 
mdinclude:: ../../../../README.md 6 | -------------------------------------------------------------------------------- /examples/ACT-R/actr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/ACT-R/actr.png -------------------------------------------------------------------------------- /examples/ACT-R/addition.lisp: -------------------------------------------------------------------------------- 1 | 2 | (clear-all) 3 | 4 | (define-model addition 5 | 6 | (sgp :esc t :lf .05) 7 | 8 | (chunk-type number number next) 9 | (chunk-type add arg1 arg2 sum count) 10 | 11 | (add-dm 12 | (zero isa number number zero next one) 13 | (one isa number number one next two) 14 | (two isa number number two next three) 15 | (three isa number number three next four) 16 | (four isa number number four next five) 17 | (five isa number number five next six) 18 | (six isa number number six next seven) 19 | (seven isa number number seven next eight) 20 | (eight isa number number eight next nine) 21 | (nine isa number number nine next ten) 22 | (ten isa number number ten) 23 | (second-goal ISA add arg1 five arg2 two)) 24 | 25 | (P initialize-addition 26 | =goal> 27 | ISA add 28 | arg1 =num1 29 | arg2 =num2 30 | sum nil 31 | ==> 32 | =goal> 33 | ISA add 34 | sum =num1 35 | count zero 36 | +retrieval> 37 | ISA number 38 | number =num1 39 | ) 40 | 41 | (P terminate-addition 42 | =goal> 43 | ISA add 44 | count =num 45 | arg2 =num 46 | sum =answer 47 | =retrieval> 48 | ISA number 49 | number =answer 50 | ==> 51 | =goal> 52 | ISA add 53 | count nil 54 | !output! 
=answer 55 | ) 56 | 57 | (P increment-count 58 | =goal> 59 | ISA add 60 | sum =sum 61 | count =count 62 | =retrieval> 63 | ISA number 64 | number =count 65 | next =newcount 66 | ==> 67 | =goal> 68 | ISA add 69 | count =newcount 70 | +retrieval> 71 | ISA number 72 | number =sum 73 | ) 74 | 75 | (P increment-sum 76 | =goal> 77 | ISA add 78 | sum =sum 79 | count =count 80 | - arg2 =count 81 | =retrieval> 82 | ISA number 83 | number =sum 84 | next =newsum 85 | ==> 86 | =goal> 87 | ISA add 88 | sum =newsum 89 | +retrieval> 90 | ISA number 91 | number =count 92 | ) 93 | 94 | (goal-focus second-goal) 95 | ) 96 | -------------------------------------------------------------------------------- /examples/ACT-R/addition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/ACT-R/addition.png -------------------------------------------------------------------------------- /examples/ACT-R/addition.py: -------------------------------------------------------------------------------- 1 | """Create the MDF files for the addition example and run using the scheduler.""" 2 | 3 | import os 4 | from modeci_mdf.interfaces.actr import actr_to_mdf 5 | from modeci_mdf.execution_engine import EvaluableGraph 6 | from modeci_mdf.utils import load_mdf 7 | 8 | 9 | def main(): 10 | """Takes addition.lisp, converts to MDF, and runs using the scheduler.""" 11 | file_name = os.path.dirname(os.path.realpath(__file__)) + "/addition.lisp" 12 | print(file_name) 13 | mod = actr_to_mdf(file_name) 14 | mdf_graph = load_mdf(file_name[:-5] + ".json").graphs[0] 15 | eg = EvaluableGraph(graph=mdf_graph, verbose=False) 16 | term = False 17 | goal = {} 18 | retrieval = {} 19 | while not term: 20 | eg.evaluate(initializer={"goal_input": goal, "dm_input": retrieval}) 21 | term = ( 22 | eg.enodes["check_termination"].evaluable_outputs["check_output"].curr_value 23 | ) 24 | goal = ( 25 | 
eg.enodes["fire_production"] 26 | .evaluable_outputs["fire_prod_output_to_goal"] 27 | .curr_value 28 | ) 29 | retrieval = ( 30 | eg.enodes["fire_production"] 31 | .evaluable_outputs["fire_prod_output_to_retrieval"] 32 | .curr_value 33 | ) 34 | print("Final Goal:") 35 | print(eg.enodes["goal_buffer"].evaluable_outputs["goal_output"].curr_value) 36 | 37 | 38 | if __name__ == "__main__": 39 | main() 40 | -------------------------------------------------------------------------------- /examples/ACT-R/count.lisp: -------------------------------------------------------------------------------- 1 | (clear-all) 2 | 3 | (define-model count 4 | 5 | (sgp :esc t :lf .05 :trace-detail high) 6 | 7 | 8 | (chunk-type number number next) 9 | (chunk-type count-from start end count) 10 | 11 | (add-dm 12 | (one ISA number number one next two) 13 | (two ISA number number two next three) 14 | (three ISA number number three next four) 15 | (four ISA number number four next five) 16 | (five ISA number number five) 17 | (first-goal ISA count-from start two end four)) 18 | 19 | (goal-focus first-goal) 20 | 21 | (p start 22 | =goal> 23 | ISA count-from 24 | start =num1 25 | count nil 26 | ==> 27 | =goal> 28 | ISA count-from 29 | count =num1 30 | +retrieval> 31 | ISA number 32 | number =num1 33 | ) 34 | 35 | (P increment 36 | =goal> 37 | ISA count-from 38 | count =num1 39 | - end =num1 40 | =retrieval> 41 | ISA number 42 | number =num1 43 | next =num2 44 | ==> 45 | =goal> 46 | ISA count-from 47 | count =num2 48 | +retrieval> 49 | ISA number 50 | number =num2 51 | !output! (=num1) 52 | ) 53 | 54 | (P stop 55 | =goal> 56 | ISA count-from 57 | count =num 58 | end =num 59 | =retrieval> 60 | ISA number 61 | number =num 62 | ==> 63 | -goal> 64 | !output! 
(=num) 65 | ) 66 | ) 67 | -------------------------------------------------------------------------------- /examples/ACT-R/count.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/ACT-R/count.png -------------------------------------------------------------------------------- /examples/ACT-R/count.py: -------------------------------------------------------------------------------- 1 | """Create the MDF files for the count example and run using the scheduler.""" 2 | import os 3 | from modeci_mdf.interfaces.actr import actr_to_mdf 4 | from modeci_mdf.execution_engine import EvaluableGraph 5 | from modeci_mdf.utils import load_mdf 6 | 7 | 8 | def main(): 9 | """Takes count.lisp, converts to MDF, and runs using the scheduler.""" 10 | file_name = os.path.dirname(os.path.realpath(__file__)) + "/count.lisp" 11 | print(file_name) 12 | mod = actr_to_mdf(file_name) 13 | mdf_graph = load_mdf(file_name[:-5] + ".json").graphs[0] 14 | eg = EvaluableGraph(graph=mdf_graph, verbose=False) 15 | term = False 16 | goal = {} 17 | retrieval = {} 18 | while not term: 19 | eg.evaluate(initializer={"goal_input": goal, "dm_input": retrieval}) 20 | term = ( 21 | eg.enodes["check_termination"].evaluable_outputs["check_output"].curr_value 22 | ) 23 | goal = ( 24 | eg.enodes["fire_production"] 25 | .evaluable_outputs["fire_prod_output_to_goal"] 26 | .curr_value 27 | ) 28 | retrieval = ( 29 | eg.enodes["fire_production"] 30 | .evaluable_outputs["fire_prod_output_to_retrieval"] 31 | .curr_value 32 | ) 33 | print("Final Goal:") 34 | print(eg.enodes["goal_buffer"].evaluable_outputs["goal_output"].curr_value) 35 | 36 | 37 | if __name__ == "__main__": 38 | main() 39 | -------------------------------------------------------------------------------- /examples/MDF/Arrays.yaml: -------------------------------------------------------------------------------- 1 | Arrays: 2 | 
format: ModECI MDF v0.4 3 | generating_application: Python modeci-mdf v0.4.11 4 | graphs: 5 | array_example: 6 | nodes: 7 | input_node: 8 | parameters: 9 | input_level: 10 | value: 11 | - - 1 12 | - 2.0 13 | - - 3 14 | - 4 15 | output_ports: 16 | out_port: 17 | value: input_level 18 | middle_node: 19 | input_ports: 20 | input_port1: 21 | shape: 22 | - 2 23 | - 2 24 | parameters: 25 | slope: 26 | value: 0.5 27 | intercept: 28 | value: 29 | - - 0.0 30 | - 1.0 31 | - - 2.0 32 | - 2.0 33 | linear_1: 34 | function: linear 35 | args: 36 | variable0: input_port1 37 | slope: slope 38 | intercept: intercept 39 | output_ports: 40 | output_1: 41 | value: linear_1 42 | edges: 43 | input_edge: 44 | sender: input_node 45 | receiver: middle_node 46 | sender_port: out_port 47 | receiver_port: input_port1 48 | parameters: 49 | weight: 50 | - - 1 51 | - 0 52 | - - 0 53 | - 1 54 | -------------------------------------------------------------------------------- /examples/MDF/NewtonCoolingModel.json: -------------------------------------------------------------------------------- 1 | { 2 | "NewtonCoolingModel": { 3 | "format": "ModECI MDF v0.4", 4 | "generating_application": "Python modeci-mdf v0.4.11", 5 | "graphs": { 6 | "cooling_process": { 7 | "nodes": { 8 | "cool_node": { 9 | "parameters": { 10 | "cooling_coeff": { 11 | "value": 0.1 12 | }, 13 | "T_a": { 14 | "value": 20 15 | }, 16 | "T_curr": { 17 | "default_initial_value": 90, 18 | "time_derivative": "dT_dt" 19 | }, 20 | "dT_dt": { 21 | "value": "-cooling_coeff*(T_curr - T_a)", 22 | "default_initial_value": 0 23 | } 24 | }, 25 | "output_ports": { 26 | "out_port": { 27 | "value": "T_curr" 28 | }, 29 | "out_port2": { 30 | "value": "dT_dt" 31 | } 32 | } 33 | } 34 | } 35 | } 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /examples/MDF/NewtonCoolingModel.yaml: -------------------------------------------------------------------------------- 1 | NewtonCoolingModel: 2 | format: 
ModECI MDF v0.4 3 | generating_application: Python modeci-mdf v0.4.11 4 | graphs: 5 | cooling_process: 6 | nodes: 7 | cool_node: 8 | parameters: 9 | cooling_coeff: 10 | value: 0.1 11 | T_a: 12 | value: 20 13 | T_curr: 14 | default_initial_value: 90 15 | time_derivative: dT_dt 16 | dT_dt: 17 | value: -cooling_coeff*(T_curr - T_a) 18 | default_initial_value: 0 19 | output_ports: 20 | out_port: 21 | value: T_curr 22 | out_port2: 23 | value: dT_dt 24 | -------------------------------------------------------------------------------- /examples/MDF/ParametersFunctions.yaml: -------------------------------------------------------------------------------- 1 | ParametersFunctions: 2 | format: ModECI MDF v0.4 3 | generating_application: Python modeci-mdf v0.4.11 4 | graphs: 5 | params_funcs_example: 6 | nodes: 7 | node0: 8 | metadata: 9 | color: .8 .8 .8 10 | functions: 11 | function_inbuilt_with_args: 12 | function: linear 13 | args: 14 | variable0: 1 15 | slope: 2 16 | intercept: 3 17 | function_with_value_args: 18 | args: 19 | A: 1 20 | B: 2 21 | C: 3 22 | value: A + B + C 23 | parameters: 24 | param_fixed_int: 25 | value: 1 26 | param_fixed_float: 27 | value: 2.0 28 | param_array_list: 29 | value: 30 | - 3 31 | - 4.0 32 | param_expression: 33 | value: param_fixed_int + param_fixed_float 34 | param_stateful: 35 | value: param_stateful + 1 36 | param_function: 37 | function: linear 38 | args: 39 | variable0: 1 40 | slope: 2 41 | intercept: 3 42 | param_time_deriv: 43 | default_initial_value: 0 44 | time_derivative: '1' 45 | -------------------------------------------------------------------------------- /examples/MDF/RNN/IaF.net.run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/RNN/IaF.net.run.png -------------------------------------------------------------------------------- /examples/MDF/RNN/IaF.net2.run.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/RNN/IaF.net2.run.png -------------------------------------------------------------------------------- /examples/MDF/RNN/IaF.run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/RNN/IaF.run.png -------------------------------------------------------------------------------- /examples/MDF/RNN/LEMS_Simiaf_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/RNN/LEMS_Simiaf_example.png -------------------------------------------------------------------------------- /examples/MDF/RNN/README.md: -------------------------------------------------------------------------------- 1 | **These examples are work in progress!** 2 | 3 | ## Recurrent Neural Network (RNN) model 4 | 5 | The script [generate_rnn.py](generate_rnn.py) can be used to generate an example RNN model. The MDF is in [RNNs.yaml](RNNs.yaml) and `python generate_rnn.py -graph` will produce the following graph of the network: 6 | 7 |

rnn.png

8 | 9 | To run the network using the MDF execution engine type `python generate_rnn.py -run`, producing: 10 | 11 |

RNN.run.png

12 | 13 | 14 | ## Integrate and Fire (IaF) neuron model 15 | 16 | The script [generate_iaf.py](generate_iaf.py) can be used to generate an example Integrate and Fire model. The MDF is in [IAFs.yaml](IAFs.yaml) and `python generate_iaf.py -graph` will produce the following graph of the network: 17 | 18 |

iaf.png

19 | 20 | To run the network with 1 input, a pre and a post IAF node (each with 1 cell) using the MDF execution engine type `python generate_iaf.py -run`, producing: 21 | 22 |

IaF.run.png

23 | 24 | To run a network (2 populations, 8 cells each) with an array for the state variable v, type `python generate_iaf.py -run -net`, producing: 25 | 26 |

IaF.net.run.png

27 | 28 | To run a similar network (2 populations, 8 cells each) with timed pulses into the pre cells, and weighed connections to the post cells, type `python generate_iaf.py -run -net2`, producing: 29 | 30 |

IaF.net2.run.png

31 | -------------------------------------------------------------------------------- /examples/MDF/RNN/RNN.run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/RNN/RNN.run.png -------------------------------------------------------------------------------- /examples/MDF/RNN/Simiaf_example.json: -------------------------------------------------------------------------------- 1 | { 2 | "Simiaf_example": { 3 | "version": "NeuroMLlite v0.6.0", 4 | "network": "IAFs.nmllite.json", 5 | "duration": 100000.0, 6 | "dt": 10.0, 7 | "seed": 123, 8 | "record_variables": { 9 | "time": { 10 | "current_input_node": 0 11 | }, 12 | "current_output": { 13 | "current_input_node": 0, 14 | "syn_post": 0 15 | }, 16 | "current_input": { 17 | "pre": 0, 18 | "post": 0 19 | }, 20 | "spiking": { 21 | "pre": 0, 22 | "post": 0 23 | }, 24 | "v": { 25 | "pre": 0, 26 | "post": 0 27 | }, 28 | "v_output": { 29 | "pre": 0, 30 | "post": 0 31 | }, 32 | "spiking_output": { 33 | "pre": 0, 34 | "post": 0 35 | }, 36 | "spike_input": { 37 | "syn_post": 0 38 | }, 39 | "syn_i": { 40 | "syn_post": 0 41 | } 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /examples/MDF/RNN/iaf.net.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/RNN/iaf.net.png -------------------------------------------------------------------------------- /examples/MDF/RNN/iaf.net2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/RNN/iaf.net2.png -------------------------------------------------------------------------------- /examples/MDF/RNN/iaf.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/RNN/iaf.png -------------------------------------------------------------------------------- /examples/MDF/RNN/regenerate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | python generate_rnn.py -graph 5 | python generate_rnn.py -run -nogui 6 | 7 | python generate_iaf.py -graph 8 | python generate_iaf.py -net -graph 9 | python generate_iaf.py -net2 -graph 10 | python generate_iaf.py -run -nogui 11 | python generate_iaf.py -run -net -nogui 12 | python generate_iaf.py -run -net2 -nogui 13 | #Fix dimensions! 14 | #python generate_iaf.py -neuroml 15 | #pynml LEMS_Simiaf_example.xml -lems-graph 16 | -------------------------------------------------------------------------------- /examples/MDF/RNN/rnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/RNN/rnn.png -------------------------------------------------------------------------------- /examples/MDF/RNN/rnn_pytorch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | input_size = 1 5 | hidden_size = 1 6 | num_layers = 1 7 | 8 | in_x = 1 9 | in_y = 1 10 | 11 | rnn = nn.RNN( 12 | input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, bias=False 13 | ) 14 | 15 | print("RNN: {}; {}".format(rnn, type(rnn))) 16 | 17 | print("Model: %s" % rnn) 18 | 19 | 20 | for i in range(num_layers): 21 | h = torch.zeros(hidden_size, input_size) 22 | h[0][0] = 1 23 | exec("rnn.weight_ih_l%i = torch.nn.Parameter(h)" % i) 24 | # exec('rnn.weight_ih_l%i[0] = 1'%i) 25 | 26 | for l in range(num_layers): 27 | exec( 28 | "rnn.weight_hh_l%i = 
torch.nn.Parameter(torch.zeros(hidden_size,hidden_size))" 29 | % l 30 | ) 31 | # exec('rnn.weight_hh_l%i[0][0] = 1'%l) 32 | 33 | 34 | print("State_dict:") 35 | for s in rnn.state_dict(): 36 | print(" > {} = {}".format(s, rnn.state_dict()[s])) 37 | 38 | input = torch.zeros(in_x, in_y, input_size) 39 | input[0][0][0] = 3 40 | print("Input: %s" % input) 41 | 42 | h0 = torch.randn(num_layers, in_y, hidden_size) 43 | h0 = torch.zeros(num_layers, in_y, hidden_size) 44 | # h0[0][0]=0.5 45 | print("h0: %s" % h0) 46 | 47 | output, hn = rnn(input, h0) 48 | 49 | print("\nOutput calculated by pyTorch, output: %s" % output.detach().numpy()) 50 | print("hn: %s" % hn.detach().numpy()) 51 | 52 | 53 | """ 54 | print('State_dict:') 55 | for s in rnn.state_dict(): 56 | print(' > %s = %s'%(s,rnn.state_dict()[s])) 57 | 58 | 59 | # Export the model 60 | fn = "rnn.onnx" 61 | torch_out = torch.onnx._export(rnn, # model being run 62 | input, # model input (or a tuple for multiple inputs) 63 | fn, # where to save the model (can be a file or file-like object) 64 | export_params=True) # store the trained parameter weights inside the model file 65 | 66 | print('Done! Exported to: %s'%fn) 67 | """ 68 | -------------------------------------------------------------------------------- /examples/MDF/RNN/utils.py: -------------------------------------------------------------------------------- 1 | from modeci_mdf.mdf import * 2 | import numpy as np 3 | 4 | 5 | def create_rnn_node(id, N, g, seed=1234): 6 | 7 | np.random.seed(seed) 8 | 9 | ## RNN node... 
10 | rnn_node = Node(id=id) 11 | ipr1 = InputPort(id="ext_input", shape="(%i,)" % N) 12 | rnn_node.input_ports.append(ipr1) 13 | 14 | ipr2 = InputPort(id="fb_input", shape="(%i,)" % N) 15 | rnn_node.input_ports.append(ipr2) 16 | 17 | default_initial_value = np.zeros(N) 18 | default_initial_value = 2 * np.random.random(N) - 1 19 | 20 | M = Parameter(id="M", value=2 * np.random.random((N, N)) - 1) 21 | rnn_node.parameters.append(M) 22 | 23 | g = Parameter(id="g", value=g) 24 | rnn_node.parameters.append(g) 25 | 26 | x = Parameter( 27 | id="x", 28 | default_initial_value=default_initial_value, 29 | time_derivative="-x + g*int_fb + %s" % ipr1.id, 30 | ) 31 | rnn_node.parameters.append(x) 32 | 33 | r = Parameter(id="r", function="tanh", args={"variable0": x.id, "scale": 1}) 34 | # r = Parameter(id="r", value="x") 35 | rnn_node.parameters.append(r) 36 | 37 | int_fb = Parameter(id="int_fb", function="MatMul", args={"A": "M", "B": "r"}) 38 | rnn_node.parameters.append(int_fb) 39 | 40 | op_x = OutputPort(id="out_port_x", value="x") 41 | rnn_node.output_ports.append(op_x) 42 | 43 | op_r = OutputPort(id="out_port_r", value="r") 44 | rnn_node.output_ports.append(op_r) 45 | 46 | return rnn_node 47 | -------------------------------------------------------------------------------- /examples/MDF/Simple.bson: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/Simple.bson -------------------------------------------------------------------------------- /examples/MDF/Simple.yaml: -------------------------------------------------------------------------------- 1 | Simple: 2 | format: ModECI MDF v0.4 3 | generating_application: Python modeci-mdf v0.4.11 4 | graphs: 5 | simple_example: 6 | nodes: 7 | input_node: 8 | parameters: 9 | input_level: 10 | value: 0.5 11 | output_ports: 12 | out_port: 13 | value: input_level 14 | processing_node: 15 | input_ports: 16 | 
input_port1: {} 17 | parameters: 18 | lin_slope: 19 | value: 0.5 20 | lin_intercept: 21 | value: 0 22 | log_gain: 23 | value: 3 24 | linear_1: 25 | function: linear 26 | args: 27 | variable0: input_port1 28 | slope: lin_slope 29 | intercept: lin_intercept 30 | logistic_1: 31 | function: logistic 32 | args: 33 | variable0: linear_1 34 | gain: log_gain 35 | bias: 0 36 | offset: 0 37 | output_ports: 38 | output_1: 39 | value: logistic_1 40 | edges: 41 | input_edge: 42 | sender: input_node 43 | receiver: processing_node 44 | sender_port: out_port 45 | receiver_port: input_port1 46 | parameters: 47 | weight: 0.55 48 | -------------------------------------------------------------------------------- /examples/MDF/States.json: -------------------------------------------------------------------------------- 1 | { 2 | "States": { 3 | "format": "ModECI MDF v0.4", 4 | "generating_application": "Python modeci-mdf v0.4.11", 5 | "graphs": { 6 | "state_example": { 7 | "nodes": { 8 | "counter_node": { 9 | "parameters": { 10 | "increment": { 11 | "value": 1 12 | }, 13 | "count": { 14 | "value": "count + increment" 15 | } 16 | }, 17 | "output_ports": { 18 | "out_port": { 19 | "value": "count" 20 | } 21 | } 22 | }, 23 | "sine_node": { 24 | "parameters": { 25 | "amp": { 26 | "value": 3 27 | }, 28 | "period": { 29 | "value": 0.4 30 | }, 31 | "level": { 32 | "default_initial_value": 0, 33 | "time_derivative": "6.283185 * rate / period" 34 | }, 35 | "rate": { 36 | "default_initial_value": 1, 37 | "time_derivative": "-1 * 6.283185 * level / period" 38 | } 39 | }, 40 | "output_ports": { 41 | "out_port": { 42 | "value": "amp * level" 43 | } 44 | } 45 | } 46 | } 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /examples/MDF/States.yaml: -------------------------------------------------------------------------------- 1 | States: 2 | format: ModECI MDF v0.4 3 | generating_application: Python modeci-mdf v0.4.11 4 | graphs: 5 | 
state_example: 6 | nodes: 7 | counter_node: 8 | parameters: 9 | increment: 10 | value: 1 11 | count: 12 | value: count + increment 13 | output_ports: 14 | out_port: 15 | value: count 16 | sine_node: 17 | parameters: 18 | amp: 19 | value: 3 20 | period: 21 | value: 0.4 22 | level: 23 | default_initial_value: 0 24 | time_derivative: 6.283185 * rate / period 25 | rate: 26 | default_initial_value: 1 27 | time_derivative: -1 * 6.283185 * level / period 28 | output_ports: 29 | out_port: 30 | value: amp * level 31 | -------------------------------------------------------------------------------- /examples/MDF/SwitchedRLC_Circuit.yaml: -------------------------------------------------------------------------------- 1 | SwitchedRLC_Circuit: 2 | format: ModECI MDF v0.4 3 | generating_application: Python modeci-mdf v0.4.11 4 | metadata: 5 | preferred_duration: 2 6 | preferred_dt: 0.001 7 | graphs: 8 | SwitchedRLC_Circuit: 9 | nodes: 10 | V: 11 | parameters: 12 | Vs: 13 | conditions: 14 | - id: 'off' 15 | test: time<0.5 16 | value: '0' 17 | - id: 'on' 18 | test: time>=0.5 19 | value: 0.1 20 | R: 21 | metadata: 22 | description: Resistance in Ohms 23 | value: 100 24 | L: 25 | metadata: 26 | description: Inductance in Henrys 27 | value: 1 28 | C: 29 | value: 0.001 30 | time: 31 | default_initial_value: 0 32 | time_derivative: '1' 33 | V: 34 | default_initial_value: 0 35 | time_derivative: i_C /C 36 | i_R: 37 | value: V / R 38 | i_L: 39 | default_initial_value: 0 40 | time_derivative: (Vs - V)/L 41 | i_C: 42 | value: i_L-i_R 43 | output_ports: 44 | V_out: 45 | value: V 46 | i_L_out: 47 | value: i_L 48 | -------------------------------------------------------------------------------- /examples/MDF/abcd_python.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | A simple set of nodes connected A->B->C->D, with one function at each, calculating 4 | inputs/outputs 5 | 6 | """ 7 | 8 | import math 9 | 10 | # A - Linear 11 | 12 | A_slope = 
1.1 13 | A_intercept = 1.2 14 | 15 | # B - Logistic 16 | 17 | B_gain = 2.1 18 | B_bias = 2.2 19 | B_offset = 2.3 20 | 21 | # C - Exponential 22 | 23 | C_scale = 3.1 24 | C_rate = 3.2 25 | C_bias = 3.3 26 | C_offset = 3.4 27 | 28 | # D - Sine 29 | 30 | D_scale = 4.0 31 | 32 | test_values = [-1.0, 0.0, 1.0, 5.0] 33 | 34 | 35 | def evaluate(input): 36 | A = A_slope * input + A_intercept 37 | B = 1 / (1 + math.exp(-1 * B_gain * (A + B_bias) + B_offset)) 38 | C = C_scale * math.exp((C_rate * B) + C_bias) + C_offset 39 | D = D_scale * math.sin(C) 40 | print(f" Input value {input}:\tA={A},\tB={B},\tC={C},\tD={D}") 41 | return A, B, C, D 42 | 43 | 44 | if __name__ == "__main__": 45 | 46 | print("Evaluating ABCD net in Python, with values %s" % test_values) 47 | for i in test_values: 48 | evaluate(i) 49 | -------------------------------------------------------------------------------- /examples/MDF/conditions/everyncalls_condition.yaml: -------------------------------------------------------------------------------- 1 | everyncalls_condition: 2 | format: ModECI MDF v0.4 3 | generating_application: Python modeci-mdf v0.4.11 4 | graphs: 5 | everyncalls_example: 6 | nodes: 7 | A: 8 | input_ports: 9 | input_port1: 10 | shape: 11 | - 1 12 | parameters: 13 | param_A: 14 | value: param_A + 1 15 | output_ports: 16 | output_1: 17 | value: input_port1 18 | B: 19 | input_ports: 20 | input_port1: 21 | shape: 22 | - 1 23 | parameters: 24 | param_B: 25 | value: param_B + 1 26 | output_ports: 27 | output_1: 28 | value: input_port1 29 | C: 30 | input_ports: 31 | input_port1: 32 | shape: 33 | - 1 34 | parameters: 35 | param_C: 36 | value: param_C + 1 37 | output_ports: 38 | output_1: 39 | value: input_port1 40 | edges: 41 | edge_A_B: 42 | sender: A 43 | receiver: B 44 | sender_port: output_1 45 | receiver_port: input_port1 46 | edge_B_C: 47 | sender: B 48 | receiver: C 49 | sender_port: output_1 50 | receiver_port: input_port1 51 | conditions: 52 | node_specific: 53 | A: 54 | type: Always 
55 | kwargs: {} 56 | B: 57 | type: EveryNCalls 58 | kwargs: 59 | dependencies: A 60 | n: 2 61 | C: 62 | type: EveryNCalls 63 | kwargs: 64 | dependencies: B 65 | n: 3 66 | -------------------------------------------------------------------------------- /examples/MDF/conditions/images/composite_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/conditions/images/composite_example.png -------------------------------------------------------------------------------- /examples/MDF/conditions/images/everyncalls.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/conditions/images/everyncalls.png -------------------------------------------------------------------------------- /examples/MDF/conditions/images/threshold.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/conditions/images/threshold.png -------------------------------------------------------------------------------- /examples/MDF/conditions/images/timeinterval.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/conditions/images/timeinterval.png -------------------------------------------------------------------------------- /examples/MDF/conditions/threshold_condition.json: -------------------------------------------------------------------------------- 1 | { 2 | "threshold_condition": { 3 | "format": "ModECI MDF v0.4", 4 | "generating_application": "Python modeci-mdf v0.4.11", 5 | "graphs": { 6 | "threshold_example": { 7 | "nodes": { 8 | "A": { 9 | "input_ports": { 10 | 
"input_port1": { 11 | "shape": [ 12 | 1 13 | ] 14 | } 15 | }, 16 | "parameters": { 17 | "param_A": { 18 | "value": "param_A + 1", 19 | "default_initial_value": 0 20 | } 21 | }, 22 | "output_ports": { 23 | "output_1": { 24 | "value": "param_A" 25 | } 26 | } 27 | } 28 | }, 29 | "conditions": { 30 | "termination": { 31 | "environment_state_update": { 32 | "type": "Threshold", 33 | "kwargs": { 34 | "dependency": "A", 35 | "parameter": "param_A", 36 | "threshold": 5, 37 | "comparator": ">=" 38 | } 39 | } 40 | } 41 | } 42 | } 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /examples/MDF/conditions/threshold_condition.yaml: -------------------------------------------------------------------------------- 1 | threshold_condition: 2 | format: ModECI MDF v0.4 3 | generating_application: Python modeci-mdf v0.4.11 4 | graphs: 5 | threshold_example: 6 | nodes: 7 | A: 8 | input_ports: 9 | input_port1: 10 | shape: 11 | - 1 12 | parameters: 13 | param_A: 14 | value: param_A + 1 15 | default_initial_value: 0 16 | output_ports: 17 | output_1: 18 | value: param_A 19 | conditions: 20 | termination: 21 | environment_state_update: 22 | type: Threshold 23 | kwargs: 24 | dependency: A 25 | parameter: param_A 26 | threshold: 5 27 | comparator: '>=' 28 | -------------------------------------------------------------------------------- /examples/MDF/conditions/timeinterval_condition.yaml: -------------------------------------------------------------------------------- 1 | timeinterval_condition: 2 | format: ModECI MDF v0.4 3 | generating_application: Python modeci-mdf v0.4.11 4 | graphs: 5 | timeinterval_example: 6 | nodes: 7 | A: 8 | input_ports: 9 | input_port1: 10 | shape: 11 | - 1 12 | parameters: 13 | param_A: 14 | value: param_A + 1 15 | output_ports: 16 | output_1: 17 | value: param_A 18 | B: 19 | input_ports: 20 | input_port1: 21 | shape: 22 | - 1 23 | parameters: 24 | param_B: 25 | value: param_B + 1 26 | output_ports: 27 | 
output_1: 28 | value: param_B 29 | C: 30 | input_ports: 31 | input_port1: 32 | shape: 33 | - 1 34 | parameters: 35 | param_C: 36 | value: param_C + 1 37 | output_ports: 38 | output_1: 39 | value: param_C 40 | edges: 41 | edge_A_B: 42 | sender: A 43 | receiver: B 44 | sender_port: output_1 45 | receiver_port: input_port1 46 | edge_B_C: 47 | sender: B 48 | receiver: C 49 | sender_port: output_1 50 | receiver_port: input_port1 51 | conditions: 52 | node_specific: 53 | A: 54 | type: Always 55 | kwargs: {} 56 | B: 57 | type: AfterPass 58 | kwargs: 59 | n: 1 60 | C: 61 | type: AfterPass 62 | kwargs: 63 | n: 4 64 | -------------------------------------------------------------------------------- /examples/MDF/images/Readme.md: -------------------------------------------------------------------------------- 1 | Contains images created by running the graph from MDF json files. 2 | -------------------------------------------------------------------------------- /examples/MDF/images/abc_conditions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/abc_conditions.png -------------------------------------------------------------------------------- /examples/MDF/images/abcd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/abcd.png -------------------------------------------------------------------------------- /examples/MDF/images/abcd_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/abcd_3.png -------------------------------------------------------------------------------- /examples/MDF/images/arrays.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/arrays.png -------------------------------------------------------------------------------- /examples/MDF/images/newton.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/newton.png -------------------------------------------------------------------------------- /examples/MDF/images/newton_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/newton_plot.png -------------------------------------------------------------------------------- /examples/MDF/images/params_funcs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/params_funcs.png -------------------------------------------------------------------------------- /examples/MDF/images/simple.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/simple.png -------------------------------------------------------------------------------- /examples/MDF/images/simple_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/simple_3.png -------------------------------------------------------------------------------- /examples/MDF/images/states.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/states.png -------------------------------------------------------------------------------- /examples/MDF/images/switched_rlc_circuit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/switched_rlc_circuit.png -------------------------------------------------------------------------------- /examples/MDF/images/switched_rlc_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/images/switched_rlc_plot.png -------------------------------------------------------------------------------- /examples/MDF/translation/Readme.md: -------------------------------------------------------------------------------- 1 | Translator files to convert to MDF_0 format 2 | -------------------------------------------------------------------------------- /examples/MDF/translation/run_translated_ABCD.py: -------------------------------------------------------------------------------- 1 | import json 2 | import ntpath 3 | 4 | from modeci_mdf.functions.standard import mdf_functions, create_python_expression 5 | from typing import List, Tuple, Dict, Optional, Set, Any, Union 6 | from modeci_mdf.utils import load_mdf, print_summary 7 | from modeci_mdf.mdf import * 8 | from modeci_mdf.full_translator import * 9 | from modeci_mdf.execution_engine import EvaluableGraph 10 | from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW 11 | import argparse 12 | import sys 13 | 14 | 15 | def main(): 16 | 17 | file_path = "ABCD.json" 18 | data = convert_states_to_stateful_parameters("../" + file_path) 19 | # print(data) 20 | with open("Translated_" + file_path, "w") as fp: 21 | json.dump(data, fp, indent=4) 22 | 23 | if "-run" 
in sys.argv: 24 | 25 | verbose = True 26 | 27 | mod_graph = load_mdf("Translated_%s" % file_path).graphs[0] 28 | eg = EvaluableGraph(mod_graph, verbose) 29 | 30 | mod_graph_old = load_mdf("../" + file_path).graphs[0] 31 | eg_old = EvaluableGraph(mod_graph_old, verbose) 32 | 33 | format = FORMAT_NUMPY 34 | 35 | eg.evaluate(array_format=format) 36 | 37 | eg_old.evaluate(array_format=format) 38 | 39 | print( 40 | "New file output value>>>", 41 | eg.enodes["D"].evaluable_outputs["out_port"].curr_value, 42 | ) 43 | 44 | print( 45 | "Old file output value>>>", 46 | eg_old.enodes["D"].evaluable_outputs["out_port"].curr_value, 47 | ) 48 | 49 | 50 | if __name__ == "__main__": 51 | main() 52 | -------------------------------------------------------------------------------- /examples/MDF/translation/run_translated_Arrays.py: -------------------------------------------------------------------------------- 1 | import json 2 | import ntpath 3 | 4 | from modeci_mdf.functions.standard import mdf_functions, create_python_expression 5 | from typing import List, Tuple, Dict, Optional, Set, Any, Union 6 | from modeci_mdf.utils import load_mdf, print_summary 7 | from modeci_mdf.mdf import * 8 | from modeci_mdf.full_translator import * 9 | from modeci_mdf.execution_engine import EvaluableGraph 10 | from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW 11 | import argparse 12 | import sys 13 | 14 | 15 | def main(): 16 | 17 | file_path = "Arrays.json" 18 | data = convert_states_to_stateful_parameters("../" + file_path) 19 | # print(data) 20 | with open("Translated_" + file_path, "w") as fp: 21 | json.dump(data, fp, indent=4) 22 | 23 | if "-run" in sys.argv: 24 | 25 | verbose = True 26 | 27 | mod_graph = load_mdf("Translated_%s" % file_path).graphs[0] 28 | eg = EvaluableGraph(mod_graph, verbose) 29 | 30 | mod_graph_old = load_mdf("../" + file_path).graphs[0] 31 | eg_old = EvaluableGraph(mod_graph_old, verbose) 32 | 33 | format = FORMAT_NUMPY 34 | 35 | eg.evaluate(array_format=format) 
36 | 37 | eg_old.evaluate(array_format=format) 38 | print( 39 | "New file output value>>>", 40 | eg.enodes["middle_node"].evaluable_outputs["output_1"].curr_value, 41 | ) 42 | 43 | print( 44 | "Old file output value>>>", 45 | eg_old.enodes["middle_node"].evaluable_outputs["output_1"].curr_value, 46 | ) 47 | 48 | 49 | if __name__ == "__main__": 50 | main() 51 | -------------------------------------------------------------------------------- /examples/MDF/translation/run_translated_abc_conditions.py: -------------------------------------------------------------------------------- 1 | import json 2 | import ntpath 3 | 4 | from modeci_mdf.functions.standard import mdf_functions, create_python_expression 5 | from typing import List, Tuple, Dict, Optional, Set, Any, Union 6 | from modeci_mdf.utils import load_mdf, print_summary 7 | from modeci_mdf.mdf import * 8 | from modeci_mdf.full_translator import * 9 | from modeci_mdf.execution_engine import EvaluableGraph 10 | from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW 11 | import argparse 12 | import sys 13 | 14 | 15 | def main(): 16 | 17 | file_path = "abc_conditions.json" 18 | data = convert_states_to_stateful_parameters("../" + file_path) 19 | # print(data) 20 | with open("Translated_" + file_path, "w") as fp: 21 | json.dump(data, fp, indent=4) 22 | 23 | if "-run" in sys.argv: 24 | 25 | verbose = True 26 | 27 | mod_graph = load_mdf("Translated_%s" % file_path).graphs[0] 28 | eg = EvaluableGraph(mod_graph, verbose) 29 | 30 | mod_graph_old = load_mdf("../" + file_path).graphs[0] 31 | eg_old = EvaluableGraph(mod_graph_old, verbose) 32 | 33 | format = FORMAT_NUMPY 34 | 35 | eg.evaluate(array_format=format) 36 | 37 | eg_old.evaluate(array_format=format) 38 | 39 | print( 40 | "New file output value>>>", 41 | eg.enodes["C"].evaluable_outputs["output_1"].curr_value, 42 | ) 43 | 44 | print( 45 | "Old file output value>>>", 46 | eg_old.enodes["C"].evaluable_outputs["output_1"].curr_value, 47 | ) 48 | 49 | 50 | if 
__name__ == "__main__": 51 | main() 52 | -------------------------------------------------------------------------------- /examples/MDF/translation/run_translated_simple.py: -------------------------------------------------------------------------------- 1 | import json 2 | import ntpath 3 | 4 | from modeci_mdf.functions.standard import mdf_functions, create_python_expression 5 | from typing import List, Tuple, Dict, Optional, Set, Any, Union 6 | from modeci_mdf.utils import load_mdf, print_summary 7 | from modeci_mdf.mdf import * 8 | from modeci_mdf.full_translator import * 9 | from modeci_mdf.execution_engine import EvaluableGraph 10 | from modelspec.utils import FORMAT_NUMPY, FORMAT_TENSORFLOW 11 | import argparse 12 | import sys 13 | 14 | 15 | def main(): 16 | 17 | file_path = "Simple.json" 18 | data = convert_states_to_stateful_parameters("../" + file_path) 19 | # print(data) 20 | with open("Translated_" + file_path, "w") as fp: 21 | json.dump(data, fp, indent=4) 22 | 23 | if "-run" in sys.argv: 24 | 25 | verbose = True 26 | 27 | mod_graph = load_mdf("Translated_%s" % file_path).graphs[0] 28 | eg = EvaluableGraph(mod_graph, verbose) 29 | 30 | mod_graph_old = load_mdf("../" + file_path).graphs[0] 31 | eg_old = EvaluableGraph(mod_graph_old, verbose) 32 | 33 | format = FORMAT_NUMPY 34 | 35 | eg.evaluate(array_format=format) 36 | 37 | eg_old.evaluate(array_format=format) 38 | 39 | print( 40 | "New file output value>>>", 41 | eg.enodes["processing_node"].evaluable_outputs["output_1"].curr_value, 42 | ) 43 | 44 | print( 45 | "Old file output value>>>", 46 | eg_old.enodes["processing_node"].evaluable_outputs["output_1"].curr_value, 47 | ) 48 | 49 | 50 | if __name__ == "__main__": 51 | main() 52 | -------------------------------------------------------------------------------- /examples/MDF/translation/translated_FN_stateful_vw_plot.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/translation/translated_FN_stateful_vw_plot.jpg -------------------------------------------------------------------------------- /examples/MDF/translation/translated_levelratesineplot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/MDF/translation/translated_levelratesineplot.jpg -------------------------------------------------------------------------------- /examples/ModECI_MDF.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/ModECI_MDF.png -------------------------------------------------------------------------------- /examples/NeuroML/ABCD.1.mdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/ABCD.1.mdf.png -------------------------------------------------------------------------------- /examples/NeuroML/ABCD.mdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/ABCD.mdf.png -------------------------------------------------------------------------------- /examples/NeuroML/ABCD.nmllite.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/ABCD.nmllite.png -------------------------------------------------------------------------------- /examples/NeuroML/FN.gv.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/FN.gv.png -------------------------------------------------------------------------------- /examples/NeuroML/FN.json: -------------------------------------------------------------------------------- 1 | { 2 | "FN": { 3 | "version": "NeuroMLlite v0.6.0", 4 | "notes": "FitzHugh Nagumo cell model - originally specified in NeuroML/LEMS", 5 | "parameters": { 6 | "initial_w": 0.0, 7 | "initial_v": -1, 8 | "a_v": -0.3333333333333333, 9 | "b_v": 0.0, 10 | "c_v": 1.0, 11 | "d_v": 1, 12 | "e_v": -1.0, 13 | "f_v": 1.0, 14 | "time_constant_v": 1.0, 15 | "a_w": 1.0, 16 | "b_w": -0.8, 17 | "c_w": 0.7, 18 | "time_constant_w": 12.5, 19 | "threshold": -1.0, 20 | "mode": 1.0, 21 | "uncorrelated_activity": 0.0, 22 | "Iext": 0 23 | }, 24 | "cells": { 25 | "fn": { 26 | "parameters": { 27 | "initial_w": "initial_w", 28 | "initial_v": "initial_v", 29 | "a_v": "a_v", 30 | "b_v": "b_v", 31 | "c_v": "c_v", 32 | "d_v": "d_v", 33 | "e_v": "e_v", 34 | "f_v": "f_v", 35 | "time_constant_v": "time_constant_v", 36 | "a_w": "a_w", 37 | "b_w": "b_w", 38 | "c_w": "c_w", 39 | "time_constant_w": "time_constant_w", 40 | "threshold": "threshold", 41 | "mode": "mode", 42 | "uncorrelated_activity": "uncorrelated_activity", 43 | "Iext": "Iext" 44 | }, 45 | "lems_source_file": "FN_Definitions.xml" 46 | } 47 | }, 48 | "regions": { 49 | "region1": { 50 | "x": 0.0, 51 | "y": 0.0, 52 | "z": 0.0, 53 | "width": 1000.0, 54 | "height": 100.0, 55 | "depth": 1000.0 56 | } 57 | }, 58 | "populations": { 59 | "FNpop": { 60 | "size": "1", 61 | "component": "fn", 62 | "properties": { 63 | "color": "0.2 0.2 0.2", 64 | "radius": 3 65 | }, 66 | "random_layout": { 67 | "region": "region1" 68 | } 69 | } 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /examples/NeuroML/FN.net.nml: -------------------------------------------------------------------------------- 1 | 2 | 
Generated by NeuroMLlite v0.6.0 3 | Generated network: FN 4 | Generation seed: 1234 5 | NeuroMLlite parameters: 6 | Iext = 0 7 | a_v = -0.3333333333333333 8 | a_w = 1.0 9 | b_v = 0.0 10 | b_w = -0.8 11 | c_v = 1.0 12 | c_w = 0.7 13 | d_v = 1 14 | e_v = -1.0 15 | f_v = 1.0 16 | initial_v = -1 17 | initial_w = 0.0 18 | mode = 1.0 19 | threshold = -1.0 20 | time_constant_v = 1.0 21 | time_constant_w = 12.5 22 | uncorrelated_activity = 0.0 23 | 24 | 25 | FitzHugh Nagumo cell model - originally specified in NeuroML/LEMS 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /examples/NeuroML/FNmulti.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/FNmulti.png -------------------------------------------------------------------------------- /examples/NeuroML/Izh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/Izh.png -------------------------------------------------------------------------------- /examples/NeuroML/Izh_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/Izh_run.png -------------------------------------------------------------------------------- /examples/NeuroML/IzhikevichTest.gv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/IzhikevichTest.gv.png -------------------------------------------------------------------------------- /examples/NeuroML/IzhikevichTest.net.nml: 
-------------------------------------------------------------------------------- 1 | 2 | Generated by NeuroMLlite v0.6.0 3 | Generated network: IzhikevichTest 4 | Generation seed: 1234 5 | NeuroMLlite parameters: 6 | C = 100 pF 7 | N = 1 8 | a = 0.03 per_ms 9 | b = -2 nS 10 | c = -50 mV 11 | d = 100 pA 12 | delay = 100ms 13 | duration = 500ms 14 | k = 0.7 nS_per_mV 15 | stim_amp = 100pA 16 | v0 = -80mV 17 | vpeak = 35 mV 18 | vr = -60 mV 19 | vt = -40 mV 20 | 21 | 22 | 23 | Example Izhikevich 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /examples/NeuroML/IzhikevichTest.nmllite.yaml: -------------------------------------------------------------------------------- 1 | IzhikevichTest: 2 | version: NeuroMLlite v0.6.0 3 | notes: Example Izhikevich 4 | parameters: 5 | N: 1 6 | v0: -80mV 7 | C: 100 pF 8 | k: 0.7 nS_per_mV 9 | vr: -60 mV 10 | vt: -40 mV 11 | vpeak: 35 mV 12 | a: 0.03 per_ms 13 | b: -2 nS 14 | c: -50 mV 15 | d: 100 pA 16 | delay: 100ms 17 | stim_amp: 100pA 18 | duration: 500ms 19 | cells: 20 | izhCell: 21 | parameters: 22 | v0: v0 23 | C: C 24 | k: k 25 | vr: vr 26 | vt: vt 27 | vpeak: vpeak 28 | a: a 29 | b: b 30 | c: c 31 | d: d 32 | neuroml2_cell: izhikevich2007Cell 33 | input_sources: 34 | iclamp_0: 35 | parameters: 36 | amplitude: stim_amp 37 | delay: delay 38 | duration: duration 39 | neuroml2_input: pulseGenerator 40 | populations: 41 | izhPop: 42 | size: '1' 43 | component: izhCell 44 | properties: 45 | color: .7 0 0 46 | inputs: 47 | stim: 48 | input_source: iclamp_0 49 | population: izhPop 50 | percentage: 100 51 | weight: 1 52 | -------------------------------------------------------------------------------- /examples/NeuroML/LEMS_SimABCD.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/LEMS_SimABCD.png 
-------------------------------------------------------------------------------- /examples/NeuroML/LEMS_SimFN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/LEMS_SimFN.png -------------------------------------------------------------------------------- /examples/NeuroML/LEMS_SimFN.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /examples/NeuroML/LEMS_SimIzhikevichTest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/LEMS_SimIzhikevichTest.png -------------------------------------------------------------------------------- /examples/NeuroML/LEMS_SimIzhikevichTest.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /examples/NeuroML/LemsFNrun.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/LemsFNrun.png -------------------------------------------------------------------------------- /examples/NeuroML/MDFFNrun.multi.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/MDFFNrun.multi.png -------------------------------------------------------------------------------- /examples/NeuroML/MDFFNrun.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/MDFFNrun.png -------------------------------------------------------------------------------- /examples/NeuroML/PyNN/HH.mdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/PyNN/HH.mdf.png -------------------------------------------------------------------------------- /examples/NeuroML/PyNN/HH.net.nml: -------------------------------------------------------------------------------- 1 | 2 | Generated by NeuroMLlite v0.6.0 3 | Generated network: HH 4 | Generation seed: 1234 5 | NeuroMLlite parameters: 6 | input_amp = 0 7 | 8 | 9 | 10 | 11 | Example: HH 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /examples/NeuroML/PyNN/InputWeights.mdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/PyNN/InputWeights.mdf.png -------------------------------------------------------------------------------- /examples/NeuroML/PyNN/Net1.mdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/PyNN/Net1.mdf.png -------------------------------------------------------------------------------- /examples/NeuroML/PyNN/OneCell.mdf.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/PyNN/OneCell.mdf.png -------------------------------------------------------------------------------- /examples/NeuroML/PyNN/OneCell.net.nml: -------------------------------------------------------------------------------- 1 | 2 | Generated by NeuroMLlite v0.6.0 3 | Generated network: OneCell 4 | Generation seed: 1234 5 | NeuroMLlite parameters: 6 | input_amp = 0.99 7 | 8 | 9 | 10 | 11 | Example: OneCell 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /examples/NeuroML/PyNN/SimpleNet.mdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/NeuroML/PyNN/SimpleNet.mdf.png -------------------------------------------------------------------------------- /examples/NeuroML/PyNN/SimpleNet.net.nml: -------------------------------------------------------------------------------- 1 | 2 | Generated by NeuroMLlite v0.6.0 3 | Generated network: SimpleNet 4 | Generation seed: 1234 5 | NeuroMLlite parameters: 6 | input_amp = 0.99 7 | 8 | 9 | 10 | 11 | Example: SimpleNet 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /examples/NeuroML/PyNN/clean.sh: -------------------------------------------------------------------------------- 1 | rm -rf *gv* *dat *spikes x86_64 arm64 2 | 3 | mv *pkl *txt *mdf* *png *mod LEMS* *nml All* Input* One* Sim* HH* Net1* /tmp 4 | -------------------------------------------------------------------------------- /examples/NeuroML/PyNN/regenerateAndTest.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | run_gui_examples=true 5 | 6 | if [[ ($# -eq 1) && ($1 == '-nogui') ]]; then 7 | run_gui_examples=false 8 | fi 9 | 10 | #### Generate single cell example 11 | python Generate.py -one -nml 12 | pynml -validate OneCell.net.nml 13 | 14 | python Generate.py -one -mdf 15 | python RunInMDF.py OneCell.mdf.json -nogui 16 | 17 | 18 | #### Generate HH example 19 | python Generate.py -hh -nml 20 | pynml -validate HH.net.nml 21 | 22 | python Generate.py -hh -mdf 23 | python RunInMDF.py HH.mdf.json -nogui 24 | 25 | 26 | #### Generate multiple input example 27 | python Generate.py -input_weights -nml 28 | pynml -validate InputWeights.net.nml 29 | 30 | python Generate.py -input_weights -mdf 31 | python RunInMDF.py InputWeights.mdf.json -nogui 32 | 33 | 34 | #### Generate simple net example 35 | python Generate.py -simple_net -nml 36 | pynml -validate SimpleNet.net.nml 37 | 38 | python Generate.py -simple_net -mdf 39 | python RunInMDF.py SimpleNet.mdf.json -nogui 40 | 41 | 42 | #### Generate bigger net example 43 | python Generate.py -net1 -nml 44 | pynml -validate Net1.net.nml 45 | 46 | python Generate.py -net1 -mdf 47 | python RunInMDF.py Net1.mdf.json -nogui 48 | -------------------------------------------------------------------------------- /examples/NeuroML/SimABCD.json: -------------------------------------------------------------------------------- 1 | { 2 | "SimABCD": { 3 | "version": "NeuroMLlite v0.6.0", 4 | "network": "ABCD.json", 5 | "duration": 5000.0, 6 | "dt": 100.0, 7 | "seed": 123, 8 | "record_variables": { 9 | "OUTPUT": { 10 | "all": "*" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/NeuroML/SimFN.json: -------------------------------------------------------------------------------- 1 | { 2 | "SimFN": { 3 | "version": "NeuroMLlite v0.6.0", 4 | "network": "FN.json", 5 | 
"duration": 100.0, 6 | "dt": 0.05, 7 | "seed": 123, 8 | "record_variables": { 9 | "V": { 10 | "all": "*" 11 | }, 12 | "W": { 13 | "all": "*" 14 | } 15 | }, 16 | "plots2D": { 17 | "VW": { 18 | "x_axis": "FNpop/0/fn/V", 19 | "y_axis": "FNpop/0/fn/W" 20 | } 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /examples/NeuroML/SimIzhikevichTest.yaml: -------------------------------------------------------------------------------- 1 | SimIzhikevichTest: 2 | version: NeuroMLlite v0.6.0 3 | network: IzhikevichTest.nmllite.yaml 4 | duration: 700.0 5 | dt: 0.025 6 | record_variables: 7 | v: 8 | all: '*' 9 | u: 10 | all: '*' 11 | -------------------------------------------------------------------------------- /examples/ONNX/ab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/ONNX/ab.png -------------------------------------------------------------------------------- /examples/ONNX/ab.yaml: -------------------------------------------------------------------------------- 1 | ONNX Model: 2 | format: ModECI MDF v0.4 3 | generating_application: Python modeci-mdf v0.4.11 4 | graphs: 5 | main_graph: 6 | nodes: 7 | /A/Add: 8 | input_ports: 9 | input: 10 | shape: 11 | - 2 12 | - 3 13 | type: float 14 | parameters: 15 | B: 16 | value: 1.0 17 | /A/Add: 18 | function: onnx::Add 19 | args: 20 | A: input 21 | B: B 22 | output_ports: 23 | _A_Add_output_0: 24 | value: /A/Add 25 | /B/Mul: 26 | input_ports: 27 | _A_Add_output_0: 28 | shape: 29 | - 2 30 | - 3 31 | type: float 32 | parameters: 33 | B: 34 | value: 5.0 35 | /B/Mul: 36 | function: onnx::Mul 37 | args: 38 | A: _A_Add_output_0 39 | B: B 40 | output_ports: 41 | _4: 42 | value: /B/Mul 43 | edges: 44 | /A/Add._A_Add_output_0_/B/Mul._A_Add_output_0: 45 | sender: /A/Add 46 | receiver: /B/Mul 47 | sender_port: _A_Add_output_0 48 | receiver_port: 
_A_Add_output_0 49 | -------------------------------------------------------------------------------- /examples/ONNX/ab_torch-jit-export-m2o.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/ONNX/ab_torch-jit-export-m2o.onnx -------------------------------------------------------------------------------- /examples/ONNX/abc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/ONNX/abc.png -------------------------------------------------------------------------------- /examples/ONNX/abc_basic.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file does three things: 3 | - It implements a simple PyTorch model. 4 | - Exports in to ONNX using a combination of tracing and scripting 5 | - Converts it to MDF 6 | """ 7 | import torch 8 | import onnx 9 | 10 | from onnx import helper 11 | 12 | from modeci_mdf.interfaces.onnx import onnx_to_mdf, convert_file 13 | 14 | 15 | class A(torch.nn.Module): 16 | def forward(self, x): 17 | return torch.sin(x) 18 | 19 | 20 | class B(torch.nn.Module): 21 | def forward(self, x): 22 | return torch.sin(x) 23 | 24 | 25 | class C(torch.nn.Module): 26 | def forward(self, x): 27 | return torch.cos(x) 28 | 29 | 30 | class ABC(torch.nn.Module): 31 | def __init__(self): 32 | super().__init__() 33 | self.A = A() 34 | self.B = B() 35 | self.C = C() 36 | 37 | def forward(self, x): 38 | 39 | # Run A 40 | y = self.A(x) 41 | 42 | # Run B (loop_count times) 43 | y = self.B(y) 44 | 45 | # Run C 46 | y = self.C(y) 47 | 48 | return y 49 | 50 | 51 | def main(): 52 | 53 | model = ABC() 54 | dummy_input = torch.zeros(2, 3) 55 | # loop_count = torch.tensor(5, dtype=torch.long) 56 | torch.onnx.export( 57 | model, (dummy_input), "abc_basic.onnx", 
verbose=True, input_names=["input"] 58 | ) 59 | 60 | # Load it back in using ONNX package 61 | onnx_model = onnx.load("abc_basic.onnx") 62 | print(onnx_model) 63 | onnx.checker.check_model(onnx_model) 64 | 65 | # Extract the loop or if body as a sub-model, this is just because I want 66 | # to view it in netron and sub-graphs can't be rendered 67 | for node in [ 68 | node for node in onnx_model.graph.node if node.op_type in ["Loop", "If"] 69 | ]: 70 | 71 | # Get the GraphProto of the body 72 | body_graph = node.attribute[0].g 73 | 74 | # Turn it into a model 75 | model_def = helper.make_model(body_graph, producer_name="abc_basic.py") 76 | 77 | onnx.save(model_def, f"examples/{node.name}_body.onnx") 78 | 79 | convert_file("abc_basic.onnx") 80 | 81 | 82 | if __name__ == "__main__": 83 | main() 84 | -------------------------------------------------------------------------------- /examples/ONNX/simple_abc.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file does three things: 3 | - It implements a simple PyTorch model. 
4 | - Exports in to ONNX using a combination of tracing and scripting 5 | - Converts it to MDF 6 | """ 7 | import torch 8 | import onnx 9 | import os 10 | 11 | 12 | from modeci_mdf.interfaces.onnx import onnx_to_mdf 13 | 14 | 15 | class A(torch.nn.Module): 16 | def forward(self, x): 17 | return x + 1 18 | 19 | 20 | @torch.jit.script 21 | def loop_b(x, y): 22 | for i in range(int(y)): 23 | x = x / 10 24 | return x 25 | 26 | 27 | class B(torch.nn.Module): 28 | def forward(self, x, y): 29 | return loop_b(x, y) 30 | 31 | 32 | class C(torch.nn.Module): 33 | def forward(self, x): 34 | return x * 100 35 | 36 | 37 | class ABC(torch.nn.Module): 38 | def __init__(self): 39 | super().__init__() 40 | self.A = A() 41 | self.B = B() 42 | self.C = C() 43 | 44 | def forward(self, x, B_loop_count): 45 | 46 | # Run A 47 | y = self.A(x) 48 | 49 | # Run B (loop_count times) 50 | y = self.B(y, B_loop_count) 51 | 52 | # Run C 53 | y = self.C(y) 54 | 55 | return y 56 | 57 | 58 | def main(): 59 | 60 | model = ABC() 61 | dummy_input = torch.zeros(2, 3) 62 | loop_count = torch.tensor(5, dtype=torch.long) 63 | torch.onnx.export( 64 | model, 65 | (dummy_input, loop_count), 66 | "abc.onnx", 67 | verbose=True, 68 | input_names=["input", "B_loop_count"], 69 | opset_version=9, 70 | ) 71 | 72 | # Load it back in using ONNX package 73 | onnx_model = onnx.load("abc.onnx") 74 | onnx.checker.check_model(onnx_model) 75 | 76 | mdf_model = onnx_to_mdf(onnx_model) 77 | 78 | mdf_model.to_json_file("abc.json") 79 | mdf_model.to_yaml_file("abc.yaml") 80 | """ 81 | Can't be exported to graph as Loop not supported... 
82 | mdf_model.to_graph_image( 83 | engine="dot", 84 | output_format="png", 85 | view_on_render=False, 86 | level=3, 87 | filename_root="abc", 88 | only_warn_on_fail= (os.name=='nt'), # Makes sure test of this doesn't fail on Windows on GitHub Actions 89 | )""" 90 | 91 | 92 | if __name__ == "__main__": 93 | main() 94 | -------------------------------------------------------------------------------- /examples/PsyNeuLink/SimpleBranching-conditional.py: -------------------------------------------------------------------------------- 1 | import psyneulink as pnl 2 | 3 | comp = pnl.Composition(name="comp") 4 | A = pnl.TransferMechanism(name="A") 5 | B = pnl.TransferMechanism(name="B") 6 | C = pnl.TransferMechanism(name="C") 7 | D = pnl.TransferMechanism(name="D") 8 | 9 | comp.add_linear_processing_pathway([A, B, C]) 10 | comp.add_linear_processing_pathway([A, B, D]) 11 | 12 | comp.scheduler.add_condition_set( 13 | { 14 | A: pnl.AtNCalls(A, 0), 15 | B: pnl.Always(), 16 | C: pnl.EveryNCalls(B, 5), 17 | D: pnl.EveryNCalls(C, 2), 18 | } 19 | ) 20 | 21 | comp.run(inputs={A: 1}) 22 | 23 | # A, B, B, B, B, B, C, A, B, B, B, B, B, {C, D} 24 | print( 25 | [ 26 | {node.name for node in time_step} 27 | for time_step in comp.scheduler.execution_list[comp.default_execution_id] 28 | ] 29 | ) 30 | 31 | # comp.show_graph() 32 | -------------------------------------------------------------------------------- /examples/PsyNeuLink/SimpleBranching-timing.py: -------------------------------------------------------------------------------- 1 | import psyneulink as pnl 2 | 3 | comp = pnl.Composition(name="comp") 4 | A = pnl.TransferMechanism(name="A") 5 | B = pnl.TransferMechanism(name="B") 6 | C = pnl.TransferMechanism(name="C") 7 | D = pnl.TransferMechanism(name="D") 8 | 9 | comp.add_linear_processing_pathway([A, B, C]) 10 | comp.add_linear_processing_pathway([A, B, D]) 11 | 12 | # TimeInterval is not yet implemented in PsyNeuLink 13 | comp.scheduler.add_condition_set( 14 | { 15 | A: 
pnl.TimeInterval(repeat=7, unit="ms"), 16 | B: pnl.All( 17 | pnl.TimeInterval(start=1, repeat=1, unit="ms"), 18 | pnl.Not(pnl.TimeInterval(start=6, repeat=7, unit="ms")), 19 | pnl.Not(pnl.TimeInterval(start=7, repeat=7, unit="ms")), 20 | ), 21 | C: pnl.TimeInterval(start=6, repeat=7, unit="ms"), 22 | D: pnl.TimeInterval(start=13, repeat=7, unit="ms"), 23 | } 24 | ) 25 | 26 | comp.run(inputs={A: 1}, scheduling_mode=pnl.SchedulingMode.EXACT_TIME) 27 | 28 | print( 29 | "\n".join( 30 | [ 31 | "{:~}: {}".format( 32 | comp.scheduler.execution_timestamps[comp.default_execution_id][ 33 | i 34 | ].absolute, 35 | {node.name for node in time_step}, 36 | ) 37 | for i, time_step in enumerate( 38 | comp.scheduler.execution_list[comp.default_execution_id] 39 | ) 40 | ] 41 | ) 42 | ) 43 | 44 | # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 45 | # A B B B B B C A B B B B B CD 46 | -------------------------------------------------------------------------------- /examples/PsyNeuLink/SimpleFN-conditional.reconstructed.py: -------------------------------------------------------------------------------- 1 | import psyneulink as pnl 2 | 3 | comp = pnl.Composition(name="comp") 4 | 5 | fn = pnl.IntegratorMechanism( 6 | name="fn", 7 | function=pnl.FitzHughNagumoIntegrator( 8 | name="FitzHughNagumoIntegrator_Function_0", 9 | d_v=1, 10 | initial_v=-1, 11 | initializer=[[0]], 12 | default_variable=[[0]], 13 | ), 14 | ) 15 | im = pnl.IntegratorMechanism( 16 | name="im", 17 | function=pnl.AdaptiveIntegrator( 18 | initializer=[[0]], rate=0.5, default_variable=[[0]] 19 | ), 20 | ) 21 | 22 | comp.add_node(fn) 23 | comp.add_node(im) 24 | 25 | comp.add_projection( 26 | projection=pnl.MappingProjection( 27 | name="MappingProjection_from_fn_OutputPort_0__to_im_InputPort_0_", 28 | function=pnl.LinearMatrix(default_variable=[-1.0], matrix=[[1.0]]), 29 | ), 30 | sender=fn, 31 | receiver=im, 32 | ) 33 | 34 | comp.scheduler.add_condition(fn, pnl.Always()) 35 | comp.scheduler.add_condition( 36 | im, 37 | pnl.All( 38 | 
pnl.EveryNCalls(dependency=fn, n=20.0), 39 | pnl.AfterNCalls( 40 | dependency=fn, n=1600.0, time_scale=pnl.TimeScale.ENVIRONMENT_STATE_UPDATE 41 | ), 42 | ), 43 | ) 44 | 45 | comp.scheduler.termination_conds = { 46 | pnl.TimeScale.ENVIRONMENT_SEQUENCE: pnl.Never(), 47 | pnl.TimeScale.ENVIRONMENT_STATE_UPDATE: pnl.AfterNCalls( 48 | dependency=fn, n=2000, time_scale=pnl.TimeScale.ENVIRONMENT_STATE_UPDATE 49 | ), 50 | } 51 | -------------------------------------------------------------------------------- /examples/PsyNeuLink/SimpleFN-timing.reconstructed.py: -------------------------------------------------------------------------------- 1 | import psyneulink as pnl 2 | 3 | comp = pnl.Composition(name="comp") 4 | 5 | fn = pnl.IntegratorMechanism( 6 | name="fn", 7 | function=pnl.FitzHughNagumoIntegrator( 8 | name="FitzHughNagumoIntegrator_Function_0", 9 | d_v=1, 10 | initial_v=-1, 11 | initializer=[[0]], 12 | default_variable=[[0]], 13 | ), 14 | ) 15 | im = pnl.IntegratorMechanism( 16 | name="im", 17 | function=pnl.AdaptiveIntegrator( 18 | initializer=[[0]], rate=0.5, default_variable=[[0]] 19 | ), 20 | ) 21 | 22 | comp.add_node(fn) 23 | comp.add_node(im) 24 | 25 | comp.add_projection( 26 | projection=pnl.MappingProjection( 27 | name="MappingProjection_from_fn_OutputPort_0__to_im_InputPort_0_", 28 | function=pnl.LinearMatrix(default_variable=[-1.0], matrix=[[1.0]]), 29 | ), 30 | sender=fn, 31 | receiver=im, 32 | ) 33 | 34 | comp.scheduler.add_condition( 35 | fn, 36 | pnl.TimeInterval( 37 | repeat="50 microsecond", 38 | start=None, 39 | end=None, 40 | unit="ms", 41 | start_inclusive=True, 42 | end_inclusive=True, 43 | ), 44 | ) 45 | comp.scheduler.add_condition( 46 | im, 47 | pnl.TimeInterval( 48 | repeat="1 millisecond", 49 | start="80 millisecond", 50 | end=None, 51 | unit="ms", 52 | start_inclusive=True, 53 | end_inclusive=True, 54 | ), 55 | ) 56 | 57 | comp.scheduler.termination_conds = { 58 | pnl.TimeScale.ENVIRONMENT_SEQUENCE: pnl.Never(), 59 | 
"""Run a single-mechanism FitzHugh-Nagumo model, dump the V/W traces to
``SimpleFN_<i>.dat`` files, and plot them."""
import psyneulink as pnl
import sys

dt = 0.05
simtime = 100

time_step_size = dt
num_trials = int(simtime / dt)

fhn = pnl.FitzHughNagumoIntegrator(
    initial_v=-1,
    initial_w=0,
    d_v=1,
    time_step_size=time_step_size,
)

print(f"Running simple model of FitzHugh Nagumo cell for {simtime}ms: {fhn}")

fn = pnl.IntegratorMechanism(name="fn", function=fhn)

comp = pnl.Composition(name="comp")
comp.add_linear_processing_pathway([fn])

print("Running the SimpleFN model...")

comp.run(inputs={fn: 0}, log=True, num_trials=num_trials)


print("Finished running the SimpleFN model")


for node in comp.nodes:
    print(f"=== {node} {node.name}: {node.parameters.value.get(comp)}")

import matplotlib.pyplot as plt


def generate_time_array(node, context=comp.default_execution_id, param="value"):
    """Trial indices at which `param` of `node` was logged."""
    return [entry.time.trial for entry in getattr(node.parameters, param).log[context]]


def generate_value_array(node, index, context=comp.default_execution_id, param="value"):
    """Logged values of component `index` of `param` for `node`."""
    return [
        float(entry.value[index])
        for entry in getattr(node.parameters, param).log[context]
    ]


fig, axes = plt.subplots()
for i in [0, 1]:  # the two components of the logged state (V and W)
    x_values = {node: generate_time_array(node) for node in comp.nodes}
    y_values = {node: generate_value_array(node, i) for node in comp.nodes}

    # BUG FIX: the .dat writer previously indexed x_values/y_values with a
    # stale `node` variable left over from an earlier loop.  Make the choice
    # explicit (the last node, matching the old accidental behavior — there
    # is only one node here anyway) and close the file via a context manager.
    data_node = comp.nodes[-1]
    with open("SimpleFN_%i.dat" % i, "w") as fout:
        for t, v in zip(x_values[data_node], y_values[data_node]):
            # time_step_size converts trials to ms; / 1000 converts ms to s
            fout.write("%s\t%s\n" % (t * time_step_size / 1000.0, v))

    for node in comp.nodes:
        axes.plot(
            [t * time_step_size / 1000.0 for t in x_values[node]],
            y_values[node],
            # label each curve so axes.legend() below has entries to show
            label=f"Value of {i} {node.name}, {node.function.__class__.__name__}",
        )

axes.set_xlabel("Time (s)")
axes.legend()
plt.show()
"""Reconstructed PsyNeuLink script for the conditional A -> B -> C model."""
import psyneulink as pnl

comp = pnl.Composition(name="comp")


def _transfer_mechanism(label):
    """All three mechanisms share one configuration apart from their name."""
    return pnl.TransferMechanism(
        name=label,
        function=pnl.Linear(default_variable=[[0]]),
        integrator_function=pnl.AdaptiveIntegrator(
            initializer=[[0]], rate=0.5, default_variable=[[0]]
        ),
        termination_measure=pnl.Distance(
            metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
        ),
    )


A = _transfer_mechanism("A")
B = _transfer_mechanism("B")
C = _transfer_mechanism("C")

for node in (A, B, C):
    comp.add_node(node)

# Chain the mechanisms with 1x1 identity projections: A -> B -> C.
comp.add_projection(
    projection=pnl.MappingProjection(
        name="MappingProjection_from_A_RESULT__to_B_InputPort_0_",
        function=pnl.LinearMatrix(matrix=[[1.0]]),
    ),
    sender=A,
    receiver=B,
)
comp.add_projection(
    projection=pnl.MappingProjection(
        name="MappingProjection_from_B_RESULT__to_C_InputPort_0_",
        function=pnl.LinearMatrix(matrix=[[1.0]]),
    ),
    sender=B,
    receiver=C,
)

# A fires only on its very first call, B on every pass, C every 5th call of B.
comp.scheduler.add_condition(
    A,
    pnl.AtNCalls(dependency=A, n=0, time_scale=pnl.TimeScale.ENVIRONMENT_STATE_UPDATE),
)
comp.scheduler.add_condition(B, pnl.Always())
comp.scheduler.add_condition(C, pnl.EveryNCalls(dependency=B, n=5))

comp.scheduler.termination_conds = {
    pnl.TimeScale.ENVIRONMENT_SEQUENCE: pnl.Never(),
    pnl.TimeScale.ENVIRONMENT_STATE_UPDATE: pnl.AllHaveRun(),
}
| import runpy 4 | import subprocess 5 | import psyneulink as pnl 6 | 7 | 8 | def main(): 9 | reconstructed_identifer = "reconstructed" 10 | 11 | for example in glob.glob(os.path.join(os.path.dirname(__file__), "*.py")): 12 | if reconstructed_identifer in example or example == __file__: 13 | continue 14 | 15 | pnl.clear_registry() 16 | border = "=" * len(example) 17 | print(f"{border}\n{example}\n{border}") 18 | base_fname = example.replace(".py", "") 19 | script_globals = runpy.run_path(example) 20 | 21 | compositions = list( 22 | filter(lambda v: isinstance(v, pnl.Composition), script_globals.values()) 23 | ) 24 | nonnested_comps = [] 25 | 26 | for x in compositions: 27 | for y in compositions: 28 | if x in y.nodes: 29 | break 30 | else: 31 | nonnested_comps.append(x) 32 | 33 | try: 34 | comp = nonnested_comps[0] 35 | except IndexError: 36 | continue 37 | 38 | json_summary = pnl.generate_json(comp) 39 | 40 | with open(f"{base_fname}.json", "w") as outfi: 41 | outfi.write(json_summary) 42 | outfi.write("\n") 43 | 44 | reconstructed_fname = f"{base_fname}.{reconstructed_identifer}.py" 45 | with open(reconstructed_fname, "w") as outfi: 46 | outfi.write(pnl.generate_script_from_json(json_summary)) 47 | outfi.write("\n") 48 | subprocess.run(["black", reconstructed_fname]) 49 | subprocess.run(["python", reconstructed_fname]) 50 | 51 | 52 | if __name__ == "__main__": 53 | main() 54 | -------------------------------------------------------------------------------- /examples/PsyNeuLink/model_ABCD.py: -------------------------------------------------------------------------------- 1 | import psyneulink as pnl 2 | 3 | comp = pnl.Composition(name="ABCD") 4 | 5 | A = pnl.TransferMechanism(function=pnl.Linear(slope=2.0, intercept=2.0), name="A") 6 | B = pnl.TransferMechanism(function=pnl.Logistic, name="B") 7 | C = pnl.TransferMechanism(function=pnl.Exponential, name="C") 8 | D = pnl.IntegratorMechanism(function=pnl.SimpleIntegrator(rate=0.05), name="D") 9 | 10 | for m in [A, 
"""PsyNeuLink example: an outer composition containing a nested composition,
with custom scheduler conditions and termination rules."""
import psyneulink as pnl

comp = pnl.Composition(name="comp")
inner_comp = pnl.Composition(name="Inner Composition")

A = pnl.TransferMechanism(function=pnl.Linear(slope=5.0, intercept=2.0), name="A")
B = pnl.TransferMechanism(function=pnl.Logistic, name="B")
C = pnl.RecurrentTransferMechanism(name="C")
D = pnl.IntegratorMechanism(function=pnl.SimpleIntegrator, name="D")

E = pnl.TransferMechanism(name="E")
F = pnl.TransferMechanism(name="F")


for inner_node in [E, F]:
    inner_comp.add_node(inner_node)


for outer_node in [A, B, C, D, inner_comp]:
    comp.add_node(outer_node)

# Wire the outer graph: A fans out to B and C, both converge on D, and C
# additionally feeds the nested composition.
for sender, receiver in [(A, B), (A, C), (B, D), (C, D), (C, inner_comp)]:
    comp.add_projection(pnl.MappingProjection(), sender, receiver)

inner_comp.add_projection(pnl.MappingProjection(), E, F)

# A runs on every pass; B on every 2nd call of A; C on every 2nd call of B.
comp.scheduler.add_condition_set(
    {A: pnl.EveryNPasses(1), B: pnl.EveryNCalls(A, 2), C: pnl.EveryNCalls(B, 2)}
)

# One trial per run; a trial ends after D's 4th call.
comp.termination_processing = {
    pnl.TimeScale.RUN: pnl.AfterNTrials(1),
    pnl.TimeScale.TRIAL: pnl.AfterNCalls(D, 4),
}
print(f"{node.name}: {node.parameters.value.get(comp)}") 31 | -------------------------------------------------------------------------------- /examples/PsyNeuLink/model_with_nested_graph.reconstructed.py: -------------------------------------------------------------------------------- 1 | import psyneulink as pnl 2 | 3 | comp = pnl.Composition(name="comp") 4 | 5 | A = pnl.TransferMechanism( 6 | name="A", 7 | function=pnl.Linear(intercept=2.0, slope=5.0, default_variable=[[0]]), 8 | integrator_function=pnl.AdaptiveIntegrator( 9 | initializer=[[0]], rate=0.5, default_variable=[[0]] 10 | ), 11 | termination_measure=pnl.Distance( 12 | metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]] 13 | ), 14 | ) 15 | B = pnl.TransferMechanism( 16 | name="B", 17 | function=pnl.Logistic(default_variable=[[0]]), 18 | integrator_function=pnl.AdaptiveIntegrator( 19 | initializer=[[0]], rate=0.5, default_variable=[[0]] 20 | ), 21 | termination_measure=pnl.Distance( 22 | metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]] 23 | ), 24 | ) 25 | C = pnl.TransferMechanism( 26 | name="C", 27 | function=pnl.Exponential(default_variable=[[0]]), 28 | integrator_function=pnl.AdaptiveIntegrator( 29 | initializer=[[0]], rate=0.5, default_variable=[[0]] 30 | ), 31 | termination_measure=pnl.Distance( 32 | metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]] 33 | ), 34 | ) 35 | Inner_Composition = pnl.Composition(name="Inner_Composition") 36 | 37 | comp.add_node(A) 38 | comp.add_node(B) 39 | comp.add_node(C) 40 | comp.add_node(Inner_Composition) 41 | 42 | comp.add_projection( 43 | projection=pnl.MappingProjection( 44 | name="MappingProjection_from_A_RESULT__to_B_InputPort_0_", 45 | function=pnl.LinearMatrix(default_variable=[2.0], matrix=[[1.0]]), 46 | ), 47 | sender=A, 48 | receiver=B, 49 | ) 50 | comp.add_projection( 51 | projection=pnl.MappingProjection( 52 | name="MappingProjection_from_A_RESULT__to_C_InputPort_0_", 53 | function=pnl.LinearMatrix(default_variable=[2.0], 
from pathlib import Path
import os
import sys

from modeci_mdf.utils import load_mdf
from modeci_mdf.interfaces.pytorch.exporter import mdf_to_pytorch


def main(filename):
    """Convert one MDF model file (path relative to the repo root) to PyTorch.

    Models whose path contains "Translated" are exported with the simplified
    "mdf.0" dialect; everything else uses the full stateful "mdf.s" dialect.
    """
    base_path = Path(__file__).parent
    file_path = str((base_path / "../../.." / filename).resolve())

    print("Converting MDF model in %s to PyTorch" % file_path)

    # Normalize to forward slashes for the exporter.
    model_input = file_path.replace(os.sep, "/")

    mdf_model = load_mdf(model_input)

    # "mdf.0": MDF "zero" - a simplified form of MDF.
    # "mdf.s": MDF "stateful" - full MDF allowing stateful parameters.
    version = "mdf.0" if "Translated" in model_input else "mdf.s"
    pytorch_model = mdf_to_pytorch(
        mdf_model,
        model_input,
        eval_models=False,
        version=version,
    )


if __name__ == "__main__":

    sample_examples = [
        "examples/MDF/Simple.json",
        "examples/MDF/ABCD.json",
        "examples/MDF/Arrays.json",
        "examples/MDF/translation/Translated_Arrays.json",
        "examples/MDF/translation/Translated_Simple.json",
        "examples/MDF/translation/Translated_ABCD.json",
    ]

    if "-all" in sys.argv:
        for ex in sample_examples:
            main(ex)

    elif "-test" in sys.argv:
        # Translated models are skipped in test mode.
        for ex in sample_examples:
            if "Translated" not in ex:
                main(ex)
    else:
        filename = "examples/MDF/Simple.json"
        main(filename)
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn


class CNN(nn.Module):
    """Small two-conv-layer CNN for 1x28x28 (MNIST-sized) inputs.

    Two 3x3 same-padding convolutions, each followed by a 2x2/stride-2 max
    pool, reduce a 28x28 input to 16 feature maps of 7x7 (hence the
    16 * 7 * 7 input features of the final linear layer).
    """

    def __init__(self, in_channels=1, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_channels=in_channels,
            out_channels=8,
            kernel_size=(3, 3),
            stride=(1, 1),
            padding=(1, 1),
        )
        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv2 = nn.Conv2d(
            in_channels=8,
            out_channels=16,
            kernel_size=(3, 3),
            stride=(1, 1),
            padding=(1, 1),
        )
        self.fc1 = nn.Linear(16 * 7 * 7, num_classes)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = x.reshape(x.shape[0], -1)  # flatten per sample for the linear layer
        x = self.fc1(x)
        return x


# Hyperparameters
in_channels = 1
num_classes = 10


def get_pytorch_model():
    """Build a fresh, randomly initialized CNN."""
    return CNN(in_channels=in_channels, num_classes=num_classes)


def get_example_input():
    """Example input batch: one blank 28x28 single-channel image."""
    return torch.zeros((1, 1, 28, 28))


def main():
    # Deferred imports (consistent with the existing deferred EvaluableGraph
    # import) so the CNN above is importable without modeci_mdf installed.
    from modeci_mdf.execution_engine import EvaluableGraph
    from modeci_mdf.interfaces.pytorch import pytorch_to_mdf

    # Create some test inputs for the model
    x = get_example_input()

    # eval() removes randomization from layers such as BatchNorm or Dropout.
    model = get_pytorch_model()
    model.eval()

    # Run the model once to get some ground truth output (from PyTorch).
    output = model(x).detach().numpy()

    # Convert to MDF
    mdf_model, params_dict = pytorch_to_mdf(
        model=model,
        args=(x),
        trace=True,
    )
    # Get the graph
    mdf_graph = mdf_model.graphs[0]
    # Output the model to JSON
    mdf_model.to_json_file("convolution.json")


if __name__ == "__main__":
    main()
https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/PyTorch/PyTorch_MDF/example.onnx.png -------------------------------------------------------------------------------- /examples/PyTorch/PyTorch_MDF/example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/PyTorch/PyTorch_MDF/example.png -------------------------------------------------------------------------------- /examples/PyTorch/PyTorch_MDF/mnasNet1_3.py: -------------------------------------------------------------------------------- 1 | import torchvision.models as models 2 | import torch 3 | from modeci_mdf.interfaces.pytorch import pytorch_to_mdf 4 | 5 | mnasnet1_3 = models.mnasnet1_3(pretrained=False) 6 | 7 | 8 | def get_pytorch_model(): 9 | model = models.mnasnet1_3(pretrained=False) 10 | return model 11 | 12 | 13 | def get_example_input(): 14 | x = torch.zeros((1, 3, 224, 224)) 15 | return x 16 | 17 | 18 | def main(): 19 | # changed import call 20 | from modeci_mdf.execution_engine import EvaluableGraph 21 | 22 | # Create some test inputs for the model 23 | x = get_example_input() 24 | ebv_output = torch.zeros((1,)) 25 | 26 | # Turn on eval mode for model to get rid of any randomization due to things like BatchNorm or Dropout 27 | model = get_pytorch_model() 28 | model.eval() 29 | 30 | # Run the model once to get some ground truth outpot (from PyTorch) 31 | 32 | output = model(x).detach().numpy() 33 | 34 | # Convert to MDF 35 | mdf_model, params_dict = pytorch_to_mdf( 36 | model=model, 37 | args=(x), 38 | trace=True, 39 | ) 40 | # Get the graph 41 | mdf_graph = mdf_model.graphs[0] 42 | # Output the model to JSON 43 | mdf_model.to_json_file("mnasNet1_3.json") 44 | 45 | 46 | if __name__ == "__main__": 47 | main() 48 | -------------------------------------------------------------------------------- 
/examples/PyTorch/PyTorch_MDF/mobilenetv2.py: -------------------------------------------------------------------------------- 1 | import torchvision.models as models 2 | import torch 3 | from modeci_mdf.interfaces.pytorch import pytorch_to_mdf 4 | 5 | 6 | def get_pytorch_model(): 7 | model = models.mobilenet_v2(pretrained=False) 8 | return model 9 | 10 | 11 | def get_example_input(): 12 | x = torch.zeros((1, 3, 224, 224)) 13 | return x 14 | 15 | 16 | def main(): 17 | # changed import call 18 | from modeci_mdf.execution_engine import EvaluableGraph 19 | 20 | # Create some test inputs for the model 21 | x = get_example_input() 22 | ebv_output = torch.zeros((1,)) 23 | 24 | # Turn on eval mode for model to get rid of any randomization due to things like BatchNorm or Dropout 25 | model = get_pytorch_model() 26 | model.eval() 27 | 28 | # Run the model once to get some ground truth outpot (from PyTorch) 29 | output = model(x).detach().numpy() 30 | 31 | # Convert to MDF 32 | mdf_model, params_dict = pytorch_to_mdf( 33 | model=model, 34 | args=(x), 35 | trace=True, 36 | ) 37 | # Get the graph 38 | mdf_graph = mdf_model.graphs[0] 39 | # Output the model to JSON 40 | mdf_model.to_json_file("mobilenetv2.json") 41 | 42 | 43 | if __name__ == "__main__": 44 | main() 45 | -------------------------------------------------------------------------------- /examples/PyTorch/PyTorch_MDF/pytorch_example_images/a/img1.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/PyTorch/PyTorch_MDF/pytorch_example_images/a/img1.jpeg -------------------------------------------------------------------------------- /examples/PyTorch/PyTorch_MDF/pytorch_example_images/a/img2.jpeg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/PyTorch/PyTorch_MDF/pytorch_example_images/a/img2.jpeg -------------------------------------------------------------------------------- /examples/PyTorch/PyTorch_MDF/pytorch_example_images/a/img3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/PyTorch/PyTorch_MDF/pytorch_example_images/a/img3.jpeg -------------------------------------------------------------------------------- /examples/PyTorch/PyTorch_MDF/pytorch_example_images/a/img4.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/PyTorch/PyTorch_MDF/pytorch_example_images/a/img4.jpeg -------------------------------------------------------------------------------- /examples/PyTorch/PyTorch_MDF/pytorch_example_images/a/img5.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/PyTorch/PyTorch_MDF/pytorch_example_images/a/img5.jpeg -------------------------------------------------------------------------------- /examples/PyTorch/PyTorch_MDF/resNext.py: -------------------------------------------------------------------------------- 1 | import torchvision.models as models 2 | import torch 3 | from modeci_mdf.interfaces.pytorch import pytorch_to_mdf 4 | 5 | 6 | def get_pytorch_model(): 7 | model = models.resnext50_32x4d(pretrained=False) 8 | return model 9 | 10 | 11 | def get_example_input(): 12 | x = torch.zeros((1, 3, 224, 224)) 13 | return x 14 | 15 | 16 | def main(): 17 | # changed import call 18 | from modeci_mdf.execution_engine import EvaluableGraph 19 | 20 | # Create some test inputs for the model 21 | x = get_example_input() 
22 | ebv_output = torch.zeros((1,)) 23 | 24 | # Turn on eval mode for model to get rid of any randomization due to things like BatchNorm or Dropout 25 | model = get_pytorch_model() 26 | model.eval() 27 | 28 | # Run the model once to get some ground truth outpot (from PyTorch) 29 | output = model(x).detach().numpy() 30 | 31 | # Convert to MDF 32 | mdf_model, params_dict = pytorch_to_mdf( 33 | model=model, 34 | args=(x), 35 | trace=True, 36 | ) 37 | # Get the graph 38 | mdf_graph = mdf_model.graphs[0] 39 | # Output the model to JSON 40 | mdf_model.to_json_file("resNext.json") 41 | 42 | 43 | if __name__ == "__main__": 44 | main() 45 | -------------------------------------------------------------------------------- /examples/PyTorch/PyTorch_MDF/resnet.py: -------------------------------------------------------------------------------- 1 | import torchvision.models as models 2 | 3 | import torch 4 | from modeci_mdf.interfaces.pytorch import pytorch_to_mdf 5 | 6 | 7 | def get_pytorch_model(): 8 | model = models.resnet18(pretrained=False) 9 | return model 10 | 11 | 12 | def get_example_input(): 13 | x = torch.rand((5, 3, 224, 224)) 14 | return x 15 | 16 | 17 | def main(): 18 | # changed import call 19 | from modeci_mdf.execution_engine import EvaluableGraph 20 | 21 | # Create some test inputs for the model 22 | x = get_example_input() 23 | 24 | # Turn on eval mode for model to get rid of any randomization due to things like BatchNorm or Dropout 25 | model = get_pytorch_model() 26 | model.eval() 27 | 28 | # Run the model once to get some ground truth output (from PyTorch) 29 | # with torch.no_grad(): 30 | output = model(x).detach().numpy() 31 | # print(output) 32 | 33 | # Convert to MDF 34 | mdf_model, params_dict = pytorch_to_mdf( 35 | model=model, 36 | args=(x), 37 | trace=True, 38 | ) 39 | # Get the graph 40 | mdf_graph = mdf_model.graphs[0] 41 | # Output the model to JSON 42 | mdf_model.to_json_file("resnet.json") 43 | 44 | 45 | if __name__ == "__main__": 46 | main() 
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn  # All neural network modules


class CNN(nn.Module):
    """Minimal one-conv-layer CNN for 1x28x28 inputs.

    conv1 (3x3, no padding) maps 28x28 -> 26x26; the 2x2/stride-1 max pool
    maps 26x26 -> 25x25, giving 8 * 25 * 25 features for the linear layer.
    """

    def __init__(self, in_channels=1, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_channels=in_channels,
            out_channels=8,
            kernel_size=(3, 3),
        )
        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(1, 1))

        self.fc1 = nn.Linear(8 * 25 * 25, num_classes)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = x.reshape(x.shape[0], -1)  # flatten per sample
        x = self.fc1(x)
        return x


# Hyperparameters
in_channels = 1
num_classes = 10


def get_pytorch_model():
    """Build a fresh, randomly initialized CNN."""
    return CNN(in_channels=in_channels, num_classes=num_classes)


def get_example_input():
    """Example input batch: one blank 28x28 single-channel image."""
    return torch.zeros((1, 1, 28, 28))


def main():
    # Deferred imports (consistent with the existing deferred EvaluableGraph
    # import) so the CNN above is importable without modeci_mdf installed.
    from modeci_mdf.execution_engine import EvaluableGraph
    from modeci_mdf.interfaces.pytorch import pytorch_to_mdf

    # Create some test inputs for the model
    x = get_example_input()

    # eval() removes randomization from layers such as BatchNorm or Dropout.
    model = get_pytorch_model()
    model.eval()

    # Run the model once to get some ground truth output (from PyTorch).
    output = model(x)

    # Convert to MDF
    mdf_model, params_dict = pytorch_to_mdf(
        model=model,
        args=(x),
        trace=True,
    )
    # Get the graph
    mdf_graph = mdf_model.graphs[0]
    # Output the model to JSON
    mdf_model.to_json_file("simple_convolution.json")


if __name__ == "__main__":
    main()
def get_pytorch_model():
    """Return an untrained (``pretrained=False``) torchvision SqueezeNet 1.1 model."""
    model = models.squeezenet1_1(pretrained=False)
    return model


def get_example_input():
    """Return a single zero-filled example input of shape (1, 3, 224, 224)."""
    x = torch.zeros((1, 3, 224, 224))
    return x


def main():
    """Convert the SqueezeNet 1.1 model to MDF and write it to ``squeezenet1_1.json``.

    Cleanup: removed the unused ``ebv_output`` local, the unused
    ``EvaluableGraph`` import and the unused ``mdf_graph`` local; fixed the
    comment typo "outpot" -> "output".
    """
    # Create a test input for the model
    x = get_example_input()

    # Turn on eval mode for model to get rid of any randomization due to
    # things like BatchNorm or Dropout
    model = get_pytorch_model()
    model.eval()

    # Run the model once as a sanity check and to get some ground truth
    # output (from PyTorch)
    output = model(x).detach().numpy()

    # Convert to MDF
    mdf_model, params_dict = pytorch_to_mdf(
        model=model,
        args=(x),
        trace=True,
    )
    # Output the model to JSON
    mdf_model.to_json_file("squeezenet1_1.json")


if __name__ == "__main__":
    main()
def main():
    """Convert the VGG19 model to MDF and write it to ``vgg19.json``."""
    # Create a test input for the model
    x = get_example_input()

    # Turn on eval mode for model to get rid of any randomization due to
    # things like BatchNorm or Dropout
    model = get_pytorch_model()
    model.eval()

    # Run the model once to get some ground truth output (from PyTorch).
    # BUG FIX: this previously ran the *module-level* ``vgg19`` instance,
    # which (with pretrained=False) has different random weights than
    # ``model``, so the "ground truth" output did not correspond to the
    # model that is actually converted below.
    output = model(x).detach().numpy()

    # Convert to MDF
    mdf_model, params_dict = pytorch_to_mdf(
        model=model,
        args=(x),
        trace=True,
    )
    # Output the model to JSON
    mdf_model.to_json_file("vgg19.json")


if __name__ == "__main__":
    main()
#!/bin/bash
# Regenerate the PyTorch <-> MDF example outputs and graph images.
# Exit on first error and echo each command (-e, -x).
set -ex

## MDF to Pytorch

# Convert the MDF examples to PyTorch and run the generated code in test mode
cd MDF_PyTorch
python MDF_to_PyTorch.py -test

cd ..
# Run the pure-MDF MLP example and regenerate its graph image
python mlp_pure_mdf.py -graph


# PyTorch -> MDF conversion; regenerate both the MDF and the torchviz graphs
python simple_pytorch_to_mdf.py -graph -graph-torch

python inception.py -graph

#python pytorch_ddm.py -graph

# Benchmark run for squeezenet1_1
# NOTE(review): args are presumably <model> <mode> <iterations> — confirm
# against benchmark.py
cd PyTorch_MDF/benchmark_script
python benchmark.py squeezenet1_1 count 10
cd ../..
22 | -------------------------------------------------------------------------------- /examples/PyTorch/simple_pytorch_to_mdf.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/PyTorch/simple_pytorch_to_mdf.1.png -------------------------------------------------------------------------------- /examples/PyTorch/simple_pytorch_to_mdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/PyTorch/simple_pytorch_to_mdf.png -------------------------------------------------------------------------------- /examples/PyTorch/simple_pytorch_to_mdf_torchviz.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/PyTorch/simple_pytorch_to_mdf_torchviz.png -------------------------------------------------------------------------------- /examples/PyTorch/weights.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/PyTorch/weights.h5 -------------------------------------------------------------------------------- /examples/Quantum/README.md: -------------------------------------------------------------------------------- 1 | # Interactions between MDF and Quantum computing technologies 2 | 3 | Starting summer 2021, we will develop tools for interfacing between MDF and quantum computers. This interface is motivated by expectations that quantum hardware will provide speedups for solving Ising-type MDF problems. 
As a first step, we have begun developing implementations targeting quantum hardware for the key computations in several cognitive models, as listed below. Next, we will extend MDF so that quantum implementations such as the ones we develop can be expressed in it.

4 | 5 | - [MDF files](MDF): A number of Python scripts using the MDF Python API, as well as the JSON and YAML files. 6 | - [NeuroML](NeuroML): Examples of interactions between NeuroML and MDF. 7 | - [ONNX](ONNX): Examples of interactions between MDF and ONNX. 8 | - [PyTorch](PyTorch): Examples of interactions between PyTorch and MDF. 9 | - [PsyNeuLink](PsyNeuLink): Examples of interactions between PsyNeuLink and MDF. 10 | - [WebGME](WebGME): Examples of interactions between WebGME and MDF. 11 | - [ACT-R](ACT-R): Examples of interactions between ACT-R and MDF. 12 | - [Quantum](Quantum): Examples of interactions between MDF and Quantum computing technologies. 13 | -------------------------------------------------------------------------------- /examples/TensorFlow/Keras/IRIS/README.md: -------------------------------------------------------------------------------- 1 | # Keras to MDF example: IRIS dataset 2 | 3 | **For more details on Keras and the current state of the Keras->MDF mapping see the [MNIST example](../MNIST).** 4 | 5 | This model uses the [IRIS dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set) in the [trained Keras model](keras_model.py) and the MDF equivalent. 6 | 7 | ### Summarize Model 8 | 9 | Below is the summary image of the trained Keras model. We can clearly see the output shape and number of weights in each layer: 10 | 11 | ![summary](summary.png) 12 | 13 | 14 | ### Keras Model 15 | 16 | Visualization of the model from Keras: 17 | 18 |

19 |
20 | 21 | ### MDF Model 22 | 23 | Graphviz is used to generate visualization for the MDF graph. Below is the visualization of the MDF graph after converting the keras model to MDF. 24 | 25 | ![keras_to_MDF](keras_to_MDF.1.png) 26 | 27 | More detailed graphical representation of the MDF: 28 | 29 |

30 | 31 | ##### Netron 32 | Below is the visualization of this model using netron 33 | 34 | ![keras-model-to-netron](layers_netron.png) 35 | -------------------------------------------------------------------------------- /examples/TensorFlow/Keras/IRIS/keras_to_MDF.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/TensorFlow/Keras/IRIS/keras_to_MDF.1.png -------------------------------------------------------------------------------- /examples/TensorFlow/Keras/IRIS/keras_to_MDF.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/TensorFlow/Keras/IRIS/keras_to_MDF.png -------------------------------------------------------------------------------- /examples/TensorFlow/Keras/IRIS/keras_to_MDF.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | 6 | from sklearn.datasets import load_iris 7 | from sklearn.model_selection import train_test_split 8 | from sklearn.preprocessing import StandardScaler 9 | 10 | from modeci_mdf.interfaces.keras import keras_to_mdf 11 | from modelspec.utils import _val_info 12 | from modeci_mdf.execution_engine import EvaluableGraph 13 | 14 | exec(open("./keras_model.py").read()) 15 | 16 | # load the keras model 17 | # model = tf.keras.models.load_model("keras_model_on_iris.keras") 18 | 19 | # get the test data from iris dataset 20 | iris = load_iris() 21 | 22 | X, y = iris.data, iris.target 23 | 24 | X_train, X_test, y_train, y_test = train_test_split( 25 | X, y, test_size=0.2, random_state=42 26 | ) 27 | 28 | # standardize the test data 29 | scaler = StandardScaler() 30 | X_test = scaler.fit_transform(X_test) 31 | 32 | 33 | # get the output of predicting with the keras model 34 | 
import sys

import numpy as np
import tensorflow as tf

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

from modeci_mdf.interfaces.keras import keras_to_mdf
from modelspec.utils import _val_info
from modeci_mdf.execution_engine import EvaluableGraph

# Build/train the Keras model; this defines ``model`` in the current namespace
exec(open("./keras_model.py").read())

# load the keras model
# model = tf.keras.models.load_model("keras_model_on_iris.keras")

# get the test data from iris dataset
iris = load_iris()

X, y = iris.data, iris.target

# Split with fixed random_state so the held-out test set is reproducible
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# standardize the test data
# NOTE(review): the scaler is fit on the *test* split only; if
# keras_model.py fit its scaler on the training data, this will not match
# the training-time preprocessing — confirm against keras_model.py
scaler = StandardScaler()
X_test = scaler.fit_transform(X_test)


# get the output of predicting with the keras model
output = model.predict(X_test)
print("Output of network when predicted with Keras directly: %s" % output)

# Convert the Keras model to MDF
mdf_model, params_dict = keras_to_mdf(model=model, args=X_test)


# Save the MDF to JSON & YAML
mdf_model.to_json_file("keras_to_MDF.json")
mdf_model.to_yaml_file("keras_to_MDF.yaml")

# Get mdf graph
mdf_graph = mdf_model.graphs[0]

# visualize mdf graph-image (high-level view, level=1)
mdf_model.to_graph_image(
    engine="dot",
    output_format="png",
    view_on_render=False,
    level=1,
    filename_root="keras_to_MDF.1",
    is_horizontal=True,
    solid_color=True,
)
# visualize mdf graph-image (detailed view, level=3)
mdf_model.to_graph_image(
    engine="dot",
    output_format="png",
    view_on_render=False,
    level=3,
    filename_root="keras_to_MDF",
    is_horizontal=False,
    solid_color=True,
)


# Evaluate the model via the MDF scheduler
eg = EvaluableGraph(graph=mdf_graph, verbose=False)
eg.evaluate()
output_mdf = eg.output_enodes[0].get_output()
print("Evaluated the graph in MDF, output: %s" % (_val_info(output_mdf)))

# Assert that the results are the same for Keras and MDF; exit nonzero on
# mismatch so CI catches a divergence
try:
    assert np.allclose(
        output,
        output_mdf,
    )
    print("Passed all comparison tests!")
except AssertionError:
    print("Failed all comparison tests")
    sys.exit(1)
import sys

import numpy as np
import tensorflow as tf

from modeci_mdf.interfaces.keras import keras_to_mdf
from modelspec.utils import _val_info
from modeci_mdf.execution_engine import EvaluableGraph

# Build/train the Keras model; this defines ``model`` in the current namespace
exec(open("./keras_model.py").read())

# load the keras model
# model = tf.keras.models.load_model("kr_N_model.keras")

# get 20 of the test images from the MNIST test dataset
_, (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_test = tf.keras.utils.normalize(x_test, axis=1)
twenty_x_test = x_test[:20, :, :]

# get the output of predicting with the keras model
output = model.predict(twenty_x_test)
print("Output of network when predicted with Keras directly: %s" % output)

# Convert the Keras model to MDF
mdf_model, params_dict = keras_to_mdf(model=model, args=twenty_x_test)


# Save the MDF to JSON & YAML
mdf_model.to_json_file("keras_to_MDF.json")
mdf_model.to_yaml_file("keras_to_MDF.yaml")

# Get mdf graph
mdf_graph = mdf_model.graphs[0]

# visualize mdf graph-image (high-level view, level=1)
mdf_model.to_graph_image(
    engine="dot",
    output_format="png",
    view_on_render=False,
    level=1,
    filename_root="keras_to_MDF.1",
    is_horizontal=True,
    solid_color=True,
)
# visualize mdf graph-image (detailed view, level=3)
mdf_model.to_graph_image(
    engine="dot",
    output_format="png",
    view_on_render=False,
    level=3,
    filename_root="keras_to_MDF",
    is_horizontal=False,
    solid_color=True,
)


# Evaluate the model via the MDF scheduler
eg = EvaluableGraph(graph=mdf_graph, verbose=False)
eg.evaluate()
output_mdf = eg.output_enodes[0].get_output()
# BUG FIX: this value comes from the MDF execution engine, not Keras — the
# message previously said "in Keras"; the IRIS sibling script prints "in MDF"
print("Evaluated the graph in MDF, output: %s" % (_val_info(output_mdf)))

# Assert that the results are the same for Keras and MDF
assert np.allclose(
    output,
    output_mdf,
)
print("Passed all comparison tests!")
#!/bin/bash
# Exit on first error and echo each command (-e, -x).
set -ex

# Script to regenerate and test the examples

# MNIST example: retrain the Keras model, then convert it to MDF and compare
cd MNIST

python keras_model.py -nogui

python keras_to_MDF.py -nogui

# IRIS example: retrain the Keras model, then convert it to MDF and compare
cd ../IRIS

python keras_model.py

python keras_to_MDF.py -nogui
However, it can be challenging and time-consuming to build and train complex deep learning models using TensorFlow's low-level APIs. To address this, Keras was developed.
Then, run `webgme start` from the project root to start the server. Finally, navigate to `http://localhost:8888` to start using mdf_gme!
#!/usr/bin/env node
// Convert a model instance between MDF JSON and WebGME-importable JSON.
// The conversion direction is detected from the input file's shape (below).
const {Model} = require('../src/common/instance-converter');
const path = require('path');

if (process.argv.length < 3) {
    // NOTE(review): the usage string ends after argv[1]; the argument
    // placeholder (e.g. <path/to/instance.json>) appears to have been lost —
    // confirm against the WebGME README, which documents
    // "node bin/instance_converter path/to/MDForGME/instance.json"
    console.error(`usage: ${process.argv[1]} `);
    process.exit(1);
}

// Load the input JSON from the path given on the command line
const json = require(path.resolve(process.argv[2]));
// GME JSON has a top-level "attributes" property; MDF JSON does not
const isGMEJSON = json.hasOwnProperty('attributes');
// GME -> MDF: wrap the single converted [name, model] pair in an object.
// MDF -> GME: convert the first (name, model) entry of the input object.
const output = isGMEJSON ? Object.fromEntries([Model.toMDF(json)]) :
    Object.entries(json).map(entry => Model.toGME(...entry)).shift();
console.log(JSON.stringify(output, null, 2));
require('./config.default'); 7 | 8 | config.server.port = 9001; 9 | config.mongo.uri = 'mongodb://127.0.0.1:27017/webgme_tests'; 10 | 11 | module.exports = config; 12 | -------------------------------------------------------------------------------- /examples/WebGME/config/config.webgme.js: -------------------------------------------------------------------------------- 1 | // DO NOT EDIT THIS FILE 2 | // This file is automatically generated from the webgme-setup-tool. 3 | 'use strict'; 4 | 5 | 6 | var config = require('webgme/config/config.default'), 7 | validateConfig = require('webgme/config/validator'); 8 | 9 | // The paths can be loaded from the webgme-setup.json 10 | config.plugin.basePaths.push(__dirname + '/../src/plugins'); 11 | config.plugin.basePaths.push(__dirname + '/../node_modules/webgme-json-importer/src/plugins'); 12 | config.seedProjects.basePaths.push(__dirname + '/../src/seeds/MDF'); 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | // Visualizer descriptors 21 | 22 | // Add requirejs paths 23 | config.requirejsPaths = { 24 | 'SetStateFromJSON': 'node_modules/webgme-json-importer/src/plugins/SetStateFromJSON', 25 | 'ExportToJSON': 'node_modules/webgme-json-importer/src/plugins/ExportToJSON', 26 | 'webgme-json-importer': './node_modules/webgme-json-importer/src/common', 27 | 'mdf_gme': './src/common' 28 | }; 29 | 30 | 31 | config.mongo.uri = 'mongodb://127.0.0.1:27017/mdf_gme'; 32 | validateConfig(config); 33 | module.exports = config; 34 | -------------------------------------------------------------------------------- /examples/WebGME/config/index.js: -------------------------------------------------------------------------------- 1 | /*jshint node: true*/ 2 | /** 3 | * @author lattmann / https://github.com/lattmann 4 | * @author pmeijer / https://github.com/pmeijer 5 | */ 6 | 7 | var env = process.env.NODE_ENV || 'default', 8 | configFilename = __dirname + '/config.' 
+ env + '.js', 9 | config = require(configFilename), 10 | validateConfig = require('webgme/config/validator'), 11 | overrideFromEnv = require('webgme/config/overridefromenv'); 12 | 13 | overrideFromEnv(config); 14 | 15 | validateConfig(config); 16 | module.exports = config; 17 | -------------------------------------------------------------------------------- /examples/WebGME/examples/README.md: -------------------------------------------------------------------------------- 1 | This directory contains examples of models in both MDF and importable GME JSON. 2 | To convert an MDF format JSON to GME use the `bin/instance_converter` script. 3 | For example: 4 | 5 | ```bash 6 | node bin/instance_converter ../PyTorch/inception.json > examples/gme/Inception.json 7 | ``` 8 | 9 | Below is screenshot taken from the WebGME GUI displaying the converted model: 10 | 11 | ![InceptionBlocks MDF WegGME](inception_webgme.png) 12 | -------------------------------------------------------------------------------- /examples/WebGME/examples/inception_webgme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/WebGME/examples/inception_webgme.png -------------------------------------------------------------------------------- /examples/WebGME/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mdf_gme", 3 | "scripts": { 4 | "start": "node app.js", 5 | "test": "node ./node_modules/mocha/bin/mocha --recursive test", 6 | "apply": "node ./node_modules/webgme-engine/src/bin/apply.js", 7 | "diff": "node ./node_modules/webgme-engine/src/bin/diff.js", 8 | "export": "node ./node_modules/webgme-engine/src/bin/export.js", 9 | "import": "node ./node_modules/webgme-engine/src/bin/import.js", 10 | "merge": "node ./node_modules/webgme-engine/src/bin/merge.js", 11 | "plugin": "node 
/*globals define*/
/*eslint-env node, browser*/

// WebGME plugin: export the active node as MDF JSON embedded in a generated
// Python script (rendered from template.py.ejs) and attach it as an artifact.
define([
    'text!./metadata.json',
    'mdf_gme/instance-converter',
    'webgme-json-importer/JSONImporter',
    'plugin/PluginBase',
    'text!./template.py.ejs',
    'underscore',
], function (
    pluginMetadata,
    MDFConverter,
    JSONImporter,
    PluginBase,
    PythonCodeTpl,
    _,
) {
    'use strict';

    // Parse the raw metadata text and pre-compile the EJS/underscore template
    pluginMetadata = JSON.parse(pluginMetadata);
    PythonCodeTpl = _.template(PythonCodeTpl);

    class ExportToMDFPython extends PluginBase {
        constructor() {
            super();
            this.pluginMetadata = pluginMetadata;
        }

        // Plugin entry point: render the Python code, store it in the blob
        // storage as "output.py", and report success via the callback.
        async main(callback) {
            const mdfJson = await this.getMDFJson(this.activeNode);
            const code = PythonCodeTpl({mdfJson});
            const hash = await this.blobClient.putFile('output.py', code);
            this.result.addArtifact(hash);
            this.result.setSuccess(true);
            callback(null, this.result);
        }

        // Convert the active node's GME JSON to MDF JSON ({name: model}).
        // NOTE(review): the `node` parameter is unused — the method always
        // reads this.activeNode; confirm whether that is intentional.
        async getMDFJson(node) {
            const importer = new JSONImporter(this.core, this.rootNode);
            const json = await importer.toJSON(this.activeNode);
            await this.setBasePtrsToMetaTag(json);
            return Object.fromEntries([MDFConverter.Model.toMDF(json)]);
        }

        // Recursively rewrite each node's `base` pointer (a path) into a
        // "@meta:<name>" tag so the JSON is portable across projects.
        async setBasePtrsToMetaTag(json) {
            const {base} = json.pointers;
            const baseNode = await this.core.loadByPath(this.rootNode, base);
            const metaTag = `@meta:${this.core.getAttribute(baseNode, 'name')}`;
            json.pointers.base = metaTag;

            if (json.children) {
                json.children = await Promise.all(
                    json.children.map(child => this.setBasePtrsToMetaTag(child))
                );
            }
            return json;
        }
    }

    ExportToMDFPython.metadata = pluginMetadata;

    return ExportToMDFPython;
});
EvaluableGraph 4 | import json 5 | 6 | data = json.loads("""<%= JSON.stringify(mdfJson) %>""") 7 | model = Model() 8 | model = _parse_element(data, model) 9 | graph = model.graphs[0] 10 | egraph = EvaluableGraph(graph, False) 11 | result = graph # DeepForge requires the concept of interest to be available as "result" 12 | -------------------------------------------------------------------------------- /examples/WebGME/src/seeds/MDF/MDF.webgmex: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ModECI/MDF/375823b179c6d657dadbdc2f0b3fbd1d26b6ad48/examples/WebGME/src/seeds/MDF/MDF.webgmex -------------------------------------------------------------------------------- /examples/WebGME/src/visualizers/Visualizers.json: -------------------------------------------------------------------------------- 1 | [] 2 | -------------------------------------------------------------------------------- /examples/WebGME/test/globals.js: -------------------------------------------------------------------------------- 1 | // This is used by the test/plugins tests 2 | /*globals requireJS*/ 3 | /*jshint node:true*/ 4 | /** 5 | * @author pmeijer / https://github.com/pmeijer 6 | */ 7 | 8 | var testFixture = require('webgme/test/_globals'), 9 | WEBGME_CONFIG_PATH = '../config'; 10 | 11 | // This flag will make sure the config.test.js is being used 12 | // process.env.NODE_ENV = 'test'; // This is set by the require above, overwrite it here. 
13 | 14 | var WebGME = testFixture.WebGME, 15 | gmeConfig = require(WEBGME_CONFIG_PATH), 16 | getGmeConfig = function () { 17 | 'use strict'; 18 | // makes sure that for each request it returns with a unique object and tests will not interfere 19 | if (!gmeConfig) { 20 | // if some tests are deleting or unloading the config 21 | gmeConfig = require(WEBGME_CONFIG_PATH); 22 | } 23 | return JSON.parse(JSON.stringify(gmeConfig)); 24 | }; 25 | 26 | WebGME.addToRequireJsPaths(gmeConfig); 27 | 28 | testFixture.getGmeConfig = getGmeConfig; 29 | 30 | module.exports = testFixture; 31 | -------------------------------------------------------------------------------- /examples/WebGME/webgme-setup.json: -------------------------------------------------------------------------------- 1 | { 2 | "components": { 3 | "plugins": { 4 | "ExportToMDFPython": { 5 | "src": "src/plugins/ExportToMDFPython", 6 | "test": "test/plugins/ExportToMDFPython" 7 | } 8 | }, 9 | "seeds": { 10 | "MDF": { 11 | "src": "src/seeds/MDF" 12 | } 13 | } 14 | }, 15 | "dependencies": { 16 | "plugins": { 17 | "ExportToJSON": { 18 | "project": "webgme-json-importer", 19 | "path": "node_modules/webgme-json-importer/src/plugins/ExportToJSON" 20 | }, 21 | "SetStateFromJSON": { 22 | "project": "webgme-json-importer", 23 | "path": "node_modules/webgme-json-importer/src/plugins/SetStateFromJSON" 24 | } 25 | }, 26 | "seeds": {} 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /install_on_osbv2.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | 3 | # Temporary file for helping installation of MDF on OSBv2 4 | 5 | # To use this: 6 | # 1) Go to https://www.v2.opensourcebrain.org 7 | # 2) Log in (register first if you're not an OSBv2 user) 8 | # 3) Go to https://www.v2.opensourcebrain.org/repositories/1 9 | # 4) Click on Create new workspace. Dialog opens 10 | # 5) Give it a unique name (e.g. with your username). 
Description and image are optional 11 | # 6) Click on Create a new workspace. 12 | # DO accept the offer to copy all files from the GitHub repo to the new workspace (Click OK) 13 | # DON'T accept the offer to open workspace straight away; opens in wrong application.. (Click CANCEL instead) 14 | # 7) Back at https://www.v2.opensourcebrain.org, find your new workspace, click the ... (3 dots, top right), open with JupyterLab 15 | # 8) When JupyterLab opens, click on Terminal in the launcher 16 | # 9) Type: 17 | # cd Mod*/main/ 18 | # ./install_on_osbv2.sh 19 | # cd examples/MDF/ 20 | # python simple.py -run 21 | # 10) Hey presto, you've just run your first MDF model! 22 | 23 | pip install dask==2.30.0 distributed==2.30.1 protobuf==3.17.0 torch==1.8.0 24 | pip install . 25 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=42", 4 | "wheel", 5 | "toml", 6 | ] 7 | 8 | build-backend = "setuptools.build_meta" 9 | 10 | #[tool.setuptools_scm] 11 | #write_to = "src/modeci_mdf/version.py" 12 | 13 | [tool.pytest.ini_options] 14 | markers = [ 15 | "coremdf: marks tests which are core to the MDF specification", 16 | "actr: marks tests which require ACT-R to be installed.", 17 | "pytorch: marks test which require PyTorch to be installed.')", 18 | "psyneulink: marks tests which require PsyNueLink to be installed.", 19 | "neuroml: marks tests which require NeuroML to be installed." 
20 | ] 21 | -------------------------------------------------------------------------------- /src/modeci_mdf/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | MDF is intended to be an open source, community-supported standard and associated library of tools for expressing 3 | computational models in a form that allows them to be exchanged between diverse programming languages and execution 4 | environments. The MDF Python API can be used to create or load an MDF model for inspection and validation. It also 5 | includes a basic execution engine for simulating models in the format. However, this is not intended as a general 6 | purpose simulation environment, nor is MDF intended as a programming language. Rather, the primary purpose of the 7 | Python API is to facilitate and validate the exchange of models between existing environments that serve different 8 | communities. Accordingly, these Python tools include bi-directional support for importing to and exporting from 9 | widely-used programming environments in a range of disciplines, and for easily extending these to other environments. 10 | """ 11 | 12 | # Version of the specification for MDF 13 | MODECI_MDF_VERSION = "0.4" 14 | 15 | # Version of the Python module. 16 | __version__ = "0.4.11" 17 | -------------------------------------------------------------------------------- /src/modeci_mdf/functions/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Specifies and implements the MDF the function ontology; a collection of builtin functions that can be used in 3 | MDF :class:`~modeci.mdf.Function` and :class:`~modeci.mdf.Parameter` objects. Code for registering standard functions 4 | are with the ontology is implemented in :mod:`~modeci_mdf.functions.standard`. 
If you want to add functions to the 5 | ontology that can be used during execution, see how this has been done
for chunk:", contents) 27 | 28 | def __repr__(self): 29 | r = [] 30 | keys = self.keys() 31 | i = 0 32 | while i in keys: 33 | r.append("%s" % self[i]) 34 | keys.remove(i) 35 | i += 1 36 | keys.sort() 37 | for k in keys: 38 | if k[0] != "_": 39 | r.append("{}:{}".format(k, self[k])) 40 | return " ".join(r) 41 | 42 | 43 | class Buffer(Model): 44 | def __init__(self): 45 | self.chunk = None 46 | 47 | def set(self, chunk): 48 | try: 49 | self.chunk = Chunk(chunk, self.sch.bound) 50 | except AttributeError: 51 | self.chunk = Chunk(chunk, {}) 52 | 53 | def modify(self, **args): 54 | for k, v in args.items(): 55 | if k.startswith("_"): 56 | k = int(k[1:]) 57 | if k not in self.chunk: 58 | raise Exception(f'No slot "{k}" to modify to "{v}"') 59 | self.chunk[k] = v 60 | self.chunk = self.chunk 61 | 62 | def __getitem__(self, key): 63 | return self.chunk[key] 64 | 65 | def clear(self): 66 | self.chunk = None 67 | 68 | def __eq__(self, other): 69 | return self.chunk == other 70 | 71 | def __hash__(self): 72 | return id(self) 73 | 74 | def __len__(self): 75 | if self.chunk is None: 76 | return 0 77 | return len(self.chunk) 78 | 79 | def isEmpty(self): 80 | return len(self) == 0 81 | -------------------------------------------------------------------------------- /src/modeci_mdf/functions/ddm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | __all__ = ["drift_diffusion_integrator"] 4 | 5 | 6 | def drift_diffusion_integrator( 7 | starting_point: float, 8 | non_decision_time: float, 9 | drift_rate, 10 | threshold: float, 11 | noise: float = 1.0, 12 | dt: float = 0.01, 13 | ) -> float: 14 | """ 15 | Integrates the drift diffusion model for a single trial using and implementation of 16 | the using the Euler-Maruyama method. This is a proof of concept implementation and 17 | is not optimized for speed. 
18 | 19 | Args: 20 | starting_point: The starting point of the particle 21 | non_decision_time: The non-decision time 22 | drift_rate: The deterministic drift rate of the particle 23 | threshold: The threshold to cross, the boundary is assumed to be symmetric. 24 | noise: The standard deviation of the noise 25 | dt: The time step to use for the integration 26 | 27 | Returns: 28 | The time it took to cross the threshold, with the sign indicating the direction 29 | """ 30 | particle = starting_point 31 | 32 | # Integrate the Wiener process until it crosses the threshold 33 | t = 0 34 | while abs(particle) < threshold: 35 | particle = particle + np.random.normal( 36 | loc=drift_rate * dt, scale=noise * np.sqrt(dt) 37 | ) 38 | t = t + 1 39 | 40 | # Return the time it took to cross the threshold, with the sign indicating the direction 41 | # Add the non-decision time to the RT 42 | return ( 43 | non_decision_time + t * dt 44 | if particle > threshold 45 | else -non_decision_time - t * dt 46 | ) 47 | -------------------------------------------------------------------------------- /src/modeci_mdf/interfaces/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implementations of importers and exporters for supported environments; fulfilling the 3 | `hub and spoke model `_ of MDF by 4 | allowing exchange between different modeling environments via MDF. 5 | 6 | Most of these exporters and importers are currently a work and progress. 
7 | """ 8 | -------------------------------------------------------------------------------- /src/modeci_mdf/interfaces/actr/__init__.py: -------------------------------------------------------------------------------- 1 | """Import and export code for `ACT-R `_ models""" 2 | 3 | from .importer import actr_to_mdf 4 | -------------------------------------------------------------------------------- /src/modeci_mdf/interfaces/graphviz/__init__.py: -------------------------------------------------------------------------------- 1 | """Import and export code for `GraphViz `_ models""" 2 | 3 | from .exporter import mdf_to_graphviz 4 | -------------------------------------------------------------------------------- /src/modeci_mdf/interfaces/keras/__init__.py: -------------------------------------------------------------------------------- 1 | """Import and export code for `Keras `_ models""" 2 | 3 | from .importer import keras_to_mdf 4 | -------------------------------------------------------------------------------- /src/modeci_mdf/interfaces/neuroml/__init__.py: -------------------------------------------------------------------------------- 1 | """Import and export code for `NeuroML `_ models""" 2 | 3 | from .exporter import mdf_to_neuroml 4 | -------------------------------------------------------------------------------- /src/modeci_mdf/interfaces/neuroml/syn_definitions.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 8 | 9 | 10 | 11 | 12 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /src/modeci_mdf/interfaces/onnx/__init__.py: -------------------------------------------------------------------------------- 1 | """Import and export code for `ONNX `_ models""" 2 | 3 | from .importer import ( 
4 | onnx_to_mdf, 5 | find_subgraphs, 6 | convert_file, 7 | get_color_for_onnx_category, 8 | get_category_of_onnx_node, 9 | ) 10 | 11 | from .exporter import mdf_to_onnx 12 | -------------------------------------------------------------------------------- /src/modeci_mdf/interfaces/pytorch/__init__.py: -------------------------------------------------------------------------------- 1 | """Import and export code for `PyTorch `_ models""" 2 | 3 | from .importer import pytorch_to_mdf 4 | 5 | from .exporter import mdf_to_pytorch 6 | from . import mod_torch_builtins 7 | -------------------------------------------------------------------------------- /tests/test_helpers.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from modeci_mdf.execution_engine import get_required_variables_from_expression 3 | 4 | 5 | @pytest.mark.parametrize( 6 | "expression, expected_variables", 7 | [ 8 | (0, []), 9 | ((), []), 10 | ("", []), 11 | ("x", ["x"]), 12 | ("x.y", ["x"]), 13 | ("3*x + y", ["x", "y"]), 14 | ("x[y]", ["x", "y"]), 15 | ("x[y[z]]", ["x", "y", "z"]), 16 | ("x[y[z + 1]]", ["x", "y", "z"]), 17 | ("x[y] + z[a]", ["x", "y", "z", "a"]), 18 | ("x[y[z]] + a[b]", ["x", "y", "z", "a", "b"]), 19 | ("x[y[z[1]]] + a[0]", ["x", "y", "z", "a"]), 20 | ("x[y[z] + 1]", ["x", "y", "z"]), 21 | ("x[y[z[1 + a] + b[c]]] + d[0]", ["x", "y", "z", "a", "b", "c", "d"]), 22 | ("[x, 0, 1]", ["x"]), 23 | ], 24 | ) 25 | def test_expression_parsing(expression, expected_variables): 26 | assert set(get_required_variables_from_expression(expression)) == set( 27 | expected_variables 28 | ) 29 | assert set(get_required_variables_from_expression(f"[{expression}]")) == set( 30 | expected_variables 31 | ) 32 | -------------------------------------------------------------------------------- /tests/test_mdf_functions.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import numpy 4 | import pytest 5 | 
6 | import modeci_mdf.functions.standard as stdf 7 | 8 | 9 | @pytest.mark.parametrize( 10 | "name, parameters, expected_result", 11 | [ 12 | ("linear", {"variable0": 1, "slope": 2, "intercept": 3}, 5), 13 | ( 14 | "logistic", 15 | {"variable0": 1, "gain": 2, "bias": 3, "offset": 4}, 16 | 0.9820137900379085, 17 | ), 18 | ( 19 | "exponential", 20 | {"variable0": 1, "scale": 2, "rate": 3, "bias": 4, "offset": 5}, 21 | 2198.266316856917, 22 | ), 23 | ("sin", {"variable0": math.pi / 2, "scale": 2}, 2.0), 24 | ("cos", {"variable0": math.pi, "scale": 2}, -2.0), 25 | ( 26 | "MatMul", 27 | {"A": numpy.array([[1, 2], [3, 4]]), "B": numpy.array([[1, 2], [3, 4]])}, 28 | numpy.array([[7, 10], [15, 22]]), 29 | ), 30 | ("Relu", {"A": 1}, 1), 31 | ("Relu", {"A": -1}, 0), 32 | ("arctan", {"variable0": 1, "scale": 1}, math.atan(1)), 33 | ("arctan", {"variable0": 0, "scale": 2}, 2 * math.atan(0)), 34 | ("arcsin", {"variable0": 0.5, "scale": 1}, math.asin(0.5)), 35 | ("arcsin", {"variable0": 0, "scale": 2}, 2 * math.asin(0)), 36 | ("arccos", {"variable0": 0.5, "scale": 1}, math.acos(0.5)), 37 | ("arccos", {"variable0": 0, "scale": 2}, 2 * math.acos(0)), 38 | ], 39 | ) 40 | def test_std_functions(name, expected_result, parameters): 41 | try: 42 | assert stdf.mdf_functions[name]["function"](**parameters) == expected_result 43 | except ValueError: 44 | assert numpy.array_equal( 45 | stdf.mdf_functions[name]["function"](**parameters), expected_result 46 | ) 47 | --------------------------------------------------------------------------------