├── .github └── workflows │ ├── ebrains.yml │ └── full-test.yml ├── .gitignore ├── AUTHORS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── README.rst ├── changelog ├── codemeta.json ├── doc ├── Makefile ├── README ├── _static │ └── custom.css ├── api_reference.txt ├── architecture_of_PyNN.svg ├── backends.txt ├── backends │ ├── Brian.txt │ ├── NEST.txt │ ├── NEURON.txt │ ├── NeuroML.txt │ ├── NineML.txt │ └── neuromorphic.txt ├── build_examples.py ├── building_networks.txt ├── conf.py ├── connections.txt ├── contributors.txt ├── data_handling.txt ├── descriptions.txt ├── developers │ ├── adding_backend.txt │ ├── bug_reports.txt │ ├── contributing.txt │ └── governance.txt ├── developers_guide.txt ├── download.txt ├── examples.txt ├── examples │ ├── Izhikevich.txt │ ├── VAbenchmarks.txt │ ├── cell_type_demonstration.txt │ ├── current_injection.txt │ ├── random_distributions.txt │ ├── random_numbers.txt │ ├── simple_STDP.txt │ ├── small_network.txt │ ├── stochastic_deterministic_comparison.txt │ ├── stochastic_synapses.txt │ ├── synaptic_input.txt │ ├── tsodyksmarkram.txt │ └── varying_poisson.txt ├── images │ ├── ac_source.png │ ├── continuous_time_spiking.png │ ├── dc_source.png │ ├── examples │ │ ├── Izhikevich_nest_np1_20170505-150315.png │ │ ├── VAbenchmarks_CUBA_20170505-150538.png │ │ ├── cell_type_demonstration_nest_20170505-150320.png │ │ ├── current_injection_neuron_20170505-150317.png │ │ ├── random_distributions.png │ │ ├── random_numbers_neuron_20170505-150323.png │ │ ├── simple_stdp_neuron_20170505-150331.png │ │ ├── small_network_nest_np1_20170505-150334.png │ │ ├── stochastic_comparison_neuron_20170505-150418.png │ │ ├── stochastic_synapses__nest_20170505-150345.png │ │ ├── synaptic_input_neuron_20170505-150337.png │ │ ├── tsodyksmarkram_nest_20170505-150340.png │ │ └── varying_poisson_neuron_20170505-150343.png │ ├── neo_example.png │ ├── noise_source.png │ ├── ongridoffgrid.png │ ├── release_0.8b1_example.png │ ├── reset_example.png │ ├── step_source.png │ └── tmp.png ├── import_export.txt ├── index.txt ├── injecting_current.txt ├── installation.txt ├── introduction.txt ├── logging.txt ├── make.bat ├── mc_aims.txt ├── mc_api.txt ├── mc_examples.txt ├── mc_installation.txt ├── neurons.txt ├── nineml.txt ├── parallel.txt ├── parameters.txt ├── publications.txt ├── pyNN_icon.ico ├── pyNN_logo.png ├── pyplots │ ├── ac_source.py │ ├── continuous_time_spiking.py │ ├── dc_source.py │ ├── neo_example.py │ ├── noise_source.py │ ├── plot_current_source.py │ ├── plot_helper.py │ ├── reset_example.py │ └── step_source.py ├── quickstart.txt ├── random_numbers.txt ├── recording.txt ├── reference │ ├── connectors.txt │ ├── electrodes.txt │ ├── neuronmodels.txt │ ├── parameters.txt │ ├── plasticitymodels.txt │ ├── populations.txt │ ├── projections.txt │ ├── random.txt │ ├── simulationcontrol.txt │ ├── space.txt │ └── utility.txt ├── release_notes.txt ├── releases │ ├── 0.10.0.txt │ ├── 0.10.1.txt │ ├── 0.11.0.txt │ ├── 0.12.0.txt │ ├── 0.12.1.txt │ ├── 0.12.2.txt │ ├── 0.12.3.txt │ ├── 0.12.4.txt │ ├── 0.6.txt │ ├── 0.7.txt │ ├── 0.8-alpha-1.txt │ ├── 0.8-alpha-2.txt │ ├── 0.8-beta-1.txt │ ├── 0.8-beta-2.txt │ ├── 0.8.0-rc-1.txt │ ├── 0.8.0.txt │ ├── 0.8.1.txt │ ├── 0.8.2.txt │ ├── 0.8.3.txt │ ├── 0.9.0.txt │ ├── 0.9.1.txt │ ├── 0.9.2.txt │ ├── 0.9.3.txt │ ├── 0.9.4.txt │ ├── 0.9.5.txt │ └── 0.9.6.txt ├── roadmap.txt ├── simulation_control.txt ├── space.txt ├── standardmodels.txt ├── testdocs.py └── units.txt ├── examples ├── HH_cond_exp2.py ├── Izhikevich.py ├── 
Potjans2014 │ ├── README.txt │ ├── connectivity.py │ ├── microcircuit.py │ ├── network.py │ ├── network_params.py │ ├── plotting.py │ ├── run_microcircuit.py │ ├── scaling.py │ ├── sim_params.py │ └── validation_microcircuit.py ├── README ├── StepCurrentSource.py ├── VAbenchmarks.py ├── brunel.py ├── cell_type_demonstration.py ├── connections.py ├── current_injection.py ├── distrib_example.py ├── gif_neuron.py ├── iaf_sfa_relref │ ├── README │ ├── backend_comparison.py │ ├── iaf_sfa_network_INH_GAMMA.py │ ├── iaf_sfa_network_STATIC.py │ ├── mcb.py │ ├── myFigure_expected.pdf │ └── standard_neurons.yaml ├── inhomogeneous_network.py ├── mc │ ├── NMLCL000641 │ │ ├── CaDynamics_E2_NML2.nml │ │ ├── Ca_HVA.channel.nml │ │ ├── Ca_LVAst.channel.nml │ │ ├── Ih.channel.nml │ │ ├── Im.channel.nml │ │ ├── K_Pst.channel.nml │ │ ├── K_Tst.channel.nml │ │ ├── NaTa_t.channel.nml │ │ ├── NaTs2_t.channel.nml │ │ ├── Nap_Et2.channel.nml │ │ ├── SK_E2.channel.nml │ │ ├── SKv3_1.channel.nml │ │ ├── cADpyr229_L23_PC_c2e79db05a_0_0.cell.nml │ │ ├── neuroml_cell.py │ │ └── pas.channel.nml │ ├── current_injection_mc.py │ ├── current_injection_mc_swc.py │ ├── mc_network.py │ └── two_cells_mc.py ├── monolith_vs_composed.py ├── multi_synapse.py ├── multiquantal_synapses.py ├── nineml_brunel.py ├── nineml_neuron.py ├── nrn_artificial_cell.py ├── parameter_changes.py ├── random_distributions.py ├── random_numbers.py ├── simpleRandomNetwork.py ├── simpleRandomNetwork_csa.py ├── simple_STDP.py ├── small_network.py ├── specific_network.py ├── stdp_network.py ├── stochastic_deterministic_comparison.py ├── stochastic_synapses.py ├── stochastic_tsodyksmarkram.py ├── synaptic_input.py ├── tools │ ├── VAbenchmark_graphs.py │ ├── comparison_plot.py │ ├── plot_results.py │ └── run_all_examples.py ├── tsodyksmarkram.py ├── update_spike_source_array.py └── varying_poisson.py ├── pyNN ├── __init__.py ├── arbor │ ├── __init__.py │ ├── cells.py │ ├── control.py │ ├── morphology.py │ ├── nmodl │ │ ├── expsyn.mod │ │ ├── kdr.mod │ │ ├── leak.mod │ │ ├── na.mod │ │ ├── pas.mod │ │ └── pas2.mod │ ├── populations.py │ ├── procedural_api.py │ ├── projections.py │ ├── recording.py │ ├── simulator.py │ └── standardmodels.py ├── brian2 │ ├── __init__.py │ ├── cells.py │ ├── control.py │ ├── populations.py │ ├── procedural_api.py │ ├── projections.py │ ├── recording.py │ ├── simulator.py │ └── standardmodels │ │ ├── __init__.py │ │ ├── cells.py │ │ ├── electrodes.py │ │ ├── receptors.py │ │ └── synapses.py ├── common │ ├── __init__.py │ ├── control.py │ ├── populations.py │ ├── procedural_api.py │ └── projections.py ├── connectors.py ├── core.py ├── descriptions │ ├── __init__.py │ └── templates │ │ ├── cheetah │ │ ├── assembly_default.txt │ │ ├── modeltype_default.txt │ │ ├── population_default.txt │ │ ├── populationview_default.txt │ │ ├── projection_default.txt │ │ ├── structure_default.txt │ │ └── synapsedynamics_default.txt │ │ ├── jinja2 │ │ ├── assembly_default.txt │ │ ├── modeltype_default.txt │ │ ├── population_default.txt │ │ ├── populationview_default.txt │ │ ├── projection_default.txt │ │ ├── structure_default.txt │ │ └── synapsedynamics_default.txt │ │ └── string │ │ ├── assembly_default.txt │ │ ├── modeltype_default.txt │ │ ├── population_default.txt │ │ ├── populationview_default.txt │ │ ├── projection_default.txt │ │ ├── structure_default.txt │ │ └── synapsedynamics_default.txt ├── errors.py ├── hardware │ ├── __init__.py │ └── auxiliary.py ├── mock │ ├── __init__.py │ ├── control.py │ ├── populations.py │ ├── procedural_api.py 
│ ├── projections.py │ ├── recording.py │ ├── simulator.py │ └── standardmodels.py ├── models.py ├── morphology.py ├── multisim.py ├── nest │ ├── __init__.py │ ├── cells.py │ ├── connectors.py │ ├── control.py │ ├── conversion.py │ ├── electrodes.py │ ├── extensions │ │ ├── CMakeLists.txt │ │ ├── pynn_extensions.cpp │ │ ├── simple_stochastic_synapse.h │ │ ├── stochastic_stp_synapse.h │ │ └── stochastic_stp_synapse_impl.h │ ├── nineml.py │ ├── populations.py │ ├── procedural_api.py │ ├── projections.py │ ├── random.py │ ├── recording.py │ ├── simulator.py │ ├── standardmodels │ │ ├── __init__.py │ │ ├── cells.py │ │ ├── electrodes.py │ │ ├── receptors.py │ │ └── synapses.py │ └── synapses.py ├── network.py ├── neuroml │ ├── __init__.py │ ├── populations.py │ ├── projections.py │ ├── recording.py │ ├── simulator.py │ └── standardmodels │ │ ├── __init__.py │ │ ├── cells.py │ │ ├── electrodes.py │ │ └── synapses.py ├── neuron │ ├── __init__.py │ ├── cells.py │ ├── connectors.py │ ├── control.py │ ├── morphology.py │ ├── neuroml.py │ ├── nineml.py │ ├── nmodl │ │ ├── __init__.py │ │ ├── adexp.mod │ │ ├── alphaisyn.mod │ │ ├── alphasyn.mod │ │ ├── expisyn.mod │ │ ├── gammastim.mod │ │ ├── gap.mod │ │ ├── gif.mod │ │ ├── gsfa_grr.mod │ │ ├── hh_traub.mod │ │ ├── izhikevich.mod │ │ ├── netstim2.mod │ │ ├── poisson_stim_refractory.mod │ │ ├── quantal_stp.mod │ │ ├── refrac.mod │ │ ├── reset.mod │ │ ├── stdwa_guetig.mod │ │ ├── stdwa_softlimits.mod │ │ ├── stdwa_songabbott.mod │ │ ├── stdwa_symm.mod │ │ ├── stdwa_vogels2011.mod │ │ ├── stochastic_synapse.mod │ │ ├── stochastic_tsodyksmarkram.mod │ │ ├── tmgsyn.mod │ │ ├── tmisyn.mod │ │ ├── tsodyksmarkram.mod │ │ └── vecstim.mod │ ├── populations.py │ ├── procedural_api.py │ ├── projections.py │ ├── random.py │ ├── recording.py │ ├── simulator.py │ └── standardmodels │ │ ├── __init__.py │ │ ├── cells.py │ │ ├── electrodes.py │ │ ├── ion_channels.py │ │ ├── receptors.py │ │ └── synapses.py ├── nineml │ ├── __init__.py │ ├── cells.py │ ├── connectors.py │ ├── populations.py │ ├── projections.py │ ├── read.py │ ├── recording.py │ ├── simulator.py │ ├── standardmodels.py │ ├── synapses.py │ └── utility.py ├── parameters.py ├── random.py ├── recording │ ├── __init__.py │ └── files.py ├── serialization │ ├── __init__.py │ └── sonata.py ├── space.py ├── spiNNaker.py ├── standardmodels │ ├── __init__.py │ ├── base.py │ ├── cells.py │ ├── electrodes.py │ ├── ion_channels.py │ ├── receptors.py │ └── synapses.py └── utility │ ├── __init__.py │ ├── build.py │ ├── plotting.py │ ├── progress_bar.py │ ├── script_tools.py │ └── timer.py ├── pyproject.toml ├── setup.py └── test ├── README ├── benchmarks ├── Benchmark_PyNN-0.8dev_FixedNumberPost.py ├── README.txt ├── all_to_all_network.param ├── connectors_benchmark.py ├── ddpc.py ├── neurons_no_recording.param ├── neurons_with_recording.param ├── plot_figure.py └── simple_network.py ├── system ├── __init__.py ├── scenarios │ ├── __init__.py │ ├── fixtures.py │ ├── test__simulation_control.py │ ├── test_cell_types.py │ ├── test_connection_handling.py │ ├── test_connectors.py │ ├── test_electrodes.py │ ├── test_issue231.py │ ├── test_issue274.py │ ├── test_parameter_handling.py │ ├── test_procedural_api.py │ ├── test_recording.py │ ├── test_scenario1.py │ ├── test_scenario2.py │ ├── test_scenario3.py │ ├── test_scenario4.py │ ├── test_scenario5.py │ ├── test_synapse_types.py │ └── test_ticket166.py ├── test_brian2.py ├── test_hardware_brainscales.py ├── test_nest.py ├── test_neuroml.py ├── test_neuron.py └── 
test_serialization.py └── unittests ├── __init__.py ├── mocks.py ├── test_assembly.py ├── test_brian.py ├── test_connectors_parallel.py ├── test_connectors_serial.py ├── test_core.py ├── test_descriptions.py ├── test_files.py ├── test_idmixin.py ├── test_lowlevelapi.py ├── test_morphology.py ├── test_nest.py ├── test_neuron.py ├── test_parameters.py ├── test_population.py ├── test_populationview.py ├── test_projection.py ├── test_random.py ├── test_recording.py ├── test_simulation_control.py ├── test_space.py ├── test_standardmodels.py └── test_utility_functions.py /.github/workflows/ebrains.yml: -------------------------------------------------------------------------------- 1 | name: Mirror to EBRAINS 2 | 3 | # Configure the events that are going to trigger the automated update of the mirror 4 | on: 5 | push: 6 | branches: [ master ] 7 | 8 | # Configure what will be updated 9 | jobs: 10 | # set the job name 11 | to_ebrains: 12 | runs-on: ubuntu-latest 13 | steps: 14 | # this task will push the master branch of the source_repo (github) to the 15 | # destination_repo (ebrains gitlab) 16 | - name: syncmaster 17 | uses: wei/git-sync@v3 18 | with: 19 | source_repo: https://github.com/NeuralEnsemble/PyNN 20 | source_branch: "master" 21 | destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/NeuralEnsemble/PyNN.git" 22 | destination_branch: "main" 23 | # this task will push all tags from the source_repo to the destination_repo 24 | - name: synctags 25 | uses: wei/git-sync@v3 26 | with: 27 | source_repo: https://github.com/NeuralEnsemble/PyNN 28 | source_branch: "refs/tags/*" 29 | destination_repo: "https://ghpusher:${{ secrets.EBRAINS_GITLAB_ACCESS_TOKEN }}@gitlab.ebrains.eu/NeuralEnsemble/PyNN.git" 30 | destination_branch: "refs/tags/*" 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /examples/PyNN_NeuroML2_Export.nml 2 | # Editor temporary/working/backup files 3 | .#* 4 | [#]*# 5 | *~ 6 | *$ 7 | *.bak 8 | *.kdev4 9 | *.komodoproject 10 | *.orig 11 | .project 12 | .pydevproject 13 | .spyderproject 14 | .settings 15 | *.tmp* 16 | *.swp 17 | .idea 18 | .vscode 19 | 20 | # Compiled source 21 | *.py[ocd] 22 | x86_64 23 | i386 24 | i686 25 | arm64 26 | _build 27 | 28 | # Python files 29 | build 30 | dist 31 | MANIFEST 32 | doc/_build 33 | # Egg metadata 34 | *.egg-info 35 | *.egg 36 | *.EGG 37 | *.EGG-INFO 38 | *.pkl 39 | 40 | # OS generated files 41 | .directory 42 | .gdb_history 43 | .DS_Store? 44 | Icon? 
45 | Thumbs.db 46 | 47 | # coverage files 48 | .coverage 49 | cover 50 | 51 | # files from running examples 52 | examples/Results 53 | examples/tools/Results 54 | examples/nineml_mechanisms 55 | examples/*.dat 56 | examples/*.svg 57 | /examples/LEMS_Sim_PyNN_NeuroML2_Export.xml 58 | /examples/Potjans2014/*.svg 59 | /examples/Potjans2014/*.mod 60 | /examples/Potjans2014/*.png 61 | /examples/Potjans2014/results/* 62 | /examples/Potjans2014/*.nml 63 | /examples/Potjans2014/LEMS*.xml 64 | /examples/Potjans2014/*netpyne.py 65 | /examples/Potjans2014/*.ini 66 | /examples/Potjans2014/*.h5 67 | /examples/Potjans2014/*.pov 68 | /examples/Potjans2014/*main.json 69 | /examples/Potjans2014/*.so 70 | /test/system/LEMS_Sim_Test0.xml 71 | /test/system/Test0.net.nml 72 | /examples/Potjans2014/*.spikes 73 | /examples/Potjans2014/*.dat 74 | /examples/Potjans2014/*_nrn.py 75 | /examples/PyNN_NeuroML2_Export.net.nml 76 | /examples/input.spikes 77 | /examples/output.spikes 78 | /examples/Potjans2014/clean.sh 79 | /test/benchmarks/*.h5 80 | 81 | # Other 82 | *.btr 83 | *.log 84 | tmp 85 | /pynnpg.sh 86 | /cleanpynn.sh 87 | doc/logos 88 | docker_*.eggs/ 89 | /test/system/tmp_serialization_test 90 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | See http://neuralensemble.org/docs/PyNN/developers/contributing.html 2 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include pyNN/arbor/nmodl/*.mod 2 | include pyNN/neuron/nmodl/*.mod 3 | include pyNN/nest/extensions/*.h 4 | include pyNN/nest/extensions/*.cpp 5 | include pyNN/nest/extensions/CMakeLists.txt 6 | include pyNN/nest/extensions/sli/* 7 | include pyNN/descriptions/templates/*/*.txt 8 | include test/system/*.py 9 | include test/system/scenarios/*.py 10 | include test/unittests/*.py 11 | include examples/*.py 12 | include doc/*.txt 13 | include doc/*/*.txt 14 | include LICENSE 15 | include AUTHORS 16 | include README.rst 17 | include changelog 18 | include requirements.txt 19 | -------------------------------------------------------------------------------- /doc/README: -------------------------------------------------------------------------------- 1 | =========================== 2 | Notes on PyNN documentation 3 | =========================== 4 | 5 | PyNN documentation is generated using Sphinx_. 6 | 7 | To build the documentation in HTML format, run:: 8 | 9 | $ make html 10 | 11 | Many of the files contain examples of interactive Python sessions. The validity of this code can be tested by running:: 12 | 13 | $ make doctest 14 | 15 | To copy the built docs to a checkout of the NeuralEnsemble server sources: 16 | 17 | $ rsync -avz doc/_build/html/ ~/dev/web/neuralensemble.github.io/docs/PyNN/ 18 | 19 | .. 
_Sphinx: http://sphinx.pocoo.org/ 20 | -------------------------------------------------------------------------------- /doc/_static/custom.css: -------------------------------------------------------------------------------- 1 | #content ul:not(.search) { 2 | list-style-type: disc; 3 | margin-left: 1.5rem; 4 | margin-top: 0.8rem; 5 | } 6 | 7 | #content ul:not(.search) p, 8 | #content ul:not(.search)>li { 9 | margin-top: 0.8rem 10 | } 11 | 12 | #content section>p { 13 | line-height: 1.75rem; 14 | margin-top: 0.8rem 15 | } 16 | 17 | .contents, 18 | .toctree-wrapper { 19 | font-size: 1rem; 20 | line-height: 1.25rem 21 | } 22 | 23 | .contents ul li a.reference, 24 | .toctree-wrapper ul li a.reference { 25 | color: hsl(var(--foreground)) !important; 26 | display: inline-block; 27 | font-weight: 600 !important; 28 | text-decoration-line: none !important; 29 | transition-duration: .15s; 30 | transition-property: color, background-color, border-color, text-decoration-color, fill, stroke; 31 | transition-timing-function: cubic-bezier(.4, 0, .2, 1) 32 | } -------------------------------------------------------------------------------- /doc/api_reference.txt: -------------------------------------------------------------------------------- 1 | ============= 2 | API reference 3 | ============= 4 | 5 | .. toctree:: 6 | :maxdepth: 3 7 | 8 | reference/populations 9 | reference/connectors 10 | reference/projections 11 | reference/neuronmodels 12 | reference/plasticitymodels 13 | reference/electrodes 14 | reference/simulationcontrol 15 | reference/random 16 | reference/parameters 17 | reference/space 18 | reference/utility 19 | -------------------------------------------------------------------------------- /doc/backends.txt: -------------------------------------------------------------------------------- 1 | ======== 2 | Backends 3 | ======== 4 | 5 | The PyNN API provides a uniform interface to different simulators, but 6 | nevertheless each simulator has features that are not available in other 7 | simulators, and we aim to make these features accessible, as much as possible, 8 | from PyNN. 9 | 10 | For each simulator backend, this section presents the configuration options 11 | specific to that backend and explains how to use "native" neuron and synapse 12 | models within the PyNN framework. 13 | 14 | .. toctree:: 15 | :maxdepth: 2 16 | 17 | backends/NEURON 18 | backends/NEST 19 | backends/Brian 20 | backends/NeuroML 21 | backends/NineML 22 | backends/neuromorphic 23 | -------------------------------------------------------------------------------- /doc/backends/Brian.txt: -------------------------------------------------------------------------------- 1 | ===== 2 | Brian 3 | ===== 4 | 5 | -------------------------------------------------------------------------------- /doc/backends/NeuroML.txt: -------------------------------------------------------------------------------- 1 | ======= 2 | NeuroML 3 | ======= 4 | 5 | A subset of models specified in PyNN can be exported into NeuroML 2 format. 6 | 7 | See https://github.com/NeuroML/NeuroML2/issues/73 for latest status. 
8 | -------------------------------------------------------------------------------- /doc/backends/NineML.txt: -------------------------------------------------------------------------------- 1 | ====== 2 | NineML 3 | ====== 4 | 5 | The NineML backend is described in :doc:`../nineml` 6 | -------------------------------------------------------------------------------- /doc/backends/neuromorphic.txt: -------------------------------------------------------------------------------- 1 | ===================== 2 | Neuromorphic hardware 3 | ===================== 4 | -------------------------------------------------------------------------------- /doc/building_networks.txt: -------------------------------------------------------------------------------- 1 | ================= 2 | Building networks 3 | ================= 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | neurons 9 | connections 10 | space 11 | mc_api 12 | -------------------------------------------------------------------------------- /doc/contributors.txt: -------------------------------------------------------------------------------- 1 | ================================= 2 | Contributors, licence and funding 3 | ================================= 4 | 5 | .. include:: ../AUTHORS 6 | 7 | 8 | Licence 9 | ======= 10 | 11 | PyNN is freely available under the CeCILL v2 license, which is equivalent to, and 12 | compatible with, the GNU GPL license, but conforms to French law (and is also 13 | perfectly suited to international projects) - see 14 | ``_ for more information. The choice 15 | of GPL-equivalence was made to match the licenses of other widely-used simulation 16 | software in computational neuroscience, such as NEURON (GPL), NEST (GPL) and Brian (CeCILL). 17 | 18 | If you are interested in using PyNN, but the choice of licence is a problem for you, please contact us to discuss dual-licensing. 19 | 20 | 21 | .. centered:: LICENSE AGREEMENT 22 | 23 | .. include:: ../LICENSE 24 | 25 | 26 | Funding 27 | ======= 28 | 29 | Development of PyNN has been partially funded by the European Union Sixth Framework Program (FP6) under 30 | grant agreement FETPI-015879 (FACETS), by the European Union Seventh Framework Program (FP7/2007­-2013) 31 | under grant agreements no. 269921 (BrainScaleS) and no. 604102 (HBP), 32 | and by the European Union’s Horizon 2020 Framework Programme for 33 | Research and Innovation under the Specific Grant Agreements No. 720270 (Human Brain Project SGA1) 34 | , No. 785907 (Human Brain Project SGA2) and No. 945539 (Human Brain Project SGA3). 35 | -------------------------------------------------------------------------------- /doc/developers/bug_reports.txt: -------------------------------------------------------------------------------- 1 | ================================ 2 | Bug reports and feature requests 3 | ================================ 4 | 5 | If you find a bug or would like to add a new feature to PyNN, please go to 6 | https://github.com/NeuralEnsemble/PyNN/issues/. First check that there is not an 7 | existing ticket for your bug or request, then click on "New issue" to create a 8 | new ticket (you will need a GitHub account, but creating one is simple and painless). 
9 | 10 | -------------------------------------------------------------------------------- /doc/developers_guide.txt: -------------------------------------------------------------------------------- 1 | 2 | ================= 3 | Developers' guide 4 | ================= 5 | 6 | This guide contains information about contributing to PyNN development, and aims 7 | to explain the overall architecture and some of the internal details of the 8 | PyNN codebase. 9 | 10 | PyNN is open-source software, with a community-based development model: 11 | contributions from users are welcomed, and the direction that PyNN development 12 | should take in the future is determined by the needs of its users. 13 | 14 | There are several ways to contribute to PyNN: 15 | 16 | * reporting bugs, errors and other mistakes in the code or documentation; 17 | * making suggestions for improvements; 18 | * fixing bugs and other mistakes; 19 | * adding or maintaining a simulator backend; 20 | * major refactoring to improve performance, reduce code complexity, or both. 21 | * becoming a maintainer 22 | 23 | The following sections contain guidelines for each of these. 24 | 25 | .. toctree:: 26 | :maxdepth: 3 27 | 28 | developers/bug_reports 29 | developers/contributing 30 | developers/governance 31 | 32 | 33 | .. developers/adding_backend 34 | 35 | .. _`NeuralEnsemble Google Group`: http://groups.google.com/group/neuralensemble 36 | -------------------------------------------------------------------------------- /doc/download.txt: -------------------------------------------------------------------------------- 1 | ========= 2 | Downloads 3 | ========= 4 | 5 | Source distributions 6 | -------------------- 7 | 8 | The `latest stable version of PyNN`_ (0.11.0) may be downloaded from the 9 | `Python Package Index`_. This is recommended for 10 | anyone using PyNN for the first time. 11 | 12 | If you need support for a previous version of the API, the packages can be downloaded from 13 | the links below. 14 | 15 | Older versions: 16 | 17 | * `0.10.1 `_ 18 | * `0.9.6 `_ 19 | * `0.8.0 `_ 20 | * `0.7.5 `_ 21 | * `0.6.0 `_ 22 | * `0.5.0 `_ 23 | 24 | 25 | Latest source code from GitHub 26 | ------------------------------ 27 | 28 | See :doc:`developers/contributing`. 29 | 30 | 31 | .. _`latest stable version of PyNN`: https://pypi.python.org/pypi/PyNN/ 32 | .. _`Python Package Index`: https://pypi.python.org/pypi/PyNN/ 33 | -------------------------------------------------------------------------------- /doc/examples.txt: -------------------------------------------------------------------------------- 1 | ======== 2 | Examples 3 | ======== 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | examples/Izhikevich 9 | examples/current_injection 10 | examples/cell_type_demonstration 11 | examples/random_numbers 12 | examples/random_distributions 13 | examples/simple_STDP 14 | examples/small_network 15 | examples/synaptic_input 16 | examples/tsodyksmarkram 17 | examples/varying_poisson 18 | examples/stochastic_synapses 19 | examples/stochastic_deterministic_comparison 20 | examples/VAbenchmarks 21 | -------------------------------------------------------------------------------- /doc/examples/Izhikevich.txt: -------------------------------------------------------------------------------- 1 | A selection of Izhikevich neurons 2 | ================================= 3 | 4 | .. image:: ../images/examples/Izhikevich_nest_np1_20170505-150315.png 5 | 6 | .. 
literalinclude:: ../../examples/Izhikevich.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/VAbenchmarks.txt: -------------------------------------------------------------------------------- 1 | Balanced network of excitatory and inhibitory neurons 2 | ===================================================== 3 | 4 | .. image:: ../images/examples/VAbenchmarks_CUBA_20170505-150538.png 5 | 6 | .. literalinclude:: ../../examples/VAbenchmarks.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/cell_type_demonstration.txt: -------------------------------------------------------------------------------- 1 | A demonstration of the responses of different standard neuron models to current injection 2 | ========================================================================================= 3 | 4 | .. image:: ../images/examples/cell_type_demonstration_nest_20170505-150320.png 5 | 6 | .. literalinclude:: ../../examples/cell_type_demonstration.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/current_injection.txt: -------------------------------------------------------------------------------- 1 | Injecting time-varying current into a cell 2 | ========================================== 3 | 4 | .. image:: ../images/examples/current_injection_neuron_20170505-150317.png 5 | 6 | .. literalinclude:: ../../examples/current_injection.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/random_distributions.txt: -------------------------------------------------------------------------------- 1 | Illustration of the different standard random distributions and different random number generators 2 | ================================================================================================== 3 | 4 | .. image:: ../images/examples/random_distributions.png 5 | 6 | .. literalinclude:: ../../examples/random_distributions.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/random_numbers.txt: -------------------------------------------------------------------------------- 1 | An example to illustrate random number handling in PyNN 2 | ======================================================= 3 | 4 | .. image:: ../images/examples/random_numbers_neuron_20170505-150323.png 5 | 6 | .. literalinclude:: ../../examples/random_numbers.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/simple_STDP.txt: -------------------------------------------------------------------------------- 1 | A very simple example of using STDP 2 | =================================== 3 | 4 | .. image:: ../images/examples/simple_stdp_neuron_20170505-150331.png 5 | 6 | .. literalinclude:: ../../examples/simple_STDP.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/small_network.txt: -------------------------------------------------------------------------------- 1 | Small network created with the Population and Projection classes 2 | ================================================================ 3 | 4 | .. image:: ../images/examples/small_network_nest_np1_20170505-150334.png 5 | 6 | .. 
literalinclude:: ../../examples/small_network.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/stochastic_deterministic_comparison.txt: -------------------------------------------------------------------------------- 1 | Example of facilitating and depressing synapses in deterministic and stochastic versions 2 | ======================================================================================== 3 | 4 | .. image:: ../images/examples/stochastic_comparison_neuron_20170505-150418.png 5 | 6 | .. literalinclude:: ../../examples/stochastic_deterministic_comparison.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/stochastic_synapses.txt: -------------------------------------------------------------------------------- 1 | Example of simple stochastic synapses 2 | ===================================== 3 | 4 | .. image:: ../images/examples/stochastic_synapses__nest_20170505-150345.png 5 | 6 | .. literalinclude:: ../../examples/stochastic_synapses.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/synaptic_input.txt: -------------------------------------------------------------------------------- 1 | A demonstration of the responses of different standard neuron models to synaptic input 2 | ====================================================================================== 3 | 4 | .. image:: ../images/examples/synaptic_input_neuron_20170505-150337.png 5 | 6 | .. literalinclude:: ../../examples/synaptic_input.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/tsodyksmarkram.txt: -------------------------------------------------------------------------------- 1 | Example of depressing and facilitating synapses 2 | =============================================== 3 | 4 | .. image:: ../images/examples/tsodyksmarkram_nest_20170505-150340.png 5 | 6 | .. literalinclude:: ../../examples/tsodyksmarkram.py 7 | 8 | -------------------------------------------------------------------------------- /doc/examples/varying_poisson.txt: -------------------------------------------------------------------------------- 1 | A demonstration of the use of callbacks to vary the rate of a SpikeSourcePoisson 2 | ================================================================================ 3 | 4 | .. image:: ../images/examples/varying_poisson_neuron_20170505-150343.png 5 | 6 | .. 
literalinclude:: ../../examples/varying_poisson.py 7 | 8 | -------------------------------------------------------------------------------- /doc/images/ac_source.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/ac_source.png -------------------------------------------------------------------------------- /doc/images/continuous_time_spiking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/continuous_time_spiking.png -------------------------------------------------------------------------------- /doc/images/dc_source.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/dc_source.png -------------------------------------------------------------------------------- /doc/images/examples/Izhikevich_nest_np1_20170505-150315.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/Izhikevich_nest_np1_20170505-150315.png -------------------------------------------------------------------------------- /doc/images/examples/VAbenchmarks_CUBA_20170505-150538.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/VAbenchmarks_CUBA_20170505-150538.png -------------------------------------------------------------------------------- /doc/images/examples/cell_type_demonstration_nest_20170505-150320.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/cell_type_demonstration_nest_20170505-150320.png -------------------------------------------------------------------------------- /doc/images/examples/current_injection_neuron_20170505-150317.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/current_injection_neuron_20170505-150317.png -------------------------------------------------------------------------------- /doc/images/examples/random_distributions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/random_distributions.png -------------------------------------------------------------------------------- /doc/images/examples/random_numbers_neuron_20170505-150323.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/random_numbers_neuron_20170505-150323.png -------------------------------------------------------------------------------- /doc/images/examples/simple_stdp_neuron_20170505-150331.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/simple_stdp_neuron_20170505-150331.png -------------------------------------------------------------------------------- /doc/images/examples/small_network_nest_np1_20170505-150334.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/small_network_nest_np1_20170505-150334.png -------------------------------------------------------------------------------- /doc/images/examples/stochastic_comparison_neuron_20170505-150418.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/stochastic_comparison_neuron_20170505-150418.png -------------------------------------------------------------------------------- /doc/images/examples/stochastic_synapses__nest_20170505-150345.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/stochastic_synapses__nest_20170505-150345.png -------------------------------------------------------------------------------- /doc/images/examples/synaptic_input_neuron_20170505-150337.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/synaptic_input_neuron_20170505-150337.png -------------------------------------------------------------------------------- /doc/images/examples/tsodyksmarkram_nest_20170505-150340.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/tsodyksmarkram_nest_20170505-150340.png -------------------------------------------------------------------------------- /doc/images/examples/varying_poisson_neuron_20170505-150343.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/examples/varying_poisson_neuron_20170505-150343.png -------------------------------------------------------------------------------- /doc/images/neo_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/neo_example.png -------------------------------------------------------------------------------- /doc/images/noise_source.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/noise_source.png -------------------------------------------------------------------------------- /doc/images/ongridoffgrid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/ongridoffgrid.png -------------------------------------------------------------------------------- /doc/images/release_0.8b1_example.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/release_0.8b1_example.png -------------------------------------------------------------------------------- /doc/images/reset_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/reset_example.png -------------------------------------------------------------------------------- /doc/images/step_source.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/step_source.png -------------------------------------------------------------------------------- /doc/images/tmp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/images/tmp.png -------------------------------------------------------------------------------- /doc/import_export.txt: -------------------------------------------------------------------------------- 1 | ============================================= 2 | Importing from and exporting to other formats 3 | ============================================= 4 | 5 | Other formats for representing spiking network models are also available. 6 | 7 | PyNN currently supports NeuroML_, NineML_ and SONATA_. 8 | 9 | NeuroML 10 | ------- 11 | 12 | See section on :doc:`backends/NeuroML`. 13 | 14 | NineML 15 | ------ 16 | 17 | See section on :doc:`nineml`. 18 | 19 | .. _sec-sonata: 20 | 21 | SONATA 22 | ------ 23 | 24 | SONATA_ is a data format for representing/storing data-driven spiking neuronal network models, 25 | experimental protocols (injecting spikes, currents) and simulation outputs. 26 | 27 | In the network representation, all connections are represented explicitly, as in PyNN's 28 | :class:`FromFileConnector` and :class:`FromListConnector`. 29 | 30 | A PyNN model/simulation script can be exported in SONATA format using: 31 | 32 | .. code-block:: python 33 | 34 | from pyNN.network import Network 35 | from pyNN.serialization import export_to_sonata 36 | 37 | sim.setup() 38 | ... 39 | # create populations, projections, etc. 40 | ... 41 | 42 | # add populations and projections to a Network 43 | net = Network(pop1, pop2, ...., prj1, prj2, ...) 44 | 45 | export_to_sonata(net, "sonata_output_dir") 46 | 47 | 48 | A SONATA model/simulation can be read and executed through PyNN provided the cell types 49 | used in the model are compatible with PyNN, i.e. they must be point neurons. 50 | (SONATA also supports biophysically/morphologically detailed neuron models). 51 | 52 | .. code-block:: python 53 | 54 | from pyNN.serialization import import_from_sonata, load_sonata_simulation_plan 55 | import pyNN.neuron as sim 56 | 57 | simulation_plan = load_sonata_simulation_plan("simulation_config.json") 58 | simulation_plan.setup(sim) 59 | net = import_from_sonata("circuit_config.json", sim) 60 | simulation_plan.execute(net) 61 | 62 | Simulation results from such a simulation are stored in the SONATA outputs format. 63 | Support for this format will soon be added to Neo_, but for the time being you 64 | can read the results as follows: 65 | 66 | .. 
code-block:: python 67 | 68 | from pyNN.serialization.sonata import SonataIO 69 | 70 | data = SonataIO("sonata_output_dir").read() 71 | 72 | 73 | 74 | .. _NeuroML: http://neuroml.org 75 | .. _NineML: http://nineml.net 76 | .. _SONATA: https://github.com/AllenInstitute/sonata 77 | .. _Neo: http://neuralensemble.org/neo 78 | -------------------------------------------------------------------------------- /doc/index.txt: -------------------------------------------------------------------------------- 1 | =================== 2 | PyNN: documentation 3 | =================== 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | introduction 9 | installation 10 | building_networks 11 | injecting_current 12 | recording 13 | data_handling 14 | simulation_control 15 | parameters 16 | random_numbers 17 | mc_api 18 | backends 19 | parallel 20 | units 21 | import_export 22 | examples 23 | publications 24 | contributors 25 | release_notes 26 | 27 | .. note:: 28 | 29 | This is the documentation for version |release|. Earlier versions: 30 | 31 | - `version 0.11`_ 32 | - `version 0.10`_ 33 | - `version 0.9`_ 34 | - `version 0.8`_ 35 | - `version 0.7 and earlier`_ 36 | 37 | .. add 'logging' after 'units' once configure_logging() implemented. 38 | 39 | .. add 'descriptions' after logging 40 | 41 | Developers' Guide 42 | ================= 43 | 44 | .. toctree:: 45 | :maxdepth: 2 46 | 47 | developers_guide 48 | 49 | 50 | API reference 51 | ============= 52 | 53 | .. toctree:: 54 | :maxdepth: 2 55 | 56 | api_reference 57 | 58 | 59 | Old documents 60 | ============= 61 | 62 | .. toctree:: 63 | :maxdepth: 1 64 | 65 | standardmodels 66 | 67 | 68 | Indices and tables 69 | ================== 70 | 71 | * :ref:`genindex` 72 | * :ref:`modindex` 73 | * :ref:`search` 74 | 75 | .. _`version 0.11`: http://neuralensemble.org/docs/PyNN/0.11/ 76 | .. _`version 0.10`: http://neuralensemble.org/docs/PyNN/0.10/ 77 | .. _`version 0.9`: http://neuralensemble.org/docs/PyNN/0.9/ 78 | .. _`version 0.8`: http://neuralensemble.org/docs/PyNN/0.8/ 79 | .. _`version 0.7 and earlier`: http://neuralensemble.org/docs/PyNN/0.7/ -------------------------------------------------------------------------------- /doc/injecting_current.txt: -------------------------------------------------------------------------------- 1 | ================= 2 | Injecting current 3 | ================= 4 | 5 | .. testsetup:: 6 | 7 | from pyNN.mock import * 8 | setup() 9 | population = Population(30, IF_cond_exp()) 10 | 11 | Current waveforms are represented in PyNN by :class:`CurrentSource` classes. 12 | There are four built-in source types, and it is straightforward to 13 | implement your own. 14 | 15 | There are two ways to inject a current waveform into the cells of a 16 | :class:`Population`, :class:`PopulationView` or :class:`Assembly`: either the 17 | :meth:`inject_into()` method of the :class:`CurrentSource` or the 18 | :meth:`inject()` method of the :class:`Population`, :class:`Assembly`, etc. 19 | 20 | .. doctest:: 21 | 22 | >>> pulse = DCSource(amplitude=0.5, start=20.0, stop=80.0) 23 | >>> pulse.inject_into(population[3:7]) 24 | 25 | .. .. plot:: pyplots/dc_source.py 26 | 27 | .. image:: images/dc_source.png 28 | 29 | 30 | .. doctest:: 31 | 32 | >>> sine = ACSource(start=50.0, stop=450.0, amplitude=1.0, offset=1.0, 33 | ... frequency=10.0, phase=180.0) 34 | >>> population.inject(sine) 35 | 36 | .. .. plot:: pyplots/ac_source.py 37 | 38 | .. image:: images/ac_source.png 39 | 40 | 41 | .. 
doctest:: 42 | 43 | >>> steps = StepCurrentSource(times=[50.0, 110.0, 150.0, 210.0], 44 | ... amplitudes=[0.4, 0.6, -0.2, 0.2]) 45 | >>> steps.inject_into(population[(6,11,27)]) 46 | 47 | .. .. plot:: pyplots/step_source.py 48 | 49 | .. image:: images/step_source.png 50 | 51 | 52 | .. doctest:: 53 | 54 | >>> noise = NoisyCurrentSource(mean=1.5, stdev=1.0, start=50.0, stop=450.0, dt=1.0) 55 | >>> population.inject(noise) 56 | 57 | .. .. plot:: pyplots/noise_source.py 58 | 59 | .. image:: images/noise_source.png 60 | 61 | 62 | For a full description of all the built-in current source classes, see the 63 | :doc:`API reference `. 64 | 65 | .. todo:: write "implementing-your-own-current-source" (e.g., implement "chirp") 66 | -------------------------------------------------------------------------------- /doc/logging.txt: -------------------------------------------------------------------------------- 1 | ======= 2 | Logging 3 | ======= 4 | 5 | When developing a complex model with a long simulation time, it is unlikely that 6 | everything will work correctly the first time, and a fair amount of debugging 7 | will be necessary. Such debugging can often be helped by having a log file 8 | containing information about the progress of the simulation. 9 | 10 | Equally, it is useful to print information to the screen about the progress of 11 | the simulation, but here we generally do not want such a fine grain of detail 12 | as in the log file. 13 | 14 | For both debugging and status updates it is possible to use the ``print`` 15 | statement. In general, however, it is better to use Python's :mod:`logging` 16 | module, as this allows you to both print to the screen and write to a file with 17 | the same command, and to independently control the level of detail written to 18 | each destination. More concretely, if you are using ``print`` statements for 19 | debugging, you will have to find and remove all these statements once debugging 20 | is complete, whereas if using :mod:`logging` you only have to change one 21 | configuration option at one point in your code. 22 | 23 | Configuration 24 | ============= 25 | 26 | The :mod:`logging` module allows almost unlimited flexibility in configuring 27 | logging. For the common use case in simulations - sending status updates to the 28 | screen and logging at a higher-level of detail to file, PyNN provides a 29 | shortcut function:: 30 | 31 | >>> from pyNN.utility import configure_logging 32 | >>> configure_logging(console='HEADER', logging='INFO', filename="log.txt", with_color=True) 33 | 34 | This will print a high-level overview of the progress of your simulation to the 35 | console, in colour, and a more detailed report to :file:`log.txt`. To get more detail, 36 | use ``'DEBUG'`` instead of ``'INFO'``, for less detail, use ``'WARNING'``. 37 | 38 | 39 | Adding logging to your own code 40 | =============================== 41 | 42 | Using logging in your own code is very simple. At the top of each file, put 43 | something like:: 44 | 45 | import logging 46 | logger = logging.getLogger("MySimulation") 47 | 48 | then add statements like:: 49 | 50 | logger.header("Creating the network....") 51 | logger.info("Creating population A") 52 | logger.debug("Trying to figure out why this isn't working. p = %s" % p) 53 | logger.warning("This could be a problem.") 54 | 55 | at appropriate points in your code. 56 | 57 | .. todo:: implement this, and give an example of what the log file looks like. 
58 | -------------------------------------------------------------------------------- /doc/mc_aims.txt: -------------------------------------------------------------------------------- 1 | ==================================================== 2 | Multicompartmental modelling with PyNN: design goals 3 | ==================================================== 4 | 5 | The three principal aims of the PyNN project are: 6 | 7 | (i) to make it easy to run models on different simulators (and on neuromorphic hardware), in order to facilitate cross-checking, reproducibility and re-use; 8 | (ii) to provide a simulator-independent platform on which to build an ecosystem of tools for computational neuroscience (visualization libraries, workflow engines, etc.) rather than having tools that only work with one simulator; 9 | (iii) to support modelling at a high-level of abstraction (populations of neurons, layers, columns and the connections between them) while still allowing access to the details of individual neurons and synapses when required. 10 | 11 | 12 | The scope of PyNN was originally limited to networks of point neurons (integrate-and-fire and related models). 13 | The primary reason for this was that at the time only the NEURON_ simulator had both support for multicompartment models and a Python interface. 14 | 15 | This situation has now changed, with the release of `Brian 2`_, the addition of Python support to MOOSE_, development of the Arbor_ simulation library, and support for multicompartment models in the future versions of the BrainScaleS and SpiNNaker neuromorphic chips. 16 | 17 | 18 | We are therefore adapting the PyNN API to support both point neuron models and morphologically-and-biophysically-detailed neuron models (and mixed networks of both model types). The principal design goals are: 19 | 20 | (i) maintain the same main level of abstraction: populations of neurons and the sets of connections between populations (projections); 21 | (ii) backwards compatibility (point neuron models created with PyNN 1.0 (not yet released) or later should work with no, or minimal, changes); 22 | (iii) integrate with other open-source simulation tools and standards (e.g. NeuroML) wherever possible, rather than reinventing the wheel; 23 | (iv) support neuromorphic hardware systems. 24 | 25 | 26 | .. _NEURON: https://www.neuron.yale.edu/neuron/ 27 | .. _Arbor: https://github.com/eth-cscs/arbor 28 | .. _MOOSE: https://moose.ncbs.res.in 29 | .. _`Brian 2`: http://briansimulator.org 30 | -------------------------------------------------------------------------------- /doc/mc_examples.txt: -------------------------------------------------------------------------------- 1 | ================================================ 2 | Multicompartmental modelling with PyNN: examples 3 | ================================================ 4 | 5 | 6 | Injecting time-varying current into two-compartment cells 7 | --------------------------------------------------------- 8 | 9 | .. literalinclude:: ../examples/mc/current_injection_mc.py 10 | 11 | 12 | Injecting time-varying current into multi-compartment cells 13 | ----------------------------------------------------------- 14 | 15 | .. literalinclude:: ../examples/mc/current_injection_mc_swc.py 16 | 17 | 18 | A network of multi-compartment cells 19 | ------------------------------------ 20 | 21 | .. 
literalinclude:: ../examples/mc/mc_network.py 22 | -------------------------------------------------------------------------------- /doc/pyNN_icon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/pyNN_icon.ico -------------------------------------------------------------------------------- /doc/pyNN_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/doc/pyNN_logo.png -------------------------------------------------------------------------------- /doc/pyplots/ac_source.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | """ 4 | 5 | from plot_helper import plot_current_source 6 | import pyNN.neuron as sim 7 | 8 | sim.setup() 9 | 10 | population = sim.Population(10, sim.IF_cond_exp(tau_m=10.0)) 11 | population[0:1].record_v() 12 | 13 | sine = sim.ACSource(start=50.0, stop=450.0, amplitude=1.0, offset=1.0, 14 | frequency=10.0, phase=180.0) 15 | population.inject(sine) 16 | sine._record() 17 | 18 | sim.run(500.0) 19 | 20 | t, i_inj = sine._get_data() 21 | v = population.get_data().segments[0].analogsignals[0] 22 | 23 | plot_current_source(t, i_inj, v, 24 | v_range=(-66, -49), 25 | v_ticks=(-65, -60, -55, -50), 26 | i_range=(-0.1, 2.1), 27 | i_ticks=(0.0, 0.5, 1.0, 1.5), 28 | t_range=(0, 500)) 29 | -------------------------------------------------------------------------------- /doc/pyplots/continuous_time_spiking.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pyNN.nest import * 3 | import matplotlib.pyplot as plt 4 | 5 | 6 | def test_sim(on_or_off_grid, sim_time): 7 | setup(timestep=1.0, min_delay=1.0, max_delay=1.0, spike_precision=on_or_off_grid) 8 | src = Population(1, SpikeSourceArray(spike_times=[0.5])) 9 | cm = 250.0 10 | tau_m = 10.0 11 | tau_syn_E = 1.0 12 | weight = cm / tau_m * np.power(tau_syn_E / tau_m, -tau_m / (tau_m - tau_syn_E)) * 20.5 13 | nrn = Population(1, IF_curr_exp(cm=cm, tau_m=tau_m, tau_syn_E=tau_syn_E, 14 | tau_refrac=2.0, v_thresh=20.0, v_rest=0.0, 15 | v_reset=0.0, i_offset=0.0)) 16 | nrn.initialize(v=0.0) 17 | prj = Projection(src, nrn, OneToOneConnector(), StaticSynapse(weight=weight)) 18 | nrn.record('v') 19 | run(sim_time) 20 | return nrn.get_data().segments[0].analogsignals[0] 21 | 22 | 23 | sim_time = 10.0 24 | off = test_sim('off_grid', sim_time) 25 | on = test_sim('on_grid', sim_time) 26 | 27 | 28 | def plot_data(pos, on, off, ylim, with_legend=False): 29 | ax = plt.subplot(1, 2, pos) 30 | ax.plot(off.times, off, color='0.7', linewidth=7, label='off-grid') 31 | ax.plot(on.times, on, 'k', label='on-grid') 32 | ax.set_ylim(*ylim) 33 | ax.set_xlim(0, 9) 34 | ax.set_xlabel('time [ms]') 35 | ax.set_ylabel('V [mV]') 36 | if with_legend: 37 | plt.legend() 38 | 39 | 40 | plot_data(1, on, off, (-0.5, 21), with_legend=True) 41 | plot_data(2, on, off, (-0.05, 2.1)) 42 | plt.show() 43 | -------------------------------------------------------------------------------- /doc/pyplots/dc_source.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | """ 4 | 5 | from plot_helper import plot_current_source 6 | import pyNN.neuron as sim 7 | 8 | sim.setup() 9 | 10 | population = sim.Population(10, sim.IF_cond_exp(tau_m=10.0)) 11 | 
population[3:4].record_v() 12 | 13 | pulse = sim.DCSource(amplitude=0.5, start=20.0, stop=80.0) 14 | pulse.inject_into(population[3:7]) 15 | pulse._record() 16 | 17 | sim.run(100.0) 18 | 19 | t, i_inj = pulse._get_data() 20 | v = population.get_data().segments[0].analogsignals[0] 21 | 22 | plot_current_source(t, i_inj, v, 23 | v_range=(-65.5, -59.5), 24 | v_ticks=(-65, -64, -63, -62, -61, -60), 25 | i_range=(-0.1, 0.55), 26 | i_ticks=(0.0, 0.2, 0.4)) 27 | -------------------------------------------------------------------------------- /doc/pyplots/neo_example.py: -------------------------------------------------------------------------------- 1 | import pyNN.neuron as sim # can of course replace `neuron` with `nest`, `brian`, etc. 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | sim.setup(timestep=0.01) 6 | p_in = sim.Population(10, sim.SpikeSourcePoisson(rate=10.0), label="input") 7 | p_out = sim.Population(10, sim.EIF_cond_exp_isfa_ista(), label="AdExp neurons") 8 | 9 | syn = sim.StaticSynapse(weight=0.05) 10 | random = sim.FixedProbabilityConnector(p_connect=0.5) 11 | connections = sim.Projection(p_in, p_out, random, syn, receptor_type='excitatory') 12 | 13 | p_in.record('spikes') 14 | p_out.record('spikes') # record spikes from all neurons 15 | p_out[0:2].record(['v', 'w', 'gsyn_exc']) # record other variables from first two neurons 16 | 17 | sim.run(500.0) 18 | 19 | spikes_in = p_in.get_data() 20 | data_out = p_out.get_data() 21 | 22 | fig_settings = { 23 | 'lines.linewidth': 0.5, 24 | 'axes.linewidth': 0.5, 25 | 'axes.labelsize': 'small', 26 | 'legend.fontsize': 'small', 27 | 'font.size': 8 28 | } 29 | plt.rcParams.update(fig_settings) 30 | plt.figure(1, figsize=(6, 8)) 31 | 32 | 33 | def plot_spiketrains(segment): 34 | for spiketrain in segment.spiketrains: 35 | y = np.ones_like(spiketrain) * spiketrain.annotations['channel_id'] 36 | plt.plot(spiketrain, y, '.') 37 | plt.ylabel(segment.name) 38 | plt.setp(plt.gca().get_xticklabels(), visible=False) 39 | 40 | 41 | def plot_signal(signal, index, colour='b'): 42 | label = "Neuron %d" % signal.annotations['channel_ids'][index] 43 | plt.plot(signal.times, signal[:, index], colour, label=label) 44 | plt.ylabel("%s (%s)" % (signal.name, signal.units._dimensionality.string)) 45 | plt.setp(plt.gca().get_xticklabels(), visible=False) 46 | plt.legend() 47 | 48 | 49 | n_panels = sum(a.shape[1] for a in data_out.segments[0].analogsignals) + 2 50 | plt.subplot(n_panels, 1, 1) 51 | plot_spiketrains(spikes_in.segments[0]) 52 | plt.subplot(n_panels, 1, 2) 53 | plot_spiketrains(data_out.segments[0]) 54 | panel = 3 55 | for array in data_out.segments[0].analogsignals: 56 | for i in range(array.shape[1]): 57 | plt.subplot(n_panels, 1, panel) 58 | plot_signal(array, i, colour='bg'[panel % 2]) 59 | panel += 1 60 | plt.xlabel("time (%s)" % array.times.units._dimensionality.string) 61 | plt.setp(plt.gca().get_xticklabels(), visible=True) 62 | 63 | plt.show() 64 | -------------------------------------------------------------------------------- /doc/pyplots/noise_source.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | """ 4 | 5 | from plot_helper import plot_current_source 6 | import pyNN.neuron as sim 7 | 8 | sim.setup() 9 | 10 | population = sim.Population(30, sim.IF_cond_exp(tau_m=10.0)) 11 | population[0:1].record_v() 12 | 13 | noise = sim.NoisyCurrentSource(mean=1.5, stdev=1.0, start=50.0, stop=450.0, 14 | dt=1.0) 15 | population.inject(noise) 16 | noise._record() 17 | 18 | 
sim.run(500.0) 19 | 20 | t, i_inj = noise._get_data() 21 | v = population.get_data().segments[0].analogsignals[0] 22 | 23 | plot_current_source(t, i_inj, v, 24 | v_range=(-66, -48), 25 | v_ticks=(-65, -60, -55, -50), 26 | i_range=(-3, 5), 27 | i_ticks=range(-2, 6, 2), 28 | t_range=(0, 500)) 29 | -------------------------------------------------------------------------------- /doc/pyplots/plot_current_source.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helper function for drawing current-source plots 3 | """ 4 | 5 | 6 | def plot_current_source(t, i_inj, v): 7 | """ 8 | Plot voltage and current traces 9 | """ 10 | fig = plt.figure(figsize=(8, 3)) 11 | fig.dpi = 120 12 | 13 | ax = fig.add_axes((0.1, 0.4, 0.85, 0.5), frameon=False) 14 | ax.plot(t, v, 'b') 15 | ax.set_ylim(-65.5, -59.5) 16 | ax.set_ylabel('V (mV)') 17 | ax.xaxis.set_visible(False) 18 | ax.yaxis.set_ticks_position('left') 19 | ax.yaxis.set_ticks((-65, -64, -63, -62, -61, -60)) 20 | 21 | # add the left axis line back in 22 | xmin, xmax = ax.get_xaxis().get_view_interval() 23 | ymin, ymax = ax.get_yaxis().get_view_interval() 24 | ax.add_artist(plt.Line2D((xmin, xmin), (ymin, ymax), color='black', linewidth=3)) 25 | 26 | ax = fig.add_axes((0.1, 0.14, 0.85, 0.25), frameon=False) 27 | ax.plot(t, i_inj, 'g') 28 | ax.set_ylim(-0.1, 0.55) 29 | ax.set_ylabel('I (nA)') 30 | ax.set_xlabel('Time (ms)') 31 | ax.yaxis.tick_left() 32 | ax.xaxis.tick_bottom() 33 | ax.yaxis.set_ticks((0.0, 0.2, 0.4)) 34 | 35 | # add the bottom and left axis lines back in 36 | xmin, xmax = ax.get_xaxis().get_view_interval() 37 | ymin, ymax = ax.get_yaxis().get_view_interval() 38 | ax.add_artist(plt.Line2D((xmin, xmax), (ymin, ymin), color='black', linewidth=3)) 39 | ax.add_artist(plt.Line2D((xmin, xmin), (ymin, ymax), color='black', linewidth=3)) 40 | 41 | fig.show() 42 | return fig 43 | -------------------------------------------------------------------------------- /doc/pyplots/plot_helper.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helper function for drawing current-source plots 3 | """ 4 | 5 | import matplotlib.pyplot as plt 6 | 7 | 8 | def plot_current_source(t, i_inj, v, i_range=None, v_range=None, 9 | i_ticks=None, v_ticks=None, t_range=None): 10 | """ 11 | Plot voltage and current traces 12 | """ 13 | fig = plt.figure(figsize=(8, 3)) 14 | fig.dpi = 120 15 | 16 | ax = fig.add_axes((0.1, 0.4, 0.85, 0.5), frameon=False) 17 | ax.plot(t, v, 'b') 18 | if v_range: 19 | ax.set_ylim(*v_range) 20 | ax.set_ylabel('V (mV)') 21 | ax.xaxis.set_visible(False) 22 | ax.yaxis.set_ticks_position('left') 23 | if v_ticks: 24 | ax.yaxis.set_ticks(v_ticks) 25 | if t_range: 26 | ax.set_xlim(*t_range) 27 | 28 | # add the left axis line back in 29 | xmin, xmax = ax.get_xaxis().get_view_interval() 30 | ymin, ymax = ax.get_yaxis().get_view_interval() 31 | ax.add_artist(plt.Line2D((xmin, xmin), (ymin, ymax), color='black', linewidth=3)) 32 | 33 | ax = fig.add_axes((0.1, 0.14, 0.85, 0.25), frameon=False) 34 | ax.plot(t, i_inj, 'g') 35 | if i_range: 36 | ax.set_ylim(*i_range) 37 | ax.set_ylabel('I (nA)') 38 | ax.set_xlabel('Time (ms)') 39 | ax.yaxis.tick_left() 40 | ax.xaxis.tick_bottom() 41 | if i_ticks: 42 | ax.yaxis.set_ticks(i_ticks) 43 | if t_range: 44 | ax.set_xlim(*t_range) 45 | 46 | # add the bottom and left axis lines back in 47 | xmin, xmax = ax.get_xaxis().get_view_interval() 48 | ymin, ymax = ax.get_yaxis().get_view_interval() 49 | ax.add_artist(plt.Line2D((xmin, xmax), 
(ymin, ymin), color='black', linewidth=3)) 50 | ax.add_artist(plt.Line2D((xmin, xmin), (ymin, ymax), color='black', linewidth=3)) 51 | 52 | plt.savefig("tmp.png") 53 | return fig 54 | -------------------------------------------------------------------------------- /doc/pyplots/reset_example.py: -------------------------------------------------------------------------------- 1 | import pyNN.neuron as sim # can of course replace `nest` with `neuron`, `brian`, etc. 2 | import matplotlib.pyplot as plt 3 | from quantities import nA 4 | 5 | sim.setup() 6 | 7 | cell = sim.Population(1, sim.HH_cond_exp()) 8 | step_current = sim.DCSource(start=20.0, stop=80.0) 9 | step_current.inject_into(cell) 10 | 11 | cell.record('v') 12 | 13 | for amp in (-0.2, -0.1, 0.0, 0.1, 0.2): 14 | step_current.amplitude = amp 15 | sim.run(100.0) 16 | sim.reset(annotations={"amplitude": amp * nA}) 17 | 18 | data = cell.get_data() 19 | 20 | sim.end() 21 | 22 | for segment in data.segments: 23 | vm = segment.analogsignals[0] 24 | plt.plot(vm.times, vm, 25 | label=str(segment.annotations["amplitude"])) 26 | plt.legend(loc="upper left") 27 | plt.xlabel("Time (%s)" % vm.times.units._dimensionality) 28 | plt.ylabel("Membrane potential (%s)" % vm.units._dimensionality) 29 | 30 | plt.show() 31 | -------------------------------------------------------------------------------- /doc/pyplots/step_source.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | """ 4 | 5 | from plot_helper import plot_current_source 6 | import pyNN.neuron as sim 7 | 8 | sim.setup() 9 | 10 | population = sim.Population(30, sim.IF_cond_exp(tau_m=10.0)) 11 | population[27:28].record_v() 12 | 13 | steps = sim.StepCurrentSource(times=[50.0, 110.0, 150.0, 210.0], 14 | amplitudes=[0.4, 0.6, -0.2, 0.2]) 15 | steps.inject_into(population[(6, 11, 27)]) 16 | steps._record() 17 | 18 | sim.run(250.0) 19 | 20 | t, i_inj = steps._get_data() 21 | v = population.get_data().segments[0].analogsignals[0] 22 | 23 | plot_current_source(t, i_inj, v, 24 | #v_range=(-66, -49), 25 | v_ticks=(-66, -64, -62, -60), 26 | i_range=(-0.3, 0.7), 27 | i_ticks=(-0.2, 0.0, 0.2, 0.4, 0.6), 28 | t_range=(0, 250)) 29 | -------------------------------------------------------------------------------- /doc/quickstart.txt: -------------------------------------------------------------------------------- 1 | ========== 2 | Quickstart 3 | ========== 4 | 5 | to write... 6 | -------------------------------------------------------------------------------- /doc/reference/connectors.txt: -------------------------------------------------------------------------------- 1 | ========== 2 | Connectors 3 | ========== 4 | 5 | .. module:: pyNN.connectors 6 | 7 | Base class 8 | ========== 9 | 10 | .. autoclass:: Connector 11 | :members: 12 | :undoc-members: 13 | 14 | Built-in connectors 15 | =================== 16 | 17 | .. autoclass:: AllToAllConnector 18 | 19 | .. autoclass:: OneToOneConnector 20 | 21 | .. autoclass:: FixedProbabilityConnector 22 | 23 | .. autoclass:: FromListConnector 24 | 25 | .. autoclass:: FromFileConnector 26 | 27 | .. autoclass:: ArrayConnector 28 | 29 | .. autoclass:: FixedNumberPreConnector 30 | 31 | .. autoclass:: FixedNumberPostConnector 32 | 33 | .. autoclass:: FixedTotalNumberConnector 34 | 35 | .. autoclass:: DistanceDependentProbabilityConnector 36 | 37 | .. autoclass:: IndexBasedProbabilityConnector 38 | 39 | .. autoclass:: DisplacementDependentProbabilityConnector 40 | 41 | .. autoclass:: SmallWorldConnector 42 | 43 | .. 
autoclass:: CSAConnector 44 | 45 | .. autoclass:: CloneConnector 46 | -------------------------------------------------------------------------------- /doc/reference/electrodes.txt: -------------------------------------------------------------------------------- 1 | =============== 2 | Current sources 3 | =============== 4 | 5 | .. currentmodule:: pyNN.neuron.standardmodels.electrodes 6 | 7 | .. autoclass:: DCSource 8 | :members: 9 | :show-inheritance: 10 | 11 | .. automethod:: inject_into 12 | .. automethod:: get_parameters 13 | .. automethod:: set_parameters 14 | 15 | 16 | .. autoclass:: ACSource 17 | :members: 18 | :show-inheritance: 19 | 20 | .. automethod:: inject_into 21 | .. automethod:: get_parameters 22 | .. automethod:: set_parameters 23 | 24 | 25 | .. autoclass:: StepCurrentSource 26 | :members: 27 | :show-inheritance: 28 | 29 | .. automethod:: inject_into 30 | .. automethod:: get_parameters 31 | .. automethod:: set_parameters 32 | 33 | 34 | .. autoclass:: NoisyCurrentSource 35 | :members: 36 | :show-inheritance: 37 | 38 | .. automethod:: inject_into 39 | .. automethod:: get_parameters 40 | .. automethod:: set_parameters 41 | -------------------------------------------------------------------------------- /doc/reference/projections.txt: -------------------------------------------------------------------------------- 1 | =========== 2 | Projections 3 | =========== 4 | 5 | .. currentmodule:: pyNN.neuron 6 | 7 | .. autoclass:: Projection 8 | :members: 9 | :undoc-members: 10 | :inherited-members: 11 | :show-inheritance: 12 | 13 | .. attribute:: pre 14 | 15 | The pre-synaptic Population, PopulationView or Assembly. 16 | 17 | .. attribute:: post 18 | 19 | The post-synaptic Population, PopulationView or Assembly. 20 | 21 | .. attribute:: source 22 | 23 | A string specifying which attribute of the presynaptic cell signals 24 | action potentials. 25 | 26 | .. attribute:: target 27 | 28 | Name of the postsynaptic mechanism type (e.g. 'excitatory', 'NMDA'). 29 | 30 | .. attribute:: label 31 | 32 | A label for the Projection. 33 | 34 | .. attribute:: rng 35 | 36 | The RNG object that was used by the Connector. 37 | 38 | .. attribute:: synapse_dynamics 39 | 40 | The SynapseDynamics object that was used to specify the synaptic 41 | plasticity mechanism, or `None` if the synapses are static. 42 | 43 | .. automethod:: __len__ 44 | .. automethod:: __getitem__ 45 | .. automethod:: __iter__ 46 | 47 | 48 | .. autofunction:: connect -------------------------------------------------------------------------------- /doc/reference/random.txt: -------------------------------------------------------------------------------- 1 | ============== 2 | Random numbers 3 | ============== 4 | 5 | .. testsetup:: * 6 | 7 | from pyNN.random import NumpyRNG, RandomDistribution 8 | 9 | 10 | .. module:: pyNN.random 11 | 12 | .. autoclass:: RandomDistribution 13 | :members: 14 | :undoc-members: 15 | 16 | 17 | .. autoclass:: NumpyRNG 18 | :members: 19 | :undoc-members: 20 | :inherited-members: 21 | :show-inheritance: 22 | 23 | 24 | .. autoclass:: GSLRNG 25 | :members: 26 | :undoc-members: 27 | :inherited-members: 28 | :show-inheritance: 29 | 30 | 31 | .. autoclass:: NativeRNG 32 | :members: 33 | :undoc-members: 34 | :inherited-members: 35 | :show-inheritance: 36 | 37 | 38 | Adapting a different random number generator to work with PyNN 39 | -------------------------------------------------------------- 40 | 41 | .. 
todo:: write this -------------------------------------------------------------------------------- /doc/reference/simulationcontrol.txt: -------------------------------------------------------------------------------- 1 | ================== 2 | Simulation control 3 | ================== 4 | 5 | .. autofunction:: pyNN.neuron.setup 6 | 7 | .. autofunction:: pyNN.neuron.run 8 | 9 | .. autofunction:: pyNN.neuron.run_until 10 | 11 | .. autofunction:: pyNN.neuron.reset 12 | 13 | .. autofunction:: pyNN.neuron.end 14 | 15 | .. autofunction:: pyNN.neuron.get_time_step 16 | 17 | .. autofunction:: pyNN.neuron.get_current_time 18 | 19 | .. autofunction:: pyNN.neuron.get_min_delay 20 | 21 | .. autofunction:: pyNN.neuron.get_max_delay 22 | 23 | .. autofunction:: pyNN.neuron.num_processes 24 | 25 | .. autofunction:: pyNN.neuron.rank 26 | 27 | .. autofunction:: pyNN.neuron.set 28 | 29 | .. autofunction:: pyNN.neuron.initialize 30 | 31 | .. autofunction:: pyNN.neuron.record 32 | -------------------------------------------------------------------------------- /doc/reference/space.txt: -------------------------------------------------------------------------------- 1 | ================= 2 | Spatial structure 3 | ================= 4 | 5 | .. module:: pyNN.space 6 | 7 | Structure classes 8 | ----------------- 9 | 10 | :class:`Structure` classes all inherit from the following base class, and 11 | inherit its methods: 12 | 13 | .. autoclass:: BaseStructure 14 | :members: 15 | :undoc-members: 16 | :inherited-members: 17 | :show-inheritance: 18 | 19 | .. autoclass:: Line 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | 24 | .. autoclass:: Grid2D 25 | :members: 26 | :undoc-members: 27 | :show-inheritance: 28 | 29 | .. autoclass:: Grid3D 30 | :members: 31 | :undoc-members: 32 | :show-inheritance: 33 | 34 | .. autoclass:: RandomStructure 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | Shape classes 40 | ------------- 41 | 42 | .. autoclass:: Cuboid 43 | :members: 44 | :undoc-members: 45 | :inherited-members: 46 | :show-inheritance: 47 | 48 | .. autoclass:: Sphere 49 | :members: 50 | :undoc-members: 51 | :inherited-members: 52 | :show-inheritance: 53 | 54 | The Space class 55 | --------------- 56 | 57 | .. autoclass:: Space 58 | :members: 59 | :undoc-members: 60 | :inherited-members: 61 | 62 | Implementing your own Shape 63 | --------------------------- 64 | 65 | .. todo:: write this 66 | 67 | 68 | Implementing your own Structure 69 | ------------------------------- 70 | 71 | .. todo:: write this 72 | 73 | -------------------------------------------------------------------------------- /doc/reference/utility.txt: -------------------------------------------------------------------------------- 1 | ============================= 2 | Utility classes and functions 3 | ============================= 4 | 5 | 6 | .. autofunction:: pyNN.utility.init_logging 7 | 8 | .. autofunction:: pyNN.utility.get_simulator 9 | 10 | 11 | .. autoclass:: pyNN.utility.Timer 12 | :members: 13 | :undoc-members: 14 | 15 | .. autoclass:: pyNN.utility.ProgressBar 16 | :members: 17 | :undoc-members: 18 | 19 | .. autofunction:: pyNN.utility.notify 20 | 21 | .. autofunction:: pyNN.utility.save_population 22 | 23 | .. autofunction:: pyNN.utility.load_population 24 | 25 | 26 | Basic plotting 27 | ============== 28 | 29 | .. autoclass:: pyNN.utility.plotting.Figure 30 | :members: 31 | :undoc-members: 32 | 33 | .. autoclass:: pyNN.utility.plotting.Panel 34 | :members: 35 | :undoc-members: 36 | 37 | .. 
autofunction:: pyNN.utility.plotting.comparison_plot 38 | -------------------------------------------------------------------------------- /doc/release_notes.txt: -------------------------------------------------------------------------------- 1 | ============= 2 | Release notes 3 | ============= 4 | 5 | 6 | .. toctree:: 7 | :maxdepth: 1 8 | 9 | releases/0.12.4.txt 10 | releases/0.12.3.txt 11 | releases/0.12.2.txt 12 | releases/0.12.1.txt 13 | releases/0.12.0.txt 14 | releases/0.11.0.txt 15 | releases/0.10.1.txt 16 | releases/0.10.0.txt 17 | releases/0.9.6.txt 18 | releases/0.9.5.txt 19 | releases/0.9.4.txt 20 | releases/0.9.3.txt 21 | releases/0.9.2.txt 22 | releases/0.9.1.txt 23 | releases/0.9.0.txt 24 | releases/0.8.3.txt 25 | releases/0.8.2.txt 26 | releases/0.8.1.txt 27 | releases/0.8.0.txt 28 | releases/0.8.0-rc-1.txt 29 | releases/0.8-beta-2.txt 30 | releases/0.8-beta-1.txt 31 | releases/0.8-alpha-2.txt 32 | releases/0.8-alpha-1.txt 33 | releases/0.7.txt 34 | releases/0.6.txt 35 | -------------------------------------------------------------------------------- /doc/releases/0.10.0.txt: -------------------------------------------------------------------------------- 1 | ========================= 2 | PyNN 0.10.0 release notes 3 | ========================= 4 | 5 | December 6th 2021 6 | 7 | Welcome to PyNN 0.10.0! 8 | 9 | 10 | NEST 3.1 11 | -------- 12 | 13 | PyNN now supports the latest version of NEST_. 14 | 15 | 16 | Requires Neo 0.10.0 or later 17 | 18 | Bug fixes 19 | --------- 20 | 21 | A `small number of bugs`_ have been fixed. 22 | 23 | .. _`small number of bugs`: https://github.com/NeuralEnsemble/PyNN/issues?q=milestone%3A0.10.0+is%3Aclosed 24 | .. _NEST: https://www.nest-simulator.org 25 | .. _`Brian 2`: https://brian2.readthedocs.io -------------------------------------------------------------------------------- /doc/releases/0.10.1.txt: -------------------------------------------------------------------------------- 1 | ========================= 2 | PyNN 0.10.1 release notes 3 | ========================= 4 | 5 | October 13th 2022 6 | 7 | Welcome to PyNN 0.10.1! 8 | 9 | 10 | NEST 3.3, NEURON 8.1 11 | -------------------- 12 | 13 | PyNN now supports the latest version of NEST_, and NEURON_ v8.1. 14 | 15 | New neuron model 16 | ---------------- 17 | 18 | We have added a new standard neuron model, :class:`IF_curr_delta`, for which the synaptic response is 19 | a step change in the membrane voltage. 20 | 21 | Recording with irregular sampling intervals 22 | ------------------------------------------- 23 | 24 | Where a simulator supports recording with varying sampling intervals (e.g. BrainScaleS, NEURON with CVode), 25 | PyNN can now handle such data, using the :class:`IrregularlySampledSignal` class from Neo. 26 | 27 | More efficient handling of spike trains 28 | --------------------------------------- 29 | 30 | Some simulators record spike trains as a pair of arrays, one containing neuron identifiers, 31 | the other spike times. 32 | PyNN can now retain this representation for internal data handling or for export, 33 | using the :class:`SpikeTrainList` class recently introduced in Neo, 34 | which avoids the overhead of always converting to a list of :class:`SpikeTrains`, one per neuron. 35 | 36 | Dependencies 37 | ------------ 38 | 39 | PyNN now requires NumPy > 1.18.5, Neo > 0.11.0 or later. 40 | 41 | Bug fixes 42 | --------- 43 | 44 | A `small number of bugs`_ have been fixed. 45 | 46 | .. 
_`small number of bugs`: https://github.com/NeuralEnsemble/PyNN/issues?q=milestone%3A0.10.1+is%3Aclosed 47 | .. _NEST: https://www.nest-simulator.org 48 | .. _NEURON: https://neuron.yale.edu/neuron/ 49 | .. _`Brian 2`: https://brian2.readthedocs.io 50 | -------------------------------------------------------------------------------- /doc/releases/0.12.0.txt: -------------------------------------------------------------------------------- 1 | ========================= 2 | PyNN 0.12.0 release notes 3 | ========================= 4 | 5 | September 28th 2023 6 | 7 | Welcome to PyNN 0.12.0! 8 | 9 | 10 | NEST 3.6 support 11 | ---------------- 12 | 13 | PyNN now supports the latest version of NEST_. NEST 3.4 and 3.5 should also work. 14 | For older versions of NEST, you will need an older version of PyNN to match. 15 | 16 | 17 | Beta: API extensions for multicompartment neurons 18 | ------------------------------------------------- 19 | 20 | In this release we introduce an experimental API extension for multicompartment neurons 21 | with detailed biophysics. Multicompartment support is only available for the NEURON_ backend 22 | and for the new Arbor_ backend. For more information, see :doc:`../mc_aims` and :doc:`../mc_api`. 23 | 24 | .. _NEST: https://www.nest-simulator.org 25 | .. _NEURON: https://neuron.yale.edu/neuron/ 26 | .. _Arbor: https://arbor-sim.org -------------------------------------------------------------------------------- /doc/releases/0.12.1.txt: -------------------------------------------------------------------------------- 1 | ========================= 2 | PyNN 0.12.1 release notes 3 | ========================= 4 | 5 | September 29th 2023 6 | 7 | This is a bug-fix release, due to a missing item in the list of dependencies for PyNN 0.12.0. 8 | -------------------------------------------------------------------------------- /doc/releases/0.12.2.txt: -------------------------------------------------------------------------------- 1 | ========================= 2 | PyNN 0.12.2 release notes 3 | ========================= 4 | 5 | February 21st 2024 6 | 7 | This is a bug-fix release, primarily fixing problems arising from changes in dependencies. 8 | -------------------------------------------------------------------------------- /doc/releases/0.12.3.txt: -------------------------------------------------------------------------------- 1 | ========================= 2 | PyNN 0.12.3 release notes 3 | ========================= 4 | 5 | April 17th 2024 6 | 7 | Welcome to PyNN 0.12.3! 8 | 9 | 10 | NEST 3.7 support 11 | ---------------- 12 | 13 | PyNN now supports the latest version of NEST_. 14 | NEST 3.4-3.6 should also work for most neuron models, but since the extension module interface 15 | has changed in NEST 3.7, those models that require NEST extensions will not work with older versions. 16 | For older versions of NEST, you will need an older version of PyNN to match. 17 | 18 | 19 | Beta: API extensions for multicompartment neurons 20 | ------------------------------------------------- 21 | 22 | The beta API for multicompartment neurons has been updated to work with Arbor_ 0.9.0. 23 | 24 | .. _NEST: https://www.nest-simulator.org 25 | .. 
_Arbor: https://arbor-sim.org -------------------------------------------------------------------------------- /doc/releases/0.12.4.txt: -------------------------------------------------------------------------------- 1 | ========================= 2 | PyNN 0.12.4 release notes 3 | ========================= 4 | 5 | April 2nd 2025 6 | 7 | Welcome to PyNN 0.12.4! 8 | 9 | New documentation theme 10 | ----------------------- 11 | 12 | Many thanks to @sdivyanshu90 for suggesting use of the sphinxawesome_ theme, 13 | which fixes some problems with over-flowing side-bars, 14 | and in general provides much nicer navigation through the documentation_. 15 | 16 | 17 | NEST 3.8 support 18 | ---------------- 19 | 20 | PyNN now supports NEST_ v3.8. 21 | NEST 3.4-3.7 should also work for most neuron models, but since the extension module interface 22 | has changed in NEST 3.7, those models that require NEST extensions will not work with older versions. 23 | For older versions of NEST, you will need an older version of PyNN to match. 24 | 25 | Bug fixes 26 | --------- 27 | 28 | - Fix some problems with creating SpikeTrain objects, following the addition of SpikeTrainList to Neo. 29 | - Correctly set parameters when creating a point neuron with no synapses, with the neuron backend. 30 | - Correctly write equations for Brian2 when creating a point neuron with no synapses. 31 | 32 | Dependency updates 33 | ------------------ 34 | 35 | We have dropped support for Python 3.8. 36 | This version should also work with NumPy v2+. 37 | 38 | 39 | .. _NEST: https://www.nest-simulator.org 40 | .. _Arbor: https://arbor-sim.org 41 | .. _sphinxawesome: https://sphinxawesome.xyz 42 | .. _documentation: https://neuralensemble.org/docs/PyNN/ 43 | -------------------------------------------------------------------------------- /doc/releases/0.8-alpha-2.txt: -------------------------------------------------------------------------------- 1 | ============================== 2 | PyNN 0.8 alpha 2 release notes 3 | ============================== 4 | 5 | May 24th 2013 6 | 7 | Welcome to the second alpha release of PyNN 0.8! 8 | 9 | For full information about what's new in PyNN 0.8, see the :doc:`0.8-alpha-1`. 10 | 11 | This second alpha is mostly just a bug-fix release, although we have added a new 12 | class, :class:`CloneConnector` (thanks to Tom Close), which takes the connection 13 | matrix from an existing :class:`Projection` and uses it to create a new :class:`Projection`, 14 | with the option of changing the weights, delays, receptor type, etc. 15 | 16 | The other big change for developers is that we have switched from Subversion to 17 | Git. PyNN development now takes place at https://github.com/NeuralEnsemble/PyNN/ 18 | 19 | -------------------------------------------------------------------------------- /doc/releases/0.8.0-rc-1.txt: -------------------------------------------------------------------------------- 1 | ============================================ 2 | PyNN 0.8.0 release candidate 1 release notes 3 | ============================================ 4 | 5 | August 19th 2015 6 | 7 | Welcome to the first release candidate of PyNN 0.8.0! 8 | 9 | For full information about what's new in PyNN 0.8, see the :doc:`0.8-alpha-1`, :doc:`0.8-beta-1` and :doc:`0.8-beta-2` 10 | 11 | NEST 2.6 12 | -------- 13 | 14 | The main new feature in this release is support for NEST_ 2.6. Previous versions of NEST are no longer supported. 
15 | 16 | Other changes 17 | ------------- 18 | 19 | * Travis CI now runs system tests as well as unit tests. 20 | * `Assorted bug fixes`_ 21 | 22 | 23 | 24 | .. _NEST: http://www.nest-initiative.org/ 25 | .. _`Assorted bug fixes`: https://github.com/NeuralEnsemble/PyNN/issues?q=is%3Aclosed+closed%3A2015-01-07..2015-08-19+milestone%3A0.8.0 -------------------------------------------------------------------------------- /doc/releases/0.8.1.txt: -------------------------------------------------------------------------------- 1 | ======================== 2 | PyNN 0.8.1 release notes 3 | ======================== 4 | 5 | 25th May 2016 6 | 7 | Welcome to PyNN 0.8.1! 8 | 9 | 10 | NEST 2.10 11 | --------- 12 | 13 | This release introduces support for NEST_ 2.10. Previous versions of NEST are no longer supported. 14 | 15 | Other changes 16 | ------------- 17 | 18 | * `Assorted bug fixes`_ 19 | 20 | 21 | 22 | .. _NEST: http://www.nest-simulator.org/ 23 | .. _`Assorted bug fixes`: https://github.com/NeuralEnsemble/PyNN/issues?q=is%3Aclosed+milestone%3A0.8.1 -------------------------------------------------------------------------------- /doc/releases/0.8.2.txt: -------------------------------------------------------------------------------- 1 | ======================== 2 | PyNN 0.8.2 release notes 3 | ======================== 4 | 5 | 6th December 2016 6 | 7 | Welcome to PyNN 0.8.2! 8 | 9 | 10 | New spike sources 11 | ----------------- 12 | 13 | Two new spike source models were added, with implementations for the NEST and NEURON backends: 14 | :class:`SpikeSourceGamma` (spikes follow a gamma process) and :class:`SpikeSourcePoissonRefractory` 15 | (inter-spike intervals are drawn from an exponential distribution as for a Poisson process, 16 | but there is a fixed refractory period after each spike during which no spike can occur). 17 | 18 | Other changes 19 | ------------- 20 | 21 | * Changed the :func:`save_positions()` format from ``id x y z`` to ``index x y z`` to make it simulator independent. 22 | * Added histograms to the :mod:`utility.plotting` module. 23 | * Added a `multiple_synapses` flag to :func:`Projection.get(..., format="array")` to control how 24 | synaptic parameters are combined when there are multiple connections between pairs of neurons. 25 | Until now, parameters were summed, which made sense for weights but not for delays. 26 | We have adopted the Brian_ approach of adding an argument ``multiple_synapses`` which is one 27 | of ``{'last', 'first', 'sum', 'min', 'max'}``. The default is ``sum``. 28 | * `Assorted bug fixes`_ 29 | 30 | 31 | 32 | .. _Brian: http://briansimulator.org 33 | .. _`Assorted bug fixes`: https://github.com/NeuralEnsemble/PyNN/issues?q=is%3Aclosed+milestone%3A0.8.2 -------------------------------------------------------------------------------- /doc/releases/0.8.3.txt: -------------------------------------------------------------------------------- 1 | ======================== 2 | PyNN 0.8.3 release notes 3 | ======================== 4 | 5 | 8th March 2017 6 | 7 | Welcome to PyNN 0.8.3! 8 | 9 | 10 | NeuroML 2 11 | --------- 12 | 13 | The :mod:`neuroml` module has been completely rewritten, and updated from NeuroML v1 to v2. 14 | This module works like other PyNN "backends", i.e. ``import pyNN.neuroml as sim``... 15 | but instead of running a simulation, it exports the network to an XML file in NeuroML format. 16 | 17 | NEST 2.12 18 | --------- 19 | 20 | This release introduces support for NEST_ 2.12. Previous versions of NEST are no longer supported. 
21 | 22 | 23 | Other changes 24 | ------------- 25 | 26 | * `A couple of bug fixes`_ 27 | 28 | 29 | 30 | .. _Brian: http://briansimulator.org 31 | .. _NEST: http://nest-simulator.org 32 | .. _`A couple of bug fixes`: https://github.com/NeuralEnsemble/PyNN/issues?q=is%3Aclosed+milestone%3A0.8.3 -------------------------------------------------------------------------------- /doc/releases/0.9.0.txt: -------------------------------------------------------------------------------- 1 | ======================== 2 | PyNN 0.9.0 release notes 3 | ======================== 4 | 5 | April 12th 2017 6 | 7 | Welcome to PyNN 0.9.0! 8 | 9 | This version of PyNN adopts the new, simplified Neo_ object model, first released as Neo 0.5.0, 10 | for the data structures returned by :class:`Population.get_data()`. 11 | For more information on the new Neo API, see the `release notes`_. 12 | 13 | The main difference for a PyNN user is that the :class:`AnalogSignalArray` class has been renamed 14 | to :class:`AnalogSignal`, and similarly the :attr:`Segment.analogsignalarrays` attribute is now 15 | called :attr:`Segment.analogsignals`. 16 | 17 | In addition, a `number of bugs`_ with current injection for the :mod:`pyNN.brian` module have been fixed. 18 | 19 | .. _Neo: http://neuralensemble.org/neo 20 | .. _`release notes`: http://neo.readthedocs.io/en/0.5.0/releases/0.5.0.html 21 | .. _`number of bugs`: https://github.com/NeuralEnsemble/PyNN/issues?q=is%3Aissue+milestone%3A0.9.0+is%3Aclosed 22 | -------------------------------------------------------------------------------- /doc/releases/0.9.1.txt: -------------------------------------------------------------------------------- 1 | ======================== 2 | PyNN 0.9.1 release notes 3 | ======================== 4 | 5 | May 4th 2017 6 | 7 | Welcome to PyNN 0.9.1! 8 | 9 | 10 | Stochastic synapses 11 | ------------------- 12 | 13 | This release adds three new standard synapse models, available for the NEST and NEURON simulators. They are: 14 | 15 | * :class:`SimpleStochasticSynapse` - each spike is transmitted with a probability `p`. 16 | * :class:`StochasticTsodyksMarkramSynapse` - synapse exhibiting facilitation and depression, implemented using the model 17 | of Tsodyks, Markram et al., in its stochastic version. 18 | * :class:`MultiQuantalSynapse` - synapse exhibiting facilitation and depression with multiple quantal release sites. 19 | 20 | There are some new example scripts which demonstrate use of the synapse models - see :doc:`../examples/stochastic_synapses` 21 | and :doc:`../examples/stochastic_deterministic_comparison`. 22 | 23 | Note that the new models require building a NEST extension; 24 | this is done automatically during installation (when running :command:`pip install` or :command:`setup.py install`). 25 | 26 | 27 | Bug fixes 28 | --------- 29 | 30 | A `number of bugs`_ have been fixed. 31 | 32 | .. _`number of bugs`: https://github.com/NeuralEnsemble/PyNN/issues?q=is%3Aissue+milestone%3A0.9.1+is%3Aclosed 33 | -------------------------------------------------------------------------------- /doc/releases/0.9.2.txt: -------------------------------------------------------------------------------- 1 | ======================== 2 | PyNN 0.9.2 release notes 3 | ======================== 4 | 5 | November 22nd 2017 6 | 7 | Welcome to PyNN 0.9.2! 8 | 9 | 10 | Recording injected currents 11 | --------------------------- 12 | 13 | It is now possible to record the injected current from :class:`CurrentSource` objects in PyNN, for example: 14 | 15 | .. 
code-block:: python 16 | 17 | noise = sim.NoisyCurrentSource(mean=0.5, stdev=0.2, start=50.0, stop=450.0, dt=1.0) 18 | noise.record() 19 | 20 | sim.run(500.0) 21 | 22 | signal = noise.get_data() 23 | 24 | The returned signal object is a Neo :class:`AnalogSignal`. 25 | 26 | Python 2.6 27 | ---------- 28 | 29 | As of this version, PyNN no longer supports Python 2.6. 30 | 31 | 32 | NEST 2.14.0 and NEURON 7.5 33 | -------------------------- 34 | 35 | PyNN 0.9.1 now supports the latest versions of NEST and NEURON. 36 | NEURON 7.4 is also still supported. 37 | NEST 2.12.0 should still work in most circumstances, 38 | but current recording (see above) requires a more recent version. 39 | 40 | 41 | native_electrode_type 42 | --------------------- 43 | 44 | It has been possible for some time to use native (NEST-specific) neuron and synapse models with :mod:`pyNN.nest`. 45 | It is now also possible to use native current generator models, e.g.: 46 | 47 | .. code-block:: python 48 | 49 | noise = sim.native_electrode_type('noise_generator')(mean=500.0, std=200.0, start=50.0, 50 | stop=450.0, dt=1.0) 51 | 52 | Bug fixes 53 | --------- 54 | 55 | A `number of bugs`_ have been fixed. 56 | 57 | .. _`number of bugs`: https://github.com/NeuralEnsemble/PyNN/issues?q=is%3Aissue+milestone%3A0.9.2+is%3Aclosed 58 | -------------------------------------------------------------------------------- /doc/releases/0.9.4.txt: -------------------------------------------------------------------------------- 1 | ======================== 2 | PyNN 0.9.4 release notes 3 | ======================== 4 | 5 | March 22nd 2019 6 | 7 | Welcome to PyNN 0.9.4! 8 | 9 | 10 | SONATA 11 | ------- 12 | 13 | SONATA_ is a data format for representing/storing data-driven spiking neuronal network models, experimental protocols 14 | (injecting spikes, currents) and simulation outputs. 15 | 16 | In the network representation, all connections are represented explicity, as in PyNN’s 17 | :class:`FromFileConnector` and :class:`FromListConnector`. 18 | 19 | A PyNN model/simulation script can be exported in SONATA format, 20 | and a SONATA model/simulation can be read and executed through PyNN 21 | provided the cell types used in the model are compatible with PyNN, 22 | i.e. they must be point neurons. 23 | 24 | For more information on working with the SONATA format, see :ref:`sec-sonata`. 25 | 26 | 27 | Bug fixes and performance improvements 28 | -------------------------------------- 29 | 30 | A `small number of bugs`_ have been fixed, and the documentation clarified in a few places. 31 | 32 | .. _SONATA: https://github.com/AllenInstitute/sonata 33 | .. _`small number of bugs`: https://github.com/NeuralEnsemble/PyNN/issues?q=milestone%3A0.9.4+is%3Aclosed 34 | -------------------------------------------------------------------------------- /doc/releases/0.9.5.txt: -------------------------------------------------------------------------------- 1 | ======================== 2 | PyNN 0.9.5 release notes 3 | ======================== 4 | 5 | December 5th 2019 6 | 7 | Welcome to PyNN 0.9.5! 8 | 9 | 10 | NEST 2.18.0 11 | ----------- 12 | 13 | PyNN now supports the latest version of NEST. 14 | 15 | 16 | Bug fixes 17 | --------- 18 | 19 | A `small number of bugs`_ have been fixed. 20 | 21 | .. 
_`small number of bugs`: https://github.com/NeuralEnsemble/PyNN/issues?q=milestone%3A0.9.5+is%3Aclosed 22 | -------------------------------------------------------------------------------- /doc/releases/0.9.6.txt: -------------------------------------------------------------------------------- 1 | ======================== 2 | PyNN 0.9.6 release notes 3 | ======================== 4 | 5 | December 17th 2020 6 | 7 | Welcome to PyNN 0.9.6! 8 | 9 | 10 | NEST 2.20.0 11 | ----------- 12 | 13 | PyNN now supports the latest version of NEST_. 14 | 15 | Beta support for Brian 2 16 | ------------------------ 17 | 18 | PyNN now supports running simulations with `Brian 2`_. 19 | A few small bugs remain, so we regard this as a preview release for wider testing. 20 | 21 | End of support for Python 2.7 and Brian 1 22 | ----------------------------------------- 23 | 24 | This is the last release with support for Python 2.7 (and therefore, for Brian 1). 25 | 26 | Bug fixes 27 | --------- 28 | 29 | A `small number of bugs`_ have been fixed. 30 | 31 | .. _`small number of bugs`: https://github.com/NeuralEnsemble/PyNN/issues?q=milestone%3A0.9.6+is%3Aclosed 32 | .. _NEST: https://www.nest-simulator.org 33 | .. _`Brian 2`: https://brian2.readthedocs.io -------------------------------------------------------------------------------- /doc/roadmap.txt: -------------------------------------------------------------------------------- 1 | ======= 2 | Roadmap 3 | ======= 4 | 5 | 6 | NineML/NeuroML model definitions 7 | -------------------------------- 8 | 9 | 10 | Multi-compartmental models 11 | -------------------------- 12 | 13 | 14 | NineML/NeuroML export 15 | --------------------- 16 | 17 | 18 | API simplification 19 | ------------------ 20 | 21 | 22 | Multi-simulator models with MUSIC 23 | --------------------------------- 24 | 25 | .. code-block:: python 26 | 27 | from pyNN import music 28 | music.setup({“neuron”: 10, “nest”: 20}) 29 | sim1, sim2 = music.get_simulators() 30 | sim1.setup(timestep=0.025) 31 | sim2.setup(timestep=0.1) 32 | cell_parameters = {”tau_m”: 12.0, ”cm”: 0.8, ”v_thresh”: -50.0, 33 | ”v_reset”: -65.0} 34 | pE = sim1.Population((100,100), sim.IF_cond_exp, cell_parameters, 35 | label=”excitatory neurons”) 36 | pI = sim2.Population((50,50), sim.IF_cond_exp, cell_parameters, 37 | label=”inhibitory neurons”) 38 | all = pE + pI 39 | DDPC = sim.DistanceDependentProbabilityConnector 40 | connector = DDPC(”exp(-d**2/400.0)”, weights=0.05, 41 | delays=”0.5+0.01d”) 42 | e2e = sim1.Projection(pE, pE, connector, target=”excitatory”) 43 | e2i = music.Projection(pE, pI, connector, target=”excitatory”) 44 | i2i = sim2.Projection(pI, pI, connector, target=”inhibitory”) 45 | 46 | music.run(1000.0) 47 | 48 | The concept here is that PyNN takes over the rôle of the *music* executable, 49 | `music.setup()` launches the requested number of MPI processes for each simulator, 50 | and then the same script runs on each of these processes. On the processes running 51 | `sim1`, `sim1` is the real backend module, as in a normal, non-MUSIC PyNN script, 52 | while `sim2` is a no-op proxy object. Vice versa on processes running `sim2`. 53 | For connections between different simulators, a `music.Projection` instance is 54 | needed, which takes care of defining the MUSIC ports. 
55 | -------------------------------------------------------------------------------- /doc/units.txt: -------------------------------------------------------------------------------- 1 | ===== 2 | Units 3 | ===== 4 | 5 | PyNN does not at present support explicit specification of the units of 6 | physical quantities (parameters and initial values). Instead, the following 7 | convention is used: 8 | 9 | ================= ===== 10 | Physical quantity Units 11 | ================= ===== 12 | time ms 13 | voltage mV 14 | current nA 15 | conductance µS 16 | capacitance nF 17 | firing rate /s 18 | phase/angle deg 19 | ================= ===== 20 | 21 | Synaptic weights are in microsiemens or nanoamps, depending on whether the post-synaptic mechanism implements a change in conductance or current. 22 | Distances are typically in microns, but any distance scale can be used, provided it is used consistently. 23 | -------------------------------------------------------------------------------- /examples/HH_cond_exp2.py: -------------------------------------------------------------------------------- 1 | """ 2 | A single-compartment Hodgkin-Huxley neuron with exponential, conductance-based 3 | synapses, fed by a current injection. 4 | 5 | Run as: 6 | 7 | $ python HH_cond_exp2.py 8 | 9 | where is 'neuron', 'nest', etc 10 | 11 | Andrew Davison, UNIC, CNRS 12 | March 2010 13 | 14 | """ 15 | 16 | from pyNN.utility import get_script_args 17 | 18 | make_plot = True 19 | 20 | simulator_name = get_script_args(1)[0] 21 | exec("from pyNN.%s import *" % simulator_name) 22 | 23 | 24 | setup(timestep=0.001, min_delay=0.1) 25 | 26 | cellparams = { 27 | 'gbar_Na': 20.0, 28 | 'gbar_K': 6.0, 29 | 'g_leak': 0.01, 30 | 'cm': 0.2, 31 | 'v_offset': -63.0, 32 | 'e_rev_Na': 50.0, 33 | 'e_rev_K': -90.0, 34 | 'e_rev_leak': -65.0, 35 | 'e_rev_E': 0.0, 36 | 'e_rev_I': -80.0, 37 | 'tau_syn_E': 0.2, 38 | 'tau_syn_I': 2.0, 39 | 'i_offset': 1.0, 40 | } 41 | 42 | hhcell = create(HH_cond_exp(**cellparams)) 43 | initialize(hhcell, v=-64.0) 44 | record('v', hhcell, "Results/HH_cond_exp2_%s.pkl" % simulator_name) 45 | 46 | var_names = { 47 | 'neuron': {'m': "seg.m_hh_traub", 'h': "seg.h_hh_traub", 'n': "seg.n_hh_traub"}, 48 | 'brian': {'m': 'm', 'h': 'h', 'n': 'n'}, 49 | } 50 | if simulator_name in var_names: 51 | hhcell.can_record = lambda x: True # hack 52 | for native_name in var_names[simulator_name].values(): 53 | hhcell.record(native_name) 54 | hhcell.celltype.units[native_name] = '' 55 | 56 | run(20.0) 57 | 58 | if make_plot: 59 | import matplotlib.pyplot as plt 60 | #pylab.rcParams['interactive'] = True 61 | plt.ion() 62 | 63 | data = hhcell.get_data() 64 | signal_names = [s.name for s in data.segments[0].analogsignals] 65 | vm = data.segments[0].analogsignals[signal_names.index('v')] 66 | plt.plot(vm.times, vm) 67 | plt.xlabel("time (ms)") 68 | plt.ylabel("Vm (mV)") 69 | 70 | if simulator_name in var_names: 71 | plt.figure(2) 72 | for var_name, native_name in var_names[simulator_name].items(): 73 | signal = data.segments[0].analogsignals[signal_names.index(native_name)] 74 | plt.plot(signal.times, signal, label=var_name) 75 | plt.xlabel("time (ms)") 76 | plt.legend() 77 | 78 | end() 79 | -------------------------------------------------------------------------------- /examples/Izhikevich.py: -------------------------------------------------------------------------------- 1 | """ 2 | A selection of Izhikevich neurons. 3 | 4 | Run as: 5 | 6 | $ python Izhikevich.py 7 | 8 | where is 'neuron', 'nest', etc. 
9 | 10 | """ 11 | 12 | from numpy import arange 13 | from pyNN.utility import get_simulator, init_logging, normalized_filename 14 | 15 | 16 | # === Configure the simulator ================================================ 17 | 18 | sim, options = get_simulator(("--plot-figure", "Plot the simulation results to a file.", {"action": "store_true"}), 19 | ("--debug", "Print debugging information")) 20 | 21 | if options.debug: 22 | init_logging(None, debug=True) 23 | 24 | sim.setup(timestep=0.01, min_delay=1.0) 25 | 26 | 27 | # === Build and instrument the network ======================================= 28 | 29 | neurons = sim.Population(3, sim.Izhikevich(a=0.02, b=0.2, c=-65, d=6, i_offset=[0.014, 0.0, 0.0])) 30 | spike_source = sim.Population(1, sim.SpikeSourceArray(spike_times=arange(10.0, 51, 1))) 31 | 32 | connection = sim.Projection(spike_source, neurons[1:2], sim.OneToOneConnector(), 33 | sim.StaticSynapse(weight=3.0, delay=1.0), 34 | receptor_type='excitatory'), 35 | 36 | electrode = sim.DCSource(start=2.0, stop=92.0, amplitude=0.014) 37 | electrode.inject_into(neurons[2:3]) 38 | 39 | neurons.record(['v']) # , 'u']) 40 | neurons.initialize(v=-70.0, u=-14.0) 41 | 42 | 43 | # === Run the simulation ===================================================== 44 | 45 | sim.run(100.0) 46 | 47 | 48 | # === Save the results, optionally plot a figure ============================= 49 | 50 | filename = normalized_filename("Results", "Izhikevich", "pkl", 51 | options.simulator, sim.num_processes()) 52 | neurons.write_data(filename, annotations={'script_name': __file__}) 53 | 54 | if options.plot_figure: 55 | from pyNN.utility.plotting import Figure, Panel 56 | figure_filename = filename.replace("pkl", "png") 57 | data = neurons.get_data().segments[0] 58 | v = data.filter(name="v")[0] 59 | #u = data.filter(name="u")[0] 60 | Figure( 61 | Panel(v, ylabel="Membrane potential (mV)", xticks=True, 62 | xlabel="Time (ms)", yticks=True), 63 | #Panel(u, ylabel="u variable (units?)"), 64 | annotations="Simulated with %s" % options.simulator.upper() 65 | ).save(figure_filename) 66 | print(figure_filename) 67 | 68 | 69 | # === Clean up and quit ======================================================== 70 | 71 | sim.end() 72 | -------------------------------------------------------------------------------- /examples/Potjans2014/connectivity.py: -------------------------------------------------------------------------------- 1 | ################################################### 2 | ### Connection routine ### 3 | ################################################### 4 | 5 | import numpy as np 6 | from network_params import * 7 | from pyNN.random import RandomDistribution 8 | 9 | 10 | def FixedTotalNumberConnect(sim, pop1, pop2, K, w_mean, w_sd, d_mean, d_sd): 11 | n_syn = int(round(K * len(pop2))) 12 | conn = sim.FixedTotalNumberConnector(n_syn) 13 | d_distr = RandomDistribution('normal_clipped', [d_mean, d_sd, 0.1, np.inf]) 14 | if pop1.annotations['type'] == 'E': 15 | conn_type = 'excitatory' 16 | w_distr = RandomDistribution('normal_clipped', [w_mean, w_sd, 0., np.inf]) 17 | else: 18 | conn_type = 'inhibitory' 19 | w_distr = RandomDistribution('normal_clipped', [w_mean, w_sd, -np.inf, 0.]) 20 | 21 | syn = sim.StaticSynapse(weight=w_distr, delay=d_distr) 22 | proj = sim.Projection(pop1, pop2, conn, syn, receptor_type=conn_type) 23 | -------------------------------------------------------------------------------- /examples/Potjans2014/run_microcircuit.py: 
-------------------------------------------------------------------------------- 1 | from sim_params import system_params 2 | import os 3 | import shutil 4 | 5 | # Creates output folder if it does not exist yet, creates sim_script.sh, 6 | # and submits it to the queue 7 | 8 | system_params['num_mpi_procs'] = system_params['n_nodes'] * system_params['n_procs_per_node'] 9 | 10 | # Copy simulation scripts to output directory 11 | try: 12 | os.mkdir(system_params['output_path']) 13 | except OSError: 14 | pass 15 | 16 | shutil.copy('network_params.py', system_params['output_path']) 17 | shutil.copy('sim_params.py', system_params['output_path']) 18 | shutil.copy('microcircuit.py', system_params['output_path']) 19 | shutil.copy('network.py', system_params['output_path']) 20 | shutil.copy('connectivity.py', system_params['output_path']) 21 | shutil.copy('scaling.py', system_params['output_path']) 22 | shutil.copy('plotting.py', system_params['output_path']) 23 | 24 | job_script_template = """ 25 | #PBS -o %(output_path)s/%(outfile)s 26 | #PBS -e %(output_path)s/%(errfile)s 27 | #PBS -l walltime=%(walltime)s 28 | #PBS -l nodes=%(n_nodes)d:ppn=%(n_procs_per_node)d 29 | #PBS -q intel 30 | #PBS -l mem=%(memory)s 31 | . %(mpi_path)s 32 | mpirun -np %(num_mpi_procs)d python %(output_path)s/microcircuit.py 33 | """ 34 | 35 | f = open(system_params['output_path'] + '/sim_script.sh', 'w') 36 | f.write(job_script_template % system_params) 37 | f.close() 38 | 39 | os.system('cd %(output_path)s && %(submit_cmd)s sim_script.sh' % system_params) 40 | -------------------------------------------------------------------------------- /examples/Potjans2014/sim_params.py: -------------------------------------------------------------------------------- 1 | ################################################### 2 | ### Simulation parameters ### 3 | ################################################### 4 | 5 | simulator_params = { 6 | 'nest': 7 | { 8 | 'timestep': 0.1, # ms 9 | 'threads': 1, 10 | 'sim_duration': 1000., # ms 11 | } 12 | } 13 | 14 | system_params = { 15 | # number of MPI nodes 16 | 'n_nodes': 1, 17 | # number of MPI processes per node 18 | 'n_procs_per_node': 2, 19 | # walltime for simulation 20 | 'walltime': '8:0:0', 21 | # total memory for simulation 22 | 'memory': '4gb', 23 | 24 | # file name for standard output 25 | 'outfile': 'output.txt', 26 | # file name for error output 27 | 'errfile': 'errors.txt', 28 | # absolute path to which the output files should be written 29 | 'output_path': 'results', 30 | # path to the MPI shell script 31 | 'mpi_path': '', 32 | # path to back-end 33 | 'backend_path': '', 34 | # path to pyNN installation 35 | 'pyNN_path': '', 36 | # command for submitting the job 37 | 'submit_cmd': 'qsub' 38 | } 39 | -------------------------------------------------------------------------------- /examples/README: -------------------------------------------------------------------------------- 1 | ------------------------------- 2 | - RUNNING EXAMPLES 3 | ------------------------------- 4 | 5 | 1. If you want to run all the examples with a specific simulator (nest, neuron, brian or mock), you write the following commands: 6 | 7 | > cd tools 8 | > python run_all_examples.py NEST/NEURON/Brian/MOCK 9 | 10 | The logs, results and the plots as png files will all be put in tools/Results/*. 11 | 12 | Note that 'mock' is a dummy backend, used to check the syntax of your PyNN code. 13 | 14 | 15 | 2. 
If you want to run all the examples with all the available simulators, just run the run_all_examples.py script without argument: 16 | 17 | > cd tools 18 | > python run_all_examples.py 19 | 20 | The logs, results and the plots as png files will all be put in tools/Results/*. 21 | -------------------------------------------------------------------------------- /examples/StepCurrentSource.py: -------------------------------------------------------------------------------- 1 | """ 2 | Simple test of injecting time-varying current into a cell 3 | 4 | Andrew Davison, UNIC, CNRS 5 | May 2009 6 | 7 | """ 8 | 9 | from pyNN.utility import get_script_args, normalized_filename 10 | 11 | simulator_name = get_script_args(1)[0] 12 | exec("from pyNN.%s import *" % simulator_name) 13 | 14 | setup() 15 | 16 | cell = create(IF_curr_exp(v_thresh=-55.0, tau_refrac=5.0)) 17 | current_source = StepCurrentSource(times=[50.0, 110.0, 150.0, 210.0], 18 | amplitudes=[0.4, 0.6, -0.2, 0.2]) 19 | cell.inject(current_source) 20 | 21 | filename = normalized_filename("Results", "StepCurrentSource", "pkl", simulator_name) 22 | record('v', cell, filename, annotations={'script_name': __file__}) 23 | run(250.0) 24 | 25 | end() 26 | -------------------------------------------------------------------------------- /examples/distrib_example.py: -------------------------------------------------------------------------------- 1 | 2 | from mpi4py import MPI 3 | from pyNN.utility import get_script_args 4 | 5 | import sys 6 | import numpy as np 7 | 8 | simulator = get_script_args(1)[0] 9 | exec("import pyNN.%s as sim" % simulator) 10 | 11 | 12 | comm = MPI.COMM_WORLD 13 | 14 | sim.setup(debug=True) 15 | 16 | print("\nThis is node %d (%d of %d)" % (sim.rank(), sim.rank() + 1, sim.num_processes())) 17 | assert comm.rank == sim.rank() 18 | assert comm.size == sim.num_processes() 19 | 20 | data1 = np.empty(100, dtype=float) 21 | if comm.rank == 0: 22 | data1 = np.arange(100, dtype=float) 23 | else: 24 | pass 25 | comm.Bcast([data1, MPI.DOUBLE], root=0) 26 | print(comm.rank, data1) 27 | 28 | data2 = np.arange(comm.rank, 10 + comm.rank, dtype=float) 29 | print(comm.rank, data2) 30 | data2g = np.empty(10 * comm.size) 31 | comm.Gather([data2, MPI.DOUBLE], [data2g, MPI.DOUBLE], root=0) 32 | if comm.rank == 0: 33 | print("gathered (2):", data2g) 34 | 35 | data3 = np.arange(0, 5 * (comm.rank + 1), dtype=float) 36 | print(comm.rank, data3) 37 | if comm.rank == 0: 38 | sizes = range(5, 5 * comm.size + 1, 5) 39 | disp = [size - 5 for size in sizes] 40 | data3g = np.empty(sum(sizes)) 41 | else: 42 | sizes = disp = [] 43 | data3g = np.empty([]) 44 | comm.Gatherv([data3, data3.size, MPI.DOUBLE], [data3g, (sizes, disp), MPI.DOUBLE], root=0) 45 | if comm.rank == 0: 46 | print("gathered (3):", data3g) 47 | 48 | 49 | def gather(data): 50 | assert isinstance(data, np.ndarray) 51 | # first we pass the data size 52 | size = data.size 53 | sizes = comm.gather(size, root=0) or [] 54 | # now we pass the data 55 | displacements = [sum(sizes[:i]) for i in range(len(sizes))] 56 | print(comm.rank, "sizes=", sizes, "displacements=", displacements) 57 | gdata = np.empty(sum(sizes)) 58 | comm.Gatherv([data, size, MPI.DOUBLE], [gdata, (sizes, displacements), MPI.DOUBLE], root=0) 59 | return gdata 60 | 61 | 62 | data3g = gather(data3) 63 | if comm.rank == 0: 64 | print("gathered (3, again):", data3g) 65 | 66 | 67 | sim.end() 68 | -------------------------------------------------------------------------------- /examples/iaf_sfa_relref/README: 
-------------------------------------------------------------------------------- 1 | This directory contains example scripts using the PyNN IF_cond_exp_gsfa_grr 2 | neuron model. 3 | 4 | Specifically, it includes a PyNN implementation of the model described in: 5 | 6 | Muller, E., Meier, K., & Schemmel, J. (2004). Methods for simulating 7 | high-conductance states in neural microcircuits. Proc. of BICS2004. 8 | 9 | The excitatory neuron parameterization was then the subject of an 10 | analytical study using the adapting Markov process in: 11 | 12 | Muller, E., Buesing, L., Schemmel, J., & Meier, K. (2007). Spike-frequency 13 | adapting neural ensembles: Beyond mean adaptation and renewal theories. 14 | Neural Computation, 19, 2958-3010. 15 | 16 | The standard parameters are in: standard_neurons.yaml 17 | 18 | The iaf_sfa_network.py is a MPI enabled simulation of a 10x10x10 19 | lattice modeling cortical layer 4, as described in Muller 20 | et. al. 2004 (above), with a transient step increase in stimulation at 21 | 1s<=t<1.2s, and can be run as follows: 22 | 23 | $ /opt/mpich2/bin/mpiexec -n 4 python iaf_sfa_network.py 24 | 25 | It produces an output figure as myFigure.pdf, which can be compared to 26 | the expected output myFigure_expected.pdf. It is interesting to 27 | Increase the connection factor ICFactorE_E = 0.12 to something like 28 | 0.16 or 0.2, and observe the spontaneous ~10Hz oscillatory activity 29 | that results from excitatory avalanches due to the network being in a 30 | supra-critical excitatory feedback regime. The period of network 31 | oscillations/bursts is determined by the time-constant of 32 | Spike-Frequency Adapation, an intrinsic neuronal self-inhibition 33 | mechanism which transiently prevents the avalanches, and thus causes 34 | them to occur at regular intervals. 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /examples/iaf_sfa_relref/myFigure_expected.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/examples/iaf_sfa_relref/myFigure_expected.pdf -------------------------------------------------------------------------------- /examples/iaf_sfa_relref/standard_neurons.yaml: -------------------------------------------------------------------------------- 1 | # These are the parameters for the excitatory and inhibitory neurons used in: 2 | # 3 | # Muller, E., Buesing, L., Schemmel, J., & Meier, K. (2007). Spike-frequency 4 | # adapting neural ensembles: Beyond mean adaptation and renewal theories. 5 | # Neural Computation, 19, 2958-3010. 6 | # 7 | # and 8 | # 9 | # Muller, E., Meier, K., & Schemmel, J. (2004). Methods for simulating 10 | # high-conductance states in neural microcircuits. Proc. of BICS2004. 
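# For illustration: the keys below correspond to parameters of the PyNN
# IF_cond_exp_gsfa_grr standard model, so a simulation script might load them
# along these lines (the exact code in iaf_sfa_network.py may differ):
#
#   import yaml
#   import pyNN.nest as sim
#   with open("standard_neurons.yaml") as f:
#       params = yaml.safe_load(f)
#   excitatory_cell_type = sim.IF_cond_exp_gsfa_grr(**params["excitatory"])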
11 | 12 | excitatory: 13 | cm: 0.2895 14 | tau_m: 10.0 15 | v_reset: -70.0 16 | v_rest: -70.0 17 | v_thresh: -57.0 18 | e_rev_E: 0.0 19 | e_rev_I: -75.0 20 | tau_syn_E: 1.5 21 | tau_syn_I: 10.0 22 | tau_refrac: 0.1 23 | e_rev_rr: -70.0 24 | e_rev_sfa: -70.0 25 | q_rr: 3214.0 26 | q_sfa: 14.48 27 | tau_rr: 1.97 28 | tau_sfa: 110.0 29 | inhibitory: 30 | cm: 0.141 31 | e_rev_E: 0.0 32 | e_rev_I: -75.0 33 | e_rev_rr: -70.0 34 | e_rev_sfa: -70.0 35 | q_rr: 1565.0 36 | q_sfa: 0.0 37 | tau_m: 6.664 38 | tau_refrac: 0.1 39 | tau_rr: 1.97 40 | tau_sfa: 110.0 41 | tau_syn_E: 1.5 42 | tau_syn_I: 10.0 43 | v_reset: -70.0 44 | v_rest: -70.0 45 | v_thresh: -54.5 46 | -------------------------------------------------------------------------------- /examples/mc/NMLCL000641/Ih.channel.nml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | NeuroML file containing a single Channel description 5 | 6 | 7 | 8 | Non-specific cation current 9 | 10 | Comment from original mod file: 11 | Reference : : Kole,Hallermann,and Stuart, J. Neurosci. 2006 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | Models of Neocortical Layer 5b Pyramidal Cells Capturing a Wide Range of Dendritic and Perisomatic Active Properties, 20 | Etay Hay, Sean Hill, Felix Schürmann, Henry Markram and Idan Segev, PLoS Comp Biol 2011 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /examples/mc/NMLCL000641/Im.channel.nml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | NeuroML file containing a single Channel description 5 | 6 | 7 | 8 | Muscarinic K+ current 9 | 10 | Comment from original mod file: 11 | :Reference : : Adams et al. 1982 - M-currents and other potassium currents in bullfrog sympathetic neurones 12 | :Comment: corrected rates using q10 = 2.3, target temperature 34, orginal 21 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | Models of Neocortical Layer 5b Pyramidal Cells Capturing a Wide Range of Dendritic and Perisomatic Active Properties, 21 | Etay Hay, Sean Hill, Felix Schürmann, Henry Markram and Idan Segev, PLoS Comp Biol 2011 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | K channels 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /examples/mc/NMLCL000641/neuroml_cell.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | 4 | """ 5 | 6 | 7 | import pyNN.neuron as sim 8 | 9 | sim.setup() 10 | 11 | cell_types = sim.neuroml.load_neuroml_cell_types("cADpyr229_L23_PC_c2e79db05a_0_0.cell.nml") 12 | cell_type = cell_types[0] 13 | 14 | population = sim.Population(1, cell_type()) 15 | -------------------------------------------------------------------------------- /examples/mc/NMLCL000641/pas.channel.nml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | NeuroML file containing a single Channel description 5 | 6 | 7 | 8 | Simple example of a leak/passive conductance. Note: for GENESIS cells with a single leak conductance, 9 | it is better to use the Rm and Em variables for a passive current. 
10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /examples/parameter_changes.py: -------------------------------------------------------------------------------- 1 | """ 2 | Example of changing the value of a neuron parameter (the adaptation conductance "a" of the EIF_cond_exp_isfa_ista model) part-way through a simulation. 3 | """ 4 | 5 | from pyNN.utility import get_simulator 6 | sim, options = get_simulator() 7 | 8 | sim.setup(timestep=0.01) 9 | 10 | cell = sim.Population(1, sim.EIF_cond_exp_isfa_ista(v_thresh=-55.0, tau_refrac=5.0)) 11 | current_source = sim.StepCurrentSource(times=[50.0, 200.0, 250.0, 400.0, 450.0, 600.0, 650.0, 800.0], 12 | amplitudes=[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]) 13 | cell.inject(current_source) 14 | cell.record('v') 15 | 16 | for a in (0.0, 4.0, 20.0, 100.0): 17 | print("Setting adaptation conductance a to %g nS" % a) 18 | cell.set(a=a) 19 | sim.run(200.0) 20 | 21 | cell.write_data("Results/parameter_changes_%s.pkl" % options.simulator) 22 | 23 | sim.end() 24 | -------------------------------------------------------------------------------- /examples/random_distributions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Illustration of the different standard random distributions and different random number generators 3 | 4 | """ 5 | 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | import matplotlib.gridspec as gridspec 9 | import scipy.stats 10 | import pyNN.random as random 11 | 12 | try: 13 | from neuron import h 14 | except ImportError: 15 | have_nrn = False 16 | else: 17 | have_nrn = True 18 | from pyNN.neuron.random import NativeRNG 19 | 20 | n = 100000 21 | nbins = 100 22 | 23 | rnglist = [random.NumpyRNG(seed=984527)] 24 | if random.have_gsl: 25 | rnglist.append(random.GSLRNG(seed=668454)) 26 | if have_nrn: 27 | rnglist.append(NativeRNG(seed=321245)) 28 | 29 | 30 | cases = ( 31 | ("uniform", {"low": -65, "high": -55}, (-65, -55), scipy.stats.uniform(loc=-65, scale=10)), 32 | ("gamma", {"k": 2.0, "theta": 0.5}, (0, 5), scipy.stats.gamma(2.0, loc=0.0, scale=0.5)), 33 | ("normal", {"mu": -1.0, "sigma": 0.5}, (-3, 1), scipy.stats.norm(loc=-1, scale=0.5)), 34 | ("exponential", {'beta': 10.0}, (0, 50), scipy.stats.expon(loc=0, scale=10)), 35 | ("normal_clipped", {"mu": 0.5, "sigma": 0.5, "low": 0, "high": 10}, (-0.5, 3.0), None), 36 | ) 37 | 38 | fig = plt.figure(1) 39 | rows = len(cases) 40 | cols = len(rnglist) 41 | 42 | settings = { 43 | 'lines.linewidth': 0.5, 44 | 'axes.linewidth': 0.5, 45 | 'axes.labelsize': 'small', 46 | 'axes.titlesize': 'small', 47 | 'legend.fontsize': 'small', 48 | 'font.size': 8, 49 | 'savefig.dpi': 150, 50 | } 51 | plt.rcParams.update(settings) 52 | width, height = (2 * cols, 2 * rows) 53 | fig = plt.figure(1, figsize=(width, height)) 54 | gs = gridspec.GridSpec(rows, cols) 55 | gs.update(hspace=0.4) 56 | 57 | 58 | for i, case in enumerate(cases): 59 | distribution, parameters, xlim, rv = case 60 | bins = np.linspace(*xlim, num=nbins) 61 | for j, rng in enumerate(rnglist): 62 | rd = random.RandomDistribution(distribution, rng=rng, **parameters) 63 | values = rd.next(n) 64 | assert values.size == n 65 | plt.subplot(gs[i, j]) 66 | counts, bins, _ = plt.hist(values, bins, range=xlim) 67 | plt.title("%s.%s%s" % (rng, distribution, parameters.values())) 68 | if rv is not None: 69 | pdf = rv.pdf(bins) 70 | scaled_pdf = n * pdf / pdf.sum() 71 | plt.plot(bins, scaled_pdf, 'r-') 72 | plt.ylim(0, 1.2 * scaled_pdf.max()) 73 | plt.xlim(xlim) 74 | 75 | plt.savefig("Results/random_distributions.png") 76 | --------------------------------------------------------------------------------
/examples/simpleRandomNetwork_csa.py: -------------------------------------------------------------------------------- 1 | """ 2 | Simple network with a 1D population of poisson spike sources 3 | projecting to a 2D population of IF_curr_exp neurons. 4 | 5 | Andrew Davison, UNIC, CNRS 6 | August 2006, November 2009 7 | 8 | """ 9 | 10 | from pyNN.random import NumpyRNG 11 | import socket 12 | import os 13 | import csa 14 | import numpy as np 15 | from pyNN.utility import get_script_args, Timer 16 | 17 | simulator_name = get_script_args(1)[0] 18 | exec("from pyNN.%s import *" % simulator_name) 19 | 20 | 21 | timer = Timer() 22 | seed = 764756387 23 | tstop = 1000.0 # ms 24 | input_rate = 100.0 # Hz 25 | cell_params = {'tau_refrac': 2.0, # ms 26 | 'v_thresh': -50.0, # mV 27 | 'tau_syn_E': 2.0, # ms 28 | 'tau_syn_I': 2.0} # ms 29 | n_record = 5 30 | 31 | node = setup(timestep=0.025, min_delay=1.0, max_delay=10.0, debug=True, quit_on_end=False) 32 | print("Process with rank %d running on %s" % (node, socket.gethostname())) 33 | 34 | 35 | rng = NumpyRNG(seed=seed, parallel_safe=True) 36 | 37 | print("[%d] Creating populations" % node) 38 | n_spikes = int(2 * tstop * input_rate / 1000.0) 39 | spike_times = np.add.accumulate(rng.next(n_spikes, 'exponential', 40 | {'beta': 1000.0 / input_rate}, mask_local=False)) 41 | 42 | input_population = Population(100, SpikeSourceArray(spike_times=spike_times), label="input") 43 | output_population = Population(10, IF_curr_exp(**cell_params), label="output") 44 | print("[%d] input_population cells: %s" % (node, input_population.local_cells)) 45 | print("[%d] output_population cells: %s" % (node, output_population.local_cells)) 46 | 47 | print("[%d] Connecting populations" % node) 48 | timer.start() 49 | connector = CSAConnector(csa.random(0.5)) 50 | syn = StaticSynapse(weight=0.1) 51 | projection = Projection(input_population, output_population, connector, syn) 52 | print(connector.describe(), timer.elapsedTime()) 53 | 54 | file_stem = "Results/simpleRandomNetwork_csa_np%d_%s" % (num_processes(), simulator_name) 55 | 56 | projection.save('all', '%s.conn' % file_stem) 57 | 58 | input_population.record('spikes') 59 | output_population.record('spikes') 60 | output_population.sample(n_record, rng).record('v') 61 | 62 | print("[%d] Running simulation" % node) 63 | run(tstop) 64 | 65 | print("[%d] Writing spikes and Vm to disk" % node) 66 | output_population.write_data('%s_output.pkl' % file_stem) 67 | #input_population.write_data('%s_input.pkl' % file_stem) 68 | 69 | print("[%d] Finishing" % node) 70 | end() 71 | print("[%d] Done" % node) 72 | -------------------------------------------------------------------------------- /pyNN/arbor/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mock implementation of the PyNN API, for testing and documentation purposes. 3 | 4 | This simulator implements the PyNN API, but generates random data rather than 5 | really running simulations. 6 | 7 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 8 | :license: CeCILL, see LICENSE for details. 9 | """ 10 | 11 | 12 | from .. 
import errors, random, space # noqa: F401 13 | from ..network import Network # noqa: F401 14 | from ..space import Space # noqa: F401 15 | from ..random import NumpyRNG, GSLRNG, RandomDistribution # noqa: F401 16 | from ..connectors import * # noqa: F403, F401 17 | from ..recording import * # noqa: F403, F401 18 | from ..standardmodels import StandardCellType 19 | from .standardmodels import * # noqa: F403, F401 20 | from .populations import Population, PopulationView, Assembly # noqa: F401 21 | from .projections import Projection # noqa: F401 22 | from .control import ( # noqa: F401 23 | setup, 24 | end, 25 | run, 26 | run_until, 27 | run_for, 28 | reset, 29 | initialize, 30 | get_current_time, 31 | get_time_step, 32 | get_min_delay, 33 | get_max_delay, 34 | num_processes, 35 | rank, 36 | ) 37 | from .procedural_api import create, connect, record, record_v, record_gsyn # noqa: F401 38 | from . import morphology 39 | 40 | 41 | def list_standard_models(): 42 | """Return a list of all the StandardCellType classes available for this simulator.""" 43 | return [obj.__name__ for obj in globals().values() 44 | if isinstance(obj, type) and issubclass(obj, StandardCellType)] 45 | -------------------------------------------------------------------------------- /pyNN/arbor/control.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | 4 | """ 5 | 6 | from ..common.control import DEFAULT_MAX_DELAY, DEFAULT_TIMESTEP, DEFAULT_MIN_DELAY 7 | from .. import common 8 | from ..recording import get_io 9 | from . import simulator 10 | 11 | 12 | def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, 13 | **extra_params): 14 | 15 | max_delay = extra_params.get('max_delay', DEFAULT_MAX_DELAY) 16 | common.setup(timestep, min_delay, **extra_params) 17 | simulator.state.clear() 18 | simulator.state.dt = timestep # move to common.setup? 
19 | simulator.state.min_delay = min_delay 20 | simulator.state.max_delay = max_delay 21 | return rank() 22 | 23 | 24 | def end(compatible_output=True): 25 | """Do any necessary cleaning up before exiting.""" 26 | for (population, variables, filename) in simulator.state.write_on_end: 27 | io = get_io(filename) 28 | population.write_data(io, variables) 29 | simulator.state.write_on_end = [] 30 | # should have common implementation of end() 31 | 32 | 33 | run, run_until = common.build_run(simulator) 34 | run_for = run 35 | 36 | reset = common.build_reset(simulator) 37 | 38 | initialize = common.initialize 39 | 40 | get_current_time, get_time_step, get_min_delay, get_max_delay, \ 41 | num_processes, rank = common.build_state_queries(simulator) 42 | -------------------------------------------------------------------------------- /pyNN/arbor/nmodl/expsyn.mod: -------------------------------------------------------------------------------- 1 | NEURON { 2 | POINT_PROCESS expsyn 3 | RANGE tau, e 4 | NONSPECIFIC_CURRENT i 5 | } 6 | 7 | UNITS { 8 | (mV) = (millivolt) 9 | (uS) = (microsiemens) 10 | } 11 | 12 | PARAMETER { 13 | tau = 2.0 (ms) : the default for Neuron is 0.1 14 | e = 0 (mV) 15 | } 16 | 17 | ASSIGNED {} 18 | 19 | STATE { 20 | g (uS) 21 | } 22 | 23 | INITIAL { 24 | g = 0 25 | } 26 | 27 | BREAKPOINT { 28 | SOLVE state METHOD cnexp 29 | i = g*(v - e) 30 | } 31 | 32 | DERIVATIVE state { 33 | g' = -g/tau 34 | } 35 | 36 | NET_RECEIVE(weight) { 37 | g = g + weight 38 | } 39 | -------------------------------------------------------------------------------- /pyNN/arbor/nmodl/kdr.mod: -------------------------------------------------------------------------------- 1 | NEURON { 2 | SUFFIX kdr 3 | USEION k READ ek WRITE ik 4 | RANGE gkbar, q10 5 | } 6 | 7 | UNITS { 8 | (mV) = (millivolt) 9 | (S) = (siemens) 10 | } 11 | 12 | PARAMETER { 13 | gkbar = 0.036 (S/cm2) 14 | celsius (degC) 15 | } 16 | 17 | STATE { n } 18 | 19 | ASSIGNED { q10 } 20 | 21 | BREAKPOINT { 22 | SOLVE states METHOD cnexp 23 | LOCAL gk, n2 24 | 25 | n2 = n*n 26 | gk = gkbar*n2*n2 27 | ik = gk*(v - ek) 28 | } 29 | 30 | INITIAL { 31 | LOCAL alpha, beta 32 | 33 | q10 = 3^(0.1*celsius - 0.63) 34 | 35 | : potassium activation system 36 | alpha = n_alpha(v) 37 | beta = n_beta(v) 38 | n = alpha/(alpha + beta) 39 | } 40 | 41 | DERIVATIVE states { 42 | LOCAL alpha, beta 43 | 44 | : potassium activation system 45 | alpha = n_alpha(v) 46 | beta = n_beta(v) 47 | n' = (alpha - n*(alpha + beta))*q10 48 | } 49 | 50 | FUNCTION n_alpha(v) { n_alpha = 0.1*exprelr(-0.1*v - 5.5) } 51 | FUNCTION n_beta(v) { n_beta = 0.125*exp(-0.0125*v - 0.8125) } -------------------------------------------------------------------------------- /pyNN/arbor/nmodl/leak.mod: -------------------------------------------------------------------------------- 1 | NEURON { 2 | SUFFIX leak 3 | NONSPECIFIC_CURRENT il 4 | RANGE gl 5 | GLOBAL el 6 | } 7 | 8 | UNITS { 9 | (mV) = (millivolt) 10 | (S) = (siemens) 11 | } 12 | 13 | PARAMETER { 14 | gl = 0.0003 (S/cm2) 15 | el = -54.3 (mV) 16 | } 17 | 18 | BREAKPOINT { 19 | il = gl*(v - el) 20 | } 21 | -------------------------------------------------------------------------------- /pyNN/arbor/nmodl/na.mod: -------------------------------------------------------------------------------- 1 | NEURON { 2 | SUFFIX na 3 | USEION na READ ena WRITE ina 4 | RANGE gnabar, q10 5 | } 6 | 7 | UNITS { 8 | (mV) = (millivolt) 9 | (S) = (siemens) 10 | } 11 | 12 | PARAMETER { 13 | gnabar = 0.12 (S/cm2) 14 | celsius (degC) 15 | } 16 | 17 | STATE { m h } 
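: m and h are the Hodgkin-Huxley style activation and inactivation gating variables; the conductance computed below is gnabar*m^3*h.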
18 | 19 | ASSIGNED { q10 } 20 | 21 | BREAKPOINT { 22 | SOLVE states METHOD cnexp 23 | LOCAL gna 24 | 25 | gna = gnabar*m*m*m*h 26 | ina = gna*(v - ena) 27 | } 28 | 29 | INITIAL { 30 | LOCAL alpha, beta 31 | 32 | q10 = 3^(0.1*celsius - 0.63) 33 | 34 | : sodium activation system 35 | alpha = m_alpha(v) 36 | beta = m_beta(v) 37 | m = alpha/(alpha + beta) 38 | 39 | : sodium inactivation system 40 | alpha = h_alpha(v) 41 | beta = h_beta(v) 42 | h = alpha/(alpha + beta) 43 | 44 | } 45 | 46 | DERIVATIVE states { 47 | LOCAL alpha, beta 48 | 49 | : sodium activation system 50 | alpha = m_alpha(v) 51 | beta = m_beta(v) 52 | m' = (alpha - m*(alpha + beta))*q10 53 | 54 | : sodium inactivation system 55 | alpha = h_alpha(v) 56 | beta = h_beta(v) 57 | h' = (alpha - h*(alpha + beta))*q10 58 | 59 | } 60 | 61 | FUNCTION m_alpha(v) { m_alpha = exprelr(-0.1*v - 4.0) } 62 | FUNCTION h_alpha(v) { h_alpha = 0.07*exp(-0.05*v - 3.25) } 63 | 64 | FUNCTION m_beta(v) { m_beta = 4.0*exp(-(v + 65.0)/18.0) } 65 | FUNCTION h_beta(v) { h_beta = 1.0/(exp(-0.1*v - 3.5) + 1.0) } 66 | -------------------------------------------------------------------------------- /pyNN/arbor/nmodl/pas.mod: -------------------------------------------------------------------------------- 1 | NEURON { 2 | SUFFIX pas 3 | NONSPECIFIC_CURRENT i 4 | RANGE g 5 | GLOBAL e 6 | } 7 | 8 | UNITS { 9 | (mV) = (millivolt) 10 | (S) = (siemens) 11 | } 12 | 13 | INITIAL {} 14 | 15 | PARAMETER { 16 | g = .001 (S/cm2) 17 | e = -70 (mV) : Taken from nrn 18 | } 19 | 20 | BREAKPOINT { 21 | i = g*(v - e) 22 | } 23 | -------------------------------------------------------------------------------- /pyNN/arbor/nmodl/pas2.mod: -------------------------------------------------------------------------------- 1 | NEURON { 2 | SUFFIX pas2 3 | NONSPECIFIC_CURRENT i 4 | RANGE g, e 5 | } 6 | 7 | UNITS { 8 | (mV) = (millivolt) 9 | (S) = (siemens) 10 | } 11 | 12 | INITIAL {} 13 | 14 | PARAMETER { 15 | g = .001 (S/cm2) 16 | e = -70 (mV) 17 | } 18 | 19 | BREAKPOINT { 20 | i = g*(v - e) 21 | } 22 | -------------------------------------------------------------------------------- /pyNN/arbor/procedural_api.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | """ 4 | 5 | from .. import common 6 | from ..connectors import FixedProbabilityConnector 7 | from .populations import Population 8 | from .projections import Projection 9 | from ..standardmodels.synapses import StaticSynapse 10 | from . import simulator 11 | 12 | 13 | create = common.build_create(Population) 14 | 15 | connect = common.build_connect(Projection, FixedProbabilityConnector, StaticSynapse) 16 | 17 | 18 | record = common.build_record(simulator) 19 | 20 | 21 | def record_v(source, filename): 22 | return record(['v'], source, filename) 23 | 24 | 25 | def record_gsyn(source, filename): 26 | return record(['gsyn_exc', 'gsyn_inh'], source, filename) 27 | -------------------------------------------------------------------------------- /pyNN/brian2/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Brian2 implementation of the PyNN API. 3 | 4 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 5 | :license: CeCILL, see LICENSE for details. 6 | """ 7 | 8 | from .. 
import errors, random, space # noqa: F401 9 | from ..network import Network # noqa: F401 10 | from ..standardmodels import StandardCellType 11 | from ..random import NumpyRNG, GSLRNG, RandomDistribution # noqa: F401 12 | from ..connectors import * # noqa: F401, F403 13 | from .standardmodels.cells import * # noqa: F401, F403 14 | from .standardmodels.synapses import * # noqa: F401, F403 15 | from .standardmodels.electrodes import * # noqa: F401, F403 16 | from .standardmodels.receptors import ( # noqa: F401 17 | CondAlphaPostSynapticResponse, 18 | AlphaPSR, 19 | CondExpPostSynapticResponse, 20 | ExpPSR, 21 | CurrExpPostSynapticResponse, 22 | ) 23 | from .populations import Population, PopulationView, Assembly # noqa: F401 24 | from .projections import Projection # noqa: F401 25 | from .control import ( # noqa: F401 26 | setup, 27 | end, 28 | run, 29 | run_until, 30 | run_for, 31 | reset, 32 | initialize, 33 | get_current_time, 34 | get_time_step, 35 | get_min_delay, 36 | get_max_delay, 37 | num_processes, 38 | rank, 39 | ) 40 | from .procedural_api import create, connect, record, record_v, record_gsyn, set # noqa: F401 41 | 42 | 43 | def list_standard_models(): 44 | """Return a list of all the StandardCellType classes available for this simulator.""" 45 | return [ 46 | obj.__name__ 47 | for obj in globals().values() 48 | if isinstance(obj, type) and issubclass(obj, StandardCellType) 49 | ] 50 | -------------------------------------------------------------------------------- /pyNN/brian2/control.py: -------------------------------------------------------------------------------- 1 | """ 2 | Brian 2 implementation of simulation control functions 3 | 4 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 5 | :license: CeCILL, see LICENSE for details. 6 | """ 7 | 8 | import brian2 9 | from ..common.control import DEFAULT_MAX_DELAY, DEFAULT_TIMESTEP, DEFAULT_MIN_DELAY 10 | from .. import common 11 | from ..recording import get_io 12 | from . import simulator 13 | from .standardmodels.electrodes import update_currents 14 | 15 | 16 | def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, 17 | **extra_params): 18 | """ 19 | Should be called at the very beginning of a script. 20 | extra_params contains any keyword arguments that are required by a given 21 | simulator but not by others. 22 | """ 23 | 24 | max_delay = extra_params.get('max_delay', DEFAULT_MAX_DELAY) 25 | common.setup(timestep, min_delay, **extra_params) 26 | simulator.state.clear() 27 | simulator.state.dt = timestep # move to common.setup? 
28 | simulator.state.min_delay = min_delay 29 | simulator.state.max_delay = max_delay 30 | simulator.state.mpi_rank = 0 31 | simulator.state.num_processes = 1 32 | 33 | simulator.state.network.add( 34 | brian2.NetworkOperation(update_currents, when="start", clock=simulator.state.network.clock) 35 | ) 36 | return rank() 37 | 38 | 39 | def end(compatible_output=True): 40 | """Do any necessary cleaning up before exiting.""" 41 | for (population, variables, filename) in simulator.state.write_on_end: 42 | io = get_io(filename) 43 | population.write_data(io, variables) 44 | simulator.state.write_on_end = [] 45 | # should have common implementation of end() 46 | 47 | 48 | run, run_until = common.build_run(simulator) 49 | run_for = run 50 | 51 | reset = common.build_reset(simulator) 52 | 53 | initialize = common.initialize 54 | 55 | get_current_time, get_time_step, get_min_delay, get_max_delay, \ 56 | num_processes, rank = common.build_state_queries(simulator) 57 | -------------------------------------------------------------------------------- /pyNN/brian2/procedural_api.py: -------------------------------------------------------------------------------- 1 | """ 2 | Brian 2 implementation of the "procedural" API 3 | 4 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 5 | :license: CeCILL, see LICENSE for details. 6 | """ 7 | 8 | from .. import common 9 | from ..connectors import FixedProbabilityConnector 10 | from .populations import Population 11 | from .projections import Projection 12 | from .standardmodels.synapses import StaticSynapse 13 | from . import simulator 14 | 15 | 16 | create = common.build_create(Population) 17 | 18 | 19 | connect = common.build_connect(Projection, FixedProbabilityConnector, StaticSynapse) 20 | 21 | 22 | set = common.set 23 | 24 | 25 | record = common.build_record(simulator) 26 | 27 | 28 | def record_v(source, filename): 29 | return record(['v'], source, filename) 30 | 31 | 32 | def record_gsyn(source, filename): 33 | return record(['gsyn_exc', 'gsyn_inh'], source, filename) 34 | -------------------------------------------------------------------------------- /pyNN/brian2/standardmodels/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/pyNN/brian2/standardmodels/__init__.py -------------------------------------------------------------------------------- /pyNN/common/__init__.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | Defines a backend-independent, partial implementation of the PyNN API 4 | 5 | Backend simulator modules are not required to use any of the code herein, 6 | provided they provide the correct interface, but it is suggested that they use 7 | as much as is consistent with good performance (optimisations may require 8 | overriding some of the default definitions given here). 
9 | 10 | Utility functions and classes: 11 | is_conductance() 12 | check_weight() 13 | 14 | Base classes to be sub-classed by individual backends: 15 | IDMixin 16 | Population 17 | PopulationView 18 | Assembly 19 | Projection 20 | 21 | Function-factories to generate backend-specific API functions: 22 | build_reset() 23 | build_state_queries() 24 | build_create() 25 | build_connect() 26 | build_record() 27 | 28 | Common implementation of API functions: 29 | set() 30 | initialize() 31 | 32 | Function skeletons to be extended by backends: 33 | setup() 34 | end() 35 | run() 36 | 37 | Global constants: 38 | DEFAULT_MAX_DELAY 39 | DEFAULT_TIMESTEP 40 | DEFAULT_MIN_DELAY 41 | 42 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 43 | :license: CeCILL, see LICENSE for details. 44 | 45 | """ 46 | 47 | # flake8: noqa 48 | 49 | from .populations import IDMixin, BasePopulation, Population, PopulationView, Assembly, is_conductance 50 | from .projections import Projection, Connection 51 | from .procedural_api import build_create, build_connect, set, build_record, initialize 52 | from .control import setup, end, build_run, build_reset, build_state_queries 53 | -------------------------------------------------------------------------------- /pyNN/descriptions/templates/cheetah/assembly_default.txt: -------------------------------------------------------------------------------- 1 | Neuronal assembly called "$label", consisting of the following populations: 2 | #for $p in $populations 3 | * $p.label 4 | #end for 5 | -------------------------------------------------------------------------------- /pyNN/descriptions/templates/cheetah/modeltype_default.txt: -------------------------------------------------------------------------------- 1 | $name -------------------------------------------------------------------------------- /pyNN/descriptions/templates/cheetah/population_default.txt: -------------------------------------------------------------------------------- 1 | Population "$label" 2 | #if $structure 3 | Structure : $structure.name 4 | #for $name,$value in $structure.parameters.items() 5 | $name: $value 6 | #end for 7 | #end if 8 | Local cells : $size_local 9 | Cell type : $celltype.name 10 | ID range : $first_id-$last_id 11 | #if $size_local 12 | First cell on this node: 13 | ID: $local_first_id 14 | #for $name,$value in $cell_parameters.items() 15 | $name: $value 16 | #end for 17 | #end if -------------------------------------------------------------------------------- /pyNN/descriptions/templates/cheetah/populationview_default.txt: -------------------------------------------------------------------------------- 1 | PopulationView "$label" 2 | parent : "$parent" 3 | size : $size 4 | mask : $mask 5 | 6 | -------------------------------------------------------------------------------- /pyNN/descriptions/templates/cheetah/projection_default.txt: -------------------------------------------------------------------------------- 1 | Projection "$label" from "$pre.label" ($pre.size cells) to "$post.label" ($post.size cells) 2 | Receptor type : $receptor_type 3 | Connector : $connector.name 4 | #for $name,$value in $connector.parameters.items() 5 | $name : $value 6 | #end for 7 | Synapse type : (to be reimplemented) 8 | Total connections : $size 9 | Local connections : $size_local 10 | 11 | -------------------------------------------------------------------------------- /pyNN/descriptions/templates/cheetah/structure_default.txt: 
-------------------------------------------------------------------------------- 1 | Structure '$name' with the following parameters: 2 | #for $key,$value in $parameters.items() 3 | $key: $value 4 | #end for -------------------------------------------------------------------------------- /pyNN/descriptions/templates/cheetah/synapsedynamics_default.txt: -------------------------------------------------------------------------------- 1 | Short-term plasticity mechanism: $fast 2 | Long-term plasticity mechanism: $slow -------------------------------------------------------------------------------- /pyNN/descriptions/templates/jinja2/assembly_default.txt: -------------------------------------------------------------------------------- 1 | Neuronal assembly called "{{label}}", consisting of the following populations: 2 | {% for p in populations %} 3 | * {{p.label}} 4 | {% endfor %} 5 | -------------------------------------------------------------------------------- /pyNN/descriptions/templates/jinja2/modeltype_default.txt: -------------------------------------------------------------------------------- 1 | {{name}} -------------------------------------------------------------------------------- /pyNN/descriptions/templates/jinja2/population_default.txt: -------------------------------------------------------------------------------- 1 | Population "{{label}}"{%- if structure %} 2 | Structure : {{structure.name}} 3 | {%- for name,value in structure.parameters.items() %} 4 | {{name}}: {{value}}{% endfor %}{% endif -%} 5 | Local cells : {{size_local}} 6 | Cell type : {{celltype.name}} 7 | ID range : {{first_id}}-{{last_id}} 8 | {% if size_local %}First cell on this node: 9 | ID: {{local_first_id}} 10 | {% for name,value in cell_parameters.items() %}{{name}}: {{value}} 11 | {% endfor -%} 12 | {% endif %} -------------------------------------------------------------------------------- /pyNN/descriptions/templates/jinja2/populationview_default.txt: -------------------------------------------------------------------------------- 1 | PopulationView "{{label}}" 2 | parent : "{{parent}}" 3 | size : {{size}} 4 | mask : {{mask}} 5 | -------------------------------------------------------------------------------- /pyNN/descriptions/templates/jinja2/projection_default.txt: -------------------------------------------------------------------------------- 1 | Projection "{{label}}" from "{{pre.label}}" ({{pre.size}} cells) to "{{post.label}}" ({{post.size}} cells) 2 | Target : {{target}} 3 | Connector : {{connector.name}} 4 | {%- for name,value in connector.parameters.items() %} 5 | {{name}} : {{value}}{% endfor %} 6 | Weights : {{connector.weights}} 7 | Delays : {{connector.delays}} 8 | Plasticity : {% if plasticity %} 9 | Short-term : {{plasticity.fast}} 10 | Long-term : {% if plasticity.slow %} 11 | Timing-dependence : {{plasticity.slow.timing_dependence.name}} 12 | {%- for name,value in plasticity.slow.timing_dependence.parameters.items() %} 13 | {{name}} : {{value}}{% endfor %} 14 | Weight-dependence : {{plasticity.slow.weight_dependence.name}} 15 | {%- for name,value in plasticity.slow.weight_dependence.parameters.items() %} 16 | {{name}} : {{value}}{% endfor %} 17 | Voltage-dependence : {{plasticity.slow.voltage_dependence}} 18 | Dendritic delay fraction : {{plasticity.slow.dendritic_delay_fraction}}{% endif %}{% else %}None{% endif %} 19 | Total connections : {{size}} 20 | Local connections : {{size_local}} 21 | -------------------------------------------------------------------------------- 
/pyNN/descriptions/templates/jinja2/structure_default.txt: -------------------------------------------------------------------------------- 1 | Structure '{{name}}' with the following parameters: 2 | {%- for key,value in parameters.items() %} 3 | {{key}} : {{value}}{% endfor %} -------------------------------------------------------------------------------- /pyNN/descriptions/templates/jinja2/synapsedynamics_default.txt: -------------------------------------------------------------------------------- 1 | Short-term plasticity mechanism: {{fast}} 2 | Long-term plasticity mechanism: {{slow}} -------------------------------------------------------------------------------- /pyNN/descriptions/templates/string/assembly_default.txt: -------------------------------------------------------------------------------- 1 | Neuronal assembly called "$label", consisting of the following populations: $populations 2 | 3 | -------------------------------------------------------------------------------- /pyNN/descriptions/templates/string/modeltype_default.txt: -------------------------------------------------------------------------------- 1 | $name -------------------------------------------------------------------------------- /pyNN/descriptions/templates/string/population_default.txt: -------------------------------------------------------------------------------- 1 | Population "$label" 2 | Structure : $structure_name 3 | Local cells : $size_local 4 | Cell type : $celltype_name 5 | ID range : $first_id-$last_id 6 | First cell on this node: 7 | ID: $local_first_id 8 | $cell_parameters -------------------------------------------------------------------------------- /pyNN/descriptions/templates/string/populationview_default.txt: -------------------------------------------------------------------------------- 1 | PopulationView "$label" 2 | parent : "$parent" 3 | size : $size 4 | mask : $mask 5 | 6 | -------------------------------------------------------------------------------- /pyNN/descriptions/templates/string/projection_default.txt: -------------------------------------------------------------------------------- 1 | Projection "$label" 2 | From: $pre 3 | To: $post 4 | Target : $target 5 | Connector : $connector 6 | Plasticity : $plasticity 7 | Total connections : $size 8 | Local connections : $size_local 9 | 10 | -------------------------------------------------------------------------------- /pyNN/descriptions/templates/string/structure_default.txt: -------------------------------------------------------------------------------- 1 | Structure '$name' with the following parameters: $parameters -------------------------------------------------------------------------------- /pyNN/descriptions/templates/string/synapsedynamics_default.txt: -------------------------------------------------------------------------------- 1 | Short-term plasticity mechanism: $fast 2 | Long-term plasticity mechanism: $slow -------------------------------------------------------------------------------- /pyNN/hardware/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | hardware implementation of the PyNN API. 4 | It includes the submodules that stand on another directory. 5 | This solution is a clean way to make the submodules (brainscales, etc...) 6 | be indeed submodules of hardware, even if they don't stand on the same directory 7 | 8 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 
9 | :license: CeCILL, see LICENSE for details. 10 | 11 | """ 12 | 13 | from auxiliary import get_path_to_analog_hardware_backend, import_all_submodules 14 | 15 | __path__.append(get_path_to_analog_hardware_backend()) 16 | import_all_submodules(__path__) 17 | -------------------------------------------------------------------------------- /pyNN/mock/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mock implementation of the PyNN API, for testing and documentation purposes. 3 | 4 | This simulator implements the PyNN API, but generates random data rather than 5 | really running simulations. 6 | 7 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 8 | :license: CeCILL, see LICENSE for details. 9 | """ 10 | 11 | 12 | from .. import errors, random, space # noqa: F401 13 | from ..network import Network # noqa: F401 14 | from ..space import Space # noqa: F401 15 | from ..random import NumpyRNG, GSLRNG, RandomDistribution # noqa: F401 16 | from ..connectors import * # noqa: F403, F401 17 | from ..recording import * # noqa: F403, F401 18 | from ..standardmodels import StandardCellType 19 | from .standardmodels import * # noqa: F403, F401 20 | from .populations import Population, PopulationView, Assembly # noqa: F401 21 | from .projections import Projection # noqa: F401 22 | from .control import ( # noqa: F401 23 | setup, 24 | end, 25 | run, 26 | run_until, 27 | run_for, 28 | reset, 29 | initialize, 30 | get_current_time, 31 | get_time_step, 32 | get_min_delay, 33 | get_max_delay, 34 | num_processes, 35 | rank, 36 | ) 37 | from .procedural_api import create, connect, record, record_v, record_gsyn # noqa: F401 38 | 39 | 40 | def list_standard_models(): 41 | """Return a list of all the StandardCellType classes available for this simulator.""" 42 | return [obj.__name__ for obj in globals().values() 43 | if isinstance(obj, type) and issubclass(obj, StandardCellType)] 44 | -------------------------------------------------------------------------------- /pyNN/mock/control.py: -------------------------------------------------------------------------------- 1 | from ..common.control import DEFAULT_MAX_DELAY, DEFAULT_TIMESTEP, DEFAULT_MIN_DELAY 2 | from .. import common 3 | from ..recording import get_io 4 | from . import simulator 5 | 6 | 7 | def setup(timestep=DEFAULT_TIMESTEP, min_delay=DEFAULT_MIN_DELAY, 8 | **extra_params): 9 | 10 | max_delay = extra_params.get('max_delay', DEFAULT_MAX_DELAY) 11 | common.setup(timestep, min_delay, **extra_params) 12 | simulator.state.clear() 13 | simulator.state.dt = timestep # move to common.setup? 
14 | simulator.state.min_delay = min_delay 15 | simulator.state.max_delay = max_delay 16 | simulator.state.mpi_rank = extra_params.get('rank', 0) 17 | simulator.state.num_processes = extra_params.get('num_processes', 1) 18 | return rank() 19 | 20 | 21 | def end(compatible_output=True): 22 | """Do any necessary cleaning up before exiting.""" 23 | for (population, variables, filename) in simulator.state.write_on_end: 24 | io = get_io(filename) 25 | population.write_data(io, variables) 26 | simulator.state.write_on_end = [] 27 | # should have common implementation of end() 28 | 29 | 30 | run, run_until = common.build_run(simulator) 31 | run_for = run 32 | 33 | reset = common.build_reset(simulator) 34 | 35 | initialize = common.initialize 36 | 37 | get_current_time, get_time_step, get_min_delay, get_max_delay, \ 38 | num_processes, rank = common.build_state_queries(simulator) 39 | -------------------------------------------------------------------------------- /pyNN/mock/procedural_api.py: -------------------------------------------------------------------------------- 1 | 2 | from .. import common 3 | from ..connectors import FixedProbabilityConnector 4 | from .populations import Population 5 | from .projections import Projection 6 | from ..standardmodels.synapses import StaticSynapse 7 | from . import simulator 8 | 9 | 10 | create = common.build_create(Population) 11 | 12 | connect = common.build_connect(Projection, FixedProbabilityConnector, StaticSynapse) 13 | 14 | 15 | record = common.build_record(simulator) 16 | 17 | 18 | def record_v(source, filename): 19 | return record(['v'], source, filename) 20 | 21 | 22 | def record_gsyn(source, filename): 23 | return record(['gsyn_exc', 'gsyn_inh'], source, filename) 24 | -------------------------------------------------------------------------------- /pyNN/mock/projections.py: -------------------------------------------------------------------------------- 1 | from itertools import repeat 2 | from .. import common 3 | from ..core import ezip 4 | from ..space import Space 5 | from . import simulator 6 | 7 | 8 | class Connection(common.Connection): 9 | """ 10 | Store an individual plastic connection and information about it. Provide an 11 | interface that allows access to the connection's weight, delay and other 12 | attributes. 
13 | """ 14 | 15 | def __init__(self, pre, post, **attributes): 16 | self.presynaptic_index = pre 17 | self.postsynaptic_index = post 18 | for name, value in attributes.items(): 19 | setattr(self, name, value) 20 | 21 | def as_tuple(self, *attribute_names): 22 | # should return indices, not IDs for source and target 23 | return tuple([getattr(self, name) for name in attribute_names]) 24 | 25 | 26 | class Projection(common.Projection): 27 | __doc__ = common.Projection.__doc__ 28 | _simulator = simulator 29 | 30 | def __init__(self, presynaptic_population, postsynaptic_population, 31 | connector, synapse_type, source=None, receptor_type=None, 32 | space=Space(), label=None): 33 | common.Projection.__init__(self, presynaptic_population, postsynaptic_population, 34 | connector, synapse_type, source, receptor_type, 35 | space, label) 36 | 37 | # Create connections 38 | self.connections = [] 39 | connector.connect(self) 40 | 41 | def __len__(self): 42 | return len(self.connections) 43 | 44 | def set(self, **attributes): 45 | raise NotImplementedError 46 | 47 | def _convergent_connect(self, presynaptic_indices, postsynaptic_index, 48 | location_selector=None, 49 | **connection_parameters): 50 | if location_selector is not None: 51 | raise NotImplementedError("mock backend does not support multicompartmental models.") 52 | for name, value in connection_parameters.items(): 53 | if isinstance(value, float): 54 | connection_parameters[name] = repeat(value) 55 | for pre_idx, other in ezip(presynaptic_indices, *connection_parameters.values()): 56 | other_attributes = dict(zip(connection_parameters.keys(), other)) 57 | self.connections.append( 58 | Connection(pre_idx, postsynaptic_index, **other_attributes) 59 | ) 60 | -------------------------------------------------------------------------------- /pyNN/mock/recording.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from .. import recording 3 | from . import simulator 4 | 5 | 6 | class Recorder(recording.Recorder): 7 | _simulator = simulator 8 | 9 | def _record(self, variable, new_ids, sampling_interval=None): 10 | pass 11 | 12 | def _get_spiketimes(self, id, clear=False): 13 | if hasattr(id, "__len__"): 14 | spks = {} 15 | for i in id: 16 | spks[i] = np.array([i, i + 5], dtype=float) % self._simulator.state.t 17 | return spks 18 | else: 19 | return np.array([id, id + 5], dtype=float) % self._simulator.state.t 20 | 21 | def _get_all_signals(self, variable, ids, clear=False): 22 | # assuming not using cvode, otherwise need to get times as well 23 | # and use IrregularlySampledAnalogSignal 24 | n_samples = int(round(self._simulator.state.t / self._simulator.state.dt)) + 1 25 | return np.vstack([np.random.uniform(size=n_samples) for id in ids]).T, None 26 | 27 | def _local_count(self, variable, filter_ids=None): 28 | N = {} 29 | if variable.name == 'spikes': 30 | for id in self.filter_recorded(variable, filter_ids): 31 | N[int(id)] = 2 32 | else: 33 | raise Exception("Only implemented for spikes") 34 | return N 35 | 36 | def _clear_simulator(self): 37 | pass 38 | 39 | def _reset(self): 40 | pass 41 | -------------------------------------------------------------------------------- /pyNN/mock/simulator.py: -------------------------------------------------------------------------------- 1 | from .. 
import common 2 | 3 | name = "MockSimulator" 4 | 5 | 6 | class ID(int, common.IDMixin): 7 | 8 | def __init__(self, n): 9 | """Create an ID object with numerical value `n`.""" 10 | int.__init__(n) 11 | common.IDMixin.__init__(self) 12 | 13 | 14 | class State(common.control.BaseState): 15 | 16 | def __init__(self): 17 | common.control.BaseState.__init__(self) 18 | self.mpi_rank = 0 19 | self.num_processes = 1 20 | self.clear() 21 | self.dt = 0.1 22 | 23 | def run(self, simtime): 24 | self.t += simtime 25 | self.running = True 26 | 27 | def run_until(self, tstop): 28 | self.t = tstop 29 | self.running = True 30 | 31 | def clear(self): 32 | self.recorders = set([]) 33 | self.id_counter = 42 34 | self.segment_counter = -1 35 | self.reset() 36 | 37 | def reset(self): 38 | """Reset the state of the current network to time t = 0.""" 39 | self.running = False 40 | self.t = 0 41 | self.t_start = 0 42 | self.segment_counter += 1 43 | 44 | 45 | state = State() 46 | -------------------------------------------------------------------------------- /pyNN/nest/extensions/stochastic_stp_synapse_impl.h: -------------------------------------------------------------------------------- 1 | /* 2 | * stochastic_stp_synapse_impl.h 3 | * 4 | * :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 5 | * :license: CeCILL, see LICENSE for details. 6 | * 7 | */ 8 | 9 | #ifndef STOCHASTIC_STP_SYNAPSE_IMPL_H 10 | #define STOCHASTIC_STP_SYNAPSE_IMPL_H 11 | 12 | #include "stochastic_stp_synapse.h" 13 | 14 | // Includes from nestkernel: 15 | #include "connection.h" 16 | #include "connector_model.h" 17 | #include "nest_names.h" 18 | 19 | // Includes from sli: 20 | #include "dictutils.h" 21 | 22 | namespace pynn 23 | { 24 | 25 | template < typename targetidentifierT > 26 | stochastic_stp_synapse< targetidentifierT >::stochastic_stp_synapse() 27 | : ConnectionBase() 28 | , weight_( 1.0 ) 29 | , U_( 0.5 ) 30 | , u_( 0.0 ) 31 | , tau_rec_( 800.0 ) 32 | , tau_fac_( 10.0 ) 33 | , R_( 1.0 ) 34 | , t_surv_( 0.0 ) 35 | , t_lastspike_( 0.0 ) 36 | { 37 | } 38 | 39 | template < typename targetidentifierT > 40 | stochastic_stp_synapse< targetidentifierT >::stochastic_stp_synapse( 41 | const stochastic_stp_synapse& rhs ) 42 | : ConnectionBase( rhs ) 43 | , weight_( rhs.weight_ ) 44 | , U_( rhs.U_ ) 45 | , u_( rhs.u_ ) 46 | , tau_rec_( rhs.tau_rec_ ) 47 | , tau_fac_( rhs.tau_fac_ ) 48 | , R_( rhs.R_ ) 49 | , t_surv_( rhs.t_surv_ ) 50 | , t_lastspike_( rhs.t_lastspike_ ) 51 | { 52 | } 53 | 54 | 55 | template < typename targetidentifierT > 56 | void 57 | stochastic_stp_synapse< targetidentifierT >::get_status( 58 | DictionaryDatum& d ) const 59 | { 60 | ConnectionBase::get_status( d ); 61 | def< double >( d, nest::names::weight, weight_ ); 62 | def< double >( d, nest::names::dU, U_ ); 63 | def< double >( d, nest::names::u, u_ ); 64 | def< double >( d, nest::names::tau_rec, tau_rec_ ); 65 | def< double >( d, nest::names::tau_fac, tau_fac_ ); 66 | } 67 | 68 | 69 | template < typename targetidentifierT > 70 | void 71 | stochastic_stp_synapse< targetidentifierT >::set_status( 72 | const DictionaryDatum& d, 73 | nest::ConnectorModel& cm ) 74 | { 75 | ConnectionBase::set_status( d, cm ); 76 | updateValue< double >( d, nest::names::weight, weight_ ); 77 | 78 | updateValue< double >( d, nest::names::dU, U_ ); 79 | updateValue< double >( d, nest::names::u, u_ ); 80 | updateValue< double >( d, nest::names::tau_rec, tau_rec_ ); 81 | updateValue< double >( d, nest::names::tau_fac, tau_fac_ ); 82 | } 83 | 84 | } // of namespace pynn 85 | 86 | 
#endif // #ifndef STOCHASTIC_STP_SYNAPSE_IMPL_H 87 | -------------------------------------------------------------------------------- /pyNN/nest/procedural_api.py: -------------------------------------------------------------------------------- 1 | """ 2 | NEST implementation of the "procedural" or "low-level" API for 3 | creating, connecting and recording from individual neurons. 4 | 5 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 6 | :license: CeCILL, see LICENSE for details. 7 | """ 8 | 9 | from .. import common 10 | from ..connectors import FixedProbabilityConnector 11 | from .populations import Population 12 | from .projections import Projection 13 | from .standardmodels.synapses import StaticSynapse 14 | from . import simulator 15 | 16 | create = common.build_create(Population) 17 | 18 | connect = common.build_connect(Projection, FixedProbabilityConnector, StaticSynapse) 19 | 20 | set = common.set 21 | 22 | record = common.build_record(simulator) 23 | 24 | 25 | def record_v(source, filename): return record(['v'], source, filename) 26 | 27 | 28 | def record_gsyn(source, filename): return record(['gsyn_exc', 'gsyn_inh'], source, filename) 29 | -------------------------------------------------------------------------------- /pyNN/nest/standardmodels/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/pyNN/nest/standardmodels/__init__.py -------------------------------------------------------------------------------- /pyNN/nest/standardmodels/receptors.py: -------------------------------------------------------------------------------- 1 | from ...standardmodels import receptors, build_translations 2 | 3 | 4 | class CurrExpPostSynapticResponse(receptors.CurrExpPostSynapticResponse): 5 | possible_models = set(["iaf_psc_exp_multisynapse"]) 6 | 7 | translations = build_translations( 8 | ('locations', 'locations'), 9 | ('tau_syn', 'tau_syn') 10 | ) 11 | recordable = ["isyn"] 12 | scale_factors = {"isyn": 0.001} 13 | variable_map = {"isyn": "I_syn"} 14 | 15 | 16 | class CondExpPostSynapticResponse(receptors.CondExpPostSynapticResponse): 17 | possible_models = set(["gif_cond_exp_multisynapse"]) 18 | 19 | translations = build_translations( 20 | ('locations', 'locations'), 21 | ('e_syn', 'E_rev'), 22 | ('tau_syn', 'tau_syn') 23 | ) 24 | recordable = ["gsyn"] 25 | scale_factors = {"gsyn": 0.001} 26 | variable_map = {"gsyn": "g"} 27 | 28 | 29 | class CondAlphaPostSynapticResponse(receptors.CondAlphaPostSynapticResponse): 30 | possible_models = set(["aeif_cond_alpha_multisynapse"]) 31 | 32 | translations = build_translations( 33 | ('locations', 'locations'), 34 | ('e_syn', 'E_rev'), 35 | ('tau_syn', 'tau_syn') 36 | ) 37 | recordable = ["gsyn"] 38 | scale_factors = {"gsyn": 0.001} 39 | variable_map = {"gsyn": "g"} 40 | 41 | 42 | class CondBetaPostSynapticResponse(receptors.CondBetaPostSynapticResponse): 43 | possible_models = set(["aeif_cond_beta_multisynapse"]) 44 | 45 | translations = build_translations( 46 | ('locations', 'locations'), 47 | ('e_syn', 'E_rev'), 48 | ('tau_rise', 'tau_rise'), 49 | ('tau_decay', 'tau_decay') 50 | ) 51 | recordable = ["gsyn"] 52 | scale_factors = {"gsyn": 0.001} 53 | variable_map = {"gsyn": "g"} 54 | 55 | 56 | # create shorter aliases 57 | 58 | ExpPSR = CondExpPostSynapticResponse 59 | AlphaPSR = CondAlphaPostSynapticResponse 60 | BetaPSR = CondBetaPostSynapticResponse 61 | 
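# Note: the scale_factors of 0.001 above presumably convert the values recorded by NEST (synaptic conductances in nS, synaptic currents in pA) into PyNN's standard units (uS and nA respectively).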
-------------------------------------------------------------------------------- /pyNN/neuroml/standardmodels/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/pyNN/neuroml/standardmodels/__init__.py -------------------------------------------------------------------------------- /pyNN/neuron/connectors.py: -------------------------------------------------------------------------------- 1 | """ 2 | Connection method classes for the neuron module 3 | 4 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 5 | :license: CeCILL, see LICENSE for details. 6 | 7 | """ 8 | 9 | # flake8: noqa 10 | 11 | from . import simulator 12 | from ..connectors import ( 13 | AllToAllConnector, 14 | OneToOneConnector, 15 | FixedProbabilityConnector, 16 | DistanceDependentProbabilityConnector, 17 | DisplacementDependentProbabilityConnector, 18 | IndexBasedProbabilityConnector, 19 | FromListConnector, 20 | FromFileConnector, 21 | FixedNumberPreConnector, 22 | FixedNumberPostConnector, 23 | SmallWorldConnector, 24 | CSAConnector, 25 | CloneConnector, 26 | ArrayConnector, 27 | FixedTotalNumberConnector, 28 | ) 29 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | """ 4 | 5 | from pyNN.models import BaseIonChannelModel 6 | from pyNN.morphology import IonChannelDistribution 7 | from .. import simulator 8 | 9 | 10 | class NMODLIonChannelModel(BaseIonChannelModel): 11 | conductance_density_parameter = None 12 | 13 | def __init__(self, **parameters): 14 | mechanism = getattr(simulator.dummy(0.5), self.name) 15 | # now get list of range variables (dir(mechanism), ends with _) 16 | lname = len(self.name) 17 | self.range_variables = [var[:-lname-1] for var in dir(mechanism) if var.endswith("_" + self.name)] 18 | # the problem now is that only some of the range variables are parameters 19 | # is there any way to introspect which these are? 20 | # maybe check the NEURON GUI - does it limit the recordable vars? 21 | # otherwise we either require the source, and parse the NMODL 22 | # or we require the user to tell us 23 | if self.conductance_density_parameter is None: 24 | for varname in ("gbar", "gnabar", "gkbar", "gcabar", "g", "gmax"): 25 | if varname in self.range_variables: 26 | self.conductance_density_parameter = varname 27 | break 28 | BaseIonChannelModel.__init__(self, **parameters) 29 | 30 | def translate(self, parameters): 31 | return parameters 32 | 33 | def get_schema(self): 34 | return {varname: IonChannelDistribution 35 | for varname in self.range_variables} 36 | 37 | 38 | def NMODLChannel(name, conductance_density_parameter=None): 39 | simulator.dummy.insert(name) 40 | return type(name, 41 | (NMODLIonChannelModel,), 42 | {"name": name, 43 | "model": name, 44 | "conductance_density_parameter": conductance_density_parameter}) 45 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/alphaisyn.mod: -------------------------------------------------------------------------------- 1 | TITLE Alpha-function synaptic current, with NET_RECEIVE 2 | 3 | COMMENT 4 | This model works with variable time-step methods (although it may not 5 | be very accurate) but at the expense of having to maintain the queues 6 | of spike times and weights. 7 | 8 | Andrew P. 
Davison, UNIC, CNRS, May 2006 9 | ENDCOMMENT 10 | 11 | DEFINE MAX_SPIKES 1000 12 | DEFINE CUTOFF 20 13 | 14 | NEURON { 15 | POINT_PROCESS AlphaISyn 16 | RANGE tau, i, q 17 | NONSPECIFIC_CURRENT i 18 | } 19 | 20 | UNITS { 21 | (nA) = (nanoamp) 22 | } 23 | 24 | PARAMETER { 25 | tau = 5 (ms) <1e-9,1e9> 26 | 27 | } 28 | 29 | ASSIGNED { 30 | i (nA) 31 | q 32 | quiet 33 | onset_times[MAX_SPIKES] (ms) 34 | weight_list[MAX_SPIKES] (nA) 35 | } 36 | 37 | INITIAL { 38 | i = 0 39 | q = 0 : queue index 40 | quiet = 0 41 | } 42 | 43 | BREAKPOINT { 44 | LOCAL k, expired_spikes, x 45 | i = 0 46 | expired_spikes = 0 47 | FROM k=0 TO q-1 { 48 | x = (t - onset_times[k])/tau 49 | if (x > CUTOFF) { 50 | expired_spikes = expired_spikes + 1 51 | } else { 52 | i = i - weight_list[k] * alpha(x) 53 | } 54 | } 55 | update_queue(expired_spikes) 56 | } 57 | 58 | FUNCTION update_queue(n) { 59 | LOCAL k 60 | :if (n > 0) { printf("Queue changed. t = %4.2f onset_times=[",t) } 61 | FROM k=0 TO q-n-1 { 62 | onset_times[k] = onset_times[k+n] 63 | weight_list[k] = weight_list[k+n] 64 | :if (n > 0) { printf("%4.2f ",onset_times[k]) } 65 | } 66 | :if (n > 0) { printf("]\n") } 67 | q = q-n 68 | } 69 | 70 | FUNCTION alpha(x) { 71 | if (x < 0) { 72 | alpha = 0 73 | } else { 74 | alpha = x * exp(1 - x) 75 | } 76 | } 77 | 78 | NET_RECEIVE(weight (nA)) { 79 | onset_times[q] = t 80 | weight_list[q] = weight 81 | :printf("t = %f, weight = %f\n", t, weight) 82 | if (q >= MAX_SPIKES-1) { 83 | if (!quiet) { 84 | printf("Error in AlphaSynI. Spike queue is full\n") 85 | quiet = 1 86 | } 87 | } else { 88 | q = q + 1 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/alphasyn.mod: -------------------------------------------------------------------------------- 1 | TITLE Alpha-function synaptic conductance, with NET_RECEIVE 2 | 3 | COMMENT 4 | This model works with variable time-step methods (although it may not 5 | be very accurate) but at the expense of having to maintain the queues 6 | of spike times and weights. 7 | 8 | Andrew P. Davison, UNIC, CNRS, May 2006 9 | ENDCOMMENT 10 | 11 | DEFINE MAX_SPIKES 1000 12 | DEFINE CUTOFF 20 13 | 14 | NEURON { 15 | POINT_PROCESS AlphaSyn 16 | RANGE tau, i, g, e, q 17 | NONSPECIFIC_CURRENT i 18 | } 19 | 20 | UNITS { 21 | (nA) = (nanoamp) 22 | (uS) = (microsiemens) 23 | (mV) = (millivolts) 24 | } 25 | 26 | PARAMETER { 27 | tau = 5 (ms) <1e-9,1e9> 28 | e = 0 (mV) 29 | } 30 | 31 | ASSIGNED { 32 | v (mV) 33 | i (nA) 34 | g (uS) 35 | q 36 | quiet 37 | onset_times[MAX_SPIKES] (ms) 38 | weight_list[MAX_SPIKES] (uS) 39 | } 40 | 41 | INITIAL { 42 | i = 0 43 | q = 0 : queue index 44 | quiet = 0 45 | } 46 | 47 | BREAKPOINT { 48 | LOCAL k, expired_spikes, x 49 | g = 0 50 | expired_spikes = 0 51 | FROM k=0 TO q-1 { 52 | x = (t - onset_times[k])/tau 53 | if (x > CUTOFF) { 54 | expired_spikes = expired_spikes + 1 55 | } else { 56 | g = g + weight_list[k] * alpha(x) 57 | } 58 | } 59 | i = g*(v - e) 60 | update_queue(expired_spikes) 61 | } 62 | 63 | FUNCTION update_queue(n) { 64 | LOCAL k 65 | :if (n > 0) { printf("Queue changed. 
t = %4.2f onset_times=[",t) } 66 | FROM k=0 TO q-n-1 { 67 | onset_times[k] = onset_times[k+n] 68 | weight_list[k] = weight_list[k+n] 69 | :if (n > 0) { printf("%4.2f ",onset_times[k]) } 70 | } 71 | :if (n > 0) { printf("]\n") } 72 | q = q-n 73 | } 74 | 75 | FUNCTION alpha(x) { 76 | if (x < 0) { 77 | alpha = 0 78 | } else { 79 | alpha = x * exp(1 - x) 80 | } 81 | } 82 | 83 | NET_RECEIVE(weight (uS)) { 84 | onset_times[q] = t 85 | weight_list[q] = weight 86 | if (q >= MAX_SPIKES-1) { 87 | if (!quiet) { 88 | printf("Error in AlphaSyn. Spike queue is full\n") 89 | quiet = 1 90 | } 91 | } else { 92 | q = q + 1 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/expisyn.mod: -------------------------------------------------------------------------------- 1 | NEURON { 2 | POINT_PROCESS ExpISyn 3 | RANGE tau, i 4 | NONSPECIFIC_CURRENT i 5 | } 6 | 7 | UNITS { 8 | (nA) = (nanoamp) 9 | (mV) = (millivolt) 10 | } 11 | 12 | PARAMETER { 13 | tau = 0.1 (ms) <1e-9,1e9> 14 | } 15 | 16 | STATE { 17 | i (nA) 18 | } 19 | 20 | INITIAL { 21 | i = 0 22 | } 23 | 24 | BREAKPOINT { 25 | SOLVE state METHOD cnexp 26 | } 27 | 28 | DERIVATIVE state { 29 | i' = -i/tau 30 | } 31 | 32 | NET_RECEIVE(weight (nA)) { 33 | :printf("t = %f, weight = %f\n", t, weight) 34 | i = i - weight 35 | } 36 | 37 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/gap.mod: -------------------------------------------------------------------------------- 1 | NEURON { 2 | 3 | POINT_PROCESS Gap 4 | RANGE g, i, vgap 5 | NONSPECIFIC_CURRENT i 6 | } 7 | 8 | UNITS { 9 | 10 | (nA) = (nanoamp) 11 | (mV) = (millivolt) 12 | (nS) = (nanosiemens) 13 | } 14 | 15 | PARAMETER { g = 0 (uS) } 16 | 17 | ASSIGNED { 18 | 19 | v (mV) 20 | vgap (mV) 21 | i (nA) 22 | } 23 | 24 | BREAKPOINT { 25 | 26 | if (g>0) {i = g * (v-vgap) } 27 | 28 | } 29 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/gsfa_grr.mod: -------------------------------------------------------------------------------- 1 | : conductance based spike-frequency adaptation, and a conductance-based relative refractory 2 | : mechanism ... to be inserted in a integrate-and-fire neuron 3 | : 4 | : See: Muller et al (2007) Spike-frequency adapting neural ensembles: Beyond 5 | : mean-adaptation and renewal theories. Neural Computation 19: 2958-3010. 6 | : 7 | : 8 | : Implemented from adexp.mod by Eilif Muller. EPFL-BMI, Jan 2011. 
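: How it works: the WATCH statement (armed by the flag-2 self-event sent from INITIAL) delivers a flag-1 event whenever v crosses vthresh; : that event increments g_s by q_s and g_r by q_r, and both conductances then decay exponentially with time constants tau_s and tau_r. : The factor 0.001 in the BREAKPOINT block converts the nS*mV product (pA) into nA.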
9 | 10 | NEURON { 11 | POINT_PROCESS GsfaGrr 12 | RANGE vthresh 13 | RANGE q_r, q_s 14 | RANGE E_s, E_r, tau_s, tau_r 15 | NONSPECIFIC_CURRENT i 16 | } 17 | 18 | UNITS { 19 | (mV) = (millivolt) 20 | (nA) = (nanoamp) 21 | (uS) = (microsiemens) 22 | (nS) = (nanosiemens) 23 | } 24 | 25 | PARAMETER { 26 | vthresh = -57 (mV) : spike threshold 27 | q_r = 3214.0 (nS) : relative refractory quantal conductance 28 | q_s = 14.48 (nS) : SFA quantal conductance 29 | tau_s = 110.0 (ms) : time constant of SFA 30 | tau_r = 1.97 (ms) : time constant of relative refractory mechanism 31 | E_s = -70 (mV) : SFA reversal potential 32 | E_r = -70 (mV) : relative refractory period reversal potential 33 | } 34 | 35 | 36 | ASSIGNED { 37 | v (mV) 38 | i (nA) 39 | } 40 | 41 | STATE { 42 | g_s (nS) 43 | g_r (nS) 44 | } 45 | 46 | INITIAL { 47 | g_s = 0 48 | g_r = 0 49 | net_send(0,2) 50 | } 51 | 52 | BREAKPOINT { 53 | SOLVE states METHOD cnexp :derivimplicit 54 | i = (0.001)*(g_r*(v-E_r) + g_s*(v-E_s)) 55 | } 56 | 57 | 58 | DERIVATIVE states { : solve eq for adaptation, relref variable 59 | g_s' = -g_s/tau_s 60 | g_r' = -g_r/tau_r 61 | } 62 | 63 | NET_RECEIVE (weight) { 64 | if (flag == 1) { : beginning of spike 65 | state_discontinuity(g_s, g_s + q_s) 66 | state_discontinuity(g_r, g_r + q_r) 67 | } else if (flag == 2) { : watch membrane potential 68 | WATCH (v > vthresh) 1 69 | } 70 | } -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/izhikevich.mod: -------------------------------------------------------------------------------- 1 | : Izhikevich artificial neuron model from 2 | : EM Izhikevich "Simple Model of Spiking Neurons" 3 | : IEEE Transactions On Neural Networks, Vol. 14, No. 6, November 2003 pp 1569-1572 4 | : 5 | : This NMODL file may not work properly if used outside of PyNN. 6 | : Ted Carnevale has written a more complete, general-purpose implementation - see http://www.neuron.yale.edu/ftp/ted/devel/izhdistrib.zip 7 | 8 | NEURON { 9 | POINT_PROCESS Izhikevich 10 | RANGE a, b, c, d, u, uinit, vthresh 11 | NONSPECIFIC_CURRENT i 12 | } 13 | 14 | UNITS { 15 | (mV) = (millivolt) 16 | (nA) = (nanoamp) 17 | (nF) = (nanofarad) 18 | } 19 | 20 | INITIAL { 21 | u = uinit 22 | net_send(0, 1) 23 | } 24 | 25 | PARAMETER { 26 | a = 0.02 (/ms) 27 | b = 0.2 (/ms) 28 | c = -65 (mV) : reset potential after a spike 29 | d = 2 (mV/ms) 30 | vthresh = 30 (mV) : spike threshold 31 | Cm = 0.001 (nF) 32 | uinit = -14 (mV/ms) 33 | } 34 | 35 | ASSIGNED { 36 | v (mV) 37 | i (nA) 38 | } 39 | 40 | STATE { 41 | u (mV/ms) 42 | } 43 | 44 | BREAKPOINT { 45 | SOLVE states METHOD derivimplicit 46 | i = -Cm * (0.04*v*v + 5*v + 140 - u) 47 | :printf("t=%f, v=%f u=%f, i=%f, dv=%f, du=%f\n", t, v, u, i, 0.04*v*v + 5*v + 140 - u, a*(b*v-u)) 48 | } 49 | 50 | DERIVATIVE states { 51 | u' = a*(b*v - u) 52 | } 53 | 54 | NET_RECEIVE (weight (mV)) { 55 | if (flag == 1) { 56 | WATCH (v > vthresh) 2 57 | } else if (flag == 2) { 58 | net_event(t) 59 | v = c 60 | u = u + d 61 | } else { : synaptic activation 62 | v = v + weight 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/poisson_stim_refractory.mod: -------------------------------------------------------------------------------- 1 | COMMENT 2 | 3 | Spike generator following a Poisson process with a refractory period. 
4 | 5 | Parameters: 6 | rate: Mean spike frequency (Hz) 7 | tau_refrac: Minimum time between spikes (ms) 8 | start: Start time (ms) 9 | duration: Duration of spike sequence (ms) 10 | 11 | Author: Andrew P. Davison, UNIC, CNRS 12 | 13 | ENDCOMMENT 14 | 15 | NEURON { 16 | ARTIFICIAL_CELL PoissonStimRefractory 17 | RANGE rate, tau_refrac, start, duration 18 | } 19 | 20 | PARAMETER { 21 | rate = 1.0 (Hz) 22 | tau_refrac = 0.0 (ms) 23 | start = 1 (ms) 24 | duration = 1000 (ms) 25 | } 26 | 27 | ASSIGNED { 28 | event (ms) 29 | on 30 | end (ms) 31 | } 32 | 33 | PROCEDURE seed(x) { 34 | set_seed(x) 35 | } 36 | 37 | INITIAL { 38 | on = 0 39 | if (start >= 0) { 40 | net_send(event, 2) 41 | } 42 | } 43 | 44 | NET_RECEIVE (w) { 45 | LOCAL mean_poisson_interval 46 | if (flag == 2) { : from INITIAL 47 | if (on == 0) { 48 | on = 1 49 | event = t 50 | end = t + 1e-6 + duration 51 | net_send(0, 1) 52 | } 53 | } 54 | if (flag == 1 && on == 1) { 55 | net_event(t) 56 | mean_poisson_interval = 1000.0/rate - tau_refrac 57 | event = event + tau_refrac + mean_poisson_interval * exprand(1) 58 | if (event > end) { 59 | on = 0 60 | } 61 | if (on == 1) { 62 | net_send(event - t, 1) 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/refrac.mod: -------------------------------------------------------------------------------- 1 | : Insert in a passive compartment to get an integrate-and-fire neuron 2 | : with a refractory period. 3 | : Note that this only sets the membrane potential to the correct value 4 | : at the start and end of the refractory period, and prevents spikes 5 | : during the period by clamping the membrane potential to the reset 6 | : voltage with a huge conductance. 7 | : 8 | : Andrew P. Davison. UNIC, CNRS, May 2006. 9 | 10 | NEURON { 11 | POINT_PROCESS ResetRefrac 12 | RANGE vreset, trefrac, vspike, vthresh 13 | NONSPECIFIC_CURRENT i 14 | } 15 | 16 | UNITS { 17 | (mV) = (millivolt) 18 | (nA) = (nanoamp) 19 | (uS) = (microsiemens) 20 | } 21 | 22 | PARAMETER { 23 | vthresh = -50 (mV) : spike threshold 24 | vreset = -60 (mV) : reset potential after a spike 25 | vspike = 40 (mV) : spike height (mainly for graphical purposes) 26 | trefrac = 1 (ms) 27 | g_on = 1e12 (uS) 28 | spikewidth = 1e-12 (ms) : must be less than trefrac. Check for this? 29 | } 30 | 31 | 32 | ASSIGNED { 33 | v (mV) 34 | i (nA) 35 | g (uS) 36 | refractory 37 | } 38 | 39 | INITIAL { 40 | g = 0 41 | net_send(0,4) 42 | } 43 | 44 | BREAKPOINT { 45 | i = g*(v-vreset) 46 | } 47 | 48 | NET_RECEIVE (weight) { 49 | if (flag == 1) { : beginning of spike 50 | g = g_on 51 | state_discontinuity(v,vspike) 52 | net_send(spikewidth,2) 53 | net_event(t) 54 | } else if (flag == 2) { : end of spike, beginning of refractory period 55 | state_discontinuity(v,vreset) 56 | if (trefrac > spikewidth) { 57 | net_send(trefrac-spikewidth,3) 58 | } else { : also the end of the refractory period 59 | g = 0 60 | } 61 | } else if (flag == 3) { : end of refractory period 62 | state_discontinuity(v,vreset) 63 | g = 0 64 | } else if (flag == 4) { : watch membrane potential 65 | WATCH (v > vthresh) 1 66 | } 67 | } -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/reset.mod: -------------------------------------------------------------------------------- 1 | : Insert in a passive compartment to get an integrate-and-fire neuron 2 | : (no refractory period). 3 | : Andrew P. Davison. UNIC, CNRS, May 2006. 
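: On each incoming network event the membrane potential is set to vspike and
: a net_event is emitted; a self-event delivered 1e-12 ms later (flag 1) then
: resets the potential to vreset (see NET_RECEIVE below).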
4 | 5 | NEURON { 6 | POINT_PROCESS Reset 7 | RANGE vreset, vspike 8 | } 9 | 10 | UNITS { 11 | (mV) = (millivolt) 12 | } 13 | 14 | PARAMETER { 15 | vreset = -60 (mV) : reset potential after a spike 16 | vspike = 40 (mV) : spike height (mainly for graphical purposes) 17 | } 18 | 19 | ASSIGNED { 20 | v (millivolt) 21 | } 22 | 23 | NET_RECEIVE (weight) { 24 | if (flag == 1) { 25 | v = vreset 26 | } else { 27 | v = vspike 28 | net_send(1e-12,1) : using variable time step, this should allow the spike to be detected using threshold crossing 29 | net_event(t) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/stdwa_guetig.mod: -------------------------------------------------------------------------------- 1 | COMMENT 2 | Spike Timing Dependent Weight Adjuster 3 | based on Song and Abbott, 2001, but with weight limits according to Guetig et al, 2003 4 | Andrew Davison, UNIC, CNRS, 2003-2005, 2009 5 | ENDCOMMENT 6 | 7 | NEURON { 8 | POINT_PROCESS StdwaGuetig 9 | RANGE interval, tlast_pre, tlast_post, M, P 10 | RANGE deltaw, wmax, wmin, aLTP, aLTD, tauLTP, tauLTD, on 11 | RANGE muLTP, muLTD 12 | RANGE allow_update_on_post 13 | POINTER wsyn 14 | } 15 | 16 | ASSIGNED { 17 | interval (ms) : since last spike of the other kind 18 | tlast_pre (ms) : time of last presynaptic spike 19 | tlast_post (ms) : time of last postsynaptic spike 20 | M : LTD function 21 | P : LTP function 22 | deltaw : change in weight 23 | wsyn : weight of the synapse 24 | } 25 | 26 | INITIAL { 27 | interval = 0 28 | tlast_pre = 0 29 | tlast_post = 0 30 | M = 0 31 | P = 0 32 | deltaw = 0 33 | } 34 | 35 | PARAMETER { 36 | tauLTP = 20 (ms) : decay time for LTP part ( values from ) 37 | tauLTD = 20 (ms) : decay time for LTD part ( Song and Abbott, 2001 ) 38 | wmax = 1 : min and max values of synaptic weight 39 | wmin = 0 40 | aLTP = 0.001 : amplitude of LTP steps 41 | aLTD = 0.00106 : amplitude of LTD steps 42 | muLTP = 0.0 43 | muLTD = 0.0 44 | on = 1 : allows learning to be turned on and off 45 | allow_update_on_post = 1 : if this is true, we update the weight on receiving both pre- and post-synaptic spikes 46 | : if it is false, weight updates are accumulated and applied only for a pre-synaptic spike 47 | } 48 | 49 | NET_RECEIVE (w) { 50 | if (w >= 0) { : this is a pre-synaptic spike 51 | P = P*exp((tlast_pre-t)/tauLTP) + aLTP 52 | interval = tlast_post - t : interval is negative 53 | tlast_pre = t 54 | deltaw = deltaw + (wsyn-wmin)^muLTD * M * exp(interval/tauLTD) 55 | } else { : this is a post-synaptic spike 56 | M = M*exp((tlast_post-t)/tauLTD) - aLTD 57 | interval = t - tlast_pre : interval is positive 58 | tlast_post = t 59 | deltaw = deltaw + (wmax-wsyn)^muLTP * P * exp(-interval/tauLTP) 60 | } 61 | if (on) { 62 | if (w >= 0 || allow_update_on_post) { 63 | wsyn = wsyn + deltaw 64 | deltaw = 0.0 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/stdwa_softlimits.mod: -------------------------------------------------------------------------------- 1 | COMMENT 2 | Spike Timing Dependent Weight Adjuster 3 | based on Song and Abbott, 2001, but with soft weight limits 4 | Andrew Davison, UNIC, CNRS, 2003-2005, 2009 5 | ENDCOMMENT 6 | 7 | NEURON { 8 | POINT_PROCESS StdwaSoft 9 | RANGE interval, tlast_pre, tlast_post, M, P 10 | RANGE deltaw, wmax, wmin, aLTP, aLTD, wprune, tauLTP, tauLTD, on 11 | RANGE allow_update_on_post 12 | POINTER wsyn 13 | } 14 | 15 | ASSIGNED { 16 | interval (ms) : since 
last spike of the other kind 17 | tlast_pre (ms) : time of last presynaptic spike 18 | tlast_post (ms) : time of last postsynaptic spike 19 | M : LTD function 20 | P : LTP function 21 | deltaw : change in weight 22 | wsyn : weight of the synapse 23 | } 24 | 25 | INITIAL { 26 | interval = 0 27 | tlast_pre = 0 28 | tlast_post = 0 29 | M = 0 30 | P = 0 31 | deltaw = 0 32 | } 33 | 34 | PARAMETER { 35 | tauLTP = 20 (ms) : decay time for LTP part ( values from ) 36 | tauLTD = 20 (ms) : decay time for LTD part ( Song and Abbott, 2001 ) 37 | wmax = 1 : min and max values of synaptic weight 38 | wmin = 0 39 | aLTP = 0.001 : amplitude of LTP steps 40 | aLTD = 0.00106 : amplitude of LTD steps 41 | on = 1 : allows learning to be turned on and off globally 42 | wprune = 0 : default is no pruning 43 | allow_update_on_post = 1 : if this is true, we update the weight on receiving both pre- and post-synaptic spikes 44 | : if it is false, weight updates are accumulated and applied only for a pre-synaptic spike 45 | } 46 | 47 | NET_RECEIVE (w) { 48 | if (w >= 0) { : this is a pre-synaptic spike 49 | P = P*exp((tlast_pre-t)/tauLTP) + aLTP 50 | interval = tlast_post - t : interval is negative 51 | tlast_pre = t 52 | deltaw = deltaw + (wsyn-wmin) * M * exp(interval/tauLTD) 53 | } else { : this is a post-synaptic spike 54 | M = M*exp((tlast_post-t)/tauLTD) - aLTD 55 | interval = t - tlast_pre : interval is positive 56 | tlast_post = t 57 | deltaw = deltaw + (wmax-wsyn) * P * exp(-interval/tauLTP) 58 | } 59 | if (on) { 60 | if (w >= 0 || allow_update_on_post) { 61 | if (wsyn > wprune) { 62 | wsyn = wsyn + deltaw 63 | } else { 64 | wsyn = 0 65 | } 66 | deltaw = 0.0 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/stdwa_songabbott.mod: -------------------------------------------------------------------------------- 1 | COMMENT 2 | Spike Timing Dependent Weight Adjuster 3 | based on Song and Abbott, 2001. 
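Pairings are accumulated all-to-all through the exponentially decaying traces
P (incremented by aLTP on presynaptic spikes) and M (decremented by aLTD on
postsynaptic spikes); a presynaptic spike changes the weight additively by
wmax*M*exp(dt/tauLTD) (depression) and a postsynaptic spike by
wmax*P*exp(-dt/tauLTP) (potentiation), with the result clipped to the hard
bounds [wmin, wmax].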
4 | Andrew Davison, UNIC, CNRS, 2003-2004, 2009 5 | ENDCOMMENT 6 | 7 | NEURON { 8 | POINT_PROCESS StdwaSA 9 | RANGE interval, tlast_pre, tlast_post, M, P 10 | RANGE deltaw, wmax, wmin, aLTP, aLTD, tauLTP, tauLTD, on 11 | RANGE allow_update_on_post 12 | POINTER wsyn 13 | } 14 | 15 | ASSIGNED { 16 | interval (ms) : since last spike of the other kind 17 | tlast_pre (ms) : time of last presynaptic spike 18 | tlast_post (ms) : time of last postsynaptic spike 19 | M : LTD function 20 | P : LTP function 21 | deltaw : change in weight 22 | wsyn : weight of the synapse 23 | } 24 | 25 | INITIAL { 26 | interval = 0 27 | tlast_pre = 0 28 | tlast_post = 0 29 | M = 0 30 | P = 0 31 | deltaw = 0 32 | } 33 | 34 | PARAMETER { 35 | tauLTP = 20 (ms) : decay time for LTP part ( values from ) 36 | tauLTD = 20 (ms) : decay time for LTD part ( Song and Abbott, 2001 ) 37 | wmax = 1 : min and max values of synaptic weight 38 | wmin = 0 : 39 | aLTP = 0.001 : amplitude of LTP steps 40 | aLTD = 0.00106 : amplitude of LTD steps 41 | on = 1 : allows learning to be turned on and off 42 | allow_update_on_post = 1 : if this is true, we update the weight on receiving both pre- and post-synaptic spikes 43 | : if it is false, weight updates are accumulated and applied only for a pre-synaptic spike 44 | } 45 | 46 | NET_RECEIVE (w) { 47 | if (w >= 0) { : this is a pre-synaptic spike 48 | P = P*exp((tlast_pre-t)/tauLTP) + aLTP 49 | interval = tlast_post - t : interval is negative 50 | tlast_pre = t 51 | deltaw = deltaw + wmax * M * exp(interval/tauLTD) 52 | :printf("pre: t=%f P=%f M=%f interval=%f deltaw=%f w_syn(b4)=%f\n", t, P, M, interval, deltaw, wsyn) 53 | } else { : this is a post-synaptic spike 54 | M = M*exp((tlast_post-t)/tauLTD) - aLTD 55 | interval = t - tlast_pre : interval is positive 56 | tlast_post = t 57 | deltaw = deltaw + wmax * P * exp(-interval/tauLTP) 58 | :printf("post: t=%f P=%f M=%f interval=%f deltaw=%f, w_syn(b4)=%f\n", t, P, M, interval, deltaw, wsyn) 59 | } 60 | if (on) { 61 | if (w >= 0 || allow_update_on_post) { 62 | wsyn = wsyn + deltaw 63 | if (wsyn > wmax) { 64 | wsyn = wmax 65 | } 66 | if (wsyn < wmin) { 67 | wsyn = wmin 68 | } 69 | deltaw = 0.0 70 | } 71 | } 72 | :printf("update: w=%f\n", wsyn) 73 | } 74 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/stdwa_symm.mod: -------------------------------------------------------------------------------- 1 | COMMENT 2 | Spike Timing Dependent Weight Adjuster 3 | with symmetric functions (i.e. only depends on the absolute value of the 4 | time difference, not on its sign. 
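The weight change function implemented below is
f(dt) = (1 - (dt/tau_a)^2) * exp(-|dt|/tau_b),
so pairings with |dt| < tau_a potentiate and pairings with |dt| > tau_a
depress, each by an amount wmax*a*f(dt).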
5 | Andrew Davison, UNIC, CNRS, 2004, 2009 6 | ENDCOMMENT 7 | 8 | NEURON { 9 | POINT_PROCESS StdwaSymm 10 | RANGE interval, tlast_pre, tlast_post 11 | RANGE deltaw, wmax, f, tau_a, tau_b, a, on 12 | RANGE allow_update_on_post 13 | POINTER wsyn 14 | } 15 | 16 | ASSIGNED { 17 | interval (ms) : since last spike of the other kind 18 | tlast_pre (ms) : time of last presynaptic spike 19 | tlast_post (ms) : time of last postsynaptic spike 20 | f : weight change function 21 | deltaw : change in weight 22 | wsyn : weight of the synapse 23 | tas (ms2) : tau_a squared 24 | } 25 | 26 | INITIAL { 27 | interval = 0 28 | tlast_pre = 0 29 | tlast_post = 0 30 | f = 0 31 | deltaw = 0 32 | } 33 | 34 | PARAMETER { 35 | tau_a = 20 (ms) : crossing point from LTP to LTD 36 | tau_b = 15 (ms) : decay time constant for exponential part of f 37 | wmax = 1 : min and max values of synaptic weight 38 | a = 0.001 : step amplitude 39 | on = 1 : allows learning to be turned on and off 40 | allow_update_on_post = 1 : if this is true, we update the weight on receiving both pre- and post-synaptic spikes 41 | : if it is false, weight updates are accumulated and applied only for a pre-synaptic spike 42 | } 43 | 44 | NET_RECEIVE (w) { 45 | tas = tau_a * tau_a : do it here in case tau_a has been changed since the last spike 46 | 47 | if (w >= 0) { : this is a pre-synaptic spike 48 | interval = tlast_post - t 49 | tlast_pre = t 50 | f = (1 - interval*interval/tas) * exp(interval/tau_b) 51 | deltaw = deltaw + wmax * a * f 52 | } else { : this is a post-synaptic spike 53 | interval = t - tlast_pre 54 | tlast_post = t 55 | f = (1 - interval*interval/tas) * exp(-interval/tau_b) 56 | deltaw = deltaw + wmax * a* f 57 | } 58 | if (on) { 59 | if (w >= 0 || allow_update_on_post) { 60 | wsyn = wsyn + deltaw 61 | if (wsyn > wmax) { 62 | wsyn = wmax 63 | } 64 | if (wsyn < 0) { 65 | wsyn = 0 66 | } 67 | deltaw = 0.0 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/stochastic_synapse.mod: -------------------------------------------------------------------------------- 1 | COMMENT 2 | Implementation of a simple stochastic synapse (constant release probability) 3 | as a "weight adjuster" (i.e. it sets the weight of the synapse to zero if 4 | transmission fails). 5 | 6 | Andrew Davison, UNIC, CNRS, 2016 7 | ENDCOMMENT 8 | 9 | NEURON { 10 | POINT_PROCESS SimpleStochasticWA 11 | RANGE p 12 | POINTER rng, wsyn 13 | } 14 | 15 | PARAMETER { 16 | p = 0.5 : probability that transmission succeeds 17 | } 18 | 19 | VERBATIM 20 | #include 21 | #include 22 | #include 23 | 24 | double nrn_random_pick(void* r); 25 | void* nrn_random_arg(int argpos); 26 | 27 | ENDVERBATIM 28 | 29 | ASSIGNED { 30 | wsyn 31 | rng 32 | } 33 | 34 | NET_RECEIVE(w) { 35 | if (urand() < p) { 36 | wsyn = w 37 | } else { 38 | wsyn = 0.0 39 | } 40 | } 41 | 42 | PROCEDURE setRNG() { 43 | : This function takes a NEURON Random object declared in hoc and makes it usable by this mod file 44 | : The Random must be in uniform(1) mode 45 | VERBATIM 46 | { 47 | void** pv = (void**)(&_p_rng); 48 | if( ifarg(1)) { 49 | *pv = nrn_random_arg(1); 50 | } else { 51 | *pv = (void*)0; 52 | } 53 | } 54 | ENDVERBATIM 55 | } 56 | 57 | FUNCTION urand() { 58 | VERBATIM 59 | double value; 60 | if (_p_rng) { 61 | /* 62 | :Supports separate independent but reproducible streams for 63 | : each instance. 
However, the corresponding hoc Random 64 | : distribution MUST be set to Random.negexp(1) 65 | */ 66 | value = nrn_random_pick(_p_rng); 67 | //printf("random stream for this simulation = %lf\n",value); 68 | return value; 69 | } else { 70 | ENDVERBATIM 71 | value = scop_random(1) 72 | VERBATIM 73 | } 74 | ENDVERBATIM 75 | urand = value 76 | } 77 | -------------------------------------------------------------------------------- /pyNN/neuron/nmodl/tsodyksmarkram.mod: -------------------------------------------------------------------------------- 1 | COMMENT 2 | Implementation of the Tsodyks-Markram mechanism for synaptic depression and 3 | facilitation as a "weight adjuster" 4 | Andrew Davison, UNIC, CNRS, 2013 5 | ENDCOMMENT 6 | 7 | NEURON { 8 | POINT_PROCESS TsodyksMarkramWA 9 | RANGE tau_rec, tau_facil, U, u0, tau_syn 10 | POINTER wsyn 11 | } 12 | 13 | PARAMETER { 14 | tau_rec = 100 (ms) <1e-9, 1e9> 15 | tau_facil = 1000 (ms) <0, 1e9> 16 | U = 0.04 (1) <0, 1> 17 | u0 = 0 (1) <0, 1> 18 | tau_syn = 2 (ms) <1e-9, 1e9> : should be set to be the same as the receiving synapse 19 | } 20 | 21 | ASSIGNED { 22 | x 23 | y 24 | z 25 | u 26 | t_last (ms) 27 | wsyn 28 | } 29 | 30 | INITIAL { 31 | y = 0 32 | z = 0 33 | u = u0 34 | t_last = -1e99 35 | } 36 | 37 | NET_RECEIVE(w) { 38 | INITIAL { 39 | t_last = t 40 | } 41 | z = z*exp(-(t - t_last)/tau_rec) 42 | z = z + y*(exp(-(t - t_last)/tau_syn) - exp(-(t - t_last)/tau_rec)) / ((tau_syn/tau_rec) - 1) 43 | y = y*exp(-(t - t_last)/tau_syn) 44 | x = 1 - y - z 45 | if (tau_facil > 0) { 46 | u = u*exp(-(t - t_last)/tau_facil) 47 | u = u + U*(1-u) 48 | } else { 49 | u = U 50 | } 51 | wsyn = w*x*u 52 | y = y + x*u 53 | :printf("U=%g tau_rec=%g tau_facil=%g tau_syn=%g w=%g\n", U, tau_rec, tau_facil, tau_syn, w) 54 | :printf("%g\t%g\t%g\t%g\t%g\t%g\t%g\n", t, t - t_last, y, z, u, x, wsyn) 55 | t_last = t 56 | } 57 | -------------------------------------------------------------------------------- /pyNN/neuron/procedural_api.py: -------------------------------------------------------------------------------- 1 | """ 2 | NEURON implementation of the "procedural" or "low-level" API for 3 | creating, connecting and recording from individual neurons. 4 | 5 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 6 | :license: CeCILL, see LICENSE for details. 7 | """ 8 | 9 | from .. import common 10 | from ..connectors import FixedProbabilityConnector 11 | from .populations import Population 12 | from .projections import Projection 13 | from .standardmodels.synapses import StaticSynapse 14 | from . 
import simulator 15 | 16 | create = common.build_create(Population) 17 | 18 | connect = common.build_connect(Projection, FixedProbabilityConnector, StaticSynapse) 19 | 20 | set = common.set 21 | 22 | record = common.build_record(simulator) 23 | 24 | 25 | def record_v(source, filename): return record(['v'], source, filename) 26 | 27 | 28 | def record_gsyn(source, filename): return record(['gsyn_exc', 'gsyn_inh'], source, filename) 29 | -------------------------------------------------------------------------------- /pyNN/neuron/standardmodels/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/pyNN/neuron/standardmodels/__init__.py -------------------------------------------------------------------------------- /pyNN/neuron/standardmodels/ion_channels.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | 4 | """ 5 | 6 | from pyNN.standardmodels import ion_channels as standard, build_translations 7 | 8 | 9 | class NaChannel(standard.NaChannel): 10 | translations = build_translations( 11 | ('conductance_density', 'gnabar'), 12 | ('e_rev', 'ena'), 13 | ) 14 | variable_translations = { 15 | 'm': ('hh', 'm'), 16 | 'h': ('hh', 'h') 17 | } 18 | model = "hh" 19 | conductance_density_parameter = 'gnabar' 20 | 21 | 22 | class KdrChannel(standard.KdrChannel): 23 | translations = build_translations( 24 | ('conductance_density', 'gkbar'), 25 | ('e_rev', 'ek'), 26 | ) 27 | variable_translations = { 28 | 'n': ('hh', 'n') 29 | } 30 | model = "hh" 31 | conductance_density_parameter = 'gkbar' 32 | 33 | 34 | class PassiveLeak(standard.PassiveLeak): 35 | translations = build_translations( 36 | ('conductance_density', 'g'), 37 | ('e_rev', 'e'), 38 | ) 39 | variable_translations = {} 40 | model = "pas" 41 | conductance_density_parameter = 'g' 42 | 43 | 44 | class PassiveLeakHH(standard.PassiveLeak): 45 | translations = build_translations( 46 | ('conductance_density', 'gl'), 47 | ('e_rev', 'el'), 48 | ) 49 | variable_translations = {} 50 | model = "hh" 51 | conductance_density_parameter = 'gl' 52 | -------------------------------------------------------------------------------- /pyNN/neuron/standardmodels/receptors.py: -------------------------------------------------------------------------------- 1 | from neuron import h 2 | from pyNN.standardmodels import receptors, build_translations 3 | 4 | 5 | class CurrExpPostSynapticResponse(receptors.CurrExpPostSynapticResponse): 6 | 7 | translations = build_translations( 8 | ('locations', 'locations'), 9 | ('tau_syn', 'tau') 10 | ) 11 | model = h.ExpISyn 12 | recordable = ["isyn"] 13 | variable_map = {"isyn": "i"} 14 | 15 | 16 | class CondExpPostSynapticResponse(receptors.CondExpPostSynapticResponse): 17 | 18 | translations = build_translations( 19 | ('locations', 'locations'), 20 | ('e_syn', 'e'), 21 | ('tau_syn', 'tau') 22 | ) 23 | model = h.ExpSyn 24 | recordable = ["gsyn"] 25 | variable_map = {"gsyn": "g"} 26 | 27 | 28 | class CondAlphaPostSynapticResponse(receptors.CondAlphaPostSynapticResponse): 29 | 30 | translations = build_translations( 31 | ('locations', 'locations'), 32 | ('e_syn', 'e'), 33 | ('tau_syn', 'tau') 34 | ) 35 | model = h.AlphaSyn 36 | recordable = ["gsyn"] 37 | variable_map = {"gsyn": "g"} 38 | 39 | 40 | # create shorter aliases 41 | 42 | AlphaPSR = CondAlphaPostSynapticResponse 43 | ExpPSR = CondExpPostSynapticResponse 44 | 
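# These classes map PyNN's standard post-synaptic response models onto NEURON
# point processes: ExpISyn and AlphaSyn are the NMODL mechanisms shipped in
# pyNN/neuron/nmodl, while ExpSyn is NEURON's built-in exponential conductance
# synapse. variable_map gives the NEURON range variable that is recorded for
# each PyNN variable name.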
-------------------------------------------------------------------------------- /pyNN/nineml/connectors.py: -------------------------------------------------------------------------------- 1 | """ 2 | Export of PyNN scripts as NineML. 3 | 4 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 5 | :license: CeCILL, see LICENSE for details. 6 | """ 7 | 8 | import nineml.user as nineml 9 | 10 | from pyNN import connectors 11 | from .utility import build_parameter_set, catalog_url 12 | 13 | 14 | class ConnectorMixin(object): 15 | 16 | def to_nineml(self, label): 17 | connector_parameters = {} 18 | for name in self.__class__.parameter_names: 19 | connector_parameters[name] = getattr(self, name) 20 | connection_rule = nineml.ConnectionRuleComponent( 21 | name="connection rule for projection %s" % label, 22 | definition=nineml.Definition(self.definition_url, 23 | "connection_generator"), 24 | parameters=build_parameter_set(connector_parameters)) 25 | return connection_rule 26 | 27 | 28 | class FixedProbabilityConnector(ConnectorMixin, connectors.FixedProbabilityConnector): 29 | definition_url = "%s/connectionrules/random_fixed_probability.xml" % catalog_url 30 | parameter_names = ('p_connect', 'allow_self_connections') 31 | 32 | 33 | class DistanceDependentProbabilityConnector( 34 | ConnectorMixin, 35 | connectors.DistanceDependentProbabilityConnector 36 | ): 37 | definition_url = "%s/connectionrules/distance_dependent_probability.xml" % catalog_url 38 | parameter_names = ('d_expression', 'allow_self_connections') # space 39 | 40 | 41 | def list_connectors(): 42 | return [FixedProbabilityConnector, DistanceDependentProbabilityConnector] 43 | -------------------------------------------------------------------------------- /pyNN/nineml/projections.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | """ 3 | Export of PyNN scripts as NineML. 4 | 5 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 6 | :license: CeCILL, see LICENSE for details. 7 | """ 8 | 9 | import nineml.user as nineml 10 | 11 | from pyNN import common 12 | from pyNN.space import Space 13 | from . import simulator 14 | from .utility import catalog_url, build_parameter_set 15 | 16 | 17 | class Projection(common.Projection): 18 | __doc__ = common.Projection.__doc__ 19 | _simulator = simulator 20 | 21 | def __init__(self, presynaptic_population, postsynaptic_population, 22 | connector, synapse_type, source=None, receptor_type=None, 23 | space=Space(), label=None): 24 | common.Projection.__init__(self, presynaptic_population, postsynaptic_population, 25 | connector, synapse_type, source, receptor_type, 26 | space, label) 27 | self._simulator.state.net.projections.append(self) 28 | 29 | def __len__(self): 30 | return 0 31 | 32 | def to_nineml(self): 33 | safe_label = self.label.replace(u"→", "---") 34 | connection_rule = self._connector.to_nineml(safe_label) 35 | connection_type = nineml.ConnectionType( 36 | name="connection type for projection %s" % safe_label, 37 | definition=nineml.Definition("%s/connectiontypes/static_synapse.xml" % catalog_url, 38 | "dynamics"), 39 | parameters=build_parameter_set(self.synapse_type.native_parameters, self.shape)) 40 | synaptic_responses = self.post.get_synaptic_response_components(self.receptor_type) 41 | synaptic_response, = synaptic_responses 42 | projection = nineml.Projection( 43 | name=safe_label, 44 | source=self.pre.to_nineml(), # or just pass ref, and then resolve later? 
45 | target=self.post.to_nineml(), 46 | rule=connection_rule, 47 | synaptic_response=synaptic_response, 48 | connection_type=connection_type) 49 | return projection 50 | -------------------------------------------------------------------------------- /pyNN/nineml/recording.py: -------------------------------------------------------------------------------- 1 | """ 2 | Export of PyNN scripts as NineML. 3 | 4 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 5 | :license: CeCILL, see LICENSE for details. 6 | """ 7 | 8 | from pyNN import recording 9 | from . import simulator 10 | 11 | 12 | class Recorder(recording.Recorder): 13 | _simulator = simulator 14 | 15 | def _record(self, variable, new_ids): 16 | pass 17 | 18 | def get(self, variables, gather=False, filter_ids=None, clear=False, 19 | annotations=None): 20 | pass 21 | 22 | def write(self, variables, file=None, gather=False, filter_ids=None, 23 | clear=False, annotations=None): 24 | pass 25 | 26 | def _local_count(self, variable, filter_ids=None): 27 | return {} 28 | -------------------------------------------------------------------------------- /pyNN/nineml/simulator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Export of PyNN scripts as NineML. 3 | 4 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 5 | :license: CeCILL, see LICENSE for details. 6 | """ 7 | 8 | from pyNN import common 9 | 10 | name = "NineML" 11 | 12 | 13 | class ID(int, common.IDMixin): 14 | 15 | def __init__(self, n): 16 | """Create an ID object with numerical value `n`.""" 17 | int.__init__(n) 18 | common.IDMixin.__init__(self) 19 | 20 | 21 | class State(common.control.BaseState): 22 | 23 | def __init__(self): 24 | common.control.BaseState.__init__(self) 25 | self.mpi_rank = 0 26 | self.num_processes = 1 27 | self.clear() 28 | self.dt = 0.1 29 | 30 | def run(self, simtime): 31 | self.t += simtime 32 | self.running = True 33 | 34 | def clear(self): 35 | self.recorders = set([]) 36 | self.id_counter = 0 37 | self.segment_counter = -1 38 | self.reset() 39 | 40 | def reset(self): 41 | """Reset the state of the current network to time t = 0.""" 42 | self.running = False 43 | self.t = 0 44 | self.t_start = 0 45 | self.segment_counter += 1 46 | 47 | 48 | state = State() 49 | -------------------------------------------------------------------------------- /pyNN/nineml/synapses.py: -------------------------------------------------------------------------------- 1 | """ 2 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 3 | :license: CeCILL, see LICENSE for details. 
4 | """ 5 | 6 | from utility import catalog_url 7 | 8 | 9 | class StaticSynapticConnection(object): 10 | definition_url = "%s/connectiontypes/static_connection.xml" % catalog_url 11 | -------------------------------------------------------------------------------- /pyNN/serialization/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functions for exporting and importing networks to/from files 3 | """ 4 | 5 | from .sonata import ( # noqa:F401 6 | import_from_sonata, 7 | export_to_sonata, 8 | asciify, 9 | load_sonata_simulation_plan 10 | ) 11 | -------------------------------------------------------------------------------- /pyNN/spiNNaker.py: -------------------------------------------------------------------------------- 1 | from spynnaker.pyNN import * 2 | -------------------------------------------------------------------------------- /pyNN/standardmodels/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Machinery for implementation of "standard models", i.e. neuron and synapse models 3 | that are available in multiple simulators: 4 | 5 | Functions: 6 | build_translations() 7 | 8 | Classes: 9 | StandardModelType 10 | StandardCellType 11 | ModelNotAvailable 12 | STDPWeightDependence 13 | STDPTimingDependence 14 | 15 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 16 | :license: CeCILL, see LICENSE for details. 17 | 18 | """ 19 | 20 | 21 | from .base import ( # noqa: F401 22 | build_translations, 23 | check_weights, 24 | check_delays, 25 | ModelNotAvailable, 26 | StandardCellType, 27 | StandardCellTypeComponent, 28 | StandardCurrentSource, 29 | StandardIonChannelModel, 30 | StandardModelType, 31 | StandardPostSynapticResponse, 32 | StandardSynapseType, 33 | STDPTimingDependence, 34 | STDPWeightDependence, 35 | ) 36 | -------------------------------------------------------------------------------- /pyNN/standardmodels/ion_channels.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | 4 | 5 | """ 6 | 7 | 8 | from pyNN.standardmodels import StandardIonChannelModel 9 | # from pyNN.morphology import uniform 10 | 11 | 12 | class NaChannel(StandardIonChannelModel): 13 | default_parameters = { 14 | "conductance_density": 0.12, # uniform('all', 0.12), 15 | # "e_rev": 50.0 16 | } 17 | default_initial_values = { 18 | # "m": 0.0, # todo: make these functions 19 | # "h": 1.0 20 | } 21 | 22 | 23 | class KdrChannel(StandardIonChannelModel): 24 | default_parameters = { 25 | "conductance_density": 0.036, # uniform('all', 0.036), 26 | # "e_rev": -77.0 27 | } 28 | default_initial_values = { 29 | # "n": 1.0 30 | } 31 | 32 | 33 | class PassiveLeak(StandardIonChannelModel): 34 | default_parameters = { 35 | "conductance_density": 0.0003, # uniform('all', 0.0003), 36 | "e_rev": -65.0 37 | } 38 | 39 | 40 | class PassiveLeakHH(StandardIonChannelModel): 41 | default_parameters = { 42 | "conductance_density": 0.0003, # uniform('all', 0.0003), 43 | "e_rev": -54.3 44 | } 45 | -------------------------------------------------------------------------------- /pyNN/utility/progress_bar.py: -------------------------------------------------------------------------------- 1 | """ 2 | Classes for showing progress bars in the shell during simulations. 3 | 4 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 5 | :license: CeCILL, see LICENSE for details. 
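A SimulationProgressBar instance can be passed as a callback to a backend's
run() function, e.g. run(t_stop, callbacks=[SimulationProgressBar(t_stop/80, t_stop)])
(illustrative usage: the callback is invoked with the current time and returns
the next time at which it wishes to be called).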
6 | """ 7 | 8 | import sys 9 | 10 | 11 | class ProgressBar(object): 12 | """ 13 | Create a progress bar in the shell. 14 | """ 15 | 16 | def __init__(self, width=77, char="#", mode="fixed"): 17 | self.char = char 18 | self.mode = mode 19 | if self.mode not in ['fixed', 'dynamic']: 20 | self.mode = 'fixed' 21 | self.width = width 22 | 23 | def set_level(self, level): 24 | """ 25 | Rebuild the bar string based on `level`, which should be a number 26 | between 0 and 1. 27 | """ 28 | if level < 0: 29 | level = 0 30 | if level > 1: 31 | level = 1 32 | 33 | # figure the proper number of 'character' make up the bar 34 | all_full = self.width - 2 35 | num_hashes = int(round(level * all_full)) 36 | 37 | if self.mode == 'dynamic': 38 | # build a progress bar with self.char (to create a dynamic bar 39 | # where the percent string moves along with the bar progress. 40 | bar = self.char * num_hashes 41 | else: 42 | # build a progress bar with self.char and spaces (to create a 43 | # fixed bar (the percent string doesn't move) 44 | bar = self.char * num_hashes + ' ' * (all_full - num_hashes) 45 | bar = u'[ %s ] %3.0f%%' % (bar, 100 * level) 46 | print(bar, end=u' \r') 47 | sys.stdout.flush() 48 | 49 | def __call__(self, level): 50 | self.set_level(level) 51 | 52 | 53 | class SimulationProgressBar(ProgressBar): 54 | 55 | def __init__(self, interval, t_stop, char="#", mode="fixed"): 56 | super().__init__(width=int(t_stop / interval), char=char, mode=mode) 57 | self.interval = interval 58 | self.t_stop = t_stop 59 | 60 | def __call__(self, t): 61 | self.set_level(t / self.t_stop) 62 | return t + self.interval 63 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "PyNN" 3 | version = "0.12.4" 4 | description = "A Python package for simulator-independent specification of neuronal network models" 5 | readme = "README.rst" 6 | requires-python = ">=3.9" 7 | license = "CECILL-2.1" 8 | authors = [ 9 | {name = "The PyNN team", email = "pynn-maintainers@protonmail.com"} 10 | ] 11 | maintainers = [ 12 | {name = "The PyNN team", email = "pynn-maintainers@protonmail.com"} 13 | ] 14 | keywords = ["computational neuroscience, simulation, neuron, nest, brian2, neuromorphic"] 15 | classifiers = [ 16 | "Development Status :: 4 - Beta", 17 | "Environment :: Console", 18 | "Intended Audience :: Science/Research", 19 | "Natural Language :: English", 20 | "Operating System :: OS Independent", 21 | "Programming Language :: Python :: 3", 22 | "Programming Language :: Python :: 3.9", 23 | "Programming Language :: Python :: 3.10", 24 | "Programming Language :: Python :: 3.11", 25 | "Programming Language :: Python :: 3.12", 26 | "Programming Language :: Python :: 3.13", 27 | "Topic :: Education", 28 | "Topic :: Scientific/Engineering" 29 | ] 30 | dependencies = [ 31 | "lazyarray>=0.5.2", 32 | "neo>=0.13.4", 33 | "morphio" 34 | ] 35 | 36 | [project.optional-dependencies] 37 | test = ["pytest", "pytest-cov", "flake8", "wheel", "mpi4py", "scipy", "matplotlib", "Cheetah3", "h5py", "Jinja2"] 38 | doc = ["sphinx"] 39 | examples = ["matplotlib", "scipy"] 40 | plotting = ["matplotlib", "scipy"] 41 | MPI = ["mpi4py"] 42 | sonata = ["h5py"] 43 | neuron = ["neuron", "nrnutils"] 44 | brian2 = ["brian2"] 45 | arbor = ["arbor==0.9.0", "libNeuroML"] 46 | spiNNaker = ["spyNNaker"] 47 | neuroml = ["libNeuroML"] 48 | 49 | [project.urls] 50 | homepage = "http://neuralensemble.org/PyNN/" 51 | 
documentation = "http://neuralensemble.org/docs/PyNN/" 52 | repository = "https://github.com/NeuralEnsemble/PyNN" 53 | changelog = "http://neuralensemble.org/docs/PyNN/release_notes.html" 54 | download = "http://pypi.python.org/pypi/PyNN" 55 | 56 | [build-system] 57 | requires = ["setuptools"] 58 | build-backend = "setuptools.build_meta" 59 | 60 | [tool.setuptools.packages.find] 61 | where = ["."] 62 | include = ["pyNN*"] 63 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup() -------------------------------------------------------------------------------- /test/README: -------------------------------------------------------------------------------- 1 | PyNN testing is divided into several different types of test: 2 | * checking the API is syntactically consistent between different backends 3 | * unit tests 4 | * system tests 5 | * running the example scripts 6 | * doctests 7 | 8 | For full documentation on testing, see:http://neuralensemble.org/docs/PyNN/developers/contributing.html#testing 9 | -------------------------------------------------------------------------------- /test/benchmarks/README.txt: -------------------------------------------------------------------------------- 1 | ===================== 2 | Benchmarking strategy 3 | ===================== 4 | 5 | The aim of benchmarking is to provide measurements that 6 | 7 | 1. allow us to determine the effect of code changes on performance; 8 | 2. tell us how much overhead PyNN has compared to the underlying simulators; 9 | 3. show how well PyNN scales with the number of MPI processes. 10 | 11 | Point 2 means that where possible, we should provide both a PyNN version and 12 | a PyNEST version of benchmarks. PyNEURON, Brian and NEST-SLI versions would also 13 | be interesting. 14 | 15 | We should use the PyNN Timer object to separate timings for different phases, i.e. 16 | 17 | * module imports 18 | * setup 19 | * network construction 20 | - creating neurons 21 | - creating connections 22 | * recording specification 23 | * simulation 24 | * data retrieval 25 | 26 | We should start with a fresh kernel every time, except where we specifically test the effect of reset. 27 | Each simulation should write to a common database, from which we can generate reports. Ideally, we would 28 | use Sumatra or Mozaik for this, but it might be simpler to start with, e.g., shelve or csv 29 | 30 | Ideas for benchmarks: 31 | 32 | * a simple network with two connected populations, varying: 33 | - number of neurons 34 | - number of connections 35 | - neuron type (in particular, spike sources are treated very differently in NEST to other neuron models) 36 | - synapse type 37 | - connection method 38 | - number of neurons recorded 39 | * incremental simulation, with and without clearing recorders. 
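As a concrete illustration of the Timer-based phase timings described above, a minimal
sketch follows (the backend, network parameters and printed fields are illustrative
assumptions only; it assumes pyNN.utility.Timer with its start() and diff() methods)::

    from pyNN.utility import Timer
    import pyNN.nest as sim          # backend choice is illustrative

    timer = Timer()
    timer.start()                    # module imports already done at this point
    sim.setup(timestep=0.1)
    t_setup = timer.diff()           # time spent in setup
    p = sim.Population(1000, sim.IF_cond_exp())
    prj = sim.Projection(p, p, sim.FixedProbabilityConnector(0.02),
                         sim.StaticSynapse(weight=0.001, delay=0.5))
    t_build = timer.diff()           # network construction
    p.record("spikes")
    t_record = timer.diff()          # recording specification
    sim.run(1000.0)
    t_run = timer.diff()             # simulation
    data = p.get_data()
    t_get = timer.diff()             # data retrieval
    sim.end()
    print(t_setup, t_build, t_record, t_run, t_get)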
40 | -------------------------------------------------------------------------------- /test/benchmarks/all_to_all_network.param: -------------------------------------------------------------------------------- 1 | { 2 | "simulator": "pyNN.nest", 3 | "threads": 1, 4 | "populations": { 5 | "A": { 6 | "n": 10000, 7 | "celltype": "IF_cond_exp", 8 | "params": { 9 | "i_offset": 1.0 }}, 10 | "B": { 11 | "n": 10000, 12 | "celltype": "IF_cond_exp", 13 | "params": { 14 | "i_offset": 0.5 }}}, 15 | "sim_time": 100.0, 16 | "recording": { 17 | "A": { 18 | "v": 10, 19 | "spikes": 1000}, 20 | "B": { 21 | "v": 10, 22 | "spikes": 1000}}, 23 | "projections": { 24 | "AB": { 25 | "pre": "A", 26 | "post": "B", 27 | "connector": { 28 | "type": "AllToAllConnector", 29 | "params": {}}, 30 | "synapse_type": { 31 | "type": "StaticSynapse", 32 | "params": { 33 | "weight": 0.000001, 34 | "delay": 1.5}}, 35 | "receptor_type": "excitatory"}} 36 | } 37 | -------------------------------------------------------------------------------- /test/benchmarks/ddpc.py: -------------------------------------------------------------------------------- 1 | from NeuroTools.parameters import ParameterSet 2 | import sys 3 | from math import sqrt 4 | from pyNN.space import Space, Grid2D 5 | 6 | P = ParameterSet(sys.argv[1]) 7 | 8 | exec("import pyNN.%s as sim" % P.simulator) 9 | 10 | sim.setup() 11 | 12 | dx1 = dy1 = 500.0 / sqrt(P.n1) 13 | dx2 = dy2 = 500.0 / sqrt(P.n2) 14 | struct1 = Grid2D(dx=dx1, dy=dy1) 15 | struct2 = Grid2D(dx=dx2, dy=dy2) 16 | 17 | p1 = sim.Population(P.n1, sim.IF_cond_exp, structure=struct1) 18 | p2 = sim.Population(P.n2, sim.IF_cond_exp, structure=struct2) 19 | 20 | space = Space() 21 | DDPC = sim.DistanceDependentProbabilityConnector 22 | c = DDPC(P.d_expression, P.allow_self_connections, P.weights, P.delays, space, P.safe) 23 | 24 | prj = sim.Projection(p1, p2, c) 25 | 26 | sys.stdout.write(p1.describe().encode('utf-8')) 27 | sys.stdout.write(p2.describe().encode('utf-8')) 28 | sys.stdout.write(prj.describe().encode('utf-8')) 29 | 30 | 31 | sim.end() 32 | -------------------------------------------------------------------------------- /test/benchmarks/neurons_no_recording.param: -------------------------------------------------------------------------------- 1 | { 2 | "simulator": "pyNN.nest", 3 | "n": 10000, 4 | "sim_time": 100.0, 5 | "recording": None 6 | } -------------------------------------------------------------------------------- /test/benchmarks/neurons_with_recording.param: -------------------------------------------------------------------------------- 1 | { 2 | "simulator": "pyNN.nest", 3 | "populations": { 4 | "A": { 5 | "n": 10000, 6 | "celltype": "IF_cond_exp", 7 | "params": { 8 | "i_offset": 1.0 }}, 9 | "B": { 10 | "n": 10000, 11 | "celltype": "IF_cond_exp", 12 | "params": { 13 | "i_offset": 0.5 }}}, 14 | "sim_time": 100.0, 15 | "recording": { 16 | "A": { 17 | "v": 100, 18 | "spikes": 1000}, 19 | "B": { 20 | "v": 100, 21 | "spikes": 1000}} 22 | } 23 | -------------------------------------------------------------------------------- /test/system/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/test/system/__init__.py -------------------------------------------------------------------------------- /test/system/scenarios/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/test/system/scenarios/__init__.py -------------------------------------------------------------------------------- /test/system/scenarios/fixtures.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import pytest 3 | 4 | available_modules = {} 5 | 6 | try: 7 | import pyNN.neuron 8 | available_modules["neuron"] = pyNN.neuron 9 | except ImportError: 10 | pass 11 | 12 | try: 13 | import pyNN.nest 14 | available_modules["nest"] = pyNN.nest 15 | except ImportError: 16 | pass 17 | 18 | try: 19 | import pyNN.brian2 20 | available_modules["brian2"] = pyNN.brian2 21 | except ImportError: 22 | pass 23 | 24 | try: 25 | import pyNN.arbor 26 | available_modules["arbor"] = pyNN.arbor 27 | except ImportError: 28 | pass 29 | 30 | 31 | class SimulatorNotAvailable: 32 | 33 | def __init__(self, sim_name): 34 | self.sim_name = sim_name 35 | 36 | def setup(self, *args, **kwargs): 37 | pytest.skip(f"{self.sim_name} not available") 38 | 39 | 40 | def get_simulator(sim_name): 41 | if sim_name in available_modules: 42 | return pytest.param(available_modules[sim_name], id=sim_name) 43 | else: 44 | return pytest.param(SimulatorNotAvailable(sim_name), id=sim_name) 45 | 46 | 47 | def run_with_simulators(*sim_names): 48 | sim_modules = (get_simulator(sim_name) for sim_name in sim_names) 49 | 50 | return pytest.mark.parametrize("sim", sim_modules) 51 | -------------------------------------------------------------------------------- /test/system/scenarios/test_issue231.py: -------------------------------------------------------------------------------- 1 | """ 2 | min_delay should be calculated automatically if not set 3 | """ 4 | 5 | from .fixtures import run_with_simulators 6 | 7 | # for NEURON, this only works when run with MPI and more than one process 8 | @run_with_simulators("nest", "brian2") 9 | def test_issue231(sim): 10 | sim.setup(min_delay='auto') 11 | 12 | p1 = sim.Population(13, sim.IF_cond_exp()) 13 | p2 = sim.Population(25, sim.IF_cond_exp()) 14 | 15 | connector = sim.AllToAllConnector() 16 | synapse = sim.StaticSynapse(delay=0.5) 17 | prj = sim.Projection(p1, p2, connector, synapse) 18 | sim.run(100.0) 19 | assert sim.get_min_delay() == 0.5 20 | sim.end() 21 | 22 | 23 | if __name__ == '__main__': 24 | from pyNN.utility import get_simulator 25 | sim, args = get_simulator() 26 | test_issue231(sim) 27 | -------------------------------------------------------------------------------- /test/system/scenarios/test_issue274.py: -------------------------------------------------------------------------------- 1 | 2 | from pyNN.random import RandomDistribution as rnd 3 | from .fixtures import run_with_simulators 4 | 5 | 6 | @run_with_simulators("nest", "neuron", "brian2") 7 | def test_issue274(sim): 8 | """Issue with offset in GIDs""" 9 | sim.setup(min_delay=0.5) 10 | 11 | p0 = sim.Population(13, sim.IF_cond_exp()) 12 | p1 = sim.Population(1000, sim.IF_cond_exp()) 13 | p2 = sim.Population(252, sim.IF_cond_exp()) 14 | 15 | connector = sim.DistanceDependentProbabilityConnector("exp(-d/100)") 16 | 17 | prj = sim.Projection(p1, p2, connector) 18 | 19 | w_dist = rnd("uniform", low=1e-6, high=2e-6) 20 | delay_dist = rnd("uniform", low=0.5, high=1.0) 21 | prj.set(weight=w_dist, delay=delay_dist) 22 | 23 | 24 | if __name__ == '__main__': 25 | from pyNN.utility import get_simulator 26 | sim, args = get_simulator() 27 | test_issue274(sim) 28 | 
-------------------------------------------------------------------------------- /test/system/scenarios/test_procedural_api.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | from numpy.testing import assert_allclose 4 | from pyNN.utility import init_logging 5 | from .fixtures import run_with_simulators 6 | 7 | 8 | @run_with_simulators("nest", "neuron", "brian2") 9 | def test_ticket195(sim): 10 | """ 11 | Check that the `connect()` function works correctly with single IDs (see 12 | http://neuralensemble.org/trac/PyNN/ticket/195) 13 | """ 14 | init_logging(None, debug=True) 15 | sim.setup(timestep=0.01) 16 | pre = sim.Population(10, sim.SpikeSourceArray(spike_times=range(1, 10))) 17 | post = sim.Population(10, sim.IF_cond_exp()) 18 | #sim.connect(pre[0], post[0], weight=0.01, delay=0.1, p=1) 19 | sim.connect(pre[0:1], post[0:1], weight=0.01, delay=0.1, p=1) 20 | #prj = sim.Projection(pre, post, sim.FromListConnector([(0, 0, 0.01, 0.1)])) 21 | post.record(['spikes', 'v']) 22 | sim.run(100.0) 23 | assert_allclose(post.get_data().segments[0].spiketrains[0].magnitude, np.array([13.4]), 0.5) 24 | sim.end() 25 | 26 | if __name__ == '__main__': 27 | from pyNN.utility import get_simulator 28 | sim, args = get_simulator() 29 | test_ticket195(sim) 30 | -------------------------------------------------------------------------------- /test/system/scenarios/test_scenario2.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | from .fixtures import run_with_simulators 4 | 5 | 6 | @run_with_simulators("nest", "neuron", "brian2") 7 | def test_scenario2(sim): 8 | """ 9 | Array of neurons, each injected with a different current. 10 | 11 | firing period of a IF neuron injected with a current I: 12 | 13 | T = tau_m*log(I*tau_m/(I*tau_m - v_thresh*cm)) 14 | 15 | (if v_rest = v_reset = 0.0) 16 | 17 | we set the refractory period to be very large, so each neuron fires only 18 | once (except neuron[0], which never reaches threshold). 
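(Derivation sketch: with v_rest = v_reset = 0 the membrane relaxes towards
I*tau_m/cm, following V(t) = (I*tau_m/cm)*(1 - exp(-t/tau_m)); setting
V(T) = v_thresh and solving for T gives the expression above.)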
19 | """ 20 | n = 83 21 | t_start = 25.0 22 | duration = 100.0 23 | t_stop = 150.0 24 | tau_m = 20.0 25 | v_thresh = 10.0 26 | cm = 1.0 27 | cell_params = {"tau_m": tau_m, "v_rest": 0.0, "v_reset": 0.0, 28 | "tau_refrac": 100.0, "v_thresh": v_thresh, "cm": cm} 29 | I0 = (v_thresh * cm) / tau_m 30 | sim.setup(timestep=0.01, min_delay=0.1, spike_precision="off_grid") 31 | neurons = sim.Population(n, sim.IF_curr_exp(**cell_params)) 32 | neurons.initialize(v=0.0) 33 | I = np.arange(I0, I0 + 1.0, 1.0 / n) 34 | currents = [sim.DCSource(start=t_start, stop=t_start + duration, amplitude=amp) 35 | for amp in I] 36 | for j, (neuron, current) in enumerate(zip(neurons, currents)): 37 | if j % 2 == 0: # these should 38 | neuron.inject(current) # be entirely 39 | else: # equivalent 40 | current.inject_into([neuron]) 41 | neurons.record(['spikes', 'v']) 42 | 43 | sim.run(t_stop) 44 | 45 | spiketrains = neurons.get_data().segments[0].spiketrains 46 | assert len(spiketrains) == n 47 | assert len(spiketrains[0]) == 0 # first cell does not fire 48 | assert len(spiketrains[1]) == 1 # other cells fire once 49 | assert len(spiketrains[-1]) == 1 # other cells fire once 50 | expected_spike_times = t_start + tau_m * np.log(I * tau_m / (I * tau_m - v_thresh * cm)) 51 | a = spike_times = [np.array(st)[0] for st in spiketrains[1:]] 52 | b = expected_spike_times[1:] 53 | max_error = abs((a - b) / b).max() 54 | print("max error =", max_error) 55 | assert max_error < 0.005, max_error 56 | sim.end() 57 | if "pytest" not in sys.modules: 58 | return a, b, spike_times 59 | 60 | 61 | if __name__ == '__main__': 62 | from pyNN.utility import get_simulator 63 | sim, args = get_simulator() 64 | test_scenario2(sim) 65 | -------------------------------------------------------------------------------- /test/system/scenarios/test_synapse_types.py: -------------------------------------------------------------------------------- 1 | 2 | import sys 3 | import numpy as np 4 | from .fixtures import run_with_simulators 5 | 6 | 7 | @run_with_simulators("nest", "neuron") 8 | def test_simple_stochastic_synapse(sim, plot_figure=False): 9 | # in this test we connect 10 | sim.setup(min_delay=0.5) 11 | t_stop = 1000.0 12 | spike_times = np.arange(2.5, t_stop, 5.0) 13 | source = sim.Population(1, sim.SpikeSourceArray(spike_times=spike_times)) 14 | neurons = sim.Population(4, sim.IF_cond_exp(tau_syn_E=1.0)) 15 | synapse_type = sim.SimpleStochasticSynapse(weight=0.5, 16 | p=np.array([[0.0, 0.5, 0.5, 1.0]])) 17 | connections = sim.Projection(source, neurons, sim.AllToAllConnector(), 18 | synapse_type=synapse_type) 19 | source.record('spikes') 20 | neurons.record('gsyn_exc') 21 | sim.run(t_stop) 22 | 23 | data = neurons.get_data().segments[0] 24 | gsyn = data.analogsignals[0].rescale('uS') 25 | if plot_figure: 26 | import matplotlib.pyplot as plt 27 | for i in range(neurons.size): 28 | plt.subplot(neurons.size, 1, i+1) 29 | plt.plot(gsyn.times, gsyn[:, i]) 30 | plt.savefig("test_simple_stochastic_synapse_%s.png" % sim.__name__) 31 | print(data.analogsignals[0].units) 32 | crossings = [] 33 | for i in range(neurons.size): 34 | crossings.append( 35 | gsyn.times[:-1][np.logical_and(gsyn.magnitude[:-1, i] < 0.4, 0.4 < gsyn.magnitude[1:, i])]) 36 | assert crossings[0].size == 0 37 | assert crossings[1].size < 0.6*spike_times.size 38 | assert crossings[1].size > 0.4*spike_times.size 39 | assert crossings[3].size == spike_times.size 40 | assert not np.array_equal(crossings[1], crossings[2]) 41 | print(crossings[1].size / spike_times.size) 42 | if "pytest" 
not in sys.modules: 43 | return data 44 | 45 | 46 | if __name__ == '__main__': 47 | from pyNN.utility import get_simulator 48 | sim, args = get_simulator(("--plot-figure", 49 | {"help": "generate a figure", 50 | "action": "store_true"})) 51 | test_simple_stochastic_synapse(sim, plot_figure=args.plot_figure) 52 | -------------------------------------------------------------------------------- /test/system/scenarios/test_ticket166.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | from .fixtures import run_with_simulators 4 | 5 | 6 | @run_with_simulators("nest", "neuron", "brian2") 7 | def test_ticket166(sim, plot_figure=False): 8 | """ 9 | Check that changing the spike_times of a SpikeSourceArray mid-simulation 10 | works (see http://neuralensemble.org/trac/PyNN/ticket/166) 11 | """ 12 | 13 | dt = 0.1 # ms 14 | t_step = 100.0 # ms 15 | lag = 3.0 # ms 16 | 17 | sim.setup(timestep=dt, min_delay=dt) 18 | 19 | spikesources = sim.Population(2, sim.SpikeSourceArray()) 20 | cells = sim.Population(2, sim.IF_cond_exp()) 21 | syn = sim.StaticSynapse(weight=0.01) 22 | conn = sim.Projection(spikesources, cells, sim.OneToOneConnector(), syn) 23 | cells.record('v') 24 | 25 | spiketimes = np.arange(2.0, t_step, t_step / 13.0) 26 | spikesources[0].spike_times = spiketimes 27 | spikesources[1].spike_times = spiketimes + lag 28 | 29 | t = sim.run(t_step) # both neurons depolarized by synaptic input 30 | t = sim.run(t_step) # no more synaptic input, neurons decay 31 | 32 | spiketimes += 2 * t_step 33 | spikesources[0].spike_times = spiketimes 34 | # note we add no new spikes to the second source 35 | t = sim.run(t_step) # first neuron gets depolarized again 36 | 37 | vm = cells.get_data().segments[0].analogsignals[0] 38 | final_v_0 = vm[-1, 0] 39 | final_v_1 = vm[-1, 1] 40 | 41 | sim.end() 42 | 43 | if plot_figure: 44 | import matplotlib.pyplot as plt 45 | plt.plot(vm.times, vm[:, 0]) 46 | plt.plot(vm.times, vm[:, 1]) 47 | plt.savefig("ticket166_%s.png" % sim.__name__) 48 | 49 | assert final_v_0 > -60.0 # first neuron has been depolarized again 50 | assert final_v_1 < -64.99 # second neuron has decayed back towards rest 51 | 52 | 53 | if __name__ == '__main__': 54 | from pyNN.utility import get_simulator 55 | sim, args = get_simulator(("--plot-figure", 56 | {"help": "generate a figure", 57 | "action": "store_true"})) 58 | test_ticket166(sim, plot_figure=args.plot_figure) 59 | -------------------------------------------------------------------------------- /test/system/test_neuroml.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | 4 | try: 5 | import pyNN.neuroml 6 | have_neuroml = True 7 | except ImportError: 8 | have_neuroml = False 9 | 10 | import pytest 11 | 12 | 13 | def test_save_validate_network(): 14 | if not have_neuroml: 15 | pytest.skip("neuroml module not available") 16 | sim = pyNN.neuroml 17 | reference='Test0' 18 | 19 | sim.setup(reference=reference) 20 | spike_source = sim.Population(1, sim.SpikeSourceArray(spike_times=np.arange(10, 100, 10))) 21 | neurons = sim.Population(5, sim.IF_cond_exp(e_rev_I=-75)) 22 | sim.end() 23 | 24 | from neuroml.utils import validate_neuroml2 25 | 26 | validate_neuroml2('%s.net.nml'%reference) 27 | -------------------------------------------------------------------------------- /test/system/test_serialization.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | from numpy.testing import 
assert_array_almost_equal 4 | 5 | from pyNN.random import RandomDistribution as RD 6 | from pyNN.network import Network 7 | from pyNN.serialization import export_to_sonata, import_from_sonata, asciify 8 | 9 | try: 10 | import h5py 11 | HAVE_H5PY = True 12 | except ImportError: 13 | HAVE_H5PY = False 14 | 15 | try: 16 | import pyNN.nest as sim 17 | HAVE_NEST = True 18 | except ImportError: 19 | HAVE_NEST = False 20 | 21 | import pytest 22 | 23 | def test(): 24 | if not HAVE_H5PY: 25 | pytest.skip("h5py not available") 26 | elif not HAVE_NEST: 27 | pytest.skip("nest not available") 28 | else: 29 | sim.setup() 30 | 31 | p1 = sim.Population(10, 32 | sim.IF_cond_exp( 33 | v_rest=-65, 34 | tau_m=lambda i: 10 + 0.1*i, 35 | cm=RD('normal', (0.5, 0.05))), 36 | label="population_one") 37 | p2 = sim.Population(20, 38 | sim.IF_curr_alpha( 39 | v_rest=-64, 40 | tau_m=lambda i: 11 + 0.1*i), 41 | label="population_two") 42 | 43 | prj = sim.Projection(p1, p2, 44 | sim.FixedProbabilityConnector(p_connect=0.5), 45 | synapse_type=sim.StaticSynapse(weight=RD('uniform', [0.0, 0.1]), 46 | delay=0.5), 47 | receptor_type='excitatory') 48 | 49 | net = Network(p1, p2, prj) 50 | 51 | export_to_sonata(net, "tmp_serialization_test", overwrite=True) 52 | 53 | net2 = import_from_sonata("tmp_serialization_test/circuit_config.json", sim) 54 | 55 | for orig_population in net.populations: 56 | imp_population = net2.get_component(orig_population.label) 57 | assert orig_population.size == imp_population.size 58 | for name in orig_population.celltype.default_parameters: 59 | assert_array_almost_equal(orig_population.get(name), imp_population.get(name), 12) 60 | 61 | w1 = prj.get('weight', format='array') 62 | prj2 = net2.get_component(asciify(prj.label).decode('utf-8') + "-0") 63 | w2 = prj2.get('weight', format='array') 64 | assert_array_almost_equal(w1, w2, 12) 65 | 66 | 67 | if __name__ == "__main__": 68 | test() 69 | -------------------------------------------------------------------------------- /test/unittests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NeuralEnsemble/PyNN/78235f034028419ebbf92f20f30663149f480616/test/unittests/__init__.py -------------------------------------------------------------------------------- /test/unittests/mocks.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mock classes for unit tests 3 | 4 | :copyright: Copyright 2006-2024 by the PyNN team, see AUTHORS. 5 | :license: CeCILL, see LICENSE for details. 
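MockRNG returns the deterministic arithmetic sequence start, start + delta,
start + 2*delta, ... in place of random numbers, so tests can predict exactly
which values are drawn; MockRNG2 returns a caller-supplied sequence and
MockRNG3 returns [1, 0, 0, ...].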
6 | """ 7 | 8 | from pyNN import random 9 | import numpy as np 10 | 11 | 12 | class MockRNG(random.WrappedRNG): 13 | rng = None 14 | 15 | def __init__(self, start=0.0, delta=1, parallel_safe=True): 16 | random.WrappedRNG.__init__(self, parallel_safe=parallel_safe) 17 | self.start = start 18 | self.delta = delta 19 | 20 | def _next(self, distribution, n, parameters): 21 | if distribution == "uniform_int": 22 | return self._next_int(n, parameters) 23 | elif distribution == "binomial": 24 | return self._next_binomial(n, parameters) 25 | s = self.start 26 | self.start += n * self.delta 27 | return s + self.delta * np.arange(n) 28 | 29 | def _next_int(self, n, parameters): 30 | low, high = parameters["low"], parameters["high"] 31 | s = int(self.start) 32 | self.start += n * self.delta 33 | x = s + self.delta * np.arange(n) 34 | return x % (high - low) + low 35 | 36 | def _next_binomial(self, n, parameters): 37 | return self._next_int(n, {"low": 0, "high": parameters["n"]}) 38 | 39 | def permutation(self, arr): 40 | return arr[::-1] 41 | 42 | 43 | class MockRNG2(random.WrappedRNG): 44 | rng = None 45 | 46 | def __init__(self, numbers, parallel_safe=True): 47 | random.WrappedRNG.__init__(self, parallel_safe=parallel_safe) 48 | self.numbers = numbers 49 | self.i = 0 50 | 51 | def _next(self, distribution, n, parameters): 52 | x = self.numbers[self.i:self.i + n] 53 | self.i += n 54 | return x 55 | 56 | def permutation(self, arr): 57 | return arr[::-1] 58 | 59 | 60 | class MockRNG3(random.WrappedRNG): 61 | """ 62 | returns [1, 0, 0, 0,..] 63 | """ 64 | rng = None 65 | 66 | def __init__(self, parallel_safe=True): 67 | random.WrappedRNG.__init__(self, parallel_safe=parallel_safe) 68 | 69 | def _next(self, distribution, n, parameters): 70 | x = np.zeros(n) 71 | x.dtype = int 72 | x[0] = 1 73 | return x 74 | 75 | def permutation(self, arr): 76 | return arr[::-1] 77 | -------------------------------------------------------------------------------- /test/unittests/test_core.py: -------------------------------------------------------------------------------- 1 | from pyNN.core import is_listlike 2 | import numpy as np 3 | 4 | 5 | def test_is_list_like_with_tuple(): 6 | assert is_listlike((1, 2, 3)) 7 | 8 | 9 | def test_is_list_like_with_list(): 10 | assert is_listlike([1, 2, 3]) 11 | 12 | 13 | def test_is_list_like_with_iterator(): 14 | assert not is_listlike(iter((1, 2, 3))) 15 | 16 | 17 | def test_is_list_like_with_set(): 18 | assert is_listlike(set((1, 2, 3))) 19 | 20 | 21 | def test_is_list_like_with_numpy_array(): 22 | assert is_listlike(np.arange(10)) 23 | 24 | 25 | def test_is_list_like_with_string(): 26 | assert not is_listlike("abcdefg") 27 | 28 | # def test_is_list_like_with_file(): 29 | # f = file() 30 | # assert not is_listlike(f) 31 | --------------------------------------------------------------------------------