├── .gitignore ├── Contributing.md ├── Credits.ipynb ├── DynamicBrain ├── BrainObservatory.ipynb ├── EphysObservatory.ipynb ├── Modeling │ ├── biophysical_notebook │ │ ├── biophysical_modeling.ipynb │ │ ├── build_network.py │ │ ├── components │ │ │ ├── biophysical │ │ │ │ ├── electrophysiology │ │ │ │ │ ├── 472363762_fit.json │ │ │ │ │ ├── 472912177_fit.json │ │ │ │ │ ├── 473862421_fit.json │ │ │ │ │ ├── 473863035_fit.json │ │ │ │ │ └── 473863510_fit.json │ │ │ │ └── morphology │ │ │ │ │ ├── Nr5a1-Cre_Ai14_IVSCC_-169250.03.02.01_471087815_m.swc │ │ │ │ │ ├── Pvalb-IRES-Cre_Ai14_IVSCC_-169125.03.01.01_469628681_m.swc │ │ │ │ │ ├── Pvalb-IRES-Cre_Ai14_IVSCC_-176847.04.02.01_470522102_m.swc │ │ │ │ │ ├── Rorb-IRES2-Cre-D_Ai14_IVSCC_-168053.05.01.01_325404214_m.swc │ │ │ │ │ └── Scnn1a-Tg3-Cre_Ai14_IVSCC_-177300.01.02.01_473845048_m.swc │ │ │ ├── electrodes │ │ │ │ └── single_electrode.csv │ │ │ ├── hoc_templates │ │ │ │ └── Biophys1.hoc │ │ │ ├── intfire │ │ │ │ ├── IntFire1_exc_1.json │ │ │ │ └── IntFire1_inh_1.json │ │ │ ├── mechanisms │ │ │ │ └── modfiles │ │ │ │ │ ├── CaDynamics.mod │ │ │ │ │ ├── Ca_HVA.mod │ │ │ │ │ ├── Ca_LVA.mod │ │ │ │ │ ├── Ih.mod │ │ │ │ │ ├── Im.mod │ │ │ │ │ ├── Im_v2.mod │ │ │ │ │ ├── K_P.mod │ │ │ │ │ ├── K_T.mod │ │ │ │ │ ├── Kd.mod │ │ │ │ │ ├── Kv2like.mod │ │ │ │ │ ├── Kv3_1.mod │ │ │ │ │ ├── NaTa.mod │ │ │ │ │ ├── NaTs.mod │ │ │ │ │ ├── NaV.mod │ │ │ │ │ ├── Nap.mod │ │ │ │ │ ├── SK.mod │ │ │ │ │ └── vecevent.mod │ │ │ └── synaptic_models │ │ │ │ ├── AMPA_ExcToExc.json │ │ │ │ ├── AMPA_ExcToInh.json │ │ │ │ ├── GABA_InhToExc.json │ │ │ │ ├── GABA_InhToInh.json │ │ │ │ ├── instanteneousExc.json │ │ │ │ └── instanteneousInh.json │ │ ├── config.json │ │ ├── run_bionet.py │ │ └── schematics_png │ │ │ ├── External_input_connected.png │ │ │ ├── External_input_created.png │ │ │ ├── Full_figure.png │ │ │ ├── Neurons_created.png │ │ │ ├── Neurons_created_figure.png │ │ │ ├── Recurrent_connected.png │ │ │ ├── Recurrent_connected_figure.png │ 
│ │ └── full_network.png │ ├── layer4_analysis │ │ └── layer4_analysis.ipynb │ ├── pointnet_notebook │ │ ├── components │ │ │ ├── cell_models │ │ │ │ ├── 472363762_fit.json │ │ │ │ ├── 472363762_point.json │ │ │ │ ├── 472912177_fit.json │ │ │ │ ├── 472912177_point.json │ │ │ │ ├── 473862421_point.json │ │ │ │ ├── 473863035_point.json │ │ │ │ ├── 473863510_point.json │ │ │ │ ├── IntFire1_exc_1.json │ │ │ │ ├── IntFire1_exc_fit.json │ │ │ │ ├── IntFire1_exc_point.json │ │ │ │ ├── IntFire1_inh_1.json │ │ │ │ ├── IntFire1_inh_fit.json │ │ │ │ ├── IntFire1_inh_point.json │ │ │ │ └── filter_point.json │ │ │ └── synaptic_models │ │ │ │ ├── AMPA_ExcToExc.json │ │ │ │ ├── AMPA_ExcToInh.json │ │ │ │ ├── ExcToExc.json │ │ │ │ ├── ExcToInh.json │ │ │ │ ├── GABA_InhToExc.json │ │ │ │ ├── GABA_InhToInh.json │ │ │ │ ├── InhToExc.json │ │ │ │ ├── InhToInh.json │ │ │ │ ├── instanteneousExc.json │ │ │ │ └── instanteneousInh.json │ │ ├── config.json │ │ ├── network │ │ │ ├── recurrent_network │ │ │ │ ├── edge_types.csv │ │ │ │ ├── edges.h5 │ │ │ │ ├── node_types.csv │ │ │ │ ├── nodes.csv │ │ │ │ └── nodes.h5 │ │ │ └── source_input │ │ │ │ ├── edge_types.csv │ │ │ │ ├── edges.h5 │ │ │ │ ├── input_edge_types.csv │ │ │ │ ├── input_edges.h5 │ │ │ │ ├── input_node_types.csv │ │ │ │ ├── input_nodes.csv │ │ │ │ ├── node_types.csv │ │ │ │ ├── nodes.h5 │ │ │ │ ├── poission_input_spk_train.h5 │ │ │ │ └── poisson_input_spk_train.nwb │ │ ├── pointnet_modeling_example.ipynb │ │ ├── run_pointnet.py │ │ └── set_weights.py │ └── popnet_notebook │ │ ├── components │ │ ├── pop_models │ │ │ ├── excitatory_pop.json │ │ │ ├── filter_pop.json │ │ │ └── inhibitory_pop.json │ │ └── synaptic_models │ │ │ ├── ExcToExc.json │ │ │ ├── ExcToInh.json │ │ │ ├── InhToExc.json │ │ │ └── InhToInh.json │ │ ├── config.json │ │ ├── input_rates.csv │ │ ├── network │ │ └── recurrent_network_v2 │ │ │ ├── edge_types.csv │ │ │ └── node_types.csv │ │ ├── population_modeling.ipynb │ │ └── schematics_png │ │ ├── 
DiPDE_ei_net.png │ │ ├── ei_ext_pop.png │ │ ├── ei_ext_pop_conn1.png │ │ ├── ei_ext_pop_conn1and2.png │ │ └── ei_pop.png ├── Other │ ├── CellTypes.ipynb │ └── Connectivity.ipynb ├── Tutorials │ ├── 01_decoding_sklearn.ipynb │ ├── T01_Regression.ipynb │ ├── T02_Principal_component_analysis.ipynb │ ├── T03_Classification_tutorial.ipynb │ ├── T04_Pipelines.ipynb │ ├── solutions │ │ ├── 01_decoding_sklearn_solutions.html │ │ └── 01_decoding_sklearn_solutions.ipynb │ └── tree.png ├── VisualBehavior.ipynb └── solutions │ ├── BrainObservatory_solutions.html │ ├── BrainObservatory_solutions.ipynb │ ├── EphysObservatory_solutions.html │ ├── EphysObservatory_solutions.ipynb │ ├── Modeling │ ├── biophysical_notebook │ │ ├── biophysical_modeling_solutions.ipynb │ │ ├── build_network.py │ │ ├── config.json │ │ └── run_bionet.py │ ├── layer4_analysis_solutions.html │ ├── layer4_analysis_solutions.ipynb │ └── popnet_notebook │ │ ├── components │ │ ├── pop_models │ │ │ ├── excitatory_pop.json │ │ │ ├── filter_pop.json │ │ │ └── inhibitory_pop.json │ │ └── synaptic_models │ │ │ ├── ExcToExc.json │ │ │ ├── ExcToInh.json │ │ │ ├── InhToExc.json │ │ │ └── InhToInh.json │ │ ├── config.json │ │ ├── input_rates.csv │ │ └── population_modeling_solutions.ipynb │ ├── Other │ ├── CellTypes_solutions.html │ ├── CellTypes_solutions.ipynb │ ├── Connectivity_solutions.html │ └── Connectivity_solutions.ipynb │ ├── VisualBehavior_solutions.html │ └── VisualBehavior_solutions.ipynb ├── Git ├── 03 - Working with Github.md ├── 04 - Working with GitHub in the Cloud.md ├── gh_anim.gif ├── github_workflow_cheatsheet.pdf ├── tiles_00.png ├── tiles_01.png ├── tiles_02.png ├── tiles_03.png ├── tiles_04.png └── tiles_05.png ├── LICENSE.txt ├── PythonBootcamp ├── 00_Introduction.ipynb ├── 01_Basic_Python_I_Object_and_Data_Structures.ipynb ├── 02_Basic_Python_II_Control_Flow_and_Functions.ipynb ├── 03_Intro_To_Scientific_Computing.ipynb ├── 04_Introduction_To_Numpy.ipynb ├── 
05_Custom_Modules_and_Version_Control.ipynb ├── 06_Introduction_To_Matplotlib.ipynb ├── 07_Introduction_To_Pandas.ipynb ├── 08_Development_Tools.ipynb ├── 09_bike_crossing.ipynb ├── 10_glm_exercise.ipynb ├── 11_Image_data.ipynb ├── solutions │ ├── 01_Basic_Python_I_Object_and_Data_Structures_solutions.html │ ├── 01_Basic_Python_I_Object_and_Data_Structures_solutions.ipynb │ ├── 02_Basic_Python_II_Control_Flow_and_Functions_solutions.html │ ├── 02_Basic_Python_II_Control_Flow_and_Functions_solutions.ipynb │ ├── 03_Intro_To_Scientific_Computing_solutions.html │ ├── 03_Intro_To_Scientific_Computing_solutions.ipynb │ ├── 04_Introduction_To_Numpy_solutions.html │ ├── 04_Introduction_To_Numpy_solutions.ipynb │ ├── 05_Custom_Modules_and_Version_Control_solutions.html │ ├── 05_Custom_Modules_and_Version_Control_solutions.ipynb │ ├── 06_Introduction_To_Matplotlib_solutions.html │ ├── 06_Introduction_To_Matplotlib_solutions.ipynb │ ├── 07_Introduction_To_Pandas_solutions.html │ ├── 07_Introduction_To_Pandas_solutions.ipynb │ ├── 09_bike_crossing_solutions.html │ ├── 09_bike_crossing_solutions.ipynb │ ├── 11_Image_data_solutions.html │ └── 11_Image_data_solutions.ipynb └── support_files │ ├── CrossingDailyBarPlot.png │ ├── CrossingMonthlyBarPlot.png │ ├── SampleWorkbook.csv │ ├── blurred.png │ ├── commit_tree.svg │ ├── cropped-SummerWorkshop_Header.png │ ├── cross_sections.png │ ├── gitkraken_1.png │ ├── gitkraken_2.png │ ├── gitkraken_3.png │ ├── gitkraken_4.png │ ├── grayscales.png │ ├── leafplot.png │ ├── maxpixel.png │ ├── neuron.jpg │ ├── parallel_commits.png │ ├── pokemon_alopez247.csv │ ├── rgb_array.svg │ ├── stinkbug.png │ ├── sweeps.csv │ ├── thresholdedimage.png │ └── topic_branches.png ├── README.md └── resources ├── EphysObservatory ├── ecephys_manifest.csv └── neuropixels.png ├── Neocortical Interneurons.png ├── change_detection_schematic.png ├── connectivity_metadata.csv └── cropped-SummerWorkshop_Header.png /.gitignore: 
-------------------------------------------------------------------------------- 1 | *.DS_Store 2 | *.nwb 3 | *.ipynb_checkpoints/ 4 | .idea/ 5 | **/x86_64/ 6 | **/Modeling/**/output 7 | 8 | -------------------------------------------------------------------------------- /Contributing.md: -------------------------------------------------------------------------------- 1 | # Allen Institute Contribution Agreement 2 | 3 | This document describes the terms under which you may make “Contributions” — 4 | which may include without limitation, software additions, revisions, bug fixes, configuration changes, 5 | documentation, or any other materials — to any of the projects owned or managed by the Allen Institute. 6 | If you have questions about these terms, please contact us at terms@alleninstitute.org. 7 | 8 | You certify that: 9 | 10 | • Your Contributions are either: 11 | 12 | 1. Created in whole or in part by you and you have the right to submit them under the designated license 13 | (described below); or 14 | 2. Based upon previous work that, to the best of your knowledge, is covered under an appropriate 15 | open source license and you have the right under that license to submit that work with modifications, 16 | whether created in whole or in part by you, under the designated license; or 17 | 18 | 3. Provided directly to you by some other person who certified (1) or (2) and you have not modified them. 19 | 20 | • You are granting your Contributions to the Allen Institute under the terms of the [2-Clause BSD license](https://opensource.org/licenses/BSD-2-Clause) 21 | (the “designated license”). 22 | 23 | • You understand and agree that the Allen Institute projects and your Contributions are public and that 24 | a record of the Contributions (including all metadata and personal information you submit with them) is 25 | maintained indefinitely and may be redistributed consistent with the Allen Institute’s mission and the 26 | 2-Clause BSD license. 
27 | -------------------------------------------------------------------------------- /Credits.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | " \n", 8 | "\n", 9 | "

Python Bootcamp and Summer Workshop on the Dynamic Brain

\n", 10 | "

August 19-September 3, 2016

" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "
\n", 18 | "

Credits

\n", 19 | "\n", 20 | "

The material for the Python Bootcamp and the Summer Workshop on the Dynamic Brain is the result of hard work from many people. In no particular order, they are:\n", 21 | "\n", 22 | "

\n", 45 | "\n", 46 | "

This material builds upon work from previous years. Contributors are:\n", 47 | "

\n", 56 | "
" 57 | ] 58 | } 59 | ], 60 | "metadata": { 61 | "kernelspec": { 62 | "display_name": "Python 2", 63 | "language": "python", 64 | "name": "python2" 65 | }, 66 | "language_info": { 67 | "codemirror_mode": { 68 | "name": "ipython", 69 | "version": 2 70 | }, 71 | "file_extension": ".py", 72 | "mimetype": "text/x-python", 73 | "name": "python", 74 | "nbconvert_exporter": "python", 75 | "pygments_lexer": "ipython2", 76 | "version": "2.7.13" 77 | } 78 | }, 79 | "nbformat": 4, 80 | "nbformat_minor": 0 81 | } 82 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/build_network.py: -------------------------------------------------------------------------------- 1 | from bmtk.builder.networks import NetworkBuilder 2 | 3 | 4 | def build_cortical_network(output_dir='network/recurrent_network'): 5 | def distance_connection_handler(source, target, d_max, nsyn_min, nsyn_max): 6 | """ Connect cells that are less than d_max apart with a random number of synapses in the 7 | interval [nsyn_min, nsyn_max) 8 | """ 9 | sid = source['node_id'] # Get source id 10 | tid = target['node_id'] # Get target id 11 | 12 | # Avoid self-connections. 13 | if (sid == tid): 14 | return None 15 | 16 | # first calculate euclidean distance between cells 17 | src_positions = np.array([source['x'], source['y'], source['z']]) 18 | trg_positions = np.array([target['x'], target['y'], target['z']]) 19 | separation = np.sqrt(np.sum(src_positions - trg_positions)**2 ) 20 | 21 | # drop the connection if nodes too far apart 22 | if separation >= d_max: 23 | return None 24 | 25 | # Add the number of synapses for every connection. 
26 | tmp_nsyn = random.randint(nsyn_min, nsyn_max) 27 | return tmp_nsyn 28 | 29 | 30 | #### Step 1: Figure out what types, and number of, different cells to use in our network #### 31 | # Number of cell models desired 32 | N_Scnn1a = 2 33 | N_PV1 = 2 34 | N_LIF_exc = 2 35 | N_LIF_inh = 2 36 | 37 | # Define all the cell models in a dictionary (note dictionaries within a dictionary) 38 | biophysical_models = { 39 | 'Scnn1a': { 40 | 'N': N_Scnn1a, 41 | 'ei': 'e', 42 | 'pop_name': 'Scnn1a', 43 | 'model_type': 'biophysical', 44 | 'model_template': 'ctdb:Biophys1.hoc', 45 | 'model_processing': 'aibs_perisomatic', 46 | 'morphology_file': 'Scnn1a-Tg3-Cre_Ai14_IVSCC_-177300.01.02.01_473845048_m.swc', 47 | 'dynamics_params': '472363762_fit.json', 48 | 'rotation_angle_zaxis': -3.646878266 49 | }, 50 | 'PV1': { 51 | 'N': N_PV1, 52 | 'ei': 'i', 53 | 'pop_name': 'PV1', 54 | 'model_type': 'biophysical', 55 | 'model_template': 'ctdb:Biophys1.hoc', 56 | 'model_processing': 'aibs_perisomatic', 57 | 'dynamics_params': '472912177_fit.json', 58 | 'morphology_file': 'Pvalb-IRES-Cre_Ai14_IVSCC_-176847.04.02.01_470522102_m.swc', 59 | 'rotation_angle_zaxis': -2.539551891 60 | } 61 | } 62 | 63 | # Define all the cell models in a dictionary. 
64 | LIF_models = { 65 | 'LIF_exc': { 66 | 'N': N_LIF_exc, 67 | 'ei': 'e', 68 | 'pop_name': 'LIF_exc', 69 | 'model_type': 'point_process', 70 | 'model_template': 'nrn:IntFire1', 71 | 'dynamics_params': 'IntFire1_exc_1.json' 72 | }, 73 | 'LIF_inh': { 74 | 'N': N_LIF_inh, 75 | 'ei': 'i', 76 | 'pop_name': 'LIF_inh', 77 | 'model_type': 'point_process', 78 | 'model_template': 'nrn:IntFire1', 79 | 'dynamics_params': 'IntFire1_inh_1.json' 80 | } 81 | } 82 | 83 | #### Step 2: Create NetworkBuidler object to build nodes and edges #### 84 | net = NetworkBuilder('Cortical') 85 | 86 | #### Step 3: Used add_nodes() method to add all our cells/cell-types 87 | for model in biophysical_models: 88 | # Build our biophysical cells 89 | params = biophysical_models[model] 90 | n_cells = params.pop('N') 91 | 92 | # We'll randomly assign positions 93 | positions = generate_random_positions(n_cells) 94 | 95 | # Use add_nodes to create a set of N cells for each cell-type 96 | net.add_nodes(N=n_cells, # Specify the numer of cells belonging to this set of nodes 97 | x=positions[:,0], y=positions[:, 1], z=positions[:, 2], 98 | rotation_angle_yaxis=np.random.uniform(0.0, 2*np.pi, n_cells), 99 | 100 | # The other parameters are shared by all cells of this set in the dictionary 101 | **params) # python shortcut for unrolling a dictionary 102 | 103 | for model in LIF_models: 104 | # Same thing as above but for our LIF type cells 105 | params = LIF_models[model].copy() 106 | 107 | # Number of cells for this model type 108 | n_cells = params.pop('N') 109 | 110 | # Precacluate positions, rotation angles for each N neurons in the population 111 | positions = generate_random_positions(n_cells) 112 | 113 | # Adds node populations 114 | net.add_nodes(N=n_cells , 115 | x=positions[:,0], y=positions[:, 1], z=positions[:, 2], 116 | rotation_angle_yaxis=np.random.uniform(0.0, 2*np.pi, n_cells), 117 | **params) 118 | 119 | 120 | #### Step 4: Used add_edges() to set our connections between cells #### 121 | 
cparameters = {'d_max': 160.0, # Maximum separation between nodes where connection allowed 122 | 'nsyn_min': 3, # If connection exist, minimum number of synapses 123 | 'nsyn_max': 7} # If connection exist, maximum number of synapses 124 | 125 | net.add_edges(source={'ei': 'i'}, # Select all inhibitory cells to apply this connection rule too 126 | target={'ei': 'i', 'model_type': 'biophysical'}, # for the target cells we will use inhibitory biophysical cells 127 | connection_rule=distance_connection_handler, 128 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 129 | syn_weight=0.03, 130 | distance_range=[0.0, 1e+20], 131 | target_sections=['somatic', 'basal'], 132 | delay=2.0, 133 | dynamics_params='GABA_InhToInh.json', 134 | model_template='exp2syn') 135 | 136 | # inhibitory --> point-inhibitory 137 | net.add_edges(source={'ei': 'i'}, target={'ei': 'i', 'model_type': 'point_process'}, 138 | connection_rule=distance_connection_handler, 139 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 140 | syn_weight=0.3, 141 | delay=2.0, 142 | dynamics_params='instanteneousInh.json') 143 | 144 | # inhibiotry --> biophysical-excitatory 145 | net.add_edges(source={'ei': 'i'}, target={'ei': 'e', 'model_type': 'biophysical'}, 146 | connection_rule=distance_connection_handler, 147 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 148 | syn_weight=0.3, 149 | distance_range=[0.0, 50.0], 150 | target_sections=['somatic', 'basal', 'apical'], 151 | delay=2.0, 152 | dynamics_params='GABA_InhToExc.json', 153 | model_template='exp2syn') 154 | 155 | # inhibitory --> point-excitatory 156 | net.add_edges(source={'ei': 'i'}, target={'ei': 'e', 'model_type': 'point_process'}, 157 | connection_rule=distance_connection_handler, 158 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 159 | syn_weight=0.4, 160 | delay=2.0, 161 | dynamics_params='instanteneousInh.json') 162 | 163 | # excitatory --> PV1 cells 164 | 
net.add_edges(source={'ei': 'e'}, target={'pop_name': 'PV1'}, 165 | connection_rule=distance_connection_handler, 166 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 167 | syn_weight=0.05, 168 | distance_range=[0.0, 1e+20], 169 | target_sections=['somatic', 'basal'], 170 | delay=2.0, 171 | dynamics_params='AMPA_ExcToInh.json', 172 | model_template='exp2syn') 173 | 174 | # excitatory --> LIF_inh 175 | net.add_edges(source={'ei': 'e'}, target={'pop_name': 'LIF_inh'}, 176 | connection_rule=distance_connection_handler, 177 | connection_params=cparameters, 178 | syn_weight=0.2, 179 | delay=2.0, 180 | dynamics_params='instanteneousExc.json') 181 | 182 | # excitatory --> Scnn1a 183 | net.add_edges(source={'ei': 'e'}, target={'pop_name': 'Scnn1a'}, 184 | connection_rule=distance_connection_handler, 185 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 186 | syn_weight=0.05, 187 | distance_range=[30.0, 150.0], 188 | target_sections=['basal', 'apical'], 189 | delay=2.0, 190 | dynamics_params='AMPA_ExcToExc.json', 191 | model_template='exp2syn') 192 | 193 | # excitatory --> LIF_exc 194 | net.add_edges(source={'ei': 'e'}, target={'pop_name': 'LIF_exc'}, 195 | connection_rule=distance_connection_handler, 196 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 197 | syn_weight=0.05, 198 | delay=2.0, 199 | dynamics_params='instanteneousExc.json') 200 | 201 | 202 | #### Step 5: Build and save the network #### 203 | net.build() 204 | net.save(output_dir=output_dir) 205 | return net 206 | 207 | 208 | def build_input_network(net, output_dir='network/source_input'): 209 | def select_source_cells(sources, target, N_syn=10): 210 | """ Note here that "sources" are given (not "source"). So the iterations occur through every target 211 | with all sources as potential inputs. Faster than before and better if will have common rules. 
212 | """ 213 | 214 | target_id = target.node_id 215 | source_ids = [s.node_id for s in sources] 216 | 217 | nsyns_ret = [N_syn]*len(source_ids) 218 | return nsyns_ret 219 | 220 | 221 | filter_models = { 222 | 'inputFilter': { 223 | 'N': 25, 224 | 'ei': 'e', 225 | 'pop_name': 'input_filter', 226 | 'model_type': 'virtual' 227 | } 228 | } 229 | 230 | inputNetwork = NetworkBuilder("inputNetwork") 231 | inputNetwork.add_nodes(**filter_models['inputFilter']) 232 | 233 | inputNetwork.add_edges(target=net.nodes(pop_name='Scnn1a'), 234 | iterator='all_to_one', 235 | connection_rule=select_source_cells, 236 | syn_weight=0.0007, 237 | distance_range=[0.0, 150.0], 238 | target_sections=['basal', 'apical'], 239 | delay=2.0, 240 | dynamics_params='AMPA_ExcToExc.json', 241 | model_template='exp2syn') 242 | 243 | inputNetwork.add_edges(target=net.nodes(pop_name='LIF_exc'), 244 | iterator='all_to_one', 245 | connection_rule=select_source_cells, 246 | syn_weight=0.07, 247 | delay=2.0, 248 | dynamics_params='instanteneousExc.json') 249 | 250 | inputNetwork.add_edges(target=net.nodes(pop_name='PV1'), 251 | iterator='all_to_one', 252 | connection_rule=select_source_cells, 253 | syn_weight=0.002, 254 | distance_range=[0.0, 1.0e+20], 255 | target_sections=['basal', 'somatic'], 256 | delay=2.0, 257 | dynamics_params='AMPA_ExcToInh.json', 258 | model_template='exp2syn') 259 | 260 | inputNetwork.add_edges(target=net.nodes(pop_name='LIF_inh'), 261 | iterator='all_to_one', 262 | connection_rule=select_source_cells, 263 | syn_weight=0.01, 264 | delay=2.0, 265 | dynamics_params='instanteneousExc.json') 266 | 267 | inputNetwork.build() 268 | inputNetwork.save(output_dir=output_dir) 269 | 270 | 271 | if __name__ == '__main__': 272 | net = build_cortical_network() 273 | build_input_network(net) 274 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/biophysical/electrophysiology/472363762_fit.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "passive": [ 3 | { 4 | "ra": 138.28, 5 | "cm": [ 6 | { 7 | "section": "soma", 8 | "cm": 1.0 9 | }, 10 | { 11 | "section": "axon", 12 | "cm": 1.0 13 | }, 14 | { 15 | "section": "apic", 16 | "cm": 2.12 17 | }, 18 | { 19 | "section": "dend", 20 | "cm": 2.12 21 | } 22 | ], 23 | "e_pas": -92.49911499023438 24 | } 25 | ], 26 | "axon_morph": [ 27 | { 28 | "delete_axon": [ 29 | "forsec axonal{delete_section()}", 30 | " create axon[2]", 31 | " axon[0]{", 32 | " L= 30", 33 | " diam = 1", 34 | " nseg = 1+2*int(L/40)", 35 | " all.append()", 36 | " axonal.append()", 37 | " }", 38 | " axon[1]{", 39 | " L= 30", 40 | " diam = 1", 41 | " nseg = 1+2*int(L/40)", 42 | " all.append()", 43 | " axonal.append()", 44 | " }", 45 | "", 46 | "nSecAxonal = 2", 47 | "connect axon(0), soma(0.5)", 48 | "connect axon[1](0), axon[0](1) ", 49 | "access soma" 50 | ], 51 | "setup_line": "create soma[1], dend[1], apic[1], axon[1]" 52 | } 53 | ], 54 | "fitting": [ 55 | { 56 | "junction_potential": -14.0, 57 | "sweeps": [ 58 | 38 59 | ] 60 | } 61 | ], 62 | "conditions": [ 63 | { 64 | "celsius": 34.0, 65 | "erev": [ 66 | { 67 | "ena": 53.0, 68 | "section": "soma", 69 | "ek": -107.0 70 | } 71 | ], 72 | "v_init": -92.49911499023438 73 | } 74 | ], 75 | "genome": [ 76 | { 77 | "value": 0.0012021154978800002, 78 | "section": "soma", 79 | "name": "gbar_Im", 80 | "mechanism": "Im" 81 | }, 82 | { 83 | "value": 4.12225901169e-05, 84 | "section": "soma", 85 | "name": "gbar_Ih", 86 | "mechanism": "Ih" 87 | }, 88 | { 89 | "value": 0.98228995892999993, 90 | "section": "soma", 91 | "name": "gbar_NaTs", 92 | "mechanism": "NaTs" 93 | }, 94 | { 95 | "value": 0.000209348990528, 96 | "section": "soma", 97 | "name": "gbar_Nap", 98 | "mechanism": "Nap" 99 | }, 100 | { 101 | "value": 0.051758360920800002, 102 | "section": "soma", 103 | "name": "gbar_K_P", 104 | "mechanism": "K_P" 105 | }, 106 | { 107 | "value": 0.00073160714529799998, 
108 | "section": "soma", 109 | "name": "gbar_K_T", 110 | "mechanism": "K_T" 111 | }, 112 | { 113 | "value": 0.00019222004878899999, 114 | "section": "soma", 115 | "name": "gbar_SK", 116 | "mechanism": "SK" 117 | }, 118 | { 119 | "value": 0.057264803402699994, 120 | "section": "soma", 121 | "name": "gbar_Kv3_1", 122 | "mechanism": "Kv3_1" 123 | }, 124 | { 125 | "value": 0.00053599731839199991, 126 | "section": "soma", 127 | "name": "gbar_Ca_HVA", 128 | "mechanism": "Ca_HVA" 129 | }, 130 | { 131 | "value": 0.0070061294358100008, 132 | "section": "soma", 133 | "name": "gbar_Ca_LVA", 134 | "mechanism": "Ca_LVA" 135 | }, 136 | { 137 | "value": 0.0012510775510599999, 138 | "section": "soma", 139 | "name": "gamma_CaDynamics", 140 | "mechanism": "CaDynamics" 141 | }, 142 | { 143 | "value": 717.91660042899991, 144 | "section": "soma", 145 | "name": "decay_CaDynamics", 146 | "mechanism": "CaDynamics" 147 | }, 148 | { 149 | "value": 5.71880766722e-06, 150 | "section": "soma", 151 | "name": "g_pas", 152 | "mechanism": "" 153 | }, 154 | { 155 | "value": 0.00045738760076499994, 156 | "section": "axon", 157 | "name": "g_pas", 158 | "mechanism": "" 159 | }, 160 | { 161 | "value": 3.2393273274400003e-06, 162 | "section": "dend", 163 | "name": "g_pas", 164 | "mechanism": "" 165 | }, 166 | { 167 | "value": 9.5861855476200007e-05, 168 | "section": "apic", 169 | "name": "g_pas", 170 | "mechanism": "" 171 | } 172 | ] 173 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/biophysical/electrophysiology/472912177_fit.json: -------------------------------------------------------------------------------- 1 | { 2 | "passive": [ 3 | { 4 | "ra": 143.65, 5 | "cm": [ 6 | { 7 | "section": "soma", 8 | "cm": 2.16 9 | }, 10 | { 11 | "section": "axon", 12 | "cm": 2.16 13 | }, 14 | { 15 | "section": "dend", 16 | "cm": 2.16 17 | } 18 | ], 19 | "e_pas": -95.53709411621094 20 | } 21 | ], 22 | "axon_morph": [ 23 | { 24 
| "delete_axon": [ 25 | "forsec axonal{delete_section()}", 26 | " create axon[2]", 27 | " axon[0]{", 28 | " L= 30", 29 | " diam = 1", 30 | " nseg = 1+2*int(L/40)", 31 | " all.append()", 32 | " axonal.append()", 33 | " }", 34 | " axon[1]{", 35 | " L= 30", 36 | " diam = 1", 37 | " nseg = 1+2*int(L/40)", 38 | " all.append()", 39 | " axonal.append()", 40 | " }", 41 | "", 42 | "nSecAxonal = 2", 43 | "connect axon(0), soma(0.5)", 44 | "connect axon[1](0), axon[0](1) ", 45 | "access soma" 46 | ], 47 | "setup_line": "create soma[1], dend[1], apic[1], axon[1]" 48 | } 49 | ], 50 | "fitting": [ 51 | { 52 | "junction_potential": -14.0, 53 | "sweeps": [ 54 | 55 55 | ] 56 | } 57 | ], 58 | "conditions": [ 59 | { 60 | "celsius": 34.0, 61 | "erev": [ 62 | { 63 | "ena": 53.0, 64 | "section": "soma", 65 | "ek": -107.0 66 | } 67 | ], 68 | "v_init": -95.53709411621094 69 | } 70 | ], 71 | "genome": [ 72 | { 73 | "value": 5.1162860430100002e-05, 74 | "section": "soma", 75 | "name": "gbar_Ih", 76 | "mechanism": "Ih" 77 | }, 78 | { 79 | "value": 0.058520185129300004, 80 | "section": "soma", 81 | "name": "gbar_NaV", 82 | "mechanism": "NaV" 83 | }, 84 | { 85 | "value": 0.00031192529327399998, 86 | "section": "soma", 87 | "name": "gbar_Kd", 88 | "mechanism": "Kd" 89 | }, 90 | { 91 | "value": 0.051060238264300006, 92 | "section": "soma", 93 | "name": "gbar_Kv2like", 94 | "mechanism": "Kv2like" 95 | }, 96 | { 97 | "value": 0.65076055389700005, 98 | "section": "soma", 99 | "name": "gbar_Kv3_1", 100 | "mechanism": "Kv3_1" 101 | }, 102 | { 103 | "value": 0.033385946416300001, 104 | "section": "soma", 105 | "name": "gbar_K_T", 106 | "mechanism": "K_T" 107 | }, 108 | { 109 | "value": 0.00775048605222, 110 | "section": "soma", 111 | "name": "gbar_Im_v2", 112 | "mechanism": "Im_v2" 113 | }, 114 | { 115 | "value": 0.0027340091995900003, 116 | "section": "soma", 117 | "name": "gbar_SK", 118 | "mechanism": "SK" 119 | }, 120 | { 121 | "value": 0.00056478972054199987, 122 | "section": "soma", 123 | "name": 
"gbar_Ca_HVA", 124 | "mechanism": "Ca_HVA" 125 | }, 126 | { 127 | "value": 0.0032114779487500003, 128 | "section": "soma", 129 | "name": "gbar_Ca_LVA", 130 | "mechanism": "Ca_LVA" 131 | }, 132 | { 133 | "value": 0.0077204433411699998, 134 | "section": "soma", 135 | "name": "gamma_CaDynamics", 136 | "mechanism": "CaDynamics" 137 | }, 138 | { 139 | "value": 20.300246788599999, 140 | "section": "soma", 141 | "name": "decay_CaDynamics", 142 | "mechanism": "CaDynamics" 143 | }, 144 | { 145 | "value": 0.00026705534351299998, 146 | "section": "soma", 147 | "name": "g_pas", 148 | "mechanism": "" 149 | }, 150 | { 151 | "value": 0.00066246357111199989, 152 | "section": "axon", 153 | "name": "g_pas", 154 | "mechanism": "" 155 | }, 156 | { 157 | "value": 9.8019833221899986e-06, 158 | "section": "dend", 159 | "name": "g_pas", 160 | "mechanism": "" 161 | } 162 | ] 163 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/biophysical/electrophysiology/473862421_fit.json: -------------------------------------------------------------------------------- 1 | { 2 | "passive": [ 3 | { 4 | "ra": 138.99, 5 | "cm": [ 6 | { 7 | "section": "soma", 8 | "cm": 1.94 9 | }, 10 | { 11 | "section": "axon", 12 | "cm": 1.94 13 | }, 14 | { 15 | "section": "dend", 16 | "cm": 1.94 17 | } 18 | ], 19 | "e_pas": -88.23363494873047 20 | } 21 | ], 22 | "axon_morph": [ 23 | { 24 | "delete_axon": [ 25 | "forsec axonal{delete_section()}", 26 | " create axon[2]", 27 | " axon[0]{", 28 | " L= 30", 29 | " diam = 1", 30 | " nseg = 1+2*int(L/40)", 31 | " all.append()", 32 | " axonal.append()", 33 | " }", 34 | " axon[1]{", 35 | " L= 30", 36 | " diam = 1", 37 | " nseg = 1+2*int(L/40)", 38 | " all.append()", 39 | " axonal.append()", 40 | " }", 41 | "", 42 | "nSecAxonal = 2", 43 | "connect axon(0), soma(0.5)", 44 | "connect axon[1](0), axon[0](1) ", 45 | "access soma" 46 | ], 47 | "setup_line": "create soma[1], dend[1], apic[1], 
axon[1]" 48 | } 49 | ], 50 | "fitting": [ 51 | { 52 | "junction_potential": -14.0, 53 | "sweeps": [ 54 | 58 55 | ] 56 | } 57 | ], 58 | "conditions": [ 59 | { 60 | "celsius": 34.0, 61 | "erev": [ 62 | { 63 | "ena": 53.0, 64 | "section": "soma", 65 | "ek": -107.0 66 | } 67 | ], 68 | "v_init": -88.23363494873047 69 | } 70 | ], 71 | "genome": [ 72 | { 73 | "value": 0.00030357031297000004, 74 | "section": "soma", 75 | "name": "gbar_Ih", 76 | "mechanism": "Ih" 77 | }, 78 | { 79 | "value": 0.052161472289300001, 80 | "section": "soma", 81 | "name": "gbar_NaV", 82 | "mechanism": "NaV" 83 | }, 84 | { 85 | "value": 0.0033126476739899998, 86 | "section": "soma", 87 | "name": "gbar_Kd", 88 | "mechanism": "Kd" 89 | }, 90 | { 91 | "value": 0.019206276717599998, 92 | "section": "soma", 93 | "name": "gbar_Kv2like", 94 | "mechanism": "Kv2like" 95 | }, 96 | { 97 | "value": 1.2128893375100001, 98 | "section": "soma", 99 | "name": "gbar_Kv3_1", 100 | "mechanism": "Kv3_1" 101 | }, 102 | { 103 | "value": 1.4016010650299999e-05, 104 | "section": "soma", 105 | "name": "gbar_K_T", 106 | "mechanism": "K_T" 107 | }, 108 | { 109 | "value": 0.0011153668151199999, 110 | "section": "soma", 111 | "name": "gbar_Im_v2", 112 | "mechanism": "Im_v2" 113 | }, 114 | { 115 | "value": 0.048152669735999999, 116 | "section": "soma", 117 | "name": "gbar_SK", 118 | "mechanism": "SK" 119 | }, 120 | { 121 | "value": 0.0, 122 | "section": "soma", 123 | "name": "gbar_Ca_HVA", 124 | "mechanism": "Ca_HVA" 125 | }, 126 | { 127 | "value": 0.0, 128 | "section": "soma", 129 | "name": "gbar_Ca_LVA", 130 | "mechanism": "Ca_LVA" 131 | }, 132 | { 133 | "value": 0.046090335451600004, 134 | "section": "soma", 135 | "name": "gamma_CaDynamics", 136 | "mechanism": "CaDynamics" 137 | }, 138 | { 139 | "value": 574.74935741900003, 140 | "section": "soma", 141 | "name": "decay_CaDynamics", 142 | "mechanism": "CaDynamics" 143 | }, 144 | { 145 | "value": 0.00058689407428699997, 146 | "section": "soma", 147 | "name": "g_pas", 148 | 
"mechanism": "" 149 | }, 150 | { 151 | "value": 0.0009617982321389999, 152 | "section": "axon", 153 | "name": "g_pas", 154 | "mechanism": "" 155 | }, 156 | { 157 | "value": 4.1690838408899998e-07, 158 | "section": "dend", 159 | "name": "g_pas", 160 | "mechanism": "" 161 | } 162 | ] 163 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/biophysical/electrophysiology/473863035_fit.json: -------------------------------------------------------------------------------- 1 | { 2 | "passive": [ 3 | { 4 | "ra": 122.88, 5 | "cm": [ 6 | { 7 | "section": "soma", 8 | "cm": 1.0 9 | }, 10 | { 11 | "section": "axon", 12 | "cm": 1.0 13 | }, 14 | { 15 | "section": "apic", 16 | "cm": 2.28 17 | }, 18 | { 19 | "section": "dend", 20 | "cm": 2.28 21 | } 22 | ], 23 | "e_pas": -89.4614028930664 24 | } 25 | ], 26 | "axon_morph": [ 27 | { 28 | "delete_axon": [ 29 | "forsec axonal{delete_section()}", 30 | " create axon[2]", 31 | " axon[0]{", 32 | " L= 30", 33 | " diam = 1", 34 | " nseg = 1+2*int(L/40)", 35 | " all.append()", 36 | " axonal.append()", 37 | " }", 38 | " axon[1]{", 39 | " L= 30", 40 | " diam = 1", 41 | " nseg = 1+2*int(L/40)", 42 | " all.append()", 43 | " axonal.append()", 44 | " }", 45 | "", 46 | "nSecAxonal = 2", 47 | "connect axon(0), soma(0.5)", 48 | "connect axon[1](0), axon[0](1) ", 49 | "access soma" 50 | ], 51 | "setup_line": "create soma[1], dend[1], apic[1], axon[1]" 52 | } 53 | ], 54 | "fitting": [ 55 | { 56 | "junction_potential": -14.0, 57 | "sweeps": [ 58 | 41 59 | ] 60 | } 61 | ], 62 | "conditions": [ 63 | { 64 | "celsius": 34.0, 65 | "erev": [ 66 | { 67 | "ena": 53.0, 68 | "section": "soma", 69 | "ek": -107.0 70 | } 71 | ], 72 | "v_init": -89.4614028930664 73 | } 74 | ], 75 | "genome": [ 76 | { 77 | "value": 0.00204889965055, 78 | "section": "soma", 79 | "name": "gbar_Im", 80 | "mechanism": "Im" 81 | }, 82 | { 83 | "value": 3.7269252291999993e-05, 84 | "section": "soma", 85 | 
"name": "gbar_Ih", 86 | "mechanism": "Ih" 87 | }, 88 | { 89 | "value": 0.60927080502300002, 90 | "section": "soma", 91 | "name": "gbar_NaTs", 92 | "mechanism": "NaTs" 93 | }, 94 | { 95 | "value": 6.2161868365100004e-05, 96 | "section": "soma", 97 | "name": "gbar_Nap", 98 | "mechanism": "Nap" 99 | }, 100 | { 101 | "value": 0.018147424450599997, 102 | "section": "soma", 103 | "name": "gbar_K_P", 104 | "mechanism": "K_P" 105 | }, 106 | { 107 | "value": 0.000555828337834, 108 | "section": "soma", 109 | "name": "gbar_K_T", 110 | "mechanism": "K_T" 111 | }, 112 | { 113 | "value": 0.00041743171143300003, 114 | "section": "soma", 115 | "name": "gbar_SK", 116 | "mechanism": "SK" 117 | }, 118 | { 119 | "value": 0.12468487969900001, 120 | "section": "soma", 121 | "name": "gbar_Kv3_1", 122 | "mechanism": "Kv3_1" 123 | }, 124 | { 125 | "value": 0.00097272189665800009, 126 | "section": "soma", 127 | "name": "gbar_Ca_HVA", 128 | "mechanism": "Ca_HVA" 129 | }, 130 | { 131 | "value": 0.0066296509568100001, 132 | "section": "soma", 133 | "name": "gbar_Ca_LVA", 134 | "mechanism": "Ca_LVA" 135 | }, 136 | { 137 | "value": 0.00071152004894800005, 138 | "section": "soma", 139 | "name": "gamma_CaDynamics", 140 | "mechanism": "CaDynamics" 141 | }, 142 | { 143 | "value": 798.67999240300003, 144 | "section": "soma", 145 | "name": "decay_CaDynamics", 146 | "mechanism": "CaDynamics" 147 | }, 148 | { 149 | "value": 0.00054589151280800012, 150 | "section": "soma", 151 | "name": "g_pas", 152 | "mechanism": "" 153 | }, 154 | { 155 | "value": 0.00021542161812900001, 156 | "section": "axon", 157 | "name": "g_pas", 158 | "mechanism": "" 159 | }, 160 | { 161 | "value": 6.2827249623699998e-06, 162 | "section": "dend", 163 | "name": "g_pas", 164 | "mechanism": "" 165 | }, 166 | { 167 | "value": 5.9318998497700001e-06, 168 | "section": "apic", 169 | "name": "g_pas", 170 | "mechanism": "" 171 | } 172 | ] 173 | } -------------------------------------------------------------------------------- 
/DynamicBrain/Modeling/biophysical_notebook/components/biophysical/electrophysiology/473863510_fit.json: -------------------------------------------------------------------------------- 1 | { 2 | "passive": [ 3 | { 4 | "ra": 35.64, 5 | "cm": [ 6 | { 7 | "section": "soma", 8 | "cm": 1.0 9 | }, 10 | { 11 | "section": "axon", 12 | "cm": 1.0 13 | }, 14 | { 15 | "section": "apic", 16 | "cm": 2.19 17 | }, 18 | { 19 | "section": "dend", 20 | "cm": 2.19 21 | } 22 | ], 23 | "e_pas": -85.07815551757812 24 | } 25 | ], 26 | "axon_morph": [ 27 | { 28 | "delete_axon": [ 29 | "forsec axonal{delete_section()}", 30 | " create axon[2]", 31 | " axon[0]{", 32 | " L= 30", 33 | " diam = 1", 34 | " nseg = 1+2*int(L/40)", 35 | " all.append()", 36 | " axonal.append()", 37 | " }", 38 | " axon[1]{", 39 | " L= 30", 40 | " diam = 1", 41 | " nseg = 1+2*int(L/40)", 42 | " all.append()", 43 | " axonal.append()", 44 | " }", 45 | "", 46 | "nSecAxonal = 2", 47 | "connect axon(0), soma(0.5)", 48 | "connect axon[1](0), axon[0](1) ", 49 | "access soma" 50 | ], 51 | "setup_line": "create soma[1], dend[1], apic[1], axon[1]" 52 | } 53 | ], 54 | "fitting": [ 55 | { 56 | "junction_potential": -14.0, 57 | "sweeps": [ 58 | 38 59 | ] 60 | } 61 | ], 62 | "conditions": [ 63 | { 64 | "celsius": 34.0, 65 | "erev": [ 66 | { 67 | "ena": 53.0, 68 | "section": "soma", 69 | "ek": -107.0 70 | } 71 | ], 72 | "v_init": -85.07815551757812 73 | } 74 | ], 75 | "genome": [ 76 | { 77 | "value": 0.00043788364247700001, 78 | "section": "soma", 79 | "name": "gbar_Im", 80 | "mechanism": "Im" 81 | }, 82 | { 83 | "value": 0.0019922075246600001, 84 | "section": "soma", 85 | "name": "gbar_Ih", 86 | "mechanism": "Ih" 87 | }, 88 | { 89 | "value": 0.71282189194800005, 90 | "section": "soma", 91 | "name": "gbar_NaTs", 92 | "mechanism": "NaTs" 93 | }, 94 | { 95 | "value": 0.0012493753876800001, 96 | "section": "soma", 97 | "name": "gbar_Nap", 98 | "mechanism": "Nap" 99 | }, 100 | { 101 | "value": 0.034836377263399998, 102 | "section": 
"soma", 103 | "name": "gbar_K_P", 104 | "mechanism": "K_P" 105 | }, 106 | { 107 | "value": 0.0166428509042, 108 | "section": "soma", 109 | "name": "gbar_K_T", 110 | "mechanism": "K_T" 111 | }, 112 | { 113 | "value": 0.00024972209054299998, 114 | "section": "soma", 115 | "name": "gbar_SK", 116 | "mechanism": "SK" 117 | }, 118 | { 119 | "value": 0.28059766435600003, 120 | "section": "soma", 121 | "name": "gbar_Kv3_1", 122 | "mechanism": "Kv3_1" 123 | }, 124 | { 125 | "value": 0.00015339031713199999, 126 | "section": "soma", 127 | "name": "gbar_Ca_HVA", 128 | "mechanism": "Ca_HVA" 129 | }, 130 | { 131 | "value": 0.0033469316039000004, 132 | "section": "soma", 133 | "name": "gbar_Ca_LVA", 134 | "mechanism": "Ca_LVA" 135 | }, 136 | { 137 | "value": 0.0040218816981199999, 138 | "section": "soma", 139 | "name": "gamma_CaDynamics", 140 | "mechanism": "CaDynamics" 141 | }, 142 | { 143 | "value": 991.140696832, 144 | "section": "soma", 145 | "name": "decay_CaDynamics", 146 | "mechanism": "CaDynamics" 147 | }, 148 | { 149 | "value": 0.00092865666454699993, 150 | "section": "soma", 151 | "name": "g_pas", 152 | "mechanism": "" 153 | }, 154 | { 155 | "value": 0.00091423093354899986, 156 | "section": "axon", 157 | "name": "g_pas", 158 | "mechanism": "" 159 | }, 160 | { 161 | "value": 3.8264043188599994e-06, 162 | "section": "dend", 163 | "name": "g_pas", 164 | "mechanism": "" 165 | }, 166 | { 167 | "value": 2.11145615996e-06, 168 | "section": "apic", 169 | "name": "g_pas", 170 | "mechanism": "" 171 | } 172 | ] 173 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/electrodes/single_electrode.csv: -------------------------------------------------------------------------------- 1 | channel x_pos y_pos z_pos 2 | 0 0.5 0.5 0.5 3 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/hoc_templates/Biophys1.hoc: 
-------------------------------------------------------------------------------- 1 | begintemplate Biophys1 2 | 3 | public init 4 | public soma, dend, apic, axon 5 | public all, somatic, basal, apical, axonal 6 | 7 | objref all, somatic, basal, apical, axonal 8 | objref this 9 | 10 | create soma[1] 11 | create dend[1] 12 | create apic[1] 13 | create axon[1] 14 | 15 | 16 | proc init() {localobj nl, import 17 | all = new SectionList() 18 | somatic = new SectionList() 19 | basal = new SectionList() 20 | apical = new SectionList() 21 | axonal = new SectionList() 22 | forall delete_section() 23 | 24 | // nl = new Import3d_Neurolucida3() 25 | nl = new Import3d_SWC_read() 26 | nl.quiet = 1 27 | nl.input($s1) 28 | import = new Import3d_GUI(nl, 0) 29 | // import.quite = 1 30 | import.instantiate(this) 31 | 32 | } 33 | 34 | endtemplate Biophys1 35 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/intfire/IntFire1_exc_1.json: -------------------------------------------------------------------------------- 1 | { 2 | "tau": 0.024, 3 | "type": "NEURON_IntFire1", 4 | "refrac": 0.003 5 | } 6 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/intfire/IntFire1_inh_1.json: -------------------------------------------------------------------------------- 1 | { 2 | "tau": 0.007, 3 | "type": "NEURON_IntFire1", 4 | "refrac": 0.003 5 | } 6 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/CaDynamics.mod: -------------------------------------------------------------------------------- 1 | : Dynamics that track inside calcium concentration 2 | : modified from Destexhe et al. 
1994 3 | 4 | NEURON { 5 | SUFFIX CaDynamics 6 | USEION ca READ ica WRITE cai 7 | RANGE decay, gamma, minCai, depth 8 | } 9 | 10 | UNITS { 11 | (mV) = (millivolt) 12 | (mA) = (milliamp) 13 | FARADAY = (faraday) (coulombs) 14 | (molar) = (1/liter) 15 | (mM) = (millimolar) 16 | (um) = (micron) 17 | } 18 | 19 | PARAMETER { 20 | gamma = 0.05 : percent of free calcium (not buffered) 21 | decay = 80 (ms) : rate of removal of calcium 22 | depth = 0.1 (um) : depth of shell 23 | minCai = 1e-4 (mM) 24 | } 25 | 26 | ASSIGNED {ica (mA/cm2)} 27 | 28 | INITIAL { 29 | cai = minCai 30 | } 31 | 32 | STATE { 33 | cai (mM) 34 | } 35 | 36 | BREAKPOINT { SOLVE states METHOD cnexp } 37 | 38 | DERIVATIVE states { 39 | cai' = -(10000)*(ica*gamma/(2*FARADAY*depth)) - (cai - minCai)/decay 40 | } 41 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/Ca_HVA.mod: -------------------------------------------------------------------------------- 1 | : Reference: Reuveni, Friedman, Amitai, and Gutnick, J.Neurosci. 
1993 2 | 3 | NEURON { 4 | SUFFIX Ca_HVA 5 | USEION ca READ eca WRITE ica 6 | RANGE gbar, g, ica 7 | } 8 | 9 | UNITS { 10 | (S) = (siemens) 11 | (mV) = (millivolt) 12 | (mA) = (milliamp) 13 | } 14 | 15 | PARAMETER { 16 | gbar = 0.00001 (S/cm2) 17 | } 18 | 19 | ASSIGNED { 20 | v (mV) 21 | eca (mV) 22 | ica (mA/cm2) 23 | g (S/cm2) 24 | mInf 25 | mTau 26 | mAlpha 27 | mBeta 28 | hInf 29 | hTau 30 | hAlpha 31 | hBeta 32 | } 33 | 34 | STATE { 35 | m 36 | h 37 | } 38 | 39 | BREAKPOINT { 40 | SOLVE states METHOD cnexp 41 | g = gbar*m*m*h 42 | ica = g*(v-eca) 43 | } 44 | 45 | DERIVATIVE states { 46 | rates() 47 | m' = (mInf-m)/mTau 48 | h' = (hInf-h)/hTau 49 | } 50 | 51 | INITIAL{ 52 | rates() 53 | m = mInf 54 | h = hInf 55 | } 56 | 57 | PROCEDURE rates(){ 58 | UNITSOFF 59 | : if((v == -27) ){ 60 | : v = v+0.0001 61 | : } 62 | :mAlpha = (0.055*(-27-v))/(exp((-27-v)/3.8) - 1) 63 | mAlpha = 0.055 * vtrap(-27 - v, 3.8) 64 | mBeta = (0.94*exp((-75-v)/17)) 65 | mInf = mAlpha/(mAlpha + mBeta) 66 | mTau = 1/(mAlpha + mBeta) 67 | hAlpha = (0.000457*exp((-13-v)/50)) 68 | hBeta = (0.0065/(exp((-v-15)/28)+1)) 69 | hInf = hAlpha/(hAlpha + hBeta) 70 | hTau = 1/(hAlpha + hBeta) 71 | UNITSON 72 | } 73 | 74 | FUNCTION vtrap(x, y) { : Traps for 0 in denominator of rate equations 75 | UNITSOFF 76 | if (fabs(x / y) < 1e-6) { 77 | vtrap = y * (1 - x / y / 2) 78 | } else { 79 | vtrap = x / (exp(x / y) - 1) 80 | } 81 | UNITSON 82 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/Ca_LVA.mod: -------------------------------------------------------------------------------- 1 | : Comment: LVA ca channel. 
Note: mtau is an approximation from the plots 2 | : Reference: Avery and Johnston 1996, tau from Randall 1997 3 | : Comment: shifted by -10 mv to correct for junction potential 4 | : Comment: corrected rates using q10 = 2.3, target temperature 34, orginal 21 5 | 6 | NEURON { 7 | SUFFIX Ca_LVA 8 | USEION ca READ eca WRITE ica 9 | RANGE gbar, g, ica 10 | } 11 | 12 | UNITS { 13 | (S) = (siemens) 14 | (mV) = (millivolt) 15 | (mA) = (milliamp) 16 | } 17 | 18 | PARAMETER { 19 | gbar = 0.00001 (S/cm2) 20 | } 21 | 22 | ASSIGNED { 23 | v (mV) 24 | eca (mV) 25 | ica (mA/cm2) 26 | g (S/cm2) 27 | celsius (degC) 28 | mInf 29 | mTau 30 | hInf 31 | hTau 32 | } 33 | 34 | STATE { 35 | m 36 | h 37 | } 38 | 39 | BREAKPOINT { 40 | SOLVE states METHOD cnexp 41 | g = gbar*m*m*h 42 | ica = g*(v-eca) 43 | } 44 | 45 | DERIVATIVE states { 46 | rates() 47 | m' = (mInf-m)/mTau 48 | h' = (hInf-h)/hTau 49 | } 50 | 51 | INITIAL{ 52 | rates() 53 | m = mInf 54 | h = hInf 55 | } 56 | 57 | PROCEDURE rates(){ 58 | LOCAL qt 59 | qt = 2.3^((celsius-21)/10) 60 | 61 | UNITSOFF 62 | v = v + 10 63 | mInf = 1.0000/(1+ exp((v - -30.000)/-6)) 64 | mTau = (5.0000 + 20.0000/(1+exp((v - -25.000)/5)))/qt 65 | hInf = 1.0000/(1+ exp((v - -80.000)/6.4)) 66 | hTau = (20.0000 + 50.0000/(1+exp((v - -40.000)/7)))/qt 67 | v = v - 10 68 | UNITSON 69 | } 70 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/Ih.mod: -------------------------------------------------------------------------------- 1 | : Reference: Kole,Hallermann,and Stuart, J. Neurosci. 
2006 2 | 3 | NEURON { 4 | SUFFIX Ih 5 | NONSPECIFIC_CURRENT ihcn 6 | RANGE gbar, g, ihcn 7 | } 8 | 9 | UNITS { 10 | (S) = (siemens) 11 | (mV) = (millivolt) 12 | (mA) = (milliamp) 13 | } 14 | 15 | PARAMETER { 16 | gbar = 0.00001 (S/cm2) 17 | ehcn = -45.0 (mV) 18 | } 19 | 20 | ASSIGNED { 21 | v (mV) 22 | ihcn (mA/cm2) 23 | g (S/cm2) 24 | mInf 25 | mTau 26 | mAlpha 27 | mBeta 28 | } 29 | 30 | STATE { 31 | m 32 | } 33 | 34 | BREAKPOINT { 35 | SOLVE states METHOD cnexp 36 | g = gbar*m 37 | ihcn = g*(v-ehcn) 38 | } 39 | 40 | DERIVATIVE states { 41 | rates() 42 | m' = (mInf-m)/mTau 43 | } 44 | 45 | INITIAL{ 46 | rates() 47 | m = mInf 48 | } 49 | 50 | PROCEDURE rates(){ 51 | UNITSOFF 52 | : if(v == -154.9){ 53 | : v = v + 0.0001 54 | : } 55 | :mAlpha = 0.001*6.43*(v+154.9)/(exp((v+154.9)/11.9)-1) 56 | mAlpha = 0.001 * 6.43 * vtrap(v + 154.9, 11.9) 57 | mBeta = 0.001*193*exp(v/33.1) 58 | mInf = mAlpha/(mAlpha + mBeta) 59 | mTau = 1/(mAlpha + mBeta) 60 | UNITSON 61 | } 62 | 63 | FUNCTION vtrap(x, y) { : Traps for 0 in denominator of rate equations 64 | UNITSOFF 65 | if (fabs(x / y) < 1e-6) { 66 | vtrap = y * (1 - x / y / 2) 67 | } else { 68 | vtrap = x / (exp(x / y) - 1) 69 | } 70 | UNITSON 71 | } 72 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/Im.mod: -------------------------------------------------------------------------------- 1 | : Reference: Adams et al. 
1982 - M-currents and other potassium currents in bullfrog sympathetic neurones 2 | : Comment: corrected rates using q10 = 2.3, target temperature 34, orginal 21 3 | 4 | NEURON { 5 | SUFFIX Im 6 | USEION k READ ek WRITE ik 7 | RANGE gbar, g, ik 8 | } 9 | 10 | UNITS { 11 | (S) = (siemens) 12 | (mV) = (millivolt) 13 | (mA) = (milliamp) 14 | } 15 | 16 | PARAMETER { 17 | gbar = 0.00001 (S/cm2) 18 | } 19 | 20 | ASSIGNED { 21 | v (mV) 22 | ek (mV) 23 | ik (mA/cm2) 24 | g (S/cm2) 25 | celsius (degC) 26 | mInf 27 | mTau 28 | mAlpha 29 | mBeta 30 | } 31 | 32 | STATE { 33 | m 34 | } 35 | 36 | BREAKPOINT { 37 | SOLVE states METHOD cnexp 38 | g = gbar*m 39 | ik = g*(v-ek) 40 | } 41 | 42 | DERIVATIVE states { 43 | rates() 44 | m' = (mInf-m)/mTau 45 | } 46 | 47 | INITIAL{ 48 | rates() 49 | m = mInf 50 | } 51 | 52 | PROCEDURE rates(){ 53 | LOCAL qt 54 | qt = 2.3^((celsius-21)/10) 55 | 56 | UNITSOFF 57 | mAlpha = 3.3e-3*exp(2.5*0.04*(v - -35)) 58 | mBeta = 3.3e-3*exp(-2.5*0.04*(v - -35)) 59 | mInf = mAlpha/(mAlpha + mBeta) 60 | mTau = (1/(mAlpha + mBeta))/qt 61 | UNITSON 62 | } 63 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/Im_v2.mod: -------------------------------------------------------------------------------- 1 | : Based on Im model of Vervaeke et al. 
(2006) 2 | 3 | NEURON { 4 | SUFFIX Im_v2 5 | USEION k READ ek WRITE ik 6 | RANGE gbar, g, ik 7 | } 8 | 9 | UNITS { 10 | (S) = (siemens) 11 | (mV) = (millivolt) 12 | (mA) = (milliamp) 13 | } 14 | 15 | PARAMETER { 16 | gbar = 0.00001 (S/cm2) 17 | } 18 | 19 | ASSIGNED { 20 | v (mV) 21 | ek (mV) 22 | ik (mA/cm2) 23 | g (S/cm2) 24 | celsius (degC) 25 | mInf 26 | mTau 27 | mAlpha 28 | mBeta 29 | } 30 | 31 | STATE { 32 | m 33 | } 34 | 35 | BREAKPOINT { 36 | SOLVE states METHOD cnexp 37 | g = gbar * m 38 | ik = g * (v - ek) 39 | } 40 | 41 | DERIVATIVE states { 42 | rates() 43 | m' = (mInf - m) / mTau 44 | } 45 | 46 | INITIAL{ 47 | rates() 48 | m = mInf 49 | } 50 | 51 | PROCEDURE rates() { 52 | LOCAL qt 53 | qt = 2.3^((celsius-30)/10) 54 | mAlpha = 0.007 * exp( (6 * 0.4 * (v - (-48))) / 26.12 ) 55 | mBeta = 0.007 * exp( (-6 * (1 - 0.4) * (v - (-48))) / 26.12 ) 56 | 57 | mInf = mAlpha / (mAlpha + mBeta) 58 | mTau = (15 + 1 / (mAlpha + mBeta)) / qt 59 | } 60 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/K_P.mod: -------------------------------------------------------------------------------- 1 | : Comment: The persistent component of the K current 2 | : Reference: Voltage-gated K+ channels in layer 5 neocortical pyramidal neurones from young rats:subtypes and gradients,Korngreen and Sakmann, J. 
Physiology, 2000 3 | 4 | 5 | NEURON { 6 | SUFFIX K_P 7 | USEION k READ ek WRITE ik 8 | RANGE gbar, g, ik 9 | } 10 | 11 | UNITS { 12 | (S) = (siemens) 13 | (mV) = (millivolt) 14 | (mA) = (milliamp) 15 | } 16 | 17 | PARAMETER { 18 | gbar = 0.00001 (S/cm2) 19 | vshift = 0 (mV) 20 | tauF = 1 21 | } 22 | 23 | ASSIGNED { 24 | v (mV) 25 | ek (mV) 26 | ik (mA/cm2) 27 | g (S/cm2) 28 | celsius (degC) 29 | mInf 30 | mTau 31 | hInf 32 | hTau 33 | } 34 | 35 | STATE { 36 | m 37 | h 38 | } 39 | 40 | BREAKPOINT { 41 | SOLVE states METHOD cnexp 42 | g = gbar*m*m*h 43 | ik = g*(v-ek) 44 | } 45 | 46 | DERIVATIVE states { 47 | rates() 48 | m' = (mInf-m)/mTau 49 | h' = (hInf-h)/hTau 50 | } 51 | 52 | INITIAL{ 53 | rates() 54 | m = mInf 55 | h = hInf 56 | } 57 | 58 | PROCEDURE rates() { 59 | LOCAL qt 60 | qt = 2.3^((celsius-21)/10) 61 | UNITSOFF 62 | mInf = 1 / (1 + exp(-(v - (-14.3 + vshift)) / 14.6)) 63 | if (v < -50 + vshift){ 64 | mTau = tauF * (1.25+175.03*exp(-(v - vshift) * -0.026))/qt 65 | } else { 66 | mTau = tauF * (1.25+13*exp(-(v - vshift) * 0.026))/qt 67 | } 68 | hInf = 1/(1 + exp(-(v - (-54 + vshift))/-11)) 69 | hTau = (360+(1010+24*(v - (-55 + vshift)))*exp(-((v - (-75 + vshift))/48)^2))/qt 70 | UNITSON 71 | } 72 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/K_T.mod: -------------------------------------------------------------------------------- 1 | : Comment: The transient component of the K current 2 | : Reference: Voltage-gated K+ channels in layer 5 neocortical pyramidal neurones from young rats:subtypes and gradients,Korngreen and Sakmann, J. 
Physiology, 2000 3 | 4 | NEURON { 5 | SUFFIX K_T 6 | USEION k READ ek WRITE ik 7 | RANGE gbar, g, ik 8 | } 9 | 10 | UNITS { 11 | (S) = (siemens) 12 | (mV) = (millivolt) 13 | (mA) = (milliamp) 14 | } 15 | 16 | PARAMETER { 17 | gbar = 0.00001 (S/cm2) 18 | vshift = 0 (mV) 19 | mTauF = 1.0 20 | hTauF = 1.0 21 | } 22 | 23 | ASSIGNED { 24 | v (mV) 25 | ek (mV) 26 | ik (mA/cm2) 27 | g (S/cm2) 28 | celsius (degC) 29 | mInf 30 | mTau 31 | hInf 32 | hTau 33 | } 34 | 35 | STATE { 36 | m 37 | h 38 | } 39 | 40 | BREAKPOINT { 41 | SOLVE states METHOD cnexp 42 | g = gbar*m*m*m*m*h 43 | ik = g*(v-ek) 44 | } 45 | 46 | DERIVATIVE states { 47 | rates() 48 | m' = (mInf-m)/mTau 49 | h' = (hInf-h)/hTau 50 | } 51 | 52 | INITIAL{ 53 | rates() 54 | m = mInf 55 | h = hInf 56 | } 57 | 58 | PROCEDURE rates(){ 59 | LOCAL qt 60 | qt = 2.3^((celsius-21)/10) 61 | 62 | UNITSOFF 63 | mInf = 1/(1 + exp(-(v - (-47 + vshift)) / 29)) 64 | mTau = (0.34 + mTauF * 0.92*exp(-((v+71-vshift)/59)^2))/qt 65 | hInf = 1/(1 + exp(-(v+66-vshift)/-10)) 66 | hTau = (8 + hTauF * 49*exp(-((v+73-vshift)/23)^2))/qt 67 | UNITSON 68 | } 69 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/Kd.mod: -------------------------------------------------------------------------------- 1 | : Based on Kd model of Foust et al. 
(2011) 2 | 3 | 4 | NEURON { 5 | SUFFIX Kd 6 | USEION k READ ek WRITE ik 7 | RANGE gbar, g, ik 8 | } 9 | 10 | UNITS { 11 | (S) = (siemens) 12 | (mV) = (millivolt) 13 | (mA) = (milliamp) 14 | } 15 | 16 | PARAMETER { 17 | gbar = 0.00001 (S/cm2) 18 | } 19 | 20 | ASSIGNED { 21 | v (mV) 22 | ek (mV) 23 | ik (mA/cm2) 24 | g (S/cm2) 25 | celsius (degC) 26 | mInf 27 | mTau 28 | hInf 29 | hTau 30 | } 31 | 32 | STATE { 33 | m 34 | h 35 | } 36 | 37 | BREAKPOINT { 38 | SOLVE states METHOD cnexp 39 | g = gbar * m * h 40 | ik = g * (v - ek) 41 | } 42 | 43 | DERIVATIVE states { 44 | rates() 45 | m' = (mInf - m) / mTau 46 | h' = (hInf - h) / hTau 47 | } 48 | 49 | INITIAL{ 50 | rates() 51 | m = mInf 52 | h = hInf 53 | } 54 | 55 | PROCEDURE rates() { 56 | LOCAL qt 57 | qt = 2.3^((celsius-23)/10) 58 | mInf = 1 - 1 / (1 + exp((v - (-43)) / 8)) 59 | mTau = 1 60 | hInf = 1 / (1 + exp((v - (-67)) / 7.3)) 61 | hTau = 1500 62 | } 63 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/Kv2like.mod: -------------------------------------------------------------------------------- 1 | : Kv2-like channel 2 | : Adapted from model implemented in Keren et al. 
2005 3 | : Adjusted parameters to be similar to guangxitoxin-sensitive current in mouse CA1 pyramids from Liu and Bean 2014 4 | 5 | 6 | NEURON { 7 | SUFFIX Kv2like 8 | USEION k READ ek WRITE ik 9 | RANGE gbar, g, ik 10 | } 11 | 12 | UNITS { 13 | (S) = (siemens) 14 | (mV) = (millivolt) 15 | (mA) = (milliamp) 16 | } 17 | 18 | PARAMETER { 19 | gbar = 0.00001 (S/cm2) 20 | } 21 | 22 | ASSIGNED { 23 | v (mV) 24 | ek (mV) 25 | ik (mA/cm2) 26 | g (S/cm2) 27 | celsius (degC) 28 | mInf 29 | mAlpha 30 | mBeta 31 | mTau 32 | hInf 33 | h1Tau 34 | h2Tau 35 | } 36 | 37 | STATE { 38 | m 39 | h1 40 | h2 41 | } 42 | 43 | BREAKPOINT { 44 | SOLVE states METHOD cnexp 45 | g = gbar * m * m * (0.5 * h1 + 0.5 * h2) 46 | ik = g * (v - ek) 47 | } 48 | 49 | DERIVATIVE states { 50 | rates() 51 | m' = (mInf - m) / mTau 52 | h1' = (hInf - h1) / h1Tau 53 | h2' = (hInf - h2) / h2Tau 54 | } 55 | 56 | INITIAL{ 57 | rates() 58 | m = mInf 59 | h1 = hInf 60 | h2 = hInf 61 | } 62 | 63 | PROCEDURE rates() { 64 | LOCAL qt 65 | qt = 2.3^((celsius-21)/10) 66 | UNITSOFF 67 | mAlpha = 0.12 * vtrap( -(v - 43), 11.0) 68 | mBeta = 0.02 * exp(-(v + 1.27) / 120) 69 | mInf = mAlpha / (mAlpha + mBeta) 70 | mTau = 2.5 * (1 / (qt * (mAlpha + mBeta))) 71 | 72 | hInf = 1/(1 + exp((v + 58) / 11)) 73 | h1Tau = (360 + (1010 + 23.7 * (v + 54)) * exp(-((v + 75) / 48)^2)) / qt 74 | h2Tau = (2350 + 1380 * exp(-0.011 * v) - 210 * exp(-0.03 * v)) / qt 75 | UNITSON 76 | } 77 | 78 | FUNCTION vtrap(x, y) { : Traps for 0 in denominator of rate equations 79 | UNITSOFF 80 | if (fabs(x / y) < 1e-6) { 81 | vtrap = y * (1 - x / y / 2) 82 | } else { 83 | vtrap = x / (exp(x / y) - 1) 84 | } 85 | UNITSON 86 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/Kv3_1.mod: -------------------------------------------------------------------------------- 1 | : Comment: Kv3-like potassium current 2 | 3 | NEURON { 4 | SUFFIX Kv3_1 5 | USEION 
k READ ek WRITE ik 6 | RANGE gbar, g, ik 7 | } 8 | 9 | UNITS { 10 | (S) = (siemens) 11 | (mV) = (millivolt) 12 | (mA) = (milliamp) 13 | } 14 | 15 | PARAMETER { 16 | gbar = 0.00001 (S/cm2) 17 | vshift = 0 (mV) 18 | } 19 | 20 | ASSIGNED { 21 | v (mV) 22 | ek (mV) 23 | ik (mA/cm2) 24 | g (S/cm2) 25 | mInf 26 | mTau 27 | } 28 | 29 | STATE { 30 | m 31 | } 32 | 33 | BREAKPOINT { 34 | SOLVE states METHOD cnexp 35 | g = gbar*m 36 | ik = g*(v-ek) 37 | } 38 | 39 | DERIVATIVE states { 40 | rates() 41 | m' = (mInf-m)/mTau 42 | } 43 | 44 | INITIAL{ 45 | rates() 46 | m = mInf 47 | } 48 | 49 | PROCEDURE rates(){ 50 | UNITSOFF 51 | mInf = 1/(1+exp(((v -(18.700 + vshift))/(-9.700)))) 52 | mTau = 0.2*20.000/(1+exp(((v -(-46.560 + vshift))/(-44.140)))) 53 | UNITSON 54 | } 55 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/NaTa.mod: -------------------------------------------------------------------------------- 1 | : Reference: Colbert and Pan 2002 2 | 3 | NEURON { 4 | SUFFIX NaTa 5 | USEION na READ ena WRITE ina 6 | RANGE gbar, g, ina 7 | } 8 | 9 | UNITS { 10 | (S) = (siemens) 11 | (mV) = (millivolt) 12 | (mA) = (milliamp) 13 | } 14 | 15 | PARAMETER { 16 | gbar = 0.00001 (S/cm2) 17 | 18 | malphaF = 0.182 19 | mbetaF = 0.124 20 | mvhalf = -48 (mV) 21 | mk = 6 (mV) 22 | 23 | halphaF = 0.015 24 | hbetaF = 0.015 25 | hvhalf = -69 (mV) 26 | hk = 6 (mV) 27 | } 28 | 29 | ASSIGNED { 30 | v (mV) 31 | ena (mV) 32 | ina (mA/cm2) 33 | g (S/cm2) 34 | celsius (degC) 35 | mInf 36 | mTau 37 | mAlpha 38 | mBeta 39 | hInf 40 | hTau 41 | hAlpha 42 | hBeta 43 | } 44 | 45 | STATE { 46 | m 47 | h 48 | } 49 | 50 | BREAKPOINT { 51 | SOLVE states METHOD cnexp 52 | g = gbar*m*m*m*h 53 | ina = g*(v-ena) 54 | } 55 | 56 | DERIVATIVE states { 57 | rates() 58 | m' = (mInf-m)/mTau 59 | h' = (hInf-h)/hTau 60 | } 61 | 62 | INITIAL{ 63 | rates() 64 | m = mInf 65 | h = hInf 66 | } 67 | 68 | PROCEDURE rates(){ 
69 | LOCAL qt 70 | qt = 2.3^((celsius-23)/10) 71 | 72 | UNITSOFF 73 | mAlpha = malphaF * vtrap(-(v - mvhalf), mk) 74 | mBeta = mbetaF * vtrap((v - mvhalf), mk) 75 | 76 | mInf = mAlpha/(mAlpha + mBeta) 77 | mTau = (1/(mAlpha + mBeta))/qt 78 | 79 | hAlpha = halphaF * vtrap(v - hvhalf, hk) : ng - adjusted this to match actual Colbert & Pan values for soma model 80 | hBeta = hbetaF * vtrap(-(v - hvhalf), hk) : ng - adjusted this to match actual Colbert & Pan values for soma model 81 | 82 | hInf = hAlpha/(hAlpha + hBeta) 83 | hTau = (1/(hAlpha + hBeta))/qt 84 | UNITSON 85 | } 86 | 87 | FUNCTION vtrap(x, y) { : Traps for 0 in denominator of rate equations 88 | UNITSOFF 89 | if (fabs(x / y) < 1e-6) { 90 | vtrap = y * (1 - x / y / 2) 91 | } else { 92 | vtrap = x / (exp(x / y) - 1) 93 | } 94 | UNITSON 95 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/NaTs.mod: -------------------------------------------------------------------------------- 1 | : Reference: Colbert and Pan 2002 2 | 3 | NEURON { 4 | SUFFIX NaTs 5 | USEION na READ ena WRITE ina 6 | RANGE gbar, g, ina 7 | } 8 | 9 | UNITS { 10 | (S) = (siemens) 11 | (mV) = (millivolt) 12 | (mA) = (milliamp) 13 | } 14 | 15 | PARAMETER { 16 | gbar = 0.00001 (S/cm2) 17 | 18 | malphaF = 0.182 19 | mbetaF = 0.124 20 | mvhalf = -40 (mV) 21 | mk = 6 (mV) 22 | 23 | halphaF = 0.015 24 | hbetaF = 0.015 25 | hvhalf = -66 (mV) 26 | hk = 6 (mV) 27 | } 28 | 29 | ASSIGNED { 30 | v (mV) 31 | ena (mV) 32 | ina (mA/cm2) 33 | g (S/cm2) 34 | celsius (degC) 35 | mInf 36 | mTau 37 | mAlpha 38 | mBeta 39 | hInf 40 | hTau 41 | hAlpha 42 | hBeta 43 | } 44 | 45 | STATE { 46 | m 47 | h 48 | } 49 | 50 | BREAKPOINT { 51 | SOLVE states METHOD cnexp 52 | g = gbar*m*m*m*h 53 | ina = g*(v-ena) 54 | } 55 | 56 | DERIVATIVE states { 57 | rates() 58 | m' = (mInf-m)/mTau 59 | h' = (hInf-h)/hTau 60 | } 61 | 62 | INITIAL{ 63 | rates() 64 | m = mInf 65 | h = 
hInf 66 | } 67 | 68 | PROCEDURE rates(){ 69 | LOCAL qt 70 | qt = 2.3^((celsius-23)/10) 71 | 72 | UNITSOFF 73 | mAlpha = malphaF * vtrap(-(v - mvhalf), mk) 74 | mBeta = mbetaF * vtrap((v - mvhalf), mk) 75 | 76 | mInf = mAlpha/(mAlpha + mBeta) 77 | mTau = (1/(mAlpha + mBeta))/qt 78 | 79 | hAlpha = halphaF * vtrap(v - hvhalf, hk) 80 | hBeta = hbetaF * vtrap(-(v - hvhalf), hk) 81 | 82 | hInf = hAlpha/(hAlpha + hBeta) 83 | hTau = (1/(hAlpha + hBeta))/qt 84 | UNITSON 85 | } 86 | 87 | FUNCTION vtrap(x, y) { : Traps for 0 in denominator of rate equations 88 | UNITSOFF 89 | if (fabs(x / y) < 1e-6) { 90 | vtrap = y * (1 - x / y / 2) 91 | } else { 92 | vtrap = x / (exp(x / y) - 1) 93 | } 94 | UNITSON 95 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/NaV.mod: -------------------------------------------------------------------------------- 1 | TITLE Mouse sodium current 2 | : Kinetics of Carter et al. 
(2012) 3 | : Based on 37 degC recordings from mouse hippocampal CA1 pyramids 4 | 5 | NEURON { 6 | SUFFIX NaV 7 | USEION na READ ena WRITE ina 8 | RANGE g, gbar 9 | } 10 | 11 | UNITS { 12 | (mV) = (millivolt) 13 | (S) = (siemens) 14 | } 15 | 16 | PARAMETER { 17 | gbar = .015 (S/cm2) 18 | 19 | : kinetic parameters 20 | Con = 0.01 (/ms) : closed -> inactivated transitions 21 | Coff = 40 (/ms) : inactivated -> closed transitions 22 | Oon = 8 (/ms) : open -> Ineg transition 23 | Ooff = 0.05 (/ms) : Ineg -> open transition 24 | alpha = 400 (/ms) 25 | beta = 12 (/ms) 26 | gamma = 250 (/ms) : opening 27 | delta = 60 (/ms) : closing 28 | 29 | alfac = 2.51 30 | btfac = 5.32 31 | 32 | : Vdep 33 | x1 = 24 (mV) : Vdep of activation (alpha) 34 | x2 = -24 (mV) : Vdep of deactivation (beta) 35 | } 36 | 37 | ASSIGNED { 38 | 39 | : rates 40 | f01 (/ms) 41 | f02 (/ms) 42 | f03 (/ms) 43 | f04 (/ms) 44 | f0O (/ms) 45 | f11 (/ms) 46 | f12 (/ms) 47 | f13 (/ms) 48 | f14 (/ms) 49 | f1n (/ms) 50 | fi1 (/ms) 51 | fi2 (/ms) 52 | fi3 (/ms) 53 | fi4 (/ms) 54 | fi5 (/ms) 55 | fin (/ms) 56 | 57 | b01 (/ms) 58 | b02 (/ms) 59 | b03 (/ms) 60 | b04 (/ms) 61 | b0O (/ms) 62 | b11 (/ms) 63 | b12 (/ms) 64 | b13 (/ms) 65 | b14 (/ms) 66 | b1n (/ms) 67 | bi1 (/ms) 68 | bi2 (/ms) 69 | bi3 (/ms) 70 | bi4 (/ms) 71 | bi5 (/ms) 72 | bin (/ms) 73 | 74 | v (mV) 75 | ena (mV) 76 | ina (milliamp/cm2) 77 | g (S/cm2) 78 | celsius (degC) 79 | } 80 | 81 | STATE { 82 | C1 FROM 0 TO 1 83 | C2 FROM 0 TO 1 84 | C3 FROM 0 TO 1 85 | C4 FROM 0 TO 1 86 | C5 FROM 0 TO 1 87 | I1 FROM 0 TO 1 88 | I2 FROM 0 TO 1 89 | I3 FROM 0 TO 1 90 | I4 FROM 0 TO 1 91 | I5 FROM 0 TO 1 92 | O FROM 0 TO 1 93 | I6 FROM 0 TO 1 94 | } 95 | 96 | BREAKPOINT { 97 | SOLVE activation METHOD sparse 98 | g = gbar * O 99 | ina = g * (v - ena) 100 | } 101 | 102 | INITIAL { 103 | rates(v) 104 | SOLVE seqinitial 105 | } 106 | 107 | KINETIC activation 108 | { 109 | rates(v) 110 | ~ C1 <-> C2 (f01,b01) 111 | ~ C2 <-> C3 (f02,b02) 112 | ~ C3 <-> C4 (f03,b03) 113 | 
~ C4 <-> C5 (f04,b04) 114 | ~ C5 <-> O (f0O,b0O) 115 | ~ O <-> I6 (fin,bin) 116 | ~ I1 <-> I2 (f11,b11) 117 | ~ I2 <-> I3 (f12,b12) 118 | ~ I3 <-> I4 (f13,b13) 119 | ~ I4 <-> I5 (f14,b14) 120 | ~ I5 <-> I6 (f1n,b1n) 121 | ~ C1 <-> I1 (fi1,bi1) 122 | ~ C2 <-> I2 (fi2,bi2) 123 | ~ C3 <-> I3 (fi3,bi3) 124 | ~ C4 <-> I4 (fi4,bi4) 125 | ~ C5 <-> I5 (fi5,bi5) 126 | 127 | CONSERVE C1 + C2 + C3 + C4 + C5 + O + I1 + I2 + I3 + I4 + I5 + I6 = 1 128 | } 129 | 130 | LINEAR seqinitial { : sets initial equilibrium 131 | ~ I1*bi1 + C2*b01 - C1*( fi1+f01) = 0 132 | ~ C1*f01 + I2*bi2 + C3*b02 - C2*(b01+fi2+f02) = 0 133 | ~ C2*f02 + I3*bi3 + C4*b03 - C3*(b02+fi3+f03) = 0 134 | ~ C3*f03 + I4*bi4 + C5*b04 - C4*(b03+fi4+f04) = 0 135 | ~ C4*f04 + I5*bi5 + O*b0O - C5*(b04+fi5+f0O) = 0 136 | ~ C5*f0O + I6*bin - O*(b0O+fin) = 0 137 | 138 | ~ C1*fi1 + I2*b11 - I1*( bi1+f11) = 0 139 | ~ I1*f11 + C2*fi2 + I3*b12 - I2*(b11+bi2+f12) = 0 140 | ~ I2*f12 + C3*fi3 + I4*bi3 - I3*(b12+bi3+f13) = 0 141 | ~ I3*f13 + C4*fi4 + I5*b14 - I4*(b13+bi4+f14) = 0 142 | ~ I4*f14 + C5*fi5 + I6*b1n - I5*(b14+bi5+f1n) = 0 143 | 144 | ~ C1 + C2 + C3 + C4 + C5 + O + I1 + I2 + I3 + I4 + I5 + I6 = 1 145 | } 146 | 147 | PROCEDURE rates(v(mV) ) 148 | { 149 | LOCAL qt 150 | qt = 2.3^((celsius-37)/10) 151 | 152 | f01 = qt * 4 * alpha * exp(v/x1) 153 | f02 = qt * 3 * alpha * exp(v/x1) 154 | f03 = qt * 2 * alpha * exp(v/x1) 155 | f04 = qt * 1 * alpha * exp(v/x1) 156 | f0O = qt * gamma 157 | f11 = qt * 4 * alpha * alfac * exp(v/x1) 158 | f12 = qt * 3 * alpha * alfac * exp(v/x1) 159 | f13 = qt * 2 * alpha * alfac * exp(v/x1) 160 | f14 = qt * 1 * alpha * alfac * exp(v/x1) 161 | f1n = qt * gamma 162 | fi1 = qt * Con 163 | fi2 = qt * Con * alfac 164 | fi3 = qt * Con * alfac^2 165 | fi4 = qt * Con * alfac^3 166 | fi5 = qt * Con * alfac^4 167 | fin = qt * Oon 168 | 169 | b01 = qt * 1 * beta * exp(v/x2) 170 | b02 = qt * 2 * beta * exp(v/x2) 171 | b03 = qt * 3 * beta * exp(v/x2) 172 | b04 = qt * 4 * beta * exp(v/x2) 173 | b0O = qt * 
delta 174 | b11 = qt * 1 * beta * exp(v/x2) / btfac 175 | b12 = qt * 2 * beta * exp(v/x2) / btfac 176 | b13 = qt * 3 * beta * exp(v/x2) / btfac 177 | b14 = qt * 4 * beta * exp(v/x2) / btfac 178 | b1n = qt * delta 179 | bi1 = qt * Coff 180 | bi2 = qt * Coff / (btfac) 181 | bi3 = qt * Coff / (btfac^2) 182 | bi4 = qt * Coff / (btfac^3) 183 | bi5 = qt * Coff / (btfac^4) 184 | bin = qt * Ooff 185 | } 186 | 187 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/Nap.mod: -------------------------------------------------------------------------------- 1 | :Reference : Modeled according to kinetics derived from Magistretti & Alonso 1999 2 | :Comment: corrected rates using q10 = 2.3, target temperature 34, orginal 21 3 | 4 | NEURON { 5 | SUFFIX Nap 6 | USEION na READ ena WRITE ina 7 | RANGE gbar, g, ina 8 | } 9 | 10 | UNITS { 11 | (S) = (siemens) 12 | (mV) = (millivolt) 13 | (mA) = (milliamp) 14 | } 15 | 16 | PARAMETER { 17 | gbar = 0.00001 (S/cm2) 18 | } 19 | 20 | ASSIGNED { 21 | v (mV) 22 | ena (mV) 23 | ina (mA/cm2) 24 | g (S/cm2) 25 | celsius (degC) 26 | mInf 27 | hInf 28 | hTau 29 | hAlpha 30 | hBeta 31 | } 32 | 33 | STATE { 34 | h 35 | } 36 | 37 | BREAKPOINT { 38 | SOLVE states METHOD cnexp 39 | rates() 40 | g = gbar*mInf*h 41 | ina = g*(v-ena) 42 | } 43 | 44 | DERIVATIVE states { 45 | rates() 46 | h' = (hInf-h)/hTau 47 | } 48 | 49 | INITIAL{ 50 | rates() 51 | h = hInf 52 | } 53 | 54 | PROCEDURE rates(){ 55 | LOCAL qt 56 | qt = 2.3^((celsius-21)/10) 57 | 58 | UNITSOFF 59 | mInf = 1.0/(1+exp((v- -52.6)/-4.6)) : assuming instantaneous activation as modeled by Magistretti and Alonso 60 | 61 | hInf = 1.0/(1+exp((v- -48.8)/10)) 62 | hAlpha = 2.88e-6 * vtrap(v + 17, 4.63) 63 | hBeta = 6.94e-6 * vtrap(-(v + 64.4), 2.63) 64 | 65 | hTau = (1/(hAlpha + hBeta))/qt 66 | UNITSON 67 | } 68 | 69 | FUNCTION vtrap(x, y) { : Traps for 0 in denominator of rate equations 70 | 
UNITSOFF 71 | if (fabs(x / y) < 1e-6) { 72 | vtrap = y * (1 - x / y / 2) 73 | } else { 74 | vtrap = x / (exp(x / y) - 1) 75 | } 76 | UNITSON 77 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/SK.mod: -------------------------------------------------------------------------------- 1 | : SK-type calcium-activated potassium current 2 | : Reference : Kohler et al. 1996 3 | 4 | NEURON { 5 | SUFFIX SK 6 | USEION k READ ek WRITE ik 7 | USEION ca READ cai 8 | RANGE gbar, g, ik 9 | } 10 | 11 | UNITS { 12 | (mV) = (millivolt) 13 | (mA) = (milliamp) 14 | (mM) = (milli/liter) 15 | } 16 | 17 | PARAMETER { 18 | v (mV) 19 | gbar = .000001 (mho/cm2) 20 | zTau = 1 (ms) 21 | ek (mV) 22 | cai (mM) 23 | } 24 | 25 | ASSIGNED { 26 | zInf 27 | ik (mA/cm2) 28 | g (S/cm2) 29 | } 30 | 31 | STATE { 32 | z FROM 0 TO 1 33 | } 34 | 35 | BREAKPOINT { 36 | SOLVE states METHOD cnexp 37 | g = gbar * z 38 | ik = g * (v - ek) 39 | } 40 | 41 | DERIVATIVE states { 42 | rates(cai) 43 | z' = (zInf - z) / zTau 44 | } 45 | 46 | PROCEDURE rates(ca(mM)) { 47 | if(ca < 1e-7){ 48 | ca = ca + 1e-07 49 | } 50 | zInf = 1/(1 + (0.00043 / ca)^4.8) 51 | } 52 | 53 | INITIAL { 54 | rates(cai) 55 | z = zInf 56 | } 57 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/mechanisms/modfiles/vecevent.mod: -------------------------------------------------------------------------------- 1 | : Vector stream of events 2 | 3 | NEURON { 4 | ARTIFICIAL_CELL VecStim 5 | } 6 | 7 | ASSIGNED { 8 | index 9 | etime (ms) 10 | space 11 | } 12 | 13 | INITIAL { 14 | index = 0 15 | element() 16 | if (index > 0) { 17 | net_send(etime - t, 1) 18 | } 19 | } 20 | 21 | NET_RECEIVE (w) { 22 | if (flag == 1) { 23 | net_event(t) 24 | element() 25 | if (index > 0) { 26 | net_send(etime - t, 1) 27 | } 28 | } 29 | } 30 | 31 | VERBATIM 32 | extern double* 
vector_vec(); 33 | extern int vector_capacity(); 34 | extern void* vector_arg(); 35 | ENDVERBATIM 36 | 37 | PROCEDURE element() { 38 | VERBATIM 39 | { void* vv; int i, size; double* px; 40 | i = (int)index; 41 | if (i >= 0) { 42 | vv = *((void**)(&space)); 43 | if (vv) { 44 | size = vector_capacity(vv); 45 | px = vector_vec(vv); 46 | if (i < size) { 47 | etime = px[i]; 48 | index += 1.; 49 | }else{ 50 | index = -1.; 51 | } 52 | }else{ 53 | index = -1.; 54 | } 55 | } 56 | } 57 | ENDVERBATIM 58 | } 59 | 60 | PROCEDURE play() { 61 | VERBATIM 62 | void** vv; 63 | vv = (void**)(&space); 64 | *vv = (void*)0; 65 | if (ifarg(1)) { 66 | *vv = vector_arg(1); 67 | } 68 | ENDVERBATIM 69 | } 70 | 71 | 72 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/synaptic_models/AMPA_ExcToExc.json: -------------------------------------------------------------------------------- 1 | { 2 | "level_of_detail": "exp2syn", 3 | "tau1": 1.0, 4 | "tau2": 3.0, 5 | "erev": 0.0 6 | } 7 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/synaptic_models/AMPA_ExcToInh.json: -------------------------------------------------------------------------------- 1 | { 2 | "level_of_detail": "exp2syn", 3 | "tau1": 0.1, 4 | "tau2": 0.5, 5 | "erev": 0.0 6 | } 7 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/synaptic_models/GABA_InhToExc.json: -------------------------------------------------------------------------------- 1 | { 2 | "level_of_detail": "exp2syn", 3 | "tau1": 2.7, 4 | "tau2": 15.0, 5 | "erev": -70.0 6 | } 7 | 8 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/synaptic_models/GABA_InhToInh.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "level_of_detail": "exp2syn", 3 | "tau1": 0.2, 4 | "tau2": 8.0, 5 | "erev": -70.0 6 | } 7 | 8 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/synaptic_models/instanteneousExc.json: -------------------------------------------------------------------------------- 1 | { 2 | "level_of_detail": "instanteneous", 3 | "sign": 1 4 | } 5 | 6 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/components/synaptic_models/instanteneousInh.json: -------------------------------------------------------------------------------- 1 | { 2 | "level_of_detail": "instanteneous", 3 | "sign": -1 4 | } 5 | 6 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "manifest": { 3 | "$BASE_DIR": "${configdir}", 4 | "$OUTPUT_DIR": "$BASE_DIR/output", 5 | "$INPUT_DIR": "$BASE_DIR/network/source_input", 6 | "$NETWORK_DIR": "$BASE_DIR/network", 7 | "$COMPONENT_DIR": "${configdir}/components", 8 | "$MECHANISMS_DIR": "${configdir}/components/mechanisms" 9 | }, 10 | 11 | "run": { 12 | "tstop": 3000.0, 13 | "dt": 0.1, 14 | "dL": 20.0, 15 | "spike_threshold": -15, 16 | "nsteps_block": 5000, 17 | "overwrite_output_dir": true 18 | }, 19 | 20 | "target_simulator":"NEURON", 21 | 22 | "conditions": { 23 | "celsius": 34.0, 24 | "v_init": -80 25 | }, 26 | 27 | "inputs": { 28 | "spike_trains": { 29 | "input_type": "spikes", 30 | "module": "h5", 31 | "input_file": "$INPUT_DIR/poission_input_spk_train.h5", 32 | "node_set": "inputNetwork" 33 | } 34 | }, 35 | 36 | "output":{ 37 | "log_file": "$OUTPUT_DIR/log.txt", 38 | "output_dir": "$OUTPUT_DIR", 39 | "spikes_file": 
"$OUTPUT_DIR/spikes.h5", 40 | "spikes_file_csv": "$OUTPUT_DIR/spikes.csv", 41 | "spikes_sort_order": "time" 42 | }, 43 | 44 | "components": { 45 | "morphologies_dir": "$COMPONENT_DIR/biophysical/morphology", 46 | "synaptic_models_dir": "$COMPONENT_DIR/synaptic_models", 47 | "mechanisms_dir":"$MECHANISMS_DIR", 48 | "biophysical_neuron_models_dir": "$COMPONENT_DIR/biophysical/electrophysiology", 49 | "point_neuron_models_dir": "$COMPONENT_DIR/intfire" 50 | }, 51 | 52 | "node_sets": { 53 | "bio_nodes": {"model_type": "biophysical"} 54 | }, 55 | 56 | "reports": { 57 | "calcium_concentration": { 58 | "cells": "bio_nodes", 59 | "variable_name": "cai", 60 | "module": "membrane_report", 61 | "file_name": "$OUTPUT_DIR/cell_vars.h5", 62 | "sections": "soma", 63 | "enabled": true 64 | }, 65 | 66 | "membrane_potential": { 67 | "cells": "bio_nodes", 68 | "variable_name": "v", 69 | "module": "membrane_report", 70 | "file_name": "$OUTPUT_DIR/cell_vars.h5", 71 | "sections": "soma", 72 | "enabled": true 73 | }, 74 | 75 | "ecp": { 76 | "cells": [0, 1, 2, 3], 77 | "variable_name": "v", 78 | "module": "extracellular", 79 | "electrode_positions": "$COMPONENT_DIR/electrodes/single_electrode.csv", 80 | "ecp_file": "$OUTPUT_DIR/ecp.h5", 81 | "electrode_channels": "all", 82 | "contributions_dir": "$OUTPUT_DIR/ecp_contributions" 83 | } 84 | }, 85 | 86 | "networks": { 87 | "nodes": [ 88 | { 89 | "nodes_file": "$NETWORK_DIR/recurrent_network/nodes.h5", 90 | "node_types_file": "$NETWORK_DIR/recurrent_network/node_types.csv" 91 | }, 92 | { 93 | "nodes_file": "$NETWORK_DIR/source_input/nodes.h5", 94 | "node_types_file": "$NETWORK_DIR/source_input/node_types.csv" 95 | } 96 | ], 97 | 98 | "edges": [ 99 | { 100 | "edges_file": "$NETWORK_DIR/recurrent_network/edges.h5", 101 | "edge_types_file": "$NETWORK_DIR/recurrent_network/edge_types.csv" 102 | }, 103 | { 104 | "edges_file": "$NETWORK_DIR/source_input/edges.h5", 105 | "edge_types_file": "$NETWORK_DIR/source_input/edge_types.csv" 106 | } 107 | ] 
108 | } 109 | } 110 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/run_bionet.py: -------------------------------------------------------------------------------- 1 | from bmtk.simulator import bionet 2 | 3 | 4 | conf = bionet.Config.from_json('config.json', validate=True) 5 | conf.build_env() 6 | graph = bionet.BioNetwork.from_config(conf) 7 | sim = bionet.BioSimulator.from_config(conf, network=graph) 8 | sim.run() 9 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/schematics_png/External_input_connected.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/biophysical_notebook/schematics_png/External_input_connected.png -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/schematics_png/External_input_created.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/biophysical_notebook/schematics_png/External_input_created.png -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/schematics_png/Full_figure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/biophysical_notebook/schematics_png/Full_figure.png -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/schematics_png/Neurons_created.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/biophysical_notebook/schematics_png/Neurons_created.png -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/schematics_png/Neurons_created_figure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/biophysical_notebook/schematics_png/Neurons_created_figure.png -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/schematics_png/Recurrent_connected.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/biophysical_notebook/schematics_png/Recurrent_connected.png -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/schematics_png/Recurrent_connected_figure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/biophysical_notebook/schematics_png/Recurrent_connected_figure.png -------------------------------------------------------------------------------- /DynamicBrain/Modeling/biophysical_notebook/schematics_png/full_network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/biophysical_notebook/schematics_png/full_network.png 
-------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/472363762_fit.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 44.9, 4 | "C_m": 239.0, 5 | "t_ref": 3.0, 6 | "E_L": -78.0, 7 | "V_th": -43.0, 8 | "V_reset": -55.0 9 | } 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/472363762_point.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 44.9, 4 | "C_m": 239.0, 5 | "t_ref": 3.0, 6 | "E_L": -78.0, 7 | "V_th": -43.0, 8 | "V_reset": -55.0 9 | } 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/472912177_fit.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 22.2, 4 | "C_m": 180.0, 5 | "t_ref": 3.0, 6 | "E_L": -82.0, 7 | "V_th": -35.0, 8 | "V_reset": -50.0 9 | } 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/472912177_point.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 22.2, 4 | "C_m": 180.0, 5 | "t_ref": 3.0, 6 | "E_L": -82.0, 7 | "V_th": -35.0, 8 | "V_reset": -50.0 9 | } 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/473862421_point.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 12.5, 4 | "C_m": 78.0, 5 | "t_ref": 3.0, 6 | "E_L": -73.0, 7 | "V_th": -37.0, 8 | "V_reset": -55.0 9 | } 10 | 
-------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/473863035_point.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 22.1, 4 | "C_m": 117.0, 5 | "t_ref": 3.0, 6 | "E_L": -78.0, 7 | "V_th": -47.0, 8 | "V_reset": -50.0 9 | } 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/473863510_point.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 11.5, 4 | "C_m": 53.0, 5 | "t_ref": 3.0, 6 | "E_L": -72.0, 7 | "V_th": -25.0, 8 | "V_reset": -50.0 9 | } 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/IntFire1_exc_1.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 24.0, 4 | "C_m": 120.0, 5 | "t_ref": 3.0, 6 | "E_L": -75.0, 7 | "V_th": -37.0, 8 | "V_reset": -53.0 9 | } 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/IntFire1_exc_fit.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 24.0, 4 | "C_m": 120.0, 5 | "t_ref": 3.0, 6 | "E_L": -75.0, 7 | "V_th": -37.0, 8 | "V_reset": -53.0 9 | } 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/IntFire1_exc_point.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 24.0, 4 | "C_m": 120.0, 5 | "t_ref": 3.0, 6 | "E_L": -75.0, 7 | "V_th": -37.0, 8 | "V_reset": -53.0 9 | } 10 | 
-------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/IntFire1_inh_1.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 7.0, 4 | "C_m": 50.0, 5 | "t_ref": 3.0, 6 | "E_L": -77.0, 7 | "V_th": -36.0, 8 | "V_reset": -52.0 9 | } 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/IntFire1_inh_fit.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 7.0, 4 | "C_m": 50.0, 5 | "t_ref": 3.0, 6 | "E_L": -77.0, 7 | "V_th": -36.0, 8 | "V_reset": -52.0 9 | } 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/IntFire1_inh_point.json: -------------------------------------------------------------------------------- 1 | { 2 | "I_e": 0.0, 3 | "tau_m": 7.0, 4 | "C_m": 50.0, 5 | "t_ref": 3.0, 6 | "E_L": -77.0, 7 | "V_th": -36.0, 8 | "V_reset": -52.0 9 | } 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/cell_models/filter_point.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/synaptic_models/AMPA_ExcToExc.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/synaptic_models/AMPA_ExcToInh.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | 
-------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/synaptic_models/ExcToExc.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/synaptic_models/ExcToInh.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/synaptic_models/GABA_InhToExc.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | 4 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/synaptic_models/GABA_InhToInh.json: -------------------------------------------------------------------------------- 1 | { 2 | "level_of_detail": "exp2syn", 3 | "tau1": 0.2, 4 | "tau2": 8.0, 5 | "erev": -70.0 6 | } 7 | 8 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/synaptic_models/InhToExc.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | 4 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/synaptic_models/InhToInh.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | 4 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/synaptic_models/instanteneousExc.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | 4 | 
-------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/components/synaptic_models/instanteneousInh.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | 4 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "manifest": { 3 | "$BASE_DIR": "${configdir}", 4 | "$OUTPUT_DIR": "$BASE_DIR/output", 5 | "$INPUT_DIR": "$BASE_DIR/network/source_input", 6 | "$NETWORK_DIR": "$BASE_DIR/network", 7 | "$COMPONENT_DIR": "$BASE_DIR/components" 8 | }, 9 | 10 | "run": { 11 | "tstop": 3000.0, 12 | "dt": 0.001, 13 | "dL": 20.0, 14 | "spike_threshold": -15, 15 | "nsteps_block": 5000, 16 | "overwrite_output_dir": true 17 | }, 18 | 19 | "target_simulator":"NEST", 20 | 21 | "conditions": { 22 | "celsius": 34.0, 23 | "v_init": -80 24 | }, 25 | 26 | "inputs": { 27 | "spike_trains": { 28 | "input_type": "spikes", 29 | "module": "h5", 30 | "input_file": "$INPUT_DIR/poission_input_spk_train.h5", 31 | "node_set": "inputNetwork" 32 | } 33 | }, 34 | 35 | "output":{ 36 | "log_file": "$OUTPUT_DIR/log.txt", 37 | "output_dir": "$OUTPUT_DIR", 38 | "spikes_file": "$OUTPUT_DIR/spikes.h5", 39 | "spikes_file_csv": "$OUTPUT_DIR/spikes.csv", 40 | "spikes_sort_order": "time" 41 | }, 42 | 43 | "components": { 44 | "point_neuron_models_dir": "$COMPONENT_DIR/cell_models", 45 | "synaptic_models_dir": "$COMPONENT_DIR/synaptic_models" 46 | }, 47 | 48 | "node_sets": { 49 | "point_nodes": {"model_type": "point_process"} 50 | }, 51 | 52 | "networks": { 53 | "nodes": [ 54 | { 55 | "nodes_file": "$NETWORK_DIR/recurrent_network/nodes.h5", 56 | "node_types_file": "$NETWORK_DIR/recurrent_network/node_types.csv" 57 | }, 58 | { 59 | "nodes_file": "$NETWORK_DIR/source_input/nodes.h5", 60 | "node_types_file": 
"$NETWORK_DIR/source_input/node_types.csv" 61 | } 62 | ], 63 | 64 | "edges": [ 65 | { 66 | "edges_file": "$NETWORK_DIR/recurrent_network/edges.h5", 67 | "edge_types_file": "$NETWORK_DIR/recurrent_network/edge_types.csv" 68 | }, 69 | { 70 | "edges_file": "$NETWORK_DIR/source_input/edges.h5", 71 | "edge_types_file": "$NETWORK_DIR/source_input/edge_types.csv" 72 | } 73 | ] 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/recurrent_network/edge_types.csv: -------------------------------------------------------------------------------- 1 | edge_type_id target_query source_query syn_weight dynamics_params distance_range delay target_sections model_template 2 | 100 model_type=='biophysical'&ei=='i' ei=='i' 50.0 InhToInh.json "[0.0, 1e+20]" 2.0 "['somatic', 'basal']" static_synapse 3 | 101 model_type=='point_process'&ei=='i' ei=='i' 50.0 instanteneousInh.json NULL 2.0 NULL static_synapse 4 | 102 level_of_detail=='biophysical'&ei=='e' ei=='i' 50.0 InhToExc.json "[0.0, 50.0]" 2.0 "['somatic', 'basal', 'apical']" static_synapse 5 | 103 level_of_detail=='intfire'&ei=='e' ei=='i' 50.0 instanteneousInh.json NULL 2.0 NULL static_synapse 6 | 104 pop_name=='PV1' ei=='e' 30.0 ExcToInh.json "[0.0, 1e+20]" 2.0 "['somatic', 'basal']" static_synapse 7 | 105 pop_name=='LIF_inh' ei=='e' 50.0 instanteneousExc.json NULL 2.0 NULL static_synapse 8 | 106 pop_name=='Scnn1a' ei=='e' 50.0 ExcToExc.json "[30.0, 150.0]" 2.0 "['basal', 'apical']" static_synapse 9 | 107 pop_name=='LIF_exc' ei=='e' 50.0 instanteneousExc.json NULL 2.0 NULL static_synapse 10 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/recurrent_network/edges.h5: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/pointnet_notebook/network/recurrent_network/edges.h5 -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/recurrent_network/node_types.csv: -------------------------------------------------------------------------------- 1 | node_type_id ei model_processing pop_name model_template model_type dynamics_params rotation_angle_zaxis 2 | 100 i NULL PV1 nest:iaf_psc_alpha point_process 472912177_fit.json -2.53 3 | 101 e NULL Scnn1a nest:iaf_psc_alpha point_process 472363762_fit.json -3.646878266 4 | 102 i NULL LIF_inh nest:iaf_psc_alpha point_process IntFire1_inh_1.json NULL 5 | 103 e NULL LIF_exc nest:iaf_psc_alpha point_process IntFire1_exc_1.json NULL 6 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/recurrent_network/nodes.csv: -------------------------------------------------------------------------------- 1 | node_id node_type_id x_soma y_soma z_soma rotation_angle_yaxis pop_name ei 2 | 0 10002 0.59970086791473476 0.7669456700222288 0.63342296667967057 2.8855805437712054 PV1 i 3 | 1 10002 0.64573574296980885 0.41286627582336111 0.89823631588142072 2.1657588793230116 PV1 i 4 | 2 10001 0.1675878347205868 0.30591729535179824 0.31342078710130283 5.1876700684739037 Scnn1a e 5 | 3 10001 0.71617645939637709 0.68016880563220883 0.70761584532823629 1.6431578525680703 Scnn1a e 6 | 4 90002 0.13093610138425726 0.48812555606917907 0.93341052317501738 2.258221401812377 LIF_inh i 7 | 5 90002 0.81237871087206437 0.64678320267548151 0.91214503640257605 1.5857854835635639 LIF_inh i 8 | 6 90001 0.9188885897309842 0.87509533743231505 0.74657421524993361 1.0660517798843969 LIF_exc e 9 | 7 90001 0.087999125302931192 0.6027056373291817 0.92473051991482436 6.2787411022712538 LIF_exc e 10 | 
-------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/recurrent_network/nodes.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/pointnet_notebook/network/recurrent_network/nodes.h5 -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/source_input/edge_types.csv: -------------------------------------------------------------------------------- 1 | edge_type_id target_query source_query syn_weight dynamics_params distance_range delay target_sections model_template 2 | 100 pop_name=='Scnn1a' * 100.0 ExcToExc.json "[0.0, 150.0]" 0.1 "['basal', 'apical']" static_synapse 3 | 101 pop_name=='LIF_exc' * 50.0 instanteneousExc.json NULL 0.1 NULL static_synapse 4 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/source_input/edges.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/pointnet_notebook/network/source_input/edges.h5 -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/source_input/input_edge_types.csv: -------------------------------------------------------------------------------- 1 | edge_type_id target_query source_query weight_max distance_range target_sections delay params_file set_params_function synapse_model weight_function 2 | 7 pop_name=='Scnn1a' * 25.0 "[0.0, 150.0]" "['basal', 'apical']" 2.0 AMPA_ExcToExc.json exp2syn static_synapse wmax 3 | 8 pop_name=='LIF_exc' * 50.0 2.0 instanteneousExc.json static_synapse wmax 4 | 
-------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/source_input/input_edges.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/pointnet_notebook/network/source_input/input_edges.h5 -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/source_input/input_node_types.csv: -------------------------------------------------------------------------------- 1 | node_type_id model_type params_file level_of_detail 2 | 1001 spike_generator filter_point.json filter 3 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/source_input/input_nodes.csv: -------------------------------------------------------------------------------- 1 | node_id node_type_id ei pop_name 2 | 0 1001 e input_filter 3 | 1 1001 e input_filter 4 | 2 1001 e input_filter 5 | 3 1001 e input_filter 6 | 4 1001 e input_filter 7 | 5 1001 e input_filter 8 | 6 1001 e input_filter 9 | 7 1001 e input_filter 10 | 8 1001 e input_filter 11 | 9 1001 e input_filter 12 | 10 1001 e input_filter 13 | 11 1001 e input_filter 14 | 12 1001 e input_filter 15 | 13 1001 e input_filter 16 | 14 1001 e input_filter 17 | 15 1001 e input_filter 18 | 16 1001 e input_filter 19 | 17 1001 e input_filter 20 | 18 1001 e input_filter 21 | 19 1001 e input_filter 22 | 20 1001 e input_filter 23 | 21 1001 e input_filter 24 | 22 1001 e input_filter 25 | 23 1001 e input_filter 26 | 24 1001 e input_filter 27 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/source_input/node_types.csv: -------------------------------------------------------------------------------- 1 | node_type_id 
model_type ei pop_name 2 | 100 virtual e input_filter 3 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/source_input/nodes.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/pointnet_notebook/network/source_input/nodes.h5 -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/source_input/poission_input_spk_train.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/pointnet_notebook/network/source_input/poission_input_spk_train.h5 -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/network/source_input/poisson_input_spk_train.nwb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/pointnet_notebook/network/source_input/poisson_input_spk_train.nwb -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/run_pointnet.py: -------------------------------------------------------------------------------- 1 | from bmtk.simulator import pointnet 2 | 3 | #import json 4 | #%matplotlib inline 5 | 6 | 7 | 8 | #import set_weights as wf 9 | 10 | # Some functions in modelingSDK for faster reading of the config files 11 | configure = pointnet.Config.from_json('config.json') 12 | 13 | # Reads and loads the config file 14 | net = pointnet.PointNetwork.from_config(configure) 15 | 16 | # This will not be required for future versions (will be 
optional). Allows users to have different 17 | # weight functions. Here we will just use the weight as is. See set_weights.py if interested of another example. 18 | #net.add_weight_function(wf.wmax) 19 | 20 | # Create network for NEST. Can just give the configure and graph - also possible for biophysical networks. 21 | sim = pointnet.PointSimulator.from_config(configure, net) 22 | sim.run() 23 | print 'done' 24 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/pointnet_notebook/set_weights.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | 4 | def gaussianLL(tar_prop,src_prop,con_prop): 5 | src_tuning = src_prop['tuning_angle'] 6 | tar_tuning = tar_prop['tuning_angle'] 7 | 8 | w0 = con_prop["weight_max"] 9 | sigma = con_prop["weight_sigma"] 10 | 11 | delta_tuning = abs(abs(abs(180.0 - abs(float(tar_tuning) - float(src_tuning)) % 360.0) - 90.0) - 90.0) 12 | weight = w0*math.exp(-(delta_tuning / sigma) ** 2) 13 | 14 | return weight 15 | 16 | 17 | 18 | def wmax(tar_prop, src_prop, con_prop, nsyn): 19 | return con_prop["weight_max"]*nsyn 20 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/components/pop_models/excitatory_pop.json: -------------------------------------------------------------------------------- 1 | { 2 | "tau_m": 0.0429, 3 | "record": true, 4 | "v_min": -0.05, 5 | "v_max": 0.02, 6 | "dv": 0.0001, 7 | "update_method": "gmres", 8 | "approx_order": null 9 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/components/pop_models/filter_pop.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/components/pop_models/inhibitory_pop.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "tau_m": 0.0299, 3 | "record": true, 4 | "v_min": -0.05, 5 | "v_max": 0.02, 6 | "dv": 0.0001, 7 | "update_method": "gmres", 8 | "approx_order": null 9 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/components/synaptic_models/ExcToExc.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/components/synaptic_models/ExcToInh.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/components/synaptic_models/InhToExc.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | 4 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/components/synaptic_models/InhToInh.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | 4 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/config.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 | "run": { 4 | "tstop": 3.0, 5 | "dt": 0.0002, 6 | "overwrite_output_dir": true 7 | }, 8 | 9 | "inputs": { 10 | "Ext_pop_rates": { 11 | "input_type": "csv", 12 | "module": "pop_rates", 13 | "rates": "${BASE_DIR}/input_rates.csv", 14 | "node_set": "Ext_input" 15 | } 16 | }, 17 | 18 | "output": { 19 | "rates_file": "$OUTPUT_DIR/spike_rates.txt", 20 | "log_file": "$OUTPUT_DIR/logging.txt" 21 | }, 22 | 23 | "target_simulator": "DiPDE", 24 | 25 | "components": { 26 | 
"population_models_dir": "$MODELS_DIR/pop_models", 27 | "synaptic_models_dir": "$MODELS_DIR/synaptic_models" 28 | }, 29 | 30 | 31 | "networks": { 32 | "nodes": [ 33 | { 34 | "nodes_file": "$NETWORK_DIR/recurrent_network/nodes.h5", 35 | "node_types_file": "$NETWORK_DIR/recurrent_network/node_types.csv" 36 | }, 37 | { 38 | "nodes_file": "$NETWORK_DIR/source_input/nodes.h5", 39 | "node_types_file": "$NETWORK_DIR/source_input/node_types.csv" 40 | } 41 | ], 42 | "edges": [ 43 | { 44 | "edges_file": "$NETWORK_DIR/recurrent_network/edges.h5", 45 | "edge_types_file": "$NETWORK_DIR/recurrent_network/edge_types.csv" 46 | }, 47 | { 48 | "edges_file": "$NETWORK_DIR/source_input/edges.h5", 49 | "edge_types_file": "$NETWORK_DIR/source_input/edge_types.csv" 50 | } 51 | ] 52 | }, 53 | 54 | "manifest": { 55 | "$BASE_DIR": "${configdir}", 56 | "$NETWORK_DIR": "$BASE_DIR/network", 57 | "$MODELS_DIR": "$BASE_DIR/components", 58 | "$OUTPUT_DIR": "$BASE_DIR/output" 59 | } 60 | } -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/input_rates.csv: -------------------------------------------------------------------------------- 1 | gid firing_rate 2 | 0 15.0 3 | 100 15.0 4 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/network/recurrent_network_v2/edge_types.csv: -------------------------------------------------------------------------------- 1 | edge_type_id target_query source_query weight delay nsyns params_file 2 | 0 pop_name=='excitatory' pop_name=='input_filter' 0.0015 0.002 11 InpToExc.json 3 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/network/recurrent_network_v2/node_types.csv: -------------------------------------------------------------------------------- 1 | node_type_id pop_name params_file 2 | 0 excitatory components_v2/pop_models/excitatory_pop.json 3 | 
filter_001 input_filter NA 4 | -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/schematics_png/DiPDE_ei_net.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/popnet_notebook/schematics_png/DiPDE_ei_net.png -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/schematics_png/ei_ext_pop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/popnet_notebook/schematics_png/ei_ext_pop.png -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/schematics_png/ei_ext_pop_conn1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/popnet_notebook/schematics_png/ei_ext_pop_conn1.png -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/schematics_png/ei_ext_pop_conn1and2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/popnet_notebook/schematics_png/ei_ext_pop_conn1and2.png -------------------------------------------------------------------------------- /DynamicBrain/Modeling/popnet_notebook/schematics_png/ei_pop.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Modeling/popnet_notebook/schematics_png/ei_pop.png -------------------------------------------------------------------------------- /DynamicBrain/Tutorials/tree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/DynamicBrain/Tutorials/tree.png -------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/biophysical_notebook/build_network.py: -------------------------------------------------------------------------------- 1 | from bmtk.builder.networks import NetworkBuilder 2 | 3 | 4 | def build_cortical_network(output_dir='network/recurrent_network'): 5 | def distance_connection_handler(source, target, d_max, nsyn_min, nsyn_max): 6 | """ Connect cells that are less than d_max apart with a random number of synapses in the 7 | interval [nsyn_min, nsyn_max) 8 | """ 9 | sid = source['node_id'] # Get source id 10 | tid = target['node_id'] # Get target id 11 | 12 | # Avoid self-connections. 13 | if (sid == tid): 14 | return None 15 | 16 | # first calculate euclidean distance between cells 17 | src_positions = np.array([source['x'], source['y'], source['z']]) 18 | trg_positions = np.array([target['x'], target['y'], target['z']]) 19 | separation = np.sqrt(np.sum(src_positions - trg_positions)**2 ) 20 | 21 | # drop the connection if nodes too far apart 22 | if separation >= d_max: 23 | return None 24 | 25 | # Add the number of synapses for every connection. 
26 | tmp_nsyn = random.randint(nsyn_min, nsyn_max) 27 | return tmp_nsyn 28 | 29 | 30 | #### Step 1: Figure out what types, and number of, different cells to use in our network #### 31 | # Number of cell models desired 32 | N_Scnn1a = 2 33 | N_PV1 = 2 34 | N_LIF_exc = 2 35 | N_LIF_inh = 2 36 | 37 | # Define all the cell models in a dictionary (note dictionaries within a dictionary) 38 | biophysical_models = { 39 | 'Scnn1a': { 40 | 'N': N_Scnn1a, 41 | 'ei': 'e', 42 | 'pop_name': 'Scnn1a', 43 | 'model_type': 'biophysical', 44 | 'model_template': 'ctdb:Biophys1.hoc', 45 | 'model_processing': 'aibs_perisomatic', 46 | 'morphology_file': 'Scnn1a-Tg3-Cre_Ai14_IVSCC_-177300.01.02.01_473845048_m.swc', 47 | 'dynamics_params': '472363762_fit.json', 48 | 'rotation_angle_zaxis': -3.646878266 49 | }, 50 | 'PV1': { 51 | 'N': N_PV1, 52 | 'ei': 'i', 53 | 'pop_name': 'PV1', 54 | 'model_type': 'biophysical', 55 | 'model_template': 'ctdb:Biophys1.hoc', 56 | 'model_processing': 'aibs_perisomatic', 57 | 'dynamics_params': '472912177_fit.json', 58 | 'morphology_file': 'Pvalb-IRES-Cre_Ai14_IVSCC_-176847.04.02.01_470522102_m.swc', 59 | 'rotation_angle_zaxis': -2.539551891 60 | } 61 | } 62 | 63 | # Define all the cell models in a dictionary. 
64 | LIF_models = { 65 | 'LIF_exc': { 66 | 'N': N_LIF_exc, 67 | 'ei': 'e', 68 | 'pop_name': 'LIF_exc', 69 | 'model_type': 'point_process', 70 | 'model_template': 'nrn:IntFire1', 71 | 'dynamics_params': 'IntFire1_exc_1.json' 72 | }, 73 | 'LIF_inh': { 74 | 'N': N_LIF_inh, 75 | 'ei': 'i', 76 | 'pop_name': 'LIF_inh', 77 | 'model_type': 'point_process', 78 | 'model_template': 'nrn:IntFire1', 79 | 'dynamics_params': 'IntFire1_inh_1.json' 80 | } 81 | } 82 | 83 | #### Step 2: Create NetworkBuidler object to build nodes and edges #### 84 | net = NetworkBuilder('Cortical') 85 | 86 | #### Step 3: Used add_nodes() method to add all our cells/cell-types 87 | for model in biophysical_models: 88 | # Build our biophysical cells 89 | params = biophysical_models[model] 90 | n_cells = params.pop('N') 91 | 92 | # We'll randomly assign positions 93 | positions = generate_random_positions(n_cells) 94 | 95 | # Use add_nodes to create a set of N cells for each cell-type 96 | net.add_nodes(N=n_cells, # Specify the numer of cells belonging to this set of nodes 97 | x=positions[:,0], y=positions[:, 1], z=positions[:, 2], 98 | rotation_angle_yaxis=np.random.uniform(0.0, 2*np.pi, n_cells), 99 | 100 | # The other parameters are shared by all cells of this set in the dictionary 101 | **params) # python shortcut for unrolling a dictionary 102 | 103 | for model in LIF_models: 104 | # Same thing as above but for our LIF type cells 105 | params = LIF_models[model].copy() 106 | 107 | # Number of cells for this model type 108 | n_cells = params.pop('N') 109 | 110 | # Precacluate positions, rotation angles for each N neurons in the population 111 | positions = generate_random_positions(n_cells) 112 | 113 | # Adds node populations 114 | net.add_nodes(N=n_cells , 115 | x=positions[:,0], y=positions[:, 1], z=positions[:, 2], 116 | rotation_angle_yaxis=np.random.uniform(0.0, 2*np.pi, n_cells), 117 | **params) 118 | 119 | 120 | #### Step 4: Used add_edges() to set our connections between cells #### 121 | 
cparameters = {'d_max': 160.0, # Maximum separation between nodes where connection allowed 122 | 'nsyn_min': 3, # If connection exist, minimum number of synapses 123 | 'nsyn_max': 7} # If connection exist, maximum number of synapses 124 | 125 | net.add_edges(source={'ei': 'i'}, # Select all inhibitory cells to apply this connection rule too 126 | target={'ei': 'i', 'model_type': 'biophysical'}, # for the target cells we will use inhibitory biophysical cells 127 | connection_rule=distance_connection_handler, 128 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 129 | syn_weight=0.03, 130 | distance_range=[0.0, 1e+20], 131 | target_sections=['somatic', 'basal'], 132 | delay=2.0, 133 | dynamics_params='GABA_InhToInh.json', 134 | model_template='exp2syn') 135 | 136 | # inhibitory --> point-inhibitory 137 | net.add_edges(source={'ei': 'i'}, target={'ei': 'i', 'model_type': 'point_process'}, 138 | connection_rule=distance_connection_handler, 139 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 140 | syn_weight=0.3, 141 | delay=2.0, 142 | dynamics_params='instanteneousInh.json') 143 | 144 | # inhibiotry --> biophysical-excitatory 145 | net.add_edges(source={'ei': 'i'}, target={'ei': 'e', 'model_type': 'biophysical'}, 146 | connection_rule=distance_connection_handler, 147 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 148 | syn_weight=0.3, 149 | distance_range=[0.0, 50.0], 150 | target_sections=['somatic', 'basal', 'apical'], 151 | delay=2.0, 152 | dynamics_params='GABA_InhToExc.json', 153 | model_template='exp2syn') 154 | 155 | # inhibitory --> point-excitatory 156 | net.add_edges(source={'ei': 'i'}, target={'ei': 'e', 'model_type': 'point_process'}, 157 | connection_rule=distance_connection_handler, 158 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 159 | syn_weight=0.4, 160 | delay=2.0, 161 | dynamics_params='instanteneousInh.json') 162 | 163 | # excitatory --> PV1 cells 164 | 
net.add_edges(source={'ei': 'e'}, target={'pop_name': 'PV1'}, 165 | connection_rule=distance_connection_handler, 166 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 167 | syn_weight=0.05, 168 | distance_range=[0.0, 1e+20], 169 | target_sections=['somatic', 'basal'], 170 | delay=2.0, 171 | dynamics_params='AMPA_ExcToInh.json', 172 | model_template='exp2syn') 173 | 174 | # excitatory --> LIF_inh 175 | net.add_edges(source={'ei': 'e'}, target={'pop_name': 'LIF_inh'}, 176 | connection_rule=distance_connection_handler, 177 | connection_params=cparameters, 178 | syn_weight=0.2, 179 | delay=2.0, 180 | dynamics_params='instanteneousExc.json') 181 | 182 | # excitatory --> Scnn1a 183 | net.add_edges(source={'ei': 'e'}, target={'pop_name': 'Scnn1a'}, 184 | connection_rule=distance_connection_handler, 185 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 186 | syn_weight=0.05, 187 | distance_range=[30.0, 150.0], 188 | target_sections=['basal', 'apical'], 189 | delay=2.0, 190 | dynamics_params='AMPA_ExcToExc.json', 191 | model_template='exp2syn') 192 | 193 | # excitatory --> LIF_exc 194 | net.add_edges(source={'ei': 'e'}, target={'pop_name': 'LIF_exc'}, 195 | connection_rule=distance_connection_handler, 196 | connection_params={'d_max': 160.0, 'nsyn_min': 3, 'nsyn_max': 7}, 197 | syn_weight=0.05, 198 | delay=2.0, 199 | dynamics_params='instanteneousExc.json') 200 | 201 | 202 | #### Step 5: Build and save the network #### 203 | net.build() 204 | net.save(output_dir=output_dir) 205 | return net 206 | 207 | 208 | def build_input_network(net, output_dir='network/source_input'): 209 | def select_source_cells(sources, target, N_syn=10): 210 | """ Note here that "sources" are given (not "source"). So the iterations occur through every target 211 | with all sources as potential inputs. Faster than before and better if will have common rules. 
212 | """ 213 | 214 | target_id = target.node_id 215 | source_ids = [s.node_id for s in sources] 216 | 217 | nsyns_ret = [N_syn]*len(source_ids) 218 | return nsyns_ret 219 | 220 | 221 | filter_models = { 222 | 'inputFilter': { 223 | 'N': 25, 224 | 'ei': 'e', 225 | 'pop_name': 'input_filter', 226 | 'model_type': 'virtual' 227 | } 228 | } 229 | 230 | inputNetwork = NetworkBuilder("inputNetwork") 231 | inputNetwork.add_nodes(**filter_models['inputFilter']) 232 | 233 | inputNetwork.add_edges(target=net.nodes(pop_name='Scnn1a'), 234 | iterator='all_to_one', 235 | connection_rule=select_source_cells, 236 | syn_weight=0.0007, 237 | distance_range=[0.0, 150.0], 238 | target_sections=['basal', 'apical'], 239 | delay=2.0, 240 | dynamics_params='AMPA_ExcToExc.json', 241 | model_template='exp2syn') 242 | 243 | inputNetwork.add_edges(target=net.nodes(pop_name='LIF_exc'), 244 | iterator='all_to_one', 245 | connection_rule=select_source_cells, 246 | syn_weight=0.07, 247 | delay=2.0, 248 | dynamics_params='instanteneousExc.json') 249 | 250 | inputNetwork.add_edges(target=net.nodes(pop_name='PV1'), 251 | iterator='all_to_one', 252 | connection_rule=select_source_cells, 253 | syn_weight=0.002, 254 | distance_range=[0.0, 1.0e+20], 255 | target_sections=['basal', 'somatic'], 256 | delay=2.0, 257 | dynamics_params='AMPA_ExcToInh.json', 258 | model_template='exp2syn') 259 | 260 | inputNetwork.add_edges(target=net.nodes(pop_name='LIF_inh'), 261 | iterator='all_to_one', 262 | connection_rule=select_source_cells, 263 | syn_weight=0.01, 264 | delay=2.0, 265 | dynamics_params='instanteneousExc.json') 266 | 267 | inputNetwork.build() 268 | inputNetwork.save(output_dir=output_dir) 269 | 270 | 271 | if __name__ == '__main__': 272 | net = build_cortical_network() 273 | build_input_network(net) 274 | -------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/biophysical_notebook/config.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "manifest": { 3 | "$BASE_DIR": "${configdir}", 4 | "$OUTPUT_DIR": "$BASE_DIR/output", 5 | "$INPUT_DIR": "$BASE_DIR/network/source_input", 6 | "$NETWORK_DIR": "$BASE_DIR/network", 7 | "$COMPONENT_DIR": "${configdir}/../../../Modeling/biophysical_notebook/components", 8 | "$MECHANISMS_DIR": "$COMPONENT_DIR/mechanisms" 9 | }, 10 | 11 | "run": { 12 | "tstop": 3000.0, 13 | "dt": 0.1, 14 | "dL": 20.0, 15 | "spike_threshold": -15, 16 | "nsteps_block": 5000, 17 | "overwrite_output_dir": true 18 | }, 19 | 20 | "target_simulator":"NEURON", 21 | 22 | "conditions": { 23 | "celsius": 34.0, 24 | "v_init": -80 25 | }, 26 | 27 | "inputs": { 28 | "spike_trains": { 29 | "input_type": "spikes", 30 | "module": "h5", 31 | "input_file": "$INPUT_DIR/poission_input_spk_train.h5", 32 | "node_set": "inputNetwork" 33 | } 34 | }, 35 | 36 | "output":{ 37 | "log_file": "$OUTPUT_DIR/log.txt", 38 | "output_dir": "$OUTPUT_DIR", 39 | "spikes_file": "$OUTPUT_DIR/spikes.h5", 40 | "spikes_file_csv": "$OUTPUT_DIR/spikes.csv", 41 | "spikes_sort_order": "time" 42 | }, 43 | 44 | "components": { 45 | "morphologies_dir": "$COMPONENT_DIR/biophysical/morphology", 46 | "synaptic_models_dir": "$COMPONENT_DIR/synaptic_models", 47 | "mechanisms_dir":"$MECHANISMS_DIR", 48 | "biophysical_neuron_models_dir": "$COMPONENT_DIR/biophysical/electrophysiology", 49 | "point_neuron_models_dir": "$COMPONENT_DIR/intfire" 50 | }, 51 | 52 | "node_sets": { 53 | "bio_nodes": {"model_type": "biophysical"} 54 | }, 55 | 56 | "reports": { 57 | "calcium_concentration": { 58 | "cells": "bio_nodes", 59 | "variable_name": "cai", 60 | "module": "membrane_report", 61 | "file_name": "$OUTPUT_DIR/cell_vars.h5", 62 | "sections": "soma", 63 | "enabled": true 64 | }, 65 | 66 | "membrane_potential": { 67 | "cells": "bio_nodes", 68 | "variable_name": "v", 69 | "module": "membrane_report", 70 | "file_name": "$OUTPUT_DIR/cell_vars.h5", 71 | "sections": 
"soma", 72 | "enabled": true 73 | }, 74 | 75 | "ecp": { 76 | "cells": [0, 1, 2, 3], 77 | "variable_name": "v", 78 | "module": "extracellular", 79 | "electrode_positions": "$COMPONENT_DIR/electrodes/single_electrode.csv", 80 | "ecp_file": "$OUTPUT_DIR/ecp.h5", 81 | "electrode_channels": "all", 82 | "contributions_dir": "$OUTPUT_DIR/ecp_contributions" 83 | } 84 | }, 85 | 86 | "networks": { 87 | "nodes": [ 88 | { 89 | "nodes_file": "$NETWORK_DIR/recurrent_network/nodes.h5", 90 | "node_types_file": "$NETWORK_DIR/recurrent_network/node_types.csv" 91 | }, 92 | { 93 | "nodes_file": "$NETWORK_DIR/source_input/nodes.h5", 94 | "node_types_file": "$NETWORK_DIR/source_input/node_types.csv" 95 | } 96 | ], 97 | 98 | "edges": [ 99 | { 100 | "edges_file": "$NETWORK_DIR/recurrent_network/edges.h5", 101 | "edge_types_file": "$NETWORK_DIR/recurrent_network/edge_types.csv" 102 | }, 103 | { 104 | "edges_file": "$NETWORK_DIR/source_input/edges.h5", 105 | "edge_types_file": "$NETWORK_DIR/source_input/edge_types.csv" 106 | } 107 | ] 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/biophysical_notebook/run_bionet.py: -------------------------------------------------------------------------------- 1 | from bmtk.simulator import bionet 2 | 3 | 4 | conf = bionet.Config.from_json('config.json', validate=True) 5 | conf.build_env() 6 | graph = bionet.BioNetwork.from_config(conf) 7 | sim = bionet.BioSimulator.from_config(conf, network=graph) 8 | sim.run() 9 | -------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/popnet_notebook/components/pop_models/excitatory_pop.json: -------------------------------------------------------------------------------- 1 | { 2 | "tau_m": 0.0429, 3 | "record": true, 4 | "v_min": -0.05, 5 | "v_max": 0.02, 6 | "dv": 0.0001, 7 | "update_method": "gmres", 8 | "approx_order": null 9 | } 
-------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/popnet_notebook/components/pop_models/filter_pop.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 | } -------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/popnet_notebook/components/pop_models/inhibitory_pop.json: -------------------------------------------------------------------------------- 1 | { 2 | "tau_m": 0.0299, 3 | "record": true, 4 | "v_min": -0.05, 5 | "v_max": 0.02, 6 | "dv": 0.0001, 7 | "update_method": "gmres", 8 | "approx_order": null 9 | } -------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/popnet_notebook/components/synaptic_models/ExcToExc.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | -------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/popnet_notebook/components/synaptic_models/ExcToInh.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/popnet_notebook/components/synaptic_models/InhToExc.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | 4 | -------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/popnet_notebook/components/synaptic_models/InhToInh.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | 4 | -------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/popnet_notebook/config.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 
| "run": { 4 | "tstop": 1.5, 5 | "dt": 0.002, 6 | "overwrite_output_dir": true 7 | }, 8 | 9 | "inputs": { 10 | "Ext_pop_rates": { 11 | "input_type": "csv", 12 | "module": "pop_rates", 13 | "rates": "${BASE_DIR}/input_rates.csv", 14 | "node_set": "Ext_input" 15 | } 16 | }, 17 | 18 | "output": { 19 | "rates_file": "$OUTPUT_DIR/spike_rates.txt", 20 | "log_file": "$OUTPUT_DIR/logging.txt" 21 | }, 22 | 23 | "target_simulator": "DiPDE", 24 | 25 | "components": { 26 | "population_models_dir": "$MODELS_DIR/pop_models", 27 | "synaptic_models_dir": "$MODELS_DIR/synaptic_models" 28 | }, 29 | 30 | 31 | "networks": { 32 | "nodes": [ 33 | { 34 | "nodes_file": "$NETWORK_DIR/recurrent_network/nodes.h5", 35 | "node_types_file": "$NETWORK_DIR/recurrent_network/node_types.csv" 36 | }, 37 | { 38 | "nodes_file": "$NETWORK_DIR/source_input/nodes.h5", 39 | "node_types_file": "$NETWORK_DIR/source_input/node_types.csv" 40 | } 41 | ], 42 | "edges": [ 43 | { 44 | "edges_file": "$NETWORK_DIR/recurrent_network/edges.h5", 45 | "edge_types_file": "$NETWORK_DIR/recurrent_network/edge_types.csv" 46 | }, 47 | { 48 | "edges_file": "$NETWORK_DIR/source_input/edges.h5", 49 | "edge_types_file": "$NETWORK_DIR/source_input/edge_types.csv" 50 | } 51 | ] 52 | }, 53 | 54 | "manifest": { 55 | "$BASE_DIR": "${configdir}", 56 | "$NETWORK_DIR": "$BASE_DIR/network", 57 | "$MODELS_DIR": "$BASE_DIR/components", 58 | "$OUTPUT_DIR": "$BASE_DIR/output" 59 | } 60 | } -------------------------------------------------------------------------------- /DynamicBrain/solutions/Modeling/popnet_notebook/input_rates.csv: -------------------------------------------------------------------------------- 1 | gid firing_rate 2 | 0 15.0 3 | 100 15.0 4 | -------------------------------------------------------------------------------- /Git/03 - Working with Github.md: -------------------------------------------------------------------------------- 1 | # git lesson 3: Working with Github 2 | 3 | This material assumes that you have 
worked through the previous lessons. At this point you should understand: 4 | 5 | * How to create a repository on your computer 6 | * Stage and commit changes to your repository 7 | * Create topic branches 8 | * Merge topic branches back to your master branch 9 | 10 | ## Overview: Why this (relatively complex) workflow? 11 | 12 | GitHub is an online code collaboration platform centered around `git`. This lesson shows you a particular way to use `git` and GitHub that is focused on collaboration. We are trying to solve a few problems here. 13 | 14 | 1. We want to contribute changes to a repository owned by someone else 15 | 2. We want to control when to use changes from that repository 16 | 3. We want to minimize nasty merge conflicts 17 | 18 | The rest of these instructions boil down to a few practices: 19 | 20 | 1. Work in a fork 21 | 2. Work in topic branches, not the master branch 22 | 2. Use pull requests 23 | 24 | Let's get started. 25 | 26 | ## Create a repository and copy it to your computer (forking and cloning) 27 | 28 | 29 | 30 | The first thing you should do is create a repository on GitHub. While you can always create an new repository, in this lesson we will be showing you how to collaborate with others on a single repository. You will do this by creating a copy of an existing repository. In `git` parlance, creating a copy of a repository is called `forking`. 31 | 32 | #### Fork a repository 33 | 34 | Do this: 35 | 36 | 1. Go here: [https://github.com/alleninstitute/swdb_2018_tools](https://github.com/alleninstitute/swdb_2018_tools) 37 | 2. Click the 'Fork' button. 38 | 3. If prompted, tell GitHub to clone the repository to your profile. 39 | 40 | You now have a copy of the `swdb_2018_tools` repository all to yourself! 41 | 42 | #### Clone your fork to your computer 43 | 44 | As before, we will be using `GitKraken` when using `git` on your computer. Now we want to make changes to the fork we just created, so let's bring it down to our computers. 
45 | 46 | 1. Open `GitKraken` 47 | 2. File => Clone Repo 48 | 3. Github.com 49 | 4. Choose a location on your computer to save the repository ("Where to clone to") 50 | 5. Browse to your fork (`/swdb_2018_tools`) 51 | 6. Click "Clone the repo!", then "Open Now!" 52 | 53 | ## The virtuous collaborative loop -- integrating changes and making your own 54 | 55 | You now have two copies of someone else's repository -- your fork on GitHub and the one on your computer. Those repositories all have a `master` branch. An important principle to remember: 56 | 57 | > **Leave the `master` branch alone**. 58 | 59 | > `master` is a shared resource, and it should always reflect the state of the primary repository's `master` branch 60 | 61 | Of course it's possible to work in the master branch directly, but you should prefer topic branches for two reasons: 62 | 63 | 1. What if you want to work on two different things at once? 64 | 2. Editing master directly creates a parallel history that is inconsistent with the primary repository. 65 | 66 | We'll now describe a process you can use to integrate others changes and make changes safely. But first... 67 | 68 | #### Tell GitKraken about AllenInstitute/swdb_2018_tools 69 | 70 | Right now your repository only knows about your fork on GitHub (`user_name/swdb_2018_tools`, AKA `origin`). In order to make changes from others, we need our repository to know where these changes are coming from. We only need to do this once. 71 | 72 | 1. Click the "+" in the "Remote" section on the left. 73 | 2. Paste in: https://github.com/alleninstitute/swdb_2018_tools 74 | 3. Accept the default name ("AllenInstitute") 75 | 76 | Now the `AllenInstitute` remote appears above your fork (`origin`) in the list below with its default branch (`master`). 77 | 78 | #### Loop Step 1: Pull changes from AllenInstitute to your computer 79 | 80 | 81 | 82 | Now we want to bring some changes from `AllenInstitute/master` down to your local master branch. 83 | 84 | 1. 
Right-click the `AllenInstitute` remote and click "Fetch AllenInstitute". This just checks Github for changes. 85 | 2. Right-click the `AllenInstitute/master` branch and choose "Fast-forward changes from AllenInstitute/master". 86 | 87 | That's it -- now you've incorporated changes from `AllenInstitute/master` to your local repository. You can now update your GitHub fork's master branch by clicking "Push". 88 | 89 | #### Loop Step 2: Create a topic branch and make a change 90 | 91 | 92 | 93 | Now we want to make some changes to this repository. Not the AllenInstitute copy (yet) -- just your local copy. 94 | 95 | Topic branches are great because they let you work on multiple things at the same time. In this case, they are necessary because remember: **don't touch the `master` branch**. So let's make our changes in a topic branch! 96 | 97 | 1. Click the 'Branch' icon at the top of the screen. Give it a cool name. 98 | 2. Make some changes that won't conflict. Leave GitKraken and create a file in the repo directory named after your Github user name. 99 | 3. GitKraken will notice the change -- click "View Change" in the upper right panel. 100 | 4. Mouse-over your new file and click "Stage File" 101 | 5. Type a commit message. 102 | 6. Click "Commit changes to 1 file" 103 | 104 | #### Loop Step 3: Push your branch to your fork on Github 105 | 106 | 107 | 108 | Our topic branch is ready, and we'd like to get our changes integrated into `AllenInstitute/master`. GitHub has a great facility for this, so we need to get your changes up to your GitHub fork. Remember: we always want `master` to be consistent with `AllenInstitute/master`, so we aren't going to merge the topic branch back into `local/master`. Instead, we are going to push your topic branch up to your fork and integrate it into `AllenInstitute/master` from there. 109 | 110 | 1. Right-click your branch, then click "push" 111 | 2. Name your branch on Github (use the default, which is the same name) 112 | 3. 
Click "Submit" 113 | 114 | Note: in GitKraken, when you click "push" you are pushing to `origin`, which is your fork on GitHub. 115 | 116 | #### Loop Step 4: Issue a pull request to AllenInstitute/master 117 | 118 | 119 | 120 | We have your topic branch up on your GitHub fork. Now we want to merge your changes into `AllenInstitute/master`. We ask for this via a "Pull Request": 121 | 122 | 1. Open Github to http://github.com/user_name/swdb_2018_tools 123 | 2. Github will notice your new branch. Click "Compare and Pull Request". 124 | 3. Write a short description. 125 | 4. Click "Create pull request" 126 | 5. **wait for the instructor to accept the pull request** 127 | 6. Click "delete branch" to delete your topic branch. 128 | 129 | Pull requests are great. We are working on a shared repository, so we really want to make sure that your changes are ready to integrate before pulling the trigger. Pull requests give everyone a mechanism to review and propose new changes before updating the `master` branch. 130 | 131 | 132 | 133 | #### Loop Step 5: Bring your own change back down to local/master 134 | 135 | 136 | 137 | Once your request has been approved, `AllenInstitute/master` now has your changes in it. Just bring your changes back down to `local/master` and we're done. 138 | 139 | 1. Check out `local/master` by double clicking on it. 140 | 2. Right-click the `AllenInstitute` remote and click "Fetch AllenInstitute". This just checks Github for changes. 141 | 3. Right-click the `AllenInstitute/master` branch and choose "Fast-forward changes from AllenInstitute/master". 142 | 4. Delete your topic branch: Right-click `cool_named_branch`, choose `Delete`. 143 | 144 | If you want to update your GitHub fork's master branch, just click "Push". 
145 | 146 | #### All together now 147 | 148 | 149 | -------------------------------------------------------------------------------- /Git/04 - Working with GitHub in the Cloud.md: -------------------------------------------------------------------------------- 1 | # git lesson 4: Working with GitHub in the Cloud 2 | 3 | This material assumes that you have worked through the previous lessons. At this point you should understand: 4 | 5 | * How to create a repository on your computer 6 | * Stage and commit changes to your repository 7 | * Create topic branches 8 | * Merge topic branches back to your master branch 9 | * Work on a shared repository with forks and pull requests 10 | 11 | This lesson is identical to lesson 3, but it teaches you how to perform the same operations from the command line. 12 | 13 | ## Overview: Why this (relatively complex) workflow? 14 | 15 | GitHub is an online code collaboration platform centered around `git`. This lesson shows you a particular way to use `git` and GitHub that is focused on collaboration. We are trying to solve a few problems here. 16 | 17 | 1. We want to contribute changes to a repository owned by someone else 18 | 2. We want to control when to use changes from that repository 19 | 3. We want to minimize nasty merge conflicts 20 | 21 | The rest of these instructions boil down to a few practices: 22 | 23 | 1. Work in a fork 24 | 2. Work in topic branches, not the master branch 25 | 3. Use pull requests 26 | 27 | Let's get started. 28 | 29 | ## Oh no I don't have a GUI 30 | 31 | Don't panic. These instructions replicate the exact workflow from lesson three, this time with the Jupyter terminal. 32 | 33 | ## Create a repository and copy it to your computer (forking and cloning) 34 | 35 | 36 | 37 | The first thing you should do is create a repository on GitHub. While you can always create a new repository, in this lesson we will be showing you how to collaborate with others on a single repository. 
You will do this by creating a copy of an existing repository. In `git` parlance, creating a copy of a repository is called `forking`. 38 | 39 | #### Fork a repository 40 | 41 | Do this: 42 | 43 | 1. Go here: [https://github.com/alleninstitute/swdb_2018_tools](https://github.com/alleninstitute/swdb_2018_tools) 44 | 2. Click the 'Fork' button. 45 | 3. If prompted, tell it to clone the repository to your profile. 46 | 47 | You now have a copy of the `swdb_2018_tools` repository all to yourself! 48 | 49 | #### Clone your fork to your computer (in the cloud!) 50 | 51 | Now we want to make changes to the fork we just created, so let's bring it down to our computers. Instead of GitKraken, we'll use the Jupyter Terminal. 52 | 53 | 1. Open the Jupyter Terminal ("new" => "terminal") 54 | 2. Copy the URL of the GitHub repository you want to clone to your clipboard. (e.g. https://github.com/your_user_name/swdb_2018_tools.git) 55 | 3. Clone the repo! 56 | ```bash 57 | $ cd ~/SageMaker/your_user_name/ # this is just for our AWS instances 58 | $ git clone https://github.com/your_user_name/swdb_2018_tools.git 59 | ``` 60 | 61 | ## The virtuous collaborative loop -- integrating changes and making your own 62 | 63 | You now have two copies of someone else's repository -- the one on GitHub and the one on your computer. Those repositories all have a `master` branch. An important principle to remember: 64 | 65 | > **Leave the `master` branch alone**. 66 | 67 | > `master` is a shared resource, and it should always reflect the state of the primary repository's `master` branch 68 | 69 | Of course it's possible to work in the master branch directly, but you should prefer topic branches for two reasons: 70 | 71 | 1. What if you want to work on two different things at once? 72 | 2. Editing master directly creates a parallel history that is inconsistent with the primary repository. 73 | We'll now describe a process you can use to integrate others changes and make changes safely. But first... 
74 | 75 | #### Tell `git` about AllenInstitute/swdb_2018_tools 76 | 77 | Right now your repository only knows about your fork (`your_user_name/swdb_2018_tools`). In order to incorporate changes from others, we need our repository to know where these changes are coming from. We only need to do this once. 78 | 79 | ```bash 80 | $ cd swdb_2018_tools # this is where your repo was cloned 81 | $ git remote add AllenInstitute https://github.com/alleninstitute/swdb_2018_tools 82 | ``` 83 | 84 | #### Loop Step 1: Pull changes from AllenInstitute to your computer 85 | 86 | 87 | 88 | Now we want to bring some changes from `AllenInstitute/master` down to your local master branch. 89 | 90 | ```bash 91 | $ git checkout master # let's make sure we're on the master branch 92 | $ git pull AllenInstitute master 93 | ``` 94 | 95 | That's it -- now you've incorporated changes from `AllenInstitute/master` to your local repository. You can now update the Github's copy of your fork's master branch by pushing it: 96 | 97 | ```bash 98 | $ git push origin master 99 | ``` 100 | 101 | #### Loop Step 2: Create a topic branch and make a change 102 | 103 | 104 | 105 | Now we want to make some changes to this repository. Not the AllenInstitute copy (yet) -- just your local copy. 106 | 107 | Topic branches are great because they let you work on multiple things at the same time. In this case, they are necessary because remember: **don't touch the `master` branch**. So let's make our changes in a topic branch! 108 | 109 | ```bash 110 | $ git checkout -b dyf_branch # create a new branch and check it out 111 | $ touch dyf.txt # create an empty file 112 | $ git add dyf.txt 113 | $ git commit -m "adding dyf.txt" 114 | ``` 115 | 116 | #### Loop Step 3: Push your branch to your fork on Github 117 | 118 | 119 | 120 | Our topic branch is ready, and we'd like to get our changes integrated into `AllenInstitute/master`. 
GitHub has a great facility for this, so we need to get your changes up to your GitHub fork. Remember: we always want `master` to be consistent with `AllenInstitute/master`, so we aren't going to merge the topic branch back into `local/master`. Instead, we are going to push your topic branch up to your fork and integrate it into `AllenInstitute/master` from there. 121 | 122 | ```bash 123 | $ git push origin dyf_branch 124 | ``` 125 | 126 | #### Loop Step 4: Issue a pull request to AllenInstitute/master 127 | 128 | 129 | 130 | We have your topic branch up on your GitHub fork. Now we want to merge your changes into `AllenInstitute/master`. We ask for this via a "Pull Request": 131 | 132 | 1. Open Github to http://github.com/your_user_name/swdb_2018_tools 133 | 2. Github will notice your new branch. Click "Compare and Pull Request". 134 | 3. Write a short description. 135 | 4. Click "Create pull request" 136 | 5. **wait for the instructor to accept the pull request** 137 | 6. Click "delete branch" to delete your topic branch. 138 | 139 | Pull requests are great. We are working on a shared repository, so we really want to make sure that your changes are ready to integrate before pulling the trigger. Pull requests give everyone a mechanism to review and propose new changes before updating the `master` branch. 140 | 141 | #### Loop Step 5: Bring your own change back down to local/master 142 | 143 | 144 | 145 | Once your request has been approved, `AllenInstitute/master` now has your changes in it. Just bring your changes back down to `local/master` and we're done. 
146 | 147 | ```bash 148 | $ git checkout master # just to be safe 149 | $ git pull AllenInstitute master # bring the merged changes down first 150 | $ git branch -d dyf_branch # now the branch is merged, so -d can delete it 151 | ``` 152 | 153 | You can now update GitHub's copy of your fork's master branch by pushing it: 154 | 155 | ```bash 156 | $ git push origin master 157 | ``` 158 | 159 | #### All together now 160 | 161 | 162 | 163 | ## Bonus Material: How do I install this package and use it in my AWS instance? 164 | 165 | You've successfully cloned and made changes to the repo, but it would be nice if you could import it and use it like a normal python package. 166 | 167 | ```bash 168 | $ source activate python2 # this is the "conda_python2" kernel in Jupyter 169 | $ pip install --user -e swdb_2018_tools/ 170 | ``` 171 | 172 | Now you can open up a Jupyter notebook, choose the `conda_python2` kernel, and import the repo! 173 | 174 | ```python 175 | >>> import swdb_2018_tools as stools 176 | ``` 177 | 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/Git/tiles_01.png -------------------------------------------------------------------------------- /Git/tiles_02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/Git/tiles_02.png -------------------------------------------------------------------------------- /Git/tiles_03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/Git/tiles_03.png -------------------------------------------------------------------------------- /Git/tiles_04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/Git/tiles_04.png -------------------------------------------------------------------------------- /Git/tiles_05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/Git/tiles_05.png -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Allen Institute Software License – This software license is the 2-clause BSD license 2 | plus clause a third clause that prohibits redistribution for commercial purposes without further permission. 3 | 4 | Copyright © 2018. Allen Institute. All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the 7 | following conditions are met: 8 | 9 | 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the 10 | following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the 13 | following disclaimer in the documentation and/or other materials provided with the distribution. 14 | 15 | 3. Redistributions for commercial purposes are not permitted without the Allen Institute’s written permission. 16 | For purposes of this license, commercial purposes is the incorporation of the Allen Institute's software into 17 | anything for which you will charge fees or other compensation. Contact terms@alleninstitute.org for commercial 18 | licensing opportunities. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 21 | INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 25 | WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 26 | USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | -------------------------------------------------------------------------------- /PythonBootcamp/00_Introduction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | " \n", 8 | "\n", 9 | "

Python Bootcamp

\n", 10 | "

August 18-19, 2018

" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "
\n", 18 | "

Introduction: What is Python?

\n", 19 | "\n", 20 | "

How does Python compare to other programming languages? What are its strengths and weaknesses? Why have we chosen Python for this course?\n", 21 | "\n", 22 | "

" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "
\n", 30 | "

1. Python is a *general purpose* programming language.

\n", 31 | "\n", 32 | "

Most programming languages used by scientists were designed from the beginning to handle numerical and scientific tasks:\n", 33 | "\n", 34 | " R, MATLAB, Igor, Mathematica, IDL\n", 35 | "\n", 36 | "

Advantages of specialized languages:\n", 37 | "\n", 38 | "

\n", 43 | "\n", 44 | "

**General-purpose languages such as Python are intended to be useful for any type of programming, regardless of the topic or functionality needed:**\n", 45 | "\n", 46 | " C, C++, Java, Python\n", 47 | "\n", 48 | "

Python is used in virtually every corner of the software development landscape--web sites, server infrastructure, user interfaces, device control, machine learning, etc. \n", 49 | "\n", 50 | "

Advantages of general-purpose languages:\n", 51 | "\n", 52 | "

\n", 58 | "\n", 59 | "
" 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "metadata": {}, 65 | "source": [ 66 | "
\n", 67 | "

2. Python is an *interpreted* programming language.

\n", 68 | "\n", 69 | "

Programming languages can be compiled, interpreted, or a hybrid of the two. \n", 70 | "\n", 71 | "

**Compiled languages** like C, C++, Java, and Julia take the program you write and convert it into optimized, machine-executable code. Often, compiled languages are both *faster to execute* and *more difficult to use*. \n", 72 | "\n", 73 | "

**Interpreted languages** like Python, MATLAB, Igor, and PHP use a pre-compiled interpreter to read your program code and execute it, one step at a time. Often, interpreted languages are *slower to execute* and *easier to use*.\n", 74 | "\n", 75 | "

**Question:** Is Python a slow language?
\n", 76 | "**Answer:** It depends on how you use it.\n", 77 | "\n", 78 | "

Ideally, we would like to have a language that is both fast to execute and easy to use, and Python (like many other languages) uses many different techniques to reduce the overhead incurred by being an interpreted language. We will learn about many of these throughout the day.\n", 79 | "\n", 80 | "

" 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "metadata": {}, 86 | "source": [ 87 | "
\n", 88 | "

3. Python is a community of developers

\n", 89 | "\n", 90 | "

Commercial development environments like MATLAB or Igor benefit from a monolithic community structure:\n", 91 | "\n", 92 | "

\n", 98 | "\n", 99 | "

Newcomers to Python are often overwhelmed by the vast and confusing landscape of communities, distributions, 3rd-party modules, and development tools. Never fear! The scientific Python community has organized around a central \"stack\" of tools that are well supported and maintained, and have become the de-facto standards in the field. We will guide you through these core tools before releasing you into the wild.\n", 100 | "\n", 101 | "

Scientific Python Stack ([scipy.org](scipy.org)):\n", 102 | "\n", 103 | " NumPy: N-dimensional array package\n", 104 | " SciPy: Fundamental library for scientific computing\n", 105 | " Matplotlib: Comprehensive 2D Plotting\n", 106 | " IPython/Jupyter: Enhanced Interactive Console\n", 107 | " Sympy: Symbolic mathematics\n", 108 | " pandas: Data structures & analysis\n", 109 | "\n", 110 | "

" 111 | ] 112 | }, 113 | { 114 | "cell_type": "markdown", 115 | "metadata": {}, 116 | "source": [ 117 | "
\n", 118 | "

4. A carefully designed, open-source programming language

\n", 119 | "\n", 120 | "

The older, commercial specialty languages come with baggage:\n", 121 | "\n", 122 | "

\n", 128 | "\n", 129 | "

Python is widely regarded as one of the easiest, most intuitive, and most readable programming languages. Its language design combined with its open-source license are major factors in its widespread success.\n", 130 | "\n", 131 | "

" 132 | ] 133 | }, 134 | { 135 | "cell_type": "markdown", 136 | "metadata": {}, 137 | "source": [ 138 | "
\n", 139 | "

Where to get help

\n", 140 | "\n", 141 | "

Python documentation (https://www.python.org/doc/)\n", 142 | "

\n", 146 | "\n", 147 | "

MATLAB-to-Python cheat sheets: \n", 148 | "

\n", 152 | "\n", 153 | "\n", 154 | "[Scientific python stack documentation: numpy, scipy, matplotlib](http://scipy.org/docs.html)\n", 155 | "\n", 156 | "[Stack Overflow](http://stackoverflow.com)\n", 157 | " \n", 158 | "
" 159 | ] 160 | } 161 | ], 162 | "metadata": { 163 | "kernelspec": { 164 | "display_name": "Python 2", 165 | "language": "python", 166 | "name": "python2" 167 | }, 168 | "language_info": { 169 | "codemirror_mode": { 170 | "name": "ipython", 171 | "version": 2 172 | }, 173 | "file_extension": ".py", 174 | "mimetype": "text/x-python", 175 | "name": "python", 176 | "nbconvert_exporter": "python", 177 | "pygments_lexer": "ipython2", 178 | "version": "2.7.13" 179 | } 180 | }, 181 | "nbformat": 4, 182 | "nbformat_minor": 0 183 | } 184 | -------------------------------------------------------------------------------- /PythonBootcamp/08_Development_Tools.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | " \n", 8 | "\n", 9 | "

Python Bootcamp

\n", 10 | "

August 18-19, 2018

" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "
\n", 18 | "

Python Development Tools

\n", 19 | "\n", 20 | "\n", 21 | "

Commercial tools like MATLAB and Igor come with a single standardized development environment. In contrast, Python is supported by a large and bewildering ecosystem of development tools. This offers us a great deal of flexibility. In this section we will present a few of our favorite tools and discuss their relative strengths and weaknesses.\n", 22 | "

" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "
\n", 30 | "

What tools do you need? That depends on what you are trying to do. Some environments are designed to streamline certain types of development. The general features we are looking for are:\n", 31 | "\n", 32 | "

\n", 39 | "\n", 40 | "
" 41 | ] 42 | }, 43 | { 44 | "cell_type": "markdown", 45 | "metadata": {}, 46 | "source": [ 47 | "
\n", 48 | "

A note about interactivity

\n", 49 | "\n", 50 | "

**Most general-purpose programming environments are non-interactive:**\n", 51 | "

    \n", 52 | "
  1. Write code\n", 53 | "
  2. Start program, execute code\n", 54 | "
  3. Stop program\n", 55 | "
  4. Repeat\n", 56 | "
\n", 57 | "\n", 58 | "

Important reasons for this:\n", 59 | "

\n", 63 | "\n", 64 | "

**Most scientific programming environments (Igor, Matlab, Mathematica, ...) prefer an interactive experience.**
\n", 65 | "Why? Data analysis is incremental.\n", 66 | "\n", 67 | "

\n", 71 | "\n", 72 | "

Interactivity means:\n", 73 | "\n", 74 | "

\n", 80 | "\n", 81 | "

**Python supports both types of environment.** The tools we will introduce fall into one category or the other, but all of them incorporate features from both categories.\n", 82 | "

" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "
\n", 90 | "

Editor + console

\n", 91 | "\n", 92 | "This is the simplest approach to code development. Use a nice text editor (sublime is a good one) that at least handles indentation. Save your code to a file, and run it from a command line like:\n", 93 | "\n", 94 | " > python myfile.py\n", 95 | " \n", 96 | "This approach falls into the \"non-interactive\" category, but there are many tools available to make it more interactive. For example, simply running python with the `-i` flag will cause it to begin an interactive session immediately after your script completes:\n", 97 | "\n", 98 | " > python -i myfile.py\n", 99 | " \n", 100 | "This allows you to inspect the variables generated by your program and experiment with new ideas.\n", 101 | "\n", 102 | "\n", 103 | "

**Pros:**\n", 104 | "

\n", 113 | "\n", 114 | "

**Cons:**\n", 115 | "

\n", 119 | "
" 120 | ] 121 | }, 122 | { 123 | "cell_type": "markdown", 124 | "metadata": {}, 125 | "source": [ 126 | "
\n", 127 | "

Jupyter (IPython) Notebook

\n", 128 | "\n", 129 | "

Jupyter Notebook provides an *interactive* programming environment similar to the Mathematica notebook.\n", 130 | "

\n", 135 | "\n", 136 | "A Python interpreter runs *continuously* in the background. Every time you execute a cell, the state of the interpreter is updated.\n", 137 | "\n", 138 | "\n", 139 | "

**Pros:**\n", 140 | "

\n", 146 | "\n", 147 | "

**Cons:**\n", 148 | "

\n", 155 | "
" 156 | ] 157 | }, 158 | { 159 | "cell_type": "markdown", 160 | "metadata": {}, 161 | "source": [ 162 | "
\n", 163 | "

PyCharm (IDE)

\n", 164 | "\n", 165 | "

PyCharm is a popular integrated development environment. It includes a fully-featured text editor, excellent debugging tools, interactive data browsing, project management, and version control integration. It is easy to install but *not* included with Anaconda.\n", 166 | "\n", 167 | "

**Pros:**\n", 168 | "

\n", 188 | "\n", 189 | "

**Cons:**\n", 190 | "

\n", 195 | "
" 196 | ] 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "metadata": {}, 201 | "source": [ 202 | "
\n", 203 | "

Spyder

\n", 204 | "\n", 205 | "Spyder is an integrated development environment with many similarities to MATLAB's default environment. It is included with Anaconda and provides a more interactive approach to development. \n", 206 | "\n", 207 | "Matlab + Notebook - like features:\n", 208 | "* Divide script into cells, execute in persistent process\n", 209 | "* Inline matplotlib results\n", 210 | "\n", 211 | "

**Pros:**\n", 212 | "

\n", 218 | "\n", 219 | "

**Cons:**\n", 220 | "

\n", 224 | "
" 225 | ] 226 | } 227 | ], 228 | "metadata": { 229 | "kernelspec": { 230 | "display_name": "Python 2", 231 | "language": "python", 232 | "name": "python2" 233 | }, 234 | "language_info": { 235 | "codemirror_mode": { 236 | "name": "ipython", 237 | "version": 2 238 | }, 239 | "file_extension": ".py", 240 | "mimetype": "text/x-python", 241 | "name": "python", 242 | "nbconvert_exporter": "python", 243 | "pygments_lexer": "ipython2", 244 | "version": "2.7.13" 245 | } 246 | }, 247 | "nbformat": 4, 248 | "nbformat_minor": 0 249 | } 250 | -------------------------------------------------------------------------------- /PythonBootcamp/09_bike_crossing.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | " \n", 8 | "\n", 9 | "

Python Bootcamp

\n", 10 | "

August 18-19, 2018

" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "
\n", 18 | "

Exercise: Pandas, Matplotlib, Numpy

\n", 19 | "

\n", 20 | "**Seattle tracks bike crossings across the Fremont Bridge, one of the major north/south crossings of the Ship Canal, and makes data available online**\n", 21 | "

\n", 22 | "

\n", 23 | "This exercise uses that data to demonstrate some basic Pandas functionality, including:\n", 24 | "

\n", 30 | "

\n", 31 | "
\n" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "
\n", 39 | "

We'll need the following libraries

\n", 40 | "\n", 41 | "\n", 46 | "\n", 47 | "

\n", 48 | "And don't forget to turn on the inline (or notebook) plotting magic\n", 49 | "

\n", 50 | "\n", 51 | "
\n" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "metadata": { 58 | "collapsed": true 59 | }, 60 | "outputs": [], 61 | "source": [ 62 | "# Import packages" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "
\n", 70 | "

Download and open the data, then do some initial formatting

\n", 71 | "\n", 72 | "

Data is from October 2012 to the end of the last month \n", 73 | "\n", 74 | "

get the data using the read_csv method from the following URL (web connection required): \n", 75 | "https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD\n", 76 | "\n", 77 | "

" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": null, 83 | "metadata": { 84 | "collapsed": true 85 | }, 86 | "outputs": [], 87 | "source": [ 88 | "# Read the CSV from the above link" 89 | ] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "metadata": {}, 94 | "source": [ 95 | "
\n", 96 | "\n", 97 | "

Take a look at the first few columns using the .head() method\n", 98 | "\n", 99 | "

" 100 | ] 101 | }, 102 | { 103 | "cell_type": "code", 104 | "execution_count": null, 105 | "metadata": {}, 106 | "outputs": [], 107 | "source": [ 108 | "# Display the head" 109 | ] 110 | }, 111 | { 112 | "cell_type": "markdown", 113 | "metadata": {}, 114 | "source": [ 115 | "
\n", 116 | "\n", 117 | "

Shorten the column names to make them easier to reference\n", 118 | "\n", 119 | "

" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": null, 125 | "metadata": { 126 | "collapsed": true 127 | }, 128 | "outputs": [], 129 | "source": [ 130 | "#rename data columns 'northbound' and 'southbound'" 131 | ] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": {}, 136 | "source": [ 137 | "
\n", 138 | "\n", 139 | "

Add a column containing the total crossings for each hour\n", 140 | "\n", 141 | "

" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "df['total'] = #add a total column" 151 | ] 152 | }, 153 | { 154 | "cell_type": "markdown", 155 | "metadata": {}, 156 | "source": [ 157 | "
\n", 158 | "\n", 159 | "

Take a look at the beginning and end of the dataset. How many total entries are in the table?\n", 160 | "\n", 161 | "

" 162 | ] 163 | }, 164 | { 165 | "cell_type": "code", 166 | "execution_count": null, 167 | "metadata": {}, 168 | "outputs": [], 169 | "source": [ 170 | "#display the head again" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": null, 176 | "metadata": {}, 177 | "outputs": [], 178 | "source": [ 179 | "#display the tail" 180 | ] 181 | }, 182 | { 183 | "cell_type": "code", 184 | "execution_count": null, 185 | "metadata": {}, 186 | "outputs": [], 187 | "source": [ 188 | "#print the length" 189 | ] 190 | }, 191 | { 192 | "cell_type": "markdown", 193 | "metadata": {}, 194 | "source": [ 195 | "\n", 196 | "
\n", 197 | "

Take advantage of Pandas datetime functionality to make filtering easy

\n", 198 | "

Take a look at one of the date entries, what is it's data type?\n", 199 | "\n", 200 | "

" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": null, 206 | "metadata": {}, 207 | "outputs": [], 208 | "source": [ 209 | "#print the type of one entry" 210 | ] 211 | }, 212 | { 213 | "cell_type": "markdown", 214 | "metadata": {}, 215 | "source": [ 216 | "\n", 217 | "\n", 218 | "
\n", 219 | "\n", 220 | "

We need to convert it to a datetime object, which Pandas can then recognize for easy parsing by date\n", 221 | "\n", 222 | "

" 223 | ] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "execution_count": null, 228 | "metadata": {}, 229 | "outputs": [], 230 | "source": [ 231 | "# look up the pd.to_datetime() method" 232 | ] 233 | }, 234 | { 235 | "cell_type": "code", 236 | "execution_count": null, 237 | "metadata": {}, 238 | "outputs": [], 239 | "source": [ 240 | "# look at the head again, how have the dates changed?" 241 | ] 242 | }, 243 | { 244 | "cell_type": "markdown", 245 | "metadata": {}, 246 | "source": [ 247 | "
\n", 248 | "

Now plot the total column vs. date

\n", 249 | "

Notice how easily Pandas deals with the date column. It automatically parses and labels the x-axis in a rational way.\n", 250 | "\n", 251 | "\n", 252 | "

" 253 | ] 254 | }, 255 | { 256 | "cell_type": "code", 257 | "execution_count": null, 258 | "metadata": {}, 259 | "outputs": [], 260 | "source": [ 261 | "#use the df.plot() method with x being date and y being total" 262 | ] 263 | }, 264 | { 265 | "cell_type": "markdown", 266 | "metadata": {}, 267 | "source": [ 268 | "\n", 269 | "
\n", 270 | "

To make parsing by date easier, add some columns that explicitly list year, month, hour, day of week

\n", 271 | "

Pandas recently added the handy dt accessor, which makes this very easy: \n", 272 | "\n", 273 | "

http://pandas.pydata.org/pandas-docs/version/0.15.0/basics.html#dt-accessor\n", 274 | "\n", 275 | "\n", 276 | "

" 277 | ] 278 | }, 279 | { 280 | "cell_type": "code", 281 | "execution_count": null, 282 | "metadata": {}, 283 | "outputs": [], 284 | "source": [ 285 | "# make new columns for year, month, hour, and day of week. Here's how to make the year column:\n", 286 | "df['year']=df['Date'].dt.year" 287 | ] 288 | }, 289 | { 290 | "cell_type": "markdown", 291 | "metadata": {}, 292 | "source": [ 293 | "\n", 294 | "
\n", 295 | "

What is the most common hourly count?

\n", 296 | "

Make a histogram of hourly counts\n", 297 | "\n", 298 | "\n", 299 | "

" 300 | ] 301 | }, 302 | { 303 | "cell_type": "code", 304 | "execution_count": null, 305 | "metadata": {}, 306 | "outputs": [], 307 | "source": [ 308 | "#make a histogram of the values in the total column" 309 | ] 310 | }, 311 | { 312 | "cell_type": "markdown", 313 | "metadata": {}, 314 | "source": [ 315 | "\n", 316 | "
\n", 317 | "

Find the busiest month for total crossings

\n", 318 | "

One approach is to use nested for-loops to search over all combinations of unique years and months, checking against the maximum value on each iteration\n", 319 | "\n", 320 | "\n", 321 | "

" 322 | ] 323 | }, 324 | { 325 | "cell_type": "code", 326 | "execution_count": null, 327 | "metadata": {}, 328 | "outputs": [], 329 | "source": [ 330 | "#try writing a for-loop to do this. But don't try too hard - there's a one-line way of doing this instead!" 331 | ] 332 | }, 333 | { 334 | "cell_type": "markdown", 335 | "metadata": {}, 336 | "source": [ 337 | "\n", 338 | "\n", 339 | "
\n", 340 | "\n", 341 | "

Another approach is to use the Pandas \"groupby\" method\n", 342 | "\n", 343 | "\n", 344 | "

" 345 | ] 346 | }, 347 | { 348 | "cell_type": "code", 349 | "execution_count": null, 350 | "metadata": {}, 351 | "outputs": [], 352 | "source": [ 353 | "#Instead of a for-loop, you can use the 'groupby' method, sorting by year and month" 354 | ] 355 | }, 356 | { 357 | "cell_type": "code", 358 | "execution_count": null, 359 | "metadata": { 360 | "scrolled": false 361 | }, 362 | "outputs": [], 363 | "source": [ 364 | "#print the maximum month from the grouped dataframe" 365 | ] 366 | }, 367 | { 368 | "cell_type": "markdown", 369 | "metadata": {}, 370 | "source": [ 371 | "\n", 372 | "
\n", 373 | "

Make a bar plot showing crossings for each month

\n", 374 | "

Start with the \"groupby\" method\n", 375 | "\n", 376 | "\n", 377 | "

" 378 | ] 379 | }, 380 | { 381 | "cell_type": "code", 382 | "execution_count": null, 383 | "metadata": {}, 384 | "outputs": [], 385 | "source": [ 386 | "#using the grouped dataframe, make a bar plot with the total crossings for each month" 387 | ] 388 | }, 389 | { 390 | "cell_type": "markdown", 391 | "metadata": {}, 392 | "source": [ 393 | "\n", 394 | "
\n", 395 | "\n", 396 | "

To gain a bit more control over the plot, make a temporary dataframe called \"monthdf\" that contains only the data we're interested in plotting\n", 397 | "\n", 398 | "

" 399 | ] 400 | }, 401 | { 402 | "cell_type": "code", 403 | "execution_count": null, 404 | "metadata": {}, 405 | "outputs": [], 406 | "source": [ 407 | "monthdf = pd.DataFrame(columns=('month', 'year', 'total'))\n", 408 | "for year in df.year.unique():\n", 409 | " for month in df.month.unique():\n", 410 | " monthdf = monthdf.append(pd.DataFrame({'month':[month],\n", 411 | " 'year':[year],\n", 412 | " 'total':[df[(df.month==month) & (df.year==year)].total.sum()]}))" 413 | ] 414 | }, 415 | { 416 | "cell_type": "markdown", 417 | "metadata": {}, 418 | "source": [ 419 | "\n", 420 | "
\n", 421 | "\n", 422 | "

Now make another version of the plot where months are grouped and color coded by year\n", 423 | "

" 424 | ] 425 | }, 426 | { 427 | "cell_type": "code", 428 | "execution_count": null, 429 | "metadata": {}, 430 | "outputs": [], 431 | "source": [ 432 | "# Make the plot here" 433 | ] 434 | }, 435 | { 436 | "cell_type": "markdown", 437 | "metadata": {}, 438 | "source": [ 439 | "\n", 440 | "
\n", 441 | "

Make a bar plot showing crossings by day of week, seperated by year

\n", 442 | "

Again, make a temporary dataframe containing only the data we need for the plot\n", 443 | "\n", 444 | "

Make sure to normalize the sum by the total number of days in each year!\n", 445 | "

" 446 | ] 447 | }, 448 | { 449 | "cell_type": "code", 450 | "execution_count": null, 451 | "metadata": {}, 452 | "outputs": [], 453 | "source": [ 454 | "#Try making another intermediate dataframe that contains data sorted by day" 455 | ] 456 | }, 457 | { 458 | "cell_type": "markdown", 459 | "metadata": {}, 460 | "source": [ 461 | "\n", 462 | "
\n", 463 | "\n", 464 | "

Make a bar plot where days of week are grouped and color coded by year. \n", 465 | "

Again, make a temporary dataframe containing only the data we need for the plot\n", 466 | "\n", 467 | "\n", 468 | "

" 469 | ] 470 | }, 471 | { 472 | "cell_type": "code", 473 | "execution_count": null, 474 | "metadata": {}, 475 | "outputs": [], 476 | "source": [ 477 | "# make a similar plot below" 478 | ] 479 | } 480 | ], 481 | "metadata": { 482 | "kernelspec": { 483 | "display_name": "Python [default]", 484 | "language": "python", 485 | "name": "python2" 486 | }, 487 | "language_info": { 488 | "codemirror_mode": { 489 | "name": "ipython", 490 | "version": 2 491 | }, 492 | "file_extension": ".py", 493 | "mimetype": "text/x-python", 494 | "name": "python", 495 | "nbconvert_exporter": "python", 496 | "pygments_lexer": "ipython2", 497 | "version": "2.7.13" 498 | }, 499 | "widgets": { 500 | "state": {}, 501 | "version": "1.1.2" 502 | } 503 | }, 504 | "nbformat": 4, 505 | "nbformat_minor": 1 506 | } 507 | -------------------------------------------------------------------------------- /PythonBootcamp/10_glm_exercise.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | " \n", 8 | "\n", 9 | "

Python Bootcamp

\n", 10 | "

August 18-19, 2018

" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "
\n", 18 | "Exercise \n", 19 | " \n", 20 | "Numpy, Scipy, Pandas\n", 21 | "

\n", 22 | "**Weisberg (1985) makes available a dataset of faculty salaries, along with sevaral possible predictors. We will analyze these data using a general linear model**\n", 23 | "

\n", 24 | "

\n", 25 | "This exercise covers:\n", 26 | "

\n", 31 | "

\n", 32 | "
" 33 | ] 34 | }, 35 | { 36 | "cell_type": "markdown", 37 | "metadata": {}, 38 | "source": [ 39 | "## Requirements\n", 40 | "\n", 41 | "* pandas\n", 42 | "* numpy\n", 43 | "* scipy stats\n", 44 | "\n", 45 | "You should also proabbly import division from \\__future__ - just to be safe." 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": 1, 51 | "metadata": { 52 | "collapsed": true 53 | }, 54 | "outputs": [], 55 | "source": [ 56 | "from __future__ import division\n", 57 | "\n", 58 | "import pandas as pd\n", 59 | "import numpy as np\n", 60 | "import scipy.stats" 61 | ] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "## The Data\n", 68 | "\n", 69 | "These data come from a study of salaries among university faculty. The data file is [here](http://data.princeton.edu/wws509/datasets/salary.dat) and a description of the coding is [here](http://data.princeton.edu/wws509/datasets/#salary) (You should probably at least glance at this).\n", 70 | "\n", 71 | "Load these data into a pandas dataframe. Note - the delimiter is not a comma!" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 2, 77 | "metadata": { 78 | "collapsed": false 79 | }, 80 | "outputs": [], 81 | "source": [ 82 | "data = pd.read_csv('http://data.princeton.edu/wws509/datasets/salary.dat', sep='\\s+')" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "## A fitting excercise\n", 90 | "\n", 91 | "We'll use a general linear model to analyze these data. In order to do this, we need to be able to fit such models. Fortunately, numpy's linalg module contains a method for least squares fitting. Learn how to use this by generating some noisy (gaussian) data from a toy linear model (try numpy's random module) and then recovering your coefficents.\n", 92 | "\n", 93 | "Note: functions are good." 
94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 3, 99 | "metadata": { 100 | "collapsed": true 101 | }, 102 | "outputs": [], 103 | "source": [ 104 | "def make_test_data(nobs, true_coefs, sigma):\n", 105 | " \n", 106 | " npar = len(true_coefs)\n", 107 | " design = np.random.rand(nobs, npar)\n", 108 | " target = np.dot(design, true_coefs) + np.random.randn(nobs) * sigma\n", 109 | " \n", 110 | " return design, target" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": 4, 116 | "metadata": { 117 | "collapsed": false 118 | }, 119 | "outputs": [], 120 | "source": [ 121 | "test_design, test_target = make_test_data(20, np.array([2, 3, 7]), 0.1)" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": 5, 127 | "metadata": { 128 | "collapsed": true 129 | }, 130 | "outputs": [], 131 | "source": [ 132 | "coefficients, residuals, rank, sv = np.linalg.lstsq(test_design, test_target)" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 6, 138 | "metadata": { 139 | "collapsed": false 140 | }, 141 | "outputs": [ 142 | { 143 | "name": "stdout", 144 | "output_type": "stream", 145 | "text": [ 146 | "[ 2.03368024 2.92780121 6.96839643]\n" 147 | ] 148 | } 149 | ], 150 | "source": [ 151 | "print(coefficients)" 152 | ] 153 | }, 154 | { 155 | "cell_type": "markdown", 156 | "metadata": {}, 157 | "source": [ 158 | "## Reformatting the data\n", 159 | "\n", 160 | "If you've taken a look at the data (hint), you probably know that it is not properly formatted for the method of least-squares fitting that we are using here. It has:\n", 161 | "\n", 162 | "* categorical variables in single columns\n", 163 | "* no distinction between the predictor and estimand columns\n", 164 | "* no way to specify an intercept\n", 165 | "\n", 166 | "Write a function to rectify this situation. 
Your function should have the following signature:\n", 167 | "\n", 168 | "```python\n", 169 | "def glm_data_reformat(dataframe, target_name, cont_pred=None, cat_pred=None, intercept=True):\n", 170 | " '''Sets up a dataframe for fitting with numpy (main effects only)\n", 171 | " \n", 172 | " Parameters\n", 173 | " ---------\n", 174 | " dataframe : pandas df\n", 175 | " contains mix of categorical and continuous predictors\n", 176 | " target_name : str\n", 177 | " column header of target variable (treated as continuous)\n", 178 | " cont_pred : list of str, optional\n", 179 | " column headers of continuous predictors, if any\n", 180 | " cat_pred : list of str, optional\n", 181 | " column headers of categorical predictors, if any\n", 182 | " intercept : bool, optional\n", 183 | " fit an intercept? Defaults to yes.\n", 184 | " \n", 185 | " Returns\n", 186 | " -------\n", 187 | " design : ndarray (n_observations x n_parameters)\n", 188 | " predictor data.\n", 189 | " target : ndarray (n_observations)\n", 190 | " estimand\n", 191 | " design_names : list of str\n", 192 | " names of parameters in design matrix columns\n", 193 | " \n", 194 | " '''\n", 195 | "\n", 196 | " # your code here\n", 197 | "\n", 198 | " return design, target, design_names\n", 199 | "```\n", 200 | "\n", 201 | "Note: You will need to code the continuous variables somehow. This will require spooling them out into multiple columns of the design matrix." 
202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": 7, 207 | "metadata": { 208 | "collapsed": true 209 | }, 210 | "outputs": [], 211 | "source": [ 212 | "def glm_data_reformat(dataframe, target_name, cont_pred=None, cat_pred=None, intercept=True):\n", 213 | " '''Sets up a dataframe for fitting with numpy (main effects only)\n", 214 | "\n", 215 | " Parameters\n", 216 | " ---------\n", 217 | " dataframe : pandas df\n", 218 | " contains mix of categorical and continuous predictors\n", 219 | " target_name : str\n", 220 | " column header of target variable (treated as continuous)\n", 221 | " cont_pred : list of str, optional\n", 222 | " column headers of continuous predictors, if any\n", 223 | " cat_pred : list of str, optional\n", 224 | " column headers of categorical predictors, if any\n", 225 | " intercept : bool, optional\n", 226 | " fit an intercept? Defaults to yes.\n", 227 | "\n", 228 | " Returns\n", 229 | " -------\n", 230 | " design : ndarray (n_observations x n_parameters)\n", 231 | " predictor data.\n", 232 | " target : ndarray (n_observations)\n", 233 | " estimand\n", 234 | " design_names : list of str\n", 235 | " names of parameters in design matrix columns\n", 236 | "\n", 237 | " '''\n", 238 | "\n", 239 | " if cont_pred is None: cont_pred = []\n", 240 | " if cat_pred is None: cat_pred = []\n", 241 | " \n", 242 | " design_names = []\n", 243 | " columns = []\n", 244 | " \n", 245 | " for var_name in cont_pred:\n", 246 | " columns.append(dataframe[var_name])\n", 247 | " design_names.append(var_name)\n", 248 | " \n", 249 | " for var_name in cat_pred:\n", 250 | " \n", 251 | " levels = dataframe[var_name].unique()\n", 252 | " nlevels = len(levels)\n", 253 | " \n", 254 | " if nlevels < 2:\n", 255 | " continue\n", 256 | " \n", 257 | " for ii, level in enumerate(levels):\n", 258 | " \n", 259 | " if ii == nlevels - 1 :\n", 260 | " break\n", 261 | " \n", 262 | " indicator = np.zeros(dataframe.shape[0])\n", 263 | " 
indicator[np.where(dataframe[var_name] == level)] = 1\n", 264 | " columns.append(indicator)\n", 265 | " design_names.append('{0}_as_{1}'.format(var_name, level))\n", 266 | " \n", 267 | " if intercept:\n", 268 | " columns.append(np.ones(dataframe.shape[0]))\n", 269 | " design_names.append('intercept')\n", 270 | " \n", 271 | "\n", 272 | " return np.array(columns).T, np.array(dataframe[target_name]), design_names\n" 273 | ] 274 | }, 275 | { 276 | "cell_type": "code", 277 | "execution_count": 8, 278 | "metadata": { 279 | "collapsed": true 280 | }, 281 | "outputs": [], 282 | "source": [ 283 | "full_design, full_target, full_design_names = glm_data_reformat(\n", 284 | " data, target_name='sl', cont_pred=['yr', 'yd'], cat_pred=['dg', 'rk', 'sx'], intercept=True\n", 285 | " )" 286 | ] 287 | }, 288 | { 289 | "cell_type": "markdown", 290 | "metadata": {}, 291 | "source": [ 292 | "If you have not already, test your function:" 293 | ] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": null, 298 | "metadata": { 299 | "collapsed": true 300 | }, 301 | "outputs": [], 302 | "source": [] 303 | }, 304 | { 305 | "cell_type": "markdown", 306 | "metadata": {}, 307 | "source": [ 308 | "Now use this function and the linalg module to format the data and fit a model of your choice." 309 | ] 310 | }, 311 | { 312 | "cell_type": "code", 313 | "execution_count": 9, 314 | "metadata": { 315 | "collapsed": false 316 | }, 317 | "outputs": [], 318 | "source": [ 319 | "full_coefficients, residuals, rank, sv = np.linalg.lstsq(full_design, full_target)" 320 | ] 321 | }, 322 | { 323 | "cell_type": "markdown", 324 | "metadata": {}, 325 | "source": [ 326 | "## Analysis\n", 327 | "\n", 328 | "You have a model, let's do something with it. In particular, we will investigate whether there is an effect of sex on salary in these data. 
We can use a sequential sum of squares f-test, where:\n", 329 | "\n", 330 | "$$\n", 331 | "f = \\frac{\\frac{SSE_{red} - SSE_{full}}{DFE_{red} - DFE{full}}} {\\frac{SSE_{full}}{DFE_{full}}}\n", 332 | "$$\n", 333 | "Here SSE is the sum of squared errors (i.e. the residuals). DFE is the error degrees of freedom (number of observations - number of design matrix columns). The full model is exactly what it sounds like, while the red (reduced) model is just the same model sans one parameter.\n", 334 | "\n", 335 | "Fit a full and reduced model for a parameter of interest and generate an f-value." 336 | ] 337 | }, 338 | { 339 | "cell_type": "code", 340 | "execution_count": 10, 341 | "metadata": { 342 | "collapsed": false 343 | }, 344 | "outputs": [], 345 | "source": [ 346 | "red_design, red_target, red_design_names = glm_data_reformat(\n", 347 | " data, target_name='sl', cont_pred=['yr', 'yd'], cat_pred=['dg', 'rk'], intercept=True\n", 348 | " )\n", 349 | "red_coefficients, _, _, _ = np.linalg.lstsq(red_design, red_target)\n" 350 | ] 351 | }, 352 | { 353 | "cell_type": "code", 354 | "execution_count": 11, 355 | "metadata": { 356 | "collapsed": false 357 | }, 358 | "outputs": [], 359 | "source": [ 360 | "full_sse = ((np.dot(full_design, full_coefficients) - full_target)**2).sum()\n", 361 | "red_sse = ((np.dot(red_design, red_coefficients) - red_target)**2).sum()\n", 362 | "\n", 363 | "full_dfm = len(full_design_names) \n", 364 | "red_dfm = len(red_design_names)\n", 365 | "\n", 366 | "full_dfe = full_design.shape[0] - full_dfm\n", 367 | "red_dfe = red_design.shape[0] - red_dfm" 368 | ] 369 | }, 370 | { 371 | "cell_type": "code", 372 | "execution_count": 12, 373 | "metadata": { 374 | "collapsed": false 375 | }, 376 | "outputs": [], 377 | "source": [ 378 | "fhat = ( (red_sse - full_sse) / (red_dfe - full_dfe) ) / (full_sse / full_dfe)" 379 | ] 380 | }, 381 | { 382 | "cell_type": "code", 383 | "execution_count": 13, 384 | "metadata": { 385 | "collapsed": false 386 | }, 387 | 
"outputs": [ 388 | { 389 | "name": "stdout", 390 | "output_type": "stream", 391 | "text": [ 392 | "1.58802561117\n" 393 | ] 394 | } 395 | ], 396 | "source": [ 397 | "print(fhat)" 398 | ] 399 | }, 400 | { 401 | "cell_type": "markdown", 402 | "metadata": {}, 403 | "source": [ 404 | "Now get a p-value by using the cdf of an f-distributed random variable. Scipy.stats has a handy function for this.\n", 405 | "\n", 406 | "Note that your f-distribution's parameters should be:\n", 407 | "\n", 408 | "1. $DFM_{full} - DFM_{red}$ where DFM is the number of columns in a model's design matrix.\n", 409 | "2. $DFE_{full}$" 410 | ] 411 | }, 412 | { 413 | "cell_type": "code", 414 | "execution_count": 14, 415 | "metadata": { 416 | "collapsed": false 417 | }, 418 | "outputs": [], 419 | "source": [ 420 | "fvar = scipy.stats.f\n", 421 | "\n", 422 | "pvalue = 1 - fvar.cdf(fhat, full_dfm - red_dfm, full_dfe)" 423 | ] 424 | }, 425 | { 426 | "cell_type": "code", 427 | "execution_count": 15, 428 | "metadata": { 429 | "collapsed": false 430 | }, 431 | "outputs": [ 432 | { 433 | "name": "stdout", 434 | "output_type": "stream", 435 | "text": [ 436 | "0.214104335593\n" 437 | ] 438 | } 439 | ], 440 | "source": [ 441 | "print(pvalue)" 442 | ] 443 | }, 444 | { 445 | "cell_type": "markdown", 446 | "metadata": {}, 447 | "source": [ 448 | "## Continuations\n", 449 | "\n", 450 | "* extend your glm_data_reformat to handle interactions\n", 451 | "* evaluate the model's performance using leave-one-out cross-validation" 452 | ] 453 | } 454 | ], 455 | "metadata": { 456 | "kernelspec": { 457 | "display_name": "Python 2", 458 | "language": "python", 459 | "name": "python2" 460 | }, 461 | "language_info": { 462 | "codemirror_mode": { 463 | "name": "ipython", 464 | "version": 2 465 | }, 466 | "file_extension": ".py", 467 | "mimetype": "text/x-python", 468 | "name": "python", 469 | "nbconvert_exporter": "python", 470 | "pygments_lexer": "ipython2", 471 | "version": "2.7.11" 472 | } 473 | }, 474 | "nbformat": 4, 
475 | "nbformat_minor": 0 476 | } 477 | -------------------------------------------------------------------------------- /PythonBootcamp/11_Image_data.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | " \n", 8 | "\n", 9 | "

Python Bootcamp

\n", 10 | "

August 18-19, 2018, Seattle, WA

" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "
\n", 18 | "

Exercise: Numpy, Scipy, Matplotlib

\n", 19 | "\n", 20 | "

The following series of exercises are designed to give you more practice manipulating and plotting data using Numpy, Scipy, and Matplotlib\n", 21 | "

" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "metadata": { 28 | "collapsed": true 29 | }, 30 | "outputs": [], 31 | "source": [ 32 | "import numpy as np\n", 33 | "import matplotlib.pyplot as plt\n", 34 | "import scipy.misc\n", 35 | "import scipy.ndimage\n", 36 | "from __future__ import print_function\n", 37 | "\n", 38 | "%matplotlib notebook" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "\n", 46 | "
\n", 47 | "

Scipy has a built in image called 'face' that we can use to do some image processing

\n", 48 | "

The exercises below will walk you through various basic operations on this image\n", 49 | "

First, make a variabled called \"face\" that contains the image\n", 50 | "\n", 51 | "

" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "metadata": { 58 | "collapsed": false 59 | }, 60 | "outputs": [], 61 | "source": [ 62 | "face = scipy.misc.face()" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "\n", 70 | "
\n", 71 | "

What is the image?

\n", 72 | "

Show the image using matplotlib\n", 73 | "

" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": { 80 | "collapsed": false 81 | }, 82 | "outputs": [], 83 | "source": [ 84 | "# Use imshow" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "
-left: 3px solid #000; padding: 1px; padding-left: 10px; background: #F0FAFF; \">\n", 92 | "

What is the shape of the image?

\n", 93 | "

\n", 94 | "

" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": { 101 | "collapsed": false 102 | }, 103 | "outputs": [], 104 | "source": [] 105 | }, 106 | { 107 | "cell_type": "markdown", 108 | "metadata": {}, 109 | "source": [ 110 | "\n", 111 | "
\n", 112 | "

Make note of the fact that the first two dimensions are ROWS x COLUMNS. When indexing into this array, you'll need to use this convention, as opposed to thinking of X and Y position (which would actually be column and row, respectively).\n", 113 | "

" 114 | ] 115 | }, 116 | { 117 | "cell_type": "markdown", 118 | "metadata": {}, 119 | "source": [ 120 | "
\n", 121 | "

What is the intensity range for the pixel values?

\n", 122 | "

hint: use the 'flatten' method to make a 1-D array, then make a histogram\n", 123 | "

" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": null, 129 | "metadata": { 130 | "collapsed": false, 131 | "scrolled": false 132 | }, 133 | "outputs": [], 134 | "source": [ 135 | "face_flat = face.flatten()\n", 136 | "#now make a histogram" 137 | ] 138 | }, 139 | { 140 | "cell_type": "markdown", 141 | "metadata": {}, 142 | "source": [ 143 | "
\n", 144 | "

The third image dimension refers to the number of color channels (R,G,B).

\n", 145 | "

Try making an array of equal size to the image, but that contains only the color at a given pixel (for example, find a pixel on a leaf, then display an array that contains only that color)\n", 146 | "

Below is an example of how the plot might look:\n", 147 | " \n", 148 | "

\n" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": null, 154 | "metadata": { 155 | "collapsed": false 156 | }, 157 | "outputs": [], 158 | "source": [] 159 | }, 160 | { 161 | "cell_type": "markdown", 162 | "metadata": {}, 163 | "source": [ 164 | "
\n", 165 | "

Convert the image to grayscale instead

\n", 166 | "

To start with, look at any (or all) of the color channels individually. Are they all the same?\n", 167 | "

Below is an example of what a plot of each individual color channel might look like. \n", 168 | "

Note that Matplotlib does not make it easy to put individual colorbars on subplots - scour the documentation or just check out the key to figure this out.\n", 169 | " \n", 170 | "

\n" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": null, 176 | "metadata": { 177 | "collapsed": false 178 | }, 179 | "outputs": [], 180 | "source": [] 181 | }, 182 | { 183 | "cell_type": "markdown", 184 | "metadata": { 185 | "collapsed": true 186 | }, 187 | "source": [ 188 | "\n", 189 | "
\n", 190 | "

To properly convert the color channels to gray (luminance), you need to account for our visual system's sensitivity to each individual color: https://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale. Try writing a function to do this, then applying it to the image.\n", 191 | "

" 192 | ] 193 | }, 194 | { 195 | "cell_type": "code", 196 | "execution_count": null, 197 | "metadata": { 198 | "collapsed": true 199 | }, 200 | "outputs": [], 201 | "source": [ 202 | "#try making a function that converts each pixel from RBG to gray" 203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": null, 208 | "metadata": { 209 | "collapsed": false 210 | }, 211 | "outputs": [], 212 | "source": [ 213 | "#apply the function, make a grayscale plot. Be sure to set the colormap appropriately." 214 | ] 215 | }, 216 | { 217 | "cell_type": "markdown", 218 | "metadata": {}, 219 | "source": [ 220 | "
\n", 221 | "

Try using scipy's gaussian filter method to smooth (blur) the grayscaled face image

\n", 222 | "

Hint: Look up documentation for scipy.ndimage.filters.gaussian_filter\n", 223 | "

Here's what a blurred image might look like:\n", 224 | " \n", 225 | "

\n" 226 | ] 227 | }, 228 | { 229 | "cell_type": "code", 230 | "execution_count": null, 231 | "metadata": { 232 | "collapsed": true 233 | }, 234 | "outputs": [], 235 | "source": [] 236 | }, 237 | { 238 | "cell_type": "markdown", 239 | "metadata": {}, 240 | "source": [ 241 | "
\n", 242 | "

In the grayscale image, find the pixel with the highest intensity

\n", 243 | "

plot an X over it in the image\n", 244 | "

Here's what it should look like:\n", 245 | "\n", 246 | " \n", 247 | "

" 248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": null, 253 | "metadata": { 254 | "collapsed": false 255 | }, 256 | "outputs": [], 257 | "source": [] 258 | }, 259 | { 260 | "cell_type": "markdown", 261 | "metadata": {}, 262 | "source": [ 263 | "
\n", 264 | "

In the grayscale image, choose a threshold, then make all values above that threshold white and all values below that threshold black

\n", 265 | "

**Bonus exercise: Use a sliderbar widget to make the threshold dynamically adjustable:**\n", 266 | "

see: http://ipywidgets.readthedocs.io/en/latest/examples/Using%20Interact.html\n", 267 | "

Here's what it should look like:\n", 268 | " \n", 269 | "

\n" 270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "execution_count": null, 275 | "metadata": { 276 | "collapsed": false 277 | }, 278 | "outputs": [], 279 | "source": [ 280 | "# basic answer\n", 281 | "gray_face_thresholded = gray_face.copy() #make a copy of the data so you don't affect the original image\n", 282 | "\n", 283 | "threshold = 150\n", 284 | "gray_face_thresholded[gray_face_thresholded<=threshold] = 0\n", 285 | "gray_face_thresholded[gray_face_thresholded>threshold] = 1\n", 286 | "\n", 287 | "fig,ax=plt.subplots()\n", 288 | "\n", 289 | "ax.imshow(gray_face_thresholded,cmap='gray')" 290 | ] 291 | }, 292 | { 293 | "cell_type": "code", 294 | "execution_count": null, 295 | "metadata": { 296 | "collapsed": false 297 | }, 298 | "outputs": [], 299 | "source": [ 300 | "# Answer with slider bar\n", 301 | "# have to run 'pip install ipywidgets' from the command line first\n", 302 | "from ipywidgets import interact\n", 303 | "@interact\n", 304 | "def show_thresholded(threshold=(0,255,1)):\n", 305 | " gray_face_thresholded = gray_face.copy() #make a copy of the data so you don't affect the original image\n", 306 | "\n", 307 | " gray_face_thresholded[gray_face_thresholded<=threshold] = 0\n", 308 | " gray_face_thresholded[gray_face_thresholded>threshold] = 1\n", 309 | "\n", 310 | " fig,ax=plt.subplots()\n", 311 | "\n", 312 | " ax.imshow(gray_face_thresholded,cmap='gray')" 313 | ] 314 | }, 315 | { 316 | "cell_type": "markdown", 317 | "metadata": {}, 318 | "source": [ 319 | "\n", 320 | "
\n", 321 | "

Try making plots of intensity values for an x,y cross section through the image

\n", 322 | "

For example, a plot might look like this:\n", 323 | " \n", 324 | "

" 325 | ] 326 | }, 327 | { 328 | "cell_type": "code", 329 | "execution_count": null, 330 | "metadata": { 331 | "collapsed": false 332 | }, 333 | "outputs": [], 334 | "source": [ 335 | "#hint: Use gridspec to define the size and locations of the three plots" 336 | ] 337 | } 338 | ], 339 | "metadata": { 340 | "kernelspec": { 341 | "display_name": "Python 2", 342 | "language": "python", 343 | "name": "python2" 344 | }, 345 | "language_info": { 346 | "codemirror_mode": { 347 | "name": "ipython", 348 | "version": 2 349 | }, 350 | "file_extension": ".py", 351 | "mimetype": "text/x-python", 352 | "name": "python", 353 | "nbconvert_exporter": "python", 354 | "pygments_lexer": "ipython2", 355 | "version": "2.7.13" 356 | } 357 | }, 358 | "nbformat": 4, 359 | "nbformat_minor": 0 360 | } 361 | -------------------------------------------------------------------------------- /PythonBootcamp/support_files/CrossingDailyBarPlot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/CrossingDailyBarPlot.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/CrossingMonthlyBarPlot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/CrossingMonthlyBarPlot.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/SampleWorkbook.csv: -------------------------------------------------------------------------------- 1 | Column 1,Column 2 2 | one,1 3 | two,2 4 | three,3 -------------------------------------------------------------------------------- /PythonBootcamp/support_files/blurred.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/blurred.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/cropped-SummerWorkshop_Header.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/cropped-SummerWorkshop_Header.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/cross_sections.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/cross_sections.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/gitkraken_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/gitkraken_1.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/gitkraken_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/gitkraken_2.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/gitkraken_3.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/gitkraken_3.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/gitkraken_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/gitkraken_4.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/grayscales.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/grayscales.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/leafplot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/leafplot.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/maxpixel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/maxpixel.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/neuron.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/neuron.jpg -------------------------------------------------------------------------------- 
/PythonBootcamp/support_files/parallel_commits.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/parallel_commits.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/stinkbug.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/stinkbug.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/thresholdedimage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/thresholdedimage.png -------------------------------------------------------------------------------- /PythonBootcamp/support_files/topic_branches.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/PythonBootcamp/support_files/topic_branches.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SWDB_2018 2 | ![SWDB_2018](/resources/cropped-SummerWorkshop_Header.png) 3 | 4 | This is the repository for the course materials for the 2018 Summer Workshop on the Dynamic Brain. 5 | 6 | # Support Policy 7 | 8 | We are releasing this code as part of the 2017 Summer Workshop on the Dynamic Brain and will only be supporting and developing it for the context of this workshop. 
The community is welcome to submit issues, but you should not expect an active response outside of the context of the course. 9 | 10 | Copyright 2018 Allen Institute 11 | -------------------------------------------------------------------------------- /resources/EphysObservatory/ecephys_manifest.csv: -------------------------------------------------------------------------------- 1 | nwb_filename,experiment_type,VISp,VISal,VISam,VISlm,VISpm,VISrl,locally_sparse_noise,gabor,drifting_gratings,static_gratings,natural_images,natural_movie_3,full_field_flashes 2 | nwb_M14_actual_ds2.nwb,single_probe,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE 3 | nwb_M15_actual_ds2.nwb,single_probe,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE 4 | nwb_M16_actual_ds2.nwb,single_probe,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE 5 | nwb_M39_actual_ds2.nwb,single_probe,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE 6 | nwb_M51_actual_ds2.nwb,single_probe,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE 7 | mouse372584_probe_surf_bob.nwb,multi_probe,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE 8 | -------------------------------------------------------------------------------- /resources/EphysObservatory/neuropixels.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/resources/EphysObservatory/neuropixels.png -------------------------------------------------------------------------------- /resources/Neocortical Interneurons.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/resources/Neocortical Interneurons.png 
-------------------------------------------------------------------------------- /resources/change_detection_schematic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/resources/change_detection_schematic.png -------------------------------------------------------------------------------- /resources/cropped-SummerWorkshop_Header.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AllenInstitute/SWDB_2018/94c120088284b156f278e4758aa5691b68f0b76e/resources/cropped-SummerWorkshop_Header.png --------------------------------------------------------------------------------