├── .gitignore ├── README.md ├── course_arrangement.png ├── requirements.txt ├── 作业 ├── W1_1 Python & BrainPy编程基础作业.ipynb ├── W1_2 Hodg kin–Huxley神经元 基础作业.ipynb ├── W1_2 Hodgkin–Huxley神经元 大作业 优秀答案.ipynb ├── W1_2 Hodgkin–Huxley神经元 大作业.ipynb ├── W1_2 Hodgkin–Huxley神经元 进阶作业.ipynb ├── W2_1 简化神经元模型 基础作业.ipynb ├── W2_1 简化神经元模型 进阶作业.ipynb ├── W2_2 突触模型及编程 How to customize a synapse.ipynb ├── W2_2 突触模型及编程 Phenomenological Synaptic Models.ipynb ├── W2_2 突触模型及编程 kinetic Synaptic Models.ipynb ├── W2_3 突触可塑性模型基础作业.ipynb ├── W2_3 突触可塑性模型进阶作业.ipynb ├── W3_1 兴奋抑制平衡网络.ipynb ├── W3_2 抉择网络模型 rate model.ipynb ├── W3_2 抉择网络模型 spiking model.ipynb ├── W3_3 连续吸引子网络(上).ipynb ├── W3_3 连续吸引子网络(下).ipynb ├── W4_1 循环神经网络.ipynb ├── W4_2 循环神经网络+库网络.ipynb └── W4_3 脉冲神经网络训练.ipynb └── 课件 ├── Day 1.1_programming_basics.ipynb ├── Day 1.2_programming_number_string.ipynb ├── Day 1.3_programming_class.ipynb ├── Day 1.4_programming_list_tuple_dict.ipynb ├── Day 1.5_programming_function.ipynb ├── Day 1.6_programming_numpy_matplotlib.ipynb ├── W1_1 Python & BrainPy基础.pdf ├── W1_1 神经计算建模简介-吴思.pdf ├── W1_2 Conductance-based models.pdf ├── W1_2 HH编程.pdf ├── W2_1 Single neuron modeling - simplified models.pdf ├── W2_2 Reduced model programming.pdf ├── W2_3 how to customze a synapse.ipynb ├── W2_3 phenon synapse models.ipynb ├── W2_3 kinetic synapse models.ipynb ├── W2_3 突触模型.pdf ├── W2_4 Synaptic Plasticity.pdf ├── W2_4 Synaptic_Plasticity.ipynb ├── W3_1 E-I_Balanced Network.ipynb ├── W3_1 兴奋抑制平衡网络及其编程实现.pdf ├── W3_2 Decision_making_model.pdf ├── W3_2 a_rate_network_of_decision_making.ipynb ├── W3_2 a_spiking_network_of_decision_making.ipynb ├── W3_3 Attractor_Network.ipynb ├── W3_3 Hopfield+CANN.pdf ├── W3_4 CANN with Adaptation.pdf ├── W3_4 CANN_Adaptation.ipynb ├── W4_1 RNN.pdf ├── W4_2 RNN-RC.pdf ├── W4_2 reservoir.ipynb ├── W4_3 Neural Coding.ipynb ├── W4_3 Simple SNN for MNIST.ipynb ├── W4_3 脉冲神经网络的训练.pdf ├── W4_4 类脑计算芯片与系统简介.pdf └── figs ├── E_I_balance_network.png ├── align_post.png ├── align_pre.png 
├── csr_matrix.png ├── masked_matrix.png ├── mlp_sketch.png ├── s01apgi89t.png ├── snn_graph.png ├── tc-fig1.png ├── tc-fig2.png └── tc-fig3.png /.gitignore: -------------------------------------------------------------------------------- 1 | ### Python template 2 | # Byte-compiled / optimized / DLL files 3 | __pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | 7 | 8 | .idea 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | share/python-wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .nox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | *.py,cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | cover/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | .pybuilder/ 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | # For a library or package, you might want to ignore these files since the code is 90 | # intended to run in multiple environments; otherwise, check them in: 91 | # .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version 
control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # poetry 101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 102 | # This is especially recommended for binary packages to ensure reproducibility, and is more 103 | # commonly ignored for libraries. 104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 105 | #poetry.lock 106 | 107 | # pdm 108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 109 | #pdm.lock 110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 111 | # in version control. 112 | # https://pdm.fming.dev/#use-with-ide 113 | .pdm.toml 114 | 115 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 116 | __pypackages__/ 117 | 118 | # Celery stuff 119 | celerybeat-schedule 120 | celerybeat.pid 121 | 122 | # SageMath parsed files 123 | *.sage.py 124 | 125 | # Environments 126 | .env 127 | .venv 128 | env/ 129 | venv/ 130 | ENV/ 131 | env.bak/ 132 | venv.bak/ 133 | 134 | # Spyder project settings 135 | .spyderproject 136 | .spyproject 137 | 138 | # Rope project settings 139 | .ropeproject 140 | 141 | # mkdocs documentation 142 | /site 143 | 144 | # mypy 145 | .mypy_cache/ 146 | .dmypy.json 147 | dmypy.json 148 | 149 | # Pyre type checker 150 | .pyre/ 151 | 152 | # pytype static type analyzer 153 | .pytype/ 154 | 155 | # Cython debug symbols 156 | cython_debug/ 157 | 158 | # PyCharm 159 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 161 | # and can be added to the global gitignore or merged into this file. For a more nuclear 162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 163 | #.idea/ 164 | 165 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Second Training Course on Neural Modeling and Programming (第二届神经计算建模及编程培训班) 2 | 3 | 4 | ## 一、培训班简介 5 | 神经计算建模是研究神经系统的结构、功能和机制的重要工具。通过数学、物理、计算机等工具,神经计算建模阐明了大脑工作的计算原理并推动了类脑智能的发展。为了普及这一方法,推动我国计算神经科学的人才培养,我们特别推出了《第二期神经计算建模及编程培训班》课程。 6 | 7 | 本课程由中国神经科学学会下属的《计算神经科学与神经工程专业委员会》发起,以北京大学神经信息处理课题组所著的《神经计算建模实战》教材为基础,结合BrainPy编程框架进行实践。你将学习神经计算的基本概念、方法和技术,如何用Python编程语言实现神经系统的建模和高效模拟。这将为你在神经计算和类脑智能领域的研究打下坚实的基础。同时,本期课程特别邀请了广东省智能科学与技术研究院类脑计算架构与超大规模处理系统研究组为大家讲解类脑计算芯片与系统,带领大家走进前沿的类脑芯片发展。 8 | 9 | 我们欢迎所有对神经计算建模感兴趣的老师和学生参加本期培训班,不论你来自哪个领域,我们坚信你将从中收获颇丰。 10 | 11 | 本期课程由和鲸HeyWhale社区提供免费的云平台算力支持。 12 | 13 | ## 二、培训安排 14 | 15 | 课程内容包括: 16 | 17 | 1. 
神经计算建模简介 18 | 19 | - 神经计算的历史和发展 20 | 21 | - 神经计算建模的应用和简介 22 | 23 | 2. 神经元的电生理模型及动力学分析 24 | 25 | - 神经元的电生理模型介绍 26 | 27 | - 神经元动力学模型的分析 28 | 29 | 3. 突触动力学及其可塑性模型 30 | 31 | - 突触的生理机制介绍 32 | 33 | - 突触动力学模型及其可塑性机制 34 | 35 | 4. 神经网络的动力学及常见计算模型 36 | 37 | - 常见脉冲神经网络模型及实现 38 | 39 | - 常见发放率网络模型及实现 40 | 41 | 5. AI在神经计算建模中的应用 42 | 43 | - AI在神经计算建模中的应用概述 44 | 45 | - AI在脉冲神经网络模型中的应用 46 | 47 | 6. 类脑芯片简介 48 | 49 | 具体日程安排如下: 50 | 51 | ![](course_arrangement.png) 52 | 53 | 我们期待的学员: 54 | 55 | - 对神经科学、计算科学、人工智能等领域感兴趣的师生; 56 | 57 | - 希望了解神经计算模型、类脑智能等前沿技术的专业人士; 58 | 59 | - 有志于深入学习并掌握神经计算建模及编程的爱好者。 60 | 61 | 62 | ## 三、授课单位 63 | 64 | 本次培训班主要由「北京大学信息处理实验室」开展进行,授课老师由吴思教授及其课题组成员组成。由吴思教授领导的北京大学神经信息处理实验室隶属于北京大学心理与认知科学学院。该实验室的研究领域是计算认知神经科学和类脑计算。实验室与认知科学家、神经科学家、信息科学家等进行密切合作,用数理方法和计算机仿真来构建神经系统加工信息的计算模型,阐明大脑处理信息的一般性原理,并在此基础上发展类脑的人工智能算法。 65 | 66 | 本次培训班特别邀请广东智能科学研究院「类脑计算架构与超大规模处理系统课题组」讲授类脑计算系统与芯片。由环宇翔研究员领导的类脑计算架构与超大规模处理系统课题组面向类脑计算的硬件处理架构和超大规模类脑计算系统设计展开研究,旨在借鉴人脑的信息处理机制,设计具有神经拟态特性的专用处理内核、大规模的芯片互联架构与方法、以及面向全脑尺度千亿神经元规模超级计算系统。 67 | 68 | ## 四、培训时间和形式 69 | 70 | 培训时间:2023年11月04日-11月26日 71 | 72 | 培训方式:线上课程,基于腾讯会议进行授课。 73 | 74 | 75 | 微信链接:https://mp.weixin.qq.com/s/sObQ5EypmUuIv_4aCenzfw 76 | 77 | 报名链接:https://meeting.cns.org.cn/2ndNCMP/ 78 | 79 | 80 | -------------------------------------------------------------------------------- /course_arrangement.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/course_arrangement.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | brainpy>=2.4.6 2 | brainpylib -------------------------------------------------------------------------------- /作业/W1_1 Python & BrainPy编程基础作业.ipynb: -------------------------------------------------------------------------------- 1 | 
{"cells":[{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"D03E4802184F4CC4B850A0E630BDF673","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"# Day 1: BrainPy programming basics homework \nThis is the first assignment for this course. The assignment is to familiarize themselves with the basic programming of BrainPy that was covered in class, and the participants will need to fill in the missing content according to the code comments and execute the cells to observe the results. \n\nFirst of all, we need to import all the libraries."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"8191B581346B43E78221B2D87D69A487","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"import brainpy as bp\nimport brainpy.math as bm\nimport numpy as np","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"8CF1FA4FA27C46858EFE1FAF41D3AFC8","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"## 1. JIT compilation \nJust-in-time compilation is the basic technique that gaurantee the efficiency of BrainPy. 
In this section, we will show the basic usage of JIT compilation and experience the improvement in running performance.
gelu(x)","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"F58EE6776EA941178FF1913029388DC7","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"If you use JIT compilation and pass the function into bm.jit(), the execution time of the function will be significantly reduced."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"7FB1C53FE97B4760A2A3DF55991911AF","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"# TODO: JIT compile the gelu function using the brainpy.math library\n# Hint: Use the bm.jit()\ngelu_jit = ...\n%timeit gelu_jit(x)","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"3F6D3E29C2124E83AAB83991E4555BD5","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"### 1.2 Object-oriented JIT compilation"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"2C9BA1B169DF45348E385CA246579E01","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"We use the logistic regression classifier as an example, in this model, since the weight $w$ needs to be modified during training, it needs to be defined as ``brainpy.math.Variable``, and the rest of the parameters will be treated as static variables during compilation, and their values will not be changed."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4499765F023947259D1A905486BA1F6F","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"class 
LogisticRegression(bp.BrainPyObject):\n def __init__(self, dimension):\n super(LogisticRegression, self).__init__()\n\n # parameters\n self.dimension = dimension\n\t\t\t\n # variables\n self.w = bm.Variable(2.0 * bm.ones(dimension) - 1.3)\n \n def __call__(self, X, Y):\n u = bm.dot(((1.0 / (1.0 + bm.exp(-Y * bm.dot(X, self.w))) - 1.0) * Y), X)\n self.w.value = self.w - u # in-place update","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"29A839FD8B8941438759031710FD3F5B","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"To test the execution time, we write a function that calculates the execution time and define the dataset:."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"E7276ACFB56F43858C8D8F90EF6DC484","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"import time\n\ndef benckmark(model, points, labels, num_iter=30, name=''):\n t0 = time.time()\n for i in range(num_iter): \n model(points, labels)\n print(f'{name} used time {time.time() - t0} s')\n \nnum_dim, num_points = 10, 20000000\npoints = bm.random.random((num_points, num_dim))\nlabels = bm.random.random(num_points)","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"74340EFA2F8C42A8AC63E334E5E0908A","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"Next, let's test the execution time without JIT compilation."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"7196F845100D4849B4E0CDB2D1E0833A","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"lr1 = 
LogisticRegression(num_dim)\nbenckmark(lr1, points, labels, name='Logistic Regression (without jit)')","outputs":[],"execution_count":null},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"34A83AD4F282430FBD05C1C0FE29A3C1","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"lr1 = bm.jit(LogisticRegression(num_dim))\nlr1.vars().keys()","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"83903FB2024C4F5B946BD5990631FCE6","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"Next, we test the execution time for the case of JIT compilation, which is used in a similar way to a function, simply passing the class instance into ``brainpy.math.jit()``:"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A587B834C01A4E35A91FFD651388F3DF","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"lr2 = LogisticRegression(num_dim)\n# TODO: JIT compile the gelu function using the brainpy.math library\n# Hint: Use the bm.jit()\nlr2 = ...\nbenckmark(lr2, points, labels, name='Logistic Regression (with jit)')","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A568325F97D547D8A5800CFD4E5E1C2F","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"## 2. Data structures \n### 2.1 Arrays \nAn array is a data structure that organizes algebraic objects in a multi-dimensional vector space. 
Simply put, in BrainPy, this data structure is a multidimensional array of the same data type, most commonly numeric or boolean."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"0670276C531A4FE999D63E6B51ABE9D9","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"bm_array = bm.array([0, 1, 2, 3, 4, 5])\nnp_array = np.array([0, 1, 2, 3, 4, 5])\nbm_array","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"AFF7A3F009EE40DB8141AFDE7636EE9F","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"We can create a high-dimensional array and check the properties of the array."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"3B919700496D4C1783A7E029F466AD69","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"# TODO: Create a new brainpy array name t2\nt2 = ...\nprint('t2.ndim: {}'.format(t2.ndim))\nprint('t2.shape: {}'.format(t2.shape))\nprint('t2.size: {}'.format(t2.size))\nprint('t2.dtype: {}'.format(t2.dtype))\n","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"3AE8C2E255D04F138979F5B52F44130B","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"The array created by ``brainty.math`` will be stored in a JaxArray, which internally holds the JAX data format DeviceArray. 
If the user wants to unwrap the JaxArray
To convert an array to a dynamic variable, the user simply wraps the array with `brainpy.math.Variable`.
assignment \nAll incremental assignments in Python modify only the internal value of a dynamic variable, so you can use incremental assignments without worrying about updating dynamic variables."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"07A181673A6F462BA63B777A7249C6F7","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"# TODO: all the elements in v add 1\n...\nv","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"58F9AE02C4864815BEC9AF6598C043A3","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"#### `.value` assignment"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"FA504DB7EA63401B94B415438ED4DD9E","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"This is one of the most common operations for updating variables in place. We often need to assign an array of values to a dynamic variable when updating it, and a common scenario is to reset the value of a dynamic variable during an iterative update of the dynamics system. 
In this case, we can use the `.value` assignment operation to override the data of the dynamic variable v, which has direct access to the data stored in the JaxArray."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"3E2BFEF893FC4CE7AF8E0AB2730C55C3","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"# TODO: reset all the elements in v to 0\n...\nv","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"84C03E0741BB46D99640CDBC365319E2","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"#### `.update` assignment"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"622BA29D659D4595B73C3CBCC72B40E6","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"This method is functionally similar to `.value `assignment and is another method provided by BrainPy to override dynamic variables, which also requires that the shape and element types of the array be consistent with the dynamic variable."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"1E2D60ABDA214A7AB2C4576CB98CE4C2","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"# TODO: set v to be [3, 4, 5, 6]\n...\nv","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"C121EEA443624165B0062D62872139AF","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"## 3. 
Control flows \n### 3.1 If-else"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"008EF158FCF94E5FB60AE1913C1B9435","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"Compilation errors occur when conditional judgment depends on dynamic variables. Our error message will tell you about alternative solutions, so here are two ways to write a conditional statement that can be used instead of an if-else statement. \n\nFirst we check out the simple example that will occur compilation error:"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A706BD277BD144E9886F51B6FCAE39FF","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"class OddEvenCauseError(bp.BrainPyObject):\n def __init__(self):\n super(OddEvenCauseError, self).__init__()\n self.rand = bm.Variable(bm.random.random(1))\n self.a = bm.Variable(bm.zeros(1))\n\n def __call__(self):\n if self.rand < 0.5: \n self.a += 1\n else: \n self.a -= 1\n return self.a","outputs":[],"execution_count":null},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5A729A13ED464B9E938E32C236C28F3A","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"wrong_model = bm.jit(OddEvenCauseError())\n\ntry:\n wrong_model()\nexcept Exception as e:\n print(f\"{e.__class__.__name__}: {str(e)}\")","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"1540740D88BC4ABA950CBAB1ACE042A9","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"#### `brainpy.math.where()` \nThis function in NumPy corresponds to `numpy.where()`, where(condition, x, y) function 
evaluates the condition: it returns x if the condition is true, and y if it is false. We can rewrite the failing example above as:
You need to change this example to the `bm.ifelse` statement version:"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5B1A7AC922364B30830C3756D0BA8390","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"class OddEvenCond(bp.BrainPyObject):\n def __init__(self):\n super(OddEvenCond, self).__init__()\n self.rand = bm.Variable(bm.random.random(1))\n self.a = bm.Variable(bm.zeros(1))\n\n def __call__(self):\n # TODO: Use bm.ifelse() to fix the error\n ...\n return self.a","outputs":[],"execution_count":null},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"F29410229A6C4D9CA76509194E8C25EB","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"model = bm.jit(OddEvenCond())\nmodel()","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"B8A3EBFCA705424D9F1D38D659B6269F","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"### For loop"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"B48865C8FACF44EC8F7E552C236FFA75","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"In fact, BrainPy can write loops in Python mode. The user simply iterates over the sequence data and then operates on the iterated objects. This loop syntax is compatible with JIT compilation, but can lead to long tracing and compilation times. The following example is a class object that implement for loop in its function. 
"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"04232DAD33694B3D9221793C5C7DC4F2","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"class LoopSimple(bp.BrainPyObject):\n def __init__(self):\n super(LoopSimple, self).__init__()\n rng = bm.random.RandomState(123)\n self.seq = bm.Variable(rng.random(1000))\n self.res = bm.Variable(bm.zeros(1))\n\n def __call__(self):\n for s in self.seq:\n self.res += s\n return self.res.value","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"19AA93C1FFB84F29914269BFA4328600","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"By running the following code, we will find that the first compilation takes longer, and if the logic of the statements in the program is more complex, the compilation will take an intolerable amount of time."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"6620F79AC48848B2A47279707ACE0E5D","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"import time\n\ndef measure_time(f, return_res=False, verbose=True):\n t0 = time.time()\n r = f()\n t1 = time.time()\n if verbose:\n print(f'Result: {r}, Time: {t1 - t0}')\n return r if return_res else None\n\nmodel = bm.jit(LoopSimple())\n\n# First time will trigger compilation\nmeasure_time(model)\n\n# Second running\nmeasure_time(model)","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"DCA765116569448F8D50251CBA1D8A9B","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"#### `brainpy.math.for_loop()` \nWe speed up the code by 
using structured looping statements, you need to fill the blank in the code below:"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"C665C33D866743B7939EBFE48B77BF0F","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"class LoopStruct(bp.BrainPyObject):\n def __init__(self):\n super(LoopStruct, self).__init__()\n rng = bm.random.RandomState(123)\n self.seq = rng.random(1000)\n self.res = bm.Variable(bm.zeros(1))\n\n def __call__(self):\n # TODO: Use bm.for_loop() to complete the loop\n ...","outputs":[],"execution_count":null},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4512CDF5F55C4A8BA1D74B2DBC0067D3","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"model = bm.jit(LoopStruct())\n\nr = measure_time(model, verbose=False, return_res=True)\nr.shape","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"D6F31B36D58248DB8B266A7474D440F5","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65450f74b4c12f15a78b60ae"},"source":"## Solutions"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"ED82C241BD444FF294902C0CB8E06E23","notebookId":"65450f74b4c12f15a78b60ae","trusted":true},"source":"\n# Functional JIT compilation: \ngelu_jit = bm.jit(gelu)\n\n# Object-oriented JIT compilation:\nlr2 = bm.jit(lr2)\n\n# Create arrays:\nt2 = bm.array([[[0, 1, 2, 3], [1, 2, 3, 4], [4, 5, 6, 7]],\n [[0, 0, 0, 0], [-1, 1, -1, 1], [2, -2, 2, -2]]])\n\n\n# Get values of arrays:\nt2_value = t2.value\n\n# Convert to variable:\nv = bm.Variable(t)\n\n# Indexing and slicing:\nv[0] = 10\n\n# Augmented assignment:\nv += 1\n\n# .value assignment:\nv.value = bm.zeros(4, dtype=int)\n\n# 
.update assignment:\nv.update(bm.array([3, 4, 5, 6]))\n\n# where condition:\nclass OddEvenWhere(bp.BrainPyObject):\n def __init__(self):\n super(OddEvenWhere, self).__init__()\n self.rand = bm.Variable(bm.random.random(1))\n self.a = bm.Variable(bm.zeros(1))\n\n def __call__(self):\n self.a += bm.where(self.rand < 0.5, 1., -1.)\n return self.a\n\n# ifelse condition:\nclass OddEvenCond(bp.BrainPyObject):\n def __init__(self):\n super(OddEvenCond, self).__init__()\n self.rand = bm.Variable(bm.random.random(1))\n self.a = bm.Variable(bm.zeros(1))\n\n def __call__(self):\n self.a += bm.ifelse(self.rand[0] < 0.5,\n [1., -1.])\n return self.a\n\n# For loop:\nclass LoopStruct(bp.BrainPyObject):\n def __init__(self):\n super(LoopStruct, self).__init__()\n rng = bm.random.RandomState(123)\n self.seq = rng.random(1000)\n self.res = bm.Variable(bm.zeros(1))\n\n def __call__(self):\n def add(s):\n self.res += s\n return self.res.value\n\n return bm.for_loop(body_fun=add, operands=self.seq)","outputs":[],"execution_count":null}],"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python","nbconvert_exporter":"python","file_extension":".py","version":"3.5.2","pygments_lexer":"ipython3"}},"nbformat":4,"nbformat_minor":2} -------------------------------------------------------------------------------- /作业/W1_2 Hodg kin–Huxley神经元 基础作业.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"markdown","metadata":{"id":"95A22C27FB35477E9EC447235ADDBD25","notebookId":"6547318fb4c12f15a7a5f9f3","runtime":{"status":"default","execution_status":null,"is_visible":false},"scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"jupyter":{}},"source":"# Day 2 基础作业-HH 
model"},{"cell_type":"markdown","metadata":{"id":"5F43A84D0F9D460AA5E51A9BF9D7F2A4","notebookId":"6547318fb4c12f15a7a5f9f3","runtime":{"status":"default","execution_status":null,"is_visible":false},"scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"jupyter":{}},"source":"In this section, we are going to implement a Hodgkin-Huxley (HH) model. \n\nPlease follow the comment instruction to fill the *todo* blanks of the following code. Then run all the cells to see the simulation result."},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"EBC501FBC6274D6BA1A74B63925874D0","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"6547318fb4c12f15a7a5f9f3"},"source":"The Hodgkin-Huxley (HH) model is a continuous-time dynamical system. It is one of the most successful mathematical models of a complex biological process that has ever been formulated. Changes of the membrane potential influence the conductance of different channels, elaborately modeling the neural activities in biological systems. 
Mathematically, the model is given by: \n\n$$ \n\\begin{aligned} \n    C_m \\frac {dV} {dt} &= -(\\bar{g}_{Na} m^3 h (V -E_{Na}) \n    + \\bar{g}_K n^4 (V-E_K) + g_{leak} (V - E_{leak})) + I(t)  \\quad\\quad(1) \\\\ \n    \\frac {dx} {dt} &= \\alpha_x (1-x)  - \\beta_x x, \\quad x\\in {\\rm{\\{m, h, n\\}}} \\quad\\quad(2) \\\\ \n    &\\alpha_m(V) = \\frac {0.1(V+40)}{1-\\exp(\\frac{-(V + 40)} {10})}  \\quad\\quad(3) \\\\ \n    &\\beta_m(V) = 4.0 \\exp(\\frac{-(V + 65)} {18})  \\quad\\quad(4) \\\\ \n    &\\alpha_h(V) = 0.07 \\exp(\\frac{-(V+65)}{20})  \\quad\\quad(5) \\\\ \n    &\\beta_h(V) = \\frac 1 {1 + \\exp(\\frac{-(V + 35)} {10})}  \\quad\\quad(6) \\\\ \n    &\\alpha_n(V) = \\frac {0.01(V+55)}{1-\\exp(-(V+55)/10)}  \\quad\\quad(7) \\\\ \n    &\\beta_n(V) = 0.125 \\exp(\\frac{-(V + 65)} {80})  \\quad\\quad(8) \\\\ \n\\end{aligned} \n$$ \n\nwhere $V$ is the membrane potential, $C_m$ is the membrane capacitance per unit area, $E_K$ and $E_{Na}$ are the potassium and sodium reversal potentials, respectively, $E_{leak}$ is the leak reversal potential, $\\bar{g}_K$ and $\\bar{g}_{Na}$ are the potassium and sodium conductance per unit area, respectively, and $g_{leak}$ is the leak conductance per unit area. Because the potassium and sodium channels are voltage-sensitive, according to the biological experiments, $m$, $n$ and $h$ are used to simulate the activation of the channels. Specifically, $n$ measures the activation of potassium channels, and $m$ and $h$ measure the activation and inactivation of sodium channels, respectively. $\\alpha_{x}$ and $\\beta_{x}$ are rate constants for the ion channel $x$ and depend exclusively on the membrane potential."},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"0F5BA555FCCD4C01ACFC11EA9DF109F0","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"6547318fb4c12f15a7a5f9f3"},"source":"To implement the HH model, variables should be specified. 
According to the above equations, the following state variables change with respect to time: \n- `V`: the membrane potential \n- `m`: the activation of sodium channels \n- `h`: the inactivation of sodium channels \n- `n`: the activation of potassium channels \n\nBesides, the spiking state and the last spiking time can also be recorded for statistic analysis: \n- ``spike``: whether a spike is produced \n- ``t_last_spike``: the last spiking time \n\nBased on these state variables, the HH model can be implemented as below."},{"cell_type":"code","metadata":{"collapsed":false,"id":"B0A614F8077E4E16B20CF7FA1D4014CA","notebookId":"6547318fb4c12f15a7a5f9f3","scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"trusted":true,"jupyter":{}},"source":"import brainpy as bp\nimport brainpy.math as bm \n\nclass HH(bp.dyn.NeuDyn):\n def __init__(self, size, ENa=50., gNa=120., EK=-77., gK=36., EL=-54.387, gL=0.03, V_th=0., C=1.0, T=6.3):\n super().__init__(size=size)\n\n # 定义神经元参数\n self.ENa = ENa\n self.EK = EK\n self.EL = EL\n self.gNa = gNa\n self.gK = gK\n self.gL = gL\n self.C = C\n self.V_th = V_th\n self.T_base = 6.3\n self.phi = 3.0 ** ((T - self.T_base) / 10.0)\n\n # 初始化变量\n # TODO: 初始化膜电压self.V:统一设置为-70.68,动态变量大小为self.num\n # self.V = \n # TODO: 初始化门控变量self.m:统一设置为0.0266,动态变量大小为self.num\n # self.m = \n # TODO: 初始化门控变量self.h:统一设置为0.772,动态变量大小为self.num\n # self.h = \n # TODO: 初始化门控变量self.n:统一设置为0.235,动态变量大小为self.num\n # self.n = \n # TODO: 初始化上一次脉冲发放时间记录self.t_last_spike,记录神经元上一次发放脉冲的时间,统一初始化为-1e7\n # self.t_last_spike =\n # TODO: 初始化脉冲发放状态self.spike:bool类型,如果神经元正处于发放状态则为1,否则为0\n # self.spike = \n\n # 定义积分函数\n self.integral = bp.odeint(f=self.derivative, method='exp_auto') \n \n # 定义联合微分方程\n @property\n def derivative(self):\n # TODO: 将多个微分方程联合为一个,以便同时积分(使用brainpy.JointEq() )\n return ...\n\n # 定义膜电位关于时间变化的微分方程\n def dV(self, V, t, m, h, n, Iext):\n I_Na = (self.gNa * m ** 3.0 * h) * (V - self.ENa)\n I_K = (self.gK * n ** 4.0) * (V - self.EK)\n I_leak = self.gL * 
(V - self.EL)\n dVdt = (- I_Na - I_K - I_leak + Iext) / self.C\n return dVdt\n\n # 定义门控变量m关于时间变化的微分方程\n def dm(self, m, t, V):\n alpha = 0.1 * (V + 40) / (1 - bm.exp(-(V + 40) / 10))\n beta = 4.0 * bm.exp(-(V + 65) / 18)\n dmdt = alpha * (1 - m) - beta * m\n return self.phi * dmdt\n \n # 定义门控变量h关于时间变化的微分方程\n def dh(self, h, t, V):\n alpha = 0.07 * bm.exp(-(V + 65) / 20.)\n beta = 1 / (1 + bm.exp(-(V + 35) / 10))\n dhdt = alpha * (1 - h) - beta * h\n return self.phi * dhdt\n\n # 定义门控变量n关于时间变化的微分方程\n def dn(self, n, t, V):\n alpha = 0.01 * (V + 55) / (1 - bm.exp(-(V + 55) / 10))\n beta = 0.125 * bm.exp(-(V + 65) / 80)\n dndt = alpha * (1 - n) - beta * n\n return self.phi * dndt\n\n def update(self, x=None):\n t = bp.share.load('t')\n dt = bp.share.load('dt')\n # TODO: 更新变量V, m, h, n, 暂存在V, m, h, n中\n V, m, h, n = ...\n\n #判断是否发生动作电位\n self.spike.value = bm.logical_and(self.V < self.V_th, V >= self.V_th)\n # 更新最后一次脉冲发放时间\n self.t_last_spike.value = bm.where(self.spike, t, self.t_last_spike)\n\n # TODO: 更新变量V, m, h, n的值\n ...","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"id":"4872F846941640F28B6F52BC6089DC63","notebookId":"6547318fb4c12f15a7a5f9f3","runtime":{"status":"default","execution_status":null,"is_visible":false},"scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"jupyter":{}},"source":"After finishing the code completion, you can run the following code to simulate the HH model you just create!"},{"cell_type":"code","metadata":{"collapsed":false,"id":"527D100CF41E474DB22DEA67BC60B380","notebookId":"6547318fb4c12f15a7a5f9f3","scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"trusted":true,"jupyter":{}},"source":"current, length = bp.inputs.section_input(\n values=[0., bm.asarray([1., 2., 4., 8., 10., 15.]), 0.],\n durations=[10, 2, 25],\n return_length=True\n)\n\nhh_neurons = HH(current.shape[1])\n\nrunner = bp.DSRunner(hh_neurons, monitors=['V', 'm', 'h', 
'n'])\nrunner.run(inputs=current)","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"id":"F90AE8F82F974D429630E38221F202B1","notebookId":"6547318fb4c12f15a7a5f9f3","runtime":{"status":"default","execution_status":null,"is_visible":false},"scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"jupyter":{}},"source":"## Results visualization"},{"cell_type":"code","metadata":{"collapsed":false,"id":"5957EF6803A64AD29879CBA6A3DB4679","notebookId":"6547318fb4c12f15a7a5f9f3","scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"trusted":true,"jupyter":{}},"source":"import numpy as np\nimport matplotlib.pyplot as plt\n\nbp.visualize.line_plot(runner.mon.ts, runner.mon.V, ylabel='V (mV)', plot_ids=np.arange(current.shape[1]))\n\nplt.plot(runner.mon.ts, bm.where(current[:, -1]>0, 10, 0) - 90.)\nplt.figure()\nplt.plot(runner.mon.ts, runner.mon.m[:, -1])\nplt.plot(runner.mon.ts, runner.mon.h[:, -1])\nplt.plot(runner.mon.ts, runner.mon.n[:, -1])\nplt.legend(['m', 'h', 'n'])\nplt.xlabel('Time (ms)')","outputs":[],"execution_count":null},{"cell_type":"markdown","metadata":{"id":"0667180D42A44079AD65C374A6B90AC1","notebookId":"6547318fb4c12f15a7a5f9f3","runtime":{"status":"default","execution_status":null,"is_visible":false},"scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"jupyter":{}},"source":"# Answer"},{"cell_type":"code","metadata":{"collapsed":false,"id":"29E42CE360A940DFB15584583DDC5626","notebookId":"6547318fb4c12f15a7a5f9f3","scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"trusted":true,"jupyter":{}},"source":"import brainpy as bp\nimport brainpy.math as bm \n\nclass HH(bp.dyn.NeuDyn):\n  def __init__(self, size, ENa=50., gNa=120., EK=-77., gK=36., EL=-54.387, gL=0.03, V_th=0., C=1.0, T=6.3):\n    super(HH, self).__init__(size=size)\n\n    # 定义神经元参数\n    self.ENa = ENa\n    self.EK = EK\n    self.EL = EL\n    self.gNa = gNa\n    self.gK = gK\n    self.gL = gL\n    self.C = C\n    self.V_th = V_th\n    self.T_base = 6.3\n    self.phi = 3.0 ** 
((T - self.T_base) / 10.0)\n\n # 定义神经元变量\n self.V = bm.Variable(-70.68 * bm.ones(self.num))\n self.m = bm.Variable(0.0266 * bm.ones(self.num))\n self.h = bm.Variable(0.772 * bm.ones(self.num))\n self.n = bm.Variable(0.235 * bm.ones(self.num))\n self.input = bm.Variable(bm.zeros(self.num))\n self.spike = bm.Variable(bm.zeros(self.num, dtype=bool))\n self.t_last_spike = bm.Variable(bm.ones(self.num) * -1e7)\n\n # 定义积分函数\n self.integral = bp.odeint(f=self.derivative, method='exp_auto') \n \n @property\n def derivative(self):\n return bp.JointEq(self.dV, self.dm, self.dh, self.dn)\n\n def dV(self, V, t, m, h, n, Iext):\n I_Na = (self.gNa * m ** 3.0 * h) * (V - self.ENa)\n I_K = (self.gK * n ** 4.0) * (V - self.EK)\n I_leak = self.gL * (V - self.EL)\n dVdt = (- I_Na - I_K - I_leak + Iext) / self.C\n return dVdt\n\n def dm(self, m, t, V):\n alpha = 0.1 * (V + 40) / (1 - bm.exp(-(V + 40) / 10))\n beta = 4.0 * bm.exp(-(V + 65) / 18)\n dmdt = alpha * (1 - m) - beta * m\n return self.phi * dmdt\n \n def dh(self, h, t, V):\n alpha = 0.07 * bm.exp(-(V + 65) / 20.)\n beta = 1 / (1 + bm.exp(-(V + 35) / 10))\n dhdt = alpha * (1 - h) - beta * h\n return self.phi * dhdt\n\n def dn(self, n, t, V):\n alpha = 0.01 * (V + 55) / (1 - bm.exp(-(V + 55) / 10))\n beta = 0.125 * bm.exp(-(V + 65) / 80)\n dndt = alpha * (1 - n) - beta * n\n return self.phi * dndt\n\n def update(self, x=0.):\n t = bp.share.load('t')\n dt = bp.share.load('dt')\n #计算更新后的值\n V, m, h, n = self.integral(self.V, self.m, self.h, self.n, t, x, dt=dt)\n\n #判断是否发生动作电位\n self.spike.value = bm.logical_and(self.V < self.V_th, V >= self.V_th)\n self.t_last_spike.value = bm.where(self.spike, t, self.t_last_spike)\n\n # 更新变量的值\n self.V.value = V\n self.m.value = m\n self.h.value = h\n self.n.value = n","outputs":[],"execution_count":14}],"metadata":{"kernelspec":{"language":"python","display_name":"Python 
3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python","nbconvert_exporter":"python","file_extension":".py","version":"3.5.2","pygments_lexer":"ipython3"}},"nbformat":4,"nbformat_minor":2} -------------------------------------------------------------------------------- /作业/W1_2 Hodgkin–Huxley神经元 大作业 优秀答案.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"1C8572F312B34546B684743738BE4D96","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65658a6b05870bfbf2a617f9"},"source":"# Neuron Model for Thalamocortical cells"},{"cell_type":"code","metadata":{"collapsed":false,"ExecuteTime":{"end_time":"2023-11-07T05:27:43.106431600Z","start_time":"2023-11-07T05:27:40.594415800Z"},"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A51D7B0C9E944949B4CDB9EAF4FDF402","notebookId":"65658a6b05870bfbf2a617f9","trusted":true},"source":"import brainpy as bp\nimport brainpy.math as bm\nimport numpy as np\nimport matplotlib.pyplot as plt","outputs":[],"execution_count":1},{"cell_type":"code","metadata":{"id":"9FA9688E5CF74FE29D052C0B0E44FCAB","notebookId":"65658a6b05870bfbf2a617f9","jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"trusted":true},"source":"bm.exp(2)","outputs":[{"output_type":"execute_result","data":{"text/plain":"Array(7.389056, dtype=float32, weak_type=True)"},"metadata":{},"execution_count":27}],"execution_count":27},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"6A48C9A1700349008B2A70E41E92CB25","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65658a6b05870bfbf2a617f9"},"source":"Here we are trying to reproduce 
the thalamocortical model in the following papers: \n\n- Li G, Henriquez CS, Fröhlich F (2017) Unified thalamic model generates multiple distinct oscillations with state-dependent entrainment by stimulation. PLoS Comput Biol 13(10): e1005797. https://doi.org/10.1371/journal.pcbi.1005797 \n- Gu Q L, Lam N H, Wimmer R D, et al. Computational circuit mechanisms underlying thalamic control of attention[J]. bioRxiv, 2021."},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A74225AE21874F5F85BFA370D79D09A7","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65658a6b05870bfbf2a617f9"},"source":"## The model"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"1EB2D1766CDB4B5084813F5BADB76EA4","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65658a6b05870bfbf2a617f9"},"source":"Following previous “point” models of thalamic cells, all single cell models in the thalamic network contained one single compartment and multiple ionic currents described by the Hodgkin-Huxley formulism. The current balance equation was given by: \n\n$$ \nC_m \\frac{d V}{d t}=-g_L\\left(V-E_L\\right)-g_{K L}\\left(V-E_{K L}\\right)-\\sum I^{i n t}-10^{-3} \\sum \\frac{I^{s n}}{A}+10^{-3} \\frac{I_{a p p}}{A} \n$$ \n\n\nwhere $Cm = 1μF/cm^2$ is the membrane capacitance for all four types of neurons, $g_L$ is the leakage conductance (nominal value: $gL = 0.01 mS/cm^2$ for all four types of cells) and $g_{KL}$ is the potassium leak conductance modulated by both ACh and NE. $E_L$ is the leakage reversal potential ($E_L$ = −70 mV for both HTC cells), and $E_{KL}$ is the reversal potential for the potassium leak current ($E_{KL}$ = −90 mV for all neurons). 
$I_{int}$ and $I_{syn}$ are the intrinsic ionic currents (in $μA/cm^2$) and synaptic currents (in $nA$) respectively and $I_{app}$ is the externally applied current injection (in $nA$). The following total membrane area (A) was used to normalize the synaptic and externally applied currents in Eq: HTC cells: 2.9×10−4 $cm^2$."},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"071963F01B774E729D567AE3F6009B89","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65658a6b05870bfbf2a617f9"},"source":"HTC cells contained the following six active ionic currents: \n\n- a spike generating fast sodium current (INa), ``bp.dyn.INa_Ba2002`` \n- a delayed rectifier potassium current (IDR), ``bp.dyn.IKDR_Ba2002`` \n- a hyperpolarization-activated cation current (IH), ``bp.dyn.Ih_HM1992`` \n- a high-threshold L-type Ca2+ current (ICa/L), ``bp.dyn.ICaL_IS2008`` \n- a Ca2+- dependent potassium current (IAHP), ``bp.dyn.IAHP_De1994`` \n- a Ca2+- activated nonselective cation current (ICAN). ``bp.dyn.ICaN_IS2008`` \n\nIn addition, both TC cells included \n- a regular low-threshold T-type Ca2+ current (ICa/T), ``bp.dyn.ICaT_HM1992`` \n- and a high-threshold T-type Ca2+ current (ICa/HT); ``bp.dyn.ICaHT_HM1992`` \n\nTo obtain the high-threshold T-type current ICa/HT, both the activation and inactivation of the ICa/T current was shifted by 28 mV, similar to a previous TC modeling study. "},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"03AA02FD2F8E4AAF830B9CE64D6D7F08","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65658a6b05870bfbf2a617f9"},"source":"## Task 1 \n\n1. implement the above HTC neuron model \n2. 
give the stimulus, and reproduce the rebound bursting firing pattern of the HTC cell"},{"cell_type":"code","metadata":{"collapsed":false,"ExecuteTime":{"end_time":"2023-11-07T05:37:16.711153100Z","start_time":"2023-11-07T05:37:16.695370700Z"},"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A4B1423AF3E34ABDA5246FD36B677ACD","notebookId":"65658a6b05870bfbf2a617f9","trusted":true},"source":"class HTC(bp.dyn.CondNeuGroupLTC):\n def __init__(self,size,gKL=0.01, V_initializer=bp.init.OneInit(-65.)):\n super().__init__(size,A=2.9e-4,V_initializer=V_initializer,V_th=20.)\n self.IL = bp.dyn.IL(size,g_max=0.01,E=-70.)\n self.INa = bp.dyn.INa_Ba2002(size,V_sh=-30)\n self.Ih = bp.dyn.Ih_HM1992(size,g_max=0.01,E=-43)\n\n self.Ca = bp.dyn.CalciumDetailed(size,C_rest=5e-5,tau=10.,d=0.5)\n self.Ca.add_elem(bp.dyn.ICaL_IS2008(size,g_max=0.5))\n self.Ca.add_elem(bp.dyn.ICaN_IS2008(size,g_max=0.5))\n self.Ca.add_elem(bp.dyn.ICaT_HM1992(size,g_max=2.1))\n self.Ca.add_elem(bp.dyn.ICaHT_HM1992(size,g_max=3.0))\n\n self.K = bp.dyn.PotassiumFixed(size,E=-90.)\n self.K.add_elem(bp.dyn.IKDR_Ba2002v2(size,V_sh=-30.,phi=0.25))\n self.K.add_elem(bp.dyn.IK_Leak(size,g_max=gKL))\n\n self.KCa = bp.dyn.MixIons(self.K,self.Ca)\n self.KCa.add_elem(bp.dyn.IAHP_De1994v2(size))\n","outputs":[],"execution_count":2},{"cell_type":"code","metadata":{"id":"E44484EF0BE94EEFA4128CB6D9CE66DD","notebookId":"65658a6b05870bfbf2a617f9","jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"trusted":true},"source":"htc = HTC(1)\nrunner = bp.DSRunner(htc,monitors={'v':htc.V})\nI = -30/1e3/2.9e-4*1e-3 # input current = -30pA\ninputs = np.ones(20000)*I\nrunner.run(inputs=inputs)\nbp.visualize.line_plot(runner.mon.ts,runner.mon['v'],legend='v',show=True)","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/20000 
[00:00","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":3},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"BCD2386FA9A24FADA3AE61CBB01EFF54","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"65658a6b05870bfbf2a617f9"},"source":"## Task 2 \n\n\nThe next task is to implement the ``TC neuron model`` used in the following paper: \n\n- Gu Q L, Lam N H, Wimmer R D, et al. Computational circuit mechanisms underlying thalamic control of attention[J]. bioRxiv, 2021. \n\n![](figs/tc-fig1.png) \n![](figs/tc-fig2.png) \n![](figs/tc-fig3.png) \n"},{"cell_type":"code","metadata":{"id":"28AC1E501730449483BFCFED7F9ED0EE","notebookId":"65658a6b05870bfbf2a617f9","jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"trusted":true},"source":"class CaLT(bp.dyn.IonChannel):\n master_type = bp.dyn.HHTypedNeuron\n def __init__(self,size,g_max=1.4,E=120.,T=36.0,method='exp_auto'):\n super().__init__(size)\n self.g_max = g_max\n self.E = E\n self.T = T\n self.m = bm.Variable(bm.zeros(size))\n self.h = bm.Variable(bm.zeros(size))\n self.d = bm.Variable(bm.zeros(size))\n\n self.integral = bp.odeint(bp.JointEq(self.dm,self.dh,self.dd),method=method)\n \n def dm(self,m,t,V):\n m_Inf = 1/(1+bm.exp((V+65.)/-7.8))\n phi_m = 5**((self.T-24.)/10.)\n tau_Ca = m_Inf*(1+bm.exp((V+30.8)/-13.5))/phi_m\n return (m_Inf-m)/tau_Ca\n\n def dh(self,h,t,d,V):\n phi_h = 3**((self.T-24.)/10.)\n a = bm.exp((V+162.3)/-17.8)*phi_h\n k = bm.exp((V+85.5)/2)/(6.3**0.5)-0.5\n b = a*k\n return a*(1-h-d)-b*h\n\n def dd(self,d,t,h,V):\n phi_h = 3**((self.T-24.)/10.)\n k = bm.exp((V+85.5)/2)/(6.3**0.5)-0.5\n a = (1+bm.exp(V+39.4)/30)/(240.*(1+k))*phi_h\n b = a*k\n return b*(1-h-d)-a*d\n \n def reset_state(self,V,batch_or_ormode=None,*args,**kwargs):\n self.m = bp.init.variable_(bm.zeros,self.num,batch_or_mode)\n self.h = 
bp.init.variable_(bm.zeros,self.num,batch_or_mode)\n self.d = bp.init.variable_(bm.zeros,self.num,batch_or_mode)\n\n def update(self,V,*args,**kwargs):\n t = bp.share.load('t')\n dt = bp.share.load('dt')\n self.m.value,self.h.value,self.d.value = self.integral(self.m,self.h,self.d,t,V,dt=dt)\n \n def current(self,V,*args,**kwargs):\n return self.g_max * self.m**3 * self.h * (V-self.E)\n\nclass CaH(bp.dyn.IonChannel):\n master_type = bp.dyn.Calcium\n def __init__(self,size,g_max=0.05,E=-43.,T=36.0,k1=25,k2=4e-4,k3=0.1,k4=1e-3,method='exp_auto'):\n super().__init__(size)\n self.g_max = g_max\n self.E = E\n self.T = T\n self.k1 = k1\n self.k2 = k2\n self.k3 = k3\n self.k4 = k4\n self.O1 = bm.Variable(bm.zeros(size))\n self.O2 = bm.Variable(bm.zeros(size))\n self.p = bm.Variable(bm.zeros(size))\n\n self.integral = bp.odeint(bp.JointEq(self.dO1,self.dO2,self.dp),method=method)\n\n def dO1(self,O1,t,O2,p,V):\n h_Inf = 1/(1+bm.exp((V+75)/5.5))\n phi_m = 3**((self.T-24)/10)\n tau_h = 1/phi_m*(20+1000/(bm.exp((V+71.5)/14.2)+bm.exp((V+89)/11.6)))\n a = h_Inf/tau_h\n b = (1-h_Inf)/tau_h\n return a*(1-O1-O2)-b*O1+self.k4*O2-self.k3*self.p*O1\n\n def dO2(self,O2,t,O1,p):\n return -1e-3*O2+0.1*p*O1\n \n def dp(self,p,t,C):\n return -1*self.k2*p+self.k1*C*(1-p)\n\n def reset_state(self,V,C,batch_or_ormode=None,*args,**kwargs):\n self.O1 = bp.init.variable_(bm.zeros,self.num,batch_or_mode)\n self.O2 = bp.init.variable_(bm.zeros,self.num,batch_or_mode)\n self.p = bp.init.variable_(bm.zeros,self.num,batch_or_mode)\n \n def update(self,V,C,*args,**kwargs):\n t = bp.share.load('t')\n dt = bp.share.load('dt')\n self.O1.value,self.O2.value,self.p.value = self.integral(self.O1,self.O2,self.p,t,V,C,dt=dt)\n\n def current(self,V,*args,**kwargs):\n return self.g_max * (self.O1+2*self.O2) * (V-self.E)\n\nclass TC(bp.dyn.CondNeuGroupLTC):\n def __init__(self,size,V_initializer=bp.init.OneInit(-65.)):\n super().__init__(size,V_initializer=V_initializer)\n self.IL = 
bp.dyn.IL(size,g_max=0.05,E=-90.)\n self.INa = bp.dyn.INa_Ba2002(size,g_max=30.,V_sh=-55.)\n self.IK = bp.dyn.IK_TM1991(size,E=-95.,g_max=2.)\n self.ICa = CaLT(size)\n #self.Ca = bp.dyn.CalciumDetailed(size)\n #self.Ca.add_elem(CaH(size))\n","outputs":[],"execution_count":64},{"cell_type":"code","metadata":{"id":"1481BBE445134DFF9FBDB7F9E8DE995B","notebookId":"65658a6b05870bfbf2a617f9","jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"trusted":true,"hide_input":false},"source":"tc = TC(1)\nrunner = bp.DSRunner(tc, monitors={'v': tc.V,'ICa_m':tc.ICa.m,'ICa_h':tc.ICa.h,'ICa_d':tc.ICa.d})\n#runner = bp.DSRunner(tc, monitors={'v': tc.V})\nI = 10\ninputs = np.ones(2000)*I\nrunner.run(inputs=inputs)\nbp.visualize.line_plot(runner.mon.ts, runner.mon['v'], legend='v', show=True)\nbp.visualize.line_plot(runner.mon.ts, runner.mon['ICa_m'], legend='ICa_m', show=True)\nbp.visualize.line_plot(runner.mon.ts, runner.mon['ICa_h'], legend='ICa_h', show=True)\nbp.visualize.line_plot(runner.mon.ts, runner.mon['ICa_d'], legend='ICa_d', show=True)\nm_Inf = 1/(1+bm.exp((runner.mon['v']+65.)/-7.8))\nphi_m = 5**((36-24.)/10.)\nphi_h = 3**((36-24.)/10.)\ntau_Ca = m_Inf*(1+bm.exp((runner.mon['v']+30.8)/-13.5))/phi_m\n\nbp.visualize.line_plot(runner.mon.ts, 1.4*runner.mon['ICa_m']**3*runner.mon['ICa_h']*(runner.mon['v']-120), legend='ICa', show=True)","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/2000 [00:00","text/html":""},"metadata":{"needs_background":"light"}},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":65}],"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python","nbconvert_exporter":"python","file_extension":".py","version":"3.5.2","pygments_lexer":"ipython3"}},"nbformat":4,"nbformat_minor":5} -------------------------------------------------------------------------------- /作业/W1_2 Hodgkin–Huxley神经元 大作业.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"1C8572F312B34546B684743738BE4D96","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"6549ced0a0df0a3c9c06dd72"},"source":"# Neuron Model for Thalamocortical cells"},{"cell_type":"code","metadata":{"collapsed":false,"ExecuteTime":{"end_time":"2023-11-07T05:27:43.106431600Z","start_time":"2023-11-07T05:27:40.594415800Z"},"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A51D7B0C9E944949B4CDB9EAF4FDF402","notebookId":"6549ced0a0df0a3c9c06dd72","trusted":true},"source":"import brainpy as bp","outputs":[],"execution_count":1},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"6A48C9A1700349008B2A70E41E92CB25","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"6549ced0a0df0a3c9c06dd72"},"source":"Here we are trying to reproduce the thalamocortical model in the following papers: \n\n- Li G, Henriquez CS, Fröhlich F (2017) Unified thalamic model generates multiple distinct oscillations with state-dependent entrainment by stimulation. PLoS Comput Biol 13(10): e1005797. https://doi.org/10.1371/journal.pcbi.1005797 \n- Gu Q L, Lam N H, Wimmer R D, et al. 
Computational circuit mechanisms underlying thalamic control of attention[J]. bioRxiv, 2021."},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A74225AE21874F5F85BFA370D79D09A7","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"6549ced0a0df0a3c9c06dd72"},"source":"## The model"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"1EB2D1766CDB4B5084813F5BADB76EA4","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"6549ced0a0df0a3c9c06dd72"},"source":"Following previous “point” models of thalamic cells, all single cell models in the thalamic network contained one single compartment and multiple ionic currents described by the Hodgkin-Huxley formulism. The current balance equation was given by: \n\n$$ \nC_m \\frac{d V}{d t}=-g_L\\left(V-E_L\\right)-g_{K L}\\left(V-E_{K L}\\right)-\\sum I^{i n t}-10^{-3} \\sum \\frac{I^{s n}}{A}+10^{-3} \\frac{I_{a p p}}{A} \n$$ \n\n\nwhere $Cm = 1μF/cm^2$ is the membrane capacitance for all four types of neurons, $g_L$ is the leakage conductance (nominal value: $gL = 0.01 mS/cm^2$ for all four types of cells) and $g_{KL}$ is the potassium leak conductance modulated by both ACh and NE. $E_L$ is the leakage reversal potential ($E_L$ = −70 mV for both HTC cells), and $E_{KL}$ is the reversal potential for the potassium leak current ($E_{KL}$ = −90 mV for all neurons). $I_{int}$ and $I_{syn}$ are the intrinsic ionic currents (in $μA/cm^2$) and synaptic currents (in $nA$) respectively and $I_{app}$ is the externally applied current injection (in $nA$). 
The following total membrane area (A) was used to normalize the synaptic and externally applied currents in Eq: HTC cells: 2.9×10−4 $cm^2$."},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"071963F01B774E729D567AE3F6009B89","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"6549ced0a0df0a3c9c06dd72"},"source":"HTC cells contained the following six active ionic currents: \n\n- a spike generating fast sodium current (INa), ``bp.dyn.INa_Ba2002`` \n- a delayed rectifier potassium current (IDR), ``bp.dyn.IKDR_Ba2002`` \n- a hyperpolarization-activated cation current (IH), ``bp.dyn.Ih_HM1992`` \n- a high-threshold L-type Ca2+ current (ICa/L), ``bp.dyn.ICaL_IS2008`` \n- a Ca2+- dependent potassium current (IAHP), ``bp.dyn.IAHP_De1994`` \n- a Ca2+- activated nonselective cation current (ICAN). ``bp.dyn.ICaN_IS2008`` \n\nIn addition, both TC cells included \n- a regular low-threshold T-type Ca2+ current (ICa/T), ``bp.dyn.ICaT_HM1992`` \n- and a high-threshold T-type Ca2+ current (ICa/HT); ``bp.dyn.ICaHT_HM1992`` \n\nTo obtain the high-threshold T-type current ICa/HT, both the activation and inactivation of the ICa/T current was shifted by 28 mV, similar to a previous TC modeling study. "},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"03AA02FD2F8E4AAF830B9CE64D6D7F08","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"6549ced0a0df0a3c9c06dd72"},"source":"## Task 1 \n\n1. implement the above HTC neuron model \n2. 
give the stimulus, and reproduce the rebound bursting firing pattern of the HTC cell"},{"cell_type":"code","metadata":{"collapsed":false,"ExecuteTime":{"end_time":"2023-11-07T05:37:16.711153100Z","start_time":"2023-11-07T05:37:16.695370700Z"},"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A4B1423AF3E34ABDA5246FD36B677ACD","notebookId":"6549ced0a0df0a3c9c06dd72","trusted":true},"source":"class HTC(bp.dyn.CondNeuGroupLTC):\n pass","outputs":[],"execution_count":2},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"BCD2386FA9A24FADA3AE61CBB01EFF54","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"6549ced0a0df0a3c9c06dd72"},"source":"## Task 2 \n\n\nThe next task is to implement the ``TC neuron model`` used in the following paper: \n\n- Gu Q L, Lam N H, Wimmer R D, et al. Computational circuit mechanisms underlying thalamic control of attention[J]. bioRxiv, 2021. 
\n\n![](figs/tc-fig1.png) \n![](figs/tc-fig2.png) \n![](figs/tc-fig3.png) \n"}],"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python","nbconvert_exporter":"python","file_extension":".py","version":"3.5.2","pygments_lexer":"ipython3"}},"nbformat":4,"nbformat_minor":5} -------------------------------------------------------------------------------- /作业/W1_2 Hodgkin–Huxley神经元 进阶作业.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"markdown","metadata":{"id":"5E26ADFB269D45FABC0223BD1463282B","notebookId":"654731a4b4c12f15a7a5fc1f","runtime":{"status":"default","execution_status":null,"is_visible":false},"scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"jupyter":{}},"source":"# Day 2 进阶作业-HH model \n\nIn this section, we try to understand how to build conductance-based biophysical neuron models. \n\nPlease follow the comment instruction to fill the *todo* blanks of the following code. 
Then run all the cells to see the simulation result."},{"cell_type":"code","metadata":{"collapsed":false,"id":"0E2419D0D67748C4A403D86E8FF46E9F","notebookId":"654731a4b4c12f15a7a5fc1f","scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"trusted":true,"ExecuteTime":{"end_time":"2023-11-05T04:17:16.146798500Z","start_time":"2023-11-05T04:17:16.117304400Z"},"jupyter":{}},"source":"import numpy as np\n\nimport brainpy as bp\nimport brainpy.math as bm","outputs":[],"execution_count":17},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"0E98C95518804B04A68B30517417C2F9","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"## ``master_type`` organizes structures between neurons and ion channels "},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5D85B950EA9C45A3B0E7864B8EE0002E","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"``master_type`` determines what information will be passed into ``.reset_state()`` and ``update()`` function in a model."},{"cell_type":"code","metadata":{"collapsed":false,"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4EC7D64F4413453E8A2AAA255A3E26FA","notebookId":"654731a4b4c12f15a7a5fc1f","trusted":true},"source":"class IK(bp.dyn.IonChannel):\n master_type = bp.dyn.CondNeuGroup\n \n def update(self, V, *args, **kwargs):\n pass\n \n def reset_state(self, V, *args, **kwargs):\n pass","outputs":[],"execution_count":18},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"21423718EEF74EBE8339E18D2DD981AD","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"For the above ``IK`` model, its 
``master_type: bp.dyn.CondNeuGroup`` will give ``V`` variable into this node. Therefore, ``IK`` model can utilize ``V`` to update or reset its states. "},{"cell_type":"code","metadata":{"collapsed":false,"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"E3BB82A89B20456983C0CCE92515A5D4","notebookId":"654731a4b4c12f15a7a5fc1f","trusted":true},"source":"class ICa(bp.dyn.IonChannel):\n master_type = bp.dyn.Calcium\n \n def update(self, V, C, E, *args, **kwargs):\n pass\n \n def reset_state(self, V, C, E, *args, **kwargs):\n pass","outputs":[],"execution_count":19},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"1A0AF692B85A4CC7BBA24AB8329A5E34","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"For ``ICa`` class, its ``master_type (bp.dyn.Calcium)`` will deliver the concentration of Calcium ``C`` and the reversal potential of Calcium ion ``E`` into this node. Moreover, since the ``master_type`` of ``bp.dyn.Calcium`` is ``bp.dyn.CondNeuGroup``, it will inherit the passing of ``bp.dyn.CondNeuGroup`` and deliver ``V`` into ``ICa`` class too. 
"},{"cell_type":"code","metadata":{"collapsed":false,"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"56388C240BE1479DA52C262FEE97DF97","notebookId":"654731a4b4c12f15a7a5fc1f","trusted":true},"source":"class ICaNa(bp.dyn.IonChannel):\n master_type = bp.mixin.JointType[bp.dyn.Calcium, bp.dyn.Sodium]\n \n def update(self, V, Ca_info, Na_info, *args, **kwargs):\n pass\n \n def reset_state(self, V, Ca_info, Na_info, *args, **kwargs):\n pass","outputs":[],"execution_count":20},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4147B3FC5B0A43D4B419827E3C79443A","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"If an ion channel depends on more than two ion types, it can define ``master_type`` as a joint type by using ``brainpy.mixin.JointType``. For example, the above ``ICaNa`` class depends on ``bp.dyn.Calcium`` and ``bp.dyn.Sodium``, so the ``update()`` and ``reset_state()`` function depends on information of both subclasses and their parents. 
"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5CC1AB8DF1064F2EBAD74D044B419287","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"For an existing ion channel, users can check the ``master_type`` using:"},{"cell_type":"code","metadata":{"collapsed":false,"ExecuteTime":{"end_time":"2023-11-05T05:55:18.134765300Z","start_time":"2023-11-05T05:55:18.122204400Z"},"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"8B15300C84414E49AB3A165006637822","notebookId":"654731a4b4c12f15a7a5fc1f","trusted":true},"source":"bp.dyn.INa_Ba2002v2.master_type","outputs":[{"output_type":"execute_result","data":{"text/plain":"brainpy._src.dyn.ions.sodium.Sodium"},"metadata":{},"execution_count":21}],"execution_count":21},{"cell_type":"code","metadata":{"collapsed":false,"ExecuteTime":{"end_time":"2023-11-05T05:55:22.721900900Z","start_time":"2023-11-05T05:55:22.699959700Z"},"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"C1A21D323CCB49FBA383DACBA78B47B4","notebookId":"654731a4b4c12f15a7a5fc1f","trusted":true},"source":"bp.dyn.INa_Ba2002.master_type","outputs":[{"output_type":"execute_result","data":{"text/plain":"brainpy._src.dyn.neurons.hh.HHTypedNeuron"},"metadata":{},"execution_count":22}],"execution_count":22},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"F322DE431E574DE3AA842923B5D973C2","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"## Build a HH model by composing existing ion 
channels"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"C54B6D88EBFD4F13855F3A286A5B32E6","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"Here is an example by building a HH neuron model by composing existing ion channels. "},{"cell_type":"code","metadata":{"collapsed":false,"ExecuteTime":{"end_time":"2023-11-05T04:16:40.025565500Z","start_time":"2023-11-05T04:16:39.987617100Z"},"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"65FBA0F61EB545F3B25800C317844898","notebookId":"654731a4b4c12f15a7a5fc1f","trusted":true},"source":"class HH(bp.dyn.CondNeuGroupLTC):\n def __init__(self, size):\n super().__init__(size)\n\n self.INa = bp.dyn.INa_HH1952(size)\n self.IK = bp.dyn.IK_HH1952(size)\n self.IL = bp.dyn.IL(size, E=-54.387, g_max=0.03)","outputs":[],"execution_count":23},{"cell_type":"code","metadata":{"collapsed":false,"ExecuteTime":{"end_time":"2023-11-05T04:17:45.771771700Z","start_time":"2023-11-05T04:17:44.975617900Z"},"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"E51BBF72FA484236A4F1E4D3D7E7A466","notebookId":"654731a4b4c12f15a7a5fc1f","trusted":true},"source":"hh = HH(1)\n\nrunner = bp.DSRunner(hh, monitors={'na-p': hh.INa.p, 'na-q': hh.INa.q, 'k-p': hh.IK.p, 'v': hh.V})\n\ninputs = np.ones(1000) * 4.\n_ = runner.run(inputs=inputs)\n","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/1000 
[00:00","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":25},{"cell_type":"code","metadata":{"collapsed":false,"ExecuteTime":{"end_time":"2023-11-05T04:26:37.489404300Z","start_time":"2023-11-05T04:26:37.335362500Z"},"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"295C0E829D87444B90898633AD1EA4D4","notebookId":"654731a4b4c12f15a7a5fc1f","trusted":true},"source":"bp.visualize.line_plot(runner.mon.ts, runner.mon['v'], show=True)","outputs":[{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":26},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"FB94957B4BB9418AB1D4E9BFD69DFE38","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"## Customizing ion channels"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"ECAE729288DB4CBB9AB85A360875D39A","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"To customize an ion channel that can be composed using the above interface, users should define a normal ``DynamicalSystem`` with the specification of ``master_type``. \n\nHere are several examples:"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"04C8609AA85847E49BFDB6C3C55884F9","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"For a potassium ion channel: \n\n$$ \n\\begin{split}\\begin{aligned} \n I_{\\mathrm{K}} &= g_{\\mathrm{max}} * p^4 \\\\ \n \\frac{dp}{dt} &= \\phi * (\\alpha_p (1-p) - \\beta_p p) \\\\ \n \\alpha_{p} &= \\frac{0.01 (V -V_{sh} + 10)}{1-\\exp \\left(-\\left(V-V_{sh}+ 10\\right) / 10\\right)} \\\\ \n \\beta_p &= 0.125 \\exp \\left(-\\left(V-V_{sh}+20\\right) / 80\\right) \n \\end{aligned}\\end{split} \n$$ \n\nwhere $V_{sh}$ is the membrane shift (default -45 mV), and $\\phi$ is the temperature-dependent factor (default 1.)."},{"cell_type":"code","metadata":{"collapsed":false,"id":"047B9FBC9B104717AC74970D1659E72F","notebookId":"654731a4b4c12f15a7a5fc1f","scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"trusted":true,"jupyter":{}},"source":"class IK(bp.dyn.IonChannel):\n master_type = bp.dyn.HHTypedNeuron\n \n def __init__(self, size, 
E=-77., g_max=36., phi=1., method='exp_auto'):\n super().__init__(size)\n self.g_max = g_max\n self.E = E\n self.phi = phi\n\n self.integral = bp.odeint(self.dn, method=method)\n\n def dn(self, n, t, V):\n alpha_n = 0.01 * (V + 55) / (1 - bm.exp(-(V + 55) / 10))\n beta_n = 0.125 * bm.exp(-(V + 65) / 80)\n return self.phi * (alpha_n * (1. - n) - beta_n * n)\n \n def reset_state(self, V, batch_or_mode=None, **kwargs):\n self.n = bp.init.variable_(bm.zeros, self.num, batch_or_mode)\n \n def update(self, V):\n t = bp.share.load('t')\n dt = bp.share.load('dt')\n self.n.value = self.integral(self.n, t, V, dt=dt)\n\n def current(self, V):\n return self.g_max * self.n ** 4 * (self.E - V)","outputs":[],"execution_count":27},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A63315E65828401AB9BA6032D79B4ECB","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"For a sodium ion channel, \n\n$$ \n\\begin{split}\\begin{split} \n\\begin{aligned} \n I_{\\mathrm{Na}} &= g_{\\mathrm{max}} m^3 h \\\\ \n \\frac {dm} {dt} &= \\phi (\\alpha_m (1-x) - \\beta_m) \\\\ \n &\\alpha_m(V) = \\frac {0.1(V-V_{sh}-5)}{1-\\exp(\\frac{-(V -V_{sh} -5)} {10})} \\\\ \n &\\beta_m(V) = 4.0 \\exp(\\frac{-(V -V_{sh}+ 20)} {18}) \\\\ \n \\frac {dh} {dt} &= \\phi (\\alpha_h (1-x) - \\beta_h) \\\\ \n &\\alpha_h(V) = 0.07 \\exp(\\frac{-(V-V_{sh}+20)}{20}) \\\\ \n &\\beta_h(V) = \\frac 1 {1 + \\exp(\\frac{-(V -V_{sh}-10)} {10})} \\\\ \n\\end{aligned} \n\\end{split}\\end{split} \n$$ \n\nwhere $V_{sh}$ is the membrane shift (default -45 mV), and $\\phi$ is the temperature-dependent factor (default 1.)."},{"cell_type":"code","metadata":{"collapsed":false,"id":"92F8054041EF4EE685C8BFB3E3008F27","notebookId":"654731a4b4c12f15a7a5fc1f","scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"trusted":true,"jupyter":{}},"source":"class INa(bp.dyn.IonChannel):\n master_type = 
bp.dyn.HHTypedNeuron\n \n def __init__(self, size, E= 50., g_max=120., phi=1., method='exp_auto'):\n super(INa, self).__init__(size)\n self.g_max = g_max\n self.E = E\n self.phi = phi\n self.integral = bp.odeint(bp.JointEq(self.dm, self.dh), method=method)\n\n def dm(self, m, t, V):\n alpha_m = 0.11 * (V + 40) / (1 - bm.exp (-(V + 40) / 10))\n beta_m = 4* bm.exp(-(V + 65) /18)\n return self.phi * (alpha_m * (1. - m) - beta_m * m)\n \n def dh(self, h, t, V):\n alpha_h = 0.07 * bm.exp(-(V + 65) / 20)\n beta_h = 1. / (1 + bm.exp(-(V + 35) / 10))\n return self.phi * (alpha_h * (1. - h) - beta_h * h)\n \n def reset_state(self, V, batch_or_mode=None, **kwargs):\n self.m = bp.init.variable_(bm.zeros, self.num, batch_or_mode)\n self.h = bp.init.variable_(bm.zeros, self.num, batch_or_mode)\n \n def update(self, V):\n t = bp.share.load('t')\n dt = bp.share.load('dt')\n self.m.value, self.h.value = self.integral(self.m, self.h, t, V, dt=dt)\n\n def current(self, V):\n return self.g_max * self.m ** 3 * self.h * (self.E - V)","outputs":[],"execution_count":28},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5662C78D46C64EF48208609018A9EB00","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654731a4b4c12f15a7a5fc1f"},"source":"The leakage channel current."},{"cell_type":"code","metadata":{"collapsed":false,"id":"E9F47A5EF3EF4CAABF4DC4D0CBF98B6B","notebookId":"654731a4b4c12f15a7a5fc1f","scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"trusted":true,"jupyter":{}},"source":"class IL(bp.dyn.IonChannel):\n master_type = bp.dyn.HHTypedNeuron\n \n def __init__(self, size, E=-54.39, g_max=0.03):\n super(IL, self).__init__(size)\n self.g_max = g_max\n self.E = E\n \n def reset_state(self, *args, **kwargs):\n pass\n \n def update(self, V):\n pass\n \n def current(self, V):\n return self.g_max * (self.E - 
V)","outputs":[],"execution_count":29},{"cell_type":"code","metadata":{"collapsed":false,"id":"B00168826F8046C59FCED99795EDD38C","notebookId":"654731a4b4c12f15a7a5fc1f","scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"trusted":true,"jupyter":{}},"source":"class HH(bp.dyn.CondNeuGroup):\n def __init__(self, size):\n super().__init__(size, V_initializer=bp.init.Uniform(-80, -60.))\n # TODO: 初始化三个离子通道\n self.IK = ... # 参数:E=-77., g_max=36.\n self.INa = ... # 参数:E=50., g_max=120.\n self.IL = ... # 参数:E=-54.39, g_max=0.03","outputs":[],"execution_count":30},{"cell_type":"code","metadata":{"collapsed":false,"id":"5A6DD4DECE3B44EF931B876B4F05AC03","notebookId":"654731a4b4c12f15a7a5fc1f","scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"trusted":true,"jupyter":{}},"source":"neu = HH(1)\nneu.reset()\n\ninputs = np.ones(int(200 / bm.dt)) * 1.698 # 200 ms\nrunner = bp.DSRunner(neu, monitors=['V', 'IK.n', 'INa.m', 'INa.h'])\nrunner.run(inputs=inputs) # the running time is 200 ms\n\nimport matplotlib.pyplot as plt\n\nplt.plot(runner.mon['ts'], runner.mon['V'])\nplt.xlabel('t (ms)')\nplt.ylabel('V (mV)')\nplt.savefig(\"HH.jpg\")\nplt.show()\n\nplt.figure(figsize=(6, 2))\nplt.plot(runner.mon['ts'], runner.mon['IK.n'], label='n')\nplt.plot(runner.mon['ts'], runner.mon['INa.m'], label='m')\nplt.plot(runner.mon['ts'], runner.mon['INa.h'], label='h')\nplt.xlabel('t (ms)')\nplt.legend()\n\nplt.show()","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/2000 [00:00","text/html":""},"metadata":{"needs_background":"light"}},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":31},{"cell_type":"markdown","metadata":{"id":"1C26FB13EFDB4CB4868DFD45AFFA8047","notebookId":"654731a4b4c12f15a7a5fc1f","runtime":{"status":"default","execution_status":null,"is_visible":false},"scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"jupyter":{}},"source":"## Answer"},{"cell_type":"code","metadata":{"collapsed":false,"id":"EE0FFD3EFFC14BEFB491C7D074DFF357","notebookId":"654731a4b4c12f15a7a5fc1f","scrolled":false,"slideshow":{"slide_type":"slide"},"tags":[],"trusted":true,"jupyter":{}},"source":"import brainpy as bp\nimport brainpy.math as bm\n\n \nclass HH(bp.dyn.CondNeuGroup):\n def __init__(self, size):\n super().__init__(size, V_initializer=bp.init.Uniform(-80, -60.))\n self.IK = IK(size, E=-77., g_max=36.)\n self.INa = INa(size, E=50., g_max=120.)\n self.IL = IL(size, E=-54.39, g_max=0.03)\n\nneu = HH(1)\nneu.reset()\n\ninputs = np.ones(int(200 / bm.dt)) * 1.698 # 200 ms\nrunner = bp.DSRunner(neu, monitors=['V', 'IK.n', 'INa.m', 'INa.h'])\nrunner.run(inputs=inputs) # the running time is 200 ms\n\nimport matplotlib.pyplot as plt\n\nplt.plot(runner.mon['ts'], runner.mon['V'])\nplt.xlabel('t (ms)')\nplt.ylabel('V (mV)')\nplt.savefig(\"HH.jpg\")\nplt.show()\n\nplt.figure(figsize=(6, 2))\nplt.plot(runner.mon['ts'], runner.mon['IK.n'], label='n')\nplt.plot(runner.mon['ts'], runner.mon['INa.m'], label='m')\nplt.plot(runner.mon['ts'], runner.mon['INa.h'], label='h')\nplt.xlabel('t (ms)')\nplt.legend()\nplt.savefig(\"HH_channels.jpg\")\n\nplt.show()","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/2000 [00:00","text/html":""},"metadata":{"needs_background":"light"}},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":32}],"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python","nbconvert_exporter":"python","file_extension":".py","version":"3.5.2","pygments_lexer":"ipython3"}},"nbformat":4,"nbformat_minor":2} -------------------------------------------------------------------------------- /作业/W2_1 简化神经元模型 基础作业.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A7E8BE9E9FD94E939A28207102C47904","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654f0a64cc918ad4c0290b93"},"source":"# W2_1: reduced model 基础作业"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"08C7D02B51FB4C7A80A8F936EB3F2655","notebookId":"654f0a64cc918ad4c0290b93","trusted":true},"source":"import brainpy as bp\nimport brainpy.math as bm\nimport numpy as np\nimport matplotlib.pyplot as plt","outputs":[],"execution_count":1},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"79FD6AA45B8A48A7A0A701D134CD03BC","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654f0a64cc918ad4c0290b93"},"source":"## 1. 
Running a built-in Leaky Integrate-and-Fire (LIF) neuron model"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"29476A8598D24D56ABFFBDB7FC9A8857","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654f0a64cc918ad4c0290b93"},"source":"In this section, you can play with the built-in LIF model and use different inputs to determine the minimal current. First, we inject a constant input into the model."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"850752C531B1457FBE15A5D51A4C4C1E","notebookId":"654f0a64cc918ad4c0290b93","trusted":true},"source":"def run_LIF_constant_input():\n # 运行LIF模型\n\n # TODO: 实例化一个LIF模型\n group = ...\n\n # TODO: 实例化DSRunner,设置constant input为22.,并monitor变量V\n runner = ...\n runner(200) # 运行时长为200ms\n\n # 结果可视化\n fig, gs = bp.visualize.get_figure(1, 1, 4.5, 6)\n ax = fig.add_subplot(gs[0, 0])\n plt.plot(runner.mon.ts, runner.mon.V)\n plt.xlabel(r'$t$ (ms)')\n plt.ylabel(r'$V$ (mV)')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n plt.show()","outputs":[],"execution_count":2},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"E8C6A32C0652442B9C10BC1F7F46D540","notebookId":"654f0a64cc918ad4c0290b93","trusted":true},"source":"run_LIF_constant_input()","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/2000 [00:00","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":6},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"3F74B1586864425786DBD28B18A8FFCF","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654f0a64cc918ad4c0290b93"},"source":"Next, you can try to find the rheobase current by using section 
input. "},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"1948B32DE0D146489A2F9F45F841A99E","notebookId":"654f0a64cc918ad4c0290b93","trusted":true},"source":"def run_LIF_section_input():\n # 运行LIF模型\n\n # TODO: 实现一个section input,并对不用神经元设置不同大小的电流输入,观察神经元的输出,确定rheobase current\n current, length = ...\n\n group = bp.neurons.LIF(current.shape[1])\n\n # TODO: 实例化DSRunner,monitor变量V,并传入section input\n runner = ...\n runner(200) # 运行时长为200ms\n\n # 结果可视化\n fig, gs = bp.visualize.get_figure(1, 1, 4.5, 6)\n ax = fig.add_subplot(gs[0, 0])\n plt.plot(runner.mon.ts, runner.mon.V)\n plt.xlabel(r'$t$ (ms)')\n plt.ylabel(r'$V$ (mV)')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n plt.show()","outputs":[],"execution_count":6},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"20256E7E36594F6A974B7A2E64BDB895","notebookId":"654f0a64cc918ad4c0290b93","trusted":true},"source":"run_LIF_section_input()","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/2000 [00:00","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":7},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"0ED0C358374244DD8DC8393195F2BE41","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654f0a64cc918ad4c0290b93"},"source":"## Answer 1"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"596AB67729F947CA823E2A9F7688EF81","notebookId":"654f0a64cc918ad4c0290b93","trusted":true},"source":"def run_LIF_constant_input():\n # 运行LIF模型\n\n group = bp.neurons.LIF(1)\n\n runner = bp.DSRunner(group, monitors=['V'], inputs=('input', 22.))\n runner(200) # 运行时长为200ms\n\n # 结果可视化\n fig, gs = bp.visualize.get_figure(1, 1, 
4.5, 6)\n ax = fig.add_subplot(gs[0, 0])\n plt.plot(runner.mon.ts, runner.mon.V)\n plt.xlabel(r'$t$ (ms)')\n plt.ylabel(r'$V$ (mV)')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n plt.show()","outputs":[],"execution_count":2},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"42BAD46A8886479FB5B5C84C17890147","notebookId":"654f0a64cc918ad4c0290b93","trusted":true},"source":"run_LIF_constant_input()","outputs":[{"output_type":"stream","name":"stderr","text":"No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"},{"output_type":"display_data","data":{"text/plain":" 0%| | 0/2000 [00:00","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":3},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5D2DB2AE91084F90B50C25B1AD520E9E","notebookId":"654f0a64cc918ad4c0290b93","trusted":true},"source":"def run_LIF_section_input():\n # 运行LIF模型\n current, length = bp.inputs.section_input(values=[0., bm.asarray([15., 20., 25.]), 0.],\n durations=[10, 200, 10],\n return_length=True)\n\n group = bp.neurons.LIF(current.shape[1])\n runner = bp.DSRunner(group, monitors=['V'], inputs=('input', current, 'iter'))\n runner(200) # 运行时长为200ms\n\n # 结果可视化\n fig, gs = bp.visualize.get_figure(1, 1, 4.5, 6)\n ax = fig.add_subplot(gs[0, 0])\n plt.plot(runner.mon.ts, runner.mon.V)\n plt.xlabel(r'$t$ (ms)')\n plt.ylabel(r'$V$ (mV)')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n 
plt.show()","outputs":[],"execution_count":4},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"410583E75AC14070918537D68BFDBD1A","notebookId":"654f0a64cc918ad4c0290b93","trusted":true},"source":"run_LIF_section_input()","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/2000 [00:00","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":5},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"48FB23E8112B4AACA0D7045BE3590526","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654f0a64cc918ad4c0290b93"},"source":"## 2. Running a built-in AdEx neuron model"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5AA224BA930E4DB78E3A34570F9FD724","runtime":{"status":"default","execution_status":null,"is_visible":false},"notebookId":"654f0a64cc918ad4c0290b93"},"source":"You can try different parameters combinations and observe the firing pattern."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"80FE32C66B5F4F6D8A4F5D880F325A48","notebookId":"654f0a64cc918ad4c0290b93","trusted":true},"source":"def run_AdEx():\n # 运行AdEx模型\n param_dict = {\n 'V_rest': -70.,\n 'V_reset': -55.,\n 'V_th': -30.,\n 'V_T': -50.,\n 'delta_T': 2.,\n 'a': 0.01,\n 'b': 60.,\n 'R': .5,\n 'tau': 20.,\n 'tau_w': 30.\n }\n # 可以尝试的参数组合包括但不限于:\n # a 0.01, 0.01, 0.5, -0.5, 1., -1.\n # b 60., 5., 7., 7., 10., 5.\n # tau 20., 20., 5., 5., 10., 5.\n # tau_w 30., 100., 100., 100., 100., 100.\n # V_reset -55., -55., -51., -47., -60., -60.\n # Iext 65., 65., 65., 65., 55., 25.\n group = bp.neurons.AdExIF(10, **param_dict)\n runner = bp.DSRunner(group, monitors=['V', 'w'], inputs=('input', 65.))\n runner(300)\n bp.visualize.line_plot(runner.mon.ts, 
runner.mon.V, legend='V', show=False)\n bp.visualize.line_plot(runner.mon.ts, runner.mon.w, legend='w', show=True)","outputs":[],"execution_count":8},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A5A348B595CA433AAC8E10EC7CC31976","notebookId":"654f0a64cc918ad4c0290b93","trusted":true},"source":"run_AdEx()","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/3000 [00:00","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":9}],"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python","nbconvert_exporter":"python","file_extension":".py","version":"3.5.2","pygments_lexer":"ipython3"}},"nbformat":4,"nbformat_minor":5} -------------------------------------------------------------------------------- /作业/W3_2 抉择网络模型 rate model.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"2B9EFA41D3714AF9AFDDD9FC77DAE318","notebookId":"64ebf8553852baaea3e1229b","trusted":true},"source":"import numpy as np\nimport brainpy as bp\nimport brainpy.math as bm\n\nbm.enable_x64()\nbm.set_platform('cpu')\n\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict\n","outputs":[],"execution_count":1},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"F5066958199D4FBBAC3627505B526189","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf8553852baaea3e1229b"},"source":"# **A Rate Model for Decision-Making** \nConsider two excitatory neural assemblies, populations $1$ and $2$, that compete with each other through a shared pool of \ninhibitory neurons. 
\n\nLet $r_1$ and $r_2$ be their respective population-firing rates, and the total synaptic input current $x_i$ and the resulting firing rate $r_i$ of the neural population $i$ obey the following input-output relationship ($F - I$ curve): \n\n$$ \nr_i = F(x_i) = \\frac{ax_i - b}{1-\\exp(-d(ax_i-b))} \n$$ \n\nwhich captures the current-frequency function of a leaky integrate-and-fire neuron. The parameter values are $a$ = 270 Hz/nA, $b$ = 108 Hz, $d$ = 0.154 sec. \n\nAssume that the 'synaptic drive variables' $S_1$ and $S_2$ obey \n\n$$ \n\\frac{dS_1}{dt} = F(x_1)\\,\\gamma(1-S_1)-S_1/\\tau_s\\\\ \n$$ \n$$ \n\\frac{dS_2}{dt} = F(x_2)\\,\\gamma(1-S_2)-S_2/\\tau_s \n$$ \n\nwhere $\\gamma$ = 0.641. The net current into each population is given by \n\n$$ \nx_1 = J_E S_1 + J_I S_2 + I_0 + I_{noise1} + J_{ext}\\mu_1\\\\ \n$$ \n$$ \nx_2 = J_E S_2 + J_I S_1 + I_0 + I_{noise2} +J_{ext}\\mu_2. \n$$ \n\n\nThe synaptic time constant is $\\tau_s$ = 100 ms (NMDA time constant). The synaptic coupling strengths are $J_E$ = 0.2609 nA, $J_I$ = -0.0497 nA, and $J_{ext}$ = 0.00052 nA. Stimulus-selective inputs to populations 1 and 2 are governed by unitless parameters $\\mu_1$ and $\\mu_2$, respectively. $I_0+ I_{noise}$ is the background input which has a mean $I_0$ and a noise component $I_{noise}$ described by an Ornstein-Uhlenbeck process: \n\n$$ \ndI_{noise1} = - I_{noise1} \\frac{dt}{\\tau_0} + \\sigma dW \\\\ \n$$ \n$$ \ndI_{noise2} = - I_{noise2} \\frac{dt}{\\tau_0} + \\sigma dW \\\\ \n$$ \n\nwhere $I_0=0.3255$ nA, filter time constant $\\tau_0=2$ ms, and noise amplitude $\\sigma=0.02$ nA. 
$dW$ is a Wiener process and note that when numerially integrating that with step size $\\frac{dt}{\\tau_0}$ then $\\Delta W \\sim \\mathcal{N}(0, \\frac{dt}{\\tau_0})$, a normal distribution with mean 0 and variance $\\frac{dt}{\\tau_0}$ \n\nFor the decision-making paradigm, the input rates $\\mu_1$ and $\\mu_2$ are determined by the stimulus coherence $c'$ which ranges between 0% and 100%: \n\n$$ \n\\mu_1 =\\mu_0(1+c'/100)\\\\ \n$$ \n$$ \n\\mu_2 =\\mu_0(1-c'/100) \n$$"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A0FF55624AE14A6699702D4D734DB837","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf8553852baaea3e1229b"},"source":"**References:** \n\n- Wong K-F and Wang X-J (2006). A recurrent network mechanism for time integration in perceptual decisions. J. Neurosci 26, 1314-1328."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"F6898EDDD65B443682FE58107CC38977","notebookId":"64ebf8553852baaea3e1229b","trusted":true},"source":"class DecisionMakingRateModel(bp.dyn.NeuGroup):\n def __init__(self, size, coherence, JE=0.2609, JI=0.0497, Jext=5.2e-4, I0=0.3255,\n gamma=6.41e-4, tau=100., tau_n=2., sigma_n=0.02, a=270., b=108., d=0.154,\n noise_freq=2400., method='exp_auto', **kwargs):\n super(DecisionMakingRateModel, self).__init__(size, **kwargs)\n \n # 初始化参数\n self.coherence = coherence\n self.JE = JE\n self.JI = JI\n self.Jext = Jext\n self.I0 = I0\n self.gamma = gamma\n self.tau = tau\n self.tau_n = tau_n\n self.sigma_n = sigma_n\n self.a = a\n self.b = b\n self.d = d\n \n # 初始化变量\n self.s1 = bm.Variable(bm.zeros(self.num) + 0.15)\n self.s2 = bm.Variable(bm.zeros(self.num) + 0.15)\n self.r1 = bm.Variable(bm.zeros(self.num))\n self.r2 = bm.Variable(bm.zeros(self.num))\n self.mu0 = bm.Variable(bm.zeros(self.num))\n self.I1_noise = bm.Variable(bm.zeros(self.num))\n self.I2_noise = 
bm.Variable(bm.zeros(self.num))\n \n # 噪声输入的神经元\n self.noise1 = bp.dyn.PoissonGroup(self.num, freqs=noise_freq)\n self.noise2 = bp.dyn.PoissonGroup(self.num, freqs=noise_freq)\n \n # 定义积分函数\n self.integral = bp.odeint(self.derivative, method=method)\n \n @property\n def derivative(self):\n return bp.JointEq([self.ds1, self.ds2, self.dI1noise, self.dI2noise])\n \n def ds1(self, s1, t, s2, mu0):\n #### TO DO!!! S1的动力学公式\n\n \n def ds2(self, s2, t, s1, mu0):\n ### TO DO!!! S2的动力学公式\n\n\n def dI1noise(self, I1_noise, t, noise1):\n return (- I1_noise + noise1.spike * bm.sqrt(self.tau_n * self.sigma_n * self.sigma_n)) / self.tau_n\n \n def dI2noise(self, I2_noise, t, noise2):\n return (- I2_noise + noise2.spike * bm.sqrt(self.tau_n * self.sigma_n * self.sigma_n)) / self.tau_n\n \n \n def update(self, tdi):\n # 更新噪声神经元以产生新的随机发放 self.noise1.update(tdi) self.noise2.update(tdi)\n # 更新s1、s2、I1_noise、I2_noise\n integral = self.integral(self.s1, self.s2, self.I1_noise, self.I2_noise, tdi.t, mu0=self.mu0,\n noise1=self.noise1, noise2=self.noise2, dt=tdi.dt)\n self.s1.value, self.s2.value, self.I1_noise.value, self.I2_noise.value = integral\n \n # 用更新后的s1、s2计算r1、r2\n #### TO DO!!!\n\n \n # 重置外部输入 \n self.mu0[:] = 0.","outputs":[],"execution_count":2},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4FBC1D4F17944DD99D2687B4A46E1C8C","notebookId":"64ebf8553852baaea3e1229b","trusted":true},"source":"# 定义各个阶段的时长\npre_stimulus_period, stimulus_period, delay_period = 100., 2000., 500.\n\n# 生成模型\ndmnet = DecisionMakingRateModel(1, coherence=25.6, noise_freq=2400.)\n\n# 定义电流随时间的变化\ninputs, total_period = bp.inputs.constant_input([(0., pre_stimulus_period),\n (20., stimulus_period),\n (0., delay_period)])\n# 运行数值模拟\nrunner = bp.DSRunner(dmnet,\n monitors=['s1', 's2', 'r1', 'r2'],\n inputs=('mu0', inputs, 'iter'))\nrunner.run(total_period)\n\n# 可视化\nfig, gs = plt.subplots(2, 1, figsize=(6, 6), 
sharex='all')\ngs[0].plot(runner.mon.ts, runner.mon.s1, label='s1')\ngs[0].plot(runner.mon.ts, runner.mon.s2, label='s2')\ngs[0].axvline(pre_stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[0].axvline(pre_stimulus_period + stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[0].set_ylabel('gating variable $s$')\ngs[0].legend()\n\ngs[1].plot(runner.mon.ts, runner.mon.r1, label='r1')\ngs[1].plot(runner.mon.ts, runner.mon.r2, label='r2')\ngs[1].axvline(pre_stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[1].axvline(pre_stimulus_period + stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[1].set_xlabel('t (ms)')\ngs[1].set_ylabel('firing rate $r$')\ngs[1].legend()\n\nplt.subplots_adjust(hspace=0.1)\nplt.show()\n","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/26000 [00:00=2.4.3, update() function no longer needs to receive a global shared argument.\n\nInstead of using:\n\n def update(self, tdi, *args, **kwagrs):\n t = tdi['t']\n ...\n\nPlease use:\n\n def update(self, *args, **kwagrs):\n t = bp.share['t']\n ...\n\n warnings.warn(_update_deprecate_msg, UserWarning)\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":3},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"39C9A4E90EFD4426A422814C0B30B0B0","notebookId":"64ebf8553852baaea3e1229b","trusted":true},"source":"# 生成模型\ndmnet = DecisionMakingRateModel(1, coherence=-1, noise_freq=2400.)\n\n# 定义电流随时间的变化\ninputs, total_period = bp.inputs.constant_input([(0., pre_stimulus_period),\n (20., stimulus_period),\n (0., delay_period)])\n# 运行数值模拟\nrunner = bp.DSRunner(dmnet,\n monitors=['s1', 's2', 'r1', 'r2'],\n inputs=('mu0', inputs, 'iter'))\nrunner.run(total_period)\n\n# 可视化\nfig, gs = plt.subplots(2, 1, figsize=(6, 6), sharex='all')\ngs[0].plot(runner.mon.ts, runner.mon.s1, label='s1')\ngs[0].plot(runner.mon.ts, runner.mon.s2, label='s2')\ngs[0].axvline(pre_stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[0].axvline(pre_stimulus_period + stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[0].set_ylabel('gating variable $s$')\ngs[0].legend()\n\ngs[1].plot(runner.mon.ts, runner.mon.r1, label='r1')\ngs[1].plot(runner.mon.ts, runner.mon.r2, label='r2')\ngs[1].axvline(pre_stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[1].axvline(pre_stimulus_period + stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[1].set_xlabel('t (ms)')\ngs[1].set_ylabel('firing rate $r$')\ngs[1].legend()\n\nplt.subplots_adjust(hspace=0.1)\nplt.show()\n","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/26000 [00:00=2.4.3, update() function no longer needs to receive a global shared argument.\n\nInstead of using:\n\n def update(self, tdi, *args, **kwagrs):\n t = tdi['t']\n ...\n\nPlease use:\n\n def update(self, *args, **kwagrs):\n t = bp.share['t']\n ...\n\n warnings.warn(_update_deprecate_msg, UserWarning)\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":4},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4D266D7AA080498DA84441230EE0A590","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf8553852baaea3e1229b"},"source":"## Phase plane analysis"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"FD36303FC0F340A2BF71C6DAA14AB4B5","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf8553852baaea3e1229b"},"source":"### Parameters:"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"7D33380A20D047CABF92573BA2BCE537","notebookId":"64ebf8553852baaea3e1229b","trusted":true},"source":"gamma = 0.641 # Saturation factor for gating variable\ntau = 0.1 # Synaptic time constant [sec]\na = 270. # Hz/nA\nb = 108. # Hz\nd = 0.154 # sec\n\nJE = 0.2609 # self-coupling strength [nA]\nJI = -0.0497 # cross-coupling strength [nA]\nJAext = 0.00052 # Stimulus input strength [nA]\nIb = 0.3255 # The background input Ib=I0+I_noise","outputs":[],"execution_count":5},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"E0F0889103BD475EA604B29FE4AD50B7","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf8553852baaea3e1229b"},"source":"### Model implementation \n\n$$ \nr_i = F(x_i) = \\frac{ax_i - b}{1-\\exp(-d(ax_i-b))} \n$$ \n\n$$ \n\\frac{dS_1}{dt} = F(x_1)\\,\\gamma(1-S_1)-S_1/\\tau_s \n$$ \n\n$$ \n\\frac{dS_2}{dt} = F(x_2)\\,\\gamma(1-S_2)-S_2/\\tau_s \n$$ \n\n\n$$ \nx_1 = J_E S_1 + J_I S_2 + I_0 + I_{noise1} + J_{ext}\\mu_1 \n$$ \n\n$$ \nx_2 = J_E S_2 + J_I S_1 + I_0 + I_{noise2} +J_{ext}\\mu_2. 
\n$$ \n\n"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"FDBA8C289C6F4F518082CE9130336819","notebookId":"64ebf8553852baaea3e1229b","trusted":true},"source":"@bp.odeint\ndef int_s1(s1, t, s2, coh=0.5, mu=20.):\n x1 = JE * s1 + JI * s2 + Ib + JAext * mu * (1. + coh/100)\n r1 = (a * x1 - b) / (1. - bm.exp(-d * (a * x1 - b)))\n return - s1 / tau + (1. - s1) * gamma * r1\n\n@bp.odeint\ndef int_s2(s2, t, s1, coh=0.5, mu=20.):\n #### TO DO!!!S2的动力学表达式\n\n \n","outputs":[],"execution_count":6},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"686AC1D24A8B46F191E6DB8E4660B1F7","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf8553852baaea3e1229b"},"source":"No stimulus: $μ_0=0$ Hz."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"D7041A29EC1C41EEB0438B87556C70CA","notebookId":"64ebf8553852baaea3e1229b","trusted":true},"source":"##### TO DO!!! 
mu=0 的情况\nanalyzer = bp.analysis.PhasePlane2D(\n model=[int_s1, int_s2],\n target_vars={'s1': [0, 1], 's2': [0, 1]},\n pars_update={'mu': },\n resolutions=0.001,\n)\n\nanalyzer.plot_vector_field()\nanalyzer.plot_nullcline(coords=dict(s2='s2-s1'),\n x_style={'fmt': '-'},\n y_style={'fmt': '-'})\nanalyzer.plot_fixed_point()\nanalyzer.show_figure()","outputs":[{"output_type":"stream","name":"stderr","text":"I am creating the vector field ...\nI am computing fx-nullcline ...\nI am evaluating fx-nullcline by optimization ...\nI am computing fy-nullcline ...\nI am evaluating fy-nullcline by optimization ...\nI am searching fixed points ...\nI am trying to find fixed points by optimization ...\n\tThere are 1212 candidates\nI am trying to filter out duplicate fixed points ...\n\tFound 5 fixed points.\n\t#1 s1=0.5669871605297275, s2=0.03189141971571579 is a stable node.\n\t#2 s1=0.3138449248913595, s2=0.055785333471845534 is a saddle node.\n\t#3 s1=0.10265144582202228, s2=0.10265095098913339 is a stable node.\n\t#4 s1=0.05578534267632876, s2=0.3138449310808803 is a saddle node.\n\t#5 s1=0.03189144636489119, s2=0.5669870352865433 is a stable node.\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":7},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"7E600035034747D38F92CA5E1DF7E2E0","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf8553852baaea3e1229b"},"source":"Symmetric stimulus: \n$μ_0=30 Hz, c'=0.$ "},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"FA76D2D6F3934B619A255A7FB1F6C2AC","notebookId":"64ebf8553852baaea3e1229b","trusted":true},"source":"##### TO DO!!! mu=30 coh=0的情况\nanalyzer = bp.analysis.PhasePlane2D(\n model=[int_s1, int_s2],\n target_vars={'s1': [0, 1], 's2': [0, 1]},\n pars_update={'mu': , 'coh': },\n resolutions=0.001,\n)\nanalyzer.plot_vector_field()\nanalyzer.plot_nullcline(coords=dict(s2='s2-s1'),\n x_style={'fmt': '-'},\n y_style={'fmt': '-'})\nanalyzer.plot_fixed_point()\nanalyzer.show_figure()","outputs":[{"output_type":"stream","name":"stderr","text":"I am creating the vector field ...\nI am computing fx-nullcline ...\nI am evaluating fx-nullcline by optimization ...\nI am computing fy-nullcline ...\nI am evaluating fy-nullcline by optimization ...\nI am searching fixed points ...\nI am trying to find fixed points by optimization ...\n\tThere are 1212 candidates\nI am trying to filter out duplicate fixed points ...\n\tFound 3 fixed points.\n\t#1 s1=0.658694232143127, s2=0.051807199439912924 is a stable node.\n\t#2 s1=0.4244557898485831, s2=0.42445562837314016 is a saddle node.\n\t#3 s1=0.05180717720080604, s2=0.6586942355713474 is a stable node.\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":8},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"ACA0729B1E10406BA2A340E8F4469420","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf8553852baaea3e1229b"},"source":"Symmetric stimulus: \n$μ_0=30 Hz, c'=14\\%$ "},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"BED5832727334733849C90378DDA6276","notebookId":"64ebf8553852baaea3e1229b","trusted":true},"source":"##### TO DO!!! mu=30 coh=14的情况\nanalyzer = bp.analysis.PhasePlane2D(\n model=[int_s1, int_s2],\n target_vars={'s1': [0, 1], 's2': [0, 1]},\n pars_update={'mu': , 'coh': },\n resolutions=0.001,\n)\nanalyzer.plot_vector_field()\nanalyzer.plot_nullcline(coords=dict(s2='s2-s1'),\n x_style={'fmt': '-'},\n y_style={'fmt': '-'})\nanalyzer.plot_fixed_point()\nanalyzer.show_figure()","outputs":[{"output_type":"stream","name":"stderr","text":"I am creating the vector field ...\nI am computing fx-nullcline ...\nI am evaluating fx-nullcline by optimization ...\nI am computing fy-nullcline ...\nI am evaluating fy-nullcline by optimization ...\nI am searching fixed points ...\nI am trying to find fixed points by optimization ...\n\tThere are 1212 candidates\nI am trying to filter out duplicate fixed points ...\n\tFound 3 fixed points.\n\t#1 s1=0.6679776124172938, s2=0.045830222261007005 is a stable node.\n\t#2 s1=0.38455860789855545, s2=0.45363090352898194 is a saddle node.\n\t#3 s1=0.05911003280235089, s2=0.6481046659437737 is a stable node.\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":9},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"37BD09A184384F68AA024B8C172E53A2","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf8553852baaea3e1229b"},"source":"Symmetric stimulus: \n$μ_0=30 Hz, c'=100\\%$ "},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"37F259D6AE624790B3C40914605BBF2B","notebookId":"64ebf8553852baaea3e1229b","trusted":true},"source":"##### TO DO!!! mu=30 coh=100的情况\nanalyzer = bp.analysis.PhasePlane2D(\n model=[int_s1, int_s2],\n target_vars={'s1': [0, 1], 's2': [0, 1]},\n pars_update={'mu': , 'coh': },\n resolutions=0.001,\n)\nanalyzer.plot_vector_field()\nanalyzer.plot_nullcline(coords=dict(s2='s2-s1'),\n x_style={'fmt': '-'},\n y_style={'fmt': '-'})\nanalyzer.plot_fixed_point()\nanalyzer.show_figure()","outputs":[{"output_type":"stream","name":"stderr","text":"I am creating the vector field ...\nI am computing fx-nullcline ...\nI am evaluating fx-nullcline by optimization ...\nI am computing fy-nullcline ...\nI am evaluating fy-nullcline by optimization ...\nI am searching fixed points ...\nI am trying to find fixed points by optimization ...\n\tThere are 1212 candidates\nI am trying to filter out duplicate fixed points ...\n\tFound 1 fixed points.\n\t#1 s1=0.7092805209334905, s2=0.0239636630419946 is a stable node.\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":10},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"9BC75857380F440A93B77AF3D91E70A0","notebookId":"64ebf8553852baaea3e1229b","trusted":true},"source":"","outputs":[],"execution_count":null}],"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python","nbconvert_exporter":"python","file_extension":".py","version":"3.5.2","pygments_lexer":"ipython3"}},"nbformat":4,"nbformat_minor":5} -------------------------------------------------------------------------------- /作业/W3_2 抉择网络模型 spiking model.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5BCEE66C4FB640EC98B2F25C9C010C58","notebookId":"64ebf8563852baaea3e122c0","trusted":true},"source":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport brainpy as bp\nimport brainpy.math as bm\n","outputs":[],"execution_count":1},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"BD9200CEAEC64DD48730870CD5B0EF52","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf8563852baaea3e122c0"},"source":"# **The Cortical Network Model for Decision-Making** \n\nThe network is composed of N neurons, with NE pyramidal cells (80%) and NI interneurons (20%) (Braitenberg and Schütz 1991). \nSimulations reported in here were done with NE = 1600, NI = 400. \nEach stimulus activates a distinct and small subpopulation of fNE excitatory cells (f = 0.15). 
\n\n**Neurons** \n\nBoth pyramidal cells and interneurons are described by leaky integrate-and-fire neurons (see for example Tuckwell 1988) and are characterized by a resting potential VL = −70 mV, a firing threshold Vth = −50 mV, a reset potential Vreset = −55 mV, a membrane capacitance Cm = 0.5 nF for pyramidal cells and 0.2 nF for interneurons, a membrane leak conductance gL = 25 nS for pyramidal cells and 20 nS for interneurons, and a refractory period τref = 2 ms for pyramidal cells and 1 ms for interneurons. The corresponding membrane time constants are τm = Cm/gL = 20 ms for excitatory cells and 10 ms for interneurons (McCormick et al. 1985). Below threshold, the membrane potential V(t) of a cell \n\n$$ \nC_m \\frac{d V(t)}{d t}=-g_L\\left(V(t)-V_L\\right)-I_{s y n}(t) \n$$ \n\nwhere Isyn(t) represents the total synaptic current flowing into the cell. \n\n\n**Synapses** \n\nThe total synaptic currents are given by: \n\n$$ \nI_{s y n}(t)=I_{\\text {ext,AMPA }}(t)+I_{\\text {rec }, A M P A}(t)+I_{\\text {rec }, N M D A}(t)+I_{\\text {rec }, \\mathrm{GABA}}(t) \n$$ \n\nin which \n\n$$ \n\\begin{gathered} \nI_{\\text {ext,AMPA }}(t)=g_{\\text {ext,AMPA }}\\left(V(t)-V_E\\right) s^{\\text {ext,AMPA }}(t) \\\\ \nI_{\\text {rec,AMPA }}(t)=g_{\\text {rec,AMPA }}\\left(V(t)-V_E\\right) \\sum_{j=1}^{C_E} w_j s_j^{A M P A}(t) \\\\ \nI_{\\text {rec,NMDA }}(t)=\\frac{g_{\\mathrm{NMDA}}\\left(V(t)-V_E\\right)}{\\left(1+\\left[\\mathrm{Mg}^{2+}\\right] \\exp (-0.062 V(t)) / 3.57\\right)} \\sum_{j=1}^{\\mathrm{C}_E} w_j s_j^{\\mathrm{NMDA}}(t) \\\\ \nI_{\\mathrm{rec}, \\mathrm{GABA}}(t)=g_{\\mathrm{GABA}}\\left(V(t)-V_l\\right) \\sum_{j=1}^{C_1} s_j^{\\mathrm{GABA}}(t) \n\\end{gathered} \n$$ \n\nwhere VE = 0 mV, VI = −70 mV. \n\n**Synaptic Weights** \n\nHence, inside a selective population, $w_j$ = w+, where w+ > 1 is a dimensionless parameter that is equal to the relative strength of “potentiated” synapses with respect to the baseline. 
Unless specified otherwise, I used w+ = 1.7. \n\nBetween two different selective populations, and from the nonselective population to selective ones, $w_j$ = w−, where w− < 1 measures the strength of synaptic “depression.” $w− = 1 − f(w_+ − 1)/(1 − f)$. \n\nOther connections have $w_j$ = 1. \n\n\n**References:** \n\n-Wang XJ. Probabilistic Decision Making by Slow Reverberation in Cortical Circuits. Neuron. 2002;36(5):955-968. doi:10.1016/S0896-6273(02)01092-9 \n"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"66274F5894314B98B94388947CEAF759","notebookId":"64ebf8563852baaea3e122c0","trusted":true},"source":"class AMPA(bp.Projection):\n def __init__(self, pre, post, conn, delay, g_max, tau, E):\n super().__init__()\n if conn == 'all2all':\n comm = bp.dnn.AllToAll(pre.num, post.num, g_max)\n elif conn == 'one2one':\n comm = bp.dnn.OneToOne(pre.num, g_max)\n else:\n raise ValueError\n syn = bp.dyn.Expon.desc(post.num, tau=tau)\n out = bp.dyn.COBA.desc(E=E)\n self.proj = bp.dyn.ProjAlignPostMg2(\n pre=pre, delay=delay, comm=comm,\n syn=syn, out=out, post=post\n )\n\n\nclass NMDA(bp.Projection):\n def __init__(self, pre, post, conn, delay, g_max):\n super().__init__()\n if conn == 'all2all':\n comm = bp.dnn.AllToAll(pre.num, post.num, g_max)\n elif conn == 'one2one':\n comm = bp.dnn.OneToOne(pre.num, g_max)\n else:\n raise ValueError\n syn = bp.dyn.NMDA.desc(pre.num, a=0.5, tau_decay=100., tau_rise=2.)\n out = bp.dyn.MgBlock(E=0., cc_Mg=1.0)\n self.proj = bp.dyn.ProjAlignPreMg2(\n pre=pre, delay=delay, syn=syn,\n comm=comm, out=out, post=post\n )\n","outputs":[],"execution_count":2},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"29CD6F4660A94CB48A9AF4D79BE9E208","notebookId":"64ebf8563852baaea3e122c0","trusted":true},"source":"class DecisionMakingNet(bp.DynSysGroup):\n def __init__(self, scale=1., 
f=0.15):\n super().__init__()\n # 网络中各组神经元的数目\n num_exc = int(1600 * scale)\n num_I, num_A, num_B = int(400 * scale), int(f * num_exc), int(f * num_exc)\n num_N = num_exc - num_A - num_B\n self.num_A, self.num_B, self.num_N, self.num_I = num_A, num_B, num_N, num_I\n\n poisson_freq = 2400. # Hz\n w_pos = 1.7\n w_neg = 1. - f * (w_pos - 1.) / (1. - f)\n g_ext2E_AMPA = 2.1 # nS\n g_ext2I_AMPA = 1.62 # nS\n g_E2E_AMPA = 0.05 / scale # nS\n g_E2I_AMPA = 0.04 / scale # nS\n g_E2E_NMDA = 0.165 / scale # nS\n g_E2I_NMDA = 0.13 / scale # nS\n g_I2E_GABAa = 1.3 / scale # nS\n g_I2I_GABAa = 1.0 / scale # nS\n\n neu_par = dict(V_rest=-70., V_reset=-55., V_th=-50., V_initializer=bp.init.OneInit(-70.))\n\n # E neurons/pyramid neurons\n self.A = bp.dyn.LifRef(num_A, tau=20., R=0.04, tau_ref=2., **neu_par)\n self.B = bp.dyn.LifRef(num_B, tau=20., R=0.04, tau_ref=2., **neu_par)\n self.N = bp.dyn.LifRef(num_N, tau=20., R=0.04, tau_ref=2., **neu_par)\n\n # I neurons/interneurons\n self.I = bp.dyn.LifRef(num_I, tau=10., R=0.05, tau_ref=1., **neu_par)\n\n # poisson stimulus # 'freqs' as bm.Variable\n self.IA = bp.dyn.PoissonGroup(num_A, freqs=bm.Variable(bm.zeros(1)))\n self.IB = bp.dyn.PoissonGroup(num_B, freqs=bm.Variable(bm.zeros(1)))\n\n # noise neurons\n self.noise_B = bp.dyn.PoissonGroup(num_B, freqs=poisson_freq)\n self.noise_A = bp.dyn.PoissonGroup(num_A, freqs=poisson_freq)\n self.noise_N = bp.dyn.PoissonGroup(num_N, freqs=poisson_freq)\n self.noise_I = bp.dyn.PoissonGroup(num_I, freqs=poisson_freq)\n\n # define external inputs\n #### TO DO!!!!\n\n\n # define AMPA projections from N\n #### TO DO!!!!\n\n\n # define NMDA projections from N\n #### TO DO!!!!\n\n\n # define AMPA projections from B\n #### TO DO!!!!\n\n\n # define NMDA projections from B\n #### TO DO!!!!\n\n\n # define AMPA projections from A\n #### TO DO!!!!\n\n\n # define NMDA projections from A\n #### TO DO!!!!\n\n\n # define I->E/I conn\n #### TO DO!!!! 
用AMPA()\n\n\n # define external projections\n #### TO DO!!!!\n\n","outputs":[],"execution_count":3},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"02D959C5C6D24799A5759826572282E0","notebookId":"64ebf8563852baaea3e122c0","trusted":true},"source":"class Tool:\n def __init__(self, pre_stimulus_period=100., stimulus_period=1000., delay_period=500.):\n self.pre_stimulus_period = pre_stimulus_period\n self.stimulus_period = stimulus_period\n self.delay_period = delay_period\n self.freq_variance = 10.\n self.freq_interval = 50.\n self.total_period = pre_stimulus_period + stimulus_period + delay_period\n\n def generate_freqs(self, mean):\n # stimulus period\n n_stim = int(self.stimulus_period / self.freq_interval)\n n_interval = int(self.freq_interval / bm.get_dt())\n freqs_stim = np.random.normal(mean, self.freq_variance, (n_stim, 1))\n freqs_stim = np.tile(freqs_stim, (1, n_interval)).flatten()\n # pre stimulus period\n freqs_pre = np.zeros(int(self.pre_stimulus_period / bm.get_dt()))\n # post stimulus period\n freqs_delay = np.zeros(int(self.delay_period / bm.get_dt()))\n all_freqs = np.concatenate([freqs_pre, freqs_stim, freqs_delay], axis=0)\n return bm.asarray(all_freqs)\n\n def visualize_results(self, mon, IA_freqs, IB_freqs, t_start=0., title=None):\n fig, gs = bp.visualize.get_figure(4, 1, 3, 10)\n axes = [fig.add_subplot(gs[i, 0]) for i in range(4)]\n\n ax = axes[0]\n bp.visualize.raster_plot(mon['ts'], mon['A.spike'], markersize=1, ax=ax)\n if title: ax.set_title(title)\n ax.set_ylabel(\"Group A\")\n ax.set_xlim(t_start, self.total_period + 1)\n ax.axvline(self.pre_stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period + self.delay_period, linestyle='dashed')\n\n ax = axes[1]\n bp.visualize.raster_plot(mon['ts'], mon['B.spike'], markersize=1, ax=ax)\n 
ax.set_ylabel(\"Group B\")\n ax.set_xlim(t_start, self.total_period + 1)\n ax.axvline(self.pre_stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period + self.delay_period, linestyle='dashed')\n\n ax = axes[2]\n rateA = bp.measure.firing_rate(mon['A.spike'], width=10.)\n rateB = bp.measure.firing_rate(mon['B.spike'], width=10.)\n ax.plot(mon['ts'], rateA, label=\"Group A\")\n ax.plot(mon['ts'], rateB, label=\"Group B\")\n ax.set_ylabel('Population activity [Hz]')\n ax.set_xlim(t_start, self.total_period + 1)\n ax.axvline(self.pre_stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period + self.delay_period, linestyle='dashed')\n ax.legend()\n\n ax = axes[3]\n ax.plot(mon['ts'], IA_freqs, label=\"group A\")\n ax.plot(mon['ts'], IB_freqs, label=\"group B\")\n ax.set_ylabel(\"Input activity [Hz]\")\n ax.set_xlim(t_start, self.total_period + 1)\n ax.axvline(self.pre_stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period + self.delay_period, linestyle='dashed')\n ax.legend()\n ax.set_xlabel(\"Time [ms]\")\n\n plt.show()","outputs":[],"execution_count":4},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"E0D978CEF052486395B1A39B1A827152","notebookId":"64ebf8563852baaea3e122c0","trusted":true},"source":"tool = Tool()\nnet = DecisionMakingNet()\n\nmu0 = 40.\ncoherence = 25.6\nIA_freqs = tool.generate_freqs(mu0 + mu0 / 100. * coherence)\nIB_freqs = tool.generate_freqs(mu0 - mu0 / 100. 
* coherence)\n\ndef give_input():\n i = bp.share['i']\n net.IA.freqs[0] = IA_freqs[i]\n net.IB.freqs[0] = IB_freqs[i]\n\nrunner = bp.DSRunner(net, inputs=give_input, monitors=['A.spike', 'B.spike'])\nrunner.run(tool.total_period)\ntool.visualize_results(runner.mon, IA_freqs, IB_freqs)\n","outputs":[{"output_type":"stream","name":"stderr","text":"No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"},{"output_type":"display_data","data":{"text/plain":" 0%| | 0/16000 [00:00","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":5},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"53BD7F79EC6E4076A8E56D96BCA0F715","notebookId":"64ebf8563852baaea3e122c0","trusted":true},"source":"#测试多次随机猜测时候决策网路的表现\ntool = Tool()\nnet = DecisionMakingNet()\n\nmu0 = 40.\ncoherence = 0.\nIA_freqs = tool.generate_freqs(mu0 + mu0 / 100. * coherence)\nIB_freqs = tool.generate_freqs(mu0 - mu0 / 100. * coherence)\n\ndef give_input():\n i = bp.share['i']\n net.IA.freqs[0] = IA_freqs[i]\n net.IB.freqs[0] = IB_freqs[i]\n\nrunner = bp.DSRunner(net, inputs=give_input, monitors=['A.spike', 'B.spike'])\nrunner.run(tool.total_period)\ntool.visualize_results(runner.mon, IA_freqs, IB_freqs)\n\ntool = Tool()\nnet = DecisionMakingNet()\n\nmu0 = 40.\ncoherence = 0.\nIA_freqs = tool.generate_freqs(mu0 + mu0 / 100. * coherence)\nIB_freqs = tool.generate_freqs(mu0 - mu0 / 100. 
* coherence)\n\ndef give_input():\n i = bp.share['i']\n net.IA.freqs[0] = IA_freqs[i]\n net.IB.freqs[0] = IB_freqs[i]\n\nrunner = bp.DSRunner(net, inputs=give_input, monitors=['A.spike', 'B.spike'])\nrunner.run(tool.total_period)\ntool.visualize_results(runner.mon, IA_freqs, IB_freqs)","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/16000 [00:00","text/html":""},"metadata":{"needs_background":"light"}},{"output_type":"display_data","data":{"text/plain":" 0%| | 0/16000 [00:00","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":6},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"3E99B2463AD64A038E0D645793E7A38A","notebookId":"64ebf8563852baaea3e122c0","trusted":true},"source":"","outputs":[],"execution_count":null}],"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python","nbconvert_exporter":"python","file_extension":".py","version":"3.5.2","pygments_lexer":"ipython3"}},"nbformat":4,"nbformat_minor":5} -------------------------------------------------------------------------------- /课件/Day 1.5_programming_function.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"B2BD7C9825024E82AC9683CD36FDA572","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"# `function` (函数)"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"393FDB5D1A1843FC87B714D286EE8F19","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"函数是组织好的,可重复使用的,用来实现单一,或相关联功能的代码段。 \n\n函数能提高应用的模块性,和代码的重复利用率。 
\n\nPython提供了许多内建函数,比如`print()`。但用户也可以自己创建函数,这被叫做用户自定义函数。"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"F8191286084241D09AF7F4262FFB5DDF","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"## 创建函数"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"85286A767B6E4F209634603A6B96C8BF","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"### `def`关键字"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"94EC604162DF493BB0A680B1C7EA1D12","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"Python中使用def关键字来声明函数,声明函数的格式为: \n\n```python \n\ndef func_name(args): \n ...body... \n [return ...] \n```\n\n\n有3个需要注意的地方: \n\n- 函数名后面必须加冒号 \n- 如果函数体和def不在同一行,则必须缩进 \n- return指定函数返回值,用来结束函数 \n\n但return语句是可有可无的,如果不给return,则等价于加上了`return None`,即函数默认返回None结构"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4B854C0EE4EE4F8F8C69D26C321F3BDA","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"#### 函数参数"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"858E509E8468461DA046E428905A7A68","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"**必需参数** \n\n必需参数须以正确的顺序传入函数。调用时的数量必须和声明时的一样。 \n\n调用 printme() 函数,你必须传入一个参数,不然会出现语法错误:"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"37B09222AC7F4F53B07F781721D10C41","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"def print1( a ):\n 
print(a)","outputs":[],"execution_count":6},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"67A5D86B39CC4925845E4CB024C1A126","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"type(print1)","outputs":[{"output_type":"execute_result","data":{"text/plain":"function"},"metadata":{},"execution_count":7}],"execution_count":7},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"0FE6ECDF8934432FB85211505DEF152B","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"dir(print1)","outputs":[{"output_type":"execute_result","data":{"text/plain":"['__annotations__',\n '__call__',\n '__class__',\n '__closure__',\n '__code__',\n '__defaults__',\n '__delattr__',\n '__dict__',\n '__dir__',\n '__doc__',\n '__eq__',\n '__format__',\n '__ge__',\n '__get__',\n '__getattribute__',\n '__globals__',\n '__gt__',\n '__hash__',\n '__init__',\n '__init_subclass__',\n '__kwdefaults__',\n '__le__',\n '__lt__',\n '__module__',\n '__name__',\n '__ne__',\n '__new__',\n '__qualname__',\n '__reduce__',\n '__reduce_ex__',\n '__repr__',\n '__setattr__',\n '__sizeof__',\n '__str__',\n '__subclasshook__']"},"metadata":{},"execution_count":8}],"execution_count":8},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"8BD8F665DB0044029D7344AB6C509574","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"print1('hhhhhh')","outputs":[{"output_type":"stream","name":"stdout","text":"hhhhhh\n"}],"execution_count":9},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4B842757CA354A988773C218E04637E8","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"# 
print1()","outputs":[],"execution_count":10},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"E8448BB52F3B43CCA1366604FABDC6F2","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"#### 关键字参数"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4A630CD58AED4B1AA69265EC147028A2","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"def print2( name, age ):\n print (\"名字: \", name)\n print (\"年龄: \", age)","outputs":[],"execution_count":11},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4F677EAEF6A241129CF4E79DBEEEE16D","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"print2(age=50, name=\"runoob\" )","outputs":[{"output_type":"stream","name":"stdout","text":"名字: runoob\n年龄: 50\n"}],"execution_count":12},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"56A44962EAB64739BFF5F1362568FFD2","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"print2( name=\"runoob\", age=50, )","outputs":[{"output_type":"stream","name":"stdout","text":"名字: runoob\n年龄: 50\n"}],"execution_count":13},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A94D662AA1A54A45BD309F95A3FB92D9","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"#### 默认参数 \n\n调用函数时,如果没有传递参数,则会使用默认参数。以下实例中如果没有传入 age 参数,则使用默认值:"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"6456B06D42EC4FDC89E0EF18020DE1D8","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"def print3( name, age = 35 ):\n print (\"名字: 
\", name)\n print (\"年龄: \", age)","outputs":[],"execution_count":14},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"EE5A2FCDDB4A46558040A76CC1732566","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"print3( age=50, name=\"runoob\" )\nprint (\"------------------------\")\nprint3( name=\"runoob\" )","outputs":[{"output_type":"stream","name":"stdout","text":"名字: runoob\n年龄: 50\n------------------------\n名字: runoob\n年龄: 35\n"}],"execution_count":15},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"463305B188554712994D9EC992613E63","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"#### 不定长参数 \n\n你可能需要一个函数能处理比当初声明时更多的参数。这些参数叫做不定长参数,和上述 2 种参数不同,声明时不会命名。基本语法如下:`*args` \n\n加了星号 `*` 的参数会以元组(tuple)的形式导入,存放所有未命名的变量参数。"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"33B05C92BA1D441B8BBC1F624D8358C3","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"def print4( arg1, *vartuple ):\n print (\"输出: \")\n print (arg1)\n print (vartuple)","outputs":[],"execution_count":16},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"6170B8C921CE43EC9FB37BCB34A37918","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"print4( 70, 60, 50 )","outputs":[{"output_type":"stream","name":"stdout","text":"输出: \n70\n(60, 
50)\n"}],"execution_count":17},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"789611E576A0409C832F91B8B7505E78","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"如果在函数调用时没有指定参数,它就是一个空元组。我们也可以不向函数传递未命名的变量。如下实例:"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"1814254159A84975898BC7B8B0269DBA","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"print4( 70, )","outputs":[{"output_type":"stream","name":"stdout","text":"输出: \n70\n()\n"}],"execution_count":18},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A0C33F8CAF35469B8BAEC36754D8372F","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"#### 命名关键字参数 \n\n加了两个星号 `**` 的参数会以字典的形式导入。"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"AE3861EE711C4C0EAD16365E5E274765","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"def print5( arg1, **vardict ):\n print (\"输出: \")\n print (arg1)\n print (vardict)","outputs":[],"execution_count":19},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"CC02557FCC8A49E48348DE3A34454C5B","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"print5(1, a=2,b=3)","outputs":[{"output_type":"stream","name":"stdout","text":"输出: \n1\n{'a': 2, 'b': 
3}\n"}],"execution_count":20},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"37FE6AC1D702477B8EA8A60583EAFA45","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"print5(1)","outputs":[{"output_type":"stream","name":"stdout","text":"输出: \n1\n{}\n"}],"execution_count":21},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"0D144955B43C4AECA9C21F47076C38C2","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"#### return语句"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"20D73D52133B4DA083E124904E75D995","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"`return [表达式]` 语句用于退出函数,选择性地向调用方返回一个表达式。不带参数值的return语句返回None。"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"52A3FB78D4004C958D473A12CEB04362","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"def sum1( a, b ):\n return a + b","outputs":[],"execution_count":22},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"942D4FAC86FF49C5895A16BA2C968F42","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"sum1(1, 2)","outputs":[{"output_type":"execute_result","data":{"text/plain":"3"},"metadata":{},"execution_count":23}],"execution_count":23},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"C91C189181F54F3796574E7848F7520D","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"def sum2( a, b ):\n return a + b, a 
","outputs":[],"execution_count":24},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5FE636E01EE6446AAFE2E637C9B082CE","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"sum2( 1, 2)","outputs":[{"output_type":"execute_result","data":{"text/plain":"(3, 1)"},"metadata":{},"execution_count":25}],"execution_count":25},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"62110F6C33754DB4807AD6612A705141","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"def sum3( a, b ):\n return a + b, ","outputs":[],"execution_count":26},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"ED5E1584DB1C462FB0DC945E8988F1E7","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"sum3( 1, 2)","outputs":[{"output_type":"execute_result","data":{"text/plain":"(3,)"},"metadata":{},"execution_count":27}],"execution_count":27},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"1353D13C253F4A3F97A427D1E269D985","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"### `lambda`匿名函数"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"ED9EF1F12F0A4263A7DD2C3D012E745A","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"在python中使用`lambda`关键字声明匿名函数,python中的`lambda`是一个表达式而不是一个语句,这意味着某些语句环境下可能无法使用def声明函数,但却可以使用lambda声明匿名函数。当然,匿名函数能实现的功能,命名函数也以一样都能实现,只不过有时候可能会比较复杂,可读性会更差。 \n\nlambda声明匿名函数的方式很简单,lambda关键字后面跟上参数列表,然后一个冒号,冒号后跟一个表达式。 \n\n```python \nlambda argl, arg2,... 
argN :expression statement \n```"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"9834B68F2EDB4481811FE671BDA0B69E","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f = lambda x,y,z: x+y+z\n\nf","outputs":[{"output_type":"execute_result","data":{"text/plain":"(x, y, z)>"},"metadata":{},"execution_count":28}],"execution_count":28},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"3DCED65A63724FFB97A61634874BB20E","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f(2,3,4)","outputs":[{"output_type":"execute_result","data":{"text/plain":"9"},"metadata":{},"execution_count":29}],"execution_count":29},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"9E7B91960B3C46EDAB6C1BE916FDEF11","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"## 使用函数"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"0A713C1FF99144E79965049DCFD456C4","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"### 函数调用"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"F5A92E47DE6142CAA51C49D13409C782","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"如上所述。"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"6D035A7BF3424F9982D3A22806B9597E","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"### 
函数变量"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5E463AD45DA44CA38FDE9B065C280CAF","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"python是解释性语言,读一行解释一行,解释一行忘记一行。而函数是一种代码块,代码块是一个解释单元,是一个整体。在代码块范围内不会忘记读取过的行,也不会读一行就立即解释一行,而是读取完所有代码块内的行,然后统筹安排地进行解释。 \n\n一个函数声明语句有一个属于自己的代码块范围。"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"9EFDC0C34ABD4BAB96C3DBF19E7E47B0","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"x1 = 3\nx2 = 5\n\ndef myfunc(a, x2):\n x3 = 10\n print('a =', a)\n print('x1 =', x1)\n print('x2 =', x2)\n print('x3 =', x3)","outputs":[],"execution_count":30},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"76AAAD7E11184CF681ABA028DE9B5AEF","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"myfunc(5, 6)","outputs":[{"output_type":"stream","name":"stdout","text":"a = 5\nx1 = 3\nx2 = 6\nx3 = 10\n"}],"execution_count":31},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"7C88C42FD5694F5484BFC31D599262F7","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"- 全局变量: `x1`, `x2` \n- 本地变量: `a`, `x2`, 
`x3`"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"02728745650441AAA9392B7B4B938EB5","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"当在某个范围引用某个变量的时候,将从它所在的层次开始搜索变量是否存在,不存在则向外层继续搜索。搜索到了,则立即停止。"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"46CEAEF70CAE442083A0A909F024C62C","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"#### `global`关键字 \n\n如果想要在def的内部修改全局变量,就需要使用global关键字声明变量:"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"98F1CF7A594442A8A524161C007A48B2","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"x=2\ndef f():\n global x\n x=3\n print(x)","outputs":[],"execution_count":32},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"B4D768205A2143438FC02BF7EB600FE9","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f() \n\nx","outputs":[{"output_type":"stream","name":"stdout","text":"3\n"},{"output_type":"execute_result","data":{"text/plain":"3"},"metadata":{},"execution_count":33}],"execution_count":33},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"020CBE70D4714A7391249B6F6CA869EB","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"global可以声明一个或多个变量为全局变量,多个变量使用逗号隔开,也可以声明事先不存在的变量为全局变量:"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"29ACF855324245D789518376CD6C3A56","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"x=2\ndef f():\n \"\"\"Function usage can be documented here.\"\"\"\n global 
x,y\n x,y = 3,4\n print(x,y)","outputs":[],"execution_count":34},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A29E358B9C724C79808A221CBCEBEE81","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f()\n\nx, y","outputs":[{"output_type":"stream","name":"stdout","text":"3 4\n"},{"output_type":"execute_result","data":{"text/plain":"(3, 4)"},"metadata":{},"execution_count":35}],"execution_count":35},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"9EA23F1C003F44CCA5C28C6524EAA707","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"不能global中直接赋值。所以下面的是错的: \n\n```python \n\nglobal x=2 \n```"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"E9B10D2CA0C34FEE901355CE005F3753","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"### 函数属性"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"F1085B88B0194D7CB7F822081A16D08E","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f.var1 = 
'abc'","outputs":[],"execution_count":36},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"7334AE4EA932473E99DDE372AE182F3D","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f.var1","outputs":[{"output_type":"execute_result","data":{"text/plain":"'abc'"},"metadata":{},"execution_count":37}],"execution_count":37},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"E79C183437C347F9BDB776908A930036","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f.__name__","outputs":[{"output_type":"execute_result","data":{"text/plain":"'f'"},"metadata":{},"execution_count":38}],"execution_count":38},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"45F1E0CA34FE4DE18395E608D23A7C68","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f.__doc__","outputs":[{"output_type":"execute_result","data":{"text/plain":"'Function usage can be documented here.'"},"metadata":{},"execution_count":39}],"execution_count":39},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"21AFC959474A43409EE81DEF8FC1399F","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f.__code__","outputs":[{"output_type":"execute_result","data":{"text/plain":"\", line 2>"},"metadata":{},"execution_count":40}],"execution_count":40},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"1D84D110739241B89870ED863C08880D","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"for i in dir(f.__code__):\n if i.startswith(\"co\"):\n print(i+\":\",eval(\"f.__code__.\"+i))","outputs":[{"output_type":"stream","name":"stdout","text":"co_argcount: 0\nco_cellvars: 
()\nco_code: b'd\\x01\\\\\\x02a\\x00a\\x01t\\x02t\\x00t\\x01\\x83\\x02\\x01\\x00d\\x02S\\x00'\nco_consts: ('Function usage can be documented here.', (3, 4), None)\nco_filename: \nco_firstlineno: 2\nco_flags: 67\nco_freevars: ()\nco_kwonlyargcount: 0\nco_lnotab: b'\\x00\\x03\\x08\\x01'\nco_name: f\nco_names: ('x', 'y', 'print')\nco_nlocals: 0\nco_posonlyargcount: 0\nco_stacksize: 3\nco_varnames: ()\n"}],"execution_count":41},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"507B92DE67554AC697AA73971774DEBB","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"下面将根据上面查看的结果解释各属性: \n\n`co_name`: \n函数的名称。 \n\n上例中该属性的值为外层函数f和闭包函数g,注意不是f1。 \n\n`co_filename`: \n函数定义在哪个文件名中。 \n\n`co_firstlineno`: \n函数声明语句在文件中的第几行。即def关键字所在的行号。 \n\n`co_consts`: \n该函数中使用的常量有哪些。python中并没有专门的常量概念,所有字面意义的数据都是常量。 \n\n..."},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"31FE3EE6BC294D88A32DC24E614801D1","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"### object => function"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"FB23AB84F96044E5809D61C8038E3625","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"`__call__()`"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A1AABCE8FE5543E7880BC39C35A0DC5D","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"class MyFunc(object):\n def __init__(self, a):\n self.a = a\n \n def __call__(self, b):\n return self.a + 
b","outputs":[],"execution_count":42},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"9E4CC5E1618744DD8BF2786CA7F0737C","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f2 = MyFunc(10)\n\nf2","outputs":[{"output_type":"execute_result","data":{"text/plain":"<__main__.MyFunc at 0x7faa9bf7a520>"},"metadata":{},"execution_count":43}],"execution_count":43},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"B34C0104FE74443ABFB549B765FCD7D4","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f2(20)","outputs":[{"output_type":"execute_result","data":{"text/plain":"30"},"metadata":{},"execution_count":44}],"execution_count":44},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"04A239815AF04F138421C535C976A9C0","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"### 嵌套函数 ==> decorator"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"F19BA805BF254BF3BCF936FA778159E4","runtime":{"status":"default","execution_status":null},"notebookId":"64e4305cea861da36c25565d"},"source":"函数内部可以嵌套函数。一般来说,在函数嵌套时,内层函数会作为外层函数的返回值(当然,并非必须)。"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"D4FB6E643C394B0ABF816459F970D678","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"def augment(f):\n def new_f(a, b):\n return f(a, b) + 10\n return new_f","outputs":[],"execution_count":45},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"381421AD52A0434C9A2CAF7731D4C00B","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"@augment\ndef 
f_add(a, b):\n return a + b","outputs":[],"execution_count":46},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"BE62B44A95F149AFA518CF98A806CB89","notebookId":"64e4305cea861da36c25565d","trusted":true},"source":"f_add(5, 5)","outputs":[{"output_type":"execute_result","data":{"text/plain":"20"},"metadata":{},"execution_count":47}],"execution_count":47}],"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python","nbconvert_exporter":"python","file_extension":".py","version":"3.5.2","pygments_lexer":"ipython3"}},"nbformat":4,"nbformat_minor":5} -------------------------------------------------------------------------------- /课件/W1_1 Python & BrainPy基础.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W1_1 Python & BrainPy基础.pdf -------------------------------------------------------------------------------- /课件/W1_1 神经计算建模简介-吴思.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W1_1 神经计算建模简介-吴思.pdf -------------------------------------------------------------------------------- /课件/W1_2 Conductance-based models.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W1_2 Conductance-based models.pdf -------------------------------------------------------------------------------- /课件/W1_2 HH编程.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W1_2 HH编程.pdf -------------------------------------------------------------------------------- /课件/W2_1 Single neuron modeling - simplified models.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W2_1 Single neuron modeling - simplified models.pdf -------------------------------------------------------------------------------- /课件/W2_2 Reduced model programming.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W2_2 Reduced model programming.pdf -------------------------------------------------------------------------------- /课件/W2_3 突触模型.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W2_3 突触模型.pdf -------------------------------------------------------------------------------- /课件/W2_4 Synaptic Plasticity.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W2_4 Synaptic Plasticity.pdf -------------------------------------------------------------------------------- /课件/W3_1 兴奋抑制平衡网络及其编程实现.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W3_1 
兴奋抑制平衡网络及其编程实现.pdf -------------------------------------------------------------------------------- /课件/W3_2 Decision_making_model.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W3_2 Decision_making_model.pdf -------------------------------------------------------------------------------- /课件/W3_2 a_rate_network_of_decision_making.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"89BD1777B45349C7B58DC8CC1F7957BF","notebookId":"64ebf74bf2211668f833254c","trusted":true},"source":"import numpy as np\nimport brainpy as bp\nimport brainpy.math as bm\n\nbm.enable_x64()\nbm.set_platform('cpu')\n\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict\n","outputs":[],"execution_count":1},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"6F02B6BD31CA461D9D8105B75178B2E0","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf74bf2211668f833254c"},"source":"# **A Rate Model for Decision-Making** \nConsider two excitatory neural assemblies, populations $1$ and $2$, that compete with each other through a shared pool of \ninhibitory neurons. \n\nLet $r_1$ and $r_2$ be their respective population-firing rates, and the total synaptic input current $x_i$ and the resulting firing rate $r_i$ of the neural population $i$ obey the following input-output relationship ($F - I$ curve): \n\n$$ \nr_i = F(x_i) = \\frac{ax_i - b}{1-\\exp(-d(ax_i-b))} \n$$ \n\nwhich captures the current-frequency function of a leaky integrate-and-fire neuron. The parameter values are $a$ = 270 Hz/nA, $b$ = 108 Hz, $d$ = 0.154 sec. 
\n\nAssume that the 'synaptic drive variables' $S_1$ and $S_2$ obey \n\n\\begin{align} \n\\frac{dS_1}{dt} &= F(x_1)\\,\\gamma(1-S_1)-S_1/\\tau_s\\\\ \n\\frac{dS_2}{dt} &= F(x_2)\\,\\gamma(1-S_2)-S_2/\\tau_s \n\\end{align} \n\nwhere $\\gamma$ = 0.641. The net current into each population is given by \n\n\\begin{align} \nx_1 &= J_E S_1 + J_I S_2 + I_0 + I_{noise1} + J_{ext}\\mu_1\\\\ \nx_2 &= J_E S_2 + J_I S_1 + I_0 + I_{noise2} +J_{ext}\\mu_2. \n\\end{align} \n\n\nThe synaptic time constant is $\\tau_s$ = 100 ms (NMDA time consant). The synaptic coupling strengths are $J_E$ = 0.2609 nA, $J_I$ = -0.0497 nA, and $J_{ext}$ = 0.00052 nA. Stimulus-selective inputs to populations 1 and 2 are governed by unitless parameters $\\mu_1$ and $\\mu_2$, respectively. $I_0+ I_{noise}$ is the background input which has a mean $I_0$ and a noise component $I_{noise}$ described by an Ornstein-Uhlenbeck process: \n\n\\begin{align} \ndI_{noise1} &= - I_{noise1} \\frac{dt}{\\tau_0} + \\sigma dW \\\\ \ndI_{noise2} &= - I_{noise2} \\frac{dt}{\\tau_0} + \\sigma dW \\\\ \n\\end{align} \n\nwhere $I_0=0.3255$ nA, filter time constant $\\tau_0=2$ ms, and noise amplitude $\\sigma=0.02$ nA. $dW$ is a Wiener process and note that when numerially integrating that with step size $\\frac{dt}{\\tau_0}$ then $\\Delta W \\sim \\mathcal{N}(0, \\frac{dt}{\\tau_0})$, a normal distribution with mean 0 and variance $\\frac{dt}{\\tau_0}$ \n\nFor the decision-making paradigm, the input rates $\\mu_1$ and $\\mu_2$ are determined by the stimulus coherence $c'$ which ranges between 0% and 100%: \n\n\\begin{align} \n\\mu_1 &=\\mu_0(1+c'/100)\\\\ \n\\mu_2 &=\\mu_0(1-c'/100) \n\\end{align}"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"49F35CE3C7FF48669BAD9430A49C08F8","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf74bf2211668f833254c"},"source":"**References:** \n\n- Wong K-F and Wang X-J (2006). 
A recurrent network mechanism for time integration in perceptual decisions. J. Neurosci 26, 1314-1328."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4B3B4C9CB1FD4C209B48E7286B0F08C0","notebookId":"64ebf74bf2211668f833254c","trusted":true},"source":"class DecisionMakingRateModel(bp.dyn.NeuGroup):\n def __init__(self, size, coherence, JE=0.2609, JI=0.0497, Jext=5.2e-4, I0=0.3255,\n gamma=6.41e-4, tau=100., tau_n=2., sigma_n=0.02, a=270., b=108., d=0.154,\n noise_freq=2400., method='exp_auto', **kwargs):\n super(DecisionMakingRateModel, self).__init__(size, **kwargs)\n \n # 初始化参数\n self.coherence = coherence\n self.JE = JE\n self.JI = JI\n self.Jext = Jext\n self.I0 = I0\n self.gamma = gamma\n self.tau = tau\n self.tau_n = tau_n\n self.sigma_n = sigma_n\n self.a = a\n self.b = b\n self.d = d\n \n # 初始化变量\n self.s1 = bm.Variable(bm.zeros(self.num) + 0.15)\n self.s2 = bm.Variable(bm.zeros(self.num) + 0.15)\n self.r1 = bm.Variable(bm.zeros(self.num))\n self.r2 = bm.Variable(bm.zeros(self.num))\n self.mu0 = bm.Variable(bm.zeros(self.num))\n self.I1_noise = bm.Variable(bm.zeros(self.num))\n self.I2_noise = bm.Variable(bm.zeros(self.num))\n \n # 噪声输入的神经元\n self.noise1 = bp.dyn.PoissonGroup(self.num, freqs=noise_freq)\n self.noise2 = bp.dyn.PoissonGroup(self.num, freqs=noise_freq)\n \n # 定义积分函数\n self.integral = bp.odeint(self.derivative, method=method)\n \n @property\n def derivative(self):\n return bp.JointEq([self.ds1, self.ds2, self.dI1noise, self.dI2noise])\n \n def ds1(self, s1, t, s2, mu0):\n #### TO DO!!! S1的动力学公式\n I1 = self.Jext * mu0 * (1. + self.coherence / 100.)\n x1 = self.JE * s1 - self.JI * s2 + self.I0 + I1 + self.I1_noise\n r1 = (self.a * x1 - self.b) / (1. - bm.exp(-self.d * (self.a * x1 - self.b)))\n return - s1 / self.tau + (1. - s1) * self.gamma * r1\n \n def ds2(self, s2, t, s1, mu0):\n ### TO DO!!! 
S2的动力学公式\n I2=self.Jext*mu0*(1.- self.coherence / 100.)\n x2 = self.JE * s2 - self.JI * s1 + self.I0 + I2 + self.I2_noise\n r2 = (self.a * x2 - self.b) / (1. - bm.exp(-self.d * (self.a * x2 - self.b))) \n return - s2 / self.tau + (1. - s2) * self.gamma * r2\n\n def dI1noise(self, I1_noise, t, noise1):\n return (- I1_noise + noise1.spike * bm.sqrt(self.tau_n * self.sigma_n * self.sigma_n)) / self.tau_n\n \n def dI2noise(self, I2_noise, t, noise2):\n return (- I2_noise + noise2.spike * bm.sqrt(self.tau_n * self.sigma_n * self.sigma_n)) / self.tau_n\n \n \n def update(self, tdi):\n # 更新噪声神经元以产生新的随机发放 self.noise1.update(tdi) self.noise2.update(tdi)\n # 更新s1、s2、I1_noise、I2_noise\n integral = self.integral(self.s1, self.s2, self.I1_noise, self.I2_noise, tdi.t, mu0=self.mu0,\n noise1=self.noise1, noise2=self.noise2, dt=tdi.dt)\n self.s1.value, self.s2.value, self.I1_noise.value, self.I2_noise.value = integral\n \n # 用更新后的s1、s2计算r1、r2\n #### TO DO!!!\n I1 = self.Jext * self.mu0 * (1. + self.coherence / 100.)\n x1 = self.JE * self.s1 + self.JI * self.s2 + self.I0 + I1 + self.I1_noise\n self.r1.value = (self.a * x1 - self.b) / (1. - bm.exp(-self.d * (self.a * x1 - self.b)))\n I2 = self.Jext * self.mu0 * (1. - self.coherence / 100.)\n x2 = self.JE * self.s2 + self.JI * self.s1 + self.I0 + I2 + self.I2_noise\n self.r2.value = (self.a * x2 - self.b) / (1. 
- bm.exp(-self.d * (self.a * x2 - self.b)))\n \n # 重置外部输入 \n self.mu0[:] = 0.","outputs":[],"execution_count":2},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"BE9AC134A04B4A1EA1DEA5C147E78EC7","notebookId":"64ebf74bf2211668f833254c","trusted":true},"source":"# 定义各个阶段的时长\npre_stimulus_period, stimulus_period, delay_period = 100., 2000., 500.\n\n# 生成模型\ndmnet = DecisionMakingRateModel(1, coherence=25.6, noise_freq=2400.)\n\n# 定义电流随时间的变化\ninputs, total_period = bp.inputs.constant_input([(0., pre_stimulus_period),\n (20., stimulus_period),\n (0., delay_period)])\n# 运行数值模拟\nrunner = bp.DSRunner(dmnet,\n monitors=['s1', 's2', 'r1', 'r2'],\n inputs=('mu0', inputs, 'iter'))\nrunner.run(total_period)\n\n# 可视化\nfig, gs = plt.subplots(2, 1, figsize=(6, 6), sharex='all')\ngs[0].plot(runner.mon.ts, runner.mon.s1, label='s1')\ngs[0].plot(runner.mon.ts, runner.mon.s2, label='s2')\ngs[0].axvline(pre_stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[0].axvline(pre_stimulus_period + stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[0].set_ylabel('gating variable $s$')\ngs[0].legend()\n\ngs[1].plot(runner.mon.ts, runner.mon.r1, label='r1')\ngs[1].plot(runner.mon.ts, runner.mon.r2, label='r2')\ngs[1].axvline(pre_stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[1].axvline(pre_stimulus_period + stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[1].set_xlabel('t (ms)')\ngs[1].set_ylabel('firing rate $r$')\ngs[1].legend()\n\nplt.subplots_adjust(hspace=0.1)\nplt.show()\n","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/26000 [00:00=2.4.3, update() function no longer needs to receive a global shared argument.\n\nInstead of using:\n\n def update(self, tdi, *args, **kwagrs):\n t = tdi['t']\n ...\n\nPlease use:\n\n def update(self, *args, **kwagrs):\n t = bp.share['t']\n ...\n\n warnings.warn(_update_deprecate_msg, 
UserWarning)\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":3},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"0C3B9CC50A5440D1BC010D372993BADA","notebookId":"64ebf74bf2211668f833254c","trusted":true},"source":"# 生成模型\ndmnet = DecisionMakingRateModel(1, coherence=-1, noise_freq=2400.)\n\n# 定义电流随时间的变化\ninputs, total_period = bp.inputs.constant_input([(0., pre_stimulus_period),\n (20., stimulus_period),\n (0., delay_period)])\n# 运行数值模拟\nrunner = bp.DSRunner(dmnet,\n monitors=['s1', 's2', 'r1', 'r2'],\n inputs=('mu0', inputs, 'iter'))\nrunner.run(total_period)\n\n# 可视化\nfig, gs = plt.subplots(2, 1, figsize=(6, 6), sharex='all')\ngs[0].plot(runner.mon.ts, runner.mon.s1, label='s1')\ngs[0].plot(runner.mon.ts, runner.mon.s2, label='s2')\ngs[0].axvline(pre_stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[0].axvline(pre_stimulus_period + stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[0].set_ylabel('gating variable $s$')\ngs[0].legend()\n\ngs[1].plot(runner.mon.ts, runner.mon.r1, label='r1')\ngs[1].plot(runner.mon.ts, runner.mon.r2, label='r2')\ngs[1].axvline(pre_stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[1].axvline(pre_stimulus_period + stimulus_period, 0., 1., linestyle='dashed', color=u'#444444')\ngs[1].set_xlabel('t (ms)')\ngs[1].set_ylabel('firing rate $r$')\ngs[1].legend()\n\nplt.subplots_adjust(hspace=0.1)\nplt.show()\n","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/26000 [00:00=2.4.3, update() function no longer needs to receive a global shared argument.\n\nInstead of using:\n\n def update(self, tdi, *args, **kwagrs):\n t = tdi['t']\n ...\n\nPlease use:\n\n def update(self, *args, **kwagrs):\n t = bp.share['t']\n ...\n\n warnings.warn(_update_deprecate_msg, UserWarning)\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":4},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4C9D7293749A44CA91306771E68A221E","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf74bf2211668f833254c"},"source":"## Phase plane analysis"},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"6DB4C31DB1FB4E9B96B87CBA1CAB47C9","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf74bf2211668f833254c"},"source":"### Parameters:"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"9B6A072A33C5482583E7218BE44D7155","notebookId":"64ebf74bf2211668f833254c","trusted":true},"source":"gamma = 0.641 # Saturation factor for gating variable\ntau = 0.1 # Synaptic time constant [sec]\na = 270. # Hz/nA\nb = 108. # Hz\nd = 0.154 # sec\n\nJE = 0.2609 # self-coupling strength [nA]\nJI = -0.0497 # cross-coupling strength [nA]\nJAext = 0.00052 # Stimulus input strength [nA]\nIb = 0.3255 # The background input Ib=I0+I_noise","outputs":[],"execution_count":5},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"6A60B910FF2E4B88A5EDBC5A6E9F6051","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf74bf2211668f833254c"},"source":"### Model implementation \n\n$$ \nr_i = F(x_i) = \\frac{ax_i - b}{1-\\exp(-d(ax_i-b))} \n$$ \n\n\\begin{align} \n\\frac{dS_1}{dt} &= F(x_1)\\,\\gamma(1-S_1)-S_1/\\tau_s\\\\ \n\\frac{dS_2}{dt} &= F(x_2)\\,\\gamma(1-S_2)-S_2/\\tau_s \n\\end{align} \n\n\n\\begin{align} \nx_1 &= J_E S_1 + J_I S_2 + I_0 + I_{noise1} + J_{ext}\\mu_1\\\\ \nx_2 &= J_E S_2 + J_I S_1 + I_0 + I_{noise2} +J_{ext}\\mu_2. 
\n\\end{align} \n\n"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"A389E5A02B5347BFA42EF5F18D9DFA7C","notebookId":"64ebf74bf2211668f833254c","trusted":true},"source":"@bp.odeint\ndef int_s1(s1, t, s2, coh=0.5, mu=20.):\n x1 = JE * s1 + JI * s2 + Ib + JAext * mu * (1. + coh/100)\n r1 = (a * x1 - b) / (1. - bm.exp(-d * (a * x1 - b)))\n return - s1 / tau + (1. - s1) * gamma * r1\n\n@bp.odeint\ndef int_s2(s2, t, s1, coh=0.5, mu=20.):\n #### TO DO!!!S2的动力学表达式\n x2 = JE * s2 + JI * s1 + Ib + JAext * mu * (1. - coh/100)\n r2 = (a * x2 - b) / (1. - bm.exp(-d * (a * x2 - b)))\n return - s2 / tau + (1. - s2) * gamma * r2","outputs":[],"execution_count":6},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"45B32216468D48C6B8ACAAF3E49F3110","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf74bf2211668f833254c"},"source":"No stimulus: $μ_0=0$ Hz."},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"20663452831C47A0A53B9B5CC231AE04","notebookId":"64ebf74bf2211668f833254c","trusted":true},"source":"##### TO DO!!! 
mu=0 的情况\nanalyzer = bp.analysis.PhasePlane2D(\n model=[int_s1, int_s2],\n target_vars={'s1': [0, 1], 's2': [0, 1]},\n pars_update={'mu': 0.},\n resolutions=0.001,\n)\n\nanalyzer.plot_vector_field()\nanalyzer.plot_nullcline(coords=dict(s2='s2-s1'),\n x_style={'fmt': '-'},\n y_style={'fmt': '-'})\nanalyzer.plot_fixed_point()\nanalyzer.show_figure()","outputs":[{"output_type":"stream","name":"stderr","text":"I am creating the vector field ...\nI am computing fx-nullcline ...\nI am evaluating fx-nullcline by optimization ...\nI am computing fy-nullcline ...\nI am evaluating fy-nullcline by optimization ...\nI am searching fixed points ...\nI am trying to find fixed points by optimization ...\n\tThere are 1212 candidates\nI am trying to filter out duplicate fixed points ...\n\tFound 5 fixed points.\n\t#1 s1=0.5669871605297275, s2=0.03189141971571579 is a stable node.\n\t#2 s1=0.3138449248913595, s2=0.055785333471845534 is a saddle node.\n\t#3 s1=0.10265144582202228, s2=0.10265095098913339 is a stable node.\n\t#4 s1=0.05578534267632876, s2=0.3138449310808803 is a saddle node.\n\t#5 s1=0.03189144636489119, s2=0.5669870352865433 is a stable node.\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":7},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"744B02C9F4314AA0BC90217CC5807355","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf74bf2211668f833254c"},"source":"Symmetric stimulus: \n$μ_0=30 Hz, c'=0.$ "},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"152065A66F934BDEB839C6AAED1B35FD","notebookId":"64ebf74bf2211668f833254c","trusted":true},"source":"##### TO DO!!! mu=30 coh=0的情况\nanalyzer = bp.analysis.PhasePlane2D(\n model=[int_s1, int_s2],\n target_vars={'s1': [0, 1], 's2': [0, 1]},\n pars_update={'mu': 30., 'coh': 0.},\n resolutions=0.001,\n)\nanalyzer.plot_vector_field()\nanalyzer.plot_nullcline(coords=dict(s2='s2-s1'),\n x_style={'fmt': '-'},\n y_style={'fmt': '-'})\nanalyzer.plot_fixed_point()\nanalyzer.show_figure()","outputs":[{"output_type":"stream","name":"stderr","text":"I am creating the vector field ...\nI am computing fx-nullcline ...\nI am evaluating fx-nullcline by optimization ...\nI am computing fy-nullcline ...\nI am evaluating fy-nullcline by optimization ...\nI am searching fixed points ...\nI am trying to find fixed points by optimization ...\n\tThere are 1212 candidates\nI am trying to filter out duplicate fixed points ...\n\tFound 3 fixed points.\n\t#1 s1=0.658694232143127, s2=0.051807199439912924 is a stable node.\n\t#2 s1=0.4244557898485831, s2=0.42445562837314016 is a saddle node.\n\t#3 s1=0.05180717720080604, s2=0.6586942355713474 is a stable node.\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":8},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"7606E8FF95DA47CF8ED56149FF7A810B","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf74bf2211668f833254c"},"source":"Symmetric stimulus: \n$μ_0=30 Hz, c'=14\\%$ "},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"129D51E52AAA4BEE997E4F5EEE921EC1","notebookId":"64ebf74bf2211668f833254c","trusted":true},"source":"##### TO DO!!! mu=30 coh=14的情况\nanalyzer = bp.analysis.PhasePlane2D(\n model=[int_s1, int_s2],\n target_vars={'s1': [0, 1], 's2': [0, 1]},\n pars_update={'mu': 30., 'coh': 14.},\n resolutions=0.001,\n)\nanalyzer.plot_vector_field()\nanalyzer.plot_nullcline(coords=dict(s2='s2-s1'),\n x_style={'fmt': '-'},\n y_style={'fmt': '-'})\nanalyzer.plot_fixed_point()\nanalyzer.show_figure()","outputs":[{"output_type":"stream","name":"stderr","text":"I am creating the vector field ...\nI am computing fx-nullcline ...\nI am evaluating fx-nullcline by optimization ...\nI am computing fy-nullcline ...\nI am evaluating fy-nullcline by optimization ...\nI am searching fixed points ...\nI am trying to find fixed points by optimization ...\n\tThere are 1212 candidates\nI am trying to filter out duplicate fixed points ...\n\tFound 3 fixed points.\n\t#1 s1=0.6679776124172938, s2=0.045830222261007005 is a stable node.\n\t#2 s1=0.38455860789855545, s2=0.45363090352898194 is a saddle node.\n\t#3 s1=0.05911003280235089, s2=0.6481046659437737 is a stable node.\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":9},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"165A699AAA224D7A86BE0654CBBE10FA","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf74bf2211668f833254c"},"source":"Symmetric stimulus: \n$μ_0=30 Hz, c'=100\\%$ "},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"14CBFEB0586E4E0A90211522C71183B0","notebookId":"64ebf74bf2211668f833254c","trusted":true},"source":"##### TO DO!!! mu=30 coh=100的情况\nanalyzer = bp.analysis.PhasePlane2D(\n model=[int_s1, int_s2],\n target_vars={'s1': [0, 1], 's2': [0, 1]},\n pars_update={'mu': 30., 'coh': 100.},\n resolutions=0.001,\n)\nanalyzer.plot_vector_field()\nanalyzer.plot_nullcline(coords=dict(s2='s2-s1'),\n x_style={'fmt': '-'},\n y_style={'fmt': '-'})\nanalyzer.plot_fixed_point()\nanalyzer.show_figure()","outputs":[{"output_type":"stream","name":"stderr","text":"I am creating the vector field ...\nI am computing fx-nullcline ...\nI am evaluating fx-nullcline by optimization ...\nI am computing fy-nullcline ...\nI am evaluating fy-nullcline by optimization ...\nI am searching fixed points ...\nI am trying to find fixed points by optimization ...\n\tThere are 1212 candidates\nI am trying to filter out duplicate fixed points ...\n\tFound 1 fixed points.\n\t#1 s1=0.7092805209334905, s2=0.0239636630419946 is a stable node.\n"},{"output_type":"display_data","data":{"text/plain":"
","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":10},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5C823FC014244A08A47F6B167033BE03","notebookId":"64ebf74bf2211668f833254c","trusted":true},"source":"","outputs":[],"execution_count":null}],"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python","nbconvert_exporter":"python","file_extension":".py","version":"3.5.2","pygments_lexer":"ipython3"}},"nbformat":4,"nbformat_minor":5} -------------------------------------------------------------------------------- /课件/W3_2 a_spiking_network_of_decision_making.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"55655974444244BD90F799E8C21725E9","notebookId":"64ebf74cf2211668f8332551","trusted":true},"source":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport brainpy as bp\nimport brainpy.math as bm\n","outputs":[],"execution_count":1},{"cell_type":"markdown","metadata":{"jupyter":{},"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"933BA876020C4582819822B8DDA08B86","runtime":{"status":"default","execution_status":null},"notebookId":"64ebf74cf2211668f8332551"},"source":"# **The Cortical Network Model for Decision-Making** \n\nThe network is composed of N neurons, with NE pyramidal cells (80%) and NI interneurons (20%) (Braitenberg and Schütz 1991). \nSimulations reported in here were done with NE = 1600, NI = 400. \nEach stimulus activates a distinct and small subpopulation of fNE excitatory cells (f = 0.15). 
\n\n**Neurons** \n\nBoth pyramidal cells and interneurons are described by leaky integrate-and-fire neurons (see for example Tuckwell 1988) and are characterized by a resting potential VL = −70 mV, a firing threshold Vth = −50 mV, a reset potential Vreset = −55 mV, a membrane capacitance Cm = 0.5 nF for pyramidal cells and 0.2 nF for interneurons, a membrane leak conductance gL = 25 nS for pyramidal cells and 20 nS for interneurons, and a refractory period τref = 2 ms for pyramidal cells and 1 ms for interneurons. The corresponding membrane time constants are τm = Cm/gL = 20 ms for excitatory cells and 10 ms for interneurons (McCormick et al. 1985). Below threshold, the membrane potential V(t) of a cell \n\n$$ \nC_m \\frac{d V(t)}{d t}=-g_L\\left(V(t)-V_L\\right)-I_{s y n}(t) \n$$ \n\nwhere Isyn(t) represents the total synaptic current flowing into the cell. \n\n\n**Synapses** \n\nThe total synaptic currents are given by: \n\n$$ \nI_{s y n}(t)=I_{\\text {ext,AMPA }}(t)+I_{\\text {rec }, A M P A}(t)+I_{\\text {rec }, N M D A}(t)+I_{\\text {rec }, \\mathrm{GABA}}(t) \n$$ \n\nin which \n\n$$ \n\\begin{gathered} \nI_{\\text {ext,AMPA }}(t)=g_{\\text {ext,AMPA }}\\left(V(t)-V_E\\right) s^{\\text {ext,AMPA }}(t) \\\\ \nI_{\\text {rec,AMPA }}(t)=g_{\\text {rec,AMPA }}\\left(V(t)-V_E\\right) \\sum_{j=1}^{C_E} w_j s_j^{A M P A}(t) \\\\ \nI_{\\text {rec,NMDA }}(t)=\\frac{g_{\\mathrm{NMDA}}\\left(V(t)-V_E\\right)}{\\left(1+\\left[\\mathrm{Mg}^{2+}\\right] \\exp (-0.062 V(t)) / 3.57\\right)} \\sum_{j=1}^{\\mathrm{C}_E} w_j s_j^{\\mathrm{NMDA}}(t) \\\\ \nI_{\\mathrm{rec}, \\mathrm{GABA}}(t)=g_{\\mathrm{GABA}}\\left(V(t)-V_l\\right) \\sum_{j=1}^{C_1} s_j^{\\mathrm{GABA}}(t) \n\\end{gathered} \n$$ \n\nwhere VE = 0 mV, VI = −70 mV. \n\n**Synaptic Weights** \n\nHence, inside a selective population, $w_j$ = w+, where w+ > 1 is a dimensionless parameter that is equal to the relative strength of “potentiated” synapses with respect to the baseline. 
Unless specified otherwise, I used w+ = 1.7. \n\nBetween two different selective populations, and from the nonselective population to selective ones, $w_j$ = w−, where w− < 1 measures the strength of synaptic “depression.” $w− = 1 − f(w_+ − 1)/(1 − f)$. \n\nOther connections have $w_j$ = 1. \n\n\n**References:** \n\n-Wang XJ. Probabilistic Decision Making by Slow Reverberation in Cortical Circuits. Neuron. 2002;36(5):955-968. doi:10.1016/S0896-6273(02)01092-9 \n"},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5F589FD4B4A04D158A62EB5433E2A055","notebookId":"64ebf74cf2211668f8332551","trusted":true},"source":"class AMPA(bp.Projection):\n def __init__(self, pre, post, conn, delay, g_max, tau, E):\n super().__init__()\n if conn == 'all2all':\n comm = bp.dnn.AllToAll(pre.num, post.num, g_max)\n elif conn == 'one2one':\n comm = bp.dnn.OneToOne(pre.num, g_max)\n else:\n raise ValueError\n syn = bp.dyn.Expon.desc(post.num, tau=tau)\n out = bp.dyn.COBA.desc(E=E)\n self.proj = bp.dyn.ProjAlignPostMg2(\n pre=pre, delay=delay, comm=comm,\n syn=syn, out=out, post=post\n )\n\n\nclass NMDA(bp.Projection):\n def __init__(self, pre, post, conn, delay, g_max):\n super().__init__()\n if conn == 'all2all':\n comm = bp.dnn.AllToAll(pre.num, post.num, g_max)\n elif conn == 'one2one':\n comm = bp.dnn.OneToOne(pre.num, g_max)\n else:\n raise ValueError\n syn = bp.dyn.NMDA.desc(pre.num, a=0.5, tau_decay=100., tau_rise=2.)\n out = bp.dyn.MgBlock(E=0., cc_Mg=1.0)\n self.proj = bp.dyn.ProjAlignPreMg2(\n pre=pre, delay=delay, syn=syn,\n comm=comm, out=out, post=post\n )\n","outputs":[],"execution_count":2},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"77186C726A0240598EAA5BFC00746A58","notebookId":"64ebf74cf2211668f8332551","trusted":true},"source":"class DecisionMakingNet(bp.DynSysGroup):\n def __init__(self, scale=1., 
f=0.15):\n super().__init__()\n # 网络中各组神经元的数目\n num_exc = int(1600 * scale)\n num_I, num_A, num_B = int(400 * scale), int(f * num_exc), int(f * num_exc)\n num_N = num_exc - num_A - num_B\n self.num_A, self.num_B, self.num_N, self.num_I = num_A, num_B, num_N, num_I\n\n poisson_freq = 2400. # Hz\n w_pos = 1.7\n w_neg = 1. - f * (w_pos - 1.) / (1. - f)\n g_ext2E_AMPA = 2.1 # nS\n g_ext2I_AMPA = 1.62 # nS\n g_E2E_AMPA = 0.05 / scale # nS\n g_E2I_AMPA = 0.04 / scale # nS\n g_E2E_NMDA = 0.165 / scale # nS\n g_E2I_NMDA = 0.13 / scale # nS\n g_I2E_GABAa = 1.3 / scale # nS\n g_I2I_GABAa = 1.0 / scale # nS\n\n neu_par = dict(V_rest=-70., V_reset=-55., V_th=-50., V_initializer=bp.init.OneInit(-70.))\n\n # E neurons/pyramid neurons\n self.A = bp.dyn.LifRef(num_A, tau=20., R=0.04, tau_ref=2., **neu_par)\n self.B = bp.dyn.LifRef(num_B, tau=20., R=0.04, tau_ref=2., **neu_par)\n self.N = bp.dyn.LifRef(num_N, tau=20., R=0.04, tau_ref=2., **neu_par)\n\n # I neurons/interneurons\n self.I = bp.dyn.LifRef(num_I, tau=10., R=0.05, tau_ref=1., **neu_par)\n\n # poisson stimulus # 'freqs' as bm.Variable\n self.IA = bp.dyn.PoissonGroup(num_A, freqs=bm.Variable(bm.zeros(1)))\n self.IB = bp.dyn.PoissonGroup(num_B, freqs=bm.Variable(bm.zeros(1)))\n\n # noise neurons\n self.noise_B = bp.dyn.PoissonGroup(num_B, freqs=poisson_freq)\n self.noise_A = bp.dyn.PoissonGroup(num_A, freqs=poisson_freq)\n self.noise_N = bp.dyn.PoissonGroup(num_N, freqs=poisson_freq)\n self.noise_I = bp.dyn.PoissonGroup(num_I, freqs=poisson_freq)\n\n # define external inputs\n #### TO DO!!!!\n self.IA2A = AMPA(self.IA, self.A, 'one2one', None, g_ext2E_AMPA, tau=2., E=0.)\n self.IB2B = AMPA(self.IB, self.B, 'one2one', None, g_ext2E_AMPA, tau=2., E=0.)\n\n # define AMPA projections from N\n #### TO DO!!!!\n self.N2B_AMPA = AMPA(self.N, self.B, 'all2all', 0.5, g_E2E_AMPA * w_neg, tau=2., E=0.)\n self.N2A_AMPA = AMPA(self.N, self.A, 'all2all', 0.5, g_E2E_AMPA * w_neg, tau=2., E=0.)\n self.N2N_AMPA = AMPA(self.N, self.N, 
'all2all', 0.5, g_E2E_AMPA, tau=2., E=0.)\n self.N2I_AMPA = AMPA(self.N, self.I, 'all2all', 0.5, g_E2I_AMPA, tau=2., E=0.)\n\n # define NMDA projections from N\n #### TO DO!!!!\n self.N2B_NMDA = NMDA(self.N, self.B, 'all2all', 0.5, g_E2E_NMDA * w_neg)\n self.N2A_NMDA = NMDA(self.N, self.A, 'all2all', 0.5, g_E2E_NMDA * w_neg)\n self.N2N_NMDA = NMDA(self.N, self.N, 'all2all', 0.5, g_E2E_NMDA)\n self.N2I_NMDA = NMDA(self.N, self.I, 'all2all', 0.5, g_E2I_NMDA)\n\n # define AMPA projections from B\n #### TO DO!!!!\n self.B2B_AMPA = AMPA(self.B, self.B, 'all2all', 0.5, g_E2E_AMPA * w_pos, tau=2., E=0.)\n self.B2A_AMPA = AMPA(self.B, self.A, 'all2all', 0.5, g_E2E_AMPA * w_neg, tau=2., E=0.)\n self.B2N_AMPA = AMPA(self.B, self.N, 'all2all', 0.5, g_E2E_AMPA, tau=2., E=0.)\n self.B2I_AMPA = AMPA(self.B, self.I, 'all2all', 0.5, g_E2I_AMPA, tau=2., E=0.)\n\n # define NMDA projections from B\n #### TO DO!!!!\n self.B2B_NMDA = NMDA(self.B, self.B, 'all2all', 0.5, g_E2E_NMDA * w_pos)\n self.B2A_NMDA = NMDA(self.B, self.A, 'all2all', 0.5, g_E2E_NMDA * w_neg)\n self.B2N_NMDA = NMDA(self.B, self.N, 'all2all', 0.5, g_E2E_NMDA)\n self.B2I_NMDA = NMDA(self.B, self.I, 'all2all', 0.5, g_E2I_NMDA)\n\n # define AMPA projections from A\n #### TO DO!!!!\n self.A2B_AMPA = AMPA(self.A, self.B, 'all2all', 0.5, g_E2E_AMPA * w_neg, tau=2., E=0.)\n self.A2A_AMPA = AMPA(self.A, self.A, 'all2all', 0.5, g_E2E_AMPA * w_pos, tau=2., E=0.)\n self.A2N_AMPA = AMPA(self.A, self.N, 'all2all', 0.5, g_E2E_AMPA, tau=2., E=0.)\n self.A2I_AMPA = AMPA(self.A, self.I, 'all2all', 0.5, g_E2I_AMPA, tau=2., E=0.)\n\n # define NMDA projections from A\n #### TO DO!!!!\n self.A2B_NMDA = NMDA(self.A, self.B, 'all2all', 0.5, g_E2E_NMDA * w_neg)\n self.A2A_NMDA = NMDA(self.A, self.A, 'all2all', 0.5, g_E2E_NMDA * w_pos)\n self.A2N_NMDA = NMDA(self.A, self.N, 'all2all', 0.5, g_E2E_NMDA)\n self.A2I_NMDA = NMDA(self.A, self.I, 'all2all', 0.5, g_E2I_NMDA)\n\n # define I->E/I conn\n #### TO DO!!!! 
用AMPA()\n self.I2B = AMPA(self.I, self.B, 'all2all', 0.5, g_I2E_GABAa, tau=5., E=-70.)\n self.I2A = AMPA(self.I, self.A, 'all2all', 0.5, g_I2E_GABAa, tau=5., E=-70.)\n self.I2N = AMPA(self.I, self.N, 'all2all', 0.5, g_I2E_GABAa, tau=5., E=-70.)\n self.I2I = AMPA(self.I, self.I, 'all2all', 0.5, g_I2I_GABAa, tau=5., E=-70.)\n\n # define external projections\n #### TO DO!!!!\n self.noise2B = AMPA(self.noise_B, self.B, 'one2one', None, g_ext2E_AMPA, tau=2., E=0.)\n self.noise2A = AMPA(self.noise_A, self.A, 'one2one', None, g_ext2E_AMPA, tau=2., E=0.)\n self.noise2N = AMPA(self.noise_N, self.N, 'one2one', None, g_ext2E_AMPA, tau=2., E=0.)\n self.noise2I = AMPA(self.noise_I, self.I, 'one2one', None, g_ext2I_AMPA, tau=2., E=0.)\n","outputs":[],"execution_count":3},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"51CCE07DFE64409DAD088F401C0C0015","notebookId":"64ebf74cf2211668f8332551","trusted":true},"source":"class Tool:\n def __init__(self, pre_stimulus_period=100., stimulus_period=1000., delay_period=500.):\n self.pre_stimulus_period = pre_stimulus_period\n self.stimulus_period = stimulus_period\n self.delay_period = delay_period\n self.freq_variance = 10.\n self.freq_interval = 50.\n self.total_period = pre_stimulus_period + stimulus_period + delay_period\n\n def generate_freqs(self, mean):\n # stimulus period\n n_stim = int(self.stimulus_period / self.freq_interval)\n n_interval = int(self.freq_interval / bm.get_dt())\n freqs_stim = np.random.normal(mean, self.freq_variance, (n_stim, 1))\n freqs_stim = np.tile(freqs_stim, (1, n_interval)).flatten()\n # pre stimulus period\n freqs_pre = np.zeros(int(self.pre_stimulus_period / bm.get_dt()))\n # post stimulus period\n freqs_delay = np.zeros(int(self.delay_period / bm.get_dt()))\n all_freqs = np.concatenate([freqs_pre, freqs_stim, freqs_delay], axis=0)\n return bm.asarray(all_freqs)\n\n def visualize_results(self, mon, IA_freqs, IB_freqs, 
t_start=0., title=None):\n fig, gs = bp.visualize.get_figure(4, 1, 3, 10)\n axes = [fig.add_subplot(gs[i, 0]) for i in range(4)]\n\n ax = axes[0]\n bp.visualize.raster_plot(mon['ts'], mon['A.spike'], markersize=1, ax=ax)\n if title: ax.set_title(title)\n ax.set_ylabel(\"Group A\")\n ax.set_xlim(t_start, self.total_period + 1)\n ax.axvline(self.pre_stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period + self.delay_period, linestyle='dashed')\n\n ax = axes[1]\n bp.visualize.raster_plot(mon['ts'], mon['B.spike'], markersize=1, ax=ax)\n ax.set_ylabel(\"Group B\")\n ax.set_xlim(t_start, self.total_period + 1)\n ax.axvline(self.pre_stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period + self.delay_period, linestyle='dashed')\n\n ax = axes[2]\n rateA = bp.measure.firing_rate(mon['A.spike'], width=10.)\n rateB = bp.measure.firing_rate(mon['B.spike'], width=10.)\n ax.plot(mon['ts'], rateA, label=\"Group A\")\n ax.plot(mon['ts'], rateB, label=\"Group B\")\n ax.set_ylabel('Population activity [Hz]')\n ax.set_xlim(t_start, self.total_period + 1)\n ax.axvline(self.pre_stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period + self.delay_period, linestyle='dashed')\n ax.legend()\n\n ax = axes[3]\n ax.plot(mon['ts'], IA_freqs, label=\"group A\")\n ax.plot(mon['ts'], IB_freqs, label=\"group B\")\n ax.set_ylabel(\"Input activity [Hz]\")\n ax.set_xlim(t_start, self.total_period + 1)\n ax.axvline(self.pre_stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period, linestyle='dashed')\n ax.axvline(self.pre_stimulus_period + self.stimulus_period + self.delay_period, 
linestyle='dashed')\n ax.legend()\n ax.set_xlabel(\"Time [ms]\")\n\n plt.show()","outputs":[],"execution_count":4},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"E92921C7A8C248E681B6ED74A6AFBC5E","notebookId":"64ebf74cf2211668f8332551","trusted":true},"source":"tool = Tool()\nnet = DecisionMakingNet()\n\nmu0 = 40.\ncoherence = 25.6\nIA_freqs = tool.generate_freqs(mu0 + mu0 / 100. * coherence)\nIB_freqs = tool.generate_freqs(mu0 - mu0 / 100. * coherence)\n\ndef give_input():\n i = bp.share['i']\n net.IA.freqs[0] = IA_freqs[i]\n net.IB.freqs[0] = IB_freqs[i]\n\nrunner = bp.DSRunner(net, inputs=give_input, monitors=['A.spike', 'B.spike'])\nrunner.run(tool.total_period)\ntool.visualize_results(runner.mon, IA_freqs, IB_freqs)\n","outputs":[{"output_type":"stream","name":"stderr","text":"No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"},{"output_type":"display_data","data":{"text/plain":" 0%| | 0/16000 [00:00","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":5},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"5CB5DC6F8EB6467880A38F971766FD9E","notebookId":"64ebf74cf2211668f8332551","trusted":true},"source":"#测试多次随机猜测时候决策网路的表现\ntool = Tool()\nnet = DecisionMakingNet()\n\nmu0 = 40.\ncoherence = 0.\nIA_freqs = tool.generate_freqs(mu0 + mu0 / 100. * coherence)\nIB_freqs = tool.generate_freqs(mu0 - mu0 / 100. * coherence)\n\ndef give_input():\n i = bp.share['i']\n net.IA.freqs[0] = IA_freqs[i]\n net.IB.freqs[0] = IB_freqs[i]\n\nrunner = bp.DSRunner(net, inputs=give_input, monitors=['A.spike', 'B.spike'])\nrunner.run(tool.total_period)\ntool.visualize_results(runner.mon, IA_freqs, IB_freqs)\n\ntool = Tool()\nnet = DecisionMakingNet()\n\nmu0 = 40.\ncoherence = 0.\nIA_freqs = tool.generate_freqs(mu0 + mu0 / 100. 
* coherence)\nIB_freqs = tool.generate_freqs(mu0 - mu0 / 100. * coherence)\n\ndef give_input():\n i = bp.share['i']\n net.IA.freqs[0] = IA_freqs[i]\n net.IB.freqs[0] = IB_freqs[i]\n\nrunner = bp.DSRunner(net, inputs=give_input, monitors=['A.spike', 'B.spike'])\nrunner.run(tool.total_period)\ntool.visualize_results(runner.mon, IA_freqs, IB_freqs)","outputs":[{"output_type":"display_data","data":{"text/plain":" 0%| | 0/16000 [00:00","text/html":""},"metadata":{"needs_background":"light"}},{"output_type":"display_data","data":{"text/plain":" 0%| | 0/16000 [00:00","text/html":""},"metadata":{"needs_background":"light"}}],"execution_count":6},{"cell_type":"code","metadata":{"jupyter":{},"collapsed":false,"scrolled":false,"tags":[],"slideshow":{"slide_type":"slide"},"id":"4A25DC557C8740BA83C9673A76488560","notebookId":"64ebf74cf2211668f8332551","trusted":true},"source":"","outputs":[],"execution_count":null}],"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"name":"python","mimetype":"text/x-python","nbconvert_exporter":"python","file_extension":".py","version":"3.5.2","pygments_lexer":"ipython3"}},"nbformat":4,"nbformat_minor":5} -------------------------------------------------------------------------------- /课件/W3_3 Hopfield+CANN.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W3_3 Hopfield+CANN.pdf -------------------------------------------------------------------------------- /课件/W3_4 CANN with Adaptation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W3_4 CANN with Adaptation.pdf 
-------------------------------------------------------------------------------- /课件/W4_1 RNN.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W4_1 RNN.pdf -------------------------------------------------------------------------------- /课件/W4_2 RNN-RC.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W4_2 RNN-RC.pdf -------------------------------------------------------------------------------- /课件/W4_3 脉冲神经网络的训练.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W4_3 脉冲神经网络的训练.pdf -------------------------------------------------------------------------------- /课件/W4_4 类脑计算芯片与系统简介.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/W4_4 类脑计算芯片与系统简介.pdf -------------------------------------------------------------------------------- /课件/figs/E_I_balance_network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/figs/E_I_balance_network.png -------------------------------------------------------------------------------- /课件/figs/align_post.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/figs/align_post.png -------------------------------------------------------------------------------- /课件/figs/align_pre.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/figs/align_pre.png -------------------------------------------------------------------------------- /课件/figs/csr_matrix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/figs/csr_matrix.png -------------------------------------------------------------------------------- /课件/figs/masked_matrix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/figs/masked_matrix.png -------------------------------------------------------------------------------- /课件/figs/mlp_sketch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/figs/mlp_sketch.png -------------------------------------------------------------------------------- /课件/figs/s01apgi89t.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/figs/s01apgi89t.png -------------------------------------------------------------------------------- /课件/figs/snn_graph.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/figs/snn_graph.png -------------------------------------------------------------------------------- /课件/figs/tc-fig1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/figs/tc-fig1.png -------------------------------------------------------------------------------- /课件/figs/tc-fig2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/figs/tc-fig2.png -------------------------------------------------------------------------------- /课件/figs/tc-fig3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainpy/2nd-neural-modeling-and-programming-course/d6979c4b9187462a55568a17dfa0b18fbef77d8d/课件/figs/tc-fig3.png --------------------------------------------------------------------------------