├── .all-contributorsrc ├── .github ├── contributing.md └── workflows │ ├── download_slides.yml │ ├── notebook-pr.yaml │ ├── publish-book.yml │ └── tutorial-readme-pr.yaml ├── .gitignore ├── CNAME ├── LICENSE-CODE.md ├── LICENSE.md ├── README.md ├── book ├── README.md ├── _config.yml └── _static │ └── custom.css ├── environment.yml ├── prereqs └── NeuroAI.md ├── projects ├── README.md ├── docs │ ├── continuing_your_project_after_the_course.md │ ├── datasets_overview.md │ ├── past_projects_overview.md │ └── project_guidance.md ├── keynote.ipynb ├── past-projects │ └── example_past_project.ipynb ├── professional_development │ ├── README.md │ ├── career_features.md │ ├── career_panels.md │ ├── impact_talks.ipynb │ ├── mentorship_program.md │ └── prof_dev_materials.yml ├── project-notebooks │ ├── ComparingNetworks.ipynb │ ├── Macrocircuits.ipynb │ ├── Microlearning.ipynb │ └── static │ │ ├── BiasVariance.png │ │ ├── ComputationalSimilarityTemplate.png │ │ ├── FinalGif.gif │ │ ├── HebbianLearning.png │ │ ├── Hourglass.png │ │ ├── MicrolearningProjectTemplate.png │ │ ├── MotorNet Illustration-01.png │ │ ├── MotorNetGif.gif │ │ ├── MotorNetIllustration-01.png │ │ ├── NCAPPaper.png │ │ ├── NCAPProjectTemplate.png │ │ ├── Problem.png │ │ ├── SussilloBarack.png │ │ ├── TTModelExample-01.png │ │ └── TaskComplexity-01.png └── project_materials.yml ├── requirements.txt └── tutorials ├── Art └── W0D0_NeuroVideoSeries-Daniela_Buchwald.png ├── Bonus_BonusContent ├── Bonus_Intro.ipynb ├── Bonus_Outro.ipynb ├── Bonus_Tutorial1.ipynb ├── Bonus_Tutorial2.ipynb ├── Bonus_Tutorial3.ipynb ├── instructor │ ├── Bonus_Intro.ipynb │ ├── Bonus_Outro.ipynb │ ├── Bonus_Tutorial1.ipynb │ ├── Bonus_Tutorial2.ipynb │ └── Bonus_Tutorial3.ipynb ├── solutions │ ├── Bonus_Tutorial1_Solution_464a2875.py │ ├── Bonus_Tutorial1_Solution_9d6c1017.py │ ├── Bonus_Tutorial1_Solution_c05ddd88.py │ ├── Bonus_Tutorial1_Solution_e7182519.py │ ├── Bonus_Tutorial2_Solution_0d50004f.py │ ├── Bonus_Tutorial2_Solution_f82e3b9c.py │ └── Bonus_Tutorial3_Solution_22e2c431.py ├── static │ ├── Bonus_Tutorial1_Solution_464a2875_0.png │ ├── Bonus_Tutorial1_Solution_9d6c1017_11.png │ └── Bonus_Tutorial1_Solution_c05ddd88_0.png └── student │ ├── Bonus_Intro.ipynb │ ├── Bonus_Outro.ipynb │ ├── Bonus_Tutorial1.ipynb │ ├── Bonus_Tutorial2.ipynb │ └── Bonus_Tutorial3.ipynb ├── Module_WrapUps ├── CourseContentTemplateInstructions.ipynb ├── InsertCategoryNameHere.ipynb ├── instructor │ ├── DynamicalSystems.ipynb │ ├── MachineLearning.ipynb │ └── StochasticProcesses.ipynb └── student │ ├── DynamicalSystems.ipynb │ ├── MachineLearning.ipynb │ └── StochasticProcesses.ipynb ├── README.md ├── Schedule ├── daily_schedules.md ├── schedule_intro.md ├── shared_calendars.md └── timezone_widget.md ├── TechnicalHelp ├── Discord.md ├── Jupyterbook.md ├── Links_Policy.md ├── Tutorial_colab.md ├── Tutorial_kaggle.md ├── chapter_cover.png └── tech_intro.md ├── W1D1_Generalization ├── README.md ├── W1D1_Intro.ipynb ├── W1D1_Outro.ipynb ├── W1D1_Tutorial1.ipynb ├── W1D1_Tutorial2.ipynb ├── W1D1_Tutorial3.ipynb ├── further_reading.md ├── instructor │ ├── W1D1_Intro.ipynb │ ├── W1D1_Outro.ipynb │ ├── W1D1_Tutorial1.ipynb │ ├── W1D1_Tutorial2.ipynb │ └── W1D1_Tutorial3.ipynb ├── solutions │ ├── W1D1_Tutorial1_Solution_22613224.py │ ├── W1D1_Tutorial1_Solution_427804b6.py │ ├── W1D1_Tutorial1_Solution_4d36b048.py │ ├── W1D1_Tutorial1_Solution_7cf70ea7.py │ ├── W1D1_Tutorial1_Solution_cbbb272d.py │ ├── W1D1_Tutorial2_Solution_90edb742.py │ ├── W1D1_Tutorial2_Solution_98ebec10.py 
│ ├── W1D1_Tutorial2_Solution_c14a4735.py │ ├── W1D1_Tutorial3_Solution_9e44e6ca.py │ └── W1D1_Tutorial3_Solution_dbbeabd0.py ├── static │ ├── DancingScript-Bold.ttf │ ├── DancingScript-Medium.ttf │ ├── DancingScript-Regular.ttf │ ├── DancingScript-SemiBold.ttf │ ├── W1D1_goal.png │ ├── model_diagram.png │ ├── neuroai_diagram.png │ ├── neuroai_hello_world.png │ ├── rnn.png │ ├── sample_0.png │ ├── sample_1.png │ ├── sample_2.png │ ├── sample_3.png │ ├── setup.png │ ├── transformer_one_layer.png │ └── trocr_architecture.png └── student │ ├── W1D1_Intro.ipynb │ ├── W1D1_Outro.ipynb │ ├── W1D1_Tutorial1.ipynb │ ├── W1D1_Tutorial2.ipynb │ └── W1D1_Tutorial3.ipynb ├── W1D2_ComparingTasks ├── README.md ├── W1D2_Intro.ipynb ├── W1D2_Outro.ipynb ├── W1D2_Tutorial1.ipynb ├── W1D2_Tutorial2.ipynb ├── W1D2_Tutorial3.ipynb ├── further_reading.md ├── instructor │ ├── W1D2_Intro.ipynb │ ├── W1D2_Outro.ipynb │ ├── W1D2_Tutorial1.ipynb │ ├── W1D2_Tutorial2.ipynb │ └── W1D2_Tutorial3.ipynb ├── solutions │ ├── W1D2_Tutorial1_Solution_17513eb4.py │ ├── W1D2_Tutorial1_Solution_329eb9d7.py │ ├── W1D2_Tutorial1_Solution_430915db.py │ ├── W1D2_Tutorial1_Solution_48232671.py │ ├── W1D2_Tutorial1_Solution_53f79ab6.py │ ├── W1D2_Tutorial1_Solution_5ddb1f2c.py │ ├── W1D2_Tutorial1_Solution_733ba3ce.py │ ├── W1D2_Tutorial1_Solution_9836554e.py │ ├── W1D2_Tutorial1_Solution_9e82edae.py │ ├── W1D2_Tutorial1_Solution_e18cfe86.py │ ├── W1D2_Tutorial1_Solution_eb15e56d.py │ ├── W1D2_Tutorial2_Solution_0cd5a0dc.py │ ├── W1D2_Tutorial2_Solution_13c3d606.py │ ├── W1D2_Tutorial2_Solution_39154423.py │ ├── W1D2_Tutorial2_Solution_936416da.py │ ├── W1D2_Tutorial3_Solution_62d8e960.py │ ├── W1D2_Tutorial3_Solution_bb3c2703.py │ ├── W1D2_Tutorial3_Solution_d48fc54f.py │ └── W1D2_Tutorial3_Solution_ecdb8e78.py ├── static │ ├── W1D2_Tutorial2_Solution_39154423_0.png │ ├── W1D2_Tutorial3_Solution_62d8e960_0.png │ ├── cummulative_regret.png │ ├── evolution.png │ ├── exploration_exploitation_tradeoff.png │ ├── infoNCEloss.png │ ├── learning_temporal_scales.png │ ├── model_architecture.png │ ├── puppies.png │ ├── triplet_loss_minimization.png │ └── two_armed_bandit.png └── student │ ├── W1D2_Intro.ipynb │ ├── W1D2_Outro.ipynb │ ├── W1D2_Tutorial1.ipynb │ ├── W1D2_Tutorial2.ipynb │ └── W1D2_Tutorial3.ipynb ├── W1D3_ComparingArtificialAndBiologicalNetworks ├── README.md ├── W1D3_Intro.ipynb ├── W1D3_Outro.ipynb ├── W1D3_Tutorial1.ipynb ├── W1D3_Tutorial2.ipynb ├── W1D3_Tutorial3.ipynb ├── W1D3_Tutorial4.ipynb ├── W1D3_Tutorial5.ipynb ├── further_reading.md ├── instructor │ ├── W1D3_Intro.ipynb │ ├── W1D3_Outro.ipynb │ ├── W1D3_Tutorial1.ipynb │ ├── W1D3_Tutorial2.ipynb │ ├── W1D3_Tutorial3.ipynb │ ├── W1D3_Tutorial4.ipynb │ └── W1D3_Tutorial5.ipynb ├── solutions │ ├── W1D3_Tutorial1_Solution_192afe09.py │ ├── W1D3_Tutorial1_Solution_7a860365.py │ ├── W1D3_Tutorial1_Solution_dd06ba72.py │ ├── W1D3_Tutorial3_Solution_147d3932.py │ ├── W1D3_Tutorial3_Solution_5ab03b03.py │ ├── W1D3_Tutorial4_Solution_05135f17.py │ ├── W1D3_Tutorial4_Solution_1ac2083f.py │ ├── W1D3_Tutorial4_Solution_3c63934e.py │ ├── W1D3_Tutorial4_Solution_6c059522.py │ ├── W1D3_Tutorial4_Solution_b21e3c22.py │ └── W1D3_Tutorial5_Solution_0467919d.py ├── static │ ├── NSD.png │ ├── W1D3_Tutorial4_Solution_1ac2083f_0.png │ ├── W1D3_Tutorial5_Solution_0467919d_0.png │ ├── rcnn_tutorial.png │ └── response_matrix.png └── student │ ├── W1D3_Intro.ipynb │ ├── W1D3_Outro.ipynb │ ├── W1D3_Tutorial1.ipynb │ ├── W1D3_Tutorial2.ipynb │ ├── W1D3_Tutorial3.ipynb │ ├── 
W1D3_Tutorial4.ipynb │ └── W1D3_Tutorial5.ipynb ├── W1D5_Microcircuits ├── README.md ├── W1D5_Intro.ipynb ├── W1D5_Outro.ipynb ├── W1D5_Tutorial1.ipynb ├── W1D5_Tutorial2.ipynb ├── W1D5_Tutorial3.ipynb ├── further_reading.md ├── instructor │ ├── W1D5_Intro.ipynb │ ├── W1D5_Outro.ipynb │ ├── W1D5_Tutorial1.ipynb │ ├── W1D5_Tutorial2.ipynb │ └── W1D5_Tutorial3.ipynb ├── solutions │ ├── W1D5_Tutorial1_Solution_0b47c17f.py │ ├── W1D5_Tutorial1_Solution_45a72023.py │ ├── W1D5_Tutorial1_Solution_5a7b462a.py │ ├── W1D5_Tutorial1_Solution_e286253f.py │ ├── W1D5_Tutorial1_Solution_e8a7baa1.py │ ├── W1D5_Tutorial1_Solution_eb440839.py │ ├── W1D5_Tutorial1_Solution_f770be90.py │ ├── W1D5_Tutorial2_Solution_9b3b7306.py │ ├── W1D5_Tutorial2_Solution_a36a7d90.py │ ├── W1D5_Tutorial2_Solution_b46035c9.py │ ├── W1D5_Tutorial2_Solution_dd43ccaf.py │ ├── W1D5_Tutorial3_Solution_072775c1.py │ └── W1D5_Tutorial3_Solution_c81fc074.py ├── static │ ├── W1D5_Tutorial2_Solution_9b3b7306_0.png │ ├── components.png │ ├── cross_attention.png │ ├── dictionary.png │ ├── filters.png │ ├── self_attention.png │ ├── sparse_and.png │ └── sparsity.png └── student │ ├── W1D5_Intro.ipynb │ ├── W1D5_Outro.ipynb │ ├── W1D5_Tutorial1.ipynb │ ├── W1D5_Tutorial2.ipynb │ └── W1D5_Tutorial3.ipynb ├── W2D1_Macrocircuits ├── README.md ├── W2D1_Intro.ipynb ├── W2D1_Outro.ipynb ├── W2D1_Tutorial1.ipynb ├── W2D1_Tutorial2.ipynb ├── W2D1_Tutorial3.ipynb ├── further_reading.md ├── instructor │ ├── W2D1_Intro.ipynb │ ├── W2D1_Outro.ipynb │ ├── W2D1_Tutorial1.ipynb │ ├── W2D1_Tutorial2.ipynb │ └── W2D1_Tutorial3.ipynb ├── solutions │ ├── W2D1_Tutorial1_Solution_166f0c8a.py │ ├── W2D1_Tutorial1_Solution_44d1308a.py │ ├── W2D1_Tutorial1_Solution_485ec5dd.py │ ├── W2D1_Tutorial1_Solution_6b3d3e34.py │ ├── W2D1_Tutorial1_Solution_6fa930a8.py │ ├── W2D1_Tutorial1_Solution_74ab8f48.py │ ├── W2D1_Tutorial1_Solution_8817495d.py │ ├── W2D1_Tutorial1_Solution_8c945e68.py │ ├── W2D1_Tutorial1_Solution_a3e6ddda.py │ ├── W2D1_Tutorial1_Solution_a5e90b35.py │ ├── W2D1_Tutorial1_Solution_a955337c.py │ ├── W2D1_Tutorial1_Solution_bc99a7f9.py │ ├── W2D1_Tutorial1_Solution_c3274cd4.py │ ├── W2D1_Tutorial1_Solution_dcafefea.py │ ├── W2D1_Tutorial2_Solution_00f64733.py │ ├── W2D1_Tutorial2_Solution_6d385a89.py │ ├── W2D1_Tutorial2_Solution_7717ab4f.py │ ├── W2D1_Tutorial2_Solution_dd90b43d.py │ ├── W2D1_Tutorial2_Solution_e1136bc5.py │ ├── W2D1_Tutorial2_Solution_fd82f22a.py │ ├── W2D1_Tutorial3_Solution_02426ac5.py │ ├── W2D1_Tutorial3_Solution_16aa2d9e.py │ ├── W2D1_Tutorial3_Solution_3d63fcd3.py │ ├── W2D1_Tutorial3_Solution_3f3de125.py │ ├── W2D1_Tutorial3_Solution_4fb20840.py │ ├── W2D1_Tutorial3_Solution_9a60cb69.py │ └── W2D1_Tutorial3_Solution_ecb01cdf.py ├── static │ ├── W2D1_Tutorial1_Solution_74ab8f48_2.png │ ├── W2D1_Tutorial1_Solution_a5e90b35_5.png │ ├── W2D1_Tutorial1_Solution_dcafefea_1.png │ ├── W2D1_Tutorial2_Solution_00f64733_1.png │ ├── W2D1_Tutorial2_Solution_6d385a89_0.png │ ├── W2D1_Tutorial2_Solution_7717ab4f_0.png │ ├── W2D1_Tutorial2_Solution_e1136bc5_0.png │ ├── W2D1_Tutorial2_Solution_fd82f22a_1.png │ ├── W2D1_Tutorial3_Solution_3f3de125_1.png │ ├── actor_critic.png │ ├── gain_change.png │ ├── holistic_modular.png │ ├── navigation_task.png │ └── nets.png └── student │ ├── W2D1_Intro.ipynb │ ├── W2D1_Outro.ipynb │ ├── W2D1_Tutorial1.ipynb │ ├── W2D1_Tutorial2.ipynb │ └── W2D1_Tutorial3.ipynb ├── W2D2_NeuroSymbolicMethods ├── README.md ├── W2D2_Intro.ipynb ├── W2D2_Outro.ipynb ├── W2D2_Tutorial1.ipynb ├── W2D2_Tutorial2.ipynb ├── 
W2D2_Tutorial3.ipynb ├── W2D2_Tutorial4.ipynb ├── W2D2_Tutorial5.ipynb ├── further_reading.md ├── instructor │ ├── W2D2_Intro.ipynb │ ├── W2D2_Outro.ipynb │ ├── W2D2_Tutorial1.ipynb │ ├── W2D2_Tutorial2.ipynb │ ├── W2D2_Tutorial3.ipynb │ ├── W2D2_Tutorial4.ipynb │ └── W2D2_Tutorial5.ipynb ├── solutions │ ├── W2D2_Tutorial1_Solution_0b0b7a2d.py │ ├── W2D2_Tutorial1_Solution_15f690ab.py │ ├── W2D2_Tutorial1_Solution_3e9c4916.py │ ├── W2D2_Tutorial1_Solution_513dd01a.py │ ├── W2D2_Tutorial1_Solution_99c595f2.py │ ├── W2D2_Tutorial1_Solution_b91a4ab5.py │ ├── W2D2_Tutorial1_Solution_b9294b66.py │ ├── W2D2_Tutorial1_Solution_ce5fc6c7.py │ ├── W2D2_Tutorial1_Solution_da1926e8.py │ ├── W2D2_Tutorial1_Solution_db547f5d.py │ ├── W2D2_Tutorial1_Solution_f12d9c75.py │ ├── W2D2_Tutorial2_Solution_3a819ce6.py │ ├── W2D2_Tutorial2_Solution_550fd076.py │ ├── W2D2_Tutorial2_Solution_a0e39449.py │ ├── W2D2_Tutorial2_Solution_bab79b64.py │ ├── W2D2_Tutorial2_Solution_cc0b7eb5.py │ ├── W2D2_Tutorial4_Solution_603ce327.py │ ├── W2D2_Tutorial4_Solution_73f6dd01.py │ ├── W2D2_Tutorial4_Solution_873dadc4.py │ ├── W2D2_Tutorial4_Solution_8ecc4392.py │ ├── W2D2_Tutorial4_Solution_a848247a.py │ ├── W2D2_Tutorial4_Solution_f2f03c67.py │ ├── W2D2_Tutorial5_Solution_1090e65a.py │ ├── W2D2_Tutorial5_Solution_2b4c5a99.py │ ├── W2D2_Tutorial5_Solution_57dcbce1.py │ ├── W2D2_Tutorial5_Solution_730a75ed.py │ ├── W2D2_Tutorial5_Solution_8c79265f.py │ ├── W2D2_Tutorial5_Solution_99c56d84.py │ ├── W2D2_Tutorial5_Solution_b3d8f220.py │ ├── W2D2_Tutorial5_Solution_bd7761a4.py │ └── W2D2_Tutorial5_Solution_cff4accc.py ├── static │ ├── W2D2_Tutorial5_Solution_1090e65a_0.png │ ├── W2D2_Tutorial5_Solution_57dcbce1_0.png │ ├── W2D2_Tutorial5_Solution_99c56d84_0.png │ ├── W2D2_Tutorial5_Solution_b3d8f220_0.png │ ├── W2D2_Tutorial5_Solution_bd7761a4_0.png │ └── W2D2_Tutorial5_Solution_cff4accc_0.png └── student │ ├── W2D2_Intro.ipynb │ ├── W2D2_Outro.ipynb │ ├── W2D2_Tutorial1.ipynb │ ├── W2D2_Tutorial2.ipynb │ ├── W2D2_Tutorial3.ipynb │ ├── W2D2_Tutorial4.ipynb │ └── W2D2_Tutorial5.ipynb ├── W2D3_Microlearning ├── README.md ├── W2D3_Intro.ipynb ├── W2D3_Outro.ipynb ├── W2D3_Tutorial1.ipynb ├── further_reading.md ├── instructor │ ├── W2D3_Intro.ipynb │ ├── W2D3_Outro.ipynb │ └── W2D3_Tutorial1.ipynb ├── solutions │ ├── W2D3_Tutorial1_Solution_01f74aae.py │ ├── W2D3_Tutorial1_Solution_95265523.py │ └── W2D3_Tutorial1_Solution_d1bc17ea.py ├── static │ ├── feedback_alignment.png │ └── network.png └── student │ ├── W2D3_Intro.ipynb │ ├── W2D3_Outro.ipynb │ └── W2D3_Tutorial1.ipynb ├── W2D4_Macrolearning ├── README.md ├── W2D4_Intro.ipynb ├── W2D4_Outro.ipynb ├── W2D4_Tutorial1.ipynb ├── W2D4_Tutorial2.ipynb ├── W2D4_Tutorial3.ipynb ├── W2D4_Tutorial4.ipynb ├── W2D4_Tutorial5.ipynb ├── further_reading.md ├── instructor │ ├── W2D4_Intro.ipynb │ ├── W2D4_Outro.ipynb │ ├── W2D4_Tutorial1.ipynb │ ├── W2D4_Tutorial2.ipynb │ ├── W2D4_Tutorial3.ipynb │ ├── W2D4_Tutorial4.ipynb │ └── W2D4_Tutorial5.ipynb ├── solutions │ ├── W2D4_Tutorial1_Solution_0b85ba35.py │ ├── W2D4_Tutorial1_Solution_35e7f912.py │ ├── W2D4_Tutorial1_Solution_41fdc825.py │ ├── W2D4_Tutorial1_Solution_4dea007f.py │ ├── W2D4_Tutorial1_Solution_59146784.py │ ├── W2D4_Tutorial2_Solution_07232036.py │ ├── W2D4_Tutorial2_Solution_2ba000ea.py │ ├── W2D4_Tutorial2_Solution_c3936e0c.py │ ├── W2D4_Tutorial2_Solution_e2bacfd6.py │ ├── W2D4_Tutorial3_Solution_08b01bcf.py │ ├── W2D4_Tutorial3_Solution_2753b5eb.py │ ├── W2D4_Tutorial3_Solution_576c8d87.py │ ├── 
W2D4_Tutorial3_Solution_593cdcd4.py │ ├── W2D4_Tutorial4_Solution_2bbabefe.py │ ├── W2D4_Tutorial4_Solution_fa5c39c8.py │ └── W2D4_Tutorial5_Solution_178f638f.py ├── static │ ├── W2D4_Tutorial1_Solution_59146784_0.png │ ├── W2D4_Tutorial2_Solution_07232036_53.png │ ├── W2D4_Tutorial2_Solution_07232036_54.png │ ├── W2D4_Tutorial2_Solution_c3936e0c_7.png │ ├── W2D4_Tutorial2_Solution_c3936e0c_8.png │ ├── W2D4_Tutorial2_Solution_e2bacfd6_54.png │ ├── W2D4_Tutorial2_Solution_e2bacfd6_55.png │ ├── evolution.png │ ├── feedback_alignment.png │ └── network.png └── student │ ├── W2D4_Intro.ipynb │ ├── W2D4_Outro.ipynb │ ├── W2D4_Tutorial1.ipynb │ ├── W2D4_Tutorial2.ipynb │ ├── W2D4_Tutorial3.ipynb │ ├── W2D4_Tutorial4.ipynb │ └── W2D4_Tutorial5.ipynb ├── W2D5_Mysteries ├── README.md ├── W2D5_Intro.ipynb ├── W2D5_Outro.ipynb ├── W2D5_Tutorial1.ipynb ├── W2D5_Tutorial2.ipynb ├── W2D5_Tutorial3.ipynb ├── further_reading.md ├── instructor │ ├── W2D5_Intro.ipynb │ ├── W2D5_Outro.ipynb │ ├── W2D5_Tutorial1.ipynb │ ├── W2D5_Tutorial2.ipynb │ └── W2D5_Tutorial3.ipynb ├── solutions │ ├── W2D5_Tutorial1_Solution_20a869fc.py │ ├── W2D5_Tutorial1_Solution_a617d707.py │ ├── W2D5_Tutorial1_Solution_a926812a.py │ ├── W2D5_Tutorial1_Solution_bfd5f466.py │ ├── W2D5_Tutorial1_Solution_f1250f89.py │ ├── W2D5_Tutorial3_Solution_a926812a.py │ └── W2D5_Tutorial3_Solution_f903bbb4.py ├── static │ ├── HOSS.png │ ├── RIMs.png │ ├── Shared_Workspace.png │ ├── W1D1_goal.png │ └── ethics_roadmap.png └── student │ ├── W2D5_Intro.ipynb │ ├── W2D5_Outro.ipynb │ ├── W2D5_Tutorial1.ipynb │ ├── W2D5_Tutorial2.ipynb │ └── W2D5_Tutorial3.ipynb ├── instructor └── intro.ipynb ├── intro.ipynb ├── materials.yml ├── static ├── AirtableSubmissionButton.png ├── Closed_Access_logo.png ├── ConceptMap.png ├── Humor-Sans.ttf ├── NMA-W1D2-fig06.png ├── NMA_W1D2_dataproject_draft.jpg ├── NeuroAI_big_tent.png ├── NeuroAI_concept_map.png ├── NeuroAI_sponsors_intro.png ├── NeuroAI_sponsors_intro2.png ├── Open_Access_logo.png ├── SurveyButton.png ├── W3D4_Tutorial2_MultiarmedBandit.png ├── W3D4_Tutorial3_CliffWorld.png ├── W3D4_Tutorial3_GridWorld410.png ├── W3D4_Tutorial4_QuentinsWorld.png ├── W3D4_Tutorial4_QuentinsWorldShortcut.png ├── add-tag.png ├── ai-logo.png ├── astrocat.png ├── button.png ├── conv-network.png ├── conv_fc.PNG ├── convnet.png ├── convolutional_layer.PNG ├── data_analysis_step6.jpeg ├── folder-structure.png ├── generative_model.png ├── gh-pages.gif ├── github-actions.gif ├── grad_descent.gif ├── img_1235.jpg ├── img_1237_720.jpg ├── import-complete.png ├── import-repo.gif ├── kaggle_internet_enabled.png ├── kaggle_step1.png ├── kaggle_step2.png ├── kaggle_step3.png ├── kaggle_step4.png ├── kaggle_step5.1.png ├── kaggle_step5.2.png ├── kaggle_step6_1.png ├── kaggle_step6_2.png ├── modeling_step6.png ├── new-course.gif ├── nma-logo-square-4xp.png ├── one-layer-network.png ├── process-notebook.gif ├── publish-book.gif ├── pull-request.gif ├── pull-request.png ├── restart-kernel.gif ├── sample_output.png ├── tag-added.png ├── view-tags.png └── weight-sharing.png └── student └── intro.ipynb /.github/workflows/tutorial-readme-pr.yaml: -------------------------------------------------------------------------------- 1 | name: tutorial-readme-pr 2 | on: 3 | pull_request: 4 | branches: main 5 | paths: 6 | - tutorials/materials.yaml 7 | 8 | jobs: 9 | 10 | generate-readme: 11 | 12 | runs-on: ubuntu-latest 13 | steps: 14 | 15 | - name: Checkout 16 | uses: actions/checkout@v3 17 | with: 18 | persist-credentials: false 19 | fetch-depth: 0 20 
| ref: ${{ github.head_ref }} 21 | 22 | - name: Set up Python 23 | uses: actions/setup-python@v4 24 | with: 25 | python-version: 3.9 26 | 27 | - name: Install CI tools 28 | run: | 29 | BRANCH=`python -c 'import os, re; m = re.search(r"nmaci:([\w-]+)", os.environ["COMMIT_MESSAGE"]); print("main" if m is None else m.group(1))'` 30 | wget https://github.com/NeuromatchAcademy/nmaci/archive/refs/heads/$BRANCH.tar.gz 31 | tar -xzf $BRANCH.tar.gz 32 | mv nmaci-$BRANCH/scripts/ ci/ 33 | rm -r nmaci-$BRANCH 34 | echo ci/ >> .gitignore 35 | 36 | - name: Update READMEs 37 | run: python ci/generate_tutorial_readmes.py 38 | 39 | - name: Commit post-processed files 40 | if: ${{ success() }} 41 | run: | 42 | git config --local user.email "action@github.com" 43 | git config --local user.name "GitHub Action" 44 | git add '**/README.md' 45 | git diff-index --quiet HEAD || git commit -m "Update tutorial README" 46 | 47 | - name: Push post-processed files 48 | if: ${{ success() }} 49 | uses: ad-m/github-push-action@v0.6.0 50 | with: 51 | github_token: ${{ secrets.GITHUB_TOKEN }} 52 | branch: ${{ github.head_ref }} 53 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .ipynb_checkpoints 2 | .vscode 3 | .idea 4 | _build 5 | _toc.yml -------------------------------------------------------------------------------- /CNAME: -------------------------------------------------------------------------------- 1 | neuroai.neuromatch.io -------------------------------------------------------------------------------- /LICENSE-CODE.md: -------------------------------------------------------------------------------- 1 | Copyright 2020 Neuromatch Academy 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | 7 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | 9 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
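The "Install CI tools" step in tutorial-readme-pr.yaml above lets a pull request pin a specific branch of the NeuromatchAcademy/nmaci CI tooling through a marker in the commit message. A minimal, self-contained sketch of that parsing logic (an editor's illustration; it assumes COMMIT_MESSAGE is supplied to the step's environment, e.g. from the head commit of the PR, since no env: block appears in this excerpt):

```python
# Sketch of the branch-override parsing used by the "Install CI tools"
# step above. COMMIT_MESSAGE is assumed to come from the workflow
# environment; the env: block is not shown in this excerpt.
import re

def nmaci_branch(commit_message: str) -> str:
    """Return the nmaci tooling branch requested by a commit message.

    A marker such as "nmaci:my-branch" selects that branch of the
    NeuromatchAcademy/nmaci repo; otherwise "main" is used.
    """
    m = re.search(r"nmaci:([\w-]+)", commit_message)
    return "main" if m is None else m.group(1)

assert nmaci_branch("Update tutorial READMEs") == "main"
assert nmaci_branch("Update READMEs nmaci:fix-readme-script") == "fix-readme-script"
```

The downloaded tarball is then unpacked, its scripts/ directory is moved to ci/, and ci/ is appended to .gitignore so the tooling is never committed back.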
-------------------------------------------------------------------------------- /book/_static/custom.css: -------------------------------------------------------------------------------- 1 | .inline-icon { 2 | vertical-align: baseline; 3 | } 4 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: Change name #e.g., nma-compneuro 2 | dependencies: 3 | - python=3.9 4 | - requests 5 | - numpy 6 | - scipy 7 | - matplotlib 8 | - scikit-learn 9 | - pytorch 10 | - ipywidgets 11 | - pip 12 | -------------------------------------------------------------------------------- /prereqs/NeuroAI.md: -------------------------------------------------------------------------------- 1 | # Prerequisites and preparatory materials for NeuroAI course 2 | Welcome to the [Neuromatch Academy](https://academy.neuromatch.io/)! We're really excited to bring the field of NeuroAI to such a wide and varied audience. We're preparing an amazing set of lectures and tutorials for you! 3 | ## Preparing yourself for the course 4 | This is a more advanced course than other Neuromatch courses so far. We will be relating principles of neuroscience and principles of artificial intelligence, so you should already know the fundamentals of both disciplines. We ask that all students: 5 | * Have taken Neuromatch courses in [computational neuroscience](https://compneuro.neuromatch.io/tutorials/intro.html) and [deep learning](https://deeplearning.neuromatch.io/), or the equivalent 6 | * Have intermediate proficiency in Python 7 | * Know some core math concepts 8 | 9 | Below are more details on the prerequisites. 10 | ### Neuroscience 11 | You should have some exposure to computational neuroscience, such as through our Neuromatch course. A rudimentary familiarity with neurobiology is fine. 12 | ### Programming 13 | This course will be run using Python. We expect students to be familiar with variables, lists, dicts, the numpy and scipy libraries, and plotting in matplotlib. Especially for projects, you will benefit from knowing PyTorch. 14 | ### Deep Learning 15 | You should be familiar with the core ideas of deep learning, including definitions of task goals, neural network architectures, and training and testing procedures. 16 | ### Math skills 17 | We rely on linear algebra, probability, basic statistics, and multivariable calculus. 18 | **Linear algebra:** You will need a good grasp of linear algebra: vector and matrix addition and multiplication, rank, bases, determinants, inverses, and the eigenvalue decomposition. 19 | **Statistics:** You should be comfortable with means and variances, and the normal distribution. You should be familiar with linear regression and cross-validation. 20 | **Calculus:** You should know what integrals and derivatives are, and understand what a differential equation means. 21 | *The Neuromatch Academy team* 22 | ### Resources for learning PyTorch 23 | https://pytorch.org/tutorials/ 24 | -------------------------------------------------------------------------------- /projects/README.md: -------------------------------------------------------------------------------- 1 | # Projects 2 | 3 | ---- 4 | 5 | See the [Daily Guide to Projects](./docs/project_guidance.md).
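As a rough self-check of the Python and math level that prereqs/NeuroAI.md above assumes (an editor's sketch, not part of the course materials), you should be able to read code like this without difficulty:

```python
# Illustrative prerequisite check: NumPy, eigendecomposition, and
# least-squares linear regression. Editor's sketch, not course material.
import numpy as np

rng = np.random.default_rng(0)

# Eigendecomposition of a symmetric matrix S = V diag(w) V^T.
A = rng.standard_normal((4, 4))
S = A @ A.T  # symmetric (and positive semi-definite)
w, V = np.linalg.eigh(S)
assert np.allclose(V @ np.diag(w) @ V.T, S)

# Linear regression by least squares: y = X beta + noise.
X = rng.standard_normal((100, 3))
beta_true = np.array([1.0, -2.0, 0.5])
y = X @ beta_true + 0.01 * rng.standard_normal(100)
beta_hat, *_ = np.linalg.lstsq(X, y, rcond=None)
assert np.allclose(beta_hat, beta_true, atol=0.01)
```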
6 | -------------------------------------------------------------------------------- /projects/docs/continuing_your_project_after_the_course.md: -------------------------------------------------------------------------------- 1 | # Impact Scholars program 2 | 3 | If you would like to continue working on your project after the two-week course, we encourage you to apply to the [Impact Scholars program](https://impact-scholars.neuromatch.io/impact-scholars/structure.html). -------------------------------------------------------------------------------- /projects/docs/datasets_overview.md: -------------------------------------------------------------------------------- 1 | # Datasets 2 | 3 | -------------------------------------------------------------------------------- /projects/docs/past_projects_overview.md: -------------------------------------------------------------------------------- 1 | # Past projects 2 | 3 | We are proud to showcase the project presentations our students prepared in previous iterations of *Computational Tools for Climate Science*. 4 | 5 | You can browse their presentation slides by project topic: 6 | - [Sea level rise](projects/past-projects/Sea_level_rise.ipynb) 7 | - [Regional precipitation variability and extreme events](projects/past-projects/precipitation.ipynb) 8 | - [Changes in land cover: Albedo and carbon sequestration](projects/past-projects/albedo.ipynb) 9 | - [Monitoring and mapping wildfires using satellite data](projects/past-projects/wildfires.ipynb) 10 | - [Ocean acidification](projects/past-projects/ocean_acidification.ipynb) 11 | - [The impact of ENSO on precipitation and temperature](projects/past-projects/ENSO.ipynb) 12 | - [Heatwaves: Assessing the dynamic interactions of the atmosphere and land](projects/past-projects/heatwaves.ipynb) 13 | 14 | 15 | -------------------------------------------------------------------------------- /projects/past-projects/example_past_project.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Example Past Project\n", 8 | "## 2023" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "metadata": {}, 14 | "source": [ 15 | "### Insert your title here\n", 16 | "\n", 17 | "**Authors**: Author names\n", 18 | "\n", 19 | "**Teaching assistants**: TA names" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "metadata": {}, 26 | "outputs": [], 27 | "source": [ 28 | "# @markdown\n", 29 | "from ipywidgets import widgets\n", 30 | "from IPython.display import IFrame\n", 31 | "\n", 32 | "filename = \"2023SLR_Investigating_the_relationship_between_sea_level_rise_and_extreme_events_in_Indonesia\"\n", 33 | "\n", 34 | "download_link = f\"https://github.com/ClimateMatchAcademy/course-content/blob/main/projects/past-projects/{filename}.pdf\"\n", 35 | "render_link = f\"https://nbviewer.org/github/ClimateMatchAcademy/course-content/blob/main/projects/past-projects/{filename}.pdf\"\n", 36 | "\n", 37 | "# @markdown\n", 38 | "out = widgets.Output()\n", 39 | "with out:\n", 40 | " print(f\"If you want to download the slides: {download_link}\")\n", 41 | " display(IFrame(src=f\"{render_link}\", width=730, height=410))\n", 42 | "display(out)" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "metadata": {}, 48 | "source": [ 49 | "## NOTE: you should add as many past projects as you have by including a markdown header cell and a code cell importing the slides, as in the example above"
50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "metadata": {}, 55 | "source": [] 56 | } 57 | ], 58 | "metadata": { 59 | "language_info": { 60 | "name": "python" 61 | }, 62 | "orig_nbformat": 4 63 | }, 64 | "nbformat": 4, 65 | "nbformat_minor": 2 66 | } 67 | -------------------------------------------------------------------------------- /projects/professional_development/README.md: -------------------------------------------------------------------------------- 1 | # Professional Development 2 | 3 | Professional development enables students to connect and expand their experiences, skills, and interests, broadening students’ perception of how they can make an impact via different career options. 4 | 5 | 6 | ## Mentorship Program 7 | 8 | Professionals with experience in various fields **meet with student groups live** to share their career experiences. Mentor meetings aim to inspire and expand students’ perspectives, during and after the course. 9 | 10 | Learn more about [Mentor Meetings](mentorship_program.md). 11 | 12 | -------------------------------------------------------------------------------- /projects/professional_development/career_features.md: -------------------------------------------------------------------------------- 1 | # Career Features 2 | 3 | 4 | * **_Why_**: There is no fixed career pathway for everyone. Career features are tailored to **showcase a range of professionals** working to support action to address our current climate crisis, who may inspire students in their own career exploration. 5 | * **_What_**: **Online write-ups** of practitioners in climate and related fields, included in the program book during the program. These career features may include mentors, impact speakers, career panelists, and other volunteers in CMA. 6 | * **_How_**: Students are encouraged to **browse career features** that interest them **at their own pace**, and follow up with practitioners if they want to learn more about different career pathways.
-------------------------------------------------------------------------------- /projects/professional_development/prof_dev_materials.yml: -------------------------------------------------------------------------------- 1 | - title: Introduction 2 | file: projects/professional_development/README.md 3 | # - title: Impact Talks 4 | # file: projects/professional_development/impact_talks.ipynb 5 | - title: Mentorship Program 6 | file: projects/professional_development/mentorship_program.md 7 | # - title: Career Features 8 | # file: projects/professional_development/career_features.md 9 | # - title: Career Panels 10 | # file: projects/professional_development/career_panels.md 11 | -------------------------------------------------------------------------------- /projects/project-notebooks/static/BiasVariance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/BiasVariance.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/ComputationalSimilarityTemplate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/ComputationalSimilarityTemplate.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/FinalGif.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/FinalGif.gif -------------------------------------------------------------------------------- /projects/project-notebooks/static/HebbianLearning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/HebbianLearning.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/Hourglass.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/Hourglass.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/MicrolearningProjectTemplate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/MicrolearningProjectTemplate.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/MotorNet Illustration-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/MotorNet Illustration-01.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/MotorNetGif.gif: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/MotorNetGif.gif -------------------------------------------------------------------------------- /projects/project-notebooks/static/MotorNetIllustration-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/MotorNetIllustration-01.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/NCAPPaper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/NCAPPaper.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/NCAPProjectTemplate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/NCAPProjectTemplate.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/Problem.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/Problem.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/SussilloBarack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/SussilloBarack.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/TTModelExample-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/TTModelExample-01.png -------------------------------------------------------------------------------- /projects/project-notebooks/static/TaskComplexity-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/projects/project-notebooks/static/TaskComplexity-01.png -------------------------------------------------------------------------------- /projects/project_materials.yml: -------------------------------------------------------------------------------- 1 | - file: projects/README.md 2 | title: Introduction 3 | # - file: projects/keynote.ipynb 4 | # title: Project Day keynote (W2D2) 5 | - file: projects/docs/project_guidance.md 6 | title: Daily guide for projects 7 | - file: projects/docs/continuing_your_project_after_the_course.md 8 | title: Continuing your project after the course 9 | - file: projects/docs/datasets_overview.md 10 | title: Project materials 11 | sections: 12 | - file: projects/project-notebooks/Macrocircuits.ipynb 13 | title: Macrocircuits 14 | - file: projects/project-notebooks/Microlearning.ipynb 15 | title: 
Microlearning 16 | - file: projects/project-notebooks/ComparingNetworks.ipynb 17 | title: Comparing Networks 18 | 19 | 20 | # The below may be used in the future 21 | # - file: projects/docs/past_projects_overview.md 22 | # title: Past projects 23 | # sections: 24 | # - file: projects/past-projects/example_past_project.ipynb 25 | # title: Example Past Project 26 | # You should add your past projects here using the following: 27 | # - file: point to the file 28 | # title: name the project -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.26.4 2 | scipy==1.12.0 3 | matplotlib==3.8.3 4 | Pillow==10.2.0 5 | torch 6 | torchvision==0.16 7 | transformers==4.38.1 8 | gradio==4.19.2 9 | networkx==3.2.1 10 | ipywidgets==8.1.2 11 | requests==2.31.0 12 | scikit-learn==1.4.1.post1 13 | scikit-image==0.19.2 14 | tqdm==4.66.2 15 | protobuf==3.20.* 16 | numba==0.59.0 17 | sentencepiece==0.2.0 18 | python-dotenv==1.0.1 19 | gymnasium==0.29.1 20 | pytorch_lightning==2.2.4 21 | jupyter_ui_poll==0.2.2 22 | tensorflow==2.8 23 | ffmpeg 24 | imageio-ffmpeg 25 | torchlens 26 | rsatoolbox==0.1.5 27 | vibecheck 28 | seaborn 29 | plotly 30 | git+https://github.com/neuromatch/TextRecognitionDataGenerator#egg=trdg 31 | git+https://github.com/neuromatch/GNS-Modeling#egg=gns 32 | git+https://github.com/neuromatch/pyBPL#egg=pybpl 33 | git+https://github.com/neuromatch/MotorNet#egg=motornet 34 | git+https://github.com/ctn-waterloo/sspspace@neuromatch#egg=sspspace 35 | git+https://github.com/mitchellostrow/DSA#egg=DSA -------------------------------------------------------------------------------- /tutorials/Art/W0D0_NeuroVideoSeries-Daniela_Buchwald.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/Art/W0D0_NeuroVideoSeries-Daniela_Buchwald.png -------------------------------------------------------------------------------- /tutorials/Bonus_BonusContent/solutions/Bonus_Tutorial1_Solution_464a2875.py: -------------------------------------------------------------------------------- 1 | # create the model 2 | pca = decomposition.PCA(n_components=2) 3 | # fit the model on training data 4 | pca.fit(input_train) 5 | # transformation on 2D space 6 | pca_latent_test = pca.transform(input_test) 7 | 8 | # Uncomment to test your code! 
9 | with plt.xkcd(): 10 | plot_latent_generative(pca_latent_test, y_test, pca.inverse_transform, 11 | image_shape=image_shape) -------------------------------------------------------------------------------- /tutorials/Bonus_BonusContent/solutions/Bonus_Tutorial1_Solution_9d6c1017.py: -------------------------------------------------------------------------------- 1 | encoding_size = 2 2 | n_epochs = 10 3 | batch_size = 64 4 | 5 | # set PyTorch RNG seed 6 | torch_seed = 0 7 | 8 | model = nn.Sequential( 9 | nn.Linear(input_size, encoding_size), 10 | nn.ReLU(), 11 | nn.Linear(encoding_size, input_size), 12 | nn.Sigmoid() 13 | ) 14 | 15 | encoder = model[:2] 16 | decoder = model[2:] 17 | 18 | # reset RNGs for weight initialization 19 | torch.manual_seed(torch_seed) 20 | np.random.seed(0) 21 | 22 | # reset encoder weights and biases 23 | encoder.apply(init_weights_kaiming_uniform) 24 | 25 | # retrieve weights and biases from the encoder before training 26 | encoder_w_init, encoder_b_init = get_layer_weights(encoder[0]) 27 | decoder_w_init, decoder_b_init = get_layer_weights(decoder[0]) 28 | 29 | # reset RNGs for minibatch sequence 30 | torch.manual_seed(torch_seed) 31 | np.random.seed(0) 32 | 33 | # train the autoencoder 34 | runSGD(model, input_train, input_test, criterion='bce', 35 | n_epochs=n_epochs, batch_size=batch_size) 36 | 37 | # retrieve weights and biases from the encoder after training 38 | encoder_w_train, encoder_b_train = get_layer_weights(encoder[0]) 39 | decoder_w_train, decoder_b_train = get_layer_weights(decoder[0]) -------------------------------------------------------------------------------- /tutorials/Bonus_BonusContent/solutions/Bonus_Tutorial1_Solution_c05ddd88.py: -------------------------------------------------------------------------------- 1 | # create the model 2 | pca = decomposition.PCA(n_components=2) 3 | # fit the model on training data 4 | pca.fit(input_train) 5 | # transformation on 2D space 6 | pca_latent_test = pca.transform(input_test) 7 | 8 | with plt.xkcd(): 9 | plot_latent_generative(pca_latent_test, y_test, pca.inverse_transform, 10 | image_shape=image_shape) -------------------------------------------------------------------------------- /tutorials/Bonus_BonusContent/solutions/Bonus_Tutorial1_Solution_e7182519.py: -------------------------------------------------------------------------------- 1 | encoding_size = 32 2 | 3 | model = nn.Sequential( 4 | nn.Linear(input_size, encoding_size), 5 | nn.ReLU(), 6 | # insert your code here to add the layer 7 | nn.Linear(encoding_size, input_size), 8 | # insert the activation function 9 | nn.Sigmoid() 10 | ) 11 | 12 | print(f'Model structure \n\n {model}') -------------------------------------------------------------------------------- /tutorials/Bonus_BonusContent/solutions/Bonus_Tutorial2_Solution_0d50004f.py: -------------------------------------------------------------------------------- 1 | encoding_size = 2 2 | 3 | model = nn.Sequential( 4 | nn.Linear(input_size, int(input_size / 2)), 5 | nn.PReLU(), 6 | nn.Linear(int(input_size / 2), encoding_size * 32), 7 | # Add activation function 8 | nn.PReLU(), 9 | # Add another layer 10 | nn.Linear(encoding_size * 32, encoding_size), 11 | # Add activation function 12 | nn.PReLU(), 13 | # Add another layer 14 | nn.Linear(encoding_size, encoding_size * 32), 15 | # Add activation function 16 | nn.PReLU(), 17 | # Add another layer 18 | nn.Linear(encoding_size * 32, int(input_size / 2)), 19 | # Add activation function 20 | nn.PReLU(), 21 | # Add another layer 22 | 
nn.Linear(int(input_size / 2), input_size), 23 | # Add activation function 24 | nn.Sigmoid() 25 | ) 26 | 27 | model[:-2].apply(init_weights_kaiming_normal) 28 | 29 | print(f'Autoencoder \n\n {model}\n') 30 | 31 | # Adjust the value n_l to split your model correctly 32 | n_l = 6 33 | 34 | # uncomment when you fill the code 35 | encoder = model[:n_l] 36 | decoder = model[n_l:] 37 | print(f'Encoder \n\n {encoder}\n') 38 | print(f'Decoder \n\n {decoder}') -------------------------------------------------------------------------------- /tutorials/Bonus_BonusContent/solutions/Bonus_Tutorial2_Solution_f82e3b9c.py: -------------------------------------------------------------------------------- 1 | encoding_size = 3 2 | 3 | model = nn.Sequential( 4 | nn.Linear(input_size, int(input_size / 2)), 5 | nn.PReLU(), 6 | nn.Linear(int(input_size / 2), encoding_size * 32), 7 | nn.PReLU(), 8 | nn.Linear(encoding_size * 32, encoding_size), 9 | nn.PReLU(), 10 | # add the normalization layer 11 | NormalizeLayer(), 12 | nn.Linear(encoding_size, encoding_size * 32), 13 | nn.PReLU(), 14 | nn.Linear(encoding_size * 32, int(input_size / 2)), 15 | nn.PReLU(), 16 | nn.Linear(int(input_size / 2), input_size), 17 | nn.Sigmoid() 18 | ) 19 | 20 | model[:-2].apply(init_weights_kaiming_normal) 21 | 22 | print(f'Autoencoder \n\n {model}\n') 23 | 24 | # Adjust the value n_l to split your model correctly 25 | n_l = 7 26 | 27 | # uncomment when you fill the code 28 | encoder = model[:n_l] 29 | decoder = model[n_l:] 30 | print(f'Encoder \n\n {encoder}\n') 31 | print(f'Decoder \n\n {decoder}') -------------------------------------------------------------------------------- /tutorials/Bonus_BonusContent/solutions/Bonus_Tutorial3_Solution_22e2c431.py: -------------------------------------------------------------------------------- 1 | missing_a = 1 2 | missing_b = 0 3 | # input train data 4 | my_input_train = input_train[(y_train != missing_a) & (y_train != missing_b)] 5 | # input test data 6 | my_input_test = input_test[(y_test != missing_a) & (y_test != missing_b)] 7 | # test labels 8 | my_y_test = y_test[(y_test != missing_a) & (y_test != missing_b)] 9 | 10 | # Uncomment to test your code 11 | print(my_input_train.shape) 12 | print(my_input_test.shape) 13 | print(my_y_test.shape) -------------------------------------------------------------------------------- /tutorials/Bonus_BonusContent/static/Bonus_Tutorial1_Solution_464a2875_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/Bonus_BonusContent/static/Bonus_Tutorial1_Solution_464a2875_0.png -------------------------------------------------------------------------------- /tutorials/Bonus_BonusContent/static/Bonus_Tutorial1_Solution_9d6c1017_11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/Bonus_BonusContent/static/Bonus_Tutorial1_Solution_9d6c1017_11.png -------------------------------------------------------------------------------- /tutorials/Bonus_BonusContent/static/Bonus_Tutorial1_Solution_c05ddd88_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/Bonus_BonusContent/static/Bonus_Tutorial1_Solution_c05ddd88_0.png -------------------------------------------------------------------------------- /tutorials/Schedule/schedule_intro.md:
-------------------------------------------------------------------------------- 1 | # Schedule -------------------------------------------------------------------------------- /tutorials/Schedule/shared_calendars.md: -------------------------------------------------------------------------------- 1 | # Shared calendars 2 | 3 | We have five Google calendars to help you see events in your local time! If you open the links below, they will be in 4 | Coordinated Universal Time (UTC). If you click at the bottom to add them to your Google Calendar, you should see the events in the time zone of your own Google 5 | calendar (probably your local time zone)! 6 | 7 | [Zone 1](https://calendar.google.com/calendar/u/0/embed?src=c_5b9b0f7f14c4177cf781c422c4c753c8b5b449ec86c2f05604b844e1ebc81e98@group.calendar.google.com&ctz=UTC) 8 | 9 | [Zone 2](https://calendar.google.com/calendar/u/0/embed?src=c_6a32dcb5f399fb89cd204f3059307caea5aa06593670ab8e7260c9e7b45e1ded@group.calendar.google.com&ctz=UTC) 10 | 11 | [Zone 3](https://calendar.google.com/calendar/u/0/embed?src=c_741228ed2ce9fa885df21456ad6c01eb047302366077b6418910d2cfe2b98c24@group.calendar.google.com&ctz=UTC) 12 | 13 | [Zone 4](https://calendar.google.com/calendar/u/0/embed?src=c_daa1fb75e95db401eff323765c0fb67fd5d24e04cd25b1cfbd0521571e0c45a6@group.calendar.google.com&ctz=UTC) 14 | 15 | [Zone 5](https://calendar.google.com/calendar/u/0/embed?src=c_92f327ec5557687c9f390d6b62e99bc5dbc9d75734c1d1bdf260aef37b5ea2a1@group.calendar.google.com&ctz=UTC) 16 | -------------------------------------------------------------------------------- /tutorials/Schedule/timezone_widget.md: -------------------------------------------------------------------------------- 1 | # Timezone widget 2 | 3 | Please check out this timezone widget to help find your tutorial and project times: [https://neuromatchacademy.github.io/widgets/tz.html](https://neuromatchacademy.github.io/widgets/tz.html) 4 | 5 | -------------------------------------------------------------------------------- /tutorials/TechnicalHelp/Discord.md: -------------------------------------------------------------------------------- 1 | # Using Discord 2 | 3 | [Please click here for the Discord Guide](https://docs.neuromatch.io/p/bz7uXZLMwMvDqn/Discord-Guides) 4 | -------------------------------------------------------------------------------- /tutorials/TechnicalHelp/Jupyterbook.md: -------------------------------------------------------------------------------- 1 | # Using Jupyter Book 2 | 3 | While you can see videos and the notebooks in this Jupyter Book, you have to launch each page in an interactive environment in order to write or run code.
4 | -------------------------------------------------------------------------------- /tutorials/TechnicalHelp/Links_Policy.md: -------------------------------------------------------------------------------- 1 | # Quick links and policies 2 | 3 | ## Quick links 4 | 5 | Course materials: [https://neuroai.neuromatch.io/](https://neuroai.neuromatch.io/) 6 | 7 | Portal: [https://portal.neuromatchacademy.org/](https://portal.neuromatchacademy.org/) 8 | 9 | Website: [https://neuromatch.io/](https://neuromatch.io/) 10 | 11 | Code of Conduct and Code of Conduct Violations Form: [https://github.com/NeuromatchAcademy/precourse/blob/main/CODE_OF_CONDUCT.md](https://github.com/NeuromatchAcademy/precourse/blob/main/CODE_OF_CONDUCT.md) 12 | 13 | Attendance Policy & Waiver: [https://docs.neuromatch.io/p/BI_ssrrHYrfg_E/Academy-Student-Attendance-Policy-and-Waivers](https://docs.neuromatch.io/p/BI_ssrrHYrfg_E/Academy-Student-Attendance-Policy-and-Waivers) 14 | 15 | 16 | 17 | ## Policies 18 | 19 | ### Coursework attendance policy 20 | 21 | [See full course attendance policy here.](https://docs.neuromatch.io/p/BI_ssrrHYrfg_E/Academy-Student-Attendance-Policy-and-Waivers) 22 | -------------------------------------------------------------------------------- /tutorials/TechnicalHelp/Tutorial_colab.md: -------------------------------------------------------------------------------- 1 | # Using Google Colab 2 | 3 | If you have access to Google Colab, you can click the "Open in Colab" button at the top of each page to launch it in an interactive environment where you can write and run code. 4 | 5 | ## Advice for using Colab 6 | * Make a local copy to your Drive: otherwise changes you make (e.g., notes you take or code you write) will not be saved 7 | * Do not edit the same Colab notebook directly as a group. Unlike Google Docs, one person's version will overwrite another's. If your browser happens to be open, it can overwrite anything others have done since the version last loaded in your browser! 8 | * Colab does not have version control!
Use git for this if you want to collaborate (on a group project colab for example) 9 | 10 | To learn the basics of Colab, please check out this Colab notebook: https://colab.research.google.com/notebooks/intro.ipynb 11 | -------------------------------------------------------------------------------- /tutorials/TechnicalHelp/chapter_cover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/TechnicalHelp/chapter_cover.png -------------------------------------------------------------------------------- /tutorials/TechnicalHelp/tech_intro.md: -------------------------------------------------------------------------------- 1 | # Technical Help 2 | 3 | Wrench labeled neuromatch on top of a phone 4 | 5 | *Artwork by Daniela Buchwald* 6 | -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/further_reading.md: -------------------------------------------------------------------------------- 1 | # Suggested further readings 2 | 3 | ## Introduction 4 | 5 | * [Catalyzing next-generation Artificial Intelligence through NeuroAI (2023)](https://www.nature.com/articles/s41467-023-37180-x) 6 | * [The neuroconnectionist research programme (2023)](https://www.nature.com/articles/s41583-023-00705-w) 7 | * [A deep learning framework for neuroscience (2019)](https://www.nature.com/articles/s41593-019-0520-2) 8 | * [Neuroscience-Inspired Artificial Intelligence (2017)](https://pubmed.ncbi.nlm.nih.gov/28728020/) 9 | * [Out of distribution generalization in machine learning (2021)](https://arxiv.org/abs/2103.02667) 10 | * [Universal Intelligence: A Definition of Machine Intelligence (2007)](https://arxiv.org/abs/0712.3329) 11 | * [Emergent behaviour and neural dynamics in artificial agents tracking odour plumes (2023)](https://www.nature.com/articles/s42256-022-00599-w) 12 | 13 | ## Tutorial 1 14 | 15 | * [Universal Language Model Fine-tuning for Text Classification (2018)](https://arxiv.org/abs/1801.06146) 16 | * [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models (2021)](https://arxiv.org/abs/2109.10282) 17 | 18 | ## Tutorial 2 19 | 20 | * [A neural network that finds a naturalistic solution for the production of muscle activity (2015)](https://www.nature.com/articles/nn.4042) 21 | * [MotorNet: a Python toolbox for controlling differentiable biomechanical effectors with artificial neural networks (2024)](https://elifesciences.org/reviewed-preprints/88591v2) 22 | * [μSim: A goal-driven framework for elucidating the neural control of movement through musculoskeletal modeling (2024)](https://www.biorxiv.org/content/10.1101/2024.02.02.578628v2.abstract) 23 | 24 | ## Tutorial 3 25 | 26 | * [Human-level concept learning through probabilistic program induction (2015)](https://www.science.org/doi/abs/10.1126/science.aab3050) 27 | * [Learning Task-General Representations with Generative Neuro-Symbolic Modeling (2020)](https://arxiv.org/abs/2006.14448) -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/solutions/W1D1_Tutorial1_Solution_22613224.py: -------------------------------------------------------------------------------- 1 | 2 | def inspect_encoder(model): 3 | """ 4 | Inspect encoder to verify that it processes inputs in the expected way. 
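(Editor's note on the shape asserted below: assuming the standard TrOCR/ViT-base configuration, a 384x384 image cut into 16x16 patches yields (384/16)**2 = 576 patch tokens plus one [CLS] token, i.e. 577 tokens of width 768.)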
5 | 6 | Args: 7 | model: the TrOCR model 8 | """ 9 | # Create an empty tensor (batch size of 1) to feed it to the encoder. 10 | # Remember that images should have 3 channels and have size 384x384 11 | # Recall that images are fed in pytorch with tensors of shape 12 | # batch x channels x height x width 13 | single_input = torch.zeros(1, 3, 384, 384).to(device) 14 | 15 | # Run the input through the encoder. 16 | output = model.encoder(single_input) 17 | 18 | # Measure the number of hidden tokens which are the output of the encoder 19 | hidden_shape = output['last_hidden_state'].shape 20 | 21 | assert hidden_shape[0] == 1 22 | assert hidden_shape[1] == 577 23 | assert hidden_shape[2] == 768 -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/solutions/W1D1_Tutorial1_Solution_427804b6.py: -------------------------------------------------------------------------------- 1 | """ 2 | The model does a pretty good job. However, we can see some mistakes in its transcription of the 3 | first image, in particular, it fails to recognize the words "Neuromatch" and "Neuro AI". 4 | This is likely due to the fact that the model's decoder was trained in 2019, prior to 5 | the inception of Neuromatch in 2020 and the recent popularity of Neuro AI. Although it 6 | has the capacity to express strings like "Neuromatch" and "Neuro AI", it assigns low 7 | probabilities to these words, which weren't in its corpus. This is a clear example of 8 | the importance of training data in building successful models. 9 | """ -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/solutions/W1D1_Tutorial1_Solution_4d36b048.py: -------------------------------------------------------------------------------- 1 | import torchmetrics.functional.text as fm 2 | 3 | def clean_string(input_string): 4 | """ 5 | Clean string prior to comparison 6 | 7 | Args: 8 | input_string (str): the input string 9 | 10 | Returns: 11 | (str) a cleaned string, lowercase, alphabetical characters only, no double spaces 12 | """ 13 | 14 | # Convert all characters to lowercase 15 | lowercase_string = input_string.lower() 16 | 17 | # Remove non-alphabetic characters 18 | alpha_string = re.sub(r'[^a-z\s]', '', lowercase_string) 19 | 20 | # Remove double spaces and start and end spaces 21 | return re.sub(r'\s+', ' ', alpha_string).strip() 22 | 23 | 24 | def calculate_mismatch(estimated_text, reference_text): 25 | """ 26 | Calculate mismatch (character and word error rates) between estimated and true text. 27 | 28 | Args: 29 | estimated_text: a list of strings 30 | reference_text: a list of strings 31 | 32 | Returns: 33 | A tuple, (CER and WER) 34 | """ 35 | # Lowercase the text and remove special characters for the comparison 36 | estimated_text = [clean_string(x) for x in estimated_text] 37 | reference_text = [clean_string(x) for x in reference_text] 38 | 39 | # Calculate the character error rate and word error rates. They should be 40 | # raw floats, not tensors. 
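# (Editor's note, illustrating the torchmetrics calls below: CER is total
# edit distance divided by total reference characters, so, e.g.,
# fm.char_error_rate(["hllo"], ["hello"]) == 1/5 = 0.2; WER is the same
# ratio computed over words, e.g. fm.word_error_rate(["the cat"], ["a cat"]) == 0.5.)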
41 | cer = fm.char_error_rate(estimated_text, reference_text).item() 42 | wer = fm.word_error_rate(estimated_text, reference_text).item() 43 | return (cer, wer) -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/solutions/W1D1_Tutorial1_Solution_7cf70ea7.py: -------------------------------------------------------------------------------- 1 | def calculate_mean_max_cer(df_results): 2 | """ 3 | Calculate the mean character-error-rate across subjects as 4 | well as the maximum (that is, the OOD risk). 5 | 6 | Args: 7 | df_results: a dataframe containing results 8 | 9 | Returns: 10 | A tuple, (mean_cer, max_cer) 11 | """ 12 | # Calculate the mean CER across test subjects. 13 | mean_subjects = df_results.cer.mean() 14 | 15 | # Calculate the max CER across test subjects. 16 | max_subjects = df_results.cer.max() 17 | return mean_subjects, max_subjects -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/solutions/W1D1_Tutorial1_Solution_cbbb272d.py: -------------------------------------------------------------------------------- 1 | def calculate_all_mismatch(df, model, processor): 2 | """ 3 | Calculate CER and WER for all subjects in a dataset 4 | 5 | Args: 6 | df: a dataframe containing information about images and transcripts 7 | model: an image-to-text model 8 | processor: a processor object 9 | 10 | Returns: 11 | a list of dictionaries containing a per-subject breakdown of the 12 | results 13 | """ 14 | subjects = df.subject.unique().tolist() 15 | 16 | results = [] 17 | 18 | # Calculate CER and WER for all subjects 19 | for subject in tqdm.tqdm(subjects): 20 | # Load images and labels for a given subject 21 | images, true_transcripts = get_images_and_transcripts(df, subject) 22 | 23 | # Transcribe the images to text 24 | transcribed_text = transcribe_images(images, model, processor) 25 | 26 | # Calculate the CER and WER 27 | cer, wer = calculate_mismatch(transcribed_text, true_transcripts) 28 | 29 | results.append({ 30 | 'subject': subject, 31 | 'cer': cer, 32 | 'wer': wer, 33 | }) 34 | return results -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/solutions/W1D1_Tutorial2_Solution_98ebec10.py: -------------------------------------------------------------------------------- 1 | class UnregularizedRNN(nn.Module): 2 | def __init__(self, input_size, hidden_size, output_size, g, h, tau_over_dt=5): 3 | super(UnregularizedRNN, self).__init__() 4 | self.hidden_size = hidden_size 5 | self.tau_over_dt = tau_over_dt 6 | self.output_linear = nn.Linear(hidden_size, output_size) 7 | 8 | # Weight initialization 9 | self.J = nn.Parameter(torch.randn(hidden_size, hidden_size) * (g / torch.sqrt(torch.tensor(hidden_size, dtype=torch.float)))) 10 | self.B = nn.Parameter(torch.randn(hidden_size, input_size) * (h / torch.sqrt(torch.tensor(input_size, dtype=torch.float)))) 11 | self.bx = nn.Parameter(torch.zeros(hidden_size)) 12 | 13 | # Nonlinearity 14 | self.nonlinearity = rectified_tanh 15 | 16 | def forward(self, input, hidden): 17 | 18 | # Calculate the visible firing rate from the hidden state. 
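# Together with the update below, this implements the continuous-time rate
# dynamics tau * dh/dt = -h + J r + B u + b_x, where r = rectified_tanh(h),
# discretized with a single Euler step (tau_over_dt = tau / dt).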
19 | firing_rate_before = self.nonlinearity(hidden) 20 | 21 | # Update hidden state 22 | recurrent_drive = torch.matmul(self.J, firing_rate_before.transpose(0, 1)) 23 | input_drive = torch.matmul(self.B, input.transpose(0, 1)) 24 | total_drive = recurrent_drive + input_drive + self.bx.unsqueeze(1) 25 | total_drive = total_drive.transpose(0, 1) 26 | 27 | # Euler integration for continuous-time update 28 | hidden = hidden + (1 / self.tau_over_dt) * (-hidden + total_drive) 29 | 30 | # Calculate the new firing rate given the update. 31 | firing_rate = self.nonlinearity(hidden) 32 | 33 | # Project the firing rate linearly to form the output 34 | output = self.output_linear(firing_rate) 35 | 36 | return output, hidden 37 | 38 | def init_hidden(self, batch_size): 39 | return torch.zeros(batch_size, self.hidden_size) -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/solutions/W1D1_Tutorial2_Solution_c14a4735.py: -------------------------------------------------------------------------------- 1 | def generate_trajectory(model, inputs, device): 2 | inputs = inputs.to(device) 3 | batch_size = inputs.size(0) 4 | h = model.init_hidden(batch_size).to(device) #note that `UnregularizedRNN` has a specific method for that 5 | 6 | loss = 0 7 | outputs = [] 8 | hidden_states = [] 9 | with torch.no_grad(): 10 | for t in range(inputs.shape[1]): 11 | # Forward the model's input and hidden state to obtain the model 12 | # output and hidden state *h*. 13 | # Note that you should index the input tensor by the time dimension 14 | # Capture any additional outputs in 'rest' 15 | output, h, *rest = model(inputs[:, t], h) 16 | outputs.append(output) 17 | hidden_states.append(h.detach().clone()) 18 | 19 | return torch.stack(outputs, axis=1).to(device), torch.stack(hidden_states, axis=1).to(device) -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/solutions/W1D1_Tutorial3_Solution_9e44e6ca.py: -------------------------------------------------------------------------------- 1 | """ 2 | Loose definition: 3 | 4 | The Omniglot task you just tried has one labelled example per class: the reference 5 | image defines the reference class. People tested on the Omniglot task generally 6 | display far higher performance than chance (here, 11%). Based on this, we often say 7 | (loosely) that people display a sample complexity N=1 on this task. 8 | 9 | Strict definition: 10 | 11 | When I tried this task 20 times, I got 18 correct answers. Based on that, I can 12 | estimate a 90% confidence interval (delta = 0.05) for my error rate using binomial 13 | statistics as [.02, .23]. Thus, I can state that my sample complexity for this task 14 | is N(epsilon=.1, delta=.23) = 1. 15 | 16 | My numbers are on the low end of what has been demonstrated in the literature. 17 | Lake et al. (2015) estimated an error rate in humans of 4.5% with 20 distractors. 18 | """ -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/solutions/W1D1_Tutorial3_Solution_dbbeabd0.py: -------------------------------------------------------------------------------- 1 | """ 2 | Inferring how humans perform cognitive tasks is a central question in cognitive science. 3 | Our ability to infer our strategies by introspection is limited, but controlled lab 4 | experiments can help us tease apart how we perform tasks. 5 | 6 | Lake et al. 
(2015) ask: 7 | 8 | > How do people learn new concepts from just one or a few examples? And how do 9 | people learn such abstract, rich, and flexible representations? An even greater challenge arises 10 | when putting them together: How can learning succeed from such sparse data yet also produce 11 | such rich representations? 12 | 13 | In the next section, we'll explore one model that can reach human-level performance 14 | through strong inductive biases, involving: 15 | 16 | * bottom-up visual processing 17 | * a generative model 18 | * a neurosymbolic approach 19 | 20 | """ -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/DancingScript-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/DancingScript-Bold.ttf -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/DancingScript-Medium.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/DancingScript-Medium.ttf -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/DancingScript-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/DancingScript-Regular.ttf -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/DancingScript-SemiBold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/DancingScript-SemiBold.ttf -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/W1D1_goal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/W1D1_goal.png -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/model_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/model_diagram.png -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/neuroai_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/neuroai_diagram.png -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/neuroai_hello_world.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/neuroai_hello_world.png -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/rnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/rnn.png -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/sample_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/sample_0.png -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/sample_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/sample_1.png -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/sample_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/sample_2.png -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/sample_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/sample_3.png -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/setup.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/setup.png -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/transformer_one_layer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/transformer_one_layer.png -------------------------------------------------------------------------------- /tutorials/W1D1_Generalization/static/trocr_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D1_Generalization/static/trocr_architecture.png -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/further_reading.md: -------------------------------------------------------------------------------- 1 | # Suggested further readings 2 | 3 | ## Tutorial 2 4 | 5 | * Wu, Z., Xiong, Y., Yu, S., & Lin, D. (2018). 
[Unsupervised feature learning via non-parametric instance discrimination (2018)](https://arxiv.org/abs/1805.01978) 6 | 7 | * Oord, A. van den, Li, Y., & Vinyals, O. (2019). [Representation learning with contrastive predictive coding (2018)](https://arxiv.org/abs/1807.03748) 8 | 9 | * Chen, T., Kornblith, S., Norouzi, M., & Hinton, G. (2020). [A simple framework for contrastive learning of visual representations (2020)](https://arxiv.org/abs/2002.05709) 10 | 11 | * Sohn, K. (2016). [Improved Deep Metric Learning with Multi-class N-pair Loss Objective (2016)](https://papers.nips.cc/paper_files/paper/2016/hash/6b180037abbebea991d8b1232f8a8ca9-Abstract.html). Advances in Neural Information Processing Systems, 29. 12 | 13 | * Gutmann, M., & Hyvärinen, A. (2010). [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models (2010)](https://proceedings.mlr.press/v9/gutmann10a.html). Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics, 297–304. 14 | 15 | * Yeh, C.-H., Hong, C.-Y., Hsu, Y.-C., Liu, T.-L., Chen, Y., & LeCun, Y. (2022). [Decoupled Contrastive Learning (2022)](https://arxiv.org/abs/2110.06848) 16 | 17 | * Konkle, T., & Alvarez, G. A. (2022). [A self-supervised domain-general learning framework for human ventral stream representation (2022)](https://www.nature.com/articles/s41467-022-28091-4#Sec9). Nature Communications, 13(1), 491. 18 | 19 | * Wang, J. X., Kurth-Nelson, Z., Kumaran, D., Tirumala, D., Soyer, H., Leibo, J. Z., Hassabis, D., & Botvinick, M. (2018). [Prefrontal cortex as a meta-reinforcement learning system (2018)](https://www.nature.com/articles/s41593-018-0147-8). Nature Neuroscience, 21(6), 860–868. 20 | 21 | 22 | -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial1_Solution_17513eb4.py: -------------------------------------------------------------------------------- 1 | """ 2 | At small sample sizes, the model learns very little. The limited data does not provide enough variability or information for the model to learn effectively. 3 | 4 | At sample sizes of 1000 points, there is a noticeable improvement in the model's performance. 5 | 6 | At the largest sample size, it achieves good performance within a few epochs. Here, an epoch involves the model seeing 10,000 points, which significantly accelerates learning 7 | and leads to more accurate results due to the greater amount of data and variability provided. 8 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial1_Solution_329eb9d7.py: -------------------------------------------------------------------------------- 1 | """ 2 | We see that the representations learned by the regression model transferred the worst, 3 | while the autoencoder and inpainting tasks transferred almost on par. 4 | 5 | Interestingly, the regression model performed best *on its task*, which was very simple, 6 | but transferred the worst. Unsupervised tasks based on finding good representations of the 7 | data or finding regularities in the data can be useful for transfer learning.
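To make this concrete, transfer is typically probed by freezing the pretrained encoder
and training only a linear readout on the downstream task. A minimal sketch (the
encoder here is a stand-in, not the tutorial's model):

import torch
import torch.nn as nn

encoder = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 64), nn.ReLU())  # stand-in for a pretrained encoder
for p in encoder.parameters():
    p.requires_grad = False          # transfer is measured with the representation held fixed
probe = nn.Linear(64, 10)            # only this readout is trained on the new task
optimizer = torch.optim.Adam(probe.parameters(), lr=1e-3)
logits = probe(encoder(torch.randn(8, 1, 28, 28)))  # toy batch

The better the frozen representation, the higher the accuracy such a probe can reach.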
8 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial1_Solution_430915db.py: -------------------------------------------------------------------------------- 1 | """ 2 | The regression model has learned a representation of visual stimuli which is not useful 3 | for classification. There is no positive transfer between the regression model and the 4 | classification model. 5 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial1_Solution_48232671.py: -------------------------------------------------------------------------------- 1 | def cost_autoencoder(output, target): 2 | criterion = nn.MSELoss() 3 | output_flat = output.view(output.size(0), -1) 4 | target_flat = target.view(target.size(0), -1) 5 | cost = criterion(output_flat, target_flat) 6 | return cost -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial1_Solution_53f79ab6.py: -------------------------------------------------------------------------------- 1 | 2 | def cost_classification(output, target): 3 | criterion = nn.CrossEntropyLoss() 4 | target = target.to(torch.int64) 5 | cost = criterion(output, target) 6 | return cost -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial1_Solution_5ddb1f2c.py: -------------------------------------------------------------------------------- 1 | 2 | def cost_regression(output, target): 3 | criterion = nn.MSELoss() 4 | cost = criterion(output, target) 5 | return cost -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial1_Solution_733ba3ce.py: -------------------------------------------------------------------------------- 1 | """ 2 | The representations learned by the autoencoder transfer somewhat to the classification 3 | task, far more than in the simple regression task. 4 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial1_Solution_9836554e.py: -------------------------------------------------------------------------------- 1 | """ 2 | - Inpainting, similar to autoencoding, necessitates a substantial amount of data for effective training. 3 | - Even after 10,000 examples and 10 epochs, the network is still learning. Because multiple masked images 4 | are generated from the same image, the network keeps learning from the same underlying images. 5 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial1_Solution_9e82edae.py: -------------------------------------------------------------------------------- 1 | """ 2 | On Day 3, we'll learn several ways of comparing networks at the level of architecture, 3 | but also at the level of representations. We'll learn about several tools to give us 4 | insight into what networks are learning. 5 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial1_Solution_e18cfe86.py: -------------------------------------------------------------------------------- 1 | """ 2 | - The model requires more data to train effectively.
With a limited dataset, the model lacks 3 | the necessary information to learn the underlying patterns and generalize well to new data. 4 | - Even with a dataset of 1,000 training points, the model has still not quite learned the task. 5 | This suggests that the current training strategy or model architecture might need adjustment 6 | to fully use the available data and improve performance. 7 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial1_Solution_eb15e56d.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a very easy task, and the model learns quickly, with a final error below 0.005. 3 | Training is less stable for smaller batch sizes, but the model hovers around a similar 4 | baseline error value. 5 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_0cd5a0dc.py: -------------------------------------------------------------------------------- 1 | """ 2 | Our brain's ability to perform contrastive learning is linked to the function of the ventral visual stream. 3 | This area of the brain processes visual information and has been shown to develop hierarchical features that 4 | capture the structure of visual input through self-supervised learning mechanisms. Evidence suggests that anterior 5 | regions of the ventral visual stream, particularly the anterior occipito-temporal cortex (aOTC), encode substantial 6 | information about object categories without requiring explicit category-level supervision (Konkle and Alvarez, 2022). 7 | Instead, these representations emerge through domain-general learning from natural image structures, where the brain 8 | differentiates between individual views and categories based on the inherent statistical properties of visual input 9 | (Livingstone et al., 2019; Arcaro and Livingstone, 2021). This capability supports the notion that the brain's visual 10 | system can form complex object representations and categorization using self-supervised learning frameworks similar to 11 | those in artificial neural networks. 12 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_13c3d606.py: -------------------------------------------------------------------------------- 1 | """ 2 | Since our network is untrained, there isn't much difference in the cosine similarities 3 | within and across image classes. This lack of clear structure in the similarity matrix 4 | is expected at this stage because the network has not yet learned to distinguish between 5 | different classes. 6 | 7 | Ideally, we should observe a very high cosine similarity for images within the same 8 | class (along the diagonal) and very low cosine similarity for images from different 9 | classes (off-diagonal).
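As a reminder of the computation involved, once embeddings are L2-normalized a plain
matrix product yields cosine similarities (a small sketch with made-up embeddings):

import numpy as np

emb = np.random.randn(6, 4)                        # 6 toy embeddings, 4 dimensions
emb /= np.linalg.norm(emb, axis=1, keepdims=True)  # L2-normalize each row
cos_sim = emb @ emb.T                              # entry (i, j) is the cosine similarity
assert np.allclose(np.diag(cos_sim), 1.0)          # self-similarity is exactly 1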
10 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_39154423.py: -------------------------------------------------------------------------------- 1 | 2 | # Dictionary to store normalized embeddings for each class 3 | embeddings = {} 4 | for i in range(10): 5 | embeddings[i] = test_embeddings_untrained[test_labels_untrained == i] 6 | 7 | # Within class cosine similarity: 8 | for i in range(10): 9 | sims = embeddings[i] @ embeddings[i].T # Compute cosine similarity matrix within the class 10 | np.fill_diagonal(sims, np.nan) # Ignore diagonal values (self-similarity) 11 | cur_sim = np.nanmean(sims) # Calculate the mean similarity excluding diagonal 12 | sim_matrix[i, i] = cur_sim # Store the within-class similarity in the matrix 13 | 14 | # Between class cosine similarity: 15 | for i in range(10): 16 | for j in range(10): 17 | if i == j: 18 | continue # Skip the diagonal (within-class similarity computed above) 19 | elif i > j: 20 | continue # Skip if already computed (matrix symmetry) 21 | else: 22 | sims = embeddings[i] @ embeddings[j].T # Compute cosine similarity between different classes 23 | cur_sim = np.mean(sims) # Calculate the mean similarity 24 | sim_matrix[i, j] = cur_sim # Store the similarity in the matrix 25 | sim_matrix[j, i] = cur_sim # Ensure symmetry in the matrix 26 | 27 | plt.figure(figsize=(8, 6)) 28 | sns.heatmap(sim_matrix, vmin=0.0, vmax=1.0, annot=True, fmt=".2f", cmap="YlGnBu", linewidths=0.5) 29 | plt.title("Untrained Network Cosine Similarity Matrix") 30 | plt.show() -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial3_Solution_62d8e960.py: -------------------------------------------------------------------------------- 1 | t = np.arange(1,101) # Array representing trials from 1 to 100 2 | p_L = 0.25 3 | p_R = 0.75 4 | 5 | # In the best case scenario, the agent chooses the best arm every trial, 6 | # leading to a cumulative regret of 0. 7 | cr_best = np.zeros(100) 8 | 9 | # In the worst-case scenario, the agent chooses the worst arm every trial, 10 | # leading to per trial regret of the best arm's reward - the worst arm's reward 11 | per_trial_regret = p_R - p_L 12 | regret_worst = per_trial_regret * np.ones(100) 13 | cr_worst = np.cumsum(regret_worst) 14 | 15 | with plt.xkcd(): 16 | plt.plot(t, cr_best, label = 'best case') 17 | plt.plot(t, cr_worst, label = 'worst case') 18 | 19 | plt.xlabel('trial') 20 | plt.ylabel('cumulative regret') 21 | plt.legend() -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial3_Solution_bb3c2703.py: -------------------------------------------------------------------------------- 1 | """ 2 | The adaptation to fast-changing latent variables, reflected in the changes in activation patterns over trials, 3 | shows the algorithm's ability to adjust its internal representations based on feedback from the environment. 4 | 5 | - The first and last plots show very fast convergence towards the optimal arm when the reward probabilities are 6 | very different. The trajectory moves rapidly towards one or the other side of the PC space. 7 | 8 | - The middle two plots correspond to more difficult settings, where the model takes longer to converge. 9 | The model starts by exploiting the left arm but, after sampling a right action, ends up converging on the right.
10 | 11 | - The first PC appears to correspond to the certainty of the algorithm about the optimal arm 12 | (low PC value = left arm, high PC value = right arm). 13 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial3_Solution_d48fc54f.py: -------------------------------------------------------------------------------- 1 | """ 2 | When one of the arms has a high reward, it is easier to identify it. The agent's 3 | uncertainty thus decreases quickly, and it spends less time in exploration. When the 4 | probabilities of rewards are more equal (close to 0.5), many trials are needed to 5 | reduce the uncertainty, and the exploration phase lasts a long time. 6 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial3_Solution_ecdb8e78.py: -------------------------------------------------------------------------------- 1 | """ 2 | The agent models the reward probabilities of the two arms using fixed distributions. 3 | There is no mechanism by which the agent can detect that there's a change in the 4 | environment and rapidly update its beliefs. Its underlying model is not flexible, 5 | and it cannot adapt to changes in the environment, leading to suboptimal performance. 6 | """ -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/static/W1D2_Tutorial2_Solution_39154423_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D2_ComparingTasks/static/W1D2_Tutorial2_Solution_39154423_0.png -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/static/W1D2_Tutorial3_Solution_62d8e960_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D2_ComparingTasks/static/W1D2_Tutorial3_Solution_62d8e960_0.png -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/static/cummulative_regret.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D2_ComparingTasks/static/cummulative_regret.png -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/static/evolution.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D2_ComparingTasks/static/evolution.png -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/static/exploration_exploitation_tradeoff.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D2_ComparingTasks/static/exploration_exploitation_tradeoff.png -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/static/infoNCEloss.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D2_ComparingTasks/static/infoNCEloss.png -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/static/learning_temporal_scales.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D2_ComparingTasks/static/learning_temporal_scales.png -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/static/model_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D2_ComparingTasks/static/model_architecture.png -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/static/puppies.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D2_ComparingTasks/static/puppies.png -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/static/triplet_loss_minimization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D2_ComparingTasks/static/triplet_loss_minimization.png -------------------------------------------------------------------------------- /tutorials/W1D2_ComparingTasks/static/two_armed_bandit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D2_ComparingTasks/static/two_armed_bandit.png -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/solutions/W1D3_Tutorial1_Solution_192afe09.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Question: For adversarial images, how do the RDMs change when comparing representations from the standard model to the adversarially trained model? 4 | 5 | With adversarial images, the within-category representation similarity (the block effect) is disrupted in the deeper layers of the standard model. 6 | The adversarially trained model preserves its block-like structure across categories when presented with adversarial images, however. 7 | """; -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/solutions/W1D3_Tutorial1_Solution_7a860365.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Question: For the standard clean images, how do the RDMs change across the model layers, how do they compare to the category structure, and why? 4 | 5 | For clean images representing the same digit, their representations in the deeper layers of the network are remarkably similar and align well with the inherent category structure. This manifests as a block diagonal structure. 
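(To see where such blocks come from, here is a toy sketch of an RDM computation;
dissimilarity is taken as 1 minus the Pearson correlation between activation
patterns, one common choice:)

import numpy as np

templates = np.random.randn(2, 20)       # two "category" activation templates
acts = np.repeat(templates, 5, axis=0)   # 5 stimuli per category...
acts += 0.3 * np.random.randn(10, 20)    # ...plus small per-stimulus noise
rdm = 1 - np.corrcoef(acts)              # 10x10 representational dissimilarity matrix
# Same-category rows share a template, so they correlate strongly and form
# low-dissimilarity diagonal blocks; cross-category entries stay high.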
In contrast, this block effect is less pronounced in the earlier layers of the network. The initial layers focus more on capturing generic, fine-grained visual features. This progression from generic to more refined feature extraction across layers underscores the hierarchical nature of learning in deep neural networks, where complex representations are built upon the simpler ones extracted at earlier stages. 6 | """; -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/solutions/W1D3_Tutorial1_Solution_dd06ba72.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Question: How do the RDMs relate to the performances of the standard and the adversarially trained models on clean and adversarial images? 4 | 5 | RDMs provide a visual and quantitative way to analyze how a neural network processes and represents different stimuli. By comparing the similarity of responses within the network across various inputs, the RDM can reveal insights into the network's internal representations and, consequently, its ability to generalize. 6 | """; -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/solutions/W1D3_Tutorial3_Solution_147d3932.py: -------------------------------------------------------------------------------- 1 | v1_rdms = fmri_rdms.subset('roi', 'V1') 2 | show_rdm_plotly(v1_rdms, rdm_descriptor='subject') -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/solutions/W1D3_Tutorial3_Solution_5ab03b03.py: -------------------------------------------------------------------------------- 1 | 2 | # Compute rdms for each layer of AlexNet 3 | alexnet_rdms_dict = {} 4 | for layer, dataset in alexnet_datasets.items(): 5 | alexnet_rdms_dict[layer] = rsa.rdm.calc_rdm(dataset) -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/solutions/W1D3_Tutorial4_Solution_05135f17.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: 1. Does the amount of distortion after projection depend on the dimension $d$ of the original space? Observe the dimension $k$ that preserves Euclidean distance up to a small distortion for both the 2-neuron and 100-neuron datasets. 4 | 5 | 2. What is the distance between two identical stimuli after random projection? 6 | 7 | 1. No. Empirically, the dimension that preserves Euclidean distance up to a small distortion for the 100-neuron dataset is similar to the 2-neuron dataset. Theoretically, the distortion bound is independent of the original dimension (https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma). 8 | 9 | 2. The distance is always 0.
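A quick numerical check of this dimension-independence (a sketch; the 1/sqrt(k)
scaling matches the projection used in the exercise, and identical stimuli map to
identical projections, so their distance stays exactly 0):

import numpy as np

rng = np.random.default_rng(0)
for d in (2, 100):                            # original dimensionality
    x, y = rng.normal(size=(2, d))
    k = 64                                    # projected dimensionality
    A = rng.normal(size=(d, k)) / np.sqrt(k)  # random projection matrix
    print(d, np.linalg.norm(x - y), np.linalg.norm((x - y) @ A))
    # the two distances agree up to a small, d-independent distortion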
10 | """; -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/solutions/W1D3_Tutorial4_Solution_1ac2083f.py: -------------------------------------------------------------------------------- 1 | 2 | stim_idx = [0,1] # change stimulus index to visualize another pair of stimuli 3 | m_dims_list = np.power(2, np.arange(1,10)) 4 | true_dist, projected_dist = {}, {} 5 | for i, n_neurons in enumerate(n_neurons_list): 6 | data = clean_dataset[n_neurons].sel({"stim": stim_idx}) 7 | # Let's first recalculate the ground truth euclidean rdm again, without normalization by the number of neurons this time. 8 | true_dist[n_neurons] = calc_rdm(data, method='euclidean', noise=None, normalize_by_channels=False).dissimilarities.item() 9 | 10 | projected_dist[n_neurons]=[] 11 | for m_dims in m_dims_list: 12 | A = np.random.normal(loc=0, scale=1, size=(n_neurons, m_dims)) 13 | A *= np.sqrt(1/m_dims) 14 | transformed_data = (data.values @ A) 15 | transformed_data = np2xr(transformed_data, coords={'stim': data.stim.values, 'neuron': np.arange(m_dims)}) 16 | rdm = calc_rdm(transformed_data, method='euclidean', noise=None, normalize_by_channels=False) 17 | projected_dist[n_neurons].append(rdm.dissimilarities.item()) 18 | projected_dist[n_neurons] = np.array(projected_dist[n_neurons]) 19 | 20 | plot_distance_after_projection(true_dist, projected_dist, n_neurons_list, m_dims_list) -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/solutions/W1D3_Tutorial4_Solution_3c63934e.py: -------------------------------------------------------------------------------- 1 | n_neurons = 2 2 | stimulus_idx = 0,1 # choose two stimuli 3 | b_j = clean_dataset[n_neurons].loc[stimulus_idx[0]].values # select the stimulus response 4 | b_k = clean_dataset[n_neurons].loc[stimulus_idx[1]].values 5 | # compute the squared euclidean and mahalanobis distance, and then divide the distance by the number of neurons (2) 6 | euclidean_dist = ((b_j-b_k) @ (b_j-b_k).T) / n_neurons 7 | mahalanobis_dist = ((b_j-b_k) @ np.linalg.inv(correlated_cov[n_neurons]) @ (b_j-b_k).T) / n_neurons -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/solutions/W1D3_Tutorial4_Solution_6c059522.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: For isotropic Gaussian noise, what is the relationship between Euclidean and Mahalanobis distance? 4 | 5 | For isotropic Gaussian noise, the Euclidean distance and Mahalanobis distance are equivalent up to a constant factor. When the covariance matrix is an identity matrix, the Euclidean and Mahalanobis distances are exactly equal. 
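This is easy to verify numerically (a sketch using squared distances, following the
convention of the exercise above):

import numpy as np

rng = np.random.default_rng(1)
b_j, b_k = rng.normal(size=(2, 5))
d = b_j - b_k
cov = np.identity(5)                         # isotropic, unit-variance noise
euclidean = d @ d
mahalanobis = d @ np.linalg.inv(cov) @ d
assert np.allclose(euclidean, mahalanobis)   # equal for identity covariance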
6 | """; -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/solutions/W1D3_Tutorial4_Solution_b21e3c22.py: -------------------------------------------------------------------------------- 1 | n_neurons = 2 2 | isotropic_cov_2d = np.identity(n_neurons) 3 | correlated_cov_2d = np.array([[1.,0.6],[0.6,1.0]]) -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/solutions/W1D3_Tutorial5_Solution_0467919d.py: -------------------------------------------------------------------------------- 1 | 2 | n_delays = 20 3 | delay_interval = 10 4 | 5 | models = [trajectory_circle, trajectory_oval, trajectory_walk] 6 | dsa = DSA(models, n_delays=n_delays, delay_interval=delay_interval) 7 | similarities = dsa.fit_score() 8 | 9 | labels = ['Circle', 'Oval', 'Walk'] 10 | ax = sns.heatmap(similarities, xticklabels=labels, yticklabels=labels) 11 | cbar = ax.collections[0].colorbar 12 | cbar.ax.set_ylabel('DSA Score'); 13 | plt.title("Dynamic Similarity Analysis Score among Trajectories"); -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/static/NSD.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/static/NSD.png -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/static/W1D3_Tutorial4_Solution_1ac2083f_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/static/W1D3_Tutorial4_Solution_1ac2083f_0.png -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/static/W1D3_Tutorial5_Solution_0467919d_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/static/W1D3_Tutorial5_Solution_0467919d_0.png -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/static/rcnn_tutorial.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/static/rcnn_tutorial.png -------------------------------------------------------------------------------- /tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/static/response_matrix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D3_ComparingArtificialAndBiologicalNetworks/static/response_matrix.png -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/W1D5_Outro.ipynb: 
-------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {}, 16 | "pycharm": { 17 | "name": "#%% md\n" 18 | } 19 | }, 20 | "source": [ 21 | "# Daily survey\n", 22 | "\n", 23 | "Don't forget to complete your reflections and content check in the daily survey! Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 24 | "\n", 25 | "\"button" 26 | ] 27 | } 28 | ], 29 | "metadata": { 30 | "colab": { 31 | "collapsed_sections": [], 32 | "include_colab_link": true, 33 | "name": "W1D5_Outro", 34 | "provenance": [], 35 | "toc_visible": true 36 | }, 37 | "kernel": { 38 | "display_name": "Python 3", 39 | "language": "python", 40 | "name": "python3" 41 | }, 42 | "kernelspec": { 43 | "display_name": "Python 3 (ipykernel)", 44 | "language": "python", 45 | "name": "python3" 46 | }, 47 | "language_info": { 48 | "codemirror_mode": { 49 | "name": "ipython", 50 | "version": 3 51 | }, 52 | "file_extension": ".py", 53 | "mimetype": "text/x-python", 54 | "name": "python", 55 | "nbconvert_exporter": "python", 56 | "pygments_lexer": "ipython3", 57 | "version": "3.9.19" 58 | } 59 | }, 60 | "nbformat": 4, 61 | "nbformat_minor": 4 62 | } 63 | -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial1_Solution_0b47c17f.py: -------------------------------------------------------------------------------- 1 | temporal_diff = np.abs(np.diff(sig)) -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial1_Solution_45a72023.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: What do you observe about the kurtosis after applying the temporal differentiation? 4 | 5 | In general, the kurtosis value becomes higher, meaning that temporal differentiation makes the signal more concentrated around zero (i.e., sparser). 6 | """; -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial1_Solution_5a7b462a.py: -------------------------------------------------------------------------------- 1 | num_taus = 10 2 | 3 | # create taus 4 | taus = np.linspace(1, 91, num_taus).astype(int) 5 | 6 | # create taus_list 7 | taus_list = [np.abs(sig[tau:] - sig[:-tau]) for tau in taus] -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial1_Solution_e286253f.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: 1. Why do you think the filter is asymmetric? 4 | 2. How might a filter influence the sparsity patterns observed in data? 5 | 6 | 1. As the filter reflects how we process time series data, it weights the past and the future differently. In particular, for this filter, past information is not included in the result at all, while future information is weighted evenly across the defined window. 7 | 2. Note that the filter takes the average of the future (not just a single point in time). Thus, the result would be much smoother than using a single time point.
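As a sketch of such an anti-causal box filter (toy signal; each output averages only
the w future samples):

import numpy as np

sig = np.cumsum(np.random.randn(200))   # toy signal
w = 10                                  # window size
# value at time t depends only on sig[t], ..., sig[t + w - 1] -- the "future"
smoothed = np.array([sig[t:t + w].mean() for t in range(len(sig) - w)])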
8 | """; -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial1_Solution_e8a7baa1.py: -------------------------------------------------------------------------------- 1 | def ReLU(x, theta = 0): 2 | """ 3 | Calculates ReLU function for the given level of theta. 4 | 5 | Inputs: 6 | - x (np.ndarray): input data. 7 | - theta (float, default = 0): threshold parameter. 8 | 9 | Outputs: 10 | - thres_x (np.ndarray): filtered values. 11 | """ 12 | 13 | thres_x = np.maximum(x - theta, 0) 14 | 15 | return thres_x -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial1_Solution_eb440839.py: -------------------------------------------------------------------------------- 1 | # Video_file is a 3D array representing pixels X pixels X time 2 | video_file = np.load('reweight_digits.npy') 3 | 4 | # Create a copy of the video_file array 5 | im_focus = video_file.copy() 6 | 7 | # Get the number of frames in the video 8 | T = im_focus.shape[2] 9 | 10 | # Get the number of rows in the video 11 | N0 = im_focus.shape[0] 12 | 13 | # Get the number of columns in the video, leaving out 10 columns 14 | N1 = im_focus.shape[1] - 10 15 | 16 | # Create a copy of the extracted frames 17 | low_res = im_focus.copy() 18 | 19 | # Get the shape of a single frame 20 | shape_frame = low_res[:, :, 0].shape 21 | 22 | # Flatten each frame and store them in a list 23 | video_file_ar = [low_res[:, :, frame].flatten() for frame in range(low_res.shape[2])] 24 | 25 | # Create dict_learner object 26 | dict_learner = DictionaryLearning( 27 | n_components=15, transform_algorithm='lasso_lars', transform_alpha=0.9, 28 | random_state=402, 29 | ) 30 | 31 | # List to np.array 32 | video_v = np.vstack(video_file_ar) 33 | 34 | # Fit and transform `video_v` 35 | D_transformed = dict_learner.fit(video_v).transform(video_v) -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial1_Solution_f770be90.py: -------------------------------------------------------------------------------- 1 | T_ar = np.arange(len(sig)) 2 | 3 | # 100 different frequency values from 0.001 to 1, then apply each frequency on `T_ar` 4 | freqs = np.linspace(0.001, 1, 100) 5 | set_sigs = [np.sin(T_ar*f) for f in freqs] 6 | 7 | # define 'reg' --- an sklearn object of OrthogonalMatchingPursuit, and fit it to the data, where the frequency bases are the features and the signal is the label 8 | reg = OrthogonalMatchingPursuit(fit_intercept = True, n_nonzero_coefs = 10).fit(np.vstack(set_sigs).T, sig) -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial2_Solution_9b3b7306.py: -------------------------------------------------------------------------------- 1 | number_samples = 300 # Number of samples 2 | number_pixels = 5 # Number of pixels per sample 3 | 4 | # True reflectance 5 | reflectance = np.random.exponential(1, size=(number_samples, number_pixels)) 6 | # Illuminant intensity 7 | illuminant_intensity = np.random.exponential(1, size=(number_samples, 1)) 8 | # Visible image 9 | visible_image = np.repeat(illuminant_intensity, number_pixels, axis=1) * reflectance 10 | 11 | # Normalized visible image 12 | norm_visible_image = normalize( 13 | visible_image, 14 | sigma = 0.1, 15 | p = 1, 16 | g = 1 17 | ) 18 | 19 | # Visualize the images 20 | visualize_images_sec22( 21 |
[reflectance, illuminant_intensity, visible_image, norm_visible_image], 22 | ['Reflectance', 'Illuminant intensity', 'Visible image', 'Normalized visible image'], 23 | 25 24 | ) -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial2_Solution_a36a7d90.py: -------------------------------------------------------------------------------- 1 | non_linearities = { 2 | 'ReLU': nn.ReLU(), 3 | 'ReLU6': nn.ReLU6(), 4 | 'SoftPlus': nn.Softplus(), 5 | 'Sigmoid': nn.Sigmoid(), 6 | 'Tanh': nn.Tanh() 7 | } -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial2_Solution_b46035c9.py: -------------------------------------------------------------------------------- 1 | class ReLUNet(nn.Module): 2 | """ 3 | ReLUNet architecture 4 | Structure is as follows: 5 | y = Σi(ai * ReLU(θi - x)) 6 | """ 7 | # Define the structure of your network 8 | def __init__(self, n_units): 9 | """ 10 | Args: 11 | n_units (int): Number of hidden units 12 | 13 | Returns: 14 | Nothing 15 | """ 16 | super(ReLUNet, self).__init__() 17 | # Create input thresholds 18 | self.input_threshold_weights = nn.Parameter(torch.abs(torch.randn(n_units))) 19 | self.non_linearity = nn.ReLU() 20 | self.output_layer = nn.Linear(n_units, 1) 21 | nn.init.xavier_normal_(self.output_layer.weight) 22 | 23 | def forward(self, x): 24 | """ 25 | Args: 26 | x: torch.Tensor 27 | Input tensor of size ([1]) 28 | """ 29 | op = self.input_threshold_weights - x # prepare the input to be passed through ReLU 30 | op = self.non_linearity(op) # apply ReLU 31 | op = self.output_layer(op) # run through the output layer 32 | return op 33 | 34 | # Return the network's raw output for the given input (no thresholding is applied) 35 | def predict(self, x): 36 | """ 37 | Args: 38 | x: torch.Tensor 39 | Input tensor of size ([1]) 40 | """ 41 | output = self.forward(x) 42 | return output -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial2_Solution_dd43ccaf.py: -------------------------------------------------------------------------------- 1 | def normalize(x, sigma, p, g): 2 | """ 3 | Inputs: 4 | - x(np.ndarray): Input array (n_samples * n_dim) 5 | - sigma(float): Smoothing factor 6 | - p(int): p-norm 7 | - g(int): scaling factor 8 | 9 | Outputs: 10 | - xnorm (np.ndarray): normalized values.
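The function implements divisive normalization,
xnorm_i = g * x_i / (sigma + (sum_j |x_j|^p)^(1/p)),
where the sum runs over the n_dim entries of each sample.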
11 | """ 12 | # Raise the absolute value of x to the power p 13 | xp = np.power(np.abs(x), p) 14 | # Sum xp over the dimensions (n_dim) axis 15 | xp_sum = np.sum(xp, axis=1) 16 | # The sum dropped the dimensions axis; restore it so xp_sum becomes 17 | # a column vector of shape (n_samples, 1) that broadcasts against x 18 | xp_sum = np.expand_dims(xp_sum, axis=1) 19 | # Raise the sum to the power 1/p and add the smoothing factor (sigma) 20 | denominator = sigma + np.power(xp_sum, 1/p) 21 | # Scale the input data with a factor of g 22 | numerator = x*g 23 | # Calculate normalized x 24 | xnorm = numerator/denominator 25 | return xnorm -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial3_Solution_072775c1.py: -------------------------------------------------------------------------------- 1 | def gained_dot_product_attention(x: torch.Tensor, # input vector 2 | q_1: torch.Tensor, # query vector 1 3 | q_2: torch.Tensor, # query vector 2 4 | z_1: float, # query gain 1 5 | z_2: float, # query gain 2 6 | ): 7 | """This function computes the gained dot product attention 8 | Args: 9 | x (Tensor): input vector 10 | q_1 (Tensor): query vector 1 11 | q_2 (Tensor): query vector 2 12 | z_1 (float): query gain 1 13 | z_2 (float): query gain 2 14 | Returns: 15 | w (Tensor): attention weights 16 | y (float): gained dot product attention 17 | """ 18 | w = torch.softmax(z_1 * q_1 + z_2 * q_2, dim=0) 19 | y = torch.dot(w, x) 20 | return w, y -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/solutions/W1D5_Tutorial3_Solution_c81fc074.py: -------------------------------------------------------------------------------- 1 | class ScaledDotProductAttention(torch.nn.Module): 2 | def __init__(self, T: int, dm: int, dk: int): 3 | """ 4 | Scaled Dot Product Attention 5 | Args: 6 | T (int): context length 7 | dm (int): model dimension 8 | dk (int): key dimension 9 | Note: 10 | we assume dm == dv 11 | """ 12 | super().__init__() 13 | self.T = T # context length 14 | self.dm = dm # model dimension 15 | self.dk = dk # key dimension 16 | self.scale = 1.0 / math.sqrt(dk) 17 | 18 | # positional Encoding 19 | self.position = PositionalEncoding(T, dm) 20 | 21 | # self-attention layers 22 | self.Wq = torch.nn.Linear(dm, dk, bias=False) # query layer 23 | self.Wk = torch.nn.Linear(dm, dk, bias=False) # key layer 24 | self.Wv = torch.nn.Linear(dm, dm, bias=False) # value layer 25 | 26 | def forward(self, x: torch.Tensor): 27 | """ 28 | Args: 29 | x (torch.Tensor): input tensor of shape (T, d) 30 | """ 31 | # Positional Encoding 32 | x = x + self.position() 33 | 34 | # (Scaled Dot-Product Attention) 35 | Q = self.Wq(x) # Query 36 | K = self.Wk(x) # Key 37 | V = self.Wv(x) # Value 38 | QK = Q @ K.T # Query Key product 39 | S = QK * self.scale # Scores (scaled against saturation) 40 | S_softmax = torch.softmax(S, dim=-1) # softmax attention scores (row dimensions) 41 | A = S_softmax @ V # scaled dot-product attention 42 | return A -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/static/W1D5_Tutorial2_Solution_9b3b7306_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D5_Microcircuits/static/W1D5_Tutorial2_Solution_9b3b7306_0.png
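A usage sketch for the `ScaledDotProductAttention` module defined in W1D5_Tutorial3_Solution_c81fc074.py above (assuming the tutorial's `PositionalEncoding` helper, which is taken here to return a (T, dm) tensor):

import math
import torch

T, dm, dk = 8, 32, 16        # context length, model dim, key dim
attn = ScaledDotProductAttention(T, dm, dk)
x = torch.randn(T, dm)       # one toy sequence of T tokens
A = attn(x)                  # attention output
assert A.shape == (T, dm)    # one dm-dimensional vector per token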
-------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/static/components.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D5_Microcircuits/static/components.png -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/static/cross_attention.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D5_Microcircuits/static/cross_attention.png -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/static/dictionary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D5_Microcircuits/static/dictionary.png -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/static/filters.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D5_Microcircuits/static/filters.png -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/static/self_attention.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D5_Microcircuits/static/self_attention.png -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/static/sparse_and.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D5_Microcircuits/static/sparse_and.png -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/static/sparsity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W1D5_Microcircuits/static/sparsity.png -------------------------------------------------------------------------------- /tutorials/W1D5_Microcircuits/student/W1D5_Outro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {}, 16 | "pycharm": { 17 | "name": "#%% md\n" 18 | } 19 | }, 20 | "source": [ 21 | "# Daily survey\n", 22 | "\n", 23 | "Don't forget to complete your reflections and content check in the daily survey! 
Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 24 | "\n", 25 | "\"button" 26 | ] 27 | } 28 | ], 29 | "metadata": { 30 | "colab": { 31 | "collapsed_sections": [], 32 | "include_colab_link": true, 33 | "name": "W1D5_Outro", 34 | "provenance": [], 35 | "toc_visible": true 36 | }, 37 | "kernel": { 38 | "display_name": "Python 3", 39 | "language": "python", 40 | "name": "python3" 41 | }, 42 | "kernelspec": { 43 | "display_name": "Python 3 (ipykernel)", 44 | "language": "python", 45 | "name": "python3" 46 | }, 47 | "language_info": { 48 | "codemirror_mode": { 49 | "name": "ipython", 50 | "version": 3 51 | }, 52 | "file_extension": ".py", 53 | "mimetype": "text/x-python", 54 | "name": "python", 55 | "nbconvert_exporter": "python", 56 | "pygments_lexer": "ipython3", 57 | "version": "3.9.19" 58 | } 59 | }, 60 | "nbformat": 4, 61 | "nbformat_minor": 4 62 | } 63 | -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/W2D1_Outro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {} 16 | }, 17 | "source": [ 18 | "# Daily survey\n", 19 | "\n", 20 | "Don't forget to complete your reflections and content check in the daily survey! Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 21 | "\n", 22 | "\"button" 23 | ] 24 | } 25 | ], 26 | "metadata": { 27 | "colab": { 28 | "collapsed_sections": [], 29 | "include_colab_link": true, 30 | "name": "W2D1_Outro", 31 | "provenance": [], 32 | "toc_visible": true 33 | }, 34 | "kernel": { 35 | "display_name": "Python 3", 36 | "language": "python", 37 | "name": "python3" 38 | }, 39 | "kernelspec": { 40 | "display_name": "Python 3 (ipykernel)", 41 | "language": "python", 42 | "name": "python3" 43 | }, 44 | "language_info": { 45 | "codemirror_mode": { 46 | "name": "ipython", 47 | "version": 3 48 | }, 49 | "file_extension": ".py", 50 | "mimetype": "text/x-python", 51 | "name": "python", 52 | "nbconvert_exporter": "python", 53 | "pygments_lexer": "ipython3", 54 | "version": "3.9.19" 55 | } 56 | }, 57 | "nbformat": 4, 58 | "nbformat_minor": 4 59 | } 60 | -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/further_reading.md: -------------------------------------------------------------------------------- 1 | # Suggested further readings 2 | 3 | ## Tutorial 1: Depth vs Width 4 | 5 | - [Exponential expressivity in deep neural networks through transient chaos](https://papers.nips.cc/paper_files/paper/2016/hash/148510031349642de5ca0c544f31b2ef-Abstract.html) 6 | 7 | ## Tutorial 2: Double descent 8 | 9 | - [Double Descent: A Visual Introduction](https://mlu-explain.github.io/double-descent/) 10 | - [Double Descent: A Mathematical Explanation](https://mlu-explain.github.io/double-descent2/) 11 | - [Deep Double Descent: Where Bigger Models and More Data Hurt](https://arxiv.org/abs/1912.02292) 12 | - [A Farewell to the Bias-Variance Tradeoff? 
An Overview of the Theory of Overparameterized Machine Learning](https://arxiv.org/abs/2109.02355) 13 | 14 | ## Tutorial 3: Modularity 15 | 16 | - [Inductive biases of neural network modularity in spatial navigation](https://www.science.org/doi/10.1126/sciadv.adk1256) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/instructor/W2D1_Outro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {} 16 | }, 17 | "source": [ 18 | "# Daily survey\n", 19 | "\n", 20 | "Don't forget to complete your reflections and content check in the daily survey! Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 21 | "\n", 22 | "\"button 23 | ] 24 | } 25 | ], 26 | "metadata": { 27 | "colab": { 28 | "collapsed_sections": [], 29 | "include_colab_link": true, 30 | "name": "W2D1_Outro", 31 | "provenance": [], 32 | "toc_visible": true 33 | }, 34 | "kernel": { 35 | "display_name": "Python 3", 36 | "language": "python", 37 | "name": "python3" 38 | }, 39 | "kernelspec": { 40 | "display_name": "Python 3 (ipykernel)", 41 | "language": "python", 42 | "name": "python3" 43 | }, 44 | "language_info": { 45 | "codemirror_mode": { 46 | "name": "ipython", 47 | "version": 3 48 | }, 49 | "file_extension": ".py", 50 | "mimetype": "text/x-python", 51 | "name": "python", 52 | "nbconvert_exporter": "python", 53 | "pygments_lexer": "ipython3", 54 | "version": "3.9.19" 55 | } 56 | }, 57 | "nbformat": 4, 58 | "nbformat_minor": 4 59 | } 60 | -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_166f0c8a.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: 1. What is the minimum error achievable by an MLP on the generated problem? 4 | 2. What is the minimum error achievable by a 1-hidden-layer MLP? 5 | 6 | 1. This is a trick question! We generated the data ourselves; the teacher network is an MLP. In principle, a student network with the same architecture could learn the exact weights of the teacher and achieve exactly 0 error. 7 | 2. By the universal approximation theorem, we can approximate the teacher network arbitrarily well with a 1-hidden-layer MLP, as long as there is no limit on the number of hidden units. So the answer is technically 0. In practice, however, when fitting a complex function, for example a deep teacher network, the number of hidden units required for low error can be impractically large.
8 | """ -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_44d1308a.py: -------------------------------------------------------------------------------- 1 | torch.manual_seed(-1) 2 | 3 | # Create teacher 4 | n_in = 5 # input dimension 5 | W_teacher, D_teacher = 5, 5 # teacher width, depth 6 | sigma_teacher = 0.4 # teacher weight variance 7 | teacher = make_MLP(n_in, W_teacher, D_teacher) 8 | initialize_layers(teacher, sigma_teacher) 9 | 10 | # generate train and test set 11 | N_train, N_test = 4000, 1000 12 | X_train, y_train = make_data(teacher, n_in, N_train) 13 | X_test, y_test = make_data(teacher, n_in, N_test) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_485ec5dd.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: What is the qualitative difference between trajectories propagation through these networks? Does it fit what we have seen earlier with wide student approximation? 4 | 5 | Indeed, a standard network (with sigma = 2) is much more expressive; it folds the space here and there, creating vivid and tangled representations with each additional layer, whereas the quasi-linear network preserves the original structure. 6 | It is in line with the experiments on wide student approximation as shallow and wide networks cannot express the tangled representation which a standard net creates. 7 | """ -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_6b3d3e34.py: -------------------------------------------------------------------------------- 1 | set_seed(42) 2 | 3 | def make_data(net, n_in, n_examples): 4 | """ 5 | Generate data by sampling from a multivariate gaussian distribution, and output data by passing the inputs through the network. 6 | 7 | Inputs: 8 | - net (nn.Sequential): network. 9 | - n_in (int): input dimension. 10 | - n_examples (int): number of data examples to generate. 11 | 12 | Outputs: 13 | - X (torch.tensor): input data. 14 | - y (torch.tensor): output data. 15 | """ 16 | X = torch.randn(n_examples, n_in) 17 | y = net(X).detach() 18 | return X, y 19 | 20 | X, y = make_data(net, 10, 10000000) 21 | np.testing.assert_allclose(X[0][0].item(), 1.927, err_msg = "Expected value of data is different!", atol = 1e-3) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_6fa930a8.py: -------------------------------------------------------------------------------- 1 | 2 | def compute_loss(net, X, y): 3 | """ 4 | Calculate loss on given network and data. 5 | 6 | Inputs: 7 | - net (nn.Sequential): network. 8 | - X (torch.tensor): input data. 9 | - y (torch.tensor): output data. 10 | 11 | Outputs: 12 | - loss (float): computed loss. 
13 | """ 14 | loss_fn = nn.MSELoss() 15 | 16 | y_pred = net(X) 17 | loss = loss_fn(y_pred, y) 18 | loss = float(loss.detach()) 19 | return loss 20 | 21 | loss = compute_loss(net, X, y) 22 | np.testing.assert_allclose(loss, 0.0, err_msg = "Expected value of loss is different!", atol = 1e-3) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_74ab8f48.py: -------------------------------------------------------------------------------- 1 | W_student, D_student = 10, 2 # student width, depth 2 | 3 | lr = 1e-3 4 | n_epochs = 20000 5 | Es_shallow_train = np.zeros((len(Ws_student),n_epochs)) 6 | Es_shallow_test = np.zeros(len(Ws_student)) 7 | 8 | student = make_MLP(n_in, W_student, D_student) 9 | initialize_layers(student, sigma_teacher) 10 | 11 | # make sure we have enough data 12 | P = get_num_params(n_in, W_student, D_student) 13 | assert(N_train > 3*P) 14 | 15 | # train 16 | Es_shallow_train = train_model(student, X_train, y_train, n_epochs, lr, progressbar=True) 17 | 18 | # # evaluate test error 19 | Es_shallow_test = compute_loss(student, X_test, y_test)/float(y_test.var()) 20 | print('Shallow student loss: ',Es_shallow_test) 21 | plot_students_predictions_vs_teacher_values(Es_shallow_train, X_test, y_test) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_8817495d.py: -------------------------------------------------------------------------------- 1 | torch.manual_seed(-1) 2 | 3 | # Create teacher 4 | n_in = 5 # input dimension 5 | W_teacher, D_teacher = 5, 5 # teacher width, depth 6 | sigma_teacher = 2 # teacher weight variance 7 | teacher = make_MLP(n_in, W_teacher, D_teacher) 8 | initialize_layers(teacher, sigma_teacher) 9 | 10 | # generate train and test set 11 | N_train, N_test = 4000, 1000 12 | X_train, y_train = make_data(teacher, n_in, N_train) 13 | X_test, y_test = make_data(teacher, n_in, N_test) 14 | 15 | np.testing.assert_allclose(X_test[0][0].item(), 0.19076240062713623, err_msg = "Expected value of data is different!") -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_8c945e68.py: -------------------------------------------------------------------------------- 1 | set_seed(42) 2 | 3 | def initialize_layers(net,sigma): 4 | """ 5 | Set weight to each of the parameters in the model of value sigma/sqrt(n_in), where n_in is the number of inputs to the layer. 6 | 7 | Inputs: 8 | - net (nn.Sequential): network. 9 | - sigma (float): standard deviation. 
10 | """ 11 | for param in net.parameters(): 12 | n_in = param.shape[1] 13 | nn.init.normal_(param, std = sigma/np.sqrt(n_in)) 14 | 15 | initialize_layers(net, 1) 16 | np.testing.assert_allclose(next(net.parameters())[0][0].item(), 0.609, err_msg = "Expected value of parameter is different!", atol = 1e-3) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_a3e6ddda.py: -------------------------------------------------------------------------------- 1 | error_target = 1e-6 2 | 3 | m,b = np.polyfit(np.log(Ws_student), np.log(Es_shallow_test), 1) 4 | print('Predicted width: ', np.exp((np.log(error_target) - b) / m)) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_a5e90b35.py: -------------------------------------------------------------------------------- 1 | lr = 0.003 2 | Es_deep = [] 3 | for i in range(4): 4 | Es_deep.append(train_model(student, X_train, y_train, 50000, lr)) 5 | #observe we reduce learning rate 6 | lr /= 3 7 | Es_deep = np.array(Es_deep) 8 | Es_deep = Es_deep.ravel() 9 | 10 | # evaluate test error 11 | loss_deep = compute_loss(student, X_test, y_test) / float(y_test.var()) 12 | print("Loss of deep student: ",loss_deep) 13 | plot_loss(Es_deep) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_a955337c.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: Why do you think we obtain zero error right away (on the first epoch)? 4 | 5 | The network we are training also generates the data. Thus, there is no 6 | need to change weights at all, the gradient is zero. 7 | """ -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_bc99a7f9.py: -------------------------------------------------------------------------------- 1 | 2 | def get_num_params(n_in,W,D): 3 | """ 4 | Simple function to compute number of learned parameters in an MLP with given dimensions. 5 | 6 | Inputs: 7 | - n_in (int): input dimension. 8 | - W (int): width of the network. 9 | - D (int): depth if the network. 10 | 11 | Outputs: 12 | - num_params (int): number of parameters in the network. 13 | """ 14 | input_params = n_in * W 15 | hidden_layers_params = (D-2) * W**2 16 | output_params = W 17 | return input_params + hidden_layers_params + output_params 18 | 19 | np.testing.assert_allclose(get_num_params(10, 3, 2), 33, err_msg = "Expected value of parameters number is different!") -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_c3274cd4.py: -------------------------------------------------------------------------------- 1 | 2 | set_seed(42) 3 | 4 | def train_model(net, X, y, n_epochs, lr, progressbar=True): 5 | """ 6 | Perform training of the network. 7 | 8 | Inputs: 9 | - net (nn.Sequential): network. 10 | - X (torch.tensor): input data. 11 | - y (torch.tensor): output data. 12 | - n_epochs (int): number of epochs to train the model for. 13 | - lr (float): learning rate for optimizer (we will use `Adam` by default). 14 | - progressbar (bool, default = True): whether to use additional bar for displaying training progress. 
15 | 16 | Outputs: 17 | - Es (np.ndarray): array which contains loss for each epoch. 18 | """ 19 | 20 | # Set up optimizer 21 | loss_fn = nn.MSELoss() 22 | optimizer = torch.optim.Adam(net.parameters(), lr = lr) 23 | 24 | # Run training loop 25 | Es = np.zeros(n_epochs) 26 | for n in (tqdm(range(n_epochs)) if progressbar else range(n_epochs)): 27 | y_pred = net(X) 28 | loss = loss_fn(y_pred, y) 29 | optimizer.zero_grad() 30 | loss.backward() 31 | optimizer.step() 32 | Es[n] = float(loss.detach()) 33 | 34 | return Es 35 | 36 | Es = train_model(net, X, y, 10, 1e-3) 37 | np.testing.assert_allclose(Es[0], 0.0, err_msg = "Expected value of loss is different!", atol = 1e-3) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial1_Solution_dcafefea.py: -------------------------------------------------------------------------------- 1 | D_student = 2 # student depth 2 | Ws_student = np.array([5, 15, 45, 135]) # widths 3 | 4 | lr = 1e-3 5 | n_epochs = 20000 6 | Es_shallow_train = np.zeros((len(Ws_student), n_epochs)) 7 | Es_shallow_test = np.zeros(len(Ws_student)) 8 | 9 | 10 | for index, W_student in enumerate(tqdm(Ws_student)): 11 | 12 | student = make_MLP(n_in, W_student, D_student) 13 | 14 | # make sure we have enough data 15 | P = get_num_params(n_in, W_student, D_student) 16 | assert(N_train > 3*P) 17 | 18 | # train 19 | Es_shallow_train[index] = train_model(student, X_train, y_train, n_epochs, lr, progressbar=False) 20 | Es_shallow_train[index] /= y_test.var() 21 | 22 | # evaluate test error 23 | loss = compute_loss(student, X_test, y_test)/y_test.var() 24 | Es_shallow_test[index] = loss 25 | 26 | plot_loss_as_function_of_width(Ws_student, Es_shallow_test, Es_shallow_train) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial2_Solution_00f64733.py: -------------------------------------------------------------------------------- 1 | set_seed(42) 2 | 3 | n_hids = np.unique(np.round(np.logspace(0, 3, 10))).astype(int) 4 | 5 | std_devs = np.linspace(0, 1.0, 3) 6 | 7 | def plot_error(x_train, y_train, x_test, y_test, std_devs, n_hids, n_hidden = 10, n_reps = 100, reg = 0.0): 8 | """ 9 | Plot mean test error for distinct values of noise added to train dataset. 10 | 11 | Inputs: 12 | - x_train (np.ndarray): train input data. 13 | - y_train (np.ndarray): train target data. 14 | - x_test (np.ndarray): test input data. 15 | - y_test (np.ndarray): test target data. 16 | - std_devs (np.ndarray): different standard deviation values for noise. 17 | - n_hids (np.ndarray): different values for hidden layer size. 18 | - n_hidden (int, default = 10): size of hidden layer. 19 | - n_reps (int, default = 100): number of resamples for data. 20 | - reg (float, default = 0): regularization constant. 
21 | """ 22 | with plt.xkcd(): 23 | for sd in tqdm(std_devs): 24 | test_errs = [sweep_test(x_train, y_train + np.random.normal(0,sd,y_train.shape), x_test, y_test, n_hidden = n_hid, n_reps = n_reps, reg = reg * (1 + sd)) for n_hid in n_hids] 25 | plt.loglog(n_hids,test_errs,'o-',label="std={}".format(sd)) 26 | 27 | plt.legend() 28 | plt.xlabel('Number of Hidden Units') 29 | plt.ylabel('Test Error') 30 | plt.show() 31 | 32 | plot_error(x_train, y_train, x_test, y_test, std_devs, n_hids) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial2_Solution_6d385a89.py: -------------------------------------------------------------------------------- 1 | set_seed(42) 2 | 3 | std_dev = .2 4 | 5 | noise = np.random.normal(0, std_dev, y_train.shape) 6 | 7 | n_hid = 500 8 | n_reps = 10 9 | 10 | with plt.xkcd(): 11 | plt.plot(x_test, y_test,linewidth=4,label='Test data') 12 | plt.plot(x_train, y_train + noise,'o',label='Training data') 13 | train_err, test_err, y_pred = fit_relu(x_train, y_train + noise, x_test, y_test, n_hidden = n_hid) 14 | plt.plot(x_test, y_pred, color='g', label='Prediction') 15 | plt.legend() 16 | plt.xlabel('Input Feature') 17 | plt.ylabel('Target Output') 18 | plt.title('Number of Hidden Units = {}'.format(n_hid)) 19 | plt.show() -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial2_Solution_7717ab4f.py: -------------------------------------------------------------------------------- 1 | set_seed(42) 2 | 3 | n_hids = np.unique(np.round(np.logspace(0, 3, 20))).astype(int) 4 | 5 | def sweep_test(x_train, y_train, x_test, y_test, n_hidden = 10, n_reps = 100, reg = 0.0): 6 | """ 7 | Calculate the mean test error for fitting the second layer of the network for a defined number of repetitions. 8 | Notice that `init_scale` is always set to 0 in this case. 9 | Inputs: 10 | - x_train (np.ndarray): train input data. 11 | - y_train (np.ndarray): train target data. 12 | - x_test (np.ndarray): test input data. 13 | - y_test (np.ndarray): test target data. 14 | - n_hidden (int, default = 10): size of hidden layer. 15 | - n_reps (int, default = 100): number of resamples for data. 16 | - reg (float, default = 0): regularization constant. 17 | 18 | Outputs: 19 | - (float): mean error for train data. 20 | """ 21 | return np.mean(np.array([fit_relu(x_train, y_train, x_test, y_test, n_hidden=n_hidden, reg = reg)[1] for _ in range(n_reps)])) 22 | 23 | test_errs = [sweep_test(x_train, y_train, x_test, y_test, n_hidden=n_hid, n_reps=100, reg = 0.0) for n_hid in n_hids] 24 | 25 | with plt.xkcd(): 26 | plt.loglog(n_hids,test_errs,'o-',label='Test') 27 | plt.xlabel('Number of Hidden Units') 28 | plt.ylabel('Test Error') 29 | plt.show() -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial2_Solution_e1136bc5.py: -------------------------------------------------------------------------------- 1 | set_seed(42) 2 | 3 | n_hid = 2 4 | 5 | n_reps = 10 # Number of networks to train 6 | 7 | def plot_predictions(n_hid, n_reps): 8 | """ 9 | Generate train and test data for `n_reps` times, fit it for a network with hidden size `n_hid`, and plot prediction values. 10 | 11 | Inputs: 12 | - n_hid (int): size of hidden layer. 13 | - n_reps (int): number of data regenerations. 
14 | """ 15 | with plt.xkcd(): 16 | plt.plot(x_test, y_test,linewidth=4,label='Test data') 17 | plt.plot(x_train, y_train,'o',label='Training data') 18 | 19 | train_err, test_err, y_pred = fit_relu(x_train, y_train, x_test, y_test, n_hidden=n_hid) 20 | plt.plot(x_test, y_pred, color='g', label='Prediction') 21 | 22 | for rep in range(n_reps-1): 23 | train_err, test_err, y_pred = fit_relu(x_train, y_train, x_test, y_test, n_hidden=n_hid) 24 | plt.plot(x_test, y_pred, color='g', alpha=.5, label='_') 25 | 26 | plt.legend() 27 | plt.xlabel('Input Feature') 28 | plt.ylabel('Target Output') 29 | plt.title('Number of Hidden Units = {}'.format(n_hid)) 30 | plt.show() 31 | 32 | plot_predictions(n_hid, n_reps) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial2_Solution_fd82f22a.py: -------------------------------------------------------------------------------- 1 | set_seed(42) 2 | init_scales = np.linspace(0, 3, 5) 3 | 4 | n_hids = np.unique(np.round(np.logspace(0, 3, 10))).astype(int) 5 | 6 | with plt.xkcd(): 7 | for sd in tqdm(init_scales): 8 | test_errs = [sweep_test_init_scale(x_train, y_train, x_test, y_test, init_scale = sd, n_hidden=n_hid, n_reps=100) for n_hid in n_hids] 9 | plt.loglog(n_hids,test_errs,'o-',label="Init Scale={}".format(sd)) 10 | 11 | plt.legend() 12 | plt.xlabel('Number of Hidden Units') 13 | plt.ylabel('Test Error') 14 | plt.show() -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial3_Solution_02426ac5.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: Is there any difference between the trajectories for the modular and holistic agents? If so, what does it imply? 4 | 5 | The holistic agent's trajectory has a higher curvature and length than that of the modular agent, suggesting that the modular agent's trajectory is more optimal. This is because, based on the RL objective with a discount factor smaller than 1, the trajectory should be as efficient (involving fewer steps) as possible. 6 | """ -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial3_Solution_16aa2d9e.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: To ensure a fair comparison, the total number of trainable parameters is designed to be similar between the two architectures. How many trainable parameters are there in each architecture? 4 | 5 | Taking into consideration that OBS_DIM = ACTION_DIM = TARGET_DIM = 2, and that for `LSTM` layer, the total number of parameters is 4(nm + n^2 + n) where m is the input dimension and n is the output dimension (as we have self-recurrence, thus n^2, projection from input to output, thus nm, and, finally, bias, thus n), we have: 6 | 7 | - for holistic actor (LSTM + Linear projection): 4 * (6 * 220 + 220*220 + 220) + (220 * 2 + 220) = 200420. 8 | - for modular actor (LSTM + Linear projections): 4 * (6 * 128 + 128*128 + 128) + (128 * 300 + 128) + (300 * 300 + 300) + (300 * 2 + 300) = 198848. 
9 | """ -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial3_Solution_3d63fcd3.py: -------------------------------------------------------------------------------- 1 | class HolisticActor(nn.Module): 2 | def __init__(self, OBS_DIM, ACTION_DIM, TARGET_DIM): 3 | """ 4 | Initializes the holistic actor model with given dimensions. 5 | 6 | Inputs: 7 | - OBS_DIM (int): The dimension of the observation input. 8 | - ACTION_DIM (int): The dimension of the action output. 9 | - TARGET_DIM (int): The dimension of the target input. 10 | 11 | Outputs: 12 | - None 13 | """ 14 | super().__init__() 15 | self.OBS_DIM = OBS_DIM 16 | self.ACTION_DIM = ACTION_DIM 17 | self.RNN_SIZE = 220 # RNN hidden size 18 | 19 | self.rnn = nn.LSTM(input_size=OBS_DIM + ACTION_DIM + TARGET_DIM, hidden_size=self.RNN_SIZE) 20 | self.l1 = nn.Linear(self.RNN_SIZE, ACTION_DIM) 21 | 22 | def forward(self, x, hidden_in): 23 | """ 24 | Computes the action based on the current input and hidden state. 25 | 26 | Inputs: 27 | - x (tensor): The current input to the model, which includes observation, action, and target information. 28 | - hidden_in (tuple): The initial hidden state for the LSTM. 29 | 30 | Outputs: 31 | - a (tensor): The action output from the model. 32 | - hidden_out (tuple): The updated hidden state from the LSTM. 33 | """ 34 | ####################################################### 35 | # TODO: Pass the input 'x' and the previous hidden state 'hidden_in' to the RNN module 'self.rnn'. 36 | # Get the output 'x' and the hidden state 'hidden_out' from the RNN module. 37 | # Refer to https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html. 38 | # Hint: 'self.rnn' takes two arguments as inputs and outputs two things. 39 | # The first position corresponds to 'x', and the second position corresponds to the hidden state. 40 | ####################################################### 41 | x, hidden_out = self.rnn(x, hidden_in) 42 | 43 | a = torch.tanh(self.l1(x)) 44 | return a, hidden_out -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial3_Solution_3f3de125.py: -------------------------------------------------------------------------------- 1 | with plt.xkcd(): 2 | trial_idx = 21 3 | trial = modular_df.iloc[trial_idx] 4 | 5 | fig = plt.figure(figsize=(2.2, 1.7), dpi=200) 6 | ax = fig.add_subplot(111) 7 | 8 | ax.set_aspect('equal') 9 | ax.spines['top'].set_visible(False) 10 | ax.spines['right'].set_visible(False) 11 | ax.spines['bottom'].set_visible(False) 12 | ax.spines['left'].set_visible(False) 13 | ax.axes.xaxis.set_ticks([]); ax.axes.yaxis.set_ticks([]) 14 | 15 | # plot trajectory 16 | px = trial.pos_x; py = trial.pos_y 17 | ax.plot(px, py, lw=lw, c=modular_c) 18 | 19 | # plot target 20 | target_x = trial.target_x; target_y = trial.target_y 21 | print(f'Target distance from the start location: {np.around(trial.target_r, 1)} cm') 22 | 23 | # Given target locations as trial.target_x and trial.target_y, 24 | # and stop locations as trial.pos_x[-1] and trial.pos_y[-1], 25 | # compute the Euclidean distance between the target and stop locations. 
26 | distance_stoploc_to_target = np.sqrt((trial.target_x - trial.pos_x[-1])**2 + (trial.target_y - trial.pos_y[-1])**2) 27 | print(f'Target distance from the stop location: {np.around(distance_stoploc_to_target, 1)} cm') 28 | 29 | print(f'Steps taken: {px.size - 1}') 30 | 31 | reward_boundary_radius = arg.goal_radius * arg.LINEAR_SCALE 32 | target_color = reward_c if distance_stoploc_to_target < reward_boundary_radius else unreward_c 33 | 34 | cir1 = Circle(xy=[target_x, target_y], radius=reward_boundary_radius, alpha=0.4, color=target_color, lw=0) 35 | ax.add_patch(cir1) 36 | ax.scatter(target_x, target_y, c=target_color, s=5) 37 | 38 | # plot initial position 39 | ax.scatter(0, 0, c='k', s=20, marker='*') 40 | ax.text(10, -10, s='Start', fontsize=fontsize) 41 | 42 | fig.tight_layout(pad=0) -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/solutions/W2D1_Tutorial3_Solution_ecb01cdf.py: -------------------------------------------------------------------------------- 1 | class ModularActor(nn.Module): 2 | def __init__(self, OBS_DIM, ACTION_DIM, TARGET_DIM): 3 | """ 4 | Initializes the modular actor model with given dimensions. 5 | 6 | Inputs: 7 | - OBS_DIM (int): The dimension of the observation input. 8 | - ACTION_DIM (int): The dimension of the action output. 9 | - TARGET_DIM (int): The dimension of the target input. 10 | 11 | Outputs: 12 | - None 13 | """ 14 | super().__init__() 15 | self.OBS_DIM = OBS_DIM 16 | self.ACTION_DIM = ACTION_DIM 17 | self.RNN_SIZE = 128 # RNN hidden size 18 | MLP_SIZE = 300 # number of neurons in one MLP layer 19 | 20 | self.rnn = nn.LSTM(input_size=OBS_DIM + ACTION_DIM + TARGET_DIM, hidden_size=self.RNN_SIZE) 21 | self.l1 = nn.Linear(self.RNN_SIZE, MLP_SIZE) 22 | self.l2 = nn.Linear(MLP_SIZE, MLP_SIZE) 23 | self.l3 = nn.Linear(MLP_SIZE, ACTION_DIM) 24 | 25 | def forward(self, x, hidden_in): 26 | """ 27 | Computes the action based on the current input and hidden state. 28 | 29 | Inputs: 30 | - x (tensor): The current input to the model, which includes observation, action, and target information. 31 | - hidden_in (tuple): The initial hidden state for the LSTM. 32 | 33 | Outputs: 34 | - a (tensor): The action output from the model. 35 | - hidden_out (tuple): The updated hidden state from the LSTM. 36 | """ 37 | ####################################################### 38 | # TODO: Pass 'x' to the MLP module, which consists of two linear layers with ReLU nonlinearity. 39 | # First, pass 'x' to the first linear layer, 'self.l1', followed by 'F.relu'. 40 | # Second, pass 'x' again to the second linear layer, 'self.l2', followed by 'F.relu'. 
41 | ####################################################### 42 | x, hidden_out = self.rnn(x, hidden_in) 43 | x = F.relu(self.l1(x)) 44 | x = F.relu(self.l2(x)) 45 | 46 | a = torch.tanh(self.l3(x)) 47 | 48 | return a, hidden_out -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial1_Solution_74ab8f48_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial1_Solution_74ab8f48_2.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial1_Solution_a5e90b35_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial1_Solution_a5e90b35_5.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial1_Solution_dcafefea_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial1_Solution_dcafefea_1.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial2_Solution_00f64733_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial2_Solution_00f64733_1.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial2_Solution_6d385a89_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial2_Solution_6d385a89_0.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial2_Solution_7717ab4f_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial2_Solution_7717ab4f_0.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial2_Solution_e1136bc5_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial2_Solution_e1136bc5_0.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial2_Solution_fd82f22a_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial2_Solution_fd82f22a_1.png 
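A quick way to check the parameter arithmetic discussed in W2D1_Tutorial3_Solution_16aa2d9e.py is to build the same layers and count directly. This is a sketch under the stated assumption OBS_DIM = ACTION_DIM = TARGET_DIM = 2 (i.e. an LSTM input dimension of 6); the helper name `count_params` is illustrative, not part of the tutorial code.

import torch.nn as nn

def count_params(m: nn.Module) -> int:
    # sum the sizes of all trainable parameter tensors
    return sum(p.numel() for p in m.parameters() if p.requires_grad)

# layers matching HolisticActor: LSTM(6 -> 220) + Linear(220 -> 2)
holistic = nn.ModuleList([nn.LSTM(input_size=6, hidden_size=220), nn.Linear(220, 2)])
# layers matching ModularActor: LSTM(6 -> 128) + Linear(128 -> 300) + Linear(300 -> 300) + Linear(300 -> 2)
modular = nn.ModuleList([nn.LSTM(input_size=6, hidden_size=128),
                         nn.Linear(128, 300), nn.Linear(300, 300), nn.Linear(300, 2)])
print(count_params(holistic))  # 201082
print(count_params(modular))   # 199234

Note that nn.LSTM carries two bias vectors per layer (bias_ih and bias_hh), which the counts above include.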
-------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial3_Solution_3f3de125_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/W2D1_Tutorial3_Solution_3f3de125_1.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/actor_critic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/actor_critic.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/gain_change.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/gain_change.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/holistic_modular.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/holistic_modular.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/navigation_task.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/navigation_task.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/static/nets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D1_Macrocircuits/static/nets.png -------------------------------------------------------------------------------- /tutorials/W2D1_Macrocircuits/student/W2D1_Outro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {} 16 | }, 17 | "source": [ 18 | "# Daily survey\n", 19 | "\n", 20 | "Don't forget to complete your reflections and content check in the daily survey! 
Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 21 | "\n", 22 | "\"button" 23 | ] 24 | } 25 | ], 26 | "metadata": { 27 | "colab": { 28 | "collapsed_sections": [], 29 | "include_colab_link": true, 30 | "name": "W2D1_Outro", 31 | "provenance": [], 32 | "toc_visible": true 33 | }, 34 | "kernel": { 35 | "display_name": "Python 3", 36 | "language": "python", 37 | "name": "python3" 38 | }, 39 | "kernelspec": { 40 | "display_name": "Python 3 (ipykernel)", 41 | "language": "python", 42 | "name": "python3" 43 | }, 44 | "language_info": { 45 | "codemirror_mode": { 46 | "name": "ipython", 47 | "version": 3 48 | }, 49 | "file_extension": ".py", 50 | "mimetype": "text/x-python", 51 | "name": "python", 52 | "nbconvert_exporter": "python", 53 | "pygments_lexer": "ipython3", 54 | "version": "3.9.19" 55 | } 56 | }, 57 | "nbformat": 4, 58 | "nbformat_minor": 4 59 | } 60 | -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/W2D2_Outro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {} 16 | }, 17 | "source": [ 18 | "# Daily survey\n", 19 | "\n", 20 | "Don't forget to complete your reflections and content check in the daily survey! Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 21 | "\n", 22 | "\"button" 23 | ] 24 | } 25 | ], 26 | "metadata": { 27 | "colab": { 28 | "collapsed_sections": [], 29 | "include_colab_link": true, 30 | "name": "W2D2_Outro", 31 | "provenance": [], 32 | "toc_visible": true 33 | }, 34 | "kernel": { 35 | "display_name": "Python 3", 36 | "language": "python", 37 | "name": "python3" 38 | }, 39 | "kernelspec": { 40 | "display_name": "Python 3 (ipykernel)", 41 | "language": "python", 42 | "name": "python3" 43 | }, 44 | "language_info": { 45 | "codemirror_mode": { 46 | "name": "ipython", 47 | "version": 3 48 | }, 49 | "file_extension": ".py", 50 | "mimetype": "text/x-python", 51 | "name": "python", 52 | "nbconvert_exporter": "python", 53 | "pygments_lexer": "ipython3", 54 | "version": "3.9.19" 55 | } 56 | }, 57 | "nbformat": 4, 58 | "nbformat_minor": 4 59 | } 60 | -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/instructor/W2D2_Outro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {} 16 | }, 17 | "source": [ 18 | "# Daily survey\n", 19 | "\n", 20 | "Don't forget to complete your reflections and content check in the daily survey! 
Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 21 | "\n", 22 | "\"button 23 | ] 24 | } 25 | ], 26 | "metadata": { 27 | "colab": { 28 | "collapsed_sections": [], 29 | "include_colab_link": true, 30 | "name": "W2D2_Outro", 31 | "provenance": [], 32 | "toc_visible": true 33 | }, 34 | "kernel": { 35 | "display_name": "Python 3", 36 | "language": "python", 37 | "name": "python3" 38 | }, 39 | "kernelspec": { 40 | "display_name": "Python 3 (ipykernel)", 41 | "language": "python", 42 | "name": "python3" 43 | }, 44 | "language_info": { 45 | "codemirror_mode": { 46 | "name": "ipython", 47 | "version": 3 48 | }, 49 | "file_extension": ".py", 50 | "mimetype": "text/x-python", 51 | "name": "python", 52 | "nbconvert_exporter": "python", 53 | "pygments_lexer": "ipython3", 54 | "version": "3.9.19" 55 | } 56 | }, 57 | "nbformat": 4, 58 | "nbformat_minor": 4 59 | } 60 | -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial1_Solution_0b0b7a2d.py: -------------------------------------------------------------------------------- 1 | 2 | set_seed(42) 3 | 4 | symbol_names = ['fire-fighter','math-teacher','sales-manager'] 5 | discrete_space = sspspace.DiscreteSPSpace(symbol_names, ssp_dim=1024, optimize=False) 6 | 7 | vocab = {n:discrete_space.encode(n) for n in symbol_names} 8 | 9 | noisy_vector = 0.2 * vocab['fire-fighter'] + 0.15 * vocab['math-teacher'] + 0.3 * vocab['sales-manager'] 10 | 11 | sims = np.array([noisy_vector | vocab[name] for name in symbol_names]).squeeze() -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial1_Solution_15f690ab.py: -------------------------------------------------------------------------------- 1 | 2 | sim_mat = np.zeros((4,4)) 3 | 4 | sim_mat[0,0] = spa.dot(circle, circle) 5 | sim_mat[1,1] = spa.dot(square, square) 6 | sim_mat[2,2] = spa.dot(triangle, triangle) 7 | sim_mat[3,3] = spa.dot(shape, shape) 8 | 9 | sim_mat[0,1] = sim_mat[1,0] = spa.dot(circle, square) 10 | sim_mat[0,2] = sim_mat[2,0] = spa.dot(circle, triangle) 11 | sim_mat[0,3] = sim_mat[3,0] = spa.dot(circle, shape) 12 | 13 | sim_mat[1,2] = sim_mat[2,1] = spa.dot(square, triangle) 14 | sim_mat[1,3] = sim_mat[3,1] = spa.dot(square, shape) 15 | sim_mat[2,3] = sim_mat[3,2] = spa.dot(triangle, shape) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial1_Solution_3e9c4916.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: How would you provide intuitive reasoning or a rigorous mathematical proof of the fact that random high-dimensional vectors (note that each component is drawn from a uniform distribution with zero mean) are approximately orthogonal? 4 | 5 | Observe that since the components are independent and sampled from a distribution with zero mean, the expected value of the dot product is E(x*y) = E(\sum_i x_i * y_i) = (linearity of expectation) \sum_i E(x_i * y_i) = (independence) \sum_i (E(x_i) * E(y_i)) = 0.
6 | """; -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial1_Solution_513dd01a.py: -------------------------------------------------------------------------------- 1 | 2 | five_unbind_two = integers[4] * ~integers[1] 3 | sims = np.array([spa.dot(five_unbind_two, i) for i in integers]) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial1_Solution_99c595f2.py: -------------------------------------------------------------------------------- 1 | 2 | set_seed(42) 3 | 4 | vector_length = 1024 5 | symbol_names = ['CIRCLE','SQUARE','TRIANGLE'] 6 | 7 | vocab = make_vocabulary(vector_length) 8 | vocab.populate(';'.join(symbol_names)) 9 | print(list(vocab.keys())) 10 | 11 | circle = vocab['CIRCLE'] 12 | square = vocab['SQUARE'] 13 | triangle = vocab['TRIANGLE'] 14 | 15 | print('|circle| =', np.linalg.norm(circle.v)) 16 | print('|triangle| =', np.linalg.norm(square.v)) 17 | print('|square| =', np.linalg.norm(triangle.v)) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial1_Solution_b91a4ab5.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: How would you explain the usage of `d,md->m` in `np.einsum()` function in the previous coding exercise? 4 | 5 | `d` is the dimensionality of the vector; we compute similariy of one vector (representing `0` object) with other `m` vectors of the same dimension `d` (thus `md`); as the result we receive `m` values of similarity. 6 | """; -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial1_Solution_b9294b66.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: Why do we need to normalize the vector obtained as a result of the bundling operation? What length do you expect to receive without normalization? 4 | 5 | We would like to preserve the unitary length of the vector so it fits the rules of the vector space we've defined. If we simply add three vectors together, we can calculate the resulted length by taking the dot product with itself - it will be the sum of pairwise dot products of all vectors in the sum (with repetition of vectors with themselves), thus the sum is going to be around three (remember that = 0 while = 1), meaning that length of the obtained vector is sqrt(3). 
6 | """; -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial1_Solution_ce5fc6c7.py: -------------------------------------------------------------------------------- 1 | 2 | phi_shifted = phis[200] * X**-3.1 3 | sims = np.array([spa.dot(phi_shifted, p) for p in phis]) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial1_Solution_da1926e8.py: -------------------------------------------------------------------------------- 1 | 2 | vocab.add('RED_CIRCLE', vocab['RED'] * vocab['CIRCLE']) 3 | vocab.add('BLUE_TRIANGLE', vocab['BLUE'] * vocab['TRIANGLE']) 4 | vocab.add('GREEN_SQUARE', vocab['GREEN'] * vocab['SQUARE']) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial1_Solution_db547f5d.py: -------------------------------------------------------------------------------- 1 | object_names = ['RED','EST_RED','RED_CIRCLE','CIRCLE','EST_CIRCLE'] 2 | 3 | vocab.add('EST_RED', (vocab['RED_CIRCLE'] * ~vocab['CIRCLE']).normalized()) 4 | vocab.add('EST_CIRCLE', (vocab['RED_CIRCLE'] * ~vocab['RED']).normalized()) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial1_Solution_f12d9c75.py: -------------------------------------------------------------------------------- 1 | 2 | set_seed(42) 3 | 4 | class Cleanup: 5 | def __init__(self, vocab, temperature=1e5): 6 | self.weights = np.array([vocab[k] for k in vocab.keys()]).squeeze() 7 | self.temp = temperature 8 | def __call__(self, x): 9 | sims = np.einsum('nd,md->nm', self.weights, x) 10 | max_sim = softmax(sims * self.temp, axis=0) 11 | return sspspace.SSP(np.einsum('nd,nm->md', self.weights, max_sim)) 12 | 13 | 14 | cleanup = Cleanup(vocab) 15 | 16 | clean_vector = cleanup(noisy_vector) 17 | 18 | clean_sims = np.array([clean_vector | vocab[name] for name in symbol_names]).squeeze() -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial2_Solution_3a819ce6.py: -------------------------------------------------------------------------------- 1 | 2 | new_rule = (vocab['ANT'] * vocab['RED'] + vocab['RELATION'] * vocab['IMPLIES'] + vocab['CONS'] * vocab['PRIME']).normalized() 3 | 4 | #apply transform on new rule to test the generalization of the transform 5 | a_hat = spa.SemanticPointer(transform) * new_rule 6 | 7 | new_sims = np.einsum('nd,d->n', action_space, a_hat.v) 8 | y_hat = softmax(new_sims) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial2_Solution_550fd076.py: -------------------------------------------------------------------------------- 1 | 2 | num_iters = 500 3 | losses = [] 4 | sims = [] 5 | lr = 1e-1 6 | ant_names = ["BLUE", "ODD"] 7 | cons_names = ["EVEN", "GREEN"] 8 | vector_length = 1024 9 | 10 | transform = np.zeros((vector_length)) 11 | for i in range(num_iters): 12 | loss = 0 13 | for rule, ant_name, cons_name in zip(rules, ant_names, cons_names): 14 | 15 | #perfect similarity 16 | y_true = np.eye(len(action_names))[action_names.index(ant_name),:] + np.eye(len(action_names))[4+action_names.index(cons_name),:] 17 | 18 | #prediction with current transform (a_hat = transform * rule) 19 | a_hat = 
spa.SemanticPointer(transform) * rule 20 | 21 | #similarity with current transform 22 | sim_mat = np.einsum('nd,d->n', action_space, a_hat.v) 23 | 24 | #cleanup 25 | y_hat = softmax(sim_mat) 26 | 27 | #true solution (a* = ant_name + not * cons_name) 28 | a_true = (vocab[ant_name] + vocab['NOT']*vocab[cons_name]).normalized() 29 | 30 | #calculate loss 31 | loss += log_loss(y_true, y_hat) 32 | 33 | #update transform (T <- T - lr * (T - A* * (~rule))) 34 | transform -= (lr) * (transform - (a_true * ~rule).v) 35 | transform = transform / np.linalg.norm(transform) 36 | 37 | #save predicted similarities on the last iteration 38 | if i == num_iters - 1: 39 | sims.append(sim_mat) 40 | 41 | #save loss 42 | losses.append(np.copy(loss)) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial2_Solution_a0e39449.py: -------------------------------------------------------------------------------- 1 | set_seed(42) 2 | vector_length = 1024 3 | 4 | card_states = ['RED','BLUE','ODD','EVEN','NOT','GREEN','PRIME','IMPLIES','ANT','RELATION','CONS'] 5 | vocab = make_vocabulary(vector_length) 6 | vocab.populate(';'.join(card_states)) 7 | 8 | 9 | for a in ['RED','BLUE','ODD','EVEN','GREEN','PRIME']: 10 | vocab.add(f'NOT_{a}', vocab['NOT'] * vocab[a]) 11 | 12 | action_names = ['RED','BLUE','ODD','EVEN','GREEN','PRIME','NOT_RED','NOT_BLUE','NOT_ODD','NOT_EVEN','NOT_GREEN','NOT_PRIME'] 13 | action_space = np.array([vocab[x].v for x in action_names]).squeeze() -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial2_Solution_bab79b64.py: -------------------------------------------------------------------------------- 1 | 2 | rules = [ 3 | (vocab['ANT'] * vocab['BLUE'] + vocab['RELATION'] * vocab['IMPLIES'] + vocab['CONS'] * vocab['EVEN']).normalized(), 4 | (vocab['ANT'] * vocab['ODD'] + vocab['RELATION'] * vocab['IMPLIES'] + vocab['CONS'] * vocab['GREEN']).normalized(), 5 | ] -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial2_Solution_cc0b7eb5.py: -------------------------------------------------------------------------------- 1 | 2 | #features - rules 3 | X_train = np.array([r.v for r in rules]).squeeze() 4 | 5 | #output - a* for each rule 6 | y_train = np.array([ 7 | (vocab[ant_names[0]] + vocab['NOT']*vocab[cons_names[0]]).normalized().v, 8 | (vocab[ant_names[1]] + vocab['NOT']*vocab[cons_names[1]]).normalized().v, 9 | ]).squeeze() 10 | 11 | regr = MLPRegressor(random_state=1, hidden_layer_sizes=(1024,1024), max_iter=1000).fit(X_train, y_train) 12 | 13 | a_mlp = regr.predict(new_rule.v[None,:]) 14 | 15 | mlp_sims = np.einsum('nd,md->nm', action_space, a_mlp) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial4_Solution_603ce327.py: -------------------------------------------------------------------------------- 1 | 2 | vocab.add('QUERY_MX_CURRENCY', vocab['MEXICO'] * ~(vocab['CANADA'] * ~vocab['DOLLAR'])) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial4_Solution_73f6dd01.py: -------------------------------------------------------------------------------- 1 | 2 | vocab.add('QUERY_PRINCESS', ((vocab['PRINCE'] - vocab['KING']) + vocab['QUEEN']).normalized())
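The analogy queries in these Tutorial 4 solutions (e.g. QUERY_PRINCESS and QUERY_MX_CURRENCY) all rest on the same bind/unbind identity. Below is a standalone numpy sketch of that identity using circular convolution as the binding operator; it is for intuition only — the `spa` and `sspspace` objects used in the solutions wrap the same operation, and the helper names here are illustrative.

import numpy as np

rng = np.random.default_rng(0)
d = 1024

def rand_vec():
    # random unit vector in d dimensions
    v = rng.normal(size=d)
    return v / np.linalg.norm(v)

def bind(a, b):
    # circular convolution, computed via the FFT
    return np.fft.irfft(np.fft.rfft(a) * np.fft.rfft(b), n=d)

def unbind(a, b):
    # bind with the approximate inverse of b (the involution that `~` denotes above)
    return np.fft.irfft(np.fft.rfft(a) * np.fft.rfft(b).conj(), n=d)

monarch, male, female = rand_vec(), rand_vec(), rand_vec()
king = bind(monarch, male)
queen = bind(monarch, female)
queen_hat = bind(unbind(king, male), female)   # KING * ~MALE * FEMALE
queen_hat /= np.linalg.norm(queen_hat)
print(queen_hat @ queen)     # well above chance; chance similarity is near 0 at d = 1024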
-------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial4_Solution_873dadc4.py: -------------------------------------------------------------------------------- 1 | 2 | vocab.add('KING', vocab['MONARCH'] * vocab['MALE']) 3 | vocab.add('QUEEN', vocab['MONARCH'] * vocab['FEMALE']) 4 | vocab.add('PRINCE', vocab['HEIR'] * vocab['MALE']) 5 | vocab.add('PRINCESS', vocab['HEIR'] * vocab['FEMALE']) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial4_Solution_8ecc4392.py: -------------------------------------------------------------------------------- 1 | 2 | vocab.add('QUERY_QUEEN', (vocab['KING'] * ~vocab['MALE']) * vocab['FEMALE']) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial4_Solution_a848247a.py: -------------------------------------------------------------------------------- 1 | 2 | vocab.add('CANADA', (vocab['CURRENCY'] * vocab['DOLLAR'] + vocab['CAPITAL'] * vocab['OTTAWA']).normalized()) 3 | vocab.add('MEXICO', (vocab['CURRENCY'] * vocab['PESO'] + vocab['CAPITAL'] * vocab['MEXICO_CITY']).normalized()) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial4_Solution_f2f03c67.py: -------------------------------------------------------------------------------- 1 | 2 | # objs['query'] = (objs['prince'] * ~objs['king']) * objs['queen'] 3 | vocab.add('QUERY_PRINCESS_2', (vocab['PRINCE'] * ~vocab['KING']) * vocab['QUEEN']) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial5_Solution_1090e65a.py: -------------------------------------------------------------------------------- 1 | # unifying bundled representation of all objects 2 | all_objs = (objs['circle'] + objs['square'] + objs['triangle']).normalize() 3 | 4 | # unbind this unifying representation from the map 5 | query_map = ssp_map * ~all_objs 6 | 7 | sims = query_ssps @ query_map.flatten() 8 | size = (dim0.size,dim1.size) 9 | 10 | plot_unbinding_objects_map(sims, positions, query_xs, size) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial5_Solution_2b4c5a99.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: Can you guess which of the representations will be more efficient by the nature of the function? 4 | 5 | As the function is not separable, we expect the bound representation to perform better. 
6 | """; -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial5_Solution_57dcbce1.py: -------------------------------------------------------------------------------- 1 | 2 | query_objs = np.vstack([objs[n] for n in obj_names]) 3 | test_positions = np.vstack((positions, [0,0], [0,-1.5])) 4 | 5 | sims = [] 6 | 7 | for pos_idx, pos in enumerate(test_positions): 8 | position_ssp = ssp_space.encode(pos[None,:]) #remember we need to have 2-dimensional vectors for `encode()` function 9 | #unbind positions from the map 10 | query_map = ssp_map * ~position_ssp 11 | sims.append(query_objs @ query_map.flatten()) 12 | 13 | plot_unbinding_positions_map(sims, test_positions, obj_names) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial5_Solution_730a75ed.py: -------------------------------------------------------------------------------- 1 | 2 | def non_separable(x): 3 | """Compute non-separable function for given array of 2-dimenstional vectors. 4 | 5 | Inputs: 6 | - x (np.ndarray of shape (n, 2)): n 2-dimensional vectors. 7 | 8 | Outputs: 9 | - y (np.ndarray of shape (n, 1)): non-separable function value for each of the vectors. 10 | """ 11 | return np.sin(np.multiply(x[:, 0], x[:, 1])) 12 | 13 | x0_non_separable = np.linspace(-4, 4, 100) 14 | X_non_separable, Y_non_separable = np.meshgrid(x0_non_separable,x0_non_separable) 15 | xs_non_separable = np.vstack((X_non_separable.flatten(), Y_non_separable.flatten())).T 16 | 17 | ys_non_separable = non_separable(xs_non_separable) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial5_Solution_8c79265f.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: Why do you think the bundled representation is superior for the Rastrigin function? 4 | 5 | The Rastrigin function is a superposition of independent functions of the input variable dimensions. The bundled representation is a superposition of a high-dimensional representation of the input dimensions, making it easier to learn this function, which is additive. For the bound representation, we have to learn a mapping from each tuple of input values to the appropriate output value, meaning more samples are required to approximate the function. 
6 | """; -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial5_Solution_99c56d84.py: -------------------------------------------------------------------------------- 1 | 2 | objects_sims = [] 3 | 4 | for obj_idx, obj_name in enumerate(obj_names): 5 | #query the object name by unbinding it from the map 6 | query_map = ssp_map * ~objs[obj_name] 7 | objects_sims.append(query_ssps @ query_map.flatten()) 8 | 9 | plot_2d_similarity(objects_sims, obj_names, (dim0.size, dim1.size), title_argmax = True) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial5_Solution_b3d8f220.py: -------------------------------------------------------------------------------- 1 | 2 | #objects are located in `objs` and positions in `ssps` 3 | bound_objects = [objs[n] * ssps[n] for n in obj_names] 4 | 5 | sims = [] 6 | 7 | for obj_idx, obj in enumerate(obj_names): 8 | sims.append(query_ssps @ bound_objects[obj_idx].flatten()) 9 | 10 | plt.figure(figsize=(8, 2.4)) 11 | plot_2d_similarity(sims, obj_names, (dim0.size, dim1.size)) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial5_Solution_bd7761a4.py: -------------------------------------------------------------------------------- 1 | 2 | def rastrigin(x): 3 | """Compute Rastrigin function for given array of d-dimensional vectors. 4 | 5 | Inputs: 6 | - x (np.ndarray of shape (n, d)): n d-dimensional vectors. 7 | 8 | Outputs: 9 | - y (np.ndarray of shape (n,)): Rastrigin function value for each of the vectors. 10 | """ 11 | return 10 * x.shape[1] + np.sum(x**2 - 10 * np.cos(2*np.pi*x), axis=1) 12 | 13 | # this code creates 10000 2-dimensional vectors which will serve as input to the function (thus, the output is of shape (10000,)) 14 | x0_rastrigin = np.linspace(-5.12, 5.12, 100) 15 | X_rastrigin, Y_rastrigin = np.meshgrid(x0_rastrigin, x0_rastrigin) 16 | xs_rastrigin = np.vstack((X_rastrigin.flatten(), Y_rastrigin.flatten())).T 17 | 18 | ys_rastrigin = rastrigin(xs_rastrigin) 19 | 20 | plot_3d_function([X_rastrigin], [Y_rastrigin], [ys_rastrigin.reshape(X_rastrigin.shape)], ['Rastrigin Function']) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/solutions/W2D2_Tutorial5_Solution_cff4accc.py: -------------------------------------------------------------------------------- 1 | 2 | sims = [] 3 | 4 | for obj_idx, obj in enumerate(obj_names): 5 | sims.append(query_ssps @ ssps[obj].flatten()) 6 | 7 | plt.figure(figsize=(8, 2.4)) 8 | plot_2d_similarity(sims, obj_names, (dim0.size, dim1.size)) -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_1090e65a_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_1090e65a_0.png -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_57dcbce1_0.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_57dcbce1_0.png -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_99c56d84_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_99c56d84_0.png -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_b3d8f220_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_b3d8f220_0.png -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_bd7761a4_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_bd7761a4_0.png -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_cff4accc_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D2_NeuroSymbolicMethods/static/W2D2_Tutorial5_Solution_cff4accc_0.png -------------------------------------------------------------------------------- /tutorials/W2D2_NeuroSymbolicMethods/student/W2D2_Outro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {} 16 | }, 17 | "source": [ 18 | "# Daily survey\n", 19 | "\n", 20 | "Don't forget to complete your reflections and content check in the daily survey! 
Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 21 | "\n", 22 | "\"button" 23 | ] 24 | } 25 | ], 26 | "metadata": { 27 | "colab": { 28 | "collapsed_sections": [], 29 | "include_colab_link": true, 30 | "name": "W2D2_Outro", 31 | "provenance": [], 32 | "toc_visible": true 33 | }, 34 | "kernel": { 35 | "display_name": "Python 3", 36 | "language": "python", 37 | "name": "python3" 38 | }, 39 | "kernelspec": { 40 | "display_name": "Python 3 (ipykernel)", 41 | "language": "python", 42 | "name": "python3" 43 | }, 44 | "language_info": { 45 | "codemirror_mode": { 46 | "name": "ipython", 47 | "version": 3 48 | }, 49 | "file_extension": ".py", 50 | "mimetype": "text/x-python", 51 | "name": "python", 52 | "nbconvert_exporter": "python", 53 | "pygments_lexer": "ipython3", 54 | "version": "3.9.19" 55 | } 56 | }, 57 | "nbformat": 4, 58 | "nbformat_minor": 4 59 | } 60 | -------------------------------------------------------------------------------- /tutorials/W2D3_Microlearning/W2D3_Outro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {} 16 | }, 17 | "source": [ 18 | "# Daily survey\n", 19 | "\n", 20 | "Don't forget to complete your reflections and content check in the daily survey! Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 21 | "\n", 22 | "\"button" 23 | ] 24 | } 25 | ], 26 | "metadata": { 27 | "colab": { 28 | "collapsed_sections": [], 29 | "include_colab_link": true, 30 | "name": "W2D3_Outro", 31 | "provenance": [], 32 | "toc_visible": true 33 | }, 34 | "kernel": { 35 | "display_name": "Python 3", 36 | "language": "python", 37 | "name": "python3" 38 | }, 39 | "kernelspec": { 40 | "display_name": "Python 3 (ipykernel)", 41 | "language": "python", 42 | "name": "python3" 43 | }, 44 | "language_info": { 45 | "codemirror_mode": { 46 | "name": "ipython", 47 | "version": 3 48 | }, 49 | "file_extension": ".py", 50 | "mimetype": "text/x-python", 51 | "name": "python", 52 | "nbconvert_exporter": "python", 53 | "pygments_lexer": "ipython3", 54 | "version": "3.9.19" 55 | } 56 | }, 57 | "nbformat": 4, 58 | "nbformat_minor": 4 59 | } 60 | -------------------------------------------------------------------------------- /tutorials/W2D3_Microlearning/further_reading.md: -------------------------------------------------------------------------------- 1 | # Suggested further readings 2 | 3 | - Arna Ghosh et al. (2023). ["How gradient estimator variance and bias impact learning in neural networks"](https://openreview.net/forum?id=EBC60mxBwyw). 4 | - Blake A. Richards et al. (2019). ["A deep learning framework for neuroscience"](https://www.nature.com/articles/s41593-019-0520-2). 5 | - Paul Züge et al. (2023). ["Weight versus Node Perturbation Learning in Temporally Extended Tasks: Weight Perturbation Often Performs Similarly or Better"](https://journals.aps.org/prx/abstract/10.1103/PhysRevX.13.021006). 6 | - Timothy P. Lillicrap et al. (2016). ["Random synaptic feedback weights support error backpropagation for deep learning"](https://www.nature.com/articles/ncomms13276). 7 | - Stephen Grossberg (1987). 
["Competitive learning: From interactive activation to adaptive resonance"](https://www.sciencedirect.com/science/article/pii/S0364021387800253). 8 | -------------------------------------------------------------------------------- /tutorials/W2D3_Microlearning/instructor/W2D3_Outro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {} 16 | }, 17 | "source": [ 18 | "# Daily survey\n", 19 | "\n", 20 | "Don't forget to complete your reflections and content check in the daily survey! Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 21 | "\n", 22 | "\"button" 23 | ] 24 | } 25 | ], 26 | "metadata": { 27 | "colab": { 28 | "collapsed_sections": [], 29 | "include_colab_link": true, 30 | "name": "W2D3_Outro", 31 | "provenance": [], 32 | "toc_visible": true 33 | }, 34 | "kernel": { 35 | "display_name": "Python 3", 36 | "language": "python", 37 | "name": "python3" 38 | }, 39 | "kernelspec": { 40 | "display_name": "Python 3 (ipykernel)", 41 | "language": "python", 42 | "name": "python3" 43 | }, 44 | "language_info": { 45 | "codemirror_mode": { 46 | "name": "ipython", 47 | "version": 3 48 | }, 49 | "file_extension": ".py", 50 | "mimetype": "text/x-python", 51 | "name": "python", 52 | "nbconvert_exporter": "python", 53 | "pygments_lexer": "ipython3", 54 | "version": "3.9.19" 55 | } 56 | }, 57 | "nbformat": 4, 58 | "nbformat_minor": 4 59 | } 60 | -------------------------------------------------------------------------------- /tutorials/W2D3_Microlearning/solutions/W2D3_Tutorial1_Solution_01f74aae.py: -------------------------------------------------------------------------------- 1 | 2 | class WeightPerturbMLP(MLP): 3 | """ 4 | A multilayer perceptron that is capable of learning through weight perturbation 5 | """ 6 | 7 | def perturb(self, rng, inputs, targets, noise=1.0): 8 | """ 9 | Calculates the weight updates for perturbation learning, using noise with the given SD 10 | """ 11 | 12 | # get the random perturbations 13 | delta_W_h = rng.normal(scale=noise, size=self.W_h.shape) 14 | delta_W_y = rng.normal(scale=noise, size=self.W_y.shape) 15 | 16 | # calculate the loss with and without the perturbations 17 | loss_now = self.mse_loss(rng, inputs, targets) 18 | loss_per = self.mse_loss(rng, inputs, targets, self.W_h + delta_W_h, self.W_y + delta_W_y) 19 | 20 | # updates 21 | delta_loss = loss_now - loss_per 22 | W_h_update = delta_loss * delta_W_h / noise ** 2 23 | W_y_update = delta_loss * delta_W_y / noise ** 2 24 | return W_h_update, W_y_update -------------------------------------------------------------------------------- /tutorials/W2D3_Microlearning/solutions/W2D3_Tutorial1_Solution_95265523.py: -------------------------------------------------------------------------------- 1 | 2 | class KolenPollackMLP(MLP): 3 | """ 4 | A multilayer perceptron that is capable of learning through the Kolen-Pollack algorithm 5 | """ 6 | 7 | def kolepoll(self, rng, inputs, targets, eta_back=0.01): 8 | """ 9 | Calculates the weight updates for Kolen-Pollack learning 10 | """ 11 | 12 | # do a forward pass 13 | (hidden, output) = self.inference(rng, inputs) 14 | 15 | # calculate the updates for the forward weights 16 | error = targets - output 17 | delta_W_h = np.dot(np.dot(self.B, error *
self.act_deriv(output)) * self.act_deriv(hidden), \ 18 | add_bias(inputs).transpose()) 19 | delta_err = np.dot(error * self.act_deriv(output), add_bias(hidden).transpose()) 20 | delta_W_y = delta_err - 0.1 * self.W_y 21 | 22 | # calculate the updates for the backwards weights and implement them 23 | delta_B = delta_err[:, :-1].transpose() - 0.1 * self.B 24 | self.B += eta_back * delta_B 25 | return (delta_W_h, delta_W_y) -------------------------------------------------------------------------------- /tutorials/W2D3_Microlearning/solutions/W2D3_Tutorial1_Solution_d1bc17ea.py: -------------------------------------------------------------------------------- 1 | 2 | class FeedbackAlignmentMLP(MLP): 3 | """ 4 | A multilayer perceptron that is capable of learning through the Feedback Alignment algorithm 5 | """ 6 | 7 | # function for calculating feedback alignment updates 8 | def feedback(self, rng, inputs, targets): 9 | """ 10 | Calculates the weight updates for feedback alignment learning 11 | """ 12 | 13 | # do a forward pass 14 | hidden, output = self.inference(rng, inputs) 15 | 16 | # calculate the updates 17 | error = targets - output 18 | delta_W_h = np.dot(np.dot(self.B, error * self.act_deriv(output)) * self.act_deriv(hidden), 19 | add_bias(inputs).transpose()) 20 | delta_W_y = np.dot(error * self.act_deriv(output), add_bias(hidden).transpose()) 21 | 22 | return delta_W_h, delta_W_y -------------------------------------------------------------------------------- /tutorials/W2D3_Microlearning/static/feedback_alignment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D3_Microlearning/static/feedback_alignment.png -------------------------------------------------------------------------------- /tutorials/W2D3_Microlearning/static/network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D3_Microlearning/static/network.png -------------------------------------------------------------------------------- /tutorials/W2D3_Microlearning/student/W2D3_Outro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {} 16 | }, 17 | "source": [ 18 | "# Daily survey\n", 19 | "\n", 20 | "Don't forget to complete your reflections and content check in the daily survey! 
Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 21 | "\n", 22 | "\"button" 23 | ] 24 | } 25 | ], 26 | "metadata": { 27 | "colab": { 28 | "collapsed_sections": [], 29 | "include_colab_link": true, 30 | "name": "W2D3_Outro", 31 | "provenance": [], 32 | "toc_visible": true 33 | }, 34 | "kernel": { 35 | "display_name": "Python 3", 36 | "language": "python", 37 | "name": "python3" 38 | }, 39 | "kernelspec": { 40 | "display_name": "Python 3 (ipykernel)", 41 | "language": "python", 42 | "name": "python3" 43 | }, 44 | "language_info": { 45 | "codemirror_mode": { 46 | "name": "ipython", 47 | "version": 3 48 | }, 49 | "file_extension": ".py", 50 | "mimetype": "text/x-python", 51 | "name": "python", 52 | "nbconvert_exporter": "python", 53 | "pygments_lexer": "ipython3", 54 | "version": "3.9.19" 55 | } 56 | }, 57 | "nbformat": 4, 58 | "nbformat_minor": 4 59 | } 60 | -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/W2D4_Outro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "execution": {} 7 | }, 8 | "source": [ 9 | "\"Open   \"Open" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "execution": {}, 16 | "pycharm": { 17 | "name": "#%% md\n" 18 | } 19 | }, 20 | "source": [ 21 | "# Daily survey\n", 22 | "\n", 23 | "Don't forget to complete your reflections and content check in the daily survey! Please be patient after logging in as there is a small delay before you will be redirected to the survey.\n", 24 | "\n", 25 | "\"button" 26 | ] 27 | } 28 | ], 29 | "metadata": { 30 | "colab": { 31 | "collapsed_sections": [], 32 | "include_colab_link": true, 33 | "name": "W2D4_Outro", 34 | "provenance": [], 35 | "toc_visible": true 36 | }, 37 | "kernel": { 38 | "display_name": "Python 3", 39 | "language": "python", 40 | "name": "python3" 41 | }, 42 | "kernelspec": { 43 | "display_name": "Python 3 (ipykernel)", 44 | "language": "python", 45 | "name": "python3" 46 | }, 47 | "language_info": { 48 | "codemirror_mode": { 49 | "name": "ipython", 50 | "version": 3 51 | }, 52 | "file_extension": ".py", 53 | "mimetype": "text/x-python", 54 | "name": "python", 55 | "nbconvert_exporter": "python", 56 | "pygments_lexer": "ipython3", 57 | "version": "3.9.19" 58 | } 59 | }, 60 | "nbformat": 4, 61 | "nbformat_minor": 4 62 | } 63 | -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/further_reading.md: -------------------------------------------------------------------------------- 1 | # Suggested further readings 2 | 3 | ## Tutorial 1: The problem of changing data distributions 4 | 5 | - [Environment and Distribution Shift](https://d2l.ai/chapter_linear-classification/environment-and-distribution-shift.html) 6 | 7 | ## Tutorial 2: Continual learning 8 | 9 | - [Continual Lifelong Learning with Neural Networks: A Review](https://arxiv.org/pdf/1802.07569) 10 | - [ContinualAI](https://www.continualai.org/) 11 | - [A Comprehensive Survey of Continual Learning: Theory, Method and Application](https://arxiv.org/pdf/2302.00487) 12 | - [Brain-inspired replay for continual learning with artificial neural networks](https://www.nature.com/articles/s41467-020-17866-2) 13 | 14 | ## Tutorial 3: Meta-learning 15 | 16 | - [Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks](https://arxiv.org/abs/1703.03400) 17 | 
- [An Interactive Introduction to Model-Agnostic Meta-Learning](https://interactive-maml.github.io/maml.html) 18 | 19 | ## Tutorial 4: Biological meta reinforcement learning 20 | 21 | - [Meta-Learning by the Baldwin Effect](https://arxiv.org/pdf/1806.07917) 22 | - [Prefrontal cortex as a meta-reinforcement learning system](https://www.nature.com/articles/s41593-018-0147-8) 23 | - [Reinforcement Learning, Fast and Slow](https://www.cell.com/action/showPdf?pii=S1364-6613%2819%2930061-0) 24 | 25 | ## Tutorial 5: Replay 26 | 27 | - [Experience Replay for Continual Learning](https://arxiv.org/abs/1811.11682) -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial1_Solution_0b85ba35.py: -------------------------------------------------------------------------------- 1 | 2 | #take only summer data 3 | summer_days = np.expand_dims(days[151:243], 1) 4 | summer_prices = prices[151:243] 5 | 6 | #divide data into train and test sets 7 | summer_days_train, summer_days_test, summer_prices_train, summer_prices_test = train_test_split(summer_days, summer_prices, random_state = 42) 8 | 9 | #apply normalization for days 10 | summer_days_mean, summer_days_std = np.mean(summer_days), np.std(summer_days) 11 | summer_days_train_norm = (summer_days_train - summer_days_mean) / summer_days_std 12 | summer_days_test_norm = (summer_days_test - summer_days_mean) / summer_days_std 13 | 14 | #define MLP 15 | model = MLPRegressor(hidden_layer_sizes=(100, 100), max_iter=10000, random_state = 42, solver = "lbfgs") # LBFGS is better to use when there is a small amount of data 16 | 17 | #train MLP 18 | model.fit(summer_days_train_norm, summer_prices_train) 19 | 20 | #evaluate MLP on test data 21 | print(f"R-squared value is: {model.score(summer_days_test_norm, summer_prices_test):.02f}.") -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial1_Solution_35e7f912.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: Why do you think the R-squared value is still higher for this particular 4 | example of concept shift compared to the covariate shift? 5 | 6 | In this example, concept shift preserves the annual and weekly trends (we can see that 7 | predictions oscillate the same way as the shifted function). Thus, the R-squared value is relatively high. 8 | """; -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial1_Solution_41fdc825.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: Does the amount of covariate shift impact the model's performance? 4 | What happens at the borders of the training period—does the model still capture the 5 | dynamics right before and after it? 6 | 7 | Indeed, the bigger the covariate shift (the more distinct the days are), the worse 8 | the performance we observe. In both border cases, the model performs poorly; what is 9 | more, even on the fraction of the training data near these regions, we can observe that 10 | the model starts to lose the desired dynamics.
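To make this concrete, here is a small illustrative sketch (mirroring the tutorial's MLPRegressor setup, but with a made-up price function and shift values chosen only for demonstration): the further the test days are shifted away from the training support, the lower the R-squared one would typically observe.

import numpy as np
from sklearn.neural_network import MLPRegressor

f = lambda d: 0.005 * d**2 + 0.1 * np.sin(np.pi * d) + 1  # hypothetical price curve

train_days = np.linspace(-5, 5, 200)[:, None]
model = MLPRegressor(hidden_layer_sizes=(100, 100), max_iter=10000,
                     random_state=42, solver="lbfgs")
model.fit(train_days, f(train_days).ravel())

for shift in (0, 5, 10, 15):  # larger shift = stronger covariate shift
    test_days = train_days + shift
    print(shift, round(model.score(test_days, f(test_days).ravel()), 2))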
11 | """; -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial1_Solution_4dea007f.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: How would you qualitatively evaluate the model's performance on autumn data? 4 | Does it capture the annual trend? Does it capture the weekly trend? 5 | 6 | Model predictions are completely invariant to the weekly seasonality of the data, though 7 | they somewhat capture the increasing trend. Thus, the model definitely can't be used to make 8 | quality predictions on a daily basis. 9 | """; -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial1_Solution_59146784.py: -------------------------------------------------------------------------------- 1 | 2 | #define variables 3 | A = .005 4 | B = 0.1 5 | phi = 0 6 | C = 1 7 | 8 | #define days (observe that these are not 1, ..., 365 but proxy values that keep the model function neat) 9 | days = np.arange(-26, 26 + 1/7, 1/7) #defined as fractions of a week 10 | 11 | prices = A * days**2 + B * np.sin(np.pi * days + phi) + C 12 | 13 | #plot relation between days and prices 14 | with plt.xkcd(): 15 | plt.plot(days, prices) 16 | plt.xlabel('Week') 17 | plt.ylabel('Price') 18 | plt.show() -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial2_Solution_2ba000ea.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: 1. Note that the number of epochs is doubled in sequential training mode compared to interspersed mode. Why is this the case? 4 | 5 | Each epoch only contains summer or autumn data and is only of size $K$, whereas interspersed training epochs contain $2K$ 6 | data points. In order to expose both models to the same amount of data, the number of epochs must be doubled. 7 | 8 | 2. Which training scheduler performed better in this particular example? Why do you think this occurred? 9 | 10 | Interspersed training works better. In sequential training, the model constantly shifts from learning one type 11 | of relation to another (which is essentially what we tried during the first section of the tutorial); still, 12 | sequential joint training helps here because the data source changes each epoch, which lets the model keep 13 | remembering both data distributions. With interspersed training, this ability to remember both distributions is 14 | even stronger, as both are represented in each epoch.
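A minimal sketch of the two schedules (illustrative only: toy stand-ins for the summer/autumn data, hyperparameters chosen for brevity, and note that `partial_fit` requires the 'adam' or 'sgd' solver). With enough epochs, one would typically see the sequential model score worse on task A after training on task B, while the interspersed model retains both.

import numpy as np
from sklearn.neural_network import MLPRegressor

rng = np.random.default_rng(0)
x_a = rng.uniform(-1, 0, (100, 1)); y_a = np.sin(3 * x_a).ravel()  # "summer" stand-in
x_b = rng.uniform(0, 1, (100, 1));  y_b = np.cos(3 * x_b).ravel()  # "autumn" stand-in

def make_model():
    return MLPRegressor(hidden_layer_sizes=(50,), learning_rate_init=1e-2, random_state=0)

# sequential: all epochs on task A, then all on task B (twice the epochs, same total data)
seq = make_model()
for _ in range(50): seq.partial_fit(x_a, y_a)
for _ in range(50): seq.partial_fit(x_b, y_b)

# interspersed: both tasks present in every epoch
inter = make_model()
x_ab, y_ab = np.vstack([x_a, x_b]), np.concatenate([y_a, y_b])
for _ in range(50): inter.partial_fit(x_ab, y_ab)

print("sequential, task A R^2: ", round(seq.score(x_a, y_a), 2))
print("interspersed, task A R^2:", round(inter.score(x_a, y_a), 2))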
15 | """; -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial2_Solution_c3936e0c.py: -------------------------------------------------------------------------------- 1 | 2 | # Initial r-squared calculations 3 | summer_r_squared = [base_model.score(summer_days_test_norm, summer_prices_test)] 4 | autumn_r_squared = [base_model.score(autumn_days_test_norm, autumn_prices_test)] 5 | num_epochs = 10 6 | 7 | # Progress bar integration with tqdm 8 | for _ in tqdm(range(num_epochs - 1), desc="Training Progress"): 9 | # Fit new data for one epoch 10 | base_model.partial_fit(autumn_days_train_norm, autumn_prices_train) 11 | 12 | # Calculate r-squared values on test sets 13 | summer_r_squared.append(base_model.score(summer_days_test_norm, summer_prices_test)) 14 | autumn_r_squared.append(base_model.score(autumn_days_test_norm, autumn_prices_test)) 15 | 16 | model = base_model 17 | plot_performance(num_epochs, summer_r_squared, autumn_r_squared) 18 | 19 | #predict for test sets 20 | summer_prices_predictions = model.predict(summer_days_test_norm) 21 | autumn_prices_predictions = model.predict(autumn_days_test_norm) 22 | 23 | plot_summer_autumn_predictions(summer_prices_predictions, autumn_prices_predictions) -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_2753b5eb.py: -------------------------------------------------------------------------------- 1 | 2 | def finetune(model, x_finetune, y_finetune, finetune_gradient_steps): 3 | """ 4 | Take a fixed number of gradient steps for the given x_finetune and y_finetune. 5 | 6 | Inputs: 7 | - model (MetaLearningModel): trained meta-learning model. 8 | - x_finetune (torch.tensor): features (days) of the specific task. 9 | - y_finetune (torch.tensor): outcomes (prices) of the specific task. 10 | - finetune_gradient_steps (int): number of gradient steps to perform for this task. 11 | """ 12 | #apply normalization on days 13 | x_finetune = (x_finetune - model.mean) / model.std 14 | 15 | #need to create a clone so that we preserve the meta-learnt parameters 16 | clone = model.deep_clone_model(model.model) 17 | optimizer = optim.SGD(clone.parameters(), lr = model.inner_learning_rate) 18 | 19 | for _ in range(finetune_gradient_steps): 20 | optimizer.zero_grad() 21 | loss = model.loss_fn(clone(x_finetune), y_finetune) 22 | loss.backward() 23 | optimizer.step() 24 | 25 | return clone -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_576c8d87.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: Do you think these particular tasks are similar? Do you expect the model to learn their general nature? 4 | 5 | Though visually quite distinct, they share the same underlying dynamics: a fast oscillation modeled by a 6 | sinusoid and a general increasing trend from the quadratic term. We expect the model to learn these shared 7 | patterns and to adapt quickly to each particular case.
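To make "shared underlying dynamics" concrete, here is a sketch of how such a task family can be sampled (the coefficient ranges are invented for illustration; the tutorial's actual ranges may differ). Every task has the same quadratic-plus-sinusoid structure, and only the coefficients change, which is exactly what meta-learning is expected to exploit.

import numpy as np

rng = np.random.default_rng(0)

def sample_task():
    # one task = one draw of coefficients for the shared functional form
    A, B = rng.uniform(0.001, 0.01), rng.uniform(0.05, 0.5)
    phi, C = rng.uniform(0, np.pi), rng.uniform(0, 2)
    return lambda days: A * days**2 + B * np.sin(np.pi * days + phi) + C

days = np.arange(-26, 26, 1 / 7)
prices = np.stack([sample_task()(days) for _ in range(5)])  # 5 distinct-looking tasks, one family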
8 | """; -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial4_Solution_2bbabefe.py: -------------------------------------------------------------------------------- 1 | 2 | def update_population(env, optimizer_func, population, parents_population, best_score, new_generation_new_individuals = 5): 3 | """ 4 | Updates the population with new individuals, which are the result of crossover and mutation between two parent agents. 5 | Removes the same amount of random agents from the population. 6 | 7 | Inputs: 8 | - env (HarlowExperimentEnv): environment. 9 | - optimizer_func (torch.Optim): optimizer to use for training. 10 | - population (list): current population which consists of tuples (agent, score). 11 | - parents_population (list): parent individuals (a subset of the current population) used for creating new individuals. 12 | - best_score (int): the best individual score registered in the population so far. 13 | - new_generation_new_individuals (int, default = 5): the number of individuals to create (and the old ones to remove). 14 | """ 15 | 16 | # Create new individuals with progress bar 17 | new_individuals = [] 18 | for _ in tqdm(range(new_generation_new_individuals), desc="Creating New Individuals"): 19 | agent1, agent2 = random.choices(parents_population, k=2) 20 | new_agent = create_new_agent(agent1[0], agent2[0]) 21 | score, _ = evaluate_individual(env, new_agent, optimizer_func) 22 | # Evaluate whether best score has increased 23 | best_score = max(score, best_score) 24 | new_individuals.append((new_agent, score)) 25 | 26 | # Remove random old individuals with progress bar 27 | for _ in tqdm(range(new_generation_new_individuals), desc="Removing Old Individuals"): 28 | population.pop(random.randint(0, len(population) - 1)) 29 | 30 | return population + new_individuals, best_score -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial4_Solution_fa5c39c8.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Discussion: What should be changed in the implementation approach (code base) to reflect Lamarckian evolution? 4 | 5 | For Lamarckian evolution, we might want to change the base parameters of the agent each time it learns a new task; this way, 6 | the learned benefits are inherited by the next generation."""; -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial5_Solution_178f638f.py: -------------------------------------------------------------------------------- 1 | 2 | class ReplayBuffer(): 3 | def __init__(self, max_experience = 250, num_trials = 100): 4 | """Initialize replay buffer. 5 | Notice that when the replay buffer is full and a new experience must be remembered, it replaces existing ones, starting 6 | from the oldest. 7 | 8 | Inputs: 9 | - max_experience (int, default = 250): the maximum number of experiences (gradient steps) that can be stored. 10 | - num_trials (int, default = 100): the number of times the agent is exposed to the environment per gradient step during training.
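Illustrative usage of the finished class (a hypothetical trace, assuming one scalar loss per gradient step):

buffer = ReplayBuffer(max_experience=3)
for loss in [0.9, 0.7, 0.5, 0.4]:  # the 4th write overwrites the oldest entry (0.9)
    buffer.write_experience(loss)
replayed = [buffer.read_experience() for _ in range(3)]  # -> [0.4, 0.7, 0.5]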
11 | """ 12 | self.max_experience = max_experience 13 | 14 | #variables which fully describe the experience 15 | self.losses = [0 for _ in range(self.max_experience)] 16 | 17 | #index of the memory cell to point to (write or overwrite experience) 18 | self.writing_pointer = 0 19 | self.reading_pointer = 0 20 | 21 | #to keep track of how many experiences there have been 22 | self.num_experience = 0 23 | 24 | def write_experience(self, loss): 25 | """Write new experience.""" 26 | self.losses[self.writing_pointer] = loss 27 | 28 | #so that the pointer stays in range of max_experience and points to the oldest experience when full 29 | self.writing_pointer = (self.writing_pointer + 1) % self.max_experience 30 | self.num_experience += 1 31 | 32 | def read_experience(self): 33 | """Read existing experience.""" 34 | loss = self.losses[self.reading_pointer] 35 | 36 | #so that the pointer stays in range of self.max_experience and points to the oldest experience when full 37 | self.reading_pointer = (self.reading_pointer + 1) % min(self.max_experience, self.num_experience) 38 | return loss -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/static/W2D4_Tutorial1_Solution_59146784_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D4_Macrolearning/static/W2D4_Tutorial1_Solution_59146784_0.png -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_07232036_53.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_07232036_53.png -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_07232036_54.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_07232036_54.png -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_c3936e0c_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_c3936e0c_7.png -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_c3936e0c_8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_c3936e0c_8.png -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_e2bacfd6_54.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_e2bacfd6_54.png -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_e2bacfd6_55.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D4_Macrolearning/static/W2D4_Tutorial2_Solution_e2bacfd6_55.png -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/static/evolution.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D4_Macrolearning/static/evolution.png -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/static/feedback_alignment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D4_Macrolearning/static/feedback_alignment.png -------------------------------------------------------------------------------- /tutorials/W2D4_Macrolearning/static/network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D4_Macrolearning/static/network.png -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/solutions/W2D5_Tutorial1_Solution_20a869fc.py: -------------------------------------------------------------------------------- 1 | 2 | def HOSS_evaluate(X, mu, Sigma, Aprior, Wprior): 3 | """ 4 | Inference on 2D Bayes net for asymmetric inference on presence vs. absence. 
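Inputs (inferred from the function body below; the example values are illustrative only):
- X (length-2 array-like): the observed 2D sample.
- mu (list of length-2 arrays): means of each world state W, with mu[0] the absence state.
- Sigma: covariance of the Gaussian likelihood, shared across world states.
- Aprior (float): prior probability of the 'aware' state A=1.
- Wprior (array-like): prior over the non-absent world states, given awareness.

Returns the posteriors over W and A and the KL divergence (belief update) at each level, e.g.:

post_W, post_A, KL_W, KL_A = HOSS_evaluate(
    X=[0.5, 0.5], mu=[[0, 0], [1, 0], [0, 1]],
    Sigma=np.eye(2) * 0.5, Aprior=0.5, Wprior=[0.5, 0.5])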
5 | """ 6 | 7 | # Initialise variables and conditional prob tables 8 | p_A = np.array([1 - Aprior, Aprior]) # prior on awareness state A 9 | p_W_a1 = np.append(0, Wprior) # conditional prior over world states W given aware (A=1); first entry is absence 10 | p_W_a0 = np.append(1, np.zeros(len(Wprior))) # conditional prior over world states W given unaware (A=0); first entry is absence 11 | p_W = (p_W_a1 + p_W_a0) / 2 # prior on W marginalising over A (for KL) 12 | 13 | # Compute likelihood of observed X for each possible W (P(X|mu_w, Sigma)) 14 | lik_X_W = np.array([multivariate_normal.pdf(X, mean=mu_i, cov=Sigma) for mu_i in mu]) 15 | p_X_W = lik_X_W / lik_X_W.sum() # normalise over W for numerical convenience (the overall scale cancels below) 16 | 17 | # Combine with the conditional prior over each world state w given awareness state A 18 | lik_W_A = np.vstack((p_X_W * p_W_a0 * p_A[0], p_X_W * p_W_a1 * p_A[1])) 19 | post_A = lik_W_A.sum(axis=1) # sum over W 20 | post_A = post_A / post_A.sum() # normalise 21 | 22 | # Posterior over W (P(W|X=x) marginalising over A) 23 | post_W = lik_W_A.sum(axis=0) # sum over A 24 | post_W = post_W / post_W.sum() # normalise 25 | 26 | # KL divergences 27 | KL_W = (post_W * np.log(post_W / p_W)).sum() 28 | KL_A = (post_A * np.log(post_A / p_A)).sum() 29 | 30 | return post_W, post_A, KL_W, KL_A -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/solutions/W2D5_Tutorial1_Solution_a617d707.py: -------------------------------------------------------------------------------- 1 | """ 2 | RIMs are built as a collection of modules, each operating independently. They don't 3 | continuously interact with each other; instead, they primarily operate separately and 4 | only occasionally connect through attention. This attention mechanism allows a module to 5 | focus on specific, relevant parts of the input data when necessary. 6 | 7 | Because each module in a RIM can focus on learning different features or aspects of the 8 | data, it becomes very adaptable. For instance, one module might become specialized in 9 | recognizing edges, another in textures, and so on. When the model encounters a new 10 | environment or different image sizes, like 19x19 or 24x24, each module uses its 11 | specialized knowledge to handle the changes in its specific area. This modular and 12 | focused approach allows RIMs to maintain performance across varied conditions, 13 | demonstrating out-of-distribution generalization.
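As a minimal sketch of the core mechanism described above (illustrative dimensions and scoring only, not the paper's exact architecture): each module scores the input with its own query, and only the top-k modules are activated on a given step, which is what keeps the modules sparse, specialized, and largely independent.

import torch

torch.manual_seed(0)
n_modules, d, k = 6, 16, 2  # k = number of modules activated per step

module_queries = torch.randn(n_modules, d)  # one query per module
input_key = torch.randn(d)                  # key computed from the current input

# modules compete via attention; only the k best-matching ones update
scores = module_queries @ input_key / d**0.5
active = torch.topk(scores, k).indices
print("active modules this step:", active.tolist())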
14 | """ -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/solutions/W2D5_Tutorial1_Solution_a926812a.py: -------------------------------------------------------------------------------- 1 | class SecondOrderNetwork(nn.Module): 2 | def __init__(self, use_gelu): 3 | super(SecondOrderNetwork, self).__init__() 4 | # Define a linear layer for comparing the difference between input and output of the first-order network 5 | self.comparison_layer = nn.Linear(100, 100) 6 | 7 | # Linear layer for determining wagers, mapping from 100 features to a single output 8 | self.wager = nn.Linear(100, 1) 9 | 10 | # Dropout layer to prevent overfitting by randomly setting input units to 0 with a probability of 0.5 during training 11 | self.dropout = nn.Dropout(0.5) 12 | 13 | # Select activation function based on the `use_gelu` flag 14 | self.activation = nn.functional.gelu if use_gelu else torch.relu 15 | 16 | # Additional activation functions for potential use in network operations 17 | self.sigmoid = torch.sigmoid 18 | 19 | self.softmax = nn.Softmax() 20 | 21 | # Initialize the weights of the network 22 | self._init_weights() 23 | 24 | def _init_weights(self): 25 | # Uniformly initialize weights for the comparison and wager layers 26 | init.uniform_(self.comparison_layer.weight, -1.0, 1.0) 27 | init.uniform_(self.wager.weight, 0.0, 0.1) 28 | 29 | def forward(self, first_order_input, first_order_output): 30 | # Calculate the difference between the first-order input and output 31 | comparison_matrix = first_order_input - first_order_output 32 | 33 | #Another option is to directly calculate the per unit MSE to use as input for the comparator matrix 34 | #comparison_matrix = nn.MSELoss(reduction='none')(first_order_output, first_order_input) 35 | 36 | # Pass the difference through the comparison layer and apply the chosen activation function 37 | comparison_out = self.dropout(self.activation(self.comparison_layer(comparison_matrix))) 38 | 39 | # Calculate the wager value, applying sigmoid activation to the output of the wager layer (dropout was applied above) 40 | wager = self.sigmoid(self.wager(comparison_out)) 41 | 42 | return wager -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/solutions/W2D5_Tutorial1_Solution_bfd5f466.py: -------------------------------------------------------------------------------- 1 | """ 2 | They both agree that ignition happens, i.e. that it's a key empirical phenomenon that needs explaining. 3 | The difference is whether it is assumed to be constitutive of consciousness, or a by-product of computations 4 | related to consciousness. GWS says that it's constitutive - global broadcast underpins conscious access, 5 | and ignition is the neural signature of broadcast. HOSS says that ignition is a by-product of computations 6 | related to consciousness. Under HOSS, consciousness is tied to the higher-order evaluation of the 7 | reliability/strength of first-order state(s) - and when a first-order state is reliable / precise, 8 | it will also be associated with widespread updates to the internal model which manifest as ignition.
9 | """ -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/solutions/W2D5_Tutorial1_Solution_f1250f89.py: -------------------------------------------------------------------------------- 1 | class SharedWorkspace(nn.Module): 2 | 3 | def __init__(self, num_specialists, hidden_dim, num_memory_slots, memory_slot_dim): 4 | super().__init__() 5 | self.num_specialists = num_specialists 6 | self.hidden_dim = hidden_dim 7 | self.num_memory_slots = num_memory_slots 8 | self.memory_slot_dim = memory_slot_dim 9 | self.workspace_memory = nn.Parameter(torch.randn(num_memory_slots, memory_slot_dim)) 10 | 11 | # Attention mechanism components for writing to the workspace 12 | self.key = nn.Linear(hidden_dim, memory_slot_dim) 13 | self.query = nn.Linear(memory_slot_dim, memory_slot_dim) 14 | self.value = nn.Linear(hidden_dim, memory_slot_dim) 15 | 16 | def write_to_workspace(self, specialists_states): 17 | # Flatten specialists' states if they're not already 18 | specialists_states = specialists_states.view(-1, self.hidden_dim) 19 | 20 | # Compute key, query, and value 21 | keys = self.key(specialists_states) 22 | query = self.query(self.workspace_memory) 23 | values = self.value(specialists_states) 24 | 25 | # Compute attention scores and apply softmax 26 | attention_scores = torch.matmul(query, keys.transpose(-2, -1)) / (self.memory_slot_dim ** 0.5) 27 | attention_probs = F.softmax(attention_scores, dim=-1) 28 | 29 | # Update workspace memory with weighted sum of values 30 | updated_memory = torch.matmul(attention_probs, values) 31 | self.workspace_memory = nn.Parameter(updated_memory) 32 | 33 | return self.workspace_memory 34 | 35 | def forward(self, specialists_states): 36 | updated_memory = self.write_to_workspace(specialists_states) 37 | return updated_memory -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/solutions/W2D5_Tutorial3_Solution_a926812a.py: -------------------------------------------------------------------------------- 1 | class SecondOrderNetwork(nn.Module): 2 | def __init__(self, use_gelu): 3 | super(SecondOrderNetwork, self).__init__() 4 | # Define a linear layer for comparing the difference between input and output of the first-order network 5 | self.comparison_layer = nn.Linear(100, 100) 6 | 7 | # Linear layer for determining wagers, mapping from 100 features to a single output 8 | self.wager = nn.Linear(100, 1) 9 | 10 | # Dropout layer to prevent overfitting by randomly setting input units to 0 with a probability of 0.5 during training 11 | self.dropout = nn.Dropout(0.5) 12 | 13 | # Select activation function based on the `use_gelu` flag 14 | self.activation = nn.functional.gelu if use_gelu else torch.relu 15 | 16 | # Additional activation functions for potential use in network operations 17 | self.sigmoid = torch.sigmoid 18 | 19 | self.softmax = nn.Softmax() 20 | 21 | # Initialize the weights of the network 22 | self._init_weights() 23 | 24 | def _init_weights(self): 25 | # Uniformly initialize weights for the comparison and wager layers 26 | init.uniform_(self.comparison_layer.weight, -1.0, 1.0) 27 | init.uniform_(self.wager.weight, 0.0, 0.1) 28 | 29 | def forward(self, first_order_input, first_order_output): 30 | # Calculate the difference between the first-order input and output 31 | comparison_matrix = first_order_input - first_order_output 32 | 33 | #Another option is to directly calculate the per unit MSE to use as input for the comparator matrix 34 | #comparison_matrix = nn.MSELoss(reduction='none')(first_order_output, first_order_input) 35 | 36 | # Pass the difference through the comparison layer and apply the chosen activation function 37 | comparison_out = self.dropout(self.activation(self.comparison_layer(comparison_matrix))) 38 | 39 | # Calculate the wager value, applying sigmoid activation to the output of the wager layer (dropout was applied above) 40 | wager = self.sigmoid(self.wager(comparison_out)) 41 | 42 | return wager -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/solutions/W2D5_Tutorial3_Solution_f903bbb4.py: -------------------------------------------------------------------------------- 1 | """ 2 | At the level of perceptual states W, there is a substantial asymmetry in the KL-divergence expected when the 3 | model says ‘seen’ vs. ‘unseen’ (lefthand panel). This is due to the large belief updates invoked in the 4 | perceptual layer W by samples that deviate from the lower lefthand corner - from absence. In contrast, when 5 | we compute KL-divergence for the A-level (righthand panel), the level of prediction error is symmetric across 6 | seen and unseen decisions, leading to "hot" zones both at the upper righthand (present) and lower lefthand 7 | (absent) corners of the 2D space. 8 | 9 | Intuitively, this means that at the W-level, there's a noticeable difference in the KL-divergence values 10 | between "seen" and "unseen" predictions. This large difference is mainly due to significant updates in the 11 | model's beliefs at this level when the detected samples are far from what is expected under the condition of 12 | "absence." However, when we analyze the KL-divergence at the A-level, the discrepancies in prediction errors 13 | between "seen" and "unseen" are balanced. This creates equally strong responses in the model, whether something 14 | is detected or not detected. 15 | 16 | We can also sort the KL-divergences as a function of whether the model "reported" presence or absence. As 17 | can be seen in the bar plots below, there is more asymmetry in the prediction error at the W compared to the 18 | A levels.
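An illustrative way to probe this asymmetry numerically (reusing the HOSS_evaluate function from Tutorial 1 above; all parameter values here are made up, and scipy's multivariate_normal must be in scope): sweep the observation X over a grid and compare how much the two belief updates vary.

import numpy as np

grid = np.linspace(0, 2, 21)
KLs = np.array([HOSS_evaluate([x0, x1], mu=[[0, 0], [1, 0], [0, 1]],
                              Sigma=np.eye(2) * 0.5, Aprior=0.5,
                              Wprior=[0.5, 0.5])[2:]
                for x0 in grid for x1 in grid])

# KL_W should vary far more across the grid than KL_A, reflecting the
# asymmetry at the perceptual level described above
print("spread of KL_W:", KLs[:, 0].std(), " spread of KL_A:", KLs[:, 1].std())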
19 | 20 | """ -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/static/HOSS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D5_Mysteries/static/HOSS.png -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/static/RIMs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D5_Mysteries/static/RIMs.png -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/static/Shared_Workspace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D5_Mysteries/static/Shared_Workspace.png -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/static/W1D1_goal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D5_Mysteries/static/W1D1_goal.png -------------------------------------------------------------------------------- /tutorials/W2D5_Mysteries/static/ethics_roadmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/W2D5_Mysteries/static/ethics_roadmap.png -------------------------------------------------------------------------------- /tutorials/static/AirtableSubmissionButton.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/AirtableSubmissionButton.png -------------------------------------------------------------------------------- /tutorials/static/Closed_Access_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/Closed_Access_logo.png -------------------------------------------------------------------------------- /tutorials/static/ConceptMap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/ConceptMap.png -------------------------------------------------------------------------------- /tutorials/static/Humor-Sans.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/Humor-Sans.ttf -------------------------------------------------------------------------------- /tutorials/static/NMA-W1D2-fig06.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/NMA-W1D2-fig06.png 
--------------------------------------------------------------------------------
/tutorials/static/NMA_W1D2_dataproject_draft.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/NMA_W1D2_dataproject_draft.jpg
--------------------------------------------------------------------------------
/tutorials/static/NeuroAI_big_tent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/NeuroAI_big_tent.png
--------------------------------------------------------------------------------
/tutorials/static/NeuroAI_concept_map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/NeuroAI_concept_map.png
--------------------------------------------------------------------------------
/tutorials/static/NeuroAI_sponsors_intro.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/NeuroAI_sponsors_intro.png
--------------------------------------------------------------------------------
/tutorials/static/NeuroAI_sponsors_intro2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/NeuroAI_sponsors_intro2.png
--------------------------------------------------------------------------------
/tutorials/static/Open_Access_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/Open_Access_logo.png
--------------------------------------------------------------------------------
/tutorials/static/SurveyButton.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/SurveyButton.png
--------------------------------------------------------------------------------
/tutorials/static/W3D4_Tutorial2_MultiarmedBandit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/W3D4_Tutorial2_MultiarmedBandit.png
--------------------------------------------------------------------------------
/tutorials/static/W3D4_Tutorial3_CliffWorld.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/W3D4_Tutorial3_CliffWorld.png
--------------------------------------------------------------------------------
/tutorials/static/W3D4_Tutorial3_GridWorld410.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/W3D4_Tutorial3_GridWorld410.png
--------------------------------------------------------------------------------
/tutorials/static/W3D4_Tutorial4_QuentinsWorld.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/W3D4_Tutorial4_QuentinsWorld.png
--------------------------------------------------------------------------------
/tutorials/static/W3D4_Tutorial4_QuentinsWorldShortcut.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/W3D4_Tutorial4_QuentinsWorldShortcut.png
--------------------------------------------------------------------------------
/tutorials/static/add-tag.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/add-tag.png
--------------------------------------------------------------------------------
/tutorials/static/ai-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/ai-logo.png
--------------------------------------------------------------------------------
/tutorials/static/astrocat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/astrocat.png
--------------------------------------------------------------------------------
/tutorials/static/button.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/button.png
--------------------------------------------------------------------------------
/tutorials/static/conv-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/conv-network.png
--------------------------------------------------------------------------------
/tutorials/static/conv_fc.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/conv_fc.PNG
--------------------------------------------------------------------------------
/tutorials/static/convnet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/convnet.png
--------------------------------------------------------------------------------
/tutorials/static/convolutional_layer.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/convolutional_layer.PNG
--------------------------------------------------------------------------------
/tutorials/static/data_analysis_step6.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/data_analysis_step6.jpeg
--------------------------------------------------------------------------------
/tutorials/static/folder-structure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/folder-structure.png
--------------------------------------------------------------------------------
/tutorials/static/generative_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/generative_model.png
--------------------------------------------------------------------------------
/tutorials/static/gh-pages.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/gh-pages.gif
--------------------------------------------------------------------------------
/tutorials/static/github-actions.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/github-actions.gif
--------------------------------------------------------------------------------
/tutorials/static/grad_descent.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/grad_descent.gif
--------------------------------------------------------------------------------
/tutorials/static/img_1235.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/img_1235.jpg
--------------------------------------------------------------------------------
/tutorials/static/img_1237_720.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/img_1237_720.jpg
--------------------------------------------------------------------------------
/tutorials/static/import-complete.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/import-complete.png
--------------------------------------------------------------------------------
/tutorials/static/import-repo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/import-repo.gif
--------------------------------------------------------------------------------
/tutorials/static/kaggle_internet_enabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/kaggle_internet_enabled.png
--------------------------------------------------------------------------------
/tutorials/static/kaggle_step1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/kaggle_step1.png
--------------------------------------------------------------------------------
/tutorials/static/kaggle_step2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/kaggle_step2.png
--------------------------------------------------------------------------------
/tutorials/static/kaggle_step3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/kaggle_step3.png
--------------------------------------------------------------------------------
/tutorials/static/kaggle_step4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/kaggle_step4.png
--------------------------------------------------------------------------------
/tutorials/static/kaggle_step5.1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/kaggle_step5.1.png
--------------------------------------------------------------------------------
/tutorials/static/kaggle_step5.2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/kaggle_step5.2.png
--------------------------------------------------------------------------------
/tutorials/static/kaggle_step6_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/kaggle_step6_1.png
--------------------------------------------------------------------------------
/tutorials/static/kaggle_step6_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/kaggle_step6_2.png
--------------------------------------------------------------------------------
/tutorials/static/modeling_step6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/modeling_step6.png
--------------------------------------------------------------------------------
/tutorials/static/new-course.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/new-course.gif
--------------------------------------------------------------------------------
/tutorials/static/nma-logo-square-4xp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/nma-logo-square-4xp.png
--------------------------------------------------------------------------------
/tutorials/static/one-layer-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/one-layer-network.png
--------------------------------------------------------------------------------
/tutorials/static/process-notebook.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/process-notebook.gif
--------------------------------------------------------------------------------
/tutorials/static/publish-book.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/publish-book.gif
--------------------------------------------------------------------------------
/tutorials/static/pull-request.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/pull-request.gif
--------------------------------------------------------------------------------
/tutorials/static/pull-request.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/pull-request.png
--------------------------------------------------------------------------------
/tutorials/static/restart-kernel.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/restart-kernel.gif
--------------------------------------------------------------------------------
/tutorials/static/sample_output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/sample_output.png
--------------------------------------------------------------------------------
/tutorials/static/tag-added.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/tag-added.png
--------------------------------------------------------------------------------
/tutorials/static/view-tags.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/view-tags.png
--------------------------------------------------------------------------------
/tutorials/static/weight-sharing.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/05d5bb213d63a051b44fca397a2e1998dc45760e/tutorials/static/weight-sharing.png
--------------------------------------------------------------------------------
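Note: every entry above follows the same convention, mapping a repo-relative path to a copy of the file pinned at commit 05d5bb213d63a051b44fca397a2e1998dc45760e on raw.githubusercontent.com. A minimal sketch of how one might fetch such an asset, using only Python's standard urllib module (the helper name fetch_asset and the example file choice are illustrative, not part of the repository):

import urllib.request

# Base URL of the commit-pinned raw mirror, taken from the listing above.
BASE = (
    "https://raw.githubusercontent.com/neuromatch/NeuroAI_Course/"
    "05d5bb213d63a051b44fca397a2e1998dc45760e"
)

def fetch_asset(repo_path, dest):
    # repo_path is repo-relative, e.g. "/tutorials/static/astrocat.png";
    # dest is the local filename to save the downloaded asset to.
    urllib.request.urlretrieve(BASE + repo_path, dest)

# Example usage with one of the assets listed above:
fetch_asset("/tutorials/static/astrocat.png", "astrocat.png")

Because the URLs embed a commit SHA rather than a branch name, the downloaded images stay stable even if the files are later moved or edited on the repository's default branch.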