├── .DS_Store
├── README.ipynb
├── README.md
├── code_by_chapter
    ├── .DS_Store
    ├── Chapter_1
    │   ├── .DS_Store
    │   ├── Ch1_input_param_vals.py
    │   ├── README.md
    │   ├── ch1_figs.py
    │   ├── ch1_gradient_descent_unknown_func.py
    │   ├── ch1_tf2_2dim_min.py
    │   ├── ch1_tf2_min.py
    │   ├── ch1_tf2_simple_min.py
    │   ├── ch1_tf_2dim_min.py
    │   ├── ch1_tf_simple_min.py
    │   └── ch1_three_unknowns.py
    ├── Chapter_10
    │   ├── .DS_Store
    │   ├── ch10_figs.py
    │   ├── ch10_logical_rules.py
    │   ├── ch10_logical_rules_2.py
    │   ├── ch10_logical_rules_3.py
    │   ├── ch10_tf_rbm.py
    │   ├── ch10_unsup_hebb.py
    │   └── readme.md
    ├── Chapter_11
    │   ├── .DS_Store
    │   ├── README.md
    │   ├── __pycache__
    │   │   └── ch11_tf2_vae.cpython-36.pyc
    │   ├── ch11_bayes.py
    │   ├── ch11_crp.py
    │   ├── ch11_entropy.py
    │   ├── ch11_figs.py
    │   ├── ch11_free_energy.py
    │   ├── ch11_light.py
    │   ├── ch11_soc.py
    │   ├── ch11_tf2_vae.py
    │   ├── ch11_tf2_vae_image.py
    │   └── ch11_tf2_vae_image_conv2.py
    ├── Chapter_12
    │   ├── ch12_figs.py
    │   └── readme.md
    ├── Chapter_2
    │   ├── .DS_Store
    │   ├── README.md
    │   ├── ch2_figs.py
    │   ├── ch2_hopfield_model.py
    │   ├── ch2_linear_model.py
    │   ├── ch2_pet_detector_optimization.py
    │   ├── ch2_plot_energy_fun.py
    │   ├── ch2_tf2_activation_min.py
    │   ├── ch2_tf2_cats_dogs.py
    │   ├── ch2_tf2_cats_dogs_hist.py
    │   ├── ch2_tf2_hopfield.py
    │   ├── ch2_tf2_hopfield_large.py
    │   ├── ch2_tf2_simple_model.py
    │   ├── ch2_tf_activation_min.py
    │   ├── ch2_tf_cats_dogs.py
    │   ├── ch2_tf_hopfield.py
    │   └── ch2_trav_sale.py
    ├── Chapter_3
    │   ├── .DS_Store
    │   ├── README.md
    │   ├── ch3_figs.py
    │   ├── ch3_hebb.py
    │   ├── ch3_orthogonality_1_some_points.py
    │   ├── ch3_orthogonality_2_cloud_random_points.py
    │   ├── ch3_orthogonality_3_cloud_almost_orthogonal_points.py
    │   ├── ch3_tf2_hebb.py
    │   ├── ch3_tf2_hebb_simplified.py
    │   ├── ch3_tf2_hopfield.py
    │   ├── ch3_tf_hebb.py
    │   └── ch3_tf_hopfield.py
    ├── Chapter_4
    │   ├── .DS_Store
    │   ├── Ch4_linear_separability.py
    │   ├── cdb.npy
    │   ├── ch4_deltarule_demo.py
    │   ├── ch4_deltarule_demo_repeated.py
    │   ├── ch4_figs.py
    │   ├── ch4_online_updater.py
    │   ├── ch4_tf2_cats_dogs.py
    │   ├── ch4_tf2_cdb.py
    │   ├── ch4_tf2_delta.py
    │   ├── ch4_tf2_delta_alternative.py
    │   ├── ch4_tf2_digit_classif.py
    │   ├── ch4_tf2_image_classif.py
    │   ├── ch4_tf2_rules.py
    │   ├── ch4_tf_delta.py
    │   ├── ch4_tf_image_classif.py
    │   ├── ch4_tf_logreg.py
    │   └── readme.md
    ├── Chapter_5
    │   ├── .DS_Store
    │   ├── __pycache__
    │   │   ├── ch5_tf2_confabulator_word.cpython-38.pyc
    │   │   ├── ch5_tf2_digit_classif.cpython-311.pyc
    │   │   ├── ch5_tf2_digit_classif.cpython-36.pyc
    │   │   ├── ch5_tf2_digit_classif.cpython-38.pyc
    │   │   ├── ch5_tf2_image_classif.cpython-311.pyc
    │   │   ├── ch5_tf2_image_classif.cpython-36.pyc
    │   │   ├── ch5_tf2_image_classif.cpython-38.pyc
    │   │   └── process_faces.cpython-38.pyc
    │   ├── beatles.txt
    │   ├── ch5_expon_function.py
    │   ├── ch5_figs.py
    │   ├── ch5_linear_mappings.py
    │   ├── ch5_tf2_confabulator.py
    │   ├── ch5_tf2_confabulator_word.py
    │   ├── ch5_tf2_confabulator_word_2.py
    │   ├── ch5_tf2_confabulator_word_3.py
    │   ├── ch5_tf2_digit_classif.py
    │   ├── ch5_tf2_digit_classif_conv.py
    │   ├── ch5_tf2_digit_encoder.py
    │   ├── ch5_tf2_digit_rnn.py
    │   ├── ch5_tf2_face_classif_conv.py
    │   ├── ch5_tf2_image_classif.py
    │   ├── ch5_tf2_image_classif_conv.py
    │   ├── ch5_tf2_rain_rnn.py
    │   ├── ch5_tf2_rules.py
    │   ├── ch5_tf2_sharedunits.py
    │   ├── ch5_tf_backprop.py
    │   ├── ch5_tf_image_classif.py
    │   ├── ch5_tf_image_classif_2layer.py
    │   ├── ch5_tf_image_classif_3layer.py
    │   ├── ch5_tf_image_classif_conv.py
    │   ├── models_beatles
    │   │   ├── log1.pkl
    │   │   ├── log2.pkl
    │   │   ├── log3.pkl
    │   │   ├── log4.pkl
    │   │   ├── model_beat1.keras
    │   │   ├── model_beat2.keras
    │   │   ├── model_beat3.keras
    │   │   ├── model_beat4.keras
    │   │   ├── texts1.pkl
    │   │   ├── texts2.pkl
    │   │   ├── texts3.pkl
    │   │   └── texts4.pkl
    │   ├── process_faces.py
    │   └── readme.md
    ├── Chapter_6
    │   ├── .DS_Store
    │   ├── __pycache__
    │   │   ├── ch6_estimation.cpython-37.pyc
    │   │   ├── ch6_generation.cpython-37.pyc
    │   │   └── ch6_likelihood.cpython-37.pyc
    │   ├── ch6_curve.py
    │   ├── ch6_estimation.py
    │   ├── ch6_figs.py
    │   ├── ch6_generation.py
    │   ├── ch6_likelihood.py
    │   ├── ch6_recovery.py
    │   ├── ch6_recovery_ab.py
    │   ├── ch6_test_estimator.py
    │   ├── readme.md
    │   ├── simulation_data.csv
    │   ├── simulation_results_1_CG_bayes_small.npy
    │   ├── simulation_results_1_L-BFGS-B_bayes_small.npy
    │   ├── simulation_results_5_CG_bayes_small.npy
    │   └── simulation_results_5_Powell_bayes_small.npy
    ├── Chapter_7
    │   ├── .DS_Store
    │   ├── ch7_chi2.py
    │   ├── ch7_figs.py
    │   ├── ch7_model_comparison.py
    │   └── readme.md
    ├── Chapter_8
    │   ├── .DS_Store
    │   ├── README.md
    │   ├── ch8_RL_bandit.py
    │   ├── ch8_figs.py
    │   ├── ch8_tf2_lunar.py
    │   ├── ch8_tf2_lunar_2.py
    │   ├── ch8_tf2_mountaincar.py
    │   ├── ch8_tf2_mountaincar_cont.py
    │   ├── ch8_tf2_pole_1.py
    │   ├── ch8_tf2_pole_2.py
    │   ├── ch8_tf2_rl_bandit.py
    │   ├── ch8_tf2_rl_bandit_2.py
    │   ├── ch8_tf2_taxi.py
    │   ├── ch8_tf2_taxi_2.py
    │   ├── ch8_tf2_taxi_3.py
    │   └── extra
    │   │   ├── .DS_Store
    │   │   ├── guessing_game_env.py
    │   │   ├── im
    │   │   │   └── .DS_Store
    │   │   ├── models
    │   │   │   ├── model_cartpole
    │   │   │   ├── model_cartpole.h5
    │   │   │   ├── model_lunar
    │   │   │   ├── model_lunar.h5
    │   │   │   ├── model_mountaincar.h5
    │   │   │   ├── model_taxi.h5
    │   │   │   └── model_taxi_dqn.h5
    │   │   ├── save_to_movie.py
    │   │   └── warmup_guessgame.py
    ├── Chapter_9
    │   ├── .DS_Store
    │   ├── README.md
    │   ├── ch9_RL_frozen_lake.py
    │   ├── ch9_RL_mountaincar.py
    │   ├── ch9_RL_mountaincar_cont.py
    │   ├── ch9_RL_taxi.py
    │   ├── ch9_RL_taxi_2.py
    │   ├── ch9_figs.py
    │   ├── ch9_gridworld_optimal.py
    │   ├── ch9_gridworld_optimal_2.py
    │   ├── ch9_gridworld_optimal_slippery.py
    │   ├── ch9_lineworld_Q.py
    │   ├── ch9_lineworld_Q_optimal.py
    │   ├── ch9_lineworld_V.py
    │   ├── ch9_lineworld_V_optimal.py
    │   ├── ch9_plotting.py
    │   ├── ch9_ringworld_Q_optimal.py
    │   ├── im
    │   │   ├── .DS_Store
    │   │   ├── img001.png
    │   │   ├── img002.png
    │   │   ├── img003.png
    │   │   ├── img004.png
    │   │   ├── img005.png
    │   │   ├── img006.png
    │   │   ├── img007.png
    │   │   ├── img008.png
    │   │   ├── img009.png
    │   │   ├── img010.png
    │   │   ├── img011.png
    │   │   ├── img012.png
    │   │   ├── img013.png
    │   │   ├── img014.png
    │   │   ├── img015.png
    │   │   ├── img016.png
    │   │   ├── img017.png
    │   │   ├── img018.png
    │   │   ├── img019.png
    │   │   ├── img020.png
    │   │   ├── img021.png
    │   │   ├── img022.png
    │   │   ├── img023.png
    │   │   ├── img024.png
    │   │   ├── img025.png
    │   │   └── taxi_video.mp4
    │   └── save_to_movie.py
    ├── readme.md
    └── untitled0.py
└── installation
    ├── modelling_mac.yml
    ├── modelling_windows.yml
    └── readme.md

/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CogComNeuroSci/modeling-master/0225598be33593c62ec9e6d488000ffa538d760e/.DS_Store
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Introduction to modeling cognitive processes

[![Lines](https://img.shields.io/tokei/lines/github/CogComNeuroSci/modeling-master?style=plastic?color=yellowgreen)](https://img.shields.io/tokei/lines/github/CogComNeuroSci/modeling-master?style=plastic?color=yellowgreen)
[![Commits](https://img.shields.io/github/last-commit/CogComNeuroSci/modeling-master?style=plastic)](https://img.shields.io/github/last-commit/CogComNeuroSci/modeling-master?style=plastic)
[![Contributors](https://img.shields.io/github/contributors/CogComNeuroSci/modeling-master?style=plastic)](https://img.shields.io/github/contributors/CogComNeuroSci/modeling-master?style=plastic)


## Overview

This folder contains the code that accompanies the MCP handbook. The book is written by Tom Verguts; he thanks Esther De Loof, Mehdi Senoussi, and Pieter Huycke for lots of coding inspiration.

## Organization

The folders are organized by chapter, and the chapters line up with those of the MCP handbook.


## Programming environment

We rely heavily on Python 3 as distributed with the Anaconda environment. The most recent scripts use TensorFlow 2 (TF2); we strongly recommend TF2 over the earlier TF1. Several modules are required to run the scripts, but the most important software packages are:

- Python 3 (v. 3.6)
- Anaconda 3 (v. 4.10.3)
- TensorFlow 2 (v. 2.4)

Note that we always work with Anaconda via virtual environments.
Our preferred editor is Spyder. You can recreate our TensorFlow 2 environment by creating a new environment from the modelling_mac.yml or modelling_windows.yml files (in the installation folder), for example with `conda env create -f modelling_mac.yml`.

## Contact

Tom Verguts
* [mail](mailto:Tom.Verguts@UGent.be)
* [web entry](https://www.cogcomneurosci.com/about/#principal-investigator)

[Lab website]: https://cogcomneurosci.com/

**Last edit: 1-09-2022**
--------------------------------------------------------------------------------
/code_by_chapter/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CogComNeuroSci/modeling-master/0225598be33593c62ec9e6d488000ffa538d760e/code_by_chapter/.DS_Store
--------------------------------------------------------------------------------
/code_by_chapter/Chapter_1/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CogComNeuroSci/modeling-master/0225598be33593c62ec9e6d488000ffa538d760e/code_by_chapter/Chapter_1/.DS_Store
--------------------------------------------------------------------------------
/code_by_chapter/Chapter_1/README.md:
--------------------------------------------------------------------------------

# Chapter 1: What is cognitive modeling?

Code for (very) basic function optimisation and visualisation.
The script ch1_tf2_simple_min.py demonstrates how to optimize a function using TensorFlow.
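To give a rough idea of what such a script looks like, here is a minimal sketch of function minimization in TensorFlow 2. It is not the actual contents of ch1_tf2_simple_min.py; the function $(x-1)^2$, the starting point 2.7, the learning rate 0.1, and the number of steps are illustrative assumptions, borrowed from the gradient-descent example in ch1_figs.py.

```python
import tensorflow as tf

x = tf.Variable(2.7)                                    # initial guess for the unknown (assumed)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)  # plain gradient descent

def f():
    return (x - 1)**2                                   # the function we want to minimize

for step in range(100):
    with tf.GradientTape() as tape:                     # record operations to compute gradients
        loss = f()
    gradients = tape.gradient(loss, [x])                # df/dx, computed automatically
    optimizer.apply_gradients(zip(gradients, [x]))      # x <- x - learning_rate * df/dx

print(x.numpy())                                        # should end up close to the minimum at x = 1
```

The point of using TensorFlow here is that the derivative is computed automatically (via `tf.GradientTape`), so the same loop works for functions whose derivative would be tedious to write down by hand.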

--------------------------------------------------------------------------------
/code_by_chapter/Chapter_1/ch1_figs.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 13:49:33 2018

@author: tom verguts
create figs (and table) of chapter 1
"""

import matplotlib.pyplot as plt
import numpy as np


# illustrate the algorithm in a plot
x = np.linspace(start = -1, stop = 3, num = 20)
xvals = [2.7, 2.02, 1.61, 1.37]
y = (x - 1)**2

plt.plot(x, y, color = "black")
plt.ylabel("$y = (x-1)^2$")
bottom, top = plt.ylim()
plt.scatter(xvals, [bottom]*4, color = "black", s = 80)
plt.scatter(xvals, (np.array(xvals)-1)**2, color = "black", s = 80)
plt.ylim((bottom, top))

# now run the actual algorithm
np.set_printoptions(precision = 3, suppress = True)

def y(x): # the function we aim to optimize
    return (x-1)**2

def y_der(x): # the derivative of the function we aim to optimize
    return 2*(x-1)

n_steps = 100
x_start = 2.7 # random starting point
alpha = 0.1   # step size scaling parameter
data = np.zeros((n_steps,4)) # columns: x, y(x), y'(x), and the step -alpha*y'(x)
data[0,0] = x_start
for step in range(n_steps):
    data[step,1] = y(data[step,0])
    data[step,2] = y_der(data[step,0])
    data[step,3] = -alpha*data[step,2]
    if step