├── notebooks ├── figs │ ├── CRP.png │ ├── MSSR.png │ ├── SAM.png │ ├── SPC.png │ ├── TCM.png │ ├── accumulators.png │ ├── SampleRecoverBiz.png │ └── Hawkins_Heathcote_TRDM.png ├── random_data.pickle ├── sam_bestfits_LTP105.pickle.gz ├── tcm_bestfits_LTP105.pickle.gz ├── contest_data_03.csv ├── contest_data_02.csv ├── ci_within.py ├── contest_data_01.csv ├── nu_wfpt.py ├── flanker_s1.csv ├── waldrace.py ├── dists.py ├── banal.py ├── btcm.py ├── bsam.py └── 01_Introduction.ipynb ├── syllabus ├── syllabus.pdf └── syllabus.md ├── .gitignore ├── README.md └── assignments ├── A01_Data_Read_Plot.ipynb ├── dists.py ├── A03_BEST_decisions.ipynb ├── P01_Confidence.ipynb ├── P02_Policy_Change.ipynb └── A02_Plot_Distributions.ipynb /notebooks/figs/CRP.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/compmem/QuantCog/HEAD/notebooks/figs/CRP.png -------------------------------------------------------------------------------- /notebooks/figs/MSSR.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/compmem/QuantCog/HEAD/notebooks/figs/MSSR.png -------------------------------------------------------------------------------- /notebooks/figs/SAM.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/compmem/QuantCog/HEAD/notebooks/figs/SAM.png -------------------------------------------------------------------------------- /notebooks/figs/SPC.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/compmem/QuantCog/HEAD/notebooks/figs/SPC.png -------------------------------------------------------------------------------- /notebooks/figs/TCM.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/compmem/QuantCog/HEAD/notebooks/figs/TCM.png 
-------------------------------------------------------------------------------- /syllabus/syllabus.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/compmem/QuantCog/HEAD/syllabus/syllabus.pdf -------------------------------------------------------------------------------- /notebooks/random_data.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/compmem/QuantCog/HEAD/notebooks/random_data.pickle -------------------------------------------------------------------------------- /notebooks/figs/accumulators.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/compmem/QuantCog/HEAD/notebooks/figs/accumulators.png -------------------------------------------------------------------------------- /notebooks/figs/SampleRecoverBiz.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/compmem/QuantCog/HEAD/notebooks/figs/SampleRecoverBiz.png -------------------------------------------------------------------------------- /notebooks/sam_bestfits_LTP105.pickle.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/compmem/QuantCog/HEAD/notebooks/sam_bestfits_LTP105.pickle.gz -------------------------------------------------------------------------------- /notebooks/tcm_bestfits_LTP105.pickle.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/compmem/QuantCog/HEAD/notebooks/tcm_bestfits_LTP105.pickle.gz -------------------------------------------------------------------------------- /notebooks/figs/Hawkins_Heathcote_TRDM.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/compmem/QuantCog/HEAD/notebooks/figs/Hawkins_Heathcote_TRDM.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | # other 107 | *~ 
108 | -------------------------------------------------------------------------------- /notebooks/contest_data_03.csv: -------------------------------------------------------------------------------- 1 | ,correct,rt 2 | 0,1,0.377 3 | 1,1,0.393 4 | 2,0,0.3 5 | 3,1,0.46900000000000003 6 | 4,1,0.256 7 | 5,0,0.232 8 | 6,1,0.428 9 | 7,1,0.468 10 | 8,1,0.457 11 | 9,1,0.34700000000000003 12 | 10,1,0.557 13 | 11,0,0.462 14 | 12,1,0.333 15 | 13,1,0.47500000000000003 16 | 14,1,0.309 17 | 15,1,0.435 18 | 16,1,0.28400000000000003 19 | 17,1,0.494 20 | 18,1,0.276 21 | 19,1,0.461 22 | 20,1,0.263 23 | 21,1,0.441 24 | 22,1,0.381 25 | 23,0,0.29 26 | 24,1,0.3 27 | 25,0,0.854 28 | 26,1,0.495 29 | 27,1,0.387 30 | 28,1,0.516 31 | 29,1,0.546 32 | 30,1,0.485 33 | 31,1,0.233 34 | 32,0,0.38 35 | 33,0,0.338 36 | 34,0,0.293 37 | 35,0,0.301 38 | 36,1,0.514 39 | 37,0,0.34500000000000003 40 | 38,1,0.532 41 | 39,1,0.323 42 | 40,1,0.5640000000000001 43 | 41,1,0.327 44 | 42,1,0.717 45 | 43,0,0.375 46 | 44,0,0.584 47 | 45,0,0.549 48 | 46,1,0.386 49 | 47,1,0.449 50 | 48,1,0.47200000000000003 51 | 49,1,0.911 52 | 50,1,0.498 53 | 51,0,0.454 54 | 52,1,0.342 55 | 53,0,0.84 56 | 54,0,0.333 57 | 55,1,0.558 58 | 56,0,0.339 59 | 57,1,0.481 60 | 58,1,0.398 61 | 59,1,0.5760000000000001 62 | 60,0,0.422 63 | 61,0,0.42 64 | 62,0,0.41300000000000003 65 | 63,1,0.47000000000000003 66 | 64,0,0.46 67 | 65,0,0.518 68 | 66,1,0.468 69 | 67,1,0.28700000000000003 70 | 68,1,0.292 71 | 69,1,0.325 72 | 70,0,0.769 73 | 71,1,0.28700000000000003 74 | 72,1,0.432 75 | 73,1,0.389 76 | 74,1,0.35200000000000004 77 | 75,1,0.53 78 | 76,0,0.309 79 | 77,0,0.33 80 | 78,1,0.501 81 | 79,0,0.5720000000000001 82 | 80,0,0.5700000000000001 83 | 81,1,0.243 84 | 82,1,0.332 85 | 83,0,0.668 86 | 84,1,0.634 87 | 85,0,0.461 88 | 86,0,0.339 89 | 87,1,0.41000000000000003 90 | 88,0,0.371 91 | 89,1,0.34500000000000003 92 | 90,0,0.387 93 | 91,1,0.448 94 | 92,1,0.546 95 | 93,1,0.455 96 | 94,1,0.28700000000000003 97 | 95,1,0.396 98 | 96,1,0.296 99 | 
97,1,0.504 100 | 98,1,0.604 101 | 99,0,0.51 102 | 100,0,0.355 103 | 101,1,0.587 104 | 102,0,0.444 105 | 103,1,0.6920000000000001 106 | 104,1,0.41600000000000004 107 | 105,0,0.647 108 | 106,0,0.388 109 | 107,0,0.746 110 | 108,1,0.33 111 | 109,0,0.464 112 | 110,1,0.421 113 | 111,1,0.976 114 | 112,0,0.47000000000000003 115 | 113,0,0.47600000000000003 116 | 114,0,0.31 117 | 115,1,0.302 118 | 116,1,0.652 119 | 117,1,0.362 120 | 118,1,0.291 121 | 119,0,0.309 122 | 120,1,0.318 123 | 121,0,0.278 124 | 122,1,0.334 125 | 123,1,0.645 126 | 124,0,0.294 127 | 125,1,0.313 128 | 126,1,0.544 129 | 127,0,0.867 130 | 128,1,0.332 131 | 129,1,0.364 132 | 130,1,0.299 133 | 131,0,0.263 134 | 132,0,0.47200000000000003 135 | 133,1,0.47000000000000003 136 | 134,1,0.541 137 | 135,1,0.364 138 | 136,1,0.216 139 | 137,1,0.468 140 | 138,0,0.679 141 | 139,1,0.316 142 | 140,1,0.32 143 | 141,1,0.49 144 | 142,1,0.391 145 | 143,1,0.302 146 | 144,1,0.23800000000000002 147 | 145,1,0.609 148 | 146,1,0.397 149 | 147,1,0.34900000000000003 150 | 148,0,0.58 151 | 149,1,0.293 152 | -------------------------------------------------------------------------------- /notebooks/contest_data_02.csv: -------------------------------------------------------------------------------- 1 | ,correct,rt 2 | 0,1.0,2.026 3 | 1,0.0,1.112 4 | 2,0.0,0.55 5 | 3,1.0,0.512 6 | 4,1.0,0.569 7 | 5,1.0,0.47 8 | 6,1.0,0.5589999999999999 9 | 7,1.0,0.45899999999999996 10 | 8,1.0,0.454 11 | 9,1.0,0.34299999999999997 12 | 10,1.0,0.644 13 | 11,1.0,0.431 14 | 12,0.0,1.065 15 | 13,1.0,0.46599999999999997 16 | 14,1.0,1.2389999999999999 17 | 15,1.0,0.89 18 | 16,1.0,0.496 19 | 17,0.0,1.03 20 | 18,1.0,0.73 21 | 19,1.0,0.787 22 | 20,1.0,1.618 23 | 21,1.0,0.751 24 | 22,1.0,0.618 25 | 23,1.0,2.529 26 | 24,1.0,1.738 27 | 25,1.0,0.595 28 | 26,1.0,0.533 29 | 27,0.0,0.8210000000000001 30 | 28,0.0,1.577 31 | 29,1.0,0.396 32 | 30,1.0,0.404 33 | 31,1.0,0.7290000000000001 34 | 32,1.0,0.46199999999999997 35 | 33,1.0,0.751 36 | 34,1.0,0.716 37 | 35,1.0,0.706 
38 | 36,0.0,0.66 39 | 37,1.0,0.495 40 | 38,1.0,0.527 41 | 39,1.0,1.301 42 | 40,1.0,1.568 43 | 41,1.0,1.989 44 | 42,1.0,1.189 45 | 43,1.0,0.395 46 | 44,1.0,0.441 47 | 45,1.0,0.673 48 | 46,1.0,0.528 49 | 47,1.0,0.354 50 | 48,1.0,0.434 51 | 49,1.0,0.896 52 | 50,1.0,0.5489999999999999 53 | 51,1.0,0.46699999999999997 54 | 52,1.0,0.323 55 | 53,1.0,1.95 56 | 54,0.0,0.392 57 | 55,1.0,0.453 58 | 56,1.0,1.5030000000000001 59 | 57,1.0,0.791 60 | 58,1.0,0.313 61 | 59,1.0,0.474 62 | 60,1.0,0.704 63 | 61,0.0,0.5940000000000001 64 | 62,1.0,0.373 65 | 63,1.0,0.53 66 | 64,1.0,0.835 67 | 65,1.0,0.738 68 | 66,1.0,2.48 69 | 67,1.0,0.56 70 | 68,1.0,1.816 71 | 69,1.0,1.482 72 | 70,1.0,0.84 73 | 71,0.0,1.1320000000000001 74 | 72,1.0,0.737 75 | 73,1.0,1.207 76 | 74,1.0,0.376 77 | 75,1.0,1.455 78 | 76,1.0,0.448 79 | 77,1.0,1.621 80 | 78,1.0,1.0710000000000002 81 | 79,0.0,0.439 82 | 80,1.0,0.957 83 | 81,1.0,0.875 84 | 82,1.0,0.833 85 | 83,1.0,1.6440000000000001 86 | 84,1.0,0.5529999999999999 87 | 85,1.0,0.41800000000000004 88 | 86,1.0,0.446 89 | 87,1.0,0.397 90 | 88,0.0,1.335 91 | 89,0.0,0.869 92 | 90,1.0,0.548 93 | 91,1.0,0.5589999999999999 94 | 92,1.0,0.5 95 | 93,1.0,1.2269999999999999 96 | 94,1.0,0.918 97 | 95,1.0,0.637 98 | 96,1.0,1.089 99 | 97,1.0,0.483 100 | 98,1.0,1.472 101 | 99,1.0,0.7230000000000001 102 | 100,1.0,0.526 103 | 101,0.0,0.954 104 | 102,0.0,0.859 105 | 103,1.0,0.666 106 | 104,1.0,0.5589999999999999 107 | 105,1.0,1.241 108 | 106,1.0,0.91 109 | 107,1.0,0.396 110 | 108,1.0,0.525 111 | 109,0.0,0.457 112 | 110,1.0,0.862 113 | 111,1.0,0.771 114 | 112,1.0,0.384 115 | 113,1.0,0.5960000000000001 116 | 114,1.0,0.787 117 | 115,1.0,0.481 118 | 116,1.0,0.46199999999999997 119 | 117,1.0,0.496 120 | 118,0.0,0.937 121 | 119,1.0,0.83 122 | 120,1.0,0.43 123 | 121,1.0,0.936 124 | 122,1.0,0.5740000000000001 125 | 123,1.0,1.041 126 | 124,1.0,0.671 127 | 125,1.0,0.534 128 | 126,0.0,0.8180000000000001 129 | 127,0.0,0.774 130 | 128,1.0,0.797 131 | 129,0.0,0.8220000000000001 132 | 130,1.0,1.131 
133 | 131,1.0,1.195 134 | 132,1.0,0.337 135 | 133,1.0,1.404 136 | 134,1.0,0.7090000000000001 137 | 135,0.0,0.9420000000000001 138 | 136,1.0,0.401 139 | 137,1.0,0.663 140 | 138,1.0,1.1280000000000001 141 | 139,0.0,1.017 142 | 140,1.0,1.804 143 | 141,1.0,1.2510000000000001 144 | 142,1.0,0.363 145 | 143,1.0,0.33899999999999997 146 | 144,1.0,1.021 147 | 145,1.0,0.849 148 | 146,1.0,0.403 149 | 147,1.0,0.494 150 | 148,0.0,1.04 151 | 149,0.0,0.881 152 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # QuantCog: Introduction to Quantified Cognition 2 | 3 | This repository contains the content for the course, an *Introduction to Quantified Cognition*. 4 | 5 | You can find the syllabus [here](syllabus/syllabus.pdf). 6 | 7 | The [notebooks](notebooks) folder contains Jypyter notebooks for each lesson. 8 | 9 | 10 | ## Annotated Bibliography 11 | 12 | The following are papers and other sources relevant for the class: 13 | 14 | - Wagenmakers, Eric-Jan (2007). A practical solution to the pervasive problems of *p* values. *Psychonomic Bulletin & Review*, 14(5), 779--804. 15 | [PDF](https://www.ejwagenmakers.com/2007/pValueProblems.pdf) 16 | - Covers the issues with standard frequentist approaches for calculating p-values and presents an argument for Bayesian approaches and, in particular, Bayes factor. 17 | 18 | - Kruschke, John K. (2013). Bayesian Estimation Supersedes the *t* Test. *Journal of Experimental Psychology: General*, 142(2), 573--603. 19 | [PDF](https://jkkweb.sitehost.iu.edu/articles/Kruschke2013JEPG.pdf) 20 | - Excellent paper explaining and justifying a Bayesian replacement to the standard *t*-test. 21 | 22 | - Gelman, Andrew (2006). Prior distributions for variance parameters in hierarchical models. *Bayesian Analysis*, 1(3), 515--533. 
23 | [PDF](http://www.stat.columbia.edu/~gelman/research/published/taumain.pdf) 24 | - Long story short, use half Cauchy priors for variance parameters. 25 | 26 | - Forstmann, B. U., Tittgemeyer, M., Wagenmakers, E. J., Derrfuss, J., Imperati, D., & Brown, S. (2011). The speed-accuracy tradeoff in the elderly brain: a structural model-based approach. *Journal of Neuroscience*, 31(47), 17242-17249. 27 | [PDF](https://www.jneurosci.org/content/jneuro/31/47/17242.full.pdf) 28 | - Paper applying LBA to the speed--accuracy tradeoff. 29 | 30 | - Brown, S.D., & Heathcote, A. J. (2008). The simplest complete model of choice 31 | response time: Linear ballistic accumulation. *Cognitive Psychology*, 57, 153-178. 32 | [PDF](http://www.tascl.org/uploads/4/9/3/3/49339445/38_.pdf) 33 | - The first presentation of the LBA model. 34 | 35 | 36 | - Navarro, D. J., & Fuss, I. G. (2009). Fast and accurate calculations for first-passage times in Wiener diffusion models. *Journal of Mathematical Psychology*, 53(4), 222-230. 37 | [PDF](https://compcogscisydney.org/publications/NavarroFuss2009.pdf) 38 | - Paper illustrating fast and accurate estimation of WFPT models. 39 | 40 | - Hawkins, G., & Heathcote, A. (2021). Racing against the clock: 41 | Evidence-based vs. time-based decisions. *Psychological Review*. 42 | [PDF](http://www.tascl.org/uploads/4/9/3/3/49339445/hawkins__in_press__racing_against_the_clock.pdf) 43 | - Independent accumulator model with a timer. 44 | 45 | - McClelland, J. L., (2009). The place of modeling in cognitive science. *Topics in Cognitive Science*, 1, 11-38. 46 | [HTML](https://onlinelibrary.wiley.com/doi/full/10.1111/j.1756-8765.2008.01003.x) 47 | 48 | - Heathcote A, Brown SD, & Wagenmakers E-J, "An Introduction to Good Practices in Cognitive Modeling", in *An Introduction to Model-Based Cognitive Neuroscience*, Springer, E-J Wagenmakers (ed), 1-14. 
# Author Denis A. Engemann
# Adjustments: Josef Perktold, Per Sederberg
#
# License: BSD (3-clause)

from io import StringIO

import numpy as np
from scipy import stats
import pandas as pd


def ci_within(df, indexvar, withinvars, measvar, confint=0.95,
              copy=True):
    """Compute within-subject CIs / SEMs with the Cousineau--Morey correction.

    Morey (2008), Cousineau (2005), Loftus & Masson (1994).
    Also see the R cookbook: http://goo.gl/QdwJl

    This function helps to generate appropriate confidence intervals for
    repeated-measures designs: standard confidence intervals are computed
    on subject-normalized data, and a correction factor is applied that
    prevents unrealistically small values.

    Parameters
    ----------
    df : instance of pandas.DataFrame
        The data frame object.
    indexvar : str
        The column name of the identifier variable representing
        subjects or repeated measures.
    withinvars : str | list of str
        The column names of the categorical data identifying random effects.
    measvar : str
        The column name of the response measure.
    confint : float
        The confidence level (e.g., 0.95).
    copy : bool
        Whether to copy the data frame or not.

    Returns
    -------
    out : pandas.DataFrame
        One row per within-condition cell, with columns
        'mean', 'std', 'sem', 'ci', and 'len' (cell count).
    """
    if copy:
        df = df.copy()

    # honor the documented "str | list of str" contract: a bare string
    # would otherwise be iterated character-by-character below
    if isinstance(withinvars, str):
        withinvars = [withinvars]

    # Apply Cousineau's method:
    # compute grand mean
    mean_ = df[measvar].mean()

    # compute subject means
    subj_means = df.groupby(indexvar)[measvar].mean().values
    for subj, smean_ in zip(df[indexvar].unique(), subj_means):
        # center each subject's scores, then add back the grand mean, so
        # between-subject variability is removed but the scale is kept
        df.loc[df[indexvar] == subj, measvar] -= smean_
        df.loc[df[indexvar] == subj, measvar] += mean_

    def sem(x):
        # standard error of the mean (pandas .std uses ddof=1)
        return x.std() / np.sqrt(len(x))

    def ci(x):
        # half-width of the CI; the t distribution has n - 1 degrees of
        # freedom (the original code passed len(x - 1), i.e. n, by mistake)
        se = sem(x)
        return se * stats.t.interval(confint, len(x) - 1)[1]

    # string aliases for mean/std/count avoid the deprecated raw-numpy-func
    # path in modern pandas; 'count' is renamed to keep the public 'len' column
    aggfuncs = ['mean', 'std', sem, ci, 'count']
    out = df.groupby(withinvars)[measvar].agg(aggfuncs)
    out = out.rename(columns={'count': 'len'})

    # compute & apply the Morey (2008) correction factor,
    # sqrt(m / (m - 1)), where m is the number of within-cells
    n_within = np.prod([df[k].nunique() for k in withinvars])
    cf = np.sqrt(n_within / (n_within - 1.))
    for k in ['sem', 'std', 'ci']:
        out[k] *= cf

    # recompute the CI from the corrected SEM using the exact t quantile
    out['ci'] = stats.t.isf((1 - confint) / 2., out['len'] - 1) * out['sem']

    return out


if __name__ == '__main__':
    # Example from the R cookbook (within-subject error bars)
    ss = '''
subject condition value
1 pretest 59.4
2 pretest 46.4
3 pretest 46.0
4 pretest 49.0
5 pretest 32.5
6 pretest 45.2
7 pretest 60.3
8 pretest 54.3
9 pretest 45.4
10 pretest 38.9
1 posttest 64.5
2 posttest 52.4
3 posttest 49.7
4 posttest 48.7
5 posttest 37.4
6 posttest 49.5
7 posttest 59.9
8 posttest 54.1
9 posttest 49.6
10 posttest 48.5'''

    # whitespace-delimited parsing is more robust here than the previous
    # fixed-width read; also fixes the Python-2 StringIO import and the
    # undefined-name bug (the original called ci_within(df2, ...))
    df = pd.read_csv(StringIO(ss), sep=r'\s+')
    res = ci_within(df, 'subject', ['condition'], 'value', confint=0.95)
    print(res)
    print(res[['len', 'mean', 'std', 'sem', 'ci']])

    # reference output (R summarySEwithin):
    # http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_%28ggplot2%29/#error-bars-for-within-subjects-variables
    #
    # dfwc <- summarySEwithin(dfw.long, measurevar="value", withinvars="condition",
    #                         idvar="subject", na.rm=FALSE, conf.interval=.95)
    #   condition  N value value_norm       sd        se       ci
    #    posttest 10 51.43      51.43 2.262361 0.7154214 1.618396
    #     pretest 10 47.74      47.74 2.262361 0.7154214 1.618396
47,0,0.6363992290984102 50 | 48,1,0.6259105217667609 51 | 49,1,0.5262150036929305 52 | 50,0,0.7909943960547763 53 | 51,1,0.8860835849381861 54 | 52,1,0.6832401817395111 55 | 53,0,1.1940885130066858 56 | 54,1,0.7537650620824814 57 | 55,1,0.7157993957000368 58 | 56,1,0.5300803364214539 59 | 57,1,0.920758662098236 60 | 58,1,0.5235185139749494 61 | 59,0,0.692413033159857 62 | 60,0,0.7506829052130946 63 | 61,0,0.6577026593522869 64 | 62,1,1.2284362865728404 65 | 63,1,0.731326913222967 66 | 64,1,0.7613490112492673 67 | 65,0,0.6229361344927047 68 | 66,0,2.1164972723745534 69 | 67,1,0.6525540482869738 70 | 68,0,1.313843750852544 71 | 69,1,0.8425800864511153 72 | 70,1,0.6189550813575233 73 | 71,1,0.6698749638918212 74 | 72,1,0.7113163118509971 75 | 73,0,1.2552350333283513 76 | 74,0,0.5739411922235582 77 | 75,1,0.6657322731970771 78 | 76,1,0.6350163619204148 79 | 77,1,0.7688581546879185 80 | 78,1,0.8524135903576986 81 | 79,1,0.6750860388763 82 | 80,1,0.5750371577487079 83 | 81,1,0.6149474750616295 84 | 82,1,0.6404792271166977 85 | 83,1,1.2321488442941415 86 | 84,1,0.604046103259698 87 | 85,1,0.6815841126325379 88 | 86,1,0.6238002622833596 89 | 87,0,0.8228301657452979 90 | 88,0,0.6607820642671419 91 | 89,1,0.722626902880459 92 | 90,0,0.9492479210359928 93 | 91,1,1.0187609828199038 94 | 92,0,0.8227613828989588 95 | 93,0,0.7157055358229176 96 | 94,1,0.6391805295443256 97 | 95,1,0.6381918701530496 98 | 96,0,0.7029132324593874 99 | 97,1,0.8355738733005227 100 | 98,1,0.5622709795237717 101 | 99,1,0.6958664925704969 102 | 100,1,0.5501857558995287 103 | 101,1,0.7364501963684147 104 | 102,1,0.7686475845663012 105 | 103,1,0.6161074710348697 106 | 104,1,1.0735754773498183 107 | 105,0,0.6432834104684501 108 | 106,1,0.830168281846823 109 | 107,1,0.6091383026219743 110 | 108,1,0.6323727578899863 111 | 109,1,0.5300138560592667 112 | 110,1,0.645442036296011 113 | 111,1,0.6772662129476197 114 | 112,1,0.8544131053539179 115 | 113,1,0.6546099688994362 116 | 114,1,0.49476367355275647 117 | 
115,1,0.5568949102884491 118 | 116,1,0.7825536107241189 119 | 117,1,0.7122685120009671 120 | 118,0,1.0486537041810746 121 | 119,1,1.3554618477177518 122 | 120,0,0.8021710488401907 123 | 121,1,0.7845185414735603 124 | 122,1,0.6497090773862223 125 | 123,1,1.6859468333188932 126 | 124,1,0.8034807692173813 127 | 125,0,1.4257910483368772 128 | 126,1,0.8380461555699177 129 | 127,1,0.74953174623105 130 | 128,1,0.6424690398828443 131 | 129,1,0.8354770829177813 132 | 130,1,0.7002807173114156 133 | 131,0,0.8056648479886339 134 | 132,1,0.6453690818371758 135 | 133,1,0.6674181802373754 136 | 134,1,0.5901849147399748 137 | 135,1,0.6090689532477466 138 | 136,0,0.6119767062878413 139 | 137,1,0.5194826974395124 140 | 138,1,0.6660102846823512 141 | 139,1,0.5749353503251355 142 | 140,0,0.7055124661940666 143 | 141,0,0.7322243761286833 144 | 142,1,1.0721590340514429 145 | 143,1,0.5896160075485377 146 | 144,1,0.6996101831742875 147 | 145,0,0.6929136891750218 148 | 146,1,0.7081030735899805 149 | 147,1,1.1585093760237954 150 | 148,1,0.7527176496454613 151 | 149,1,0.5492475580225664 152 | -------------------------------------------------------------------------------- /assignments/A01_Data_Read_Plot.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Assignment 1: Reading Data and Plotting\n", 8 | "## Quantified Cognition\n", 9 | "### Psychology 5332\n", 10 | "\n", 11 | "By: Per B. Sederberg, PhD\n" 12 | ] 13 | }, 14 | { 15 | "cell_type": "markdown", 16 | "metadata": {}, 17 | "source": [ 18 | "# Objectives\n", 19 | "\n", 20 | "Upon completion of this assignment, the student will demonstrate the ability to:\n", 21 | "\n", 22 | "1. Use a Jupyter Notebook\n", 23 | "\n", 24 | "2. Load some common modules\n", 25 | "\n", 26 | "3. Read in data from a CSV file with Pandas\n", 27 | "\n", 28 | "4. Perform some simiple analyses of the data\n", 29 | "\n", 30 | "5. 
Plot some simple visualizations of the data\n" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "# Assignment\n", 38 | "\n", 39 | "- Work through the Software Carpentry introduction here: https://swcarpentry.github.io/python-novice-inflammation/. Be sure to open a new Jupyter Notebook and type in \n", 40 | " the examples as you work through the different sections. If you are relatively new to Python or feeling rusty, I strongly recommend going through all sections. \n", 41 | "\n", 42 | "- After you have worked through Software Carpentry tutorial, you will perform this assignment by writing code in *this notebook* (***after making a copy and renaming it to have your userid in the title --- e.g., A01_Data_Read_Plot_mst3k***).\n", 43 | "\n", 44 | "- In addition to this notebook, you will need to download the data from the same directory on GitHub. The file is decision_data.csv.\n", 45 | "\n", 46 | "- ***When you are done, save this notebook as HTML (`File -> Download as -> HTML`) and upload it to the matching assignment on UVACollab.***\n", 47 | "\n", 48 | "## HINTS\n", 49 | "\n", 50 | "- Be sure to comment your code\n", 51 | "- I have provided cells with general instructions for what they should contain.\n", 52 | " " 53 | ] 54 | }, 55 | { 56 | "cell_type": "markdown", 57 | "metadata": {}, 58 | "source": [ 59 | "# Name: (your name here)\n", 60 | "# User ID: (your userid here)" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "# Load in necessary modules\n", 70 | " " 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 1, 76 | "metadata": {}, 77 | "outputs": [], 78 | "source": [ 79 | "# Read in the data from the decision_data.csv file to a Pandas DataFrame\n", 80 | "# (This is data from a perceptual decision-making task, BTW)\n", 81 | "\n" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": 3, 
87 | "metadata": {}, 88 | "outputs": [], 89 | "source": [ 90 | "# Use the .head method of the DataFrame to show the first few lines of the file\n", 91 | "# The columns are:\n", 92 | "# correct: whether they made a correct response (1=Correct, 0=Incorrect)\n", 93 | "# rt: their reaction time in seconds\n", 94 | "# cond: whether they were in a speeded, accuracy, or neutral condition\n", 95 | "# log_rt: log-transformed reaction time\n" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": null, 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [ 104 | "# Calculate the accuracy (proportion correct) for each condition\n", 105 | "# (HINT: loop over conditions and select the rows corresponding to the current\n", 106 | "# condition, then determine the proportion of correct responses for those rows)\n" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": 4, 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [ 115 | "# Calculate the mean reaction time for each condition\n" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": null, 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "# Plot a histogram of the Speed condition reaction times\n", 125 | "\n", 126 | "# On the same figure, plot a histogram of the Accuracy condition reaction times\n", 127 | "\n", 128 | "# (HINT: See histogram example in the first lesson.)\n" 129 | ] 130 | } 131 | ], 132 | "metadata": { 133 | "kernelspec": { 134 | "display_name": "Python 3 (ipykernel)", 135 | "language": "python", 136 | "name": "python3" 137 | }, 138 | "language_info": { 139 | "codemirror_mode": { 140 | "name": "ipython", 141 | "version": 3 142 | }, 143 | "file_extension": ".py", 144 | "mimetype": "text/x-python", 145 | "name": "python", 146 | "nbconvert_exporter": "python", 147 | "pygments_lexer": "ipython3", 148 | "version": "3.9.7" 149 | } 150 | }, 151 | "nbformat": 4, 152 | "nbformat_minor": 4 153 | } 154 | 
-------------------------------------------------------------------------------- /syllabus/syllabus.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'Introduction to Quantified Cognition' 3 | subtitle: 'Psychology 5332 --- Fall 2022' 4 | author: Per B. Sederberg, PhD 5 | documentclass: scrartcl 6 | date: Version 2022-09-08 7 | header-includes: 8 | - \usepackage{array,hyperref} 9 | - \usepackage[letterpaper, margin=1in]{geometry} 10 | - \usepackage{libertine} 11 | - \usepackage[libertine]{newtxmath} 12 | --- 13 | 14 | 15 | # Quick Reference 16 | 17 | Credit: 18 | : 3 units 19 | 20 | Time: 21 | : Thursday, 14:00 -- 16:30 22 | 23 | Place: 24 | : Gilmer 245 25 | 26 | Text: 27 | : Assigned readings 28 | 29 | Course Web Page: 30 | : Discord / [GitHub](https://github.com/compmem/QuantCog) 31 | 32 | Course assistants: 33 | : Adam Fenton (af5fn@virginia.edu) 34 | 35 | Instructor: 36 | : Dr. Per Sederberg 37 | 38 | Office: 39 | : Gilmer 412 40 | 41 | E-mail: 42 | : pbs5u@virginia.edu 43 | 44 | Lab Website: 45 | : [Computational Memory Lab](https://compmem.org) 46 | 47 | Office hours: 48 | : Mondays, 2:30--3pm and Wednesdays, 2--2:30pm 49 | 50 | Final: 51 | : Project-based 52 | 53 | 54 | 55 | # Overview and Course Objectives 56 | 57 | Much of science, and especially psychology and neuroscience, involves testing and updating verbal theories, which are often imprecise and under-specified. Only by quantitative modeling of our experimental results can we hope to make significant progress in understanding the mechanisms that underlie cognition. Furthermore, only by quantifying and defining knowledge via mathematical principles can we achieve the effective interdisciplinary communication necessary for combining approaches to make useful progress towards understanding a system as complex as the brain. 
This course aims to equip students with the skills to a) think more deeply about the mechanisms underlying observed neural and behavioral phenomena, improving scientific thinking, and b) develop computational models that enable more precise, quantifiably testable hypotheses, improving the scientific process. 58 | 59 | Topics covered in the course will include: computer programming, probability theory, Bayesian statistics, computational modeling, model comparison, neuroscience methods, neural networks, and open science. Taught at a high level, yet with practical hands-on examples for every topic, this course will provide the foundation necessary to understand, develop, and compare mechanistic models of cognitive processes. 60 | 61 | # Computing Requirements 62 | 63 | This is a computational class and all work will be performed on a computer and almost entirely with the Python programming language within Jupyter notebooks. You will need to bring a laptop running Windows, OSX, or Linux to every class. 64 | 65 | There are some online tools that provide free hosting and execution for Jupyter notebooks (e.g., [Google Collaboratory](https://colab.research.google.com/)). These hosting services limit the computational resources, so you may want to run [Jupyter](https://jupyter.org) notebooks directly on your computer. This will also allow you to incorporate these approaches into your own research more easily. Thus, my recommendation is that you install and use the [Anaconda Python](https://www.anaconda.com/download/) distribution for your OS. 66 | 67 | We will spend time on the first day of class to ensure everyone has a functioning computer that will be able to run everything necessary for the course either via Google Collaboratory or locally. 68 | 69 | 70 | # Assistance 71 | 72 | I am eager for you to get as much as possible from this course, so please feel free to come to me with any questions you have. 
That said, science is a team effort and in order to reduce duplication of questions and discussions, we will be using Discord for all class communication and discussions. Please do not email me unless there is an issue with Discord. If you'd prefer to have a one-on-one discussion it is possible to send direct messages in Discord. We will spend some time on the first day ensuring everyone is set up to use Discord. I will also have weekly office hours to which you are always welcome to come and have in-person discussions. 73 | 74 | 75 | # Schedule 76 | 77 | The following is the general order of the topics covered in the course. Please note that sometimes we may cover multiple topics in a single lecture, or spend more than one lecture on a single topic, and this list is subject to modification at any time. That said, all major changes will also include an update to the syllabus, so it will remain a point of reference. 78 | 79 | 0. Overview 80 | 1. Programming and Principles of Open Science 81 | 2. Probability 82 | 3. Data as a Random Variable 83 | 4. Quantifying Uncertainty 84 | 5. Bayesian Regression 85 | 6. Bayesian Data Analysis 86 | 7. Cognitive Process Models of Decision-making 87 | 8. Interactive Model Exploration 88 | 9. Bayesian Fits of Process Models 89 | 10. Inference with Process Models 90 | 11. Models of Memory 91 | 12. Models of Reinforcement Learning 92 | 13. Bring Your Own Data (BYOD) Project 93 | 94 | 95 | # Readings 96 | 97 | There is no textbook for this course, however, there will often be relevant readings that accompany each lesson. PDF versions of these will be shared along with the Jupyter notebooks and lectures for each class. 98 | 99 | 100 | # Evaluation 101 | 102 | This is a graduate-level course, which means that much of the burden of staying motivated to learn is transferred to the student. As such, there will not be any in-class exams. 
Students will be evaluated on the basis of: 103 | 104 | - Lesson exercises / class participation (30 pts) 105 | - Mini projects (30 pts) 106 | - Final project (40 pts) 107 | 108 | for a total of 100 points. 109 | 110 | The course will be graded using the standard grading scale with your percentage of points earned out of the total possible points rounding to the nearest whole percentage point. 111 | 112 | -------------------------------------------------------------------------------- /notebooks/nu_wfpt.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import numpy as np 4 | import scipy.stats.distributions as dists 5 | import numba 6 | from numba import jit 7 | 8 | def wfpt_like(choices, rts, v_mean, a, w_mode, w_std=0.0, 9 | v_std=0.0, t0=0.0, nsamp=5000, err=.0001, 10 | dt=0.001, max_time=5.0): 11 | """ 12 | Calculate WFPT likelihoods for choices and rts 13 | 14 | 15 | """ 16 | # fill likes 17 | likes = np.zeros(len(choices)) 18 | 19 | # process the v_mean and w_mode 20 | if w_std > 0.0: 21 | # calc with beta distribution 22 | mu = w_mode 23 | sigma = w_std 24 | kappa = mu * (1 - mu) / sigma**2 - 1 25 | alpha = mu * kappa 26 | beta = (1 - mu) * kappa 27 | 28 | if alpha <= 0.0 or beta <= 0.0: 29 | # illegal param 30 | return likes 31 | 32 | # sample from the beta distribution 33 | w = dists.beta(alpha, beta).rvs(nsamp) 34 | else: 35 | w = w_mode 36 | 37 | # proc the v 38 | if v_std > 0.0: 39 | v = dists.norm(v_mean, v_std).rvs(nsamp)[np.newaxis] 40 | else: 41 | v = v_mean 42 | 43 | # loop over the two choices 44 | # first choice 1, no change in v or w 45 | ind = np.where(choices == 1)[0] 46 | 47 | # loop over rts, setting likes for that choice 48 | if len(ind) > 0: 49 | #likes[ind] = np.array([wfpt(rts[i]-t0, v=v, a=a, w=w, err=err) 50 | # for i in ind]) 51 | likes[ind] = wfpt_loop(rts[ind]-t0, v=v, a=a, w=w, err=err) 52 | #for i in ind: 53 | # # calc the like, adjusting rt with t0 54 | # likes[i] = wfpt(rts[i]-t0, 
v=v, a=a, w=w, err=err) 55 | 56 | # then choice 2 with flip of v and w 57 | v = -v 58 | w = 1-w 59 | ind = np.where(choices == 2)[0] 60 | 61 | # loop over rts, setting likes for that choice 62 | if len(ind) > 0: 63 | #likes[ind] = np.array([wfpt(rts[i]-t0, v=v, a=a, w=w, err=err) 64 | # for i in ind]) 65 | likes[ind] = wfpt_loop(rts[ind]-t0, v=v, a=a, w=w, err=err) 66 | 67 | #for i in ind: 68 | # # calc the like, adjusting rt with t0 69 | # likes[i] = wfpt(rts[i]-t0, v=v, a=a, w=w, err=err) 70 | 71 | # finally the non-responses (the v and w can be either direction) 72 | ind = np.where(choices == 0)[0] 73 | if len(ind) > 0: 74 | likes[ind] = wfpt_gen(v_mean=v_mean, a=a, w_mode=w_mode, 75 | w_std=w_std, v_std=v_std, 76 | wfpt_nsamp=nsamp, t0=t0, 77 | dt=dt, max_time=max_time, 78 | err=err, only_prob_no_resp=True) 79 | 80 | return likes 81 | 82 | @jit(nopython=True) 83 | def wfpt_loop(rts, v, a, w, err=0.0001): 84 | return np.array([wfpt(t, v=v, a=a, w=w, err=err) 85 | for t in rts]) 86 | 87 | @jit(nopython=True) 88 | def wfpt(t, v, a, w, err=.0001): 89 | """ 90 | Wiener First Passage of Time 91 | 92 | Params 93 | ------ 94 | t : reaction time 95 | v : drift rate 96 | a : boundary 97 | w : starting point 98 | err : algorithm tolearance 99 | 100 | Returns 101 | ------- 102 | p : likelihood for the specified time and params 103 | 104 | Reference 105 | --------- 106 | 107 | https://compcogscisydney.org/publications/NavarroFuss2009.pdf 108 | 109 | """ 110 | # this function is ported from R, hence the comments 111 | # if(t>0){ 112 | if t <= 0.0: 113 | return 0.0 114 | 115 | # make w and v 2d 116 | w = np.atleast_2d(np.asarray(w)) 117 | v = np.atleast_2d(np.asarray(v)) 118 | 119 | # tt=t/(a^2) 120 | tt = t/(a**2) 121 | # if(pi*tt*err<1){ 122 | if (np.pi*tt*err) < 1.0: 123 | # kl=sqrt(-2*log(pi*tt*err)/(pi^2*tt)) 124 | kl = np.sqrt(-2.*np.log(np.pi*tt*err)/(np.pi**2*tt)) 125 | # kl=max(kl,1/(pi*sqrt(tt))) 126 | kl = max(kl, 1/(np.pi * np.sqrt(tt))) 127 | # } else { 128 | 
else: 129 | # kl=1/(pi*sqrt(tt)) 130 | kl = 1 / (np.pi * np.sqrt(tt)) 131 | # } 132 | # if(2*sqrt(2*pi*tt)*err<1){ 133 | if (2*np.sqrt(2.*np.pi*tt)*err) < 1.0: 134 | # ks=2+sqrt(-2*tt*log(2*sqrt(2*pi*tt)*err)) 135 | ks = 2 + np.sqrt(-2. * tt * np.log(2. * np.sqrt(2*np.pi*tt)*err)) 136 | # ks=max(ks,sqrt(tt)+1) 137 | ks = max(ks, np.sqrt(tt) + 1) 138 | # } else { 139 | else: 140 | # ks=2 141 | ks = 2.0 142 | # } 143 | # p=0 144 | p = 0.0 145 | # if(ks np.random.rand()).argmax() 210 | for i in range(nsamp)] 211 | 212 | return choices[inds], rts[inds]+t0 213 | 214 | -------------------------------------------------------------------------------- /notebooks/flanker_s1.csv: -------------------------------------------------------------------------------- 1 | ,cond,correct,rt,trial 2 | 1,-,True,0.9115760326385498,1 3 | 2,-,True,0.5126774311065674,2 4 | 3,-,True,0.5843076705932617,3 5 | 4,+,True,0.4612798690795898,4 6 | 7,+,True,0.6971240043640137,5 7 | 8,+,True,0.3981466293334961,6 8 | 9,-,True,0.7144472599029541,7 9 | 10,+,True,0.390709400177002,8 10 | 12,-,True,0.5698244571685791,9 11 | 13,-,True,0.5795142650604248,10 12 | 14,-,True,0.3563413619995117,11 13 | 15,+,True,0.3592934608459473,12 14 | 16,+,True,0.3460385799407959,13 15 | 17,+,True,0.3811142444610596,14 16 | 20,+,True,0.4252820014953613,15 17 | 21,-,True,0.5418009757995605,16 18 | 22,+,True,0.42812633514404297,17 19 | 24,-,True,0.45196056365966797,18 20 | 25,+,True,0.3789825439453125,19 21 | 26,-,True,0.3845417499542236,20 22 | 28,+,True,0.3867197036743164,21 23 | 29,+,True,0.4536299705505371,22 24 | 31,-,True,0.4848754405975342,23 25 | 33,-,True,0.4933738708496094,24 26 | 35,-,True,0.4644656181335449,25 27 | 36,-,True,0.5044693946838378,26 28 | 38,+,True,0.4120559692382813,27 29 | 41,+,True,0.3065025806427002,28 30 | 42,-,False,0.3204519748687744,29 31 | 43,-,True,0.4855959415435791,30 32 | 44,+,True,0.3864028453826904,31 33 | 47,+,True,0.3883540630340576,32 34 | 48,+,True,0.3629059791564941,33 35 | 
49,+,True,0.35448384284973145,34 36 | 50,+,False,0.3439924716949463,35 37 | 52,-,True,0.569127082824707,36 38 | 53,+,True,0.3721165657043457,37 39 | 54,-,True,0.5703160762786865,38 40 | 56,-,False,0.18010807037353516,39 41 | 57,+,True,1.0219299793243408,40 42 | 58,-,True,0.6253151893615722,41 43 | 60,+,True,0.410128116607666,42 44 | 61,-,True,0.44535231590271,43 45 | 62,+,True,0.3740298748016357,44 46 | 63,-,True,0.4718422889709473,45 47 | 66,+,True,0.3427133560180664,46 48 | 67,-,True,0.4228656291961669,47 49 | 68,+,True,0.4137744903564453,48 50 | 69,+,False,0.2345542907714844,49 51 | 74,-,True,0.4245104789733887,50 52 | 76,-,True,0.3852269649505615,51 53 | 77,+,True,0.3454964160919189,52 54 | 78,+,True,0.2823350429534912,53 55 | 80,-,True,0.3826303482055664,54 56 | 81,+,True,0.3921265602111816,55 57 | 82,-,True,0.3963630199432373,56 58 | 86,+,True,0.4378089904785156,57 59 | 87,-,True,0.44590115547180176,58 60 | 89,-,True,0.4157116413116455,59 61 | 90,-,True,0.5500514507293701,60 62 | 91,-,True,0.4362936019897461,61 63 | 92,+,True,0.35184693336486816,62 64 | 93,-,True,0.5263602733612059,63 65 | 94,+,True,0.3029153347015381,64 66 | 96,+,True,1.5301799774169922,65 67 | 98,+,True,0.39968085289001465,66 68 | 99,+,True,0.4066493511199951,67 69 | 100,+,True,0.3269674777984619,68 70 | 104,-,False,0.4857025146484375,69 71 | 106,+,True,0.4401476383209229,70 72 | 109,-,True,0.4774024486541748,71 73 | 110,+,True,0.4011976718902588,72 74 | 112,+,True,0.3549649715423584,73 75 | 113,+,True,0.3723564147949219,74 76 | 115,-,True,0.4395184516906738,75 77 | 116,-,True,0.3876631259918213,76 78 | 117,-,True,0.4918899536132813,77 79 | 119,+,True,0.40105295181274414,78 80 | 121,+,True,0.4229738712310791,79 81 | 123,-,True,1.1748614311218262,80 82 | 124,-,True,0.4871964454650879,81 83 | 125,+,True,0.41228437423706055,82 84 | 126,-,True,0.5855889320373535,83 85 | 129,+,True,0.4357678890228272,84 86 | 130,-,True,0.6126015186309814,85 87 | 132,+,True,0.37445712089538574,86 88 | 
133,-,True,0.5254459381103516,87 89 | 134,-,True,0.9857189655303956,88 90 | 135,-,True,0.5904889106750488,89 91 | 136,+,True,0.4330170154571533,90 92 | 137,-,True,0.5264213085174559,91 93 | 138,-,True,0.5275068283081055,92 94 | 139,-,True,0.4703524112701416,93 95 | 140,+,True,0.4220206737518311,94 96 | 141,-,True,0.476876974105835,95 97 | 142,+,True,0.4251422882080079,96 98 | 145,-,False,0.6810948848724365,97 99 | 146,+,False,0.24347591400146484,98 100 | 147,-,False,0.1915130615234375,99 101 | 148,+,True,0.2590925693511963,100 102 | 150,+,True,0.4661309719085693,101 103 | 152,-,False,0.17673206329345706,102 104 | 154,+,False,0.33499765396118164,103 105 | 155,+,True,0.32630205154418945,104 106 | 158,-,False,0.1840507984161377,105 107 | 159,-,False,0.32958340644836426,106 108 | 161,-,True,0.30792951583862305,107 109 | 164,+,True,0.25392889976501465,108 110 | 165,+,True,0.267991304397583,109 111 | 167,+,True,0.4525375366210938,110 112 | 168,+,True,0.4498729705810546,111 113 | 169,+,False,0.23016762733459475,112 114 | 170,-,False,0.23501396179199216,113 115 | 173,+,True,0.5491244792938232,114 116 | 174,-,False,0.4172935485839844,115 117 | 175,+,True,0.4154019355773925,116 118 | 176,-,True,0.547001838684082,117 119 | 177,-,True,0.43862152099609375,118 120 | 180,+,True,0.35133862495422363,119 121 | 181,-,True,0.5441994667053223,120 122 | 182,-,True,0.2530770301818848,121 123 | 183,-,False,0.39063596725463867,122 124 | 185,+,False,0.2737736701965332,123 125 | 186,+,True,0.7832450866699219,124 126 | 187,-,False,0.3422873020172119,125 127 | 188,-,True,0.17482590675354004,126 128 | 189,-,True,0.27208781242370605,127 129 | 191,+,True,0.3442208766937256,128 130 | 192,+,True,0.4403531551361084,129 131 | 194,-,False,0.2884039878845215,130 132 | 195,+,False,0.5041818618774414,131 133 | 196,-,True,1.5041122436523438,132 134 | 197,+,False,0.2529122829437256,133 135 | 199,+,True,0.554379940032959,134 136 | 200,+,False,0.2426929473876953,135 137 | 203,+,False,0.43697452545166016,136 
138 | 205,-,False,0.9421389102935792,137 139 | 207,+,False,0.2275686264038085,138 140 | 210,-,True,0.33147454261779785,139 141 | 211,-,False,0.20042943954467773,140 142 | 212,-,False,0.2631936073303223,141 143 | 215,+,True,0.5302526950836182,142 144 | 216,+,True,0.3178434371948243,143 145 | 217,-,True,0.5081243515014648,144 146 | 218,-,True,0.4801976680755615,145 147 | 219,-,True,0.7250888347625731,146 148 | 220,-,True,0.3680286407470703,147 149 | 223,+,True,0.4787464141845703,148 150 | 224,-,False,0.4276418685913085,149 151 | 225,+,True,0.7205216884613037,150 152 | 226,+,True,0.3888981342315673,151 153 | 227,-,True,0.5176606178283691,152 154 | 228,+,True,0.44089603424072266,153 155 | 230,+,True,0.36641287803649897,154 156 | 231,-,True,0.5472681522369385,155 157 | 232,+,True,0.44290757179260254,156 158 | 235,-,True,0.5582334995269777,157 159 | 236,-,True,0.5291450023651123,158 160 | 238,+,True,1.7495467662811282,159 161 | 239,-,True,0.3862595558166504,160 162 | 242,-,False,0.3152415752410889,161 163 | 243,-,False,0.4952914714813232,162 164 | 244,-,True,0.549074649810791,163 165 | 246,-,True,0.19676661491394049,164 166 | 247,-,False,0.2703747749328613,165 167 | 249,+,True,0.4145069122314453,166 168 | 251,-,False,0.734363317489624,167 169 | 255,+,True,0.44073057174682617,168 170 | 256,+,True,0.5050375461578369,169 171 | 257,+,True,0.4945425987243652,170 172 | 258,-,True,0.8648681640625,171 173 | 259,+,True,0.5564439296722412,172 174 | 260,+,True,0.3980531692504882,173 175 | 261,+,True,0.39580392837524414,174 176 | 264,-,True,0.5603125095367432,175 177 | 265,-,True,0.4026720523834229,176 178 | 266,-,True,0.35220813751220703,177 179 | 267,+,True,0.2203242778778076,178 180 | 269,+,True,0.4287271499633789,179 181 | 270,-,False,0.5620155334472656,180 182 | 272,+,True,0.3481199741363525,181 183 | 273,-,True,0.6021831035614014,182 184 | 275,-,True,0.5044109821319579,183 185 | 276,+,True,0.551020622253418,184 186 | 277,+,True,0.3553736209869385,185 187 | 
278,-,True,0.4748656749725342,186 188 | 280,+,True,0.7197198867797852,187 189 | 283,+,True,0.31537437438964844,188 190 | 284,-,True,0.4955270290374756,189 191 | 285,+,True,0.36916351318359375,190 192 | -------------------------------------------------------------------------------- /notebooks/waldrace.py: -------------------------------------------------------------------------------- 1 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 2 | # ex: set sts=4 ts=4 sw=4 et: 3 | ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 4 | # 5 | # See the COPYING file distributed along with the CogMod package for the 6 | # copyright and license terms. 7 | # 8 | ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 9 | 10 | import numpy as np 11 | from scipy.stats.distributions import invgauss, norm 12 | 13 | 14 | def trdm_like(choice, t, v, alpha, theta, sig, rho=0, timer=True): 15 | """Timed Racing Diffusion Model (TRDM) 16 | 17 | rho - Scaler between 0 and 1 for the tradeoff between the timer 18 | giving rise to chance decisions (when rho=0) and the timer giving 19 | rise to the decision based on the ratio of the CDF for the choice 20 | to the CDF of all choices (when rho=1), approximating the 21 | proportion of evidence for that choice. 
22 | 23 | """ 24 | d_choice, d_timer = _trdm_density(choice, t, v, 25 | alpha, theta, sig, 26 | rho=rho, timer=timer) 27 | return d_choice + d_timer 28 | 29 | def _trdm_density(choice, t, v, alpha, theta, sig, rho=0, timer=True): 30 | # make sure params are arrays 31 | choice = np.atleast_1d(choice) 32 | t = np.atleast_1d(t) 33 | v = np.atleast_1d(v) 34 | if not isinstance(v, np.floating): 35 | v = v.astype(np.float64) 36 | 37 | # set n_choice based on the drift rates 38 | # (last drift rate is for timer) 39 | n_race = len(v) 40 | if timer: 41 | n_choice = n_race-1 42 | else: 43 | n_choice = n_race 44 | 45 | # if only supplying same vals for other params 46 | # replicate them 47 | alpha = np.atleast_1d(alpha) 48 | if len(alpha) < n_race: 49 | alpha = alpha.repeat(n_race) 50 | 51 | theta = np.atleast_1d(theta) 52 | if len(theta) < n_race: 53 | theta = theta.repeat(n_race) 54 | 55 | sig = np.atleast_1d(sig) 56 | if len(sig) < n_race: 57 | sig = sig.repeat(n_race) 58 | 59 | # make the choice values zero-based 60 | choice = choice-1 61 | 62 | # pick the unique race indices 63 | uniq_choice = np.arange(n_race) 64 | 65 | # initialize density values to zeros 66 | d_choice = np.zeros(len(t)) 67 | d_timer = np.zeros(len(t)) 68 | 69 | # fix the drift rates as needed 70 | bad_ind = (v 0), axis=1) 89 | if ind.sum() > 0: 90 | # calc p not sel for all choices 91 | # putting everything in choice, not timer 92 | d_choice[ind] = np.product(not_sel[ind], axis=1) 93 | 94 | # loop over each choice 95 | for j in range(n_choice): 96 | # pick the trials with that choice 97 | ind = (choice==j) & (x[:, j] > 0) 98 | 99 | if ind.sum() == 0: 100 | # there weren't any trials with that choice, so skip it 101 | continue 102 | 103 | # process the selected choice 104 | # calculate the probability of making that response 105 | # at the specified rt 106 | f_sel = invgauss(mu[j]/lamb[j]).pdf(x[ind][:, j]/lamb[j])*(1/lamb[j]) 107 | 108 | # get the p not selected for non-selected choices (includes 
timer) 109 | p_term = np.product(not_sel[ind][:, uniq_choice!=j], 110 | axis=1) 111 | 112 | # timer choice based on ratio of cdfs of choice accumulators (or chance) 113 | if timer and rho > 0: 114 | # mixture of chance and probability of accumulator being ahead 115 | # pick the times for the non-selected options 116 | nctimes = x[ind][:, uniq_choice!=j][:, :-1] 117 | 118 | # pick the times for the selected options 119 | ctimes = x[ind][:, uniq_choice==j] 120 | 121 | # calculate the differences in means at those times 122 | mu_diff = (v[uniq_choice!=j][:-1]*nctimes - 123 | v[j]*ctimes) 124 | 125 | # calc sum of variances (then take sqrt) at those times 126 | std_sum = np.sqrt((sig[uniq_choice!=j][:-1]*np.sqrt(nctimes))**2 + \ 127 | (sig[j]*np.sqrt(ctimes))**2) 128 | 129 | # CDF at 0 tells the probability of being ahead for each choice 130 | # Product is the probability of being ahead of all other choices 131 | p_ahead = np.product(norm(loc=mu_diff, scale=std_sum).cdf(0), axis=1) 132 | 133 | # combine with chance performance based on rho 134 | p_choice = rho*p_ahead + (1-rho) * 1./n_choice 135 | else: 136 | # pick by chance 137 | p_choice = 1./n_choice 138 | 139 | # calculate the density for that choice 140 | # 1) p(choice) * p(other choices not done) * p(timer not done) 141 | # 2) p(choice) * p(timer going off) * p(all choices not done) 142 | d_choice[ind] = (f_sel * p_term) 143 | d_timer[ind] = (p_choice * f_timer[ind] * np.product(not_sel[ind, :-1], axis=1)) 144 | return d_choice, d_timer 145 | 146 | 147 | def trdm_gen(v, alpha, theta, sig, rho=0, timer=True, 148 | dt=0.001, max_time=5.0, nsamp=1000): 149 | 150 | # get choice counts 151 | n_race = len(v) 152 | if timer: 153 | n_choice = n_race-1 154 | else: 155 | n_choice = n_race 156 | 157 | # generate time range 158 | trange = np.arange(dt, max_time+dt, dt) 159 | 160 | # calc cdf of each choice 161 | rts = np.concatenate([trange]*n_choice + [[-1.]]) 162 | ntimes = len(trange) 163 | choices = np.ones(len(rts), 
dtype=np.int) 164 | for i in range(1, n_choice): 165 | choices[i*ntimes:i*ntimes+ntimes] = i+1 166 | choices[-1] = 0 167 | timer_lookup = np.array([0]*(len(rts)-1) + [1]*(len(rts)-1) + [-1]) 168 | rts_lookup = np.concatenate([rts[:-1], rts[:-1], [-1]]) 169 | choices_lookup = np.concatenate([choices[:-1], choices[:-1], [0]]) 170 | 171 | # evaluate the likelihoods for choices and times 172 | d_choice, d_timer = _trdm_density(choices[:-1], rts[:-1], 173 | v, alpha, theta, sig, 174 | rho=rho, timer=timer) 175 | 176 | # stack the densities for choices or timer 177 | likes = np.concatenate([d_choice, d_timer]) 178 | 179 | # generate desired responses 180 | # calc cdfs 181 | cdfs = np.concatenate([(likes*dt).cumsum(), [1.0]]) 182 | 183 | # draw uniform rand numbers to determine choices and rts 184 | inds = [(cdfs > np.random.rand()).argmax() 185 | for i in range(nsamp)] 186 | 187 | return choices_lookup[inds], rts_lookup[inds], timer_lookup[inds] 188 | 189 | 190 | if __name__ == '__main__': 191 | t = np.array([.5, .6, .7, .8]) 192 | choice = np.array([2, 1, 1, 1]) 193 | v = np.array([5.5, 2.5, 0.5]) 194 | alpha = np.array([2.0]) 195 | theta = np.array([.2]) 196 | sig = np.array([1.0]) 197 | 198 | trdm_like(t, choice, v, alpha, theta, sig, rho=.5) 199 | -------------------------------------------------------------------------------- /assignments/dists.py: -------------------------------------------------------------------------------- 1 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 2 | # ex: set sts=4 ts=4 sw=4 et: 3 | ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 4 | # 5 | # See the COPYING file distributed along with the RunDEMC package for the 6 | # copyright and license terms. 
7 | # 8 | ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 9 | 10 | import numpy as np 11 | import scipy.stats.distributions as dists 12 | from scipy.special import gammaln 13 | 14 | 15 | def logit(x): 16 | """Returns the logit transform of x.""" 17 | return np.log(x / (1. - x)) 18 | 19 | 20 | def invlogit(x): 21 | """Return the inverse logit transform of x.""" 22 | return 1. / (1. + np.exp(-x)) 23 | 24 | 25 | def log_factorial(x): 26 | """Returns the logarithm of x! 27 | Also accepts lists and NumPy arrays in place of x.""" 28 | return gammaln(np.array(x) + 1) 29 | 30 | 31 | def multinomial(xs, ps): 32 | """ 33 | Calculate multinomial probability. 34 | 35 | xs = vector of observations in each choice 36 | ps = probabilities of observing each choice 37 | """ 38 | xs, ps = np.array(xs), np.array(ps) 39 | n = xs.sum() 40 | result = log_factorial(n) - np.sum(log_factorial(xs)) + \ 41 | np.sum(xs * np.log(ps)) 42 | return np.exp(result) 43 | 44 | 45 | def normal(mean=0.0, std=1.0): 46 | return dists.norm(loc=mean, scale=std) 47 | 48 | 49 | import scipy.stats.distributions 50 | from scipy.stats._distn_infrastructure import argsreduce 51 | _norm_cdf = scipy.stats.distributions._continuous_distns._norm_cdf 52 | _norm_sf = scipy.stats.distributions._continuous_distns._norm_sf 53 | _norm_isf = scipy.stats.distributions._continuous_distns._norm_isf 54 | _norm_ppf = scipy.stats.distributions._continuous_distns._norm_ppf 55 | np = scipy.stats.distributions._continuous_distns.np 56 | log = np.log 57 | my_tn_gen = scipy.stats.distributions._continuous_distns.truncnorm_gen 58 | 59 | 60 | def _argcheck_fixed(self, a, b): 61 | self.a = a 62 | self.b = b 63 | self._nb = _norm_cdf(b) 64 | self._na = _norm_cdf(a) 65 | self._sb = _norm_sf(b) 66 | self._sa = _norm_sf(a) 67 | if np.ndim(self.a) == 0: 68 | if self.a > 0: 69 | self._delta = -(self._sb - self._sa) 70 | else: 71 | self._delta = self._nb - self._na 72 | else: 73 | self._delta = 
np.zeros_like(self._sa) 74 | self._delta[self.a > 0] = - \ 75 | (self._sb[self.a > 0] - self._sa[self.a > 0]) 76 | self._delta[self.a <= 0] = self._nb[self.a <= 0] - \ 77 | self._na[self.a <= 0] 78 | self._logdelta = log(self._delta) 79 | return np.all((a - b) != 0.0) 80 | 81 | 82 | def _ppf_fixed(self, q, a, b): 83 | if np.ndim(self.a) == 0: 84 | if self.a > 0: 85 | return _norm_isf(q * self._sb + self._sa * (1.0 - q)) 86 | else: 87 | return _norm_ppf(q * self._nb + self._na * (1.0 - q)) 88 | else: 89 | out = np.zeros_like(self._sa) 90 | ind = self.a > 0 91 | out[ind] = _norm_isf(q * self._sb[ind] + self._sa[ind] * (1.0 - q)) 92 | out[~ind] = _norm_ppf(q * self._nb[~ind] + self._na[~ind] * (1.0 - q)) 93 | return out 94 | 95 | 96 | def _pdf_fixed(self, x, *args, **kwds): 97 | """ 98 | Probability density function at x of the given RV. 99 | Parameters 100 | ---------- 101 | x : array_like 102 | quantiles 103 | arg1, arg2, arg3,... : array_like 104 | The shape parameter(s) for the distribution (see docstring of the 105 | instance object for more information) 106 | loc : array_like, optional 107 | location parameter (default=0) 108 | scale : array_like, optional 109 | scale parameter (default=1) 110 | Returns 111 | ------- 112 | pdf : ndarray 113 | Probability density function evaluated at x 114 | """ 115 | args, loc, scale = self._parse_args(*args, **kwds) 116 | x, loc, scale = list(map(np.asarray, (x, loc, scale))) 117 | args = tuple(map(np.asarray, args)) 118 | x = np.asarray((x - loc) * 1.0 / scale) 119 | cond0 = self._argcheck(*args) & (scale > 0) 120 | cond1 = (scale > 0) & (x >= self.a) & (x <= self.b) 121 | cond = cond0 & cond1 122 | output = np.zeros(np.shape(cond), 'd') 123 | np.putmask(output, (1 - cond0) + np.isnan(x), self.badvalue) 124 | if any(cond.flatten()): 125 | goodargs = argsreduce(cond | ~cond, *((x,) + args + (scale,))) 126 | scale, goodargs = goodargs[-1], goodargs[:-1] 127 | ccond = cond.copy() 128 | ccond.shape = goodargs[0].shape 129 | 
output[cond] = (self._pdf(*goodargs) / scale)[ccond] 130 | #place(output, cond, self._pdf(*goodargs) / scale) 131 | if output.ndim == 0: 132 | return output[()] 133 | return output 134 | 135 | 136 | my_tn_gen._argcheck = _argcheck_fixed 137 | my_tn_gen._ppf = _ppf_fixed 138 | my_tn_gen.pdf = _pdf_fixed 139 | my_tn = my_tn_gen(name='truncnorm') 140 | 141 | 142 | def trunc_normal(mean=0.0, std=1.0, lower=0.0, upper=1.0): 143 | a = (np.array(lower) - np.array(mean)) / np.array(std) 144 | b = (np.array(upper) - np.array(mean)) / np.array(std) 145 | return my_tn(a, b, loc=mean, scale=std) 146 | 147 | 148 | def uniform(lower=0.0, upper=1.0): 149 | return dists.uniform(loc=lower, scale=upper - lower) 150 | 151 | 152 | def beta(alpha=.5, beta=.5): 153 | return dists.beta(alpha, beta) 154 | 155 | 156 | def gamma(alpha=1.0, beta=1.0): 157 | """ 158 | alpha = k 159 | beta = 1/theta 160 | """ 161 | return dists.gamma(alpha, scale=1. / beta) 162 | 163 | 164 | def invgamma(alpha=1.0, beta=1.0): 165 | """ 166 | """ 167 | return dists.invgamma(alpha, scale=beta) 168 | 169 | 170 | def exp(lam=1.0): 171 | return dists.expon(scale=1. / lam) 172 | 173 | 174 | def poisson(lam=1.0): 175 | return dists.poisson(mu=lam) 176 | 177 | 178 | def laplace(loc=0.0, diversity=1.0): 179 | return dists.laplace(loc=loc, scale=diversity) 180 | 181 | 182 | def students_t(mean=0, std=1.0, df=1.0): 183 | return dists.t(df=df, loc=mean, scale=std) 184 | 185 | 186 | def noncentral_t(mean=0, std=1.0, df=1.0, nc=0.0): 187 | return dists.nct(df=df, nc=nc, loc=mean, scale=std) 188 | 189 | 190 | def halfcauchy(scale=1.0, loc=0.0): 191 | return dists.halfcauchy(loc=loc, scale=scale) 192 | 193 | 194 | def epa_kernel(x, delta): 195 | """ 196 | Epanechnikov kernel. 
197 | """ 198 | # make sure 1d 199 | x = np.atleast_1d(x) 200 | delta = np.atleast_1d(delta) 201 | 202 | # make sure we have matching deltas 203 | if len(delta) == 1: 204 | delta = delta.repeat(len(x)) 205 | 206 | # allocate for the weights 207 | w = np.zeros_like(x) 208 | 209 | # determine 210 | ind = (delta - np.abs(x)) > 0.0 # np.abs(x) 1.0) | 264 | (self._weights < 0.0)).sum(1) > 0 265 | pdfs[bad_ind] = 0.0 266 | 267 | return pdfs 268 | 269 | def rvs(self, size): 270 | # calc the rvs 271 | rv = np.array([d.rvs(size) 272 | for d in self._dist_list]) 273 | 274 | # handle the weights 275 | inds = np.array([np.random.choice(list(range(len(self._dist_list))), 276 | size=rv.shape[2:], 277 | p=self._weights[i]) 278 | for i in range(len(self._weights))]) 279 | 280 | return rv[inds, np.arange(len(inds))] 281 | -------------------------------------------------------------------------------- /notebooks/dists.py: -------------------------------------------------------------------------------- 1 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 2 | # ex: set sts=4 ts=4 sw=4 et: 3 | ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 4 | # 5 | # See the COPYING file distributed along with the RunDEMC package for the 6 | # copyright and license terms. 7 | # 8 | ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 9 | 10 | import numpy as np 11 | import scipy.stats.distributions as dists 12 | from scipy.special import gammaln 13 | 14 | 15 | def logit(x): 16 | """Returns the logit transform of x.""" 17 | return np.log(x / (1. - x)) 18 | 19 | 20 | def invlogit(x): 21 | """Return the inverse logit transform of x.""" 22 | return 1. / (1. + np.exp(-x)) 23 | 24 | 25 | def log_factorial(x): 26 | """Returns the logarithm of x! 
27 | Also accepts lists and NumPy arrays in place of x.""" 28 | return gammaln(np.array(x) + 1) 29 | 30 | 31 | def multinomial(xs, ps): 32 | """ 33 | Calculate multinomial probability. 34 | 35 | xs = vector of observations in each choice 36 | ps = probabilities of observing each choice 37 | """ 38 | xs, ps = np.array(xs), np.array(ps) 39 | n = xs.sum() 40 | result = log_factorial(n) - np.sum(log_factorial(xs)) + \ 41 | np.sum(xs * np.log(ps)) 42 | return np.exp(result) 43 | 44 | 45 | def normal(mean=0.0, std=1.0): 46 | return dists.norm(loc=mean, scale=std) 47 | 48 | 49 | import scipy.stats.distributions 50 | from scipy.stats._distn_infrastructure import argsreduce 51 | _norm_cdf = scipy.stats.distributions._continuous_distns._norm_cdf 52 | _norm_sf = scipy.stats.distributions._continuous_distns._norm_sf 53 | _norm_isf = scipy.stats.distributions._continuous_distns._norm_isf 54 | _norm_ppf = scipy.stats.distributions._continuous_distns._norm_ppf 55 | np = scipy.stats.distributions._continuous_distns.np 56 | log = np.log 57 | my_tn_gen = scipy.stats.distributions._continuous_distns.truncnorm_gen 58 | 59 | 60 | def _argcheck_fixed(self, a, b): 61 | self.a = a 62 | self.b = b 63 | self._nb = _norm_cdf(b) 64 | self._na = _norm_cdf(a) 65 | self._sb = _norm_sf(b) 66 | self._sa = _norm_sf(a) 67 | if np.ndim(self.a) == 0: 68 | if self.a > 0: 69 | self._delta = -(self._sb - self._sa) 70 | else: 71 | self._delta = self._nb - self._na 72 | else: 73 | self._delta = np.zeros_like(self._sa) 74 | self._delta[self.a > 0] = - \ 75 | (self._sb[self.a > 0] - self._sa[self.a > 0]) 76 | self._delta[self.a <= 0] = self._nb[self.a <= 0] - \ 77 | self._na[self.a <= 0] 78 | self._logdelta = log(self._delta) 79 | return np.all((a - b) != 0.0) 80 | 81 | 82 | def _ppf_fixed(self, q, a, b): 83 | if np.ndim(self.a) == 0: 84 | if self.a > 0: 85 | return _norm_isf(q * self._sb + self._sa * (1.0 - q)) 86 | else: 87 | return _norm_ppf(q * self._nb + self._na * (1.0 - q)) 88 | else: 89 | out = 
np.zeros_like(self._sa) 90 | ind = self.a > 0 91 | out[ind] = _norm_isf(q * self._sb[ind] + self._sa[ind] * (1.0 - q)) 92 | out[~ind] = _norm_ppf(q * self._nb[~ind] + self._na[~ind] * (1.0 - q)) 93 | return out 94 | 95 | 96 | def _pdf_fixed(self, x, *args, **kwds): 97 | """ 98 | Probability density function at x of the given RV. 99 | Parameters 100 | ---------- 101 | x : array_like 102 | quantiles 103 | arg1, arg2, arg3,... : array_like 104 | The shape parameter(s) for the distribution (see docstring of the 105 | instance object for more information) 106 | loc : array_like, optional 107 | location parameter (default=0) 108 | scale : array_like, optional 109 | scale parameter (default=1) 110 | Returns 111 | ------- 112 | pdf : ndarray 113 | Probability density function evaluated at x 114 | """ 115 | args, loc, scale = self._parse_args(*args, **kwds) 116 | x, loc, scale = list(map(np.asarray, (x, loc, scale))) 117 | args = tuple(map(np.asarray, args)) 118 | x = np.asarray((x - loc) * 1.0 / scale) 119 | cond0 = self._argcheck(*args) & (scale > 0) 120 | cond1 = (scale > 0) & (x >= self.a) & (x <= self.b) 121 | cond = cond0 & cond1 122 | output = np.zeros(np.shape(cond), 'd') 123 | np.putmask(output, (1 - cond0) + np.isnan(x), self.badvalue) 124 | if any(cond.flatten()): 125 | goodargs = argsreduce(cond | ~cond, *((x,) + args + (scale,))) 126 | scale, goodargs = goodargs[-1], goodargs[:-1] 127 | ccond = cond.copy() 128 | ccond.shape = goodargs[0].shape 129 | output[cond] = (self._pdf(*goodargs) / scale)[ccond] 130 | #place(output, cond, self._pdf(*goodargs) / scale) 131 | if output.ndim == 0: 132 | return output[()] 133 | return output 134 | 135 | 136 | my_tn_gen._argcheck = _argcheck_fixed 137 | my_tn_gen._ppf = _ppf_fixed 138 | my_tn_gen.pdf = _pdf_fixed 139 | my_tn = my_tn_gen(name='truncnorm') 140 | 141 | 142 | def trunc_normal(mean=0.0, std=1.0, lower=0.0, upper=1.0): 143 | a = (np.array(lower) - np.array(mean)) / np.array(std) 144 | b = (np.array(upper) - 
np.array(mean)) / np.array(std) 145 | return my_tn(a, b, loc=mean, scale=std) 146 | 147 | 148 | def uniform(lower=0.0, upper=1.0): 149 | return dists.uniform(loc=lower, scale=upper - lower) 150 | 151 | 152 | def beta(alpha=.5, beta=.5): 153 | return dists.beta(alpha, beta) 154 | 155 | 156 | def gamma(alpha=1.0, beta=1.0): 157 | """ 158 | alpha = k 159 | beta = 1/theta 160 | """ 161 | return dists.gamma(alpha, scale=1. / beta) 162 | 163 | 164 | def invgamma(alpha=1.0, beta=1.0): 165 | """ 166 | """ 167 | return dists.invgamma(alpha, scale=beta) 168 | 169 | 170 | def exp(lam=1.0): 171 | return dists.expon(scale=1. / lam) 172 | 173 | 174 | def poisson(lam=1.0): 175 | return dists.poisson(mu=lam) 176 | 177 | 178 | def laplace(loc=0.0, diversity=1.0): 179 | return dists.laplace(loc=loc, scale=diversity) 180 | 181 | 182 | def students_t(mean=0, std=1.0, df=1.0): 183 | return dists.t(df=df, loc=mean, scale=std) 184 | 185 | 186 | def noncentral_t(mean=0, std=1.0, df=1.0, nc=0.0): 187 | return dists.nct(df=df, nc=nc, loc=mean, scale=std) 188 | 189 | 190 | def halfcauchy(scale=1.0, loc=0.0): 191 | return dists.halfcauchy(loc=loc, scale=scale) 192 | 193 | 194 | def epa_kernel(x, delta): 195 | """ 196 | Epanechnikov kernel. 
197 | """ 198 | # make sure 1d 199 | x = np.atleast_1d(x) 200 | delta = np.atleast_1d(delta) 201 | 202 | # make sure we have matching deltas 203 | if len(delta) == 1: 204 | delta = delta.repeat(len(x)) 205 | 206 | # allocate for the weights 207 | w = np.zeros_like(x) 208 | 209 | # determine 210 | ind = (delta - np.abs(x)) > 0.0 # np.abs(x) 1.0) | 264 | (self._weights < 0.0)).sum(1) > 0 265 | pdfs[bad_ind] = 0.0 266 | 267 | return pdfs 268 | 269 | def rvs(self, size): 270 | # calc the rvs 271 | rv = np.array([d.rvs(size) 272 | for d in self._dist_list]) 273 | 274 | # handle the weights 275 | inds = np.array([np.random.choice(list(range(len(self._dist_list))), 276 | size=rv.shape[2:], 277 | p=self._weights[i]) 278 | for i in range(len(self._weights))]) 279 | 280 | return rv[inds, np.arange(len(inds))] 281 | -------------------------------------------------------------------------------- /assignments/A03_BEST_decisions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Assignment 3: BEST Decisions\n", 8 | "## Quantified Cognition\n", 9 | "### Psychology 5332\n" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "# Name: *Your Name Here*\n", 17 | "# User ID: *Your ID Here*" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "# Objectives\n", 25 | "\n", 26 | "Upon completion of this assignment, the student will demonstrate the ability to:\n", 27 | "\n", 28 | "1. Build/Modify a RunDEMC model\n", 29 | "2. Fit the model to data\n", 30 | "3. Plot parameter posteriors\n", 31 | "4. 
Evaluate model fit results\n" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "# Assignment\n", 39 | "\n", 40 | "- The goal of this assignment is to analyze the reaction time data from the perceptual decision-making task we examined in the first assignment. We want to know whether there are reliable differences in RTs (either means or standard deviations) between the speeded, accuracy, or normal conditions. \n", 41 | "\n", 42 | "- You will perform this assignment by writing code in *this notebook* (***after making a copy and renaming it to have your userid in the title --- e.g., A03_BEST_decisions_mst3k***).\n", 43 | "\n", 44 | "- In addition to this notebook, you will need to download the data from the same directory on GitHub. The file is decision_data.csv.\n", 45 | "\n", 46 | "- ***When you are done, save this notebook as HTML (`File -> Download as -> HTML`) and upload it to the matching assignment on UVACollab.***\n", 47 | "\n", 48 | "## HINTS\n", 49 | "\n", 50 | "- Be sure to comment your code\n", 51 | "- I have provided cells with general instructions for what they should contain.\n", 52 | " " 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "# Load in necessary modules\n", 62 | "%matplotlib inline\n", 63 | "\n", 64 | "# import some useful libraries\n", 65 | "import numpy as np # numerical analysis linear algebra\n", 66 | "import pandas as pd # efficient tables\n", 67 | "import matplotlib.pyplot as plt # plotting\n", 68 | "from scipy import stats\n", 69 | "\n", 70 | "import arviz as az\n", 71 | "\n", 72 | "# load pieces from RunDEMC\n", 73 | "from RunDEMC import Model, Param, dists\n", 74 | "from RunDEMC.io import arviz_dict" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [ 83 | "# Read in the data from the decision_data.csv file to a 
Pandas DataFrame\n", 84 | "# (This is data from a perceptual decision-making task, BTW)\n", 85 | "df = pd.read_csv('decision_data.csv', index_col='Unnamed: 0')" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "# Use the .head method of the DataFrame to show the first few lines of the file\n", 95 | "# The columns are:\n", 96 | "# correct: whether they made a correct response (1=Correct, 0=Incorrect)\n", 97 | "# rt: their reaction time in seconds\n", 98 | "# cond: whether they were in a speeded, accuracy, or neutral condition\n", 99 | "# log_rt: log-transformed reaction time\n", 100 | "df.head(5)" 101 | ] 102 | }, 103 | { 104 | "cell_type": "markdown", 105 | "metadata": {}, 106 | "source": [ 107 | "## Bayesian t-test with all three pairwise comparisons\n", 108 | "\n", 109 | "We want to test whether there are differences between the log reaction times (`log_rt` column in the data) for *correct* responses in each of the three conditions (`cond` column): Accuracy, Neutral, and Speed.\n", 110 | "\n", 111 | "I've pasted in the 2-class independent samples t-test from class. You will need to extend this to three classes. 
\n", 112 | "\n", 113 | "First, you'll need to extract the log RTs for each of the three conditions.\n", 114 | "\n", 115 | "Then, you'll need to modify the model code pasted below to fit to one more data (it's currently fitting to A and B dataset, but you need to add a C dataset.)\n", 116 | "\n", 117 | "Finally, you'll fit the model to the data and plot posteriors (especially of the differences in posteriors) to assess where there are differences in the means (mu) and standard deviations (sd) between pairs of conditions.\n" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": null, 123 | "metadata": {}, 124 | "outputs": [], 125 | "source": [ 126 | "# Make three datasets from df\n", 127 | "# one for each condition \n", 128 | "# (like A and B from class, but for all three experimental conditions).\n", 129 | "\n" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [ 138 | "# Build a RunDEMC model to perform an independent samples Bayesian t-test\n", 139 | "# ***UPDATE THIS MODEL TO HANDLE THREE CONDITIONS***\n", 140 | "# Define a likelihood function\n", 141 | "def ind_students_like(pop, *args):\n", 142 | " # pull the dat out of the args\n", 143 | " datA = args[0]\n", 144 | " datB = args[1]\n", 145 | " \n", 146 | " # instantiate the model with the params, \n", 147 | " distA = dists.students_t(pop['mu_A'][:, np.newaxis], \n", 148 | " pop['sigma_A'][:, np.newaxis], \n", 149 | " pop['nu'][:, np.newaxis])\n", 150 | " distB = dists.students_t(pop['mu_B'][:, np.newaxis], \n", 151 | " pop['sigma_B'][:, np.newaxis], \n", 152 | " pop['nu'][:, np.newaxis])\n", 153 | " \n", 154 | " # calc the log like and sum across obs\n", 155 | " log_like = np.log(distA.pdf(datA)).sum(1)\n", 156 | " log_like += np.log(distB.pdf(datB)).sum(1)\n", 157 | " log_like[np.isnan(log_like)] = -np.inf\n", 158 | " \n", 159 | " # return the log like for each proposal\n", 160 | " return 
log_like\n", 161 | "\n", 162 | "# set up the model\n", 163 | "params = [Param(name='mu_A', prior=dists.normal(overall_mean, overall_std*2.0)),\n", 164 | " Param(name='mu_B', prior=dists.normal(overall_mean, overall_std*2.0)),\n", 165 | " Param(name='sigma_A', prior=dists.halfcauchy(5)),\n", 166 | " Param(name='sigma_B', prior=dists.halfcauchy(5)),\n", 167 | " Param(name='nu', prior=dists.exp(1/29.), \n", 168 | " transform=lambda x: x + 1,\n", 169 | " inv_transform=lambda x: x - 1)]\n", 170 | "\n", 171 | "# set up DE\n", 172 | "m = Model('t-test_ind', \n", 173 | " params=params,\n", 174 | " like_fun=ind_students_like,\n", 175 | " like_args=(A, B),\n", 176 | " verbose=True)\n", 177 | "\n", 178 | "# run the model, first with some burnin\n", 179 | "times = m.sample(100, burnin=True)\n", 180 | "times = m.sample(1600, burnin=False)" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": null, 186 | "metadata": {}, 187 | "outputs": [], 188 | "source": [ 189 | "# convert the data to arviz\n" 190 | ] 191 | }, 192 | { 193 | "cell_type": "code", 194 | "execution_count": null, 195 | "metadata": {}, 196 | "outputs": [], 197 | "source": [ 198 | "# show an arviz summary\n" 199 | ] 200 | }, 201 | { 202 | "cell_type": "code", 203 | "execution_count": null, 204 | "metadata": {}, 205 | "outputs": [], 206 | "source": [ 207 | "# plot the traces\n" 208 | ] 209 | }, 210 | { 211 | "cell_type": "code", 212 | "execution_count": null, 213 | "metadata": {}, 214 | "outputs": [], 215 | "source": [ 216 | "# calculate posterior differences\n" 217 | ] 218 | }, 219 | { 220 | "cell_type": "code", 221 | "execution_count": null, 222 | "metadata": {}, 223 | "outputs": [], 224 | "source": [ 225 | "# plot the posterior differences for all possible comparisons of interest\n" 226 | ] 227 | }, 228 | { 229 | "cell_type": "markdown", 230 | "metadata": {}, 231 | "source": [ 232 | "## Evaluation\n", 233 | "\n", 234 | "***Where do we see reliable differences in mean or standard deviation 
between conditions?***" 235 | ] 236 | }, 237 | { 238 | "cell_type": "markdown", 239 | "metadata": {}, 240 | "source": [ 241 | "### Write your short answer here:\n" 242 | ] 243 | } 244 | ], 245 | "metadata": { 246 | "kernelspec": { 247 | "display_name": "Python 3 (ipykernel)", 248 | "language": "python", 249 | "name": "python3" 250 | }, 251 | "language_info": { 252 | "codemirror_mode": { 253 | "name": "ipython", 254 | "version": 3 255 | }, 256 | "file_extension": ".py", 257 | "mimetype": "text/x-python", 258 | "name": "python", 259 | "nbconvert_exporter": "python", 260 | "pygments_lexer": "ipython3", 261 | "version": "3.9.12" 262 | } 263 | }, 264 | "nbformat": 4, 265 | "nbformat_minor": 4 266 | } 267 | -------------------------------------------------------------------------------- /notebooks/banal.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import numpy as np 4 | from scipy.io import loadmat 5 | from scipy.spatial.distance import pdist, squareform 6 | from scipy.stats import rankdata 7 | 8 | def apply_to_zeros(lst, dtype=np.int64): 9 | """ 10 | Convert a list of arrays to a 2d array padded with zeros to the right. 11 | """ 12 | # determine the inner max length 13 | inner_max_len = max(map(len, lst)) 14 | 15 | # allocate the return array 16 | result = np.zeros([len(lst), inner_max_len], dtype) 17 | 18 | # loop over the list and fill the non-zero entries 19 | for i, row in enumerate(lst): 20 | # fill the row 21 | result[i,:len(row)] = row 22 | #for j, val in enumerate(row): 23 | # result[i][j] = val 24 | return result 25 | 26 | def combined_CI(dat): 27 | """ 28 | Calculate a 95% confidence interval across the rows of dat. 29 | """ 30 | mdat = dat.mean(0) 31 | edat = (dat-dat.mean(1)).std()/np.sqrt(len(dat)) 32 | return mdat,edat*1.96 33 | 34 | 35 | def spc(listlen=None, recalls=None, filter_ind=None, **kwargs): 36 | """ 37 | Calculate the serial position curve for a list of recall lists. 
38 | """ 39 | if listlen is None or recalls is None: 40 | raise ValueError("You must specify both listlen and recalls.") 41 | 42 | if isinstance(recalls,list): 43 | # convert to padded array 44 | recalls = apply_to_zeros(recalls) 45 | 46 | if filter_ind is None: 47 | # make one 48 | filter_ind = np.ones(len(recalls), dtype=np.bool) 49 | 50 | # loop over serial positions to get vals 51 | serpos = range(1,listlen+1) 52 | vals = [((recalls[filter_ind]==p).sum(1)>0).mean() for p in serpos] 53 | return np.rec.fromarrays([serpos,vals], names='serial_pos,prec') 54 | 55 | 56 | def prec_op(outpos=1, listlen=None, recalls=None, filter_ind=None, **kwargs): 57 | """ 58 | Calculate probability of recall as a function of output position. 59 | """ 60 | if listlen is None or recalls is None: 61 | raise ValueError("You must specify both listlen and recalls.") 62 | 63 | if isinstance(recalls,list): 64 | # convert to padded array 65 | recalls = apply_to_zeros(recalls) 66 | 67 | if filter_ind is None: 68 | # make one 69 | filter_ind = np.ones(len(recalls), dtype=np.bool) 70 | 71 | # loop over serial positions to get vals 72 | serpos = range(1,listlen+1) 73 | vals = [((recalls[filter_ind,outpos-1]==p)>0).mean() for p in serpos] 74 | return np.rec.fromarrays([serpos,vals,[outpos]*len(vals)], 75 | names='serial_pos,prec,op') 76 | 77 | 78 | def irt_op(listlen=None, recalls=None, times=None, **kwargs): 79 | """ 80 | """ 81 | if listlen is None or recalls is None or times is None: 82 | raise ValueError("You must specify listlen, recalls, and times.") 83 | 84 | 85 | 86 | def trans_fact(recs, dists): 87 | """ 88 | Calculate transition factor. 
89 | 90 | dists = -squareform(pdist(np.array([range(list_len)]).T)) 91 | 92 | """ 93 | 94 | # make sure recs are array 95 | recs = np.asanyarray(recs) 96 | 97 | # get lengths 98 | list_len = len(dists) 99 | nrecs = len(recs) 100 | 101 | # initialize containers 102 | tfs = np.empty(nrecs)*np.nan 103 | #weights = np.zeros(nrecs) 104 | 105 | # init poss ind 106 | poss_ind = np.arange(list_len) 107 | 108 | # loop over items 109 | for i in xrange(1,nrecs): 110 | # if current is 0, then stop 111 | if recs[i] == 0: 112 | break 113 | 114 | # make sure 115 | # 1) current and prev valid 116 | # 2) not a repeat 117 | if ((recs[i-1]>0) and (recs[i]>0) and 118 | (not recs[i] in recs[:i])): 119 | # get possible 120 | ind = poss_ind[~np.in1d(poss_ind,recs[:i]-1)] 121 | act_ind = poss_ind[ind]==(recs[i]-1) 122 | 123 | if (len(ind) == 1): 124 | # there are not any more possible recalls other than 125 | # this one so we're done 126 | continue 127 | 128 | # rank them 129 | ranks = rankdata(dists[recs[i-1]-1][ind]) 130 | #print ranks 131 | 132 | # set the tf for that transition 133 | tfs[i] = (ranks[act_ind]-1.)/(len(ind)-1.) 
134 | 135 | # fiddling with weights 136 | #weights[i] = (ranks[act_ind])/(2.*ranks[~act_ind].mean()) 137 | #weights[i] = np.abs(ranks[act_ind] - ranks[~act_ind]).mean()/(ranks[act_ind] - ranks[~act_ind]).std() 138 | #weights[i] = ranks[act_ind]/(2.*ranks[~act_ind].mean()) 139 | 140 | 141 | return tfs #,weights 142 | 143 | 144 | def tem_fact(listlen=None, recalls=None, filter_ind=None, **kwargs): 145 | """ 146 | """ 147 | if listlen is None or recalls is None: 148 | raise ValueError("You must specify both listlen and recalls.") 149 | 150 | if isinstance(recalls,list): 151 | # convert to padded array 152 | recalls = apply_to_zeros(recalls) 153 | 154 | if filter_ind is None: 155 | # make one 156 | filter_ind = np.ones(len(recalls), dtype=np.bool) 157 | 158 | # get the dist factor 159 | dists = -squareform(pdist(np.array([range(listlen)]).T)) 160 | 161 | # get pos and neg only 162 | #pos_dists = dists.copy() 163 | #pos_dists[np.tril_indices(listlen,1)] = np.nan 164 | #neg_dists = dists.copy() 165 | #neg_dists[np.triu_indices(listlen,1)] = np.nan 166 | 167 | # loop over the lists 168 | res = [] 169 | for i, recs in enumerate(recalls[filter_ind]): 170 | # get the full tfact 171 | tfs = trans_fact(recs, dists) 172 | 173 | # get the direction 174 | rtemp = recs.copy().astype(np.float) 175 | rtemp[rtemp<=0] = np.nan 176 | lags = np.diff(rtemp) 177 | lags = np.array([np.nan] + lags.tolist()).astype(np.int) 178 | 179 | # append the recarray of results 180 | res.append(np.rec.fromarrays([[i+1]*len(tfs),recs[:len(tfs)],tfs,lags], 181 | names='list_num,rec_item,tf,lag')) 182 | return np.concatenate(res) 183 | 184 | 185 | def crp(listlen=None, recalls=None, filter_ind=None, 186 | allow_repeats=False, exclude_op=0, **kwargs): 187 | """ 188 | Calculate a conditional response probability. 189 | 190 | Returns a recarray with lags, mcrp, ecrp, crpAll. 
191 | """ 192 | if listlen is None or recalls is None: 193 | raise ValueError("You must specify both listlen and recalls.") 194 | 195 | if isinstance(recalls,list): 196 | # convert to padded array 197 | recalls = apply_to_zeros(recalls) 198 | 199 | if filter_ind is None: 200 | # make one 201 | filter_ind = np.ones(len(recalls), dtype=np.bool) 202 | 203 | # determine possible lags 204 | lags = np.arange(0,2*listlen-1)-(listlen-1) 205 | 206 | # reset the numerator and denominator 207 | numer = np.zeros(len(lags),np.float64) 208 | denom = np.zeros(len(lags),np.float64) 209 | 210 | # loop over the lists 211 | for lis in recalls[filter_ind]: 212 | 213 | # loop over items in the list 214 | for r in np.arange(exclude_op,len(lis)-1): 215 | # get the items 216 | i = lis[r] 217 | j = lis[r+1] 218 | 219 | # see if increment, must be: 220 | # 1) positive serial positions (not intrusion) 221 | # 2) not immediate repetition 222 | # 3) not already recalled 223 | # 4) any optional conditional 224 | # if opt_cond is not None: 225 | # opt_res = eval(opt_cond) 226 | # else: 227 | opt_res = True 228 | 229 | if (i>0 and j>0 and 230 | i-j != 0 and 231 | not np.any(np.in1d([i,j],lis[0:r])) and 232 | opt_res): 233 | #not any(setmember1d([i,j],lis[0:r]))): 234 | 235 | # increment numerator 236 | lag = j-i 237 | nInd = np.nonzero(lags==lag)[0] 238 | numer[nInd] = numer[nInd] + 1 239 | 240 | # get all possible lags 241 | negLag = np.arange(i-1)-(i-1) 242 | posLag = np.arange(i,listlen)-(i-1) 243 | allLag = np.union1d(negLag,posLag) 244 | 245 | # remove lags to previously recalled items 246 | if not allow_repeats: 247 | recInd = np.nonzero(lis[0:r] > 0)[0] 248 | recLag = lis[recInd]-i 249 | goodInd = np.nonzero(~np.in1d(allLag,recLag))[0] 250 | #goodInd = nonzero(~setmember1d(allLag,recLag))[0] 251 | allLag = allLag[goodInd] 252 | 253 | # increment the denominator 254 | dInd = np.nonzero(np.in1d(lags,allLag))[0] 255 | #dInd = nonzero(setmember1d(lags,allLag))[0] 256 | denom[dInd] = 
denom[dInd] + 1 257 | 258 | # add in the subject's crp 259 | denom[denom==0] = np.nan 260 | crp_val = numer/denom 261 | 262 | # return the values 263 | return np.rec.fromarrays([lags,crp_val], names='lag,crp') 264 | 265 | 266 | def proc_mat_subj(subj_file): 267 | # extract the subj 268 | #subj_file = 'data/ltp/stat_data_LTP265.mat' 269 | #bfile = os.path.splitext(os.path.basename(subj_file))[0] 270 | #subj = bfile[10:] 271 | 272 | # load the data 273 | x = loadmat(subj_file)['data'] 274 | # look at sessions above 8 and up to 16 275 | sessions = x['session'][0,0][:,0] 276 | subj_num = x['subject'][0,0][0,0] 277 | subj = '%d'%subj_num 278 | min_list = 8 279 | if subj_num > 209: 280 | max_list = 16 281 | else: 282 | max_list = 14 283 | sess_ind = (sessions>min_list)&(sessions<=max_list) 284 | 285 | # lists without task switches 286 | info = {} 287 | info['recalls'] = x['recalls'][0,0][sess_ind] 288 | rtimes = x['times'][0,0][sess_ind]/1000. 289 | info['times'] = np.diff(np.hstack([np.zeros((len(rtimes),1)),rtimes])).clip(0,np.inf) 290 | info['listtype'] = x['pres'][0,0]['listtype'][0,0][sess_ind][:,0] 291 | info['distractor'] = x['pres'][0,0]['distractor'][0,0][sess_ind][:,0] 292 | info['final_distractor'] = x['pres'][0,0]['final_distractor'][0,0][sess_ind][:,0] 293 | info['task'] = x['pres'][0,0]['task'][0,0][sess_ind] 294 | info['subj'] = subj 295 | info['subjnum'] = subj_num 296 | info['listlen'] = x['listLength'][0,0][0,0] 297 | 298 | return info 299 | -------------------------------------------------------------------------------- /assignments/P01_Confidence.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Project 1: Confidence\n", 8 | "## Quantified Cognition\n", 9 | "### Psychology 5332\n" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "# Name: *Your Name Here*\n", 17 | "# User 
ID: *Your ID Here*" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "# Objectives\n", 25 | "\n", 26 | "Upon completion of this assignment, the student will demonstrate the ability to:\n", 27 | "\n", 28 | "1. Modify LBA to include confidence\n", 29 | "2. Fit the model to speed--accuracy data\n", 30 | "3. Produce distributions confidence for the different conditions\n", 31 | "4. Evaluate model predicted results\n" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "# Assignment\n", 39 | "\n", 40 | "- The goal of this assignment is to augment the Linear Ballistic Accumulator (LBA) model to generate confidence in addition to choices and response times. You will then test whether the model generates confidence values that make sense for the speed--accuracy trade-off data. \n", 41 | "\n", 42 | "- You will perform this assignment by writing code in *this notebook* (***after making a copy and renaming it to have your userid in the title --- e.g., P01_Confidence_mst3k***).\n", 43 | "\n", 44 | "- In addition to this notebook, you will need to download the data from the same directory on GitHub. The file is decision_data.csv.\n", 45 | "\n", 46 | "- ***When you are done, save this notebook as HTML (`File -> Download as -> HTML`) and upload it to the matching assignment on UVACollab.***\n", 47 | "\n", 48 | "## HINTS\n", 49 | "\n", 50 | "- Be sure to comment your code\n", 51 | "- I have provided cells with general instructions for what they should contain.\n", 52 | " " 53 | ] 54 | }, 55 | { 56 | "cell_type": "markdown", 57 | "metadata": {}, 58 | "source": [ 59 | "# LBA Confidence\n", 60 | "\n", 61 | "Extend LBA to include generation of a confidence value in addition to the choice and reaction time it already produces. 
One method of achieving this (though I'm open to other approaches as long as you justify them) is to assume that confidence is directly proportional to the level of activation for the accumulator with the winning choice relative to the sum of all the accumulator activations at that time. In the literature this is called the Relative Balence of Evidence.\n", 62 | "\n", 63 | "Intuitively, this approach makes some sense. If the selected choice has a high level of activation relative to the non-selected choice, then the confidence will be high (close to 1.0). On the other hand, if there is strong evidence for both choices and one just barely wins out over the other, then the ratio of the winning choice to all choices will be closer to .5 (for the two-choice case).\n", 64 | "\n", 65 | "To test whether this model is, indeed, making predictions that make sense, pick the variant of the LBA model that fit best to the speed--accuracy trade-off decision data (the one that allowed threshold to change between conditions) and perform the fit again with this new model. Even though you are not fitting to confidence, we can simulate the model with the best-fitting parameters and generate a distribution of confidence values for the speed condition and confidence values for the accuracy condition.\n", 66 | "\n", 67 | "Here are some questions to answer in your write-up:\n", 68 | "\n", 69 | "- Are people more confident in their correct answers in the accuracy condition than in the speeded condition? Does your result make sense?\n", 70 | "- Are the confidence values different for correct and incorrect answers?\n", 71 | "- What would happen to the confidence values (on average) if you added in a third option? 
(You could even take your best-fitting params and simply add in a third option to the inputs at either low or high levels of input to see what would happen.)\n" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 1, 77 | "metadata": {}, 78 | "outputs": [], 79 | "source": [ 80 | "# Load in necessary modules\n", 81 | "import numpy as np\n", 82 | "import matplotlib.pyplot as plt\n", 83 | "import pandas as pd\n", 84 | "import arviz as az\n", 85 | "\n", 86 | "from RunDEMC.density import kdensity\n", 87 | "from RunDEMC import Model, Param, dists\n", 88 | "from RunDEMC import DE, calc_bpic, joint_plot\n", 89 | "from RunDEMC.io import arviz_dict\n", 90 | "from RunDEMC.pda import PDA\n" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": 2, 96 | "metadata": {}, 97 | "outputs": [ 98 | { 99 | "data": { 100 | "text/html": [ 101 | "
\n", 102 | "\n", 115 | "\n", 116 | " \n", 117 | " \n", 118 | " \n", 119 | " \n", 120 | " \n", 121 | " \n", 122 | " \n", 123 | " \n", 124 | " \n", 125 | " \n", 126 | " \n", 127 | " \n", 128 | " \n", 129 | " \n", 130 | " \n", 131 | " \n", 132 | " \n", 133 | " \n", 134 | " \n", 135 | " \n", 136 | " \n", 137 | " \n", 138 | " \n", 139 | " \n", 140 | " \n", 141 | " \n", 142 | " \n", 143 | " \n", 144 | " \n", 145 | " \n", 146 | " \n", 147 | " \n", 148 | " \n", 149 | " \n", 150 | " \n", 151 | " \n", 152 | " \n", 153 | " \n", 154 | " \n", 155 | " \n", 156 | " \n", 157 | " \n", 158 | " \n", 159 | " \n", 160 | " \n", 161 | " \n", 162 | " \n", 163 | " \n", 164 | " \n", 165 | " \n", 166 | " \n", 167 | " \n", 168 | "
correctrtcondlog_rtrt_acc
310.4784Accuracy-0.7373080.4784
410.4300Accuracy-0.8439700.4300
510.4486Accuracy-0.8016240.4486
610.3991Speed-0.9185430.3991
810.4393Accuracy-0.8225730.4393
\n", 169 | "
" 170 | ], 171 | "text/plain": [ 172 | " correct rt cond log_rt rt_acc\n", 173 | "3 1 0.4784 Accuracy -0.737308 0.4784\n", 174 | "4 1 0.4300 Accuracy -0.843970 0.4300\n", 175 | "5 1 0.4486 Accuracy -0.801624 0.4486\n", 176 | "6 1 0.3991 Speed -0.918543 0.3991\n", 177 | "8 1 0.4393 Accuracy -0.822573 0.4393" 178 | ] 179 | }, 180 | "execution_count": 2, 181 | "metadata": {}, 182 | "output_type": "execute_result" 183 | } 184 | ], 185 | "source": [ 186 | "# load in the data\n", 187 | "dat = pd.read_csv('decision_data.csv', index_col=0)\n", 188 | "dat = dat[dat.cond != 'Neutral']" 189 | ] 190 | }, 191 | { 192 | "cell_type": "markdown", 193 | "metadata": {}, 194 | "source": [ 195 | "## Code for the LBA model (from class)\n", 196 | "\n", 197 | "The next few cells instantiate the LBA model and also fit a variant where we allow the threshold ($b$) to change between conditions." 198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": 4, 203 | "metadata": {}, 204 | "outputs": [], 205 | "source": [ 206 | "def lba_sim(I=(1.0,1.5), A=.1, S=1.0, b=1.0, num_sims=1000,\n", 207 | " max_time=2., t0=0.0, **kwargs):\n", 208 | " # set drift rate from inputs\n", 209 | " dr = np.squeeze(np.atleast_1d(I))\n", 210 | " \n", 211 | " # set the number of choices\n", 212 | " nc = len(dr)\n", 213 | " \n", 214 | " # pick starting points\n", 215 | " k = np.random.uniform(0.,A,(num_sims,nc))\n", 216 | " \n", 217 | " # pick drifts\n", 218 | " # must make sure at least one d is greater than zero for each sim\n", 219 | " d = np.random.normal(dr,S,(num_sims,nc))\n", 220 | " \n", 221 | " ## see where there are none above zero\n", 222 | " #ind = np.all(d<=0.0,axis=1)\n", 223 | " #while np.any(ind):\n", 224 | " # d[ind,:] = np.random.normal(dr,S,(ind.sum(),nc))\n", 225 | " # ind = np.all(d<=0.0,axis=1)\n", 226 | "\n", 227 | " # clip it to avoid divide by zeros\n", 228 | " d[d<=0.0] = np.finfo(dr.dtype).eps\n", 229 | "\n", 230 | " # calc the times for each\n", 231 | " t = (b-k)/d\n", 
232 | "\n", 233 | " # see the earliest for each resp\n", 234 | " inds = t.argmin(1)\n", 235 | " times = t.take(inds+np.arange(t.shape[0])*t.shape[1])\n", 236 | "\n", 237 | " # process into choices\n", 238 | " times += t0\n", 239 | "\n", 240 | " # get valid responses\n", 241 | " resp_ind = times < (max_time)\n", 242 | " inds[~resp_ind] = -1\n", 243 | " times[~resp_ind] = -1\n", 244 | " \n", 245 | " # make a dataframe \n", 246 | " # NOTE: you should add in returning confidence values here\n", 247 | " return pd.DataFrame({'correct': inds, 'rt': times})" 248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": null, 253 | "metadata": {}, 254 | "outputs": [], 255 | "source": [ 256 | "# fit the model with a threshold change between conditions\n", 257 | "\n" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": null, 263 | "metadata": {}, 264 | "outputs": [], 265 | "source": [ 266 | "# extract best fitting params\n", 267 | "\n", 268 | "\n" 269 | ] 270 | }, 271 | { 272 | "cell_type": "code", 273 | "execution_count": null, 274 | "metadata": {}, 275 | "outputs": [], 276 | "source": [ 277 | "# generate distributions of confidence values for speed and accuracy conditions\n", 278 | "\n" 279 | ] 280 | }, 281 | { 282 | "cell_type": "code", 283 | "execution_count": null, 284 | "metadata": {}, 285 | "outputs": [], 286 | "source": [ 287 | "# make plots to see how those distributions compare to one another\n" 288 | ] 289 | }, 290 | { 291 | "cell_type": "markdown", 292 | "metadata": {}, 293 | "source": [ 294 | "### Write your short answer here to the questions listed above:\n" 295 | ] 296 | } 297 | ], 298 | "metadata": { 299 | "kernelspec": { 300 | "display_name": "Python 3 (ipykernel)", 301 | "language": "python", 302 | "name": "python3" 303 | }, 304 | "language_info": { 305 | "codemirror_mode": { 306 | "name": "ipython", 307 | "version": 3 308 | }, 309 | "file_extension": ".py", 310 | "mimetype": "text/x-python", 311 | "name": "python", 312 
| "nbconvert_exporter": "python", 313 | "pygments_lexer": "ipython3", 314 | "version": "3.9.12" 315 | } 316 | }, 317 | "nbformat": 4, 318 | "nbformat_minor": 4 319 | } 320 | -------------------------------------------------------------------------------- /assignments/P02_Policy_Change.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Project 2: Policy Change\n", 8 | "## Quantified Cognition\n", 9 | "### Psychology 5332\n" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "# Name: *Your Name Here*\n", 17 | "# User ID: *Your ID Here*" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "# Objectives\n", 25 | "\n", 26 | "Upon completion of this assignment, the student will demonstrate the ability to:\n", 27 | "\n", 28 | "1. Modify the policy of an RL model\n", 29 | "2. Test whether the new policy improves learning in various environments\n", 30 | "3. Evaluate why the policy did or did not work and propose alternative approaches\n" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "# Assignment\n", 38 | "\n", 39 | "- The goal of this assignment is to modify the *policy* of the successor representation model to generate different behaviors given the environment. You will then test whether this change improves learning in different environmental contexts. 
\n", 40 | "\n", 41 | "- You will perform this assignment by writing code in *this notebook* (***after making a copy and renaming it to have your userid in the title --- e.g., P02_Policy_Change_mst3k***).\n", 42 | "\n", 43 | "- ***When you are done, save this notebook as HTML (`File -> Download as -> HTML`) and upload it to the matching assignment on UVACollab.***\n", 44 | "\n", 45 | "## HINTS\n", 46 | "\n", 47 | "- Be sure to comment your code\n", 48 | "- I have provided cells with general instructions for what they should contain.\n", 49 | " " 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "metadata": {}, 55 | "source": [ 56 | "# Policy Change\n", 57 | "\n", 58 | "As discussed in class, both learning and behavior in an environment depend on the agent's policy for selecting actions given the current state. We also discussed the importance of the exploration vs. exploitation trade-off during learning. Whether you exploit currently preferred actions or explore novel actions also depends on the policy.\n", 59 | "\n", 60 | "Here, we will modify the policy of the successor representation (SR) reinforcement learning (RL) model we covered in class. Specifically, the current policy involves evaluating future visitions to other states given a current state and all possible actions, and calculating a Q value for each action taking into account the learned rewards (or punishments) at each state. Mathematically, that involves:\n", 61 | "\n", 62 | "$$Q = (M \\cdot s_i) \\cdot r,$$\n", 63 | "\n", 64 | "where $M$ is the successor representation matrix, $s_i$ is the current state, and $r$ is the vector of rewards associated with each state.\n", 65 | "\n", 66 | "What we'd like to do is encourage the agent to move away from their most recent states, thereby enhancing exploration of the environment. Luckily, the agent already keeps track of recent states in the eligibility trace (or context, in the parlance of the temporal context model). 
Your job is to use this vector (which is already passed into the policy function as `t0`) along with a new parameter (let's call this `revisit_penalty`) that can take on values between zero and one, to modify the rewards associated with each state such that recent states are penalized.\n", 67 | "\n", 68 | "Once you have updated the policy code, train the model in an environment that is not slippery (i.e., set `slippery = False`) and make note of how many training iterations it needs to learn to a criterion of 25 correct in a row. Does the model learn faster when you penalize more for revisiting recent states? Make sure to assess this in a handful of random environments by rerunning the code that generates a random map (see below), so that your assessment isn't biased by a single environment.\n", 69 | "\n", 70 | "Next, make the environment slippery (i.e., set `slippery = True`) and see if penalizing revisitation of recent states helps or hurts learning. Is the model able to learn better or worse on average? 
Even if it never reaches the criterion, is it solving more or less often with the penalty?\n", 71 | "\n", 72 | "In addition to the questions above, here are some questions to answer in your write-up:\n", 73 | "\n", 74 | "- Why do you think you observed what you did?\n", 75 | "- Can you think of a different policy that may work better in slippery environments?\n" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "# to install more libraries\n", 85 | "!pip install gymnasium" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": 1, 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "# load matplotlib inline mode\n", 95 | "%matplotlib inline\n", 96 | "\n", 97 | "# import some useful libraries\n", 98 | "import numpy as np # numerical analysis linear algebra\n", 99 | "import matplotlib.pyplot as plt # plotting\n", 100 | "\n", 101 | "from IPython.display import display, clear_output, Image\n", 102 | "import time\n", 103 | "\n", 104 | "from fastprogress.fastprogress import master_bar, progress_bar\n", 105 | "\n", 106 | "#import gym\n", 107 | "#from gym.envs.toy_text import frozen_lake\n", 108 | "\n", 109 | "import gymnasium as gym\n", 110 | "from gymnasium.envs.toy_text import frozen_lake" 111 | ] 112 | }, 113 | { 114 | "cell_type": "markdown", 115 | "metadata": {}, 116 | "source": [ 117 | "## Environment defined from class" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": null, 123 | "metadata": {}, 124 | "outputs": [], 125 | "source": [ 126 | "# define the environment\n", 127 | "size = 10\n", 128 | "p_frozen = 0.9\n", 129 | "slippery = False\n", 130 | "\n", 131 | "# generate a random map\n", 132 | "desc = frozen_lake.generate_random_map(size=size, p=p_frozen)\n", 133 | "env = frozen_lake.FrozenLakeEnv(desc=desc,\n", 134 | " is_slippery=slippery,\n", 135 | " render_mode='ansi'\n", 136 | " )\n", 137 | "\n", 138 | "## reset 
the environment and get the initial state\n", 139 | "observation, info = env.reset()\n", 140 | "display(print(env.render()))" 141 | ] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "metadata": {}, 146 | "source": [ 147 | "## Policy and Model from class" 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": null, 153 | "metadata": {}, 154 | "outputs": [], 155 | "source": [ 156 | "# params\n", 157 | "gamma = .95\n", 158 | "alpha = .5\n", 159 | "rho = .25\n", 160 | "tau = 10.0\n", 161 | "p_rand = 0.0\n", 162 | "hole_penalty = -1.0\n", 163 | "off_board_penalty = -0.0\n", 164 | "\n", 165 | "# set up our agent\n", 166 | "# pull out the number of actions and unique states\n", 167 | "n_actions = env.action_space.n\n", 168 | "n_states = env.observation_space.n\n", 169 | "\n", 170 | "# create orthogonal state representations\n", 171 | "states = np.eye(n_states)\n", 172 | "\n", 173 | "# allocate for where we learn:\n", 174 | "# rewards associated with each state\n", 175 | "rewards = np.zeros(n_states)\n", 176 | "# states associated with each other (via SR)\n", 177 | "M = np.zeros((n_actions, n_states, n_states))\n", 178 | "\n", 179 | "# keep track of scores during learning\n", 180 | "scores = []\n", 181 | "\n", 182 | "# define a policy \n", 183 | "# !!!!! 
MODIFY THIS CODE TO UPDATE THE POLICY !!!!\n", 184 | "def pick_action(f0, t0, M, rewards, tau, p_rand=0.0):\n", 185 | " # apply policy to pick action\n", 186 | " if p_rand > 0.0 and np.random.rand() < p_rand:\n", 187 | " # pick a random action\n", 188 | " action = env.action_space.sample()\n", 189 | " else:\n", 190 | " Q = np.dot(np.dot(M, f0), rewards)\n", 191 | " #action = np.random.choice(np.where(Q==Q.max())[0])\n", 192 | " #action = np.argmax(Q)\n", 193 | " pQ = np.exp(Q*tau)/np.exp(Q*tau).sum()\n", 194 | " action = np.argmax(np.random.rand() < np.cumsum(pQ))\n", 195 | " return action\n" 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": null, 201 | "metadata": {}, 202 | "outputs": [], 203 | "source": [ 204 | "ntrials = 1000\n", 205 | "max_moves = 500\n", 206 | "max_corr = 25\n", 207 | "\n", 208 | "for r in progress_bar(range(ntrials)):\n", 209 | " # reset for new attempt at recovering the frisbee\n", 210 | " observation, info = env.reset()\n", 211 | " last_obs = observation\n", 212 | " f0 = states[observation]\n", 213 | " t0 = states[observation]\n", 214 | " \n", 215 | " # set current annealing\n", 216 | " cur_p_rand = p_rand*(1-((r+1)/ntrials))\n", 217 | "\n", 218 | " for i in range(max_moves):\n", 219 | " # pick an action\n", 220 | " action = pick_action(f0, t0, M, rewards, tau, p_rand=cur_p_rand) \n", 221 | " \n", 222 | " # observe new state\n", 223 | " observation, reward, trunc, done, info = env.step(action)\n", 224 | " \n", 225 | " # turn the new state into a vector representation\n", 226 | " f1 = states[observation]\n", 227 | "\n", 228 | " # learn via successor representation\n", 229 | " # prediction from previous state\n", 230 | " p0 = np.dot(M[action], f0)\n", 231 | " \n", 232 | " # observed outcome, plus discounted future prediction\n", 233 | " # when following that policy\n", 234 | " f1_action = pick_action(f1, t0, M, rewards, tau, p_rand=cur_p_rand)\n", 235 | " o1 = (f1 + gamma*(np.dot(M[f1_action], f1)))\n", 236 | " \n", 
237 | " # update the association for that action\n", 238 | " M[action] += alpha * np.outer((o1 - p0), t0)\n", 239 | "\n", 240 | " # update context (eligibility trace)\n", 241 | " #t1 = rho*t0 + (1-rho)*f1\n", 242 | " t1 = np.clip(rho*t0 + f1, 0, 1)\n", 243 | "\n", 244 | " # process the reward if any\n", 245 | " if trunc and reward==0:\n", 246 | " # get negative rewards for falling in a hole\n", 247 | " reward = hole_penalty\n", 248 | " \n", 249 | " if last_obs == observation:\n", 250 | " # action gave rise to no change in movement\n", 251 | " reward = off_board_penalty\n", 252 | " \n", 253 | " #if reward == 0:\n", 254 | " # # punish going to a state and not getting anything for it\n", 255 | " # rewards[last_obs] -= .1\n", 256 | "\n", 257 | " # update our representation of rewards/punishments at the observed state\n", 258 | " rewards[observation] += alpha*(reward - rewards[observation])\n", 259 | "\n", 260 | " # see if we're done\n", 261 | " if trunc:\n", 262 | " #print(\"Episode finished after {} timesteps with reward {}\".format(i+1, reward))\n", 263 | " # save out our final reward/punishment\n", 264 | " scores.append(reward)\n", 265 | " break\n", 266 | "\n", 267 | " # prepare for next iteration\n", 268 | " f0 = f1\n", 269 | " t0 = t1\n", 270 | " last_obs = observation\n", 271 | " \n", 272 | " # if we ran out of time, say we fell in\n", 273 | " if i==(max_moves-1):\n", 274 | " scores.append(hole_penalty)\n", 275 | " \n", 276 | " if len(scores)>max_corr and np.mean(scores[-max_corr:])==1.0:\n", 277 | " # we're consistently solving it, so quit\n", 278 | " break\n", 279 | "\n", 280 | "# render the final state\n", 281 | "env.render()\n", 282 | "\n", 283 | "# plot a moving average of scores\n", 284 | "N=10\n", 285 | "plt.plot(np.convolve(scores, np.ones((N,))/N, mode='valid'))\n", 286 | "\n", 287 | "print(\"Mean final performance:\", np.mean(scores[-max_corr:]))" 288 | ] 289 | }, 290 | { 291 | "cell_type": "markdown", 292 | "metadata": {}, 293 | "source": [ 294 | "### 
Write your short answer here to the questions listed above:\n" 295 | ] 296 | } 297 | ], 298 | "metadata": { 299 | "kernelspec": { 300 | "display_name": "Python 3 (ipykernel)", 301 | "language": "python", 302 | "name": "python3" 303 | }, 304 | "language_info": { 305 | "codemirror_mode": { 306 | "name": "ipython", 307 | "version": 3 308 | }, 309 | "file_extension": ".py", 310 | "mimetype": "text/x-python", 311 | "name": "python", 312 | "nbconvert_exporter": "python", 313 | "pygments_lexer": "ipython3", 314 | "version": "3.9.12" 315 | } 316 | }, 317 | "nbformat": 4, 318 | "nbformat_minor": 4 319 | } 320 | -------------------------------------------------------------------------------- /notebooks/btcm.py: -------------------------------------------------------------------------------- 1 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 2 | # ex: set sts=4 ts=4 sw=4 et: 3 | ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 4 | # 5 | # See the COPYING file distributed along with the CogMod package for the 6 | # copyright and license terms. 7 | # 8 | ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 9 | 10 | 11 | import numpy as np 12 | from numba import njit 13 | 14 | from samprec import _calc_p_attempts 15 | 16 | def l1_norm(dat): 17 | """ 18 | l1_norm along the first dimension of a data array. 19 | """ 20 | denom = np.abs(dat).sum(axis=0)[np.newaxis] 21 | denom[denom == 0.0] = 1.0 22 | return dat/denom.repeat(len(dat), axis=0) 23 | 24 | 25 | def l2_norm(dat): 26 | """ 27 | l2_norm along the first dimension of a data array. 
28 | """ 29 | denom = np.sqrt(np.power(dat, 2).sum(axis=0))[np.newaxis] 30 | denom[denom == 0.0] = 1.0 31 | return dat/denom.repeat(len(dat), axis=0) 32 | 33 | 34 | class TCM(object): 35 | """Temporal Context Model 36 | """ 37 | default_params = { 38 | # assoc 39 | 'rho': .5, 40 | 'rho_dist': None, 41 | 'rho_ret': None, 42 | 'beta': .5, 43 | 'phi': 2.0, 44 | 'gamma': .5, 45 | 'lambda': 0.5, 46 | 'alpha': 1.0, 47 | 48 | # retrieval (recall) 49 | 'sigma_base': 0.0, 50 | 'sigma_exp': 4.0, 51 | 'Kmax': 30, 52 | 'tau': 1.0, 53 | 'xi': .0001 54 | 55 | } 56 | 57 | def __init__(self, listlen=16, nitems=None, params=None, 58 | scale_thresh=0.00001): 59 | """ 60 | """ 61 | # save the nitems 62 | self.listlen = listlen 63 | if nitems is None: 64 | nitems = (listlen*2)+3 65 | self.nitems = nitems 66 | self.items = np.eye(nitems) 67 | self.scale_thresh = scale_thresh 68 | 69 | # process the params 70 | # start with defaults 71 | p = dict(**self.default_params) 72 | if params is not None: 73 | # get provided vals 74 | p.update(params) 75 | self.params = p 76 | 77 | # check the rho values 78 | if self.params['rho_dist'] == None: 79 | self.params['rho_dist'] = self.params['rho'] 80 | if self.params['rho_ret'] == None: 81 | self.params['rho_ret'] = self.params['rho'] 82 | 83 | # set phi_decay from rho 84 | self.params['phi_decay'] = -np.log(self.params['rho']) 85 | 86 | # set up the model 87 | self.reset() 88 | 89 | def reset(self): 90 | # allocate for all matrices and vectors 91 | self.M = np.zeros((self.nitems, self.nitems)) 92 | self.f1 = np.zeros(self.nitems) 93 | self.f0 = np.zeros(self.nitems) 94 | self.t1 = np.zeros(self.nitems) 95 | self.t0 = np.zeros(self.nitems) 96 | 97 | # set up t0 with init item 98 | self.t0[0] = 1.0 99 | 100 | # set up list context 101 | self.lc_ind = -1 102 | self.t0[self.lc_ind] = self.params['lambda'] 103 | 104 | # normalize it (not including list context unit) 105 | self.t0[:-1] = l2_norm(self.t0[:-1]) 106 | 107 | # set current distractor ind 108 
| self.cur_dist = -2 109 | self.cur_pres = 0 110 | 111 | pass 112 | 113 | def _present_item(self, i, rho, alpha=0.0): 114 | 115 | # pick the item 116 | self.f1 = self.items[i] 117 | 118 | # calc the new t 119 | tIN = l2_norm(self.params['beta']*self.f1 + 120 | (1-self.params['beta'])*np.dot(self.f1, self.M)) 121 | #self.t1 = rho*self.t0 + (1-rho)*tIN 122 | self.t1 = rho*self.t0 + tIN 123 | 124 | # reset the list context unit 125 | self.t1[self.lc_ind] = self.params['lambda'] 126 | 127 | # make unit length (not including list context unit) 128 | self.t1[:-1] = l2_norm(self.t1[:-1]) 129 | 130 | # update M 131 | if alpha > 0.0: 132 | phi = alpha + (self.params['phi'] * 133 | np.exp(-self.params['phi_decay'] * 134 | (self.cur_pres))) 135 | self.M += phi * np.outer(self.f1, self.t0) 136 | 137 | # update cur_pres 138 | self.cur_pres += 1 139 | 140 | # set the latest item/context as the old 141 | self.f0 = self.f1 142 | self.t0 = self.t1 143 | 144 | def present_list(self, list_def=None, list_type='IFR'): 145 | """Present a list to the model. 
146 | 147 | Parameters 148 | ---------- 149 | list_def: list of item_ids 150 | list_type: {'IFR','DFR','CDFR'} 151 | """ 152 | if list_def is None: 153 | # make based on nitems 154 | list_def = range(1, self.listlen+1) 155 | 156 | # loop over items 157 | for i in list_def: 158 | if list_type[0].upper() == 'C': # 'CDFR': 159 | # present premath 160 | self._present_item(self.cur_dist, 161 | self.params['rho_dist'], 162 | alpha=self.params['alpha']) 163 | self.cur_dist -= 1 164 | 165 | # present the item 166 | self._present_item(i, self.params['rho'], 167 | alpha=self.params['alpha']) 168 | 169 | # see if postmath 170 | if list_type[0].upper() in ['C', 'D']: # ['CDFR', 'DFR'] 171 | self._present_item(self.cur_dist, 172 | self.params['rho_dist'], 173 | alpha=self.params['alpha']) 174 | self.cur_dist -= 1 175 | 176 | # save current context 177 | self.t_save = self.t0.copy() 178 | 179 | @property 180 | def strengths(self): 181 | #return (self.params['gamma']*np.dot(self.M, self.t0) + 182 | # (1-self.params['gamma'])*self.t0) 183 | return (np.dot(self.M, self.t0) + 184 | (self.params['gamma']*self.t0)) 185 | 186 | def calc_list_like(self, recalls): 187 | # reset context 188 | self.t0 = self.t_save.copy() 189 | 190 | # convert to 0-based index 191 | recalls = np.atleast_1d(recalls) - 1 192 | 193 | # var to save p_k 194 | p_k = np.zeros(self.params['Kmax']) 195 | 196 | # start with k with p(1.0) at zero 197 | p_k[0] = 1.0 198 | 199 | likes = [] 200 | for i, rec in enumerate(recalls): 201 | # get rec_ind 202 | rec_ind = np.in1d(np.arange(self.listlen), recalls[:i]) 203 | 204 | if rec < 0: 205 | # they stopped, so calc p_stop 206 | # first calc p_rec for all non-recalled items 207 | p_nrecs = 0.0 208 | for nrec in np.where(~rec_ind)[0]: 209 | # calc the like for the list 210 | p_nrec, p_nk = self._recall_like(nrec, p_k, 211 | recalls=recalls[:i]) 212 | p_nrecs += p_nrec 213 | p_stop = 1-np.sum(p_nrecs) 214 | 215 | # calc the other way 216 | #p_stop, p_nk = 
self._recall_like(rec, p_k, 217 | # recalls=recalls[:i]) 218 | 219 | # p_stopping is not the sum of retrieving non-recalled items 220 | #likes.append(1 - np.sum(p_nrecs)) 221 | likes.append(p_stop) 222 | 223 | # we're done recalling 224 | break 225 | 226 | # retrieve from LTM 227 | # calc the like for the list 228 | p_rec, p_k = self._recall_like(rec, p_k, 229 | recalls=recalls[:i]) 230 | 231 | # append the new rec like 232 | likes.append(p_rec) 233 | 234 | # do output encoding and move to next item 235 | self._present_item(rec+1, self.params['rho_ret'], alpha=0.0) 236 | 237 | return likes 238 | 239 | def _recall_like(self, rec, p_k, recalls=None): 240 | if recalls is None: 241 | recalls = [] 242 | 243 | # set up the number of attempts 244 | context_att = self.params['Kmax'] 245 | p_last = 1.0 246 | 247 | # attempts with just context 248 | S = self.strengths[1:self.listlen+1]**self.params['tau'] 249 | #CV = S[S>self.params['xi']].std()/S[S>self.params['xi']].mean() 250 | #S *= CV 251 | #print(CV) 252 | #S = S**CV 253 | p_att, p_last = _calc_p_attempts(rec, S, 254 | recalls=recalls, 255 | attempts=context_att, 256 | p_start=p_last, 257 | scale_thresh=self.scale_thresh) 258 | 259 | # do weighted combo of attempts 260 | new_p_k = np.zeros(self.params['Kmax']) 261 | if rec < 0: 262 | # adjust p_att 263 | p_att = np.concatenate([p_att[1:], [p_last]]) 264 | 265 | # we're testing stopping, so just pick end values 266 | for k, p in enumerate(p_k): 267 | new_p_k[k] += \ 268 | p * p_att[self.params['Kmax']-k-1] 269 | else: 270 | # do weighted combo of all different p_k 271 | for k, p in enumerate(p_k): 272 | new_p_k[k:self.params['Kmax']] += \ 273 | p * p_att[:self.params['Kmax']-k] 274 | 275 | # p_rec is just sum over new_p_k 276 | p_rec = new_p_k.sum() 277 | 278 | # set new p_k 279 | if rec < 0: 280 | # we've stopped, so we have maxed out k 281 | p_k = np.zeros(self.params['Kmax']) 282 | else: 283 | # normalize new p_k for next iteration 284 | p_k = new_p_k/new_p_k.sum() 
285 | 286 | return p_rec, p_k 287 | 288 | def simulate(self, nlists, list_type='IFR', list_def=None): 289 | # reset the model 290 | self.reset() 291 | 292 | # present the list 293 | self.present_list(list_type=list_type, list_def=list_def) 294 | 295 | # simulate lists 296 | #recs = [self.sim_list() for i in range(nlists)] 297 | recs = [_tcm_sim_recs(self.M, self.items, self.t_save.copy(), 298 | self.listlen, self.params['beta'], 299 | self.params['rho_ret'], self.params['gamma'], 300 | self.params['tau'], self.params['lambda'], 301 | self.params['Kmax']) 302 | for i in range(nlists)] 303 | 304 | return recs 305 | 306 | def sim_list(self): 307 | # reset context 308 | self.t0 = self.t_save.copy() 309 | 310 | # var to save p_k 311 | p_k = np.zeros(self.params['Kmax']) 312 | 313 | # start with k with p(1.0) at zero 314 | p_k[0] = 1.0 315 | 316 | # init recalls 317 | recalls = [] 318 | rec_ind = np.zeros(self.listlen, dtype=np.bool) 319 | for i in range(self.listlen): 320 | # recall from LTM 321 | # loop over not-recalled items to get likes 322 | p_recs = [] 323 | p_nks = [] 324 | recs = [] 325 | for nrec in np.where(~rec_ind)[0]: 326 | # calc the like for the list 327 | p_nrec, p_nk = self._recall_like(nrec, p_k, 328 | recalls=recalls[:i]) 329 | 330 | p_recs.append(p_nrec) 331 | p_nks.append(p_nk) 332 | recs.append(nrec) 333 | 334 | # append p_stop 335 | recs.append(-1) 336 | p_stop, p_nk = self._recall_like(-1, p_k, 337 | recalls=recalls[:i]) 338 | p_recs.append(p_stop) 339 | 340 | # normalize to fix approx 341 | p_recs = np.array(p_recs) 342 | p_recs = p_recs/p_recs.sum() 343 | #p_recs[:-1] = p_recs[:-1]*(1-p_recs[-1])/(p_recs[:-1]).sum() 344 | #cdfs = np.concatenate([np.cumsum(p_recs), [1.0]]) 345 | cdfs = np.cumsum(p_recs) 346 | 347 | # pick a recall at random 348 | ind = (cdfs > np.random.rand()).argmax() 349 | rec = recs[ind] 350 | recalls.append(rec) 351 | if rec < 0: 352 | # all done 353 | break 354 | 355 | # set the p_k 356 | p_k = p_nks[ind] 357 | 
rec_ind[rec] = True 358 | 359 | # do output encoding (item processing) 360 | self._present_item(rec+1, self.params['rho_ret'], alpha=0.0) 361 | 362 | # add one to the returns 363 | return np.atleast_1d(recalls)+1 364 | 365 | 366 | @njit 367 | def _tcm_sim_recs(M, items, t0, listlen, beta, rho, gamma, tau, lamb, Kmax): 368 | # do recalls 369 | recalls = np.zeros(listlen) 370 | rec_ind = np.zeros(listlen, dtype=np.bool_) 371 | k = 0 372 | for i in range(listlen): 373 | # get strength 374 | S = (np.dot(M, t0) + (gamma*t0))[1:listlen+1]**tau 375 | p_s = (S/S.sum()) 376 | p_r = (1-np.exp(-S)) 377 | samp_ind = rec_ind.copy() 378 | for l in range(Kmax): 379 | # sample an item 380 | ind = (np.cumsum(p_s) > np.random.rand()).argmax() 381 | 382 | if samp_ind[ind]: 383 | # we've already sampled it, so skip 384 | k += 1 385 | if k >= Kmax: 386 | break 387 | continue 388 | 389 | # we've sampled it now 390 | samp_ind[ind] = True 391 | 392 | # see if recover 393 | if np.random.rand() < p_r[ind]: 394 | # we recover it 395 | rec = ind 396 | recalls[i] = rec + 1 397 | rec_ind[rec] = True 398 | 399 | # update context and start over 400 | # pick the item 401 | f1 = items[rec+1] 402 | 403 | # calc the new t 404 | tIN = beta*f1 + (1-beta)*np.dot(f1, M) 405 | tIN /= np.sqrt((tIN**2).sum()) 406 | t1 = rho*t0 + tIN 407 | 408 | # reset the list context unit 409 | t1[-1] = lamb 410 | 411 | # make unit length (not including list context unit) 412 | t1[:-1] = t1[:-1]/np.sqrt((t1[:-1]**2).sum()) 413 | 414 | # replace new context 415 | t0 = t1 416 | 417 | break 418 | else: 419 | # we failed 420 | k += 1 421 | if k >= Kmax: 422 | break 423 | 424 | if k >= Kmax: 425 | # enough failures 426 | break 427 | 428 | return recalls 429 | 430 | 431 | 432 | if __name__ == "__main__": 433 | 434 | # set up items 435 | listlen = 16 436 | nlists = 1000 437 | 438 | params = { 439 | # assoc 440 | 'rho': .238, 441 | 'rho_dist': .070, 442 | 'rho_ret': None, 443 | 'beta': .965, 444 | 'phi': 1.201, 445 | 'gamma': 
.050, 446 | 'lambda': .349, 447 | 448 | # retrieval 449 | 'sigma_base': 0.0, 450 | 'sigma_exp': 11.647, 451 | 'tau': 1.306} 452 | 453 | tcm = TCM(listlen, params=params) 454 | 455 | recalls = tcm.simulate(nlists=nlists, 456 | list_def=range(1, listlen+1), 457 | list_type='DFR') 458 | 459 | #ll = [tcm.calc_list_like(recs) for recs in recalls] 460 | 461 | recs = np.zeros((len(recalls), listlen)) 462 | for i, r in enumerate(recalls): 463 | recs[i, :len(r)] = r 464 | -------------------------------------------------------------------------------- /notebooks/bsam.py: -------------------------------------------------------------------------------- 1 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 2 | # ex: set sts=4 ts=4 sw=4 et: 3 | ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 4 | # 5 | # See the COPYING file distributed along with the CogMod package for the 6 | # copyright and license terms. 7 | # 8 | ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 9 | 10 | import numpy as np 11 | from scipy.stats.distributions import norm, truncnorm 12 | 13 | from samprec import _calc_p_attempts 14 | 15 | 16 | def _trunc_norm(mean=0.0, std=1.0, lower=0.0, upper=1.0): 17 | """Wrapper for truncated normal.""" 18 | a = (np.array(lower) - np.array(mean)) / np.array(std) 19 | b = (np.array(upper) - np.array(mean)) / np.array(std) 20 | return truncnorm(a, b, loc=mean, scale=std) 21 | 22 | 23 | class SAMWrap(object): 24 | """Wrapper for SAM to handle range of r values.""" 25 | def __init__(self, n_items=16, rmin=1, rmax=None, 26 | params=None, scale_thresh=0.00001): 27 | # make sure params is at least empty dict 28 | if params is None: 29 | params = {} 30 | 31 | # handle the r min and max 32 | self.rmin = rmin 33 | if rmax is None: 34 | # set to list length 35 | rmax = n_items 36 | self.rmax = rmax 37 | 38 | # pop off the r params (defaults from fSAM) 39 | self.r_mu = params.get('r_mu', 4.0) 40 | 
self.r_std = params.get('r_std', 1.4) 41 | 42 | # determine probabilities of each r in range 43 | rd = _trunc_norm(mean=self.r_mu, 44 | std=self.r_std, 45 | lower=self.rmin-0.5, 46 | upper=self.rmax+0.5) 47 | self.rvals = np.arange(self.rmin, self.rmax+1) 48 | self.p_r = np.array([rd.cdf(i+0.5)-rd.cdf(i-0.5) 49 | for i in self.rvals]) 50 | self.cdf_r = np.cumsum(self.p_r) 51 | 52 | # create SAM instances for each rval 53 | self.sams = [] 54 | for r in self.rvals: 55 | # set the params 56 | params['r'] = r 57 | self.sams.append(SAM(n_items=n_items, params=params, 58 | scale_thresh=scale_thresh)) 59 | 60 | def reset(self): 61 | # loop over each SAM instance 62 | for i in range(len(self.sams)): 63 | self.sams[i].reset() 64 | 65 | 66 | def present_list(self, list_type='IFR', list_def=None): 67 | # loop over each SAM instance 68 | for i in range(len(self.sams)): 69 | self.sams[i].present_list(list_type=list_type, 70 | list_def=list_def) 71 | 72 | def calc_list_like(self, recalls): 73 | # loop over each SAM instance 74 | avg_likes = None 75 | for i in range(len(self.sams)): 76 | # get the likes for each recall 77 | likes = self.sams[i].calc_list_like(recalls) 78 | 79 | # scale them by p_r 80 | likes = np.array(likes) * self.p_r[i] 81 | 82 | # sum over all r 83 | if avg_likes is None: 84 | avg_likes = likes 85 | else: 86 | avg_likes += likes 87 | 88 | return avg_likes 89 | 90 | def simulate(self, nlists, list_type='IFR', list_def=None): 91 | # reset the model 92 | self.reset() 93 | 94 | # present the list 95 | self.present_list(list_type=list_type, list_def=list_def) 96 | 97 | # simulate lists 98 | recs = [self.sim_list() for i in range(nlists)] 99 | 100 | return recs 101 | 102 | def sim_list(self): 103 | # pick r from dist at random 104 | ind = (self.cdf_r > np.random.rand()).argmax() 105 | return self.sams[ind].sim_list() 106 | 107 | 108 | class SAM(object): 109 | """Bayesian Seach of Associative Memory model""" 110 | default_params = { 111 | 'a': 0.3, 112 | 'b1': 0.5, 
113 | 'b2': None, 114 | 'c': 0.7, 115 | 'd': 0.01, 116 | 'e': 0.1, 117 | 'f1': 0.3, 118 | 'f2': None, 119 | 'g': 0.1, 120 | 'r': 4, 121 | 'r_dist': 3, 122 | 'Kmax': 30, 123 | 'Lmax': 4, 124 | } 125 | 126 | def __init__(self, n_items=16, params=None, scale_thresh=0.00001): 127 | # process the params 128 | self.n_items = n_items 129 | self.scale_thresh = scale_thresh 130 | 131 | # start with defaults 132 | p = dict(**self.default_params) 133 | if params is not None: 134 | # get provided vals 135 | p.update(params) 136 | self.params = p 137 | 138 | # check the possible None 139 | if self.params['b2'] is None: 140 | self.params['b2'] = self.params['b1'] / 2 141 | 142 | if self.params['f2'] is None: 143 | self.params['f2'] = self.params['f1'] / 2 144 | 145 | # set up the model 146 | self.reset() 147 | 148 | def reset(self): 149 | # init memory representations 150 | self.M = np.zeros((self.n_items, self.n_items)) 151 | self.buffer = np.zeros(self.n_items) 152 | self.C = np.zeros(self.n_items) 153 | self.cur_r = 0 154 | 155 | def present_list(self, list_type='IFR', list_def=None): 156 | """Present a list to the model. 
157 | 158 | Parameters 159 | ---------- 160 | list_def: list of item_ids (currently ignored) 161 | list_type: {'IFR','DFR','CDFR'} 162 | """ 163 | #if list_def is None: 164 | # # make based on nitems 165 | # list_def = range(1, self.listlen+1) 166 | 167 | for i in range(self.n_items): 168 | if i>0 and list_type[0].upper() == 'C': # 'CDFR': 169 | # remove items from buffer for pre-item distractor 170 | for d in range(self.params['r_dist']): 171 | if self.cur_r > 0: 172 | self.buffer *= 1-(1/self.cur_r) 173 | self.cur_r -= 1 174 | else: 175 | break 176 | 177 | # make room in the buffer 178 | if self.cur_r >= self.params['r']: 179 | # we have a full buffer, so must decay 180 | # this does equal prob dropout 181 | self.buffer *= 1-(1/self.params['r']) 182 | self.cur_r -= 1 183 | 184 | # add it to the buffer 185 | self.buffer[i] = 1.0 186 | self.cur_r += 1 187 | 188 | # store the item-to-item associations 189 | self.M += np.outer(self.buffer, self.buffer) 190 | 191 | # store context to item associations 192 | self.C += self.params['a']*self.buffer 193 | 194 | # process the post-list distractor 195 | if list_type[0].upper() in ['C', 'D']: # ['CDFR', 'DFR'] 196 | for d in range(self.params['r_dist']): 197 | if self.cur_r > 0: 198 | self.buffer *= 1-(1/self.cur_r) 199 | self.cur_r -= 1 200 | else: 201 | break 202 | 203 | # apply scaling based on learning params 204 | L = np.diag(np.ones(self.n_items)*self.params['c']) 205 | L[np.triu_indices(self.n_items, 1)] = self.params['b1'] 206 | L[np.tril_indices(self.n_items, -1)] = self.params['b2'] 207 | self.M *= L 208 | 209 | # add in baseline memory 210 | # it may be that this should be added to all values 211 | self.M[self.M 0.0: 259 | not_one = self.buffer < 1.0 260 | self.buffer[~rec_ind & not_one] *= 1 - \ 261 | (adjust_amt/self.buffer[~rec_ind & not_one].sum()) 262 | 263 | # process it and continue 264 | # buffer doesn't update p_k 265 | likes.append(p_rec) 266 | 267 | # do output encoding 268 | # increment context to item 
269 | self.C[rec] += self.params['e'] 270 | 271 | # self to self 272 | self.M[rec, rec] += self.params['g'] 273 | 274 | # set last_rec (so last recall from buffer is used as cue) 275 | last_rec = rec 276 | continue 277 | 278 | if rec < 0: 279 | # they stopped, so calc p_stop 280 | # first calc p_rec for all non-recalled items 281 | p_nrecs = 0.0 282 | for nrec in np.where(~rec_ind)[0]: 283 | # calc the like for the list 284 | p_nrec, p_nk = self._recall_like(nrec, p_k, 285 | last_rec=last_rec, 286 | recalls=recalls[:i]) 287 | p_nrecs += p_nrec 288 | 289 | # p_stopping is not the sum of retrieving non-recalled items 290 | likes.append(1 - np.sum(p_nrecs)) 291 | 292 | # calc the other way 293 | #p_stop, p_nk = self._recall_like(rec, p_k, 294 | # recalls=recalls[:i]) 295 | #likes.append(p_stop) 296 | 297 | # we're done recalling 298 | break 299 | 300 | # retrieve from LTM 301 | # calc the like for the list 302 | p_rec, p_k = self._recall_like(rec, p_k, 303 | last_rec=last_rec, 304 | recalls=recalls[:i]) 305 | 306 | # append the new rec like 307 | likes.append(p_rec) 308 | 309 | # do output encoding 310 | # increment context to item 311 | self.C[rec] += self.params['e'] 312 | 313 | # item to item (asymmetric, but based on order of recall, not serial position) 314 | self.M[last_rec, rec] += self.params['f1'] 315 | self.M[rec, last_rec] += self.params['f2'] 316 | 317 | # self to self 318 | self.M[rec, rec] += self.params['g'] 319 | 320 | # save last rec 321 | last_rec = rec 322 | return likes 323 | 324 | def _recall_like(self, rec, p_k, last_rec=None, recalls=None): 325 | if recalls is None: 326 | recalls = [] 327 | 328 | # see if going to make item and context attempts 329 | if last_rec and (self.params['Lmax']>0): 330 | # sample some with items and context 331 | S = self.C.copy() + self.M[last_rec] 332 | p_ci_att, p_last = _calc_p_attempts(rec, S, 333 | attempts=self.params['Lmax'], 334 | recalls=recalls, 335 | scale_thresh=self.scale_thresh) 336 | context_att = 
self.params['Kmax']-self.params['Lmax'] 337 | else: 338 | context_att = self.params['Kmax'] 339 | p_ci_att = [] 340 | p_last = 1.0 341 | 342 | # attempts with just context 343 | if context_att > 0: 344 | S = self.C.copy() 345 | p_c_att, p_last = _calc_p_attempts(rec, S, 346 | attempts=context_att, 347 | recalls=recalls, 348 | p_start=p_last, 349 | scale_thresh=self.scale_thresh) 350 | 351 | # concat all attempts 352 | p_att = np.concatenate([p_ci_att, p_c_att]) 353 | else: 354 | # just go with ci attempts 355 | p_att = p_ci_att 356 | 357 | # do weighted combo of attempts 358 | new_p_k = np.zeros(self.params['Kmax']) 359 | if rec < 0: 360 | # adjust p_att 361 | p_att = np.concatenate([p_att[1:], [p_last]]) 362 | 363 | # we're testing stopping, so just pick end values 364 | for k, p in enumerate(p_k): 365 | new_p_k[k] += \ 366 | p * p_att[self.params['Kmax']-k-1] 367 | else: 368 | # do weighted combo of all different p_k 369 | for k, p in enumerate(p_k): 370 | new_p_k[k:self.params['Kmax']] += \ 371 | p * p_att[:self.params['Kmax']-k] 372 | 373 | # p_rec is just sum over new_p_k 374 | p_rec = new_p_k.sum() 375 | 376 | # set new p_k 377 | if rec < 0: 378 | # we've stopped, so we have maxed out k 379 | p_k = np.zeros(self.params['Kmax']) 380 | else: 381 | # normalize new p_k for next iteration 382 | p_k = new_p_k/new_p_k.sum() 383 | 384 | return p_rec, p_k 385 | 386 | def simulate(self, nlists, list_type='IFR', list_def=None): 387 | # reset the model 388 | self.reset() 389 | 390 | # present the list 391 | self.present_list(list_type=list_type, list_def=list_def) 392 | 393 | # simulate lists 394 | recs = [self.sim_list() for i in range(nlists)] 395 | 396 | return recs 397 | 398 | def sim_list(self): 399 | # save starting C and M (do be restored after simulation) 400 | self.M = self.M_save.copy() 401 | self.C = self.C_save.copy() 402 | self.buffer = self.buffer_save.copy() 403 | 404 | # init the loop over items 405 | last_rec = None 406 | 407 | # var to save p_k 408 | 
p_k = np.zeros(self.params['Kmax']) 409 | 410 | # start with k with p(1.0) at zero 411 | p_k[0] = 1.0 412 | 413 | # init recalls 414 | recalls = [] 415 | rec_ind = np.zeros(self.n_items, dtype=np.bool) 416 | for i in range(self.n_items): 417 | # first recall with buffer 418 | if i < self.cur_r: 419 | # pull from non-recalled buffer items 420 | # base on probability of being in the buffer 421 | p_recs = self.buffer[~rec_ind]/self.buffer[~rec_ind].sum() 422 | cdfs = np.cumsum(p_recs) 423 | 424 | # pick one at random 425 | ind = (cdfs > np.random.rand()).argmax() 426 | rec = np.where(~rec_ind)[0][ind] 427 | recalls.append(rec) 428 | 429 | # set the last_rec 430 | last_rec = rec 431 | rec_ind[rec] = True 432 | 433 | # remove that item from the buffer and renormalize 434 | adjust_amt = 1-self.buffer[rec] 435 | self.buffer[rec] = 0.0 436 | if adjust_amt > 0: 437 | not_one = self.buffer < 1.0 438 | self.buffer[~rec_ind & not_one] *= 1 - \ 439 | (adjust_amt/self.buffer[~rec_ind & not_one].sum()) 440 | 441 | # do output encoding 442 | # increment context to item 443 | self.C[rec] += self.params['e'] 444 | 445 | # self to self 446 | self.M[rec, rec] += self.params['g'] 447 | 448 | # keep going 449 | continue 450 | 451 | # recall from LTM 452 | # loop over not-recalled items to get likes 453 | p_recs = [] 454 | p_nks = [] 455 | recs = [] 456 | for nrec in np.where(~rec_ind)[0]: 457 | # calc the like for the list 458 | p_nrec, p_nk = self._recall_like(nrec, p_k, 459 | last_rec=last_rec, 460 | recalls=recalls[:i]) 461 | 462 | p_recs.append(p_nrec) 463 | p_nks.append(p_nk) 464 | recs.append(nrec) 465 | 466 | # append p_stop 467 | recs.append(-1) 468 | cdfs = np.concatenate([np.cumsum(p_recs), [1.0]]) 469 | 470 | #p_stop, p_nk = self._recall_like(-1, p_k, 471 | # recalls=recalls[:i]) 472 | #p_recs.append(p_stop) 473 | 474 | # normalize to fix approx 475 | #p_recs = np.array(p_recs) 476 | #p_recs = p_recs/p_recs.sum() 477 | #cdfs = np.cumsum(p_recs) 478 | #cdfs = 
np.concatenate([np.cumsum(p_recs), [1.0]]) 479 | 480 | # pick a recall at random 481 | ind = (cdfs > np.random.rand()).argmax() 482 | rec = recs[ind] 483 | recalls.append(rec) 484 | if rec < 0: 485 | # all done 486 | break 487 | 488 | # set the last_rec and p_k 489 | last_rec = rec 490 | p_k = p_nks[ind] 491 | rec_ind[rec] = True 492 | 493 | # do output encoding 494 | # increment context to item 495 | self.C[rec] += self.params['e'] 496 | 497 | # item to item (asymmetric, but based on order of recall, not serial position) 498 | self.M[last_rec, rec] += self.params['f1'] 499 | self.M[rec, last_rec] += self.params['f2'] 500 | 501 | # self to self 502 | self.M[rec, rec] += self.params['g'] 503 | 504 | # add one to the returns 505 | return np.atleast_1d(recalls)+1 506 | 507 | 508 | 509 | -------------------------------------------------------------------------------- /notebooks/01_Introduction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "slideshow": { 7 | "slide_type": "slide" 8 | } 9 | }, 10 | "source": [ 11 | "# Introduction to Quantified Cognition\n", 12 | "## Psychology 5332 --- Fall 2022\n", 13 | "\n", 14 | "By: Per B. Sederberg, PhD\n", 15 | "\n", 16 | "![](http://compmem.org/assets/img/cmlab_logo.jpg)\n", 17 | "\n", 18 | "\n", 19 | "\"Open\n", 20 | "\n" 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "metadata": { 26 | "slideshow": { 27 | "slide_type": "slide" 28 | } 29 | }, 30 | "source": [ 31 | "## Quick Reference\n", 32 | "\n", 33 | "\n", 34 | "- *Credit*: 3 units\n", 35 | "\n", 36 | "- *Time*: Thursday, 14:00 -- 16:30\n", 37 | "\n", 38 | "- *Place*: Gilmer 245\n", 39 | "\n", 40 | "- *Text*: Assigned readings\n", 41 | "\n", 42 | "- *Course Web Page*: GitHub (https://github.com/compmem/QuantCog)\n", 43 | "\n", 44 | "- *Instructor*: Dr. 
Per Sederberg\n", 45 | "\n", 46 | "- *Office*: Gilmer 412\n", 47 | "\n", 48 | "- *E-mail*: pbs5u@virginia.edu (but use Discord whenever possible)\n", 49 | "\n", 50 | "- *TA*: Adam Fenton (af5fn@virginia.edu)\n", 51 | "\n", 52 | "- *Lab Website*: Computational Memory Lab (https://compmem.org)\n", 53 | "\n", 54 | "- *Office hours*: TBA\n", 55 | "\n", 56 | "- *Final*: Project-based\n" 57 | ] 58 | }, 59 | { 60 | "cell_type": "markdown", 61 | "metadata": { 62 | "slideshow": { 63 | "slide_type": "slide" 64 | } 65 | }, 66 | "source": [ 67 | "## Why are we here?\n", 68 | "\n", 69 | "Much of science, and especially psychology and neuroscience, involves testing and updating verbal theories, which are often imprecise and under-specified. \n", 70 | "\n", 71 | "Only by quantitative modeling of our experimental results can we hope to make significant progress in understanding the mechanisms that underlie cognition. \n", 72 | "\n", 73 | "Furthermore, only by quantifying and defining knowledge via mathematical principles can we achieve the effective interdisciplinary communication necessary for combining approaches to make useful progress towards understanding a system as complex as the brain. " 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "metadata": { 79 | "slideshow": { 80 | "slide_type": "slide" 81 | } 82 | }, 83 | "source": [ 84 | "## Course Aims\n", 85 | "\n", 86 | "Equip students with the skills to:\n", 87 | "\n", 88 | "a. think more deeply about the mechanisms underlying observed neural and behavioral phenomena, improving scientific thinking, and\n", 89 | "\n", 90 | "b. develop computational models that enable more precise, quantifiably testable, hypotheses, improving the scientific process. \n", 91 | "\n", 92 | "While the course will be taught at a high level, the goal is to provide practical hands-on examples for every topic, to lay the foundation necessary to understand, develop, and compare mechanistic models of cognition." 
93 | ] 94 | }, 95 | { 96 | "cell_type": "markdown", 97 | "metadata": { 98 | "slideshow": { 99 | "slide_type": "slide" 100 | } 101 | }, 102 | "source": [ 103 | "## Computing Requirements\n", 104 | "\n", 105 | "- This is a computational class and all work will be performed on a computer, and almost entirely with the Python programming language within Jupyter notebooks. \n", 106 | " - You will need to bring a laptop running Windows, OSX, or Linux to every class. \n", 107 | "\n", 108 | "- You will run the [Jupyter](https://jupyter.org) notebooks directly on your computer. This will also allow you to incorporate these approaches into your own research more easily. \n", 109 | " - Thus, my recommendation is that you install and use the [Anaconda Python](https://www.anaconda.com/) distribution for your OS. \n", 110 | "\n", 111 | "- We will spend time on the first day of class to ensure everyone has a functioning computer that will be able to run everything necessary for the course.\n" 112 | ] 113 | }, 114 | { 115 | "cell_type": "markdown", 116 | "metadata": { 117 | "slideshow": { 118 | "slide_type": "slide" 119 | } 120 | }, 121 | "source": [ 122 | "## Schedule\n", 123 | "\n", 124 | "The following is the general order of the topics covered in the course. Please note that sometimes we may cover multiple topics in a single lecture, or spend more than one lecture on a single topic, and this list is subject to modification at any time. These will be replaced by links to specific lectures as they are finalized.\n", 125 | "\n", 126 | "0. Overview\n", 127 | "1. Programming and Principles of Open Science\n", 128 | "2. Probability\n", 129 | "3. Data as a Random Variable\n", 130 | "4. Quantifying Uncertainty\n", 131 | "5. Bayesian Regression\n", 132 | "6. Bayesian Data Analysis\n", 133 | "7. Cognitive Process Models of Decision-making\n", 134 | "8. Interactive Model Exploration\n", 135 | "9. Bayesian Fits of Process Models\n", 136 | "10. Inference with Process Models\n", 137 | "11. 
Models of Memory\n", 138 | "12. Models of Reinforcement Learning\n", 139 | "13. Bring Your Own Data (BYOD) Project\n" 140 | ] 141 | }, 142 | { 143 | "cell_type": "markdown", 144 | "metadata": { 145 | "slideshow": { 146 | "slide_type": "slide" 147 | } 148 | }, 149 | "source": [ 150 | "\n", 151 | "## Evaluation\n", 152 | "\n", 153 | "This is a graduate-level course, which means that much of the burden of staying motivated to learn is transferred to the student. As such, there will not be any in-class exams. Students will be evaluated on the basis of:\n", 154 | "\n", 155 | "- Lesson exercises / class participation (30 pts)\n", 156 | "- Mini projects (30 pts)\n", 157 | "- Final project (40 pts)\n", 158 | "\n", 159 | "for a total of 100 points. \n", 160 | "\n", 161 | "The course will be graded using the standard grading scale with your percentage of points earned out of the total possible points rounding to the nearest whole percentage point.\n" 162 | ] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "metadata": { 167 | "collapsed": true, 168 | "slideshow": { 169 | "slide_type": "slide" 170 | } 171 | }, 172 | "source": [ 173 | "## GitHub\n", 174 | "\n", 175 | "All course material will be available on a public [GitHub](https://github.com/compmem/QuantCog) repository. \n", 176 | "\n", 177 | "There you will find the current version of the syllabus, all Jupyter Notebooks for the lessons, and any associated readings.\n", 178 | "\n", 179 | "I encourage you all to sign up for a GitHub account and make use of [Git](https://git-scm.com/) in your research.\n", 180 | "\n", 181 | "If you are interested, the [Software Carpentry course on git](http://swcarpentry.github.io/git-novice/) provides a fantastic introduction to version control with Git. 
\n", 182 | "\n" 183 | ] 184 | }, 185 | { 186 | "cell_type": "markdown", 187 | "metadata": { 188 | "slideshow": { 189 | "slide_type": "slide" 190 | } 191 | }, 192 | "source": [ 193 | "## Communications\n", 194 | "\n", 195 | "We will use [Discord](https://discord.com/) for all class communication and discussions. \n", 196 | "\n", 197 | "Please do not email me unless there is an issue with Discord. If you'd prefer to have a one-on-one discussion, it is possible to send direct messages in Discord.\n", 198 | "\n", 199 | "There will also be traditional office hours for in-person conversations.\n", 200 | "\n", 201 | "***Look for an email after class inviting you to the Discord Server for the course!***\n" 202 | ] 203 | }, 204 | { 205 | "cell_type": "markdown", 206 | "metadata": { 207 | "slideshow": { 208 | "slide_type": "slide" 209 | } 210 | }, 211 | "source": [ 212 | "# Let's get started!" 213 | ] 214 | }, 215 | { 216 | "cell_type": "markdown", 217 | "metadata": { 218 | "slideshow": { 219 | "slide_type": "slide" 220 | } 221 | }, 222 | "source": [ 223 | "## Python for Data Science\n", 224 | "\n", 225 | "You should complete the introductory Python modules at [Software Carpentry](http://swcarpentry.github.io/python-novice-inflammation/) as a *homework* assignment. \n", 226 | "\n", 227 | "This is a hands-on tutorial that introduces a slew of concepts for Python in Data Science. \n", 228 | "\n", 229 | "### A great book\n", 230 | "\n", 231 | "Most of the [*Python Data Science Handbook*](https://jakevdp.github.io/PythonDataScienceHandbook/) by Jake VanderPlas is excellent and relevant for this course, especially the first *four* chapters.\n", 232 | "\n", 233 | "### An entire course\n", 234 | "\n", 235 | "Finally, if you are feeling extremely ambitious, there is an entire set of [Python for Data Science course material](https://data.berkeley.edu/education/courses/data-8) from the University of California at Berkeley." 
236 | ] 237 | }, 238 | { 239 | "cell_type": "markdown", 240 | "metadata": { 241 | "slideshow": { 242 | "slide_type": "slide" 243 | } 244 | }, 245 | "source": [ 246 | "## Jupyter Notebooks\n", 247 | "\n", 248 | "The core computational and interactive infrastructure for the entire class will be [Jupyter](https://jupyter.org) notebooks. They provide an interactive way of interspersing code, text, and graphics, akin to a dynamic electronic lab notebook. \n", 249 | "\n", 250 | "Jupyter notebooks are *VERY* powerful, and have many useful extensions (for example, this presentation is an live and editable rendering of a Jupyter notebook!)" 251 | ] 252 | }, 253 | { 254 | "cell_type": "markdown", 255 | "metadata": {}, 256 | "source": [ 257 | "### A quick tour of Jupyter notebook features...\n", 258 | "\n", 259 | "- Client/Server architecture\n", 260 | "- Cells of code/text\n", 261 | "- HTML-based front-end\n", 262 | "- Cell output can be tables and (interactive) graphics" 263 | ] 264 | }, 265 | { 266 | "cell_type": "markdown", 267 | "metadata": { 268 | "slideshow": { 269 | "slide_type": "slide" 270 | } 271 | }, 272 | "source": [ 273 | "Markdown cells can have fancy formatting:\n", 274 | "\n", 275 | "- Headings with #, ##, ###, etc...\n", 276 | "- *italics*\n", 277 | "- **bold**\n", 278 | "- Inline equations like $t_i = \\rho t_{i-1} + (1 - \\rho)f_i$\n", 279 | "- Or a full equation on its on line:\n", 280 | "\n", 281 | " $$\\frac{dx}{dt} = \\frac{(\\rho - \\kappa x)}{\\tau}$$\n", 282 | "- Images:\n", 283 | " ![](https://jupyter.org/assets/main-logo.svg)\n", 284 | "- Tables:\n", 285 | "\n", 286 | "| This | is |\n", 287 | "|------|------|\n", 288 | "| a | table|" 289 | ] 290 | }, 291 | { 292 | "cell_type": "code", 293 | "execution_count": 1, 294 | "metadata": { 295 | "slideshow": { 296 | "slide_type": "slide" 297 | } 298 | }, 299 | "outputs": [], 300 | "source": [ 301 | "# Best practice is to put imports and settings at the top\n", 302 | "\n", 303 | "# load matplotlib in 
inline mode (you can use notebook if you are running local)\n", 304 | "%matplotlib inline\n", 305 | "\n", 306 | "# import some useful libraries\n", 307 | "import numpy as np # numerical analysis linear algebra\n", 308 | "import pandas as pd # efficient tables\n", 309 | "import matplotlib.pyplot as plt # plotting\n", 310 | "import ipywidgets as widgets # interactive widgets\n" 311 | ] 312 | }, 313 | { 314 | "cell_type": "markdown", 315 | "metadata": { 316 | "slideshow": { 317 | "slide_type": "slide" 318 | } 319 | }, 320 | "source": [ 321 | "## Other useful libraries\n", 322 | "\n", 323 | "- [SciPy](https://www.scipy.org): Wide range of tools for scientific computing\n", 324 | "- [StatsModels](https://www.statsmodels.org/stable/index.html): Many statistics\n", 325 | "- [NiLearn](https://nilearn.github.io): Machine learning of neuroimaging data\n", 326 | "- [Numba](http://numba.pydata.org): Just-in-time compiler to speed up Python\n", 327 | "- [PlotNine](https://plotnine.readthedocs.io/en/stable/): Port of ggplot2 to Python\n", 328 | "- [Seaborn](https://seaborn.pydata.org): Statistical data visualization" 329 | ] 330 | }, 331 | { 332 | "cell_type": "markdown", 333 | "metadata": { 334 | "slideshow": { 335 | "slide_type": "slide" 336 | } 337 | }, 338 | "source": [ 339 | "## Some libraries from my lab\n", 340 | "\n", 341 | "- [SMILE](https://github.com/compmem/SMILE): A library for writing Psychology/Neuroscience experiments in Python.\n", 342 | "- [RunDEMC](https://github.com/compmem/RunDEMC): Library for running DEMC on hierarchical models.\n", 343 | "- [PTSA](https://github.com/compmem/ptsa): Short for Python Time Series Analysis, this library aids in analysis of EEG and other forms of neural data represented as time series."
344 | ] 345 | }, 346 | { 347 | "cell_type": "code", 348 | "execution_count": 2, 349 | "metadata": { 350 | "slideshow": { 351 | "slide_type": "slide" 352 | } 353 | }, 354 | "outputs": [], 355 | "source": [ 356 | "# get some random data\n", 357 | "dat = np.random.randn(1000, 10)" 358 | ] 359 | }, 360 | { 361 | "cell_type": "code", 362 | "execution_count": 3, 363 | "metadata": { 364 | "slideshow": { 365 | "slide_type": "slide" 366 | } 367 | }, 368 | "outputs": [ 369 | { 370 | "name": "stdout", 371 | "output_type": "stream", 372 | "text": [ 373 | "2.18 ms ± 6.43 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" 374 | ] 375 | } 376 | ], 377 | "source": [ 378 | "%%timeit\n", 379 | "# calculate the mean of each column\n", 380 | "mdat = []\n", 381 | "for i in range(len(dat)):\n", 382 | " mdat.append(sum(dat[i])/len(dat[i]))\n" 383 | ] 384 | }, 385 | { 386 | "cell_type": "code", 387 | "execution_count": 7, 388 | "metadata": { 389 | "slideshow": { 390 | "slide_type": "slide" 391 | } 392 | }, 393 | "outputs": [ 394 | { 395 | "name": "stdout", 396 | "output_type": "stream", 397 | "text": [ 398 | "24.2 µs ± 1.01 µs per loop (mean ± std. dev. 
of 7 runs, 10000 loops each)\n" 399 | ] 400 | } 401 | ], 402 | "source": [ 403 | "%%timeit\n", 404 | "# numpy can help us do things like this much faster\n", 405 | "mdat = dat.mean(1)" 406 | ] 407 | }, 408 | { 409 | "cell_type": "code", 410 | "execution_count": 10, 411 | "metadata": { 412 | "slideshow": { 413 | "slide_type": "slide" 414 | } 415 | }, 416 | "outputs": [ 417 | { 418 | "data": { 419 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAD4CAYAAAAXUaZHAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAAAPiUlEQVR4nO3df6jdd33H8efLrGvFH9jS2xKTsPSPbNgWrXDNOvqPs85mWho36IhMiVsg/0RWQbDNhImMQEEQB66MYIsZVruASkNl0xgtMqi2N1q1ado1WNdeEpqrm6gMOhLf++N+O06Tc3NP7jkn59zPfT7gcr7fz/l+v+ed5N7X/eRzPt/PSVUhSWrLayZdgCRp9Ax3SWqQ4S5JDTLcJalBhrskNeh3Jl0AwNVXX12bN2+edBmStKocPXr051U10++5qQj3zZs3Mzc3N+kyJGlVSfKfSz3nsIwkNchwl6QGGe6S1CDDXZIaZLhLUoMMd0lqkOEuSQ0y3CWpQYa7JDVoKu5QlYax6wtPnNd2/4ffMYFKpOlhz12SGmS4S1KDDHdJapDhLkkNMtwlqUEDhXuSnyX5SZInk8x1bVclOZzkue7xyp7j9yY5keTZJLeNq3hJUn8X03P/46q6qapmu/17gCNVtQU40u2T5HpgB3ADsA24L8m6EdYsSVrGMPPctwPv7LYPAI8Cd3ftD1XVy8DzSU4AW4HHhngt6aI4911r3aA99wK+meRokt1d27VVdQqge7yma98AvNhz7nzX9ipJdieZSzK3sLCwsuolSX0N2nO/papOJrkGOJzkmQscmz5tdV5D1X5gP8Ds7Ox5z0uXir18tWignntVneweTwNfY3GY5aUk6wG6x9Pd4fPApp7TNwInR1WwJGl5y4Z7ktclecMr28B7gKeAQ8DO7rCdwMPd9iFgR5LLk1wHbAEeH3XhkqSlDTIscy3wtSSvHP+lqvq3JE8AB5PsAl4A7gSoqmNJDgJPA2eAPVV1dizVS5L6Wjbcq+qnwNv6tP8CuHWJc/YB+4auThqhfmPrUqtc8lerhuEsDc5wlwa01C8XZ9ZoGrm2jCQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBhrskNchwl6QGufyA1Ifr2Gi1s+cuSQ0y3CWpQYa7JDXIMXdNJce8peHYc5ekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUIJcfkMag3/IJ93/4HROoRGuVPXdJapDhLkkNGjjck6xL8sMkj3T7VyU5nOS57vHKnmP3JjmR5Nkkt42jcEnS0i6m534XcLxn/x7gSFVtAY50+yS5HtgB3ABsA+5Lsm405UqSBjFQuCfZCLwP+HxP83bgQLd9AHh/T/tDVfVyVT0PnAC2jqRaSdJABu25fxb4OPDbnrZrq+oUQPd4Tde+AXix57j5ru1VkuxOMpdkbmFh4WLrliRdwLLhnuR24HRVHR3wmunTVuc1VO2vqtmqmp2ZmRnw0pKkQQwyz/0W4I4k7wWuAN6Y5IvAS0nWV9WpJOuB093x88CmnvM3AidHWbTa4kfqS
aO3bM+9qvZW1caq2sziG6XfrqoPAoeAnd1hO4GHu+1DwI4klye5DtgCPD7yyiVJSxrmDtV7gYNJdgEvAHcCVNWxJAeBp4EzwJ6qOjt0pVr17KFLl06qzhsOv+RmZ2drbm5u0mVohAzy87n8gEYtydGqmu33nHeoSlKDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBhrskNchwl6QGGe6S1KBhPqxD0gj0W/vetd81LHvuktQge+7SJeKnU+lSsucuSQ0y3CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDDHdJapA3MUlTyCUJNCx77pLUIMNdkhpkuEtSgwx3SWrQsuGe5Iokjyf5UZJjST7VtV+V5HCS57rHK3vO2ZvkRJJnk9w2zj+AJOl8g/TcXwbeVVVvA24CtiW5GbgHOFJVW4Aj3T5Jrgd2ADcA24D7kqwbQ+2SpCUsG+616Dfd7mXdVwHbgQNd+wHg/d32duChqnq5qp4HTgBbR1m0JOnCBhpzT7IuyZPAaeBwVX0fuLaqTgF0j9d0h28AXuw5fb5rO/eau5PMJZlbWFgY4o8gSTrXQOFeVWer6iZgI7A1yY0XODz9LtHnmvuraraqZmdmZgYqVpI0mIuaLVNVvwQeZXEs/aUk6wG6x9PdYfPApp7TNgInhy1UkjS4QWbLzCR5U7f9WuDdwDPAIWBnd9hO4OFu+xCwI8nlSa4DtgCPj7huSdIFDLK2zHrgQDfj5TXAwap6JMljwMEku4AXgDsBqupYkoPA08AZYE9VnR1P+ZKkfpYN96r6MfD2Pu2/AG5d4px9wL6hq5MkrYh3qEpSgwx3SWqQ4S5JDTLcJalBhrskNciP2ZNWiX4fvQd+/J76s+cuSQ2y566hLdWjlDQ59twlqUGGuyQ1yGEZDczhF2n1sOcuSQ0y3CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUEuP6C+XGpAWt3suUtSgwx3SWqQ4S5JDTLcJalBhrskNchwl6QGORVSWuX6TVu9/8PvmEAlmib23CWpQcuGe5JNSb6T5HiSY0nu6tqvSnI4yXPd45U95+xNciLJs0luG+cfQJJ0vkF67meAj1XVW4CbgT1JrgfuAY5U1RbgSLdP99wO4AZgG3BfknXjKF6S1N+y4V5Vp6rqB932r4HjwAZgO3CgO+wA8P5uezvwUFW9XFXPAyeArSOuW5J0ARf1hmqSzcDbge8D11bVKVj8BZDkmu6wDcD3ek6b79okXSK+yaqB31BN8nrgK8BHq+pXFzq0T1v1ud7uJHNJ5hYWFgYtQ5I0gIHCPcllLAb7g1X11a75pSTru+fXA6e79nlgU8/pG4GT516zqvZX1WxVzc7MzKy0fklSH4PMlglwP3C8qj7T89QhYGe3vRN4uKd9R5LLk1wHbAEeH13JkqTlDDLmfgvwIeAnSZ7s2v4WuBc4mGQX8AJwJ0BVHUtyEHiaxZk2e6rq7KgLlyQtbdlwr6p/p/84OsCtS5yzD9g3RF2SpCF4h6okNchwl6QGGe6S1CDDXZIaZLhLUoNcz119b1WXtLrZc5ekBhnuktQgw12SGuSY+xri2Lq0dthzl6QGGe6S1CCHZaQ1YqlhOT+hqU323CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDnArZKO9GldY2e+6S1CDDXZIaZLhLUoMMd0lqkOEuSQ1ytoy0xvWbWeViYqufPXdJapDhLkkNMtwlqUGGuyQ1yHCXpAYtO1smyQPA7cDpqrqxa7sK+BdgM/Az4C+q6r+75/YCu4CzwN9U1TfGUvka5MekSRrUIFMhvwB8DvjnnrZ7gCNVdW+Se7r9u5NcD+wAbgDeDHwrye9X1dnRlq1eLhIm6VzLhntVfTfJ5nOatwPv7LYPAI8Cd3ftD1XVy8DzSU4AW4HHRlSvpEvAue+r30rH3K+tqlMA3
eM1XfsG4MWe4+a7tvMk2Z1kLsncwsLCCsuQJPUz6jdU06et+h1YVfuraraqZmdmZkZchiStbSsN95eSrAfoHk937fPApp7jNgInV16eJGklVhruh4Cd3fZO4OGe9h1JLk9yHbAFeHy4EiVJF2uQqZBfZvHN06uTzAOfBO4FDibZBbwA3AlQVceSHASeBs4Ae5wpI0mX3iCzZT6wxFO3LnH8PmDfMEVJkobjHaqS1CDDXZIaZLhLUoMMd0lqkB+zJ2kgLly3uthzl6QGGe6S1CDDXZIa5Jj7lHKNdknDsOcuSQ2y5y5pKH6wx3Qy3CWNnIE/eYb7FHB8XdKoOeYuSQ0y3CWpQQ7LXEIOv0i6VOy5S1KDDHdJapDDMpIuCVeVvLTsuUtSgwx3SWqQwzKSJsq7WcfDnrskNchwl6QGOSwzJt6wJK2cM2uGZ7iPgEEuado4LCNJDbLnfhHsoUvTx9k2/RnuSzDIJa1mhrukVcNO1+AMd/yGkdaKtTSEM7ZwT7IN+AdgHfD5qrp3XK/Vj4EtrV3+/I8p3JOsA/4R+BNgHngiyaGqenocr+c/pKSVarU3P66e+1bgRFX9FCDJQ8B2YCzhLkmjdCk7jOP6RTKucN8AvNizPw/8Ye8BSXYDu7vd3yR5dky1XMjVwM8n8LoXyzpHa7XUCaunVutcoQf+qm/zoHX+3lJPjCvc06etXrVTtR/YP6bXH0iSuaqanWQNg7DO0VotdcLqqdU6R2sUdY7rDtV5YFPP/kbg5JheS5J0jnGF+xPAliTXJfldYAdwaEyvJUk6x1iGZarqTJKPAN9gcSrkA1V1bByvNaSJDgtdBOscrdVSJ6yeWq1ztIauM1W1/FGSpFXFVSElqUGGuyQ1aE2He5K/T/LjJE8m+WaSN0+6pqUk+XSSZ7p6v5bkTZOuqZ8kdyY5luS3SaZuylmSbUmeTXIiyT2TrqefJA8kOZ3kqUnXciFJNiX5TpLj3b/5XZOuaSlJrkjyeJIfdbV+atI1LSXJuiQ/TPLIMNdZ0+EOfLqq3lpVNwGPAH834Xou5DBwY1W9FfgPYO+E61nKU8CfA9+ddCHn6lkW40+B64EPJLl+slX19QVg26SLGMAZ4GNV9RbgZmDPlP59ArwMvKuq3gbcBGxLcvNkS1rSXcDxYS+ypsO9qn7Vs/s6zrnRappU1Ter6ky3+z0W7x2YOlV1vKomcbfxIP5/WYyq+l/glWUxpkpVfRf4r0nXsZyqOlVVP+i2f81iIG2YbFX91aLfdLuXdV9T9/OeZCPwPuDzw15rTYc7QJJ9SV4E/pLp7rn3+mvgXyddxCrUb1mMqQyj1SbJZuDtwPcnXMqSuuGOJ4HTwOGqmsZaPwt8HPjtsBdqPtyTfCvJU32+tgNU1SeqahPwIPCRaa61O+YTLP53+MFprnNKLbsshi5ektcDXwE+es7/hqdKVZ3thmA3AluT3Djhkl4lye3A6ao6OorrNf9hHVX17gEP/RLwdeCTYyzngparNclO4Hbg1prgDQoX8Xc6bVwWY8SSXMZisD9YVV+ddD2DqKpfJnmUxfc1pulN61uAO5K8F7gCeGOSL1bVB1dyseZ77heSZEvP7h3AM5OqZTndh5/cDdxRVf8z6XpWKZfFGKEkAe4HjlfVZyZdz4UkmXllhlmS1wLvZsp+3qtqb1VtrKrNLH5vfnulwQ5rPNyBe7vhhB8D72HxXepp9TngDcDhburmP026oH6S/FmSeeCPgK8n+caka3pF94b0K8tiHAcOTuOyGEm+DDwG/EGS+SS7Jl3TEm4BPgS8q/uefLLrdU6j9cB3up/1J1gccx9qquG0c/kBSWrQWu+5S1KTDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUoP8DF2kt8NZsnywAAAAASUVORK5CYII=\n", 420 | "text/plain": [ 421 | "
" 422 | ] 423 | }, 424 | "metadata": { 425 | "needs_background": "light" 426 | }, 427 | "output_type": "display_data" 428 | } 429 | ], 430 | "source": [ 431 | "# sample some normally-distributed random numbers\n", 432 | "x = np.random.randn(10000)\n", 433 | "\n", 434 | "# plot a histogram of them\n", 435 | "plt.hist(x, alpha=.7, bins='auto'); # the ; suppresses the printout of the return values" 436 | ] 437 | }, 438 | { 439 | "cell_type": "code", 440 | "execution_count": 6, 441 | "metadata": { 442 | "slideshow": { 443 | "slide_type": "slide" 444 | } 445 | }, 446 | "outputs": [ 447 | { 448 | "data": { 449 | "application/vnd.jupyter.widget-view+json": { 450 | "model_id": "827f3c4d47d44f258e69207e19c5408f", 451 | "version_major": 2, 452 | "version_minor": 0 453 | }, 454 | "text/plain": [ 455 | "interactive(children=(IntSlider(value=10, description='x', max=30, min=-10), Output()), _dom_classes=('widget-…" 456 | ] 457 | }, 458 | "metadata": {}, 459 | "output_type": "display_data" 460 | } 461 | ], 462 | "source": [ 463 | "# you can even have interactive widgets!\n", 464 | "def f(x):\n", 465 | " print(x)\n", 466 | "widgets.interact(f, x=10);" 467 | ] 468 | }, 469 | { 470 | "cell_type": "markdown", 471 | "metadata": { 472 | "slideshow": { 473 | "slide_type": "slide" 474 | } 475 | }, 476 | "source": [ 477 | "## Why do we use Anaconda?\n", 478 | "\n", 479 | "![](https://imgs.xkcd.com/comics/python_environment.png)" 480 | ] 481 | }, 482 | { 483 | "cell_type": "markdown", 484 | "metadata": { 485 | "slideshow": { 486 | "slide_type": "slide" 487 | } 488 | }, 489 | "source": [ 490 | "## Why do we use Anaconda?\n", 491 | "\n", 492 | "1. Relatively easy to install and well-supported\n", 493 | " - Can be installed with GUI or command line\n", 494 | " - Easy to find solutions to installation or runtime problems online\n", 495 | "2. 
Allows you to create virtual environments\n", 496 | " - Essential when packages share dependencies but require different versions of the same package\n", 497 | "3. Has a powerful dependency solver\n", 498 | " - Will upgrade/downgrade your packages as needed when you install a new package\n", 499 | "4. Development team maintains a core scientific stack (numpy, scipy, matplotlib, etc.)\n", 500 | " - Takes pressure off the user to determine if their scientific packages are working as expected\n", 501 | " - More resources for troubleshooting\n", 502 | "5. Has a highly customizable and user-friendly interactive python shell\n", 503 | " - Can be easier to test code quickly in an interactive shell than by running a script and waiting for output\n", 504 | " - Crucial for visualization of data and results\n", 505 | "6. Makes programming much easier on windows\n", 506 | " - No need to worry about system variables" 507 | ] 508 | }, 509 | { 510 | "cell_type": "markdown", 511 | "metadata": { 512 | "slideshow": { 513 | "slide_type": "slide" 514 | } 515 | }, 516 | "source": [ 517 | "## Installing Anaconda\n", 518 | "\n", 519 | "Follow the instructions to download and install Anaconda on this website:\n", 520 | "\n", 521 | "https://docs.anaconda.com/anaconda/install/\n", 522 | "\n", 523 | "- Make sure to follow the instructions specific to your operating system!\n", 524 | "- Each operating system works differently, so the Anaconda team has designed installation software specifically\n", 525 | " tailored for each operating system" 526 | ] 527 | }, 528 | { 529 | "cell_type": "markdown", 530 | "metadata": { 531 | "slideshow": { 532 | "slide_type": "slide" 533 | } 534 | }, 535 | "source": [ 536 | "## First Assignments!!!\n", 537 | "\n", 538 | "- Find the assignment on GitHub page for this course as Jupyter notebook. 
In short, you should:\n", 539 | " - Complete the introductory Python modules at [Software Carpentry](http://swcarpentry.github.io/python-novice-inflammation/) depending on your familiarity with Python\n", 540 | " - Use the knowledge you gained to read in a data file provided on the course GitHub page and perform the requested data processing\n", 541 | " - Finally, upload the notebook, saved as an html file.\n", 542 | "- Look for an email about joining our Discord server and join!\n", 543 | "\n", 544 | "You will receive ***points*** for the participation/homework grade by finishing these two tasks!\n", 545 | "\n", 546 | "### See you next week!!!" 547 | ] 548 | } 549 | ], 550 | "metadata": { 551 | "celltoolbar": "Slideshow", 552 | "kernelspec": { 553 | "display_name": "Python 3 (ipykernel)", 554 | "language": "python", 555 | "name": "python3" 556 | }, 557 | "language_info": { 558 | "codemirror_mode": { 559 | "name": "ipython", 560 | "version": 3 561 | }, 562 | "file_extension": ".py", 563 | "mimetype": "text/x-python", 564 | "name": "python", 565 | "nbconvert_exporter": "python", 566 | "pygments_lexer": "ipython3", 567 | "version": "3.9.7" 568 | }, 569 | "rise": { 570 | "scroll": true 571 | } 572 | }, 573 | "nbformat": 4, 574 | "nbformat_minor": 2 575 | } 576 | -------------------------------------------------------------------------------- /assignments/A02_Plot_Distributions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Assignment 2: Plotting Probability Distributions\n", 8 | "## Quantified Cognition\n", 9 | "### Psychology 5332\n", 10 | "\n", 11 | "By: Per B. Sederberg, PhD\n" 12 | ] 13 | }, 14 | { 15 | "cell_type": "markdown", 16 | "metadata": {}, 17 | "source": [ 18 | "# Objectives\n", 19 | "\n", 20 | "Upon completion of this assignment, the student will demonstrate the ability to:\n", 21 | "\n", 22 | "1. 
Use a Jupyter Notebook\n", 23 | "\n", 24 | "2. Parameterize probability densities\n", 25 | "\n", 26 | "3. Plot different density distributions\n" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "metadata": {}, 32 | "source": [ 33 | "# Assignment\n", 34 | "\n", 35 | "- Perform this assignment by writing code in *this notebook* (***after making a copy and renaming it to have your userid in the title --- e.g., A02_Plot_Distributions_mst3k***).\n", 36 | "\n", 37 | "- Your task is to recreate a handful of distribution plots from Wikipedia.\n", 38 | "\n", 39 | "- Links to the specific distributions and plots to recreate are provided below. \n", 40 | "\n", 41 | "- *Note: Do not worry about exact line colors of fonts; the key is to recreate the shapes of the distributions.*\n", 42 | "\n", 43 | "- ***When you are done, save this notebook as HTML (`File -> Download as -> HTML`) and upload it to the matching assignment on UVACollab.***\n", 44 | "\n" 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": {}, 50 | "source": [ 51 | "# Name: \n", 52 | "# User ID: " 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 12, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "# Load in necessary modules\n", 62 | "import numpy as np\n", 63 | "import matplotlib.pyplot as plt\n", 64 | "import dists" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": 13, 70 | "metadata": {}, 71 | "outputs": [], 72 | "source": [ 73 | "# function to help plot a PDF\n", 74 | "def plot_pdf(dist, support=None, npoints=100, alpha=0.7):\n", 75 | " # see if must determine support range\n", 76 | " if support is None:\n", 77 | " # grab 99% interval and then expand a bit\n", 78 | " support = np.array(dist.interval(0.99))\n", 79 | " rng = np.diff(support)*.1\n", 80 | " support[0] -= rng\n", 81 | " support[1] += rng\n", 82 | " \n", 83 | " # set a range of linearly-spaced points\n", 84 | " x = np.linspace(support[0], support[1], npoints)\n", 85 
| " \n", 86 | " # evaluate the pdf at those points\n", 87 | " pdf = dist.pdf(x)\n", 88 | " \n", 89 | " # plot the results\n", 90 | " plt.plot(x, pdf, lw=3, alpha=alpha)\n", 91 | " plt.xlabel('Value')\n", 92 | " plt.ylabel('Probability Density')" 93 | ] 94 | }, 95 | { 96 | "cell_type": "markdown", 97 | "metadata": {}, 98 | "source": [ 99 | "### Inverse Gamma Distibution\n", 100 | "\n", 101 | "https://en.wikipedia.org/wiki/Inverse-gamma_distribution\n", 102 | "\n", 103 | "#### Recreate this plot\n", 104 | "\n", 105 | "![](https://upload.wikimedia.org/wikipedia/commons/thumb/c/c8/Inv_gamma_pdf.svg/488px-Inv_gamma_pdf.svg.png)" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 14, 111 | "metadata": {}, 112 | "outputs": [ 113 | { 114 | "data": { 115 | "text/plain": [ 116 | "" 117 | ] 118 | }, 119 | "execution_count": 14, 120 | "metadata": {}, 121 | "output_type": "execute_result" 122 | }, 123 | { 124 | "data": { 125 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEJCAYAAACZjSCSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAABARUlEQVR4nO3dd3zd1X34/9dbe2/ZspYl772wsYFAIIwACSWMhtU0JORHSJr1SNMmbTOa5tumWSSQkFJCaEYTCAQCNAEcNmEYvLHlKU9JtmxZe917dXXP749zpXslS1dX0r269+q+n4/H53E/+56Pr3Xf92wxxqCUUip+JUQ6AUoppSJLA4FSSsU5DQRKKRXnNBAopVSc00CglFJxTgOBUkrFubAFAhF5SEROi8juMc5bJyL9InJjuNKilFJqdOHMEfwCuDLQCSKSCHwH2BjGdCillAogKVw3Nsa8JiJVY5z2WeBxYF2w9y0qKjJVVWPdVimllL+tW7eeMcYUj3QsbIFgLCJSBlwHvI9xBIKqqiq2bNkStnQppdR0JCLHRjsWycriHwFfNsb0j3WiiNwpIltEZEtTU1P4U6aUUnEkYjkCYC3wiIgAFAFXi4jbGPPk8BONMQ8ADwCsXbtWB0dSSqkQilggMMZUD6yLyC+AP44UBJRSSoVX2AKBiDwMXAwUiUg98A0gGcAYc3+43lcpFT36+vqor6/H4XBEOilxIy0tjfLycpKTk4O+Jpythm4Zx7m3hysdSqnIqa+vJzs7m6qqKrzFwCqMjDE0NzdTX19PdXX12Bd4ac9ipVTYOBwOCgsLNQhMERGhsLBw3DkwDQRTrc8BHSdAJwRScUKDwNSayL93JFsNxZ9Te+Av3wdXN5SugQ13QVpupFOllIpzmiOYKsc3wcv/boMAwIlt8KcvQcPWyKZLKRX3NBBMhQMb4fUfgcc9dL+zA179Lux7JiLJUkop0EAQfmdqYctDgLdOIHsWnPd3kJ7vO2fnb6G7OSLJU0oF7+Mf/zgzZsxg2bJl47ru61//
OsuXL2fBggU88MADU/7+Y9FAEG5HX/OtF8yFy/8Nqi+Cq78HebPt/v4+2PlwZNKnlAra7bffznPPPTeuazZu3Mj27dvZsWMHjz/+OE8++eSUvn8wNBCEkzFQt9m3vepWSMux66nZcM7tvmNH/wLNh6Y0eUrFi5qaGi677DIWLFjAt771LT772c+yefPmsS8c5qKLLqKgoGBc1zz99NPcfvvt9PX18ZOf/IQbbrhh3O87mfcPhrYaCqfmQ9DbYtdTsmDG4qHHZy6BsrXQ4B1Ndfuv4dJvgDa3U9PQHb8Y/xdvsH5+++gDGDscDv76r/+axx57jDlz5rBo0SLOOecc1q3zXXPhhRfS2dl51rXf//73ueyyyyaVtq1bt7Ju3ToKCwupqqrihz/84ZDj4XzvYGkgCKf6d3zr5WshIfHsc1bfBie2g+mH03ttK6LytVOXRqWmuRdeeIHVq1ezdOlSAFwuF3//938/5Jy//OUvYXlvj8dDfX09t99+OzfddBOf/OQnufvuu/nqV78a9vceDw0E4WKMbTI6oPzckc/LKYX5l9mWRQA7H4GyczRXoFSIbN++nTVr1gBw4sQJsrKyuOCCC4acE65f5fv372f+/PkApKenc8EFF9DY2Dgl7z0eGgjCpb0Ouk7Z9aRUKFk++rnLboTDr4Dbaa9r2g8zFk1JMpWaKoGKb8IpNTWV+vp6AP7pn/4Jl8t11jmh+FV+6aWX8qtf/YqysrLBfdu3b8fpdNLf34/b7ea3v/0t9957b8jfe7K0sjhc6vyKhUpXQ1LK6Oem5cDs9/i2a18IX7qUijO33norr732GgsXLmTlypWcd955fOELX5jQvW655RbOO+889u/fT3l5OT//+c8BWwRUW1t7VkXujh076O3tZe7cuVxwwQV89KMfZeXKlRN+ltHef7I0RxAu9X4VYxXrxz5/3qVw6EW7fvwtOOejtmWRUmpSysvL2bo1ND34H3545Gbee/bs4YYbbiA9PX3I/u3bt/PrX/86ZO3+R3v/ydIcQTh0noLWo3Y9IcnmCMZSOBcK5th1jxuOvBb4fKVU1Fi2bBl33333Wfv37dvHokXRX8yrgSAcGt/1rZesgOT00c/1N8+vYujg8zpCqVIxrq6ujqSk6C940UAQDm3HfOvjqfSdfQEkpdn1zpO2OalSSoWZBoJwaG/wreeWB39dchpUX+jbrn0+dGlSSqlRaCAIh/Z633puxfiu9S8eqnvHN2y1UkqFiQaCUHO02+GlARJTILN4fNfnV9kFbKVxffi65SulFGggCL3hxUIT6SE826/X47E3J58mpZQKIGyBQEQeEpHTIrJ7lOO3ici73uVNEZl4L4toMqRYaBz1A/5mn+9bb9xlcxlKKRUm4cwR/AK4MsDxI8B7jTErgG8BE5+tIZq0H/etTzQQZBZB8UK7bjxw/O3Jp0sppUYRtkBgjHkNaAlw/E1jTKt3cxMwwW/NKDPRFkPDDSkeen3i91FKqTFESx3BHcCzox0UkTtFZIuIbGlqaprCZE3AZFoM+avcAOL9eJr2Q/eZyaVLKTUpdXV1XHLJJSxevJilS5dyzz33BH2tTlU5BhG5BBsIvjzaOcaYB4wxa40xa4uLx9kKZypNtsWQv7RcmOn3YWulsVIRlZSUxA9+8AP27t3Lpk2buO+++9izZ8+Y1+lUlWMQkRXAg8C1xpjYn709FC2G/FX5Fw+9Mbl7KRXHQjFV5axZswbnNcjOzmbx4sU0NDSMcZVOVRmQiFQCTwAfMcYciFQ6QioULYb8la+DhJ/Z/gStR6GzEbJLJn9fpSLhtzeF7963/m7UQ+GYqvLo0aNs376d9evHHlk4rqeqFJGHgYuBIhGpB74BJAMYY+4Hvg4UAj8V+8vZbYyJ7Tka2+t86zllo58XrJRMmLXSTl8JUPc2LLl28vdVKo6EeqrKrq4ubrjhBn70ox+Rk5MT8Ny4n6rSGHPLGMc/AXwiXO8fEf45grxJVBT7qzjX
LxC8o4FAqXEK5VSVfX193HDDDdx2221cf/31Y763TlUZj0LVYshf2VqQRDu5fXMtdDdDZmFo7q3UVApQfBNOoZqq0hjDHXfcweLFi/niF7941nGdqlKFtsWQv9QsmLnUt12nncuUGo9QTVX5xhtv8Otf/5qXXnqJVatWsWrVKp555hlAp6pUA0LdYshfxXrfZDd1b8Oiq0N3b6WmuVBNVfme97wHM8pkUTpVpbJC3WLIX8U6wBtYmvZDb2vA05VSU0unqlRW1ynfevas0N47LddvpjMDdTo0tVKxQKeqjDe9fsMqZYS+wwcVG3zrWk+glAohDQSh0uMfCMLQqqfiXN/66T3gPLu5mVJKTYQGglDxzxGkhyFHkFEARbY9Msbj61ugVJQbrYJVhcdE/r01EISCMcNyBGEIBADlfrmCunfC8x5KhVBaWhrNzc0aDKaIMYbm5mbS0tLGdV3012LEAmenHQ8IIDndLuFQcS7s+I1dP7kT+hyQPL4PXKmpVF5eTn19PVE/fPw0kpaWRnn5+FouaiAIhXAXCw3ILoG8Smg7bgPPie0w+7zwvZ9Sk5ScnEx1dXWkk6HGoEVDodDjN4J2uIqFBvgXD9Vr8ZBSavI0EIRCj18Hr3DmCGBo66GGbdDfF973U0pNexoIQqE3zE1H/eVVQtZMu+52QOOu8L6fUmra00AQClPRYmiAiJ2wZoC2HlJKTZIGglCYyjoCsIPQDWjYAh5P+N9TKTVtaSAIhalqNTSgaD6k59t1Zyc07Q3/eyqlpi0NBKEwlUVD4C0e8pvVU4uHlFKToIFgsvoc0Ndj1xOSIDXwHKYhM2QQunds72allJoADQSTNaRYKC+0E9IEMmMxpGT50tBcOzXvq5SadsIWCETkIRE5LSK7RzkuInKviNSKyLsisiZcaQmrcI86OpqExKGth45vmrr3VkpNK+HMEfwCuDLA8auA+d7lTuC/wpiW8JnqimJ/FcN6GWvxkFJqAsIWCIwxrwEtAU65FviVsTYBeSIS4qm9psCQpqNTmCMAKFnuG+Cu6zS0Hp3a91dKTQuRrCMoA+r8tuu9+2LLVLcY8peYDKV+JWraekgpNQGRDAQj1aqOWLYhIneKyBYR2RJ1w9n2RLBoCIZ2LtMpLJVSExDJQFAPVPhtlwMnRjrRGPOAMWatMWZtcXHxlCQuaL1+A85NdY4AYNZKmzMA6GiA9vqpT4NSKqaNGQhE5PsisjQM7/008Lfe1kMbgHZjzMkwvE94RbKyGOzENKWrfdvaekgpNU7B5Aj2AQ+IyNsicpeI5AZzYxF5GHgLWCgi9SJyh/f6u7ynPAMcBmqBnwGfnkD6I6vfDb1t3g3xDfsw1Sr9JqfRQKCUGqcxZygzxjwIPCgiC4GPAe+KyBvAz4wxLwe47pYx7muAvxtneqOLo53Bao20HEiM0IRvpWts8VB/H7TX2eKh3PFNVaeUil9B1RGISCKwyLucAXYCXxSRR8KYtugXyaaj/rR4SCk1CcHUEdyNLR66GvgPY8w5xpjvGGOuAVYHvnqai3T9gD8tHlJKTVAwZRm7ga8aY3pGOHbuCPvix5AcQYTqBwZo8ZBSaoKCKRq6bXgQEJEXAYwx7WFJVayIdB8Cf1o8pJSaoFEDgYikiUgBUCQi+SJS4F2qgNIpS2E0c3b41tPzIpaMQVo8pJSagEBFQ58EvoD90t/mt78DuC+MaYodzk7f+lTNQxCIFg8ppSZg1ByBMeYeY0w18CVjTLXfstIY85MpTGP0cvjlCKIhEAwvHjr2ZuTSopSKGYGKht7nXW0QkeuHL1OUvujm9KsiSc2OXDr8zb7At37sDR2aWik1pkBFQ+8FXgKuGeGYAZ4IS4piiX/RUFoU5AjAFg8lpYHbAZ2N0HIYCudGOlVKqSg2aiAwxnzD+/qxqUtODHG7wO2065IIyRmRTc+ApBQ7Yc2R1+z2sTc0ECilAgqmQ9nnRSTHOzjcgyKyTUSumIrERbUhFcXZUzdX
cTCGFA+9CR5P5NKilIp6wfQj+LgxpgO4ApiBHW/oP8Oaqljg33Q0WoqFBpQs91Ve97ZC097IpkcpFdWCCQQDP3WvBv7HGLOTkSeViS/+gSBaKooHJCRC5Qbf9tE3IpcWpVTUCyYQbBWRP2MDwUYRyQa0rGF40VC0qXqPb71ukx0yWymlRhBMILgD+AqwzjvURAq2eCi+RVsfguGKFkBmkV13dcPJHRFNjlIqeo0ZCIwxHuAUsERELgKWAnlhTlf0c0Z5IBAZWmk80IpIKaWGGXP0URH5DnATsAfo9+42QHx/s0RjH4Lhqi+CPU/Z9YatNs3RWIyllIqoYIah/hCw0BjjDHNaYku05wjAjjNUMBdaDoHHDcfeggXa8lcpNVQwdQSHgeRwJyTmOKK41ZC/6ot860dejVw6lFJRK5gcQQ+wwzsHwWCuwBjzubClKhZEe6uhAbPPh+2/tjmC5lroOAE5Ooq4UsonmBzB08C3gDeBrX7LmETkShHZLyK1IvKVEY7nisj/ichOEakRkdhpjRTN/Qj8peVA6SrftlYaK6WGGTNHYIz5pYikA5XGmP3B3tg74f19wOVAPbBZRJ42xuzxO+3vgD3GmGtEpBjYLyK/Mca4xvcYU8zjAWeXbzta6wgGVF8M9Vvs+pHXYMVN0TUkhlIqooIZa+gaYAfwnHd7lYg8HcS9zwVqjTGHvV/sjwDXDjvHANkiIkAW0AJEf88nVxc26djB5hKDKWGLoNLVkJJl13ua4VRNZNOjlIoqwRQN/Sv2S70NwBizA6gO4royoM5vu967z99PgMXACWAX8Hlvv4XoFivFQgMSk6DKr0/BoZcilxalVNQJJhC4R5ikPpjZTkYqexh+3fuxuY1SYBXwExE5q5xFRO4UkS0isqWpqSmItw6zWOhDMNycS3zrdW8PfQalVFwLJhDsFpFbgUQRmS8iP8ZWHI+lHqjw2y7H/vL39zHgCWPVAkeARcNvZIx5wBiz1hiztri4OIi3DrMhTUdzI5eO8SiohoI5dt3jhqOvRzY9SqmoEUwg+Cx2WAkn8DB28vovBHHdZmC+iFSLSApwM7YFkr/jwKUAIjITWIjttxDdYqXp6HBzL/Wt176o01gqpYDgxhrqMcb8izFmnfdX+b8YYxxBXOcGPgNsBPYCjxpjakTkLhG5y3vat4DzRWQX8CLwZWPMmYk/zhSJ5rkIApl9PiSm2PX2OtuvQCkV9wI2dxGRjwKfx/5SB/uFfq8x5lfB3NwY8wzwzLB99/utn8BOeBNbYq2yeEBKhg0Gh1+x27UvQtH8iCZJKRV5o+YIRORvsUVAf4+tzC0D/hH4vPdY/IqV4SVG4l88dOwNcPVELi1KqagQqGjo08B1xpiXjTHtxpg2Y8xLwA3eY/FrSB1BDBUNgc0B5Jbb9X6XndNYKRXXAgWCHGPM0eE7vfti7NsvxIbUEcRIq6EBIkNzBQc3aqWxUnEuUCDoneCx6S9WWw0NqL7IV2ncdhya9kU2PUqpiApUWbxYRN4dYb8Ac8KUnuhnTGzMRRBIapYNBrUv2O0Dz8GMxZFNk1IqYgIGgilLRSxxO6G/z64nJEFSamTTM1Hzr/AFgrrN0NMCGQWRTZNSKiJGDQTGmGNTmZCYMWR4idzYHcUzf7bNBZzeC6bfBoUVH450qpRSERBMz2LlL1b7EIxk/vt967UvQH/0D/yqlAo9DQTj5fAbfy/WA0H5OkjPt+uOdjsYnVIq7gQzH8EHRUQDxoBY7kMwXGISzLvMt73vT9qUVKk4FMwX/M3AQRH5rohoBXKsjjM0mnmX2UpvgJZD2pRUqTgUzKBzfwOsBg4B/yMib3nnB4jxcpEJivU+BMOl50H1e33be/8YsaQopSIjqCIfY0wH8Dh2uslZwHXANhH5bBjTFp1icS6CsSz6gG+9YQt0DJ82Qik1nQVTR/BXIvIH4CUgGTjXGHMVsBL4UpjTF31c/jmCrMilI5Ryy6B0jW97358ilxal1JQL
JkdwI/BDY8wKY8z3jDGnwc5TAHw8rKmLRs4u33rKNAkEAIuv8a0ffmVo6yil1LQWTCA4aYx5zX+HiHwHwBjzYlhSFc1cfoFgOtQRDJixGArm2nWPGw5sjGx6lFJTJphAcPkI+64KdUJixnTNEYjA4g/6tvc/q3MVKBUnAk1M8ynvFJKLRORdv+UIMNJgdNOfMdM3RwBQsR6yZtr1vh47GJ1SatoLlCP4LXAN8JT3dWA5x9ukNP64nbbYBCAxGZJSIpueUEtIhKXX+bb3/Qn6xpyeWikV4wIFAuOdhObvgE6/BRGJz2Eq/fsQpEyz3MCAqgshs8iuu7qg9vnIpkcpFXZj5QgAtgJbvK9b/bbjz5BioWlUP+AvMQmWfMi3vff/wO2KWHKUUuE3aiAwxnzQ+1ptjJnjfR1YgpqYRkSuFJH9IlIrIl8Z5ZyLRWSHiNSIyKsTe4wpMiRHME0DAcCciyHdm+lztMOh+GscplQ8GXU+AhFZM9oxAGPMtkDHRSQRuA/b6qge2CwiTxtj9vidkwf8FLjSGHNcRGaMI+1Tz9XtW5+uOQKw9R9L/gq2/sJu73kK5r4vdifhUUoFFGiGsh8EOGaA941x73OBWmPMYQAReQS4Ftjjd86twBPGmOMAA53VolY81BEMmHupDQC9rXY58BwsuTbSqVJKhUGgGcoumeS9y4A6v+16YP2wcxYAySLyCpAN3GOM+dUk3zd84qGOYEBSCiy7ATY/aLf3PGVHKk3JjGy6lFIhF6ho6H3GmJdE5PqRjhtjnhjj3iPN4Th8sPsk4BzgUiAdeEtENhljDgxLy53AnQCVlZVjvG0YxUsdwYA5l8Dep6HrtC0W2/tHWHlTpFOllAqxQK2GBsYmvmaE5YOjXeSnHqjw2y4Hhg9rWQ88Z4zpNsacAV7DDmY3hDHmAWPMWmPM2uLi4iDeOkzipY5gQGISLPebx3j/n6C3LWLJUUqFR6CioW94Xz82wXtvBuaLSDXQgJ3g5tZh5zwF/EREkoAUbNHRDyf4fuEXT3UEA6reY3MFbcdth7qaP8Daif6XUEpFo2CGoS4UkXtFZJuIbBWRe0SkcKzrjDFu4DPARmAv8KgxpkZE7hKRu7zn7AWeww5Z8Q7woDFm92QeKKziqY5ggAisuNm3ffB5na9AqWkmmEHnHgGagBuwQ1I3Ab8L5ubGmGeMMQuMMXONMf/u3Xe/MeZ+v3O+Z4xZYoxZZoz50bifYCrFWx3BgLI1ULzIrpt+2P6byKZHKRVSwQSCAmPMt4wxR7zL/wPywpyu6BRvdQQDRGDN3zJY/9+wBRp3RTRJSqnQCSYQvCwiN4tIgnf5MBB/U1gNH3k0AnUExhjae/po7nLS3OWkpduFu98zNW9eOBeqL/Jtb/sVeKbovZVSYRWo+WgntrmnAF8E/td7KAHoAr4R9tRFk74eMN4vvqRU26JmCjR1OnnnSAuHm7o41NRFp8M95LiIMDMnldK8dOYWZ7GqIo+ZOamIjNR6d5JW3gzH34J+l608PvyS7VuglIppgVoNxUmzmCA5pzY30Nju4E+7TvLWoWaMGd79wscYQ2O7g8Z2B9uOtfLYljpm5KSypjKfC+cXU5KbFrpEZRTY3sW7HrPbOx+xcxhMt3kZlIozQf2sFZF8YD4w+K0yfPrKaW+KWgy5+z08vLmOV/efZqTv/7TkRNJTEgHweIuKhjvd4eS53Y08t7uRBSXZXLygmLVVBSQmhCCXsPgaOPQy9Jyxlec7H4Fz/7/J31cpFTFjBgIR+QTweWyHsB3ABuAtxh5raHqZghZDHY4+7nupltrTXUP2L5qVzYY5hcwtzmJWbtqQYh9HXz8n2x0ca+6m5kQHNSfacfb5yu4PNHZyoLGTJ7Y1cOWyEi6YV0RKUjBVQ6NISoVzboe/fN9u175gRystmj/xeyqlIiqYHMHngXXAJmPMJSKyCPhmeJMVhcKcIzje3MO9Lx2ktds39v+S0hyuXVXKvBmjF72kJSdS
XZRJdVEmFy+cgcvtYc/JDv5yoImd9e2DxUpnupz876Zj/N/OE3xgxSzeu6CYpMQJBoTytVC6Bk54B6B952dw5bftDGdKqZgTTCBwGGMcIoKIpBpj9onIwrCnLNqEcdL6xnYH3924j15XP2Bba954TjnvX1oy7krflKQEVlXksaoij7YeF68eaOLFvafpdtpK5vbePn779nE21jRy7aoyzptTSMJ4i4xEYO3H4U+7oL8P2o7Z0UkXfWB891FKRYVgfhLWe+cNeBJ4XkSe4uwxg6a/ME1a3+Nyc+9LBweDQFpKIp+7dD5XLps16ZY/eRkpXLuqjO/euIKb1lWQl+GbY7m5y8VDrx/h3/64h70nO8Z/86xiOzrpgHd/ZwenU0rFnDEDgTHmOmNMmzHmX4GvAT8HPhTmdEWfMNQReDyG+189zKl2O0F8cmIC/3DFQlaU54Xk/gPSkhO5YmkJ375+OTetqyArzZcRrGvp4fsb9/PjFw9yqmOcE9UvugZyyuy62wlv/zcj1nArpaJaUIXEIrJGRD4HrADqjTHxN4ltGOoIfr+1npqG9sHtOy6spqoofOP9pyQlcMXSEv7z+hVcs7KUZL86gh11bXztyd38fms9jr7+4G6YmAQbPsVgj+NTu6FWp7VUKtYEM+jc14FfAoVAEfA/IvLVcCcs6oS4H8H+xk421jQObn9gxSzWVRVM+r7BSE9J5EOry/j29cs5f17R4P5+j+HZXSf55z/sYtPhwP0XBhXNh8V+o5Jv/xV0NYUh1UqpcAkmR3ALsM4Y8w3v0NQbgNvCm6woFMIcQb/H8Nu3jw1uryjP47rVZZO650TkZ6Zwx3uq+doHlzB3hu+Z2nv6+Nlrh/nuxv00tPWOfaPlH4bsWXbd7YR3tIhIqVgSTCA4il9HMiAVOBSW1ESzENYRvLzvNPWt9gs2JSmBj5w3OzxDQgSpqiiTf7pqEXdcWE1uevLg/gONnfzr0zU8urkucHFRUgps+DSDRUSNu2D/s+FNtFIqZEYNBCLyYxG5F3ACNSLyCxH5H2A3dqyh+BKiHEGHo48ndzQMbn9wRSkFmSkBrpgaIsL5c4v49+uWc8XSmYOByeMxbKxp5F/+sJstR1tGLy4qXmB7HQ/Y8RtoPRr+hCulJi1QP4It3tetwB/89r8SttREK48HXD2+7UnkCB7fWj/YVHRGTipXLJ052dSFVHpKIjetq+SCeUX85u3jHGi0OaG2Hhf/9cohlpblctv6SmbmjDCG0YqbbG6g9Qh43PDGvbajWVLqFD+FUmo8Rs0RGGN+ObAAD2MDwlbgt9598aOvGzsQK5CcMeEetHUtPbxRe2Zw+5ZzK4e03Ikm5fkZ/OP7F3LHhdVk+zU3rWlo52tP7ubJ7Q243MOGoU5Mggs+B4neHE5Hgx2uWikV1YJpNXQxcBC4D/gpcEBELgp0zbTjDE2x0J/3nBqsQ11enhvy/gKhNlBc9B/XL+eSRTMYqMbo9xj+b+cJvv7UbnbWtQ29KKfUjkU0oPYFOPrGVCVZKTUBwfwc/QFwhTHmvcaYi4D3E80TzIdDCCakaetx8fbh5sHta1aWTjZVUyYjJYm/2TCbr35gyZB+Dk2dTu598SA/fvEgTZ1O3wVz32eHpx7wzn9De/0UplgpNR7BBIJkY8z+gQ1jzAEgOcD504/TbwiGCeYIXth7mn6PzQ7Mm5HF3OLYm+qyqiiTf7l6MR85bzYZqb7iooHOaE/vPGGLi0Rg/Schu8Se4HbCX34AfUE0RVVKTblgAsFWEfm5iFzsXX6GrSsYk4hcKSL7RaRWRL4S4Lx1ItIvIjcGm/Ap5T9Xccr4e/46+vp5Zb9vHJ4rlpaEIlURkZAgXLxwBv9x3TIunO/rjNbX7+Gp7Q187cndbD/eiknOgPd8ERK9vxk6TsDb92v/AqWiUDCB4C6gBvgcdkjqPd59AYlIIrZe4SpgCXCLiCwZ5bzvABuDT/YU8+9DMIEB514/eGZIS6HVFXkh
SljkZKclc/sF1fzzBxZTWZgxuP9Ml5OfvFTLD184yMmEGXDunb6Ljm+Cmj+McDelVCQFDAQikgBsNcbcbYy53jsA3Q+NMc5A13mdC9QaYw57xyZ6BLh2hPM+CzwORO/QlUNyBOMr0vF4DM/vOTW4ffmSmeMf9jmKzS3O4msfWMJHzptNZurQ1kVff6qGR5qqcFVf6rvg3d/B8bcjkFKl1GgCBgJjjAfYKSKVE7h3GVDnt13v3TdIRMqA64D7J3D/qTOkjmB8OYKd9W2c6bJxMzM1ifPnFo1xRewZLC66fjkX+7UuGgiC/3hkNUeTqn2d0d76MbQcjlyClVJDBFM0NAvbs/hFEXl6YAniupF+9g4vIP4R8GVjTMDhLkXkThHZIiJbmpoiMKDZJOoI3vJrKXTRgmLSkqfvLF5ZqUl8ZMNsvnHNUhaU+AJmpwu+13UVW1pS6Ojtw/S74NXvQXdzgLsppaZKMDOUTXRaynqgwm+7nLMntFkLPOIdzqAIuFpE3MaYJ/1PMsY8ADwAsHbt2qmvbZxgHUGPyz2knf35cwtDmKjoVVFgO6NtPdbKo1vqaO5y4UjI4Hfpt/Lh5v+mMNVJaa6H9Jf/HS7/Zkgn+lFKjV+gsYbSROQLwF8Di4A3jDGvDixB3HszMF9EqkUkBbgZGJKTMMZUG2OqjDFVwO+BTw8PAlHB4V80lBP0ZVuOtuLut3GrsjCD0rz0UKcsaokIa6sK+H8fWs71a8pJTU6gNamYZ3Jvpt1pOHCqk+NHD9LzwrfBHX/TWygVTQIVDf0S+4t9F7blzw/Gc2NjjBv4DLY10F7gUWNMjYjcJSJjtjqKKv51BGl5QV/mXyx03pz4yA0Ml5KUwAdWzOLb16/g4oXF1KfN4/mcGzBAS7eLgzVb2fXoN+nsGefsaEqpkAlUNLTEGLMcQER+Drwz3psbY54Bnhm2b8SKYWPM7eO9/5QwZliOILhijOYu5+CAbSJwbvXUTDoTrXLTk/nIeVVcungmj23J4y8HOrmw81mMgf66Lbzw0NdJuOCzXLG0lPSU6VuPolQ0CpQj6BtY8f66j0+uLhioy05Ot2PvB2HT4ZbB9SWluUMmjo9npXnpfP6y+Vxx/R3UlVw2uL+6eyc9r93Hl3+/k2d3nQx+ukyl1KQFyhGsFJGBn8ICpHu3BTDGmOALy2PZBOoHjDG8ddg3yuiGOfGdGxjJwpJsFnzk7zn+fCq9Nc/i6POw2LEdz5kEfr/lQ2ysaeSq5bO4eGExqUmaQ1AqnEYNBMYY/esDcPgmlyctuEBQ19LLyTZb5p2SlMCayvxwpCzmSUICs6/4LJ7cFNp2baSxw8HS3q0I8BLX8ujmOp7ddZIrl5Vw8cIZ07rprVKRFEzz0fg2pOloblCXbDnmKxZaU5mvX2CBiJCw/pMUeNzkHXmNlm4XKR3bSe5w8eecG+l0wGNb6nlmVyOXL5nJ+xbNGNKDWSk1efoXNZYhOYLgAsEOv74D51RpbmBMIrDh0yRIAkVHXqUgM4UZ3bVk9zzKExk30i/JdDvdPLm9ged2N3LxwmIuXzJT612UCpHonB4rmgxpOjp20dDpTgcN3onpkxKFJbPioypl0hISYMOnYMH7SRChKCuVK/Lq+WrGHyhN97VVcPT189zuRv7x9+/yP28c4WS7Dm2t1GRpIBiLo823HkSOYGedLwexZFauFguNhwic8zFYYscmTBChsv8438x4lLvWZjMrzzdPcr/H8PrBM3z1D7u554WD7DnR4RvLSCk1Llo0NJZxthraUdc6uL6yIriiJOVHBFbeYvtrbP9fABI6T7DuwA9Ze9E/sKOnnGd3N3LotG/WuHfr23i3vo3y/HQuWzKT9dWFpCTpbxylgqV/LWMZUjQU+Iu9x+XmwCnfF9SqaTDvQESIwOJr4PzPQYL3t4qjDXnxX1nt2cM/X72Yf7p6Easq8gZHOgWob+3lF28c5UuP7eSxLXWDo74qpQLTHMFYxtF8dFd9Ox7v
dJRVRZlamTlZVRdAeh689n3o64H+PnjzXmivY96Km/jspfM51eHg+T2neKP2jJ0mE+h2unludyMbaxpZXpbHxQuLWV6WO63mgVAqlDQQjMURfI7Av7WQ5gZCZOZSeP+/w6vfhc6Tdl/NH6DlCJz/GWbmZPM3G2Zz3eoy/nLwDC/vOz2YEzDGV2xUkJnChQuKec+8IgoyNUAr5U+LhgLpd9shJgAQSBl9nCF3v4ddDb7cgwaCEMoptcFg1irfvpM74NmvwJmDgJ3058plJXz7+uV85n3zWFo2NGi3dLt4ansD//j7ndzzwkG2HmvB3e+ZumdQKoppjiAQ17B5CBJGj5sHT3cNzktcmJVCeX78DDk9JVIy4b1fhncfgT1P2X09Z+CFf4WVN8OiD9rOaQnC6sp8Vlfmc7rDwSv7m3i99gzdTtsE1T+XkJWWxIY5hZw/t5DKggxEtOhIxScNBIGMY9TRd+vbBtdXVuTpl0o4JCTAqluhaCG89RNbb+Bx29ZFJ3fChk9Dhm9cpxk5aXx4XQXXrSlj27FWXjvYxL6TvuDe5XDzwp5TvLDnFGX56WyYU8j66gIKs1Ij8XRKRYwGgkDG0at4d4MvaKwoywtTghQA5efAVd+B138ELYfsvsZd8Mw/wLl3QuX6IacnJyawfk4h6+cUcrrDwRuHzvBGbTOt3b4JcRpae3l8az2Pb61nQUk266sLOGd2PtlpyVP4YEpFhgaCQILsVdzS7eJEm6838YKSrHCnTGXNgMv/DXY9CnueBoytz3n9bqjcAGs/PmLwnpGTxnWry7l2ZRl7Gzt461AzW4+1DrY4AjjQ2MmBxk7+d9NxlpTmcG5VAasq88jSMY7UNKX/swMJMkew54QvYCyYma3DJk+VxCRbVDRrJbx1H/R4Z4Q7vglO1cA5t8PsC2CEYrqEBGFpaS5LS3P5mw39bDvWyqbDzew52cFAB2VjDDUN7dQ0tJPwlrC4JJs1s239Q2665hTU9KGBIJAgexXvPuELGEtLdWyhKTdzKVz9Pdj2azj8st3n7IQ3fwyHX4G1d0DOrFEvT0tO5Px5RZw/r4j2nj42H21h89EWav16L3s8hpoTHdSc6OB/Nx1jbnEWqyvzWVOZx4yctFHvrVQs0EAQyJAcQd6Ip3g8ZkiOYGmpDisRESmZsOEuWz/wzs98uYPGXfDMl+z4RYv/CpIDf2nnZiRz2ZKZXLZkJs1dTjYfbWXrsRYON3UPnmMM1J7uovZ0F49tqWNWXhqrKvJZUZ7L3OIsErXjmooxGggCCaKO4Ghz92DTxNz0ZG02Gmmlq+Hq78Oux2D/s4CxLYt2Pw6HXrZFSVXvGbG4aLjCrFSuXFbClctKaOl2seVoC9vr2jh4qhP/8e1Otjk42XaSZ3edJCM1iWWlOSwvy2VpWa4WIamYoIEgkCDqCGr8cgNLSnO02Wg0SMmAcz4K1RfZ3MFAy6LeFtvs9MBzsOo2mLkk6FsWZKZwxdISrlhaQoejj511bew43kbNiQ76/Dqm9TjdvHOkhXeO2MmJKgszvHUROcybkUVyovbhVNEnrIFARK4E7gESgQeNMf857PhtwJe9m13Ap4wxO8OZpnFxjl1H4F8/sKxMi4WiSkG17ZF8+BXY+bAvsDfXwovfhNI1tjNa/uxx3TYnLZkL5xdz4fxinO5+9pzoYFdDOzvr2mnrcQ0593hzD8ebe3h210lSkhJYMDObxbNyWDIrh4qCdP3hoKJC2AKBiCQC9wGXA/XAZhF52hizx++0I8B7jTGtInIV8ACw/uy7RcgYA871uNwcOu0rO16iFcXRRwTmXgIV6+0YRfufsUVFACe22aViPSy/EfIqx3371KTEwZ7MZoOhvrWXXQ3t7Gpo5+CpriFzJLjcHnY3tLPbOxRJZmoSC0uyWVSSzYKZ2ZTna2BQkRHOHMG5QK0x5jCAiDwCXAsMBgJjzJt+528CysOYnvFxO+0Cdijk5IyzTtnX2Dn4h15ZmEGOdj6K
XikZsPo2mH+FrT848hrg/ZKuexvq3oGKc2Hph6BgzoTeQkSoKMigoiCDq5fPosflZl9jp21t1NBOU+fQYbG7nW62HWtl2zE7h0VmahILZmYxf2Y282dkUVmQQZIWJakpEM5AUAbU+W3XE/jX/h3As2FMz/gMbzo6wi+1Gr9B5pZpa6HYkFUM530aFn3Adkar3+I9YLwB4W0oWWFbGc1cGlSl8mgyUpJYU5nPmko7b3VTp5O9JzvYc7KDfSc76HS4h5zf7XSz/Xgb24+3AZCSlMCc4kzmzchiXnE2c4ozydRObSoMwvm/aqS/oBHnEhSRS7CB4D2jHL8TuBOgsnL82fcJGaPFkDFmyLASS8u0WCim5M+Gi/7BDme96/fQsMV3rPFdu+RXwcKrbKe0xMnn9oqzUynOLuaiBcUYYzjR7mDfyQ72NXZy8FTnWYHB5faw72Snd3wkOwR3SW4ac4qzmFOcyZyiTMry0jXXoCYtnIGgHqjw2y4HTgw/SURWAA8CVxljmke6kTHmAWz9AWvXrp2aiWnHmKv4dKdzcNz71OQE5hXrsBIxqaAa3vsP0HrUjmp67C0Gf6+0HoVN/wXbfwPzLoV5l0FmUUjeVkQoy0unLC+dSxfPxBjDyXYHB051Unu6iwOnOmnucp11XWO7g8Z2B2/WngHsOEqVhRlUFWZSVZRBdVEmJTlpWtegxiWcgWAzMF9EqoEG4GbgVv8TRKQSeAL4iDHmQBjTMn5j9Cqu8WsttKgkR3+Vxbr8Krjg87DiJtj3R9vSqL/PHnN22IrmmiehbA3MvRRKV0FC6IYSERFK89IpzUvn4oUzADuG1aGmrsHOa8dbegZnwBvQ1+/h0OmuIXM4pyYnUFFgg0NlQQaVBRnMyk3T/6NqVGELBMYYt4h8BtiIbT76kDGmRkTu8h6/H/g6UAj81PsLxm2MWRuuNI3LGH0I/IuFlmmx0PSRXQLrPmEDQu2LcGCj7X8AgIGGrXZJz7f9FKovgtzwtHEoyEyhILOAdVV2aG2X28Pxlm5qT3dz5Ew3R850jZhrcPZ5qD3VRa3f/NmJCTbQVBRkUJFvX8vz03V0VQWEuR+BMeYZ4Jlh++73W/8E8IlwpmHCnH6T0gyrI3D3e9jX6BcItKJ4+knNti2IFl9jv/gP/tkOVzGgt9UWJe15CgrmQvWFdtTT9PywJSklKYF5M7KZN8M3N0Z7bx9Hz3RztNkGh2PNPXT09p11bb/HUNfSQ11Lz5D9uRnJlOWlU56fTlleBqV5aZTmpZOWrAMnxhNtgjCaADmC2qYunH22N2lRVirF2TqRybSVkGiblVacCx0nbJHR4VeH1iG1HLLL1l/a3sqV50PFujHnsAiF3PRkVlbksdI7NaoxhraePo42d3O8xXZmO97SQ0v32TkHgPaePtp7+oaMlwV2lr1ZuemU5qUNvpbkputQ3NOUfqqjCdCreHixkFbMxYmcUjtW0fIP2zmTj7wKDdt8HdQwdvjrUzWw+UGYsch2Vitba5utTgERIT8zhfzMFFZX+nIn3U43da091LX0Ut/aQ31rLw2tvUOGx/DX3OWiucs12PltQFZaEiU5aZTkplGSk8ZM72txdqoOnxHDNBCMpvuMbz09b8gh/4ri4ZOkqziQmATla+3i7LLzHxx7A07vxddC2tjt03th6y9sr+WytXZQvMJ5Aee/DofM1CQWleSwqMT3o8bjMZzudNLQZgPDiTYHDW09nOpwnlUpPaDL4abW0TVkiG6w3S0KMlOYmZPGjOxUirPTmJmT6m0ym6pzdEQ5DQQj6XdDZ6NvO7t0cLXD0cfxZlvOKiIsKgk8l7Ga5lKzYP5ldulpsR3Sjm+Cpv0M6TbTdtwuNU9AShbMWmE7rpWsgMzCiCQ9IUHsL/vcNM7xG27J3e/hVKeTk229nGh3cLKtl5PeZquj5SCM8eUi9oxwPDcjmWJvMWqR32thVgr5GSk6dHeEaSAYSecJMP12PbNoyBj2NX7FQnNn
ZJKRov+EyiujwHZAW3iVrUyu32KXU7v9io+wU2oee9MuYIucZi6zy4zFAadFnQpJiQmDfRz8GWNo7nYN9mVo7HBwusPBqQ4nzd3OIUNzDzdQFzE8JwE2IOVnJFOY5Q0OmSkUZqVQkJlCYWYq+ZnJmqMIM/0WG0l7vW89d2hP5t06rIQKRno+zL/cLn0OOLkTTmy3dQu9rUPP7Thhl4N/ttu5FbZ+oXgxFC+KWI5hOBGhyPtlPXyk3b5+D02dTk53OjnV4eB0p5Mm72tzt2vUoiawRVQDuYkDdI54TmZqkrc5ra3/KMhIIT8zmfwMm6PIy0jWlk6ToIFgJEMCga+NeF+/hx31bYPbK8vzpi5NKnYlp9mZ0yrX2zKU1qN2CIuT70LTvqG5BYD2OrscfN5uZxRC0QLvMh/yZkNSypQ/RiDJiQmDHeKG6/cYWntcnO6wvfGbvL3yz3Q5ae5y0T5Cc9fhup1uW+E9rPmrv/SURPIykr2BIYX8jGRy05PJy0gmN90Gi5y0ZFKStFJ7OA0EI2n3GyvPLxDsPdmBw2WLjIqyUqko0NnI1DiJ2GEtCqrtwHZulw0Gp/fYfgoth8EMK4fvaYbjb9kF7Gi4eZV2lNTCuZBfbXMRidH555yY4MtJjMTl9tDS7RoMDi3dLu+2i5ZuJ609fQFzFAN6Xf30uvo52eYIeN5AwMhNt4EhNz2ZnHTfdk56EjlpyWSnJcVNb+zo/J8TaW0jB4ItR31Z+nNm52uzUTV5SSm24njWCjtJTp8DzhywwaFpH5w5CP3D+gB43DZgtByG2hfsvoQkGwzyq+yAenmzIa/CdoyLcilJCYOV1iPxeAwdjj6au1209dgipNYeF609fbR22/W2nj76gwgWEHzAAMhITSI7zRcYctKSyE5LJsu7P9u7PzstiazU2A0cGgiG6++DrlO+7ZwywLak2FHXNrj7nKrw9SBVcSw5zRcYADz90HYMmg7YmdWaDw5t0TbA44bWI3bxl55vA0Rehf1Rk1MOuWWQkhn+ZwmRhAQhz1vcMxpjDJ1ON23dfTYw9PbR1mOLndp6+gZfOxzB5S4G9Djd9DjdnGofO2gApCUnkpWaRJY3MGSnJZGZapfs1IF1e05mahKZKUmkJSdE/EelBoLhOk74suaZxYMthvaf6hycpD4vI4U5RbHzh6RiWEKiLQLynyzH2WmHz2455M0ZHIHuppGv7221S+O7Q/en5UHOLPtDJ3uWXc+eBZkzoraIKRARscU6aclUFp49idSAgYDR7g0Kg6+9dunoddPp3e5yugO2hBqJo68fR1//4MjEwaY9MzXRGxgSyUixwcL/NcNvf1le6MeIir1PPNyGVBT7RtEemEUKtFhIRVhq9tBcA9iOba1Hbe6h9Zh9ba8/uyJ6gKPNLqf3Dt0vCbZyOrsEsmb6LcX2h1FK1qQm64k0/4AxFo/H0OVy09HbR6fDTZfTt97pXe9y2sDR5T0+3sABNjh1Odx0OUb5rIb51MVzWesdiDBUNBAM519RnGcDgcdj2OadNQpsIFAqqqRmQckyuwzw9NtizrbjNih0NHhfT4weIIzH5i66m4BdZx9PTrcBIbPY9rHJKLKBI9P7mp4f0uG5IykhIfigAfYLvcfV7w0OtpVTl3cZWO90uOlxuel22vN6XO7BccuClZ4S+n9fDQTDjdBiqLapa3BEx+y0JObP0EloVAxISLSd1XJKh+73eL/sO+ptfUPHCeg8add7Rpwbyqev19dLekRih2RJL4CMfF9wSM+3+9LzbLFUanZM5yxGYot4bNn/zHH0CXT3e+h29tPtsoGhy9lPj9NNt6t/MGj0uNz0uOw5+QHqSiZKA8FwI/Qh2Hy0ZXDXmtn5JGh3eBXLEhIge6ZdhnO7oPu0DQpdp6DrtO+1u+nsFkxnMb56iZYApyUk2dFZB5c826M6LdcO8piWa4PFwGsIpgqNVkmJCeRmJJCbEbln1EDgz+2CzoEWQwI5ZXQ4+nj9oG8A
Oi0WUtNaUor9ATTSZDvG2FF5u8/4io96mu12T7NdHO1nXzcSj9t3TVDpSrOBIjXH1lOkZtslJdO7L9MWj6UMLJl2mWa5jnDRQOCvo4HBgcKyZkBSKs9tr8PltmV4FQUZLJmls5GpOCXi+wVfOHfkc/r7bG6gp8XO7NbT4sshDCyOdugbvYfwiNwO6HLYnEnwCbZ1GikZvuCQnGGXgfWUDHtOcqb3dWDbu56UGhfBRAOBv2Ethtp7+nhpn+8/3l+tKtXWQkoFkphsf0RlzQh8nttpA4KjHXrb7Kuz3bfP2eld77CD9A3vbR0UYwNOX8/QYeXHRWwT8qT0UV79luQ0SEz1vSaleo/5rSem2NeExKgKMBoI/A2rH3h298nBYXcrCjJY7Z0FSik1SUmpwQUMsEVSrm5bLOXsskHC6Q0Qzi77Onzd1W0rtifN2Pv09UIobjdAEuy/QWKqLY4bCByJyXY9Mdm7nTJsSbaTHeXMCmFiNBAM5RcIutJKeGWXr5POtZobUCoyRGz5f+o4W+t5+m1uwNXtW/p6baDo6/Gue/f1Dbw6fLmIvt4gKscnyHh8AWa8citiKxCIyJXAPUAi8KAx5j+HHRfv8auBHuB2Y8y2cKZpVHWb4YR9636P4bFaBnMDswszWaW5AaViS0Kir1J5ojz99sva7Tj71e2wgWNw3Rs4+npt0Ve/03vM6bc47DkTKuryCkMLqrAFAhFJBO4DLgfqgc0i8rQxxn8Co6uA+d5lPfBf3tepdWIHvPEjMB56Xf1s7crn9b5M8GYANDegVJxKSJxYbmQs/W5voHDawDD8tb/Pe9xlXwf3uYIrThuncOYIzgVqjTGHAUTkEeBaGDKT3bXAr4wxBtgkInkiMssYczKUCXH0dLHzjz89a78YD2LczGjZhsftwt3vodGTx6N5twxW5Fy+ZCYrynUCGqVUCCUm2SVKBv8LZyAoA/y66VLP2b/2RzqnDAhpIHC5nKQeeXHU4wOjCHUl5vL7/I/Rk5hNanICHz2vivVzomN2KKWUCpdwBoKRylKGD8kUzDmIyJ3AnQCVlZVnXRAK3QnZPJH3cboS85hdmMmdF80ZdXx0pZSaTsIZCOqBCr/tcuDEBM7BGPMA8ADA2rVrxz2+X1p6JrLuY2cfkCSMJGAkmYyS5Xwqr5Dc9GSKs1O1TkApFTfCGQg2A/NFpBpoAG4Gbh12ztPAZ7z1B+uB9lDXDwCkpKax8n03h/q2Sik1LYQtEBhj3CLyGWAjtvnoQ8aYGhG5y3v8fuAZbNPRWmzz0RF+tiullAqnsPYjMMY8g/2y9993v9+6Af4unGlQSikVWGzOtKyUUipkNBAopVSc00CglFJxTgOBUkrFOQ0ESikV58Q23IkdItIEHJvg5UXARGeoiDb6LNFpujzLdHkO0GcZMNsYUzzSgZgLBJMhIluMMWsjnY5Q0GeJTtPlWabLc4A+SzC0aEgppeKcBgKllIpz8RYIHoh0AkJInyU6TZdnmS7PAfosY4qrOgKllFJni7ccgVJKqWGmZSAQkStFZL+I1IrIV0Y4LiJyr/f4uyKyJhLpDEYQz3KxiLSLyA7v8vVIpHMsIvKQiJwWkd2jHI+lz2SsZ4mVz6RCRF4Wkb0iUiMinx/hnJj4XIJ8llj5XNJE5B0R2el9lm+OcE5oPxdjzLRasENeHwLmACnATmDJsHOuBp7FzpC2AXg70umexLNcDPwx0mkN4lkuAtYAu0c5HhOfSZDPEiufySxgjXc9GzgQw38rwTxLrHwuAmR515OBt4EN4fxcpmOO4Fyg1hhz2BjjAh4Brh12zrXAr4y1CcgTkVlTndAgBPMsMcEY8xrQEuCUWPlMgnmWmGCMOWmM2eZd7wT2YucM9xcTn0uQzxITvP/WXd7NZO8yvDI3pJ/LdAwEZUCd33Y9Z/+HCOacaBBsOs/zZiOfFZGlU5O0kIuVzyRYMfWZiEgVsBr769NfzH0uAZ4FYuRzEZFEEdkBnAaeN8aE
9XMJ68Q0ETLSZMPDo2kw50SDYNK5Ddt1vEtErgaeBOaHO2FhECufSTBi6jMRkSzgceALxpiO4YdHuCRqP5cxniVmPhdjTD+wSkTygD+IyDJjjH+dVEg/l+mYI6gHKvy2y4ETEzgnGoyZTmNMx0A20tgZ4ZJFpGjqkhgysfKZjCmWPhMRScZ+cf7GGPPECKfEzOcy1rPE0ucywBjTBrwCXDnsUEg/l+kYCDYD80WkWkRSgJuBp4ed8zTwt96a9w1AuzHm5FQnNAhjPouIlIiIeNfPxX6mzVOe0smLlc9kTLHymXjT+HNgrzHm7lFOi4nPJZhniaHPpdibE0BE0oHLgH3DTgvp5zLtioaMMW4R+QywEdvq5iFjTI2I3OU9fj92HuWrgVqgB/hYpNIbSJDPciPwKRFxA73AzcbbrCCaiMjD2FYbRSJSD3wDWwkWU58JBPUsMfGZABcAHwF2ecujAf4ZqISY+1yCeZZY+VxmAb8UkURssHrUGPPHcH6Hac9ipZSKc9OxaEgppdQ4aCBQSqk4p4FAKaXinAYCpZSKcxoIlFIqzmkgUGoEIvKKiLx/2L4viMhPA5w/LebFVfFHA4FSI3sY24HP383e/UpNKxoIlBrZ74EPikgqDA5kVgrcKiJbRhsn3ntul9/6jSLyC+96sYg8LiKbvcsFYX8KpYKggUCpERhjmoF38I3xcjPwO+BfjDFrgRXAe0VkxThuew/wQ2PMOuAG4MEQJlmpCZt2Q0woFUIDxUNPeV8/DnxYRO7E/u3MApYA7wZ5v8uAJd7hbgByRCTbO36+UhGjgUCp0T0J3O2dBjAdaAW+BKwzxrR6i3zSRrjOf9wW/+MJwHnGmN7wJFepidGiIaVG4R2y+BXgIWzuIAfoBtpFZCZw1SiXnhKRxSKSAFznt//PwGcGNkRkVRiSrdS4aSBQKrCHgZXAI8aYncB2oAYbHN4Y5ZqvAH8EXgL8hwb+HLDWO9n4HuCusKVaqXHQ0UeVUirOaY5AKaXinAYCpZSKcxoIlFIqzmkgUEqpOKeBQCml4pwGAqWUinMaCJRSKs5pIFBKqTj3/wMWzepQ6D22SAAAAABJRU5ErkJggg==\n", 126 | "text/plain": [ 127 | "
" 128 | ] 129 | }, 130 | "metadata": { 131 | "needs_background": "light" 132 | }, 133 | "output_type": "display_data" 134 | } 135 | ], 136 | "source": [ 137 | "# An example to get you started\n", 138 | "plot_pdf(dists.invgamma(alpha=1, beta=1), support=[0, 3])\n", 139 | "plot_pdf(dists.invgamma(alpha=2, beta=1), support=[0, 3])\n", 140 | "\n", 141 | "# ... Add remaining plots ...\n", 142 | "\n", 143 | "# add a legend\n", 144 | "plt.legend([r'$\\alpha=1$, $\\beta = 1$',\n", 145 | " r'$\\alpha=2$, $\\beta = 1$'])" 146 | ] 147 | }, 148 | { 149 | "cell_type": "markdown", 150 | "metadata": {}, 151 | "source": [ 152 | "### Exponential Distibution\n", 153 | "\n", 154 | "https://en.wikipedia.org/wiki/Exponential_distribution\n", 155 | "\n", 156 | "#### Recreate this plot\n", 157 | "\n", 158 | "![](https://upload.wikimedia.org/wikipedia/commons/thumb/0/02/Exponential_probability_density.svg/488px-Exponential_probability_density.svg.png)" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "execution_count": 15, 164 | "metadata": {}, 165 | "outputs": [], 166 | "source": [ 167 | "# code to plot exponential distribution here\n", 168 | "\n", 169 | "\n" 170 | ] 171 | }, 172 | { 173 | "cell_type": "markdown", 174 | "metadata": {}, 175 | "source": [ 176 | "### Student's t Distibution\n", 177 | "\n", 178 | "https://en.wikipedia.org/wiki/Student%27s_t-distribution\n", 179 | "\n", 180 | "#### Recreate this plot\n", 181 | "\n", 182 | "![](https://upload.wikimedia.org/wikipedia/commons/thumb/4/41/Student_t_pdf.svg/488px-Student_t_pdf.svg.png)" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": 16, 188 | "metadata": {}, 189 | "outputs": [], 190 | "source": [ 191 | "# code for Student's t plots here\n", 192 | "# Hint: You'll need to think of an equivalent for when the degrees of freedom are infinite\n", 193 | "\n" 194 | ] 195 | } 196 | ], 197 | "metadata": { 198 | "kernelspec": { 199 | "display_name": "Python 3 (ipykernel)", 200 | "language": "python", 201 
| "name": "python3" 202 | }, 203 | "language_info": { 204 | "codemirror_mode": { 205 | "name": "ipython", 206 | "version": 3 207 | }, 208 | "file_extension": ".py", 209 | "mimetype": "text/x-python", 210 | "name": "python", 211 | "nbconvert_exporter": "python", 212 | "pygments_lexer": "ipython3", 213 | "version": "3.9.12" 214 | } 215 | }, 216 | "nbformat": 4, 217 | "nbformat_minor": 4 218 | } 219 | --------------------------------------------------------------------------------