├── .gitignore
├── .gitignore.save
├── LICENSE
├── README.md
├── cr-mpt
│   ├── aggregated_cr_model.R
│   ├── cr_two_within_conditions.R
│   ├── hierarchical_cr_two_within_conditions0.R
│   ├── hierarchical_cr_two_within_conditions1.R
│   ├── hierarchical_cr_two_within_conditions2.R
│   ├── hierarchical_cr_two_within_conditions3.R
│   └── pr_cr-mpt.R
├── ebddm
│   ├── bayesian
│   │   ├── ebddm_agg.txt
│   │   ├── ebddm_agg2.txt
│   │   ├── ebddm_agg3.txt
│   │   ├── ebddm_agg_rec.txt
│   │   ├── pr_ebrw_ebdd.Rmd
│   │   └── pr_ebrw_ebdd.html
│   └── ebddm_pred.R
├── ebrw
│   ├── ebrw_fit.R
│   ├── ebrw_pred.R
│   ├── ebrw_sim.R
│   ├── reproduce_nosofsky_palmeri_1997.Rmd
│   └── reproduce_nosofsky_palmeri_1997.html
├── gcm
│   ├── bayesian
│   │   ├── GCM_agg.stan
│   │   ├── GCM_agg.txt
│   │   ├── GCM_agg_recognition.stan
│   │   ├── GCM_agg_recognition.txt
│   │   ├── GCM_agg_recognition2.stan
│   │   ├── GCM_agg_recognition2.txt
│   │   ├── GCM_recognition.txt
│   │   ├── attention_weight_prior_comparison.R
│   │   ├── reproduce_nosofsky_1989_bayes.html
│   │   ├── reproduce_nosofsky_1989_bayes.rmd
│   │   ├── reproduce_nosofsky_1989_bayes_stan.html
│   │   ├── reproduce_nosofsky_1989_bayes_stan.rmd
│   │   ├── reproduce_shin_nosofsky_1992_bayes.Rmd
│   │   └── reproduce_shin_nosofsky_1992_bayes.html
│   ├── data
│   │   ├── Nosofsky_1989_DataSets.xlsx
│   │   ├── Nosofsky_1989_MDS_solution.xlsx
│   │   ├── Nosofsky_1989_indexvectors.doc
│   │   ├── Nosofsky_readme.doc
│   │   ├── README.md
│   │   ├── nosofsky_1989_responses.csv
│   │   ├── nosofsky_1989_similarities.csv
│   │   ├── shin_nosofsky_1992_cat1.csv
│   │   ├── shin_nosofsky_1992_cat2.csv
│   │   ├── shin_nosofsky_1992_cat3.csv
│   │   ├── shin_nosofsky_1992_responses.xls
│   │   ├── shin_nosofsky_1992_responses_cat1.csv
│   │   ├── shin_nosofsky_1992_responses_cat2.csv
│   │   ├── shin_nosofsky_1992_responses_cat3.csv
│   │   └── shin_nosofsky_1992_similarities.xlsx
│   ├── gcm_fit.r
│   ├── gcm_pred.r
│   ├── gcm_rec_fit.r
│   ├── gcm_rec_pred.r
│   ├── reproduce_nosofsky_1989.html
│   ├── reproduce_nosofsky_1989.rmd
│   ├── reproduce_shin_nosofsky_1992.html
│   └── reproduce_shin_nosofsky_1992.rmd
├── minerva-al
│   ├── minerva-al.R
│   ├── minerva-al.Rproj
│   ├── reference_implementation.Rmd
│   ├── reference_implementation
│   │   ├── Acquisition_033.f90
│   │   ├── Acquisition_067.f90
│   │   ├── Acquisition_1.f90
│   │   ├── MinervaAL_tools.f90
│   │   ├── Number_generators.f90
│   │   ├── Reacquisition_033.f90
│   │   ├── Reacquisition_067.f90
│   │   ├── Reacquisition_1.f90
│   │   ├── Reacquisition_control_033.f90
│   │   ├── Reacquisition_control_067.f90
│   │   ├── Reacquisition_control_1.f90
│   │   ├── acquisition_033
│   │   ├── acquisition_067
│   │   ├── acquisition_1
│   │   ├── make.sh
│   │   ├── minervaal_tools.mod
│   │   ├── number_generators.mod
│   │   ├── reacquisition_033
│   │   ├── reacquisition_067
│   │   ├── reacquisition_1
│   │   ├── reacquisition_control_033
│   │   ├── reacquisition_control_067
│   │   ├── reacquisition_control_1
│   │   └── results
│   │       ├── Acquisition_extinction_033.txt
│   │       ├── Acquisition_extinction_067.txt
│   │       ├── Acquisition_extinction_1.txt
│   │       ├── Reacquisition_033.txt
│   │       ├── Reacquisition_067.txt
│   │       ├── Reacquisition_1.txt
│   │       ├── Reacquisition_control_033.txt
│   │       ├── Reacquisition_control_067.txt
│   │       └── Reacquisition_control_1.txt
│   ├── reproduce_jamieson_etal_2012.Rmd
│   └── reproduce_jamieson_etal_2012.html
└── minerva2
    ├── minerva2.R
    ├── reproduce_hintzman_1988.Rmd
    ├── reproduce_hintzman_1988.html
    ├── reproduce_hintzman_1988_cache
    │   └── html
    │       ├── __packages
    │       ├── frequency_judgments_a964883965c8d954a4979e3eea604080.rdb
    │       └── frequency_judgments_a964883965c8d954a4979e3eea604080.rdx
    └── reproduce_hintzman_1988_files
        └── figure-html
            └── unnamed-chunk-2-1.png
/.gitignore:
--------------------------------------------------------------------------------
1 | # R files
2 | .Rproj.user
3 | .Rhistory
4 | .RData
5 | *_cache
6 | *_files
7 |
8 | # System files
9 | *~
10 | .DS_Store
11 | Thumbs.db
12 |
--------------------------------------------------------------------------------
/.gitignore.save:
--------------------------------------------------------------------------------
1 | # R files
2 | .Rproj.user
3 | .Rhistory
4 | .RData
5 |
6 |
7 | # System files
8 | *~
9 | .DS_Store
10 | Thumbs.db
11 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | {one line to give the program's name and a brief idea of what it does.}
635 | Copyright (C) {year} {name of author}
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | {project} Copyright (C) {year} {fullname}
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/philosophy/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Collection of cognitive models
2 |
3 | This is a little collection of my implementations of computational models of cognition in R.
4 |
5 | ## Cognitive model repositories
6 | If you are looking for more computational models of cognition, take a look at the following repositories:
7 |
8 | - [The Ohio State University cognitive modeling repository](http://www.cmr.osu.edu/): Contains datasets that can be modeled and the cognitive models themselves.
9 | - [Repository of Neural and Cognitive Models](http://models.nengo.ca/): The goal is to provide enough information here that a researcher can run the models detailed in a particular publication or project, as well as gather and analyze the resulting data. The models here typically use Nengo, but other models are welcome.
10 |
11 | If you know of any other repositories that provide code and/or data for cognitive modeling, let me know so I can add them to the list.
12 |
--------------------------------------------------------------------------------
/cr-mpt/aggregated_cr_model.R:
--------------------------------------------------------------------------------
1 | # Simplified conjoint recognition model for aggregated responses
2 | 
3 | model {
4 | 
5 | # Data generating model ---------------------------------------------------
6 | 
7 | for(i in 1:n_subject) { # n_subject, n_items, x, y are supplied in the data list
8 | 
9 | ## Targets
10 | # x[i, 1] ~ dbin(V[i] + (1 - V[i]) * G[i] + (1 - V[i]) * (1 - G[i]) * b[i], n_items[1])
11 | x[i, 1] ~ dbin(V[i] + (1 - V[i]) * (G[i] + (1 - G[i]) * b[i]), n_items[1]) # This form speeds up computations
12 | 
13 | ## Lure distractors
14 | x[i, 2] ~ dbin(G[i] + (1 - G[i]) * b[i], n_items[1])
15 | 
16 | ## New distractors
17 | y[i] ~ dbin(b[i], n_items[2])
18 | }
19 | 
20 | # Prior ------------------------------------------------------------------
21 | 
22 | for(i in 1:n_subject) {
23 | V[i] ~ dbeta(1, 1) # V: target-recollection ("verbatim") probability, presumably per CR-MPT -- uniform prior
24 | G[i] ~ dbeta(1, 1) # G: "gist"-based acceptance probability -- uniform prior
25 | b[i] ~ dbeta(1, 1) # b: baseline "old"-response bias -- uniform prior
26 | }
27 | }
28 | 
--------------------------------------------------------------------------------
/cr-mpt/cr_two_within_conditions.R:
--------------------------------------------------------------------------------
1 | # Simplified conjoint recognition model with two crossed within-subject factors (independent per-subject parameters, no pooling)
2 | 
3 | data {
4 | dimx <- dim(x)
5 | n_subject <- dimx[1]
6 | n_c1 <- dimx[2] # Number of conditions in factor 1
7 | n_c2 <- dimx[3] # Number of conditions in factor 2
8 | }
9 | 
10 | model {
11 | 
12 | # Data generating model ---------------------------------------------------
13 | 
14 | for(i in 1:n_subject) {
15 | for(c1 in 1:n_c1) {
16 | for(c2 in 1:n_c2) {
17 | ## Targets
18 | x[i, c1, c2, 1] ~ dbin(V[i, c1, c2] + (1 - V[i, c1, c2]) * (G[i, c1, c2] + (1 - G[i, c1, c2]) * b[i]), n_items[1])
19 | 
20 | ## Lure distractors
21 | x[i, c1, c2, 2] ~ dbin(G[i, c1, c2] + (1 - G[i, c1, c2]) * b[i], n_items[1])
22 | }
23 | }
24 | y[i] ~ dbin(b[i], n_items[2]) # New distractors; b is shared across both factors
25 | }
26 | 
27 | # Prior ------------------------------------------------------------------
28 | 
29 | for(i in 1:n_subject) {
30 | for(c1 in 1:n_c1) {
31 | for(c2 in 1:n_c2) {
32 | V[i, c1, c2] ~ dbeta(1, 1) # uniform prior; one V per subject x condition cell
33 | G[i, c1, c2] ~ dbeta(1, 1) # uniform prior; one G per subject x condition cell
34 | }
35 | }
36 | b[i] ~ dbeta(1, 1) # uniform prior; one bias per subject
37 | }
38 | }
39 | 
--------------------------------------------------------------------------------
/cr-mpt/hierarchical_cr_two_within_conditions0.R:
--------------------------------------------------------------------------------
1 | # Hierarchical simplified conjoint recognition model
2 | # with random participant intercept and homogeneous
3 | # parameter variance across conditions
4 | 
5 | 
6 | data {
7 | dimx <- dim(x) # NOTE(review): n_subject is not derived here -- it must be supplied in the data list
8 | n_c1 <- dimx[2] # Number of conditions in factor 1
9 | n_c2 <- dimx[3] # Number of conditions in factor 2
10 | n_param <- 3 # V, G, & b
11 | wish_df <- n_param + 1 # Resulting degrees of freedom for inverse Wishart distribution
12 | }
13 | 
14 | model {
15 | 
16 | # Data generating model ---------------------------------------------------
17 | 
18 | for(i in 1:n_subject) {
19 | for(c1 in 1:n_c1) {
20 | for(c2 in 1:n_c2) {
21 | 
22 | ## Targets
23 | x[i, c1, c2, 1] ~ dbin(V[i, c1, c2] + (1 - V[i, c1, c2]) * (G[i, c1, c2] + (1 - G[i, c1, c2]) * b[i]), n_items[1])
24 | 
25 | ## Lures
26 | x[i, c1, c2, 2] ~ dbin(G[i, c1, c2] + (1 - G[i, c1, c2]) * b[i], n_items[1])
27 | }
28 | }
29 | 
30 | ## New distractors
31 | y[i] ~ dbin(b[i], n_items[2])
32 | }
33 | 
34 | # Parameter transformation ------------------------------------------------
35 | 
36 | for(i in 1:n_subject) {
37 | for(c1 in 1:n_c1) {
38 | for(c2 in 1:n_c2) {
39 | V[i, c1, c2] <- phi(V_hat[i, c1, c2])
40 | G[i, c1, c2] <- phi(G_hat[i, c1, c2])
41 | }
42 | }
43 | b[i] <- phi(b_hat[i])
44 | }
45 | 
46 | ## Assemble scaled additive participant effects on probit scale
47 | for(c1 in 1:n_c1) {
48 | for(c2 in 1:n_c2) { # delta_mu_hat_part columns: 1 = V, 2 = G, n_param = b (one shared deviation per parameter across all cells)
49 | V_hat[1:n_subject, c1, c2] <- mu_V_hat[c1, c2] + xi_part[1] * delta_mu_hat_part[1:n_subject, 1]
50 | G_hat[1:n_subject, c1, c2] <- mu_G_hat[c1, c2] + xi_part[2] * delta_mu_hat_part[1:n_subject, 2]
51 | }
52 | }
53 | b_hat[1:n_subject] <- mu_b_hat + xi_part[n_param] * delta_mu_hat_part[1:n_subject, n_param]
54 | 
55 | # Level 1 prior -----------------------------------------------------------
56 | 
57 | ## Random participant deviations with mean 0
58 | for(i in 1:n_subject) {
59 | delta_mu_hat_part[i, 1:n_param] ~ dmnorm(rep(0, n_param), sigma_inv)
60 | }
61 | 
62 | ## Scaling parameter (see Gelman & Hill, 2007, Chapters 13 & 17)
63 | for(i in 1:n_param) {
64 | xi_part[i] ~ dunif(0, 100)
65 | }
66 | 
67 | # Level 2 prior on condition means ----------------------------------------
68 | 
69 | ## Condition means
70 | for(c1 in 1:n_c1) {
71 | for(c2 in 1:n_c2) {
72 | mu_V_hat[c1, c2] ~ dnorm(0, 1)
73 | mu_G_hat[c1, c2] ~ dnorm(0, 1)
74 | }
75 | }
76 | mu_b_hat ~ dnorm(0, 1)
77 | 
78 | ## Parameter variance and correlations
79 | sigma_inv[1:n_param, 1:n_param] ~ dwish(I_part[1:n_param, 1:n_param], wish_df)
80 | }
81 | 
--------------------------------------------------------------------------------
/cr-mpt/hierarchical_cr_two_within_conditions1.R:
--------------------------------------------------------------------------------
1 | # Hierarchical simplified conjoint recognition model
2 | # with random participant intercept and heterogeneous
3 | # parameter variance across conditions
4 | 
5 | data {
6 | dimx <- dim(x) # NOTE(review): n_subject is not derived here -- it must be supplied in the data list
7 | n_c1 <- dimx[2] # Number of conditions in factor 1
8 | n_c2 <- dimx[3] # Number of conditions in factor 2
9 | n_param <- 3 # V, G, & b
10 | wish_df <- n_param + 1 # Resulting degrees of freedom for inverse Wishart distribution
11 | }
12 | 
13 | model {
14 | 
15 | # Data generating model ---------------------------------------------------
16 | 
17 | for(i in 1:n_subject) {
18 | for(c1 in 1:n_c1) {
19 | for(c2 in 1:n_c2) {
20 | 
21 | ## Targets
22 | x[i, c1, c2, 1] ~ dbin(V[i, c1, c2] + (1 - V[i, c1, c2]) * (G[i, c1, c2] + (1 - G[i, c1, c2]) * b[i]), n_items[1])
23 | 
24 | ## Lures
25 | x[i, c1, c2, 2] ~ dbin(G[i, c1, c2] + (1 - G[i, c1, c2]) * b[i], n_items[1])
26 | }
27 | }
28 | 
29 | ## New distractors
30 | y[i] ~ dbin(b[i], n_items[2])
31 | }
32 | 
33 | # Parameter transformation ------------------------------------------------
34 | 
35 | for(i in 1:n_subject) {
36 | for(c1 in 1:n_c1) {
37 | for(c2 in 1:n_c2) {
38 | V[i, c1, c2] <- phi(V_hat[i, c1, c2])
39 | G[i, c1, c2] <- phi(G_hat[i, c1, c2])
40 | }
41 | }
42 | b[i] <- phi(b_hat[i])
43 | }
44 | 
45 | ## Assemble scaled additive participant effects on probit scale
46 | for(c1 in 1:n_c1) { # heterogeneous variance: one scaling factor per condition cell
47 | for(c2 in 1:n_c2) {
48 | V_hat[1:n_subject, c1, c2] <- mu_V_hat[c1, c2] + xi_part_V[c1, c2] * delta_mu_hat_part[1:n_subject, 1]
49 | G_hat[1:n_subject, c1, c2] <- mu_G_hat[c1, c2] + xi_part_G[c1, c2] * delta_mu_hat_part[1:n_subject, 2]
50 | }
51 | }
52 | b_hat[1:n_subject] <- mu_b_hat + xi_part_b * delta_mu_hat_part[1:n_subject, n_param]
53 | 
54 | # Level 1 prior -----------------------------------------------------------
55 | 
56 | ## Random participant deviations with mean 0
57 | for(i in 1:n_subject) {
58 | delta_mu_hat_part[i, 1:n_param] ~ dmnorm(rep(0, n_param), sigma_inv)
59 | }
60 | 
61 | ## Scaling parameter (see Gelman & Hill, 2007, Chapters 13 & 17)
62 | for(c1 in 1:n_c1) {
63 | for(c2 in 1:n_c2) {
64 | xi_part_V[c1, c2] ~ dunif(0, 100)
65 | xi_part_G[c1, c2] ~ dunif(0, 100)
66 | }
67 | }
68 | xi_part_b ~ dunif(0, 100)
69 | 
70 | # Level 2 prior on condition means ----------------------------------------
71 | 
72 | ## Condition means
73 | for(c1 in 1:n_c1) {
74 | for(c2 in 1:n_c2) {
75 | mu_V_hat[c1, c2] ~ dnorm(0, 1)
76 | mu_G_hat[c1, c2] ~ dnorm(0, 1)
77 | }
78 | }
79 | mu_b_hat ~ dnorm(0, 1)
80 | 
81 | ## Parameter variance and correlations
82 | sigma_inv[1:n_param, 1:n_param] ~ dwish(I_part[1:n_param, 1:n_param], wish_df)
83 | }
84 | 
--------------------------------------------------------------------------------
/cr-mpt/hierarchical_cr_two_within_conditions2.R:
--------------------------------------------------------------------------------
1 | # Hierarchical simplified conjoint recognition model
2 | # with random participant intercept, heterogeneous
3 | # parameter variance across conditions, and participant-
4 | # condition interaction term
5 | 
6 | data {
7 | dimx <- dim(x)
8 | n_subject <- dimx[1]
9 | n_c1 <- dimx[2] # Number of conditions in factor 1
10 | n_c2 <- dimx[3] # Number of conditions in factor 2
11 | n_param <- 3 # V, G, & b
12 | wish_df <- n_param + 1 # Resulting degrees of freedom for inverse Wishart distribution
13 | }
14 | 
15 | model {
16 | 
17 | # Data generating model ---------------------------------------------------
18 | 
19 | for(i in 1:n_subject) {
20 | for(c1 in 1:n_c1) {
21 | for(c2 in 1:n_c2) {
22 | 
23 | ## Targets
24 | x[i, c1, c2, 1] ~ dbin(V[i, c1, c2] + (1 - V[i, c1, c2]) * (G[i, c1, c2] + (1 - G[i, c1, c2]) * b[i]), n_items[1])
25 | 
26 | ## Lures
27 | x[i, c1, c2, 2] ~ dbin(G[i, c1, c2] + (1 - G[i, c1, c2]) * b[i], n_items[1])
28 | }
29 | }
30 | 
31 | ## New distractors
32 | y[i] ~ dbin(b[i], n_items[2])
33 | }
34 | 
35 | # Parameter transformation ------------------------------------------------
36 | 
37 | for(i in 1:n_subject) {
38 | for(c1 in 1:n_c1) {
39 | for(c2 in 1:n_c2) {
40 | V[i, c1, c2] <- phi(V_hat[i, c1, c2])
41 | G[i, c1, c2] <- phi(G_hat[i, c1, c2])
42 | }
43 | }
44 | b[i] <- phi(b_hat[i])
45 | }
46 | 
47 | ## Assemble scaled additive participant effects on probit scale
48 | for(i in 1:n_subject) {
49 | for(c1 in 1:n_c1) {
50 | for(c2 in 1:n_c2) { # delta_mu_hat_part columns: 1 = V, 2 = G, n_param = b; dnorm adds the participant-condition interaction
51 | V_hat[i, c1, c2] ~ dnorm(mu_V_hat[c1, c2] + xi_part_V[c1, c2] * delta_mu_hat_part[i, 1], tau_int[1])
52 | G_hat[i, c1, c2] ~ dnorm(mu_G_hat[c1, c2] + xi_part_G[c1, c2] * delta_mu_hat_part[i, 2], tau_int[2])
53 | }
54 | }
55 | b_hat[i] ~ dnorm(mu_b_hat + xi_part_b * delta_mu_hat_part[i, n_param], tau_int[n_param])
56 | }
57 | 
58 | # Level 1 prior -----------------------------------------------------------
59 | 
60 | ## Random participant deviations with mean 0
61 | for(i in 1:n_subject) {
62 | delta_mu_hat_part[i, 1:n_param] ~ dmnorm(rep(0, n_param), sigma_inv)
63 | }
64 | 
65 | ## Participant-condition interaction
66 | for(i in 1:n_param) {
67 | sigma_int[i] ~ dunif(0, 100)
68 | }
69 | tau_int <- sigma_int^-2
70 | 
71 | ## Scaling parameter (see Gelman & Hill, 2007, Chapters 13 & 17)
72 | for(c1 in 1:n_c1) {
73 | for(c2 in 1:n_c2) {
74 | xi_part_V[c1, c2] ~ dunif(0, 100)
75 | xi_part_G[c1, c2] ~ dunif(0, 100)
76 | }
77 | }
78 | xi_part_b ~ dunif(0, 100)
79 | 
80 | # Level 2 prior on condition means ----------------------------------------
81 | 
82 | ## Condition means
83 | for(c1 in 1:n_c1) {
84 | for(c2 in 1:n_c2) {
85 | mu_V_hat[c1, c2] ~ dnorm(0, 1)
86 | mu_G_hat[c1, c2] ~ dnorm(0, 1)
87 | }
88 | }
89 | mu_b_hat ~ dnorm(0, 1)
90 | 
91 | ## Parameter variance and correlations
92 | sigma_inv[1:n_param, 1:n_param] ~ dwish(I_part[1:n_param, 1:n_param], wish_df)
93 | }
94 | 
--------------------------------------------------------------------------------
/cr-mpt/hierarchical_cr_two_within_conditions3.R:
--------------------------------------------------------------------------------
1 | # Hierarchical simplified conjoint recognition model
2 | # with random participant intercept and slope, heterogeneous
3 | # parameter variance across conditions
4 | 
5 | data {
6 | dimx <- dim(x) # NOTE(review): n_subject is not derived here -- it must be supplied in the data list
7 | n_c1 <- dimx[2] # Number of conditions in factor 1
8 | n_c2 <- dimx[3] # Number of conditions in factor 2
9 | n_param <- 2 * n_c1 * n_c2 + 1 # V & G for each condition, common b
10 | wish_df <- n_param + 1 # Resulting degrees of freedom for inverse Wishart distribution
11 | }
12 | 
13 | model {
14 | 
15 | # Data generating model ---------------------------------------------------
16 | 
17 | for(i in 1:n_subject) {
18 | for(c1 in 1:n_c1) {
19 | for(c2 in 1:n_c2) {
20 | 
21 | ## Targets
22 | x[i, c1, c2, 1] ~ dbin(V[i, c1, c2] + (1 - V[i, c1, c2]) * (G[i, c1, c2] + (1 - G[i, c1, c2]) * b[i]), n_items[1])
23 | 
24 | ## Lures
25 | x[i, c1, c2, 2] ~ dbin(G[i, c1, c2] + (1 - G[i, c1, c2]) * b[i], n_items[1])
26 | }
27 | }
28 | 
29 | ## New distractors
30 | y[i] ~ dbin(b[i], n_items[2])
31 | }
32 | 
33 | # Parameter transformation ------------------------------------------------
34 | 
35 | for(i in 1:n_subject) {
36 | for(c1 in 1:n_c1) {
37 | for(c2 in 1:n_c2) {
38 | V[i, c1, c2] <- phi(V_hat[i, c1, c2])
39 | G[i, c1, c2] <- phi(G_hat[i, c1, c2])
40 | }
41 | }
42 | b[i] <- phi(b_hat[i])
43 | }
44 | 
45 | ## Assemble scaled additive participant effects on probit scale.
46 | ## delta_mu_hat_part column layout per c2 block: V at (c2-1)*2*n_c1 + c1,
47 | ## G at (c2-1)*2*n_c1 + n_c1 + c1, common b at n_param. The previous
48 | ## index term (2*c2-2)^2 only worked for n_c1 = n_c2 = 2 (out-of-range or
49 | ## colliding columns otherwise); this form is equivalent for the 2x2 case.
50 | for(c1 in 1:n_c1) {
51 | for(c2 in 1:n_c2) {
52 | V_hat[1:n_subject, c1, c2] <- mu_V_hat[c1, c2] + xi_part_V[c1, c2] * delta_mu_hat_part[1:n_subject, (c2 - 1) * 2 * n_c1 + c1]
53 | G_hat[1:n_subject, c1, c2] <- mu_G_hat[c1, c2] + xi_part_G[c1, c2] * delta_mu_hat_part[1:n_subject, (c2 - 1) * 2 * n_c1 + n_c1 + c1]
54 | }
55 | }
56 | b_hat[1:n_subject] <- mu_b_hat + xi_part_b * delta_mu_hat_part[1:n_subject, n_param]
57 | 
58 | # Level 1 prior -----------------------------------------------------------
59 | 
60 | ## Random participant deviations with mean 0
61 | for(i in 1:n_subject) {
62 | delta_mu_hat_part[i, 1:n_param] ~ dmnorm(rep(0, n_param), sigma_inv)
63 | }
64 | 
65 | ## Scaling parameter (see Gelman & Hill, 2007, Chapters 13 & 17)
66 | for(c1 in 1:n_c1) {
67 | for(c2 in 1:n_c2) {
68 | xi_part_V[c1, c2] ~ dunif(0, 100)
69 | xi_part_G[c1, c2] ~ dunif(0, 100)
70 | }
71 | }
72 | xi_part_b ~ dunif(0, 100)
73 | 
74 | # Level 2 prior on condition means ----------------------------------------
75 | 
76 | ## Condition means
77 | for(c1 in 1:n_c1) {
78 | for(c2 in 1:n_c2) {
79 | mu_V_hat[c1, c2] ~ dnorm(0, 1)
80 | mu_G_hat[c1, c2] ~ dnorm(0, 1)
81 | }
82 | }
83 | mu_b_hat ~ dnorm(0, 1)
84 | 
85 | ## Parameter variance and correlations
86 | sigma_inv[1:n_param, 1:n_param] ~ dwish(I_part[1:n_param, 1:n_param], wish_df)
87 | }
88 | 
--------------------------------------------------------------------------------
/cr-mpt/pr_cr-mpt.R:
--------------------------------------------------------------------------------
1 |
2 | # Libraries ---------------------------------------------------------------
3 |
4 | library("runjags")
5 | library("dplyr")
6 | library("ggplot2")
7 | library("abind")
8 |
9 | runjags.options(mode.continuous = TRUE)
10 |
11 | # Prediction functions ----------------------------------------------------
12 |
13 | # Closed-form predictions of the simplified CR model (and their inverses):
14 | # false-alarm rate from gist G and bias b; hit rate additionally from V.
15 | cr_G    <- function(fa, b) (fa - b) / (1 - b)
16 | cr_V    <- function(hits, G, b) (hits - G - (1 - G) * b) / (1 - G - (1 - G) * b)
17 | cr_fa   <- function(G, b) G + (1 - G) * b
18 | cr_hits <- function(V, G, b) V + (1 - V) * G + (1 - V) * (1 - G) * b
18 |
19 | cr_pred <- function(V, V_delta, G, G_delta, b, n_items) {
20 |   # Expected (rounded) response counts for the CR model with two
21 |   # within-subject factors.
22 |   #
23 |   # V, G    : n_subjects x n_c1 parameter matrices for the first level of
24 |   #           factor 2.
25 |   # V_delta, G_delta: additive shifts, one per additional level of factor 2
26 |   #           (results clamped to [0, 1]); must have equal length.
27 |   # b       : response bias, recycled to one value per subject.
28 |   # n_items : c(items per target/lure cell, items for new distractors).
29 |   #
30 |   # Returns list(x, y, param): x[i, c1, c2, 1] = target "old" counts,
31 |   # x[i, c1, c2, 2] = lure "old" counts, y[i] = new-distractor counts.
32 |   n_subjects <- nrow(V)
33 |   n_c1 <- ncol(V)
34 |   n_c2 <- length(V_delta) + 1
35 |   b <- rep(b, length.out = n_subjects) # recycle scalar b (b[i] was NA for i > 1)
36 | 
37 |   clamp01 <- function(p) pmin(pmax(p, 0), 1)
38 |   # One slice along dim 3 per level of factor 2 (baseline + each delta);
39 |   # previously both the abind() and dim 3 of x were hard-coded to two
40 |   # levels, which failed for vector-valued deltas.
41 |   V <- abind::abind(c(list(V), lapply(V_delta, function(d) clamp01(V + d))), along = 3)
42 |   G <- abind::abind(c(list(G), lapply(G_delta, function(d) clamp01(G + d))), along = 3)
43 | 
44 |   x <- array(NA, dim = c(n_subjects, n_c1, n_c2, 2))
45 |   y <- numeric(n_subjects)
46 | 
47 |   for (i in 1:n_subjects) {
48 |     for (c1 in 1:n_c1) {
49 |       for (c2 in 1:n_c2) {
50 |         # Targets: recollection, else gist, else bias
51 |         x[i, c1, c2, 1] <- V[i, c1, c2] + (1 - V[i, c1, c2]) * G[i, c1, c2] + (1 - V[i, c1, c2]) * (1 - G[i, c1, c2]) * b[i]
52 | 
53 |         # Lure distractors: gist, else bias
54 |         x[i, c1, c2, 2] <- G[i, c1, c2] + (1 - G[i, c1, c2]) * b[i]
55 |       }
56 |     }
57 |     y[i] <- b[i] # New distractors are accepted at the bias rate
58 |   }
59 | 
60 |   x <- round(x * n_items[1])
61 |   y <- round(y * n_items[2])
62 | 
63 |   return(list(x = x, y = y, param = list(V = V, G = G, b = b, n_items = n_items, n_subjects = n_subjects)))
64 | }
50 |
51 |
52 | # Init functions ----------------------------------------------------------
53 |
54 | cr_init <- function(V, G, b, chains) {
55 |   # Build one list of random uniform(0, 1) starting values per chain.
56 |   # Parameters that are absent from the model (passed as NULL) are
57 |   # dropped from each chain's init list.
58 |   lapply(seq_len(chains), function(chain) {
59 |     candidate <- list(
60 |       V = if (!is.null(V)) array(runif(length(V), 0, 1), dim = dim(V)),
61 |       G = if (!is.null(G)) array(runif(length(G), 0, 1), dim = dim(G)),
62 |       b = if (!is.null(b)) runif(length(b), 0, 1)
63 |     )
64 |     Filter(Negate(function(x) is.null(unlist(x))), candidate)
65 |   })
66 | }
68 |
69 |
70 | # Plotting functions ------------------------------------------------------
71 |
72 | plot_deviations <- function(samples, truth) {
73 |   # Density plot of estimation error (posterior mode minus true value),
74 |   # one density per parameter class (V, G, b) extracted from the row names
75 |   # of the runjags summary.
76 |   est <- as.data.frame(summary(samples))
77 |   est$param <- factor(gsub("[^GVb]", "", rownames(est)))
78 |   est$lie <- est$Mode - truth
79 | 
80 |   est %>%
81 |     group_by(param) %>%
82 |     ggplot(aes(x = lie, color = param)) +
83 |     geom_density(aes(fill = param), alpha = 0.5, adjust = 0.5) +
84 |     geom_vline(xintercept = 0, linetype = 2) +
85 |     theme_minimal() +
86 |     xlab("Estimation error")
87 | }
85 |
86 |
87 | # One condition, aggregated responses -------------------------------------
88 | 
89 | n_subject <- 25
90 | V <- 0.5
91 | G <- 0.5
92 | b <- 0.5
93 | n_items <- 200
94 | n_items <- c(n_items, 1.5 * n_items)
95 | 
96 | synthetic <- list(x = matrix(NA, ncol = 2, nrow = n_subject), y = NA) # expected counts (no sampling noise)
97 | synthetic$x[, 1] <- rep(cr_hits(V, G, b) * n_items[1], n_subject)
98 | synthetic$x[, 2] <- rep(cr_fa(G, b) * n_items[1], n_subject)
99 | synthetic$y <- rep(b * n_items[2], n_subject)
100 | synthetic$n_items <- n_items
101 | synthetic$n_subject <- n_subject
102 | 
103 | poi <- c("V", "G", "b")
104 | 
105 | inits <- list(V = runif(n_subject, 0, 1), G = runif(n_subject, 0, 1), b = runif(n_subject, 0, 1)) # NOTE(review): the same inits are reused for all chains below, so chains start at identical values -- confirm intended
106 | 
107 | cr_samples <- run.jags(
108 | model = "cr_model.txt" # NOTE(review): no cr_model.txt in this repo -- presumably aggregated_cr_model.R; confirm path
109 | , monitor = poi
110 | , inits = list(inits, inits, inits)
111 | , data = synthetic
112 | , n.chains = 3
113 | , sample = 1e4
114 | , burnin = 1e3
115 | , thin = 10
116 | , method = "rjparallel"
117 | )
118 | 
119 | plot(cr_samples)
120 | 
121 | plot_deviations(cr_samples, c(V, G, b))
122 | rm(cr_samples)
123 | 
124 | # Aggregated responses ----------------------------------------------------
125 | 
126 | V <- matrix(c(0.5, 0.5), ncol = 2)
127 | V_delta <- 0
128 | G <- matrix(c(0.5, 0.5), ncol = 2)
129 | G_delta <- 0
130 | b <- 0.5
131 | n_items <- 324
132 | n_items <- c(n_items, 1.5 * n_items)
133 | 
134 | synthetic <- cr_pred(V, V_delta, G, G_delta, b, n_items)
135 | 
136 | synthetic_jags <- synthetic[c("x", "y")]
137 | synthetic_jags$n_subject <- synthetic$param$n_subjects
138 | synthetic_jags$n_items <- synthetic$param$n_items
139 | 
140 | poi <- c("V", "G", "b")
141 | 
142 | cr_samples <- run.jags(
143 | model = "cr_two_within_conditions.txt" # NOTE(review): repo file is cr_two_within_conditions.R -- confirm the .txt copy exists
144 | , monitor = poi
145 | , inits = cr_init(synthetic$param[[c("V")]], synthetic$param[[c("G")]], synthetic$param[[c("b")]], 3)
146 | , data = synthetic_jags
147 | , n.chains = 3
148 | , sample = 5e4
149 | , burnin = 5e4
150 | , thin = 10
151 | , method = "rjparallel"
152 | )
153 | 
154 | plot(cr_samples)
155 | 
156 | plot_deviations(cr_samples, c(synthetic$param$V, synthetic$param$G, synthetic$param$b))
157 | rm(cr_samples)
158 | 
159 | 
160 | # Individual participants -------------------------------------------------
161 | 
162 | n_subjects <- 25
163 | V <- matrix(rep(c(0.5, 0.5), each = n_subjects), ncol = 2)
164 | V_delta <- 0
165 | G <- matrix(rep(c(0.5, 0.5), each = n_subjects), ncol = 2)
166 | G_delta <- 0
167 | b <- rep(0.5, n_subjects)
168 | n_items <- 324
169 | n_items <- c(n_items, 1.5 * n_items)
170 | 
171 | synthetic <- cr_pred(V, V_delta, G, G_delta, b, n_items)
172 | 
173 | synthetic_jags <- synthetic[c("x", "y")]
174 | synthetic_jags$n_subject <- synthetic$param$n_subjects
175 | synthetic_jags$n_items <- synthetic$param$n_items
176 | 
177 | poi <- c("V", "G", "b")
178 | 
179 | cr_samples <- run.jags(
180 | model = "cr_two_within_conditions.txt" # NOTE(review): repo file is cr_two_within_conditions.R -- confirm the .txt copy exists
181 | , monitor = poi
182 | , data = synthetic_jags
183 | , inits = cr_init(synthetic$param[[c("V")]], synthetic$param[[c("G")]], synthetic$param[[c("b")]], 3)
184 | , n.chains = 3
185 | , sample = 5e4
186 | , burnin = 5e4
187 | , thin = 10
188 | , method = "rjparallel"
189 | )
190 | 
191 | plot_deviations(cr_samples, c(synthetic$param$V, synthetic$param$G, synthetic$param$b))
192 | rm(cr_samples)
193 |
194 | # Hierarchical model with correlated participant effects ------------------
195 | 
196 | n_subjects <- 10
197 | V <- matrix(pnorm(rnorm(n_subjects * 2, rep(qnorm(c(0.5, 0.5)), each = n_subjects), 0.75)), ncol = 2)
198 | G <- matrix(pnorm(rnorm(n_subjects * 2, rep(qnorm(c(0.5, 0.5)), each = n_subjects), 0.75)), ncol = 2)
199 | b <- pnorm(rnorm(n_subjects, qnorm(0.05), 0.75))
200 | V_delta <- 0
201 | G_delta <- 0
202 | n_items <- 324
203 | n_items <- c(n_items, 1.5 * n_items)
204 | 
205 | synthetic <- cr_pred(V, V_delta, G, G_delta, b, n_items)
206 | 
207 | synthetic_jags <- synthetic[c("x", "y")]
208 | synthetic_jags$n_subject <- synthetic$param$n_subjects
209 | synthetic_jags$n_items <- synthetic$param$n_items
210 | synthetic_jags$I_part <- diag((ncol(V) + ncol(G)) * 2 + 1) # Identity matrix for participants; 9 x 9 here, but model 0 only uses the leading n_param x n_param (3 x 3) submatrix in dwish()
211 | 
212 | poi <- c("V", "G", "b", "xi_part", "sigma", "pd", "dic") # NOTE(review): "sigma" is not a node in model 0 (only sigma_inv); pd/dic are runjags special monitors -- confirm
213 | 
214 | cr_samples <- run.jags(
215 | model = "hierarchical_cr_two_within_conditions0.R"
216 | , monitor = poi
217 | , data = synthetic_jags
218 | , n.chains = 3
219 | , sample = 1e4
220 | , burnin = 5e3
221 | , thin = 10
222 | , method = "rjparallel"
223 | )
224 | 
225 | str(cr_samples$end.state)
226 |
# Mutate function for runjags summaries: converts the monitored precision
# ("sigma_inv") matrices of the participant-level effects into scaled
# standard deviations and correlations.
#
# x: matrix of MCMC samples (iterations x monitored variables).
# Returns a matrix with columns sigma_V, sigma_G, sigma_b and rho[i,j].
#
# NOTE(review): relies on runjags internal helpers (:::) whose interface
# may change between versions. It also expects columns named
# "sigma_inv[i,j]" and "xi_part[k]", but the monitor list above tracks
# "sigma", not "sigma_inv" — verify the monitored variable names match.
param_varcor <- function(x) {
  # Locate the columns holding the (flattened) precision matrix.
  matches <- runjags:::matchvars(runjags:::checkvalidmonitorname("sigma_inv"), varnames(x))
  sigma_cols <- varnames(x)[matches]
  n_cols <- sqrt(length(sigma_cols)) # precision matrix is n_cols x n_cols
  sigma_inv <- x[, sigma_cols, drop = FALSE]
  # Invert the precision matrix sample by sample to obtain covariances.
  sigma <- apply(
    sigma_inv
    , 1
    , function(y) {
      sigma_inv_matrix <- matrix(y, ncol = n_cols)
      sigma_matrix <- solve(sigma_inv_matrix)
      as.vector(sigma_matrix)
    }
  )
  sigma <- t(sigma) # back to iterations x matrix elements
  colnames(sigma) <- gsub("_inv", "", colnames(sigma_inv))

  # Scale standard deviations by the redundant xi_part multipliers
  # (parameter expansion of the hierarchical model).
  sigma_V <- x[, "xi_part[1]"] * sqrt(sigma[, "sigma[1,1]"])
  sigma_G <- x[, "xi_part[2]"] * sqrt(sigma[, "sigma[2,2]"])
  sigma_b <- x[, "xi_part[3]"] * sqrt(sigma[, "sigma[3,3]"])

  # Correlations: rho[i,j] = sigma[i,j] / sqrt(sigma[i,i] * sigma[j,j]).
  rho <- matrix(NA, nrow = nrow(x), ncol = n_cols ^ 2)
  colnames(rho) <- gsub("sigma", "rho", colnames(sigma))
  for (i in 1:n_cols) {
    for (j in 1:n_cols) {
      rho[, paste0("rho[", i, ",", j, "]")] <-
        sigma[, paste0("sigma[", i, ",", j, "]")] / sqrt(sigma[, paste0("sigma[", i, ",", i, "]")] * sigma[, paste0("sigma[", j, ",", j, "]")])
    }
  }

  cbind(cbind(sigma_V, sigma_G, sigma_b), rho)
}
259 |
# Fix: the mutate function defined above is `param_varcor`; the previous
# reference to the undefined `param_cor` would error at run time.
add.summary(cr_samples, mutate = param_varcor, vars = c("V", "G", "b", "sigma_V", "sigma_G", "sigma_b", "rho"))
261 |
262 | plot_deviations(cr_samples, unlist(list(c(V, V + V_delta), c(G, G + G_delta), b)))
263 |
--------------------------------------------------------------------------------
/ebddm/bayesian/ebddm_agg.txt:
--------------------------------------------------------------------------------
# Exemplar-based Drift-Diffusion Model (logit link)
#
# Data:
#   rt[i]     signed response time (sign encodes the boundary reached)
#   tests     test-item coordinates in psychological space
#   memory    stored exemplar coordinates
#   category  category label (1 or 2) of each memory exemplar
#   p, rho    similarity-gradient shape and Minkowski power (passed as data)

model {
  # Decision Data: Wiener first-passage-time likelihood
  for (i in 1:ntests[1]) {
    rt[i] ~ dwiener(alpha, tau, beta, delta[i])
    #pred_rt[i] ~ dwiener(alpha, tau, beta, delta[i])
  }

  # Decision Probabilities: GCM evidence ratio mapped to drift rate
  for (i in 1:ntests[1]) {
    delta[i] <- logit(r[i])
    r[i] <- sum(numerator[i, ]) / sum(s[i, ])
    for (j in 1:nmemory[1]) {
      # Only category-1 exemplars contribute to the numerator
      # (tmp[., ., 2] contributes 0).
      tmp[i, j, 1] <- s[i, j]
      tmp[i, j, 2] <- 0
      numerator[i, j] <- tmp[i, j, category[j]]
    }
  }

  # Similarities: exponential decay of Minkowski distance (GCM)
  for (i in 1:ntests[1]) {
    for (j in 1:nmemory[1]) {
      s[i, j] <- exp(-c * (sum(wk * abs(tests[i, ] - memory[j, ])^rho)^(1/rho))^p)
    }
  }

  wk[1:2] <- c(w, 1-w) # Attention weights; vector assignment requires JAGS 4.0.0

  # Priors
  c ~ dunif(0, 25)     # similarity sensitivity
  w ~ dbeta(1, 1)      # attention weight for dimension 1
  alpha ~ dunif(0, 25) # boundary separation
  beta ~ dbeta(1, 1)   # relative starting point
  tau ~ dunif(0, 2)    # non-decision time
}
37 |
--------------------------------------------------------------------------------
/ebddm/bayesian/ebddm_agg2.txt:
--------------------------------------------------------------------------------
# Exemplar-based Drift-Diffusion Model (probit link)
#
# Identical to ebddm_agg.txt except that the GCM evidence ratio r[i] is
# mapped to the drift rate through the probit link instead of the logit.

model {
  # Decision Data: Wiener first-passage-time likelihood
  for (i in 1:ntests[1]) {
    rt[i] ~ dwiener(alpha, tau, beta, delta[i])
    #pred_rt[i] ~ dwiener(alpha, tau, beta, delta[i])
  }

  # Decision Probabilities: GCM evidence ratio mapped to drift rate
  for (i in 1:ntests[1]) {
    # Fix: probit(r) is the inverse normal CDF, mapping r in (0,1) onto
    # the whole real line. The previous phi(r) is the normal CDF itself,
    # which maps (0,1) onto (0.5, 0.84) — always-positive drift and
    # almost no discrimination between items.
    delta[i] <- probit(r[i])
    r[i] <- sum(numerator[i, ]) / sum(s[i, ])
    for (j in 1:nmemory[1]) {
      # Only category-1 exemplars contribute to the numerator.
      tmp[i, j, 1] <- s[i, j]
      tmp[i, j, 2] <- 0
      numerator[i, j] <- tmp[i, j, category[j]]
    }
  }

  # Similarities: exponential decay of Minkowski distance (GCM)
  for (i in 1:ntests[1]) {
    for (j in 1:nmemory[1]) {
      s[i, j] <- exp(-c * (sum(wk * abs(tests[i, ] - memory[j, ])^rho)^(1/rho))^p)
    }
  }

  wk[1:2] <- c(w, 1-w) # Attention weights; vector assignment requires JAGS 4.0.0

  # Priors
  w ~ dbeta(1, 1)      # attention weight for dimension 1
  c ~ dunif(0, 25)     # similarity sensitivity
  alpha ~ dunif(0, 25) # boundary separation
  beta ~ dbeta(1, 1)   # relative starting point
  tau ~ dunif(0, 2)    # non-decision time
}
37 |
--------------------------------------------------------------------------------
/ebddm/bayesian/ebddm_agg3.txt:
--------------------------------------------------------------------------------
# Exemplar-based Drift-Diffusion Model (linear "test" link)
#
# Identical to ebddm_agg.txt except that the GCM evidence ratio r[i] is
# mapped linearly onto the drift rate: delta = 2r - 1 in (-1, 1).

model {
  # Decision Data: Wiener first-passage-time likelihood
  for (i in 1:ntests[1]) {
    rt[i] ~ dwiener(alpha, tau, beta, delta[i])
    #pred_rt[i] ~ dwiener(alpha, tau, beta, delta[i])
  }

  # Decision Probabilities: GCM evidence ratio mapped to drift rate
  for (i in 1:ntests[1]) {
    delta[i] <- 2 * r[i] - 1
    r[i] <- sum(numerator[i, ]) / sum(s[i, ])
    for (j in 1:nmemory[1]) {
      # Only category-1 exemplars contribute to the numerator.
      tmp[i, j, 1] <- s[i, j]
      tmp[i, j, 2] <- 0
      numerator[i, j] <- tmp[i, j, category[j]]
    }
  }

  # Similarities: exponential decay of Minkowski distance (GCM)
  for (i in 1:ntests[1]) {
    for (j in 1:nmemory[1]) {
      s[i, j] <- exp(-c * (sum(wk * abs(tests[i, ] - memory[j, ])^rho)^(1/rho))^p)
    }
  }

  wk[1:2] <- c(w, 1-w) # Attention weights; vector assignment requires JAGS 4.0.0

  # Priors
  w ~ dbeta(1, 1)      # attention weight for dimension 1
  c ~ dunif(0, 25)     # similarity sensitivity
  alpha ~ dunif(0, 25) # boundary separation
  beta ~ dbeta(1, 1)   # relative starting point
  tau ~ dunif(0, 2)    # non-decision time
}
37 |
--------------------------------------------------------------------------------
/ebddm/bayesian/ebddm_agg_rec.txt:
--------------------------------------------------------------------------------
# Exemplar-based Drift-Diffusion Model (logit link, vague gamma priors)
#
# Identical to ebddm_agg.txt except for the priors on c and alpha, which
# use vague Gamma(0.001, 0.001) distributions instead of uniforms.

model {
  # Decision Data: Wiener first-passage-time likelihood
  for (i in 1:ntests[1]) {
    rt[i] ~ dwiener(alpha, tau, beta, delta[i])
    #pred_rt[i] ~ dwiener(alpha, tau, beta, delta[i])
  }

  # Decision Probabilities: GCM evidence ratio mapped to drift rate
  for (i in 1:ntests[1]) {
    delta[i] <- logit(r[i])
    r[i] <- sum(numerator[i, ]) / sum(s[i, ])
    for (j in 1:nmemory[1]) {
      # Only category-1 exemplars contribute to the numerator.
      tmp[i, j, 1] <- s[i, j]
      tmp[i, j, 2] <- 0
      numerator[i, j] <- tmp[i, j, category[j]]
    }
  }

  # Similarities: exponential decay of Minkowski distance (GCM)
  for (i in 1:ntests[1]) {
    for (j in 1:nmemory[1]) {
      s[i, j] <- exp(-c * (sum(wk * abs(tests[i, ] - memory[j, ])^rho)^(1/rho))^p)
    }
  }

  wk[1:2] <- c(w, 1-w) # Attention weights; vector assignment requires JAGS 4.0.0

  # Priors
  w ~ dbeta(1, 1)               # attention weight for dimension 1
  c ~ dgamma(0.001, 0.001)      # vague prior, similarity sensitivity
  alpha ~ dgamma(0.001, 0.001)  # dnorm(0, 1/1000000)T(0, )
  beta ~ dbeta(1, 1)            # relative starting point
  tau ~ dunif(0, 2)             # non-decision time
}
37 |
--------------------------------------------------------------------------------
/ebddm/bayesian/pr_ebrw_ebdd.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Validate EB-DDM and compare to EB-RW"
3 | author: "Frederik Aust"
4 | output:
5 | knitrBootstrap::bootstrap_document:
6 | highlight: xcode
7 | theme: flatly
8 | menu: false
9 | ---
10 |
11 | ```{r echo = FALSE}
12 | library("dplyr")
13 | library("ggplot2")
14 | theme_set(papaja::theme_apa())
15 |
16 | library("runjags")
17 | rjags::load.module("glm")
18 | rjags::load.module("wiener")
19 |
20 | library("parallel")
21 |
22 | source("../ebddm_pred.R")
23 | source("../../ebrw/ebrw_sim.R")
24 | source("../../gcm/gcm_pred.r")
25 | ```
26 |
27 | # Parameter recovery for EB-DDM
28 |
29 | ```{r load_similarities}
30 | similarities <- read.csv2("../../gcm/data/nosofsky_1989_similarities.csv")[, -1]
31 | similarities$size <- c(0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 2, 2, 0, 2, 0, 0)
32 | similarities$angle <- c(0, 1, 2, 0, 0, 1, 2, 0, 1, 0, 0, 2, 0, 0, 2, 0)
33 | similarities$category <- rep(c(1, 1, 2, 2), 4)
34 |
35 | memory <- as.matrix(subset(similarities, angle != 0)[, 1:2])
36 | ```
37 |
38 | ```{r generate_ground_truth, cache = TRUE}
39 | ebddm_truth <- ebddm_pred(
40 | param = c(w = 0.7, c = 2, alpha = 3, tau = 0.15, beta = 0.5)
41 | , mem = subset(similarities, angle != 0)[, c(1:2, 4)]
42 | , obs = as.matrix(similarities[, 1:2])
43 | , n_trials = 1000
44 | )
45 |
46 | ebddm_truth$resp_type <- ifelse(
47 | (ebddm_truth$resp == "upper" & similarities[ebddm_truth$stimulus, "category"] == 1) |
48 | (ebddm_truth$resp == "lower" & similarities[ebddm_truth$stimulus, "category"] == 2)
49 | , "correct", "error")
50 | ebddm_truth$rt <- ifelse(ebddm_truth$resp_type == "correct", ebddm_truth$q, -ebddm_truth$q)
51 |
52 | ebddm_truth$xi1 <- similarities[ebddm_truth$stimulus, "xi1"]
53 | ebddm_truth$xi2 <- similarities[ebddm_truth$stimulus, "xi2"]
54 | ```
55 |
56 |
57 | ```{r parameter_recovery, dependson = "generate_ground_truth", results = "hide", cache = TRUE}
58 | ebddm_truth$q <- ifelse(ebddm_truth$resp == "upper", ebddm_truth$q, -ebddm_truth$q)
59 | tests <- as.matrix(ebddm_truth[, c("xi1", "xi2")])
60 |
61 | init_values <- list(
62 | list(w = 0.5, c = 4, alpha = 5, beta = 0.3, tau = 0.2)
63 | , list(w = 0.8, c = 2, alpha = 3, beta = 0.7, tau = 0.1)
64 | , list(w = 0.2, c = 3, alpha = 8, beta = 0.5, tau = 0.25)
65 | )
66 |
67 | poi <- c(unique(unlist(lapply(init_values, names))))
68 |
69 | angle_data <- list(
70 | rt = ebddm_truth$q
71 | , tests = tests
72 | , memory = memory
73 | , ntests = dim(tests)
74 | , nmemory = dim(memory)
75 | , p = 1 # Shape of relationship between similarity and psychological distance
76 | , rho = 2 # Power of the Minkowski distance
77 | , category = unlist(subset(similarities, angle != 0)[, 4])
78 | )
79 |
80 | # Sample
81 | angle_samples <- run.jags(
82 | model = "ebddm_agg_rec.txt"
83 | , monitor = poi
84 | , inits = init_values
85 | , data = angle_data
86 | , n.chains = 3
87 | , sample = 1e3
88 | , burnin = 1e3
89 | , thin = 1
90 | , method = "rjparallel"
91 | )
92 | ```
93 |
94 | ```{r}
95 | knitr::kable(summary(angle_samples))
96 | ```
97 |
98 |
99 | ```{r posterior_predictive_checks}
100 | ebddm_truth <- ebddm_pred(
101 | param = c(w = 0.7, c = 2, alpha = 3, tau = 0.15, beta = 0.5)
102 | , mem = subset(similarities, angle != 0)[, c(1:2, 4)]
103 | , obs = as.matrix(similarities[, 1:2])
104 | , n_trials = 1000
105 | )
106 |
107 | ebddm_truth$resp_type <- ifelse(
108 | (ebddm_truth$resp == "upper" & similarities[ebddm_truth$stimulus, "category"] == 1) |
109 | (ebddm_truth$resp == "lower" & similarities[ebddm_truth$stimulus, "category"] == 2)
110 | , "correct", "error")
111 |
112 | ebddm_truth$rt <- ifelse(ebddm_truth$resp_type == "correct", ebddm_truth$q, -ebddm_truth$q)
113 |
114 | posterior_predictive_distribution <- parallel::mclapply(angle_samples$mcmc, function(x) {
115 | apply(x, 1, function(y) {
116 | ebddm_pred(
117 | param = y
118 | , mem = subset(similarities, angle != 0)[, c(1:2, 4)]
119 | , obs = as.matrix(similarities[, 1:2])
120 | , n_trials = 1
121 | )
122 | })
123 | })
124 |
125 | posterior_predictive_distribution <- lapply(posterior_predictive_distribution, function(x) do.call(rbind, x)) %>%
126 | do.call(rbind, .)
127 |
128 | posterior_predictive_distribution$resp_type <- ifelse(
129 | (posterior_predictive_distribution$resp == "upper" & similarities[posterior_predictive_distribution$stimulus, "category"] == 1) |
130 | (posterior_predictive_distribution$resp == "lower" & similarities[posterior_predictive_distribution$stimulus, "category"] == 2)
131 | , "correct", "error")
132 |
133 | posterior_predictive_distribution$rt <- ifelse(
134 | posterior_predictive_distribution$resp_type == "correct"
135 | , posterior_predictive_distribution$q
136 | , -posterior_predictive_distribution$q
137 | )
138 | ```
139 |
140 |
141 | ```{r histogram_observed_vs_predicted, fig.width = 12, fig.height = 9}
142 | ebddm_truth %>%
143 | group_by(resp_type) %>%
144 | ggplot() +
145 | geom_histogram(aes(fill = resp_type, x = rt, y = ..density..), binwidth = 0.25, position = "identity") +
146 | geom_histogram(data = posterior_predictive_distribution %>% group_by(resp_type, stimulus), aes(fill = resp_type, x = rt, y = -..density..), binwidth = 0.25, position = "identity", alpha = 0.3) +
147 | geom_hline(yintercept = 0, size = 0.25, color = grey(0.4)) +
148 | facet_wrap(~ stimulus, scales = "free_y")
149 | ```
150 |
151 | ```{r quantile_probability_plot}
152 | true_probability_quantiles <- ebddm_truth %>%
153 | mutate(
154 | correct = ifelse(resp_type == "correct", 1, 0)
155 | , q = abs(q) * 1000
156 | ) %>%
157 | group_by(stimulus, resp_type) %>%
158 | summarize(
159 | p = length(q) / (nrow(ebddm_truth) / (length(unique(ebddm_truth$stimulus))))
160 | , q1 = quantile(q, 0.1)
161 | , q3 = quantile(q, 0.3)
162 | , q5 = quantile(q, 0.5)
163 | , q7 = quantile(q, 0.7)
164 | , q9 = quantile(q, 0.9)
165 | ) %>%
166 | tidyr::gather(quantile, rt, q1:q9) %>%
167 | ungroup() %>%
168 | mutate(
169 | p = ifelse((p > 0.5 & resp_type == "error") | (p < 0.5 & resp_type == "correct"), 1 - p, p)
170 | , resp_type_quantile = paste(quantile, resp_type, sep = "_")
171 | , pp = round(ifelse(p > 0.5, 1 - p, p), 3)
172 | , stimulus = factor(pp, labels = unique(stimulus)[order(unique(pp))])
173 | )
174 |
175 | ebddm_probability_quantiles <- posterior_predictive_distribution %>%
176 | mutate(
177 | correct = ifelse(resp_type == "correct", 1, 0)
178 | , q = abs(rt) * 1000
179 | ) %>%
180 | group_by(stimulus, resp_type) %>%
181 | summarize(
182 | p = length(q) / (nrow(posterior_predictive_distribution) / (length(unique(posterior_predictive_distribution$stimulus))))
183 | , q1 = quantile(q, 0.1)
184 | , q3 = quantile(q, 0.3)
185 | , q5 = quantile(q, 0.5)
186 | , q7 = quantile(q, 0.7)
187 | , q9 = quantile(q, 0.9)
188 | ) %>%
189 | tidyr::gather(quantile, rt, q1:q9) %>%
190 | ungroup() %>%
191 | mutate(
192 | p = ifelse((p > 0.5 & resp_type == "error") | (p < 0.5 & resp_type == "correct"), 1 - p, p)
193 | , resp_type_quantile = paste(quantile, resp_type, sep = "_")
194 | , pp = round(ifelse(p > 0.5, 1 - p, p), 3)
195 | , stimulus = factor(pp, labels = unique(stimulus)[order(unique(pp))])
196 | )
197 |
198 | ggplot(true_probability_quantiles, aes(x = p, y = rt, group = resp_type_quantile, color = stimulus)) +
199 | geom_line(color = "grey70") +
200 | geom_point(data = ebddm_probability_quantiles, aes(x = p, y = rt, group = resp_type_quantile, color = stimulus), shape = 5) +
201 | geom_point()
202 | ```
203 |
204 |
205 |
206 | ```{r}
207 | ebddm_truth <- ebddm_pred(
208 | param = c(w = 0.7, c = 2, alpha = 3, tau = 0.15, beta = 0.5)
209 | , mem = subset(similarities, angle != 0)[, c(1:2, 4)]
210 | , obs = as.matrix(similarities[, 1:2])
211 | , n_trials = 1000
212 | )
213 |
214 | ebddm_truth$resp_type <- ifelse(
215 | (ebddm_truth$resp == "upper" & similarities[ebddm_truth$stimulus, "category"] == 1) |
216 | (ebddm_truth$resp == "lower" & similarities[ebddm_truth$stimulus, "category"] == 2)
217 | , "correct", "error")
218 |
219 | ebddm_truth$rt <- ifelse(ebddm_truth$resp_type == "correct", ebddm_truth$q, -ebddm_truth$q)
220 | ```
221 |
222 |
223 | # Parameter recovery for EB-RW
224 |
225 | ```{r simulate_ebrw_data, results = "hide", cache = TRUE}
226 | ebrw_truth <- ebrw_sim(
227 | param = c(w = 0.7, c = 2, K = 4, alpha = 0.242, k = 125.07, mu = 491.07)
228 | , mem = subset(similarities, angle != 0)[, c(1:2, 4)]
229 | , obs = as.matrix(similarities[, 1:2])
230 | , n_trials = 1000
231 | )
232 |
233 | ebrw_truth$resp_type <- ifelse(
234 | (ebrw_truth$response == 1 & similarities[ebrw_truth$stimulus, "category"] == 1) |
235 | (ebrw_truth$response == 0 & similarities[ebrw_truth$stimulus, "category"] == 2)
236 | , "correct", "error")
237 | ```
238 |
239 | ```{r fit_ebddm_to_ebrw_data, dependson = "simulate_ebrw_data", results = "hide", cache = TRUE}
240 | ebrw_truth$q <- ifelse(ebrw_truth$response == 1, ebrw_truth$rt, -ebrw_truth$rt) / 1000
241 | tests <- as.matrix(ebrw_truth[, paste0("xi", 1:2)])
242 |
243 | init_values <- list(
244 | list(w = 0.5, c = 4, alpha = 5, tau = 0.2, beta = 0.3)
245 | , list(w = 0.8, c = 2, alpha = 3, tau = 0.1, beta = 0.7)
246 | , list(w = 0.2, c = 3, alpha = 8, tau = 0.25, beta = 0.5)
247 | )
248 |
249 | poi <- c(unique(unlist(lapply(init_values, names))))
250 |
251 | angle_data <- list(
252 | rt = ebrw_truth$q
253 | , tests = tests
254 | , memory = memory
255 | , ntests = dim(tests)
256 | , nmemory = dim(memory)
257 | , p = 1 # Shape of relationship between similarity and psychological distance
258 | , rho = 2 # Power of the Minkowski distance
259 | , category = unlist(subset(similarities, angle != 0)[, 4])
260 | )
261 |
262 | # Sample
263 | ebrw_angle_samples <- run.jags(
264 | model = "ebddm_agg.txt"
265 | , monitor = poi
266 | , inits = init_values
267 | , data = angle_data
268 | , n.chains = 3
269 | , sample = 5e3
270 | , burnin = 1e3
271 | , thin = 1
272 | , method = "rjparallel"
273 | )
274 | ```
275 |
276 | ```{r}
277 | knitr::kable(summary(ebrw_angle_samples))
278 | ```
279 |
280 |
281 | ```{r fit_ebddm_to_ebrw_data2, dependson = "simulate_ebrw_data", results = "hide", cache = TRUE}
282 | init_values <- list(
283 | list(w = 0.5, c = 4, alpha = 5, tau = 0.2, beta = 0.3)
284 | , list(w = 0.8, c = 2, alpha = 3, tau = 0.1, beta = 0.7)
285 | , list(w = 0.2, c = 3, alpha = 8, tau = 0.25, beta = 0.5)
286 | )
287 |
288 | poi <- c(unique(unlist(lapply(init_values, names))))
289 |
290 | angle_data <- list(
291 | rt = ebrw_truth$q
292 | , tests = tests
293 | , memory = memory
294 | , ntests = dim(tests)
295 | , nmemory = dim(memory)
296 | , p = 1 # Shape of relationship between similarity and psychological distance
297 | , rho = 2 # Power of the Minkowski distance
298 | , category = unlist(subset(similarities, angle != 0)[, 4])
299 | )
300 |
301 | # Sample
302 | ebrw_angle_samples2 <- run.jags(
303 | model = "ebddm_agg2.txt"
304 | , monitor = poi
305 | , inits = init_values
306 | , data = angle_data
307 | , n.chains = 3
308 | , sample = 5e3
309 | , burnin = 1e3
310 | , thin = 1
311 | , method = "rjparallel"
312 | )
313 | ```
314 |
315 | ```{r}
316 | knitr::kable(summary(ebrw_angle_samples2))
317 | ```
318 |
319 |
320 | ```{r fit_ebddm_to_ebrw_data3, dependson = "simulate_ebrw_data", results = "hide", cache = TRUE}
321 | init_values <- list(
322 | list(w = 0.5, c = 4, alpha = 5, tau = 0.2, beta = 0.3)
323 | , list(w = 0.8, c = 2, alpha = 3, tau = 0.1, beta = 0.7)
324 | , list(w = 0.2, c = 3, alpha = 8, tau = 0.25, beta = 0.5)
325 | )
326 |
327 | poi <- c(unique(unlist(lapply(init_values, names))))
328 |
329 | angle_data <- list(
330 | rt = ebrw_truth$q
331 | , tests = tests
332 | , memory = memory
333 | , ntests = dim(tests)
334 | , nmemory = dim(memory)
335 | , p = 1 # Shape of relationship between similarity and psychological distance
336 | , rho = 2 # Power of the Minkowski distance
337 | , category = unlist(subset(similarities, angle != 0)[, 4])
338 | )
339 |
340 | # Sample
341 | ebrw_angle_samples3 <- run.jags(
342 | model = "ebddm_agg3.txt"
343 | , monitor = poi
344 | , inits = init_values
345 | , data = angle_data
346 | , n.chains = 3
347 | , sample = 5e3
348 | , burnin = 1e3
349 | , thin = 1
350 | , method = "rjparallel"
351 | )
352 | ```
353 |
354 | ```{r}
355 | knitr::kable(summary(ebrw_angle_samples3))
356 | ```
357 |
358 |
359 | ```{r posterior_predictive_checks_ebrw}
360 | posterior_predictive_distribution <- mclapply(ebrw_angle_samples$mcmc, function(x) {
361 | apply(x, 1, function(y) {
362 | ebddm_pred(
363 | param = y
364 | , mem = subset(similarities, angle != 0)[, c(1:2, 4)]
365 | , obs = as.matrix(similarities[, 1:2])
366 | , n_trials = 1
367 | )
368 | })
369 | })
370 |
371 | posterior_predictive_distribution <- lapply(posterior_predictive_distribution, function(x) do.call(rbind, x)) %>%
372 | do.call(rbind, .)
373 |
374 | posterior_predictive_distribution$resp_type <- ifelse(
375 | (posterior_predictive_distribution$resp == "upper" & similarities[posterior_predictive_distribution$stimulus, "category"] == 1) |
376 | (posterior_predictive_distribution$resp == "lower" & similarities[posterior_predictive_distribution$stimulus, "category"] == 2)
377 | , "correct", "error")
378 |
379 | posterior_predictive_distribution$q <- ifelse(
380 | posterior_predictive_distribution$resp_type == "correct"
381 | , posterior_predictive_distribution$q
382 | , -posterior_predictive_distribution$q
383 | )
384 | ```
385 |
386 |
387 | ```{r plot_observed_vs_predicted_ebrw, fig.width = 12, fig.height = 9}
388 | ebrw_truth$q <- ifelse(ebrw_truth$resp_type == "correct", ebrw_truth$rt, -ebrw_truth$rt) / 1000
389 |
390 | ebrw_truth %>%
391 | ggplot() +
392 | geom_histogram(aes(fill = resp_type, x = q, y = ..density..), binwidth = 0.25, position = "identity") +
393 | geom_histogram(data = posterior_predictive_distribution %>% group_by(resp_type, stimulus), aes(fill = resp_type, x = q, y = -..density..), binwidth = 0.25, position = "identity", alpha = 0.3) +
394 | geom_hline(yintercept = 0, size = 0.25, color = grey(0.4)) +
395 | facet_wrap(~ stimulus, scales = "free_y")
396 | ```
397 |
398 | ```{r}
399 | error_rates <- merge(
400 | aggregate(q ~ stimulus, ebrw_truth, function(x) mean(x > 0))
401 | , aggregate(q ~ stimulus, posterior_predictive_distribution, function(x) mean(x > 0))
402 | , by = "stimulus"
403 | )
404 | error_rates$diff <- error_rates$q.x - error_rates$q.y
405 |
406 | knitr::kable(error_rates)
407 | ```
408 |
409 | ```{r eval = FALSE}
410 | correct_rt <- merge(
411 | aggregate(q ~ stimulus, ebrw_truth, function(x) median(x[x > 0]))
412 | , aggregate(q ~ stimulus, posterior_predictive_distribution, function(x) median(x[x > 0]))
413 | , by = "stimulus"
414 | )
415 | correct_rt$diff <- correct_rt$q.x - correct_rt$q.y
416 |
417 | knitr::kable(correct_rt)
418 | ```
419 |
420 | ```{r eval = FALSE}
421 | error_rt <- merge(
422 | aggregate(q ~ stimulus, ebrw_truth, function(x) median(x[x < 0]))
423 | , aggregate(q ~ stimulus, posterior_predictive_distribution, function(x) median(x[x < 0]))
424 | , by = "stimulus"
425 | )
426 | error_rt$diff <- error_rt$q.x - error_rt$q.y
427 |
428 | knitr::kable(error_rt)
429 | ```
430 |
--------------------------------------------------------------------------------
/ebddm/ebddm_pred.R:
--------------------------------------------------------------------------------
# Predict responses and response times from the exemplar-based
# drift-diffusion model (EB-DDM): GCM response probabilities are mapped
# to drift rates (logit link) and fed into a Wiener diffusion process.
#
# param    Named vector with w (attention weight for dimension 1),
#          c (similarity sensitivity), alpha (boundary separation),
#          tau (non-decision time), beta (relative starting point)
# mem      Exemplars in memory: two feature columns plus a category
#          column coded 1 or 2
# obs      Test items: two feature columns
# rho      Minkowski metric (2 = Euclidean, 1 = city-block)
# n_trials Number of simulated decisions per test item
#
# Returns a data.frame of RWiener::rwiener() samples with a stimulus
# index and the drift rate delta attached.
ebddm_pred <- function(param, mem, obs, rho = 2, n_trials = 100) {
  require("RWiener")

  # Define parameters (two-dimensional stimuli assumed: w holds two weights)
  w <- param["w"]
  w[2] <- 1 - sum(w)
  c <- param["c"]
  alpha <- param["alpha"]
  tau <- param["tau"]
  beta <- param["beta"]

  # Prepare objects. Accumulate per-stimulus results in a list and bind
  # once at the end instead of rbind-ing inside the loop (avoids O(n^2)
  # copying with many test items).
  n_obs <- nrow(obs)
  mem <- as.matrix(mem)
  obs <- as.matrix(obs)
  results <- vector("list", n_obs)

  # Model computations
  for(i in 1:n_obs) {
    iobs <- as.vector(obs[i, 1:ncol(obs)])

    ## Determine similarities & activation
    d <- w*abs(iobs - t(mem[, 1:2]))^rho
    d <- colSums(d)^(1/rho) # Eq. 3, Nosofsky (1988)
    s <- exp(-c*d) # Eq. 4, Nosofsky (1989)
    s_ab <- sum(s[mem[, 3] == 1]) + sum(s[mem[, 3] == 2])

    ## Compute response probability for category 1 and map to drift rate
    p <- sum(s[mem[, 3] == 1]) / s_ab # Eq. 2, Nosofsky (1989)
    delta <- log(p / (1 - p)) # logit link; infinite if p is exactly 0 or 1

    results[[i]] <- cbind(stimulus = i, rwiener(n_trials, alpha, tau, beta, delta), delta = delta)
  }

  do.call(rbind, results)
}
38 |
--------------------------------------------------------------------------------
/ebrw/ebrw_fit.R:
--------------------------------------------------------------------------------
# Weighted least-squares deviance between EBRW predictions and observed
# data (accuracy in column 1, mean RT in column 2 of `data`); deviation
# measure follows Nosofsky & Stanton (2005). `...` is forwarded to
# ebrw_pred().
#
# NOTE(review): the accuracy term is scaled by sd/sqrt(n) but the RT term
# by sd/n — confirm this asymmetry matches the intended fit criterion.
ebrw_fit <- function(data, ...) {
  predictions <- ebrw_pred(...)
  n <- length(data[, 1])
  accuracy_dev <- (predictions[, 1] - data[, 1])^2 / (sd(data[, 1]) / sqrt(n))
  rt_dev <- (predictions[, 4] - data[, 2])^2 / (sd(data[, 2]) / length(data[, 2]))
  sum(accuracy_dev + rt_dev)
}
6 |
--------------------------------------------------------------------------------
/ebrw/ebrw_pred.R:
--------------------------------------------------------------------------------
1 | # param A vector of starting parameters: c(w, c, K, alpha, k, mu)
2 | # w = A vector of attention weights
3 | # c = Similarity sensitivity
4 | # K = Threshold for 'Category A' response (equal thresholds assumed)
5 | # alpha = Retrieval constant
6 | # k = Response time scaling constant
7 | # mu = Response time constant
8 | # mem A matrix of exemplars in memory with one column for each dimension in psychological space
9 | # obs A matrix of observed exemplars with one column for each dimension in psychological space
10 | # rho An integer determining the distance metric in psychological space (2 = Euclidean distance; 1 = City block distance)
# Closed-form predictions of the Exemplar-Based Random Walk model
# (Nosofsky & Palmeri, 1997): response accuracy and expected response
# times per test item. See the parameter legend in the header above.
#
# Returns a matrix with columns accuracy, rt_A, rt_B, mean_rt.
#
# NOTE(review): distances index mem[, 1:2] and categories mem[, 3],
# hard-coding two stimulus dimensions although ndim is computed from
# ncol(mem) — confirm before using with ndim != 2.
ebrw_pred <- function(param, mem, obs, rho = 2) {

  # Define parameters
  ndim <- ncol(mem) - 1 # -0, if no category information
  w <- param[1:(ndim - 1)]
  w[ndim] <- 1 - sum(w) # attention weights constrained to sum to 1
  c <- param[ndim]
  A <- param[ndim + 1]
  B <- A # equal response thresholds assumed
  alpha <- param[ndim + 2]
  k <- param[ndim + 3]
  mu <- param[ndim + 4]


  # Prepare objects
  n_obs <- nrow(obs)
  mem <- as.matrix(mem)
  obs <- as.matrix(obs)
  mean_rt <- rep(NA, n_obs)
  rt_A <- rep(NA, n_obs)
  rt_B <- rep(NA, n_obs)
  accuracy <- rep(NA, n_obs)

  # Model computations
  for(i in 1:n_obs) {
    # Drops the last column: obs is expected to carry the category label
    # in its final column (used below to score accuracy).
    iobs <- as.vector(obs[i, 1:(ncol(obs)-1)])

    ## Determine similarities & activation
    d <- w*abs(iobs - t(mem[, 1:2]))^rho
    d <- colSums(d)^(1/rho) # Eq. 3, Nosofsky (1988)
    s <- exp(-c*d) # Eq. 4, Nosofsky (1989)
    s_ab <- sum(s[mem[, 3] == 1]) + sum(s[mem[, 3] == 2])

    ## Compute response probability for category 1
    p <- sum(s[mem[, 3] == 1])/s_ab # Eq. 2, Nosofsky (1989)
    q <- 1-p
    t_step <- alpha + 1/s_ab # Eq. 10, Nosofsky & Palmeri (1997)

    if(p != 0.5) {
      ## Biased walk: absorption probability and expected step counts
      p_A <- (1-(q/p)^B) / (1-(q/p)^(A+B)) # Eq. 16a, Nosofsky & Palmeri (1997)

      theta1 <- ((p/q)^(A+B) + 1) / ((p/q)^(A+B) - 1) # Eq. 19, Nosofsky & Palmeri (1997)
      theta2 <- ((p/q)^B + 1) / ((p/q)^B - 1) # Eq. 19, Nosofsky & Palmeri (1997)
      n_step_A <- 1/(p-q) * (theta1*(A+B) - theta2*B) # Eq. 18a, Nosofsky & Palmeri (1997)

      theta1 <- ((p/q)^-(A+B) + 1) / ((p/q)^-(A+B) - 1) # Eq. 21, Nosofsky & Palmeri (1997)
      theta2 <- ((p/q)^-A + 1) / ((p/q)^-A - 1) # Eq. 21, Nosofsky & Palmeri (1997)
      n_step_B <- 1/(q-p) * (theta1*(A+B) - theta2*A) # Eq. 20a, Nosofsky & Palmeri (1997)

      n_steps <- B/(q-p) - (A+B)/(q-p) * ((1-(q/p)^B)/(1-(q/p)^(A+B))) # Eq. 14a, Nosofsky & Palmeri (1997)

    } else {
      ## Unbiased walk (p == 0.5): limiting forms of the expressions above
      p_A <- B/(A+B) # Eq. 16b, Nosofsky & Palmeri (1997)

      n_step_A <- A/3*(2*B + A) # Eq. 18b, Nosofsky & Palmeri (1997)
      n_step_B <- B/3*(2*A + B) # Eq. 20b, Nosofsky & Palmeri (1997)

      n_steps <- A*B # Eq. 14b, Nosofsky & Palmeri (1997)
    }

    ## Convert expected step counts into response times
    rt_A[i] <- (n_step_A * t_step) * k + mu
    rt_B[i] <- (n_step_B * t_step) * k + mu
    mean_rt[i] <- (n_steps * t_step) * k + mu
    accuracy[i] <- if(obs[i, 3] == 1) p_A else 1 - p_A # probability of the correct response
  }

  pred <- cbind(accuracy, rt_A, rt_B, mean_rt)
  return(pred)
}
81 |
--------------------------------------------------------------------------------
/ebrw/ebrw_sim.R:
--------------------------------------------------------------------------------
# Simulate trial-level categorization responses and response times from
# the Exemplar-Based Random Walk model (Nosofsky & Palmeri, 1997).
#
# param    c(w, c, K, alpha, k, mu): attention weight(s), similarity
#          sensitivity, response threshold (A = B = K), retrieval
#          constant, RT scaling constant, RT constant
# mem      exemplars in memory (one column per dimension + category column)
# obs      test items (one column per dimension + category column)
# rho      Minkowski metric (2 = Euclidean, 1 = city-block)
# n_trials number of random-walk trials per test item
#
# Returns the trial-level results (stimulus, trial, response, rt) merged
# with the test-item coordinates.
ebrw_sim <- function(param, mem, obs, rho = 2, n_trials = 1000) {

  # Define parameters
  ndim <- ncol(mem) - 1 # -0, if no category information
  w <- param[1:(ndim - 1)]
  w[ndim] <- 1 - sum(w) # attention weights constrained to sum to 1
  c <- param[ndim]
  A <- param[ndim + 1]
  B <- A # equal response thresholds assumed
  alpha <- param[ndim + 2]
  k <- param[ndim + 3]
  mu <- param[ndim + 4]


  # Prepare objects
  n_obs <- nrow(obs)
  mem_sim <- as.matrix(mem[, 1:ndim])
  obs_sim <- as.matrix(obs[, 1:ndim])
  category <- mem[, ndim + 1] # was hard-coded mem[, 3]; generalized to ndim
  results <- expand.grid(stimulus = 1:n_obs, trial = 1:n_trials, response = NA, rt = NA)
  results <- results[order(results$stimulus), ]

  # Model computations
  for(i in 1:n_obs) {
    iobs <- as.vector(obs_sim[i, 1:ndim])

    ## Similarities do not depend on the trial: hoisted out of the trial
    ## loop (previously recomputed n_trials times per stimulus). The RNG
    ## sequence is unchanged because this computation draws no random numbers.
    d <- w*abs(iobs - t(mem_sim))^rho
    d <- colSums(d)^(1/rho) # Eq. 3, Nosofsky (1988)
    s <- exp(-c*d) # Eq. 4, Nosofsky (1989)
    s_ab <- sum(s[category == 1]) + sum(s[category == 2])

    ## Step probability toward category 1 and step duration
    p <- sum(s[category == 1])/s_ab # Eq. 2, Nosofsky (1989)
    q <- 1-p
    t_step <- alpha + 1/s_ab # Eq. 10, Nosofsky & Palmeri (1997)

    for(j in 1:n_trials) {
      ## Simulate the random walk between thresholds A and -B
      rw_count <- 0
      n_steps <- 0

      while(rw_count < A && rw_count > -B) {
        rw_count <- rw_count + sample(c(1, -1), 1, prob = c(p, q))
        n_steps <- n_steps + 1
      }

      # response 1 = category A (upper threshold), 0 = category B
      response <- as.numeric(rw_count >= A)
      results[results$stimulus == i & results$trial == j, c("response", "rt")] <-
        c(response, (n_steps * t_step) * k + mu)
    }
  }

  merge(results, cbind(obs, stimulus = 1:nrow(obs)))
}
58 |
--------------------------------------------------------------------------------
/ebrw/reproduce_nosofsky_palmeri_1997.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Reproduction of model-based analysis by Nosofsky & Palmeri (1997)"
3 | author: "Frederik Aust"
4 | output:
5 | knitrBootstrap::bootstrap_document:
6 | highlight: xcode
7 | theme: flatly
8 | menu: false
9 | ---
10 |
11 | To validate this implementation of the Exemplar-Based Random Walk model (EBRW) for categorization data, I reproduced small parts of the model-based analyses reported in Nosofsky & Palmeri (1997). The original MDS solution was extracted from Figure 4 using
12 | [WebPlotDigitizer](http://arohatgi.info/WebPlotDigitizer/).
13 |
14 | ```{r echo = FALSE, message = FALSE}
15 | source("ebrw_pred.R")
16 | ```
17 |
18 | # Experiment 1, Participant 3
19 |
20 | ```{r data}
21 | # MDS solution
22 | similarities <- data.frame(
23 | saturation = c(-1.891, -0.588, -1.054, 0.224, -1.067, 0.249, 1.482, -0.466, 1.272, 0.217, 0.748, 1.016)
24 | , brightness = c(0.778, 1.059, 0.246, 1.049, -0.630, 0.495, 1.188, -0.693, 0.389, -2.024, -1.500, -0.372)
25 | , cat = c(1, 2, 2, 2, 1, 2, 2, 1, 2, 1, 1, 1)
26 | )
27 |
28 | # Observed and predicted data (Table C1, Nosofsky & Palmeri, 1997)
29 | nosofsky_palmeri <- list()
30 | nosofsky_palmeri$observed$correct <- c(0.975, 1.000, 0.956, 1.000, 1.000, 1.000, 1.000, 0.984, 0.984, 1.000, 1.000, 0.934)
31 | nosofsky_palmeri$observed$rt <- c(780, 709, 962, 661, 841, 749, 641, 834, 834, 697, 779, 1007)
32 | nosofsky_palmeri$predicted$correct <- c(99.84, 99.93, 94.67, 99.99, 99.12, 99.88, 100, 99.26, 99.69, 100, 99.99, 93.78)
33 | nosofsky_palmeri$predicted$rt <- c(766.33, 740.77, 985.17, 706.54, 837.17, 753.02, 692.40, 827.37, 786.71, 684.81, 710.44, 1003.30)
34 |
35 | # Best fitting parameters (Table C2, Nosofsky & Palmeri, 1997)
36 | best_param <- c(w = 0.716, c = 2.196, K = 5, alpha = 0.242, k = 125.07, mu = 491.07)
37 | ```
38 |
39 |
40 |
41 | ## Summary fits
42 |
43 | When comparing the predictions generated by this implementation of EBRW note that the MDS solution the predictions rely on is an approximation of the solution used by Nosofsky & Palmeri (1997). Especially the exact values for Stimulus 3 and 12 were difficult to extract due to the large diameter of the corresponding points in Figure 4. Minor numerical deviations of the prediction pattern are to be expected.
44 |
45 | ```{r prediction}
46 | blocked_predictions <- data.frame()
47 | for(i in 31:150) {
48 | predicted_responses <- ebrw_pred(
49 | best_param
50 | , mem = similarities[rep(seq_len(nrow(similarities)), i-1), ]
51 | , obs = similarities
52 | , rho = 2
53 | )[, c(1, 4)]
54 | blocked_predictions <- rbind(blocked_predictions, cbind(Stimulus = 1:12, block = i, predicted_responses))
55 | }
56 |
57 | average_predictions <- aggregate(cbind(accuracy, mean_rt) ~ Stimulus, blocked_predictions, mean)
58 | ```
59 |
60 |
61 |
62 | ### Categorization responses
63 |
64 | The predicted accuracy of categorization responses closely follows the predictions reported by Nosofsky & Palmeri (1997), $r = `r round(cor(nosofsky_palmeri$predicted$correct, round(average_predictions$accuracy * 100)), 3)`$.
65 |
66 | ```{r categorizations, results = "asis", echo = FALSE}
67 | # Table comparing published accuracy predictions (%) with this implementation
68 | # (fixed typo in displayed column header: "Nofsoky" -> "Nosofsky")
69 | knitr::kable(cbind(
70 |   average_predictions$Stimulus
71 |   , `Predictions by Nosofsky & Palmeri (1997)` = nosofsky_palmeri$predicted$correct
72 |   , `Predictions of this implementation` = round(average_predictions$accuracy * 100, 2)
73 |   , `$\\Delta$` = nosofsky_palmeri$predicted$correct - round(average_predictions$accuracy * 100, 2)
74 | ))
75 | ```
74 |
75 |
76 |
77 | ### Response times
78 |
79 | Again, the predicted response times closely follow the predictions reported by Nosofsky & Palmeri (1997), $r = `r round(cor(nosofsky_palmeri$predicted$rt, round(average_predictions$mean_rt)), 3)`$.
80 |
81 | ```{r response_times, results = "asis", echo = FALSE}
82 | # Table comparing published RT predictions (ms) with this implementation
83 | # (fixed typo in displayed column header: "Nofsoky" -> "Nosofsky")
84 | knitr::kable(cbind(
85 |   average_predictions$Stimulus
86 |   , `Predictions by Nosofsky & Palmeri (1997)` = nosofsky_palmeri$predicted$rt
87 |   , `Predictions of this implementation` = round(average_predictions$mean_rt, 2)
88 |   , `$\\Delta$` = nosofsky_palmeri$predicted$rt - round(average_predictions$mean_rt, 2)
89 | ))
90 | ```
89 |
90 | # EBRW as generalization of GCM
91 |
92 | As explained by Nosofsky & Palmeri (1997, p. 270) the response rule implemented in EBRW is a generalization of GCM. The models' predictions of response probabilities are identical if $K = A = B = 1$.
93 |
94 | ```{r}
95 | source("../gcm/gcm_pred.r") # fixed stale path: GCM code lives in gcm/, not generalized_context_model/
96 | 
97 | # EBRW with K = 1 and no residual/decision-time components reduces to GCM's choice rule
98 | ebrw_accuracy <- ebrw_pred(
99 |   c(w = 0.716, c = 2.196, K = 1, alpha = 0, k = 1, mu = 0)
100 |   , mem = similarities
101 |   , obs = similarities
102 |   , rho = 2
103 | )[, "accuracy"]
104 | 
105 | gcm_pa <- gcm_pred(
106 |   c(w = 0.716, c = 2.196, b = 0.5)
107 |   , mem = similarities
108 |   , obs = similarities[, -3]
109 |   , rho = 2
110 |   , p = 1
111 | )[, 1]
112 | 
113 | # gcm_pa is the probability of an "A" response; convert to accuracy for category-2 items
114 | gcm_accuracy <- gcm_pa
115 | gcm_accuracy[similarities$cat == 2] <- 1 - gcm_pa[similarities$cat == 2]
116 | ```
115 |
116 | ```{r echo = FALSE, results = "asis"}
117 | knitr::kable(cbind(
118 | average_predictions$Stimulus
119 | , `EBRW` = round(ebrw_accuracy * 100, 2)
120 | , `GCM` = round(gcm_accuracy * 100, 2)
121 | , `$\\Delta$` = round((ebrw_accuracy - gcm_accuracy) * 100, 2)
122 | ))
123 | ```
124 |
125 |
126 |
127 | # References
128 | Nosofsky, R. M., & Palmeri, T. J. (1997). An exemplar-based random walk model of speeded classification. *Psychological Review*, 104(2), 266–300. doi:[10.1037/0033-295X.104.2.266](http://doi.org/10.1037/0033-295X.104.2.266)
129 |
130 |
--------------------------------------------------------------------------------
/gcm/bayesian/GCM_agg.stan:
--------------------------------------------------------------------------------
1 | // Generalized Context Model (aggregated categorization data)
2 | data {
3 |   real rho;
4 |   int p;
5 |   int ntests;
6 |   int nmemory;
7 |   int ndim;
8 |   int ntrials[ntests];
9 |   int category[nmemory];
10 |   int y[ntests];
11 |   matrix[ntests,2] tests;   // NOTE: hard-coded to 2 stimulus dimensions (see wk below)
12 |   matrix[nmemory,2] memory;
13 | }
14 | 
15 | parameters {
16 |   real<lower = 0, upper = 5> c; // sensitivity; bounds must match the uniform(0, 5) prior
17 |   real<lower = 0, upper = 1> w; // attention weight on dimension 1; beta prior requires [0, 1]
18 |   real<lower = 0, upper = 1> b; // response bias toward category 1
19 | }
20 | 
21 | transformed parameters {
22 |   vector[ntests] r;
23 |   vector[ndim] wk;
24 |   real tmp1[ntests,nmemory,2];
25 |   real tmp2[ntests,nmemory,2];
26 |   real numerator[ntests,nmemory];
27 |   real denominator[ntests,nmemory];
28 | 
29 |   wk[1] = w;
30 |   wk[2] = 1 - w;
31 | 
32 |   for (i in 1:ntests) {
33 |     for (j in 1:nmemory) {
34 |       real s;
35 |       vector[ndim] d;
36 | 
37 |       // Similarities: exponential decay of attention-weighted Minkowski distance
38 |       for(k in 1:ndim) {
39 |         d[k] = wk[k] * fabs(tests[i, k] - memory[j, k])^rho;
40 |       }
41 |       s = exp(-c * ((sum(d) + 0.000001)^(1/rho))^p); // small constant avoids 0^(1/rho) gradient problems
42 | 
43 |       // Decision Probabilities (Luce choice ratio over summed similarities)
44 |       tmp1[i,j,1] = b * s;
45 |       tmp1[i,j,2] = 0;
46 |       tmp2[i,j,1] = 0;
47 |       tmp2[i,j,2] = (1 - b) * s;
48 | 
49 |       numerator[i, j] = tmp1[i,j,category[j]];
50 |       denominator[i, j] = tmp1[i,j,category[j]] + tmp2[i,j,category[j]];
51 |     }
52 |     r[i] = sum(numerator[i, ]) / sum(denominator[i, ]);
53 |   }
54 | }
55 | 
56 | model {
57 |   // Prior
58 |   c ~ uniform(0, 5);
59 |   w ~ beta(1, 1);
60 |   b ~ beta(1, 1);
61 | 
62 |   // Decision Data
63 |   y ~ binomial(ntrials, r);
64 | }
65 | 
66 | generated quantities {
67 |   vector[ntests] pred_y; // posterior predictive response counts
68 | 
69 |   for (i in 1:ntests) {
70 |     pred_y[i] = binomial_rng(ntrials[i], r[i]);
71 |   }
72 | }
73 |
74 |
--------------------------------------------------------------------------------
/gcm/bayesian/GCM_agg.txt:
--------------------------------------------------------------------------------
1 | # Generalized Context Model (JAGS; aggregated categorization data)
2 | data {
3 |   ntests <- dim(tests)
4 |   nmemory <- dim(memory)
5 | }
6 | 
7 | model {
8 |   # Decision Data
9 |   for (i in 1:ntests[1]) {
10 |     y[i] ~ dbin(r[i], ntrials[i])      # observed category-1 response counts
11 |     pred_y[i] ~ dbin(r[i], ntrials[i]) # posterior predictive counts
12 |   }
13 | 
14 |   # Decision Probabilities (Luce choice ratio over summed similarities)
15 |   for (i in 1:ntests[1]) {
16 |     r[i] <- sum(numerator[i, ]) / sum(denominator[i, ])
17 |     for (j in 1:nmemory[1]) {
18 |       tmp1[i, j, 1] <- b * s[i, j]       # category-1 exemplars contribute to the numerator
19 |       tmp1[i, j, 2] <- 0
20 |       tmp2[i, j, 1] <- 0
21 |       tmp2[i, j, 2] <- (1 - b) * s[i, j] # category-2 exemplars contribute to the denominator only
22 |       numerator[i, j] <- tmp1[i, j, category[j]]
23 |       denominator[i, j] <- tmp1[i, j, category[j]] + tmp2[i, j, category[j]]
24 |     }
25 |   }
26 | 
27 |   # Similarities: exponential decay of attention-weighted Minkowski distance
28 |   for (i in 1:ntests[1]) {
29 |     for (j in 1:nmemory[1]) {
30 |       s[i, j] <- exp(-c * (sum(wk * abs(tests[i, ] - memory[j, ])^rho)^(1/rho))^p)
31 |     }
32 |   }
33 | 
34 |   wk[1] <- w   # attention weight on dimension 1
35 |   wk[2] <- 1-w # weights sum to 1 (model assumes exactly 2 dimensions)
36 |   #wk[1:2] <- c(w, 1-w) # Requires JAGS 4.0.0
37 | 
38 |   # Priors
39 |   c ~ dunif(0, 5)
40 |   w ~ dbeta(1, 1)
41 |   b ~ dbeta(1, 1)
42 | }
43 |
--------------------------------------------------------------------------------
/gcm/bayesian/GCM_agg_recognition.stan:
--------------------------------------------------------------------------------
1 | // Generalized Context Model (aggregated old-new recognition data; Dirichlet attention prior)
2 | data {
3 |   real rho;
4 |   int p;
5 |   int ntests;
6 |   int nmemory;
7 |   int ndim;
8 |   int ntrials[ntests];
9 |   int y[ntests];
10 |   matrix[ntests, ndim] tests;
11 |   matrix[nmemory, ndim] memory;
12 | }
13 | 
14 | transformed data {
15 |   vector[ndim] alpha; // uniform Dirichlet concentration
16 | 
17 |   for(i in 1:ndim) {
18 |     alpha[i] = 1;
19 |   }
20 | }
21 | 
22 | 
23 | parameters {
24 |   real<lower = 0, upper = 10> c; // sensitivity; bounds must match the uniform(0, 10) prior
25 |   simplex[ndim] w;               // attention weights (sum to 1)
26 |   real<lower = 0, upper = 5> k;  // recognition criterion; bounds must match the uniform(0, 5) prior
27 | }
28 | 
29 | transformed parameters {
30 |   vector[ntests] r;
31 | 
32 |   // Decision Probabilities
33 |   for (i in 1:ntests) {
34 |     vector[nmemory] s;
35 |     real f;
36 | 
37 |     for (j in 1:nmemory) {
38 |       vector[ndim] d;
39 | 
40 |       // Similarities
41 |       for(l in 1:ndim) {
42 |         d[l] = w[l] * fabs(tests[i, l] - memory[j, l])^rho;
43 |       }
44 |       s[j] = exp(-c * ((sum(d) + 0.000001)^(1/rho))^p); // small constant avoids 0^(1/rho) gradient problems
45 |     }
46 | 
47 |     f = sum(s);          // summed familiarity
48 |     r[i] = f / (f + k);  // probability of an "old" response
49 |   }
50 | }
51 | 
52 | 
53 | model {
54 |   // Priors
55 |   c ~ uniform(0, 10);
56 |   k ~ uniform(0, 5);
57 |   w ~ dirichlet(alpha);
58 | 
59 |   // Decision Data
60 |   y ~ binomial(ntrials, r);
61 | }
62 | 
63 | generated quantities {
64 |   int pred_y[ntests]; // posterior predictive response counts
65 | 
66 |   for (i in 1:ntests) {
67 |     pred_y[i] = binomial_rng(ntrials[i], r[i]);
68 |   }
69 | }
71 |
--------------------------------------------------------------------------------
/gcm/bayesian/GCM_agg_recognition.txt:
--------------------------------------------------------------------------------
1 | # Generalized Context Model
2 | data {
3 | ntests <- dim(tests)
4 | nmemory <- dim(memory)
5 | ndim <- ntests[2]
6 | }
7 |
8 | model {
9 | # Decision Data
10 | for (i in 1:ntests[1]) {
11 | y[i] ~ dbin(r[i], ntrials[i])
12 | pred_y[i] ~ dbin(r[i], ntrials[i])
13 | }
14 |
15 | # Decision Probabilities
16 | for (i in 1:ntests[1]) {
17 | f[i] <- sum(s[i, ])
18 | r[i] <- f[i] / (f[i] + k)
19 | }
20 |
21 | # Similarities
22 | for (i in 1:ntests[1]) {
23 | for (j in 1:nmemory[1]) {
24 | s[i, j] <- exp(-c * (sum(w * abs(tests[i, ] - memory[j, ])^rho)^(1/rho))^p)
25 | }
26 | }
27 |
28 | # Priors
29 | c ~ dunif(0, 10)
30 | k ~ dunif(0, 5)
31 |
32 | for(i in 1:ndim) { # Nice but non-trivial autocorrelation
33 | alpha[i] <- 1
34 | }
35 | w ~ ddirch(alpha)
36 | }
37 |
--------------------------------------------------------------------------------
/gcm/bayesian/GCM_agg_recognition2.stan:
--------------------------------------------------------------------------------
1 | // Generalized Context Model (aggregated old-new recognition data; softmax attention weights)
2 | data {
3 |   real rho;
4 |   int p;
5 |   int ntests;
6 |   int nmemory;
7 |   int ndim;
8 |   int ntrials[ntests];
9 |   int y[ntests];
10 |   matrix[ntests, ndim] tests;
11 |   matrix[nmemory, ndim] memory;
12 | }
13 | 
14 | transformed data {
15 |   vector[ndim] alpha; // unused here; leftover from the Dirichlet-prior variant
16 | 
17 |   for(i in 1:ndim) {
18 |     alpha[i] = 1;
19 |   }
20 | }
21 | 
22 | 
23 | parameters {
24 |   real<lower = 0, upper = 10> c; // sensitivity; bounds must match the uniform(0, 10) prior
25 |   real w_phi[ndim];              // unconstrained attention weights (softmax-transformed below)
26 |   real<lower = 0, upper = 5> k;  // recognition criterion; bounds must match the uniform(0, 5) prior
27 | }
28 | 
29 | transformed parameters {
30 |   vector[ntests] r;
31 |   vector[ndim] exp_w_phi;
32 |   vector[ndim] w;
33 | 
34 |   for(i in 1:ndim) { // see http://andrewgelman.com/2009/04/29/conjugate_prior/
35 |     exp_w_phi[i] = exp(w_phi[i]);
36 |   }
37 |   for(i in 1:ndim) {
38 |     w[i] = exp_w_phi[i] / sum(exp_w_phi); // softmax: weights sum to 1
39 |   }
40 | 
41 |   // Decision Probabilities
42 |   for (i in 1:ntests) {
43 |     vector[nmemory] s;
44 |     real f;
45 | 
46 |     for (j in 1:nmemory) {
47 |       vector[ndim] d;
48 | 
49 |       // Similarities
50 |       for(l in 1:ndim) {
51 |         d[l] = w[l] * fabs(tests[i, l] - memory[j, l])^rho;
52 |       }
53 |       s[j] = exp(-c * ((sum(d) + 0.000001)^(1/rho))^p); // small constant avoids 0^(1/rho) gradient problems
54 |     }
55 | 
56 |     f = sum(s);          // summed familiarity
57 |     r[i] = f / (f + k);  // probability of an "old" response
58 |   }
59 | }
60 | 
61 | 
62 | model {
63 |   // Priors
64 |   c ~ uniform(0, 10);
65 |   k ~ uniform(0, 5);
66 | 
67 |   for(i in 1:ndim) { // see http://andrewgelman.com/2009/04/29/conjugate_prior/
68 |     w_phi[i] ~ normal(0, 1);
69 |   }
70 | 
71 |   // Decision Data
72 |   y ~ binomial(ntrials, r);
73 | }
74 | 
75 | generated quantities {
76 |   int pred_y[ntests]; // posterior predictive response counts
77 | 
78 |   for (i in 1:ntests) {
79 |     pred_y[i] = binomial_rng(ntrials[i], r[i]);
80 |   }
81 | }
83 |
--------------------------------------------------------------------------------
/gcm/bayesian/GCM_agg_recognition2.txt:
--------------------------------------------------------------------------------
1 | # Generalized Context Model
2 | data {
3 | ntests <- dim(tests)
4 | nmemory <- dim(memory)
5 | ndim <- ntests[2]
6 | }
7 |
8 | model {
9 | # Decision Data
10 | for (i in 1:ntests[1]) {
11 | y[i] ~ dbin(r[i], ntrials[i])
12 | pred_y[i] ~ dbin(r[i], ntrials[i])
13 | }
14 |
15 | # Decision Probabilities
16 | for (i in 1:ntests[1]) {
17 | f[i] <- sum(s[i, ])
18 | r[i] <- f[i] / (f[i] + k)
19 | }
20 |
21 | # Similarities
22 | for (i in 1:ntests[1]) {
23 | for (j in 1:nmemory[1]) {
24 | s[i, j] <- exp(-c * (sum(w * abs(tests[i, ] - memory[j, ])^rho)^(1/rho))^p)
25 | }
26 | }
27 |
28 | # Priors
29 | c ~ dunif(0, 10)
30 | k ~ dunif(0, 5)
31 |
32 | for(i in 1:ndim) { # see http://andrewgelman.com/2009/04/29/conjugate_prior/ Much better than Dirichlet but slower
33 | w_phi[i] ~ dnorm(0, 1)
34 | exp_w_phi[i] <- exp(w_phi[i])
35 | w[i] <- exp_w_phi[i] / sum(exp_w_phi)
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/gcm/bayesian/GCM_recognition.txt:
--------------------------------------------------------------------------------
1 | # Generalized Context Model for Recognition Memory with Individual Differences
2 | model {
3 |   # Decision Data
4 |   for(i in 1:n_subjects) {
5 |     for(j in 1:n_tests[1]) {
6 |       y[i, j] ~ dbin(r[i, j], n_trials[i]) # fixed: was y[i] ~ dbin(r[i], ...), dropping the test index j
7 |     }
8 |   }
9 | 
10 |   # Decision Probabilities
11 |   for(i in 1:n_subjects) {
12 |     for(j in 1:n_tests[1]) { # fixed: inner loop reused index i although the body indexes by j
13 |       f[i, j] <- sum(s[i, j, ]) + f_bg[i]
14 |       r[i, j] <- f[i, j] / (f[i, j] + k[i])
15 |     }
16 |   }
17 | 
18 |   # Similarities
19 |   for(i in 1:n_subjects) {
20 |     for(j in 1:n_tests[1]) {
21 |       for(m in 1:n_memory[1]) { # fixed: loop index renamed from k, which clashed with the parameter vector k
22 |         s[i, j, m] <- exp(-c[i] * (sum(wj[i, ] * abs(tests[i, j, ] - memory[i, m, ])^rho)^(1/rho))^p)
23 |       }
24 |     }
25 |   }
26 | 
27 |   # Priors
28 |   for(i in 1:n_subjects) {
29 |     for(j in 1:(n_dim - 1)) {
30 |       w[i, j] ~ dnorm(0, 1)
31 |     }
32 |     c[i] ~ dunif(0, 25)     # sensitivity
33 |     k[i] ~ dunif(0, 10)     # recognition criterion
34 |     f_bg[i] ~ dunif(0, 1)   # background familiarity
35 |   }
36 | 
37 |   # Rescale parameters: map n_dim - 1 probit weights onto the simplex
38 |   for(i in 1:n_subjects) {
39 |     for(j in 1:(n_dim - 1)) {
40 |       wj[i, j] <- phi(qnorm(1/n_dim, 0, 1) + w[i, j])
41 |     }
42 |     wj[i, n_dim] <- 1 - sum(wj[i, 1:(n_dim - 1)])
43 |   }
44 | }
45 |
--------------------------------------------------------------------------------
/gcm/bayesian/attention_weight_prior_comparison.R:
--------------------------------------------------------------------------------
1 | library("runjags")
2 | library("rstan")
3 |
4 | trials <- 3 * 50
5 |
6 | tests <- c()
7 | data <- c()
8 | for(i in 1:3) {
9 | tests <- rbind(tests, read.csv2(paste0("../data/shin_nosofsky_1992_cat", i, ".csv")))
10 | data <- rbind(data, read.csv2(paste0("../data/shin_nosofsky_1992_responses_cat", i, ".csv")))
11 | }
12 | data$response <- round(data$Observed * trials)
13 |
14 | recognition_data <- list(
15 | y = data$response
16 | , tests = as.matrix(tests[, -c(1, 5:7)])
17 | , memory = as.matrix(subset(tests, Exemplar %in% paste0("O", 1:6))[, -c(1, 5:7)])
18 | , ntrials = rep(trials, nrow(tests))
19 | , p = 1 # Shape of relationship between similarity and psychological distance
20 | , rho = 2 # Power of the Minkowski distance
21 | )
22 |
23 | poi <- c("c", "w", "k", "pred_y")
24 |
25 |
26 | # Fit Dirichlet prior -----------------------------------------------------
27 |
28 | model1 <- system.time(
29 | gcm_samples1 <- run.jags(
30 | model = "GCM_agg_recognition.txt"
31 | , monitor = poi
32 | , data = recognition_data
33 | , n.chains = 3
34 | , sample = 5e4
35 | , burnin = 100
36 | , thin = 1
37 | , method = "rjparallel"
38 | )
39 | )
40 |
41 |
42 | # Fit Gelman prior --------------------------------------------------------
43 |
44 | model2 <- system.time(
45 | gcm_samples2 <- run.jags(
46 | model = "GCM_agg_recognition2.txt"
47 | , monitor = poi
48 | , data = recognition_data
49 | , n.chains = 3
50 | , sample = 5e4
51 | , burnin = 100
52 | , thin = 1
53 | , method = "rjparallel"
54 | )
55 | )
56 |
57 |
58 | model1 - model2
59 |
60 |
61 |
62 | # STAN --------------------------------------------------------------------
63 |
64 | memory <- as.matrix(subset(tests, Exemplar %in% paste0("O", 1:6))[, -c(1, 5:7)])
65 |
66 | recognition_data <- list(
67 | y = data$response
68 | , tests = as.matrix(tests[, -c(1, 5:7)])
69 | , memory = memory
70 | , ntrials = rep(trials, nrow(tests))
71 | , p = 1 # Shape of relationship between similarity and psychological distance
72 | , rho = 2 # Power of the Minkowski distance
73 | , ntests = dim(tests)[1]
74 | , nmemory = dim(memory)[1]
75 | , ndim = dim(memory)[2]
76 | )
77 |
78 | # Fit Dirichlet prior -----------------------------------------------------
79 |
80 | model3 <- system.time(
81 | gcm_samples3 <- stan(
82 | file = "GCM_agg_recognition.stan"
83 | , pars = poi
84 | , data = recognition_data
85 | , chains = 3
86 | , iter = 5e4 + 1100
87 | , warmup = 1100
88 | , thin = 1
89 | , cores = 3
90 | )
91 | )
92 |
93 | model1 - model3
94 |
95 | # Fit Gelman prior --------------------------------------------------------
96 |
97 | model4 <- system.time(
98 | gcm_samples4 <- stan(
99 | file = "GCM_agg_recognition2.stan"
100 | , pars = poi
101 | , data = recognition_data
102 | , chains = 3
103 | , iter = 5e4 + 1100
104 | , warmup = 1100
105 | , thin = 1
106 | , cores = 3
107 | )
108 | )
109 |
110 | model2 - model4
111 |
--------------------------------------------------------------------------------
/gcm/bayesian/reproduce_nosofsky_1989_bayes.rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Reproduction of model-based analyses by Nosofsky (1989)"
3 | author: "Frederik Aust"
4 | output:
5 | knitrBootstrap::bootstrap_document:
6 | highlight: xcode
7 | theme: flatly
8 | menu: false
9 | ---
10 |
11 | To validate this implementation of the Bayesian Generalized Context Model (GCM), I reproduced small parts of the model-based analyses reported in Nosofsky (1989). The original MDS solutions and response data were provided by Robert Nosofsky (s. [note on data](../data/README.html)).
12 |
13 | ```{r echo = FALSE, message = FALSE}
14 | library("runjags")
15 | library("vioplot")
16 | ```
17 |
18 | ```{r}
19 | similarities <- read.csv2("../data/nosofsky_1989_similarities.csv")[, -1]
20 | similarities$size <- c(0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 2, 2, 0, 2, 0, 0)
21 | similarities$angle <- c(0, 1, 2, 0, 0, 1, 2, 0, 1, 0, 0, 2, 0, 0, 2, 0)
22 |
23 | data <- read.csv2("../data/nosofsky_1989_responses.csv")[, -1]
24 | data$n_size <- rowSums(data[, 1:2])
25 | data$n_angle <- rowSums(data[, 3:4])
26 | ```
27 |
28 |
29 |
30 | # Unconstrained GCM fits for the size condition
31 | The original parameter estimates reported in Table 5 by Nosofsky (1989) are $w_1 = .10$, $c = 1.60$, and $b_1 = .50$.
32 |
33 | ```{r message = FALSE, results = "hide"}
34 | tests <- as.matrix(similarities[, 1:2])
35 | memory <- as.matrix(subset(similarities, size != 0)[, 1:2])
36 | size_data <- list(
37 | y = data$Cat.1.s
38 | , tests = tests
39 | , memory = memory
40 | , ntrials = data$n_size
41 | , p = 2 # Shape of relationship between similarity and psychological distance
42 | , rho = 2 # Power of the Minkowski distance
43 | , category = unlist(subset(similarities, size != 0)[, 3])
44 | )
45 |
46 | init_values <- list(
47 | list(c = 4, w = 0.5, b = 0.5)
48 | , list(c = 2, w = 0.8, b = 0.3)
49 | , list(c = 3, w = 0.2, b = 0.7)
50 | )
51 |
52 | poi <- c(unique(unlist(lapply(init_values, names))), "pred_y")
53 |
54 | # Sample
55 | size_samples <- run.jags(
56 | model = "GCM_agg.txt"
57 | , monitor = poi
58 | , inits = init_values
59 | , data = size_data
60 | , n.chains = 3
61 | , sample = 5e4
62 | , burnin = 100
63 | , thin = 2
64 | , method = "rjparallel"
65 | )
66 | ```
67 |
68 | ```{r echo = FALSE, results = "asis"}
69 | knitr::kable(summary(size_samples)[c("c", "w", "b"), ])
70 | ```
71 |
72 |
73 |
74 |
75 | # Unconstrained GCM fits for the angle condition
76 | The original parameter estimates reported in Table 5 by Nosofsky (1989) are $w_1 = .98$, $c = 3.20$, and $b_1 = .43$.
77 |
78 | ```{r message = FALSE, results = "hide"}
79 | memory <- as.matrix(subset(similarities, angle != 0)[, 1:2])
80 | angle_data <- list(
81 | y = data$Cat.1.a
82 | , tests = tests
83 | , memory = memory
84 | , ntrials = data$n_angle
85 | , p = 2 # Shape of relationship between similarity and psychological distance
86 | , rho = 2 # Power of the Minkowski distance
87 | , category = unlist(subset(similarities, angle != 0)[, 4])
88 | )
89 |
90 | # Sample
91 | angle_samples <- run.jags(
92 | model = "GCM_agg.txt"
93 | , monitor = poi
94 | , inits = init_values
95 | , data = angle_data
96 | , n.chains = 3
97 | , sample = 5e4
98 | , burnin = 100
99 | , thin = 2
100 | , method = "rjparallel"
101 | )
102 | ```
103 |
104 | ```{r echo = FALSE, results = "asis"}
105 | knitr::kable(summary(angle_samples)[c("c", "w", "b"), ])
106 | ```
107 |
108 |
109 |
110 | # Predictions
111 | The resulting fits allow for a close partial reproduction of Nosofsky's Figure 6 (1989) plotting observed against predicted proportions of category 1 responses for each stimulus.
112 |
113 | ```{r echo = FALSE, warning = FALSE}
114 | par(pty = "s")
115 | plot(NA, NA
116 | , xlim = c(0, 1)
117 | , ylim = c(0, 1)
118 | , xlab = "Observed probability"
119 | , ylab = "Predicted probability"
120 | , pch = 17
121 | , asp = 1
122 | , las = 1
123 | )
124 |
125 | all_size_samples <- coda::as.mcmc(size_samples)
126 |
127 | for(i in 1:nrow(tests)) {
128 | vioplot(
129 | all_size_samples[, paste0("pred_y[", i, "]")] / data$n_size[i]
130 | , at = (data$Cat.1.s / data$n_size)[i]
131 | , col = scales::alpha(grey(0.7), 0.35)
132 | , border = FALSE
133 | , rectCol = grey(0.5)
134 | , colMed = "black"
135 | , pchMed = 17
136 | , add = TRUE
137 | , wex = 0.15
138 | )
139 | }
140 |
141 | all_angle_samples <- coda::as.mcmc(angle_samples)
142 |
143 | for(i in 1:nrow(tests)) {
144 | vioplot(
145 | all_angle_samples[, paste0("pred_y[", i, "]")] / data$n_angle[i]
146 | , at = (data$Cat.1.a / data$n_angle)[i]
147 | , col = scales::alpha(grey(0.7), 0.35)
148 | , border = FALSE
149 | , rectCol = grey(0.5)
150 | , colMed = "black"
151 | , pchMed = 0
152 | , add = TRUE
153 | , wex = 0.15
154 | )
155 | }
156 |
157 | abline(0, 1)
158 | legend(
159 | "topleft"
160 | , legend = c("Size", "Angle")
161 | , pch = c(17, 0)
162 | , inset = 0.1
163 | , bty = "n"
164 | )
165 | ```
166 |
167 |
168 |
169 | # References
170 | Nosofsky, R. M. (1989). Further tests of an exemplar-similarity approach to relating identification and categorization. *Perception & Psychophysics*, 45(4), 279–290. doi: [10.3758/BF03204942](http://dx.doi.org/10.3758/BF03204942)
171 |
--------------------------------------------------------------------------------
/gcm/bayesian/reproduce_nosofsky_1989_bayes_stan.rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Reproduction of model-based analyses by Nosofsky (1989)"
3 | author: "Frederik Aust"
4 | output:
5 | knitrBootstrap::bootstrap_document:
6 | highlight: xcode
7 | theme: flatly
8 | menu: false
9 | ---
10 |
11 | To validate this implementation of the Bayesian Generalized Context Model (GCM), I reproduced small parts of the model-based analyses reported in Nosofsky (1989). The original MDS solutions and response data were provided by Robert Nosofsky (s. [note on data](../data/README.html)).
12 |
13 | ```{r echo = FALSE, message = FALSE}
14 | library("rstan")
15 | library("vioplot")
16 | ```
17 |
18 | ```{r}
19 | similarities <- read.csv2("../data/nosofsky_1989_similarities.csv")[, -1]
20 | similarities$size <- c(0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 2, 2, 0, 2, 0, 0)
21 | similarities$angle <- c(0, 1, 2, 0, 0, 1, 2, 0, 1, 0, 0, 2, 0, 0, 2, 0)
22 |
23 | data <- read.csv2("../data/nosofsky_1989_responses.csv")[, -1]
24 | data$n_size <- rowSums(data[, 1:2])
25 | data$n_angle <- rowSums(data[, 3:4])
26 | ```
27 |
28 |
29 |
30 | # Unconstrained GCM fits for the size condition
31 | The original parameter estimates reported in Table 5 by Nosofsky (1989) are $w_1 = .10$, $c = 1.60$, and $b_1 = .50$.
32 |
33 | ```{r message = FALSE, results = "hide"}
34 | tests <- as.matrix(similarities[, 1:2])
35 | memory <- as.matrix(subset(similarities, size != 0)[, 1:2])
36 | size_data <- list(
37 | y = data$Cat.1.s
38 | , tests = tests
39 | , memory = memory
40 | , ntests = dim(tests)[1]
41 | , nmemory = dim(memory)[1]
42 | , ndim = dim(tests)[2]
43 | , ntrials = data$n_size
44 | , p = 2 # Shape of relationship between similarity and psychological distance
45 | , rho = 2 # Power of the Minkowski distance
46 | , category = unlist(subset(similarities, size != 0)[, 3])
47 | )
48 |
49 | init_values <- list(
50 | list(c = 4, w = 0.5, b = 0.5)
51 | , list(c = 2, w = 0.8, b = 0.3)
52 | , list(c = 3, w = 0.2, b = 0.7)
53 | )
54 |
55 | poi <- c(unique(unlist(lapply(init_values, names))), "pred_y")
56 |
57 | # Sample
58 | size_samples <- stan(
59 | file = "GCM_agg.stan"
60 | , pars = poi
61 | , init = init_values
62 | , data = size_data
63 | , chains = 3
64 | , iter = 5e4
65 | , thin = 2
66 | , cores = 3
67 | , control = list(adapt_delta = 0.9)
68 | )
69 | ```
70 |
71 | ```{r echo = FALSE, results = "asis"}
72 | knitr::kable(summary(size_samples)$summary[c("c", "w", "b"), ])
73 | ```
74 |
75 |
76 |
77 |
78 | # Unconstrained GCM fits for the angle condition
79 | The original parameter estimates reported in Table 5 by Nosofsky (1989) are $w_1 = .98$, $c = 3.20$, and $b_1 = .43$.
80 |
81 | ```{r message = FALSE, results = "hide"}
82 | memory <- as.matrix(subset(similarities, angle != 0)[, 1:2])
83 | angle_data <- list(
84 | y = data$Cat.1.a
85 | , tests = tests
86 | , memory = memory
87 | , ntests = dim(tests)[1]
88 | , nmemory = dim(memory)[1]
89 | , ndim = dim(tests)[2]
90 | , ntrials = data$n_angle
91 | , p = 2 # Shape of relationship between similarity and psychological distance
92 | , rho = 2 # Power of the Minkowski distance
93 | , category = unlist(subset(similarities, angle != 0)[, 4])
94 | )
95 |
96 | # Sample
97 | angle_samples <- stan(
98 | file = "GCM_agg.stan"
99 | , pars = poi
100 | , init = init_values
101 | , data = angle_data
102 | , chains = 3
103 | , iter = 5e4
104 | , thin = 2
105 | , cores = 3
106 | , control = list(adapt_delta = 0.9)
107 | )
108 | ```
109 |
110 | ```{r echo = FALSE, results = "asis"}
111 | knitr::kable(summary(angle_samples)$summary[c("c", "w", "b"), ])
112 | ```
113 |
114 |
115 |
116 | # Predictions
117 | The resulting fits allow for a close partial reproduction of Nosofsky's Figure 6 (1989) plotting observed against predicted proportions of category 1 responses for each stimulus.
118 |
119 | ```{r echo = FALSE, warning = FALSE}
120 | par(pty = "s")
121 | plot(NA, NA
122 | , xlim = c(0, 1)
123 | , ylim = c(0, 1)
124 | , xlab = "Observed probability"
125 | , ylab = "Predicted probability"
126 | , pch = 17
127 | , asp = 1
128 | , las = 1
129 | )
130 |
131 | all_size_samples <- rstan::extract(size_samples, "pred_y")$pred_y
132 |
133 | for(i in 1:nrow(tests)) {
134 | vioplot(
135 | all_size_samples[, i] / data$n_size[i]
136 | , at = (data$Cat.1.s / data$n_size)[i]
137 | , col = scales::alpha(grey(0.7), 0.35)
138 | , border = FALSE
139 | , rectCol = grey(0.5)
140 | , colMed = "black"
141 | , pchMed = 17
142 | , add = TRUE
143 | , wex = 0.15
144 | )
145 | }
146 |
147 | all_angle_samples <- rstan::extract(angle_samples, "pred_y")$pred_y
148 |
149 | for(i in 1:nrow(tests)) {
150 | vioplot(
151 | all_angle_samples[, i] / data$n_angle[i]
152 | , at = (data$Cat.1.a / data$n_angle)[i]
153 | , col = scales::alpha(grey(0.7), 0.35)
154 | , border = FALSE
155 | , rectCol = grey(0.5)
156 | , colMed = "black"
157 | , pchMed = 0
158 | , add = TRUE
159 | , wex = 0.15
160 | )
161 | }
162 |
163 | abline(0, 1)
164 | legend(
165 | "topleft"
166 | , legend = c("Size", "Angle")
167 | , pch = c(17, 0)
168 | , inset = 0.1
169 | , bty = "n"
170 | )
171 | ```
172 |
173 |
174 |
175 | # References
176 | Nosofsky, R. M. (1989). Further tests of an exemplar-similarity approach to relating identification and categorization. *Perception & Psychophysics*, 45(4), 279–290. doi: [10.3758/BF03204942](http://dx.doi.org/10.3758/BF03204942)
177 |
--------------------------------------------------------------------------------
/gcm/bayesian/reproduce_shin_nosofsky_1992_bayes.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Reproduction of model-based analyses by Shin & Nosofsky (1992)"
3 | author: "Frederik Aust"
4 | output:
5 | knitrBootstrap::bootstrap_document:
6 | highlight: xcode
7 | theme: flatly
8 | menu: false
9 | ---
10 |
11 | To validate this implementation of the Generalized Context Model (GCM) for recognition data, I reproduced small parts of the model-based analyses reported in Shin & Nosofsky (1992). The original MDS solutions and response data were provided by Shin & Nosofsky (1992; see [note on data](../data/README.html)).
12 |
13 | ```{r echo = FALSE, message = FALSE}
14 | library("runjags")
15 | library("vioplot")
16 | ```
17 |
18 | # Experiment 1
19 |
20 | ```{r}
21 | trials <- 3 * 50
22 |
23 | tests <- c()
24 | data <- c()
25 | for(i in 1:3) {
26 | tests <- rbind(tests, read.csv2(paste0("../data/shin_nosofsky_1992_cat", i, ".csv")))
27 | data <- rbind(data, read.csv2(paste0("../data/shin_nosofsky_1992_responses_cat", i, ".csv")))
28 | }
29 | data$response <- round(data$Observed * trials)
30 | ```
31 |
32 |
33 |
34 | ## Summary fits
35 |
36 | ```{r message = FALSE, results = "hide"}
37 | recognition_data <- list(
38 | y = data$response
39 | , tests = as.matrix(tests[, -1])
40 | , memory = as.matrix(subset(tests, Exemplar %in% paste0("O", 1:6))[, -1])
41 | , ntrials = rep(trials, nrow(tests))
42 | , p = 1 # Shape of relationship between similarity and psychological distance
43 | , rho = 2 # Power of the Minkowski distance
44 | )
45 |
46 | init_values <- list(
47 | list(c = 4, w_phi = c(-0.4, -2.2, -1, 1.3, -0.2, 1.1), k = 0.5)
48 | , list(c = 2, w_phi = c(1.4, 1.9, -0.1, -0.7, 0.8, 1.9), k = 0.3)
49 | , list(c = 3, w_phi = c(1, -0.4, -1.5, -0.6, 0.8, -1.8), k = 0.7)
50 | )
51 |
52 | poi <- c("c", "w", "k", "pred_y")
53 |
54 | # Sample
55 | gcm_samples <- run.jags(
56 | model = "GCM_agg_recognition2.txt"
57 | , monitor = poi
58 | , inits = init_values
59 | , data = recognition_data
60 | , n.chains = 3
61 | , sample = 5e4
62 | , burnin = 100
63 | , thin = 5
64 | , method = "rjparallel"
65 | )
66 | ```
67 |
68 | In Table 5 Shin & Nosofsky (1992) report the following estimates for the summary fits of the old-new recognition data in experiment 1: $w_1 = .006$, $w_2 = .084$, $w_3 = .102$, $w_4 = .392$, $w_5 = .218$, $c = 4.905$, $k = 0.280$
69 |
70 | ```{r echo = FALSE, results = "asis"}
71 | knitr::kable(summary(gcm_samples)[c("c", paste0("w[", 1:6, "]"), "k"), ])
72 | ```
73 |
74 | The resulting fits closely resemble those reported in the paper.
75 |
76 |
77 |
78 | ## Predictions
79 | The resulting estimates allow for a close partial reproduction of Shin & Nosofsky's Figure 2A (1992) plotting observed against predicted proportions of old responses for each stimulus.
80 |
81 | ```{r echo = FALSE, warning = FALSE}
82 | all_gcm_samples <- coda::as.mcmc(gcm_samples)
83 |
84 | par(pty = "s")
85 | plot(NA, NA
86 | , xlim = c(0, 1)
87 | , ylim = c(0, 1)
88 | , xlab = "Predicted recognition probability"
89 | , ylab = "Observed probability"
90 | , asp = 1
91 | , las = 1
92 | )
93 |
94 | for(i in 1:nrow(tests)) {
95 | vioplot(
96 | all_gcm_samples[, paste0("pred_y[", i, "]")] / trials
97 | , at = (data$response / trials)[i]
98 | , horizontal = TRUE
99 | , col = scales::alpha(grey(0.7), 0.15)
100 | , border = FALSE
101 | , rectCol = grey(0.5)
102 | , colMed = "black"
103 | , pchMed = rep(c(1, rep(2, 6), rep(0, 3)), 3)[i]
104 | , add = TRUE
105 | , wex = 0.15
106 | )
107 | }
108 |
109 | abline(0, 1)
110 | legend(
111 | "bottomright"
112 | , legend = c("Prototype", "Old", "New")
113 | , pch = c(1, 2, 0)
114 | , inset = 0.1
115 | , bty = "n"
116 | )
117 | ```
118 |
119 |
120 |
121 | # References
122 | Shin, H. J., & Nosofsky, R. M. (1992). Similarity-scaling studies of dot-pattern classification and recognition. *Journal of Experimental Psychology: General*, 121(3), 278–304. doi:[10.1037/0096-3445.121.3.278](http://dx.doi.org/10.1037/0096-3445.121.3.278)
123 |
--------------------------------------------------------------------------------
/gcm/data/Nosofsky_1989_DataSets.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/gcm/data/Nosofsky_1989_DataSets.xlsx
--------------------------------------------------------------------------------
/gcm/data/Nosofsky_1989_MDS_solution.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/gcm/data/Nosofsky_1989_MDS_solution.xlsx
--------------------------------------------------------------------------------
/gcm/data/Nosofsky_1989_indexvectors.doc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/gcm/data/Nosofsky_1989_indexvectors.doc
--------------------------------------------------------------------------------
/gcm/data/Nosofsky_readme.doc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/gcm/data/Nosofsky_readme.doc
--------------------------------------------------------------------------------
/gcm/data/README.md:
--------------------------------------------------------------------------------
1 | # Note on data files
2 |
3 | Original data files were provided by Robert Nosofsky and retrieved from the [The Ohio state university cognitive modeling repository](http://cmr.osu.edu/browse/models?pid=64&sid=80:attention-similarity-and-the-identification-categorization-relationship).
4 |
5 | I created the .csv-files to reproduce the model-based analyses reported in Nosofsky, R. M. (1989). Further tests of an exemplar-similarity approach to relating identification and categorization. *Perception & Psychophysics*, 45(4), 279–290. doi: [10.3758/BF03204942](http://dx.doi.org/10.3758/BF03204942).
6 |
7 | Original data for Shin & Nosofsky (1992) is provided in Table 2 and Table A1 in Shin, H. J., & Nosofsky, R. M. (1992). Similarity-scaling studies of dot-pattern classification and recognition. *Journal of Experimental Psychology: General*, 121(3), 278–304. doi:[10.1037/0096-3445.121.3.278](http://dx.doi.org/10.1037/0096-3445.121.3.278). I created the .csv-files to reproduce the model-based analyses.
8 |
--------------------------------------------------------------------------------
/gcm/data/nosofsky_1989_responses.csv:
--------------------------------------------------------------------------------
1 | Stimulus;Cat-1-s;Cat-2-s;Cat-1-a;Cat-2-a
2 | 1;72;2;79;3
3 | 2;255;4;155;116
4 | 3;72;2;48;258
5 | 4;73;1;2;80
6 | 5;234;35;81;1
7 | 6;66;8;190;97
8 | 7;208;39;60;202
9 | 8;226;39;2;80
10 | 9;23;51;262;25
11 | 10;18;56;47;35
12 | 11;55;170;11;71
13 | 12;58;179;4;259
14 | 13;2;72;76;6
15 | 14;8;229;47;35
16 | 15;3;71;24;227
17 | 16;3;71;2;80
--------------------------------------------------------------------------------
/gcm/data/nosofsky_1989_similarities.csv:
--------------------------------------------------------------------------------
1 | Stimulus ;xi1;xi2
2 | 1;0,312;-0,241
3 | 2;0,918;-0,264
4 | 3;1,405;-0,187
5 | 4;2,062;-0,227
6 | 5;0,228;0,640
7 | 6;0,844;0,662
8 | 7;1,324;0,687
9 | 8;1,885;0,623
10 | 9;0,374;1,555
11 | 10;0,916;1,501
12 | 11;1,473;1,544
13 | 12;2,128;1,520
14 | 13;0,135;2,352
15 | 14;0,889;2,412
16 | 15;1,451;2,493
17 | 16;2,061;2,382
--------------------------------------------------------------------------------
/gcm/data/shin_nosofsky_1992_cat1.csv:
--------------------------------------------------------------------------------
1 | Exemplar;Dimension 1;Dimension 2;Dimension 3;Dimension 4;Dimension 5;Dimension 6
2 | P1;-1,5597;0,0317;0,4041;-0,0806;0,7044;-0,6498
3 | O1;-0,3957;0,4043;-0,5009;1,3874;2,9568;1,3244
4 | O2;-0,4298;0,7670;0,1511;2,7576;-0,3260;-0,2771
5 | O3;-1,7295;0,3513;0,6034;-1,0291;0,2050;-0,1439
6 | O4;-1,0242;0,5849;0,8259;-0,7610;0,1401;2,1197
7 | O5;-1,7020;0,4002;0,7423;-0,7992;-0,0752;-0,1777
8 | O6;-1,7318;0,7908;0,6102;-1,3870;0,8574;-1,2062
9 | Nl;-0,6580;1,3329;1,3447;0,5528;1,2421;-0,1631
10 | Nm;-0,2911;-1,7077;1,2002;2,1050;-0,1559;-0,6702
11 | Nh;-0,7722;0,7045;-0,8799;-0,2577;-0,9780;-2,2524
12 |
--------------------------------------------------------------------------------
/gcm/data/shin_nosofsky_1992_cat2.csv:
--------------------------------------------------------------------------------
1 | Exemplar;Dimension 1;Dimension 2;Dimension 3;Dimension 4;Dimension 5;Dimension 6
2 | P2;1,3353;0,3275;0,0543;-0,6616;0,1806;-0,2880
3 | O1;1,0572;0,3240;0,7356;0,0128;0,5301;0,3156
4 | O2;1,1589;0,9646;0,5663;-0,2257;-0,3710;0,2569
5 | O3;1,2564;0,0153;-0,4414;-1,2021;0,9971;-0,0669
6 | O4;1,4310;-0,0661;0,6084;-0,1159;-0,4888;-0,3197
7 | O5;0,9951;0,8524;0,5920;0,0367;-0,5004;-2,0262
8 | O6;1,2649;0,0256;-1,4573;-0,3197;1,0346;-0,8378
9 | Nl;1,2624;0,4207;0,3534;-0,7558;0,1718;-0,1813
10 | Nm;1,0582;0,5241;1,4669;-0,2608;-0,1230;-0,5886
11 | Nh;0,8404;-0,3782;0,8416;0,3203;-0,1264;1,7302
12 |
--------------------------------------------------------------------------------
/gcm/data/shin_nosofsky_1992_cat3.csv:
--------------------------------------------------------------------------------
1 | Exemplar;Dimension 1;Dimension 2;Dimension 3;Dimension 4;Dimension 5;Dimension 6
2 | P3;0,1213;-2,0049;-0,5556;-0,1492;0,2473;0,3143
3 | O1;0,1726;1,4973;-2,2666;1,3864;-0,8222;-0,2460
4 | O2;-0,0099;1,5846;-0,8532;0,8982;-1,7078;1,9922
5 | O3;-0,7260;-2,1062;0,4022;0,9964;0,0206;-0,2523
6 | O4;-0,7126;-0,2446;-0,9565;-1,1702;-1,7786;0,8200
7 | O5;-0,5080;-0,5879;-1,7790;-1,4875;-0,1869;0,4917
8 | O6;0,6368;-0,6837;-0,2560;-0,8362;-0,0218;1,3730
9 | Nl;0,2183;-2,0001;0,3115;-0,0454;-0,0266;0,5123
10 | Nm;-0,6975;-1,0783;0,5422;0,6966;-2,5504;-0,4488
11 | Nh;0,1393;-1,0457;-2,4102;0,3945;0,9511;-0,4545
12 |
--------------------------------------------------------------------------------
/gcm/data/shin_nosofsky_1992_responses.xls:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/gcm/data/shin_nosofsky_1992_responses.xls
--------------------------------------------------------------------------------
/gcm/data/shin_nosofsky_1992_responses_cat1.csv:
--------------------------------------------------------------------------------
1 | Exemplar;Observed;Predicted
2 | P1;0,573;0,541
3 | O11;0,847;0,782
4 | O12;0,767;0,782
5 | O13;0,860;0,853
6 | O14;0,840;0,796
7 | O15;0,893;0,856
8 | O16;0,807;0,802
9 | N1l;0,120;0,224
10 | N1m ;0,140;0,115
11 | N1h;0,200;0,221
12 |
--------------------------------------------------------------------------------
/gcm/data/shin_nosofsky_1992_responses_cat2.csv:
--------------------------------------------------------------------------------
1 | Exemplar;Observed;Predicted
2 | P2;0,795;0,719
3 | O21;0,647;0,821
4 | O22;0,727;0,834
5 | O23;0,813;0,813
6 | O24;0,867;0,829
7 | O25;0,807;0,789
8 | O26;0,753;0,795
9 | N2l;0,707;0,751
10 | N2m ;0,653;0,626
11 | N2h;0,267;0,275
12 |
--------------------------------------------------------------------------------
/gcm/data/shin_nosofsky_1992_responses_cat3.csv:
--------------------------------------------------------------------------------
1 | Exemplar;Observed;Predicted
2 | P3;0,347;0,327
3 | O31;0,893;0,783
4 | O32;0,913;0,783
5 | O33;0,673;0,786
6 | O34;0,887;0,790
7 | O35;0,680;0,797
8 | O36;0,773;0,808
9 | N3l;0,353;0,359
10 | N3m ;0,080;0,042
11 | N3h;0,193;0,180
12 |
--------------------------------------------------------------------------------
/gcm/data/shin_nosofsky_1992_similarities.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/gcm/data/shin_nosofsky_1992_similarities.xlsx
--------------------------------------------------------------------------------
/gcm/gcm_fit.r:
--------------------------------------------------------------------------------
# Computes the negative binomial log-likelihood of observed category 1
# response frequencies given GCM predictions.
#
# data A matrix or data.frame with frequencies of category 1 responses, category 2 responses and total number of responses as the first three columns
# ... Arguments to be passed to gcm_pred()
#
# Returns a single numeric value (-log L) to be minimized, e.g., by optim().

gcm_fit <- function(data, ...) {
  pred <- as.vector(gcm_pred(...))

  # Vectorized log-likelihood; replaces a row-wise apply() that relied on
  # fragile positional indexing into an augmented data object (x[4] only
  # pointed at the prediction if data had exactly three columns).
  nll <- -sum(dbinom(data[, 1], size = data[, 3], prob = pred, log = TRUE))

  return(nll)
}
10 |
--------------------------------------------------------------------------------
/gcm/gcm_pred.r:
--------------------------------------------------------------------------------
# param A vector of starting parameters: c(w1, c, b)
#   w1 = Attentional weight for dimension 1 of the psychological similarity space (assuming two dimensions)
#   c  = Similarity sensitivity
#   b  = Bias towards category 1
# mem A matrix of exemplars in memory; columns 1-2 hold the coordinates in psychological space, column 3 the category (1 or 2)
# obs A matrix of observed exemplars with one column for each dimension in psychological space
# rho An integer determining the distance metric in psychological space (1 = City-block distance; 2 = Euclidean distance)
# p An integer determining the form of the similarity function (1 = Exponential; 2 = Gaussian)
#
# Returns an nrow(obs) x 1 matrix of predicted category 1 response probabilities.

gcm_pred <- function(param, mem, obs, rho = 2, p = 1) {
  weights <- c(param[1], 1 - param[1])
  sensitivity <- param[2]
  bias <- param[3]

  mem <- as.matrix(mem)
  obs <- as.matrix(obs)

  coords <- t(mem[, 1:2]) # dimensions x exemplars
  categories <- mem[, 3]

  predict_one <- function(stimulus) {
    ## Attention-weighted distance to every exemplar in memory
    dist <- colSums(weights * abs(stimulus - coords)^rho)^(1 / rho) # Eq. 3, Nosofsky (1988)
    sim <- exp(-sensitivity * dist^p) # Eq. 4, Nosofsky (1989)

    ## Summed, bias-weighted similarity to each category
    evidence_a <- bias * sum(sim[categories == 1])
    evidence_b <- (1 - bias) * sum(sim[categories == 2])

    ## Response probability for category 1
    evidence_a / (evidence_a + evidence_b) # Eq. 2, Nosofsky (1989)
  }

  matrix(
    vapply(seq_len(nrow(obs)), function(i) predict_one(obs[i, ]), numeric(1))
    , ncol = 1
  )
}
38 |
--------------------------------------------------------------------------------
/gcm/gcm_rec_fit.r:
--------------------------------------------------------------------------------
# Computes the deviance (-2 log L) of observed old-new recognition responses
# given GCM recognition predictions.
#
# par      Named vector of parameters passed to gcm_rec_pred()
# data     A matrix or data.frame with a column "response" holding observed frequencies of "old" responses
# mem      A matrix of exemplars in memory, passed to gcm_rec_pred()
# n        Number of trials underlying each observed response frequency
# minimize Either "individual" (fit each stimulus) or "condition" (fit frequencies aggregated by design)
# design   A list of grouping factors for aggregate(); only used when minimize == "condition"
# ...      Further arguments passed to gcm_rec_pred()

gcm_rec_fit <- function(
  par
  , data
  , mem
  , n
  , minimize = "individual"
  , design = NULL
  , ...
) {
  pred <- gcm_rec_pred(param = par, mem = mem, ...)

  # Guard against predictions of exactly 0 or 1, which would yield an
  # infinite deviance
  pred <- ifelse(pred == 1, 0.99999, pred)
  pred <- ifelse(pred == 0, 0.00001, pred)

  if(minimize == "individual") {
    dev <- -2*sum(dbinom(data[, "response"], n, pred, log = TRUE))
  } else if(minimize == "condition") {
    cond_data <- aggregate(as.vector(data[, "response"]), by = design, FUN = sum)
    cond_n <- aggregate(as.vector(data[, "response"]), by = design, FUN = function(x) length(x)*n)

    # Bug fix: aggregate the model *predictions*, not the observed responses.
    # Previously the observed response counts were averaged and passed to
    # dbinom() as probabilities, which is invalid (counts can exceed 1).
    cond_pred <- aggregate(as.vector(pred), by = design, FUN = mean)

    dev <- -2*sum(dbinom(cond_data$x, cond_n$x, cond_pred$x, log = TRUE))
  }
  return(dev)
}
25 |
--------------------------------------------------------------------------------
/gcm/gcm_rec_pred.r:
--------------------------------------------------------------------------------
# param A named vector of parameters: c(w1, ..., w5, c, k)
#   w1-w5 = Attentional weights for dimensions 1-5 of the psychological
#           similarity space (assuming six dimensions); the sixth weight
#           is derived as 1 - sum(w1, ..., w5)
#   c = Similarity sensitivity
#   k = Response criterion parameter
# mem A matrix of exemplars in memory with one column for each dimension in psychological space
# obs A matrix of observed exemplars with one column for each dimension in psychological space
# rho An integer determining the distance metric in psychological space (1 = City-block distance; 2 = Euclidean distance)
# p An integer determining the form of the similarity function (1 = Exponential; 2 = Gaussian)
# pred Type of prediction: "prop" = probability of an old response, "single" = deterministic single-trial response (0/1)

gcm_rec_pred <- function(param, mem, obs, rho = 2, p = 1, pred = "prop") {
  weights <- param[c("w1", "w2", "w3", "w4", "w5")]
  weights <- c(weights, 1 - sum(weights)) # sixth weight is the remainder
  sensitivity <- param["c"]
  criterion <- param["k"]

  mem <- as.matrix(mem)
  obs <- as.matrix(obs)
  exemplars <- t(mem) # dimensions x exemplars

  recognize <- function(stimulus) {
    ## Attention-weighted distance and similarity to every exemplar
    dist <- colSums(weights * abs(stimulus - exemplars)^rho)^(1 / rho) # Eq. 3, Nosofsky (1988)
    sim <- exp(-sensitivity * dist^p)

    ## Summed familiarity
    familiarity <- sum(sim) # Eq. 8, Shin & Nosofsky (1992)

    if(pred == "single") {
      as.numeric(familiarity > criterion) # adapted from Eq. 6, Nosofsky (1991)
    } else if(pred == "prop") {
      familiarity / (familiarity + criterion) # Eq. 9, Shin & Nosofsky (1992)
    }
  }

  vapply(seq_len(nrow(obs)), function(i) recognize(obs[i, ]), numeric(1))
}
48 |
--------------------------------------------------------------------------------
/gcm/reproduce_nosofsky_1989.rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Reproduction of model-based analyses by Nosofsky (1989)"
3 | author: "Frederik Aust"
4 | date: "19.12.2014"
5 | output:
6 | html_document:
7 | theme: spacelab
8 | toc: yes
9 | ---
10 |
11 | To validate this implementation of the Generalized Context Model (GCM), I reproduced small parts of the model-based analyses reported in Nosofsky (1989). The original MDS solutions and response data were provided by Robert Nosofsky (see [note on data](data/README.html)).
12 |
13 | ```{r echo = FALSE}
14 | source("gcm_pred.r")
15 | source("gcm_fit.r")
16 | ```
17 |
18 | ```{r}
19 | sims <- read.csv2("data/nosofsky_1989_similarities.csv")[, -1]
20 | sims$size <- c(0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 2, 2, 0, 2, 0, 0)
21 | sims$angle <- c(0, 1, 2, 0, 0, 1, 2, 0, 1, 0, 0, 2, 0, 0, 2, 0)
22 |
23 | data <- read.csv2("data/nosofsky_1989_responses.csv")[, -1]
24 | data$n_size <- rowSums(data[, 1:2])
25 | data$n_angle <- rowSums(data[, 3:4])
26 | ```
27 |
28 |
29 |
30 | # Unconstrained GCM fits for the size condition
31 | The original parameter estimates reported in Table 5 by Nosofsky (1989) are $w_1 = .10$, $c = 1.60$, and $b_1 = .50$.
32 |
33 | ```{r}
34 | obs <- sims[, 1:2]
35 | mem <- subset(sims, size != 0)
36 | size_fit <- optim(
37 | par = c(0.5, 2, 0.5)
38 | , mem = mem[, 1:3]
39 | , obs = obs
40 | , rho = 2
41 | , p = 2
42 | , data = data[, c(1, 2, 5)]
43 | , fn = gcm_fit
44 | , method = "Nelder-Mead"
45 | )
46 |
47 | size_fit$par
48 |
49 | size_pred <- gcm_pred(size_fit$par, mem[, 1:3], obs, rho = 2, p = 2)
50 | ```
51 |
52 |
53 |
54 | # Unconstrained GCM fits for the angle condition
55 | The original parameter estimates reported in Table 5 by Nosofsky (1989) are $w_1 = .98$, $c = 3.20$, and $b_1 = .43$.
56 |
57 | ```{r}
58 | mem <- subset(sims, angle != 0)
59 | angle_fit <- optim(
60 | par = c(0.5, 2, 0.5)
61 | , mem = mem[, c(1:2, 4)]
62 | , obs = obs
63 | , rho = 2
64 | , p = 2
65 | , data = data[, c(3, 4, 6)]
66 | , fn = gcm_fit
67 | , method = "Nelder-Mead"
68 | )
69 |
70 | angle_fit$par
71 |
72 | angle_pred <- gcm_pred(angle_fit$par, mem[, c(1:2, 4)], obs, rho = 2, p = 2)
73 | ```
74 |
75 |
76 |
77 | # Predictions
78 | The resulting fits allow for a close partial reproduction of Nosofsky's Figure 6 (1989) plotting observed against predicted proportions of category 1 responses for each stimulus.
79 |
80 | ```{r echo = FALSE}
81 | par(pty = "s")
82 | plot(
83 | data$Cat.1.s / data$n_size
84 | , size_pred
85 | , xlab = "Observed probability"
86 | , ylab = "Predicted probability"
87 | , pch = 17
88 | , asp = 1
89 | , las = 1
90 | )
91 | points(
92 | data$Cat.1.a / data$n_angle
93 | , angle_pred
94 | , pch = 0
95 | )
96 | abline(0, 1)
97 | legend(
98 | "topleft"
99 | , legend = c("Size", "Angle")
100 | , pch = c(17, 0)
101 | , inset = 0.1
102 | , bty = "n"
103 | )
104 | ```
105 |
106 |
107 |
108 | # References
109 | Nosofsky, R. M. (1989). Further tests of an exemplar-similarity approach to relating identification and categorization. *Perception & Psychophysics*, 45(4), 279–290. doi: [10.3758/BF03204942](http://dx.doi.org/10.3758/BF03204942)
110 |
--------------------------------------------------------------------------------
/gcm/reproduce_shin_nosofsky_1992.rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Reproduction of model-based analyses by Shin & Nosofsky (1992)"
3 | author: "Frederik Aust"
4 | date: "19.12.2014"
5 | output:
6 | html_document:
7 | theme: spacelab
8 | toc: yes
9 | ---
10 |
11 | To validate this implementation of the Generalized Context Model (GCM) for recognition data, I reproduced small parts of the model-based analyses reported in Shin & Nosofsky (1992). The original MDS solutions and response data were taken from Shin & Nosofsky (1992; see [note on data](data/README.html)).
12 |
13 | ```{r echo = FALSE}
14 | source("gcm_rec_pred.r")
15 | source("gcm_rec_fit.r")
16 | ```
17 |
18 | # Experiment 1
19 |
20 | ```{r}
21 | trials <- 3 * 50
22 |
23 | obs <- c()
24 | data <- c()
25 | for(i in 1:3) {
26 | obs <- rbind(obs, read.csv2(paste0("data/shin_nosofsky_1992_cat", i, ".csv")))
27 | data <- rbind(data, read.csv2(paste0("data/shin_nosofsky_1992_responses_cat", i, ".csv")))
28 | }
29 | data$response <- round(data$Observed * trials)
30 | ```
31 |
32 |
33 |
34 | ## Summary fits
35 |
36 | ```{r}
37 | mem <- subset(obs, Exemplar %in% paste0("O", 1:6))
38 |
39 | ui <- structure(c(1, -1, 1, 0, 0, 0, 0, 0, 0, 1, -1, 0, 1, 0, 0, 0, 0, 0, 1, -1, 0, 0, 1, 0, 0, 0, 0, 1, -1, 0, 0, 0, 1, 0, 0, 0, 1, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1), .Dim = c(9L, 7L), .Dimnames = list(NULL, NULL))
40 | ci <- c(0, -1, 0, 0, 0, 0, 0, 0, 0)
41 |
42 | gcm_fit <- constrOptim(
43 | theta = c(w1 = 1/6, w2 = 1/6, w3 = 1/6, w4 = 1/6, w5 = 1/6, c = 3, k = 1)
44 | , f = gcm_rec_fit
45 | , mem = mem[, -1]
46 | , obs = obs[, -1]
47 | , rho = 2
48 | , n = trials
49 | , data = data
50 | , method = "Nelder-Mead"
51 | , ui = ui
52 | , ci = ci
53 | )
54 | ```
55 |
56 | In Table 5 Shin & Nosofsky (1992) report the following estimates for the summary fits of the old-new recognition data in experiment 1: $w_1 = .006$, $w_2 = .084$, $w_3 = .102$, $w_4 = .392$, $w_5 = .218$, $c = 4.905$, $k = 0.280$
57 |
58 | ```{r}
59 | round(gcm_fit$par, 3)
60 | gcm_fit$value/2 # -lnL
61 | ```
62 |
63 | The resulting fits closely resemble those reported in the paper, however, the estimates vary slightly depending on the starting parameters `theta` used when fitting the data.
64 |
65 |
66 |
67 | ## Predictions
68 | The resulting estimates allow for a close partial reproduction of Shin & Nosofsky's Figure 2A (1992) plotting observed against predicted proportions of old responses for each stimulus.
69 |
70 | ```{r echo = FALSE}
71 | cat_pred <- gcm_rec_pred(gcm_fit$par, mem[, -1], obs[, -1])
72 |
73 | par(pty = "s")
74 | plot(
75 | cat_pred
76 | , data$Observed
77 | , xlab = "Predicted recognition probability"
78 | , ylab = "Observed probability"
79 | , xlim = c(0, 1)
80 | , ylim = c(0, 1)
81 | , pch = c(1, rep(2, 6), rep(0, 3))
82 | , asp = 1
83 | , las = 1
84 | )
85 | abline(0, 1)
86 | legend(
87 | "bottomright"
88 | , legend = c("Prototype", "Old", "New")
89 | , pch = c(1, 2, 0)
90 | , inset = 0.1
91 | , bty = "n"
92 | )
93 | ```
94 |
95 |
96 |
97 | # References
98 | Shin, H. J., & Nosofsky, R. M. (1992). Similarity-scaling studies of dot-pattern classification and recognition. *Journal of Experimental Psychology: General*, 121(3), 278–304. doi:[10.1037/0096-3445.121.3.278](http://dx.doi.org/10.1037/0096-3445.121.3.278)
99 |
--------------------------------------------------------------------------------
/minerva-al/minerva-al.R:
--------------------------------------------------------------------------------
# probe        Feature vector for probe event (A).
# memory       Memory matrix with columns representing features and rows traces in memory (Mij).
# cue_features A vector giving the indices of features that are associated with cues.
#
# Returns the normalized echo (C'j) produced by probing memory.

probe_memory <- function (probe, memory, cue_features) {
  # With an empty memory, the echo is pure noise
  # (p. 65, Jamieson, Crump, & Hannah, 2012)
  if(is.null(memory)) {
    noise <- runif(length(probe), -0.001, 0.001)
    return(noise / max(abs(noise))) # Eq. 4, Jamieson, Crump, & Hannah (2012)
  }

  # Compare only features associated with cues (p. 64, Jamieson, Crump, & Hannah, 2012)
  cue_probe <- probe[cue_features]
  cue_memory <- memory[, cue_features, drop = FALSE]

  # Cosine similarity between the probe and every trace
  # (simplified Eq. 7, Jamieson, Crump, & Hannah, 2012)
  similarity <- colSums(cue_probe * t(cue_memory)) /
    (sqrt(sum(cue_probe^2) * rowSums(cue_memory^2)))
  activation <- similarity^3 # Eq. 2, Jamieson, Crump, & Hannah (2012)

  # Echo is the activation-weighted sum of all traces, plus noise
  echo <- colSums(activation * memory) # Eq. 3, Jamieson, Crump, & Hannah (2012)
  echo <- echo + runif(length(echo), -0.001, 0.001) # p. 64, Jamieson, Crump, & Hannah (2012)

  echo / max(abs(echo)) # Eq. 4, Jamieson, Crump, & Hannah (2012)
}
27 |
28 |
# outcome         Feature vector for outcome event (X).
# normalized_echo Normalized echo (C'j) produced by probe event (A).
#
# Returns the expectancy of the outcome given the echo.

expect_event <- function (outcome, normalized_echo) {
  # Eq. 5, Jamieson, Crump, & Hannah (2012): average the echo over the
  # features that are nonzero in both the outcome and the echo
  active <- outcome != 0 & normalized_echo != 0
  sum(outcome * normalized_echo) / sum(active)
}
38 |
39 |
# normalized_echo Normalized echo (C'j) produced by probe event (A).
# event           Feature vector for the encountered event (e.g., E = A + X).
# p_encode        Probability with which a feature is encoded in memory (L).
# memory          Memory matrix with columns representing features and rows traces in memory (Mij).
#
# Returns the memory matrix with the new trace appended as its last row.

learn <- function (normalized_echo, event, p_encode, memory) {
  n_features <- length(event)

  # Each feature is encoded with probability p_encode; the perfect-encoding
  # case skips the random draw to speed up simulations
  encoded <- if (p_encode < 1) rbinom(n_features, 1, p_encode) else rep(1, n_features)

  # Discrepancy encoding: store what the echo did not already retrieve
  # (Eq. 6, Jamieson, Crump, & Hannah, 2012)
  rbind(memory, (event - normalized_echo) * encoded)
}
58 |
--------------------------------------------------------------------------------
/minerva-al/minerva-al.Rproj:
--------------------------------------------------------------------------------
1 | Version: 1.0
2 |
3 | RestoreWorkspace: Default
4 | SaveWorkspace: Default
5 | AlwaysSaveHistory: Default
6 |
7 | EnableCodeIndexing: Yes
8 | UseSpacesForTab: Yes
9 | NumSpacesForTab: 2
10 | Encoding: UTF-8
11 |
12 | RnwWeave: knitr
13 | LaTeX: pdfLaTeX
14 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title : "Analysis of reference implementation results"
3 | author : "Frederik Aust"
4 | date : "`r format(Sys.time(), '%d %B, %Y')`"
5 |
6 | output:
7 | html_document:
8 | theme : "spacelab"
9 | df_print : "kable"
10 | code_folding : "show"
11 | toc : true
12 | toc_float : true
13 | ---
14 |
15 | ```{r init, include = FALSE}
16 | library("dplyr")
17 | ```
18 |
19 | # Acquisition and extinction
20 |
21 | ```{r}
22 | acquisition_files <- list.files(path = "reference_implementation/results", pattern = "Acquisition", full.names = TRUE)
23 |
24 | acquisition_extinction <- lapply(
25 | acquisition_files
26 | , read.delim
27 | , skip = 3
28 | , nrows = 24
29 | , sep = ""
30 | , header = FALSE
31 | ) %>%
32 | setNames(
33 | nm = stringr::str_extract(basename(acquisition_files), "\\d+") %>%
34 | gsub("0", "0.", .)
35 | ) %>%
36 | bind_rows(.id = "L") %>%
37 | mutate(V2 = as.factor(V2)) %>%
38 | group_by(L) %>%
39 | summarise_if(is.numeric, mean)
40 | ```
41 |
42 | ```{r plot-acquisition, fig.height = 5.5, fig.width = 9.5, echo = FALSE}
43 | plot(
44 | 1:200
45 | , rep(NA, 200)
46 | , ylim = c(0, 1)
47 | , lwd = 2.5
48 | , xlab = "Trial"
49 | , ylab = "Retrieval of X given A"
50 | , las = 1
51 | )
52 |
53 | matlines(
54 | t(as.matrix(acquisition_extinction[, -1]))
55 | , lwd = 2.5
56 | , col = "black"
57 | , lty = 1
58 | )
59 |
60 | matpoints(
61 | t(as.matrix(acquisition_extinction[, -1]))
62 | , pch = c(21, 22, 24)
63 | , bg = "white"
64 | , col = "black"
65 | , cex = 1.25
66 | )
67 | ```
68 |
69 |
70 | # Reacquisition
71 |
72 | ```{r}
73 | reacquisition_files <- list.files(path = "reference_implementation/results", pattern = "Reacquisition\\_\\d+", full.names = TRUE)
74 |
75 | reacquisition <- lapply(
76 | reacquisition_files
77 | , read.delim
78 | , skip = 3
79 | , nrows = 24
80 | , sep = ""
81 | , header = FALSE
82 | ) %>%
83 | setNames(
84 | nm = stringr::str_extract(basename(reacquisition_files), "\\d+") %>%
85 | gsub("0", "0.", .)
86 | ) %>%
87 | bind_rows(.id = "L") %>%
88 | mutate(V2 = as.factor(V2))
89 |
90 | mean_reacquisition <- reacquisition %>%
91 | group_by(L) %>%
92 | summarise_if(is.numeric, mean)
93 | ```
94 |
95 | ```{r}
96 | reacquisition_control_files <- list.files(path = "reference_implementation/results", pattern = "Reacquisition\\_\\D+", full.names = TRUE)
97 |
98 | reacquisition_control <- lapply(
99 | reacquisition_control_files
100 | , read.delim
101 | , skip = 3
102 | , nrows = 24
103 | , sep = ""
104 | , header = FALSE
105 | ) %>%
106 | setNames(
107 | nm = stringr::str_extract(basename(reacquisition_control_files), "\\d+") %>%
108 | gsub("0", "0.", .)
109 | ) %>%
110 | bind_rows(.id = "L") %>%
111 | mutate(V2 = as.factor(V2))
112 |
113 | mean_reacquisition_control <- reacquisition_control %>%
114 | group_by(L) %>%
115 | summarise_if(is.numeric, mean)
116 | ```
117 |
118 | ```{r plot-reacquisition, fig.height = 5.5, fig.width = 7.5, echo = FALSE}
119 | plot(
120 | 1:150
121 | , rep(NA, 150)
122 | , ylim = c(0, 1)
123 | , lwd = 2.5
124 | , xlab = "Trial"
125 | , ylab = "Retrieval of X given A"
126 | , las = 1
127 | )
128 |
129 | abline(h = 0.95, col = "grey")
130 |
131 | matlines(
132 | t(as.matrix(mean_reacquisition[, -1]))
133 | , col = "black"
134 | , lty = 1
135 | )
136 |
137 | matlines(
138 | t(as.matrix(mean_reacquisition_control[, -1]))
139 | , col = "black"
140 | , lty = 2
141 | )
142 | ```
143 |
144 | ```{r}
145 | mean_se <- function(x) paste0(round(mean(x), 2), " (", round(sd(x) / sqrt(length(x)), 2), ")")
146 |
147 | reacquisition_results <- matrix(NA, ncol = 3, nrow = 2)
148 |
149 | trails_to_master <- function(x) data.frame(n_trials = min(which(x >= 0.95)))
150 |
151 | reacquisition_trial_counts <- reacquisition %>%
152 | group_by(L, V2) %>%
153 | do(trails_to_master(.[, 101:ncol(reacquisition)])) %>%
154 | group_by(L) %>%
155 | summarize(n_trials = mean_se(n_trials))
156 |
157 | reacquisition_control_trial_counts <- reacquisition_control %>%
158 | group_by(L, V2) %>%
159 | do(trails_to_master(.[, 101:ncol(reacquisition_control)])) %>%
160 | group_by(L) %>%
161 | summarize(n_trials = mean_se(n_trials))
162 |
163 | knitr::kable(bind_rows(Reacquition = reacquisition_trial_counts, Control = reacquisition_control_trial_counts, .id = "Condition"))
164 | ```
165 |
166 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/Acquisition_033.f90:
--------------------------------------------------------------------------------
!-----------------------------------------------------------
! Acquisition and Extinction (learning rate L = 1/3)
!
! Minerva-AL reference simulation: each simulated subject
! receives N_trials A+ trials (acquisition) followed by
! N_trials A- trials (extinction). Retrieval of the outcome
! X given cue A is recorded on every trial and written to
! results/Acquisition_extinction_033.txt.
!-----------------------------------------------------------
program Acquisition_Extinction
use Number_generators
use MinervaAL_tools
implicit none

! Each of the N_cues cues occupies a field of N_field features,
! so cue j spans features (j-1)*N_field+1 .. j*N_field of the
! N_features-long event vectors.
integer, parameter :: N_cues = 10, &
N_field = 20, &
N_features = N_cues*N_field, &
N_trials = 100, &
N_subjects = 100, &
N_phases = 2

! Probability that a feature is encoded into memory (learning rate)
real, parameter :: L = 1.0/3.0

real :: Echo(N_features), &
Probe(N_features), &
Cue_matrix(N_features, N_cues), &
Memory(N_features, N_trials*N_phases), &
Summary(N_trials*N_phases, N_subjects)

integer :: i, &
j, &
k, &
v, &
N_traces


call RandSeed
call Get_stimuli(Cue_matrix, N_cues)

Summary = 0.0

do i = 1, N_subjects
N_traces = 0
Memory = 0.0
do j = 1, N_phases
do k = 1, N_trials

!---------------------------------------------------
! Construct the probe to be relevant for the
! current learning phase: 1 = A, 9 = Context,
! and 10 = Outcome (X). For example, j == 1 is an
! A+ trial whereas j == 2 is an A- trial
!---------------------------------------------------
Probe = 0.0
if (j == 1) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Acquisition
if (j == 2) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) ! Extinction

!---------------------------------------------------
! Get the echo for the probe with 0.001 noise added
!---------------------------------------------------
call Get_echo(Echo, Probe, Memory, N_traces, 0.001)

!---------------------------------------------------
! Increment N_traces, store the response strength,
! and encode memory for the trial
!---------------------------------------------------
N_traces = N_traces + 1
! Features 181:200 are the field of the outcome cue X (cue 10,
! given N_field = 20), so this is the mean echo over X's features
Summary(N_traces, i) = Sum(Echo(181:200))/N_field ! A shorthand of formula 5 in Jamieson et al. (2012)
! Discrepancy encoding: each feature of (Probe - Echo) is stored
! with probability L; unencoded features stay 0
do v = 1, N_features
if (flat(1.0) < L) Memory(v, N_traces) = Probe(v) - Echo(v)
enddo

enddo
enddo
enddo

!---------------------------------------------------
! Write the results of the simulation to a file:
! one row per subject with N_trials*N_phases values,
! followed by the per-trial mean (M) and standard
! error (SEM) across subjects
!---------------------------------------------------
Open(1, file='results/Acquisition_extinction_033.txt')
write(1,*)
write(1,'(A, F5.2)') 'Learning rate = ', L
write(1,*)
do i = 1, N_subjects
write(1, '(A2, I4, 1000F8.4)') 'Ss ', i, Summary(:, i)
enddo
write(1,*)
write(1,'(A6)',advance='no') 'M'
do i = 1, N_traces
write(1, '(1000F8.4)',advance='no') Mean(Summary(i,:), N_subjects)
enddo
write(1,*)
write(1,'(A6)',advance='no') 'SEM'
do i = 1, N_traces
write(1, '(1000F8.4)',advance='no') SEM(Summary(i,:), N_subjects)
enddo
write(1,*)
Close(1)

END PROGRAM Acquisition_extinction
96 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/Acquisition_067.f90:
--------------------------------------------------------------------------------
!-----------------------------------------------------------
! Acquisition and Extinction (learning rate L = 2/3)
!
! Minerva-AL reference simulation: each simulated subject
! receives N_trials A+ trials (acquisition) followed by
! N_trials A- trials (extinction). Retrieval of the outcome
! X given cue A is recorded on every trial and written to
! results/Acquisition_extinction_067.txt.
!-----------------------------------------------------------
program Acquisition_Extinction
use Number_generators
use MinervaAL_tools
implicit none

! Each of the N_cues cues occupies a field of N_field features,
! so cue j spans features (j-1)*N_field+1 .. j*N_field of the
! N_features-long event vectors.
integer, parameter :: N_cues = 10, &
N_field = 20, &
N_features = N_cues*N_field, &
N_trials = 100, &
N_subjects = 100, &
N_phases = 2

! Probability that a feature is encoded into memory (learning rate)
real, parameter :: L = 2.0/3.0

real :: Echo(N_features), &
Probe(N_features), &
Cue_matrix(N_features, N_cues), &
Memory(N_features, N_trials*N_phases), &
Summary(N_trials*N_phases, N_subjects)

integer :: i, &
j, &
k, &
v, &
N_traces


call RandSeed
call Get_stimuli(Cue_matrix, N_cues)

Summary = 0.0

do i = 1, N_subjects
N_traces = 0
Memory = 0.0
do j = 1, N_phases
do k = 1, N_trials

!---------------------------------------------------
! Construct the probe to be relevant for the
! current learning phase: 1 = A, 9 = Context,
! and 10 = Outcome (X). For example, j == 1 is an
! A+ trial whereas j == 2 is an A- trial
!---------------------------------------------------
Probe = 0.0
if (j == 1) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Acquisition
if (j == 2) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) ! Extinction

!---------------------------------------------------
! Get the echo for the probe with 0.001 noise added
!---------------------------------------------------
call Get_echo(Echo, Probe, Memory, N_traces, 0.001)

!---------------------------------------------------
! Increment N_traces, store the response strength,
! and encode memory for the trial
!---------------------------------------------------
N_traces = N_traces + 1
! Features 181:200 are the field of the outcome cue X (cue 10,
! given N_field = 20), so this is the mean echo over X's features
Summary(N_traces, i) = Sum(Echo(181:200))/N_field ! A shorthand of formula 5 in Jamieson et al. (2012)
! Discrepancy encoding: each feature of (Probe - Echo) is stored
! with probability L; unencoded features stay 0
do v = 1, N_features
if (flat(1.0) < L) Memory(v, N_traces) = Probe(v) - Echo(v)
enddo

enddo
enddo
enddo

!---------------------------------------------------
! Write the results of the simulation to a file:
! one row per subject with N_trials*N_phases values,
! followed by the per-trial mean (M) and standard
! error (SEM) across subjects
!---------------------------------------------------
Open(1, file='results/Acquisition_extinction_067.txt')
write(1,*)
write(1,'(A, F5.2)') 'Learning rate = ', L
write(1,*)
do i = 1, N_subjects
write(1, '(A2, I4, 1000F8.4)') 'Ss ', i, Summary(:, i)
enddo
write(1,*)
write(1,'(A6)',advance='no') 'M'
do i = 1, N_traces
write(1, '(1000F8.4)',advance='no') Mean(Summary(i,:), N_subjects)
enddo
write(1,*)
write(1,'(A6)',advance='no') 'SEM'
do i = 1, N_traces
write(1, '(1000F8.4)',advance='no') SEM(Summary(i,:), N_subjects)
enddo
write(1,*)
Close(1)

END PROGRAM Acquisition_extinction
96 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/Acquisition_1.f90:
--------------------------------------------------------------------------------
!----------------------------------------------------------------------
! Acquisition and Extinction (learning rate L = 1.0)
!
! Simulates N_subjects simulated subjects through an acquisition phase
! (A+ trials) followed by an extinction phase (A- trials) under the
! MINERVA-AL model (Jamieson, Crump, & Hannah, 2012).  The conditioned
! response strength on every trial is written to
! results/Acquisition_extinction_1.txt together with per-trial means
! and standard errors over subjects.
!----------------------------------------------------------------------
program Acquisition_Extinction
    use Number_generators
    use MinervaAL_tools
    implicit none

    integer, parameter :: N_cues     = 10,             &  ! cues in the cue matrix
                          N_field    = 20,             &  ! features per cue field
                          N_features = N_cues*N_field, &  ! total feature-vector length
                          N_trials   = 100,            &  ! trials per phase
                          N_subjects = 100,            &
                          N_phases   = 2                  ! 1 = acquisition, 2 = extinction

    real, parameter :: L = 1.0                            ! learning (encoding) rate: p(feature encoded)

    integer, parameter :: Out_unit = 20                   ! avoid preconnected low-numbered units

    real :: Echo(N_features),                      &
            Probe(N_features),                     &
            Cue_matrix(N_features, N_cues),        &
            Memory(N_features, N_trials*N_phases), &
            Summary(N_trials*N_phases, N_subjects)    ! response strength per trial x subject

    integer :: i, j, k, v, N_traces

    call RandSeed
    call Get_stimuli(Cue_matrix, N_cues)

    Summary = 0.0

    do i = 1, N_subjects
        N_traces = 0
        Memory   = 0.0
        do j = 1, N_phases
            do k = 1, N_trials

                !---------------------------------------------------
                ! Construct the probe for the current learning
                ! phase: cue 1 = A, cue 9 = Context, cue 10 =
                ! Outcome (X).  j == 1 is an A+ trial whereas
                ! j == 2 is an A- trial.
                !---------------------------------------------------
                Probe = 0.0
                if (j == 1) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Acquisition: A+
                if (j == 2) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9)                     ! Extinction:  A-

                !---------------------------------------------------
                ! Get the echo for the probe with 0.001 noise added
                !---------------------------------------------------
                call Get_echo(Echo, Probe, Memory, N_traces, 0.001)

                !---------------------------------------------------
                ! Increment N_traces, store the response strength
                ! (mean echo activation over the outcome field --
                ! a shorthand of formula 5 in Jamieson et al.,
                ! 2012), and encode the trial's expectancy
                ! discrepancy into memory with probability L
                ! per feature.
                !---------------------------------------------------
                N_traces = N_traces + 1
                Summary(N_traces, i) = Sum(Echo(N_features-N_field+1:N_features))/N_field
                do v = 1, N_features
                    if (flat(1.0) < L) Memory(v, N_traces) = Probe(v) - Echo(v)
                enddo

            enddo
        enddo
    enddo

    !---------------------------------------------------
    ! Write the results of the simulation to a file as a
    ! matrix of N_subjects rows by trial columns, followed
    ! by the per-trial mean (M) and standard error (SEM).
    !---------------------------------------------------
    Open(Out_unit, file='results/Acquisition_extinction_1.txt')
    write(Out_unit,*)
    write(Out_unit,'(A, F5.2)') 'Learning rate = ', L
    write(Out_unit,*)
    do i = 1, N_subjects
        ! A3 (not A2) so the 3-character label 'Ss ' is not truncated
        write(Out_unit, '(A3, I4, 1000F8.4)') 'Ss ', i, Summary(:, i)
    enddo
    write(Out_unit,*)
    write(Out_unit,'(A6)',advance='no') 'M'
    do i = 1, N_traces
        write(Out_unit, '(1000F8.4)',advance='no') Mean(Summary(i,:), N_subjects)
    enddo
    write(Out_unit,*)
    write(Out_unit,'(A6)',advance='no') 'SEM'
    do i = 1, N_traces
        write(Out_unit, '(1000F8.4)',advance='no') SEM(Summary(i,:), N_subjects)
    enddo
    write(Out_unit,*)
    Close(Out_unit)

END PROGRAM Acquisition_extinction
96 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/MinervaAL_tools.f90:
--------------------------------------------------------------------------------
!********************************************
! Tools for the MinervaAL project:
!
!  subroutine Get_echo        (C, P, M, n, r)
!  subroutine Get_stimuli     (oMat, n)
!  subroutine Randomize_order (oVec, n)
!  function   Cosine          (v1, v2, n)
!  function   Mean            (iVec, n)
!  function   SEM             (iVec, n)
!
!********************************************
module MinervaAL_tools
    use Number_generators
    implicit none


CONTAINS

    !--------------------------------------------------------------
    ! Get_echo: computes the echo C for probe P against the first
    ! n traces stored (column-wise) in memory M, with uniform noise
    ! of range r (randomly sign-flipped) added to every feature.
    !
    ! Each trace contributes the whole trace weighted by the cube
    ! of its cosine similarity to the probe, where similarity is
    ! computed over the cue fields only (features 1:180).
    ! NOTE(review): the cue-field boundary 180 is hard-coded and
    ! assumes a 200-feature vector with a 20-feature outcome field
    ! -- confirm before reusing with a different layout.
    ! The echo is normalized by its largest absolute value.
    !--------------------------------------------------------------
    subroutine Get_echo (C, P, M, n, r)
        implicit none
        integer :: j, n
        real :: C(:), P(:), M(:,:), r, peak

        ! Seed the echo with small random noise in (-r, r) per feature
        C = 0.0
        do j = 1, size(C)
            C(j) = flat(r) * binomial(0.5)
        enddo

        ! Accumulate similarity-weighted traces
        do j = 1, n
            C(:) = C(:) + Cosine(P(1:180), M(1:180,j), 180)**3 * M(:,j)
        enddo

        ! Normalize to unit maximum magnitude; guard against an
        ! all-zero echo (possible only when n == 0 and r == 0.0)
        peak = maxval(abs(C))
        if (peak > 0.0) C = C/peak

        return
    end subroutine Get_echo

    !--------------------------------------------------------------
    ! Get_stimuli: builds n orthogonal binary cue vectors.  Cue j
    ! has ones in its own contiguous block of size(oMat,1)/n
    ! features and zeros elsewhere (20 features per cue in the
    ! published simulations).
    !--------------------------------------------------------------
    subroutine Get_stimuli (oMat, n)
        implicit none
        integer :: n, j, k, offset, field
        real :: oMat(:,:)

        oMat  = 0.0
        field = size(oMat, 1) / n   ! features per cue, derived from the matrix shape

        offset = 0
        do j = 1, n
            do k = offset + 1, offset + field
                oMat(k, j) = 1.0
            enddo
            offset = offset + field
        enddo

        return
    end subroutine Get_stimuli

    !--------------------------------------------------------------
    ! Randomize_order: fills oVec(1:n) with a uniformly random
    ! permutation of 1..n.  Uses the Fisher-Yates shuffle (O(n))
    ! in place of the original unbounded rejection sampling (O(n^2)
    ! expected); the result is still a uniform random permutation.
    !--------------------------------------------------------------
    subroutine Randomize_order (oVec, n)
        implicit none
        integer :: i, j, tmp, oVec(:), n

        do i = 1, n
            oVec(i) = i
        enddo
        do i = n, 2, -1
            j = FlatInt(i)        ! uniform draw on 1..i
            tmp     = oVec(i)
            oVec(i) = oVec(j)
            oVec(j) = tmp
        enddo
        return
    end subroutine Randomize_order

    !--------------------------------------------------------------
    ! Cosine: cosine similarity between v1 and v2 (both length n).
    ! Returns 0.0 when either vector is all zeros.
    !--------------------------------------------------------------
    function Cosine (v1, v2, n)
        implicit none
        integer :: n
        real :: v1(n), v2(n), x(n), y(n), Cosine

        Cosine = 0.0

        if (sum(abs(v1)) /= 0.0 .and. sum(abs(v2)) /= 0.0) then
            x = v1/sqrt(dot_product(v1,v1))
            y = v2/sqrt(dot_product(v2,v2))
            Cosine = dot_product(x,y)
        endif

        return
    end function Cosine

    !--------------------------------------------------------------
    ! Mean: arithmetic mean of iVec(1:n).
    !--------------------------------------------------------------
    function Mean (iVec, n)
        implicit none
        integer :: n
        real :: iVec(n), Mean

        Mean = Sum(iVec)/n

        return
    end function Mean

    !--------------------------------------------------------------
    ! SEM: standard error of the mean of iVec(1:n), using the
    ! n-1 (sample) standard deviation.  Requires n >= 2.
    !--------------------------------------------------------------
    function SEM (iVec, n)
        implicit none
        integer :: i, n
        real :: iVec(n), SEM, M, Summ

        M = Mean(iVec, n)

        Summ = 0.0
        do i = 1, n
            Summ = Summ + (iVec(i) - M)**2
        enddo

        SEM = sqrt(Summ/(n-1)) / sqrt(real(n))

        return
    end function SEM

END MODULE MinervaAL_tools
129 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/Number_generators.f90:
--------------------------------------------------------------------------------
module Number_generators

!*********************************************************************
!User accessible routines:
!Subroutines:
!   RandSeed  sets the random-number seed from the wall clock
!   FixSeed   sets the random-number seed to -99
!             both routines write the seed to standard output
!
!Functions:
!   flat(range)        returns a real from a flat distribution [0.0 .. range)
!                      range (real) sets the max value
!
!   FlatInt(range)     returns an integer from a flat distribution 1, 2, .. range
!                      range (integer) sets the upper value
!
!   gaussian(mu, sd)   returns a real from a Normal(mu, sd)
!                      mu and sd (real) set the mean & SD of the Normal
!
!   geometric(p)       returns an integer (1..maxint] from a geometric distribution
!                      p (real) is the probability of success on each trial
!
!   binomial(p)        returns +1 / -1 with p = p(+1)
!
!   exponential(lambda) returns a real from an exponential distribution
!                      lambda is the mean of the distribution
!
!***********************************************************************
! Internal routines:
!   the random-number seed, idum, is private to the module
!   ran3  (idum): returns a random number from a uniform distribution [0..1)
!   rnorm (idum): returns a random number from a Unit Normal distribution
!   expdev(idum): returns a random number from an exponential with mean = 1
!   Init_tables : builds the generator's lag table from the current seed
!***********************************************************************

implicit none
integer, private :: idum, inext, inextp
real, private, dimension(55) :: ma
! Box-Muller cache flag shared with rnorm so that assign_seed can
! discard a cached deviate when the generator state is restored.
logical :: switch = .true.

private :: Init_tables

CONTAINS


!***********************************************************************
! ran3: generates random values [0..1)
! From Press et al. (1992) Numerical Recipes in FORTRAN (2nd ed.) CUP
!
! Runs through the ma lag table pre-established by Init_tables.
! The idum argument is kept for interface compatibility with the
! Numerical Recipes original; the state actually lives in the
! module variables ma/inext/inextp.
!***********************************************************************
function ran3(idum)
    implicit none
    real :: ran3, mj
    real, parameter :: MBIG = 4000000.0, MZ = 0.0, FAC = 1.0/MBIG
    integer :: idum

    inext = inext + 1
    if (inext == 56) inext = 1
    inextp = inextp + 1
    if (inextp == 56) inextp = 1

    mj = ma(inext) - ma(inextp)
    if (mj < MZ) mj = mj + MBIG
    ma(inext) = mj
    ran3 = mj*FAC
    return
end function ran3


!****************************************************************
! fx_geometric: returns p(1st success) at trial first_success
! parameter: p_success = probability of a success on all trials
!****************************************************************
function fx_geometric(p_success, first_success)
    real, intent (in) :: p_success
    real :: fx_geometric
    integer, intent(in) :: first_success

    if (first_success < 1) then   ! trial index must be positive --
        fx_geometric = 0.0        ! impossible result, i.e., Prob = zero
    else
        fx_geometric = ((1.0 - p_success)**(first_success-1)) * p_success
    endif
    return
end function fx_geometric


!*****************************************************************
! expdev: returns a real from an exponential distribution lambda=1
!*****************************************************************
function expdev(idum)
    real :: expdev, dum
    integer, intent(inout) :: idum

    do                              ! reject the (rare) zero draw so
        dum = ran3(idum)            ! that log(dum) is defined
        if (dum /= 0.0) exit
    enddo
    expdev = -log(dum)
    return
end function expdev


!******************************************************************
! exponential(lambda): returns a deviate from an exponential
! distribution with mean = lambda
!******************************************************************
function exponential (lambda)
    real, intent(in) :: lambda
    real :: exponential

    exponential = expdev(idum)*lambda
    return
end function exponential


!******************************************************************
! geometric: returns an integer 1 .. inf, distributed according to
! a geometric distribution: the trial index of the first success.
! parameter: p_success = probability of a success on each trial
!******************************************************************
function geometric(p_success)
    integer :: j, geometric
    real, intent(in) :: p_success
    real :: xs, rn

    j  = 0
    xs = 0.0             ! cumulative probability mass
    rn = ran3(idum)      ! uniform draw located within the cumulative

    do                   ! walk the cumulative distribution until it
        j  = j + 1       ! exceeds rn; that trial index is the result
        xs = xs + fx_geometric(p_success, j)
        if (xs > rn) exit
    enddo
    geometric = j
end function geometric


!*********************************************************************
! rnorm: Unit Normal deviate by the Box-Muller polar method.
! Deviates are produced in pairs: one is returned, the other is
! cached in rnorm2 and returned on the next call.  The cache flag is
! the module-level `switch` so that assign_seed can invalidate the
! cache when restoring generator state (the original local SAVEd
! flag shadowed the module variable, making that reset a no-op).
!*********************************************************************
function rnorm (idum)
    real :: fac, r, v1, v2, rnorm
    integer, intent(inout) :: idum
    real, save :: rnorm2

    if (switch) then
        do                               ! sample a point uniformly in
            v1 = 2.0 * ran3(idum) - 1.0  ! the unit circle (excluding
            v2 = 2.0 * ran3(idum) - 1.0  ! the origin)
            r  = v1**2 + v2**2
            if (r < 1.0 .and. r /= 0.0) exit
        enddo

        fac    = sqrt(-2.0 * log(r)/r)
        rnorm2 = v1 * fac                ! cache the second deviate
        switch = .false.
        rnorm  = v2 * fac
    else
        switch = .true.
        rnorm  = rnorm2                  ! return the cached deviate
    endif
    return
end function rnorm


!***********************************************************************
! gaussian: returns a Normal deviate from N(mu, sd)
!***********************************************************************
function gaussian(mu, sd)
    real, intent(in) :: mu, sd
    real :: gaussian

    gaussian = (rnorm(idum) * sd) + mu   ! scale/shift the unit Normal
    return
end function gaussian


!***********************************************************************
! binomial: returns +1 / -1 with p = probability of +1
!***********************************************************************
function binomial(p)
    real, intent(in) :: p
    real :: binomial
    if (ran3(idum) .lt. p) then
        binomial = +1.0
    else
        binomial = -1.0
    endif
end function binomial


!**********************************************************************
! flat: returns a real from a flat distribution on [0.0 .. range)
! parameter: range
!**********************************************************************
function flat(range)
    real :: flat
    real, intent(in) :: range

    flat = ran3(idum)*range
    return
end function flat


!**********************************************************************
! FlatInt: returns an integer from a flat distribution on 1 .. range
! parameter: range
!**********************************************************************
function FlatInt(range)
    integer :: FlatInt
    integer, intent(in) :: range

    FlatInt = int(ran3(idum) * range) + 1
    return
end function FlatInt


!**********************************************************************
! Init_tables: builds the 55-element lag table for ran3 from the
! current seed idum (Press et al., 1992, subtractive generator).
! Shared by FixSeed and RandSeed, which previously duplicated it.
!**********************************************************************
subroutine Init_tables
    implicit none
    real :: mj, mk
    real, parameter :: MBIG = 4000000.0, MSEED = 1618033.0, MZ = 0.0
    integer :: i, ii, k

    mj = MSEED - iabs(idum)
    mj = mod(mj, MBIG)
    ma(55) = mj
    mk = 1
    do i = 1, 54
        ii = mod(21*i, 55)
        ma(ii) = mk
        mk = mj - mk
        if (mk < MZ) mk = mk + MBIG
        mj = ma(ii)
    enddo
    do k = 1, 4                         ! "warm up" the table
        do i = 1, 55
            ma(i) = ma(i) - ma(1 + mod(i+30, 55))
            if (ma(i) < MZ) ma(i) = ma(i) + MBIG
        enddo
    enddo
    inext  = 0
    inextp = 31
    return
end subroutine Init_tables


!**********************************************************************
! FixSeed: sets a fixed seed (-99) for the random-number routines
! and initializes the generator's lag table
!**********************************************************************
subroutine FixSeed
    idum = -99
    write(*,'(A, I0)') ' Seed for random-number generator = ', idum
    call Init_tables
    return
end subroutine FixSeed


!**********************************************************************
! RandSeed: sets a wall-clock-derived seed for the random-number
! routines and initializes the generator's lag table
!**********************************************************************
subroutine RandSeed
    implicit none
    call system_clock(idum)

    do while (idum > 1000000)   ! reduce the clock count to at most
        idum = idum / 10        ! 7 digits
    enddo
    if (idum > 0) idum = idum * (-1)   ! seed must be negative

    write(*,'(A, I0)') ' Seed for random-number generator = ', idum
    call Init_tables
    return
end subroutine RandSeed


!**********************************************************************
! get_ran_seed: captures the current state of the generator so it
! can later be restored with assign_seed
!**********************************************************************
subroutine get_ran_seed(dummy, MAA)
    integer, intent(out), dimension(2) :: dummy
    real, dimension(55) :: MAA
    dummy(1) = inext
    dummy(2) = inextp
    MAA = ma
    return
end subroutine get_ran_seed


!**********************************************************************
! assign_seed: restores generator state captured by get_ran_seed.
! Also discards any deviate cached by rnorm (its Box-Muller pair is
! not part of the captured state).
!**********************************************************************
subroutine assign_seed(iseed, MAA)
    integer, dimension(2), intent(in) :: iseed
    real, dimension(55) :: MAA
    inext  = iseed(1)
    inextp = iseed(2)
    ma     = MAA
    switch = .true.
    return
end subroutine assign_seed


END MODULE Number_Generators
336 |
337 |
338 |
339 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/Reacquisition_033.f90:
--------------------------------------------------------------------------------
!----------------------------------------------------------------------
! Reacquisition (learning rate L = 0.33)
!
! Simulates acquisition (A+), extinction (A-), and three phases of
! reacquisition (A+) under the MINERVA-AL model (Jamieson, Crump, &
! Hannah, 2012).  Response strength on every trial is written to
! results/Reacquisition_033.txt with per-trial means and SEMs.
!----------------------------------------------------------------------
program Reacquisition
    use Number_generators
    use MinervaAL_tools
    implicit none

    integer, parameter :: N_cues     = 10,             &  ! cues in the cue matrix
                          N_field    = 20,             &  ! features per cue field
                          N_features = N_cues*N_field, &  ! total feature-vector length
                          N_trials   = 50,             &  ! trials per phase
                          N_subjects = 100,            &
                          N_phases   = 5                  ! 1 = acq., 2 = ext., 3-5 = reacq.

    real, parameter :: L = 0.33                           ! learning (encoding) rate: p(feature encoded)

    integer, parameter :: Out_unit = 20                   ! avoid preconnected low-numbered units

    real :: Echo(N_features),                      &
            Probe(N_features),                     &
            Cue_matrix(N_features, N_cues),        &
            Memory(N_features, N_trials*N_phases), &
            Summary(N_trials*N_phases, N_subjects)    ! response strength per trial x subject

    integer :: i, j, k, v, N_traces

    call RandSeed
    call Get_stimuli(Cue_matrix, N_cues)

    Summary = 0.0

    do i = 1, N_subjects
        N_traces = 0
        Memory   = 0.0
        do j = 1, N_phases
            do k = 1, N_trials

                !---------------------------------------------------
                ! Construct the probe for the current learning
                ! phase: cue 1 = A, cue 9 = Context, cue 10 =
                ! Outcome (X).
                !---------------------------------------------------
                Probe = 0.0
                if (j == 1) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Acquisition:   A+
                if (j == 2) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9)                     ! Extinction:    A-
                if (j >= 3) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Reacquisition: A+

                !---------------------------------------------------
                ! Get the echo for the probe with 0.001 noise added
                !---------------------------------------------------
                call Get_echo(Echo, Probe, Memory, N_traces, 0.001)

                !---------------------------------------------------
                ! Increment N_traces, store the response strength
                ! (mean echo activation over the outcome field --
                ! a shorthand of formula 5 in Jamieson et al.,
                ! 2012), and encode the trial's expectancy
                ! discrepancy with probability L per feature.
                !---------------------------------------------------
                N_traces = N_traces + 1
                Summary(N_traces, i) = Sum(Echo(N_features-N_field+1:N_features))/N_field
                do v = 1, N_features
                    if (flat(1.0) < L) Memory(v, N_traces) = Probe(v) - Echo(v)
                enddo

            enddo
        enddo
    enddo

    !---------------------------------------------------
    ! Write the results of the simulation to a file as a
    ! matrix of N_subjects rows by trial columns, followed
    ! by the per-trial mean (M) and standard error (SEM).
    !---------------------------------------------------
    Open(Out_unit, file='results/Reacquisition_033.txt')
    write(Out_unit,*)
    write(Out_unit,'(A, F5.2)') 'Learning rate = ', L
    write(Out_unit,*)
    do i = 1, N_subjects
        ! A3 (not A2) so the 3-character label 'Ss ' is not truncated
        write(Out_unit, '(A3, I4, 1000F8.4)') 'Ss ', i, Summary(:, i)
    enddo
    write(Out_unit,*)
    write(Out_unit,'(A6)',advance='no') 'M'
    do i = 1, N_traces
        write(Out_unit, '(1000F8.4)',advance='no') Mean(Summary(i,:), N_subjects)
    enddo
    write(Out_unit,*)
    write(Out_unit,'(A6)',advance='no') 'SEM'
    do i = 1, N_traces
        write(Out_unit, '(1000F8.4)',advance='no') SEM(Summary(i,:), N_subjects)
    enddo
    write(Out_unit,*)
    Close(Out_unit)

END PROGRAM Reacquisition
99 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/Reacquisition_067.f90:
--------------------------------------------------------------------------------
!----------------------------------------------------------------------
! Reacquisition (learning rate L = 0.67)
!
! Simulates acquisition (A+), extinction (A-), and three phases of
! reacquisition (A+) under the MINERVA-AL model (Jamieson, Crump, &
! Hannah, 2012).  Response strength on every trial is written to
! results/Reacquisition_067.txt with per-trial means and SEMs.
!----------------------------------------------------------------------
program Reacquisition
    use Number_generators
    use MinervaAL_tools
    implicit none

    integer, parameter :: N_cues     = 10,             &  ! cues in the cue matrix
                          N_field    = 20,             &  ! features per cue field
                          N_features = N_cues*N_field, &  ! total feature-vector length
                          N_trials   = 50,             &  ! trials per phase
                          N_subjects = 100,            &
                          N_phases   = 5                  ! 1 = acq., 2 = ext., 3-5 = reacq.

    real, parameter :: L = 0.67                           ! learning (encoding) rate: p(feature encoded)

    integer, parameter :: Out_unit = 20                   ! avoid preconnected low-numbered units

    real :: Echo(N_features),                      &
            Probe(N_features),                     &
            Cue_matrix(N_features, N_cues),        &
            Memory(N_features, N_trials*N_phases), &
            Summary(N_trials*N_phases, N_subjects)    ! response strength per trial x subject

    integer :: i, j, k, v, N_traces

    call RandSeed
    call Get_stimuli(Cue_matrix, N_cues)

    Summary = 0.0

    do i = 1, N_subjects
        N_traces = 0
        Memory   = 0.0
        do j = 1, N_phases
            do k = 1, N_trials

                !---------------------------------------------------
                ! Construct the probe for the current learning
                ! phase: cue 1 = A, cue 9 = Context, cue 10 =
                ! Outcome (X).
                !---------------------------------------------------
                Probe = 0.0
                if (j == 1) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Acquisition:   A+
                if (j == 2) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9)                     ! Extinction:    A-
                if (j >= 3) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Reacquisition: A+

                !---------------------------------------------------
                ! Get the echo for the probe with 0.001 noise added
                !---------------------------------------------------
                call Get_echo(Echo, Probe, Memory, N_traces, 0.001)

                !---------------------------------------------------
                ! Increment N_traces, store the response strength
                ! (mean echo activation over the outcome field --
                ! a shorthand of formula 5 in Jamieson et al.,
                ! 2012), and encode the trial's expectancy
                ! discrepancy with probability L per feature.
                !---------------------------------------------------
                N_traces = N_traces + 1
                Summary(N_traces, i) = Sum(Echo(N_features-N_field+1:N_features))/N_field
                do v = 1, N_features
                    if (flat(1.0) < L) Memory(v, N_traces) = Probe(v) - Echo(v)
                enddo

            enddo
        enddo
    enddo

    !---------------------------------------------------
    ! Write the results of the simulation to a file as a
    ! matrix of N_subjects rows by trial columns, followed
    ! by the per-trial mean (M) and standard error (SEM).
    !---------------------------------------------------
    Open(Out_unit, file='results/Reacquisition_067.txt')
    write(Out_unit,*)
    write(Out_unit,'(A, F5.2)') 'Learning rate = ', L
    write(Out_unit,*)
    do i = 1, N_subjects
        ! A3 (not A2) so the 3-character label 'Ss ' is not truncated
        write(Out_unit, '(A3, I4, 1000F8.4)') 'Ss ', i, Summary(:, i)
    enddo
    write(Out_unit,*)
    write(Out_unit,'(A6)',advance='no') 'M'
    do i = 1, N_traces
        write(Out_unit, '(1000F8.4)',advance='no') Mean(Summary(i,:), N_subjects)
    enddo
    write(Out_unit,*)
    write(Out_unit,'(A6)',advance='no') 'SEM'
    do i = 1, N_traces
        write(Out_unit, '(1000F8.4)',advance='no') SEM(Summary(i,:), N_subjects)
    enddo
    write(Out_unit,*)
    Close(Out_unit)

END PROGRAM Reacquisition
99 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/Reacquisition_1.f90:
--------------------------------------------------------------------------------
!----------------------------------------------------------------------
! Reacquisition (learning rate L = 1.0)
!
! Simulates acquisition (A+), extinction (A-), and three phases of
! reacquisition (A+) under the MINERVA-AL model (Jamieson, Crump, &
! Hannah, 2012).  Response strength on every trial is written to
! results/Reacquisition_1.txt with per-trial means and SEMs.
!----------------------------------------------------------------------
program Reacquisition
    use Number_generators
    use MinervaAL_tools
    implicit none

    integer, parameter :: N_cues     = 10,             &  ! cues in the cue matrix
                          N_field    = 20,             &  ! features per cue field
                          N_features = N_cues*N_field, &  ! total feature-vector length
                          N_trials   = 50,             &  ! trials per phase
                          N_subjects = 100,            &
                          N_phases   = 5                  ! 1 = acq., 2 = ext., 3-5 = reacq.

    real, parameter :: L = 1.0                            ! learning (encoding) rate: p(feature encoded)

    integer, parameter :: Out_unit = 20                   ! avoid preconnected low-numbered units

    real :: Echo(N_features),                      &
            Probe(N_features),                     &
            Cue_matrix(N_features, N_cues),        &
            Memory(N_features, N_trials*N_phases), &
            Summary(N_trials*N_phases, N_subjects)    ! response strength per trial x subject

    integer :: i, j, k, v, N_traces

    call RandSeed
    call Get_stimuli(Cue_matrix, N_cues)

    Summary = 0.0

    do i = 1, N_subjects
        N_traces = 0
        Memory   = 0.0
        do j = 1, N_phases
            do k = 1, N_trials

                !---------------------------------------------------
                ! Construct the probe for the current learning
                ! phase: cue 1 = A, cue 9 = Context, cue 10 =
                ! Outcome (X).
                !---------------------------------------------------
                Probe = 0.0
                if (j == 1) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Acquisition:   A+
                if (j == 2) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9)                     ! Extinction:    A-
                if (j >= 3) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Reacquisition: A+

                !---------------------------------------------------
                ! Get the echo for the probe with 0.001 noise added
                !---------------------------------------------------
                call Get_echo(Echo, Probe, Memory, N_traces, 0.001)

                !---------------------------------------------------
                ! Increment N_traces, store the response strength
                ! (mean echo activation over the outcome field --
                ! a shorthand of formula 5 in Jamieson et al.,
                ! 2012), and encode the trial's expectancy
                ! discrepancy with probability L per feature.
                !---------------------------------------------------
                N_traces = N_traces + 1
                Summary(N_traces, i) = Sum(Echo(N_features-N_field+1:N_features))/N_field
                do v = 1, N_features
                    if (flat(1.0) < L) Memory(v, N_traces) = Probe(v) - Echo(v)
                enddo

            enddo
        enddo
    enddo

    !---------------------------------------------------
    ! Write the results of the simulation to a file as a
    ! matrix of N_subjects rows by trial columns, followed
    ! by the per-trial mean (M) and standard error (SEM).
    !---------------------------------------------------
    Open(Out_unit, file='results/Reacquisition_1.txt')
    write(Out_unit,*)
    write(Out_unit,'(A, F5.2)') 'Learning rate = ', L
    write(Out_unit,*)
    do i = 1, N_subjects
        ! A3 (not A2) so the 3-character label 'Ss ' is not truncated
        write(Out_unit, '(A3, I4, 1000F8.4)') 'Ss ', i, Summary(:, i)
    enddo
    write(Out_unit,*)
    write(Out_unit,'(A6)',advance='no') 'M'
    do i = 1, N_traces
        write(Out_unit, '(1000F8.4)',advance='no') Mean(Summary(i,:), N_subjects)
    enddo
    write(Out_unit,*)
    write(Out_unit,'(A6)',advance='no') 'SEM'
    do i = 1, N_traces
        write(Out_unit, '(1000F8.4)',advance='no') SEM(Summary(i,:), N_subjects)
    enddo
    write(Out_unit,*)
    Close(Out_unit)

END PROGRAM Reacquisition
99 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/Reacquisition_control_033.f90:
--------------------------------------------------------------------------------
!----------------------------------------------------------------------
! Reacquisition control (learning rate L = 0.33)
!
! Control condition: acquisition (A+) and extinction (A-) of cue A,
! followed by three phases of acquisition of a NOVEL cue B (B+), under
! the MINERVA-AL model (Jamieson, Crump, & Hannah, 2012).  Response
! strength on every trial is written to
! results/Reacquisition_control_033.txt with per-trial means and SEMs.
!----------------------------------------------------------------------
program Reacquisition
    use Number_generators
    use MinervaAL_tools
    implicit none

    integer, parameter :: N_cues     = 10,             &  ! cues in the cue matrix
                          N_field    = 20,             &  ! features per cue field
                          N_features = N_cues*N_field, &  ! total feature-vector length
                          N_trials   = 50,             &  ! trials per phase
                          N_subjects = 100,            &
                          N_phases   = 5                  ! 1 = acq., 2 = ext., 3-5 = novel-cue control

    real, parameter :: L = 0.33                           ! learning (encoding) rate: p(feature encoded)

    integer, parameter :: Out_unit = 20                   ! avoid preconnected low-numbered units

    real :: Echo(N_features),                      &
            Probe(N_features),                     &
            Cue_matrix(N_features, N_cues),        &
            Memory(N_features, N_trials*N_phases), &
            Summary(N_trials*N_phases, N_subjects)    ! response strength per trial x subject

    integer :: i, j, k, v, N_traces

    call RandSeed
    call Get_stimuli(Cue_matrix, N_cues)

    Summary = 0.0

    do i = 1, N_subjects
        N_traces = 0
        Memory   = 0.0
        do j = 1, N_phases
            do k = 1, N_trials

                !---------------------------------------------------
                ! Construct the probe for the current learning
                ! phase: cue 1 = A, cue 2 = B (novel control cue),
                ! cue 9 = Context, cue 10 = Outcome (X).
                !---------------------------------------------------
                Probe = 0.0
                if (j == 1) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Acquisition: A+
                if (j == 2) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9)                     ! Extinction:  A-
                if (j >= 3) Probe(:) = Cue_matrix(:, 2) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Control:     B+ (novel cue)

                !---------------------------------------------------
                ! Get the echo for the probe with 0.001 noise added
                !---------------------------------------------------
                call Get_echo(Echo, Probe, Memory, N_traces, 0.001)

                !---------------------------------------------------
                ! Increment N_traces, store the response strength
                ! (mean echo activation over the outcome field --
                ! a shorthand of formula 5 in Jamieson et al.,
                ! 2012), and encode the trial's expectancy
                ! discrepancy with probability L per feature.
                !---------------------------------------------------
                N_traces = N_traces + 1
                Summary(N_traces, i) = Sum(Echo(N_features-N_field+1:N_features))/N_field
                do v = 1, N_features
                    if (flat(1.0) < L) Memory(v, N_traces) = Probe(v) - Echo(v)
                enddo

            enddo
        enddo
    enddo

    !---------------------------------------------------
    ! Write the results of the simulation to a file as a
    ! matrix of N_subjects rows by trial columns, followed
    ! by the per-trial mean (M) and standard error (SEM).
    !---------------------------------------------------
    Open(Out_unit, file='results/Reacquisition_control_033.txt')
    write(Out_unit,*)
    write(Out_unit,'(A, F5.2)') 'Learning rate = ', L
    write(Out_unit,*)
    do i = 1, N_subjects
        ! A3 (not A2) so the 3-character label 'Ss ' is not truncated
        write(Out_unit, '(A3, I4, 1000F8.4)') 'Ss ', i, Summary(:, i)
    enddo
    write(Out_unit,*)
    write(Out_unit,'(A6)',advance='no') 'M'
    do i = 1, N_traces
        write(Out_unit, '(1000F8.4)',advance='no') Mean(Summary(i,:), N_subjects)
    enddo
    write(Out_unit,*)
    write(Out_unit,'(A6)',advance='no') 'SEM'
    do i = 1, N_traces
        write(Out_unit, '(1000F8.4)',advance='no') SEM(Summary(i,:), N_subjects)
    enddo
    write(Out_unit,*)
    Close(Out_unit)

END PROGRAM Reacquisition
99 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/Reacquisition_control_067.f90:
--------------------------------------------------------------------------------
1 | !------------------------------------
2 | ! Acquisition and Extinction
3 | !------------------------------------
4 | program Reacquisition
5 | use Number_generators
6 | use MinervaAL_tools
7 | implicit none
8 |
9 | integer, parameter :: N_cues = 10, &
10 | N_field = 20, &
11 | N_features = N_cues*N_field, &
12 | N_trials = 50, &
13 | N_subjects = 100, &
14 | N_phases = 5
15 |
16 | real, parameter :: L = 0.67
17 |
18 | real :: Echo(N_features), &
19 | Probe(N_features), &
20 | Cue_matrix(N_features, N_cues), &
21 | Memory(N_features, N_trials*N_phases), &
22 | Summary(N_trials*N_phases, N_subjects)
23 |
24 | integer :: i, &
25 | j, &
26 | k, &
27 | v, &
28 | N_traces
29 |
30 |
31 | call RandSeed
32 | call Get_stimuli(Cue_matrix, N_cues)
33 |
34 | Summary = 0.0
35 |
36 | do i = 1, N_subjects
37 | N_traces = 0
38 | Memory = 0.0
39 | do j = 1, N_phases
40 | do k = 1, N_trials
41 |
42 | !---------------------------------------------------
43 | ! Construct the probe to be relevant for the
44 | ! current learning phase: 1 = A, 9 = Context,
45 | ! and 10 = Outcome (X). For example, j == 1 is an
46 | ! A+ trial whereas j == 2 is an A- trial
47 | !---------------------------------------------------
48 | Probe = 0.0
49 | if (j == 1) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Acquisition
50 | if (j == 2) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) ! Extinction
51 | if (j == 3) Probe(:) = Cue_matrix(:, 2) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Reacquisition
52 | if (j == 4) Probe(:) = Cue_matrix(:, 2) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Reacquisition
53 | if (j == 5) Probe(:) = Cue_matrix(:, 2) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Reacquisition
54 |
55 | !---------------------------------------------------
56 | ! Get the echo for the probe with 0.001 noise added
57 | !---------------------------------------------------
58 | call Get_echo(Echo, Probe, Memory, N_traces, 0.001)
59 |
60 | !---------------------------------------------------
61 | ! Increment N_traces, store the response strength,
62 | ! and encode memory for the trial
63 | !---------------------------------------------------
64 | N_traces = N_traces + 1
65 | Summary(N_traces, i) = Sum(Echo(181:200))/N_field ! A shorthand of formula 5 in Jamieson et al. (2012)
66 | do v = 1, N_features
67 | if (flat(1.0) < L) Memory(v, N_traces) = Probe(v) - Echo(v)
68 | enddo
69 |
70 | enddo
71 | enddo
72 | enddo
73 |
74 | !---------------------------------------------------
75 | ! Write the results of the simulation to a file
76 | ! as a matrix of N_subjects rows by N_trials columns
77 | !---------------------------------------------------
78 | Open(1, file='results/Reacquisition_control_067.txt')
79 | write(1,*)
80 | write(1,'(A, F5.2)') 'Learning rate = ', L
81 | write(1,*)
82 | do i = 1, N_subjects
83 | write(1, '(A2, I4, 1000F8.4)') 'Ss ', i, Summary(:, i)
84 | enddo
85 | write(1,*)
86 | write(1,'(A6)',advance='no') 'M'
87 | do i = 1, N_traces
88 | write(1, '(1000F8.4)',advance='no') Mean(Summary(i,:), N_subjects)
89 | enddo
90 | write(1,*)
91 | write(1,'(A6)',advance='no') 'SEM'
92 | do i = 1, N_traces
93 | write(1, '(1000F8.4)',advance='no') SEM(Summary(i,:), N_subjects)
94 | enddo
95 | write(1,*)
96 | Close(1)
97 |
98 | END PROGRAM Reacquisition
99 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/Reacquisition_control_1.f90:
--------------------------------------------------------------------------------
1 | !------------------------------------
2 | ! Acquisition and Extinction
3 | !------------------------------------
4 | program Reacquisition
5 | use Number_generators
6 | use MinervaAL_tools
7 | implicit none
8 |
9 | integer, parameter :: N_cues = 10, &
10 | N_field = 20, &
11 | N_features = N_cues*N_field, &
12 | N_trials = 50, &
13 | N_subjects = 100, &
14 | N_phases = 5
15 |
16 | real, parameter :: L = 1
17 |
18 | real :: Echo(N_features), &
19 | Probe(N_features), &
20 | Cue_matrix(N_features, N_cues), &
21 | Memory(N_features, N_trials*N_phases), &
22 | Summary(N_trials*N_phases, N_subjects)
23 |
24 | integer :: i, &
25 | j, &
26 | k, &
27 | v, &
28 | N_traces
29 |
30 |
31 | call RandSeed
32 | call Get_stimuli(Cue_matrix, N_cues)
33 |
34 | Summary = 0.0
35 |
36 | do i = 1, N_subjects
37 | N_traces = 0
38 | Memory = 0.0
39 | do j = 1, N_phases
40 | do k = 1, N_trials
41 |
42 | !---------------------------------------------------
43 | ! Construct the probe to be relevant for the
44 | ! current learning phase: 1 = A, 9 = Context,
45 | ! and 10 = Outcome (X). For example, j == 1 is an
46 | ! A+ trial whereas j == 2 is an A- trial
47 | !---------------------------------------------------
48 | Probe = 0.0
49 | if (j == 1) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Acquisition
50 | if (j == 2) Probe(:) = Cue_matrix(:, 1) + Cue_matrix(:, 9) ! Extinction
51 | if (j == 3) Probe(:) = Cue_matrix(:, 2) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Reacquisition
52 | if (j == 4) Probe(:) = Cue_matrix(:, 2) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Reacquisition
53 | if (j == 5) Probe(:) = Cue_matrix(:, 2) + Cue_matrix(:, 9) + Cue_matrix(:, 10) ! Reacquisition
54 |
55 | !---------------------------------------------------
56 | ! Get the echo for the probe with 0.001 noise added
57 | !---------------------------------------------------
58 | call Get_echo(Echo, Probe, Memory, N_traces, 0.001)
59 |
60 | !---------------------------------------------------
61 | ! Increment N_traces, store the response strength,
62 | ! and encode memory for the trial
63 | !---------------------------------------------------
64 | N_traces = N_traces + 1
65 | Summary(N_traces, i) = Sum(Echo(181:200))/N_field ! A shorthand of formula 5 in Jamieson et al. (2012)
66 | do v = 1, N_features
67 | if (flat(1.0) < L) Memory(v, N_traces) = Probe(v) - Echo(v)
68 | enddo
69 |
70 | enddo
71 | enddo
72 | enddo
73 |
74 | !---------------------------------------------------
75 | ! Write the results of the simulation to a file
76 | ! as a matrix of N_subjects rows by N_trials columns
77 | !---------------------------------------------------
78 | Open(1, file='results/Reacquisition_control_1.txt')
79 | write(1,*)
80 | write(1,'(A, F5.2)') 'Learning rate = ', L
81 | write(1,*)
82 | do i = 1, N_subjects
83 | write(1, '(A2, I4, 1000F8.4)') 'Ss ', i, Summary(:, i)
84 | enddo
85 | write(1,*)
86 | write(1,'(A6)',advance='no') 'M'
87 | do i = 1, N_traces
88 | write(1, '(1000F8.4)',advance='no') Mean(Summary(i,:), N_subjects)
89 | enddo
90 | write(1,*)
91 | write(1,'(A6)',advance='no') 'SEM'
92 | do i = 1, N_traces
93 | write(1, '(1000F8.4)',advance='no') SEM(Summary(i,:), N_subjects)
94 | enddo
95 | write(1,*)
96 | Close(1)
97 |
98 | END PROGRAM Reacquisition
99 |
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/acquisition_033:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva-al/reference_implementation/acquisition_033
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/acquisition_067:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva-al/reference_implementation/acquisition_067
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/acquisition_1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva-al/reference_implementation/acquisition_1
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/make.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Build and run the Minerva-AL reference simulations for each
# encoding probability (L = 1, 0.67, 0.33).
# Abort immediately if any compile or run fails, so stale
# binaries are never executed with fresh-looking results.
set -e

# The Fortran programs write to results/<name>.txt; make sure
# the directory exists before running them.
mkdir -p results

gfortran Number_generators.f90 MinervaAL_tools.f90 Acquisition_1.f90 -o acquisition_1
gfortran Number_generators.f90 MinervaAL_tools.f90 Acquisition_067.f90 -o acquisition_067
gfortran Number_generators.f90 MinervaAL_tools.f90 Acquisition_033.f90 -o acquisition_033

gfortran Number_generators.f90 MinervaAL_tools.f90 Reacquisition_1.f90 -o reacquisition_1
gfortran Number_generators.f90 MinervaAL_tools.f90 Reacquisition_067.f90 -o reacquisition_067
gfortran Number_generators.f90 MinervaAL_tools.f90 Reacquisition_033.f90 -o reacquisition_033

gfortran Number_generators.f90 MinervaAL_tools.f90 Reacquisition_control_1.f90 -o reacquisition_control_1
gfortran Number_generators.f90 MinervaAL_tools.f90 Reacquisition_control_067.f90 -o reacquisition_control_067
gfortran Number_generators.f90 MinervaAL_tools.f90 Reacquisition_control_033.f90 -o reacquisition_control_033

./acquisition_1
./acquisition_067
./acquisition_033

./reacquisition_1
./reacquisition_067
./reacquisition_033

./reacquisition_control_1
./reacquisition_control_067
./reacquisition_control_033
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/minervaal_tools.mod:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva-al/reference_implementation/minervaal_tools.mod
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/number_generators.mod:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva-al/reference_implementation/number_generators.mod
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/reacquisition_033:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva-al/reference_implementation/reacquisition_033
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/reacquisition_067:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva-al/reference_implementation/reacquisition_067
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/reacquisition_1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva-al/reference_implementation/reacquisition_1
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/reacquisition_control_033:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva-al/reference_implementation/reacquisition_control_033
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/reacquisition_control_067:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva-al/reference_implementation/reacquisition_control_067
--------------------------------------------------------------------------------
/minerva-al/reference_implementation/reacquisition_control_1:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva-al/reference_implementation/reacquisition_control_1
--------------------------------------------------------------------------------
/minerva-al/reproduce_jamieson_etal_2012.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Reproduction of Simulation 1 by Jamieson, Crump & Hannah (2012)"
3 | author: "Frederik Aust"
4 | date: "13.1.2015"
5 | output:
6 | html_document:
7 | theme: spacelab
8 | code_folding: show
9 | toc: yes
10 | toc_float: yes
11 | ---
12 |
13 | To validate this implementation of Minerva-AL, I reproduced a simulation study reported in Jamieson, Crump & Hannah (2012).
14 |
15 | ```{r echo = FALSE}
library("dplyr")
library("tidyr")

# Compile and run the Fortran reference implementation so its
# result files exist for the comparisons below (Unix toolchain only).
if(.Platform$OS.type == "unix") {
system("cd reference_implementation; sh make.sh")
}

# Load the R implementation of Minerva-AL that is being validated.
source("minerva-al.R")
25 | ```
26 |
27 |
28 |
29 | # Simulation of acquisition and extinction (Section 1)
30 |
31 | I created cue, outcome and context vectors and defined the number of trials and replications according to the specifications in the paper.
32 |
33 | ```{r setup-events}
# Feature layout of the 120-feature event vectors:
# cue A = 1:20, context = 81:100, outcome X = 101:120.
# cue_features (1:100) are the positions used when probing memory.
n_features <- 120
cue_features <- 1:100
a <- context <- outcome <- rep(0, n_features)

a[1:20] <- 1
outcome[101:120] <- 1
context[81:100] <- 1

# A+ trials present cue, context and outcome; A- (extinction)
# trials and the retrieval probe omit the outcome.
acquisition_event <- a + context + outcome
extinction_event <- probe <- a + context
44 | ```
45 |
46 | ```{r setup-simulation}
n_replications <- 100
n_trials <- 200

# Encoding probabilities L examined by Jamieson et al. (2012).
p_encode <- c(1/3, 2/3, 1)
51 | ```
52 |
53 | ```{r simulate-acquisition}
# Accumulates the expectancy of outcome X on each trial; one row
# per encoding probability, summed over replications then averaged.
sim_results <- matrix(0, ncol = n_trials, nrow = length(p_encode))

for (r in 1:n_replications) {
for (i in seq_along(p_encode)) { # was hard-coded 1:3; track p_encode instead
# Memory is empty on first trial
normalized_echo <- probe_memory(probe, NULL, cue_features)
expectancy <- expect_event(outcome, normalized_echo)
memory <- learn(
normalized_echo
, acquisition_event
, p_encode[i]
, NULL
)

# Acquisition trials (first half of the session, A+)
for(j in 2:(n_trials / 2)) {
normalized_echo <- probe_memory(probe, memory, cue_features)
expectancy <- c(expectancy, expect_event(outcome, normalized_echo))
memory <- learn(
normalized_echo
, acquisition_event
, p_encode[i]
, memory
)
}

# Extinction trials (second half of the session, A-)
for(j in ((n_trials / 2) + 1):n_trials) {
normalized_echo <- probe_memory(probe, memory, cue_features)
expectancy <- c(expectancy, expect_event(outcome, normalized_echo))
memory <- learn(
normalized_echo
, extinction_event
, p_encode[i]
, memory
)
}

sim_results[i, ] <- sim_results[i, ] + expectancy
}
}

sim_results <- sim_results / n_replications # Mean of replications
97 | ```
98 |
99 |
100 |
101 | ## Results
102 |
The resulting expectancies correspond nicely to those reported by Jamieson, Crump & Hannah (2012) in Figure 1.
104 |
105 | ```{r plot-acquisition, fig.height = 5.5, fig.width = 9.5, echo = FALSE}
# Empty frame, then one line with markers per encoding probability
# (reproduces Figure 1 of Jamieson et al., 2012).
plot(
1:200
, rep(NA, 200)
, ylim = c(0, 1)
, lwd = 2.5
, xlab = "Trial"
, ylab = "Retrieval of X given A"
, las = 1
)

matlines(
t(sim_results)
, lwd = 2.5
, col = "black"
, lty = 1
)

# Distinct point symbols distinguish the three encoding probabilities.
matpoints(
t(sim_results)
, pch = c(24, 22, 21)
, bg = "white"
, col = "black"
, cex = 1.25
)
130 | ```
131 |
132 | As a comparison, the following plot shows the results from the reference implementation I received from Randall Jamieson (thanks!).
133 |
134 | ```{r}
# Read the Fortran reference results and average per learning rate.
acquisition_files <- list.files(path = "reference_implementation/results", pattern = "Acquisition", full.names = TRUE)

# Each file: 3 header lines, then one row per simulated subject
# ('Ss', id, one value per trial); whitespace-separated.
acquisition_extinction <- lapply(
acquisition_files
, read.delim
, skip = 3
, nrows = 24
, sep = ""
, header = FALSE
) %>%
setNames(
# Turn the filename suffix into a decimal label, e.g. "033" -> "0.33"
# (relies on "0" occurring only as the leading character).
nm = stringr::str_extract(basename(acquisition_files), "\\d+") %>%
gsub("0", "0.", .)
) %>%
bind_rows(.id = "L") %>%
mutate(V2 = as.factor(V2)) %>%
group_by(L) %>%
summarise_if(is.numeric, mean)
153 | ```
154 |
155 | ```{r plot-acquisition2, fig.height = 5.5, fig.width = 9.5, echo = FALSE}
# Same plot as above, but for the Fortran reference results.
plot(
1:200
, rep(NA, 200)
, ylim = c(0, 1)
, lwd = 2.5
, xlab = "Trial"
, ylab = "Retrieval of X given A"
, las = 1
)

# Drop the grouping column L; remaining columns are the trial means.
matlines(
t(as.matrix(acquisition_extinction[, -1]))
, lwd = 2.5
, col = "black"
, lty = 1
)

matpoints(
t(as.matrix(acquisition_extinction[, -1]))
, pch = c(24, 22, 21)
, bg = "white"
, col = "black"
, cex = 1.25
)
180 | ```
181 |
182 | ```{r}
# Trial-wise difference between this implementation and the
# Fortran reference implementation (signed; no abs() is taken).
prediction_differences <- sim_results - as.matrix(acquisition_extinction[, -1])

plot(
1:200
, rep(NA, 200)
, ylim = c(-0.2, 0.2)
, lwd = 2.5
, xlab = "Trial"
, ylab = "Difference" # fixed mislabel/typo "Absolut difference" (values are signed)
, las = 1
)

matlines(
t(prediction_differences)
, col = "black"
, lty = c(1, 3, 5)
)

legend("topright", inset = 0.05, legend = unlist(acquisition_extinction[, 1]), lty = c(1, 3, 5))

summary(as.vector(prediction_differences))
204 | ```
205 |
206 | Additionally, the following plots visualize the information encoded for cue and outcome features across acquisition and extinction trials. The data are taken from one of the `r n_replications` simulations with encoding probability $L = 1$.
207 |
208 | ```{r plot-encoding, echo = FALSE}
# Encoded feature values across trials for the LAST simulation run
# (p_encode = 1, since that was the final value of the i loop).
# Cue A occupies features 1:20.
plot(
1:200
, memory[, 1]
, type = "l"
, col = scales::alpha("black", 0.3)
, ylim = c(-2, 2)
, xlab = "Trial"
, ylab = "Feature encoding"
, main = "Features of cue A"
, las = 1
)
for(i in 2:20) {
lines(
1:200
, memory[, i]
, col = scales::alpha("black", 0.3)
)
}


# Outcome X occupies features 101:120.
plot(
1:200
, memory[, 101]
, type = "l"
, col = scales::alpha("black", 0.3)
, ylim = c(-2, 2)
, xlab = "Trial"
, ylab = "Feature encoding"
, main = "Features of outcome X"
, las = 1
)
for(i in 102:120) {
lines(
1:200
, memory[, i]
, col = scales::alpha("black", 0.3)
)
}
247 | ```
248 |
249 |
250 | # Simulation of reacquisition (Section 1)
251 |
252 | ```{r}
# Novel control cue B occupies features 21:40; the control
# condition pairs B (instead of A) with the outcome.
b <- rep(0, n_features)

b[21:40] <- 1
control_event <- b + context + outcome
control_probe <- b + context
258 | ```
259 |
260 | ```{r}
# Simulation size for the reacquisition section.
n_replications <- 100
n_trials <- 200
263 | ```
264 |
265 |
266 | ```{r simulate-reacquisition}
# One row per replication, blocked by encoding probability:
# rows (i-1)*n_replications + 1 ... i*n_replications hold the
# n_replications runs for p_encode[i].
reacquisition_sim_results <- control_sim_results <- matrix(NA, ncol = n_trials, nrow = length(p_encode) * n_replications)

for (i in 1:length(p_encode)) {
for (r in 1:n_replications) {
# Memory is empty on first trial
normalized_echo <- probe_memory(probe, NULL, cue_features)
expectancy <- expect_event(outcome, normalized_echo)
memory <- learn(
normalized_echo
, acquisition_event
, p_encode[i]
, NULL
)

# Acquisition trials (2-50, A+)
for(j in 2:50) {
normalized_echo <- probe_memory(probe, memory, cue_features)
expectancy <- c(expectancy, expect_event(outcome, normalized_echo))
memory <- learn(
normalized_echo
, acquisition_event
, p_encode[i]
, memory
)
}

# Extinction trials (51-100, A-)
for(j in 51:100) {
normalized_echo <- probe_memory(probe, memory, cue_features)
expectancy <- c(expectancy, expect_event(outcome, normalized_echo))
memory <- learn(
normalized_echo
, extinction_event
, p_encode[i]
, memory
)
}

# Reacquisition trials (101-200): continue from the post-extinction
# memory, retraining A+
reacquisition_memory <- memory
reacquisition_expectancy <- expectancy

for(j in 101:200) {
normalized_echo <- probe_memory(probe, reacquisition_memory, cue_features)
reacquisition_expectancy <- c(reacquisition_expectancy, expect_event(outcome, normalized_echo))
reacquisition_memory <- learn(
normalized_echo
, acquisition_event
, p_encode[i]
, reacquisition_memory
)
}

# Control trials (101-200): same starting memory, but training the
# novel cue B+ instead of A+
control_memory <- memory
control_expectancy <- expectancy

for(j in 101:200) {
normalized_echo <- probe_memory(control_probe, control_memory, cue_features)
control_expectancy <- c(control_expectancy, expect_event(outcome, normalized_echo))
control_memory <- learn(
normalized_echo
, control_event
, p_encode[i]
, control_memory
)
}

reacquisition_sim_results[(i-1)*n_replications + r, ] <- reacquisition_expectancy
control_sim_results[(i-1)*n_replications + r, ] <- control_expectancy
}
}
339 | ```
340 |
341 | ## Results
342 |
343 | ```{r fig.height = 5.5, fig.width = 9.5, echo = FALSE}
# Mean expectancy from trial 51 on, solid = reacquisition,
# dashed = control, one pair of lines per encoding probability.
plot(
51:200
, rep(0, 150)
, pch = NA
, ylim = c(0, 1)
, lwd = 2.5
, xlab = "Trial"
, ylab = "Retrieval of X given cue"
, las = 1
) # removed stray trailing comma argument

# Mastery criterion used below.
abline(h = 0.95, col = "grey")

for(i in seq_along(p_encode)) {
lines(51:200, colMeans(reacquisition_sim_results[(1:n_replications) + (i-1)*n_replications, 51:200]), lwd = 2)
lines(51:200, colMeans(control_sim_results[(1:n_replications) + (i-1)*n_replications, 51:200]), lty = "dashed")
}


# Format "mean (SEM)" for table display.
mean_se <- function(x) paste0(round(mean(x), 2), " (", round(sd(x) / sqrt(length(x)), 2), ")")

# One column per encoding probability (was hard-coded ncol = 3);
# rows: reacquisition, control.
reacquisition_results <- matrix(NA, ncol = length(p_encode), nrow = 2)

# Trials needed to reach the 0.95 criterion in the final phase (trials 101-200).
for(i in seq_along(p_encode)) {
reacquisition_results[, i] <- c(
mean_se(apply(reacquisition_sim_results[(1:n_replications) + (i-1)*n_replications, 101:200], 1, function(x) min(which(x >= 0.95))))
, mean_se(apply(control_sim_results[(1:n_replications) + (i-1)*n_replications, 101:200], 1, function(x) min(which(x >= 0.95))))
)
}

knitr::kable(cbind(Condition = c("Reacquisition", "Control"), reacquisition_results), col.names = c("Condition", round(p_encode, 2))) # fixed "Condtion" typo
376 | ```
377 |
378 | As a comparison, the following plot shows the results from the reference implementation I received from Randall Jamieson (thanks!).
379 |
380 | ```{r}
# Reference reacquisition results: "Reacquisition_" followed by digits
# selects the non-control files only.
reacquisition_files <- list.files(path = "reference_implementation/results", pattern = "Reacquisition\\_\\d+", full.names = TRUE)

reacquisition <- lapply(
reacquisition_files
, read.delim
, skip = 3
, nrows = 24
, sep = ""
, header = FALSE
) %>%
setNames(
# Filename suffix -> decimal label, e.g. "067" -> "0.67".
nm = stringr::str_extract(basename(reacquisition_files), "\\d+") %>%
gsub("0", "0.", .)
) %>%
bind_rows(.id = "L") %>%
mutate(V2 = as.factor(V2))

# Per-trial means by learning rate L.
mean_reacquisition <- reacquisition %>%
group_by(L) %>%
summarise_if(is.numeric, mean)
401 | ```
402 |
403 | ```{r}
# Control files: "Reacquisition_" followed by a non-digit ("control...").
reacquisition_control_files <- list.files(path = "reference_implementation/results", pattern = "Reacquisition\\_\\D+", full.names = TRUE)

reacquisition_control <- lapply(
reacquisition_control_files
, read.delim
, skip = 3
, nrows = 24
, sep = ""
, header = FALSE
) %>%
setNames(
# Filename suffix -> decimal label, e.g. "067" -> "0.67".
nm = stringr::str_extract(basename(reacquisition_control_files), "\\d+") %>%
gsub("0", "0.", .)
) %>%
bind_rows(.id = "L") %>%
mutate(V2 = as.factor(V2))

# Per-trial means by learning rate L.
mean_reacquisition_control <- reacquisition_control %>%
group_by(L) %>%
summarise_if(is.numeric, mean)
424 | ```
425 |
426 | ```{r plot-reacquisition, fig.height = 5.5, fig.width = 7.5, echo = FALSE}
# Reference implementation: extinction + final phase (trials 51-200);
# solid = reacquisition, dashed = control.
plot(
1:150
, rep(NA, 150)
, ylim = c(0, 1)
, lwd = 2.5
, xlab = "Trial"
, ylab = "Retrieval of X given A"
, las = 1
)

abline(h = 0.95, col = "grey")

# Columns 52:201 are trials 51-200 (column 1 is the L label).
matlines(
cbind(51:200, t(as.matrix(mean_reacquisition[, 52:201])))
, col = "black"
, lty = 1
, lwd = 2
)

matlines(
cbind(51:200, t(as.matrix(mean_reacquisition_control[, 52:201])))
, col = "black"
, lty = 2
)
451 | ```
452 |
453 | ```{r}
# First trial on which expectancy reaches the 0.95 mastery criterion.
# (Removed an unused matrix(NA, ...) assignment that was never read.)
trials_to_master <- function(x) data.frame(n_trials = min(which(x >= 0.95))) # renamed from "trails_to_master"

# Columns of `reacquisition`: L, V1 ("Ss"), V2 (subject id), then one
# column per trial — so trial 101 (start of the final phase) is column 104.
reacquisition_trial_counts <- reacquisition %>%
group_by(L, V2) %>%
do(trials_to_master(.[, 104:ncol(reacquisition)])) %>%
group_by(L) %>%
summarize(n_trials = mean_se(n_trials))

reacquisition_control_trial_counts <- reacquisition_control %>%
group_by(L, V2) %>%
do(trials_to_master(.[, 104:ncol(reacquisition_control)])) %>%
group_by(L) %>%
summarize(n_trials = mean_se(n_trials))

knitr::kable(
bind_rows(Reacquisition = reacquisition_trial_counts, Control = reacquisition_control_trial_counts, .id = "Condition") %>% # fixed "Reacquition" typo
spread(L, n_trials) %>%
arrange(desc(Condition))
)
475 | ```
476 |
The following plot compares initial acquisition (trials 1–50) with reacquisition (trials 101–150).
478 |
479 | ```{r}
# Overlay initial acquisition (dashed, trials 1-50) and
# reacquisition (solid, trials 101-150) for each encoding probability.
plot(
1:50
, rep(0, 50)
, pch = NA
, ylim = c(0, 1)
, lwd = 2.5
, xlab = "Trial"
, ylab = "Retrieval of X given A"
, las = 1
)

abline(h = 0.95, col = "grey")

for(i in 1:length(p_encode)) {
lines(colMeans(reacquisition_sim_results[(1:n_replications) + (i-1)*n_replications, 1:50]), lty = 2)
}

for(i in 1:length(p_encode)) {
lines(colMeans(reacquisition_sim_results[(1:n_replications) + (i-1)*n_replications, 101:150]), lwd = 2)
}
500 | ```
501 |
502 | And the same for the reference implementation.
503 |
504 | ```{r}
# Same comparison for the Fortran reference implementation:
# solid = reacquisition (data columns 102:151 = trials 101-150),
# dashed = initial acquisition (columns 2:51 = trials 1-50).
plot(
1:50
, rep(NA, 50)
, ylim = c(0, 1)
, lwd = 2.5
, xlab = "Trial"
, ylab = "Retrieval of X given A"
, las = 1
)

abline(h = 0.95, col = "grey")

matlines(
cbind(1:50, t(as.matrix(mean_reacquisition[, 102:151])))
, col = "black"
, lty = 1
, lwd = 2
)

matlines(
cbind(1:50, t(as.matrix(mean_reacquisition[, 2:51])))
, col = "black"
, lty = 2
)
529 | ```
530 |
531 |
532 |
533 |
534 | # References
535 |
536 | Jamieson, R. K., Crump, M. J. C., & Hannah, S. D. (2012). An instance theory of associative learning. *Learning & Behavior*, 40(1), 61–82. doi:[10.3758/s13420-011-0046-2](http://dx.doi.org/10.3758/s13420-011-0046-2)
537 |
--------------------------------------------------------------------------------
/minerva2/minerva2.R:
--------------------------------------------------------------------------------
probe_memory <- function(probe, memory, normalize = FALSE) {
  # Probe MINERVA 2 memory and return the echo.
  #
  # probe:     numeric feature vector (entries typically in {-1, 0, 1}).
  # memory:    matrix with one trace per row, ncol == length(probe).
  # normalize: if TRUE, scale the echo content so its maximum absolute
  #            value is 1.
  #
  # Returns a list with `content` (echo content vector, Eq. 4) and
  # `intensity` (summed activation, Eq. 3).
  similarity <- colSums(probe * t(memory)) / colSums((probe != 0 | t(memory) != 0)) # Eq. 1, Hintzman (1984)
  activation <- similarity^3 # Eq. 2, Hintzman (1984)
  echo_intensity <- sum(activation) # Eq. 3, Hintzman (1984)
  echo_content <- colSums(activation * memory) # Eq. 4, Hintzman (1984)

  # Guard against an all-zero echo: 0 / max(abs(0)) would yield NaN.
  if (normalize && max(abs(echo_content)) > 0) {
    echo_content <- echo_content / max(abs(echo_content))
  }

  list(content = echo_content, intensity = echo_intensity)
}
10 |
encode <- function(episode, memory, p_encode) {
  # Append `episode` to `memory` as a new trace, encoding each
  # feature independently with probability `p_encode`; features
  # that fail the Bernoulli draw are stored as 0.
  encoded_mask <- rbinom(length(episode), 1, p_encode)
  rbind(memory, episode * encoded_mask)
}
17 |
forget <- function(memory, p_forget) {
  # Return `memory` with each feature independently retained with
  # probability `p_forget` and zeroed otherwise.
  # NOTE(review): despite the name, `p_forget` acts as the per-feature
  # RETENTION probability — features whose Bernoulli draw is 1 survive.
  retained <- matrix(rbinom(length(memory), 1, p_forget), ncol = ncol(memory))
  memory * retained
}
25 |
--------------------------------------------------------------------------------
/minerva2/reproduce_hintzman_1988.Rmd:
--------------------------------------------------------------------------------
1 | ---
title: "Reproduction of simulations by Hintzman (1988)"
3 | author: "Frederik Aust"
4 | date: "06.02.2015"
5 | output:
6 | html_document:
7 | theme: spacelab
8 | toc: yes
9 | ---
10 |
11 | To validate this implementation of MINERVA2, I reproduced small parts of the simulations reported in Hintzman (1988).
12 |
13 | ```{r echo = FALSE}
# Load the MINERVA 2 implementation under validation.
source("minerva2.R")
15 | ```
16 |
17 |
18 |
19 | # Frequency judgements
20 | For reasons of simplicity, I simulated one subject with 5000 runs to yield sufficiently smooth intensity distribution plots (instead of 1000 subjects with 1000 runs each).
21 |
22 | ```{r setup_simulation}
# Presentation frequencies studied (0 is added later via control items).
frequencies <- 1:5

n_features <- 20
n_items <- 20

# Per-feature encoding probability (applied via forget() below).
p_encode <- 0.5
29 | ```
30 |
31 | ```{r frequency_judgments, cache = TRUE}
# Echo intensities per run; columns: frequency 0 (control) then 1-5.
results <- c()

for(run in 1:5000) {
# Generate items
item_features <- sample(c(-1, 0, 1), n_items * n_features, replace = TRUE)
items <- matrix(item_features, ncol = n_features)

# Four unstudied control items per run (frequency 0).
control_features <- sample(c(-1, 0, 1), 4 * n_features, replace = TRUE)
control_items <- matrix(control_features, ncol = n_features)

item_frequencies <- rep(frequencies, each = 4)

# Set up memory
## Save four items per level of frequency into memory with L = 0.5
memory <- c()
for(i in 1:n_items) {
new_traces <- rep(items[i, ], item_frequencies[i])
new_traces <- matrix(new_traces, ncol = n_features, byrow = TRUE)
memory <- rbind(memory, new_traces)
}
# forget() retains each feature with probability p_encode, so this
# implements probabilistic encoding of the stored traces.
memory <- forget(memory, p_encode)


# Test memory
intensities <- c()

## Control items (frequency = 0)
control_intensity <- apply(control_items, 1, function(x) probe_memory(x, memory)$intensity)
intensities <- cbind(intensities, control_intensity)

## Learned items (frequency = 1 to 5)
for(i in frequencies) {
probes <- items[which(item_frequencies == i), ]

intensity <- apply(probes, 1, function(x) probe_memory(x, memory)$intensity)
intensities <- cbind(intensities, intensity)
}

results <- rbind(results, intensities)
}

colnames(results) <- paste0("freq.", c(0, frequencies))
74 | ```
75 |
76 | ## Results
77 | The resulting echo intensities allow for a close reproduction of Hintzman's Figure 1 (1988).
78 |
79 | ```{r echo = FALSE}
# Density of echo intensities per presentation frequency
# (reproduces Hintzman's 1988 Figure 1): control items first,
# then frequencies 1-5 overlaid.
plot(
density(results[, 1])
, xlim = c(-0.5, 1.5)
, xlab = "Echo Intensity"
, ylab = "Probability"
, main = ""
, axes = FALSE
, lwd = 2
)
invisible(apply(results[, 2:6], 2, function(x) lines(density(x), lwd = 2)))
abline(h = 0, lwd = 2)
axis(1)
box()
93 | ```
94 |
95 |
96 |
97 | # References
98 | Hintzman, D. L. (1988). Judgments of frequency and recognition memory in a multiple-trace memory model. *Psychological Review*, 95(4), 528–551. doi:[10.1037/0033-295X.95.4.528](http://dx.doi.org/10.1037/0033-295X.95.4.528)
99 |
--------------------------------------------------------------------------------
/minerva2/reproduce_hintzman_1988_cache/html/__packages:
--------------------------------------------------------------------------------
1 | base
2 | methods
3 | datasets
4 | utils
5 | grDevices
6 | graphics
7 | stats
8 |
--------------------------------------------------------------------------------
/minerva2/reproduce_hintzman_1988_cache/html/frequency_judgments_a964883965c8d954a4979e3eea604080.rdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva2/reproduce_hintzman_1988_cache/html/frequency_judgments_a964883965c8d954a4979e3eea604080.rdb
--------------------------------------------------------------------------------
/minerva2/reproduce_hintzman_1988_cache/html/frequency_judgments_a964883965c8d954a4979e3eea604080.rdx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva2/reproduce_hintzman_1988_cache/html/frequency_judgments_a964883965c8d954a4979e3eea604080.rdx
--------------------------------------------------------------------------------
/minerva2/reproduce_hintzman_1988_files/figure-html/unnamed-chunk-2-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/crsh/cognitive_models/92bed7d4d4345008c2d63e77b8b9273017dcf244/minerva2/reproduce_hintzman_1988_files/figure-html/unnamed-chunk-2-1.png
--------------------------------------------------------------------------------