├── .gitignore
├── CONTRIBUTING.md
├── LICENSE.txt
├── MANIFEST.in
├── README.md
├── WarpedVisualStim
│   ├── DisplayLogAnalysis.py
│   ├── DisplayStimulus.py
│   ├── MonitorSetup.py
│   ├── StimulusRoutines.py
│   ├── __init__.py
│   ├── examples
│   │   ├── example_combined_stimuli_comprehensive.py
│   │   ├── example_combined_stimuli_minimum.py
│   │   ├── example_drifting_grating_circle.py
│   │   ├── example_drifting_grating_circle_multiple.py
│   │   ├── example_flashing_circle.py
│   │   ├── example_locally_sparse_noise.py
│   │   ├── example_log_analysis.py
│   │   ├── example_minimum.py
│   │   ├── example_retinotopic_mapping.py
│   │   ├── example_retinotopic_mapping_random_direction.py
│   │   └── example_sinusoidal_luminance.py
│   ├── test
│   │   ├── test_DisplayLogAnalysis.py
│   │   ├── test_DisplayStimulus.py
│   │   ├── test_MonitorSetup.py
│   │   ├── test_StimulusRoutines.py
│   │   ├── test_data
│   │   │   └── images_original.tif
│   │   ├── test_tools_FileTools_Logger.py
│   │   ├── test_tools_GenericTools.py
│   │   └── test_tools_ImageAnalysis.py
│   └── tools
│       ├── FileTools.py
│       ├── GenericTools.py
│       ├── IO
│       │   ├── __init__.py
│       │   └── nidaq.py
│       ├── ImageAnalysis.py
│       ├── PlottingTools.py
│       └── __init__.py
├── requirements.txt
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by .ignore support plugin (hsz.mobi)
2 | ### JetBrains template
3 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm
4 |
5 | #*.iml
6 | *.pyc
7 |
8 |
9 | ## Directory-based project format:
10 | .idea/
11 | .cache/
12 |
13 |
14 | # if you remove the above rule, at least ignore the following:
15 |
16 | # User-specific stuff:
17 | # .idea/workspace.xml
18 | # .idea/tasks.xml
19 | # .idea/dictionaries
20 |
21 | # Sensitive or high-churn files:
22 | # .idea/dataSources.ids
23 | # .idea/dataSources.xml
24 | # .idea/sqlDataSources.xml
25 | # .idea/dynamic.xml
26 | # .idea/uiDesigner.xml
27 |
28 | # Gradle:
29 | # .idea/gradle.xml
30 | # .idea/libraries
31 |
32 | # Mongo Explorer plugin:
33 | # .idea/mongoSettings.xml
34 |
35 | ## File-based project format:
36 | *.ipr
37 | *.iws
38 |
39 | ## Plugin-specific files:
40 |
41 | # IntelliJ
42 | /out/
43 |
44 | # mpeltonen/sbt-idea plugin
45 | .idea_modules/
46 |
47 | # JIRA plugin
48 | atlassian-ide-plugin.xml
49 |
50 | # Crashlytics plugin (for Android Studio and IntelliJ)
51 | com_crashlytics_export_strings.xml
52 | crashlytics.properties
53 | crashlytics-build.properties
54 |
55 |
56 | /retinotopic_mapping/ipython_lizard/res/test_pkls
57 | .ipynb_checkpoints
58 |
59 | build/
60 | dist/
61 | WarpedVisualStim.egg-info/
62 | *.egg
63 | venv/
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Allen Institute Contribution Agreement
2 |
3 | This document describes the terms under which you may make “Contributions” —
4 | which may include without limitation, software additions, revisions, bug fixes, configuration changes,
5 | documentation, or any other materials — to any of the projects owned or managed by the Allen Institute.
6 | If you have questions about these terms, please contact us at terms@alleninstitute.org.
7 |
8 | You certify that:
9 |
10 | • Your Contributions are either:
11 |
12 | 1. Created in whole or in part by you and you have the right to submit them under the designated license
13 | (described below); or
14 | 2. Based upon previous work that, to the best of your knowledge, is covered under an appropriate
15 | open source license and you have the right under that license to submit that work with modifications,
16 | whether created in whole or in part by you, under the designated license; or
17 |
18 | 3. Provided directly to you by some other person who certified (1) or (2) and you have not modified them.
19 |
20 | • You are granting your Contributions to the Allen Institute under the terms of the 2-Clause BSD license
21 | (the “designated license”).
22 |
23 | • You understand and agree that the Allen Institute projects and your Contributions are public and that
24 | a record of the Contributions (including all metadata and personal information you submit with them) is
25 | maintained indefinitely and may be redistributed consistent with the Allen Institute’s mission and the
26 | 2-Clause BSD license.
27 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 |  Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 |
635 |     Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program> Copyright (C) <year> <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/philosophy/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | # Include license
2 | include LICENSE.txt
3 |
4 | # Include data files
5 | recursive-include WarpedVisualStim *.tif *.pkl *.py
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # WarpedVisualStim package
2 |
3 | by Jun Zhuang
4 | © 2019 Allen Institute
5 | email: junz<AT>alleninstitute<DOT>org
6 |
7 | The WarpedVisualStim package is a self-contained module
8 | for displaying spherically corrected visual stimuli on a flat
9 | screen in visual physiology experiments. It is a wrapper of
10 | the "[psychopy](https://www.psychopy.org/)" package.
11 |
12 | The visual stimuli generation and display is implemented in the modules
13 | `MonitorSetup.py`, `StimulusRoutines.py` and `DisplayStimulus.py`.
14 | These modules allow you to display flashing circle, sparse noise,
15 | locally sparse noise, drifting grating circle, static grating circle
16 | and others with spherical correction. The method for spherical
17 | correction is the same as Marshel et al. 2011 (2). These stimulus
18 | routines are highly customizable and designed to give the user
19 | significant flexibility and control in creative experimental design.
20 |
21 | It can also interact with National Instruments equipment to receive
22 | trigger and emit timing signals.
23 |
24 | Please check the '\examples' folder for
25 | example scripts of visual stimulation.
26 |
27 | ### Contributors:
28 | * Jun Zhuang @zhuangj
29 | * John Yearseley @yearsj
30 | * Derric Williams @derricw
31 | * Sumiya Kuroda @sumiya-kuroda
32 |
33 | ### Level of support
34 | We are planning on occasionally updating this tool with no fixed schedule. Community involvement is encouraged through both issues and pull requests.
35 |
36 | #### Language:
37 |
38 | 1. python 3.7
39 |
40 |
41 | #### Install:
42 | ```
43 | cd
44 | conda env create --name warpedvisualstim python=3.7
45 | activate warpedvisualstim (Windows)
46 | source activate warpedvisualstim (Mac or Linux)
47 | python setup.py install
48 | pip install psychopy
49 | pip install pytest
50 | ```
51 |
52 |
53 | #### Dependencies:
54 | 1. pytest
55 | 2. numpy
56 | 3. scipy
57 | 4. matplotlib
58 | 5. h5py
59 | 6. pillow
60 | 7. psychopy
61 | 8. pyglet
62 | 9. OpenCV
63 | 10. scikit-image
64 | 11. tifffile
65 | 12. PyDAQmx
66 | 13. configobj
67 | 14. sphinx, version 1.6.3 or later (just for documentation)
68 | 15. numpydoc, version 0.7.0 (just for documentation)
69 |
70 | for detailed installation instructions see the
71 | install page in documentation (`doc` branch).
72 |
73 | #### Issues:
74 |
75 | 1. Most image analysis parameters are defined as number of pixels, not microns.
76 | 2. Works in windows, but not fully tested on Mac and Linux.
--------------------------------------------------------------------------------
/WarpedVisualStim/DisplayLogAnalysis.py:
--------------------------------------------------------------------------------
1 | '''
2 | 2017-10-31 by Jun Zhuang
3 | this module provides analysis tools to extract information about visual stimuli
4 | saved in the log pkl files.
5 | '''
6 |
7 | import numpy as np
8 | from .tools import FileTools as ft
9 | from .tools import GenericTools as gt
10 |
class DisplayLogAnalyzer(object):
    """
    Class to take a display_log (.pkl) file, check its integrity and extract
    stimuli and display information, organized into a stim_dict dictionary,
    which is an intermediate step to put visual display information into nwb
    files.
    """

    def __init__(self, log_path):
        """
        Parameters
        ----------
        log_path : str
            path to the display log (.pkl) file saved by the display routine.
        """
        self.log_dict = ft.loadFile(log_path)

        # this analyzer only understands index-based (compressed) display logs
        if not self.log_dict['presentation']['is_by_index']:
            raise NotImplementedError('The visual stimuli display should be indexed.')

        self.check_integrity()

    def check_integrity(self):
        """
        Run sanity checks on the loaded display log.

        Raises
        ------
        ValueError
            if the display did not end normally, if the frame counts saved in
            different places disagree, if a display index points beyond the
            unique-frame table, or if (for combined stimuli) the set of
            displayed stimuli does not match the set of input stimuli.
        """
        print(self.log_dict['presentation']['frame_stats'])

        if not self.log_dict['presentation']['keep_display']:
            raise ValueError('Stimulus presentation did not end normally.')

        # the four independently saved frame counts must all agree
        total_frame1 = len(self.log_dict['presentation']['displayed_frames'])
        total_frame2 = len(self.log_dict['presentation']['frame_ts_start'])
        total_frame3 = len(self.log_dict['presentation']['frame_ts_end'])
        total_frame4 = len(self.log_dict['stimulation']['index_to_display'])
        if not total_frame1 == total_frame2 == total_frame3 == total_frame4:
            print('\nNumber of displayed frames: {}.'.format(total_frame1))
            print('\nNumber of frame start timestamps: {}.'.format(total_frame2))
            print('\nNumber of frame end timestamps: {}.'.format(total_frame3))
            print('\nNumber of frames to be displayed: {}.'.format(total_frame4))
            raise ValueError('Numbers of total frames do not agree with each other from various places.')

        # every display index must point into the unique-frame table
        if max(self.log_dict['stimulation']['index_to_display']) >= \
                len(self.log_dict['stimulation']['frames_unique']):
            raise ValueError('Display index beyond number of unique frames.')

        if self.log_dict['stimulation']['stim_name'] == 'CombinedStimuli':
            # the first element of each displayed frame tuple is the stimulus id
            stimuli_sequence_out = [f[0] for f in self.log_dict['presentation']['displayed_frames']]
            stimuli_sequence_out = list(set(stimuli_sequence_out))
            stimuli_sequence_out.sort()
            stimuli_sequence_in = list(self.log_dict['stimulation']['individual_logs'].keys())
            stimuli_sequence_in.sort()
            if stimuli_sequence_out != stimuli_sequence_in:
                raise ValueError('Output stimuli sequence does not match input stimuli sequence.')

    @property
    def num_frame_tot(self):
        # total number of frames actually displayed
        return len(self.log_dict['presentation']['displayed_frames'])

    def get_stim_dict(self):
        """
        Reorganize the display log into a dictionary keyed by stimulus name.

        Returns
        -------
        stim_dict: dictionary
            the structure of this dictionary should look like this:

            {
             '000_UniformContrastRetinotopicMapping': {
                                                       ...
                                                       'stim_name' : '000_UniformContrastRetinotopicMapping',
                                                       'index_to_display': indices into 'frames_unique'
                                                       'timestamps': global frame indices of displayed frames
                                                       'frames_unique': list of tuple representing unique frames
                                                       ...
                                                       },
             '001_StimulusSeparatorRetinotopicMapping': {
                                                         ...
                                                         'stim_name' : '001_StimulusSeparatorRetinotopicMapping',
                                                         'index_to_display': indices into 'frames_unique'
                                                         'timestamps': global frame indices of displayed frames
                                                         'frames_unique': list of tuple representing unique frames
                                                         ...
                                                         },
             ...
             }
        """

        comments = ''
        description = ''
        source = 'retinotopic_mapping package'
        stim_dict = {}

        # if multiple stimuli were displayed in a sequence
        if self.log_dict['stimulation']['stim_name'] == 'CombinedStimuli':
            curr_frame_ind = 0  # global index of the first frame of the current stimulus
            stim_ids = list(self.log_dict['stimulation']['individual_logs'].keys())
            stim_ids.sort()
            for stim_id in stim_ids:
                curr_dict = self.log_dict['stimulation']['individual_logs'][stim_id]
                curr_stim_name = stim_id + 'RetinotopicMapping'
                curr_dict['stim_name'] = curr_stim_name
                curr_num_frames = len(curr_dict['index_to_display'])
                # 'timestamps' are global frame indices, offset by the frames of
                # all preceding stimuli in the sequence
                curr_dict.update({'timestamps': np.arange(curr_num_frames, dtype=np.uint64) + curr_frame_ind,
                                  'comments': comments,
                                  'source': source,
                                  'description': description})
                curr_frame_ind = curr_frame_ind + curr_num_frames
                stim_dict.update({curr_stim_name: curr_dict})

        # if only one stimulus was displayed
        else:
            stim_name = self.log_dict['stimulation']['stim_name']

            # normalize the historical misspelling ('Cirlce') found in old logs,
            # so that downstream suffix checks (which use the correct spelling)
            # still recognize the stimulus
            if stim_name == 'DriftingGratingCirlce':
                stim_name = 'DriftingGratingCircle'

            if stim_name in ['UniformContrast', 'FlashingCircle', 'SparseNoise', 'LocallySparseNoise',
                             'DriftingGratingCircle', 'StaticGratingCircle', 'StaticImages',
                             'StimulusSeparator', 'SinusoidalLuminance', 'DriftingGratingMultipleCircle']:
                curr_stim_name = '{:03d}_{}RetinotopicMapping'.format(0, stim_name)
                curr_dict = self.log_dict['stimulation']
                curr_dict['stim_name'] = curr_stim_name
                curr_dict.update({'timestamps': np.arange(self.num_frame_tot, dtype=np.uint64)})
            else:
                raise NotImplementedError('Do not understand stimulus: {}.'.format(stim_name))

            curr_dict.update({'comments': comments,
                              'source': source,
                              'description': description})
            stim_dict.update({curr_stim_name: curr_dict})

        return stim_dict

    def analyze_photodiode_onsets_sequential(self, stim_dict, pd_thr=-0.5):
        """
        Analyze photodiode onsets in a sequential way

        Parameters
        ----------
        stim_dict: dictionary
            should be the output of self.get_stim_dict()

        pd_thr : float
            the threshold to detect photodiode onset, the photodiode color was saved in each displayed frame (the
            last item of frame tuple) as float with range [-1., 1.]. pd_onset is defined as up crossing the pd_thr.
            The tools.GenericTools.up_crossings() function is used to detect the up crossing. It
            detects the frame meeting the following criteria: 1) the current frame has photodiode color larger than
            pd_thr; 2) the previous frame has photodiode color no larger than pd_thr

        Returns
        -------
        pd_onsets: list
            list of photodiode onsets in sequential manner (in time). Each element in the list is a dictionary
            representing one photodiode onset. The dictionary has these fields:
            1. stim_name: str, the name of the stimulus the onset belongs to
            2. global_frame_ind: the index of this frame in the total frame displayed
            3. global_pd_onset_ind: the index of this photodiode onset in the total photodiode onsets series
               of the stimuli display
            4. str_stim / strs_stim: string that represents the properties of the onset frame. For most frames it
               is just a string ('str_stim'); for LocallySparseNoise, it is a set of strings ('strs_stim') with
               each string representing one probe on the onset frame.
        """

        print('\nAnalyzing photodiode onsets in a sequential manner ...')

        stim_ns = list(stim_dict.keys())
        stim_ns.sort()

        pd_onsets_seq = []

        global_pd_onset_ind = 0  # running onset index over the whole display
        for stim_n in stim_ns:

            curr_stim_dict = stim_dict[stim_n]
            curr_stim_n = curr_stim_dict['stim_name']

            curr_stim_pd_onsets = []

            # reconstruct the frame-by-frame photodiode trace of this stimulus
            # from the unique-frame table and the display indices
            pd_trace_unique = np.array([f[-1] for f in curr_stim_dict['frames_unique']])
            index_to_display = np.array(curr_stim_dict['index_to_display'], dtype=np.uint64)
            pd_trace = pd_trace_unique[index_to_display]
            pd_onset_indices = gt.up_crossings(data=pd_trace, threshold=pd_thr)

            for pd_onset_ind in pd_onset_indices:
                onset_frame = curr_stim_dict['frames_unique'][curr_stim_dict['index_to_display'][pd_onset_ind]]

                curr_pd_onset = {'stim_name': curr_stim_n,
                                 'global_frame_ind': curr_stim_dict['timestamps'][pd_onset_ind],
                                 'global_pd_onset_ind': global_pd_onset_ind}

                # build the condition string describing the onset frame; its
                # layout depends on the stimulus type
                if curr_stim_n.endswith('_UniformContrastRetinotopicMapping'):
                    str_uc = 'color{:05.2f}'.format(curr_stim_dict['color'])
                    curr_pd_onset.update({'str_stim': str_uc})
                elif curr_stim_n.endswith('_StimulusSeparatorRetinotopicMapping'):
                    curr_pd_onset.update({'str_stim': 'color1'})
                elif curr_stim_n.endswith('_FlashingCircleRetinotopicMapping'):
                    str_fc = 'alt{:06.1f}_azi{:06.1f}_color{:05.2f}_rad{:05.1f}' \
                        .format(curr_stim_dict['center'][0],
                                curr_stim_dict['center'][1],
                                curr_stim_dict['color'],
                                float(curr_stim_dict['radius']))
                    curr_pd_onset.update({'str_stim': str_fc})
                elif curr_stim_n.endswith('_SparseNoiseRetinotopicMapping'):
                    # onset_frame[1] is the probe (alt, azi), onset_frame[2] its sign
                    str_sn_probe = 'alt{:06.1f}_azi{:06.1f}_sign{:02d}' \
                        .format(onset_frame[1][0], onset_frame[1][1], int(onset_frame[2]))
                    curr_pd_onset.update({'str_stim': str_sn_probe})
                elif curr_stim_n.endswith('_LocallySparseNoiseRetinotopicMapping'):
                    # several probes can share one onset frame; collect one
                    # string per probe into a set
                    str_lsn_probes = []
                    for probe in onset_frame[1]:
                        str_lsn_probes.append('alt{:06.1f}_azi{:06.1f}_sign{:02d}'
                                              .format(probe[0], probe[1], int(probe[2])))
                    str_lsn_probes = set(str_lsn_probes)
                    curr_pd_onset.update({'strs_stim': str_lsn_probes})
                elif curr_stim_n.endswith('_DriftingGratingCircleRetinotopicMapping'):
                    str_dgc = 'alt{:06.1f}_azi{:06.1f}_sf{:04.2f}_tf{:04.1f}_dire{:03d}_con{:04.2f}_rad{:03d}' \
                        .format(curr_stim_dict['center'][0],
                                curr_stim_dict['center'][1],
                                onset_frame[2],
                                onset_frame[3],
                                int(onset_frame[4]),
                                onset_frame[5],
                                int(onset_frame[6]))
                    curr_pd_onset.update({'str_stim': str_dgc})
                elif curr_stim_n.endswith('_StaticGratingCircleRetinotopicMapping'):
                    str_sgc = 'alt{:06.1f}_azi{:06.1f}_sf{:04.2f}_phase{:03d}_ori{:03d}_con{:04.2f}_rad{:03d}'. \
                        format(curr_stim_dict['center'][0],
                               curr_stim_dict['center'][1],
                               onset_frame[1],
                               int(onset_frame[2]),
                               int(onset_frame[3]),
                               onset_frame[4],
                               int(onset_frame[5]))
                    curr_pd_onset.update({'str_stim': str_sgc})
                elif curr_stim_n.endswith('_StaticImagesRetinotopicMapping'):
                    str_si = 'img_ind{:05d}'.format(onset_frame[1])
                    curr_pd_onset.update({'str_stim': str_si})
                elif curr_stim_n.endswith('_SinusoidalLuminanceRetinotopicMapping'):
                    str_sl = 'onset'
                    curr_pd_onset.update({'str_stim': str_sl})
                elif curr_stim_n.endswith('_DriftingGratingMultipleCircleRetinotopicMapping'):
                    # unlike the single-circle variant, the center is saved per
                    # frame in onset_frame[7] rather than in the stimulus dict
                    str_dgc = 'alt{:06.1f}_azi{:06.1f}_sf{:04.2f}_tf{:04.1f}_dire{:03d}_con{:04.2f}_rad{:03d}' \
                        .format(onset_frame[7][0],
                                onset_frame[7][1],
                                onset_frame[2],
                                onset_frame[3],
                                int(onset_frame[4]),
                                onset_frame[5],
                                int(onset_frame[6]))
                    curr_pd_onset.update({'str_stim': str_dgc})
                else:
                    raise LookupError('Do not understand stimulus name: {}'.format(curr_stim_n))

                curr_stim_pd_onsets.append(curr_pd_onset)
                global_pd_onset_ind = global_pd_onset_ind + 1

            print('{:<45}: number of photodiode_onset: {}'.format(curr_stim_n, len(curr_stim_pd_onsets)))
            pd_onsets_seq = pd_onsets_seq + curr_stim_pd_onsets

        print('\nTotal number of expected photodiode onsets: {}'.format(len(pd_onsets_seq)))

        return pd_onsets_seq

    def analyze_photodiode_onsets_combined(self, pd_onsets_seq, is_dgc_blocked=True):
        """
        Group the sequential photodiode onsets by stimulus and condition.

        Parameters
        ----------
        pd_onsets_seq: list
            product of self.analyze_photodiode_onsets_sequential()

        is_dgc_blocked : bool
            for DriftingGratingCircle (and its multiple-circle variant): if True,
            keep only the onset of each block per condition and drop the
            within-block (cycle) onsets; if False, keep all onsets.

        returns
        -------
        pd_onsets_combined : dict
            one entry per stimulus name; each entry maps a condition string to
            arrays of 'global_pd_onset_ind' and 'global_frame_ind'.
        """

        stim_ns = [po['stim_name'] for po in pd_onsets_seq]
        stim_ns = list(set(stim_ns))
        stim_ns.sort()

        pd_onsets_combined = {}

        # stimuli whose onsets are grouped by the generic per-condition routine
        general_suffixes = ('_UniformContrastRetinotopicMapping',
                            '_StimulusSeparatorRetinotopicMapping',
                            '_FlashingCircleRetinotopicMapping',
                            '_SparseNoiseRetinotopicMapping',
                            '_StaticGratingCircleRetinotopicMapping',
                            '_StaticImagesRetinotopicMapping',
                            '_SinusoidalLuminanceRetinotopicMapping')

        for stim_n in stim_ns:
            curr_pd_onsets_seq = [po for po in pd_onsets_seq if po['stim_name'] == stim_n]

            curr_pd_onsets_com = {}

            if stim_n.endswith(general_suffixes):
                curr_pd_onsets_com.update(self._analyze_pd_onset_combined_general(curr_pd_onsets_seq))
            elif stim_n.endswith('_LocallySparseNoiseRetinotopicMapping'):
                curr_pd_onsets_com.update(self._analyze_pd_onset_combined_locally_sparse_noise(curr_pd_onsets_seq))
            elif stim_n.endswith(('_DriftingGratingCircleRetinotopicMapping',
                                  '_DriftingGratingMultipleCircleRetinotopicMapping')):
                dgc_pd_onsets_com = self._analyze_pd_onset_combined_drifting_grating_circle(curr_pd_onsets_seq)

                if is_dgc_blocked:
                    fs = self.log_dict['monitor']['refresh_rate']
                    dgc_log_dict = self._get_dgc_log_dict(dgc_name=stim_n)
                    block_dur = dgc_log_dict['block_dur']
                    iteration = dgc_log_dict['iteration']
                    block_frame_num = int(fs * block_dur)

                    for dgc_n, dgc_onset in dgc_pd_onsets_com.items():

                        # keep only onsets separated by more than one block
                        # duration: these are block onsets; onsets closer in
                        # time are cycle onsets within the same block
                        f_inds = dgc_onset['global_frame_ind']
                        pdo_inds = dgc_onset['global_pd_onset_ind']

                        block_onset_ind = []
                        for i in range(f_inds.shape[0]):
                            if i == 0:
                                block_onset_ind.append(i)
                            else:
                                curr_gap = f_inds[i] - f_inds[block_onset_ind[-1]]
                                if curr_gap > block_frame_num:
                                    block_onset_ind.append(i)

                        # sanity check: number of detected block onsets of each
                        # condition should equal the iteration of the stimulus
                        if len(block_onset_ind) != iteration:
                            raise ValueError('condition "{}": number of the detected block onsets ({}) '
                                             'does not equal iteration of DriftingGratingCircle ({}).'
                                             .format(dgc_n, len(block_onset_ind), iteration))

                        dgc_onset['global_frame_ind'] = f_inds[block_onset_ind]
                        dgc_onset['global_pd_onset_ind'] = pdo_inds[block_onset_ind]

                curr_pd_onsets_com.update(dgc_pd_onsets_com)
            else:
                raise LookupError('Do not understand stimulus name: {}'.format(stim_n))

            pd_onsets_combined.update({stim_n: curr_pd_onsets_com})

        return pd_onsets_combined

    @staticmethod
    def _analyze_pd_onset_combined_general(pd_onsets_sequential):
        """
        Group onsets carrying a single 'str_stim' condition string.

        Returns {condition_str: {'global_pd_onset_ind': 1d array,
                                 'global_frame_ind': 1d array}}.
        """
        pd_onsets_combined = {}

        # all distinct condition strings in this stimulus
        strs_stim = [po['str_stim'] for po in pd_onsets_sequential]
        strs_stim = set(strs_stim)

        for str_stim in strs_stim:  # for each condition
            pd_onset_list = []
            pd_frame_list = []
            for po in pd_onsets_sequential:
                if po['str_stim'] == str_stim:
                    pd_onset_list.append(po['global_pd_onset_ind'])
                    pd_frame_list.append(po['global_frame_ind'])

            pd_onsets_combined.update({str_stim: {'global_pd_onset_ind': np.array(pd_onset_list),
                                                  'global_frame_ind': np.array(pd_frame_list)}})

        return pd_onsets_combined

    @staticmethod
    def _analyze_pd_onset_combined_locally_sparse_noise(pd_onsets_sequential):
        """
        Group locally-sparse-noise onsets per probe. An onset frame can carry
        several probes ('strs_stim' is a set), so one onset may contribute to
        multiple probe entries.
        """
        pd_onsets_combined = {}

        # union of all probe strings over all onsets
        probes = set([])
        for po in pd_onsets_sequential:
            probes = probes | po['strs_stim']

        for probe in probes:  # for each probe
            pd_onset_list = []
            pd_frame_list = []
            for po in pd_onsets_sequential:
                if probe in po['strs_stim']:
                    pd_onset_list.append(po['global_pd_onset_ind'])
                    pd_frame_list.append(po['global_frame_ind'])

            pd_onsets_combined.update({probe: {'global_pd_onset_ind': np.array(pd_onset_list),
                                               'global_frame_ind': np.array(pd_frame_list)}})

        return pd_onsets_combined

    @staticmethod
    def _analyze_pd_onset_combined_drifting_grating_circle(pd_onsets_sequential):
        """
        Group drifting-grating onsets by condition string. The grouping is the
        same per-'str_stim' grouping used for the general stimuli.
        """
        return DisplayLogAnalyzer._analyze_pd_onset_combined_general(pd_onsets_sequential)

    def _get_dgc_log_dict(self, dgc_name):
        """
        Return the log dictionary of a DriftingGratingCircle stimulus given its
        full name (e.g. '003_DriftingGratingCircleRetinotopicMapping').
        """
        if self.log_dict['stimulation']['stim_name'] == 'CombinedStimuli':
            # strip the 18-character 'RetinotopicMapping' suffix to recover the
            # stimulus id used as key in 'individual_logs'
            stim_log = self.log_dict['stimulation']['individual_logs'][dgc_name[:-18]]
        else:
            stim_log = self.log_dict['stimulation']

        return stim_log
440 |
--------------------------------------------------------------------------------
/WarpedVisualStim/DisplayStimulus.py:
--------------------------------------------------------------------------------
'''
The DisplayStimulus module implements the `DisplaySequence` class to display
stimulus routines. It can display frame by frame, or compress the data of
certain stimulus routines and display by index. It manages information flow
between experimental devices and interacts with `StimulusRoutines` to produce
the visual display and log data. It may also be used to save and export movies
of experimental stimulus routines for presentation.
'''
9 |
10 | from psychopy import visual, event
11 | import PIL
12 | import os
13 | import datetime
14 | import numpy as np
15 | import matplotlib.pyplot as plt
16 | import time
17 | from .tools import FileTools as ft
18 | import tifffile as tf
19 |
20 | try:
21 | from .tools.IO import nidaq as iodaq
22 | except Exception as e:
23 | print(e)
24 |
25 |
def analyze_frames(ts_start, ts_end, refresh_rate, check_point=(0.02, 0.033, 0.05, 0.1)):
    """
    Analyze frame durations of time stamp data.

    Computes relevant statistics with respect to the presentation
    of a given stimulus. The statistics are computed in order
    to understand the timing of the frames since the monitor refresh
    rate isn't always an accurate tool for timing. Also plots a
    histogram of frame intervals (0 - 50 ms) in a new figure.

    Parameters
    ----------
    ts_start : 1d array
        list of time stamps of each frame start (in seconds).
    ts_end: 1d array
        list of time stamps of each frame end (in seconds).
    refresh_rate : float
        the refresh rate of imaging monitor measured (in Hz).
    check_point : tuple, optional
        interval thresholds (in seconds); for each threshold the count and
        percentage of frames longer than it is reported.

    Returns
    -------
    frame_interval : ndarray
        intervals between consecutive frame-start time stamps (in seconds);
        one element shorter than `ts_start`.
    frame_stats : str
        string containing a statistical analysis of the image frames.
    """

    frame_interval = np.diff(ts_start)

    # histogram of frame intervals between 0 and 50 ms
    plt.figure()
    plt.hist(frame_interval, bins=np.linspace(0.0, 0.05, num=51))

    refresh_rate = float(refresh_rate)

    num_frames = ts_start.shape[0]
    disp_true = ts_end[-1] - ts_start[0]
    disp_expect = float(num_frames) / refresh_rate
    avg_frame_time = np.mean(frame_interval) * 1000
    sdev_frame_time = np.std(frame_interval) * 1000

    # argmin/argmax return the first occurrence, matching the original
    # np.where(frame_interval == min/max)[0][0] lookup
    short_frame_ind = int(np.argmin(frame_interval))
    short_frame = frame_interval[short_frame_ind] * 1000
    long_frame_ind = int(np.argmax(frame_interval))
    long_frame = frame_interval[long_frame_ind] * 1000

    frame_stats = ''
    frame_stats += '\nTotal number of frames : {}.'.format(num_frames)
    frame_stats += '\nTotal length of display : {:.5f} second.'.format(disp_true)
    frame_stats += '\nExpected length of display : {:.5f} second.'.format(disp_expect)
    frame_stats += '\nMean of frame intervals : {:.2f} ms.'.format(avg_frame_time)
    frame_stats += '\nS.D. of frame intervals : {:.2f} ms.'.format(sdev_frame_time)
    frame_stats += '\nShortest frame: {:.2f} ms, index: {}.'.format(short_frame, short_frame_ind)
    frame_stats += '\nLongest frame : {:.2f} ms, index: {}.'.format(long_frame, long_frame_ind)

    for check_number in check_point:
        frame_number = int(np.sum(frame_interval > check_number))
        frame_stats += '\nNumber of frames longer than {:5.3f} second: {}; {:6.2f}%'. \
            format(check_number, frame_number, (float(frame_number) * 100 / num_frames))

    print(frame_stats)

    return frame_interval, frame_stats
87 |
88 |
class DisplaySequence(object):
    """
    Display the stimulus routine from memory.

    Takes care of high level management of your computer
    hardware with respect to its interactions within a given experiment.
    Stimulus presentation routines are specified and external connection
    to National Instuments hardware devices is provided. Also takes care
    of the logging of relevant experimental data collected and where it
    will be stored on the computer used for the experiment.

    Parameters
    ----------
    log_dir : str
        system directory path to where log display will be saved.
    backupdir : str, optional
        copy of directory path to save backup, defaults to `None`.
    identifier: str, optional
        identifing string for this particular experiment, this will
        show up in the name of log file when display is done.
    display_iter : int, optional
        number of times the stimulus sequence is repeated, defaults to `1`
    mouse_id : str, optional
        label for mouse, defaults to 'Test'.
    user_id : str, optional
        label for person performing experiment, defaults to 'Name'.
    psychopy_mon : str, optional
        label for monitor used for displaying the stimulus, defaults to
        'testMonitor'.
    is_interpolate : bool, optional
        defaults to `False`.
    is_triggered : bool, optional
        if `True`, stimulus will not display until triggered. if `False`,
        stimulus will display automatically. defaults to `False`.
    is_by_index : bool, optional
        determines if stimulus is displayed by index which saves memory
        and should speed up routines. Note that not every stimulus can be
        displayed by index. defaults to `True`.
    is_save_sequence : bool, optional
        defaults to False
        if True, the class will save the sequence of images to be displayed
        as a tif file, in the same folder of log file. If self.is_by_index
        is True, only unique frames will be saved. Note, this will save
        the whole sequence even if the display is interrupted in the middle.
    trigger_NI_dev : str, optional
        defaults to 'Dev1'.
    trigger_NI_port : int, optional
        defaults to `1`.
    trigger_NI_line : int, optional
        defaults to `0`.
    is_sync_pulse : bool, optional
        defaults to `False`.
    sync_pulse_NI_dev : str, optional
        defaults to 'Dev1'.
    sync_pulse_NI_port : int, optional
        defaults to 1.
    sync_pulse_NI_line : int, optional
        defaults to 1.
    trigger_event : str
        should be one of "negative_edge", "positive_edge", "high_level",
        or "low_level". defaults to "negative_edge".
    display_screen : int
        determines which monitor to display stimulus on. defaults to `0`.
    initial_background_color : float
        defaults to `0`. should be in the range from -1. (black) to 1. (white)
    color_weights : tuple, optional
        defaults to (1., 1., 1.)
        This should be a tuple with 3 elements. Each element specifies the
        weight of each color channel (R, G, B). The value range of each
        element is in the range [0., 1.]. This is designed in such way that
        if you want to suppress a certain channel i.e. red channel, you can
        change this parameter to (0., 1., 1.)
    """

    def __init__(self,
                 log_dir,
                 backupdir=None,
                 identifier='000',
                 display_iter=1,
                 mouse_id='Test',
                 user_id='Name',
                 psychopy_mon='testMonitor',
                 is_by_index=True,
                 is_interpolate=False,
                 is_triggered=False,
                 is_save_sequence=False,
                 trigger_event="negative_edge",
                 trigger_NI_dev='Dev1',
                 trigger_NI_port=1,
                 trigger_NI_line=0,
                 is_sync_pulse=False,
                 sync_pulse_NI_dev='Dev1',
                 sync_pulse_NI_port=1,
                 sync_pulse_NI_line=1,
                 display_screen=0,
                 initial_background_color=0.,
                 color_weights=(1., 1., 1.)):
        """
        initialize `DisplaySequence` object
        """

        self.sequence = None
        self.seq_log = {}
        self.identifier = str(identifier)
        self.psychopy_mon = psychopy_mon
        self.is_interpolate = is_interpolate
        self.is_triggered = is_triggered
        self.is_by_index = is_by_index
        self.is_save_sequence = is_save_sequence
        self.trigger_NI_dev = trigger_NI_dev
        self.trigger_NI_port = trigger_NI_port
        self.trigger_NI_line = trigger_NI_line
        self.trigger_event = trigger_event
        self.is_sync_pulse = is_sync_pulse
        self.sync_pulse_NI_dev = sync_pulse_NI_dev
        self.sync_pulse_NI_port = sync_pulse_NI_port
        self.sync_pulse_NI_line = sync_pulse_NI_line
        self.display_screen = display_screen

        if len(color_weights) != 3:
            raise ValueError('input color_weights should be a tuple with 3 numbers, each from -1. to 1.')
        for cw in color_weights:
            if cw < -1. or cw > 1.:
                raise ValueError('each element of color_weight should be no less than -1. and no greater than 1.')
        self.color_weights = color_weights

        if isinstance(initial_background_color, (int, float)):
            if initial_background_color < -1. or initial_background_color > 1.:
                raise ValueError('initial_background_color ({}) out of range. '
                                 'Should be in [-1., 1].'.format(initial_background_color))
            # fold the background gray level through the per-channel color weights
            self.initial_background_color = [(initial_background_color + 1.) * c - 1. for c in self.color_weights]
        else:
            raise ValueError('Do not understand initial_background_color. '
                             'Should be a number from -1. to 1.'.format(initial_background_color))

        self.keep_display = None

        if display_iter % 1 == 0:
            self.display_iter = display_iter
        else:
            raise ArithmeticError("`display_iter` should be a whole number.")

        self.log_dir = log_dir
        self.backupdir = backupdir
        self.mouse_id = mouse_id
        self.user_id = user_id
        self.seq_log = None

        self.clear()

    def set_any_array(self, any_array, log_dict=None):
        """
        set any numpy 3-d array as stimulus

        Parameters:
        -----------
        any_array : 3d array
            shape: (frame, height, width) in pixels, it will be rescaled to
            have value range [0., 1.] with dtype np.float16
        log_dict : dictionary, optional
            the dictionary containing the metadata of the stimulus. If presented,
            it will be saved in the display log .pkl file (in the field called
            'stimulation') after displayed.
        """
        if len(any_array.shape) != 3:
            raise LookupError("Input numpy array should have dimension of 3!")

        # normalize to [0., 1.] then rescale to psychopy's [-1., 1.] range
        vmax = np.amax(any_array).astype(np.float32)
        vmin = np.amin(any_array).astype(np.float32)
        v_range = (vmax - vmin)
        any_array_nor = ((any_array - vmin) / v_range).astype(np.float16)
        self.sequence = 2 * (any_array_nor - 0.5)

        if log_dict is not None:
            if type(log_dict) is dict:
                self.seq_log = log_dict
            else:
                raise ValueError('`log_dict` should be a dictionary!')
        else:
            self.seq_log = {}
        self.clear()

    def set_stim(self, stim):
        """
        Calls the `generate_movie` method of the respective stim object and
        populates the attributes `self.sequence` and `self.seq_log`

        Parameters
        ----------
        stim : stim object inherited from retinotopic_mapping.StimulusRoutine.Stim class
            the type of stimulus to be presented in the experiment.

            Currently support:
            retinotopic_mapping.StimulusRoutine.UniformContrast
            retinotopic_mapping.StimulusRoutine.FlashingCircle
            retinotopic_mapping.StimulusRoutine.SparseNoise
            retinotopic_mapping.StimulusRoutine.LocallySparseNoise
            retinotopic_mapping.StimulusRoutine.DriftingGratingCircle
            retinotopic_mapping.StimulusRoutine.StaticGratingCircle
            retinotopic_mapping.StimulusRoutine.StaticImages
            retinotopic_mapping.StimulusRoutine.StimulusSeparator
            retinotopic_mapping.StimulusRoutine.CombinedStimuli
            retinotopic_mapping.StimulusRoutine.KSstim
            retinotopic_mapping.StimulusRoutine.KSstimAllDir

        """
        if self.is_by_index:
            if stim.stim_name in ['KSstim', 'KSstimAllDir']:
                # bug fix: stim objects expose `stim_name`, not `name`
                raise LookupError('Stimulus {} does not support indexed display.'.format(stim.stim_name))

            self.sequence, self.seq_log = stim.generate_movie_by_index()
            self.clear()

        else:
            if stim.stim_name in ['LocallySparseNoise', 'StaticGratingCircle', 'NaturalScene']:
                # bug fix: the placeholder was never filled in
                raise LookupError('Stimulus {} does not support full sequence display. Please use '
                                  'indexed display instead (set self.is_by_index = True).'
                                  .format(stim.stim_name))

            self.sequence, self.seq_log = stim.generate_movie()
            self.clear()

    def trigger_display(self):
        """
        Display stimulus, initialize and perform global experimental routines.

        Prepares all of the necessary parameters to display stimulus and store
        the data collected in the experiment. Interacts with PyschoPy to create
        and display each frame of the selected stimulus routine. Handles
        global calls to trigger and timing devices within the experimental setup.

        Examples
        --------
        > import matplotlib.pyplot as plt
        > import retinotopic_mapping.StimulusRoutines as stim
        > from retinotopic_mapping.MonitorSetup import Monitor, Indicator
        > from retinotopic_mapping.DisplayStimulus import DisplaySequence
        > mon = Monitor(resolution=(1200, 1920), dis=15., mon_width_cm=52., mon_height_cm=32.)
        > ind = Indicator(mon)
        > ds = DisplaySequence(log_dir='C:/data')
        > uc = stim.UniformContrast(monitor=mon, indicator=ind, duration=10., color=-1.)
        > ds.set_stim(uc)
        > ds.trigger_display()
        """

        # --------------- early preparation for display--------------------
        # sanity-check the sequence/log first, before any dict lookups into
        # self.seq_log (which would raise a confusing TypeError if None)
        if self.sequence is None:
            raise LookupError("Please set the sequence to be displayed by using self.set_stim().\n")
        if not self.seq_log:
            raise LookupError("Please set the sequence log dictionary to be displayed "
                              "by using self.set_stim().\n")

        # test monitor resolution
        try:
            resolution = self.seq_log['monitor']['resolution'][::-1]
        except KeyError:
            resolution = (800, 600)

        # test monitor refresh rate
        try:
            refresh_rate = self.seq_log['monitor']['refresh_rate']
        except KeyError:
            print("No monitor refresh rate information, assuming 60Hz.\n")
            refresh_rate = 60.

        # if display by index, check frame indices were not larger than the number of frames in
        # self.sequence
        if self.is_by_index:
            max_index = max(self.seq_log['stimulation']['index_to_display'])
            min_index = min(self.seq_log['stimulation']['index_to_display'])
            if max_index >= self.sequence.shape[0] or min_index < 0:
                raise ValueError('Max display index range: {} is out of self.sequence frame range: {}.'
                                 .format((min_index, max_index), (0, self.sequence.shape[0] - 1)))
            if 'frames_unique' not in self.seq_log['stimulation'].keys():
                raise LookupError('"frames_unique" is not found in self.seq_log["stimulation"]. This'
                                  'is required when display by index.')
        else:
            if 'frames' not in self.seq_log['stimulation'].keys():
                raise LookupError('"frames" is not found in self.seq_log["stimulation"]. This'
                                  'is required when display by full sequence.')

        # calculate expected display time
        if self.is_by_index:
            index_to_display = self.seq_log['stimulation']['index_to_display']
            display_time = (float(len(index_to_display)) *
                            self.display_iter / refresh_rate)
        else:
            display_time = (float(self.sequence.shape[0]) *
                            self.display_iter / refresh_rate)
        print('\nExpected display time: {} seconds.\n'.format(display_time))

        # generate file name
        self._get_file_name()
        print('File name: {}.\n'.format(self.file_name))

        # -----------------setup psychopy window and stimulus--------------
        # start psychopy window
        window = visual.Window(size=resolution,
                               monitor=self.psychopy_mon,
                               fullscr=True,
                               screen=self.display_screen,
                               color=self.initial_background_color)

        stim = visual.ImageStim(window, size=(2, 2), interpolate=self.is_interpolate)

        # initialize keep_display
        self.keep_display = True

        # handle display trigger
        if self.is_triggered:
            display_wait = self._wait_for_trigger(event=self.trigger_event)
            if not display_wait:
                window.close()
                self.clear()
                return None
        else:
            time.sleep(5.)  # wait remote object to start

        # actual display
        self._display(window=window, stim=stim)

        # analyze frames
        try:
            self.frame_duration, self.frame_stats = \
                analyze_frames(ts_start=self.frame_ts_start, ts_end=self.frame_ts_end,
                               refresh_rate=self.seq_log['monitor']['refresh_rate'])
        except KeyError:
            print("No monitor refresh rate information, assuming 60Hz.")
            self.frame_duration, self.frame_stats = \
                analyze_frames(ts_start=self.frame_ts_start, ts_end=self.frame_ts_end, refresh_rate=60.)

        save_path, log_dict = self.save_log()

        # clear display data
        self.clear()

        return save_path, log_dict

    def _wait_for_trigger(self, event):
        """
        time place holder for waiting for trigger

        Parameters
        ----------
        event : str from {'low_level','high_level','negative_edge','positive_edge'}
            an event triggered via a National Instuments experimental device.
        Returns
        -------
        Bool :
            returns `True` if trigger is detected and `False` if manual stop
            signal is detected.
        """

        # check NI signal
        trigger_task = iodaq.DigitalInput(self.trigger_NI_dev,
                                          self.trigger_NI_port,
                                          self.trigger_NI_line)
        trigger_task.StartTask()

        print("Waiting for trigger: {} on {}.".format(event, trigger_task.devstr))

        if event == 'low_level':
            # bug fix: take the first sample, consistent with all other branches
            last_TTL = trigger_task.read()[0]
            while last_TTL != 0 and self.keep_display:
                last_TTL = trigger_task.read()[0]
                self._update_display_status()
            else:
                if self.keep_display:
                    trigger_task.StopTask()
                    print('Trigger detected. Start displaying...\n\n')
                    return True
                else:
                    trigger_task.StopTask()
                    print('Keyboard interrupting signal detected. Stopping the program.')
                    return False
        elif event == 'high_level':
            last_TTL = trigger_task.read()[0]
            while last_TTL != 1 and self.keep_display:
                last_TTL = trigger_task.read()[0]
                self._update_display_status()
            else:
                if self.keep_display:
                    trigger_task.StopTask()
                    print('Trigger detected. Start displaying...\n\n')
                    return True
                else:
                    trigger_task.StopTask()
                    print('Keyboard interrupting signal detected. Stopping the program.')
                    return False
        elif event == 'negative_edge':
            last_TTL = trigger_task.read()[0]
            while self.keep_display:
                current_TTL = trigger_task.read()[0]
                if (last_TTL == 1) and (current_TTL == 0):
                    break
                else:
                    last_TTL = int(current_TTL)
                    self._update_display_status()
            else:
                # while-loop exhausted without break: user interrupted
                trigger_task.StopTask()
                print('Keyboard interrupting signal detected. Stopping the program.')
                return False
            trigger_task.StopTask()
            print('Trigger detected. Start displaying...\n\n')
            return True
        elif event == 'positive_edge':
            last_TTL = trigger_task.read()[0]
            while self.keep_display:
                current_TTL = trigger_task.read()[0]
                if (last_TTL == 0) and (current_TTL == 1):
                    break
                else:
                    last_TTL = int(current_TTL)
                    self._update_display_status()
            else:
                trigger_task.StopTask()
                print('Keyboard interrupting signal detected. Stopping the program.')
                return False
            trigger_task.StopTask()
            print('Trigger detected. Start displaying...\n\n')
            return True
        else:
            raise NameError("`trigger` not in {'negative_edge','positive_edge', 'high_level','low_level'}!")

    def _get_file_name(self):
        """
        generate the file name of log file
        """

        try:
            self.file_name = datetime.datetime.now().strftime('%y%m%d%H%M%S') + \
                             '-' + self.seq_log['stimulation']['stim_name'] + \
                             '-M' + self.mouse_id + '-' + self.user_id + '-' + \
                             self.identifier
        except KeyError:
            self.file_name = datetime.datetime.now().strftime('%y%m%d%H%M%S') + \
                             '-' + 'customStim' + '-M' + self.mouse_id + '-' + \
                             self.user_id + '-' + self.identifier

        if self.is_triggered:
            self.file_name += '-Triggered'
        else:
            self.file_name += '-notTriggered'

    def _display(self, window, stim):
        """
        display stimulus
        """
        frame_ts_start = []
        frame_ts_end = []
        # NOTE(review): time.process_time() counts CPU time, not wall-clock;
        # intervals spanning vsync waits/sleeps may be under-reported.
        # Consider time.perf_counter() -- confirm with logged data consumers.
        start_time = time.process_time()

        if self.is_by_index:
            index_to_display = self.seq_log['stimulation']['index_to_display']
            iter_frame_num = len(index_to_display)
        else:
            iter_frame_num = self.sequence.shape[0]
            index_to_display = range(iter_frame_num)

        if self.is_sync_pulse:
            syncPulseTask = iodaq.DigitalOutput(self.sync_pulse_NI_dev,
                                                self.sync_pulse_NI_port,
                                                self.sync_pulse_NI_line)
            syncPulseTask.StartTask()
            _ = syncPulseTask.write(np.array([0]).astype(np.uint8))

        i = 0
        self.displayed_frames = []

        while self.keep_display and i < (iter_frame_num * self.display_iter):

            frame_num = i % iter_frame_num
            frame_index = index_to_display[frame_num]

            if self.color_weights == (1., 1., 1.):
                # flip vertically for psychopy's image coordinate convention
                stim.setImage(self.sequence[frame_index][::-1])
            else:
                # apply per-channel weights by converting to an 8-bit RGB image
                curr_frame = self.sequence[frame_index]
                curr_frame = ((curr_frame + 1.) * 255 / 2.)
                curr_frame_r = PIL.Image.fromarray((curr_frame * self.color_weights[0]).astype(np.uint8))
                curr_frame_g = PIL.Image.fromarray((curr_frame * self.color_weights[1]).astype(np.uint8))
                curr_frame_b = PIL.Image.fromarray((curr_frame * self.color_weights[2]).astype(np.uint8))
                curr_frame = PIL.Image.merge('RGB', (curr_frame_r, curr_frame_g, curr_frame_b))
                stim.setImage(curr_frame)

            stim.draw()

            # set sync pulse start signal
            if self.is_sync_pulse:
                _ = syncPulseTask.write(np.array([1]).astype(np.uint8))

            # save frame start timestamp
            frame_ts_start.append(time.process_time() - start_time)

            # show visual stim
            window.flip()

            # save displayed frame information
            if self.is_by_index:
                self.displayed_frames.append(self.seq_log['stimulation']['frames_unique'][frame_index])
            else:
                self.displayed_frames.append(self.seq_log['stimulation']['frames'][frame_index])

            # save frame end timestamp
            frame_ts_end.append(time.process_time() - start_time)

            # set sync pulse end signal
            if self.is_sync_pulse:
                _ = syncPulseTask.write(np.array([0]).astype(np.uint8))

            self._update_display_status()
            i += 1

        stop_time = time.process_time()
        window.close()

        if self.is_sync_pulse:
            syncPulseTask.StopTask()

        self.frame_ts_start = np.array(frame_ts_start)
        self.frame_ts_end = np.array(frame_ts_end)
        self.display_length = stop_time - start_time

        if self.keep_display:
            print('\nDisplay successfully completed.')

    def flag_to_close(self):
        """request the display loop to stop at the next frame boundary."""
        self.keep_display = False

    def _update_display_status(self):
        """poll the keyboard for 'q'/'escape' and flag the display to stop."""

        if self.keep_display is None:
            raise LookupError('self.keep_display should start as True')

        # check keyboard input 'q' or 'escape'
        keyList = event.getKeys(['q', 'escape'])
        if len(keyList) > 0:
            self.keep_display = False
            print("Keyboard interrupting signal detected. Stop displaying. \n")

    def set_display_iteration(self, display_iter):
        """set the number of sequence repeats; must be a whole number."""

        if display_iter % 1 == 0:
            self.display_iter = display_iter
        else:
            raise ArithmeticError("`display_iter` should be a whole number.")
        self.clear()

    def save_log(self):
        """save the display log (and optionally the sequence) to disk.

        Returns
        -------
        path : str
            full path of the saved .pkl log file.
        log_dict : dict
            the saved log dictionary (stimulation log plus presentation info).
        """

        if self.display_length is None:
            self.clear()
            raise LookupError("Please display sequence first!")

        if self.file_name is None:
            self._get_file_name()

        if self.keep_display is True:
            self.file_name += '-complete'
        elif self.keep_display is False:
            self.file_name += '-incomplete'

        # set up log object
        directory = os.path.join(self.log_dir, 'visual_display_log')
        if not (os.path.isdir(directory)):
            os.makedirs(directory)

        log_dict = dict(self.seq_log)
        displayLog = dict(self.__dict__)
        # the raw sequence and its log are stored separately / not pickled here
        displayLog.pop('seq_log')
        displayLog.pop('sequence')
        log_dict.update({'presentation': displayLog})

        file_name = self.file_name + ".pkl"

        # generate full log dictionary
        path = os.path.join(directory, file_name)
        ft.saveFile(path, log_dict)

        print("\nLog file generated successfully. Log file path: ")
        print('{}'.format(path))
        if self.is_save_sequence:
            tf.imsave(os.path.join(directory, self.file_name + '.tif'),
                      self.sequence.astype(np.float32))
            print('\nSequence file generated successfully. File path: ')
            print('{}'.format(os.path.join(directory, self.file_name + '.tif')))

        backupFileFolder = self._get_backup_folder()
        if backupFileFolder is not None:
            if not (os.path.isdir(backupFileFolder)):
                os.makedirs(backupFileFolder)
            backupFilePath = os.path.join(backupFileFolder, file_name)
            ft.saveFile(backupFilePath, log_dict)

            if self.is_save_sequence:
                tf.imsave(os.path.join(backupFileFolder, self.file_name + '.tif'),
                          self.sequence.astype(np.float32))
            print("\nBackup log file generated successfully. Backup log file path: ")
            print('{}'.format(backupFilePath))
        else:
            print("\nDid not find backup path, no backup was saved.")

        return path, log_dict

    def _get_backup_folder(self):
        """return (and create if needed) the backup log folder, or None."""
        if self.backupdir is not None:
            backup_folder = os.path.join(self.backupdir, 'visual_display_log')
            if not os.path.isdir(backup_folder):
                os.makedirs(backup_folder)
            return backup_folder
        else:
            return None

    def clear(self):
        """ clear display information. """
        self.display_length = None
        self.time_stamp = None
        self.frame_duration = None
        self.displayed_frames = None
        self.frame_stats = None
        self.file_name = None
        self.keep_display = None
722 |
if __name__ == "__main__":
    # module is import-only; no command-line entry point is provided
    pass
725 |
--------------------------------------------------------------------------------
/WarpedVisualStim/MonitorSetup.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Monitor setup utilities.

Defines the `Monitor` class, which stores the display geometry of an
experimental setup and produces spherically corrected (warped) pixel
coordinates in degrees of visual angle.
"""
7 | import numpy as np
8 | import matplotlib.pyplot as plt
9 | from .tools import ImageAnalysis as ia
10 |
11 |
12 | class Monitor(object):
13 | """
14 | monitor object created by Jun, has the method "remap" to generate the
15 | spherical corrected coordinates in degrees
16 |
17 | This object contains the relevant data for the monitor used within a
18 | given experimental setup. When initialized, the rectangular coordinates
19 | of the pixels on the monitor are computed and stored as `lin_coord_x`,
20 | `lin_coord_y`. The rectangular coordinates are then transformed and
21 | warped by calling the `remap` method to populate the `deg_coord_x` and
22 | `deg_coord_y` attributes.
23 |
24 | Parameters
25 | ----------
26 | resolution : tuple of two positive integers
27 | value of the monitor resolution, (pixel number in height, pixel number in width)
28 | dis : float
29 | distance from eyeball to monitor (in cm)
30 | mon_width_cm : float
31 | width of monitor (in cm)
32 | mon_height_cm : float
33 | height of monitor (in cm)
34 | C2T_cm : float
35 | distance from gaze center to monitor top
36 | C2A_cm : float
37 | distance from gaze center to anterior edge of the monitor
38 | center_coordinates : tuple of two floats
39 | (altitude, azimuth), in degrees. the coordinates of the projecting point
40 | from the eye ball to the monitor. This allows to place the display monitor
41 | in any arbitrary position.
42 | visual_field : str from {'right','left'}, optional
43 | the eye that is facing the monitor, defaults to 'right'
44 | deg_coord_x : ndarray, optional
45 | array of warped x pixel coordinates, defaults to `None`
46 | deg_coord_y : ndarray, optional
47 | array of warped y pixel coordinates, defaults to `None`
48 | name : str, optional
49 | name of the monitor, defaults to `testMonitor`
50 | gamma : optional
51 | for gamma correction, defaults to `None`
52 | gamma_grid : optional
53 | for gamme correction, defaults to `None`
54 | luminance : optional
55 | monitor luminance, defaults to `None`
56 | downsample_rate : int, optional
57 | downsample rate of monitor pixels, defaults to 10
58 | refresh_rate : float, optional
59 | the refresh rate of the monitor in Hz, defaults to 60
60 | """
61 |
62 | def __init__(self,
63 | resolution,
64 | dis,
65 | mon_width_cm,
66 | mon_height_cm,
67 | C2T_cm=None,
68 | C2A_cm=None,
69 | center_coordinates=(0., 60.),
70 | visual_field='right',
71 | deg_coord_x=None,
72 | deg_coord_y=None,
73 | name='testMonitor',
74 | gamma=None,
75 | gamma_grid=None,
76 | luminance=None,
77 | downsample_rate=10,
78 | refresh_rate=60.):
79 | """
80 | Initialize monitor object.
81 |
82 | """
83 |
84 | if resolution[0] % downsample_rate != 0 \
85 | or resolution[1] % downsample_rate != 0:
86 | raise ArithmeticError('Resolution pixel numbers are not '
87 | 'divisible by down sampling rate.')
88 |
89 | self.resolution = resolution
90 | self.dis = dis
91 | self.mon_width_cm = mon_width_cm
92 | self.mon_height_cm = mon_height_cm
93 |
94 | if C2T_cm is None:
95 | self.C2T_cm = self.mon_height_cm / 2.
96 | else:
97 | self.C2T_cm = C2T_cm
98 |
99 | if C2A_cm is None:
100 | self.C2A_cm = self.mon_width_cm / 2.
101 | else:
102 | self.C2A_cm = C2A_cm
103 |
104 | self.center_coordinates = center_coordinates
105 | self.visual_field = visual_field
106 | self.deg_coord_x = deg_coord_x
107 | self.deg_coord_y = deg_coord_y
108 | self.name = name
109 | self.downsample_rate = downsample_rate
110 | self.gamma = gamma
111 | self.gamma_grid = gamma_grid
112 | self.luminance = luminance
113 | self.refresh_rate = 60
114 |
115 | # distance form projection point of the eye to bottom of the monitor
116 | self.C2B_cm = self.mon_height_cm - self.C2T_cm
117 | # distance form projection point of the eye to right of the monitor
118 | self.C2P_cm = self.mon_width_cm - self.C2A_cm
119 |
120 | resolution = [0, 0]
121 | resolution[0] = int(self.resolution[0] / downsample_rate)
122 | resolution[1] = int(self.resolution[1] / downsample_rate)
123 |
124 | map_coord_x, map_coord_y = np.meshgrid(range(resolution[1]),
125 | range(resolution[0]))
126 |
127 | if self.visual_field == "left":
128 | map_x = np.linspace(self.C2A_cm, -1.0 * self.C2P_cm, resolution[1])
129 |
130 | if self.visual_field == "right":
131 | map_x = np.linspace(-1 * self.C2A_cm, self.C2P_cm, resolution[1])
132 |
133 | map_y = np.linspace(self.C2T_cm, -1.0 * self.C2B_cm, resolution[0])
134 | old_map_x, old_map_y = np.meshgrid(map_x, map_y, sparse=False)
135 |
136 | self.lin_coord_x = old_map_x
137 | self.lin_coord_y = old_map_y
138 |
139 | self.remap()
140 |
    def set_gamma(self, gamma, gamma_grid):
        """Store gamma-correction parameters; values are not validated here."""
        self.gamma = gamma
        self.gamma_grid = gamma_grid
144 |
    def set_luminance(self, luminance):
        """Store the monitor luminance value; not validated here."""
        self.luminance = luminance
147 |
148 | def set_downsample_rate(self, downsample_rate):
149 |
150 | if self.resolution[0] % downsample_rate != 0 \
151 | or self.resolution[1] % downsample_rate != 0:
152 | raise ArithmeticError('Resolution pixel numbers are not divisible by down sampling rate.')
153 |
154 | self.downsample_rate = downsample_rate
155 |
156 | resolution = [0, 0]
157 | resolution[0] = self.resolution[0] / downsample_rate
158 | resolution[1] = self.resolution[1] / downsample_rate
159 |
160 | # map_coord_x, map_coord_y = np.meshgrid(range(resolution[1]),
161 | # range(resolution[0]))
162 |
163 | if self.visual_field == "left":
164 | map_x = np.linspace(self.C2A_cm, -1.0 * self.C2P_cm, resolution[1])
165 |
166 | if self.visual_field == "right":
167 | map_x = np.linspace(-1 * self.C2P_cm, self.C2P_cm, resolution[1])
168 |
169 | map_y = np.linspace(self.C2T_cm, -1.0 * self.C2B_cm, resolution[0])
170 | old_map_x, old_map_y = np.meshgrid(map_x, map_y, sparse=False)
171 |
172 | self.lin_coord_x = old_map_x
173 | self.lin_coord_y = old_map_y
174 |
175 | self.remap()
176 |
177 | def remap(self):
178 | """
179 | warp the linear pixel coordinates to a spherical corrected representation.
180 |
181 | Function is called when the monitor object is initialized and populate
182 | the `deg_coord_x` and `deg_coord_y` attributes.
183 | """
184 |
185 | resolution = [0, 0]
186 | resolution[0] = int(self.resolution[0] / self.downsample_rate)
187 | resolution[1] = int(self.resolution[1] / self.downsample_rate)
188 |
189 | # map_coord_x, map_coord_y = np.meshgrid(range(resolution[1]),
190 | # range(resolution[0]))
191 |
192 | new_map_x = np.zeros(resolution, dtype=np.float32)
193 | new_map_y = np.zeros(resolution, dtype=np.float32)
194 |
195 | for j in range(resolution[1]):
196 | new_map_x[:, j] = ((180.0 / np.pi) *
197 | np.arctan(self.lin_coord_x[0, j] / self.dis))
198 | dis2 = np.sqrt(np.square(self.dis) +
199 | np.square(self.lin_coord_x[0, j]))
200 |
201 | for i in range(resolution[0]):
202 | new_map_y[i, j] = ((180.0 / np.pi) *
203 | np.arctan(self.lin_coord_y[i, 0] / dis2))
204 |
205 | self.deg_coord_x = new_map_x + self.center_coordinates[1]
206 | self.deg_coord_y = new_map_y + self.center_coordinates[0]
207 |
208 | def plot_map(self):
209 |
210 | resolution = [0, 0]
211 | resolution[0] = self.resolution[0] / self.downsample_rate
212 | resolution[1] = self.resolution[1] / self.downsample_rate
213 |
214 | mapcorX, mapcorY = np.meshgrid(range(resolution[1]), range(resolution[0]))
215 |
216 | f1 = plt.figure(figsize=(12, 7))
217 | f1.suptitle('Remap monitor', fontsize=14, fontweight='bold')
218 |
219 | OMX = f1.add_subplot(221)
220 | OMX.set_title('Linear Map X (cm)')
221 | currfig = plt.imshow(self.lin_coord_x)
222 | levels1 = range(int(np.floor(self.lin_coord_x.min() / 10) * 10),
223 | int((np.ceil(self.lin_coord_x.max() / 10) + 1) * 10), 10)
224 | im1 = plt.contour(mapcorX, mapcorY, self.lin_coord_x, levels1, colors='k', linewidth=2)
225 | # plt.clabel(im1, levels1, fontsize = 10, inline = 1, fmt='%2.1f')
226 | f1.colorbar(currfig, ticks=levels1)
227 | OMX.set_axis_off()
228 |
229 | OMY = f1.add_subplot(222)
230 | OMY.set_title('Linear Map Y (cm)')
231 | currfig = plt.imshow(self.lin_coord_y)
232 | levels2 = range(int(np.floor(self.lin_coord_y.min() / 10) * 10),
233 | int((np.ceil(self.lin_coord_y.max() / 10) + 1) * 10), 10)
234 | im2 = plt.contour(mapcorX, mapcorY, self.lin_coord_y, levels2, colors='k', linewidth=2)
235 | # plt.clabel(im2, levels2, fontsize = 10, inline = 1, fmt='%2.2f')
236 | f1.colorbar(currfig, ticks=levels2)
237 | OMY.set_axis_off()
238 |
239 | NMX = f1.add_subplot(223)
240 | NMX.set_title('Spherical Map X (deg)')
241 | currfig = plt.imshow(self.deg_coord_x)
242 | levels3 = range(int(np.floor(self.deg_coord_x.min() / 10) * 10),
243 | int((np.ceil(self.deg_coord_x.max() / 10) + 1) * 10), 10)
244 | im3 = plt.contour(mapcorX, mapcorY, self.deg_coord_x, levels3, colors='k', linewidth=2)
245 | # plt.clabel(im3, levels3, fontsize = 10, inline = 1, fmt='%2.1f')
246 | f1.colorbar(currfig, ticks=levels3)
247 | NMX.set_axis_off()
248 | #
249 | NMY = f1.add_subplot(224)
250 | NMY.set_title('Spherical Map Y (deg)')
251 | currfig = plt.imshow(self.deg_coord_y)
252 | levels4 = range(int(np.floor(self.deg_coord_y.min() / 10) * 10),
253 | int((np.ceil(self.deg_coord_y.max() / 10) + 1) * 10), 10)
254 | im4 = plt.contour(mapcorX, mapcorY, self.deg_coord_y, levels4, colors='k', linewidth=2)
255 | # plt.clabel(im4, levels4, fontsize = 10, inline = 1, fmt='%2.1f')
256 | f1.colorbar(currfig, ticks=levels4)
257 | NMY.set_axis_off()
258 |
259 | def generate_lookup_table(self):
260 | """
261 | generate lookup talbe between degree corrdinates and linear corrdinates
262 | return two matrix:
263 | lookupI: i index in linear matrix to this pixel after warping
264 | lookupJ: j index in linear matrix to this pixel after warping
265 | """
266 |
267 | # length of one degree on monitor at gaze point
268 | degDis = np.tan(np.pi / 180) * self.dis
269 |
270 | # generate degree coordinate without warpping
271 | degNoWarpCorX = self.lin_coord_x / degDis
272 | degNoWarpCorY = self.lin_coord_y / degDis
273 |
274 | # deg coordinates
275 | degCorX = self.deg_coord_x + self.center_coordinates[0]
276 | degCorY = self.deg_coord_y + self.center_coordinates[1]
277 |
278 | lookupI = np.zeros(degCorX.shape).astype(np.int32)
279 | lookupJ = np.zeros(degCorX.shape).astype(np.int32)
280 |
281 | for j in range(lookupI.shape[1]):
282 | currDegX = degCorX[0, j]
283 | diffDegX = degNoWarpCorX[0, :] - currDegX
284 | IndJ = np.argmin(np.abs(diffDegX))
285 | lookupJ[:, j] = IndJ
286 |
287 | for i in range(lookupI.shape[0]):
288 | currDegY = degCorY[i, j]
289 | diffDegY = degNoWarpCorY[:, IndJ] - currDegY
290 | indI = np.argmin(np.abs(diffDegY))
291 | lookupI[i, j] = indI
292 |
293 | return lookupI, lookupJ
294 |
    def warp_images(self, imgs, center_coor, deg_per_pixel=0.1, is_luminance_correction=True):
        """
        warp a image stack into visual degree coordinate system

        parameters
        ----------
        imgs : ndarray
            should be 2d or 3d, if 3d, axis will be considered as frame x rows x width
        center_coor : list or tuple of two floats
            the visual degree coordinates of the center of the image (altitude, azimuth)
        deg_per_pixel : float or list/tuple of two floats
            size of original pixel in visual degrees, (altitude, azimuth), if float, assume
            sizes in both dimension are the same
        is_luminance_correction : bool
            if True, wrapped images will have mean intensity equal 0, and values will be
            scaled up to reach minimum equal -1. or maximum equal 1.

        returns
        -------
        imgs_wrapped : 3d array, np.float32
            wrapped images, each frame should have exact same size of down sampled monitor
            resolution. the region on the monitor not covered by the image will have value
            of np.nan. value range [-1., 1.]
        coord_alt_wrapped : 2d array, np.float32
            the altitude coordinates of all pixels in the wrapped images in visual degrees.
            should have the same shape as each frame in 'imgs_wrapped'.
        coord_azi_wrapped : 2d array, np.float32
            the azimuth coordinates of all pixels in the wrapped images in visual degrees.
            should have the same shape as each frame in 'imgs_wrapped'.
        imgs_dewrapped : 3d array, dtype same as imgs
            unwrapped images, same dimension as input image stack. the region of original
            image that was not got displayed (outside of the monitor) will have value of
            np.nan. value range [-1., 1.]
        coord_alt_dewrapped : 2d array, np.float32
            the altitude coordinates of all pixels in the dewrapped images in visual degrees.
            should have the same shape as each frame in 'imgs_dewrapped'.
        coord_azi_dewrapped : 2d array, np.float32
            the azimuth coordinates of all pixels in the dewrapped images in visual degrees.
            should have the same shape as each frame in 'imgs_dewrapped'.
        """

        # deg_per_pixel may be a scalar or an (altitude, azimuth) pair;
        # indexing a scalar raises TypeError, which selects the scalar branch
        try:
            deg_per_pixel_alt = abs(float(deg_per_pixel[0]))
            deg_per_pixel_azi = abs(float(deg_per_pixel[1]))
        except TypeError:
            deg_per_pixel_alt = deg_per_pixel_azi = deg_per_pixel

        # normalize input to a 3d stack (frame x rows x cols)
        if len(imgs.shape) == 2:
            imgs_raw = np.array([imgs])
        elif len(imgs.shape) == 3:
            imgs_raw = imgs
        else:
            raise ValueError('input "imgs" should be 2d or 3d array.')

        # generate raw image pixel coordinates in visual degrees
        # altitude decreases down the rows; azimuth increases across columns
        alt_start = center_coor[0] + (imgs_raw.shape[1] / 2) * deg_per_pixel_alt
        alt_axis = alt_start - np.arange(imgs_raw.shape[1]) * deg_per_pixel_alt
        azi_start = center_coor[1] - (imgs_raw.shape[2] / 2) * deg_per_pixel_azi
        azi_axis = np.arange(imgs_raw.shape[2]) * deg_per_pixel_azi + azi_start
        # img_coord_azi, img_coord_alt = np.meshgrid(azi_axis, alt_axis)

        # initialize output array
        imgs_wrapped = np.zeros((imgs_raw.shape[0],
                                 self.deg_coord_x.shape[0],
                                 self.deg_coord_x.shape[1]), dtype=np.float32)
        imgs_wrapped[:] = np.nan

        # for cropping imgs_raw
        # NOTE(review): these stay None if the image never overlaps the
        # monitor, which would make the crop below fail -- confirm callers
        # always pass overlapping images.
        x_min = x_max = y_max = y_min = None

        # for testing
        # img_count = np.zeros((imgs_raw.shape[1], imgs_raw.shape[2]), dtype=np.uint32)

        # loop through every display (wrapped) pixel
        for ii in range(self.deg_coord_x.shape[0]):
            for jj in range(self.deg_coord_x.shape[1]):

                # the wrapped coordinate of current display pixel [alt, azi]
                coord_w = [self.deg_coord_y[ii, jj], self.deg_coord_x[ii, jj]]

                # if the wrapped coordinates of current display pixel is covered
                # by the raw image
                if alt_axis[0] >= coord_w[0] >= alt_axis[-1] and \
                        azi_axis[0] <= coord_w[1] <= azi_axis[-1]:

                    # get raw pixels arround the wrapped coordinates of current display pixel
                    # u, l: fractional (row, col) position in raw image space
                    u = (alt_axis[0] - coord_w[0]) / deg_per_pixel_alt
                    l = (coord_w[1] - azi_axis[0]) / deg_per_pixel_azi

                    # for testing:
                    # img_count[int(u), int(l)] += 1

                    if (u == round(u) and l == round(l)): # right hit on one raw pixel
                        imgs_wrapped[:, ii, jj] = imgs_raw[:, int(u), int(l)]

                        # for cropping
                        if x_min is None:
                            x_min = x_max = l
                            y_min = y_max = u
                        else:
                            x_min = min(x_min, l)
                            x_max = max(x_max, l)
                            y_min = min(y_min, u)
                            y_max = max(y_max, u)

                    else:
                        # interpolate between the 4 surrounding raw pixels,
                        # weighted by inverse distance (ia.distance is the
                        # project-local Euclidean distance helper --
                        # presumably from tools.ImageAnalysis, verify import)
                        u = int(u)
                        b = u + 1
                        l = int(l)
                        r = l + 1
                        w_ul = 1. / ia.distance(coord_w, [alt_axis[u], azi_axis[l]])
                        w_bl = 1. / ia.distance(coord_w, [alt_axis[b], azi_axis[l]])
                        w_ur = 1. / ia.distance(coord_w, [alt_axis[u], azi_axis[r]])
                        w_br = 1. / ia.distance(coord_w, [alt_axis[b], azi_axis[r]])

                        w_sum = w_ul + w_bl + w_ur + w_br

                        imgs_wrapped[:, ii, jj] = (imgs_raw[:, u, l] * w_ul +
                                                   imgs_raw[:, b, l] * w_bl +
                                                   imgs_raw[:, u, r] * w_ur +
                                                   imgs_raw[:, b, r] * w_br) / w_sum

                        # for cropping
                        if x_min is None:
                            x_min = l
                            x_max = l + 1
                            y_min = u
                            y_max = u + 1
                        else:
                            x_min = min(x_min, l)
                            x_max = max(x_max, l + 1)
                            y_min = min(y_min, u)
                            y_max = max(y_max, u + 1)

        # for testing
        # plt.imshow(img_count, interpolation='bicubic')
        # plt.colorbar()
        # plt.show()

        # scale each frame to zero mean and unit peak amplitude
        if is_luminance_correction:
            for frame_ind in range(imgs_wrapped.shape[0]):
                curr_frame = imgs_wrapped[frame_ind]
                curr_mean = np.nanmean(curr_frame.flat)
                curr_frame = curr_frame - curr_mean
                curr_amp = np.max([np.nanmax(curr_frame.flat), abs(np.nanmin(curr_frame.flat))])
                curr_frame = curr_frame / curr_amp
                imgs_wrapped[frame_ind] = curr_frame

        # crop image to the raw-pixel range actually displayed
        alt_range = np.logical_and(np.arange(imgs_raw.shape[1]) >= y_min,
                                   np.arange(imgs_raw.shape[1]) <= y_max)
        azi_range = np.logical_and(np.arange(imgs_raw.shape[2]) >= x_min,
                                   np.arange(imgs_raw.shape[2]) <= x_max)

        # print imgs_raw.shape
        # print imgs_raw.shape
        # print alt_range.shape
        # print azi_range.shape
        # print np.sum(alt_range)
        # print np.sum(azi_range)

        imgs_dewrapped = imgs_raw[:, alt_range, :]
        imgs_dewrapped = imgs_dewrapped[:, :, azi_range]

        # get degree coordinats of dewrapped images
        deg_coord_alt_ax_dewrapped = alt_axis[alt_range]
        deg_coord_azi_ax_dewrapped = azi_axis[azi_range]
        deg_coord_azi_dewrapped, deg_coord_alt_dewrapped = np.meshgrid(deg_coord_azi_ax_dewrapped,
                                                                       deg_coord_alt_ax_dewrapped)
        deg_coord_alt_dewrapped = deg_coord_alt_dewrapped.astype(np.float32)
        deg_coord_azi_dewrapped = deg_coord_azi_dewrapped.astype(np.float32)

        return imgs_wrapped, self.deg_coord_y, self.deg_coord_x, imgs_dewrapped, deg_coord_alt_dewrapped, \
               deg_coord_azi_dewrapped
469 |
470 |
class Indicator(object):
    """
    Flashing indicator for photodiode.

    Parameters
    ----------
    monitor : monitor object
        The monitor used within the experimental setup
    width_cm : float, optional
        width of the size of the indicator in cm, defaults to `3.`
    height_cm : float, optional
        height of the size of the indicator in cm, defaults to `3.`
    position : str from {'northeast','northwest','southwest','southeast'}
        the placement of the indicator, defaults to 'northeast'
    is_sync : bool, optional
        determines whether the indicator is synchronized with the stimulus,
        defaults to True.
    freq : float, optional
        frequency of photodiode, defaults to `2.`
    """

    def __init__(self, monitor, width_cm=3., height_cm=3.,
                 position='northeast', is_sync=True, freq=2.):
        """
        Initialize indicator object
        """

        self.monitor = monitor
        self.width_cm = width_cm
        self.height_cm = height_cm
        self.width_pixel, self.height_pixel = self.get_size_pixel()
        self.position = position
        self.center_width_pixel, self.center_height_pixel = self.get_center()
        self.is_sync = is_sync

        if is_sync == False:
            # not locked to the stimulus: the indicator flashes on its own
            # schedule at `freq` Hz
            self.freq = freq
            self.frame_num = self.get_frames()
        else:
            self.freq = None
            self.frame_num = None

    def get_size_pixel(self):
        """Return (width, height) of the indicator in downsampled pixels."""

        screen_w = (self.monitor.resolution[1] /
                    self.monitor.downsample_rate)
        screen_h = (self.monitor.resolution[0] /
                    self.monitor.downsample_rate)

        # scale the physical size by the fraction of the monitor it covers
        ind_w = int((self.width_cm / self.monitor.mon_width_cm) * screen_w)
        ind_h = int((self.height_cm / self.monitor.mon_height_cm) * screen_h)

        return ind_w, ind_h

    def get_center(self):
        """Return (width, height) pixel coordinates of the indicator center."""

        screen_w = (self.monitor.resolution[1] /
                    self.monitor.downsample_rate)
        screen_h = (self.monitor.resolution[0] /
                    self.monitor.downsample_rate)

        half_w = self.width_pixel / 2
        half_h = self.height_pixel / 2

        # map each corner keyword to the center of an indicator flush with
        # that corner of the screen
        corner_centers = {
            'northeast': (screen_w - half_w, half_h),
            'northwest': (half_w, half_h),
            'southeast': (screen_w - half_w, screen_h - half_h),
            'southwest': (half_w, screen_h - half_h),
        }

        if self.position not in corner_centers:
            raise LookupError('`position` attribute not in '
                              '{"northeast","northwest","southeast","southwest"}.')

        center_width, center_height = corner_centers[self.position]
        return int(center_width), int(center_height)

    def get_frames(self):
        """
        if not synchronized with stimulation, get frame numbers of each update
        of indicator
        """

        rate = self.monitor.refresh_rate

        # the flash frequency must divide the refresh rate evenly so each
        # update spans a whole number of frames
        if rate % self.freq != 0:
            raise ArithmeticError("`freq` not divisble by monitor ref rate.")

        return rate / self.freq
572 |
--------------------------------------------------------------------------------
/WarpedVisualStim/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | __version__ = '3.0.1'
3 |
def test():
    """Run the package's pytest suite located in the bundled ``test`` folder.

    Returns the pytest exit code.
    """
    import pytest
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    test_dir = os.path.join(curr_dir, 'test')
    test_dir = test_dir.replace('\\', '/')
    # bug fix: pytest.main() requires a list of argument strings since
    # pytest 4.0; passing a bare string raises a usage error there.
    return pytest.main([test_dir])
--------------------------------------------------------------------------------
/WarpedVisualStim/examples/example_combined_stimuli_comprehensive.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Example script to test StimulusRoutines.CombinedStimuli class.

Builds one instance of every stimulus type in the package, combines them
with `stim.CombinedStimuli`, displays the sequence with `DisplaySequence`,
and finally converts the resulting display log to .nwb (this last step
requires the separate NeuroAnalysisTools package).
"""

import os
import numpy as np
import matplotlib.pyplot as plt
import WarpedVisualStim as rm
import WarpedVisualStim.StimulusRoutines as stim
from WarpedVisualStim.MonitorSetup import Monitor, Indicator
from WarpedVisualStim.DisplayStimulus import DisplaySequence

# ============================ monitor setup ======================================
mon_resolution = (1200, 1920)  # enter your monitors resolution
mon_width_cm = 52.  # enter your monitors width in cm
mon_height_cm = 32.  # enter your monitors height in cm
mon_refresh_rate = 60  # enter your monitors height in Hz
mon_C2T_cm = mon_height_cm / 2.
mon_C2A_cm = mon_width_cm / 2.
mon_center_coordinates = (0., 60.)
mon_dis = 15.
mon_downsample_rate = 5
# =================================================================================

# ============================ indicator setup ====================================
ind_width_cm = 3.
ind_height_cm = 3.
ind_position = 'northeast'
ind_is_sync = True
ind_freq = 1.
# =================================================================================

# ============================ DisplaySequence ====================================
ds_log_dir = r'C:\data'
# ds_log_dir = '/home/zhuangjun1981'
ds_backupdir = None
ds_identifier = 'TEST'
ds_display_iter = 1
ds_mouse_id = 'MOUSE'
ds_user_id = 'USER'
ds_psychopy_mon = 'testMonitor'
ds_is_by_index = True
ds_is_interpolate = False
ds_is_triggered = False
ds_is_save_sequence = False
ds_trigger_event = "negative_edge"
ds_trigger_NI_dev = 'Dev1'
ds_trigger_NI_port = 1
ds_trigger_NI_line = 0
ds_is_sync_pulse = False
ds_sync_pulse_NI_dev = 'Dev1'
ds_sync_pulse_NI_port = 1
ds_sync_pulse_NI_line = 1
ds_display_screen = 0
ds_initial_background_color = 0.
ds_color_weights = (1., 1., 1.)
# =================================================================================

# ============================ generic stimulus parameters ========================
pregap_dur = 2.
postgap_dur = 3.
background = 0.
coordinate = 'degree'
# =================================================================================

# ============================ UniformContrast ====================================
uc_duration = 10.
uc_color = -1
# =================================================================================

# ============================ FlashingCircle =====================================
fc_center = (20., 30.)
fc_radius = 30.
fc_color = -1.
fc_flash_frame_num = 30
fc_midgap_dur = 5.
# NOTE(review): other stimuli in this script use integer iteration counts
# (e.g. sn_iteration = 2); confirm FlashingCircle accepts a float here.
fc_iteration = 5.
fc_is_smooth_edge = True
fc_smooth_width_ratio = 0.2
fc_smooth_func = stim.blur_cos
# =================================================================================

# ============================ SinusoidalLuminance ================================
sl_max_level = 1.
sl_min_level = -1.
sl_frequency = 1.
sl_cycle_num = 10
sl_start_phase = 0.
sl_midgap_dur = 0.
# =================================================================================

# ============================ SparseNoise ========================================
sn_subregion = (-40., 60., 30., 90.)
sn_grid_space = (20., 20.)
sn_probe_size = (20., 10.)
sn_probe_orientation = 30.
sn_probe_frame_num = 15
sn_sign = 'ON-OFF'
sn_iteration = 2
sn_is_include_edge = True
# =================================================================================

# ============================ LocallySparseNoise =================================
lsn_subregion = (-10., 20., 0., 60.)
lsn_min_distance = 40.
lsn_grid_space = (10., 10.)
lsn_probe_size = (10., 10.)
lsn_probe_orientation = 0.
lsn_probe_frame_num = 4
lsn_sign = 'OFF'
lsn_iteration = 2
lsn_repeat = 3
lsn_is_include_edge = True
# =================================================================================

# ============================ DriftingGratingCircle ==============================
dgc_center = (10., 90.)
dgc_sf_list = (0.01, 0.16)
dgc_tf_list = (2., 8.,)
dgc_dire_list = np.arange(0., 360., 180.)
dgc_con_list = (0.8,)
dgc_radius_list = (30.,)
dgc_block_dur = 1.
dgc_midgap_dur = 1.
dgc_iteration = 2
dgc_is_smooth_edge = True
dgc_smooth_width_ratio = 0.2
dgc_smooth_func = stim.blur_cos
dgc_is_blank_block = True
dgc_is_random_start_phase = False
# =================================================================================

# ============================ StaticGratingCirlce ================================
sgc_center = (0., 40.)
sgc_sf_list = (0.08, 0.16)
sgc_ori_list = (0., 90.)
sgc_con_list = (0.5,)
sgc_radius_list = (25.,)
sgc_phase_list = (0., 90., 180., 270.)
sgc_display_dur = 0.25
sgc_midgap_dur = 0.
sgc_iteration = 10
sgc_is_smooth_edge = True
sgc_smooth_width_ratio = 0.2
sgc_smooth_func = stim.blur_cos
sgc_is_blank_block = True
# =================================================================================

# ============================ StaticImages =======================================
si_img_center = (0., 60.)
si_deg_per_pixel = (0.5, 0.5)
si_display_dur = 0.25
si_midgap_dur = 0.
si_iteration = 10
si_is_blank_block = True
si_images_folder = os.path.join(os.path.dirname(rm.__file__), 'test', 'test_data')
# =================================================================================

# ============================ StimulusSeparator ==================================
ss_indicator_on_frame_num = 4
ss_indicator_off_frame_num = 4
ss_cycle_num = 10
# =================================================================================

# ============================ CombinedStimuli ====================================
# indices into `all_stim` below; edit to reorder or drop stimuli
cs_stim_ind_sequence = [0, 1, 2, 3, 4, 5, 6, 7, 8]
# =================================================================================



# ================ Initialize the monitor object ==================================
mon = Monitor(resolution=mon_resolution, dis=mon_dis, mon_width_cm=mon_width_cm,
              mon_height_cm=mon_height_cm, C2T_cm=mon_C2T_cm, C2A_cm=mon_C2A_cm,
              center_coordinates=mon_center_coordinates,
              downsample_rate=mon_downsample_rate)
# mon.plot_map()
# plt.show()
# =================================================================================

# ================ Initialize the indicator object ================================
ind = Indicator(mon, width_cm=ind_width_cm, height_cm=ind_height_cm,
                position=ind_position, is_sync=ind_is_sync, freq=ind_freq)
# =================================================================================

# ================ Initialize the DisplaySequence object ==========================
ds = DisplaySequence(log_dir=ds_log_dir, backupdir=ds_backupdir,
                     identifier=ds_identifier, display_iter=ds_display_iter,
                     mouse_id=ds_mouse_id, user_id=ds_user_id,
                     psychopy_mon=ds_psychopy_mon, is_by_index=ds_is_by_index,
                     is_interpolate=ds_is_interpolate, is_triggered=ds_is_triggered,
                     trigger_event=ds_trigger_event, trigger_NI_dev=ds_trigger_NI_dev,
                     trigger_NI_port=ds_trigger_NI_port, trigger_NI_line=ds_trigger_NI_line,
                     is_sync_pulse=ds_is_sync_pulse, sync_pulse_NI_dev=ds_sync_pulse_NI_dev,
                     sync_pulse_NI_port=ds_sync_pulse_NI_port,
                     sync_pulse_NI_line=ds_sync_pulse_NI_line,
                     display_screen=ds_display_screen, is_save_sequence=ds_is_save_sequence,
                     initial_background_color=ds_initial_background_color,
                     color_weights=ds_color_weights)
# =================================================================================

# ========================== Uniform Contrast =====================================
uc = stim.UniformContrast(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
                          postgap_dur=postgap_dur, coordinate=coordinate,
                          background=background, duration=uc_duration,
                          color=uc_color)
# =================================================================================

# ======================= Flashing Circle =========================================
fc = stim.FlashingCircle(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
                         postgap_dur=postgap_dur, coordinate=coordinate,
                         background=background, center=fc_center, radius=fc_radius,
                         color=fc_color, flash_frame_num=fc_flash_frame_num,
                         midgap_dur=fc_midgap_dur, iteration=fc_iteration,
                         is_smooth_edge=fc_is_smooth_edge,
                         smooth_width_ratio=fc_smooth_width_ratio,
                         smooth_func=fc_smooth_func)
# =================================================================================

# ============================ SinusoidalLuminance ================================
sl = stim.SinusoidalLuminance(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
                              postgap_dur=postgap_dur, coordinate=coordinate,
                              background=background, max_level=sl_max_level,
                              min_level=sl_min_level, frequency=sl_frequency,
                              cycle_num=sl_cycle_num, start_phase=sl_start_phase,
                              midgap_dur=sl_midgap_dur)
# =================================================================================

# ======================== Sparse Noise ===========================================
sn = stim.SparseNoise(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
                      postgap_dur=postgap_dur, coordinate=coordinate,
                      background=background, subregion=sn_subregion,
                      grid_space=sn_grid_space, sign=sn_sign,
                      probe_size=sn_probe_size, probe_orientation=sn_probe_orientation,
                      probe_frame_num=sn_probe_frame_num, iteration=sn_iteration,
                      is_include_edge=sn_is_include_edge)
# =================================================================================

# ======================= Locally Sparse Noise ====================================
lsn = stim.LocallySparseNoise(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
                              postgap_dur=postgap_dur, coordinate=coordinate,
                              background=background, subregion=lsn_subregion,
                              grid_space=lsn_grid_space, sign=lsn_sign,
                              probe_size=lsn_probe_size, probe_orientation=lsn_probe_orientation,
                              probe_frame_num=lsn_probe_frame_num, iteration=lsn_iteration,
                              is_include_edge=lsn_is_include_edge, min_distance=lsn_min_distance,
                              repeat=lsn_repeat)
# =================================================================================

# ======================= Drifting Grating Circle =================================
dgc = stim.DriftingGratingCircle(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
                                 postgap_dur=postgap_dur, coordinate=coordinate,
                                 background=background, center=dgc_center,
                                 sf_list=dgc_sf_list, tf_list=dgc_tf_list,
                                 dire_list=dgc_dire_list, con_list=dgc_con_list,
                                 radius_list=dgc_radius_list, block_dur=dgc_block_dur,
                                 midgap_dur=dgc_midgap_dur, iteration=dgc_iteration,
                                 is_smooth_edge=dgc_is_smooth_edge,
                                 smooth_width_ratio=dgc_smooth_width_ratio,
                                 smooth_func=dgc_smooth_func, is_blank_block=dgc_is_blank_block,
                                 is_random_start_phase=dgc_is_random_start_phase)
# =================================================================================

# ======================= Static Grating Cricle ===================================
sgc = stim.StaticGratingCircle(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
                               postgap_dur=postgap_dur, coordinate=coordinate,
                               background=background, center=sgc_center,
                               sf_list=sgc_sf_list, ori_list=sgc_ori_list,
                               con_list=sgc_con_list, radius_list=sgc_radius_list,
                               phase_list=sgc_phase_list, display_dur=sgc_display_dur,
                               midgap_dur=sgc_midgap_dur, iteration=sgc_iteration,
                               is_smooth_edge=sgc_is_smooth_edge,
                               smooth_width_ratio=sgc_smooth_width_ratio,
                               smooth_func=sgc_smooth_func, is_blank_block=sgc_is_blank_block)
# =================================================================================

# =============================== Static Images ===================================
si = stim.StaticImages(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
                       postgap_dur=postgap_dur, coordinate=coordinate,
                       background=background, img_center=si_img_center,
                       deg_per_pixel=si_deg_per_pixel, display_dur=si_display_dur,
                       midgap_dur=si_midgap_dur, iteration=si_iteration,
                       is_blank_block=si_is_blank_block)
# =================================================================================

# ============================ wrape images =======================================
# pre-warp the static images into monitor coordinates; any stale output file
# from a previous run is removed first
print ('wrapping images ...')
static_images_path = os.path.join(si_images_folder, 'wrapped_images_for_display.hdf5')
if os.path.isfile(static_images_path):
    os.remove(static_images_path)
si.wrap_images(si_images_folder)
# =================================================================================

# ======================= Stimulus Separator ======================================
ss = stim.StimulusSeparator(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
                            postgap_dur=postgap_dur, coordinate=coordinate,
                            background=background,
                            indicator_on_frame_num=ss_indicator_on_frame_num,
                            indicator_off_frame_num=ss_indicator_off_frame_num,
                            cycle_num=ss_cycle_num)
# =================================================================================

# ======================= Combined Stimuli ========================================
cs = stim.CombinedStimuli(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
                          postgap_dur=postgap_dur, coordinate=coordinate,
                          background=background)
# =================================================================================

# ======================= Set Stimuli Sequence ====================================
all_stim = [uc, fc, sl, sn, lsn, dgc, sgc, si, ss]
stim_seq = [all_stim[stim_ind] for stim_ind in cs_stim_ind_sequence]
cs.set_stimuli(stimuli=stim_seq, static_images_path=static_images_path)
# =================================================================================

# =============================== display =========================================
ds.set_stim(cs)
log_path, log_dict = ds.trigger_display()
# =============================== display =========================================


# =============================== convert log to .nwb =============================
# NOTE(review): `os` is already imported at the top of this script; the
# duplicate import below is harmless and kept so this section stays
# copy/paste-able on its own.
import os
import WarpedVisualStim.DisplayLogAnalysis as dla
# NeuroAnalysisTools is a separate optional package -- confirm it is installed
import NeuroAnalysisTools.NwbTools as nt
log_folder, log_fn = os.path.split(log_path)
log_nwb_path = os.path.splitext(log_path)[0] + '.nwb'
save_f = nt.RecordedFile(filename=log_nwb_path, identifier=os.path.splitext(log_fn)[0], description='')
stim_log = dla.DisplayLogAnalyzer(log_path)
save_f.add_visual_display_log_retinotopic_mapping(stim_log=stim_log)
save_f.close()
# =============================== convert log to .nwb =============================

# =============================== show plot========================================
plt.show()
# =================================================================================
336 |
--------------------------------------------------------------------------------
/WarpedVisualStim/examples/example_combined_stimuli_minimum.py:
--------------------------------------------------------------------------------
"""Minimal CombinedStimuli example: display a uniform-contrast stimulus
flanked by two stimulus separators, then convert the display log to .nwb."""
import WarpedVisualStim.StimulusRoutines as stim
from WarpedVisualStim.DisplayStimulus import DisplaySequence
from WarpedVisualStim.MonitorSetup import Monitor, Indicator

# monitor geometry: resolution is (height, width) in pixels
mon = Monitor(resolution=(1200, 1920), dis=15., mon_width_cm=52., mon_height_cm=32.)
ind = Indicator(mon)
# 10 s of full-field black (color=-1.)
uc = stim.UniformContrast(mon, ind, duration=10., color=-1.)
ss = stim.StimulusSeparator(mon, ind)
cs = stim.CombinedStimuli(mon, ind)
# separator / uniform contrast / separator
cs.set_stimuli([ss, uc, ss])
ds = DisplaySequence(log_dir='C:/data')
# ds = DisplaySequence(log_dir='/home/zhuangjun1981')
ds.set_stim(cs)
log_path, log_dict = ds.trigger_display()

# convert log to .nwb
# NOTE(review): these imports are deferred until after display, presumably so
# the stimulus still runs when NeuroAnalysisTools is not installed -- confirm
# before moving them to the top of the file.
import os
import WarpedVisualStim.DisplayLogAnalysis as dla
import NeuroAnalysisTools.NwbTools as nt
log_folder, log_fn = os.path.split(log_path)
log_nwb_path = os.path.splitext(log_path)[0] + '.nwb'
save_f = nt.RecordedFile(filename=log_nwb_path, identifier=os.path.splitext(log_fn)[0], description='')
stim_log = dla.DisplayLogAnalyzer(log_path)
save_f.add_visual_display_log_retinotopic_mapping(stim_log=stim_log)
save_f.close()
--------------------------------------------------------------------------------
/WarpedVisualStim/examples/example_drifting_grating_circle.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Example script to display a DriftingGratingCircle stimulus
(StimulusRoutines.DriftingGratingCircle).
"""

import matplotlib.pyplot as plt
import WarpedVisualStim.StimulusRoutines as stim
from WarpedVisualStim.MonitorSetup import Monitor, Indicator
from WarpedVisualStim.DisplayStimulus import DisplaySequence

# Initialize Monitor object; resolution is (height, width) in pixels
mon = Monitor(resolution=(1200, 1920), dis=15., mon_width_cm=52., mon_height_cm=32.)
ind = Indicator(mon)
ds = DisplaySequence(log_dir='C:/data', is_by_index=True)
# center is (altitude, azimuth) in degrees; the *_list parameters presumably
# define one grating condition per combination -- see StimulusRoutines
dgc = stim.DriftingGratingCircle(monitor=mon, indicator=ind, background=0.,
                                 coordinate='degree', center=(10., 90.), sf_list=(0.02,),
                                 tf_list=(4.0, 2.0), dire_list=(45.,), con_list=(0.8,), radius_list=(20.,),
                                 block_dur=2., midgap_dur=1., iteration=3, pregap_dur=2.,
                                 postgap_dur=3., is_blank_block=True, is_random_start_phase=False)
ds.set_stim(dgc)
ds.trigger_display()
plt.show()
--------------------------------------------------------------------------------
/WarpedVisualStim/examples/example_drifting_grating_circle_multiple.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Example script to display a DriftingGratingMultipleCircle stimulus
(StimulusRoutines.DriftingGratingMultipleCircle) at two centers.
"""

import matplotlib.pyplot as plt
import WarpedVisualStim.StimulusRoutines as stim
from WarpedVisualStim.MonitorSetup import Monitor, Indicator
from WarpedVisualStim.DisplayStimulus import DisplaySequence

# Initialize Monitor object; resolution is (height, width) in pixels
mon = Monitor(resolution=(1200, 1920), dis=15., mon_width_cm=52., mon_height_cm=32.)
ind = Indicator(mon)
ds = DisplaySequence(log_dir='C:/data', is_by_index=True, display_screen=1)
# center_list holds (altitude, azimuth) pairs in degrees, one per circle
dgc = stim.DriftingGratingMultipleCircle(monitor=mon, indicator=ind, background=0.,
                                         coordinate='degree', center_list=[(10., 90.),(0., 80.)], sf_list=(0.02,),
                                         tf_list=(4.0, 2.0), dire_list=(45.,), con_list=(0.8,), radius_list=(20.,),
                                         block_dur=2., midgap_dur=1., iteration=3, pregap_dur=2.,
                                         postgap_dur=3., is_blank_block=True, is_random_start_phase=False)
ds.set_stim(dgc)
ds.trigger_display()
plt.show()
--------------------------------------------------------------------------------
/WarpedVisualStim/examples/example_flashing_circle.py:
--------------------------------------------------------------------------------
"""Example script: display a single flashing circle stimulus."""
import matplotlib.pyplot as plt
import WarpedVisualStim.StimulusRoutines as stim
from WarpedVisualStim.MonitorSetup import Monitor, Indicator
from WarpedVisualStim.DisplayStimulus import DisplaySequence

# Display monitor and its synchronization indicator;
# resolution is (height, width) in pixels.
monitor = Monitor(resolution=(1200, 1920), dis=15., mon_width_cm=52., mon_height_cm=32.)
indicator = Indicator(monitor)

# Frame sequence displayer; frames are referenced by index.
displayer = DisplaySequence(log_dir='C:/data', is_by_index=True)

# A white (color=1.) circle flashed once at (altitude=0, azimuth=60) degrees
# on a black (background=-1.) screen.
circle_params = dict(monitor=monitor, indicator=indicator, coordinate='degree',
                     center=(0., 60.), radius=10., is_smooth_edge=False,
                     smooth_width_ratio=0.2, smooth_func=stim.blur_cos, color=1.,
                     flash_frame_num=60, pregap_dur=2., postgap_dur=3.,
                     background=-1., midgap_dur=1., iteration=1)
flashing_circle = stim.FlashingCircle(**circle_params)

displayer.set_stim(flashing_circle)
displayer.trigger_display()
plt.show()
--------------------------------------------------------------------------------
/WarpedVisualStim/examples/example_locally_sparse_noise.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Example script to test that everything is working. Running this script is a
good first step for trying to debug your experimental setup and is also a
great tool to familiarize yourself with the parameters that are used to
generate each specific stimulus.

!!!IMPORTANT!!!
Note that once you are displaying stimulus, if you want to stop the code from
running all you need to do is press either one of the 'Esc' or 'q' buttons.
"""

import numpy as np
import matplotlib.pyplot as plt
import WarpedVisualStim.StimulusRoutines as stim
from WarpedVisualStim.MonitorSetup import Monitor, Indicator
from WarpedVisualStim.DisplayStimulus import DisplaySequence

"""
To get up and running quickly before performing any experiments it is
sufficient to setup two monitors -- one for display and one for your python
environment. If you don't have two monitors at the moment it is doable with
only one.

Edit the following block of code with your own monitors respective parameters.
Since this script is for general debugging and playing around with the code,
we will arbitrarily populate variables that describe the geometry of where
the mouse will be located during an experiment. All we are interested in
here is just making sure that we can display stimulus on a monitor and learning
how to work with the different stimulus routines.
"""


# ======================== monitor parameters ==================================
mon_resolution = (1200,1920) #enter your monitors resolution (height, width)
mon_width_cm = 52 #enter your monitors width in cm
mon_height_cm = 32 #enter your monitor's refresh rate in cm
mon_refresh_rate = 60 #enter your monitor's refresh rate in Hz

# The following variables correspond to the geometry of your setup don't worry about them for now.
mon_C2T_cm = mon_height_cm / 2. # center (projection point from mouse eye to the monitor) to monitor top edge in cm
mon_C2A_cm = mon_width_cm / 2. # center (projection point from mouse eye to the monitor) to monitor anterior edge in cm
mon_center_coordinates = (0., 60.) # the visual coordinates of center (altitude, azimuth)
mon_dis_cm = 15. # cm from mouse eye to the monitor
mon_downsample_rate = 10 # downsample rate of the displayed images relative to the monitor resolution.
                         # both numbers in mon_resolution should be divisible by this number
# ======================== monitor parameters ==================================

# ======================== indicator parameters ================================
ind_width_cm = 3.
ind_height_cm = 3.
ind_position = 'northeast'
ind_is_sync = True  # bool (was the string 'True'; other examples pass a bool)
ind_freq = 1.
# ======================== indicator parameters ================================

# ============================ generic stimulus parameters ======================
pregap_dur = 2.
postgap_dur = 3.
background = 0.
coordinate = 'degree'
# ===============================================================================

# ============================ LocallySparseNoise ===============================
lsn_subregion = None
lsn_min_distance = 40.
lsn_grid_space = (4., 4.)
lsn_probe_size = (4., 4.)
lsn_probe_orientation = 0.
lsn_probe_frame_num = 15
lsn_sign = 'ON-OFF'
lsn_iteration = 10
lsn_is_include_edge = True
# ===============================================================================

# ============================ DisplaySequence ====================================
ds_log_dir = r'C:\data'
ds_backupdir = None
ds_identifier = 'TEST'
ds_display_iter = 1
ds_mouse_id = 'MOUSE'
ds_user_id = 'USER'
ds_psychopy_mon = 'testMonitor'
ds_is_by_index = True
ds_is_interpolate = False
ds_is_triggered = False
ds_trigger_event = "negative_edge"
ds_trigger_NI_dev = 'Dev1'
ds_trigger_NI_port = 1
ds_trigger_NI_line = 0
ds_is_sync_pulse = False
ds_sync_pulse_NI_dev = 'Dev1'
ds_sync_pulse_NI_port = 1
ds_sync_pulse_NI_line = 1
ds_display_screen = 0
ds_initial_background_color = 0.
# =================================================================================


# Initialize Monitor object
mon = Monitor(resolution=mon_resolution, dis=mon_dis_cm, mon_width_cm=mon_width_cm, mon_height_cm=mon_height_cm,
              C2T_cm=mon_C2T_cm, C2A_cm=mon_C2A_cm, center_coordinates=mon_center_coordinates,
              downsample_rate=mon_downsample_rate)

# plot warped monitor coordinates
mon.plot_map()
plt.show()

# initialize Indicator object
ind = Indicator(mon, width_cm=ind_width_cm, height_cm=ind_height_cm, position=ind_position, is_sync=ind_is_sync,
                freq=ind_freq)


# initialize LocallySparseNoise object
lsn = stim.LocallySparseNoise(monitor=mon, indicator=ind, pregap_dur=pregap_dur,
                              postgap_dur=postgap_dur, coordinate=coordinate,
                              background=background, subregion=lsn_subregion,
                              grid_space=lsn_grid_space, sign=lsn_sign,
                              probe_size=lsn_probe_size, probe_orientation=lsn_probe_orientation,
                              probe_frame_num=lsn_probe_frame_num, iteration=lsn_iteration,
                              is_include_edge=lsn_is_include_edge, min_distance=lsn_min_distance)

# initialize DisplaySequence object
ds = DisplaySequence(log_dir=ds_log_dir, backupdir=ds_backupdir,
                     identifier=ds_identifier, display_iter=ds_display_iter,
                     mouse_id=ds_mouse_id, user_id=ds_user_id,
                     psychopy_mon=ds_psychopy_mon, is_by_index=ds_is_by_index,
                     is_interpolate=ds_is_interpolate, is_triggered=ds_is_triggered,
                     trigger_event=ds_trigger_event, trigger_NI_dev=ds_trigger_NI_dev,
                     trigger_NI_port=ds_trigger_NI_port, trigger_NI_line=ds_trigger_NI_line,
                     is_sync_pulse=ds_is_sync_pulse, sync_pulse_NI_dev=ds_sync_pulse_NI_dev,
                     sync_pulse_NI_port=ds_sync_pulse_NI_port,
                     sync_pulse_NI_line=ds_sync_pulse_NI_line,
                     display_screen=ds_display_screen,
                     initial_background_color=ds_initial_background_color)

# display
# =============================== display =========================================
ds.set_stim(lsn)
ds.trigger_display()
plt.show()
# =================================================================================
--------------------------------------------------------------------------------
/WarpedVisualStim/examples/example_log_analysis.py:
--------------------------------------------------------------------------------
"""Posthoc analysis of a display_log (.pkl) file.

Usage: python example_log_analysis.py -i /absolute/path/to/display_log.pkl
"""
# stdlib imports first, then project imports (PEP 8 grouping)
import argparse

from WarpedVisualStim.DisplayLogAnalysis import DisplayLogAnalyzer
from WarpedVisualStim.tools import FileTools as ft

parser = argparse.ArgumentParser(description='Posthoc analysis on display_log files')
# ft.validate_file is used as the argparse `type` converter to reject bad paths
parser.add_argument('-i', '--input', help=' Absolute path to display_log (.pkl) file', type=ft.validate_file, required=True)
args = parser.parse_args()

dla = DisplayLogAnalyzer(args.input)
stim_dict = dla.get_stim_dict()
# pd_thr is the photodiode threshold passed through to the analyzer
pd_onsets_seq = dla.analyze_photodiode_onsets_sequential(stim_dict, pd_thr=-0.5)
pd_onsets_combined = dla.analyze_photodiode_onsets_combined(pd_onsets_seq)

print(pd_onsets_combined)
--------------------------------------------------------------------------------
/WarpedVisualStim/examples/example_minimum.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
the minimum script to run 10 seconds of black screen
"""

import matplotlib.pyplot as plt
import WarpedVisualStim.StimulusRoutines as stim
from WarpedVisualStim.MonitorSetup import Monitor, Indicator
from WarpedVisualStim.DisplayStimulus import DisplaySequence

# Initialize Monitor object; resolution is (height, width) in pixels
mon = Monitor(resolution=(1200, 1920), dis=15., mon_width_cm=52., mon_height_cm=32.)

# Initialize Indicator object
ind = Indicator(mon)

# Initialize DisplaySequence object
ds = DisplaySequence(log_dir='C:/data')

# Initialize UniformContrast object: 10 s of full-field black (color=-1.)
uc = stim.UniformContrast(monitor=mon, indicator=ind, duration=10., color=-1.)

# set uniform contrast stimulus into the DisplaySequence object
ds.set_stim(uc)

# start display
ds.trigger_display()

# plot distribution of frame duration
plt.show()
--------------------------------------------------------------------------------
/WarpedVisualStim/examples/example_retinotopic_mapping.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Example script to test StimulusRoutines.KSstimAllDir class
"""

import matplotlib.pyplot as plt
import WarpedVisualStim.StimulusRoutines as stim
from WarpedVisualStim.MonitorSetup import Monitor, Indicator
from WarpedVisualStim.DisplayStimulus import DisplaySequence

# ============================ monitor setup ======================================
mon_resolution = (1200, 1920) # enter your monitors resolution (height, width)
mon_width_cm = 52. # enter your monitors width in cm
mon_height_cm = 32. # enter your monitors height in cm
mon_refresh_rate = 60 # enter your monitor's refresh rate in Hz
mon_C2T_cm = mon_height_cm / 2. # center (eye projection point) to monitor top edge in cm
mon_C2A_cm = mon_width_cm / 2. # center (eye projection point) to monitor anterior edge in cm
mon_center_coordinates = (0., 60.) # visual coordinates of center (altitude, azimuth)
mon_dis = 15. # cm from mouse eye to the monitor
mon_downsample_rate = 5 # downsample rate relative to the monitor resolution
# =================================================================================

# ============================ indicator setup ====================================
ind_width_cm = 3.
ind_height_cm = 3.
ind_position = 'northeast'
ind_is_sync = True
ind_freq = 1.
# =================================================================================

# ============================ DisplaySequence ====================================
ds_log_dir = r'C:\data'
ds_backupdir = None
ds_identifier = 'TEST'
ds_display_iter = 1.
ds_mouse_id = 'MOUSE'
ds_user_id = 'USER'
ds_psychopy_mon = 'testMonitor'
ds_is_by_index = False
ds_is_interpolate = False
ds_is_triggered = False
ds_is_save_sequence = False
ds_trigger_event = "negative_edge"
ds_trigger_NI_dev = 'Dev1'
ds_trigger_NI_port = 1
ds_trigger_NI_line = 0
ds_is_sync_pulse = False
ds_sync_pulse_NI_dev = 'Dev1'
ds_sync_pulse_NI_port = 1
ds_sync_pulse_NI_line = 1
ds_display_screen = 0
ds_initial_background_color = 0.
ds_color_weights = (1., 1., 1.)
# =================================================================================

# ============================ generic stimulus parameters ========================
pregap_dur = 2.
postgap_dur = 3.
background = 0.
coordinate = 'degree'
# =================================================================================

# ============================ KSstimAllDir ====================================
ks_square_size = 25.
ks_square_center = (0, 0)
ks_flicker_frame = 10
ks_sweep_width = 20.
ks_step_width = 0.15
ks_sweep_frame = 1
ks_iteration = 1
# =================================================================================

# ================ Initialize the monitor object ==================================
mon = Monitor(resolution=mon_resolution, dis=mon_dis, mon_width_cm=mon_width_cm,
              mon_height_cm=mon_height_cm, C2T_cm=mon_C2T_cm, C2A_cm=mon_C2A_cm,
              center_coordinates=mon_center_coordinates,
              downsample_rate=mon_downsample_rate)
# mon.plot_map()
# plt.show()
# =================================================================================

# ================ Initialize the indicator object ================================
ind = Indicator(mon, width_cm=ind_width_cm, height_cm=ind_height_cm,
                position=ind_position, is_sync=ind_is_sync, freq=ind_freq)
# =================================================================================

# ========================== KSstimAllDir =====================================
ks = stim.KSstimAllDir(monitor=mon, indicator=ind, pregap_dur=pregap_dur, postgap_dur=postgap_dur,
                       background=background, coordinate=coordinate, square_size=ks_square_size,
                       square_center=ks_square_center, flicker_frame=ks_flicker_frame,
                       sweep_width=ks_sweep_width, step_width=ks_step_width, sweep_frame=ks_sweep_frame,
                       iteration=ks_iteration)
# =================================================================================

# ================ Initialize the DisplaySequence object ==========================
ds = DisplaySequence(log_dir=ds_log_dir, backupdir=ds_backupdir,
                     identifier=ds_identifier, display_iter=ds_display_iter,
                     mouse_id=ds_mouse_id, user_id=ds_user_id,
                     psychopy_mon=ds_psychopy_mon, is_by_index=ds_is_by_index,
                     is_interpolate=ds_is_interpolate, is_triggered=ds_is_triggered,
                     trigger_event=ds_trigger_event, trigger_NI_dev=ds_trigger_NI_dev,
                     trigger_NI_port=ds_trigger_NI_port, trigger_NI_line=ds_trigger_NI_line,
                     is_sync_pulse=ds_is_sync_pulse, sync_pulse_NI_dev=ds_sync_pulse_NI_dev,
                     sync_pulse_NI_port=ds_sync_pulse_NI_port,
                     sync_pulse_NI_line=ds_sync_pulse_NI_line,
                     display_screen=ds_display_screen, is_save_sequence=ds_is_save_sequence,
                     initial_background_color=ds_initial_background_color,
                     color_weights=ds_color_weights)
# =================================================================================

# =============================== display =========================================
ds.set_stim(ks)
ds.trigger_display()
plt.show()
# =================================================================================

--------------------------------------------------------------------------------
/WarpedVisualStim/examples/example_retinotopic_mapping_random_direction.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Example script to test StimulusRoutines.KSstimSeqDir class
"""

import matplotlib.pyplot as plt
import numpy as np
import WarpedVisualStim.StimulusRoutines as stim
from WarpedVisualStim.MonitorSetup import Monitor, Indicator
from WarpedVisualStim.DisplayStimulus import DisplaySequence


#%% ============================ monitor setup ======================================
mon_resolution = (1080, 1920) # enter your monitors resolution (height, width)
mon_width_cm = 59.5 # enter your monitors width in cm
mon_height_cm = 33.6 # enter your monitors height in cm
mon_refresh_rate = 60 # enter your monitor's refresh rate in Hz
mon_C2T_cm = mon_height_cm / 2. # center (eye projection point) to monitor top edge in cm
mon_C2A_cm = mon_width_cm / 2. # center (eye projection point) to monitor anterior edge in cm
mon_center_coordinates = (10., 45.) # visual coordinates of center (altitude, azimuth)
mon_dis = 17. # cm from mouse eye to the monitor
mon_downsample_rate = 5 # downsample rate relative to the monitor resolution


#%% ============================ indicator setup ====================================
ind_width_cm = 1.8
ind_height_cm = 1.5
ind_position = 'southeast'
ind_is_sync = True
ind_freq = 1.


#%% ============================ generic stimulus parameters ========================
pregap_dur = 4.
postgap_dur = 1.
background = 0.
coordinate = 'degree'


#%% ============================ DisplaySequence ====================================
ds_log_dir = r'D:\\LZX'
ds_backupdir = None
ds_identifier = 'TEST'
ds_display_iter = 1.
ds_mouse_id = 'MOUSE'
ds_user_id = 'LZX'
ds_psychopy_mon = 'testMonitor'
ds_is_by_index = False
ds_is_interpolate = False
ds_is_triggered = False
ds_is_save_sequence = False
ds_trigger_event = "negative_edge"
ds_trigger_NI_dev = 'Dev1'
ds_trigger_NI_port = 1
ds_trigger_NI_line = 0
ds_is_sync_pulse = False
ds_sync_pulse_NI_dev = 'Dev1'
ds_sync_pulse_NI_port = 1
ds_sync_pulse_NI_line = 1
ds_display_screen = 1
ds_initial_background_color = 0.
ds_color_weights = (1., 1., 1.)


# ============================ KSstim ====================================
ks_square_size = 25.
ks_square_center = (0, 0)
ks_flicker_frame = 10
ks_sweep_width = 20.
ks_step_width = 0.15
ks_sweep_frame = 1
ks_iteration = 1


#%% ================ Initialize the monitor object ==================================
mon = Monitor(resolution=mon_resolution, dis=mon_dis, mon_width_cm=mon_width_cm,
              mon_height_cm=mon_height_cm, C2T_cm=mon_C2T_cm, C2A_cm=mon_C2A_cm,
              center_coordinates=mon_center_coordinates,
              downsample_rate=mon_downsample_rate)
# mon.plot_map()
# plt.show()


#%% ================ Initialize the indicator object ================================
ind = Indicator(mon, width_cm=ind_width_cm, height_cm=ind_height_cm,
                position=ind_position, is_sync=ind_is_sync, freq=ind_freq)


#%% ================ Initialize the DisplaySequence object ==========================
ds = DisplaySequence(log_dir=ds_log_dir, backupdir=ds_backupdir,
                     identifier=ds_identifier, display_iter=ds_display_iter,
                     mouse_id=ds_mouse_id, user_id=ds_user_id,
                     psychopy_mon=ds_psychopy_mon, is_by_index=ds_is_by_index,
                     is_interpolate=ds_is_interpolate, is_triggered=ds_is_triggered,
                     trigger_event=ds_trigger_event, trigger_NI_dev=ds_trigger_NI_dev,
                     trigger_NI_port=ds_trigger_NI_port, trigger_NI_line=ds_trigger_NI_line,
                     is_sync_pulse=ds_is_sync_pulse, sync_pulse_NI_dev=ds_sync_pulse_NI_dev,
                     sync_pulse_NI_port=ds_sync_pulse_NI_port,
                     sync_pulse_NI_line=ds_sync_pulse_NI_line,
                     display_screen=ds_display_screen, is_save_sequence=ds_is_save_sequence,
                     initial_background_color=ds_initial_background_color,
                     color_weights=ds_color_weights)


#%% =================== generate direction sequence =================================
# A pseudo-random sequence of directions is generated for 10 rounds.
# Each round contains 4 directions.
# The same directions don't occur consecutively.
dir_index_seq = np.array([3, 0, 2, 1,
                          2, 0, 1, 3,
                          2, 1, 3, 0,
                          1, 2, 3, 0,
                          1, 2, 0, 3,
                          2, 3, 1, 0,
                          2, 1, 0, 3,
                          2, 3, 0, 1,
                          0, 1, 2, 3,
                          2, 3, 0, 1], dtype=np.int16)
# B2U: bottom-to-up, U2B: up-to-bottom, L2R: left-to-right, R2L: right-to-left
all_dir = ['B2U', 'U2B', 'L2R', 'R2L']
dir_seq = [all_dir[dir_index] for dir_index in dir_index_seq]


#%% ============================== KSstimSeqDir =====================================
ks = stim.KSstimSeqDir(monitor=mon, indicator=ind, direction=dir_seq, pregap_dur=pregap_dur, postgap_dur=postgap_dur,
                       background=background, coordinate=coordinate, square_size=ks_square_size,
                       square_center=ks_square_center, flicker_frame=ks_flicker_frame,
                       sweep_width=ks_sweep_width, step_width=ks_step_width, sweep_frame=ks_sweep_frame,
                       iteration=ks_iteration)


#%% =============================== display =========================================
ds.set_stim(ks)
ds.trigger_display()
plt.show()


--------------------------------------------------------------------------------
/WarpedVisualStim/examples/example_sinusoidal_luminance.py:
--------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import WarpedVisualStim.StimulusRoutines as stim
from WarpedVisualStim.MonitorSetup import Monitor, Indicator
from WarpedVisualStim.DisplayStimulus import DisplaySequence

# Initialize Monitor object; resolution is (height, width) in pixels
mon = Monitor(resolution=(1200, 1920), dis=15., mon_width_cm=52., mon_height_cm=32.)

# Initialize Indicator object
ind = Indicator(mon)

# Initialize DisplaySequence object
ds = DisplaySequence(log_dir='C:/data')

# Initialize SinusoidalLuminance object: luminance presumably modulated
# between min_level and max_level at `frequency` Hz for `cycle_num` cycles
# (see StimulusRoutines.SinusoidalLuminance for the exact contract)
sl = stim.SinusoidalLuminance(monitor=mon, indicator=ind,
                              pregap_dur=1.,
                              midgap_dur=0.,
                              postgap_dur=3.,
                              max_level=0.,
                              min_level=-0.8,
                              frequency=0.5, cycle_num=3,
                              start_phase=0.)

# set the sinusoidal luminance stimulus into the DisplaySequence object
ds.set_stim(sl)

# start display
ds.trigger_display()

# plot distribution of frame duration
plt.show()
--------------------------------------------------------------------------------
/WarpedVisualStim/test/test_DisplayLogAnalysis.py:
--------------------------------------------------------------------------------
"""Tests for WarpedVisualStim.DisplayLogAnalysis."""
import os
import unittest
# from .. import DisplayLogAnalysis as dla

import WarpedVisualStim.DisplayLogAnalysis as dla

# run relative to this file so test_data paths resolve
curr_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_folder)

# NOTE(review): the test class below is disabled -- it depends on a display-log
# fixture ('180323212952-...-complete.pkl') that is not present in test_data.
# Re-enable once the fixture is available.
# class TestDisplayLogAnalysis(unittest.TestCase):
#
#     def setUp(self):
#         self.log_path = os.path.join(curr_folder, 'test_data',
#                                      '180323212952-CombinedStimuli-MMOUSE-USER-TEST-notTriggered-complete.pkl')
#         self.log = dla.DisplayLogAnalyzer(log_path=self.log_path)
#
#     def test_DisplayLogAnalyzer(self):
#
#         stim_dict = self.log.get_stim_dict()
#         pd_onsets_seq_0 = self.log.analyze_photodiode_onsets_sequential(stim_dict=stim_dict, pd_thr=-0.5)
#         # print(len(pd_onsets_seq_0))
#         assert (len(pd_onsets_seq_0) == 310)
#         pd_onsets_seq = self.log.analyze_photodiode_onsets_sequential(stim_dict=stim_dict, pd_thr=0.5)
#         assert (len(pd_onsets_seq) == 572)
#         pd_onsets_com = self.log.analyze_photodiode_onsets_combined(pd_onsets_seq=pd_onsets_seq, is_dgc_blocked=True)
#         assert (len(pd_onsets_com) == 14)
#
#     def test_DisplayLogAnalyzer_LSN(self):
#         stim_dict = self.log.get_stim_dict()
#         # print(stim_dict['001_LocallySparseNoiseRetinotopicMapping'].keys())
#         pd_onsets_seq = self.log.analyze_photodiode_onsets_sequential(stim_dict=stim_dict, pd_thr=-0.5)
#         # print('\n'.join([str(p) for p in pd_onsets_seq]))
#         pd_onsets_com = self.log.analyze_photodiode_onsets_combined(pd_onsets_seq=pd_onsets_seq, is_dgc_blocked=True)
#
#         repeat = stim_dict['006_LocallySparseNoiseRetinotopicMapping']['repeat']
#         iteration = stim_dict['006_LocallySparseNoiseRetinotopicMapping']['iteration']
#
#         lsn_dict = pd_onsets_com['006_LocallySparseNoiseRetinotopicMapping']
#         # print('\n'.join(lsn_dict.keys()))
#         for probe_n, probe_onset in lsn_dict.items():
#             assert (len(probe_onset['global_pd_onset_ind']) == repeat * iteration)
--------------------------------------------------------------------------------
/WarpedVisualStim/test/test_DisplayStimulus.py:
--------------------------------------------------------------------------------
"""Tests for WarpedVisualStim.DisplayStimulus (requires a display device)."""
import os
import unittest
# from .. import DisplayStimulus as ds

import WarpedVisualStim.DisplayStimulus as ds

# run relative to this file so test_data paths resolve
curr_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_folder)

class TestSimulation(unittest.TestCase):
    """End-to-end display test: renders a short UniformContrast stimulus."""

    def setUp(self):

        # from .. import MonitorSetup as ms
        import WarpedVisualStim.MonitorSetup as ms

        # Setup monitor/indicator objects
        self.monitor = ms.Monitor(resolution=(1200, 1600), dis=15.,
                                  mon_width_cm=40., mon_height_cm=30.,
                                  C2T_cm=15., C2A_cm=20., center_coordinates=(0., 60.),
                                  downsample_rate=10)
        # import matplotlib.pyplot as plt
        # self.monitor.plot_map()
        # plt.show()

        self.indicator = ms.Indicator(self.monitor, width_cm=3., height_cm=3., position='northeast',
                                      is_sync=True, freq=1.)

    def test_initial_background(self):
        # Displays a very short (0.1 s) stimulus to exercise the full
        # DisplaySequence pipeline, including initial_background_color.

        import WarpedVisualStim.StimulusRoutines as stim

        log_dir = os.path.join(curr_folder, 'test_data')

        displayer = ds.DisplaySequence(log_dir=log_dir, backupdir=None, identifier='TEST', display_iter=1,
                                       mouse_id='MOUSE', user_id='USER', psychopy_mon='testMonitor',
                                       is_by_index=True, is_interpolate=False, is_triggered=False,
                                       is_save_sequence=False, trigger_event="negative_edge",
                                       trigger_NI_dev='Dev1', trigger_NI_port=1, trigger_NI_line=0,
                                       is_sync_pulse=False, sync_pulse_NI_dev='Dev1', sync_pulse_NI_port=1,
                                       sync_pulse_NI_line=1, display_screen=0, initial_background_color=0.,
                                       color_weights=(0., 1., 1.))

        # print(displayer.initial_background_color)

        uc = stim.UniformContrast(monitor=self.monitor, indicator=self.indicator, pregap_dur=0.1,
                                  postgap_dur=0.1, coordinate='degree',
                                  background=0., duration=0.1, color=0.8)

        displayer.set_stim(uc)
        # NOTE(review): in the example scripts trigger_display appears to return
        # (log_path, log_dict); the single-name binding here may be misleading -- confirm.
        log_path = displayer.trigger_display()

        # clean up the log directory created by the display above
        import shutil
        log_dir = os.path.join(curr_folder, 'test_data', 'visual_display_log')
        shutil.rmtree(log_dir)


--------------------------------------------------------------------------------
/WarpedVisualStim/test/test_MonitorSetup.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 | # from .. import MonitorSetup as ms
4 |
5 | import WarpedVisualStim.MonitorSetup as ms
6 |
7 | curr_folder = os.path.dirname(os.path.realpath(__file__))
8 | os.chdir(curr_folder)
9 |
10 | class TestMonitorSetup(unittest.TestCase):
11 |
    def setUp(self):
        # tifffile was removed from skimage.external in newer scikit-image
        # versions; fall back to the standalone tifffile package.
        try:
            import skimage.external.tifffile as tf
        except ImportError:
            import tifffile as tf
        # first frame of the natural-scene test stack, used by the warp tests
        self.natural_scene = tf.imread(os.path.join(curr_folder,
                                                    'test_data',
                                                    'images_original.tif'))[0]
20 |
21 | def test_Monitor_remap(self):
22 | mon = ms.Monitor(resolution=(1200, 1600), dis=15.,
23 | mon_width_cm=40., mon_height_cm=30.,
24 | C2T_cm=15., C2A_cm=20., center_coordinates=(0., 60.),
25 | downsample_rate=10)
26 | mon.remap()
27 | assert(abs(mon.deg_coord_y[60, 80] - 0.) < 1.)
28 | assert(abs(mon.deg_coord_x[60, 80] - 60.) < 1.)
29 |
30 | # mon.plot_map()
31 | # import matplotlib.pyplot as plt
32 | # plt.show()
33 |
34 | mon = ms.Monitor(resolution=(1200, 1600), dis=15.,
35 | mon_width_cm=40., mon_height_cm=30.,
36 | C2T_cm=15., C2A_cm=20., center_coordinates=(20., -10.),
37 | downsample_rate=10)
38 | mon.remap()
39 | assert (abs(mon.deg_coord_y[60, 80] - 20.) < 1.)
40 | assert (abs(mon.deg_coord_x[60, 80] - (-10.)) < 1.)
41 | # mon.plot_map()
42 | # import matplotlib.pyplot as plt
43 | # plt.show()
44 |
45 | mon = ms.Monitor(resolution=(1200, 1600), dis=15.,
46 | mon_width_cm=40., mon_height_cm=30.,
47 | C2T_cm=5., C2A_cm=35., center_coordinates=(20., -10.),
48 | downsample_rate=10)
49 | mon.remap()
50 | assert (abs(mon.deg_coord_y[20, 140] - 20.) < 1.)
51 | assert (abs(mon.deg_coord_x[20, 140] - (-10.)) < 1.)
52 | # mon.plot_map()
53 | # import matplotlib.pyplot as plt
54 | # plt.show()
55 |
56 | def test_Monitor_generate_lookup_table(self):
57 | mon = ms.Monitor(resolution=(1200,1600), dis=15.,
58 | mon_width_cm=40., mon_height_cm=30.,
59 | C2T_cm=15.,C2A_cm=20., center_coordinates=(0., 60.),
60 | downsample_rate=10)
61 |
62 | lookup_i, lookup_j = mon.generate_lookup_table()
63 |
64 | # import matplotlib.pyplot as plt
65 | # f, (ax0, ax1) = plt.subplots(1, 2, figsize=(12, 5))
66 | # fig0 = ax0.imshow(lookup_i)
67 | # f.colorbar(fig0, ax=ax0)
68 | # fig1 = ax1.imshow(lookup_j)
69 | # f.colorbar(fig1, ax=ax1)
70 | # plt.show()
71 |
72 | def test_Monitor_warp_images(self):
73 | mon = ms.Monitor(resolution=(1200, 1600), dis=15.,
74 | mon_width_cm=40., mon_height_cm=30.,
75 | C2T_cm=15., C2A_cm=20., center_coordinates=(0., 60.),
76 | downsample_rate=10)
77 | import numpy as np
78 | nsw, altw, aziw, nsd, altd, azid = mon.warp_images(imgs=np.array([self.natural_scene]),
79 | center_coor=[0., 60.], deg_per_pixel=0.25,
80 | is_luminance_correction=True)
81 |
82 | # import matplotlib.pyplot as plt
83 | # f, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(6, 10))
84 | # fig1 = ax1.imshow(self.natural_scene, cmap='gray', vmin=0., vmax=255.)
85 | # ax1.set_axis_off()
86 | # ax1.set_title('original')
87 | # f.colorbar(fig1, ax=ax1)
88 | # fig2 = ax2.imshow(nsw[0], cmap='gray', vmin=-1., vmax=1.)
89 | # ax2.set_axis_off()
90 | # ax2.set_title('wrapped')
91 | # f.colorbar(fig2, ax=ax2)
92 | # fig3 = ax3.imshow(nsd[0], cmap='gray', vmin=0, vmax=255)
93 | # ax3.set_axis_off()
94 | # ax3.set_title('dewrapped')
95 | # f.colorbar(fig3, ax=ax3)
96 | # plt.show()
97 | #
98 | # print altd.shape
99 | # print azid.shape
100 |
101 | assert (altw.shape[0] == nsw.shape[1])
102 | assert (altw.shape[1] == nsw.shape[2])
103 | assert (aziw.shape[0] == nsw.shape[1])
104 | assert (aziw.shape[1] == nsw.shape[2])
105 | assert (altd.shape[0] == nsd.shape[1])
106 | assert (altd.shape[1] == nsd.shape[2])
107 | assert (azid.shape[0] == nsd.shape[1])
108 | assert (azid.shape[1] == nsd.shape[2])
109 | assert (np.nanmean(nsw.flat) < 1E6)
110 |
--------------------------------------------------------------------------------
/WarpedVisualStim/test/test_data/images_original.tif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuangjun1981/WarpedVisualStim/b4df9575109f9cf1525a2e85814a9c61052f2754/WarpedVisualStim/test/test_data/images_original.tif
--------------------------------------------------------------------------------
/WarpedVisualStim/test/test_tools_FileTools_Logger.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 | import shutil
4 | import numpy as np
5 | # from ..tools import FileTools as gt
6 |
7 | import WarpedVisualStim.tools.FileTools as gt
8 | import WarpedVisualStim.StimulusRoutines as stim
9 | import WarpedVisualStim.MonitorSetup as ms
10 | import WarpedVisualStim.DisplayStimulus as ds
11 |
12 |
class TestLogger(unittest.TestCase):
    """Exercise FileTools.Logger with plain dicts, stimulus logs and display logs."""

    def setUp(self):
        self.curr_folder = os.path.dirname(os.path.realpath(__file__))
        self.save_folder = os.path.join(self.curr_folder, 'test_data')

        # monitor / indicator / display objects shared by the tests below
        self.mon = ms.Monitor(resolution=(1200, 1920), dis=15., mon_width_cm=52., mon_height_cm=32.)
        self.ind = ms.Indicator(self.mon)
        self.player = ds.DisplaySequence(log_dir=self.save_folder)

    def test_save_log(self):
        save_path = os.path.join(self.save_folder, 'test_log.hdf5')

        # a deliberately awkward dictionary (non-string keys, nested dicts,
        # NaN, None, booleans) to exercise the hdf5 serialization
        log_dict = {
            5: 'hello',
            'next_dict': {(3, 4): [1, 2, 3],
                          'str_list': ['1', '2', '3']},
            'nan': np.nan,
            'None': None,
            'bool': [False, True],
            'third_dict': {'forth_dict': {'fifth_dict': {'a': 0, 0: 'a'}}},
        }

        logger = gt.Logger(log_dict=log_dict, save_path=save_path)
        # save twice on purpose to exercise repeated saving of the same log
        logger.save_log()
        logger.save_log()

    def test_save_locally_sparse_noise(self):
        # build a small locally-sparse-noise stimulus and log its index dict
        lsn = stim.LocallySparseNoise(monitor=self.mon, indicator=self.ind,
                                      min_distance=20., background=0., coordinate='degree',
                                      grid_space=(10., 10.), probe_size=(10., 10.),
                                      probe_orientation=0., probe_frame_num=6,
                                      subregion=[-10., 10., 0., 30.], sign='ON',
                                      iteration=1, pregap_dur=0.1, postgap_dur=0.2,
                                      is_include_edge=True, repeat=1)

        _, lsn_dict = lsn.generate_movie_by_index()

        logger = gt.Logger(log_dict=lsn_dict,
                           save_path=os.path.join(self.save_folder, 'LSN_log.hdf5'))
        logger.save_log()

    def test_save_log_uc(self):
        # display a short uniform-contrast stimulus, then clean up its log file(s)
        uc = stim.UniformContrast(monitor=self.mon, indicator=self.ind, duration=1., color=-1.)

        self.player.set_stim(uc)
        self.player.trigger_display()

        log_folder = os.path.join(self.save_folder, 'visual_display_log')
        for fn in os.listdir(log_folder):
            if 'UniformContrast-MTest-Name-000-notTriggered-complete.hdf5' in fn:
                print('deleting {} ...'.format(fn))
                os.remove(os.path.join(log_folder, fn))
85 |
86 |
87 |
88 |
89 |
--------------------------------------------------------------------------------
/WarpedVisualStim/test/test_tools_GenericTools.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 | import numpy as np
4 | # from ..tools import GenericTools as gt
5 |
6 | import WarpedVisualStim.tools.GenericTools as gt
7 |
# run the tests from this test directory for consistent relative paths
curr_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_folder)
10 |
class TestMonitorSetup(unittest.TestCase):
    # NOTE(review): the class name says MonitorSetup but this file tests
    # tools.GenericTools threshold-crossing helpers; name kept as-is so
    # unittest discovery is unchanged.

    def setUp(self):
        # reference trace shared by all crossing tests
        self.data = np.array([1.0, 0.9, 0.8, 0.7, 0.9, 1.2, 1.5, 0.5, 0.6, 0.6, 0.6, 0.9, 0.9, 0.9, 1.2,
                              -0.3, -0.3, -0.2, 0.3, 2.0, 3.5, 0.8, 0.8, 0.6, 3.2, 1.4, 0.9, 0.9, 0.4])

    def test_up_crossings(self):
        assert (np.array_equal(gt.up_crossings(data=self.data, threshold=0.9), [5, 14, 19, 24]))
        assert (np.array_equal(gt.up_crossings(data=self.data, threshold=0.6), [11, 19, 24]))
        assert (np.array_equal(gt.up_crossings(data=self.data, threshold=0.5), [8, 19]))
        assert (len(gt.up_crossings(data=self.data, threshold=5.)) == 0)

    def test_down_crossings(self):
        assert (np.array_equal(gt.down_crossings(data=self.data, threshold=0.6), [7, 15, 28]))
        assert (np.array_equal(gt.down_crossings(data=self.data, threshold=0.5), [15, 28]))
        assert (np.array_equal(gt.down_crossings(data=self.data, threshold=2.), [21, 25]))
        assert (np.array_equal(gt.down_crossings(data=self.data, threshold=0.8), [3, 7, 15, 23, 28]))
        assert (len(gt.down_crossings(data=self.data, threshold=-3.)) == 0)

    def test_all_crossings(self):
        assert (np.array_equal(gt.all_crossings(data=self.data, threshold=0.6), [7, 11, 15, 19, 24, 28]))
        # bug fix: the original lines asserted len(result == 0) -- the length
        # of a boolean comparison array -- which is truthy for any non-empty
        # result and therefore vacuous. Crossings do exist at these extreme
        # thresholds (the trace touches -0.3 and 3.5 and comes back), so
        # assert the actual crossing indices instead.
        assert (np.array_equal(gt.all_crossings(data=self.data, threshold=-0.3), [17]))
        assert (np.array_equal(gt.all_crossings(data=self.data, threshold=3.5), [21]))
--------------------------------------------------------------------------------
/WarpedVisualStim/test/test_tools_ImageAnalysis.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 | # from ..tools import ImageAnalysis as ia
4 |
5 | import WarpedVisualStim.tools.ImageAnalysis as ia
6 |
# run the tests from this test directory for consistent relative paths
curr_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_folder)
9 |
class TestSimulation(unittest.TestCase):
    """Tests for the tools.ImageAnalysis distance helper."""

    def setUp(self):
        pass

    def test_distance(self):
        # (inputs, expected distance) pairs: scalar pair and a 3-4-5 triangle
        cases = [((3., 4.), 1.),
                 (([5., 8.], [9., 11.]), 5.)]
        for args, expected in cases:
            assert ia.distance(*args) == expected
--------------------------------------------------------------------------------
/WarpedVisualStim/tools/FileTools.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junz'
2 |
3 | import numpy as np
4 | import pickle
5 | import os
6 | import shutil
7 | import h5py
8 | import datetime
9 | from . import ImageAnalysis as ia
10 | import os
11 | import argparse
12 | from pathlib import Path
13 |
14 | try:
15 | import tifffile as tf
16 | except ImportError:
17 | import skimage.external.tifffile as tf
18 |
19 | try:
20 | import cv2
21 | except ImportError as e:
22 | print('can not import OpenCV. \n{}'.format(e))
23 |
24 |
def saveFile(path, data):
    """Pickle *data* to *path* using protocol 2 (readable from Python 2)."""
    with open(path, 'wb') as out_f:
        pickle.dump(data, out_f, protocol=2)
28 |
29 |
def loadFile(path):
    """Unpickle and return the object stored at *path*.

    encoding='bytes' keeps Python-2-era pickles loadable.
    """
    with open(path, 'rb') as in_f:
        return pickle.load(in_f, encoding='bytes')
34 |
35 |
def copy(src, dest):
    """
    Copy *src* to *dest*; works for both a single file and a directory tree.

    If src is a file it is copied into dest; if src is a directory, dest ends
    up with the same content as src. Copy errors are printed, not raised.
    Raises IOError when src is neither a file nor a directory.
    """

    if os.path.isfile(src):
        print('Source is a file. Starting copy...')
        copier = shutil.copy
    elif os.path.isdir(src):
        print('Source is a directory. Starting copy...')
        copier = shutil.copytree
    else:
        raise IOError('Source is neither a file or a directory. Can not be copied!')

    # best-effort: report the failure instead of propagating it
    try:
        copier(src, dest)
        print('End of copy.')
    except Exception as error:
        print(error)
60 |
61 |
def list_all_files(folder):
    """
    get a list of full path of all files in a folder (including subfolder)
    """
    # os.walk yields (dir_path, sub_dirs, file_names) per directory, top-down
    return [os.path.join(dir_path, file_name)
            for dir_path, _sub_dirs, file_names in os.walk(folder)
            for file_name in file_names]
71 |
72 |
def batchCopy(pathList, destinationFolder, isDelete=False):
    """
    copy everything in the pathList into destinationFolder
    return a list of paths which can not be copied.

    :param pathList: list of file and/or directory paths to copy
    :param destinationFolder: target folder; created (including parents) when missing
    :param isDelete: if True, remove each source after a successful copy
    :return: list of paths that could not be copied (or were skipped)
    """

    if not os.path.isdir(destinationFolder):
        # bug fix: use makedirs instead of mkdir so a nested destination
        # path whose parents do not exist yet also works
        os.makedirs(destinationFolder)

    unCopied = []

    for path in pathList:
        print('\nStart copying ' + path + ' ...')
        if os.path.isfile(path):
            print('This path is a file. Keep copying ...')
            try:
                shutil.copy(path, destinationFolder)
                print('End of copying.')
                if isDelete:
                    print('Deleting this file ...')
                    try:
                        os.remove(path)
                        print('End of deleting.\n')
                    except Exception as error:
                        print('Can not delete this file.\nError message:\n' + str(error) + '\n')
                else:
                    print('')
            except Exception as error:
                unCopied.append(path)
                print('Can not copy this file.\nError message:\n' + str(error) + '\n')

        elif os.path.isdir(path):
            print('This path is a directory. Keep copying ...')
            try:
                # a directory is copied as a subfolder of the destination
                _, folderName = os.path.split(path)
                shutil.copytree(path, os.path.join(destinationFolder, folderName))
                print('End of copying.')
                if isDelete:
                    print('Deleting this directory ...')
                    try:
                        shutil.rmtree(path)
                        print('End of deleting.\n')
                    except Exception as error:
                        print('Can not delete this directory.\nError message:\n' + str(error) + '\n')
                else:
                    print('')
            except Exception as error:
                unCopied.append(path)
                print('Can not copy this directory.\nError message:\n' + str(error) + '\n')
        else:
            unCopied.append(path)
            print('This path is neither a file or a directory. Skip!\n')

    return unCopied
126 |
127 |
128 | # def importRawJCam(path,
129 | # dtype=np.dtype('>f'),
130 | # headerLength=96, # length of the header, measured as the data type defined above
131 | # columnNumIndex=14, # index of number of rows in header
132 | # rowNumIndex=15, # index of number of columns in header
133 | # frameNumIndex=16, # index of number of frames in header
134 | # decimation=None, # decimation number
135 | # exposureTimeIndex=17): # index of exposure time in header, exposure time is measured in ms
136 | # """
137 | # import raw JCam files into np.array
138 | #
139 | #
140 | # raw file format:
141 | # data type: 32 bit sigle precision floating point number
142 | # data format: big-endian single-precision float, high-byte-first motorola
143 | # header length: 96 floating point number
144 | # column number index: 14
145 | # row number index: 15
146 | # frame number index: 16
147 | # exposure time index: 17
148 | # """
149 | # imageFile = np.fromfile(path, dtype=dtype, count=-1)
150 | #
151 | # columnNum = np.int(imageFile[columnNumIndex])
152 | # rowNum = np.int(imageFile[rowNumIndex])
153 | #
154 | # if decimation is not None:
155 | # columnNum /= decimation
156 | # rowNum /= decimation
157 | #
158 | # frameNum = np.int(imageFile[frameNumIndex])
159 | #
160 | # if frameNum == 0: # if it is a single frame image
161 | # frameNum += 1
162 | #
163 | # exposureTime = np.float(imageFile[exposureTimeIndex])
164 | #
165 | # imageFile = imageFile[headerLength:]
166 | #
167 | # print('width =', str(columnNum), 'pixels')
168 | # print('height =', str(rowNum), 'pixels')
169 | # print('length =', str(frameNum), 'frame(s)')
170 | # print('exposure time =', str(exposureTime), 'ms')
171 | #
172 | # imageFile = imageFile.reshape((frameNum, rowNum, columnNum))
173 | #
174 | # return imageFile, exposureTime
175 | #
176 | #
177 | # def readBinaryFile(path,
178 | # position,
179 | # count=1,
180 | # dtype=np.dtype('>f'),
181 | # whence=os.SEEK_SET):
182 | # """
183 | # read arbitary part of a binary file,
184 | # data type defined by dtype,
185 | # start position defined by position (counts accordinating to dtype)
186 | # length defined by count.
187 | # """
188 | #
189 | # f = open(path, 'rb')
190 | # f.seek(position * dtype.alignment, whence)
191 | # data = np.fromfile(f, dtype=dtype, count=count)
192 | # f.close()
193 | # return data
194 | #
195 | #
196 | # def readBinaryFile2(f,
197 | # position,
198 | # count=1,
199 | # dtype=np.dtype('>f'),
200 | # whence=os.SEEK_SET):
201 | # """
202 | # similar as readBinaryFile but without opening and closing file object
203 | # """
204 | # f.seek((position * dtype.alignment), whence)
205 | # data = np.fromfile(f, dtype=dtype, count=count)
206 | # return data
207 | #
208 | #
209 | # def importRawJPhys(path,
210 | # dtype=np.dtype('>f'),
211 | # headerLength=96, # length of the header for each channel
212 | # channels=('photodiode2', 'read', 'trigger', 'photodiode'), # name of all channels
213 | # sf=10000): # sampling rate, Hz
214 | # """
215 | # import raw JPhys files into np.array
216 | # one dictionary contains header for each channel
217 | # the other contains values for each for each channel
218 | # """
219 | #
220 | # JPhysFile = np.fromfile(path, dtype=dtype, count=-1)
221 | # channelNum = len(channels)
222 | #
223 | # channelLength = len(JPhysFile) / channelNum
224 | #
225 | # if len(JPhysFile) % channelNum != 0:
226 | # raise ArithmeticError('Length of the file should be divisible by channel number!')
227 | #
228 | # header = {}
229 | # body = {}
230 | #
231 | # for index, channelname in enumerate(channels):
232 | # channelStart = index * channelLength
233 | # channelEnd = channelStart + channelLength
234 | #
235 | # header.update({channels[index]: JPhysFile[channelStart:channelStart + headerLength]})
236 | # body.update({channels[index]: JPhysFile[channelStart + headerLength:channelEnd]})
237 | #
238 | # body.update({'samplingRate': sf})
239 | #
240 | # return header, body
241 | #
242 | #
243 | # def importRawNewJPhys(path,
244 | # dtype=np.dtype('>f'),
245 | # headerLength=96, # length of the header for each channel
246 | # channels=('photodiode2',
247 | # 'read',
248 | # 'trigger',
249 | # 'photodiode',
250 | # 'sweep',
251 | # 'visualFrame',
252 | # 'runningRef',
253 | # 'runningSig',
254 | # 'reward',
255 | # 'licking'), # name of all channels
256 | # sf=10000): # sampling rate, Hz
257 | # """
258 | # import new style raw JPhys files into np.array
259 | # one dictionary contains header for each channel
260 | # the other contains values for each for each channel
261 | # """
262 | #
263 | # JPhysFile = np.fromfile(path, dtype=dtype, count=-1)
264 | # channelNum = len(channels)
265 | #
266 | # channelLength = len(JPhysFile) / channelNum
267 | # # print 'length of JPhys:', len(JPhysFile)
268 | # # print 'length of JPhys channel number:', channelNum
269 | #
270 | # if len(JPhysFile) % channelNum != 0:
271 | # raise ArithmeticError('Length of the file should be divisible by channel number!')
272 | #
273 | # JPhysFile = JPhysFile.reshape([channelLength, channelNum])
274 | #
275 | # headerMatrix = JPhysFile[0:headerLength, :]
276 | # bodyMatrix = JPhysFile[headerLength:, :]
277 | #
278 | # header = {}
279 | # body = {}
280 | #
281 | # for index, channelname in enumerate(channels):
282 | # header.update({channels[index]: headerMatrix[:, index]})
283 | # body.update({channels[index]: bodyMatrix[:, index]})
284 | #
285 | # body.update({'samplingRate': sf})
286 | #
287 | # return header, body
288 | #
289 | #
290 | # def importRawJPhys2(path,
291 | # imageFrameNum,
292 | # photodiodeThr=.95, # threshold of photo diode signal,
293 | # dtype=np.dtype('>f'),
294 | # headerLength=96, # length of the header for each channel
295 | # channels=('photodiode2', 'read', 'trigger', 'photodiode'), # name of all channels
296 | # sf=10000.): # sampling rate, Hz
297 | # """
298 | # extract important information from JPhys file
299 | # """
300 | #
301 | # JPhysFile = np.fromfile(path, dtype=dtype, count=-1)
302 | # channelNum = len(channels)
303 | #
304 | # channelLength = len(JPhysFile) / channelNum
305 | #
306 | # if channelLength % 1 != 0:
307 | # raise ArithmeticError('Bytes in each channel should be integer !')
308 | #
309 | # channelLength = int(channelLength)
310 | #
311 | # # get trace for each channel
312 | # for index, channelname in enumerate(channels):
313 | # channelStart = index * channelLength
314 | # channelEnd = channelStart + channelLength
315 | # # if channelname == 'expose':
316 | # # expose = JPhysFile[channelStart+headerLength:channelEnd]
317 | #
318 | # if channelname == 'read':
319 | # read = JPhysFile[channelStart + headerLength:channelEnd]
320 | #
321 | # if channelname == 'photodiode':
322 | # photodiode = JPhysFile[channelStart + headerLength:channelEnd]
323 | #
324 | # # if channelname == 'trigger':
325 | # # trigger = JPhysFile[channelStart+headerLength:channelEnd]
326 | #
327 | # # generate time stamp for each image frame
328 | # imageFrameTS = []
329 | # for i in range(1, len(read)):
330 | # if read[i - 1] < 3.0 and read[i] >= 3.0:
331 | # imageFrameTS.append(i * (1. / sf))
332 | #
333 | # if len(imageFrameTS) < imageFrameNum:
334 | # raise LookupError("Expose period number is smaller than image frame number!")
335 | # imageFrameTS = imageFrameTS[0:imageFrameNum]
336 | #
337 | # # first time of visual stimulation
338 | # visualStart = None
339 | #
340 | # for i in range(80, len(photodiode)):
341 | # if ((photodiode[i] - photodiodeThr) * (photodiode[i - 1] - photodiodeThr)) < 0 and \
342 | # ((photodiode[i] - photodiodeThr) * (
343 | # photodiode[i - 75] - photodiodeThr)) < 0: # first frame of big change
344 | # visualStart = i * (1. / sf)
345 | # break
346 | #
347 | # return np.array(imageFrameTS), visualStart
348 | #
349 | #
350 | # def importRawNewJPhys2(path,
351 | # imageFrameNum,
352 | # photodiodeThr=.95, # threshold of photo diode signal,
353 | # dtype=np.dtype('>f'),
354 | # headerLength=96, # length of the header for each channel
355 | # channels=('photodiode2',
356 | # 'read',
357 | # 'trigger',
358 | # 'photodiode',
359 | # 'sweep',
360 | # 'visualFrame',
361 | # 'runningRef',
362 | # 'runningSig',
363 | # 'reward',
364 | # 'licking'), # name of all channels
365 | # sf=10000.): # sampling rate, Hz
366 | # """
367 | # extract important information from new style JPhys file
368 | # """
369 | #
370 | # JPhysFile = np.fromfile(path, dtype=dtype, count=-1)
371 | # channelNum = len(channels)
372 | #
373 | # channelLength = len(JPhysFile) / channelNum
374 | #
375 | # if len(JPhysFile) % channelNum != 0:
376 | # raise ArithmeticError('Length of the file should be divisible by channel number!')
377 | #
378 | # JPhysFile = JPhysFile.reshape([channelLength, channelNum])
379 | #
380 | # bodyMatrix = JPhysFile[headerLength:, :]
381 | #
382 | # # get trace for each channel
383 | # for index, channelname in enumerate(channels):
384 | #
385 | # if channelname == 'read':
386 | # read = bodyMatrix[:, index]
387 | #
388 | # if channelname == 'photodiode':
389 | # photodiode = bodyMatrix[:, index]
390 | #
391 | # # if channelname == 'trigger':
392 | # # trigger = JPhysFile[channelStart+headerLength:channelEnd]
393 | #
394 | # # generate time stamp for each image frame
395 | # imageFrameTS = []
396 | # for i in range(1, len(read)):
397 | # if (read[i - 1] < 3.0) and (read[i] >= 3.0):
398 | # imageFrameTS.append(i * (1. / sf))
399 | #
400 | # if len(imageFrameTS) < imageFrameNum:
401 | # raise LookupError("Expose period number is smaller than image frame number!")
402 | # imageFrameTS = imageFrameTS[0:imageFrameNum]
403 | #
404 | # # first time of visual stimulation
405 | # visualStart = None
406 | #
407 | # for i in range(80, len(photodiode)):
408 | # if ((photodiode[i] - photodiodeThr) * (photodiode[i - 1] - photodiodeThr)) < 0 and \
409 | # ((photodiode[i] - photodiodeThr) * (
410 | # photodiode[i - 75] - photodiodeThr)) < 0: # first frame of big change
411 | # visualStart = i * (1. / sf)
412 | # break
413 | #
414 | # return np.array(imageFrameTS), visualStart
415 | #
416 | #
417 | # def getLog(logPath):
418 | # """
419 | # get log dictionary from a specific path (including file names)
420 | # """
421 | #
422 | # f = open(logPath, 'r')
423 | # displayLog = pickle.load(f)
424 | # f.close()
425 | # return displayLog
426 | #
427 | # def importRawJCamF(path,
428 | # saveFolder=None,
429 | # dtype=np.dtype(' len(rawstr):
540 | return '0' * (length - len(rawstr)) + rawstr
541 |
542 |
543 | # class Logger(object):
544 | #
545 | # def __init__(self, log_dict, save_path):
546 | #
547 | # self.log_dict = log_dict
548 | # self.save_path = save_path
549 | #
550 | # def save_log(self):
551 | #
552 | # if os.path.isfile(self.save_path):
553 | # save_name, save_ext = os.path.splitext(self.save_path)
554 | # t_str = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
555 | # self.save_path = '{}_{}{}'.format(save_name, t_str, save_ext)
556 | #
557 | # log_file = h5py.File(self.save_path, 'x')
558 | # self.write_dict(h5_grp=log_file, value=self.log_dict, key="visual_display_log")
559 | #
560 | # log_file.close()
561 | #
562 | # def write_dict(self, h5_grp, value, key="unknown"):
563 | #
564 | # if isinstance(value, dict):
565 | # next_grp = h5_grp.create_group(str(key))
566 | # for next_key, next_value in value.items():
567 | # self.write_dict(h5_grp=next_grp, value=next_value, key=next_key)
568 | # else:
569 | # if value is None:
570 | # h5_grp.create_dataset(name=str(key), data='None')
571 | # else:
572 | # try:
573 | # h5_grp.create_dataset(name=str(key), data=value)
574 | # except TypeError:
575 | # try:
576 | # new_value = np.array(value, dtype='S')
577 | # h5_grp.create_dataset(name=str(key), data=new_value)
578 | # except Exception as e:
579 | # print('Failed to save field: {}. Skip!'.format(key))
580 | # print(e)
581 |
582 |
583 | # ============================== obsolete =========================================
584 | #
585 | # def getMatchingParameterDict(path):
586 | #
587 | # with open(path,'r') as f:
588 | # txt = f.read()
589 | #
590 | # chunkStart = txt.find('[VasculatureMapMatching]') + 25
591 | # chunkEnd = txt.find('[',chunkStart)
592 | # chunk = txt[chunkStart:chunkEnd]
593 | # paraTxtList = chunk.split('\n')
594 | #
595 | # paraDict={}
596 | #
597 | # for paraTxt in paraTxtList:
598 | # key, value = tuple(paraTxt.split(' = '))
599 | # if 'List' in key:
600 | # value = value.split(';')
601 | #
602 | # if ('Hight' in key) or ('Width' in key) or ('Offset' in key):
603 | # value = int(value)
604 | # if (key == 'zoom') or (key == 'rotation'):
605 | # value = float(value)
606 | #
607 | # paraDict.update({key:value})
608 | #
609 | # return paraDict
610 |
611 | # def importDeciJCamF(path,
612 | # saveFolder = None,
613 | # dtype = np.dtype(' threshold
23 | return (~pos[:-1] & pos[1:]).nonzero()[0] + 1
24 |
25 |
def down_crossings(data, threshold=0):
    """
    find the index where the data down cross the threshold. return the indices of all down crossings (the onset data
    point that is less than threshold, 1d-array). The input data should be 1d array.
    """
    if len(data.shape) != 1:
        raise ValueError('Input data should be 1-d array.')

    below = data < threshold
    # onset samples: previous sample at/above threshold, current one below
    onset = ~below[:-1] & below[1:]
    return onset.nonzero()[0] + 1
36 |
37 |
def all_crossings(data, threshold=0):
    """
    find the index where the data cross the threshold in either directions. return the indices of all crossings (the
    onset data point that is less or greater than threshold, 1d-array). The input data should be 1d array.
    """
    if len(data.shape) != 1:
        raise ValueError('Input data should be 1-d array.')

    above = data > threshold
    below = data < threshold
    # a crossing onset in either direction: previous sample not on that side,
    # current sample strictly above (or strictly below) the threshold
    crossed = (~above[:-1] & above[1:]) | (~below[:-1] & below[1:])
    return crossed.nonzero()[0] + 1
49 |
--------------------------------------------------------------------------------
/WarpedVisualStim/tools/IO/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuangjun1981/WarpedVisualStim/b4df9575109f9cf1525a2e85814a9c61052f2754/WarpedVisualStim/tools/IO/__init__.py
--------------------------------------------------------------------------------
/WarpedVisualStim/tools/PlottingTools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Oct 31 11:07:20 2014
4 |
5 | @author: junz
6 | """
7 | import numpy as np
8 | import matplotlib.pyplot as plt
9 | from matplotlib import cm
10 | import matplotlib.colors as col
11 | import scipy.ndimage as ni
12 | from . import ImageAnalysis as ia
13 | import tifffile as tf
14 | import cv2
15 |
16 |
def get_rgb(colorStr):
    """
    get R,G,B int value from a hex color string
    """
    # '#rrggbb' -> three two-character hex pairs
    pairs = (colorStr[1:3], colorStr[3:5], colorStr[5:7])
    red, green, blue = (int(pair, 16) for pair in pairs)
    return red, green, blue
22 |
23 |
def get_color_str(R, G, B):
    """
    get hex color string from R,G,B value (integer with uint8 format)

    :param R: red channel, int in [0, 255]
    :param G: green channel, int in [0, 255]
    :param B: blue channel, int in [0, 255]
    :return: '#rrggbb' hex string
    :raises TypeError: if any channel is not an int
    :raises ValueError: if any channel is outside [0, 255]
    """
    # bug fix: the original tested isinstance(G, int) twice and never
    # validated B, so a non-integer B slipped through the type check
    if not (isinstance(R, int) and isinstance(G, int) and isinstance(B, int)):
        raise TypeError('Input R, G and B should be integer!')

    if not ((0 <= R <= 255) and (0 <= G <= 255) and (
            0 <= B <= 255)):
        raise ValueError('Input R, G and B should between 0 and 255!')

    cstrs = [R, G, B]
    cstrs = ['{:02x}'.format(x) for x in cstrs]
    return '#' + ''.join(cstrs)
42 |
43 |
def binary_2_rgba(img, foregroundColor='#ff0000', backgroundColor='#000000', foregroundAlpha=255, backgroundAlpha=0):
    """
    generate display image in (RGBA).(np.uint8) format which can be displayed by imshow
    :param img: input image, should be a binary array (np.bool_, or np.(u)int
    :param foregroundColor: color for 1 in the array, RGB str, i.e. '#ff0000'
    :param backgroundColor: color for 0 in the array, RGB str, i.e. '#ff00ff'
    :param foregroundAlpha: alpha for 1 in the array, int, 0-255
    :param backgroundAlpha: alpha for 1 in the array, int, 0-255
    :return: displayImg, (RGBA).(np.uint8) format, ready for imshow
    """

    # bug fix: the bare np.bool alias was deprecated in NumPy 1.20 and
    # removed in 1.24; np.bool_ is the actual numpy boolean scalar type
    if img.dtype == np.bool_:
        pass
    elif issubclass(img.dtype.type, np.integer):
        if np.amin(img) < 0 or np.amax(img) > 1:
            raise ValueError('Values of input image should be either 0 or 1.')
    else:
        raise TypeError('Data type of input image should be either np.bool or integer.')

    if isinstance(foregroundAlpha, int):
        if foregroundAlpha < 0 or foregroundAlpha > 255:
            raise ValueError('Value of foreGroundAlpha should be between 0 and 255.')
    else:
        raise TypeError('Data type of foreGroundAlpha should be integer.')

    if isinstance(backgroundAlpha, int):
        if backgroundAlpha < 0 or backgroundAlpha > 255:
            raise ValueError('Value of backGroundAlpha should be between 0 and 255.')
    else:
        raise TypeError('Data type of backGroundAlpha should be integer.')

    fR, fG, fB = get_rgb(foregroundColor)
    bR, bG, bB = get_rgb(backgroundColor)

    displayImg = np.zeros((img.shape[0], img.shape[1], 4)).astype(np.uint8)
    displayImg[img == 1] = np.array([fR, fG, fB, foregroundAlpha]).astype(np.uint8)
    displayImg[img == 0] = np.array([bR, bG, bB, backgroundAlpha]).astype(np.uint8)

    return displayImg
83 |
84 |
def scalar_2_rgba(img, color='#ff0000'):
    """
    generate display a image in (RGBA).(np.uint8) format which can be displayed by imshow
    alpha is defined by values in the img
    :param img: input image
    :param color: color for the foreground, RGB str, i.e. '#ff0000'
    :return: displayImg, (RGBA).(np.uint8) format, ready for imshow
    """

    R, G, B = get_rgb(color)

    # hoist the normalization: the original recomputed ia.array_nor on the
    # same input four times (once per channel plus once for alpha)
    nor = ia.array_nor(img.astype(np.float32))

    displayImg = np.zeros((img.shape[0], img.shape[1], 4)).astype(np.uint8)
    displayImg[:, :, 0] = (R * nor).astype(np.uint8)
    displayImg[:, :, 1] = (G * nor).astype(np.uint8)
    displayImg[:, :, 2] = (B * nor).astype(np.uint8)
    displayImg[:, :, 3] = (nor * 255).astype(np.uint8)

    return displayImg
110 |
111 |
def bar_graph(left,
              height,
              error,
              errorDir='both',  # 'both', 'positive' or 'negative'
              width=0.1,
              plotAxis=None,
              lw=3,
              faceColor='#000000',
              edgeColor='none',
              capSize=10,
              label=None
              ):
    """
    plot a single bar with error bar

    :param left: left edge position of the bar
    :param height: bar height
    :param error: magnitude of the error bar
    :param errorDir: which side(s) the error bar extends: 'both', 'positive' or 'negative'
    :param plotAxis: axes to draw into; a new figure/axes is created when None
    :return: the axes containing the bar
    :raises ValueError: if errorDir is not one of the three accepted strings
    """

    if not plotAxis:
        f = plt.figure()
        plotAxis = f.add_subplot(111)

    if errorDir == 'both':
        yerr = error
    elif errorDir == 'positive':
        yerr = [[0], [error]]
    elif errorDir == 'negative':
        yerr = [[error], [0]]
    else:
        # bug fix: an unrecognized errorDir previously left `yerr` unbound
        # and crashed with NameError inside errorbar; fail fast instead
        raise ValueError("errorDir should be 'both', 'positive' or 'negative'.")

    # error bar is drawn at the bar's horizontal center
    plotAxis.errorbar(left + width / 2,
                      height,
                      yerr=yerr,
                      lw=lw,
                      capsize=capSize,
                      capthick=lw,
                      color=edgeColor)

    plotAxis.bar(left,
                 height,
                 width=width,
                 color=faceColor,
                 edgecolor=edgeColor,
                 lw=lw,
                 label=label)

    return plotAxis
156 |
157 |
def random_color(numOfColor=10):
    """
    generate a list of random colors

    :param numOfColor: int, number of colors to generate
    :return: list of str, each an RGB hex string formatted '#rrggbb'
    """
    numOfColor = int(numOfColor)

    Cmatrix = (np.random.rand(numOfColor, 3) * 255).astype(np.uint8)

    # '{:02x}' zero-pads each channel, replacing the manual hex()-split-and-pad logic
    return ['#{:02x}{:02x}{:02x}'.format(r, g, b) for r, g, b in Cmatrix]
185 |
186 |
def show_movie(path,  # tif file path or numpy array of the movie
               mode='raw',  # 'raw', 'dF' or 'dFoverF'
               baselinePic=None,  # picture of baseline
               baselineType='mean',  # way to calculate baseline
               cmap='gray'):
    """
    plot tif movie in the way defined by mode

    :param path: str path to a tif file, or a numpy array holding the movie
    :param mode: 'raw', 'dF' or 'dFoverF'; how the movie should be normalized
    :param baselinePic: baseline picture passed to ia.normalize_movie
    :param baselineType: baseline calculation method passed to ia.normalize_movie
    :param cmap: matplotlib colormap name
    :return: the (possibly normalized) movie that was displayed
    :raises TypeError: if path is neither a str nor a numpy array
    :raises LookupError: if mode is not one of the three accepted values
    """

    if isinstance(path, str):
        rawMov = tf.imread(path)
        title = mode + ' movie of ' + path
    elif isinstance(path, np.ndarray):
        rawMov = path
        title = mode + ' Movie'
    else:
        # previously any other type left rawMov unbound and caused a NameError below
        raise TypeError('"path" should be a tif file path (str) or a numpy array!')

    if mode == 'raw':
        mov = rawMov
    else:
        _, dFMov, dFoverFMov = ia.normalize_movie(rawMov,
                                                  baselinePic=baselinePic,
                                                  baselineType=baselineType)
        if mode == 'dF':
            mov = dFMov
        elif mode == 'dFoverF':
            mov = dFoverFMov
        else:
            raise LookupError('The "mode" should be "raw", "dF" or "dFoverF"!')

    # single display call; the title was the only difference between the two branches
    tf.imshow(mov,
              cmap=cmap,
              vmax=np.amax(mov),
              vmin=np.amin(mov),
              title=title)

    return mov
228 |
229 |
def standalone_color_bar(vmin, vmax, cmap, sectionNum=10):
    """
    plot a stand alone color bar.

    :param vmin: lower end of the color scale
    :param vmax: upper end of the color scale
    :param cmap: matplotlib colormap name
    :param sectionNum: number of sections the colorbar is divided into
    """

    a = np.array([[vmin, vmax]])

    plt.figure(figsize=(0.1, 9))

    # the image is hidden immediately; it exists only to feed the colorbar
    # (the previous unused binding `img = ...` is dropped)
    plt.imshow(a, cmap=cmap, vmin=vmin, vmax=vmax)
    plt.gca().set_visible(False)
    cbar = plt.colorbar()
    cbar.set_ticks(np.linspace(vmin, vmax, num=sectionNum + 1))
243 |
244 |
def alpha_blending(image, alphaData, vmin, vmax, cmap='Paired', sectionNum=10, background=-1, interpolation='nearest',
                   isSave=False, savePath=None):
    """
    Generate image with transparency weighted by another matrix.

    Plot numpy array 'image' with colormap 'cmap'. And define the tranparency
    of each pixel by the value in another numpy array alphaData.

    All the elements in alphaData should be non-negative.

    :param image: 2d array to display
    :param alphaData: 2d array, same shape as image, non-negative; per-pixel alpha weight
    :param vmin: value mapped to the bottom of the colormap
    :param vmax: value mapped to the top of the colormap
    :param cmap: matplotlib colormap name
    :param sectionNum: number of sections on the colorbar
    :param background: gray level used for the background image
    :param interpolation: interpolation mode passed to imshow
    :param isSave: unused in this function; kept for interface compatibility
    :param savePath: unused in this function; kept for interface compatibility
    :return: colorImage, (height, width, 4) RGBA float array
    """

    if image.shape != alphaData.shape:
        raise LookupError('"image" and "alphaData" should have same shape!!')

    if np.amin(alphaData) < 0:
        raise ValueError('All the elements in alphaData should be bigger than zero.')

    # clip into [vmin, vmax] WITHOUT mutating the caller's array
    # (the previous in-place assignments modified the input), then scale to [0, 1]
    image = np.clip(image, vmin, vmax)
    image = (image - vmin) / (vmax - vmin)

    # look the colormap up directly; the old exec() string could not bind a
    # local variable under Python 3 and failed with a NameError
    colorImage = plt.get_cmap(cmap)(image)

    # normalize alphadata
    alphaDataNor = alphaData / np.amax(alphaData)
    alphaDataNor = np.sqrt(alphaDataNor)

    colorImage[:, :, 3] = alphaDataNor

    # plot dummy figure for colorbar
    a = np.array([[vmin, vmax]])
    plt.imshow(a, cmap=cmap, vmin=vmin, vmax=vmax, alpha=0)
    cbar = plt.colorbar()
    cbar.set_ticks(np.linspace(vmin, vmax, num=sectionNum + 1))
    cbar.set_alpha(1)
    cbar.draw_all()

    # generate background at the given gray level
    b = np.array(colorImage)
    b[:] = background
    b[:, :, 3] = 1
    plt.imshow(b, cmap='gray')

    # plot map
    plt.imshow(colorImage, interpolation=interpolation)

    return colorImage
297 |
298 |
def plot_mask(mask, plotAxis=None, color='#ff0000', zoom=1, borderWidth=None, closingIteration=None):
    """
    plot mask borders in a given color

    :param mask: 2d array; non-NaN pixels are treated as inside the mask,
                 NaN pixels as background
    :param plotAxis: axis to plot on; a new figure/axis is created when None
    :param color: border color, RGB hex string
    :param zoom: zoom factor applied to the mask (nearest-neighbor) before plotting
    :param borderWidth: int or None; erosion iterations used as border thickness
    :param closingIteration: int or None; morphological closing iterations applied
                             to smooth the plotted border
    :return: the image handle returned by imshow
    """

    if not plotAxis:
        f = plt.figure()
        plotAxis = f.add_subplot(111)

    # register a single-color colormap named 'temp' for the imshow call below
    cmap1 = col.ListedColormap(color, 'temp')
    cm.register_cmap(cmap=cmap1)

    if zoom != 1:
        mask = ni.interpolation.zoom(mask, zoom, order=0)

    # binarize: every non-NaN pixel becomes 1., every NaN pixel becomes 0.
    mask2 = mask.astype(np.float32)
    mask2[np.invert(np.isnan(mask2))] = 1.
    mask2[np.isnan(mask2)] = 0.

    # border = mask minus its erosion; borderWidth controls erosion depth
    struc = ni.generate_binary_structure(2, 2)
    if borderWidth:
        border = mask2 - ni.binary_erosion(mask2, struc, iterations=borderWidth).astype(np.float32)
    else:
        border = mask2 - ni.binary_erosion(mask2, struc).astype(np.float32)

    if closingIteration:
        border = ni.binary_closing(border, iterations=closingIteration).astype(np.float32)

    # non-border pixels become NaN so imshow renders them transparent
    border[border == 0] = np.nan

    currfig = plotAxis.imshow(border, cmap='temp', interpolation='nearest')

    return currfig
332 |
333 |
def plot_mask_borders(mask, plotAxis=None, color='#ff0000', zoom=1, borderWidth=2, closingIteration=None, **kwargs):
    """
    plot mask (ROI) borders by using pyplot.contour function. all the 0s and Nans in the input mask will be considered
    as background, and non-zero, non-nan pixel will be considered in ROI.

    :param mask: 2d array; zeros and NaNs are background, everything else is ROI
    :param plotAxis: axis to plot on; a new figure/axis is created when None
    :param color: contour color
    :param zoom: zoom factor applied to the mask before contouring
    :param borderWidth: contour line width, also used as erosion iterations
    :param closingIteration: int or None; closing iterations applied to the mask
    :param kwargs: extra keyword arguments forwarded to plotAxis.contour
    :return: the contour set returned by plotAxis.contour
    """
    if not plotAxis:
        f = plt.figure()
        plotAxis = f.add_subplot(111)

    plotingMask = np.ones(mask.shape, dtype=np.uint8)

    plotingMask[np.logical_or(np.isnan(mask), mask == 0)] = 0

    if zoom != 1:
        # np.float was removed in numpy 1.24; use the builtin float alias instead
        plotingMask = cv2.resize(plotingMask.astype(float),
                                 dsize=(int(plotingMask.shape[1] * zoom), int(plotingMask.shape[0] * zoom)))
        plotingMask[plotingMask < 0.5] = 0
        plotingMask[plotingMask >= 0.5] = 1
        plotingMask = plotingMask.astype(np.uint8)

    if closingIteration is not None:
        plotingMask = ni.binary_closing(plotingMask, iterations=closingIteration).astype(np.uint8)

    plotingMask = ni.binary_erosion(plotingMask, iterations=borderWidth)

    currfig = plotAxis.contour(plotingMask, levels=[0.5], colors=color, linewidths=borderWidth, **kwargs)

    # put y axis in decreasing order
    y_lim = list(plotAxis.get_ylim())
    y_lim.sort()
    plotAxis.set_ylim(y_lim[::-1])

    plotAxis.set_aspect('equal')

    return currfig
369 |
370 |
def grid_axis(rowNum, columnNum, totalPlotNum, **kwarg):
    """
    return figure handles and axis handles for multiple subplots and figures

    :param rowNum: number of subplot rows per figure
    :param columnNum: number of subplot columns per figure
    :param totalPlotNum: total number of axes needed across all figures
    :param kwarg: extra keyword arguments forwarded to plt.figure
    :return: (figureHandles, axisHandles), lists of figure and axis handles
    """

    plotsPerFig = rowNum * columnNum

    # ceiling division; the old "// ... + 1" created an extra empty figure
    # whenever totalPlotNum was an exact multiple of plots per figure
    figureNum = -(-totalPlotNum // plotsPerFig)

    figureHandles = [plt.figure(**kwarg) for _ in range(figureNum)]

    axisHandles = []
    for i in range(totalPlotNum):
        currFig = figureHandles[i // plotsPerFig]
        currAxis = currFig.add_subplot(rowNum, columnNum, (i % plotsPerFig) + 1)
        axisHandles.append(currAxis)

    return figureHandles, axisHandles
392 |
393 |
def tile_axis(f, rowNum, columnNum, topDownMargin=0.05, leftRightMargin=0.05, rowSpacing=0.05, columnSpacing=0.05):
    """
    tile figure f with a rowNum x columnNum grid of axes and return them as a
    nested list: outer list runs over rows (top to bottom), inner over columns
    (left to right). All margins/spacings are in figure fraction units.
    """

    if 2 * topDownMargin + ((rowNum - 1) * rowSpacing) >= 1:
        raise ValueError('Top down margin or row spacing are too big!')
    if 2 * leftRightMargin + ((columnNum - 1) * columnSpacing) >= 1:
        raise ValueError('Left right margin or column spacing are too big!')

    # per-axis size after removing outer margins and inter-axis spacing
    axHeight = (1 - (2 * topDownMargin) - (rowNum - 1) * rowSpacing) / rowNum
    axWidth = (1 - (2 * leftRightMargin) - (columnNum - 1) * columnSpacing) / columnNum

    # left edges run left-to-right; bottom edges are reversed so the first row is on top
    lefts = np.arange(leftRightMargin, 1 - leftRightMargin, (axWidth + columnSpacing))
    bottoms = np.arange(topDownMargin, 1 - topDownMargin, (axHeight + rowSpacing))[::-1]

    axisList = []
    for bottom in bottoms:
        rowAxes = []
        for left in lefts:
            rowAxes.append(f.add_axes([left, bottom, axWidth, axHeight]))
        axisList.append(rowAxes)

    return axisList
412 |
413 |
def save_figure_without_borders(f,
                                savePath,
                                removeSuperTitle=True,
                                **kwargs):
    """
    save figure f to savePath with axis ticks, title and (optionally) the
    super title stripped, using a tight bounding box so no border remains
    """
    # blank out everything on the current axes before saving
    ax = f.gca()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax.set_title('')
    if removeSuperTitle:
        f.suptitle('')
    f.savefig(savePath, pad_inches=0, bbox_inches='tight', **kwargs)
427 |
428 |
def merge_normalized_images(imgList, isFilter=True, sigma=50, mergeMethod='mean', dtype=np.float32):
    """
    merge images in a list in to one, for each image, local intensity variability will be removed by subtraction of
    gaussian filtered image. Then all images will be collapsed by the mergeMethod in to single image

    :param imgList: list of 2d arrays, all the same shape
    :param isFilter: bool; when True, subtract a gaussian-filtered version of the merged image
    :param sigma: gaussian filter sigma
    :param mergeMethod: 'mean', 'min', 'max' or 'median'
    :param dtype: dtype of the returned image
    :return: merged, normalized 2d array
    :raises ValueError: if mergeMethod is not one of the four accepted values
    """

    # normalize each image before collapsing
    imgStack = np.array([ia.array_nor(currImg.astype(dtype)) for currImg in imgList])

    if mergeMethod == 'mean':
        mergedImg = np.mean(imgStack, axis=0)
    elif mergeMethod == 'min':
        mergedImg = np.min(imgStack, axis=0)
    elif mergeMethod == 'max':
        mergedImg = np.max(imgStack, axis=0)
    elif mergeMethod == 'median':
        mergedImg = np.median(imgStack, axis=0)
    else:
        # previously an unknown mergeMethod left mergedImg unbound -> NameError below
        raise ValueError('"mergeMethod" should be "mean", "min", "max" or "median"!')

    if isFilter:
        # np.float was removed in numpy 1.24; use the builtin float alias instead
        mergedImgf = ni.filters.gaussian_filter(mergedImg.astype(float), sigma=sigma)
        return ia.array_nor(mergedImg - mergedImgf).astype(dtype)
    else:
        return ia.array_nor(mergedImg).astype(dtype)
454 |
455 |
456 | # def hue2RGB(hue):
457 | # """
458 | # get the RGB value as format as hex string from the decimal ratio of hue (from 0 to 1)
459 | # color model as described in:
460 | # https://en.wikipedia.org/wiki/Hue
461 | # """
462 | # if hue < 0: hue = 0
463 | # if hue > 1: hue = 1
464 | # color = colorsys.hsv_to_rgb(hue,1,1)
465 | # color = [int(x*255) for x in color]
466 | # return get_color_str(*color)
467 | #
468 | #
def hot_2_rgb(hot):
    """
    get the RGB value as format as hex string from the decimal ratio of hot colormap (from 0 to 1)
    """
    # clamp the input ratio into [0, 1]
    if hot < 0:
        hot = 0
    if hot > 1:
        hot = 1
    # look up the hot colormap and drop the alpha channel
    r, g, b = plt.get_cmap('hot')(hot)[0:3]
    return get_color_str(int(r * 255), int(g * 255), int(b * 255))
479 |
480 |
def value_2_rgb(value, cmap):
    """
    get the RGB value as format as hex string from the decimal ratio of a given colormap (from 0 to 1)

    :param value: float; clamped into [0, 1] before lookup
    :param cmap: matplotlib colormap name
    :return: RGB hex string produced by get_color_str
    """
    if value < 0:
        value = 0
    if value > 1:
        value = 1
    # avoid shadowing the 'cmap' parameter with the colormap object
    cmapObj = plt.get_cmap(cmap)
    color = cmapObj(value)[0:3]  # drop the alpha channel (stray semicolon removed)
    color = [int(x * 255) for x in color]
    return get_color_str(*color)
491 |
492 |
if __name__ == '__main__':
    # turn off interactive plotting when running this module directly for debugging
    plt.ioff()
    print('for debug')
496 |
--------------------------------------------------------------------------------
/WarpedVisualStim/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zhuangjun1981/WarpedVisualStim/b4df9575109f9cf1525a2e85814a9c61052f2754/WarpedVisualStim/tools/__init__.py
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | six
2 | numpy
3 | scipy
4 | matplotlib
5 | opencv-python
6 | psychopy
7 | pytest
8 | scikit-image
9 | h5py
10 | pillow
11 | configobj
12 | PyDAQmx
13 | tifffile
14 | pyglet
15 | # numpydoc
16 | # sphinx
17 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junz'
2 |
3 | import sys
4 | import io
5 | import os
6 | import codecs
7 | import re
8 | from setuptools import setup, find_packages
9 |
# run setup from the repository root regardless of the caller's cwd
here = os.path.abspath(os.path.dirname(__file__))
os.chdir(here)

package_root = 'WarpedVisualStim'

# get install requirements; strip whitespace and drop blank lines
# (the old filter kept empty lines, yielding empty requirement strings)
with open('requirements.txt') as req_f:
    install_reqs = [ir.strip() for ir in req_f.read().splitlines()]
install_reqs = [ir for ir in install_reqs if ir and '#' not in ir]
install_reqs = install_reqs[::-1]
print('\ninstall requirements:')
print('\n'.join(install_reqs))
print('')
# get long_description
def read(*filenames, **kwargs):
    """
    read all of *filenames* and return their contents joined by `sep`
    (default newline); files are decoded with `encoding` (default utf-8)
    """
    encoding = kwargs.get('encoding', 'utf-8')
    sep = kwargs.get('sep', '\n')

    def _slurp(fname):
        with io.open(fname, encoding=encoding) as fh:
            return fh.read()

    return sep.join(_slurp(fname) for fname in filenames)
33 | long_description = read('README.md')
34 |
# find version
def find_version(f_path):
    """
    Extract the package __version__ string from the file at f_path.

    :param f_path: path to a python file containing a line like __version__ = '1.0.0'
    :return: str, the version string
    :raises RuntimeError: if no __version__ assignment is found
    """
    # use a context manager so the handle is closed
    # (the old codecs.open call was never closed)
    with codecs.open(f_path, 'r') as version_f:
        version_file = version_f.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
43 | version = find_version(os.path.join(here, 'WarpedVisualStim', '__init__.py'))
44 |
# setup
setup(
    name='WarpedVisualStim',
    version = version,  # parsed from WarpedVisualStim/__init__.py above
    url='https://github.com/zhuangjun1981/WarpedVisualStim',
    author='Jun Zhuang @ Allen Institute for Brain Science',
    install_requires=install_reqs,  # parsed from requirements.txt above
    author_email='junz@alleninstitute.org',
    description='visual stimulus display tools',
    long_description=long_description,  # contents of README.md
    packages=find_packages(),
    include_package_data=True,
    package_data={'':['*.md']},  # ship markdown docs with the package
    platforms='any',
    classifiers=['Programming Language :: Python',
                 'Development Status :: 4 - Beta',
                 'Natural Language :: English',
                 'Operating System :: OS Independent',],
)
64 |
--------------------------------------------------------------------------------