├── .gitattributes
├── .gitignore
├── Part1
│   ├── Part1_Intro_to_Python.ipynb
│   ├── script_0.py
│   ├── script_1.py
│   ├── script_2.py
│   └── script_3.py
├── Part2
│   ├── Part2_PsychoPy.ipynb
│   ├── images
│   │   ├── 1a.jpg
│   │   ├── 1b.jpg
│   │   ├── 2a.jpg
│   │   ├── 2b.jpg
│   │   ├── 3a.jpg
│   │   ├── 3b.jpg
│   │   ├── 4a.jpg
│   │   ├── 4b.jpg
│   │   ├── 5a.jpg
│   │   ├── 5b.jpg
│   │   ├── 6a.jpg
│   │   └── 6b.jpg
│   └── script_final.py
├── Part3
│   ├── Part3_Scientific_Python.ipynb
│   ├── Part3_exercises.ipynb
│   ├── Part3_exercises.py
│   └── python.jpg
├── Part4
│   └── Part4_Practice_with_PsychoPy.ipynb
├── Part5
│   ├── Part5_psychopy_ext.ipynb
│   ├── __init__.py
│   ├── images
│   │   ├── architecture.svg
│   │   ├── gui.png
│   │   ├── research_workflow.svg
│   │   ├── scheme.svg
│   │   └── tree.svg
│   ├── run.py
│   ├── scripts
│   │   ├── __init__.py
│   │   ├── changedet.py
│   │   ├── computer.py
│   │   └── trivial.py
│   └── stimuli.svg
├── Part6
│   ├── Part6_MVPA.ipynb
│   └── slice.nii
├── Part7
│   ├── Part7_Image_Statistics.ipynb
│   └── images
│       ├── forest.jpg
│       ├── kitten.jpg
│       ├── oudemarkt.jpg
│       └── sclos10.jpg
├── README.md
├── check_config.py
├── check_install.bat
├── check_install.py
├── index.ipynb
├── install_linux.sh
└── logo.png
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[cod]
2 | *.py~
3 | .ipynb_checkpoints/
4 | *.zip
5 | *.svg
--------------------------------------------------------------------------------
/Part1/script_0.py:
--------------------------------------------------------------------------------
1 | #==============================================
2 | # Settings that we might want to tweak later on
3 | #==============================================
4 |
5 | # directory to save data in data
6 | # directory where images can be found image
7 | # image names without the suffixes 1,2,3,4,5,6
8 | # suffix for the first image a.jpg
9 | # suffix for the second image b.jpg
10 | # screen size in pixels 1200x800
11 | # image freezing time in seconds 30
12 | # image changing time in seconds 0.5
13 | # number of bubbles overlayed on the image 40
14 |
15 |
16 | #==========================================
17 | # Store info about the experiment session
18 | #==========================================
19 |
20 | # Get subject name, gender, age, handedness through a dialog box
21 | # If 'Cancel' is pressed, quit
22 | # Get date and time
23 | # Store this information as general session info
24 | # Create a unique filename for the experiment data
25 |
26 |
27 | #========================
28 | # Prepare condition lists
29 | #========================
30 |
31 | # Check if all images exist
32 | # Randomize the image order
33 |
34 | # Create the orientations list: half upright, half inverted
35 | # Randomize the orientation order
36 |
37 |
38 | #===============================
39 | # Creation of window and stimuli
40 | #===============================
41 |
42 | # Open a window
43 | # Define trial start text
44 | # Define the bitmap stimuli (contents can still change)
45 | # Define a bubble (position and size can still change)
46 |
47 |
48 | #==========================
49 | # Define the trial sequence
50 | #==========================
51 |
52 | # Define a list of trials with their properties:
53 | # - Which image (without the suffix)
54 | # - Which orientation
55 |
56 |
57 | #=====================
58 | # Start the experiment
59 | #=====================
60 |
61 | # Run through the trials. On each trial:
62 | # - Display trial start text
63 | # - Wait for a spacebar press to start the trial, or escape to quit
64 | # - Set the images, set the orientation
65 | # - Switch the image every 0.5s, and:
66 | # - Draw bubbles of increasing radius at random positions
67 | # - Listen for a spacebar or escape press
68 | # - Stop trial if spacebar or escape has been pressed, or if 30s have passed
69 | # - Analyze the keypress
70 | # - Escape press = quit the experiment
71 | # - Spacebar press = correct change detection; register response time
72 | # - No press = failed change detection; maximal response time
73 | # - Advance to the next trial
74 |
75 |
76 | #======================
77 | # End of the experiment
78 | #======================
79 |
80 | # Save all data to a file
81 | # Quit the experiment
82 |
--------------------------------------------------------------------------------
/Part1/script_1.py:
--------------------------------------------------------------------------------
1 | #==============================================
2 | # Settings that we might want to tweak later on
3 | #==============================================
4 |
5 | datapath = 'data' # directory to save data in
6 | impath = 'images' # directory where images can be found
7 | imlist = ['1','2','3','4','5','6'] # image names without the suffixes
8 | asfx = 'a.jpg' # suffix for the first image
9 | bsfx = 'b.jpg' # suffix for the second image
10 | scrsize = (1200,800) # screen size in pixels
11 | timelimit = 30 # image freezing time in seconds
12 | changetime = .5 # image changing time in seconds
13 | n_bubbles = 40 # number of bubbles overlayed on the image
14 |
15 |
16 | #========================================
17 | # Store info about the experiment session
18 | #========================================
19 |
20 | exp_name = 'Change Detection'
21 | exp_info = {}
22 |
23 | # Get subject name, gender, age, handedness through a dialog box
24 | # If 'Cancel' is pressed, quit
25 | # Get date and time
26 | # Store this information as general session info
27 |
28 | # Create a unique filename for the experiment data
29 | data_fname = exp_info['participant'] + '_' + exp_info['date']
30 |
31 |
32 | #========================
33 | # Prepare condition lists
34 | #========================
35 |
36 | # Check if all images exist
37 | # Randomize the image order
38 |
39 | # Create the orientations list: half upright, half inverted
40 | orilist = [0,1]*(len(imlist)/2)
41 |
42 | # Randomize the orientation order
43 |
44 |
45 | #===============================
46 | # Creation of window and stimuli
47 | #===============================
48 |
49 | # Open a window
50 |
51 | # Define trial start text
52 | text = "Press spacebar to start the trial"
53 |
54 | # Define the bitmap stimuli (contents can still change)
55 | # Define a bubble (position and size can still change)
56 |
57 |
58 | #==========================
59 | # Define the trial sequence
60 | #==========================
61 |
62 | # Define a list of trials with their properties:
63 | # - Which image (without the suffix)
64 | # - Which orientation
65 |
66 |
67 | #=====================
68 | # Start the experiment
69 | #=====================
70 |
71 | for trial in trials:
72 |
73 | # Display trial start text
74 |
75 | # Wait for a spacebar press to start the trial, or escape to quit
76 |
77 | # Set the image filename, set the orientation
78 |
79 | # Start the trial
80 | # Stop trial if spacebar or escape has been pressed, or if 30s have passed
81 | while not response and time < timelimit:
82 |
83 | # Switch the image
84 |
85 | # Draw bubbles of increasing radius at random positions
86 |
87 | # For the duration of 'changetime',
88 | # Listen for a spacebar or escape press
89 | while time < changetime:
90 | if response:
91 | break
92 |
93 | # Analyze the keypress
94 | if response:
95 | if escape_pressed:
96 | # Escape press = quit the experiment
97 | break
98 | elif spacebar_pressed:
99 | # Spacebar press = correct change detection; register response time
100 | else:
101 | # No press = failed change detection; maximal response time
102 |
103 | # Advance to the next trial
104 |
105 |
106 | #======================
107 | # End of the experiment
108 | #======================
109 |
110 | # Save all data to a file
111 | # Quit the experiment
--------------------------------------------------------------------------------
/Part1/script_2.py:
--------------------------------------------------------------------------------
1 | #===============
2 | # Import modules
3 | #===============
4 |
5 | from psychopy import data
6 |
7 | #==============================================
8 | # Settings that we might want to tweak later on
9 | #==============================================
10 |
11 | datapath = 'data' # directory to save data in
12 | impath = 'images' # directory where images can be found
13 | imlist = ['1','2','3','4','5','6'] # image names without the suffixes
14 | asfx = 'a.jpg' # suffix for the first image
15 | bsfx = 'b.jpg' # suffix for the second image
16 | scrsize = (1200,800) # screen size in pixels
17 | timelimit = 30 # image freezing time in seconds
18 | changetime = .5 # image changing time in seconds
19 | n_bubbles = 40 # number of bubbles overlayed on the image
20 |
21 |
22 | #========================================
23 | # Store info about the experiment session
24 | #========================================
25 |
26 | exp_name = 'Change Detection'
27 | exp_info = {}
28 |
29 | # Get subject name, gender, age, handedness through a dialog box
30 | # If 'Cancel' is pressed, quit
31 | # Get date and time
32 | # Store this information as general session info
33 |
34 | # Create a unique filename for the experiment data
35 | data_fname = exp_info['participant'] + '_' + exp_info['date']
36 |
37 |
38 | #========================
39 | # Prepare condition lists
40 | #========================
41 |
42 | # Check if all images exist
43 | # Randomize the image order
44 |
45 | # Create the orientations list: half upright, half inverted
46 | orilist = [0,1]*(len(imlist)/2)
47 |
48 | # Randomize the orientation order
49 |
50 |
51 | #===============================
52 | # Creation of window and stimuli
53 | #===============================
54 |
55 | # Open a window
56 |
57 | # Define trial start text
58 | text = "Press spacebar to start the trial"
59 |
60 | # Define the bitmap stimuli (contents can still change)
61 | # Define a bubble (position and size can still change)
62 |
63 |
64 | #==========================
65 | # Define the trial sequence
66 | #==========================
67 |
68 | # Define a list of trials with their properties:
69 | # - Which image (without the suffix)
70 | # - Which orientation
71 |
72 | # We need a list of dictionaries, e.g. {im:'6',ori:1}
73 | # Leave blank, we will soon learn how to fill this with imlist and orilist
74 | stim_order = [{},{},{},{},{},{}]
75 |
76 | trials = data.TrialHandler(stim_order, nReps=1, extraInfo=exp_info,
77 | method='sequential', originPath=datapath)
78 |
79 |
80 | #=====================
81 | # Start the experiment
82 | #=====================
83 |
84 | for trial in trials:
85 |
86 | # Display trial start text
87 |
88 | # Wait for a spacebar press to start the trial, or escape to quit
89 |
90 | # Set the images, set the orientation
91 | trial['im']
92 | trial['ori']
93 |
94 | # Start the trial
95 | # Stop trial if spacebar or escape has been pressed, or if 30s have passed
96 | while not response and time < timelimit:
97 |
98 | # Switch the image
99 |
100 | # Draw bubbles of increasing radius at random positions
101 |
102 | # For the duration of 'changetime',
103 | # Listen for a spacebar or escape press
104 | while time < changetime:
105 | if response:
106 | break
107 |
108 | # Analyze the keypress
109 | if response:
110 | if escape_pressed:
111 | # Escape press = quit the experiment
112 | break
113 | elif spacebar_pressed:
114 | # Spacebar press = correct change detection; register response time
115 | acc = 1
116 | rt =
117 | else:
118 | # No press = failed change detection; maximal response time
119 | acc = 0
120 | rt = timelimit
121 |
122 | # Add the current trial's data to the TrialHandler
123 | trials.addData('rt', rt)
124 | trials.addData('acc', acc)
125 |
126 | # Advance to the next trial
127 |
128 |
129 | #======================
130 | # End of the experiment
131 | #======================
132 |
133 | # Save all data to a file
134 | trials.saveAsWideText(data_fname + '.csv', delim=',')
135 |
136 | # Quit the experiment
--------------------------------------------------------------------------------
/Part1/script_3.py:
--------------------------------------------------------------------------------
1 | #===============
2 | # Import modules
3 | #===============
4 |
5 | import os # for file/folder operations
6 | import numpy.random as rnd # for random number generators
7 | from psychopy import visual, event, core, gui, data
8 |
9 |
10 | #==============================================
11 | # Settings that we might want to tweak later on
12 | #==============================================
13 |
14 | datapath = 'data' # directory to save data in
15 | impath = 'images' # directory where images can be found
16 | imlist = ['1','2','3','4','5','6'] # image names without the suffixes
17 | asfx = 'a.jpg' # suffix for the first image
18 | bsfx = 'b.jpg' # suffix for the second image
19 | scrsize = (1200,800) # screen size in pixels
20 | timelimit = 30 # image freezing time in seconds
21 | changetime = .5 # image changing time in seconds
22 | n_bubbles = 40 # number of bubbles overlayed on the image
23 |
24 |
25 | #========================================
26 | # Store info about the experiment session
27 | #========================================
28 |
29 | exp_name = 'Change Detection'
30 | exp_info = {}
31 |
32 | # Get subject name, gender, age, handedness through a dialog box
33 | # If 'Cancel' is pressed, quit
34 | # Get date and time
35 | # Store this information as general session info
36 |
37 | # Create a unique filename for the experiment data
38 | if not os.path.isdir(datapath):
39 | os.makedirs(datapath)
40 | data_fname = exp_info['participant'] + '_' + exp_info['date']
41 | data_fname = os.path.join(datapath, data_fname)
42 |
43 |
44 | #=========================
45 | # Prepare conditions lists
46 | #=========================
47 |
48 | # Check if all images exist
49 | for im in imlist:
50 | if (not os.path.exists(os.path.join(impath, im+asfx)) or
51 | not os.path.exists(os.path.join(impath, im+bsfx))):
52 | raise Exception('Image files not found in image folder: ' + str(im))
53 |
54 | # Randomize the image order
55 | rnd.shuffle(imlist)
56 |
57 | # Create the orientations list: half upright, half inverted
58 | orilist = [0,1]*(len(imlist)/2)
59 |
60 | # Randomize the orientation order
61 | rnd.shuffle(orilist)
62 |
63 |
64 | #===============================
65 | # Creation of window and stimuli
66 | #===============================
67 |
68 | # Open a window
69 |
70 | # Define trial start text
71 | text = "Press spacebar to start the trial"
72 |
73 | # Define the bitmap stimuli (contents can still change)
74 | # Define a bubble (position and size can still change)
75 |
76 |
77 | #==========================
78 | # Define the trial sequence
79 | #==========================
80 |
81 | # Define a list of trials with their properties:
82 | # - Which image (without the suffix)
83 | # - Which orientation
84 | stim_order = []
85 | for im, ori in zip(imlist, orilist):
86 | stim_order.append({'im': im, 'ori': ori})
87 |
88 | trials = data.TrialHandler(stim_order, nReps=1, extraInfo=exp_info,
89 | method='sequential', originPath=datapath)
90 |
91 |
92 | #=====================
93 | # Start the experiment
94 | #=====================
95 |
96 | for trial in trials:
97 |
98 | # Display trial start text
99 |
100 | # Wait for a spacebar press to start the trial, or escape to quit
101 |
102 | # Set the images, set the orientation
103 | im_fname = os.path.join(impath, trial['im'])
104 | trial['ori']
105 |
106 | # Empty the keypresses list
107 | keys = []
108 |
109 | # Start the trial
110 | # Stop trial if spacebar or escape has been pressed, or if 30s have passed
111 | while not response and time < timelimit:
112 |
113 | # Switch the image
114 |
115 | # Draw bubbles of increasing radius at random positions
116 | for radius in range(n_bubbles):
117 | radius/2.
118 | pos = ((rnd.random()-.5) * scrsize[0],
119 | (rnd.random()-.5) * scrsize[1] )
120 |
121 | # For the duration of 'changetime',
122 | # Listen for a spacebar or escape press
123 | while time < changetime:
124 | if response:
125 | break
126 |
127 | # Analyze the keypress
128 | if response:
129 | if escape_pressed:
130 | # Escape press = quit the experiment
131 | break
132 | elif spacebar_pressed:
133 | # Spacebar press = correct change detection; register response time
134 | acc = 1
135 | rt
136 | else:
137 | # No press = failed change detection; maximal response time
138 | acc = 0
139 | rt = timelimit
140 |
141 | # Add the current trial's data to the TrialHandler
142 | trials.addData('rt', rt)
143 | trials.addData('acc', acc)
144 |
145 | # Advance to the next trial
146 |
147 |
148 | #======================
149 | # End of the experiment
150 | #======================
151 |
152 | # Save all data to a file
153 | trials.saveAsWideText(data_fname + '.csv', delim=',')
154 |
155 | # Quit the experiment
156 |
--------------------------------------------------------------------------------
/Part2/images/1a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/1a.jpg
--------------------------------------------------------------------------------
/Part2/images/1b.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/1b.jpg
--------------------------------------------------------------------------------
/Part2/images/2a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/2a.jpg
--------------------------------------------------------------------------------
/Part2/images/2b.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/2b.jpg
--------------------------------------------------------------------------------
/Part2/images/3a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/3a.jpg
--------------------------------------------------------------------------------
/Part2/images/3b.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/3b.jpg
--------------------------------------------------------------------------------
/Part2/images/4a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/4a.jpg
--------------------------------------------------------------------------------
/Part2/images/4b.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/4b.jpg
--------------------------------------------------------------------------------
/Part2/images/5a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/5a.jpg
--------------------------------------------------------------------------------
/Part2/images/5b.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/5b.jpg
--------------------------------------------------------------------------------
/Part2/images/6a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/6a.jpg
--------------------------------------------------------------------------------
/Part2/images/6b.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part2/images/6b.jpg
--------------------------------------------------------------------------------
/Part2/script_final.py:
--------------------------------------------------------------------------------
1 | #===============
2 | # Import modules
3 | #===============
4 |
5 | import os # for file/folder operations
6 | import numpy.random as rnd # for random number generators
7 | from psychopy import visual, event, core, gui, data
8 |
9 |
10 | #==============================================
11 | # Settings that we might want to tweak later on
12 | #==============================================
13 |
14 | datapath = 'data' # directory to save data in
15 | impath = 'images' # directory where images can be found
16 | imlist = ['1','2','3','4','5','6'] # image names without the suffixes
17 | asfx = 'a.jpg' # suffix for the first image
18 | bsfx = 'b.jpg' # suffix for the second image
19 | scrsize = (600,400) # screen size in pixels
20 | timelimit = 30 # image freezing time in seconds
21 | changetime = .5 # image changing time in seconds
22 | n_bubbles = 40 # number of bubbles overlayed on the image
23 |
24 |
25 | #========================================
26 | # Store info about the experiment session
27 | #========================================
28 |
29 | # Get subject name, gender, age, handedness through a dialog box
30 | exp_name = 'Change Detection'
31 | exp_info = {
32 | 'participant': '',
33 | 'gender': ('male', 'female'),
34 | 'age':'',
35 | 'left-handed':False
36 | }
37 | dlg = gui.DlgFromDict(dictionary=exp_info, title=exp_name)
38 |
39 | # If 'Cancel' is pressed, quit
40 | if dlg.OK == False:
41 | core.quit()
42 |
43 | # Get date and time
44 | exp_info['date'] = data.getDateStr()
45 | exp_info['exp_name'] = exp_name
46 |
47 | # Create a unique filename for the experiment data
48 | if not os.path.isdir(datapath):
49 | os.makedirs(datapath)
50 | data_fname = exp_info['participant'] + '_' + exp_info['date']
51 | data_fname = os.path.join(datapath, data_fname)
52 |
53 |
54 | #========================
55 | # Prepare condition lists
56 | #========================
57 |
58 | # Check if all images exist
59 | for im in imlist:
60 | if (not os.path.exists(os.path.join(impath, im+asfx)) or
61 | not os.path.exists(os.path.join(impath, im+bsfx))):
62 | raise Exception('Image files not found in image folder: ' + str(im))
63 |
64 | # Randomize the image order
65 | rnd.shuffle(imlist)
66 |
67 | # Create the orientations list: half upright, half inverted (rotated by 180 deg)
68 | orilist = [0,180]*(len(imlist)/2)
69 |
70 | # Randomize the orientation order
71 | rnd.shuffle(orilist)
72 |
73 |
74 | #===============================
75 | # Creation of window and stimuli
76 | #===============================
77 |
78 | # Open a window
79 | win = visual.Window(size=scrsize, color='white', units='pix', fullscr=False)
80 |
81 | # Define trial start text
82 | start_message = visual.TextStim(win,
83 | text="Press spacebar to start the trial. Hit spacebar again when you detect a change.",
84 | color='red', height=20)
85 |
86 | # Define bitmap stimulus (contents can still change)
87 | bitmap1 = visual.ImageStim(win, size=scrsize)
88 | bitmap2 = visual.ImageStim(win, size=scrsize)
89 |
90 | # Define a bubble (position and size can still change)
91 | bubble = visual.Circle(win, fillColor='black', lineColor='black')
92 |
93 |
94 | #==========================
95 | # Define the trial sequence
96 | #==========================
97 |
98 | # Define a list of trials with their properties:
99 | # - Which image (without the suffix)
100 | # - Which orientation
101 | stim_order = []
102 | for im, ori in zip(imlist, orilist):
103 | stim_order.append({'im': im, 'ori': ori})
104 |
105 | trials = data.TrialHandler(stim_order, nReps=1, extraInfo=exp_info,
106 | method='sequential', originPath=datapath)
107 |
108 |
109 | #=====================
110 | # Start the experiment
111 | #=====================
112 |
113 | # Initialize two clocks:
114 | # - for image change time
115 | # - for response time
116 | change_clock = core.Clock()
117 | rt_clock = core.Clock()
118 |
119 | # Run through the trials
120 | for trial in trials:
121 |
122 | # Display trial start text
123 | start_message.draw()
124 | win.flip()
125 |
126 | # Wait for a spacebar press to start the trial, or escape to quit
127 | keys = event.waitKeys(keyList=['space', 'escape'])
128 |
129 | # Set the images, set the orientation
130 | im_fname = os.path.join(impath, trial['im'])
131 | bitmap1.setImage(im_fname + asfx)
132 | bitmap1.setOri(trial['ori'])
133 | bitmap2.setImage(im_fname + bsfx)
134 | bitmap2.setOri(trial['ori'])
135 | bitmap = bitmap1
136 |
137 | # Set the clocks to 0
138 | change_clock.reset()
139 | rt_clock.reset()
140 |
141 | # Empty the keypresses list
142 | # Leave an 'escape' press in for immediate exit
143 | if 'space' in keys:
144 | keys = []
145 |
146 | # Start the trial
147 | # Stop trial if spacebar or escape has been pressed, or if 30s have passed
148 | while len(keys) == 0 and rt_clock.getTime() < timelimit:
149 |
150 | # Switch the image
151 | if bitmap == bitmap1:
152 | bitmap = bitmap2
153 | else:
154 | bitmap = bitmap1
155 |
156 | bitmap.draw()
157 |
158 | # Draw bubbles of increasing radius at random positions
159 | for radius in range(n_bubbles):
160 | bubble.setRadius(radius/2.)
161 | bubble.setPos(((rnd.random()-.5) * scrsize[0],
162 | (rnd.random()-.5) * scrsize[1] ))
163 | bubble.draw()
164 |
165 | # Show the new screen we've drawn
166 | win.flip()
167 |
168 | # For the duration of 'changetime',
169 | # Listen for a spacebar or escape press
170 | change_clock.reset()
171 | while change_clock.getTime() <= changetime:
172 | keys = event.getKeys(keyList=['space','escape'])
173 | if len(keys) > 0:
174 | break
175 |
176 | # Analyze the keypress
177 | if keys:
178 | if 'escape' in keys:
179 | # Escape press = quit the experiment
180 | break
181 | else:
182 | # Spacebar press = correct change detection; register response time
183 | acc = 1
184 | rt = rt_clock.getTime()
185 |
186 | else:
187 | # No press = failed change detection; maximal response time
188 | acc = 0
189 | rt = timelimit
190 |
191 |
192 | # Add the current trial's data to the TrialHandler
193 | trials.addData('rt', rt)
194 | trials.addData('acc', acc)
195 |
196 | # Advance to the next trial
197 |
198 |
199 | #======================
200 | # End of the experiment
201 | #======================
202 |
203 | # Save all data to a file
204 | trials.saveAsWideText(data_fname + '.csv', delim=',')
205 |
206 | # Quit the experiment
207 | win.close()
208 |
--------------------------------------------------------------------------------
/Part3/Part3_exercises.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "",
4 | "signature": "sha256:fbbaf755166ee8e7ebdd63d35c9afb2492ac0e276f7a7556fca9f9d55cb6eacc"
5 | },
6 | "nbformat": 3,
7 | "nbformat_minor": 0,
8 | "worksheets": [
9 | {
10 | "cells": [
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | "# Exercise 1"
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "collapsed": false,
21 | "input": [
22 | "# EXERCISE 1: Try to compute the BMI of each subject, as well as the average BMI across subjects\n",
23 | "# BMI = weight/(length/100)**2\n",
24 | "\n",
25 | "subj_length = [180.0,165.0,190.0,172.0,156.0]\n",
26 | "subj_weight = [75.0,60.0,83.0,85.0,62.0]\n",
27 | "subj_bmi = []"
28 | ],
29 | "language": "python",
30 | "metadata": {},
31 | "outputs": []
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {},
36 | "source": [
37 | "### Solution\n",
38 | ""
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "metadata": {},
52 | "source": [
53 | "# Exercise 2"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "collapsed": false,
59 | "input": [
60 | "# EXERCISE 2: Try to complete the program now!\n",
61 | "# Hint: np.mean() computes the mean of an ndarray\n",
62 | "# Note that unlike MATLAB, Python does not need the '.' before elementwise operators\n",
63 | "\n",
64 | "import numpy as np\n",
65 | "subj_length = np.array([180.0,165.0,190.0,172.0,156.0])\n",
66 | "subj_weight = np.array([75.0,60.0,83.0,85.0,62.0])"
67 | ],
68 | "language": "python",
69 | "metadata": {},
70 | "outputs": []
71 | },
72 | {
73 | "cell_type": "markdown",
74 | "metadata": {},
75 | "source": [
76 | "### Solution\n",
77 | ""
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "metadata": {},
88 | "source": [
89 | "# Exercise 3"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "collapsed": false,
95 | "input": [
96 | "# EXERCISE 3: Create a 2x3 array containing the column-wise and the row-wise means of the original matrix\n",
97 | "# Do not use a for-loop, and also do not use the np.mean() function for now.\n",
98 | "\n",
99 | "arr = np.array([[1,2,3],[4,5,6],[7,8,9]], dtype='float')"
100 | ],
101 | "language": "python",
102 | "metadata": {},
103 | "outputs": []
104 | },
105 | {
106 | "cell_type": "markdown",
107 | "metadata": {},
108 | "source": [
109 | "### Solution\n",
110 | ""
115 | ]
116 | },
117 | {
118 | "cell_type": "markdown",
119 | "metadata": {},
120 | "source": [
121 | "# Exercise 4"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "collapsed": false,
127 | "input": [
128 | "# EXERCISE 4: Create your own meshgrid3d function\n",
129 | "# Like np.meshgrid(), it should take two vectors and replicate them; one into columns, the other into rows\n",
130 | "# Unlike np.meshgrid(), it should return them as a single 3D array rather than 2D arrays\n",
131 | "# ...do not use the np.meshgrid() function"
132 | ],
133 | "language": "python",
134 | "metadata": {},
135 | "outputs": []
136 | },
137 | {
138 | "cell_type": "markdown",
139 | "metadata": {},
140 | "source": [
141 | "### Solution\n",
142 | ""
155 | ]
156 | },
157 | {
158 | "cell_type": "markdown",
159 | "metadata": {},
160 | "source": [
161 | "# Exercise 5"
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "collapsed": false,
167 | "input": [
168 | "# EXERCISE 5: Make a better version of Exercise 3 with what you've just learned\n",
169 | "arr = np.array([[1,2,3],[4,5,6],[7,8,9]], dtype='float')\n",
170 | "\n",
171 | "# What we had:\n",
172 | "print np.array([(arr[:,0]+arr[:,1]+arr[:,2])/3,(arr[0,:]+arr[1,:]+arr[2,:])/3])"
173 | ],
174 | "language": "python",
175 | "metadata": {},
176 | "outputs": []
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "metadata": {},
181 | "source": [
182 | "### Solution\n",
183 | ""
187 | ]
188 | },
189 | {
190 | "cell_type": "markdown",
191 | "metadata": {},
192 | "source": [
193 | "# Exercise 6"
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "collapsed": false,
199 | "input": [
200 | "# EXERCISE 6: Create a Gabor patch of 100 by 100 pixels\n",
201 | "\n",
202 | "import numpy as np\n",
203 | "import matplotlib.pyplot as plt"
204 | ],
205 | "language": "python",
206 | "metadata": {},
207 | "outputs": []
208 | },
209 | {
210 | "cell_type": "markdown",
211 | "metadata": {},
212 | "source": [
213 | "### Solution\n",
214 | ""
235 | ]
236 | },
237 | {
238 | "cell_type": "markdown",
239 | "metadata": {},
240 | "source": [
241 | "# Exercise 7"
242 | ]
243 | },
244 | {
245 | "cell_type": "code",
246 | "collapsed": false,
247 | "input": [
248 | "# EXERCISE 7: Vectorize the above program\n",
249 | "# You get these lines for free...\n",
250 | "\n",
251 | "import numpy as np\n",
252 | "throws = np.random.randint(1,7,(5000,2000))\n",
253 | "one = (throws==1)\n",
254 | "two = (throws==2)\n",
255 | "three = (throws==3)"
256 | ],
257 | "language": "python",
258 | "metadata": {},
259 | "outputs": []
260 | },
261 | {
262 | "cell_type": "markdown",
263 | "metadata": {},
264 | "source": [
265 | "### Solution\n",
266 | ""
282 | ]
283 | },
284 | {
285 | "cell_type": "markdown",
286 | "metadata": {},
287 | "source": [
288 | "# Exercise 8"
289 | ]
290 | },
291 | {
292 | "cell_type": "code",
293 | "collapsed": false,
294 | "input": [
295 | "# EXERCISE 8: Visualize the difference between the PIL conversion to grayscale, and a simple average of RGB\n",
296 | "# Display pixels where the average is LESS luminant in red, and where it is MORE luminant in shades green\n",
297 | "# The luminance of these colors should correspond to the size of the difference\n",
298 | "# Extra: Maximize the overall contrast in your image\n",
299 | "# Extra2: Save as three PNG files, of different sizes (large, medium, small)\n",
300 | "\n",
301 | "import numpy as np\n",
302 | "from PIL import Image\n",
303 | "im = Image.open('python.jpg')"
304 | ],
305 | "language": "python",
306 | "metadata": {},
307 | "outputs": []
308 | },
309 | {
310 | "cell_type": "markdown",
311 | "metadata": {},
312 | "source": [
313 | "### Solution\n",
314 | ""
343 | ]
344 | },
345 | {
346 | "cell_type": "markdown",
347 | "metadata": {},
348 | "source": [
349 | "# Exercise 9"
350 | ]
351 | },
352 | {
353 | "cell_type": "code",
354 | "collapsed": false,
355 | "input": [
356 | "# EXERCISE 9: Plot y=sin(x) and y=sin(x^2) in two separate subplots, one above the other\n",
357 | "# Let x range from 0 to 2*pi\n",
358 | "\n",
359 | "import numpy as np\n",
360 | "import matplotlib.pyplot as plt\n",
361 | "import matplotlib.patches"
362 | ],
363 | "language": "python",
364 | "metadata": {},
365 | "outputs": []
366 | },
367 | {
368 | "cell_type": "markdown",
369 | "metadata": {},
370 | "source": [
371 | "### Solution\n",
372 | ""
393 | ]
394 | },
395 | {
396 | "cell_type": "markdown",
397 | "metadata": {},
398 | "source": [
399 | "# Exercise 10"
400 | ]
401 | },
402 | {
403 | "cell_type": "code",
404 | "collapsed": false,
405 | "input": [
406 | "# EXERCISE 10: Add regression lines\n",
407 | "\n",
408 | "import numpy as np\n",
409 | "from PIL import Image\n",
410 | "import matplotlib.pyplot as plt\n",
411 | "import matplotlib.lines as lines\n",
412 | "\n",
413 | "# Open image, convert to an array\n",
414 | "im = Image.open('python.jpg')\n",
415 | "im = im.resize((400,300))\n",
416 | "arr = np.array(im, dtype='float')\n",
417 | "\n",
418 | "# Split the RGB layers and flatten them\n",
419 | "R,G,B = np.dsplit(arr,3)\n",
420 | "R = R.flatten()\n",
421 | "G = G.flatten()\n",
422 | "B = B.flatten()\n",
423 | "\n",
424 | "# Do the plotting\n",
425 | "plt.figure(figsize=(5,5))\n",
426 | "plt.plot(R, B, marker='x', linestyle='None', color=(0,0,0.6))\n",
427 | "plt.plot(R, G, marker='.', linestyle='None', color=(0,0.35,0))\n",
428 | "\n",
429 | "# Tweak the plot\n",
430 | "plt.axis([0,255,0,255])\n",
431 | "plt.xlabel('Red value')\n",
432 | "plt.ylabel('Green/Blue value')"
433 | ],
434 | "language": "python",
435 | "metadata": {},
436 | "outputs": []
437 | },
438 | {
439 | "cell_type": "markdown",
440 | "metadata": {},
441 | "source": [
442 | "### Solution\n",
443 | ""
461 | ]
462 | }
463 | ],
464 | "metadata": {}
465 | }
466 | ]
467 | }
--------------------------------------------------------------------------------
/Part3/Part3_exercises.py:
--------------------------------------------------------------------------------
1 | # EXERCISE 1: Try to compute the BMI of each subject, as well as the average BMI across subjects
2 | # BMI = weight/(length/100)**2
3 | subj_length = [180.0,165.0,190.0,172.0,156.0]
4 | subj_weight = [75.0,60.0,83.0,85.0,62.0]
5 | subj_bmi = []
6 |
7 | n = len(subj_length)
8 | summed = 0.
9 | for subj in range(n):
10 | subj_bmi.append(subj_weight[subj]/(subj_length[subj]/100)**2)
11 | summed = summed + subj_bmi[subj]
12 | print subj_bmi
13 | print summed/n
14 |
15 |
16 | # EXERCISE 2: Try to complete the program now!
17 | # Hint: np.mean() computes the mean of an ndarray
18 | # Note that unlike MATLAB, Python does not need the '.' before elementwise operators
19 | import numpy as np
20 | subj_length = np.array([180.0,165.0,190.0,172.0,156.0])
21 | subj_weight = np.array([75.0,60.0,83.0,85.0,62.0])
22 | subj_bmi = subj_weight/(subj_length/100)**2
23 | mean_bmi = np.mean(subj_bmi)
24 | print subj_bmi
25 | print mean_bmi
26 |
27 |
28 | # EXERCISE 3: Create a 2x3 array containing the column-wise and the row-wise means of the original matrix
29 | # Do not use a for-loop, and also do not use the np.mean() function for now.
30 | arr = np.array([[1,2,3],[4,5,6],[7,8,9]], dtype='float')
31 |
32 | res = np.array([(arr[:,0]+arr[:,1]+arr[:,2])/3,(arr[0,:]+arr[1,:]+arr[2,:])/3])
33 | print res
34 | print res.shape
35 |
36 |
37 | # EXERCISE 4: Create your own meshgrid3d function
38 | # Like np.meshgrid(), it should take two vectors and replicate them; one into columns, the other into rows
39 | # Unlike np.meshgrid(), it should return them as a single 3D array rather than 2D arrays
40 | # ...do not use the np.meshgrid() function
41 |
42 | def meshgrid3d(xvec, yvec):
43 | xlayer = np.tile(xvec,(len(yvec),1))
44 | ylayer = np.tile(yvec,(len(xvec),1)).T
45 | return np.dstack((xlayer,ylayer))
46 |
47 | xvec = np.arange(10)
48 | yvec = np.arange(5)
49 | xy = meshgrid3d(xvec, yvec)
50 | print xy
51 | print xy[:,:,0] # = first output of np.meshgrid()
52 | print xy[:,:,1] # = second output of np.meshgrid()
53 |
54 |
55 | # EXERCISE 5: Make a better version of Exercise 3 with what you've just learned
56 | arr = np.array([[1,2,3],[4,5,6],[7,8,9]], dtype='float')
57 |
58 | # What we had:
59 | print np.array([(arr[:,0]+arr[:,1]+arr[:,2])/3,(arr[0,:]+arr[1,:]+arr[2,:])/3])
60 |
61 | # Now the new version:
62 | print np.vstack((np.mean(arr,1), np.mean(arr,0)))
63 |
64 |
65 | # EXERCISE 6: Create a Gabor patch of 100 by 100 pixels
66 | import numpy as np
67 | import matplotlib.pyplot as plt
68 |
69 | # Step 1: Define the 1D coordinate values
70 | # Tip: use 100 equally spaced values between -np.pi and np.pi
71 | vals = np.linspace(-np.pi, np.pi, 100)
72 |
73 | # Step 2: Create the 2D x and y coordinate arrays
74 | # Tip: use np.meshgrid()
75 | x,y = np.meshgrid(vals, vals)
76 |
77 | # Step 3: Create the grating
78 | # Tip: Use a frequency of 10
79 | grating = np.sin(x*10)
80 |
81 | # Step 4: Create the Gaussian
82 | # Tip: use np.exp() to compute a power of e
83 | gaussian = np.exp(-((x**2)+(y**2))/2)
84 |
85 | # Step 5: Create the Gabor
86 | gabor = grating*gaussian
87 |
88 | # Visualize your result
89 | # (we will discuss how this works later)
90 | plt.figure(figsize=(15,5))
91 | plt.subplot(131)
92 | plt.imshow(grating, cmap='gray')
93 | plt.subplot(132)
94 | plt.imshow(gaussian, cmap='gray')
95 | plt.subplot(133)
96 | plt.imshow(gabor, cmap='gray')
97 | plt.show()
98 |
99 |
100 | # EXERCISE 7: Vectorize the above program
101 | # You get these lines for free...
102 | import numpy as np
103 | throws = np.random.randint(1,7,(5000,2000))
104 | one = (throws==1)
105 | two = (throws==2)
106 | three = (throws==3)
107 |
108 | # Find out where all the 111 and 123 sequences occur
109 | find111 = one[:,:-2] & one[:,1:-1] & one[:,2:]
110 | find123 = one[:,:-2] & two[:,1:-1] & three[:,2:]
111 |
112 | # Then at what index they /first/ occur in each sequence
113 | first111 = np.argmax(find111, axis=1)
114 | first123 = np.argmax(find123, axis=1)
115 |
116 | # Compute the average first occurrence location for both situations
117 | avg111 = np.mean(first111)
118 | avg123 = np.mean(first123)
119 |
120 | # Print the result
121 | print avg111, avg123
122 |
123 |
124 | # EXERCISE 8: Visualize the difference between the PIL conversion to grayscale, and a simple average of RGB
125 | # Display pixels where the average is LESS luminant in red, and where it is MORE luminant in shades of green
126 | # The luminance of these colors should correspond to the size of the difference
127 | # Extra: Maximize the overall contrast in your image
128 | # Extra2: Save as three PNG files, of different sizes (large, medium, small)
129 |
130 | import numpy as np
131 | from PIL import Image
132 | im = Image.open('python.jpg')
133 |
134 | # Do both grayscale conversions
135 | im_avg = np.array(im)
136 | im_avg = np.mean(im_avg,2)
137 | im_pil = im.convert('L')
138 | im_pil = np.array(im_pil)
139 |
140 | # Compute the difference per pixel
141 | imd = im_avg-im_pil
142 |
143 | # Assign different colors according to the direction of difference
144 | outp = np.zeros((im_avg.shape)+(3,))
145 | outp[:,:,0][imd<0] = -imd[imd<0]
146 | outp[:,:,1][imd>0] = imd[imd>0]
147 |
148 | # Maximize contrast
149 | outp = outp * (255./np.max(outp))
150 |
151 | # Conversion back to a PIL image
152 | outp = outp.astype('uint8')
153 | outp_pil = Image.fromarray(outp, mode='RGB')
154 |
155 | # Save with three different sizes
156 | sz = np.array(outp_pil.size)
157 | sz_name = ['large','medium','small']
158 | for n,fct in enumerate([1,2,4]):
159 | outp_rsz = outp_pil.resize(sz/fct)
160 | outp_rsz.save('python_'+sz_name[n]+'.png')
161 |
162 |
163 | # EXERCISE 9: Plot y=sin(x) and y=sin(x^2) in two separate subplots, one above the other
164 | # Let x range from 0 to 2*pi
165 |
166 | import numpy as np
167 | import matplotlib.pyplot as plt
168 | import matplotlib.patches
169 |
170 | # X-axis values
171 | x = np.linspace(0,2*np.pi,1000)
172 |
173 | # Figure and Axes creation
174 | fig = plt.figure(figsize=(10,5))
175 | ax0 = fig.add_subplot(211)
176 | ax1 = fig.add_subplot(212)
177 |
178 | # Make the plots
179 | ax0.plot(x,np.sin(x),'r-', linewidth=2)
180 | ax1.plot(x,np.sin(x**2),'b-', linewidth=2)
181 |
182 | # Finetune the plots
183 | ax0.set_xlim([0,2*np.pi])
184 | ax0.set_xticks([])
185 | ax1.set_xlim([0,2*np.pi])
186 |
187 | # Show the figure
188 | fig.show()
189 |
190 |
191 | # EXERCISE 10: Add regression lines
192 | import numpy as np
193 | from PIL import Image
194 | import matplotlib.pyplot as plt
195 | import matplotlib.lines as lines
196 |
197 | # Open image, convert to an array
198 | im = Image.open('python.jpg')
199 | im = im.resize((400,300))
200 | arr = np.array(im, dtype='float')
201 |
202 | # Split the RGB layers and flatten them
203 | R,G,B = np.dsplit(arr,3)
204 | R = R.flatten()
205 | G = G.flatten()
206 | B = B.flatten()
207 |
208 | # Do the plotting
209 | plt.figure(figsize=(5,5))
210 | plt.plot(R, B, marker='x', linestyle='None', color=(0,0,0.6))
211 | plt.plot(R, G, marker='.', linestyle='None', color=(0,0.35,0))
212 |
213 | # Tweak the plot
214 | plt.axis([0,255,0,255])
215 | plt.xlabel('Red value')
216 | plt.ylabel('Green/Blue value')
217 |
218 | # Do the linear regressions
219 | regRB = np.polyfit(R,B,1)
220 | regRG = np.polyfit(R,G,1)
221 |
222 | # Create the line objects
223 | xaxv = np.arange(255.)
224 | lRB = lines.Line2D(xaxv,regRB[1]+xaxv*regRB[0], color='k')
225 | lRG = lines.Line2D(xaxv,regRG[1]+xaxv*regRG[0], color='k')
226 |
227 | # Fetch the current Axes, and attach the lines to it
228 | ax = plt.gca()
229 | ax.add_artist(lRB)
230 | ax.add_artist(lRG)
231 |
232 | # Show the result
233 | plt.show()
234 |
--------------------------------------------------------------------------------
/Part3/python.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part3/python.jpg
--------------------------------------------------------------------------------
/Part4/Part4_Practice_with_PsychoPy.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "",
4 | "signature": "sha256:433dde522e009da353d2ead527b2cb86418a2269f85867c5d61650df2d519e68"
5 | },
6 | "nbformat": 3,
7 | "nbformat_minor": 0,
8 | "worksheets": [
9 | {
10 | "cells": [
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | "*Back to the main [index](../index.ipynb)*"
16 | ]
17 | },
18 | {
19 | "cell_type": "heading",
20 | "level": 1,
21 | "metadata": {},
22 | "source": [
23 | "Practice with PsychoPy"
24 | ]
25 | },
26 | {
27 | "cell_type": "markdown",
28 | "metadata": {},
29 | "source": [
30 | "*Part of the introductory series to using [Python for Vision Research](http://gestaltrevision.be/wiki/python/python) brought to you by the [GestaltReVision](http://gestaltrevision.be) group (KU Leuven, Belgium).*\n",
31 | "\n",
32 | "In this part, you will gain more experience with using PsychoPy for various stimulus drawing tasks.\n",
33 | "\n",
34 | "**Author:** [Jonas Kubilius](http://klab.lt) \n",
35 | "**Year:** 2014 \n",
36 | "**Copyright:** Public Domain as in [CC0](https://creativecommons.org/publicdomain/zero/1.0/)"
37 | ]
38 | },
39 | {
40 | "cell_type": "heading",
41 | "level": 1,
42 | "metadata": {},
43 | "source": [
44 | "Contents"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "- [Quick setup](#Quick-setup)\n",
52 | "- [Exercise 1:](#Exercise-1) Drawing a rectangle\n",
53 | "- [Exercise 2:](#Exercise-2:-Japanese-flag) A Japanese flag\n",
54 | "- [Exercise 3:](#Exercise-3) Radial and gabor stimuli\n",
55 | "- [Exercise 4:](#Exercise-4:-Hinton's-\"Lilac-Chaser\") Hinton's \"Lilac Chaser\"\n",
56 | "- [Resources](#Resources)"
57 | ]
58 | },
59 | {
60 | "cell_type": "heading",
61 | "level": 1,
62 | "metadata": {},
63 | "source": [
64 | "Quick setup"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {},
70 | "source": [
71 | "(you'll have to rerun this cell every time the kernel dies)"
72 | ]
73 | },
74 | {
75 | "cell_type": "code",
76 | "collapsed": false,
77 | "input": [
78 | "import numpy as np\n",
79 | "from psychopy import visual, core, event, monitors"
80 | ],
81 | "language": "python",
82 | "metadata": {},
83 | "outputs": []
84 | },
85 | {
86 | "cell_type": "heading",
87 | "level": 1,
88 | "metadata": {},
89 | "source": [
90 | "General tips"
91 | ]
92 | },
93 | {
94 | "cell_type": "markdown",
95 | "metadata": {},
96 | "source": [
97 | "1. Break down the task into concrete steps of what you need to do.\n",
98 | "2. For each step, look up the classes and functions you need in the PsychoPy [Documentation](http://www.psychopy.org/api/api.html).\n",
99 | "3. Once you find the relevant object, you need to call it properly. Suppose you need the [TextStim](http://www.psychopy.org/api/visual/textstim.html#psychopy.visual.TextStim). You will see in documentation that is it defined like this: `class psychopy.visual.TextStim(win, text='Hello World', font='', ...)`. If you imported the relevant PsychoPy modules with the command above, then Python knows what `visual` is. So you initialize the `TextStim` as: `visual.TextStim(...)`.\n",
100 | "4. Use the absolute minimum parameters necessary to initialize an object. Want a text `Hit spacebar`? Then `visual.TextStim(win, text='Hit spacebar')` will suffice. Notice that there are two types of parameters: normal arguments (`win`) and keyword arguments (`text=...`, `pos=...`). Normal arguments are always *required* in order to call an objects. Keyword arguments are *optional* because they have a default value which is used unless you pass a different value.\n",
101 | "5. Also notice that object names are case-sensitive. If you see `Window` written in the PsychoPy documentation, that means you have to call it exactly like that and not `window` or `WiNdOW`. The convention is that classes start with a capital letter and may have some more capital letters mixed in (e.g., `TextStim()`), while the rest is in lowercase (e.g., `flip()`). In PsychoPy, one unconventional thing is that functions usually have some capital letters, like `waitKeys()`. For your own scripts, try to stick to lowercase, like `show_stimuli()`."
102 | ]
103 | },
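{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of tips 3 and 4 (assuming the imports from [Quick setup](#Quick-setup) have been run): `win` is the only required argument of `TextStim`; everything else is an optional keyword argument."
]
},
{
"cell_type": "code",
"collapsed": false,
"input": [
"# minimal TextStim call: `win` is required, `text=...` is an optional keyword argument\n",
"win = visual.Window(color='white')\n",
"msg = visual.TextStim(win, text='Hit spacebar')\n",
"msg.draw()\n",
"win.flip()  # make the drawn text visible\n",
"event.waitKeys()\n",
"win.close()"
],
"language": "python",
"metadata": {},
"outputs": []
},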
104 | {
105 | "cell_type": "heading",
106 | "level": 1,
107 | "metadata": {},
108 | "source": [
109 | "Exercise 1"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "metadata": {},
115 | "source": [
116 | "*Draw a red rectangle on white background and show it until a key is pressed.*"
117 | ]
118 | },
119 | {
120 | "cell_type": "heading",
121 | "level": 2,
122 | "metadata": {},
123 | "source": [
124 | "Information"
125 | ]
126 | },
127 | {
128 | "cell_type": "markdown",
129 | "metadata": {},
130 | "source": [
131 | "No idea what to do, right? Basically, you have to fill in the blanks and ellipses:"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "collapsed": false,
137 | "input": [
138 | "# open a window\n",
139 | "win = visual.Window(color='white')\n",
140 | "# create a red rectangle stimulus\n",
141 | "rect = visual.Rect(win, size=(.5,.3), fillColor='red')\n",
142 | "# draw the stimulus\n",
143 | "rect.draw()\n",
144 | "# flip the window\n",
145 | "win.flip()\n",
146 | "# wait for a key response\n",
147 | "event.waitKeys()\n",
148 | "# close window\n",
149 | "win.close()"
150 | ],
151 | "language": "python",
152 | "metadata": {},
153 | "outputs": []
154 | },
155 | {
156 | "cell_type": "markdown",
157 | "metadata": {},
158 | "source": [
159 | "OK, but you can't remember any PsychoPy commands? Me neither. I use [the online documentation](http://www.psychopy.org/api/visual.html) to help me out.\n",
160 | "\n",
161 | "*Tip:* Can't close the window? Restart the kernel (Kernel > Restart). Remember to reimport all packages that are listed at the top of this notebook after restart."
162 | ]
163 | },
164 | {
165 | "cell_type": "heading",
166 | "level": 2,
167 | "metadata": {},
168 | "source": [
169 | "Solution"
170 | ]
171 | },
172 | {
173 | "cell_type": "code",
174 | "collapsed": false,
175 | "input": [
176 | "win = visual.Window(color='white')\n",
177 | "rect = visual.Rect(win, width=.5, height=.3, fillColor='red')\n",
178 | "rect.draw() \n",
179 | "win.flip() # don't forget to flip when done with drawing all stimuli so that the stimuli become visible\n",
180 | "event.waitKeys()\n",
181 | "win.close()"
182 | ],
183 | "language": "python",
184 | "metadata": {},
185 | "outputs": []
186 | },
187 | {
188 | "cell_type": "markdown",
189 | "metadata": {},
190 | "source": [
191 | "**Tip.** Notice the ``fillColor`` keyword. Make sure you understand why we use this and not just ``color``. Check out [the explanation of colors and color spaces](http://www.psychopy.org/general/colours.html)."
192 | ]
193 | },
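{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sketch to make the distinction concrete (again assuming the [Quick setup](#Quick-setup) imports): shape stimuli take separate `fillColor` (interior) and `lineColor` (outline) keywords rather than a single `color` argument."
]
},
{
"cell_type": "code",
"collapsed": false,
"input": [
"# fillColor sets the interior of the shape, lineColor its outline\n",
"win = visual.Window(color='white')\n",
"rect = visual.Rect(win, width=.5, height=.3, fillColor='red', lineColor='black')\n",
"rect.draw()\n",
"win.flip()\n",
"event.waitKeys()\n",
"win.close()"
],
"language": "python",
"metadata": {},
"outputs": []
},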
194 | {
195 | "cell_type": "heading",
196 | "level": 1,
197 | "metadata": {},
198 | "source": [
199 | "Exercise 2: Japanese flag"
200 | ]
201 | },
202 | {
203 | "cell_type": "markdown",
204 | "metadata": {},
205 | "source": [
206 | "*Draw a red circle on white background and show it until a keys is pressed.*"
207 | ]
208 | },
209 | {
210 | "cell_type": "heading",
211 | "level": 2,
212 | "metadata": {},
213 | "source": [
214 | "Information"
215 | ]
216 | },
217 | {
218 | "cell_type": "markdown",
219 | "metadata": {},
220 | "source": [
221 | "Oh, that sounds trivial now? Just change Rectangle to Circle? Well, try it:"
222 | ]
223 | },
224 | {
225 | "cell_type": "code",
226 | "collapsed": false,
227 | "input": [
228 | "win = visual.Window(color='white')\n",
229 | "circle = visual.Circle(win, radius=.4, fillColor='red')\n",
230 | "circle.draw()\n",
231 | "win.flip() # don't forget to flip when done with drawing all stimuli so that the stimuli become visible\n",
232 | "event.waitKeys()\n",
233 | "win.close()"
234 | ],
235 | "language": "python",
236 | "metadata": {},
237 | "outputs": []
238 | },
239 | {
240 | "cell_type": "markdown",
241 | "metadata": {},
242 | "source": [
243 | "And oops, you get an ellipse (at least I do). Why?"
244 | ]
245 | },
246 | {
247 | "cell_type": "heading",
248 | "level": 2,
249 | "metadata": {},
250 | "source": [
251 | "Solution"
252 | ]
253 | },
254 | {
255 | "cell_type": "markdown",
256 | "metadata": {},
257 | "source": [
258 | "By default, PsychoPy is using normalized ('norm') units that are proportional to the window. Since the window is rectangular, everything gets distorted horizontally. In order to keep aspect ratio sane, use 'height' units. [Read more here](http://www.psychopy.org/general/units.html)"
259 | ]
260 | },
261 | {
262 | "cell_type": "code",
263 | "collapsed": false,
264 | "input": [
265 | "win = visual.Window(color='white', units='height')\n",
266 | "circle = visual.Circle(win, radius=.4, fillColor='red')\n",
267 | "circle.draw()\n",
268 | "win.flip() # don't forget to flip when done with drawing all stimuli so that the stimuli become visible\n",
269 | "event.waitKeys()\n",
270 | "win.close()"
271 | ],
272 | "language": "python",
273 | "metadata": {},
274 | "outputs": []
275 | },
276 | {
277 | "cell_type": "markdown",
278 | "metadata": {},
279 | "source": [
280 | "Usually, however, people use 'deg' units that allow defining stimuli in terms of their size in visual angle. However, to be able to use visual angle, you first have to define you monitor parameters: resolution, width, and viewing distance. (You can also apply gamma correction etc.)"
281 | ]
282 | },
283 | {
284 | "cell_type": "code",
285 | "collapsed": false,
286 | "input": [
287 | "mon = monitors.Monitor('My screen', width=37.5, distance=57)\n",
288 | "mon.setSizePix((1280,1024))\n",
289 | "win = visual.Window(color='white', units='deg', monitor=mon)\n",
290 | "circle = visual.Circle(win, radius=.4, fillColor='red')\n",
291 | "circle.draw()\n",
292 | "win.flip() # don't forget to flip when done with drawing all stimuli so that the stimuli become visible\n",
293 | "event.waitKeys()\n",
294 | "win.close()"
295 | ],
296 | "language": "python",
297 | "metadata": {},
298 | "outputs": []
299 | },
300 | {
301 | "cell_type": "markdown",
302 | "metadata": {},
303 | "source": [
304 | "**Pro Tip.** Hate specifying monitor resolution manually? (Note that wx is messed up and next time you run this snippet it's not gonna work because 'app' is somehow already there... just rename app to app2 then.)"
305 | ]
306 | },
307 | {
308 | "cell_type": "code",
309 | "collapsed": false,
310 | "input": [
311 | "import wx\n",
312 | "app = wx.App(False) # create an app if there isn't one and don't show it\n",
313 | "nmons = wx.Display.GetCount() # how many monitors we have\n",
314 | "mon_sizes = [wx.Display(i).GetGeometry().GetSize() for i in range(nmons)]\n",
315 | "print mon_sizes"
316 | ],
317 | "language": "python",
318 | "metadata": {},
319 | "outputs": []
320 | },
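{
"cell_type": "markdown",
"metadata": {},
"source": [
"A possible follow-up sketch (assuming the snippet above ran and `mon_sizes` is available): feed the detected size of the first display into the `Monitor` object instead of typing it in by hand."
]
},
{
"cell_type": "code",
"collapsed": false,
"input": [
"# use the size of the first detected display for the monitor definition\n",
"w, h = mon_sizes[0].GetWidth(), mon_sizes[0].GetHeight()\n",
"mon = monitors.Monitor('My screen', width=37.5, distance=57)\n",
"mon.setSizePix((w, h))"
],
"language": "python",
"metadata": {},
"outputs": []
},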
321 | {
322 | "cell_type": "heading",
323 | "level": 1,
324 | "metadata": {},
325 | "source": [
326 | "Exercise 3"
327 | ]
328 | },
329 | {
330 | "cell_type": "markdown",
331 | "metadata": {},
332 | "source": [
333 | "*Draw a fixation cross, a radial stimulus on the left (like used in fMRI for retinotopic mapping) and a gabor patch on the left all on the default ugly gray background.*"
334 | ]
335 | },
336 | {
337 | "cell_type": "heading",
338 | "level": 2,
339 | "metadata": {},
340 | "source": [
341 | "Information"
342 | ]
343 | },
344 | {
345 | "cell_type": "markdown",
346 | "metadata": {},
347 | "source": [
348 | "Oh no, how do you make a gabor patch? And a radial stimulus? Something like that was in [Part 3](#A-bit-harder:-The-Gabor) so are we going to do the same? Well, think a bit. Chances are that other people needed these kind of stimulus in the past. Maybe PsychoPy has them built-in?"
349 | ]
350 | },
351 | {
352 | "cell_type": "heading",
353 | "level": 2,
354 | "metadata": {},
355 | "source": [
356 | "Solution"
357 | ]
358 | },
359 | {
360 | "cell_type": "code",
361 | "collapsed": false,
362 | "input": [
363 | "mon = monitors.Monitor('My screen', width=37.5, distance=57)\n",
364 | "mon.setSizePix((1280,1024))\n",
365 | "win = visual.Window(units='deg', monitor=mon)\n",
366 | "\n",
367 | "# make stimuli\n",
368 | "fix_hor = visual.Line(win, start=(-.3, 0), end=(.3, 0), lineWidth=3)\n",
369 | "fix_ver = visual.Line(win, start=(0, -.3), end=(0, .3), lineWidth=3)\n",
370 | "radial = visual.RadialStim(win, mask='gauss',size=(3,3), pos=(-4, 0))\n",
371 | "gabor = visual.GratingStim(win, mask='gauss', size=(3,3), pos=(4, 0))\n",
372 | "\n",
373 | "# draw stimuli\n",
374 | "fix_hor.draw()\n",
375 | "fix_ver.draw()\n",
376 | "radial.draw()\n",
377 | "gabor.draw()\n",
378 | "\n",
379 | "win.flip()\n",
380 | "event.waitKeys()\n",
381 | "win.close()"
382 | ],
383 | "language": "python",
384 | "metadata": {},
385 | "outputs": []
386 | },
387 | {
388 | "cell_type": "heading",
389 | "level": 2,
390 | "metadata": {},
391 | "source": [
392 | "Follow-up: PsychoPy is not perfect yet"
393 | ]
394 | },
395 | {
396 | "cell_type": "markdown",
397 | "metadata": {},
398 | "source": [
399 | "PsychoPy has been around for long enough to be a stable package. However, it is still evolving and bugs may occur. Some of them are quite complex but others are something you can easily fix as long as you're not afraid of getting your hand dirty. You shouldn't be, and I'll illustrate that with the following example:\n",
400 | "\n",
401 | "*Draw the same shapes as before but this time make the fixation cross black.*\n",
402 | "\n",
403 | "So that should be a piece of cake, right? [According to the documentation](http://www.psychopy.org/api/visual/line.html#psychopy.visual.Line.color), simply adding ``color='black'`` to the LineStim should do the trick. Go ahead ant try it:"
404 | ]
405 | },
406 | {
407 | "cell_type": "code",
408 | "collapsed": false,
409 | "input": [
410 | "mon = monitors.Monitor('My screen', width=37.5, distance=57)\n",
411 | "mon.setSizePix((1280,1024))\n",
412 | "win = visual.Window(units='deg', monitor=mon)\n",
413 | "\n",
414 | "# make stimulic\n",
415 | "fix_hor = visual.Line(win, start=(-.3, 0), end=(.3, 0), lineColor='black')\n",
416 | "fix_ver = visual.Line(win, start=(0, -.3), end=(0, .3), lineColor='black')\n",
417 | "radial = visual.RadialStim(win, size=(3,3), pos=(-4, 0))\n",
418 | "gabor = visual.GratingStim(win, mask='gauss', size=(3,3), pos=(4, 0))\n",
419 | "\n",
420 | "# draw stimuli\n",
421 | "fix_hor.draw()\n",
422 | "fix_ver.draw()\n",
423 | "radial.draw()\n",
424 | "gabor.draw()\n",
425 | "\n",
426 | "win.flip()\n",
427 | "event.waitKeys()\n",
428 | "win.close()"
429 | ],
430 | "language": "python",
431 | "metadata": {},
432 | "outputs": []
433 | },
434 | {
435 | "cell_type": "markdown",
436 | "metadata": {},
437 | "source": [
438 | "You should get an error along the lines of\n",
439 | "\n",
440 | "``TypeError: __init__() got an unexpected keyword argument 'color'``\n",
441 | "\n",
442 | "*(Because of this error, the window remains open -- simply restart the kernel to kill it, and reimport all modules at [Quick setup](#Quick-setup).)\n",
443 | "\n",
444 | "So now what? You need that black fixation cross real bad. Notice the error message tells you the whole hierarchy of how the problem came about:\n",
445 | "\n",
446 | "- it started with ``fix_hor = visual.Line(win, start=(-.3, 0), end=(.3, 0), color='black')`` -- clearly due to the ``color`` keyword cause it used to work before\n",
447 | "- which called ``ShapeStim.__init__(self, win, **kwargs)`` and that raised an error.\n",
448 | "\n",
449 | "If you were to check out [ShapeStim's documentation](http://www.psychopy.org/api/visual/shapestim.html), you'd see that ShapeStim only accepts ``fillColor`` and ``lineColor`` but not ``color`` keywords (even though later in the documentation it seems as if there were a ``color`` keyword too -- yet another bug).\n",
450 | "\n",
451 | "OK, so if you don't care, just use ``lineColor='black'`` and it will do the job.\n",
452 | "\n",
453 | "However, consider that Jon Peirce and other people has put lots of love in creating PsychoPy. If you find something not working, why not let them know? You can easily report bugs [on Psychopy's GitHub repo](https://github.com/psychopy/psychopy/issues) or, if you're not confident there is a bug, just post it on the [Psychopy's help forum](http://groups.google.com/group/psychopy-users).\n",
454 | "\n",
455 | "But the best of all is trying to fix it yourself, and reporting the bug together with a fix. That way you help not only yourself, but also many other users. Let's see if we can fix this one. First, notice that the problem is that ShapeStim does not recognize the ``color`` keyword. We are not going to mess with ShapeStim because it has ``fillColor`` and ``lineColor`` for a reason. So instead we can modify Line to accept this keyword. So -- open up the file where Line is defined and change it. In my case, this is ``C:\\Miniconda32\\envs\\psychopy\\lib\\site-packages\\psychopy\\visual\\line.py``.\n",
456 | "\n",
457 | "Simply insert ``color=None`` in ``def __init__()`` (line 21 in my case), and ``kwargs['lineColor'] = color`` just below ``kwargs['fillColor'] = None`` (line 50) and ``self.color = self.lineColor`` right after calling ``ShapeStim.__init__()`` (line 51). That's it! Just restart the kernel in this notebook, reimport all packages at the top (to update them with this change), and run the code above again. That should run now.\n",
458 | "\n",
459 | "Note that this is not the full fix yet because we still need to include ``colorSpace`` keyword and also functions such as ``setColor`` and ``setColorSpace``, and there may be yet other compactibility issues to verify. But for our modest purposes, it's fixed!\n",
460 | "\n",
461 | "Let it be a lesson for you as well about the whole idea behind open source -- if something is not working, just open the source file and fix it. You're in control here. Now go ahead and fix a bug in your Windows or OS X.\n",
462 | "\n",
463 | "**Advanced.** The proper way to submit you fixes is by forking the repo, making a patch, and submitting a pull request, [as explained on GitHub's help](https://help.github.com/articles/using-pull-requests)."
464 | ]
465 | },
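{
"cell_type": "markdown",
"metadata": {},
"source": [
"To make the three edits above more concrete, here is a simplified sketch of what the patched ``__init__()`` might look like. It is *not* a copy of the actual ``line.py`` (your local file contains more code and handles its arguments a bit differently); it only illustrates where the ``color`` keyword gets mapped onto ``lineColor``. The ``if color is not None`` guard is a small addition of ours so that the default line color is not overridden when no ``color`` is given."
]
},
{
"cell_type": "code",
"collapsed": false,
"input": [
"from psychopy.visual import ShapeStim\n",
"\n",
"class Line(ShapeStim):\n",
"    \"\"\"Simplified sketch of a patched Line class -- not the actual PsychoPy source.\"\"\"\n",
"    def __init__(self, win, start=(-.5, -.5), end=(.5, .5), color=None, **kwargs):\n",
"        self.start = start\n",
"        self.end = end\n",
"        kwargs['closeShape'] = False       # a line is an open shape\n",
"        kwargs['vertices'] = [start, end]\n",
"        kwargs['fillColor'] = None         # lines have no fill\n",
"        if color is not None:\n",
"            kwargs['lineColor'] = color    # map 'color' onto 'lineColor'\n",
"        ShapeStim.__init__(self, win, **kwargs)\n",
"        self.color = self.lineColor        # keep .color in sync with lineColor"
],
"language": "python",
"metadata": {},
"outputs": []
},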
466 | {
467 | "cell_type": "heading",
468 | "level": 1,
469 | "metadata": {},
470 | "source": [
471 | "Exercise 4: Hinton's \"Lilac Chaser\""
472 | ]
473 | },
474 | {
475 | "cell_type": "markdown",
476 | "metadata": {},
477 | "source": [
478 | "*In this exercise, we will create the famous [Hinton's \"Lilac Chaser\"](http://michaelbach.de/ot/col-lilacChaser/index.html). The display consists of 12 equally-spaced blurry pink dots on a larger circle (on a light gray background). Dots are disappearing one after another to create an illusion of a green dot moving.*"
479 | ]
480 | },
481 | {
482 | "cell_type": "heading",
483 | "level": 2,
484 | "metadata": {},
485 | "source": [
486 | "Hints"
487 | ]
488 | },
489 | {
490 | "cell_type": "markdown",
491 | "metadata": {},
492 | "source": [
493 | "If your math is a bit rusty at the moment, here is how to find the coordinates for placing the pink dots on a circle:"
494 | ]
495 | },
496 | {
497 | "cell_type": "code",
498 | "collapsed": false,
499 | "input": [
500 | "r = 5 # radius of the big circle\n",
501 | "ncircles = 12\n",
502 | "angle = 2 * np.pi / ncircles # angle between two pink dots in radians\n",
503 | "\n",
504 | "for i in range(ncircles): \n",
505 | " pos = (r*np.cos(angle*i), r*np.sin(angle*i))"
506 | ],
507 | "language": "python",
508 | "metadata": {},
509 | "outputs": []
510 | },
511 | {
512 | "cell_type": "heading",
513 | "level": 2,
514 | "metadata": {},
515 | "source": [
516 | "Part 1"
517 | ]
518 | },
519 | {
520 | "cell_type": "markdown",
521 | "metadata": {},
522 | "source": [
523 | "*Draw 12 equally-spaced dots on a larger circle. (Don't worry about making them blurry for now.) Also make a fixation spot.*"
524 | ]
525 | },
526 | {
527 | "cell_type": "heading",
528 | "level": 2,
529 | "metadata": {},
530 | "source": [
531 | "Solution"
532 | ]
533 | },
534 | {
535 | "cell_type": "code",
536 | "collapsed": false,
537 | "input": [
538 | "mon = monitors.Monitor('My screen', width=37.5, distance=57)\n",
539 | "mon.setSizePix((1280,1024))\n",
540 | "win = visual.Window(color='lightgray', units='deg', monitor=mon)\n",
541 | "\n",
542 | "# draw a fixation\n",
543 | "fix = visual.Circle(win, radius=.1, fillColor='black')\n",
544 | "fix.draw()\n",
545 | "\n",
546 | "r = 5 # radius of the larger circle\n",
547 | "ncircles = 12\n",
548 | "angle = 2 * np.pi / ncircles\n",
549 | "\n",
550 | "# make and draw stimuli\n",
551 | "for i in range(ncircles):\n",
552 | " pos = (r*np.cos(angle*i), r*np.sin(angle*i))\n",
553 | " circle = visual.Circle(win, radius=.5, fillColor='purple', lineColor='purple', pos=pos)\n",
554 | " circle.draw()\n",
555 | "\n",
556 | "win.flip()\n",
557 | "event.waitKeys()\n",
558 | "win.close()"
559 | ],
560 | "language": "python",
561 | "metadata": {},
562 | "outputs": []
563 | },
564 | {
565 | "cell_type": "heading",
566 | "level": 2,
567 | "metadata": {},
568 | "source": [
569 | "Part 2"
570 | ]
571 | },
572 | {
573 | "cell_type": "markdown",
574 | "metadata": {},
575 | "source": [
576 | "*Make dots disappear one at a time.*"
577 | ]
578 | },
579 | {
580 | "cell_type": "heading",
581 | "level": 2,
582 | "metadata": {},
583 | "source": [
584 | "Solution"
585 | ]
586 | },
587 | {
588 | "cell_type": "code",
589 | "collapsed": true,
590 | "input": [
591 | "mon = monitors.Monitor('My screen', width=37.5, distance=57)\n",
592 | "mon.setSizePix((1280,1024))\n",
593 | "win = visual.Window(color='lightgray', units='deg', monitor=mon)\n",
594 | "\n",
595 | "# make a fixation\n",
596 | "fix = visual.Circle(win, radius=.1, fillColor='black')\n",
597 | "\n",
598 | "r = 5 # radius of the larger circle\n",
599 | "ncircles = 12\n",
600 | "angle = 2 * np.pi / ncircles\n",
601 | "\n",
602 | "# make and draw stimuli\n",
603 | "dis = 0 # which one will disappear\n",
604 | "\n",
605 | "while len(event.getKeys()) == 0: \n",
606 | " for i in range(ncircles):\n",
607 | " if i != dis:\n",
608 | " pos = (r*np.cos(angle*i), r*np.sin(angle*i))\n",
609 | " circle = visual.Circle(win, radius=.5, fillColor='purple', lineColor='purple', pos=pos)\n",
610 | " circle.draw()\n",
611 | " dis = (dis + 1) % ncircles\n",
612 | " fix.draw()\n",
613 | " win.flip()\n",
614 | " core.wait(.1)\n",
615 | " \n",
616 | "win.close()"
617 | ],
618 | "language": "python",
619 | "metadata": {},
620 | "outputs": []
621 | },
622 | {
623 | "cell_type": "heading",
624 | "level": 2,
625 | "metadata": {},
626 | "source": [
627 | "Part 3 (advanced)"
628 | ]
629 | },
630 | {
631 | "cell_type": "markdown",
632 | "metadata": {},
633 | "source": [
634 | "*Optimize your code; make dots blurry.*"
635 | ]
636 | },
637 | {
638 | "cell_type": "heading",
639 | "level": 2,
640 | "metadata": {},
641 | "source": [
642 | "Solution"
643 | ]
644 | },
645 | {
646 | "cell_type": "code",
647 | "collapsed": false,
648 | "input": [
649 | "mon = monitors.Monitor('My screen', width=37.5, distance=57)\n",
650 | "mon.setSizePix((1280,1024))\n",
651 | "win = visual.Window(color='lightgray', units='deg', monitor=mon)\n",
652 | "\n",
653 | "r = 5 # radius of the larger circle\n",
654 | "ncircles = 12\n",
655 | "angle = 2 * np.pi / ncircles\n",
656 | "\n",
657 | "# make a stimuli\n",
658 | "fix = visual.Circle(win, radius=.1, fillColor='black')\n",
659 | "circle = visual.GratingStim(win, size=(2,2), tex=None, mask='gauss', color='purple')\n",
660 | "\n",
661 | "# make and draw stimuli\n",
662 | "dis = 0 # which one will disappear\n",
663 | "\n",
664 | "while len(event.getKeys()) == 0: \n",
665 | " for i in range(ncircles):\n",
666 | " if i != dis:\n",
667 | " pos = (r*np.cos(angle*i), r*np.sin(angle*i))\n",
668 | " circle.setPos(pos)\n",
669 | " circle.draw()\n",
670 | " dis = (dis + 1) % ncircles\n",
671 | " fix.draw()\n",
672 | " win.flip()\n",
673 | " core.wait(.1)\n",
674 | " \n",
675 | "win.close()"
676 | ],
677 | "language": "python",
678 | "metadata": {},
679 | "outputs": []
680 | },
681 | {
682 | "cell_type": "heading",
683 | "level": 1,
684 | "metadata": {},
685 | "source": [
686 | "Resources"
687 | ]
688 | },
689 | {
690 | "cell_type": "markdown",
691 | "metadata": {},
692 | "source": [
693 | "- [PsychoPy](http://www.psychopy.org/)\n",
694 | "- [Documentation](http://www.psychopy.org/api/api.html)\n",
695 | "- [Help forum](http://groups.google.com/group/psychopy-users)\n",
696 | "- Where are all my packages? ``import site; print site.getsitepackages()``\n",
697 | "- [GitHub repository](https://github.com/psychopy/psychopy) with the latest (but unstable) version of Psychopy where bugs might have been fixed\n",
698 | "- [Report bugs](https://github.com/psychopy/psychopy/issues)\n",
699 | "- Cite PsychoPy in your papers (at least one of the folllowing):\n",
700 | "\n",
701 | " * Peirce, JW (2007) PsychoPy - Psychophysics software in Python. J Neurosci Methods, 162(1-2):8-13\n",
702 | " * Peirce JW (2009) Generating stimuli for neuroscience using PsychoPy. Front. Neuroinform. 2:10. doi:10.3389/neuro.11.010.2008\n",
703 | "\n"
704 | ]
705 | }
706 | ],
707 | "metadata": {}
708 | }
709 | ]
710 | }
711 |
--------------------------------------------------------------------------------
/Part5/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part5/__init__.py
--------------------------------------------------------------------------------
/Part5/images/architecture.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
585 |
--------------------------------------------------------------------------------
/Part5/images/gui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part5/images/gui.png
--------------------------------------------------------------------------------
/Part5/images/tree.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
420 |
--------------------------------------------------------------------------------
/Part5/run.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | from psychopy_ext import ui
3 |
4 | __author__ = "Jonas Kubilius"
5 | __version__ = "0.1"
6 | exp_choices = [
7 | ui.Choices('scripts.trivial', name='Quick demo'),
8 | ui.Choices('scripts.changedet', name='Change Detection Experiment')
9 | ]
10 |
11 | # bring up the graphical user interface or interpret command-line inputs
12 | # usually you can skip the size parameter
13 | ui.Control(exp_choices, title='Demo Project', size=(560,550))
14 |
--------------------------------------------------------------------------------
/Part5/scripts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part5/scripts/__init__.py
--------------------------------------------------------------------------------
/Part5/scripts/changedet.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import numpy.random as rnd # for random number generators
4 |
5 | from psychopy import visual, core, event
6 | from psychopy_ext import exp
7 |
8 | from collections import OrderedDict
9 |
10 | import fix
11 | import computer
12 | PATHS = exp.set_paths('change_detection', computer)
13 | PATHS['images'] = '../Part2/images/'
14 |
15 |
16 | class ChangeDet(fix.Experiment):
17 | """
18 | Change Detection Experiment
19 | ===========================
20 |
21 | In this experiment you will see photographs flickering, with a tiny detail in them changing.
22 | Your task is to detect where the change is occurring.
23 | To make it harder, there are bubbles randomly covering parts of the photos.
24 |
25 | Hit **spacebar to begin**. When you detect a change, hit **spacebar** again.
26 | """
27 | def __init__(self,
28 | name='exp',
29 | info=OrderedDict([('exp_name', 'Change Detection'),
30 | ('subjid', 'cd_'),
31 | ('gender', ('male', 'female')),
32 | ('age', 18),
33 | ('left-handed', False)
34 | ]),
35 | rp=None,
36 | actions='run',
37 | order='sequential'
38 | ):
39 | super(ChangeDet, self).__init__(name=name, info=info,
40 | rp=rp, actions=actions,
41 | paths=PATHS, computer=computer)
42 |
43 | # user-defined parameters
44 | self.imlist = ['1','2','3','4','5','6'] # image names without the suffixes
45 | self.asfx = 'a.jpg' # suffix for the first image
46 | self.bsfx = 'b.jpg' # suffix for the second image
47 | self.scrsize = (900, 600) # screen size in px
48 | self.stimsize = (9, 6) # stimulus size in degrees visual angle
49 | self.timelimit = 30 # sec
50 | self.n_bubbles = 40
51 | self.changetime = .500 #sec
52 |
53 | self.computer.valid_responses = {'space': 1}
54 |
55 | self.trial_instr = ('Press spacebar to start the trial.\n\n'
56 | 'Hit spacebar again when you detect a change.')
57 |
58 | def create_win(self, *args, **kwargs):
59 | super(ChangeDet, self).create_win(size=self.scrsize, units='deg',
60 | *args, **kwargs)
61 |
62 | def create_stimuli(self):
63 | """Define your stimuli here, store them in self.s
64 | """
65 | self.s = {}
66 | self.s['bitmap1'] = visual.ImageStim(self.win, size=self.stimsize)
67 | self.s['bitmap2'] = visual.ImageStim(self.win, size=self.stimsize)
68 | self.s['bubble'] = visual.Circle(self.win, fillColor='black', lineColor='black')
69 |
70 | def create_trial(self):
71 | """Define trial composition
72 | """
73 | self.trial = [exp.Event(self,
74 | dur=self.timelimit, # in seconds
75 | display=[self.s['bitmap1'], self.s['bitmap2']],
76 | func=self.show_stim)
77 | ]
78 |
79 | def create_exp_plan(self):
80 | """Put together trials
81 | """
82 | # Check if all images exist
83 | for im in self.imlist:
84 | if (not os.path.exists(os.path.join(self.paths['images'], im+self.asfx)) or
85 | not os.path.exists(os.path.join(self.paths['images'], im+self.bsfx))):
86 | raise Exception('Image files not found in image folder: ' + str(im))
87 |
88 | # Randomize the image order
89 | rnd.shuffle(self.imlist)
90 |
91 | # Create the orientations list: half upright, half inverted
92 | orilist = [0,180]*(len(self.imlist)/2)
93 |
94 | # Randomize the orientation order
95 | rnd.shuffle(orilist)
96 |
97 | exp_plan = []
98 | for trialno, (im, ori) in enumerate(zip(self.imlist, orilist)):
99 | exp_plan.append(OrderedDict([
100 | ('im', im),
101 | ('ori', ori),
102 | ('onset', ''), # empty ones will be filled up
103 | ('dur', ''), # during runtime
104 | ('corr_resp', 1),
105 | ('subj_resp', ''),
106 | ('accuracy', ''),
107 | ('rt', ''),
108 | ]))
109 | self.exp_plan = exp_plan
110 |
111 | def before_trial(self):
112 | """Set up stimuli prior to a trial
113 | """
114 | im_fname = os.path.join(self.paths['images'], self.this_trial['im'])
115 | self.s['bitmap1'].setImage(im_fname + self.asfx)
116 | self.s['bitmap1'].setOri(self.this_trial['ori'])
117 | self.s['bitmap2'].setImage(im_fname + self.bsfx)
118 | self.s['bitmap2'].setOri(self.this_trial['ori'])
119 | self.bitmap = self.s['bitmap1']
120 |
121 | if self.thisTrialN > 0: # no need for instructions for the first trial
122 | self.show_text(text=self.trial_instr, wait=0)
123 |
124 | def show_stim(self, *args, **kwargs):
125 | """Control stimuli during the trial
126 | """
127 | # Empty the keypresses list
128 | event.clearEvents()
129 | keys = []
130 | change_clock = core.Clock()
131 |
132 | # Start the trial
133 | # Stop trial if spacebar or escape has been pressed, or if 30s have passed
134 |
135 | while len(keys) == 0 and self.trial_clock.getTime() < self.this_event.dur:
136 | # Switch the image
137 | if self.bitmap == self.s['bitmap1']:
138 | self.bitmap = self.s['bitmap2']
139 | else:
140 | self.bitmap = self.s['bitmap1']
141 |
142 | self.bitmap.draw()
143 |
144 | # Draw bubbles of increasing radius at random positions
145 | for radius in range(self.n_bubbles):
146 | self.s['bubble'].setRadius(radius/100.)
147 | self.s['bubble'].setPos(((rnd.random()-.5) * self.stimsize[0],
148 | (rnd.random()-.5) * self.stimsize[1] ))
149 | self.s['bubble'].draw()
150 |
151 | # Show the new screen we've drawn
152 | self.win.flip()
153 |
154 | # For the duration of 'changetime',
155 | # Listen for a spacebar or escape press
156 |
157 | change_clock.reset()
158 | while change_clock.getTime() <= self.changetime:
159 | keys = self.last_keypress(keyList=self.computer.valid_responses.keys(),
160 | timeStamped=self.trial_clock)
161 |
162 | if len(keys) > 0:
163 | break
164 | return keys
165 |
166 | if __name__ == "__main__":
167 | ChangeDet(rp={'no_output':True, 'debug':True}).run()
168 |
--------------------------------------------------------------------------------
/Part5/scripts/computer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | """
4 | Computer configuration file
5 | ===========================
6 |
7 | Specify default settings for all computers where you run your experiment, such
8 | as the monitor size or the root path for storing data. This is intended as a more
9 | portable and extended version of PsychoPy's MonitorCenter.
10 |
11 | A computer is recognized by its MAC address (which depends on its
12 | hardware) and by its name. In future versions of psychopy_ext,
13 | you'll see a warning if anything in the hardware changes.
14 |
15 | # TODO: split computer configuration and defaults possibly by moving to a
16 | config file
17 |
18 | """
19 |
20 | import uuid, platform
21 |
22 | recognized = True
23 | # computer defaults
24 | root = '.' # means store output files here
25 | stereo = False # unlike in PsychoPy, this merely creates two windows
26 | default_keys = {'exit': ('lshift', 'escape'), # key combination to exit
27 | 'trigger': 'space'} # hit to start the experiment
28 | valid_responses = {'f': 0, 'j': 1} # organized as input value: output value
29 | # monitor defaults
30 | distance = 80
31 | width = 37.5
32 | # window defaults
33 | screen = 0 # default screen is 0
34 | view_scale = (1, 1)
35 |
36 | # Get computer properties
37 | # Computer is recognized by its mac address
38 | mac = uuid.getnode()
39 | system = platform.uname()[0]
40 | name = platform.uname()[1]
41 |
42 | if mac == 153254424819 and system == 'Linux': # Lab computer, Ubuntu booted
43 | distance = 80
44 | width = 37.5
45 | root = '/media/qbilius/Data/data/'
46 |
47 | elif mac == 153254424819 and system == 'Windows': # Lab computer, Windows booted
48 | root = 'D:/data/'
49 |
50 | elif mac == 145320949993177: # fMRI computer
51 | distance = 127
52 | width = 60
53 | view_scale = [1,-1] # top-bottom inverted
54 | default_keys['trigger'] = 5
55 | valid_responses = {'9': 0, '8': 1, '7': 2, '6': 3}
56 |
57 | else:
58 | recognized = False
59 |
--------------------------------------------------------------------------------
/Part5/scripts/trivial.py:
--------------------------------------------------------------------------------
1 | from psychopy import visual
2 | from psychopy_ext import exp
3 |
4 | from collections import OrderedDict
5 |
6 | import computer
7 | PATHS = exp.set_paths('trivial', computer)
8 |
9 | class Exp1(exp.Experiment):
10 | """
11 | Instructions (in reST format)
12 | =============================
13 |
14 | **Hit 'j'** to advance to the next trial, *Left-Shift + Esc* to exit.
15 | """
16 | def __init__(self,
17 | name='exp',
18 | info=OrderedDict([('subjid', 'quick_'),
19 | ('session', 1),
20 | ]),
21 | rp=None,
22 | actions='run'
23 | ):
24 | super(Exp1, self).__init__(name=name, info=info,
25 | rp=rp, actions=actions,
26 | paths=PATHS, computer=computer)
27 |
28 | # user-defined parameters
29 | self.ntrials = 8
30 | self.stimsize = 2 # in deg
31 |
32 | def create_stimuli(self):
33 | """Define your stimuli here, store them in self.s
34 | """
35 | self.create_fixation()
36 | self.s = {}
37 | self.s['fix']= self.fixation
38 | self.s['stim'] = visual.GratingStim(self.win, mask='gauss',
39 | size=self.stimsize)
40 |
41 | def create_trial(self):
42 | """Define trial composition
43 | """
44 | self.trial = [exp.Event(self,
45 | dur=.200, # in seconds
46 | display=[self.s['stim'], self.s['fix']],
47 | func=self.idle_event),
48 | exp.Event(self,
49 | dur=0,
50 | display=self.s['fix'],
51 | func=self.wait_until_response)
52 | ]
53 |
54 | def create_exp_plan(self):
55 | """Put together trials
56 | """
57 | exp_plan = []
58 | for trialno in range(self.ntrials):
59 | exp_plan.append(OrderedDict([
60 | ('trialno', trialno),
61 | ('onset', ''), # empty ones will be filled up
62 | ('dur', ''), # during runtime
63 | ('corr_resp', 1),
64 | ('subj_resp', ''),
65 | ('accuracy', ''),
66 | ('rt', ''),
67 | ]))
68 | self.exp_plan = exp_plan
69 |
70 |
--------------------------------------------------------------------------------
/Part5/stimuli.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/Part6/slice.nii:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part6/slice.nii
--------------------------------------------------------------------------------
/Part7/images/forest.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part7/images/forest.jpg
--------------------------------------------------------------------------------
/Part7/images/kitten.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part7/images/kitten.jpg
--------------------------------------------------------------------------------
/Part7/images/oudemarkt.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part7/images/oudemarkt.jpg
--------------------------------------------------------------------------------
/Part7/images/sclos10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/Part7/images/sclos10.jpg
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Python for Vision Research
2 | ==========================
3 |
4 | An introductory series to learning Python for vision research.
5 |
6 | This course is provided entirely in IPython notebooks and you can [view its contents on nbviewer](http://nbviewer.ipython.org/github/gestaltrevision/python_for_visres/blob/master/index.ipynb).
7 |
--------------------------------------------------------------------------------
/check_config.py:
--------------------------------------------------------------------------------
1 | from psychopy import wizard
2 | wizard.BenchmarkWizard()
--------------------------------------------------------------------------------
/check_install.bat:
--------------------------------------------------------------------------------
1 | CALL python check_install.py
2 | PAUSE
--------------------------------------------------------------------------------
/check_install.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 |
4 | class Logger(object):
5 | """Log all output
6 | """
7 | def __init__(self):
8 | self.terminal = sys.stdout
9 | self.log = open("workshop_req_check.txt", "w")
10 |
11 | def write(self, message):
12 | self.terminal.write(message)
13 | self.log.write(message)
14 |
15 | sys.stdout = Logger()
16 | sys.stderr = sys.stdout
17 |
18 |
19 | from distutils.version import LooseVersion as V
20 |
21 | def check_reqs(cmod, reqs):
22 | """Check requirements for each session
23 | """
24 | reqs_fix = []
25 | for lvl,req in enumerate(reqs):
26 | for k,v in req.iteritems():
27 | if k in cmod:
28 | if v > cmod[k]:
29 | reqs_fix.append(k)
30 | if reqs_fix:
31 | if lvl == 0:
32 | print " NOT PREPARED, check error messages for:"
33 | elif lvl == 1:
34 | print " MINIMALLY PREPARED, check error messages for:"
35 | elif lvl == 2:
36 | print " MOSTLY PREPARED, check error messages for:"
37 | for k in reqs_fix:
38 | print " -", k
39 | return
40 | print " FULLY PREPARED"
41 |
42 |
43 | def mod_check(mod, modname, req_ver=None, max_ver=None, fun=None):
44 | print ' ', modname
45 | try:
46 | __import__(mod)
47 | except ImportError:
48 | cmod[mod] = 0
49 | print ' Not found, please (re-)install'
50 | else:
51 | if req_ver or max_ver:
52 | m = sys.modules[mod]
53 | try:
54 | this_ver = m.__version__
55 | except AttributeError:
56 | this_ver = m.version
57 |
58 | if req_ver and V(this_ver) < V(req_ver):
59 | cmod[mod] = 1
60 | print ' Version %s+ recommended, now: %s' % (req_ver, this_ver)
61 | else:
62 | cmod[mod] = 2
63 |
64 | if max_ver and V(this_ver) > V(max_ver):
65 | cmod[mod] = 1
66 | print ' Version under %s recommended, now: %s' % (max_ver, this_ver)
67 | elif cmod[mod] != 1:
68 | cmod[mod] = 2
69 |
70 | if fun is not None:
71 | try:
72 | fun()
73 | except:
74 | print sys.exc_info()[1]
75 | print ' Failed test'
76 | cmod[mod] = 0
77 | else:
78 | print ' OK'
79 | else:
80 | print ' OK'
81 |
82 | def test_psychopy():
83 | print
84 | print '*'*61
85 | print '*' + ' '*59 + '*'
86 | print '* A new window will open. Please follow instructions on it. *'
87 | print '*' + ' '*59 + '*'
88 | print '*'*61
89 | print
90 | from psychopy import visual, event
91 | win = visual.Window()
92 | text = visual.TextStim(win, "Press any key to continue...")
93 | text.draw()
94 | win.flip()
95 | event.waitKeys()
96 | win.close()
97 |
98 | def test_ipynb():
99 | import subprocess, time
100 | print
101 | print '*'*61
102 | print '*' + ' '*59 + '*'
103 | print '* An IPython notebook should open in your browser. *'
104 | print '* Please wait for this test to finish. Do not hit Control-C *'
105 | print '*' + ' '*59 + '*'
106 | print '*'*61
107 | print
108 | proc = subprocess.Popen(['ipython', 'notebook'], shell=False)
109 | time.sleep(10)
110 | proc.terminate()
111 |
112 | def test_PIL():
113 | import PIL
114 | import PIL.Image
115 |
116 | this_ver = PIL.Image.VERSION
117 | req_ver = '1.1.7'
118 |
119 | if V(this_ver) < V(req_ver):
120 | cmod['PIL'] = 1
121 | print ' Version %s+ recommended, now: %s' % (req_ver, this_ver)
122 | else:
123 | cmod['PIL'] = 2
124 |
125 | try:
126 | this_ver = PIL.PILLOW_VERSION
127 | req_ver = '2.2'
128 | if V(this_ver) < V(req_ver):
129 | print ' Version %s+ recommended, now: %s' % (req_ver, this_ver)
130 | cmod['PIL'] = 1
131 | else:
132 | cmod['PIL'] = 2
133 | except AttributeError:
134 | print ' You are using plain PIL, Pillow is recommended instead'
135 | cmod['PIL'] = 1
136 |
137 | # Print system info
138 | print sys.platform
139 | print sys.path
140 |
141 | # Check the individual modules
142 | # 0=broken, 1=suboptimal, 2=ok
143 | cmod = {}
144 |
145 | print
146 | print '='*79
147 | print 'MODULE CHECK'
148 | print
149 |
150 | mod_check('sys', 'Python: base installation', '2.6','2.8')
151 | mod_check('spyderlib', 'Spyder: IDE', '2.2.5')
152 | mod_check('numpy', 'NumPy: numerical computing', '1.6')
153 | mod_check('scipy', 'SciPy: scientific functions', '0.10')
154 | mod_check('matplotlib', 'Matplotlib: plot graphs', '1.0')
155 | mod_check('psychopy_ext', 'PsychoPy_ext: streamline research', '0.5.3')
156 | mod_check('seaborn', 'Seaborn: statistical data visualization', '0.2')
157 | mod_check('docutils', 'Docutils: documentation utilities')
158 | mod_check('svgwrite', 'Svgwrite: create svg images')
159 | mod_check('pandas', 'Pandas: data analysis toolkit', '0.12')
160 | mod_check('nibabel', 'NiBabel: access neuroimaging files')
161 | mod_check('h5py', 'h5py: store huge amounts of numerical data')
162 | mod_check('mvpa2', 'PyMVPA: fMRI MVPA package', '2.3.1')
163 | mod_check('PIL', 'Pillow: handle images', None, None, test_PIL)
164 | mod_check('psychopy', 'PsychoPy: build experiments', '1.79.01', None, test_psychopy)
165 | mod_check('IPython', 'IPython: interactive notebooks', '0.13', None, test_ipynb)
166 |
167 | print
168 | print '='*79
169 | print 'HOW WELL ARE YOU PREPARED?'
170 | print
171 |
172 | # Now check if requirements are met for each session
173 | # Format of reqs: [minimally, mostly, fully] prepared, else not prepared
174 |
175 | print "Session: Introduction to Python"
176 | reqs = [{'sys':1},
177 |
178 | {'sys':1,
179 | 'numpy':1,
180 | 'psychopy':1},
181 |
182 | {'sys':2,
183 | 'spyderlib':2,
184 | 'numpy':2,
185 | 'psychopy':2,
186 | 'IPython':2}]
187 | check_reqs(cmod, reqs)
188 |
189 | # ***
190 | print
191 | print "Session: Introduction to PsychoPy"
192 | reqs_psych = [{'sys':1,
193 | 'psychopy':1},
194 |
195 | {'sys':2,
196 | 'numpy':1,
197 | 'scipy':1,
198 | 'psychopy':1},
199 |
200 | {'sys':2,
201 | 'spyderlib':2,
202 | 'numpy':2,
203 | 'scipy':2,
204 | 'psychopy':2,
205 | 'IPython':2}]
206 | check_reqs(cmod, reqs_psych)
207 |
208 | # ***
209 | print
210 | print "Session: Transitioning from MATLAB to Python"
211 | reqs = [{'sys':1,
212 | 'numpy':1,
213 | 'scipy':1,
214 | 'PIL':1,
215 | 'matplotlib':1},
216 |
217 | {'sys':1,
218 | 'spyderlib':1,
219 | 'numpy':2,
220 | 'scipy':1,
221 | 'PIL':1,
222 | 'matplotlib':1},
223 |
224 | {'sys':2,
225 | 'spyderlib':2,
226 | 'numpy':2,
227 | 'scipy':2,
228 | 'PIL':2,
229 | 'IPython':2,
230 | 'matplotlib':2}]
231 | check_reqs(cmod, reqs)
232 |
233 | # ***
234 | print
235 | print "Session: More practice with PsychoPy"
236 | check_reqs(cmod, reqs_psych)
237 |
238 | # ***
239 | print
240 | print "Session: Streamline research with psychopy_ext"
241 | reqs = [{'sys':2,
242 | 'numpy':2,
243 | 'scipy':2,
244 | 'matplotlib':2,
245 | 'psychopy':2,
246 | 'psychopy_ext':2,
247 | 'pandas':2},
248 |
249 | {'sys':2,
250 | 'numpy':2,
251 | 'scipy':2,
252 | 'matplotlib':2,
253 | 'psychopy':2,
254 | 'psychopy_ext':2,
255 | 'seaborn':2,
256 | 'docutils':1,
257 | 'pandas':2},
258 |
259 | {'sys':2,
260 | 'spyderlib':2,
261 | 'numpy':2,
262 | 'scipy':2,
263 | 'matplotlib':2,
264 | 'psychopy':2,
265 | 'psychopy_ext':2,
266 | 'seaborn':2,
267 | 'docutils':2,
268 | 'svgwrite':2,
269 | 'pandas':2,
270 | 'IPython':2}]
271 | check_reqs(cmod, reqs)
272 |
273 | # ***
274 | print
275 | print "Session: Multi-voxel pattern analysis"
276 | reqs = [{'sys':2,
277 | 'numpy':2,
278 | 'psychopy_ext':2,
279 | 'nibabel':2,
280 | 'h5py':2,
281 | 'mvpa2':2},
282 |
283 | {'sys':2,
284 | 'numpy':2,
285 | 'psychopy_ext':2,
286 | 'nibabel':2,
287 | 'h5py':2,
288 | 'mvpa2':2},
289 |
290 | {'sys':2,
291 | 'spyderlib':2,
292 | 'numpy':2,
293 | 'psychopy_ext':2,
294 | 'nibabel':2,
295 | 'h5py':2,
296 | 'mvpa2':2,
297 | 'IPython':2}]
298 | check_reqs(cmod, reqs)
299 |
300 | # ***
301 | print
302 | print "Session: Natural image statistics"
303 | reqs = [{'sys':1,
304 | 'numpy':1,
305 | 'scipy':1,
306 | 'PIL':1,
307 | 'matplotlib':1},
308 |
309 | {'sys':1,
310 | 'numpy':2,
311 | 'scipy':2,
312 | 'PIL':1,
313 | 'matplotlib':1},
314 |
315 | {'sys':2,
316 | 'numpy':2,
317 | 'scipy':2,
318 | 'PIL':2,
319 | 'matplotlib':2,
320 | 'IPython':2}]
321 | check_reqs(cmod, reqs)
322 |
323 | # ***
324 | print
325 | print '='*79
326 | print 'WHAT TO DO NOW?'
327 | print
328 | print "1. Check in the list above how well you're prepared for the sessions"
329 | print " you signed up."
330 | print "2. Ideally, you should be fully prepared. Mostly prepared might"
331 | print " still suffice but not everything may work. Minimally prepared means"
332 | print " you will not be able to execute significant parts of the code."
333 | print "3. If you're underprepared, download and install missing packages,"
334 | print " and rerun this script. You may find information at"
335 | print " http://gestaltrevision.be/wiki/python/check_install useful."
336 | print "4. A file `workshop_req_check.txt` was generated in the same folder"
337 | print " where this script is. When ready, please **email** it to "
338 | print " so that we can verify that "
339 | print " you're ready for the workshop."
340 | print
341 | print '='*79
342 | print
343 |
344 | sys.stdout.log.close()
345 |
--------------------------------------------------------------------------------
/index.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "",
4 | "signature": "sha256:af2e35feac78e826e64c2754c44049da93dc293fb29f4586280506c058657394"
5 | },
6 | "nbformat": 3,
7 | "nbformat_minor": 0,
8 | "worksheets": [
9 | {
10 | "cells": [
11 | {
12 | "cell_type": "markdown",
13 | "metadata": {},
14 | "source": [
15 | ""
16 | ]
17 | },
18 | {
19 | "cell_type": "heading",
20 | "level": 1,
21 | "metadata": {},
22 | "source": [
23 | "Python for Vision Research"
24 | ]
25 | },
26 | {
27 | "cell_type": "markdown",
28 | "metadata": {},
29 | "source": [
30 | "Welcome to the introductory series to using [Python in vision research](http://gestaltrevision.be/wiki/python/python) brought to you by the [GestaltReVision](http://gestaltrevision.be) group (KU Leuven, Belgium).\n",
31 | "\n",
32 | "The series consist of seven IPython notebooks meant as a three-day crash course for vision researchers in programming with [Python](http://python.org), building experiments with [PsychoPy](http://psychopy.org) and [psychopy_ext](http://psychopy_ext.klab.lt/), learning the fMRI multi-voxel pattern analysis with [PyMVPA](http://www.pymvpa.org/), and understading image processing in Python.\n",
33 | "\n",
34 | "There are more extensive resources for vision scientists on [our GestaltReVision wiki](http://gestaltrevision.be/wiki/python/python).\n",
35 | "\n",
36 | "Please report any bugs or share ideas on our [GitHub repo](https://github.com/gestaltrevision/python_for_visres/issues)."
37 | ]
38 | },
39 | {
40 | "cell_type": "heading",
41 | "level": 1,
42 | "metadata": {},
43 | "source": [
44 | "Available tutorials"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "1. [An introduction to Python](Part1/Part1_Intro_to_Python.ipynb)\n",
52 | "2. [Introduction to PsychoPy for creating experiments](Part2/Part2_PsychoPy.ipynb)\n",
53 | "3. [Transitioning from MATLAB to Python](Part3/Part3_Scientific_Python.ipynb)\n",
54 | "4. [More practice with PsychoPy](Part4/Part4_Practice_with_PsychoPy.ipynb)\n",
55 | "5. [Streamline research with psychopy_ext](Part5/Part5_psychopy_ext.ipynb)\n",
56 | "6. [Multi-voxel pattern analysis](Part6/Part6_MVPA.ipynb)\n",
57 | "7. [Natural image statistics](Part7/Part7_Image_Statistics.ipynb)"
58 | ]
59 | },
60 | {
61 | "cell_type": "heading",
62 | "level": 1,
63 | "metadata": {},
64 | "source": [
65 | "License"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {},
71 | "source": [
72 | "**Authors:** Maarten Demeyer, [Jonas Kubilius](http://klab.lt) \n",
73 | "**Year:** 2014 \n",
74 | "**Copyright:** Public Domain as in [CC0](https://creativecommons.org/publicdomain/zero/1.0/) (except where noted otherwise). Feel free to reuse these materials for your teaching or any other purposes."
75 | ]
76 | },
77 | {
78 | "cell_type": "heading",
79 | "level": 1,
80 | "metadata": {},
81 | "source": [
82 | "Contents of this notebook"
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "metadata": {},
88 | "source": [
89 | "- [What you'll need](#What-you'll-need)\n",
90 | " - [Tutorial code](#Tutorial-code)\n",
91 | " - [Python and its packages](#Python-and-its-packages)\n",
92 | " - [Windows](#Windows)\n",
93 | " - [Linux](#Linux)\n",
94 | " - [Mac OS](#Mac-OS)\n",
95 | " - [What? I don't want to mess with my machine!](#What?-I-don't-want-to-mess-with-my-machine!)\n",
96 | "- [Checking your installation](#Checking-your-installation)\n",
97 | "- [Getting started](#Getting-started)\n",
98 | " - [Opening IPython notebooks](#Opening-IPython-notebooks)\n",
99 | " - [Windows](#Windows)\n",
100 | " - [Mac OS / Linux](#Mac-OS-/-Linux)\n",
101 | " - [Then:](#Then:)\n",
102 | " - [Editing Python scripts](#Editing-Python-scripts)"
103 | ]
104 | },
105 | {
106 | "cell_type": "heading",
107 | "level": 1,
108 | "metadata": {},
109 | "source": [
110 | "What you'll need"
111 | ]
112 | },
113 | {
114 | "cell_type": "markdown",
115 | "metadata": {},
116 | "source": [
117 | "Unfortunately, getting Python and all necessary packages is one of the major difficulties for beginners. So please be patient and try to get everything in order because once you get it running, magic can start."
118 | ]
119 | },
120 | {
121 | "cell_type": "heading",
122 | "level": 2,
123 | "metadata": {},
124 | "source": [
125 | "Tutorial code"
126 | ]
127 | },
128 | {
129 | "cell_type": "markdown",
130 | "metadata": {},
131 | "source": [
132 | "All materials, including this notebook, are available [here](http://github.com/gestaltrevision/python_for_visres)."
133 | ]
134 | },
135 | {
136 | "cell_type": "heading",
137 | "level": 2,
138 | "metadata": {},
139 | "source": [
140 | "Python and its packages"
141 | ]
142 | },
143 | {
144 | "cell_type": "heading",
145 | "level": 3,
146 | "metadata": {},
147 | "source": [
148 | "Windows"
149 | ]
150 | },
151 | {
152 | "cell_type": "markdown",
153 | "metadata": {},
154 | "source": [
155 | "The easiest method is to [install our own Python distribution](https://drive.google.com/file/d/0B8XUj38c04GBbThuc0V6eG9maEU/edit?usp=sharing) that has all necessary packages included."
156 | ]
157 | },
158 | {
159 | "cell_type": "heading",
160 | "level": 3,
161 | "metadata": {},
162 | "source": [
163 | "Linux"
164 | ]
165 | },
166 | {
167 | "cell_type": "markdown",
168 | "metadata": {},
169 | "source": [
170 | "Linux user? Awesome! Go to the [NeuroDebian](http://neuro.debian.net/) website, follow the instructions on how to add their repositories, and just sit back and relax while your system installs packages through ``sudo apt-get install `` and ``sudo pip install ``, as listed below:\n",
171 | "\n",
172 | "```bash\n",
173 | "sudo apt-get install python-pip python-numpy python-scipy python-imaging python-matplotlib\n",
174 | "sudo apt-get install psychopy spyder ipython-notebook\n",
175 | "sudo apt-get install python-pandas python-docutils python-nibabel python-mvpa2 python-h5py\n",
176 | "sudo pip install pillow\n",
177 | "sudo pip install seaborn svgwrite\n",
178 | "sudo pip install psychopy_ext\n",
179 | "```"
180 | ]
181 | },
182 | {
183 | "cell_type": "heading",
184 | "level": 3,
185 | "metadata": {},
186 | "source": [
187 | "Mac OS"
188 | ]
189 | },
190 | {
191 | "cell_type": "markdown",
192 | "metadata": {},
193 | "source": [
194 | "Unfortunately, we have a very limited in-house Mac experience. We do offer instructions [how to install most packages](http://gestaltrevision.be/pdfs/workshops/python_summer_school_Mac.pdf) but you may (or are quite likely to) run into problems and we will probably not know how to help you."
195 | ]
196 | },
197 | {
198 | "cell_type": "heading",
199 | "level": 3,
200 | "metadata": {},
201 | "source": [
202 | "What? I don't want to mess with my machine!"
203 | ]
204 | },
205 | {
206 | "cell_type": "markdown",
207 | "metadata": {},
208 | "source": [
209 | "If you are having troubles installing Python and its packages, or you just don't want to change anything in your current setup, an excellent alternative is to install the NeuroDebian Virtual Machine. This will create a Linux desktop environment within your operating system, allowing you to follow the much simpler Linux installation procedures instead. Moreover, this Linux environment will be entirely separate from any other Python installation you may have on your machine, so you won't mess anything up by trying. Detailed instructions can be found [here](http://gestaltrevision.be/wiki/python/ndvm).\n",
210 | "\n",
211 | "Alternatively, if you are only interested in Scietific Python and or PsychoPy (sessions 1-4 and 7), the [Standalone PsychoPy installation](http://sourceforge.net/projects/psychpy/files/PsychoPy/) will largely suffice. You will not have IPython installed, but all examples should be working otherwise.\n",
212 | "\n",
213 | "For even more options, [check our wiki](http://gestaltrevision.be/wiki/python/pythoninstall)."
214 | ]
215 | },
216 | {
217 | "cell_type": "heading",
218 | "level": 1,
219 | "metadata": {},
220 | "source": [
221 | "Checking your installation"
222 | ]
223 | },
224 | {
225 | "cell_type": "markdown",
226 | "metadata": {},
227 | "source": [
228 | "Run the following cell (by pressing the \u25b6 button above or Ctrl+Enter) to check whether your computer contains all needed packages. This will generate a txt output file which you can e-mail to us, should a problem arise. Press a key when asked to, and close the new IPython Notebook window that will be opened. If the cell keeps running for up to 10 seconds after closing the notebook, just wait; this is normal."
229 | ]
230 | },
231 | {
232 | "cell_type": "code",
233 | "collapsed": false,
234 | "input": [
235 | "import os\n",
236 | "\n",
237 | "os.system('python check_install.py')\n",
238 | "\n",
239 | "try:\n",
240 | " f = open('workshop_req_check.txt','r')\n",
241 | " for line in f.readlines():\n",
242 | " print line,\n",
243 | " f.close()\n",
244 | "except IOError:\n",
245 | " print 'The script could not be executed!'"
246 | ],
247 | "language": "python",
248 | "metadata": {},
249 | "outputs": [
250 | {
251 | "output_type": "stream",
252 | "stream": "stdout",
253 | "text": [
254 | "win32\n",
255 | "['D:\\\\Dropbox\\\\Destymas\\\\python_for_visres', 'C:\\\\Miniconda32\\\\lib\\\\site-packages\\\\pymvpa2-2.3.1-py2.7-win32.egg', 'C:\\\\Miniconda32\\\\lib\\\\site-packages\\\\tabular-0.1-py2.7.egg', 'C:\\\\Miniconda32\\\\lib\\\\site-packages\\\\psychopy-1.80.06-py2.7.egg', 'C:\\\\Miniconda32\\\\python27.zip', 'C:\\\\Miniconda32\\\\DLLs', 'C:\\\\Miniconda32\\\\lib', 'C:\\\\Miniconda32\\\\lib\\\\plat-win', 'C:\\\\Miniconda32\\\\lib\\\\lib-tk', 'C:\\\\Miniconda32', 'C:\\\\Miniconda32\\\\lib\\\\site-packages', 'C:\\\\Miniconda32\\\\lib\\\\site-packages\\\\PIL', 'C:\\\\Miniconda32\\\\lib\\\\site-packages\\\\win32', 'C:\\\\Miniconda32\\\\lib\\\\site-packages\\\\win32\\\\lib', 'C:\\\\Miniconda32\\\\lib\\\\site-packages\\\\Pythonwin', 'C:\\\\Miniconda32\\\\lib\\\\site-packages\\\\setuptools-3.6-py2.7.egg', 'C:\\\\Miniconda32\\\\lib\\\\site-packages\\\\wx-3.0-msw']\n",
256 | "\n",
257 | "===============================================================================\n",
258 | "MODULE CHECK\n",
259 | "\n",
260 | " Python: base installation\n",
261 | " OK\n",
262 | " Spyder: IDE\n",
263 | " OK\n",
264 | " NumPy: numerical computing\n",
265 | " OK\n",
266 | " SciPy: scientific functions\n",
267 | " OK\n",
268 | " Matplotlib: plot graphs\n",
269 | " OK\n",
270 | " PsychoPy_ext: streamline research\n",
271 | " OK\n",
272 | " Seaborn: statistical data visualization\n",
273 | " OK\n",
274 | " Docutils: documentation utilities\n",
275 | " OK\n",
276 | " Svgwrite: create svg images\n",
277 | " OK\n",
278 | " Pandas: data analysis toolkit\n",
279 | " OK\n",
280 | " NiBabel: access neuroimaging files\n",
281 | " OK\n",
282 | " h5py: store huge amounts of numerical data\n",
283 | " OK\n",
284 | " PyMVPA: fMRI MVPA package\n",
285 | " OK\n",
286 | " Pillow: handle images\n",
287 | " OK\n",
288 | " PsychoPy: build experiments\n",
289 | "C:\\Miniconda32\\lib\\site-packages\\psychopy-1.80.06-py2.7.egg\\psychopy\\preferences\\configobj.py:28: DeprecationWarning: The compiler package is deprecated and removed in Python 3.x.\n",
290 | " import compiler\n",
291 | "\n",
292 | "*************************************************************\n",
293 | "* *\n",
294 | "* A new window will open. Please follow instructions on it. *\n",
295 | "* *\n",
296 | "*************************************************************\n",
297 | "\n",
298 | "0.0716 \tWARNING \tCreating new monitor...\n",
299 | "0.0718 \tWARNING \tCreating new monitor...\n",
300 | " OK\n",
301 | " IPython: interactive notebooks\n",
302 | "\n",
303 | "*************************************************************\n",
304 | "* *\n",
305 | "* An IPython notebook should open in your browser. *\n",
306 | "* Please wait for this test to finish. Do not hit Control-C *\n",
307 | "* *\n",
308 | "*************************************************************\n",
309 | "\n",
310 | " OK\n",
311 | "\n",
312 | "===============================================================================\n",
313 | "HOW WELL ARE YOU PREPARED?\n",
314 | "\n",
315 | "Session: Introduction to Python\n",
316 | " FULLY PREPARED\n",
317 | "\n",
318 | "Session: Introduction to PsychoPy\n",
319 | " FULLY PREPARED\n",
320 | "\n",
321 | "Session: Transitioning from MATLAB to Python\n",
322 | " FULLY PREPARED\n",
323 | "\n",
324 | "Session: More practice with PsychoPy\n",
325 | " FULLY PREPARED\n",
326 | "\n",
327 | "Session: Streamline research with psychopy_ext\n",
328 | " FULLY PREPARED\n",
329 | "\n",
330 | "Session: Natural image statistics\n",
331 | " FULLY PREPARED\n",
332 | "\n",
333 | "Session: Multi-voxel pattern analysis\n",
334 | " FULLY PREPARED\n",
335 | "\n",
336 | "===============================================================================\n",
337 | "WHAT TO DO NOW?\n",
338 | "\n",
339 | "1. Check in the list above how well you're prepared for the sessions\n",
340 | " you signed up.\n",
341 | "2. Ideally, you should be fully prepared. Mostly prepared might\n",
342 | " still suffice but not everything may work. Minimally prepared means\n",
343 | " you will not be able to execute significant parts of the code.\n",
344 | "3. If you're underprepared, download and install missing packages,\n",
345 | " and rerun this script. You may find information at\n",
346 | " http://gestaltrevision.be/wiki/python/check_install useful.\n",
347 | "4. A file `workshop_req_check.txt` was generated in the same folder\n",
348 | " where this script is. When ready, please **email** it to \n",
349 | " so that we can verify that \n",
350 | " you're ready for the workshop.\n",
351 | "\n",
352 | "===============================================================================\n",
353 | "\n"
354 | ]
355 | }
356 | ],
357 | "prompt_number": 2
358 | },
359 | {
360 | "cell_type": "heading",
361 | "level": 1,
362 | "metadata": {},
363 | "source": [
364 | "Getting started"
365 | ]
366 | },
367 | {
368 | "cell_type": "heading",
369 | "level": 2,
370 | "metadata": {},
371 | "source": [
372 | "Opening IPython notebooks"
373 | ]
374 | },
375 | {
376 | "cell_type": "heading",
377 | "level": 3,
378 | "metadata": {},
379 | "source": [
380 | "Windows"
381 | ]
382 | },
383 | {
384 | "cell_type": "markdown",
385 | "metadata": {},
386 | "source": [
387 | "1. Open command-line (``Start button > Type cmd > Enter``)\n",
388 | "2. Navigate to the folder where the IPython notebook is using the ``cd`` command\n",
389 | " e.g. ``cd C:\\Users\\u000001\\Documents\\python_for_visres``.\n",
390 | " \n",
391 | " *Tip:* if you need to switch partitions, first type partition letter (no ``cd``) and hit enter, e.g., ``D:``"
392 | ]
393 | },
394 | {
395 | "cell_type": "heading",
396 | "level": 3,
397 | "metadata": {},
398 | "source": [
399 | "Mac OS / Linux"
400 | ]
401 | },
402 | {
403 | "cell_type": "markdown",
404 | "metadata": {},
405 | "source": [
406 | "1. Open command line (look for ``Terminal.app`` or `xterm`, or similar)\n",
407 | "2. Navigate to the folder where the IPython notebook is using the ``cd`` command, e.g. ``cd ~/python_for_visres``."
408 | ]
409 | },
410 | {
411 | "cell_type": "heading",
412 | "level": 3,
413 | "metadata": {},
414 | "source": [
415 | "Then:"
416 | ]
417 | },
418 | {
419 | "cell_type": "markdown",
420 | "metadata": {},
421 | "source": [
422 | "1. Type ``ipython notebook`` and hit enter. A new tab on your default browser should open with notebook choices listed. Doesn't work properly? Make sure you're not using an outdated or ridiculous browser (like some old Internet Explorer).\n",
423 | "2. Click on the notebook you want to open and play with."
424 | ]
425 | },
426 | {
427 | "cell_type": "heading",
428 | "level": 2,
429 | "metadata": {},
430 | "source": [
431 | "Editing Python scripts"
432 | ]
433 | },
434 | {
435 | "cell_type": "markdown",
436 | "metadata": {},
437 | "source": [
438 | "Python scripts are just text files. If you want, you can open them using any text editor, even Notepad. However, it is best to use specialized text editors for the task because they help you to code. Although in this tutorial we rely exclusively on IPython to write and run our scripts, in real life people usually use other text editors or integrated developments environments (IDEs) to create and run their scripts. Here is a list of some of our favorites:\n",
439 | "\n",
440 | "- Old school:\n",
441 | " - [Spyder](https://code.google.com/p/spyderlib/) - Matlab-like environment, good for beginners.\n",
442 | " - [Gedit](https://wiki.gnome.org/Apps/Gedit) with plugins\n",
443 | " - [NinjaIDE](http://ninja-ide.org/)\n",
444 | " - [Geany](http://www.geany.org/) (Linux/Windows)\n",
445 | " - [Notepad++](http://notepad-plus-plus.org/) (Windows)\n",
446 | " - [Textmate](http://macromates.com/) (Mac)\n",
447 | " - [Kod](http://kodapp.com/) (Mac)\n",
448 | " \n",
449 | "- Modern:\n",
450 | " - [SublimeText](http://www.sublimetext.com/)\n",
451 | " - [Zed](http://zedapp.org/)\n",
452 | " - [Atom](https://atom.io/)\n",
453 | " - [Brackets](http://brackets.io/)\n",
454 | " - [LightTable](http://www.lighttable.com/)"
455 | ]
456 | }
457 | ],
458 | "metadata": {}
459 | }
460 | ]
461 | }
462 |
--------------------------------------------------------------------------------
/install_linux.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sudo apt-get install python-pip python-numpy python-scipy python-imaging python-matplotlib
4 | sudo apt-get install psychopy spyder ipython-notebook
5 | sudo apt-get install python-pandas python-docutils python-nibabel python-mvpa2
6 |
7 | sudo apt-get install libjpeg-dev libtk8.6
8 |
9 | sudo pip install pillow
10 | sudo pip install seaborn svgwrite
11 | sudo pip install psychopy_ext
12 |
13 | sudo pip install jsonschema jsonpointer mistune
14 | sudo pip install --upgrade tornado pyzmq
15 | sudo pip install --upgrade ipython
--------------------------------------------------------------------------------
/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gestaltrevision/python_for_visres/1d603cda84edc302c2f120b95123345921fbd772/logo.png
--------------------------------------------------------------------------------