├── .gitignore
├── .gitattributes
├── figures
│   ├── figure_1_1.png
│   ├── figure_1_2.png
│   ├── figure_1_3.png
│   ├── figure_2_1.png
│   ├── figure_2_10.png
│   ├── figure_2_2.png
│   ├── figure_2_3.png
│   ├── figure_2_4.png
│   ├── figure_2_5.png
│   ├── figure_2_6.png
│   ├── figure_2_7.png
│   ├── figure_2_8.png
│   ├── figure_2_9.png
│   ├── figure_3_1.png
│   ├── figure_3_2.png
│   ├── figure_4_1.png
│   ├── figure_4_2.png
│   ├── figure_4_3.png
│   ├── figure_5_1.png
│   ├── figure_5_2.png
│   ├── figure_5_3.png
│   ├── figure_5_4.png
│   ├── figure_5_5.png
│   ├── figure_5_6.png
│   ├── figure_5_7.png
│   ├── figure_6_1.png
│   ├── figure_6_2.png
│   ├── figure_7_1.png
│   ├── figure_7_2.png
│   ├── figure_7_3.png
│   ├── figure_7_4.png
│   ├── figure_8_1.png
│   ├── figure_8_2.png
│   ├── figure_8_3.png
│   ├── figure_8_4.png
│   ├── figure_8_5.png
│   ├── figure_8_6.png
│   ├── figure_8_7.png
│   └── figure_8_8.png
├── example_scripts
│   ├── ch02_psychopy
│   │   ├── texture.png
│   │   ├── get_fontName.py
│   │   ├── demo_screenshot.py
│   │   ├── demo_callOnFlip.py
│   │   ├── first_script.py
│   │   ├── demo_aperture.py
│   │   ├── keyboard_PsychHID.py
│   │   ├── demo_shapes.py
│   │   ├── demo_GratingStim.py
│   │   ├── trial_recycle.py
│   │   ├── demo_mouse.py
│   │   └── simon_effect.py
│   ├── ch04_pylink
│   │   ├── images
│   │   │   ├── quebec.jpg
│   │   │   └── woods.jpg
│   │   ├── basic_example.py
│   │   └── free_viewing.py
│   ├── ch07_advanced_topics
│   │   ├── quebec.jpeg
│   │   ├── coregraphics_PsychoPy
│   │   │   ├── error.wav
│   │   │   ├── qbeep.wav
│   │   │   ├── type.wav
│   │   │   ├── demo.py
│   │   │   └── EyeLinkCoreGraphicsPsychoPy.py
│   │   ├── bitmap_backdrop.py
│   │   └── TTL_through_host.py
│   ├── ch05_data_viewer
│   │   ├── image_load
│   │   │   ├── woods.jpg
│   │   │   └── image_load.py
│   │   ├── PsychoPy_examples
│   │   │   ├── Stroop_task
│   │   │   │   ├── type.wav
│   │   │   │   ├── error.wav
│   │   │   │   ├── qbeep.wav
│   │   │   │   ├── Stroop_task.py
│   │   │   │   └── EyeLinkCoreGraphicsPsychoPy.py
│   │   │   ├── video_task
│   │   │   │   ├── error.wav
│   │   │   │   ├── qbeep.wav
│   │   │   │   ├── type.wav
│   │   │   │   ├── driving.mp4
│   │   │   │   ├── video_task.py
│   │   │   │   └── EyeLinkCoreGraphicsPsychoPy.py
│   │   │   └── pursuit_task
│   │   │       ├── error.wav
│   │   │       ├── qbeep.wav
│   │   │       ├── type.wav
│   │   │       ├── pursuit_task.py
│   │   │       └── EyeLinkCoreGraphicsPsychoPy.py
│   │   ├── trial_segmentation.py
│   │   ├── trial_variable.py
│   │   ├── interest_area.py
│   │   └── simple_drawing.py
│   ├── ch08_data_visualization
│   │   ├── freeview
│   │   │   ├── freeview.edf
│   │   │   └── images
│   │   │       ├── quebec.jpg
│   │   │       └── woods.jpg
│   │   ├── heatmap_trial_1.png
│   │   ├── heatmap_trial_2.png
│   │   ├── chord_diagram.py
│   │   ├── parse_ASC_re.py
│   │   ├── gaze_trace_plot.py
│   │   ├── parse_ASC_4scanpath.py
│   │   ├── heatmap.py
│   │   └── transition.html
│   ├── ch06_data_retrieval
│   │   ├── PsychoPy_examples
│   │   │   ├── gaze_trigger
│   │   │   │   ├── error.wav
│   │   │   │   ├── qbeep.wav
│   │   │   │   ├── type.wav
│   │   │   │   ├── woods.jpg
│   │   │   │   ├── gaze_trigger.py
│   │   │   │   └── EyeLinkCoreGraphicsPsychoPy.py
│   │   │   └── gaze_contingent_window
│   │   │       ├── error.wav
│   │   │       ├── qbeep.wav
│   │   │       ├── type.wav
│   │   │       ├── woods.jpg
│   │   │       ├── gaze_contingent_window.py
│   │   │       └── EyeLinkCoreGraphicsPsychoPy.py
│   │   ├── sample_retrieval.py
│   │   ├── event_retrieval.py
│   │   └── event_retrieval_2.py
│   ├── ch01_python_basics
│   │   ├── pygame_ch01.py
│   │   └── file_operation.py
│   └── ch03_pygame
│       ├── event_demo.py
│       ├── text_demo.py
│       ├── draw_demo.py
│       ├── display_demo.py
│       └── posner_cueing.py
└── README.md

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.DS_Store

--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto
--------------------------------------------------------------------------------
Binary assets
--------------------------------------------------------------------------------
The binary files listed in the tree above (the figures, the stimulus images,
the .wav and .mp4 media, and the freeview.edf data file) are not inlined in
this dump. Each one can be downloaded from the repository following the
pattern https://raw.githubusercontent.com/zhiguo-eyelab/Pylink_book/HEAD/<path>,
e.g., https://raw.githubusercontent.com/zhiguo-eyelab/Pylink_book/HEAD/figures/figure_1_1.png
--------------------------------------------------------------------------------
/example_scripts/ch01_python_basics/pygame_ch01.py:
--------------------------------------------------------------------------------
import pygame

# Initialize Pygame
pygame.init()

# Open a Pygame window
win = pygame.display.set_mode((400, 300))

# Draw a circle
win.fill((0, 0, 0))
pygame.draw.circle(win, (255, 0, 0), (200, 150), 30)
pygame.display.flip()

# Show the window for 30 seconds
pygame.time.wait(30000)

# Quit Pygame
pygame.quit()

--------------------------------------------------------------------------------
/example_scripts/ch02_psychopy/get_fontName.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: get_fontName.py
# Author: Zhiguo Wang
# Date: 2/7/2020
#
# Description:
# Retrieve the names of all available system fonts
# Run this script from the command line

from matplotlib import font_manager

f_list = font_manager.get_fontconfig_fonts()
f_names = []
for font in f_list:
    try:
        f = font_manager.FontProperties(fname=font).get_name()
        f_names.append(f)
    except Exception:
        # Skip fonts that matplotlib cannot read
        pass

print(f_names)

--------------------------------------------------------------------------------
/example_scripts/ch08_data_visualization/chord_diagram.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: chord_diagram.py
# Author: Zhiguo Wang
# Date: 4/28/2021
#
# Description:
# A chord diagram to capture the transitions between interest areas

import chord

# The co-occurrence (transition) matrix
trans_matrix = [[0, 3, 1, 4, 1],
                [3, 0, 3, 6, 1],
                [1, 3, 0, 9, 1],
                [4, 6, 9, 0, 0],
                [1, 1, 1, 0, 0]]

# Column and row names for the transition matrix
ia_label = ['Brother', 'Mother', 'Father', 'Sister', 'Kite']

# Create a chord diagram and save it to an HTML file
chord.Chord(trans_matrix, ia_label).to_html('transition.html')
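--------------------------------------------------------------------------------
Note: a minimal sketch (not a file in this repository) of how a transition
matrix like the one above could be tallied from a scanpath. The fixation
sequence below is hypothetical, and the matrix is filled in both directions
to match the symmetric co-occurrence format used in chord_diagram.py.
--------------------------------------------------------------------------------
ia_label = ['Brother', 'Mother', 'Father', 'Sister', 'Kite']

# A hypothetical scanpath: the interest area fixated at each step
scanpath = ['Brother', 'Mother', 'Sister', 'Father', 'Sister', 'Kite']

# Start with an n x n matrix of zeros
n = len(ia_label)
trans_matrix = [[0] * n for _ in range(n)]

# Count each transition between two different interest areas,
# incrementing both directions so the matrix stays symmetric
for prev, curr in zip(scanpath[:-1], scanpath[1:]):
    if prev != curr:
        i, j = ia_label.index(prev), ia_label.index(curr)
        trans_matrix[i][j] += 1
        trans_matrix[j][i] += 1

print(trans_matrix)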
--------------------------------------------------------------------------------
/example_scripts/ch02_psychopy/demo_screenshot.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: demo_screenshot.py
# Author: Zhiguo Wang
# Date: 2/7/2021
#
# Description:
# Get the actual frame rate, then take a screenshot

from psychopy import visual, core

# Open a window
win = visual.Window(size=[1280, 800], units="pix", fullscr=True)

# Get frame rate (frames per second)
fps = win.getActualFrameRate()
print(f'Frame rate is: {fps} FPS')

# Show the screen for 1.0 second
win.color = (0, 0, 0)
win.flip()
core.wait(1.0)

# Grab a screenshot and save it to a JPEG
win.getMovieFrame()
win.saveMovieFrames("gray_window.jpg")

# Close the window and quit PsychoPy
win.close()
core.quit()

--------------------------------------------------------------------------------
/example_scripts/ch02_psychopy/demo_callOnFlip.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: demo_callOnFlip.py
# Author: Zhiguo Wang
# Date: 2/7/2021
#
# Description:
# Check the timing accuracy of the .callOnFlip() function

from psychopy import visual, core

win = visual.Window(size=[1200, 800], units="pix", fullscr=True)

# A function to print out the current time
def print_time():
    current_t = core.getTime()
    print(f'print_time() was executed at time: {current_t:.3f}')

# In a for-loop, check if print_time() is executed at the same time as
# the window flip
for i in range(10):
    win.callOnFlip(print_time)
    flip_t = win.flip()
    print(f'Actual window flipping time was: {flip_t:.3f}')
    core.wait(0.5)

# Close the window and quit PsychoPy
win.close()
core.quit()

--------------------------------------------------------------------------------
/example_scripts/ch03_pygame/event_demo.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: event_demo.py
# Author: Zhiguo Wang
# Date: 2/7/2021
#
# Description:
# A short script showing how to handle Pygame events

import sys
import pygame
from pygame.locals import *

# Initialize Pygame and open a window
pygame.init()
scn = pygame.display.set_mode((640, 480))

# Constantly poll for new events in a while-loop
while True:
    ev_list = pygame.event.get()
    for ev in ev_list:
        # Mouse motion
        if ev.type == MOUSEMOTION:
            print(ev.pos)

        # Mouse button down
        if ev.type == MOUSEBUTTONDOWN:
            print(ev.button)

        # Key down
        if ev.type == KEYDOWN:
            print(ev.key)

        # Quit Pygame if the "close window" button is pressed
        if ev.type == QUIT:
            pygame.quit()
            sys.exit()

--------------------------------------------------------------------------------
/example_scripts/ch02_psychopy/first_script.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: first_script.py
# Author: Zhiguo Wang
# Date: 2/7/2021
#
# Description:
# A short but functioning script in PsychoPy

from psychopy import visual, core, event, monitors

# Set up the monitor parameters, so we can use 'deg' as the screen units
mon_mac15 = monitors.Monitor("mac15", distance=57.0, width=32.0)
mon_mac15.setSizePix([1280, 800])

# Open a window
win = visual.Window([800, 600], monitor=mon_mac15, units="deg")

# Prepare a Gabor in memory
gabor = visual.GratingStim(win, tex="sin", mask="gauss", size=6.0, ori=45.0)

# Draw the Gabor on screen and wait for a key press
while not event.getKeys():
    # Draw the Gabor on screen
    gabor.draw()
    win.flip()

    # Update the phase of the Gabor following each screen refresh
    gabor.phase += 0.05

# Close the window and quit PsychoPy
win.close()
core.quit()
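--------------------------------------------------------------------------------
Note: a minimal sketch (not a file in this repository) of how the monitor
profile defined in first_script.py determines the deg-to-pixel conversion;
deg2pix() comes from psychopy.tools.monitorunittools.
--------------------------------------------------------------------------------
from psychopy import monitors
from psychopy.tools.monitorunittools import deg2pix

# The same viewing geometry as in first_script.py:
# 57-cm viewing distance, 32-cm wide screen, 1280 x 800 pixels
mon = monitors.Monitor("mac15", distance=57.0, width=32.0)
mon.setSizePix([1280, 800])

# At 57 cm, 1 deg of visual angle covers about 1 cm of the screen,
# i.e., roughly 1280 / 32 = 40 pixels on this display
print(deg2pix(6.0, mon))  # size of the 6.0-deg Gabor, in pixels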
--------------------------------------------------------------------------------
/example_scripts/ch02_psychopy/demo_aperture.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: demo_aperture.py
# Author: Zhiguo Wang
# Date: 2/7/2020
#
# Description:
# Simulating a macular degeneration condition in PsychoPy

from psychopy import visual, core, event

# Open a window
win = visual.Window(size=(800, 600), units="pix", allowStencil=True)

# Create an aperture of arbitrary shape
vert = [(0.1, .50), (.45, .20), (.10, -.5), (-.60, -.5), (-.5, .20)]
apt = visual.Aperture(win, size=200, shape=vert, inverted=True)
apt.enabled = True

# Initialize a mouse object
mouse = event.Mouse(visible=False)

# Prepare the stimuli
text = visual.TextStim(win, text="Moving window example by Zhiguo "*24,
                       height=30, color='black', wrapWidth=760)

# Mouse-contingent moving window
while not event.getKeys():
    apt.pos = mouse.getPos()
    text.draw()
    win.flip()

# Close the window and quit PsychoPy
win.close()
core.quit()

--------------------------------------------------------------------------------
/example_scripts/ch03_pygame/text_demo.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: text_demo.py
# Author: Zhiguo Wang
# Date: 2/7/2021
#
# Description:
# Text rendering example in Pygame

import pygame, sys

# Initialize Pygame
pygame.init()

# Open a window
win = pygame.display.set_mode((300, 200))

# Create a font object and enable 'underline'
fnt = pygame.font.SysFont('arial', 32, bold=True, italic=True)
fnt.set_underline(True)

# Use the size() method to estimate the width and height of
# the rendered text surface
demo_text = 'Hello, World!'
w, h = fnt.size(demo_text)

# Render the text to get a text surface
win.fill((0, 0, 0))
text_surf = fnt.render(demo_text, True, (255, 0, 0))

# Show (blit) the text surface at the window center
win.blit(text_surf, (150 - w/2, 100 - h/2))
pygame.display.flip()

# Show the text until a key is pressed
while True:
    for ev in pygame.event.get():
        if ev.type == pygame.KEYUP:
            pygame.quit()
            sys.exit()
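--------------------------------------------------------------------------------
Note: a minimal sketch (not a file in this repository). SysFont() in
text_demo.py silently falls back to Pygame's default font when 'arial' is not
installed; the names it can match are returned by pygame.font.get_fonts().
--------------------------------------------------------------------------------
import pygame

pygame.init()

# All system fonts Pygame can match by name (lowercase, spaces stripped)
available = pygame.font.get_fonts()
print(len(available), 'fonts found')
print('arial is available:', 'arial' in available)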
--------------------------------------------------------------------------------
/example_scripts/ch02_psychopy/keyboard_PsychHID.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: keyboard_PsychHID.py
# Author: Zhiguo Wang
# Date: 2/6/2021
#
# Description:
# Using PsychHID to register keyboard events in PsychoPy

from psychopy.hardware import keyboard
from psychopy import core, visual

win = visual.Window((200, 200))

# Create a keyboard object
kb = keyboard.Keyboard()

# We define a function to print out the key presses
def waitKey():
    '''a function to detect a single keypress'''

    got_key = False
    while not got_key:
        keys = kb.getKeys()
        if keys:
            for key in keys:
                print(key.name, key.duration, key.rt, key.tDown)
            got_key = True

# A simple response time task
# Change the window color following each key press
for i in range(10):
    win.color = (i % 2 * 1.0, -1, -1)
    win.flip()
    kb.clock.reset()  # reset the clock
    waitKey()

# Close the window and quit PsychoPy
win.close()
core.quit()

--------------------------------------------------------------------------------
/example_scripts/ch07_advanced_topics/coregraphics_PsychoPy/demo.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: demo.py
# Author: Zhiguo Wang
# Date: 2/4/2021
#
# Description:
# This short script shows how to request Pylink to use the
# EyeLinkCoreGraphicsPsychoPy library

import pylink
from psychopy import visual, core, event, monitors
from EyeLinkCoreGraphicsPsychoPy import EyeLinkCoreGraphicsPsychoPy

# Connect to the tracker
tk = pylink.EyeLink('100.1.1.1')

# Open a PsychoPy window
SCN_WIDTH, SCN_HEIGHT = (1280, 800)
win = visual.Window((SCN_WIDTH, SCN_HEIGHT),
                    fullscr=False,
                    units='pix')

# Pass display dimension (left, top, right, bottom) to the tracker
coords = f"screen_pixel_coords = 0 0 {SCN_WIDTH - 1} {SCN_HEIGHT - 1}"
tk.sendCommand(coords)

# Create a custom graphics environment (genv) for calibration
genv = EyeLinkCoreGraphicsPsychoPy(tk, win)
pylink.openGraphicsEx(genv)

# Calibrate the tracker
# When a gray screen comes up, press Enter to show the camera image,
# press C to calibrate, V to validate, O to exit the calibration routine
tk.doTrackerSetup()

# Close the connection to the tracker
tk.close()

# Quit PsychoPy
win.close()
core.quit()
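--------------------------------------------------------------------------------
Note: a minimal sketch (not a file in this repository). The 'calibration_type'
Host command can be sent before doTrackerSetup() to select the calibration
grid (e.g., HV5, HV9, or HV13); a graphics environment is assumed to have been
set up already, as in demo.py above.
--------------------------------------------------------------------------------
import pylink

# Connect to the tracker
tk = pylink.EyeLink('100.1.1.1')

# Request a 9-point calibration grid
tk.sendCommand('calibration_type = HV9')

# Calibrate (assumes openGraphics()/openGraphicsEx() was called),
# then disconnect
tk.doTrackerSetup()
tk.close()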
--------------------------------------------------------------------------------
/example_scripts/ch05_data_viewer/trial_segmentation.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: trial_segmentation.py
# Author: Zhiguo Wang
# Date: 2/6/2021
#
# Description:
# This script illustrates the TRIALID and TRIAL_RESULT messages
# that Data Viewer uses to segment a recording into trials

import pylink

# Connect to the tracker
tk = pylink.EyeLink()

# Open an EDF on the Host
# filename must not exceed 8 characters
tk.openDataFile('seg.edf')

# Run through five trials
for trial in range(1, 6):
    # Print out a message to show the current trial
    print(f'Trial #: {trial}')

    # Log a TRIALID message to mark the trial start
    tk.sendMessage(f'TRIALID {trial}')

    # Start recording
    tk.startRecording(1, 1, 1, 1)

    # Pretending that we are doing something for 2-sec
    pylink.pumpDelay(2000)

    # Stop recording
    tk.stopRecording()

    # Log a TRIAL_RESULT message to mark the trial end
    tk.sendMessage('TRIAL_RESULT 0')

    # Wait for 100 ms to catch session end events
    pylink.msecDelay(100)

# Close the EDF file and download it from the Host PC
tk.closeDataFile()
tk.receiveDataFile('seg.edf', 'trial_segmentation_demo.edf')

# Close the link
tk.close()

--------------------------------------------------------------------------------
/example_scripts/ch01_python_basics/file_operation.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: file_operation.py
# Author: Zhiguo Wang
# Date: 2/3/2020
#
# Description:
# Open, read, and write plain text files

file_name = 'file_op.txt'

# Open a file with 'write' permission,
# write three lines of text into the file, then
# close the file
print('Writing three lines of text to %s' % file_name)
file = open(file_name, 'w')
for i in range(3):
    file.write('This is line %d\n' % (i+1))
file.close()

# Open file_op.txt in read-only mode, read the first line with the
# readline() method, then close the file
print('\n\nRead the first line in %s' % file_name)
file = open('file_op.txt', 'r')
line = file.readline()
print(line)
file.close()

# Open file_op.txt in read-only mode, read four characters
print('\n\nRead the first four characters in %s' % file_name)
file = open('file_op.txt', 'r')
txt = file.read(4)
print(txt)
file.close()

# Open file_op.txt in read-only mode, then loop over all lines
file = open('file_op.txt', 'r')
for line in file:
    print(line)
file.close()

# Open file_op.txt in a "with" statement
with open('file_op.txt', 'r') as file:
    for line in file:
        print(line.rstrip())
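--------------------------------------------------------------------------------
Note: a minimal sketch (not a file in this repository) extending
file_operation.py with the standard-library csv module, which is handy for
trial-by-trial behavioral data; the file name and columns here are made up.
--------------------------------------------------------------------------------
import csv

# Write a header and two rows of (hypothetical) trial data
with open('subj_01.csv', 'w', newline='') as csv_file:
    writer = csv.writer(csv_file)
    writer.writerow(['trial', 'condition', 'RT'])
    writer.writerow([1, 'congruent', 523])
    writer.writerow([2, 'incongruent', 677])

# Read the rows back as lists of strings
with open('subj_01.csv', 'r') as csv_file:
    for row in csv.reader(csv_file):
        print(row)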
--------------------------------------------------------------------------------
/example_scripts/ch07_advanced_topics/bitmap_backdrop.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: bitmap_backdrop.py
# Author: Zhiguo Wang
# Date: 4/27/2021
#
# Description:
# Transfer an image to the Host to use as the backdrop

import pylink
from PIL import Image

# Connect to the tracker
tk = pylink.EyeLink('100.1.1.1')

# Pass display dimension (left, top, right, bottom) to the tracker
tk.sendCommand('screen_pixel_coords = 0 0 1023 767')

# Put the tracker in offline mode before we transfer the image
tk.setOfflineMode()

# Convert the image to the format supported by
# the bitmapBackdrop() command
im = Image.open('quebec.jpeg')  # open an image with PIL
w, h = im.size  # get the width and height of the image
pixels = im.load()  # access the pixel data
# reformat the pixels into a list of rows
pixels_img = [[pixels[i, j] for i in range(w)] for j in range(h)]

# Transfer the image to the Host PC screen
tk.sendCommand('clear_screen 0')
tk.sendCommand('echo PIXELs_FROM_IMAGE')
tk.bitmapBackdrop(w, h, pixels_img, 0, 0, w, h,
                  50, 50, pylink.BX_MAXCONTRAST)

# Show the image for 3-sec on the Host PC
pylink.msecDelay(3000)

# Clear up the Host screen
tk.sendCommand('clear_screen 0')

# Close the connection
tk.close()

--------------------------------------------------------------------------------
/example_scripts/ch08_data_visualization/parse_ASC_re.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: parse_ASC_re.py
# Author: Zhiguo Wang
# Date: 5/25/2021
#
# Description:
# Parse the ASC file with regular expressions (re).

import os
import re
import pandas as pd

# Open the converted ASC file
asc = open(os.path.join('freeview', 'freeview.asc'))

efix = []  # fixation end events
esac = []  # saccade end events
for line in asc:
    # Extract all numbers and put them in a list
    tmp_data = [float(x) for x in re.findall(r'-?\d+\.?\d*', line)]

    # Retrieve events parsed from the right eye recording
    if re.search('^EFIX R', line):
        efix.append(tmp_data)
    elif re.search('^ESACC R', line):
        esac.append(tmp_data)
    else:
        pass

# Put the extracted data into pandas data frames
# EFIX R 80790054 80790349 296 981.3 554.5 936
efix_colname = ['startT', 'endT', 'duration', 'avgX', 'avgY', 'avgPupil']
efixFRM = pd.DataFrame(efix, columns=efix_colname)
# ESACC R 80790350 80790372 23 982.6 551.8 864.9 587.9 1.94 151
esac_colname = ['startT', 'endT', 'duration', 'startX', 'startY',
                'endX', 'endY', 'amplitude', 'peakVel']
esacFRM = pd.DataFrame(esac, columns=esac_colname)

# Close the ASC file
asc.close()
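--------------------------------------------------------------------------------
Note: a minimal follow-up sketch (not a file in this repository) showing how
the data frames built in parse_ASC_re.py could be summarized with pandas;
the 100-ms duration cutoff is an arbitrary illustration.
--------------------------------------------------------------------------------
# Descriptive statistics for fixation duration (column defined above)
print(efixFRM['duration'].describe())

# Mean saccade amplitude
print(esacFRM['amplitude'].mean())

# Keep only fixations longer than 100 ms
valid_fix = efixFRM[efixFRM['duration'] > 100]
print(f'{len(valid_fix)} of {len(efixFRM)} fixations exceed 100 ms')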
--------------------------------------------------------------------------------
/example_scripts/ch04_pylink/basic_example.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: basic_example.py
# Author: Zhiguo Wang
# Date: 3/16/2021
#
# Description:
# A very basic script showing how to connect/disconnect the tracker,
# open/close an EDF data file, configure tracking parameters, calibrate
# the tracker, start/stop recording, and log messages in the data file

import pylink

# Step 1: Connect to the tracker
tk = pylink.EyeLink('100.1.1.1')

# Step 2: Open an EDF data file on the EyeLink Host PC
tk.openDataFile('test.edf')

# Step 3: Set some tracking parameters
tk.sendCommand("sample_rate 1000")

# Step 4: Open a calibration window
pylink.openGraphics()

# Step 5: Calibrate the tracker, then run five trials
tk.doTrackerSetup()

for i in range(5):
    # Log a message in the EDF data file
    tk.sendMessage(f'Trial: {i}')

    # Start recording
    tk.startRecording(1, 1, 1, 1)

    # Record data for 2 seconds
    pylink.msecDelay(2000)

    # Stop recording
    tk.stopRecording()

# Step 6: Close the EDF data file and download it from the Host PC
tk.closeDataFile()
tk.receiveDataFile('test.edf', 'test.edf')

# Step 7: Close the link to the tracker, then close the window
tk.close()
pylink.closeGraphics()

--------------------------------------------------------------------------------
/example_scripts/ch03_pygame/draw_demo.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: draw_demo.py
# Author: Zhiguo Wang
# Date: 2/7/2021
#
# Description:
# A script illustrating the drawing functions in Pygame

import pygame
import sys
from pygame.locals import *

# Initialize Pygame
pygame.init()

# Open a window
scn = pygame.display.set_mode((640, 480))

# An empty list to store clicked screen positions
points = []

while True:
    # Poll Pygame events
    for ev in pygame.event.get():
        # Quit Pygame and Python if the "close window"
        # button is clicked
        if ev.type == QUIT:
            pygame.quit()
            sys.exit()

        # Append the current mouse position to the list when
        # a mouse button down event is detected
        if ev.type == MOUSEBUTTONDOWN:
            points.append(ev.pos)

    # Clear the screen
    scn.fill((255, 255, 255))

    # Draw a polygon after three mouse clicks
    if len(points) >= 3:
        pygame.draw.polygon(scn, (0, 255, 0), points)

    # Highlight the screen locations that have been clicked
    for point in points:
        pygame.draw.circle(scn, (0, 0, 255), point, 10)

    # Flip the video buffer to show the drawings
    pygame.display.flip()

--------------------------------------------------------------------------------
/example_scripts/ch02_psychopy/demo_shapes.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: demo_shapes.py
# Author: Zhiguo Wang
# Date: 2/7/2021
#
# Description:
# Drawing in PsychoPy

from psychopy import visual, event, core

# Open a Window
win = visual.Window(size=[800, 600], units='pix')

# Line
line_vertices = [(-400, -300), (400, 300)]
line = visual.ShapeStim(win, vertices=line_vertices,
                        lineColor='white', closeShape=False)

# Rectangle
rect_vertices = [(-400, -300), (-320, -300), (-320, -240), (-400, -240)]
rect = visual.ShapeStim(win, vertices=rect_vertices,
                        fillColor='blue', lineWidth=0)

# Polygon
poly = visual.Polygon(win, edges=6, radius=100, fillColor='green')

# Move the rectangle along the line and change the filling color of
# the polygon when it overlaps with the rectangle
while True:
    if rect.overlaps(poly):
        poly.fillColor = 'red'
    else:
        poly.fillColor = 'green'
    line.draw()
    poly.draw()
    rect.draw()
    win.flip()
    # Update the position of the rectangle following each flip
    rect.pos += (4, 3)
    # Break out when the rectangle reaches the end of the line
    if rect.contains((400, 300)):
        break

# Close the window and quit PsychoPy
win.close()
core.quit()
--------------------------------------------------------------------------------
/example_scripts/ch05_data_viewer/trial_variable.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: trial_variable.py
# Author: Zhiguo Wang
# Date: 2/6/2021
#
# Description:
# This script illustrates the TRIAL_VAR messages
# that Data Viewer uses to parse trial variables

import pylink

# Connect to the tracker
tk = pylink.EyeLink()

# Open an EDF on the Host; filename must not exceed 8 characters
tk.openDataFile('vars.edf')

# Run through five trials
for trial in range(1, 6):
    # Print out a message to show the current trial
    print(f'Trial #: {trial}')

    # Log a TRIALID message to mark the trial start
    tk.sendMessage(f'TRIALID {trial}')

    # Start recording
    tk.startRecording(1, 1, 1, 1)

    # Pretending that we are doing something for 2-sec
    pylink.pumpDelay(2000)

    # Stop recording
    tk.stopRecording()

    # Send TRIAL_VAR messages to store variables in the EDF
    tk.sendMessage('!V TRIAL_VAR condition step')
    tk.sendMessage('!V TRIAL_VAR gap_duration 200')
    tk.sendMessage('!V TRIAL_VAR direction Right')

    # Log a TRIAL_RESULT message to mark the trial end
    tk.sendMessage('TRIAL_RESULT 0')

    # Wait for 100 ms to catch session end events
    pylink.msecDelay(100)

# Close the EDF file and download it from the Host PC
tk.closeDataFile()
tk.receiveDataFile('vars.edf', 'trial_variable_demo.edf')

# Close the link
tk.close()

--------------------------------------------------------------------------------
/example_scripts/ch05_data_viewer/interest_area.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
#
# Filename: interest_area.py
# Author: Zhiguo Wang
# Date: 2/6/2021
#
# Description:
# This script illustrates the IAREA messages
# that Data Viewer uses to reconstruct interest areas

import pylink

# Connect to the tracker
tk = pylink.EyeLink()

# Open an EDF on the Host; filename must not exceed 8 characters
tk.openDataFile('ias.edf')

# Run through five trials
for trial in range(1, 6):
    # Print out a message to show the current trial
    print(f'Trial #: {trial}')

    # Log a TRIALID message to mark the trial start
    tk.sendMessage(f'TRIALID {trial}')

    # Start recording
    tk.startRecording(1, 1, 1, 1)

    # Send IAREA messages to store interest area definitions
    tk.sendMessage('!V IAREA ELLIPSE 1 0 0 100 100 head')
    tk.sendMessage('!V IAREA RECTANGLE 2 85 85 285 185 body')
    tk.sendMessage('!V IAREA FREEHAND 3 285,125 385,50 335,125 tail')

    # Pretending that we are doing something for 2-sec
    pylink.pumpDelay(2000)

    # Stop recording
    tk.stopRecording()

    # Log a TRIAL_RESULT message to mark the trial end
    tk.sendMessage('TRIAL_RESULT 0')

    # Wait for 100 ms to catch session end events
    pylink.msecDelay(100)

# Close the EDF file and download it from the Host PC
tk.closeDataFile()
tk.receiveDataFile('ias.edf', 'interest_area_demo.edf')

# Close the link
tk.close()
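--------------------------------------------------------------------------------
Note: a minimal sketch (not a file in this repository) generating the
rectangular IAREA messages used in interest_area.py from a dictionary, which
scales better than hard-coding each message. The labels and coordinates are
hypothetical, and 'tk' is assumed to be a connected, recording EyeLink, as
in the loop above.
--------------------------------------------------------------------------------
# label -> (left, top, right, bottom), in screen pixels
ias = {'head': (0, 0, 100, 100),
       'body': (85, 85, 285, 185)}

# Data Viewer numbers interest areas from 1
for ia_id, (label, (l, t, r, b)) in enumerate(ias.items(), 1):
    tk.sendMessage(f'!V IAREA RECTANGLE {ia_id} {l} {t} {r} {b} {label}')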
--------------------------------------------------------------------------------
/example_scripts/ch03_pygame/display_demo.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: display_demo.py
# Author: Zhiguo Wang
# Date: 2/11/2021
#
# Description:
# Open a window to assess monitor refresh consistency

import pygame
from pygame.locals import *

# Initialize Pygame & its modules
pygame.init()

# Get the native resolution supported by the monitor
scn_res = pygame.display.list_modes()[0]

# Open a window
win = pygame.display.set_mode(scn_res, DOUBLEBUF | HWSURFACE | FULLSCREEN)

# An empty list to store the monitor refresh intervals
intv = []

# Flip the video buffer, then grab the timestamp of the first retrace
pygame.display.flip()

# Get the timestamp of the 'previous' screen retrace
t_before_flip = pygame.time.get_ticks()

# Use a for-loop to flip the video buffer 200 times
for i in range(200):
    # Switch the window color between black and white
    if i % 2 == 0:
        win.fill((255, 255, 255))
    else:
        win.fill((0, 0, 0))
    # Flip the video buffer to show the screen
    pygame.display.flip()

    # Get the timestamp of the 'current' screen retrace
    t_after_flip = pygame.time.get_ticks()
    # Get the refresh interval
    flip_intv = t_after_flip - t_before_flip
    # Store the refresh interval in "intv"
    intv.append(flip_intv)
    # Reset the timestamp of the 'previous' retrace
    t_before_flip = t_after_flip

# Print out the max, min, and average refresh intervals
intv_max = max(intv)
intv_min = min(intv)
intv_avg = sum(intv)*1.0/len(intv)
print('Max: {}, Min: {}, Mean: {}'.format(intv_max, intv_min, intv_avg))

# Quit Pygame
pygame.quit()

--------------------------------------------------------------------------------
/example_scripts/ch02_psychopy/demo_GratingStim.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: demo_GratingStim.py
# Author: Zhiguo Wang
# Date: 2/7/2021
#
# Description:
# The GratingStim() function in PsychoPy

from psychopy import visual, core
import numpy as np

# Open a window
win = visual.Window(size=(600, 400), units="pix", color=[0, 0, 0])

# Prepare the stimuli in memory
grating = visual.GratingStim(win, tex='sqr', mask=None,
                             size=128, sf=1/32.0, pos=(-200, 100))
gabor = visual.GratingStim(win, tex='sin', mask='gauss',
                           size=128, sf=1/32.0, pos=(0, 100))
checker = visual.GratingStim(win, tex='sqrXsqr', mask='circle',
                             size=128, sf=1/32.0, pos=(200, 100))
# Customized texture:
# an 8 x 8 grid of random values between -1 and 1
custom_texture = np.random.random((8, 8))*2 - 1
numpy_texture = visual.GratingStim(win, tex=custom_texture, mask=None,
                                   size=128, pos=(-200, -100))
# Use an image as the texture
image_texture = visual.GratingStim(win, tex='texture.png', mask='raisedCos',
                                   size=128, pos=(0, -100))
# You get a rectangle with no texture or mask
no_texture = visual.GratingStim(win, tex=None, mask=None,
                                size=128, pos=(200, -100))

# Show the stimuli
grating.draw()
gabor.draw()
checker.draw()
numpy_texture.draw()
image_texture.draw()
no_texture.draw()
win.flip()

# Take a screenshot and save it to a PNG
win.getMovieFrame()
win.saveMovieFrames('gratings.png')

# Show the stimuli for 5 seconds, then close the window and quit PsychoPy
core.wait(5.0)
win.close()
core.quit()
--------------------------------------------------------------------------------
/example_scripts/ch02_psychopy/trial_recycle.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: trial_recycle.py
# Author: Zhiguo Wang
# Date: 2/6/2021
#
# Description:
# This demo shows how to recycle a trial

import random
from psychopy import visual, event, core

win = visual.Window(size=(600, 400), units='pix')

def run_trial(trial_id):
    """a simple function to run a single trial"""

    # Show some info on the screen
    task_instruction = f'This is Trial: {trial_id}\n\n' + \
                       'RIGHT--> Next trial\n' + \
                       'LEFT--> Recycle current trial'
    msg = visual.TextStim(win, text=task_instruction)
    msg.draw()
    win.flip()

    # Wait for a response
    key = event.waitKeys(keyList=['left', 'right'])

    # Clear the window
    win.clearBuffer()
    win.flip()
    core.wait(0.5)

    if 'right' in key:
        return False
    if 'left' in key:
        return True

trial_list = ['t1', 't2', 't3', 't4', 't5']

# Recycle trials with a while-loop
while len(trial_list) > 0:
    # Randomize the trial list
    random.shuffle(trial_list)

    # Grab the last trial and pop it out of the trial_list
    trial_to_test = trial_list.pop()

    # Run a single trial
    should_recycle = run_trial(trial_to_test)

    # Add the trial back to the trial_list if the
    # return value is True (i.e., need to recycle)
    if should_recycle:
        trial_list.append(trial_to_test)

    # Show what trials are left in the trial list
    print(f'The trial you just completed is: {trial_to_test}')
    print(f'Trials left in the list: {trial_list}')

# Close the window and quit PsychoPy
win.close()
core.quit()

--------------------------------------------------------------------------------
/example_scripts/ch07_advanced_topics/TTL_through_host.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Filename: TTL_through_host.py
# Author: Zhiguo Wang
# Date: 4/27/2021
#
# Description:
# Sending TTLs through the EyeLink Host PC
# Here we assume an EyeLink 1000 Plus tracker is being tested, so the
# base address is 0x8. For EyeLink 1000, the base address is 0x378

import pylink

# Connect to the tracker
tk = pylink.EyeLink('100.1.1.1')

# Open an EDF data file on the Host
tk.openDataFile('ttl_test.edf')

# Start recording
tk.startRecording(1, 1, 1, 1)

# Using the Pylink function writeIOPort to send TTLs

# Clear the Data Register
tk.writeIOPort(0x8, 0)
pylink.pumpDelay(100)

for i in range(201, 209):
    # Write a TTL to the Data Register
    tk.writeIOPort(0x8, i)
    # TTL signal duration--20 ms
    pylink.pumpDelay(20)
    # Clear the Data Register
    tk.writeIOPort(0x8, 0)
    # Wait for 1 second before we send the next TTL
    pylink.pumpDelay(1000)

# Using the Host 'write_ioport' command to send TTLs
# The "*" in the command requests the Host to log the command in
# the EDF data file

# Clear the Data Register
tk.sendCommand('write_ioport 0x8 0')
pylink.pumpDelay(100)

for j in range(1, 9):
    # Write a TTL to the Data Register
    tk.sendCommand(f'*write_ioport 0x8 {j}')
    # TTL signal duration--20 ms
    pylink.pumpDelay(20)
    # Clear the Data Register
    tk.sendCommand('write_ioport 0x8 0')
    # Wait for 1 second before we send the next TTL
    pylink.pumpDelay(1000)

# Stop recording
tk.stopRecording()

# Close the EDF data file and download it from the Host PC
tk.closeDataFile()
tk.receiveDataFile('ttl_test.edf', 'ttl_test.edf')

# Close the link to the tracker
tk.close()
15 | # Connect to the tracker 16 | tk = pylink.EyeLink('100.1.1.1') 17 | 18 | # Open an EDF data file on the Host 19 | tk.openDataFile('ttl_test.edf') 20 | 21 | # Start recording 22 | tk.startRecording(1, 1, 1, 1) 23 | 24 | # Using the Pylink function writeIOPort to send TTLs 25 | 26 | # Clear the Data Register 27 | tk.writeIOPort(0x8, 0) 28 | pylink.pumpDelay(100) 29 | 30 | for i in range(201, 209): 31 | # Write a TTL to the Data Register 32 | tk.writeIOPort(0x8, i) 33 | # TTL signal duration--20 ms 34 | pylink.pumpDelay(20) 35 | # Clear the Data Register 36 | tk.writeIOPort(0x8, 0) 37 | # Wait for 1 second before we send the next TTL 38 | pylink.pumpDelay(1000) 39 | 40 | # Using the Host 'write_ioport' command to send TTLs 41 | # The "*" in the command requests the Host to log the command in 42 | # the EDF data file 43 | 44 | # Clear the Data Register 45 | tk.sendCommand('write_ioport 0x8 0') 46 | pylink.pumpDelay(100) 47 | 48 | for j in range(1, 9): 49 | # Write a TTL to the Data Register 50 | tk.sendCommand(f'*write_ioport 0x8 {j}') 51 | # TTL signal duration--20 ms 52 | pylink.pumpDelay(20) 53 | # Clear the Data Register 54 | tk.sendCommand('write_ioport 0x8 0') 55 | # Wait for 1 second before we send the next TTL 56 | pylink.pumpDelay(1000) 57 | 58 | # Stop recording 59 | tk.stopRecording() 60 | 61 | # Close the EDF data file and download it from the Host PC 62 | tk.closeDataFile() 63 | tk.receiveDataFile('ttl_test.edf', 'ttl_test.edf') 64 | 65 | # Close the link to the tracker 66 | tk.close() 67 | -------------------------------------------------------------------------------- /example_scripts/ch05_data_viewer/image_load/image_load.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Filename: image_load.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/6/2021 6 | # 7 | # Description: 8 | # This script illustrates the IMGLOAD message 9 | # that Data Viewer uses to show an image in the background 10 | 11 | import pylink 12 | 13 | # Connect to the tracker 14 | tk = pylink.EyeLink() 15 | 16 | # Open an EDF on the Host; filename must not exceed 8 characters 17 | tk.openDataFile('imgload.edf') 18 | 19 | # Assume the screen resolution is 1280 x 800 pixels 20 | SCN_W, SCN_H = (1280, 800) 21 | 22 | # Pass the screen coordinates to the tracker 23 | coords = f"screen_pixel_coords = 0 0 {SCN_W - 1} {SCN_H - 1}" 24 | tk.sendCommand(coords) 25 | 26 | # Record a DISPLAY_COORDS message to let Data Viewer know the 27 | # correct screen resolution to use when visualizing the data 28 | tk.sendMessage(f'DISPLAY_COORDS 0 0 {SCN_W - 1} {SCN_H - 1}') 29 | 30 | # Run through five trials 31 | for trial in range(1, 6): 32 | # Print out a message to show the current trial 33 | print(f'Trial #: {trial}') 34 | 35 | # Log a TRIALID message to mark trial start 36 | tk.sendMessage(f'TRIALID {trial}') 37 | 38 | # Start recording 39 | tk.startRecording(1, 1, 1, 1) 40 | 41 | # Assuming an image is presented in the task and we would like 42 | # to have the same image in the background when visualizing data 43 | # in Data Viewer 44 | tk.sendMessage('!V IMGLOAD FILL woods.jpg') 45 | 
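    # --- An aside, not part of the original script ---
    # Besides FILL, Data Viewer accepts placement variants of IMGLOAD; the
    # exact syntax below is quoted from memory, so treat it as an assumption
    # and check the Data Viewer User Manual before relying on it:
    # tk.sendMessage('!V IMGLOAD CENTER woods.jpg 640 400')   # center the image at (x, y)
    # tk.sendMessage('!V IMGLOAD TOP_LEFT woods.jpg 0 0')     # anchor its top-left corner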
46 | # Pretending that we are doing something for 2-sec 47 | pylink.pumpDelay(2000) 48 | 49 | # Stop recording 50 | tk.stopRecording() 51 | 52 | # Log a TRIAL_RESULT message to mark the end of the trial 53 | tk.sendMessage('TRIAL_RESULT 0') 54 | 55 | # Wait for 100 ms to catch session end events 56 | pylink.msecDelay(100) 57 | 58 | # Close the EDF file and download it from the Host PC 59 | tk.closeDataFile() 60 | tk.receiveDataFile('imgload.edf', 'imgload_demo.edf') 61 | 62 | # Close the link 63 | tk.close() 64 | -------------------------------------------------------------------------------- /example_scripts/ch05_data_viewer/simple_drawing.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Filename: simple_drawing.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/6/2021 6 | # 7 | # Description: 8 | # This script illustrates the various drawing commands 9 | # supported by Data Viewer 10 | 11 | import pylink 12 | 13 | # Connect to the tracker 14 | tk = pylink.EyeLink() 15 | 16 | # Open an EDF on the Host; filename must not exceed 8 characters 17 | tk.openDataFile('drawing.edf') 18 | 19 | # Assume the screen resolution is 1280 x 800 pixels 20 | SCN_W, SCN_H = (1280, 800) 21 | 22 | # Pass the screen coordinates to the tracker 23 | coords = f'screen_pixel_coords = 0 0 {SCN_W - 1} {SCN_H - 1}' 24 | tk.sendCommand(coords) 25 | 26 | # Record a DISPLAY_COORDS message to let Data Viewer know the 27 | # correct screen resolution to use when visualizing the data 28 | tk.sendMessage(f'DISPLAY_COORDS 0 0 {SCN_W - 1} {SCN_H - 1}') 29 | 30 | # Run through five trials 31 | for trial in range(1, 6): 32 | # Print out a message to show the current trial 33 | print(f'Trial #: {trial}') 34 | 35 | # Log a TRIALID message to mark trial start 36 | tk.sendMessage(f'TRIALID {trial}') 37 | 38 | # Start recording 39 | tk.startRecording(1, 1, 1, 1) 40 | 41 | # Clear the screen to show a white background 42 | tk.sendMessage('!V CLEAR 255 255 255') 43 | # Draw a central fixation dot 44 | tk.sendMessage('!V FIXPOINT 0 0 0 0 0 0 512 384 25 0') 45 | # Draw the non-target 46 | tk.sendMessage('!V FIXPOINT 0 0 0 255 255 255 312 384 80 75') 47 | # Draw the target 48 | tk.sendMessage('!V FIXPOINT 255 0 0 255 0 0 712 384 80 0') 49 | 50 | # Pretending that we are doing something for 2-sec 51 | pylink.pumpDelay(2000) 52 | 53 | # Stop recording 54 | tk.stopRecording() 55 | 56 | # Log a TRIAL_RESULT message to mark the end of the trial 57 | tk.sendMessage('TRIAL_RESULT 0') 58 | 59 | # Wait for 100 ms to catch session end events 60 | pylink.msecDelay(100) 61 | 62 | # Close the EDF file and download it from the Host PC 63 | tk.closeDataFile() 64 | tk.receiveDataFile('drawing.edf', 'drawing_demo.edf') 65 | 66 | # Close the link 67 | tk.close() 68 | -------------------------------------------------------------------------------- /example_scripts/ch02_psychopy/demo_mouse.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: demo_mouse.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/6/2021 6 | # 7 | # Description: 8 | # A short demo illustrating the various mouse functions in PsychoPy 9 | 10 | from psychopy import visual, event, core 11 | 12 | # Open a window 13 | win = visual.Window(size=(800, 600), winType='pyglet', units='pix') 14 | 15 | # Create a Mouse object 16 | mouse = event.Mouse(visible=True, win=win) 17 | 18 | # Prepare the stimuli in memory 19 | text_prompt = visual.TextStim(win=win, text='Do you like PsychoPy?', 20 | height=30, pos=(0, 250)) 21 | text_yes = visual.TextStim(win=win, text='YES', height=30, 22 | pos=(-200, 150), color='red') 23 | text_no = visual.TextStim(win=win, text='NO', height=30, 24 | pos=(200, 150), color='green') 25 | circle_yes = visual.Polygon(win=win, edges=32, radius=60, 
26 | pos=(-200, 150), fillColor='white') 27 | circle_no = visual.Polygon(win=win, edges=32, radius=60, 28 | pos=(200, 150), fillColor='white') 29 | fix_cross = visual.TextStim(win=win, text='+', height=30, pos=(0, -150)) 30 | mouse_traj = visual.ShapeStim(win=win, lineColor='black', 31 | closeShape=False, lineWidth=5) 32 | 33 | # Clear cached events 34 | event.clearEvents() 35 | 36 | # Set the mouse position, so the movement starts from the fixation cross 37 | mouse.setPos((0, -150)) 38 | 39 | # Use a list to store the mouse positions 40 | traj = [mouse.getPos()] 41 | 42 | # In a while loop, check if the "yes" or "no" circle has been clicked 43 | while not (mouse.isPressedIn(circle_no) or mouse.isPressedIn(circle_yes)): 44 | # Following a position change, add the new mouse position to 'traj' 45 | if mouse.mouseMoved(): 46 | traj.append(mouse.getPos()) 47 | 48 | # Put stimuli on display and draw the mouse trajectory 49 | text_prompt.draw() 50 | circle_no.draw() 51 | circle_yes.draw() 52 | text_no.draw() 53 | text_yes.draw() 54 | fix_cross.draw() 55 | mouse_traj.vertices = traj # this can be slow 56 | mouse_traj.draw() 57 | win.flip() 58 | 59 | # Close the window and quit PsychoPy 60 | win.close() 61 | core.quit() 62 | -------------------------------------------------------------------------------- /example_scripts/ch08_data_visualization/gaze_trace_plot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: gaze_trace_plot.py 4 | # Author: Zhiguo Wang 5 | # Date: 5/25/2021 6 | # 7 | # Description: 8 | # Extract the samples from an ASC file, then plot the gaze traces. 9 | 10 | import os 11 | import re 12 | import numpy as np 13 | import pandas as pd 14 | import matplotlib.pyplot as plt 15 | 16 | # Convert EDFs to ASC files with the edf2asc command-line tool 17 | # If you run this script from IDLE on macOS, be sure to launch IDLE 18 | # from the command-line (e.g., enter "idle3.6" in the terminal) 19 | # 20 | # Options for the command line “edf2asc” converter 21 | # -r, output right-eye data only 22 | # -y, overwrite ASC file if exists 23 | cmd = 'edf2asc -r -y freeview/freeview.edf' 24 | os.system(cmd) 25 | 26 | # Open the converted ASC file 27 | asc = open(os.path.join('freeview', 'freeview.asc')) 28 | 29 | new_trial = False 30 | trial_DFs = {} # data frames of samples, one per trial, keyed by trial number 31 | trial = 0 32 | for line in asc: 33 | # Extract numerical values from the data line 34 | values = [float(x) for x in re.findall(r'-?\d+\.?\d*', line)] 35 | 36 | # Look for the message marking image onset 37 | if re.search('image_onset', line): 38 | new_trial = True 39 | trial += 1 40 | print(f'processing trial # {trial}...') 41 | 42 | # Store samples in lists (timestamp, x, y, pupil size) 43 | tmp_DF = [] 44 | 45 | # A sample data line always starts with a numerical literal 46 | if new_trial and re.search('^\d', line): 47 | # 80855874 1506.4 269.0 729.0 ... 48 | # 80855875 . . 0.0 ... 
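        # --- An aside, not part of the original script ---
        # A quick sanity check of the regex used above, against the two
        # example lines in the comments:
        #   re.findall(r'-?\d+\.?\d*', '80855874 1506.4 269.0 729.0')
        #   --> ['80855874', '1506.4', '269.0', '729.0']
        # On a tracking-loss line the lone dots contain no digits, so fewer
        # than four values come back, which is what the check below detects.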
49 | if len(values) == 4: # normal sample line 50 | tmp_DF.append(values) 51 | else: # sample line with missing values (e.g., tracking loss) 52 | tmp_DF.append([values[0], np.nan, np.nan, np.nan]) 53 | 54 | if re.search('image_offset', line): # message marking image offset 55 | # Put samples in a pandas data frame and store it in trial_DFs 56 | colname = ['timestamp', 'gaze_x', 'gaze_y', 'pupil'] 57 | trial_DFs[trial] = pd.DataFrame(tmp_DF, columns=colname) 58 | new_trial = False 59 | 60 | # close the ASC file 61 | asc.close() 62 | 63 | # Plot the gaze trace and pupil size data from trial # 1 64 | trial_DFs[1].plot(y=['gaze_x', 'gaze_y', 'pupil']) 65 | plt.show() 66 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Eye-tracking with Python and Pylink 2 | 3 | Python is becoming more and more popular among scientists. Its application in psychology has expanded from scripting computer-based experimental tasks to all aspects of research, including but not limited to data collection, analysis, and visualization. I had little knowledge of computer programming when I started my graduate study about 16 years ago. I experimented with different programming languages and tools but finally settled down with Python for its easy syntax, flexibility, versatility, and open community. 4 | 5 | The content of this book is based mainly on my research experience and the courses I have taught previously. There are quite a few Python books on the market featuring programming tools designed for psychologists (e.g., PsychoPy). What sets this book apart is its focus on eye-tracking. 6 | 7 | Eye-tracking is a widely used research technique in psychology and neuroscience labs. The eye-trackers used in the lab are typically faster, more accurate, and of course, more expensive than the ones seen in consumer goods (e.g., VR goggles) or usability labs. The eye-trackers featured in this book are the EyeLink series trackers manufactured by SR Research Ltd. EyeLink eye-trackers are arguably the best research-grade tracker available on the market. This high-speed eye-tracker has unmatched precision and accuracy, and it has been used in well over 9000 peer-reviewed publications. 8 | 9 | This book will first introduce the building blocks and syntax of Python, then discuss libraries that we can use to program psychology experiments, i.e., PsychoPy and Pygame. For eye-tracking, this book will cover the Pylink library in detail. The example scripts accompanying this book are freely available on GitHub, https://github.com/zhiguo-eyelab/Pylink_book. This book is a useful reference for eye-tracking researchers, but you can also use it in graduate or undergraduate level programming courses. 10 | 11 | Special thanks go to Dr. Sam Hutton, Dr. Yanliang Sun, and my colleagues at SR Research Ltd. for their valuable feedback on an earlier version of this book. 12 | 13 | _________________________________________________________ 14 | Copyright © 2020 by Zhiguo Wang 15 | 16 | All rights reserved. 17 | 18 | This book or any portion thereof may not be reproduced or used in any manner whatsoever 19 | without the express written permission of the author except for the use of brief quotations in a book review. 
20 | -------------------------------------------------------------------------------- /example_scripts/ch08_data_visualization/parse_ASC_4scanpath.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: parse_ASC_4scanpath.py 4 | # Author: Zhiguo Wang 5 | # Date: 5/25/2021 6 | # 7 | # Description: 8 | # Parse an ASC file to extract fixations, then plot the scanpath. 9 | 10 | import os 11 | from PIL import Image, ImageDraw 12 | from math import sqrt 13 | 14 | # Open the converted ASC file 15 | asc = open(os.path.join('freeview', 'freeview.asc')) 16 | 17 | new_trial = False 18 | trial = 0 19 | for line in asc: 20 | # Convert the current data line into a list 21 | tmp_data = line.rstrip().split() 22 | 23 | # Get the correct screen resolution from the DISPLAY_COORDS message 24 | # MSG 4302897 DISPLAY_COORDS 0 0 1279 799 25 | if 'DISPLAY_COORDS' in line: 26 | scn_w = int(tmp_data[-2]) + 1 27 | scn_h = int(tmp_data[-1]) + 1 28 | 29 | # Look for the message marking image onset 30 | if 'image_onset' in line: 31 | new_trial = True 32 | trial += 1 33 | print(f'Processing trial # {trial} ...') 34 | 35 | # Store the position and duration of all fixations in lists 36 | fix_coords = [] 37 | fix_duration = [] 38 | 39 | if new_trial: 40 | # Path to the background image 41 | # MSG 3558923 !V IMGLOAD FILL images/woods.jpg 42 | if 'IMGLOAD' in line: 43 | bg_image = tmp_data[-1] 44 | 45 | # Retrieve the coordinates and duration of all fixations 46 | # EFIX R 80790054 80790349 296 981.3 554.5 936 63.50 63.50 47 | if 'EFIX' in line: 48 | duration, x, y = [int(float(x)) for x in tmp_data[4:7]] 49 | fix_coords.append((x, y)) 50 | fix_duration.append(duration) 51 | 52 | # Look for the message marking image offset, draw the scanpath 53 | if 'image_offset' in line: 54 | # Open the image and resize it to fill up the screen 55 | img = os.path.join('freeview', bg_image) 56 | pic = Image.open(img).resize((scn_w, scn_h)) 57 | 58 | # Create an ImageDraw object 59 | draw = ImageDraw.Draw(pic) 60 | 61 | # Draw the scanpath 62 | draw.line(fix_coords, fill=(0, 0, 255), width=2) 63 | 64 | # Draw circles to represent the fixations; the size reflects 65 | # the fixation duration, scaled to its maximum 66 | for i, d in enumerate(fix_duration): 67 | sz = sqrt(d / max(fix_duration) * 256) 68 | gx, gy = fix_coords[i] 69 | draw.ellipse([gx-sz, gy-sz, gx+sz, gy+sz], 70 | fill=(255, 255, 0), outline=(0, 0, 255)) 71 | 72 | # Save the scanpath for each trial 73 | pic.save(f'scanpath_trial_{trial}.png', 'PNG') 74 | 75 | # Close the ASC file 76 | asc.close() 77 | -------------------------------------------------------------------------------- /example_scripts/ch08_data_visualization/heatmap.py: --------------------------------------------------------------------------------

#!/usr/bin/env python3
#
# Filename: heatmap.py
# Author: Zhiguo Wang
# Date: 4/28/2021
#
# Description:
# Extract fixations from an ASC file to create a heatmap

import os
import re
import numpy as np
from PIL import Image
from matplotlib import cm

# Convert EDFs to ASC files with the edf2asc command-line tool
# If you run this script from IDLE on macOS, be sure to launch IDLE
# from the command-line (e.g., enter "idle3.6" in the terminal)
#
# Options for the command line “edf2asc” converter
# -e, output event data only
# -res, output resolution data if present
# -y, overwrite ASC file if exists
cmd = 'edf2asc -e -res -y freeview/freeview.edf'
os.system(cmd)

# Open the converted ASC file
asc = open('freeview/freeview.asc', 'r')

# Transparency for the heatmap
alpha = 0.5

new_trial = False
trial = 0
for line in asc:
    # Extract all numerical values from the data line
    values = [float(x) for x in re.findall(r'-?\d+\.?\d*', line)]

    # Get the correct screen resolution from the DISPLAY_COORDS message
    # MSG 4302897 DISPLAY_COORDS 0 0 1279 799
    if re.search('DISPLAY_COORDS', line):
        scn_w = int(values[-2]) + 1
        scn_h = int(values[-1]) + 1

    # Look for the message marking image onset
    if re.search('image_onset', line):
        new_trial = True
        trial += 1
        print(f'processing trial # {trial}...')

        # Initialize the heatmap matrix
        w, h = np.meshgrid(np.linspace(0, scn_w, scn_w),
                           np.linspace(0, scn_h, scn_h))
        heatmap = np.exp(-(w**2 + h**2)) * 0

    if new_trial:
        if re.search('EFIX', line):
            # EFIX R 80790373 80790527 155 855.5 596.0 881 63.60 63.75
            start_t, end_t, duration, x, y, pupil, res_x, res_y = values
            # add the new fixation to the heatmap
            heatmap += duration * np.exp(-(1.0*(w - x)**2/(2*res_x**2) +
                                           1.0*(h - y)**2/(2*res_y**2)))
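            # --- An explanatory aside, not part of the original file ---
            # Each fixation adds a 2-D Gaussian weighted by its duration:
            #   heatmap += dur * exp(-((w - x)^2/(2*res_x^2) + (h - y)^2/(2*res_y^2)))
            # res_x and res_y are the angular resolution values (pixels per
            # degree) logged with the EFIX event, so each blob spans roughly
            # one degree of visual angle around the fixated pixel.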
        # Path to the background image
        # MSG 3558923 !V IMGLOAD FILL images/woods.jpg
        if 'IMGLOAD' in line:
            bg_image = line.rstrip().split()[-1]

        # Look for the message marking image offset, create a heatmap
        if re.search('image_offset', line):
            # Open the image and resize it to fill up the screen
            img = os.path.join('freeview', bg_image)
            pic = Image.open(img).convert('RGBA').resize((scn_w, scn_h))

            # Apply a colormap (from the colormap library of Matplotlib)
            heatmap = np.uint8(cm.jet(heatmap/np.max(heatmap))*255)

            # blending
            heatmap = Image.fromarray(heatmap)
            blended = Image.blend(pic, heatmap, alpha)

            # Save the heatmap as a PNG file
            blended.save(f'heatmap_trial_{trial}.png', 'PNG')

            new_trial = False

# Close the ASC file
asc.close()

-------------------------------------------------------------------------------- /example_scripts/ch06_data_retrieval/sample_retrieval.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: sample_retrieval.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/6/2021 6 | # 7 | # Description: 8 | # A short script illustrating online retrieval of sample data 9 | 10 | import pylink 11 | 12 | # Connect to the tracker 13 | tk = pylink.EyeLink('100.1.1.1') 14 | 15 | # Open an EDF data file on the Host PC 16 | tk.openDataFile('smp_test.edf') 17 | 18 | # Put the tracker in offline mode before we change tracking parameters 19 | tk.setOfflineMode() 20 | 21 | # Set sample rate to 1000 Hz 22 | tk.sendCommand('sample_rate 1000') 23 | 24 | # Make gaze, HREF, and raw (PUPIL) data available over the link 25 | sample_flag = 'LEFT,RIGHT,GAZE,GAZERES,PUPIL,HREF,AREA,STATUS,INPUT' 26 | tk.sendCommand(f'link_sample_data = {sample_flag}') 27 | 28 | # Open an SDL window for calibration 29 | pylink.openGraphics() 30 | 31 | # Set up the camera and calibrate the tracker 32 | tk.doTrackerSetup() 33 | 34 | # Put tracker in idle/offline mode before we start recording 35 | tk.setOfflineMode() 36 | 37 | # Start recording 38 | error = tk.startRecording(1, 1, 1, 1) 39 | 40 | # Wait for a moment 41 | pylink.msecDelay(100) 42 | 43 | # Open a plain text file to store the retrieved sample data 44 | text_file = open('sample_data.csv', 'w') 45 | 46 | # Current tracker time 47 | t_start = tk.trackerTime() 48 | smp_time = -1 
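# --- An aside, not part of the original script ---
# getNewestSample() always returns the most recent sample available over
# the link, so a tight loop will see the same sample many times. The
# smp_time variable initialized above is used below to write a sample out
# only when its timestamp is newer than the last one saved.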
49 | while True: 50 | # Break after 5 seconds have elapsed 51 | if tk.trackerTime() - t_start > 5000: 52 | break 53 | 54 | # Poll the latest samples 55 | smp = tk.getNewestSample() 56 | if smp is not None: 57 | # Grab gaze, HREF, raw, & pupil size data 58 | if smp.isRightSample(): 59 | gaze = smp.getRightEye().getGaze() 60 | href = smp.getRightEye().getHREF() 61 | raw = smp.getRightEye().getRawPupil() 62 | pupil = smp.getRightEye().getPupilSize() 63 | elif smp.isLeftSample(): 64 | gaze = smp.getLeftEye().getGaze() 65 | href = smp.getLeftEye().getHREF() 66 | raw = smp.getLeftEye().getRawPupil() 67 | pupil = smp.getLeftEye().getPupilSize() 68 | 69 | timestamp = smp.getTime() 70 | 71 | # Save gaze, HREF, raw, & pupil data to the plain text 72 | # file, if the sample is new 73 | if timestamp > smp_time: 74 | smp_data = map(str, [timestamp, gaze, href, raw, pupil]) 75 | text_file.write('\t'.join(smp_data) + '\n') 76 | smp_time = timestamp 77 | 78 | # Stop recording 79 | tk.stopRecording() 80 | 81 | # Close the plain text file 82 | text_file.close() 83 | 84 | # Close the EDF data file on the Host 85 | tk.closeDataFile() 86 | 87 | # Download the EDF data file from Host 88 | tk.receiveDataFile('smp_test.edf', 'smp_test.edf') 89 | 90 | # Close the link to the tracker 91 | tk.close() 92 | 93 | # Close the window 94 | pylink.closeGraphics() 95 | -------------------------------------------------------------------------------- /example_scripts/ch06_data_retrieval/event_retrieval.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: event_retrieval.py 4 | # Author: Zhiguo Wang 5 | # Date: 5/26/2021 6 | # 7 | # Description: 8 | # A short script illustrating online retrieval of eye events 9 | 10 | import pylink 11 | 12 | # Connect to the tracker 13 | tk = pylink.EyeLink('100.1.1.1') 14 | 15 | # Open an EDF data file on the Host PC 16 | tk.openDataFile('ev_test.edf') 17 | 18 | # Put the tracker in offline mode before we change tracking parameters 19 | tk.setOfflineMode() 20 | 21 | # Set sample rate to 1000 Hz 22 | tk.sendCommand('sample_rate 1000') 23 | 24 | # Make all types of event data available over the link 25 | event_flgs = 'LEFT,RIGHT,FIXATION,FIXUPDATE,SACCADE,BLINK,BUTTON,INPUT' 26 | tk.sendCommand(f'link_event_filter = {event_flgs}') 27 | 28 | # Open an SDL window for calibration 29 | pylink.openGraphics() 30 | 31 | # Set up the camera and calibrate the tracker 32 | tk.doTrackerSetup() 33 | 34 | # Put tracker in idle/offline mode before we start recording 35 | tk.setOfflineMode() 36 | 37 | # Start recording 38 | tk.startRecording(1, 1, 1, 1) 39 | 40 | # Wait for the block start event to arrive, give a warning 41 | # if no event or sample is available 42 | block_start = tk.waitForBlockStart(100, 1, 1) 43 | if block_start == 0: 44 | print("ERROR: No link data received!") 45 | 46 | # Check eye availability; 0-left, 1-right, 2-binocular 47 | # read data from the right eye if tracking in binocular mode 48 | eye_to_read = tk.eyeAvailable() 49 | if eye_to_read == 2: 50 | eye_to_read = 1 51 | 52 | # Get the current tracker time 53 | t_start = tk.trackerTime() 54 | while True: 55 | # Break after 5 seconds have elapsed 56 | if tk.trackerTime() - t_start > 5000: 57 | break 58 | 59 | # Retrieve the oldest event in the buffer 60 | dt = tk.getNextData() 
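    # --- An aside, not part of the original script ---
    # getNextData() pops the oldest queued link item and returns a numeric
    # type code (e.g., pylink.ENDSACC), or 0 when the queue is empty; the
    # matching event object is then fetched with getFloatData(). Polling
    # faster than events arrive keeps this queue from backing up.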
61 | if dt in [pylink.STARTSACC, pylink.ENDSACC, 62 | pylink.STARTFIX, pylink.ENDFIX]: 63 | ev = tk.getFloatData() 64 | # Look for right eye events only; 0-left, 1-right 65 | if ev.getEye() == eye_to_read: 66 | # Send a message to the tracker when an event is 67 | # received over the link; include the timestamp 68 | # in the message to examine the link delay 69 | if dt == pylink.STARTSACC: 70 | tk.sendMessage(f'STARTSACC {ev.getTime()}') 71 | if dt == pylink.ENDSACC: 72 | tk.sendMessage(f'ENDSACC {ev.getTime()}') 73 | if dt == pylink.STARTFIX: 74 | tk.sendMessage(f'STARTFIX {ev.getTime()}') 75 | if dt == pylink.ENDFIX: 76 | tk.sendMessage(f'ENDFIX {ev.getTime()}') 77 | 78 | # Stop recording 79 | tk.stopRecording() 80 | 81 | # Close the EDF data file on the Host 82 | tk.closeDataFile() 83 | 84 | # Download the EDF data file from Host 85 | tk.receiveDataFile('ev_test.edf', 'ev_test.edf') 86 | 87 | # Close the link to the tracker 88 | tk.close() 89 | 90 | # Close the window 91 | pylink.closeGraphics() 92 | -------------------------------------------------------------------------------- /example_scripts/ch06_data_retrieval/PsychoPy_examples/gaze_contingent_window/gaze_contingent_window.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: gaze_contingent_window.py 4 | # Author: Zhiguo Wang 5 | # Date: 4/26/2021 6 | # 7 | # Description: 8 | # A gaze-contingent window task implemented in PsychoPy 9 | 10 | import pylink 11 | from psychopy import visual, core, event, monitors 12 | from EyeLinkCoreGraphicsPsychoPy import EyeLinkCoreGraphicsPsychoPy 13 | 14 | # Connect to the tracker 15 | tk = pylink.EyeLink('100.1.1.1') 16 | 17 | # Open an EDF data file 18 | tk.openDataFile('psychopy.edf') 19 | 20 | # Put the tracker in offline mode before we change tracking parameters 21 | tk.setOfflineMode() 22 | 23 | # Make all types of sample data available over the link 24 | sample_flags = 'LEFT,RIGHT,GAZE,GAZERES,PUPIL,HREF,AREA,STATUS,INPUT' 25 | tk.sendCommand(f'link_sample_data = {sample_flags}') 26 | 27 | # Screen resolution 28 | SCN_W, SCN_H = (1280, 800) 29 | 30 | # Open a PsychoPy window with the "allowStencil" option 31 | win = visual.Window((SCN_W, SCN_H), fullscr=False, 32 | units='pix', allowStencil=True) 33 | 34 | # Pass the display pixel coordinates (left, top, right, bottom) to the tracker 35 | coords = f"screen_pixel_coords = 0 0 {SCN_W - 1} {SCN_H - 1}" 36 | tk.sendCommand(coords) 37 | 38 | # Request Pylink to use the custom EyeLinkCoreGraphicsPsychoPy library 39 | # to draw calibration graphics (target, camera image, etc.) 
40 | genv = EyeLinkCoreGraphicsPsychoPy(tk, win) 41 | pylink.openGraphicsEx(genv) 42 | 43 | # Calibrate the tracker 44 | calib_msg = visual.TextStim(win, text='Press ENTER to calibrate') 45 | calib_msg.draw() 46 | win.flip() 47 | tk.doTrackerSetup() 48 | 49 | # Set up an aperture and use it as a gaze-contingent window 50 | gaze_window = visual.Aperture(win, shape='square', size=200) 51 | gaze_window.enabled = True 52 | 53 | # Load a background image to fill up the screen 54 | img = visual.ImageStim(win, image='woods.jpg', size=(SCN_W, SCN_H)) 55 | 56 | # Put tracker in Offline mode before we start recording 57 | tk.setOfflineMode() 58 | 59 | # Start recording 60 | tk.startRecording(1, 1, 1, 1) 61 | 62 | # Cache some samples 63 | pylink.msecDelay(100) 64 | 65 | # show the image indefinitely until a key is pressed 66 | gaze_x, gaze_y = (-32768, -32768) # start off-screen until a sample arrives 67 | while not event.getKeys(): 68 | # Check for new samples 69 | dt = tk.getNewestSample() 70 | if dt is not None: 71 | if dt.isRightSample(): 72 | gaze_x, gaze_y = dt.getRightEye().getGaze() 73 | elif dt.isLeftSample(): 74 | gaze_x, gaze_y = dt.getLeftEye().getGaze() 75 | 76 | # Draw the background image 77 | img.draw() 78 | 79 | # Update the window with the current gaze position 80 | gaze_window.pos = (gaze_x - SCN_W/2.0, SCN_H/2.0 - gaze_y) 81 | win.flip() 82 | 83 | # Stop recording 84 | tk.stopRecording() 85 | 86 | # Close the EDF data file on the Host 87 | tk.closeDataFile() 88 | 89 | # Download the EDF data file from Host 90 | tk.receiveDataFile('psychopy.edf', 'psychopy.edf') 91 | 92 | # Close the link to the tracker 93 | tk.close() 94 | 95 | # Close the graphics 96 | win.close() 97 | core.quit() 98 | 99 | -------------------------------------------------------------------------------- /example_scripts/ch02_psychopy/simon_effect.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: simon_effect.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/6/2021 6 | # 7 | # Description: 8 | # Measuring the Simon effect in PsychoPy 9 | 10 | import random 11 | from psychopy import visual, core, event, gui 12 | 13 | # Open a window and prepare the stimuli 14 | win = visual.Window((1280, 800), units='pix', fullscr=False, color='black') 15 | text_msg = visual.TextStim(win, text='message') 16 | tar_stim = visual.GratingStim(win, tex='None', mask='circle', size=60.0) 17 | 18 | # Possible target positions 19 | tar_pos = {'left': (-200, 0), 'right': (200, 0)} 20 | 21 | # A list of all possible trial parameter combinations 22 | trials = [ 23 | ['left', 'red', 'z', 'congruent'], 24 | ['left', 'blue', 'slash', 'incongruent'], 25 | ['right', 'red', 'z', 'incongruent'], 26 | ['right', 'blue', 'slash', 'congruent'] 27 | ] 28 | 29 | 
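# --- An aside, not part of the original script ---
# The hand-written list above enumerates a 2 (position) x 2 (color) design.
# A hedged, equivalent sketch with itertools scales better as factors are
# added ('trials_alt' and the key/congruency mapping are illustrative):
from itertools import product
trials_alt = []
for p, c in product(['left', 'right'], ['red', 'blue']):
    k = {'red': 'z', 'blue': 'slash'}[c]  # response key is mapped to color
    cong = 'congruent' if (p == 'left') == (c == 'red') else 'incongruent'
    trials_alt.append([p, c, k, cong])
# trials_alt reproduces the 'trials' list defined above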
30 | def run_trial(trial_pars, data_file, participant): 31 | """ Run a single trial. 32 | 33 | trial_pars -- target position, color, and correct key, e.g., 34 | ['left', 'red', 'z', 'congruent'] 35 | data_file -- a file to save trial data 36 | participant -- information about the participant in a dictionary, 37 | e.g., {'id': 1, 'name': 'zw'}""" 38 | 39 | # Unpacking the parameter list 40 | pos, color, cor_key, congruency = trial_pars 41 | 42 | # Set target position and color 43 | tar_stim.pos = tar_pos[pos] 44 | tar_stim.color = color 45 | 46 | # Present a fixation cross for 750 ms 47 | text_msg.text = '+' 48 | text_msg.draw() 49 | win.flip() 50 | core.wait(0.750) 51 | 52 | # Present the target and wait for a key response 53 | tar_stim.draw() 54 | win.flip() 55 | t_tar_onset = core.getTime() 56 | tar_resp = event.waitKeys(1500, ['z', 'slash'], timeStamped=True) 57 | 58 | # write data to file 59 | trial_data = list(participant.values()) + \ 60 | trial_pars + [t_tar_onset] + \ 61 | list(tar_resp[0]) 62 | trial_data = map(str, trial_data) # convert list items to string 63 | data_file.write(','.join(trial_data) + '\n') 64 | 65 | # clear the screen and set an ITI of 500 ms 66 | win.color = 'black' 67 | win.flip() 68 | core.wait(0.500) 69 | 70 | # ------ Real experiment starts here ------- 71 | 72 | # Get participant info with a dialog 73 | participant = {'Participant ID': 0, 'Participant Initials': 'zw'} 74 | dlg = gui.DlgFromDict(participant, title='Enter participant info here') 75 | 76 | # Open a data file with write permission 77 | d_file = open(participant['Participant Initials']+'.csv', 'w') 78 | 79 | # Show task instructions 80 | text_msg.text = 'Press Z to RED\nPress / to BLUE\n\nPress SPACE to start' 81 | text_msg.draw() 82 | win.flip() 83 | event.waitKeys(keyList=['space']) 84 | 85 | # Randomly shuffle the trial list and iterate over all of them 86 | random.seed(1000) # Set a random seed 87 | trial_list = trials[:]*2 88 | random.shuffle(trial_list) 89 | for pars in trial_list: 90 | run_trial(pars, d_file, participant) 91 | 92 | # Close the data file 93 | d_file.close() 94 | 95 | # Close the window and quit PsychoPy 96 | win.close() 97 | core.quit() 98 | 99 | -------------------------------------------------------------------------------- /example_scripts/ch06_data_retrieval/event_retrieval_2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: event_retrieval_2.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/26/2021 6 | # 7 | # Description: 8 | # A short script illustrating online retrieval of eye events 9 | 10 | import pylink 11 | 12 | # Connect to the tracker 13 | tk = pylink.EyeLink('100.1.1.1') 14 | 15 | # Open an EDF data file on the Host PC 16 | tk.openDataFile('ev_test2.edf') 17 | 18 | # Put the tracker in offline mode before we change tracking parameters 19 | tk.setOfflineMode() 20 | 21 | # Set sample rate to 1000 Hz 22 | tk.sendCommand('sample_rate 1000') 23 | 24 | # Make all types of event data available over the link 25 | event_flgs = 'LEFT,RIGHT,FIXATION,FIXUPDATE,SACCADE,BLINK,BUTTON,INPUT' 26 | tk.sendCommand(f'link_event_filter = {event_flgs}') 27 | 28 | # Open an SDL window for calibration 29 | pylink.openGraphics() 30 | 31 | # Set up the camera and calibrate the tracker 32 | tk.doTrackerSetup() 33 | 34 | # Put tracker in idle/offline mode before we start recording 35 | tk.setOfflineMode() 36 | 37 | # Start recording 38 | error = tk.startRecording(1, 1, 1, 1) 39 | 40 | # Wait for the block start event to arrive, give a warning 41 | # if no event or sample is available 42 | block_start = 
tk.waitForBlockStart(100, 1, 1) 43 | if block_start == 0: 44 | print("ERROR: No link data received!") 45 | 46 | # Check eye availability; 0-left, 1-right, 2-binocular 47 | # read data from the right eye if tracking in binocular mode 48 | eye_to_read = tk.eyeAvailable() 49 | if eye_to_read == 2: 50 | eye_to_read = 1 51 | 52 | # Current tracker time 53 | t_start = tk.trackerTime() 54 | while True: 55 | # Break after 5 seconds have elapsed 56 | if tk.trackerTime() - t_start > 5000: 57 | break 58 | 59 | # Retrieve the oldest event in the buffer 60 | dt = tk.getNextData() 61 | if dt == pylink.ENDSACC: 62 | ev = tk.getFloatData() 63 | # Look for right eye events only; 0-left, 1-right 64 | if ev.getEye() == eye_to_read: 65 | print('ENDSACC Event: \n', 66 | 'Amplitude', ev.getAmplitude(), '\n', 67 | 'Angle', ev.getAngle(), '\n', 68 | 'AverageVelocity', ev.getAverageVelocity(), '\n', 69 | 'PeakVelocity', ev.getPeakVelocity(), '\n', 70 | 'StartTime', ev.getStartTime(), '\n', 71 | 'StartGaze', ev.getStartGaze(), '\n', 72 | 'StartHREF', ev.getStartHREF(), '\n', 73 | 'StartPPD', ev.getStartPPD(), '\n', 74 | 'StartVelocity', ev.getStartVelocity(), '\n', 75 | 'EndTime', ev.getEndTime(), '\n', 76 | 'EndGaze', ev.getEndGaze(), '\n', 77 | 'EndHREF', ev.getEndHREF(), '\n', 78 | 'EndPPD', ev.getEndPPD(), '\n', 79 | 'EndVelocity', ev.getEndVelocity(), '\n', 80 | 'Eye', ev.getEye(), '\n', 81 | 'Time', ev.getTime(), '\n', 82 | 'Type', ev.getType(), '\n') 83 | 84 | # Stop recording 85 | tk.stopRecording() 86 | 87 | # Close the EDF data file on the Host 88 | tk.closeDataFile() 89 | 90 | # Download the EDF data file from Host 91 | tk.receiveDataFile('ev_test2.edf', 'ev_test2.edf') 92 | 93 | # Close the link to the tracker 94 | tk.close() 95 | 96 | # Close the window 97 | pylink.closeGraphics() 98 | -------------------------------------------------------------------------------- /example_scripts/ch04_pylink/free_viewing.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: free_viewing.py 4 | # Author: Zhiguo Wang 5 | # Date: 3/18/2020 6 | # 7 | # Description: 8 | # A free-viewing task implemented in Pygame. 
9 | # Press any key to terminate a trial 10 | 11 | import os 12 | import random 13 | import pylink 14 | import pygame 15 | from pygame.locals import * 16 | 17 | # Screen resolution 18 | SCN_W, SCN_H = (1280, 800) 19 | 20 | # Initialize Pygame 21 | pygame.init() 22 | 23 | # Step 1: Connect to the tracker 24 | tk = pylink.EyeLink('100.1.1.1') 25 | 26 | # Step 2: open an EDF data file on the EyeLink Host PC 27 | tk.openDataFile('freeview.edf') 28 | # Optional file header 29 | tk.sendCommand("add_file_preamble_text 'Free Viewing Task'") 30 | 31 | # Step 3: Set tracking parameters, e.g., sampling rate 32 | # 33 | # Put the tracker in offline mode before we change its parameters 34 | tk.setOfflineMode() 35 | 36 | # Set the sampling rate to 1000 Hz 37 | tk.sendCommand("sample_rate 1000") 38 | 39 | # Send screen resolution to the tracker 40 | tk.sendCommand(f'screen_pixel_coords = 0 0 {SCN_W-1} {SCN_H-1}') 41 | 42 | # Record a DISPLAY_COORDS message to let Data Viewer know the 43 | # correct screen resolution to use when visualizing the data 44 | tk.sendMessage(f'DISPLAY_COORDS 0 0 {SCN_W - 1} {SCN_H - 1}') 45 | 46 | # Set the calibration type to 9-point (HV9) 47 | tk.sendCommand("calibration_type = HV9") 48 | 49 | # Step 4: open a Pygame window; then, call pylink.openGraphics() 50 | # to request Pylink to use this window for calibration 51 | pygame.display.set_mode((SCN_W, SCN_H), DOUBLEBUF | FULLSCREEN) 52 | pygame.mouse.set_visible(False) # hide the mouse cursor 53 | pylink.openGraphics() 54 | 55 | # Step 5: calibrate the tracker, then run through the trials 56 | tk.doTrackerSetup() 57 | 58 | # Parameters of all trials stored in a list 59 | t_pars = [ 60 | ['quebec.jpg', 'no_people'], 61 | ['woods.jpg', 'with_people'] 62 | ] 63 | 64 | # Define a function to group the lines of code that will be executed 65 | # in each trial 66 | def run_trial(params): 67 | ''' Run a trial 68 | 69 | params: image, condition in a list, 70 | e.g., ['quebec.jpg', 'no_people'] ''' 71 | 72 | # Unpacking the trial parameters 73 | pic, cond = params 74 | 75 | # Load the picture, scale the image to fill up the screen 76 | pic_path = os.path.join('images', pic) 77 | img = pygame.image.load(pic_path) 78 | img = pygame.transform.scale(img, (SCN_W, SCN_H)) 79 | 80 | # Record_status_message: show some info on the Host PC 81 | tk.sendCommand(f"record_status_message 'Picture: {pic}'") 82 | 83 | # Drift-check; re-calibrate if ESCAPE is pressed 84 | # parameters: x, y, draw_target, allow_setup 85 | tk.doDriftCorrect(int(SCN_W/2), int(SCN_H/2), 1, 1) 86 | 87 | # Start recording 88 | # parameters: file_event, file_sample, link_event, link_sample 89 | tk.startRecording(1, 1, 1, 1) 90 | # Wait for 100 ms to cache some samples 91 | pylink.msecDelay(100) 92 | 93 | # Present the image 94 | surf = pygame.display.get_surface() 95 | surf.blit(img, (0, 0)) 96 | pygame.display.flip() 97 | 98 | # Log a message to mark image onset 99 | tk.sendMessage('image_onset') 100 | 101 | # Log a '!V IMGLOAD' message to the EDF data file, so Data Viewer 102 | # knows where to find the image when visualizing the gaze data 103 | img_path = f'images/{pic}' 104 | tk.sendMessage(f'!V IMGLOAD FILL {img_path}') 105 | 106 | # Wait for a key response 107 | pygame.event.clear() # clear all cached events 108 | got_key = False 109 | while not got_key: 110 | for ev in pygame.event.get(): 111 | if ev.type == KEYDOWN: 112 | tk.sendMessage(f'Keypress {ev.key}') 113 | got_key = True 
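                    # --- An aside, not part of the original script ---
                    # ev.key is an integer keycode; if a readable key name is
                    # preferred in the EDF data file, pygame can translate it:
                    # tk.sendMessage(f'Keypress {pygame.key.name(ev.key)}')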
114 | 115 | # clear the screen 116 | surf.fill((128, 128, 128)) 117 | pygame.display.flip() 118 | 119 | # Log a message to mark image offset 120 | tk.sendMessage('image_offset') 121 | 122 | # stop recording 123 | tk.stopRecording() 124 | 125 | # Run through the trials in a random order 126 | random.shuffle(t_pars) 127 | for trial in t_pars: 128 | run_trial(trial) 129 | 130 | # Step 6: close the EDF data file and download it 131 | tk.closeDataFile() 132 | tk.receiveDataFile('freeview.edf', 'freeview.edf') 133 | 134 | # Step 7: close the link to the tracker and quit Pygame 135 | tk.close() 136 | pygame.quit() 137 | -------------------------------------------------------------------------------- /example_scripts/ch06_data_retrieval/PsychoPy_examples/gaze_trigger/gaze_trigger.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: gaze_trigger.py 4 | # Author: Zhiguo Wang 5 | # Date: 5/26/2021 6 | # 7 | # Description: 8 | # A gaze trigger implemented in PsychoPy 9 | 10 | import pylink 11 | from psychopy import visual, core, event, monitors 12 | from EyeLinkCoreGraphicsPsychoPy import EyeLinkCoreGraphicsPsychoPy 13 | from math import hypot 14 | 15 | # Connect to the tracker 16 | tk = pylink.EyeLink('100.1.1.1') 17 | 18 | # Open an EDF data file on the Host PC 19 | tk.openDataFile('psychopy.edf') 20 | 21 | # Put the tracker in offline mode before we change tracking parameters 22 | tk.setOfflineMode() 23 | 24 | # Make all types of eye events available over the link, especially the 25 | # FIXUPDATE event, which reports the current status of a fixation at 26 | # predefined intervals (default = 50 ms) 27 | event_flags = 'LEFT,RIGHT,FIXATION,FIXUPDATE,SACCADE,BLINK,BUTTON,INPUT' 28 | tk.sendCommand(f'link_event_filter = {event_flags}') 29 | 30 | # Screen resolution 31 | SCN_W, SCN_H = (1280, 800) 32 | 33 | # Open a PsychoPy window 34 | win = visual.Window((SCN_W, SCN_H), fullscr=False, units='pix') 35 | 36 | # Pass the display pixel coordinates (left, top, right, bottom) to the tracker 37 | coords = f"screen_pixel_coords = 0 0 {SCN_W - 1} {SCN_H - 1}" 38 | tk.sendCommand(coords) 39 | 40 | # Request Pylink to use the custom EyeLinkCoreGraphicsPsychoPy library 41 | # to draw calibration graphics (target, camera image, etc.) 
42 | genv = EyeLinkCoreGraphicsPsychoPy(tk, win) 43 | pylink.openGraphicsEx(genv) 44 | 45 | # Calibrate the tracker 46 | calib_msg = visual.TextStim(win, text='Press ENTER twice to calibrate') 47 | calib_msg.draw() 48 | win.flip() 49 | tk.doTrackerSetup() 50 | 51 | # Run 3 trials in a for-loop 52 | # in each trial, first show a fixation dot, wait for the participant 53 | # to gaze at the fixation dot, then present an image for 2 secs 54 | for i in range(3): 55 | # Prepare the fixation dot in memory 56 | fix = visual.GratingStim(win, tex='None', mask='circle', size=30.0) 57 | 58 | # Load the image 59 | img = visual.ImageStim(win, image='woods.jpg', size=(SCN_W, SCN_H)) 60 | 61 | # Put tracker in Offline mode before we start recording 62 | tk.setOfflineMode() 63 | 64 | # Start recording 65 | tk.startRecording(1, 1, 1, 1) 66 | 67 | # Wait for the block start event to arrive, give a warning 68 | # if no event or sample is available 69 | block_start = tk.waitForBlockStart(100, 1, 1) 70 | if block_start == 0: 71 | print("ERROR: No link data received!") 72 | 73 | # Check eye availability; 0-left, 1-right, 2-binocular 74 | # read data from the right eye if tracking in binocular mode 75 | eye_to_read = tk.eyeAvailable() 76 | if eye_to_read == 2: 77 | eye_to_read = 1 78 | 79 | # Show the fixation dot 80 | fix.draw() 81 | win.flip() 82 | 83 | # Gaze trigger 84 | # wait for gaze on the fixation dot (for a minimum of 300 ms) 85 | fix_dot_x, fix_dot_y = (SCN_W/2.0, SCN_H/2.0) 86 | triggered = False 87 | fixation_start_time = -32768 88 | while not triggered: 89 | # Check if any new events are available 90 | dt = tk.getNextData() 91 | if dt == pylink.FIXUPDATE: 92 | ev = tk.getFloatData() 93 | if ev.getEye() == eye_to_read: 94 | # 1 deg = ? pixels in the current fixation 95 | ppd_x, ppd_y = ev.getEndPPD() 96 | 97 | # Get the gaze error 98 | gaze_x, gaze_y = ev.getAverageGaze() 99 | gaze_error = hypot((gaze_x - fix_dot_x)/ppd_x, 100 | (gaze_y - fix_dot_y)/ppd_y) 101 | 102 | if gaze_error < 1.5: 103 | # Update fixation_start_time, following the first 104 | # FIXUPDATE event 105 | if fixation_start_time < 0: 106 | fixation_start_time = ev.getStartTime() 107 | else: 108 | # Break if the gaze is on the fixation dot 109 | # for > 300 ms 110 | if (ev.getEndTime() - fixation_start_time) >= 300: 111 | triggered = True 112 | else: 113 | fixation_start_time = -32768 114 | 115 | # Show the image for 2 secs 116 | img.draw() 117 | win.flip() 118 | core.wait(2.0) 119 | 120 | # Clear the screen 121 | win.color = (0, 0, 0) 122 | win.flip() 123 | core.wait(0.5) 124 | 125 | # Stop recording 126 | tk.stopRecording() 127 | 128 | # Close the EDF data file on the Host 129 | tk.closeDataFile() 130 | 131 | # Download the EDF data file from Host 132 | tk.receiveDataFile('psychopy.edf', 'psychopy.edf') 133 | 134 | # Close the link to the tracker 135 | tk.close() 136 | 137 | # Close the graphics 138 | win.close() 139 | core.quit() 140 | -------------------------------------------------------------------------------- /example_scripts/ch03_pygame/posner_cueing.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: posner_cueing.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/7/2021 6 | # 7 | # Description: 8 | # A Posner cueing task implemented in Pygame 9 | 10 | import random 11 | import pygame 12 | import sys 13 | from pygame import display, draw, Rect, time, event, key, mouse 14 | from pygame.locals import * 15 | 16 | # Set a few constants 17 | sz = 90 # size 
of the placeholder 18 | colors = {'gray': (128, 128, 128), 19 | 'white': (255, 255, 255), 20 | 'black': (0, 0, 0)} 21 | pos = {'left': (212, 384), 22 | 'center': (512, 384), 23 | 'right': (812, 384)} 24 | 25 | # List of all unique trials, [cue_pos, tar_pos, isi, cueing, cor_key] 26 | trials = [] 27 | for cue_pos in ['left', 'right']: 28 | for tar_pos in ['left', 'right']: 29 | for isi in [0, 100, 300, 700]: 30 | if cue_pos == tar_pos: 31 | cueing = 'cued' 32 | else: 33 | cueing = 'uncued' 34 | if tar_pos == 'left': 35 | cor_key = 'z' 36 | else: 37 | cor_key = '/' 38 | trials.append([cue_pos, tar_pos, isi, cueing, cor_key]) 39 | 40 | 41 | def draw_frame(frame, trial_pars): 42 | ''' Draw the possible screens. 43 | 44 | frame -- which frame to draw, e.g., 'fix', 'cue', 'target' 45 | trial_pars -- parameters, [cue_pos, tar_pos, isi, cueing, cor_key]''' 46 | 47 | # Unpack the trial parameters 48 | cue_pos, tar_pos, isi, cueing, cor_key = trial_pars 49 | 50 | # Clear the screen and fill it with black 51 | win.fill(colors['black']) 52 | 53 | # The placeholders are visible on all screens 54 | # Here, 'pos' is a dictionary; 55 | # we retrieve both the key and value pairs in a for-loop 56 | for key, (x, y) in pos.items(): 57 | # Draw the placeholder 58 | draw.rect(win, colors['gray'], Rect(x - sz/2, y - sz/2, sz, sz), 1) 59 | 60 | # The fixation cross is visible on all screens 61 | if key == 'center': 62 | draw.line(win, colors['gray'], (x - 20, y), (x + 20, y), 3) 63 | draw.line(win, colors['gray'], (x, y - 20), (x, y + 20), 3) 64 | 65 | # Draw the fixation screen-- three placeholders with a cross 66 | if frame == 'fix': 67 | pass 68 | 69 | # Draw the cue (a bright box--a Rect) 70 | if frame == 'cue': 71 | c_x, c_y = pos[cue_pos] # coordinates of the cue 72 | draw.rect(win, colors['white'], Rect(c_x - sz/2, c_y - sz/2, 73 | sz, sz), 5) 74 | 75 | # Draw the target (a filled white disk) 76 | if frame == 'target': 77 | draw.circle(win, colors['white'], pos[tar_pos], 20) 78 | 79 | display.flip() 80 | 81 | 82 | def run_trial(trial_pars, subj_info, data_file): 83 | ''' Run a single trial. 
84 | 85 | trial_pars -- a list specifying trial parameters, 86 | [cue_pos, tar_pos, isi, cueing, cor_key] 87 | subj_info -- info about the subject [id, name, age] 88 | data_file -- an open file to save the trial data.''' 89 | 90 | # Show the fixation then wait for 1000 ms 91 | draw_frame('fix', trial_pars) 92 | time.wait(1000) 93 | 94 | # Show the cue for 100 ms 95 | draw_frame('cue', trial_pars) 96 | time.wait(100) 97 | 98 | # Inter-stimulus interval (ISI) 99 | draw_frame('fix', trial_pars) 100 | time.wait(trial_pars[2]) 101 | 102 | # Show the target and register a keypress response 103 | draw_frame('target', trial_pars) 104 | tar_onset = time.get_ticks() 105 | tar_resp = -32768 # response time 106 | resp_key = -32768 # key pressed 107 | 108 | # Check for key presses 109 | time_out = False 110 | got_key = False 111 | event.clear() # clear buffered events 112 | while not (time_out or got_key): 113 | # Check for time out (1500 ms) 114 | if time.get_ticks() - tar_onset > 1500: 115 | time_out = True 116 | 117 | # Check if any key has been pressed 118 | for ev in event.get(): 119 | if ev.type == KEYDOWN: 120 | if ev.key in [K_z, K_SLASH]: 121 | tar_resp = time.get_ticks() 122 | resp_key = key.name(ev.key) 123 | got_key = True 124 | 125 | # write data to file 126 | trial_data = subj_info + trial_pars + [tar_onset, tar_resp, resp_key] 127 | trial_data = map(str, trial_data) 128 | data_file.write(','.join(trial_data) + '\n') 129 | 130 | # ITI (inter-trial interval) 131 | draw_frame('fix', trial_pars) 132 | time.wait(1500) 133 | 134 | # -- Real experiment starts from here -- 135 | # Get subject info from the Python shell 136 | subj_id = input('Subject ID (e.g., 01): ') 137 | subj_age = input('Subject Age: ') 138 | subj_info = [subj_id, subj_age] 139 | 140 | # Open a CSV file to store the data 141 | d_file = open('d_{}.csv'.format(subj_info[0]), 'w') 142 | 143 | # Open a window; add the FULLSCREEN flag for precise timing 144 | win = display.set_mode((1024, 768), HWSURFACE | DOUBLEBUF) 145 | # Hide the mouse cursor 146 | mouse.set_visible(False) 147 | 148 | # Randomly shuffle the trial list and test them one by one 149 | test_trials = trials[:]*1 # how many trials to test 150 | random.shuffle(test_trials) # randomize 151 | for pars in test_trials: 152 | run_trial(pars, subj_info, d_file) 153 | 154 | # Close the data file 155 | d_file.close() 156 | 157 | # Quit Pygame 158 | pygame.quit() 159 | sys.exit() 160 | -------------------------------------------------------------------------------- /example_scripts/ch05_data_viewer/PsychoPy_examples/video_task/video_task.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: video_task.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/6/2021 6 | # 7 | # Description: 8 | # Play video and record eye movements in PsychoPy 9 | 10 | import pylink 11 | import os 12 | import random 13 | from psychopy import visual, core, event, monitors 14 | from EyeLinkCoreGraphicsPsychoPy import EyeLinkCoreGraphicsPsychoPy 15 | from psychopy.constants import FINISHED 16 | 17 | # Screen resolution 18 | SCN_W, SCN_H = (1280, 800) 19 | 20 | # Step 1: Connect to the tracker 21 | tk = pylink.EyeLink('100.1.1.1') 22 | 23 | # Step 2: Open an EDF data file on the Host 24 | tk.openDataFile('video.edf') 25 | # Add preamble text (file header) 26 | tk.sendCommand("add_file_preamble_text 'Movie playback demo'") 27 | 28 | # Step 3: Set up tracking parameters 29 | # 30 | # put the tracker in idle mode before we change its 
parameters 31 | tk.setOfflineMode() 32 | 33 | # Sample rate, 250, 500, 1000, or 2000 (depending on the tracker models, 34 | # not all sample rate options are supported) 35 | tk.sendCommand('sample_rate 500') 36 | 37 | # Pass screen resolution to the tracker 38 | tk.sendCommand(f"screen_pixel_coords = 0 0 {SCN_W-1} {SCN_H-1}") 39 | 40 | # Send a DISPLAY_COORDS message so Data Viewer knows the correct screen size 41 | tk.sendMessage(f"DISPLAY_COORDS = 0 0 {SCN_W-1} {SCN_H-1}") 42 | 43 | # Choose a calibration type, H3, HV3, HV5, HV13 (HV = horizontal/vertical) 44 | tk.sendCommand("calibration_type = HV9") 45 | 46 | # Step 4: Open a window for graphics and calibration 47 | # 48 | # Create a monitor object to store monitor information 49 | customMon = monitors.Monitor('demoMon', width=35, distance=65) 50 | 51 | # Open a PsychoPy window 52 | win = visual.Window((SCN_W, SCN_H), fullscr=False, 53 | monitor=customMon, units='pix') 54 | 55 | # Request Pylink to use the PsychoPy window for calibration 56 | graphics = EyeLinkCoreGraphicsPsychoPy(tk, win) 57 | pylink.openGraphicsEx(graphics) 58 | 59 | # Step 5: Calibrate the tracker, and run through all the trials 60 | calib_prompt = "Press ENTER to calibrate the tracker" 61 | calib_msg = visual.TextStim(win, text=calib_prompt, color='white') 62 | calib_msg.draw() 63 | win.flip() 64 | 65 | # Calibrate the tracker 66 | tk.doTrackerSetup() 67 | 68 | # Step 6: Run through a couple of trials 69 | # put the videos we would like to play in a list 70 | trials = [ 71 | ['t1', 'driving.mp4'], 72 | ['t2', 'driving.mp4'] 73 | ] 74 | 75 | 76 | # Here, we define a helper function to group the code executed on each trial 77 | def run_trial(pars): 78 | """ pars corresponds to a row in the trial list""" 79 | 80 | # Retrieve parameters from the trial list 81 | trial_num, movie_file = pars 82 | 83 | # Load the video to display 84 | mov = visual.MovieStim3(win, filename=movie_file, size=(960, 540)) 85 | 86 | # Take the tracker offline 87 | tk.setOfflineMode() 88 | 89 | # Send the standard "TRIALID" message to mark the start of a trial 90 | tk.sendMessage(f"TRIALID {trial_num} {movie_file}") 91 | 92 | # Record_status_message: show some info on the Host PC 93 | msg = f"record_status_message 'Movie File: {movie_file}'" 94 | tk.sendCommand(msg) 95 | 96 | # Drift check/correction; params: x, y, draw_target, allow_setup 97 | tk.doDriftCorrect(int(SCN_W/2.0), int(SCN_H/2.0), 1, 1) 98 | 99 | # Put the tracker in idle mode before we start recording 100 | tk.setOfflineMode() 101 | 102 | # Start recording 103 | # params: file_sample, file_event, link_sample, link_event (1-yes, 0-no) 104 | tk.startRecording(1, 1, 1, 1) 105 | 106 | # Wait for 100 ms to cache some samples 107 | pylink.msecDelay(100) 108 | 109 | # The size of the video 110 | mo_width, mo_height = mov.size 111 | 112 | # play the video till the end 113 | frame_n = 0 114 | prev_frame_timestamp = mov.getCurrentFrameTime() 115 | while mov.status is not FINISHED: 116 | # draw a movie frame and flip the video buffer 117 | mov.draw() 118 | win.flip() 119 | 120 | # if a new frame is drawn, check frame timestamp and 121 | # send a VFRAME message 122 | current_frame_timestamp = mov.getCurrentFrameTime() 123 | if current_frame_timestamp != prev_frame_timestamp: 124 | frame_n += 1 125 | # send a message to mark the onset of each video frame 126 | tk.sendMessage(f'Video_Frame: {frame_n}') 127 | # VFRAME message: "!V VFRAME frame_num movie_pos_x, 128 | # movie_pos_y, path_to_movie_file" 129 | x = int(SCN_W/2.0 - mo_width/2.0) 130 | y = int(SCN_H/2.0 - mo_height/2.0) 131 | path_to_movie = os.path.join('..', movie_file) 132 | msg = f"!V VFRAME {frame_n} {x} {y} {path_to_movie}" 133 | tk.sendMessage(msg) 
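            # --- An aside, not part of the original script ---
            # The x, y sent in the VFRAME message are the top-left corner of
            # the movie frame in display pixel coordinates; Data Viewer uses
            # them to position each frame when replaying gaze over the video.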
134 | prev_frame_timestamp = current_frame_timestamp 135 | 136 | # Send a message to mark video playback end 137 | tk.sendMessage("Video_terminates") 138 | 139 | # Clear the subject display 140 | win.color = (0, 0, 0) 141 | win.flip() 142 | 143 | # Stop recording 144 | tk.stopRecording() 145 | 146 | # Send a 'TRIAL_RESULT' message to mark the end of the trial 147 | tk.sendMessage('TRIAL_RESULT') 148 | 149 | # Run a block of 2 trials, in random order 150 | test_list = trials[:] 151 | random.shuffle(test_list) 152 | for trial in test_list: 153 | run_trial(trial) 154 | 155 | # Step 7: Close the EDF data file 156 | pylink.pumpDelay(100) # wait for 100 ms to catch session end events 157 | tk.closeDataFile() 158 | 159 | # Step 8: Download EDF file to a local folder ('edfData') 160 | msg = 'Downloading EDF file from the EyeLink Host PC ...' 161 | edfTransfer = visual.TextStim(win, text=msg, color='white') 162 | edfTransfer.draw() 163 | win.flip() 164 | 165 | if not os.path.exists('edfData'): 166 | os.mkdir('edfData') 167 | tk.receiveDataFile('video.edf', 'edfData/video_demo.edf') 168 | 169 | # Step 9: Close the connection to tracker, close graphics 170 | tk.close() 171 | win.close() 172 | core.quit() 173 | -------------------------------------------------------------------------------- /example_scripts/ch05_data_viewer/PsychoPy_examples/pursuit_task/pursuit_task.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: pursuit_task.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/6/2021 6 | # 7 | # Description: 8 | # A simple smooth pursuit task implemented in PsychoPy 9 | 10 | import pylink 11 | import os 12 | import random 13 | from psychopy import visual, core, event, monitors 14 | from EyeLinkCoreGraphicsPsychoPy import EyeLinkCoreGraphicsPsychoPy 15 | from math import sin, pi 16 | 17 | # Monitor resolution 18 | SCN_W, SCN_H = (1280, 800) 19 | 20 | # Step 1: Connect to the tracker 21 | tk = pylink.EyeLink('100.1.1.1') 22 | 23 | # Step 2: Open an EDF data file on the Host 24 | tk.openDataFile('pursuit.edf') 25 | # Add preamble text (file header) 26 | tk.sendCommand("add_file_preamble_text 'Smooth pursuit demo'") 27 | 28 | # Step 3: Setup Host parameters 29 | # put the tracker in idle mode before we change its parameters 30 | tk.setOfflineMode() 31 | pylink.msecDelay(50) 32 | 33 | # Sample rate, 250, 500, 1000, or 2000 (depending on the tracker models, 34 | # not all sample rate options are supported) 35 | tk.sendCommand('sample_rate 500') 36 | 37 | # Pass screen resolution to the tracker 38 | tk.sendCommand(f"screen_pixel_coords = 0 0 {SCN_W-1} {SCN_H-1}") 39 | 40 | # Send a DISPLAY_COORDS message so Data Viewer knows the correct screen size 41 | tk.sendMessage(f"DISPLAY_COORDS = 0 0 {SCN_W-1} {SCN_H-1}") 42 | 43 | # Choose a calibration type, H3, HV3, HV5, HV13 (HV = horizontal/vertical) 44 | tk.sendCommand("calibration_type = HV9") 45 | 46 | # Step 4: Open a window for graphics and calibration 47 | # 48 | # Create a monitor object to store monitor information 49 | customMon = monitors.Monitor('demoMon', width=35, distance=65) 50 | 51 | # Open a PsychoPy window 52 | win = visual.Window((SCN_W, SCN_H), fullscr=False, 53 | monitor=customMon, units='pix') 54 | 55 | # Request Pylink to use the PsychoPy window for calibration 56 | graphics = EyeLinkCoreGraphicsPsychoPy(tk, win) 57 | pylink.openGraphicsEx(graphics) 58 | 59 | # Step 5: prepare the pursuit target, the clock and the movement parameters 60 | target = visual.GratingStim(win, tex=None, mask='circle', size=25) 61 | pursuitClock = core.Clock() 62 | 63 | # Parameters for the sinusoidal movement pattern 64 | # [amp_x, amp_y, phase_x, phase_y, angular_freq_x, angular_freq_y] 65 | mov_pars = [ 66 | [300, 300, pi*3/2, 0, 1/8.0, 1/8.0], 67 | [300, 300, pi/2, 0, 1/8.0, 1/8.0] 68 | ] 69 | 
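# --- An explanatory aside, not part of the original script ---
# With equal amplitudes and frequencies on the two axes, the target follows
#   x(t) = amp_x * sin(2*pi*freq_x*t + phase_x)
#   y(t) = amp_y * sin(2*pi*freq_y*t + phase_y)
# A pi/2 phase offset between the axes traces a circle (unequal amplitudes
# give an ellipse); using pi*3/2 instead of pi/2 for phase_x reverses the
# direction of motion.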
EyeLinkCoreGraphicsPsychoPy(tk, win) 57 | pylink.openGraphicsEx(graphics) 58 | 59 | # Step 5: Prepare the pursuit target, the clock and the movement parameters 60 | target = visual.GratingStim(win, tex=None, mask='circle', size=25) 61 | pursuitClock = core.Clock() 62 | 63 | # Parameters for the sinusoidal movement pattern 64 | # [amp_x, amp_y, phase_x, phase_y, angular_freq_x, angular_freq_y] 65 | mov_pars = [ 66 | [300, 300, pi*3/2, 0, 1/8.0, 1/8.0], 67 | [300, 300, pi/2, 0, 1/8.0, 1/8.0] 68 | ] 69 | 70 | # Step 6: Calibrate the tracker 71 | calib_prompt = 'Press Enter twice to calibrate the tracker' 72 | calib_msg = visual.TextStim(win, text=calib_prompt, color='white', units='pix') 73 | calib_msg.draw() 74 | win.flip() 75 | 76 | # Calibrate the tracker 77 | tk.doTrackerSetup() 78 | 79 | 80 | # Step 7: Run through a couple of trials 81 | # Define a function to group the code that will be executed on each trial 82 | def run_trial(trial_duration, movement_pars): 83 | """ Run a smooth pursuit trial 84 | 85 | trial_duration: the duration of the pursuit movement 86 | movement_pars: [amp_x, amp_y, phase_x, phase_y, freq_x, freq_y] 87 | The following equation defines a sinusoidal movement pattern 88 | y(t) = amplitude * sin(2 * pi * frequency * t + phase) 89 | for circular or elliptic movements, the phase difference between the 90 | x and y directions should be pi/2 (its sign sets the direction).""" 91 | 92 | # Parse the movement pattern parameters 93 | amp_x, amp_y, phase_x, phase_y, freq_x, freq_y = movement_pars 94 | 95 | # Take the tracker offline 96 | tk.setOfflineMode() 97 | 98 | # Send the standard "TRIALID" message to mark the start of a trial 99 | tk.sendMessage("TRIALID") 100 | 101 | # record_status_message: show some info on the Host PC 102 | tk.sendCommand("record_status_message 'Pursuit demo'") 103 | 104 | # Drift check/correction; params: x, y, draw_target, allow_setup 105 | tar_x = amp_x*sin(phase_x) 106 | tar_y = amp_y*sin(phase_y) 107 | target.pos = (tar_x, tar_y) 108 | target.draw() 109 | win.flip() 110 | tk.doDriftCorrect(int(tar_x + SCN_W/2.0), int(SCN_H/2.0 - tar_y), 0, 1) 111 | 112 | # Put the tracker in idle mode before we start recording 113 | tk.setOfflineMode() 114 | 115 | # Start recording 116 | # params: file_sample, file_event, link_sample, link_event (1-yes, 0-no) 117 | tk.startRecording(1, 1, 1, 1) 118 | 119 | # Wait for 100 ms to cache some samples 120 | pylink.msecDelay(100) 121 | 122 | # Send a message to mark movement onset 123 | frame = 0 124 | while True: 125 | target.pos = (tar_x, tar_y) 126 | target.draw() 127 | win.flip() 128 | flip_time = core.getTime() 129 | frame += 1 130 | if frame == 1: 131 | tk.sendMessage('Movement_onset') 132 | move_start = core.getTime() 133 | else: 134 | _x = int(tar_x + SCN_W/2.0) 135 | _y = int(SCN_H/2.0 - tar_y) 136 | tar_msg = f'!V TARGET_POS target {_x}, {_y} 1 0' 137 | tk.sendMessage(tar_msg) 138 | 139 | time_elapsed = flip_time - move_start 140 | 141 | # update the target position 142 | tar_x = amp_x*sin(2 * pi * freq_x * time_elapsed + phase_x) 143 | tar_y = amp_y*sin(2 * pi * freq_y * time_elapsed + phase_y) 144 | 145 | # break if the time elapsed exceeds the trial duration 146 | if time_elapsed > trial_duration: 147 | break 148 | 149 | # clear the window 150 | win.color = (0, 0, 0) 151 | win.flip() 152 | 153 | # Stop recording 154 | tk.stopRecording() 155 | 156 | # Send trial variables to record in the EDF data file 157 | tk.sendMessage(f"!V TRIAL_VAR amp_x {amp_x:.2f}") 158 | tk.sendMessage(f"!V TRIAL_VAR amp_y {amp_y:.2f}") 159 |
tk.sendMessage(f"!V TRIAL_VAR phase_x {phase_x:.2f}") 160 | pylink.pumpDelay(2) # give the tracker a break 161 | tk.sendMessage(f"!V TRIAL_VAR phase_y {phase_y:.2f}") 162 | tk.sendMessage(f"!V TRIAL_VAR freq_x {freq_x:.2f}") 163 | tk.sendMessage(f"!V TRIAL_VAR freq_y {freq_y:.2f}") 164 | tk.sendMessage(f"!V TRIAL_VAR duration {trial_duration:.2f}") 165 | 166 | # Send a 'TRIAL_RESULT' message to mark the end of the trial 167 | tk.sendMessage('TRIAL_RESULT') 168 | 169 | # Run a block of 2 trials, in random order 170 | test_list = mov_pars[:] 171 | random.shuffle(test_list) 172 | for trial in test_list: 173 | run_trial(8.0, trial) 174 | 175 | # Step 8: Close the EDF data file and put the tracker in idle mode 176 | tk.setOfflineMode() # put the tracker in offline mode 177 | pylink.pumpDelay(100) # wait for 100 ms 178 | tk.closeDataFile() 179 | 180 | # Step 9: Download EDF file to a local folder ('edfData') 181 | msg = 'Downloading EDF file from the EyeLink Host PC ...' 182 | edf = visual.TextStim(win, text=msg, color='white') 183 | edf.draw() 184 | win.flip() 185 | 186 | if not os.path.exists('edfData'): 187 | os.mkdir('edfData') 188 | tk.receiveDataFile('pursuit.edf', 'edfData/pursuit_demo.edf') 189 | 190 | # Step 10: Close the connection to tracker, close graphics 191 | tk.close() 192 | win.close() 193 | core.quit() 194 | -------------------------------------------------------------------------------- /example_scripts/ch05_data_viewer/PsychoPy_examples/Stroop_task/Stroop_task.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: Stroop_task.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/6/2021 6 | # 7 | # Description: 8 | # A Stroop task implemented in PsychoPy 9 | 10 | import pylink 11 | import os 12 | import random 13 | from psychopy import visual, core, event, monitors 14 | from EyeLinkCoreGraphicsPsychoPy import EyeLinkCoreGraphicsPsychoPy 15 | 16 | # Monitor resolution 17 | SCN_W, SCN_H = (1280, 800) 18 | 19 | # Step 1: Connect to the tracker 20 | tk = pylink.EyeLink('100.1.1.1') 21 | 22 | # Step 2: Open an EDF data file on the Host 23 | tk.openDataFile('stroop.edf') 24 | # Add preamble text (file header) 25 | tk.sendCommand("add_file_preamble_text 'Stroop task demo'") 26 | 27 | # Step 3: Set up tracking parameters 28 | # 29 | # Put the tracker in idle mode before we change its parameters 30 | tk.setOfflineMode() 31 | 32 | # Sample rate, 250, 500, 1000, or 2000 (depending on the tracker model, 33 | # not all sample rate options are supported) 34 | tk.sendCommand('sample_rate 500') 35 | 36 | # Pass screen resolution to the tracker 37 | tk.sendCommand(f"screen_pixel_coords = 0 0 {SCN_W-1} {SCN_H-1}") 38 | 39 | # Send a DISPLAY_COORDS message so Data Viewer knows the correct screen size 40 | tk.sendMessage(f"DISPLAY_COORDS = 0 0 {SCN_W-1} {SCN_H-1}") 41 | 42 | # Choose a calibration type, H3, HV3, HV5, HV9, or HV13 (HV = horizontal/vertical) 43 | tk.sendCommand("calibration_type = HV9") 44 | 45 | # Step 4: Open a window for graphics and calibration 46 | # 47 | # Create a monitor object to store monitor information 48 | customMon = monitors.Monitor('demoMon', width=35, distance=65) 49 | 50 | # Open a PsychoPy window 51 | win = visual.Window((SCN_W, SCN_H), fullscr=False, 52 | monitor=customMon, units='pix') 53 | 54 | # Request Pylink to use the PsychoPy window for calibration 55 | graphics = EyeLinkCoreGraphicsPsychoPy(tk, win) 56 | pylink.openGraphicsEx(graphics) 57 | 58 | # Step 5: Calibrate the tracker 59 |
calib_prompt = "Press LEFT to RED\nRIGHT to BLEU\n\n ENTER to calibrate" 60 | calib_msg = visual.TextStim(win, text=calib_prompt, color='white') 61 | calib_msg.draw() 62 | win.flip() 63 | 64 | # Calibrate the tracker 65 | tk.doTrackerSetup() 66 | 67 | # Step 6: Run through all the trials 68 | # 69 | # Specify all possible experimental trials in a list; the columns are 70 | # 'text', 'text_color', 'correct_answer' and "congruency" 71 | my_trials = [ 72 | ['red', 'red', 'left', 'cong'], 73 | ['red', 'blue', 'right', 'incg'], 74 | ['blue', 'blue', 'right', 'cong'], 75 | ['blue', 'red', 'left', 'incg'] 76 | ] 77 | 78 | 79 | # For convenience, define a run_trial function to group 80 | # the lines of code repeatedly executed in each trial 81 | def run_trial(params): 82 | """ Run a single trial 83 | 84 | params: a list containing tiral parameters, e.g., 85 | ['red', 'red', 'left', 'cong']""" 86 | 87 | # Unpacking the parameters 88 | text, text_color, correct_answer, congruency = params 89 | 90 | # Prepare the stimuli 91 | word = visual.TextStim(win=win, text=text, font='Arial', 92 | height=100.0, color=text_color) 93 | 94 | # Take the tracker offline 95 | tk.setOfflineMode() 96 | 97 | # Send a "TRIALID" message to mark the start of a trial 98 | tk.sendMessage(f"TRIALID {text} {text_color} {congruency}") 99 | 100 | # Record_status_message : show some info on the Host PC 101 | msg = f"record_status_message 'Congruency-{congruency}'" 102 | tk.sendCommand(msg) 103 | 104 | # Drift check/correction, params, x, y, draw_target, allow_setup 105 | tk.doDriftCorrect(int(SCN_W/2.0), int(SCN_H/2.0), 1, 1) 106 | 107 | # Put the tracker in idle mode before we start recording 108 | tk.setOfflineMode() 109 | 110 | # Start recording 111 | # params: file_sample, file_event, link_sampe, link_event (1-yes, 0-no) 112 | tk.startRecording(1, 1, 1, 1) 113 | 114 | # Wait for 100 ms to cache some samples 115 | pylink.msecDelay(100) 116 | 117 | # Draw the target word on the screen 118 | word.draw() 119 | win.flip() 120 | # Record the onset time of the stimuli 121 | tar_onset = core.getTime() 122 | # Send a message to mark the onset of visual stimuli 123 | tk.sendMessage("stim_onset") 124 | 125 | # Save a screenshot to use as background graphics in Data Viewer 126 | if not os.path.exists('screenshots'): 127 | os.mkdir('screenshots') 128 | screenshot = f'screenshots/cond_{text}_{text_color}.jpg' 129 | win.getMovieFrame() 130 | win.saveMovieFrames(screenshot) 131 | 132 | # The command we used to take screenshots takes time to return 133 | # we need to provide a "time offset" in the IMGLOAD message, so 134 | # Data Viewer knows the correct onset time of the screen 135 | msg_offset = int((core.getTime() - tar_onset) * 1000) 136 | # Send an IMGLOAD message to let DV know which screenshot to load 137 | scn_shot = '../' + screenshot 138 | tk.sendMessage(f'{msg_offset} !V IMGLOAD FILL {scn_shot}') 139 | 140 | # Clear bufferred events (in PsychoPy), then wait for key presses 141 | event.clearEvents(eventType='keyboard') 142 | gotKey = False 143 | key_pressed, RT, ACC = ['None', 'None', 'None'] 144 | while not gotKey: 145 | keyp = event.getKeys(['left', 'right', 'escape']) 146 | if len(keyp) > 0: 147 | key_pressed = keyp[0] # which key was pressed 148 | RT = core.getTime() - tar_onset # response time 149 | # correct=1, incorrect=0 150 | ACC = int(key_pressed == correct_answer) 151 | 152 | # Send a message mark the key response 153 | tk.sendMessage(f"Key_resp {key_pressed}") 154 | gotKey = True 155 | 156 | # Clear the window at the end 
of a trial 157 | win.color = (0, 0, 0) 158 | win.flip() 159 | 160 | # Stop recording 161 | tk.stopRecording() 162 | 163 | # Send trial variables to record in the EDF data file 164 | tk.sendMessage(f"!V TRIAL_VAR word {text}") 165 | tk.sendMessage(f"!V TRIAL_VAR color {text_color}") 166 | tk.sendMessage(f"!V TRIAL_VAR congruency {congruency}") 167 | pylink.pumpDelay(2) # give the link a break 168 | tk.sendMessage(f"!V TRIAL_VAR key_pressed {key_pressed}") 169 | tk.sendMessage(f"!V TRIAL_VAR RT {round(RT * 1000)}") 170 | tk.sendMessage(f"!V TRIAL_VAR ACC {ACC}") 171 | 172 | # Send a 'TRIAL_RESULT' message to mark the end of the trial 173 | tk.sendMessage(f"TRIAL_RESULT {ACC}") 174 | 175 | # Run a block of 8 trials, in random order 176 | trials_to_test = my_trials[:]*2 177 | random.shuffle(trials_to_test) 178 | for trial in trials_to_test: 179 | run_trial(trial) 180 | 181 | # Step 7: Close the EDF data file 182 | tk.setOfflineMode() # Put tracker in offline mode 183 | pylink.pumpDelay(100) # wait for 100 ms 184 | tk.closeDataFile() 185 | 186 | # Step 8: Download EDF file to a local folder ('edfData') 187 | msg = 'Downloading EDF file from the EyeLink Host PC ...' 188 | edf = visual.TextStim(win, text=msg, color='white') 189 | edf.draw() 190 | win.flip() 191 | 192 | if not os.path.exists('edfData'): 193 | os.mkdir('edfData') 194 | tk.receiveDataFile('stroop.edf', 'edfData/stroop_demo.edf') 195 | 196 | # Step 9: Close the connection to tracker, close graphics 197 | tk.close() 198 | win.close() 199 | core.quit() 200 | -------------------------------------------------------------------------------- /example_scripts/ch07_advanced_topics/coregraphics_PsychoPy/EyeLinkCoreGraphicsPsychoPy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: EyeLinkCoreGraphicsPsychoPy.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/4/2021 6 | # 7 | # Description: 8 | # An EyeLink coregraphics library (calibration routine) 9 | # for PsychoPy experiments.
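#
# Usage (a minimal sketch based on the example scripts in this repository;
# the tracker IP address and window size below are placeholders):
#
#     tk = pylink.EyeLink('100.1.1.1')
#     win = visual.Window((1280, 800), fullscr=False, units='pix')
#     graphics = EyeLinkCoreGraphicsPsychoPy(tk, win)
#     pylink.openGraphicsEx(graphics)
#     tk.doTrackerSetup()  # calibration is now drawn in the PsychoPy window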
10 | 11 | import os 12 | import platform 13 | import array 14 | import string 15 | import pylink 16 | from psychopy import visual, event, core 17 | from math import sin, cos, pi 18 | from PIL import Image, ImageDraw 19 | from psychopy.sound import Sound 20 | 21 | 22 | class EyeLinkCoreGraphicsPsychoPy(pylink.EyeLinkCustomDisplay): 23 | def __init__(self, tracker, win): 24 | '''Initialize 25 | 26 | tracker: an EyeLink instance (connection) 27 | win: the PsychoPy window we use for calibration''' 28 | 29 | pylink.EyeLinkCustomDisplay.__init__(self) 30 | 31 | # background and target color 32 | self._backgroundColor = win.color 33 | self._foregroundColor = 'black' 34 | 35 | # window to use for calibration 36 | self._display = win 37 | # make the mouse cursor invisible 38 | self._display.mouseVisible = False 39 | 40 | # display width & height 41 | self._w, self._h = win.size 42 | 43 | # resolution fix for Mac retina displays 44 | if 'Darwin' in platform.system(): 45 | sys_cmd = 'system_profiler SPDisplaysDataType | grep Retina' 46 | is_ret = os.system(sys_cmd) 47 | if is_ret == 0: 48 | self._w = int(self._w / 2.0) 49 | self._h = int(self._h / 2.0) 50 | 51 | # store camera image pixels in an array 52 | self._imagebuffer = array.array('I') 53 | 54 | # store the color palette for camera image drawing 55 | self._pal = None 56 | 57 | # initial size of the camera image 58 | self._size = (384, 320) 59 | 60 | # initial mouse configuration 61 | self._mouse = event.Mouse(False) 62 | self.last_mouse_state = -1 63 | 64 | # camera image title 65 | self._msgHeight = self._size[1]/16.0 66 | self._title = visual.TextStim(self._display, '', 67 | wrapWidth=self._w, 68 | color=self._foregroundColor) 69 | 70 | # calibration target 71 | self._targetSize = self._w/64. 72 | self._tar = visual.Circle(self._display, 73 | size=self._targetSize, 74 | lineColor=self._foregroundColor, 75 | lineWidth=self._targetSize/2) 76 | 77 | # calibration sounds (beeps) 78 | self._target_beep = Sound('type.wav', stereo=True) 79 | self._error_beep = Sound('error.wav', stereo=True) 80 | self._done_beep = Sound('qbeep.wav', stereo=True) 81 | 82 | # a reference to the tracker connection 83 | self._tracker = tracker 84 | 85 | # for a clearer view we always enlarge the camera image 86 | self.imgResize = None 87 | 88 | def setup_cal_display(self): 89 | '''Set up the calibration display ''' 90 | 91 | self._display.clearBuffer() 92 | 93 | def clear_cal_display(self): 94 | '''Clear the calibration display''' 95 | 96 | self._display.color = self._backgroundColor 97 | self._display.flip() 98 | 99 | def exit_cal_display(self): 100 | '''Exit the calibration/validation routine''' 101 | 102 | self.clear_cal_display() 103 | 104 | def record_abort_hide(self): 105 | '''This function is called if aborted''' 106 | 107 | pass 108 | 109 | def erase_cal_target(self): 110 | '''Erase the target''' 111 | 112 | self.clear_cal_display() 113 | 114 | def draw_cal_target(self, x, y): 115 | '''Draw the target''' 116 | 117 | self.clear_cal_display() 118 | 119 | # target position 120 | xVis = (x - self._w/2.0) 121 | yVis = (self._h/2.0 - y) 122 | 123 | # draw the calibration target 124 | self._tar.pos = (xVis, yVis) 125 | self._tar.draw() 126 | self._display.flip() 127 | 128 | def play_beep(self, beepid): 129 | ''' Play a sound during calibration/drift-correction.''' 130 | 131 | if beepid in [pylink.CAL_TARG_BEEP, pylink.DC_TARG_BEEP]: 132 | self._target_beep.play() 133 | elif beepid in [pylink.CAL_ERR_BEEP, pylink.DC_ERR_BEEP]: 134 | self._error_beep.play() 135 | elif 
beepid in [pylink.CAL_GOOD_BEEP, pylink.DC_GOOD_BEEP]: 136 | self._done_beep.play() 137 | core.wait(0.4) 138 | 139 | def getColorFromIndex(self, colorindex): 140 | '''Retrieve the colors for camera image elements, e.g., crosshair''' 141 | 142 | if colorindex == pylink.CR_HAIR_COLOR: 143 | return (255, 255, 255) 144 | elif colorindex == pylink.PUPIL_HAIR_COLOR: 145 | return (255, 255, 255) 146 | elif colorindex == pylink.PUPIL_BOX_COLOR: 147 | return (0, 255, 0) 148 | elif colorindex == pylink.SEARCH_LIMIT_BOX_COLOR: 149 | return (255, 0, 0) 150 | elif colorindex == pylink.MOUSE_CURSOR_COLOR: 151 | return (255, 0, 0) 152 | else: 153 | return (128, 128, 128) 154 | 155 | def draw_line(self, x1, y1, x2, y2, colorindex): 156 | '''Draw a line ''' 157 | 158 | color = self.getColorFromIndex(colorindex) 159 | 160 | # scale the coordinates 161 | w, h = self._img.im.size 162 | x1 = int(x1 / 192 * w) 163 | x2 = int(x2 / 192 * w) 164 | y1 = int(y1 / 160 * h) 165 | y2 = int(y2 / 160 * h) 166 | 167 | # draw the line 168 | if not any([x < 0 for x in [x1, x2, y1, y2]]): 169 | self._img.line([(x1, y1), (x2, y2)], color) 170 | 171 | def draw_lozenge(self, x, y, width, height, colorindex): 172 | ''' draw a lozenge to show the defined search limits ''' 173 | 174 | color = self.getColorFromIndex(colorindex) 175 | 176 | # scale the coordinates 177 | w, h = self._img.im.size 178 | x = int(x / 192 * w) 179 | y = int(y / 160 * h) 180 | width = int(width / 192 * w) 181 | height = int(height / 160 * h) 182 | 183 | # draw the lozenge 184 | if width > height: 185 | rad = int(height / 2.) 186 | if rad == 0: 187 | return 188 | else: 189 | self._img.line([(x + rad, y), (x + width - rad, y)], color) 190 | self._img.line([(x + rad, y + height), 191 | (x + width - rad, y + height)], color) 192 | self._img.arc([x, y, x + rad*2, y + rad*2], 90, 270, color) 193 | self._img.arc([x + width - rad*2, y, x + width, y + height], 194 | 270, 90, color) 195 | else: 196 | rad = int(width / 2.) 
197 | if rad == 0: 198 | return 199 | else: 200 | self._img.line([(x, y + rad), (x, y + height - rad)], color) 201 | self._img.line([(x + width, y + rad), 202 | (x + width, y + height - rad)], color) 203 | self._img.arc([x, y, x + rad*2, y + rad*2], 180, 360, color) 204 | self._img.arc([x, y + height-rad*2, x + rad*2, y + height], 205 | 0, 180, color) 206 | 207 | def get_mouse_state(self): 208 | '''Get the current mouse position and status''' 209 | 210 | w, h = self._display.size 211 | X, Y = self._mouse.getPos() 212 | 213 | # scale the mouse position, so the cursor stays on the camera image 214 | mX = (X + w/2.0)/w*self._size[0]/2.0 215 | mY = (h/2.0 - Y)/h*self._size[1]/2.0 216 | 217 | state = self._mouse.getPressed()[0] 218 | 219 | return ((mX, mY), state) 220 | 221 | def get_input_key(self): 222 | '''This function is repeatedly pooled to check 223 | keyboard events''' 224 | 225 | ky = [] 226 | for keycode, modifier in event.getKeys(modifiers=True): 227 | k = pylink.JUNK_KEY 228 | if keycode == 'f1': k = pylink.F1_KEY 229 | elif keycode == 'f2': k = pylink.F2_KEY 230 | elif keycode == 'f3': k = pylink.F3_KEY 231 | elif keycode == 'f4': k = pylink.F4_KEY 232 | elif keycode == 'f5': k = pylink.F5_KEY 233 | elif keycode == 'f6': k = pylink.F6_KEY 234 | elif keycode == 'f7': k = pylink.F7_KEY 235 | elif keycode == 'f8': k = pylink.F8_KEY 236 | elif keycode == 'f9': k = pylink.F9_KEY 237 | elif keycode == 'f10': k = pylink.F10_KEY 238 | elif keycode == 'pageup': k = pylink.PAGE_UP 239 | elif keycode == 'pagedown': k = pylink.PAGE_DOWN 240 | elif keycode == 'up': k = pylink.CURS_UP 241 | elif keycode == 'down': k = pylink.CURS_DOWN 242 | elif keycode == 'left': k = pylink.CURS_LEFT 243 | elif keycode == 'right': k = pylink.CURS_RIGHT 244 | elif keycode == 'backspace': k = ord('\b') 245 | elif keycode == 'return': k = pylink.ENTER_KEY 246 | elif keycode == 'space': k = ord(' ') 247 | elif keycode == 'escape': k = 27 248 | elif keycode == 'tab': k = ord('\t') 249 | elif keycode in string.ascii_letters: 250 | k = ord(keycode) 251 | elif k == pylink.JUNK_KEY: 252 | k = 0 253 | 254 | # plus & minus signs for CR adjustment 255 | if keycode in ['num_add', 'equal']: 256 | k = ord('+') 257 | if keycode in ['num_subtract', 'minus']: 258 | k = ord('-') 259 | 260 | # handles key modifier 261 | if modifier['alt'] is True: mod = 256 262 | elif modifier['ctrl'] is True: mod = 64 263 | elif modifier['shift'] is True: mod = 1 264 | else: 265 | mod = 0 266 | 267 | ky.append(pylink.KeyInput(k, mod)) 268 | 269 | return ky 270 | 271 | def exit_image_display(self): 272 | '''Clear the camera image''' 273 | 274 | self.clear_cal_display() 275 | self._display.flip() 276 | 277 | def alert_printf(self, msg): 278 | '''Print error messages.''' 279 | 280 | print("Error: " + msg) 281 | 282 | def setup_image_display(self, width, height): 283 | ''' set up the camera image 284 | 285 | return 1 to show high-resolution camera images''' 286 | 287 | self.last_mouse_state = -1 288 | self._size = (width, height) 289 | 290 | return 1 291 | 292 | def image_title(self, text): 293 | '''Draw title text below the camera image''' 294 | 295 | self._title.text = text 296 | 297 | def draw_image_line(self, width, line, totlines, buff): 298 | '''Display image pixel by pixel, line by line''' 299 | 300 | for i in range(width): 301 | try: 302 | self._imagebuffer.append(self._pal[buff[i]]) 303 | except: 304 | pass 305 | 306 | if line == totlines: 307 | bufferv = self._imagebuffer.tostring() 308 | img = Image.frombytes("RGBX", (width, totlines), 
bufferv) 309 | self._img = ImageDraw.Draw(img) 310 | # draw the cross hairs 311 | self.draw_cross_hair() 312 | # scale the camera image 313 | self.imgResize = img.resize((width*2, totlines*2)) 314 | cam_img = visual.ImageStim(self._display, 315 | image=self.imgResize, 316 | units='pix') 317 | cam_img.draw() 318 | # draw the camera image title 319 | self._title.pos = (0, - totlines - self._msgHeight) 320 | self._title.draw() 321 | self._display.flip() 322 | 323 | # clear the camera image buffer 324 | self._imagebuffer = array.array('I') 325 | 326 | def set_image_palette(self, r, g, b): 327 | '''Given a set of RGB colors, create a list of 24bit numbers 328 | representing the color palette. 329 | For instance, RGB of (1,64,127) would be saved as 82047, 330 | or 00000001 01000000 011111111''' 331 | 332 | self._imagebuffer = array.array('I') 333 | 334 | sz = len(r) 335 | i = 0 336 | self._pal = [] 337 | while i < sz: 338 | rf = int(b[i]) 339 | gf = int(g[i]) 340 | bf = int(r[i]) 341 | self._pal.append((rf << 16) | (gf << 8) | (bf)) 342 | i = i+1 343 | -------------------------------------------------------------------------------- /example_scripts/ch05_data_viewer/PsychoPy_examples/Stroop_task/EyeLinkCoreGraphicsPsychoPy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: EyeLinkCoreGraphicsPsychoPy.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/4/2021 6 | # 7 | # Description: 8 | # An EyeLink coregraphics library (calibration routine) 9 | # for PsychoPy experiments. 10 | 11 | import os 12 | import platform 13 | import array 14 | import string 15 | import pylink 16 | from psychopy import visual, event, core 17 | from math import sin, cos, pi 18 | from PIL import Image, ImageDraw 19 | from psychopy.sound import Sound 20 | 21 | 22 | class EyeLinkCoreGraphicsPsychoPy(pylink.EyeLinkCustomDisplay): 23 | def __init__(self, tracker, win): 24 | '''Initialize 25 | 26 | tracker: an EyeLink instance (connection) 27 | win: the PsychoPy window we use for calibration''' 28 | 29 | pylink.EyeLinkCustomDisplay.__init__(self) 30 | 31 | # background and target color 32 | self._backgroundColor = win.color 33 | self._foregroundColor = 'black' 34 | 35 | # window to use for calibration 36 | self._display = win 37 | # make the mouse cursor invisible 38 | self._display.mouseVisible = False 39 | 40 | # display width & height 41 | self._w, self._h = win.size 42 | 43 | # resolution fix for Mac retina displays 44 | if 'Darwin' in platform.system(): 45 | sys_cmd = 'system_profiler SPDisplaysDataType | grep Retina' 46 | is_ret = os.system(sys_cmd) 47 | if is_ret == 0: 48 | self._w = int(self._w / 2.0) 49 | self._h = int(self._h / 2.0) 50 | 51 | # store camera image pixels in an array 52 | self._imagebuffer = array.array('I') 53 | 54 | # store the color palette for camera image drawing 55 | self._pal = None 56 | 57 | # initial size of the camera image 58 | self._size = (384, 320) 59 | 60 | # initial mouse configuration 61 | self._mouse = event.Mouse(False) 62 | self.last_mouse_state = -1 63 | 64 | # camera image title 65 | self._msgHeight = self._size[1]/16.0 66 | self._title = visual.TextStim(self._display, '', 67 | wrapWidth=self._w, 68 | color=self._foregroundColor) 69 | 70 | # calibration target 71 | self._targetSize = self._w/64. 
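        # (the calibration target below is a Circle stimulus sized to
        #  roughly 1/64 of the screen width; only its outline color and
        #  line width are set here, and the line width scales with the
        #  target size)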
72 | self._tar = visual.Circle(self._display, 73 | size=self._targetSize, 74 | lineColor=self._foregroundColor, 75 | lineWidth=self._targetSize/2) 76 | 77 | # calibration sounds (beeps) 78 | self._target_beep = Sound('type.wav', stereo=True) 79 | self._error_beep = Sound('error.wav', stereo=True) 80 | self._done_beep = Sound('qbeep.wav', stereo=True) 81 | 82 | # a reference to the tracker connection 83 | self._tracker = tracker 84 | 85 | # for a clearer view we always enlarge the camera image 86 | self.imgResize = None 87 | 88 | def setup_cal_display(self): 89 | '''Set up the calibration display ''' 90 | 91 | self._display.clearBuffer() 92 | 93 | def clear_cal_display(self): 94 | '''Clear the calibration display''' 95 | 96 | self._display.color = self._backgroundColor 97 | self._display.flip() 98 | 99 | def exit_cal_display(self): 100 | '''Exit the calibration/validation routine''' 101 | 102 | self.clear_cal_display() 103 | 104 | def record_abort_hide(self): 105 | '''This function is called if aborted''' 106 | 107 | pass 108 | 109 | def erase_cal_target(self): 110 | '''Erase the target''' 111 | 112 | self.clear_cal_display() 113 | 114 | def draw_cal_target(self, x, y): 115 | '''Draw the target''' 116 | 117 | self.clear_cal_display() 118 | 119 | # target position 120 | xVis = (x - self._w/2.0) 121 | yVis = (self._h/2.0 - y) 122 | 123 | # draw the calibration target 124 | self._tar.pos = (xVis, yVis) 125 | self._tar.draw() 126 | self._display.flip() 127 | 128 | def play_beep(self, beepid): 129 | ''' Play a sound during calibration/drift-correction.''' 130 | 131 | if beepid in [pylink.CAL_TARG_BEEP, pylink.DC_TARG_BEEP]: 132 | self._target_beep.play() 133 | elif beepid in [pylink.CAL_ERR_BEEP, pylink.DC_ERR_BEEP]: 134 | self._error_beep.play() 135 | elif beepid in [pylink.CAL_GOOD_BEEP, pylink.DC_GOOD_BEEP]: 136 | self._done_beep.play() 137 | core.wait(0.4) 138 | 139 | def getColorFromIndex(self, colorindex): 140 | '''Retrieve the colors for camera image elements, e.g., crosshair''' 141 | 142 | if colorindex == pylink.CR_HAIR_COLOR: 143 | return (255, 255, 255) 144 | elif colorindex == pylink.PUPIL_HAIR_COLOR: 145 | return (255, 255, 255) 146 | elif colorindex == pylink.PUPIL_BOX_COLOR: 147 | return (0, 255, 0) 148 | elif colorindex == pylink.SEARCH_LIMIT_BOX_COLOR: 149 | return (255, 0, 0) 150 | elif colorindex == pylink.MOUSE_CURSOR_COLOR: 151 | return (255, 0, 0) 152 | else: 153 | return (128, 128, 128) 154 | 155 | def draw_line(self, x1, y1, x2, y2, colorindex): 156 | '''Draw a line ''' 157 | 158 | color = self.getColorFromIndex(colorindex) 159 | 160 | # scale the coordinates 161 | w, h = self._img.im.size 162 | x1 = int(x1 / 192 * w) 163 | x2 = int(x2 / 192 * w) 164 | y1 = int(y1 / 160 * h) 165 | y2 = int(y2 / 160 * h) 166 | 167 | # draw the line 168 | if not any([x < 0 for x in [x1, x2, y1, y2]]): 169 | self._img.line([(x1, y1), (x2, y2)], color) 170 | 171 | def draw_lozenge(self, x, y, width, height, colorindex): 172 | ''' draw a lozenge to show the defined search limits ''' 173 | 174 | color = self.getColorFromIndex(colorindex) 175 | 176 | # scale the coordinates 177 | w, h = self._img.im.size 178 | x = int(x / 192 * w) 179 | y = int(y / 160 * h) 180 | width = int(width / 192 * w) 181 | height = int(height / 160 * h) 182 | 183 | # draw the lozenge 184 | if width > height: 185 | rad = int(height / 2.) 
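            # (the lozenge is a rectangle capped with two semicircular
            #  arcs; rad is the cap radius, so nothing is drawn when it
            #  rounds to zero)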
186 | if rad == 0: 187 | return 188 | else: 189 | self._img.line([(x + rad, y), (x + width - rad, y)], color) 190 | self._img.line([(x + rad, y + height), 191 | (x + width - rad, y + height)], color) 192 | self._img.arc([x, y, x + rad*2, y + rad*2], 90, 270, color) 193 | self._img.arc([x + width - rad*2, y, x + width, y + height], 194 | 270, 90, color) 195 | else: 196 | rad = int(width / 2.) 197 | if rad == 0: 198 | return 199 | else: 200 | self._img.line([(x, y + rad), (x, y + height - rad)], color) 201 | self._img.line([(x + width, y + rad), 202 | (x + width, y + height - rad)], color) 203 | self._img.arc([x, y, x + rad*2, y + rad*2], 180, 360, color) 204 | self._img.arc([x, y + height-rad*2, x + rad*2, y + height], 205 | 0, 180, color) 206 | 207 | def get_mouse_state(self): 208 | '''Get the current mouse position and status''' 209 | 210 | w, h = self._display.size 211 | X, Y = self._mouse.getPos() 212 | 213 | # scale the mouse position so the cursor stay on the camera image 214 | mX = (X + w/2.0)/w*self._size[0]/2.0 215 | mY = (h/2.0 - Y)/h*self._size[1]/2.0 216 | 217 | state = self._mouse.getPressed()[0] 218 | 219 | return ((mX, mY), state) 220 | 221 | def get_input_key(self): 222 | '''This function is repeatedly pooled to check 223 | keyboard events''' 224 | 225 | ky = [] 226 | for keycode, modifier in event.getKeys(modifiers=True): 227 | k = pylink.JUNK_KEY 228 | if keycode == 'f1': k = pylink.F1_KEY 229 | elif keycode == 'f2': k = pylink.F2_KEY 230 | elif keycode == 'f3': k = pylink.F3_KEY 231 | elif keycode == 'f4': k = pylink.F4_KEY 232 | elif keycode == 'f5': k = pylink.F5_KEY 233 | elif keycode == 'f6': k = pylink.F6_KEY 234 | elif keycode == 'f7': k = pylink.F7_KEY 235 | elif keycode == 'f8': k = pylink.F8_KEY 236 | elif keycode == 'f9': k = pylink.F9_KEY 237 | elif keycode == 'f10': k = pylink.F10_KEY 238 | elif keycode == 'pageup': k = pylink.PAGE_UP 239 | elif keycode == 'pagedown': k = pylink.PAGE_DOWN 240 | elif keycode == 'up': k = pylink.CURS_UP 241 | elif keycode == 'down': k = pylink.CURS_DOWN 242 | elif keycode == 'left': k = pylink.CURS_LEFT 243 | elif keycode == 'right': k = pylink.CURS_RIGHT 244 | elif keycode == 'backspace': k = ord('\b') 245 | elif keycode == 'return': k = pylink.ENTER_KEY 246 | elif keycode == 'space': k = ord(' ') 247 | elif keycode == 'escape': k = 27 248 | elif keycode == 'tab': k = ord('\t') 249 | elif keycode in string.ascii_letters: 250 | k = ord(keycode) 251 | elif k == pylink.JUNK_KEY: 252 | k = 0 253 | 254 | # plus & minus signs for CR adjustment 255 | if keycode in ['num_add', 'equal']: 256 | k = ord('+') 257 | if keycode in ['num_subtract', 'minus']: 258 | k = ord('-') 259 | 260 | # handles key modifier 261 | if modifier['alt'] is True: mod = 256 262 | elif modifier['ctrl'] is True: mod = 64 263 | elif modifier['shift'] is True: mod = 1 264 | else: 265 | mod = 0 266 | 267 | ky.append(pylink.KeyInput(k, mod)) 268 | 269 | return ky 270 | 271 | def exit_image_display(self): 272 | '''Clear the camera image''' 273 | 274 | self.clear_cal_display() 275 | self._display.flip() 276 | 277 | def alert_printf(self, msg): 278 | '''Print error messages.''' 279 | 280 | print("Error: " + msg) 281 | 282 | def setup_image_display(self, width, height): 283 | ''' set up the camera image 284 | 285 | return 1 to show high-resolution camera images''' 286 | 287 | self.last_mouse_state = -1 288 | self._size = (width, height) 289 | 290 | return 1 291 | 292 | def image_title(self, text): 293 | '''Draw title text below the camera image''' 294 | 295 | 
self._title.text = text 296 | 297 | def draw_image_line(self, width, line, totlines, buff): 298 | '''Display image pixel by pixel, line by line''' 299 | 300 | for i in range(width): 301 | try: 302 | self._imagebuffer.append(self._pal[buff[i]]) 303 | except: 304 | pass 305 | 306 | if line == totlines: 307 | bufferv = self._imagebuffer.tostring() 308 | img = Image.frombytes("RGBX", (width, totlines), bufferv) 309 | self._img = ImageDraw.Draw(img) 310 | # draw the cross hairs 311 | self.draw_cross_hair() 312 | # scale the camera image 313 | self.imgResize = img.resize((width*2, totlines*2)) 314 | cam_img = visual.ImageStim(self._display, 315 | image=self.imgResize, 316 | units='pix') 317 | cam_img.draw() 318 | # draw the camera image title 319 | self._title.pos = (0, - totlines - self._msgHeight) 320 | self._title.draw() 321 | self._display.flip() 322 | 323 | # clear the camera image buffer 324 | self._imagebuffer = array.array('I') 325 | 326 | def set_image_palette(self, r, g, b): 327 | '''Given a set of RGB colors, create a list of 24bit numbers 328 | representing the color palette. 329 | For instance, RGB of (1,64,127) would be saved as 82047, 330 | or 00000001 01000000 011111111''' 331 | 332 | self._imagebuffer = array.array('I') 333 | 334 | sz = len(r) 335 | i = 0 336 | self._pal = [] 337 | while i < sz: 338 | rf = int(b[i]) 339 | gf = int(g[i]) 340 | bf = int(r[i]) 341 | self._pal.append((rf << 16) | (gf << 8) | (bf)) 342 | i = i+1 343 | -------------------------------------------------------------------------------- /example_scripts/ch05_data_viewer/PsychoPy_examples/pursuit_task/EyeLinkCoreGraphicsPsychoPy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: EyeLinkCoreGraphicsPsychoPy.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/4/2021 6 | # 7 | # Description: 8 | # An EyeLink coregraphics library (calibration routine) 9 | # for PsychoPy experiments. 
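#
# Coordinate note (a sketch of the convention assumed by this class): the
# tracker reports calibration target positions with the origin at the
# top-left corner of the screen and y increasing downward, while a PsychoPy
# window puts (0, 0) at the center with y increasing upward. draw_cal_target()
# therefore converts with xVis = x - w/2 and yVis = h/2 - y; for example, on
# a 1280 x 800 display the tracker's top-left pixel (0, 0) maps to
# (-640, 400) in PsychoPy's pixel units.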
10 | 11 | import os 12 | import platform 13 | import array 14 | import string 15 | import pylink 16 | from psychopy import visual, event, core 17 | from math import sin, cos, pi 18 | from PIL import Image, ImageDraw 19 | from psychopy.sound import Sound 20 | 21 | 22 | class EyeLinkCoreGraphicsPsychoPy(pylink.EyeLinkCustomDisplay): 23 | def __init__(self, tracker, win): 24 | '''Initialize 25 | 26 | tracker: an EyeLink instance (connection) 27 | win: the PsychoPy window we use for calibration''' 28 | 29 | pylink.EyeLinkCustomDisplay.__init__(self) 30 | 31 | # background and target color 32 | self._backgroundColor = win.color 33 | self._foregroundColor = 'black' 34 | 35 | # window to use for calibration 36 | self._display = win 37 | # make the mouse cursor invisible 38 | self._display.mouseVisible = False 39 | 40 | # display width & height 41 | self._w, self._h = win.size 42 | 43 | # resolution fix for Mac retina displays 44 | if 'Darwin' in platform.system(): 45 | sys_cmd = 'system_profiler SPDisplaysDataType | grep Retina' 46 | is_ret = os.system(sys_cmd) 47 | if is_ret == 0: 48 | self._w = int(self._w / 2.0) 49 | self._h = int(self._h / 2.0) 50 | 51 | # store camera image pixels in an array 52 | self._imagebuffer = array.array('I') 53 | 54 | # store the color palette for camera image drawing 55 | self._pal = None 56 | 57 | # initial size of the camera image 58 | self._size = (384, 320) 59 | 60 | # initial mouse configuration 61 | self._mouse = event.Mouse(False) 62 | self.last_mouse_state = -1 63 | 64 | # camera image title 65 | self._msgHeight = self._size[1]/16.0 66 | self._title = visual.TextStim(self._display, '', 67 | wrapWidth=self._w, 68 | color=self._foregroundColor) 69 | 70 | # calibration target 71 | self._targetSize = self._w/64. 72 | self._tar = visual.Circle(self._display, 73 | size=self._targetSize, 74 | lineColor=self._foregroundColor, 75 | lineWidth=self._targetSize/2) 76 | 77 | # calibration sounds (beeps) 78 | self._target_beep = Sound('type.wav', stereo=True) 79 | self._error_beep = Sound('error.wav', stereo=True) 80 | self._done_beep = Sound('qbeep.wav', stereo=True) 81 | 82 | # a reference to the tracker connection 83 | self._tracker = tracker 84 | 85 | # for a clearer view we always enlarge the camera image 86 | self.imgResize = None 87 | 88 | def setup_cal_display(self): 89 | '''Set up the calibration display ''' 90 | 91 | self._display.clearBuffer() 92 | 93 | def clear_cal_display(self): 94 | '''Clear the calibration display''' 95 | 96 | self._display.color = self._backgroundColor 97 | self._display.flip() 98 | 99 | def exit_cal_display(self): 100 | '''Exit the calibration/validation routine''' 101 | 102 | self.clear_cal_display() 103 | 104 | def record_abort_hide(self): 105 | '''This function is called if aborted''' 106 | 107 | pass 108 | 109 | def erase_cal_target(self): 110 | '''Erase the target''' 111 | 112 | self.clear_cal_display() 113 | 114 | def draw_cal_target(self, x, y): 115 | '''Draw the target''' 116 | 117 | self.clear_cal_display() 118 | 119 | # target position 120 | xVis = (x - self._w/2.0) 121 | yVis = (self._h/2.0 - y) 122 | 123 | # draw the calibration target 124 | self._tar.pos = (xVis, yVis) 125 | self._tar.draw() 126 | self._display.flip() 127 | 128 | def play_beep(self, beepid): 129 | ''' Play a sound during calibration/drift-correction.''' 130 | 131 | if beepid in [pylink.CAL_TARG_BEEP, pylink.DC_TARG_BEEP]: 132 | self._target_beep.play() 133 | elif beepid in [pylink.CAL_ERR_BEEP, pylink.DC_ERR_BEEP]: 134 | self._error_beep.play() 135 | elif 
beepid in [pylink.CAL_GOOD_BEEP, pylink.DC_GOOD_BEEP]: 136 | self._done_beep.play() 137 | core.wait(0.4) 138 | 139 | def getColorFromIndex(self, colorindex): 140 | '''Retrieve the colors for camera image elements, e.g., crosshair''' 141 | 142 | if colorindex == pylink.CR_HAIR_COLOR: 143 | return (255, 255, 255) 144 | elif colorindex == pylink.PUPIL_HAIR_COLOR: 145 | return (255, 255, 255) 146 | elif colorindex == pylink.PUPIL_BOX_COLOR: 147 | return (0, 255, 0) 148 | elif colorindex == pylink.SEARCH_LIMIT_BOX_COLOR: 149 | return (255, 0, 0) 150 | elif colorindex == pylink.MOUSE_CURSOR_COLOR: 151 | return (255, 0, 0) 152 | else: 153 | return (128, 128, 128) 154 | 155 | def draw_line(self, x1, y1, x2, y2, colorindex): 156 | '''Draw a line ''' 157 | 158 | color = self.getColorFromIndex(colorindex) 159 | 160 | # scale the coordinates 161 | w, h = self._img.im.size 162 | x1 = int(x1 / 192 * w) 163 | x2 = int(x2 / 192 * w) 164 | y1 = int(y1 / 160 * h) 165 | y2 = int(y2 / 160 * h) 166 | 167 | # draw the line 168 | if not any([x < 0 for x in [x1, x2, y1, y2]]): 169 | self._img.line([(x1, y1), (x2, y2)], color) 170 | 171 | def draw_lozenge(self, x, y, width, height, colorindex): 172 | ''' draw a lozenge to show the defined search limits ''' 173 | 174 | color = self.getColorFromIndex(colorindex) 175 | 176 | # scale the coordinates 177 | w, h = self._img.im.size 178 | x = int(x / 192 * w) 179 | y = int(y / 160 * h) 180 | width = int(width / 192 * w) 181 | height = int(height / 160 * h) 182 | 183 | # draw the lozenge 184 | if width > height: 185 | rad = int(height / 2.) 186 | if rad == 0: 187 | return 188 | else: 189 | self._img.line([(x + rad, y), (x + width - rad, y)], color) 190 | self._img.line([(x + rad, y + height), 191 | (x + width - rad, y + height)], color) 192 | self._img.arc([x, y, x + rad*2, y + rad*2], 90, 270, color) 193 | self._img.arc([x + width - rad*2, y, x + width, y + height], 194 | 270, 90, color) 195 | else: 196 | rad = int(width / 2.) 
197 | if rad == 0: 198 | return 199 | else: 200 | self._img.line([(x, y + rad), (x, y + height - rad)], color) 201 | self._img.line([(x + width, y + rad), 202 | (x + width, y + height - rad)], color) 203 | self._img.arc([x, y, x + rad*2, y + rad*2], 180, 360, color) 204 | self._img.arc([x, y + height-rad*2, x + rad*2, y + height], 205 | 0, 180, color) 206 | 207 | def get_mouse_state(self): 208 | '''Get the current mouse position and status''' 209 | 210 | w, h = self._display.size 211 | X, Y = self._mouse.getPos() 212 | 213 | # scale the mouse position so the cursor stay on the camera image 214 | mX = (X + w/2.0)/w*self._size[0]/2.0 215 | mY = (h/2.0 - Y)/h*self._size[1]/2.0 216 | 217 | state = self._mouse.getPressed()[0] 218 | 219 | return ((mX, mY), state) 220 | 221 | def get_input_key(self): 222 | '''This function is repeatedly pooled to check 223 | keyboard events''' 224 | 225 | ky = [] 226 | for keycode, modifier in event.getKeys(modifiers=True): 227 | k = pylink.JUNK_KEY 228 | if keycode == 'f1': k = pylink.F1_KEY 229 | elif keycode == 'f2': k = pylink.F2_KEY 230 | elif keycode == 'f3': k = pylink.F3_KEY 231 | elif keycode == 'f4': k = pylink.F4_KEY 232 | elif keycode == 'f5': k = pylink.F5_KEY 233 | elif keycode == 'f6': k = pylink.F6_KEY 234 | elif keycode == 'f7': k = pylink.F7_KEY 235 | elif keycode == 'f8': k = pylink.F8_KEY 236 | elif keycode == 'f9': k = pylink.F9_KEY 237 | elif keycode == 'f10': k = pylink.F10_KEY 238 | elif keycode == 'pageup': k = pylink.PAGE_UP 239 | elif keycode == 'pagedown': k = pylink.PAGE_DOWN 240 | elif keycode == 'up': k = pylink.CURS_UP 241 | elif keycode == 'down': k = pylink.CURS_DOWN 242 | elif keycode == 'left': k = pylink.CURS_LEFT 243 | elif keycode == 'right': k = pylink.CURS_RIGHT 244 | elif keycode == 'backspace': k = ord('\b') 245 | elif keycode == 'return': k = pylink.ENTER_KEY 246 | elif keycode == 'space': k = ord(' ') 247 | elif keycode == 'escape': k = 27 248 | elif keycode == 'tab': k = ord('\t') 249 | elif keycode in string.ascii_letters: 250 | k = ord(keycode) 251 | elif k == pylink.JUNK_KEY: 252 | k = 0 253 | 254 | # plus & minus signs for CR adjustment 255 | if keycode in ['num_add', 'equal']: 256 | k = ord('+') 257 | if keycode in ['num_subtract', 'minus']: 258 | k = ord('-') 259 | 260 | # handles key modifier 261 | if modifier['alt'] is True: mod = 256 262 | elif modifier['ctrl'] is True: mod = 64 263 | elif modifier['shift'] is True: mod = 1 264 | else: 265 | mod = 0 266 | 267 | ky.append(pylink.KeyInput(k, mod)) 268 | 269 | return ky 270 | 271 | def exit_image_display(self): 272 | '''Clear the camera image''' 273 | 274 | self.clear_cal_display() 275 | self._display.flip() 276 | 277 | def alert_printf(self, msg): 278 | '''Print error messages.''' 279 | 280 | print("Error: " + msg) 281 | 282 | def setup_image_display(self, width, height): 283 | ''' set up the camera image 284 | 285 | return 1 to show high-resolution camera images''' 286 | 287 | self.last_mouse_state = -1 288 | self._size = (width, height) 289 | 290 | return 1 291 | 292 | def image_title(self, text): 293 | '''Draw title text below the camera image''' 294 | 295 | self._title.text = text 296 | 297 | def draw_image_line(self, width, line, totlines, buff): 298 | '''Display image pixel by pixel, line by line''' 299 | 300 | for i in range(width): 301 | try: 302 | self._imagebuffer.append(self._pal[buff[i]]) 303 | except: 304 | pass 305 | 306 | if line == totlines: 307 | bufferv = self._imagebuffer.tostring() 308 | img = Image.frombytes("RGBX", (width, totlines), 
bufferv) 309 | self._img = ImageDraw.Draw(img) 310 | # draw the cross hairs 311 | self.draw_cross_hair() 312 | # scale the camera image 313 | self.imgResize = img.resize((width*2, totlines*2)) 314 | cam_img = visual.ImageStim(self._display, 315 | image=self.imgResize, 316 | units='pix') 317 | cam_img.draw() 318 | # draw the camera image title 319 | self._title.pos = (0, - totlines - self._msgHeight) 320 | self._title.draw() 321 | self._display.flip() 322 | 323 | # clear the camera image buffer 324 | self._imagebuffer = array.array('I') 325 | 326 | def set_image_palette(self, r, g, b): 327 | '''Given a set of RGB colors, create a list of 24bit numbers 328 | representing the color palette. 329 | For instance, RGB of (1,64,127) would be saved as 82047, 330 | or 00000001 01000000 011111111''' 331 | 332 | self._imagebuffer = array.array('I') 333 | 334 | sz = len(r) 335 | i = 0 336 | self._pal = [] 337 | while i < sz: 338 | rf = int(b[i]) 339 | gf = int(g[i]) 340 | bf = int(r[i]) 341 | self._pal.append((rf << 16) | (gf << 8) | (bf)) 342 | i = i+1 343 | -------------------------------------------------------------------------------- /example_scripts/ch05_data_viewer/PsychoPy_examples/video_task/EyeLinkCoreGraphicsPsychoPy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: EyeLinkCoreGraphicsPsychoPy.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/4/2021 6 | # 7 | # Description: 8 | # An EyeLink coregraphics library (calibration routine) 9 | # for PsychoPy experiments. 10 | 11 | import os 12 | import platform 13 | import array 14 | import string 15 | import pylink 16 | from psychopy import visual, event, core 17 | from math import sin, cos, pi 18 | from PIL import Image, ImageDraw 19 | from psychopy.sound import Sound 20 | 21 | 22 | class EyeLinkCoreGraphicsPsychoPy(pylink.EyeLinkCustomDisplay): 23 | def __init__(self, tracker, win): 24 | '''Initialize 25 | 26 | tracker: an EyeLink instance (connection) 27 | win: the PsychoPy window we use for calibration''' 28 | 29 | pylink.EyeLinkCustomDisplay.__init__(self) 30 | 31 | # background and target color 32 | self._backgroundColor = win.color 33 | self._foregroundColor = 'black' 34 | 35 | # window to use for calibration 36 | self._display = win 37 | # make the mouse cursor invisible 38 | self._display.mouseVisible = False 39 | 40 | # display width & height 41 | self._w, self._h = win.size 42 | 43 | # resolution fix for Mac retina displays 44 | if 'Darwin' in platform.system(): 45 | sys_cmd = 'system_profiler SPDisplaysDataType | grep Retina' 46 | is_ret = os.system(sys_cmd) 47 | if is_ret == 0: 48 | self._w = int(self._w / 2.0) 49 | self._h = int(self._h / 2.0) 50 | 51 | # store camera image pixels in an array 52 | self._imagebuffer = array.array('I') 53 | 54 | # store the color palette for camera image drawing 55 | self._pal = None 56 | 57 | # initial size of the camera image 58 | self._size = (384, 320) 59 | 60 | # initial mouse configuration 61 | self._mouse = event.Mouse(False) 62 | self.last_mouse_state = -1 63 | 64 | # camera image title 65 | self._msgHeight = self._size[1]/16.0 66 | self._title = visual.TextStim(self._display, '', 67 | wrapWidth=self._w, 68 | color=self._foregroundColor) 69 | 70 | # calibration target 71 | self._targetSize = self._w/64. 
72 | self._tar = visual.Circle(self._display, 73 | size=self._targetSize, 74 | lineColor=self._foregroundColor, 75 | lineWidth=self._targetSize/2) 76 | 77 | # calibration sounds (beeps) 78 | self._target_beep = Sound('type.wav', stereo=True) 79 | self._error_beep = Sound('error.wav', stereo=True) 80 | self._done_beep = Sound('qbeep.wav', stereo=True) 81 | 82 | # a reference to the tracker connection 83 | self._tracker = tracker 84 | 85 | # for a clearer view we always enlarge the camera image 86 | self.imgResize = None 87 | 88 | def setup_cal_display(self): 89 | '''Set up the calibration display ''' 90 | 91 | self._display.clearBuffer() 92 | 93 | def clear_cal_display(self): 94 | '''Clear the calibration display''' 95 | 96 | self._display.color = self._backgroundColor 97 | self._display.flip() 98 | 99 | def exit_cal_display(self): 100 | '''Exit the calibration/validation routine''' 101 | 102 | self.clear_cal_display() 103 | 104 | def record_abort_hide(self): 105 | '''This function is called if aborted''' 106 | 107 | pass 108 | 109 | def erase_cal_target(self): 110 | '''Erase the target''' 111 | 112 | self.clear_cal_display() 113 | 114 | def draw_cal_target(self, x, y): 115 | '''Draw the target''' 116 | 117 | self.clear_cal_display() 118 | 119 | # target position 120 | xVis = (x - self._w/2.0) 121 | yVis = (self._h/2.0 - y) 122 | 123 | # draw the calibration target 124 | self._tar.pos = (xVis, yVis) 125 | self._tar.draw() 126 | self._display.flip() 127 | 128 | def play_beep(self, beepid): 129 | ''' Play a sound during calibration/drift-correction.''' 130 | 131 | if beepid in [pylink.CAL_TARG_BEEP, pylink.DC_TARG_BEEP]: 132 | self._target_beep.play() 133 | elif beepid in [pylink.CAL_ERR_BEEP, pylink.DC_ERR_BEEP]: 134 | self._error_beep.play() 135 | elif beepid in [pylink.CAL_GOOD_BEEP, pylink.DC_GOOD_BEEP]: 136 | self._done_beep.play() 137 | core.wait(0.4) 138 | 139 | def getColorFromIndex(self, colorindex): 140 | '''Retrieve the colors for camera image elements, e.g., crosshair''' 141 | 142 | if colorindex == pylink.CR_HAIR_COLOR: 143 | return (255, 255, 255) 144 | elif colorindex == pylink.PUPIL_HAIR_COLOR: 145 | return (255, 255, 255) 146 | elif colorindex == pylink.PUPIL_BOX_COLOR: 147 | return (0, 255, 0) 148 | elif colorindex == pylink.SEARCH_LIMIT_BOX_COLOR: 149 | return (255, 0, 0) 150 | elif colorindex == pylink.MOUSE_CURSOR_COLOR: 151 | return (255, 0, 0) 152 | else: 153 | return (128, 128, 128) 154 | 155 | def draw_line(self, x1, y1, x2, y2, colorindex): 156 | '''Draw a line ''' 157 | 158 | color = self.getColorFromIndex(colorindex) 159 | 160 | # scale the coordinates 161 | w, h = self._img.im.size 162 | x1 = int(x1 / 192 * w) 163 | x2 = int(x2 / 192 * w) 164 | y1 = int(y1 / 160 * h) 165 | y2 = int(y2 / 160 * h) 166 | 167 | # draw the line 168 | if not any([x < 0 for x in [x1, x2, y1, y2]]): 169 | self._img.line([(x1, y1), (x2, y2)], color) 170 | 171 | def draw_lozenge(self, x, y, width, height, colorindex): 172 | ''' draw a lozenge to show the defined search limits ''' 173 | 174 | color = self.getColorFromIndex(colorindex) 175 | 176 | # scale the coordinates 177 | w, h = self._img.im.size 178 | x = int(x / 192 * w) 179 | y = int(y / 160 * h) 180 | width = int(width / 192 * w) 181 | height = int(height / 160 * h) 182 | 183 | # draw the lozenge 184 | if width > height: 185 | rad = int(height / 2.) 
186 | if rad == 0: 187 | return 188 | else: 189 | self._img.line([(x + rad, y), (x + width - rad, y)], color) 190 | self._img.line([(x + rad, y + height), 191 | (x + width - rad, y + height)], color) 192 | self._img.arc([x, y, x + rad*2, y + rad*2], 90, 270, color) 193 | self._img.arc([x + width - rad*2, y, x + width, y + height], 194 | 270, 90, color) 195 | else: 196 | rad = int(width / 2.) 197 | if rad == 0: 198 | return 199 | else: 200 | self._img.line([(x, y + rad), (x, y + height - rad)], color) 201 | self._img.line([(x + width, y + rad), 202 | (x + width, y + height - rad)], color) 203 | self._img.arc([x, y, x + rad*2, y + rad*2], 180, 360, color) 204 | self._img.arc([x, y + height-rad*2, x + rad*2, y + height], 205 | 0, 180, color) 206 | 207 | def get_mouse_state(self): 208 | '''Get the current mouse position and status''' 209 | 210 | w, h = self._display.size 211 | X, Y = self._mouse.getPos() 212 | 213 | # scale the mouse position so the cursor stay on the camera image 214 | mX = (X + w/2.0)/w*self._size[0]/2.0 215 | mY = (h/2.0 - Y)/h*self._size[1]/2.0 216 | 217 | state = self._mouse.getPressed()[0] 218 | 219 | return ((mX, mY), state) 220 | 221 | def get_input_key(self): 222 | '''This function is repeatedly pooled to check 223 | keyboard events''' 224 | 225 | ky = [] 226 | for keycode, modifier in event.getKeys(modifiers=True): 227 | k = pylink.JUNK_KEY 228 | if keycode == 'f1': k = pylink.F1_KEY 229 | elif keycode == 'f2': k = pylink.F2_KEY 230 | elif keycode == 'f3': k = pylink.F3_KEY 231 | elif keycode == 'f4': k = pylink.F4_KEY 232 | elif keycode == 'f5': k = pylink.F5_KEY 233 | elif keycode == 'f6': k = pylink.F6_KEY 234 | elif keycode == 'f7': k = pylink.F7_KEY 235 | elif keycode == 'f8': k = pylink.F8_KEY 236 | elif keycode == 'f9': k = pylink.F9_KEY 237 | elif keycode == 'f10': k = pylink.F10_KEY 238 | elif keycode == 'pageup': k = pylink.PAGE_UP 239 | elif keycode == 'pagedown': k = pylink.PAGE_DOWN 240 | elif keycode == 'up': k = pylink.CURS_UP 241 | elif keycode == 'down': k = pylink.CURS_DOWN 242 | elif keycode == 'left': k = pylink.CURS_LEFT 243 | elif keycode == 'right': k = pylink.CURS_RIGHT 244 | elif keycode == 'backspace': k = ord('\b') 245 | elif keycode == 'return': k = pylink.ENTER_KEY 246 | elif keycode == 'space': k = ord(' ') 247 | elif keycode == 'escape': k = 27 248 | elif keycode == 'tab': k = ord('\t') 249 | elif keycode in string.ascii_letters: 250 | k = ord(keycode) 251 | elif k == pylink.JUNK_KEY: 252 | k = 0 253 | 254 | # plus & minus signs for CR adjustment 255 | if keycode in ['num_add', 'equal']: 256 | k = ord('+') 257 | if keycode in ['num_subtract', 'minus']: 258 | k = ord('-') 259 | 260 | # handles key modifier 261 | if modifier['alt'] is True: mod = 256 262 | elif modifier['ctrl'] is True: mod = 64 263 | elif modifier['shift'] is True: mod = 1 264 | else: 265 | mod = 0 266 | 267 | ky.append(pylink.KeyInput(k, mod)) 268 | 269 | return ky 270 | 271 | def exit_image_display(self): 272 | '''Clear the camera image''' 273 | 274 | self.clear_cal_display() 275 | self._display.flip() 276 | 277 | def alert_printf(self, msg): 278 | '''Print error messages.''' 279 | 280 | print("Error: " + msg) 281 | 282 | def setup_image_display(self, width, height): 283 | ''' set up the camera image 284 | 285 | return 1 to show high-resolution camera images''' 286 | 287 | self.last_mouse_state = -1 288 | self._size = (width, height) 289 | 290 | return 1 291 | 292 | def image_title(self, text): 293 | '''Draw title text below the camera image''' 294 | 295 | 
self._title.text = text 296 | 297 | def draw_image_line(self, width, line, totlines, buff): 298 | '''Display image pixel by pixel, line by line''' 299 | 300 | for i in range(width): 301 | try: 302 | self._imagebuffer.append(self._pal[buff[i]]) 303 | except: 304 | pass 305 | 306 | if line == totlines: 307 | bufferv = self._imagebuffer.tostring() 308 | img = Image.frombytes("RGBX", (width, totlines), bufferv) 309 | self._img = ImageDraw.Draw(img) 310 | # draw the cross hairs 311 | self.draw_cross_hair() 312 | # scale the camera image 313 | self.imgResize = img.resize((width*2, totlines*2)) 314 | cam_img = visual.ImageStim(self._display, 315 | image=self.imgResize, 316 | units='pix') 317 | cam_img.draw() 318 | # draw the camera image title 319 | self._title.pos = (0, - totlines - self._msgHeight) 320 | self._title.draw() 321 | self._display.flip() 322 | 323 | # clear the camera image buffer 324 | self._imagebuffer = array.array('I') 325 | 326 | def set_image_palette(self, r, g, b): 327 | '''Given a set of RGB colors, create a list of 24bit numbers 328 | representing the color palette. 329 | For instance, RGB of (1,64,127) would be saved as 82047, 330 | or 00000001 01000000 011111111''' 331 | 332 | self._imagebuffer = array.array('I') 333 | 334 | sz = len(r) 335 | i = 0 336 | self._pal = [] 337 | while i < sz: 338 | rf = int(b[i]) 339 | gf = int(g[i]) 340 | bf = int(r[i]) 341 | self._pal.append((rf << 16) | (gf << 8) | (bf)) 342 | i = i+1 343 | -------------------------------------------------------------------------------- /example_scripts/ch06_data_retrieval/PsychoPy_examples/gaze_trigger/EyeLinkCoreGraphicsPsychoPy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # Filename: EyeLinkCoreGraphicsPsychoPy.py 4 | # Author: Zhiguo Wang 5 | # Date: 2/4/2021 6 | # 7 | # Description: 8 | # An EyeLink coregraphics library (calibration routine) 9 | # for PsychoPy experiments. 
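#
# Palette note (the arithmetic behind the example quoted in the
# set_image_palette docstring below): a packed 24-bit color stores one
# 8-bit channel per byte, so RGB (1, 64, 127) becomes
# (1 << 16) | (64 << 8) | 127 = 65536 + 16384 + 127 = 82047.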
10 |
11 | import os
12 | import platform
13 | import array
14 | import string
15 | import pylink
16 | from psychopy import visual, event, core
17 | from math import sin, cos, pi
18 | from PIL import Image, ImageDraw
19 | from psychopy.sound import Sound
20 |
21 |
22 | class EyeLinkCoreGraphicsPsychoPy(pylink.EyeLinkCustomDisplay):
23 |     def __init__(self, tracker, win):
24 |         '''Initialize
25 |
26 |         tracker: an EyeLink instance (connection)
27 |         win: the PsychoPy window we use for calibration'''
28 |
29 |         pylink.EyeLinkCustomDisplay.__init__(self)
30 |
31 |         # background and target color
32 |         self._backgroundColor = win.color
33 |         self._foregroundColor = 'black'
34 |
35 |         # window to use for calibration
36 |         self._display = win
37 |         # make the mouse cursor invisible
38 |         self._display.mouseVisible = False
39 |
40 |         # display width & height
41 |         self._w, self._h = win.size
42 |
43 |         # resolution fix for Mac retina displays
44 |         if 'Darwin' in platform.system():
45 |             sys_cmd = 'system_profiler SPDisplaysDataType | grep Retina'
46 |             is_ret = os.system(sys_cmd)
47 |             if is_ret == 0:
48 |                 self._w = int(self._w / 2.0)
49 |                 self._h = int(self._h / 2.0)
50 |
51 |         # store camera image pixels in an array
52 |         self._imagebuffer = array.array('I')
53 |
54 |         # store the color palette for camera image drawing
55 |         self._pal = None
56 |
57 |         # initial size of the camera image
58 |         self._size = (384, 320)
59 |
60 |         # initial mouse configuration
61 |         self._mouse = event.Mouse(False)
62 |         self.last_mouse_state = -1
63 |
64 |         # camera image title
65 |         self._msgHeight = self._size[1]/16.0
66 |         self._title = visual.TextStim(self._display, '',
67 |                                       wrapWidth=self._w,
68 |                                       color=self._foregroundColor)
69 |
70 |         # calibration target
71 |         self._targetSize = self._w/64.
72 |         self._tar = visual.Circle(self._display,
73 |                                   size=self._targetSize,
74 |                                   lineColor=self._foregroundColor,
75 |                                   lineWidth=self._targetSize/2)
76 |
77 |         # calibration sounds (beeps)
78 |         self._target_beep = Sound('type.wav', stereo=True)
79 |         self._error_beep = Sound('error.wav', stereo=True)
80 |         self._done_beep = Sound('qbeep.wav', stereo=True)
81 |
82 |         # a reference to the tracker connection
83 |         self._tracker = tracker
84 |
85 |         # for a clearer view we always enlarge the camera image
86 |         self.imgResize = None
87 |
88 |     def setup_cal_display(self):
89 |         '''Set up the calibration display'''
90 |
91 |         self._display.clearBuffer()
92 |
93 |     def clear_cal_display(self):
94 |         '''Clear the calibration display'''
95 |
96 |         self._display.color = self._backgroundColor
97 |         self._display.flip()
98 |
99 |     def exit_cal_display(self):
100 |         '''Exit the calibration/validation routine'''
101 |
102 |         self.clear_cal_display()
103 |
104 |     def record_abort_hide(self):
105 |         '''This function is called if aborted'''
106 |
107 |         pass
108 |
109 |     def erase_cal_target(self):
110 |         '''Erase the target'''
111 |
112 |         self.clear_cal_display()
113 |
114 |     def draw_cal_target(self, x, y):
115 |         '''Draw the target'''
116 |
117 |         self.clear_cal_display()
118 |
119 |         # target position
120 |         xVis = (x - self._w/2.0)
121 |         yVis = (self._h/2.0 - y)
122 |
123 |         # draw the calibration target
124 |         self._tar.pos = (xVis, yVis)
125 |         self._tar.draw()
126 |         self._display.flip()
127 |
128 |     def play_beep(self, beepid):
129 |         '''Play a sound during calibration/drift-correction.'''
130 |
131 |         if beepid in [pylink.CAL_TARG_BEEP, pylink.DC_TARG_BEEP]:
132 |             self._target_beep.play()
133 |         elif beepid in [pylink.CAL_ERR_BEEP, pylink.DC_ERR_BEEP]:
134 |             self._error_beep.play()
135 |         elif beepid in [pylink.CAL_GOOD_BEEP, pylink.DC_GOOD_BEEP]:
136 |             self._done_beep.play()
137 |         core.wait(0.4)
138 |
139 |     def getColorFromIndex(self, colorindex):
140 |         '''Retrieve the colors for camera image elements, e.g., crosshair'''
141 |
142 |         if colorindex == pylink.CR_HAIR_COLOR:
143 |             return (255, 255, 255)
144 |         elif colorindex == pylink.PUPIL_HAIR_COLOR:
145 |             return (255, 255, 255)
146 |         elif colorindex == pylink.PUPIL_BOX_COLOR:
147 |             return (0, 255, 0)
148 |         elif colorindex == pylink.SEARCH_LIMIT_BOX_COLOR:
149 |             return (255, 0, 0)
150 |         elif colorindex == pylink.MOUSE_CURSOR_COLOR:
151 |             return (255, 0, 0)
152 |         else:
153 |             return (128, 128, 128)
154 |
155 |     def draw_line(self, x1, y1, x2, y2, colorindex):
156 |         '''Draw a line'''
157 |
158 |         color = self.getColorFromIndex(colorindex)
159 |
160 |         # scale the coordinates
161 |         w, h = self._img.im.size
162 |         x1 = int(x1 / 192 * w)
163 |         x2 = int(x2 / 192 * w)
164 |         y1 = int(y1 / 160 * h)
165 |         y2 = int(y2 / 160 * h)
166 |
167 |         # draw the line
168 |         if not any([x < 0 for x in [x1, x2, y1, y2]]):
169 |             self._img.line([(x1, y1), (x2, y2)], color)
170 |
171 |     def draw_lozenge(self, x, y, width, height, colorindex):
172 |         '''Draw a lozenge to show the defined search limits'''
173 |
174 |         color = self.getColorFromIndex(colorindex)
175 |
176 |         # scale the coordinates
177 |         w, h = self._img.im.size
178 |         x = int(x / 192 * w)
179 |         y = int(y / 160 * h)
180 |         width = int(width / 192 * w)
181 |         height = int(height / 160 * h)
182 |
183 |         # draw the lozenge
184 |         if width > height:
185 |             rad = int(height / 2.)
186 |             if rad == 0:
187 |                 return
188 |             else:
189 |                 self._img.line([(x + rad, y), (x + width - rad, y)], color)
190 |                 self._img.line([(x + rad, y + height),
191 |                                 (x + width - rad, y + height)], color)
192 |                 self._img.arc([x, y, x + rad*2, y + rad*2], 90, 270, color)
193 |                 self._img.arc([x + width - rad*2, y, x + width, y + height],
194 |                               270, 90, color)
195 |         else:
196 |             rad = int(width / 2.)
197 |             if rad == 0:
198 |                 return
199 |             else:
200 |                 self._img.line([(x, y + rad), (x, y + height - rad)], color)
201 |                 self._img.line([(x + width, y + rad),
202 |                                 (x + width, y + height - rad)], color)
203 |                 self._img.arc([x, y, x + rad*2, y + rad*2], 180, 360, color)
204 |                 self._img.arc([x, y + height-rad*2, x + rad*2, y + height],
205 |                               0, 180, color)
206 |
207 |     def get_mouse_state(self):
208 |         '''Get the current mouse position and status'''
209 |
210 |         w, h = self._display.size
211 |         X, Y = self._mouse.getPos()
212 |
213 |         # scale the mouse position so the cursor stays on the camera image
214 |         mX = (X + w/2.0)/w*self._size[0]/2.0
215 |         mY = (h/2.0 - Y)/h*self._size[1]/2.0
216 |
217 |         state = self._mouse.getPressed()[0]
218 |
219 |         return ((mX, mY), state)
220 |
221 |     def get_input_key(self):
222 |         '''This function is repeatedly polled to check
223 |         keyboard events'''
224 |
225 |         ky = []
226 |         for keycode, modifier in event.getKeys(modifiers=True):
227 |             k = pylink.JUNK_KEY
228 |             if keycode == 'f1': k = pylink.F1_KEY
229 |             elif keycode == 'f2': k = pylink.F2_KEY
230 |             elif keycode == 'f3': k = pylink.F3_KEY
231 |             elif keycode == 'f4': k = pylink.F4_KEY
232 |             elif keycode == 'f5': k = pylink.F5_KEY
233 |             elif keycode == 'f6': k = pylink.F6_KEY
234 |             elif keycode == 'f7': k = pylink.F7_KEY
235 |             elif keycode == 'f8': k = pylink.F8_KEY
236 |             elif keycode == 'f9': k = pylink.F9_KEY
237 |             elif keycode == 'f10': k = pylink.F10_KEY
238 |             elif keycode == 'pageup': k = pylink.PAGE_UP
239 |             elif keycode == 'pagedown': k = pylink.PAGE_DOWN
240 |             elif keycode == 'up': k = pylink.CURS_UP
241 |             elif keycode == 'down': k = pylink.CURS_DOWN
242 |             elif keycode == 'left': k = pylink.CURS_LEFT
243 |             elif keycode == 'right': k = pylink.CURS_RIGHT
244 |             elif keycode == 'backspace': k = ord('\b')
245 |             elif keycode == 'return': k = pylink.ENTER_KEY
246 |             elif keycode == 'space': k = ord(' ')
247 |             elif keycode == 'escape': k = 27
248 |             elif keycode == 'tab': k = ord('\t')
249 |             elif keycode in string.ascii_letters:
250 |                 k = ord(keycode)
251 |             elif k == pylink.JUNK_KEY:
252 |                 k = 0
253 |
254 |             # plus & minus signs for CR adjustment
255 |             if keycode in ['num_add', 'equal']:
256 |                 k = ord('+')
257 |             if keycode in ['num_subtract', 'minus']:
258 |                 k = ord('-')
259 |
260 |             # handle key modifiers
261 |             if modifier['alt'] is True: mod = 256
262 |             elif modifier['ctrl'] is True: mod = 64
263 |             elif modifier['shift'] is True: mod = 1
264 |             else:
265 |                 mod = 0
266 |
267 |             ky.append(pylink.KeyInput(k, mod))
268 |
269 |         return ky
270 |
271 |     def exit_image_display(self):
272 |         '''Clear the camera image'''
273 |
274 |         self.clear_cal_display()
275 |         self._display.flip()
276 |
277 |     def alert_printf(self, msg):
278 |         '''Print error messages.'''
279 |
280 |         print("Error: " + msg)
281 |
282 |     def setup_image_display(self, width, height):
283 |         '''Set up the camera image
284 |
285 |         return 1 to show high-resolution camera images'''
286 |
287 |         self.last_mouse_state = -1
288 |         self._size = (width, height)
289 |
290 |         return 1
291 |
292 |     def image_title(self, text):
293 |         '''Draw title text below the camera image'''
294 |
295 |         self._title.text = text
296 |
297 |     def draw_image_line(self, width, line, totlines, buff):
298 |         '''Display image pixel by pixel, line by line'''
299 |
300 |         for i in range(width):
301 |             try:
302 |                 self._imagebuffer.append(self._pal[buff[i]])
303 |             except (IndexError, TypeError):  # palette not set yet, or a bad index
304 |                 pass
305 |
306 |         if line == totlines:
307 |             bufferv = self._imagebuffer.tobytes()  # tostring() was removed in Python 3.9
308 |             img = Image.frombytes("RGBX", (width, totlines), bufferv)
309 |             self._img = ImageDraw.Draw(img)
310 |             # draw the cross hairs
311 |             self.draw_cross_hair()
312 |             # scale the camera image
313 |             self.imgResize = img.resize((width*2, totlines*2))
314 |             cam_img = visual.ImageStim(self._display,
315 |                                        image=self.imgResize,
316 |                                        units='pix')
317 |             cam_img.draw()
318 |             # draw the camera image title
319 |             self._title.pos = (0, - totlines - self._msgHeight)
320 |             self._title.draw()
321 |             self._display.flip()
322 |
323 |             # clear the camera image buffer
324 |             self._imagebuffer = array.array('I')
325 |
326 |     def set_image_palette(self, r, g, b):
327 |         '''Given a set of RGB colors, create a list of 24-bit numbers
328 |         representing the color palette. R and B are swapped (entries are
329 |         packed as 0xBBGGRR) so the bytes match the little-endian "RGBX"
330 |         layout; e.g., RGB (1, 64, 127) is stored as (127<<16)|(64<<8)|1'''
331 |
332 |         self._imagebuffer = array.array('I')
333 |
334 |         sz = len(r)
335 |         i = 0
336 |         self._pal = []
337 |         while i < sz:
338 |             rf = int(b[i])  # blue goes into the high byte
339 |             gf = int(g[i])
340 |             bf = int(r[i])  # red goes into the low byte
341 |             self._pal.append((rf << 16) | (gf << 8) | (bf))
342 |             i = i + 1
343 |
--------------------------------------------------------------------------------
/example_scripts/ch06_data_retrieval/PsychoPy_examples/gaze_contingent_window/EyeLinkCoreGraphicsPsychoPy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # Filename: EyeLinkCoreGraphicsPsychoPy.py
4 | # Author: Zhiguo Wang
5 | # Date: 2/4/2021
6 | #
7 | # Description:
8 | # An EyeLink coregraphics library (calibration routine)
9 | # for PsychoPy experiments.
10 |
11 | import os
12 | import platform
13 | import array
14 | import string
15 | import pylink
16 | from psychopy import visual, event, core
17 | from math import sin, cos, pi
18 | from PIL import Image, ImageDraw
19 | from psychopy.sound import Sound
20 |
21 |
22 | class EyeLinkCoreGraphicsPsychoPy(pylink.EyeLinkCustomDisplay):
23 |     def __init__(self, tracker, win):
24 |         '''Initialize
25 |
26 |         tracker: an EyeLink instance (connection)
27 |         win: the PsychoPy window we use for calibration'''
28 |
29 |         pylink.EyeLinkCustomDisplay.__init__(self)
30 |
31 |         # background and target color
32 |         self._backgroundColor = win.color
33 |         self._foregroundColor = 'black'
34 |
35 |         # window to use for calibration
36 |         self._display = win
37 |         # make the mouse cursor invisible
38 |         self._display.mouseVisible = False
39 |
40 |         # display width & height
41 |         self._w, self._h = win.size
42 |
43 |         # resolution fix for Mac retina displays
44 |         if 'Darwin' in platform.system():
45 |             sys_cmd = 'system_profiler SPDisplaysDataType | grep Retina'
46 |             is_ret = os.system(sys_cmd)
47 |             if is_ret == 0:
48 |                 self._w = int(self._w / 2.0)
49 |                 self._h = int(self._h / 2.0)
50 |
51 |         # store camera image pixels in an array
52 |         self._imagebuffer = array.array('I')
53 |
54 |         # store the color palette for camera image drawing
55 |         self._pal = None
56 |
57 |         # initial size of the camera image
58 |         self._size = (384, 320)
59 |
60 |         # initial mouse configuration
61 |         self._mouse = event.Mouse(False)
62 |         self.last_mouse_state = -1
63 |
64 |         # camera image title
65 |         self._msgHeight = self._size[1]/16.0
66 |         self._title = visual.TextStim(self._display, '',
67 |                                       wrapWidth=self._w,
68 |                                       color=self._foregroundColor)
69 |
70 |         # calibration target
71 |         self._targetSize = self._w/64.
72 |         self._tar = visual.Circle(self._display,
73 |                                   size=self._targetSize,
74 |                                   lineColor=self._foregroundColor,
75 |                                   lineWidth=self._targetSize/2)
76 |
77 |         # calibration sounds (beeps)
78 |         self._target_beep = Sound('type.wav', stereo=True)
79 |         self._error_beep = Sound('error.wav', stereo=True)
80 |         self._done_beep = Sound('qbeep.wav', stereo=True)
81 |
82 |         # a reference to the tracker connection
83 |         self._tracker = tracker
84 |
85 |         # for a clearer view we always enlarge the camera image
86 |         self.imgResize = None
87 |
88 |     def setup_cal_display(self):
89 |         '''Set up the calibration display'''
90 |
91 |         self._display.clearBuffer()
92 |
93 |     def clear_cal_display(self):
94 |         '''Clear the calibration display'''
95 |
96 |         self._display.color = self._backgroundColor
97 |         self._display.flip()
98 |
99 |     def exit_cal_display(self):
100 |         '''Exit the calibration/validation routine'''
101 |
102 |         self.clear_cal_display()
103 |
104 |     def record_abort_hide(self):
105 |         '''This function is called if aborted'''
106 |
107 |         pass
108 |
109 |     def erase_cal_target(self):
110 |         '''Erase the target'''
111 |
112 |         self.clear_cal_display()
113 |
114 |     def draw_cal_target(self, x, y):
115 |         '''Draw the target'''
116 |
117 |         self.clear_cal_display()
118 |
119 |         # target position
120 |         xVis = (x - self._w/2.0)
121 |         yVis = (self._h/2.0 - y)
122 |
123 |         # draw the calibration target
124 |         self._tar.pos = (xVis, yVis)
125 |         self._tar.draw()
126 |         self._display.flip()
127 |
128 |     def play_beep(self, beepid):
129 |         '''Play a sound during calibration/drift-correction.'''
130 |
131 |         if beepid in [pylink.CAL_TARG_BEEP, pylink.DC_TARG_BEEP]:
132 |             self._target_beep.play()
133 |         elif beepid in [pylink.CAL_ERR_BEEP, pylink.DC_ERR_BEEP]:
134 |             self._error_beep.play()
135 |         elif beepid in [pylink.CAL_GOOD_BEEP, pylink.DC_GOOD_BEEP]:
136 |             self._done_beep.play()
137 |         core.wait(0.4)
138 |
139 |     def getColorFromIndex(self, colorindex):
140 |         '''Retrieve the colors for camera image elements, e.g., crosshair'''
141 |
142 |         if colorindex == pylink.CR_HAIR_COLOR:
143 |             return (255, 255, 255)
144 |         elif colorindex == pylink.PUPIL_HAIR_COLOR:
145 |             return (255, 255, 255)
146 |         elif colorindex == pylink.PUPIL_BOX_COLOR:
147 |             return (0, 255, 0)
148 |         elif colorindex == pylink.SEARCH_LIMIT_BOX_COLOR:
149 |             return (255, 0, 0)
150 |         elif colorindex == pylink.MOUSE_CURSOR_COLOR:
151 |             return (255, 0, 0)
152 |         else:
153 |             return (128, 128, 128)
154 |
155 |     def draw_line(self, x1, y1, x2, y2, colorindex):
156 |         '''Draw a line'''
157 |
158 |         color = self.getColorFromIndex(colorindex)
159 |
160 |         # scale the coordinates
161 |         w, h = self._img.im.size
162 |         x1 = int(x1 / 192 * w)
163 |         x2 = int(x2 / 192 * w)
164 |         y1 = int(y1 / 160 * h)
165 |         y2 = int(y2 / 160 * h)
166 |
167 |         # draw the line
168 |         if not any([x < 0 for x in [x1, x2, y1, y2]]):
169 |             self._img.line([(x1, y1), (x2, y2)], color)
170 |
171 |     def draw_lozenge(self, x, y, width, height, colorindex):
172 |         '''Draw a lozenge to show the defined search limits'''
173 |
174 |         color = self.getColorFromIndex(colorindex)
175 |
176 |         # scale the coordinates
177 |         w, h = self._img.im.size
178 |         x = int(x / 192 * w)
179 |         y = int(y / 160 * h)
180 |         width = int(width / 192 * w)
181 |         height = int(height / 160 * h)
182 |
183 |         # draw the lozenge
184 |         if width > height:
185 |             rad = int(height / 2.)
186 |             if rad == 0:
187 |                 return
188 |             else:
189 |                 self._img.line([(x + rad, y), (x + width - rad, y)], color)
190 |                 self._img.line([(x + rad, y + height),
191 |                                 (x + width - rad, y + height)], color)
192 |                 self._img.arc([x, y, x + rad*2, y + rad*2], 90, 270, color)
193 |                 self._img.arc([x + width - rad*2, y, x + width, y + height],
194 |                               270, 90, color)
195 |         else:
196 |             rad = int(width / 2.)
197 |             if rad == 0:
198 |                 return
199 |             else:
200 |                 self._img.line([(x, y + rad), (x, y + height - rad)], color)
201 |                 self._img.line([(x + width, y + rad),
202 |                                 (x + width, y + height - rad)], color)
203 |                 self._img.arc([x, y, x + rad*2, y + rad*2], 180, 360, color)
204 |                 self._img.arc([x, y + height-rad*2, x + rad*2, y + height],
205 |                               0, 180, color)
206 |
207 |     def get_mouse_state(self):
208 |         '''Get the current mouse position and status'''
209 |
210 |         w, h = self._display.size
211 |         X, Y = self._mouse.getPos()
212 |
213 |         # scale the mouse position so the cursor stays on the camera image
214 |         mX = (X + w/2.0)/w*self._size[0]/2.0
215 |         mY = (h/2.0 - Y)/h*self._size[1]/2.0
216 |
217 |         state = self._mouse.getPressed()[0]
218 |
219 |         return ((mX, mY), state)
220 |
221 |     def get_input_key(self):
222 |         '''This function is repeatedly polled to check
223 |         keyboard events'''
224 |
225 |         ky = []
226 |         for keycode, modifier in event.getKeys(modifiers=True):
227 |             k = pylink.JUNK_KEY
228 |             if keycode == 'f1': k = pylink.F1_KEY
229 |             elif keycode == 'f2': k = pylink.F2_KEY
230 |             elif keycode == 'f3': k = pylink.F3_KEY
231 |             elif keycode == 'f4': k = pylink.F4_KEY
232 |             elif keycode == 'f5': k = pylink.F5_KEY
233 |             elif keycode == 'f6': k = pylink.F6_KEY
234 |             elif keycode == 'f7': k = pylink.F7_KEY
235 |             elif keycode == 'f8': k = pylink.F8_KEY
236 |             elif keycode == 'f9': k = pylink.F9_KEY
237 |             elif keycode == 'f10': k = pylink.F10_KEY
238 |             elif keycode == 'pageup': k = pylink.PAGE_UP
239 |             elif keycode == 'pagedown': k = pylink.PAGE_DOWN
240 |             elif keycode == 'up': k = pylink.CURS_UP
241 |             elif keycode == 'down': k = pylink.CURS_DOWN
242 |             elif keycode == 'left': k = pylink.CURS_LEFT
243 |             elif keycode == 'right': k = pylink.CURS_RIGHT
244 |             elif keycode == 'backspace': k = ord('\b')
245 |             elif keycode == 'return': k = pylink.ENTER_KEY
246 |             elif keycode == 'space': k = ord(' ')
247 |             elif keycode == 'escape': k = 27
248 |             elif keycode == 'tab': k = ord('\t')
249 |             elif keycode in string.ascii_letters:
250 |                 k = ord(keycode)
251 |             elif k == pylink.JUNK_KEY:
252 |                 k = 0
253 |
254 |             # plus & minus signs for CR adjustment
255 |             if keycode in ['num_add', 'equal']:
256 |                 k = ord('+')
257 |             if keycode in ['num_subtract', 'minus']:
258 |                 k = ord('-')
259 |
260 |             # handle key modifiers
261 |             if modifier['alt'] is True: mod = 256
262 |             elif modifier['ctrl'] is True: mod = 64
263 |             elif modifier['shift'] is True: mod = 1
264 |             else:
265 |                 mod = 0
266 |
267 |             ky.append(pylink.KeyInput(k, mod))
268 |
269 |         return ky
270 |
271 |     def exit_image_display(self):
272 |         '''Clear the camera image'''
273 |
274 |         self.clear_cal_display()
275 |         self._display.flip()
276 |
277 |     def alert_printf(self, msg):
278 |         '''Print error messages.'''
279 |
280 |         print("Error: " + msg)
281 |
282 |     def setup_image_display(self, width, height):
283 |         '''Set up the camera image
284 |
285 |         return 1 to show high-resolution camera images'''
286 |
287 |         self.last_mouse_state = -1
288 |         self._size = (width, height)
289 |
290 |         return 1
291 |
292 |     def image_title(self, text):
293 |         '''Draw title text below the camera image'''
294 |
295 |         self._title.text = text
296 |
297 |     def draw_image_line(self, width, line, totlines, buff):
298 |         '''Display image pixel by pixel, line by line'''
299 |
300 |         for i in range(width):
301 |             try:
302 |                 self._imagebuffer.append(self._pal[buff[i]])
303 |             except (IndexError, TypeError):  # palette not set yet, or a bad index
304 |                 pass
305 |
306 |         if line == totlines:
307 |             bufferv = self._imagebuffer.tobytes()  # tostring() was removed in Python 3.9
308 |             img = Image.frombytes("RGBX", (width, totlines), bufferv)
309 |             self._img = ImageDraw.Draw(img)
310 |             # draw the cross hairs
311 |             self.draw_cross_hair()
312 |             # scale the camera image
313 |             self.imgResize = img.resize((width*2, totlines*2))
314 |             cam_img = visual.ImageStim(self._display,
315 |                                        image=self.imgResize,
316 |                                        units='pix')
317 |             cam_img.draw()
318 |             # draw the camera image title
319 |             self._title.pos = (0, - totlines - self._msgHeight)
320 |             self._title.draw()
321 |             self._display.flip()
322 |
323 |             # clear the camera image buffer
324 |             self._imagebuffer = array.array('I')
325 |
326 |     def set_image_palette(self, r, g, b):
327 |         '''Given a set of RGB colors, create a list of 24-bit numbers
328 |         representing the color palette. R and B are swapped (entries are
329 |         packed as 0xBBGGRR) so the bytes match the little-endian "RGBX"
330 |         layout; e.g., RGB (1, 64, 127) is stored as (127<<16)|(64<<8)|1'''
331 |
332 |         self._imagebuffer = array.array('I')
333 |
334 |         sz = len(r)
335 |         i = 0
336 |         self._pal = []
337 |         while i < sz:
338 |             rf = int(b[i])  # blue goes into the high byte
339 |             gf = int(g[i])
340 |             bf = int(r[i])  # red goes into the low byte
341 |             self._pal.append((rf << 16) | (gf << 8) | (bf))
342 |             i = i + 1
343 |
--------------------------------------------------------------------------------
/example_scripts/ch08_data_visualization/transition.html:
--------------------------------------------------------------------------------
[The markup of this 452-line HTML page was stripped during extraction; only the
page title, "Chord Diagram", is recoverable. Judging by its name and location,
it is likely the chord-diagram page that accompanies chord_diagram.py.]
--------------------------------------------------------------------------------
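
Note: EyeLinkCoreGraphicsPsychoPy is not run on its own; an experiment script
registers it with Pylink so that calibration drawing is routed through the
PsychoPy window. A minimal sketch of that wiring follows; openGraphicsEx() and
doTrackerSetup() are standard Pylink calls, while the tracker address and
window settings here are illustrative assumptions, not values fixed by the
class:

    import pylink
    from psychopy import visual
    from EyeLinkCoreGraphicsPsychoPy import EyeLinkCoreGraphicsPsychoPy

    # connect to the Host PC; '100.1.1.1' is the usual EyeLink address
    tracker = pylink.EyeLink('100.1.1.1')

    # the class expects a pixel-based PsychoPy window
    win = visual.Window(fullscr=True, units='pix')

    # route all calibration graphics (targets, camera image, beeps)
    # through the custom display defined above
    genv = EyeLinkCoreGraphicsPsychoPy(tracker, win)
    pylink.openGraphicsEx(genv)

    # open the camera setup / calibration / validation screen
    tracker.doTrackerSetup()

Because the class loads 'type.wav', 'error.wav', and 'qbeep.wav' by relative
path, these files must sit in the script's working directory, as they do in
each of the example folders above.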