├── .gitignore ├── .travis.yml ├── CODE_OF_CONDUCT.md ├── LICENSE ├── MANIFEST.in ├── README.md ├── conftest.py ├── examples ├── README.md ├── closed-loop │ └── README.md └── open-loop │ ├── open-loop-log.json │ └── plot_loop.py ├── eyeloop ├── __init__.py ├── config.py ├── constants │ ├── README.md │ ├── __init__.py │ ├── engine_constants.py │ ├── minimum_gui_constants.py │ └── processor_constants.py ├── engine │ ├── README.md │ ├── __init__.py │ ├── engine.py │ ├── models │ │ ├── __init__.py │ │ ├── circular.py │ │ └── ellipsoid.py │ ├── params │ │ └── __init__.py │ └── processor.py ├── extractors │ ├── DAQ.py │ ├── README.md │ ├── __init__.py │ ├── calibration.py │ ├── closed_loop.py │ ├── converter.py │ ├── frametimer.py │ ├── open_loop.py │ ├── template.py │ └── visstim.py ├── guis │ ├── README.md │ ├── __init__.py │ ├── blink_test.py │ └── minimum │ │ ├── README.md │ │ ├── __init__.py │ │ ├── graphics │ │ ├── instructions_md │ │ │ ├── overview.svg │ │ │ ├── rotation.svg │ │ │ └── start.svg │ │ ├── tip_1_cr.png │ │ ├── tip_1_cr_error.png │ │ ├── tip_1_cr_first.png │ │ ├── tip_2_cr.png │ │ ├── tip_3_pupil.png │ │ ├── tip_3_pupil_error.png │ │ ├── tip_4_pupil.png │ │ └── tip_5_start.png │ │ └── minimum_gui.py ├── importers │ ├── README.md │ ├── __init__.py │ ├── cv.py │ ├── importer.py │ └── vimba.py ├── run_eyeloop.py └── utilities │ ├── __init__.py │ ├── argument_parser.py │ ├── file_manager.py │ ├── format_print.py │ ├── general_operations.py │ ├── logging_config.yaml │ ├── parser.py │ └── shared_logging.py ├── misc ├── contributors.md ├── imgs │ ├── aarhusuniversity.svg │ ├── closed-loop.svg │ ├── constant.svg │ ├── contributors.svg │ ├── dandrite.svg │ ├── engine_ill.svg │ ├── extractor_overview.svg │ ├── extractor_scheme.svg │ ├── eyeloop overview.svg │ ├── importer_overview.svg │ ├── logo.svg │ ├── models.svg │ ├── nordicembl.svg │ ├── sample_1.gif │ ├── sample_2.gif │ ├── sample_3.gif │ ├── sample_4.gif │ ├── setup.svg │ ├── software logic.svg │ └── yoneharalab.svg └── travis-sample │ └── Frmd7.m4v ├── requirements.txt ├── requirements_examples.txt ├── requirements_testing.txt ├── setup.py ├── tests ├── test_integration.py └── testdata │ ├── short_human_3blink.mp4 │ └── short_mouse_noblink.m4v └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # OS 10 | .DS_Store 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | .idea/ 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | /tests/reports/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # pyenv 81 | .python-version 82 | 83 | # celery beat schedule file 84 | celerybeat-schedule 85 | 86 | # SageMath parsed files 87 | *.sage.py 88 | 89 | # Environments 90 | .env 91 | .venv 92 | env/ 93 | venv/ 94 | ENV/ 95 | env.bak/ 96 | venv.bak/ 97 | 98 | # Spyder project settings 99 | .spyderproject 100 | .spyproject 101 | 102 | # Rope project settings 103 | .ropeproject 104 | 105 | # mkdocs documentation 106 | /site 107 | 108 | # eyeloop generated files 109 | data/ 110 | *.avi 111 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 4 | - "3.8" 5 | 6 | before_install: 7 | - "pip install -U pip" 8 | - "python setup.py install" 9 | install: 10 | - pip install pytest pandas 11 | dist: xenial 12 | services: 13 | - xvfb 14 | 15 | script: pytest 16 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 
39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at . All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements.txt requirements_testing.txt tox.ini 2 | recursive-include eyeloop/guis/minimum/graphics *.svg *.png 3 | include eyeloop/utilities/logging_config.yaml 4 | 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # EyeLoop [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/simonarvin/eyeloop/issues) [![Build Status](https://travis-ci.com/simonarvin/eyeloop.svg?branch=master)](https://travis-ci.com/simonarvin/eyeloop) ![version](https://img.shields.io/badge/version-0.35--beta-brightgreen) ![lab](https://img.shields.io/badge/yonehara-lab-blue) ![beta](https://img.shields.io/badge/-beta-orange) 2 | 3 |

13 | 14 | EyeLoop is a Python 3-based eye-tracker tailored specifically to dynamic, closed-loop experiments on consumer-grade hardware. Users are encouraged to contribute to EyeLoop's development. 15 | 16 | ## Features ## 17 | - [x] **High-speed** > 1000 Hz on non-specialized hardware (no dedicated processing units necessary). 18 | - [x] Modular, readable, **customizable**. 19 | - [x] **Open-source**, and entirely Python 3. 20 | - [x] **Works on any platform**, easy installation. 21 | 22 | ## Overview ## 23 | - [How it works](#how-it-works) 24 | - [Getting started](#getting-started) 25 | - [Your first experiment](#designing-your-first-experiment) 26 | - [Data](#data) 27 | - [User interface](#graphical-user-interface) 28 | - [Authors](#authors) 29 | - [Examples](https://github.com/simonarvin/eyeloop/blob/master/examples) 30 | - [*EyeLoop Playground*](https://github.com/simonarvin/eyeloop_playground) 31 | 32 | ## How it works ## 33 |

36 | 37 | EyeLoop consists of two functional domains: the engine and the optional modules. The engine performs the eye-tracking, whereas the modules perform optional tasks, such as: 38 | 39 | - Experiments 40 | - Data acquisition 41 | - Importing video sequences to the engine 42 | 43 | > The modules import or extract data from the engine, and are therefore called *Importers* and *Extractors*, respectively. 44 | 45 | One of EyeLoop's most appealing features is its modularity: Experiments are built simply by combining modules with the core Engine. Thus, the Engine has one task only: to compute eye-tracking data based on an *imported* sequence, and offer the generated data for *extraction*. 46 | 47 | > How does [the Engine](https://github.com/simonarvin/eyeloop/blob/master/eyeloop/engine/README.md) work?\ 48 | > How does [the Importer](https://github.com/simonarvin/eyeloop/blob/master/eyeloop/importers/README.md) work?\ 49 | > How does [the Extractor](https://github.com/simonarvin/eyeloop/blob/master/eyeloop/extractors/README.md) work? 50 | 51 | ## Getting started ## 52 | 53 | ### Installation ### 54 | 55 | Install EyeLoop by cloning the repository: 56 | ``` 57 | git clone https://github.com/simonarvin/eyeloop.git 58 | ``` 59 | 60 | >Dependencies: ```python -m pip install -r requirements.txt``` 61 | 62 | >Using pip: 63 | > ```pip install .``` 64 | 65 | You may want to use a Conda or Python virtual environment when 66 | installing `eyeloop`, to avoid mixing up with your system dependencies. 67 | 68 | >Using pip and a virtual environment: 69 | 70 | > ```python -m venv venv``` 71 | 72 | > ```source venv/bin/activate``` 73 | 74 | > ```(venv) pip install .``` 75 | 76 | Alternatively: 77 | 78 | >- numpy: ```python pip install numpy``` 79 | >- opencv: ```python pip install opencv-python``` 80 | 81 | To download full examples with footage, check out EyeLoop's playground repository: 82 | 83 | ``` 84 | git clone https://github.com/simonarvin/eyeloop_playground.git 85 | ``` 86 | 87 | --- 88 | 89 | ### Initiation ### 90 | 91 | EyeLoop is initiated through the command-line utility `eyeloop`. 92 | ``` 93 | eyeloop 94 | ``` 95 | To access the video sequence, EyeLoop must be connected to an appropriate *importer class* module. Usually, the default opencv importer class (*cv*) is sufficient. For some machine vision cameras, however, a vimba-based importer (*vimba*) is neccessary. 96 | ``` 97 | eyeloop --importer cv/vimba 98 | ``` 99 | > [Click here](https://github.com/simonarvin/eyeloop/blob/master/eyeloop/importers/README.md) for more information on *importers*. 100 | 101 | To perform offline eye-tracking, we pass the video argument ```--video``` with the path of the video sequence: 102 | ``` 103 | eyeloop --video [file]/[folder] 104 | ``` 105 |

108 | 109 | EyeLoop can be used on a multitude of eye types, including rodents, human and non-human primates. Specifically, users can suit their eye-tracking session to any species using the ```--model``` argument. 110 | 111 | ``` 112 | eyeloop --model ellipsoid/circular 113 | ``` 114 | > In general, the ellipsoid pupil model is best suited for rodents, whereas the circular model is best suited for primates. 115 | 116 | To learn how to optimize EyeLoop for your video material, see [*EyeLoop Playground*](https://github.com/simonarvin/eyeloop_playground). 117 | 118 | To see all command-line arguments, pass: 119 | 120 | ``` 121 | eyeloop --help 122 | ``` 123 | 124 | ## Designing your first experiment ## 125 | 126 |

129 | 130 | In EyeLoop, experiments are built by stacking modules. By default, EyeLoop imports two base *extractors*, namely a FPS-counter and a data acquisition tool. To add custom extractors, e.g., for experimental purposes, use the argument tag ```--extractors```: 131 | 132 | ``` 133 | eyeloop --extractors [file_path]/p (where p = file prompt) 134 | ``` 135 | 136 | Inside the *extractor* file, or a composite python file containing several *extractors*, define the list of *extractors* to be added: 137 | ```python 138 | extractors_add = [extractor1, extractor2, etc] 139 | ``` 140 | 141 | *Extractors* are instantiated by EyeLoop at start-up. Then, at every subsequent time-step, the *extractor's* ```fetch()``` function is called by the engine. 142 | ```python 143 | class Extractor: 144 | def __init__(self) -> None: 145 | ... 146 | def fetch(self, core) -> None: 147 | ... 148 | ``` 149 | ```fetch()``` gains access to all eye-tracking data in real-time via the *core* pointer. 150 | 151 | > [Click here](https://github.com/simonarvin/eyeloop/blob/master/eyeloop/extractors/README.md) for more information on *extractors*. 152 | 153 | ### Open-loop example ### 154 | 155 | As an example, we'll here design a simple *open-loop* experiment where the brightness of a PC monitor is linked to the phase of the sine wave function. We create anew python-file, say "*test_ex.py*", and in it define the sine wave frequency and phase using the instantiator: 156 | ```python 157 | class Experiment: 158 | def __init__(self) -> None: 159 | self.frequency = ... 160 | self.phase = 0 161 | ``` 162 | Then, by using ```fetch()```, we shift the phase of the sine wave function at every time-step, and use this to control the brightness of a cv-render. 163 | ```python 164 | ... 165 | def fetch(self, engine) -> None: 166 | self.phase += self.frequency 167 | sine = numpy.sin(self.phase) * .5 + .5 168 | brightness = numpy.ones((height, width), dtype=float) * sine 169 | cv2.imshow("Experiment", brightness) 170 | ``` 171 | 172 | To add our test extractor to EyeLoop, we'll need to define an extractors_add array: 173 | ```python 174 | extractors_add = [Experiment()] 175 | ``` 176 | 177 | Finally, we test the experiment by running command: 178 | ``` 179 | eyeloop --extractors path/to/test_ex.py 180 | ``` 181 | 182 | > See [Examples](https://github.com/simonarvin/eyeloop/blob/master/examples) for demo recordings and experimental designs. 183 | 184 | > For extensive test data, see [*EyeLoop Playground*](https://github.com/simonarvin/eyeloop_playground) 185 | 186 | 187 | ## Data ## 188 | EyeLoop produces a json-datalog for each eye-tracking session. The datalog's first column is the timestamp. 189 | The next columns define the pupil (if tracked): 190 | 191 | ```((center_x, center_y), radius1, radius2, angle)``` 192 | 193 | The next columns define the corneal reflection (if tracked): 194 | 195 | ```((center_x, center_y), radius1, radius2, angle)``` 196 | 197 | The next columns contain any data produced by custom Extractor modules 198 | 199 | 200 | ## Graphical user interface ## 201 | The default graphical user interface in EyeLoop is [*minimum-gui*.](https://github.com/simonarvin/eyeloop/blob/master/eyeloop/guis/minimum/README.md) 202 | 203 | > EyeLoop is compatible with custom graphical user interfaces through its modular logic. [Click here](https://github.com/simonarvin/eyeloop/blob/master/eyeloop/guis/README.md) for instructions on how to build your own. 
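For quick offline analysis, the datalog described in the [Data](#data) section can be read line by line. Below is a minimal sketch, assuming the default data-acquisition extractor's one-JSON-object-per-line format with a `time` key and a `pupil` entry shaped as described above (adapt the file path and keys to your session and extractors):

```python
import json

# Minimal sketch: read an EyeLoop datalog written by the default data-acquisition extractor.
# Assumes one JSON object per line, a "time" key, and, when the pupil is tracked,
# a "pupil" entry of the form ((center_x, center_y), radius1, radius2, angle).
with open("datalog.json") as log:
    for line in log:
        entry = json.loads(line)
        timestamp = entry["time"]
        pupil = entry.get("pupil")
        if pupil:
            (center_x, center_y), radius1, radius2, angle = pupil
            print(timestamp, center_x, center_y)
```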
204 | 205 | ## Running unit tests ## 206 | 207 | Install testing requirements by running in a terminal: 208 | 209 | `pip install -r requirements_testing.txt` 210 | 211 | Then run tox: `tox` 212 | 213 | Reports and results will be outputted to `/tests/reports` 214 | 215 | 216 | ## Known issues ## 217 | - [ ] Respawning/freezing windows when running *minimum-gui* in Ubuntu. 218 | 219 | ## References ## 220 | If you use any of this code or data, please cite [Arvin et al. 2021] ([article](https://www.frontiersin.org/articles/10.3389/fncel.2021.779628/full)). 221 | ```latex 222 | 223 | @ARTICLE{Arvin2021-tg, 224 | title = "{EyeLoop}: An open-source system for high-speed, closed-loop 225 | eye-tracking", 226 | author = "Arvin, Simon and Rasmussen, Rune and Yonehara, Keisuke", 227 | journal = "Front. Cell. Neurosci.", 228 | volume = 15, 229 | pages = "494", 230 | year = 2021 231 | } 232 | 233 | ``` 234 | 235 | ## License ## 236 | This project is licensed under the GNU General Public License v3.0. Note that the software is provided "as is", without warranty of any kind, express or implied. 237 | 238 | ## Authors ## 239 | 240 | **Lead Developer:** 241 | Simon Arvin, sarv@dandrite.au.dk 242 |

245 | 246 | **Researchers:** 247 | 248 | - Simon Arvin, sarv@dandrite.au.dk 249 | - Rune Rasmussen, runerasmussen@biomed.au.dk 250 | - Keisuke Yonehara, keisuke.yonehara@dandrite.au.dk 251 | 252 | **Corresponding Author:** 253 | Keisuke Yonehara, keisuke.yonehara@dandrite.au.dk

254 | 255 | --- 256 |

266 | -------------------------------------------------------------------------------- /conftest.py: -------------------------------------------------------------------------------- 1 | # Extra configuration for pytest 2 | # The presence of this file in {project_dir} will force pytest to add the dir to PYTHONPATH 3 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples # 2 | 3 | These examples are described in-depth in the [preprint article](https://www.biorxiv.org/content/10.1101/2020.07.03.186387v1). They are described in a more programmatic perspective in this repository. 4 | Click into each folder to get started! 5 | 6 |

9 | -------------------------------------------------------------------------------- /examples/closed-loop/README.md: -------------------------------------------------------------------------------- 1 | # Closed-loop experiment # 2 | 3 |

*Fig. Closed-loop experiment exhibiting properties of dynamical systems. (A) Closed-loop experiment using reciprocal feedback. Monitor brightness is set as a function of the pupil area. (B) State velocity, v, depends on pupil area, A. (C) Four trials of the closed loop with differing parameters showing distinct dynamic behavior.*

7 | 8 | One of EyeLoop’s most appealing applications is closed-loop experiments (Fig). To demonstrate this, we designed an extractor to use the pupil area to modulate the brightness of a monitor, in effect a reciprocal feedback loop: Here, the light reflex causes the pupil to dilate in dim settings, which causes the extractor to increase monitor brightness. In turn, this causes the pupil to constrict, causing the extractor to decrease brightness and return the experiment to its initial state. 9 | 10 | The brightness formula contains four critical variables (Fig B): The rate of change, I, which is dependent on the pupil area, and its scalar, q. The velocity, v, which applies the rate of change to monitor brightness, and, the velocity friction, f, which decays the velocity towards zero. Interestingly, by varying these parameters, we observe behaviors characteristic of dynamical systems: For the reference and the slow decay trials, we find emergent limit-cycle oscillations (Fig C). This dynamic is dramatically impaired by a small scalar, and abolished in the low rate trial. These findings illustrate how a simple closed-loop experiment may generate self-sustaining dynamics emerging from the eyes engaging with the system, and the system engaging with the eyes. 11 | 12 | ## How to reproduce ## 13 | In *run_eyeloop.py*, import the closed-loop *Extractor* module and the *calibrator*: 14 | ```python 15 | from eyeloop.extractors.closed_loop import ClosedLoop_Extractor 16 | from eyeloop.extractors.calibration import Calibration_Extractor 17 | ``` 18 | 19 | First, load the *calibrator*: 20 | ```python 21 | ENGINE.load_extractors(Calibration_Extractor()) 22 | ``` 23 | 24 |
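For reference, the reciprocal feedback described above reduces to a small update rule. The sketch below follows the variable names used in *closed_loop.py* (`q_coef`, `I_coef`, `friction`; `basesize` derives from the calibration value) and is illustrative only: the actual extractor additionally steps through a protocol of trials.

```python
# Illustrative sketch of the brightness update in closed_loop.py (reference parameters).
# The rate of change depends on the pupil area (scaled by q_coef), the velocity applies it
# to the monitor brightness, and friction decays the velocity towards zero.
def update(brightness, velocity, pupil_area, basesize,
           q_coef=0.001, I_coef=0.01, friction=0.1):
    velocity += (brightness - q_coef * pupil_area ** 2 / basesize) * I_coef
    velocity -= velocity * friction
    brightness = min(max(brightness - velocity, 0.0), 1.0)  # clamp to [0, 1]
    return brightness, velocity
```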

27 | 28 | Position a PC monitor in front of the eye of the subject, turn off the lights and run the experiment. 29 | ``` 30 | eyeloop 31 | ``` 32 | 33 | > Note: Adjust the width, height and x, y coordinates of the visual stimulus (inside calibration.py, closed_loop.py to fit your setup. 34 | 35 | This returns a calibration value (saved in file format ```{time_stamp}._cal_```). Now, load the closed-loop *Extractor* and paste this value inside ```{time_stamp}._cal_``` as the first parameter: 36 | ```python 37 | ENGINE.load_extractors(ClosedLoop_Extractor(_CAL_)) 38 | ``` 39 | 40 | For example, 41 | 42 | ```python 43 | ENGINE.load_extractors(ClosedLoop_Extractor(794.58)) 44 | ``` 45 | 46 | That's it! Enjoy your experiment. 47 | 48 | ``` 49 | eyeloop 50 | ``` 51 | 52 | > Note: If you're using a Vimba-based camera, use command ```eyeloop --importer vimba``` 53 | -------------------------------------------------------------------------------- /examples/open-loop/plot_loop.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from eyeloop.utilities.parser import Parser 3 | import matplotlib.pyplot as plt 4 | import matplotlib.gridspec as gridspec 5 | import matplotlib.ticker as mticker 6 | 7 | 8 | class Loop_parser(Parser): 9 | def __init__(self): 10 | super().__init__(animal='human') 11 | self.set_style() 12 | 13 | def custom_lower_panel_ticks(self, y, pos) -> str: 14 | """ 15 | Changes brightness-ticks to 'Dark' and 'Light'. 16 | """ 17 | 18 | if y == 0: 19 | return "Light" 20 | elif y == 1: 21 | return "Dark" 22 | 23 | def set_style(self): 24 | """ 25 | Sets the overall style of the plots. 26 | """ 27 | 28 | self.color = ["k", "orange", "b", "g", "red", "purple"] 29 | plt.rcParams.update({'font.family': "Arial"}) 30 | plt.rcParams.update({'font.weight': "regular"}) 31 | plt.rcParams.update({'axes.linewidth': 1}) 32 | 33 | def segmentize(self, key: str) -> np.ndarray: 34 | """ 35 | Segmentizes the data log based on a key signal, such as a trigger. 36 | """ 37 | 38 | segments = [] 39 | for index, entry in enumerate(self.data): 40 | if key in entry and entry[key] == 1: 41 | segments.append(index) 42 | return np.array(segments) 43 | 44 | def plot_open_loop(self, rows: int = 2, columns: int = 3) -> None: 45 | """ 46 | Retrieves and parses the open-loop demo data set. 47 | """ 48 | 49 | print("Select the open-loop data log demo.") 50 | self.load_log() 51 | 52 | print("Computing pupil area.") 53 | pupil_area = self.compute_area() 54 | 55 | print("Extracting monitor brightness.") 56 | monitor_brightness = self.extract_unique_key("open_looptest") 57 | 58 | print("Extracting time-stamps.") 59 | time = self.extract_time() 60 | 61 | print("Segmentizing trial based on 'trigger' entries.") 62 | _segments = self.segmentize("trigger") 63 | 64 | print("Prepares {}x{} grid plot.".format(rows, columns)) 65 | fig = plt.figure(figsize=(5, 4)) 66 | fig.tight_layout() 67 | main_grid = gridspec.GridSpec(columns, rows, hspace=1, wspace=0.3) 68 | 69 | margin = 50 70 | for grid_index, _ in enumerate(_segments): 71 | segment_index = grid_index * 2 72 | if segment_index == len(_segments) or grid_index == rows * columns: 73 | break 74 | 75 | # We extend each segment by a margin. 76 | # We define the pupil area and monitor brightness values based on this 'padded' segment. 
77 | start_crop = _segments[segment_index] - margin 78 | end_crop = _segments[segment_index + 1] + margin 79 | pupil_area_segment = pupil_area[start_crop: end_crop] 80 | monitor_brightness_segment = monitor_brightness[start_crop: end_crop] 81 | 82 | # We extend the time-stamps similarly. 83 | # However, to align the time-line to the segment's start, we add the margin to 'time-zero'. 84 | time_segment = time[start_crop: end_crop] 85 | time_zero = time_segment[margin] 86 | segment_duration = [entry - time_zero for entry in time_segment] 87 | 88 | # We define a 2x1 grid for the pupil area plot and the monitor brightness plot. 89 | segment_grid = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=main_grid[grid_index], hspace=0.0, 90 | height_ratios=[3, 2]) 91 | 92 | # We add the upper panel and the lower panel. 93 | upper_panel = fig.add_subplot(segment_grid[0]) 94 | lower_panel = fig.add_subplot(segment_grid[1], sharex=upper_panel) 95 | 96 | # We fix some graphical details, such as removing axes spines. 97 | lower_panel.axis(ymin=-.3, ymax=1.3) 98 | lower_panel.yaxis.set_major_formatter(mticker.FuncFormatter(self.custom_lower_panel_ticks)) 99 | lower_panel.spines['right'].set_visible(False) 100 | lower_panel.yaxis.set_ticks_position('left') 101 | 102 | upper_panel.spines['right'].set_visible(False) 103 | upper_panel.spines['top'].set_visible(False) 104 | upper_panel.yaxis.set_ticks_position('left') 105 | upper_panel.xaxis.set_ticks_position('bottom') 106 | 107 | # Finally, we plot the data. 108 | upper_panel.plot(segment_duration, pupil_area_segment, self.color[grid_index], linewidth=2) 109 | lower_panel.plot(segment_duration, monitor_brightness_segment, "k", linewidth=1) 110 | 111 | print("Showing data plots.") 112 | plt.show() 113 | 114 | 115 | parser = Loop_parser() 116 | parser.plot_open_loop() 117 | -------------------------------------------------------------------------------- /eyeloop/__init__.py: -------------------------------------------------------------------------------- 1 | from eyeloop import config 2 | from eyeloop import constants 3 | from eyeloop import engine 4 | from eyeloop import extractors 5 | from eyeloop import guis 6 | from eyeloop import importers 7 | from eyeloop import run_eyeloop 8 | from eyeloop import utilities 9 | 10 | __all__ = [ 11 | "constants", 12 | "engine", 13 | "extractors", 14 | "guis", 15 | "importers", 16 | "utilities", 17 | "run_eyeloop", 18 | "config" 19 | ] 20 | -------------------------------------------------------------------------------- /eyeloop/config.py: -------------------------------------------------------------------------------- 1 | version = "0.35-beta" 2 | importer = 0 3 | eyeloop = 0 4 | engine = 0 5 | arguments = 0 6 | file_manager = 0 7 | graphical_user_interface = 0 8 | 9 | #blink = 142.08 10 | import numpy as np 11 | blink = np.zeros(300, dtype=np.float64) 12 | 13 | blink_i = 0 14 | -------------------------------------------------------------------------------- /eyeloop/constants/README.md: -------------------------------------------------------------------------------- 1 | # Constants # 2 | 3 |

6 | 7 | EyeLoop's engine, *Shape* processors, and *minimum-gui* have constants, that are rarely changed in updates. These include: 8 | - Color scheme (*minimum-gui*). 9 | - Walk-out parameters (steps, angular increments) 10 | - Corneal reflection filtering parameters. 11 | 12 | The constants are static during run-time, but users are free to modify them. 13 | > Hint: Consider playing around with the walk-out parameters (*engine_constants.py*) to improve detection in your recordings. 14 | -------------------------------------------------------------------------------- /eyeloop/constants/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/constants/__init__.py -------------------------------------------------------------------------------- /eyeloop/constants/engine_constants.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | angular_iter = 12 4 | angular_range = np.arange(angular_iter, dtype=np.int8) 5 | 6 | anglesteps_cos = np.array([np.cos(np.radians(i * 360 / angular_iter)) for i in angular_range], dtype=np.float64) 7 | anglesteps_sin = np.array([np.sin(np.radians(i * 360 / angular_iter)) for i in angular_range], dtype=np.float64) 8 | number_row = np.arange(1, len(anglesteps_cos) + 1, 1) 9 | zeros = np.zeros(len(number_row), dtype=int) 10 | -------------------------------------------------------------------------------- /eyeloop/constants/minimum_gui_constants.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | 3 | # rgb color codes 4 | green = (0, 220, 0) 5 | black = (0, 0, 0) 6 | red = (0, 0, 220) 7 | blue = (200, 50, 0) 8 | pink = (200, 0, 200) 9 | bluish = (255, 74, 98) 10 | 11 | font = cv2.FONT_HERSHEY_PLAIN 12 | -------------------------------------------------------------------------------- /eyeloop/constants/processor_constants.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import eyeloop.config as config 3 | 4 | 5 | angular_iter = 24 6 | angular_range = np.arange(angular_iter, dtype=np.int8) 7 | point_source = np.zeros(angular_iter, dtype=np.float64) 8 | step_list_source = np.zeros(angular_iter, dtype=np.int8) 9 | 10 | diagonal_size = 2**10 11 | 12 | step_size = np.deg2rad(360 / angular_iter) 13 | limit = np.arange(250) # max size of shape; normalize qqqq 14 | cos_sin_steps = np.array([(np.cos(i * step_size), np.sin(i * step_size)) for i in angular_range], dtype=np.float64) 15 | 16 | kernel = np.ones((1, 1), np.uint8) 17 | 18 | main_diagonal = np.eye(diagonal_size, diagonal_size, dtype=bool) 19 | 20 | #atan(1/2.414) = 22.5 ~atan(1/2) = 26.57 deg 21 | 22 | half_diagonal = np.full((diagonal_size, diagonal_size), False, dtype=bool) 23 | fourth_diagonal = half_diagonal.copy() 24 | third_diagonal = half_diagonal.copy() 25 | onefourth = 1/4 26 | onethird = 1/3 27 | 28 | 29 | invhalf_diagonal = half_diagonal.copy() 30 | invfourth_diagonal = half_diagonal.copy() 31 | invthird_diagonal = half_diagonal.copy() 32 | 33 | for i, _ in enumerate(half_diagonal): 34 | half_diagonal[int(i/2), i] = True 35 | fourth_diagonal[int(i/4), i] = True 36 | third_diagonal[int(i/3), i] = True 37 | 38 | invhalf_diagonal[i, int(i/2)] = True 39 | invfourth_diagonal[i, int(i/4)] = True 40 | invthird_diagonal[i, int(i/3)] = True 41 | 42 | 43 | rr_stock = np.zeros((32), dtype=np.float64) 44 | 45 
| rr_2d = np.zeros((32, 2), dtype=np.float64) 46 | rr_2d_cr = np.zeros((4, 2), dtype=np.float64) 47 | 48 | rx_multiply = np.ones((32), dtype=np.float64) 49 | ry_multiply = rx_multiply.copy() 50 | 51 | crop_stock = np.zeros((32), dtype=int) 52 | crop_stock_cr = np.zeros((4), dtype=int) 53 | center_shape = (2, 31) 54 | 55 | 56 | onehalf_ry_add = [8,10,12,14] 57 | onehalf_rx_add = [8,11,12,15] 58 | onehalf_rx_subtract = [9,10,13,14] 59 | onehalf_ry_subtract = [9,11,13,15] 60 | onehalf_ry_multiplier = [8,9,10,11] 61 | onehalf_rx_multiplier = [12,13,14,15] 62 | 63 | 64 | onefourth_ry_add = [16,19,20,21] 65 | onefourth_rx_add = [16,17,20,23] 66 | onefourth_rx_subtract = [18,19,21,22] 67 | onefourth_ry_subtract = [17,18,22,23] 68 | onefourth_ry_multiplier = [16,17,18,19] 69 | onefourth_rx_multiplier = [20,21,22,23] 70 | 71 | 72 | onethird_ry_add = [24,25,28,29] 73 | onethird_rx_add = [24,27,28,31] 74 | onethird_rx_subtract = [25,26,29,30] 75 | onethird_ry_subtract = [26,27,30,31] 76 | onethird_ry_multiplier = [24,25,26,27] 77 | onethird_rx_multiplier = [28,29,30,31] 78 | 79 | 80 | rx_multiplied = np.array(np.concatenate((onehalf_rx_multiplier, onefourth_rx_multiplier, onethird_rx_multiplier)), dtype=int) 81 | ry_multiplied = np.array(np.concatenate((onehalf_ry_multiplier, onefourth_ry_multiplier, onethird_ry_multiplier)), dtype=int) 82 | ones_ = np.ones(4, dtype=np.float64) 83 | rx_multiply = np.array(np.concatenate((ones_ * .5, ones_ * onefourth, ones_*onethird))) 84 | 85 | ry_multiply = np.array(np.concatenate((ones_ * .5, ones_ * onefourth, ones_*onethird))) 86 | 87 | #rx_multiply[onethird_rx_multiplier] = onethird 88 | #rx_multiply[onefourth_rx_multiplier] = onefourth 89 | #rx_multiply[onehalf_rx_multiplier] = .5 90 | 91 | #ry_multiply[onethird_ry_multiplier] = onethird 92 | #ry_multiply[onefourth_ry_multiplier] = onefourth 93 | #ry_multiply[onehalf_ry_multiplier] = .5 94 | 95 | 96 | ry_add = np.array(np.concatenate(([0, 2, 4],onehalf_ry_add,onefourth_ry_add,onethird_ry_add)),dtype=int) 97 | rx_add = np.array(np.concatenate(([1, 2, 5],onehalf_rx_add,onefourth_rx_add,onethird_rx_add)), dtype=int) 98 | 99 | 100 | ry_subtract = np.array(np.concatenate(([3, 5, 7],onehalf_ry_subtract,onefourth_ry_subtract,onethird_ry_subtract ))) 101 | 102 | 103 | rx_subtract = np.array(np.concatenate(([3, 4, 6],onehalf_rx_subtract,onefourth_rx_subtract,onethird_rx_subtract))) 104 | 105 | 106 | 107 | black = [35, 35, 35] 108 | 109 | angle_dev = -22.5 110 | -------------------------------------------------------------------------------- /eyeloop/engine/README.md: -------------------------------------------------------------------------------- 1 | # Engine # 2 |

5 | 6 | The engine processes each frame of the video sequentially. First, the user selects the corneal reflections, then the pupil. The frame is binarized, filtered, and smoothed by a gaussian kernel. Then, the engine utilizes a walk-out algorithm to detect contours. This produces a matrix of points, which is filtered to discard bad matches. Using the corneal reflections, any overlap between the corneal reflections and the pupil is removed. Finally, the shape is parameterized by a fitting model: either an ellipsoid (suitable for rodents, cats, etc.), or a circle model (human, non-human primates, rodents, etc.). The target species is easily changed: 7 | ``` 8 | python eyeloop/run_eyeloop.py --model circular/ellipsoid 9 | ``` 10 | Lastly, the data is formatted in JSON and passed to all modules, such as for rendering, or data acquisition and experiments. 11 | 12 | ## Shape processors ## 13 | EyeLoop's engine communicates with the *Shape* class, which processes the walkout contour detection. Accordingly, at least two *Shape*'s are defined by the instantiator, one for the pupil and *n* for the corneal reflections: 14 | 15 | ```python 16 | class Engine: 17 | def __init__(self, ...): 18 | max_cr_processor = 3 #max number of corneal reflections 19 | self.cr_processors = [Shape(self, type = 2) for _ in range(max_cr_processor)] 20 | self.pupil_processor= Shape(self) 21 | ... 22 | 23 | ``` 24 | -------------------------------------------------------------------------------- /eyeloop/engine/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/engine/__init__.py -------------------------------------------------------------------------------- /eyeloop/engine/engine.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | from typing import Optional 4 | from os.path import dirname, abspath 5 | import glob, os 6 | 7 | import cv2 8 | 9 | import eyeloop.config as config 10 | from eyeloop.constants.engine_constants import * 11 | from eyeloop.engine.processor import Shape 12 | from eyeloop.utilities.general_operations import to_int, tuple_int 13 | 14 | logger = logging.getLogger(__name__) 15 | PARAMS_DIR = f"{dirname(dirname(abspath(__file__)))}/engine/params" 16 | 17 | class Engine: 18 | def __init__(self, eyeloop): 19 | 20 | self.live = True # Access this to check if Core is running. 21 | 22 | self.eyeloop = eyeloop 23 | self.model = config.arguments.model # Used for assigning appropriate circular model. 24 | 25 | self.extractors = [] 26 | 27 | if config.arguments.tracking == 0: # Recording mode. --tracking 0 28 | self.iterate = self.record 29 | else: # Tracking mode. --tracking 1 (default) 30 | self.iterate = self.track 31 | 32 | self.angle = 0 33 | 34 | self.cr_processor_1 = Shape(type = 2, n = 1) 35 | self.cr_processor_2 = Shape(type = 2, n = 2) 36 | self.pupil_processor = Shape() 37 | 38 | # Via "gui", assign "refresh_pupil" to function "processor.refresh_source" 39 | # when the pupil has been selected. 40 | self.refresh_pupil = lambda x: None 41 | 42 | def load_extractors(self, extractors: list = None) -> None: 43 | if extractors is None: 44 | extractors = [] 45 | logger.info(f"loading extractors: {extractors}") 46 | self.extractors = extractors 47 | 48 | 49 | def run_extractors(self) -> None: 50 | """ 51 | Calls all extractors at the end of each time-step. 
52 | Assign additional extractors to core engine via eyeloop.py. 53 | """ 54 | 55 | for extractor in self.extractors: 56 | try: 57 | extractor.fetch(self) 58 | except Exception as e: 59 | print("Error in module class: {}".format(extractor.__name__)) 60 | print("Error message: ", e) 61 | 62 | def record(self) -> None: 63 | """ 64 | Runs Core engine in record mode. Timestamps all frames in data output log. 65 | Runs gui update_record function with no tracking. 66 | Argument -s 1 67 | """ 68 | 69 | timestamp = time.time() 70 | 71 | self.dataout = { 72 | "time": timestamp 73 | } 74 | 75 | config.graphical_user_interface.update_record(self.source) 76 | 77 | self.run_extractors() 78 | 79 | def arm(self, width, height, image) -> None: 80 | 81 | self.width, self.height = width, height 82 | config.graphical_user_interface.arm(width, height) 83 | self.center = (width//2, height//2) 84 | 85 | self.iterate(image) 86 | 87 | if config.arguments.blinkcalibration != "": 88 | config.blink = np.load(config.arguments.blinkcalibration) 89 | self.blink_sampled = lambda _:None 90 | logger.info("(success) blink calibration loaded") 91 | 92 | if config.arguments.clear == False or config.arguments.params != "": 93 | 94 | try: 95 | if config.arguments.params != "": 96 | latest_params = max(glob.glob(config.arguments.params), key=os.path.getctime) 97 | 98 | print(config.arguments.params + " loaded") 99 | 100 | else: 101 | latest_params = max(glob.glob(PARAMS_DIR + "/*.npy"), key=os.path.getctime) 102 | 103 | params_ = np.load(latest_params, allow_pickle=True).tolist() 104 | 105 | self.pupil_processor.binarythreshold, self.pupil_processor.blur = params_["pupil"][0], params_["pupil"][1] 106 | 107 | self.cr_processor_1.binarythreshold, self.cr_processor_1.blur = params_["cr1"][0], params_["cr1"][1] 108 | self.cr_processor_2.binarythreshold, self.cr_processor_2.blur = params_["cr2"][0], params_["cr2"][1] 109 | 110 | print("(!) Parameters reloaded. 
Run --clear 1 to prevent this.") 111 | 112 | 113 | param_dict = { 114 | "pupil" : [self.pupil_processor.binarythreshold, self.pupil_processor.blur], 115 | "cr1" : [self.cr_processor_1.binarythreshold, self.cr_processor_1.blur], 116 | "cr2" : [self.cr_processor_2.binarythreshold, self.cr_processor_2.blur] 117 | } 118 | 119 | logger.info(f"loaded parameters:\n{param_dict}") 120 | 121 | return 122 | 123 | except: 124 | pass 125 | 126 | 127 | filtered_image = image[np.logical_and((image < 220), (image > 30))] 128 | self.pupil_processor.binarythreshold = np.min(filtered_image) * 1 + np.median(filtered_image) * .1#+ 50 129 | self.cr_processor_1.binarythreshold = self.cr_processor_2.binarythreshold = float(np.min(filtered_image)) * .7 + 150 130 | 131 | param_dict = { 132 | "pupil" : [self.pupil_processor.binarythreshold, self.pupil_processor.blur], 133 | "cr1" : [self.cr_processor_1.binarythreshold, self.cr_processor_1.blur], 134 | "cr2" : [self.cr_processor_2.binarythreshold, self.cr_processor_2.blur] 135 | } 136 | 137 | logger.info(f"loaded parameters:\n{param_dict}") 138 | 139 | 140 | def blink_sampled(self, t:int = 1): 141 | 142 | if t == 1: 143 | if config.blink_i% 20 == 0: 144 | print(f"calibrating blink detector {round(config.blink_i/config.blink.shape[0]*100,1)}%") 145 | else: 146 | logger.info("(success) blink detection calibrated") 147 | path = f"{config.file_manager.new_folderpath}/blinkcalibration_{self.dataout['time']}.npy" 148 | np.save(path, config.blink) 149 | print("blink calibration file saved") 150 | 151 | def track(self, img) -> None: 152 | """ 153 | Executes the tracking algorithm on the pupil and corneal reflections. 154 | First, blinking is analyzed. 155 | Second, corneal reflections are detected. 156 | Third, corneal reflections are inverted at pupillary overlap. 157 | Fourth, pupil is detected. 158 | Finally, data is logged and extractors are run. 159 | """ 160 | mean_img = np.mean(img) 161 | try: 162 | 163 | config.blink[config.blink_i] = mean_img 164 | config.blink_i += 1 165 | self.blink_sampled(1) 166 | 167 | except IndexError: 168 | self.blink_sampled(0) 169 | self.blink_sampled = lambda _:None 170 | config.blink_i = 0 171 | 172 | self.dataout = { 173 | "time": time.time() 174 | } 175 | 176 | if np.abs(mean_img - np.mean(config.blink[np.nonzero(config.blink)])) > 10: 177 | 178 | self.dataout["blink"] = 1 179 | self.pupil_processor.fit_model.params = None 180 | logger.info("Blink detected.") 181 | else: 182 | 183 | self.pupil_processor.track(img) 184 | 185 | self.cr_processor_1.track(img) 186 | #self.cr_processor_2.track(img.copy(), img) 187 | 188 | 189 | try: 190 | config.graphical_user_interface.update(img) 191 | except Exception as e: 192 | logger.exception("Did you assign the graphical user interface (GUI) correctly? Attempting to release()") 193 | self.release() 194 | return 195 | 196 | self.run_extractors() 197 | 198 | def activate(self) -> None: 199 | """ 200 | Activates all extractors. 201 | The extractor activate() function is optional. 202 | """ 203 | 204 | for extractor in self.extractors: 205 | try: 206 | extractor.activate() 207 | except AttributeError: 208 | logger.warning(f"Extractor {extractor} has no activate() method") 209 | 210 | def release(self) -> None: 211 | """ 212 | Releases/deactivates all running process, i.e., importers, extractors. 
213 | """ 214 | try: 215 | config.graphical_user_interface.out.release() 216 | except: 217 | pass 218 | 219 | param_dict = { 220 | "pupil" : [self.pupil_processor.binarythreshold, self.pupil_processor.blur], 221 | "cr1" : [self.cr_processor_1.binarythreshold, self.cr_processor_1.blur], 222 | "cr2" : [self.cr_processor_2.binarythreshold, self.cr_processor_2.blur] 223 | } 224 | 225 | path = f"{config.file_manager.new_folderpath}/params_{self.dataout['time']}.npy" 226 | np.save(path, param_dict) 227 | print("Parameters saved") 228 | 229 | self.live = False 230 | config.graphical_user_interface.release() 231 | 232 | 233 | for extractor in self.extractors: 234 | try: 235 | extractor.release(self) 236 | except AttributeError: 237 | logger.warning(f"Extractor {extractor} has no release() method") 238 | else: 239 | pass 240 | 241 | config.importer.release() 242 | -------------------------------------------------------------------------------- /eyeloop/engine/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/engine/models/__init__.py -------------------------------------------------------------------------------- /eyeloop/engine/models/circular.py: -------------------------------------------------------------------------------- 1 | # original script: https://github.com/AlliedToasters/circle-fit 2 | # original script author: Michael Klear/AlliedToasters 3 | # hyper-fit doi: https://doi.org/10.1016/j.csda.2010.12.012 4 | # hyper-fit authors: Kenichi Kanatani & Prasanna Rangarajan 5 | 6 | import numpy as np 7 | np.seterr('raise') 8 | 9 | from eyeloop.utilities.general_operations import tuple_int 10 | 11 | 12 | class Circle: 13 | def __init__(self, processor) -> None: 14 | self.shape_processor = processor 15 | self.fit = self.hyper_fit 16 | self.params = None 17 | 18 | def hyper_fit(self, r) -> tuple: 19 | """ 20 | Fits coords to circle using hyperfit algorithm. 
21 | Inputs: 22 | - coords, list or numpy array with len>2 of the form: 23 | [ 24 | [x_coord, y_coord], 25 | ..., 26 | [x_coord, y_coord] 27 | ] 28 | or numpy array of shape (n, 2) 29 | Outputs: 30 | - xc : x-coordinate of solution center (float) 31 | - yc : y-coordinate of solution center (float) 32 | - R : Radius of solution (float) 33 | - residu : s, sigma - variance of data wrt solution (float) 34 | """ 35 | X, Y = r[:,0], r[:,1] 36 | n = X.shape[0] 37 | 38 | mean_X = np.mean(X) 39 | mean_Y = np.mean(Y) 40 | Xi = X - mean_X 41 | Yi = Y - mean_Y 42 | Xi_sq = Xi**2 43 | Yi_sq = Yi**2 44 | Zi = Xi_sq + Yi_sq 45 | 46 | # compute moments 47 | 48 | Mxy = np.sum(Xi * Yi) / n 49 | Mxx = np.sum(Xi_sq) / n 50 | Myy = np.sum(Yi_sq) / n 51 | Mxz = np.sum(Xi * Zi) / n 52 | Myz = np.sum(Yi * Zi) / n 53 | 54 | Mz = Mxx + Myy 55 | 56 | # finding the root of the characteristic polynomial 57 | 58 | det = (Mxx * Myy - Mxy**2)*2 59 | #print(det) 60 | try: 61 | Xcenter = (Mxz * Myy - Myz * Mxy)/ det 62 | Ycenter = (Myz * Mxx - Mxz * Mxy)/ det 63 | except: 64 | return False 65 | 66 | x = Xcenter + mean_X 67 | y = Ycenter + mean_Y 68 | r = np.sqrt(Xcenter ** 2 + Ycenter ** 2 + Mz) 69 | self.params = ((x, y), r, r, 0) 70 | #self.center, self.width, self.height, self.angle = self.params 71 | 72 | return self.params[0] 73 | -------------------------------------------------------------------------------- /eyeloop/engine/models/ellipsoid.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | np.seterr('raise') 3 | 4 | from eyeloop.utilities.general_operations import tuple_int 5 | 6 | """Demonstration of least-squares fitting of ellipses 7 | __author__ = "Ben Hammel, Nick Sullivan-Molina" 8 | __credits__ = ["Ben Hammel", "Nick Sullivan-Molina"] 9 | __maintainer__ = "Ben Hammel" 10 | __email__ = "bdhammel@gmail.com" 11 | __status__ = "Development" 12 | Requirements 13 | ------------ 14 | Python 2.X or 3.X 15 | np 16 | matplotlib 17 | References 18 | ---------- 19 | (*) Halir, R., Flusser, J.: 'Numerically Stable Direct Least Squares 20 | Fitting of Ellipses' 21 | (**) http://mathworld.wolfram.com/Ellipse.html 22 | (***) White, A. McHale, B. 'Faraday rotation data analysis with least-squares 23 | elliptical fitting' 24 | """ 25 | 26 | 27 | class Ellipse: 28 | def __init__(self, processor): 29 | self.shape_processor = processor 30 | self.params = None 31 | 32 | def fit(self, r): 33 | """Least Squares fitting algor6ithm 34 | Theory taken from (*) 35 | Solving equation Sa=lCa. with a = |a b c d f g> and a1 = |a b c> 36 | a2 = |d f g> 37 | Args 38 | ---- 39 | data (list:list:float): list of two lists containing the x and y data of the 40 | ellipse. of the form [[x1, x2, ..., xi],[y1, y2, ..., yi]] 41 | Returns 42 | ------ 43 | coef (list): list of the coefficients describing an ellipse 44 | [a,b,c,d,f,g] corresponding to ax**2+2bxy+cy**2+2dx+2fy+g 45 | """ 46 | 47 | x, y = r[:,0], r[:,1] 48 | 49 | 50 | # Quadratic part of design matrix [eqn. 15] from (*) 51 | 52 | D1 = np.mat(np.vstack([x ** 2, x * y, y ** 2])).T 53 | # Linear part of design matrix [eqn. 16] from (*) 54 | D2 = np.mat(np.vstack([x, y, np.ones(len(x))])).T 55 | 56 | # forming scatter matrix [eqn. 17] from (*) 57 | S1 = D1.T * D1 58 | S2 = D1.T * D2 59 | S3 = D2.T * D2 60 | 61 | # Constraint matrix [eqn. 18] 62 | C1 = np.mat('0. 0. 2.; 0. -1. 0.; 2. 0. 0.') 63 | 64 | # Reduced scatter matrix [eqn. 29] 65 | M = C1.I * (S1 - S2 * S3.I * S2.T) 66 | 67 | # M*|a b c >=l|a b c >. 
Find eigenvalues and eigenvectors from this equation [eqn. 28] 68 | eval, evec = np.linalg.eig(M) 69 | 70 | # eigenvector must meet constraint 4ac - b^2 to be valid. 71 | cond = 4 * np.multiply(evec[0, :], evec[2, :]) - np.power(evec[1, :], 2) 72 | a1 = evec[:, np.nonzero(cond.A > 0)[1]] 73 | # self.fitscore=eval[np.nonzero(cond.A > 0)[1]] 74 | 75 | # |d f g> = -S3^(-1)*S2^(T)*|a b c> [eqn. 24] 76 | #a2 = -S3.I * S2.T * a1 77 | 78 | # eigenvectors |a b c d f g> 79 | self.coef = np.vstack([a1, -S3.I * S2.T * a1]) 80 | 81 | 82 | """finds the important parameters of the fitted ellipse 83 | 84 | Theory taken form http://mathworld.wolfram 85 | Args 86 | ----- 87 | coef (list): list of the coefficients describing an ellipse 88 | [a,b,c,d,f,g] corresponding to ax**2+2bxy+cy**2+2dx+2fy+g 89 | Returns 90 | _______ 91 | center (List): of the form [x0, y0] 92 | width (float): major axis 93 | height (float): minor axis 94 | phi (float): rotation of major axis form the x-axis in radians 95 | """ 96 | 97 | # eigenvectors are the coefficients of an ellipse in general form 98 | # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***) 99 | a = self.coef[0, 0] 100 | b = self.coef[1, 0] / 2. 101 | c = self.coef[2, 0] 102 | d = self.coef[3, 0] / 2. 103 | f = self.coef[4, 0] / 2. 104 | g = self.coef[5, 0] 105 | 106 | # if (a - c) == 0: 107 | # return True 108 | 109 | # finding center of ellipse [eqn.19 and 20] from (**) 110 | af = a * f 111 | cd = c * d 112 | bd = b * d 113 | ac = a * c 114 | 115 | b_sq = b ** 2. 116 | z_ = (b_sq - ac) 117 | x0 = (cd - b * f) / z_#(b ** 2. - a * c) 118 | y0 = (af - bd) / z_#(b ** 2. - a * c) 119 | 120 | # Find the semi-axes lengths [eqn. 21 and 22] from (**) 121 | ac_subtr = a - c 122 | numerator = 2 * (af * f + cd * d + g * b_sq - 2 * bd * f - ac * g) 123 | denom = ac_subtr * np.sqrt(1 + 4 * b_sq / ac_subtr**2) 124 | denominator1, denominator2 = (np.array([-denom, denom], dtype=np.float64) - c - a) * z_ 125 | 126 | width = np.sqrt(numerator / denominator1) 127 | height = np.sqrt(numerator / denominator2) 128 | 129 | phi = .5 * np.arctan((2. 
* b) / ac_subtr) 130 | self.params = ((x0, y0), width, height, np.rad2deg(phi) % 360) 131 | 132 | #self.center, self.width, self.height, self.angle = self.params 133 | return self.params[0] 134 | -------------------------------------------------------------------------------- /eyeloop/engine/params/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/engine/params/__init__.py -------------------------------------------------------------------------------- /eyeloop/extractors/DAQ.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from pathlib import Path 4 | 5 | 6 | class DAQ_extractor: 7 | def __init__(self, output_dir): 8 | self.output_dir = output_dir 9 | self.datalog_path = Path(output_dir, f"datalog.json") 10 | self.file = open(self.datalog_path, "a") 11 | 12 | def activate(self): 13 | return 14 | 15 | def fetch(self, core): 16 | try: 17 | self.file.write(json.dumps(core.dataout) + "\n") 18 | 19 | except ValueError: 20 | pass 21 | 22 | def release(self, core): 23 | try: 24 | self.file.write(json.dumps(core.dataout) + "\n") 25 | self.file.close() 26 | except ValueError: 27 | pass 28 | self.fetch(core) 29 | #return 30 | #logging.debug("DAQ_extractor.release() called") 31 | 32 | # def set_digital_line(channel, value): 33 | # digital_output = PyDAQmx.Task() 34 | # digital_output.CreateDOChan(channel,'do', DAQmxConstants.DAQmx_Val_ChanPerLine) 35 | # digital_output.WriteDigitalLines(1, 36 | # True, 37 | # 1.0, 38 | # DAQmxConstants.DAQmx_Val_GroupByChannel, 39 | # numpy.array([int(value)], dtype=numpy.uint8), 40 | # None, 41 | # None) 42 | # digital_output.ClearTask() 43 | -------------------------------------------------------------------------------- /eyeloop/extractors/README.md: -------------------------------------------------------------------------------- 1 | # Extractors # 2 | 3 |

6 | 7 | *Extractors* form the *executive branch* of EyeLoop: Experiments, such as open- or closed-loops, are designed using *Extractors*. Similarly, data acquisition utilizes the *Extractor* class. So how does it work? 8 | 9 | > Check [Examples](https://github.com/simonarvin/eyeloop/blob/master/examples) for full Extractors. 10 | 11 | ## How the *Engine* handles *Extractors* ## 12 | 13 | *Extractors* are utilized by EyeLoop's *Engine* via the *Extractor array*. Users must first *load* all extractors into the *Engine* via *run_eyeloop.py*: 14 | ```python 15 | class EyeLoop: 16 | def __init__(self) -> None: 17 | extractors = [...] 18 | ENGINE = Engine(...) 19 | ENGINE.load_extractors(extractors) 20 | ``` 21 | 22 | The *Extractor array* is *activated* by the *Engine* when the trial is initiated: 23 | ```python 24 | class Engine: 25 | def activate(self) -> None: 26 | for extractor in self.extractors: 27 | extractor.activate() 28 | ``` 29 | 30 | Finally, the *Extractor array* is loaded by the *Engine* at each time-step: 31 | ```python 32 | def run_extractors(self) -> None: 33 | for extractor in self.extractors: 34 | extractor.fetch(self) 35 | ``` 36 | 37 | At the termination of the *Engine*, the *Extractor array* is *released*: 38 | ```python 39 | def release(self) -> None: 40 | for extractor in self.extractors: 41 | extractor.release() 42 | ``` 43 | 44 | ## Structure ## 45 | The *Extractor* class contains four functions: 46 | ### 1: ```__init__``` ### 47 | 48 | The instantiator sets class variables as soon as the Extractor array is generated, i.e., before the trial has begun. 49 | ```python 50 | class Extractor: 51 | def __init__(self, ...): 52 | (set variables) 53 | ``` 54 | 55 | ### 2: ```activate``` ### 56 | 57 | The ```activate()``` function is called once when the trial is started. 58 | ```python 59 | ... 60 | def activate(self): 61 | ... 62 | ``` 63 | 64 | An experiment *Extractor* might activate the experiment when the trial is initiatiated, by resetting timers: 65 | ```python 66 | ... 67 | def activate(self) -> None: 68 | self.start = time.time() 69 | ``` 70 | 71 | ### 3: ```fetch``` ### 72 | 73 |

76 | 77 | The ```fetch()``` function is called at the end of every time-step. It receives the *Engine* pointer, gaining access to all eye-tracking data in real-time. 78 | ```python 79 | ... 80 | def fetch(self, Engine): 81 | ... 82 | ``` 83 | 84 | A data acquisition *Extractor* would fetch the data via ```Engine.dataout``` and save it, or pass it to a dedicated data acquisition board. 85 | ```python 86 | ... 87 | def fetch(self, Engine): 88 | self.log.write(json.dumps(Engine.dataout) + "\n") 89 | ``` 90 | 91 | ### 4: ```release``` ### 92 | 93 | The ```release()``` function is called when the *Engine* is terminated. 94 | ```python 95 | ... 96 | def release(self): 97 | ... 98 | ``` 99 | -------------------------------------------------------------------------------- /eyeloop/extractors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/extractors/__init__.py -------------------------------------------------------------------------------- /eyeloop/extractors/calibration.py: -------------------------------------------------------------------------------- 1 | import time 2 | from datetime import datetime 3 | import cv2 4 | import numpy as np 5 | 6 | class Calibration_Extractor: 7 | def __init__(self, x=0, y=0, w=100, h=100): 8 | self.x, self.y = x, y 9 | self.raw = np.zeros((h, w), dtype=float) 10 | 11 | self.mean = [] 12 | self.fetch = lambda x: None 13 | 14 | self.settle_time = 10 15 | self.duration = 15 16 | 17 | def activate(self): 18 | self.start = time.time() 19 | self.fetch = self.r_fetch 20 | print("\nCalibration started.\nSettle time: {} seconds\nDuration: {} seconds".format(self.settle_time, 21 | self.duration)) 22 | 23 | def r_fetch(self, core): 24 | delta = time.time() - self.start 25 | if delta > self.settle_time: 26 | if len(self.mean) == 0: 27 | print("Calibration settled. 
Collecting data for {} seconds.".format(self.duration)) 28 | if delta - self.settle_time > self.duration: 29 | mean_value = round(np.mean(self.mean), 2) 30 | 31 | now = datetime.now() 32 | time_str = now.strftime("%Y%m%d%H%M%S") 33 | file_name = "{}._cal_".format(time_str) 34 | f = open(file_name, "w") 35 | f.write(str(mean_value)) 36 | f.close() 37 | 38 | print("Calibration file saved as {}".format(file_name)) 39 | 40 | print("\nCalibration finished.\nMean size: {}\nTerminating EyeLoop..".format(mean_value)) 41 | 42 | core.release() 43 | return 44 | 45 | #((pupil_width, pupil_height), pupil_center, pupil_angle), 46 | w, h = core.dataout["pupil"][0] 47 | 48 | if w == -1: 49 | core.dataout["calibration"] = -1 50 | return 51 | 52 | size = float(w * h) 53 | self.mean.append(size) 54 | 55 | core.dataout["calibration"] = [self.raw[0][0], np.mean(self.mean)] 56 | 57 | core.dataout["calibration"] = [self.raw[0][0], -1] 58 | cv2.imshow("Calibration", self.raw) 59 | cv2.moveWindow("Calibration", self.x, self.y) 60 | -------------------------------------------------------------------------------- /eyeloop/extractors/closed_loop.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import cv2 4 | import numpy as np 5 | 6 | 7 | class ClosedLoop_Extractor: 8 | def __init__(self, MAXSIZE = 3231, x=-0, y=0, w=100, h=100): 9 | """ 10 | RUN CALIBRATE, THEN SET MAXSIZE (= ._cal_ file value) 11 | """ 12 | 13 | self.basesize = MAXSIZE / 2 14 | self.size = 0 15 | 16 | self.brightness = 0 17 | self.unit = np.ones((h, w), dtype=float) 18 | 19 | self.x, self.y = x, y 20 | 21 | self.velocity = 0 22 | self.index = 0 23 | 24 | self.q_coef = 0.1 # Scalar 25 | self.I_coef = .1 # Rate 26 | self.friction = .05 27 | 28 | self.state_dict = { 29 | 1: "closed-loop", 30 | 2: "white", 31 | 0: "black" 32 | } 33 | 34 | self.protocol = [ 35 | {"t": 6, 36 | "s": 0, 37 | "p": {}}, 38 | 39 | {"t": 60, 40 | "s": 1, 41 | "p": { 42 | "q_coef": 0.001, 43 | "I_coef": 0.01, 44 | "friction": 0.1 45 | }}, 46 | 47 | {"t": 6, 48 | "s": 2, 49 | "p": {}}, 50 | 51 | {"t": 60, 52 | "s": 1, 53 | "p": { 54 | "q_coef": 0.001, 55 | "I_coef": 0.01, 56 | "friction": 0.05 57 | }}, 58 | 59 | {"t": 6, 60 | "s": 2, 61 | "p": {}}, 62 | 63 | {"t": 60, 64 | "s": 1, 65 | "p": { 66 | "q_coef": 0.001, 67 | "I_coef": 0.001, 68 | "friction": 0.1 69 | }}, 70 | 71 | {"t": 6, 72 | "s": 2, 73 | "p": {}}, 74 | 75 | {"t": 60, 76 | "s": 1, 77 | "p": { 78 | "q_coef": 0.001, 79 | "I_coef": 0.05, 80 | "friction": 0.1 81 | }}, 82 | 83 | {"t": 6, 84 | "s": 2, 85 | "p": {}}, 86 | 87 | {"t": 60, 88 | "s": 1, 89 | "p": { 90 | "q_coef": 0.00025, 91 | "I_coef": 0.01, 92 | "friction": 0.1 93 | }}, 94 | 95 | {"t": 6, 96 | "s": 0, 97 | "p": {}}, 98 | 99 | {"t": 60, 100 | "s": 1, 101 | "p": { 102 | "q_coef": 0.0001, 103 | "I_coef": 0.01, 104 | "friction": 0.1 105 | }}, 106 | 107 | {"t": 6, 108 | "s": 0, 109 | "p": {}}, 110 | 111 | {"t": 60, 112 | "s": 1, 113 | "p": { 114 | "q_coef": 0.0005, 115 | "I_coef": 0.2, 116 | "friction": 0.1 117 | }}, 118 | 119 | {"t": 6, 120 | "s": 2, 121 | "p": {}}, 122 | 123 | {"t": 60, 124 | "s": 1, 125 | "p": { 126 | "q_coef": 0.0005, 127 | "I_coef": 0.01, 128 | "friction": 0.1 129 | }}, 130 | 131 | {"t": 6, 132 | "s": 2, 133 | "p": {}}, 134 | 135 | {"t": 60, 136 | "s": 1, 137 | "p": { 138 | "q_coef": 0.0005, 139 | "I_coef": 0.1, 140 | "friction": 0.1 141 | }} 142 | ] 143 | 144 | self.total_steps = len(self.protocol) 145 | self.state = 0 146 | self.fetch = lambda x: None 147 | 148 | source = 
self.unit * self.brightness 149 | 150 | def activate(self): 151 | self.start = time.time() 152 | self.step_start = time.time() 153 | self.current = self.start 154 | 155 | self.fetch = self.r_fetch 156 | self.change_parameters(self.protocol[self.index]) 157 | 158 | def timer(self): 159 | return time.time() - self.step_start 160 | 161 | def condition(self, step): 162 | cond = not (0 < self.current < step["t"]) 163 | if cond: 164 | self.step_start = time.time() 165 | return cond 166 | 167 | def change_parameters(self, step): 168 | self.state = step["s"] 169 | 170 | print( 171 | "\nTransitioning to step {}/{}.\nstate changed to {}\nDuration: {}".format(self.index + 1, self.total_steps, 172 | self.state_dict[self.state], 173 | step["t"]).upper(), "seconds") 174 | for key, value in step["p"].items(): 175 | print(" {} set to {}".format(key, value)) 176 | exec("self." + key + '=' + str(value)) 177 | 178 | def release(self): 179 | cv2.destroyAllWindows() 180 | 181 | def r_fetch(self, core): 182 | w, h = core.dataout["pupil"][0] 183 | size = float(w * h) 184 | 185 | if self.state == 2: 186 | # WHITE 187 | self.brightness = 1 188 | elif self.state == 0: 189 | # BLACK 190 | self.brightness = 0 191 | elif self.state == 1: 192 | # CLOSED LOOP 193 | 194 | if w != -1: 195 | self.velocity += (self.brightness - self.q_coef * size ** 2 / self.basesize) * self.I_coef 196 | self.velocity -= self.velocity * self.friction 197 | self.velocity = round(self.velocity, 3) 198 | 199 | self.brightness -= self.velocity 200 | 201 | self.brightness = min(self.brightness, 1) 202 | self.brightness = max(self.brightness, 0) 203 | 204 | step = self.protocol[self.index] 205 | 206 | self.current = self.timer() 207 | if self.condition(step): 208 | self.index += 1 209 | core.dataout["trigger"] = 1 210 | if self.index == len(self.protocol): 211 | print("Protocol finished. 
Terminating EyeLoop..") 212 | core.release() 213 | return 214 | else: 215 | self.change_parameters(self.protocol[self.index]) 216 | else: 217 | core.dataout["trigger"] = 0 218 | 219 | # self.arr.append(self.brightness) 220 | core.dataout["closed_looptest"] = self.brightness 221 | core.dataout["closed_loopparam"] = step 222 | 223 | source = self.unit * self.brightness ** 2 224 | 225 | cv2.imshow("Brightness Test", source) 226 | cv2.moveWindow("Brightness Test", self.x, self.y) 227 | -------------------------------------------------------------------------------- /eyeloop/extractors/converter.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class Conversion_extractor: 5 | def __init__(self, type=1, animal: str = "mouse", angle=0, center=None, interfaces=None): 6 | self.angle = angle 7 | self.center = center 8 | self.animal = animal 9 | if animal == "mouse": 10 | self.effective_rotation_radius = 1.25 # mm mouse 11 | self.bulbucorneal_distance = 0.2 # mm 12 | elif animal == "marmoset": 13 | self.effective_rotation_radius = 3.4 # mm :marmoset https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1913220/ 14 | self.bulbucorneal_distance = 2.1 # marmoset: https://pubmed.ncbi.nlm.nih.gov/8333154/ 15 | 16 | elif animal == "human": 17 | self.effective_rotation_radius = 6.4 # human https://pubmed.ncbi.nlm.nih.gov/9293516/ 18 | self.bulbucorneal_distance = 4.6 # human 19 | 20 | self.err_fraction = self.effective_rotation_radius / ( 21 | self.effective_rotation_radius - self.bulbucorneal_distance) 22 | 23 | if interfaces is None: 24 | interfaces = [] 25 | self.interfaces = interfaces 26 | 27 | if type == 1 or type == "coordinates": # coordinates 28 | self.fetch = self.coordinates 29 | elif type == 2 or type == "area": # area 30 | self.fetch = self.area 31 | 32 | def rotate(self, point, ang, origin): 33 | """ 34 | Rotate a point counterclockwise by an angle around an origin. 35 | 36 | The angle should be given in degrees 37 | """ 38 | 39 | angle = np.radians(ang) 40 | # print(ang, angle) 41 | ox, oy = origin 42 | px, py = point 43 | 44 | qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy) 45 | qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy) 46 | 47 | return (qx, qy) 48 | 49 | def to_angular(self, point1, point2): 50 | """ 51 | Based on Sakatani et al. (2004) 52 | """ 53 | try: 54 | 55 | x = (point2[0] - point1[0]) * self.err_fraction + point1[0] 56 | except: 57 | return 58 | 59 | y = (point2[1] - point1[1]) * self.err_fraction + point1[1] 60 | # rad=np.radians(((point1[0] - x)/self.effective_rotation_radius)) 61 | 62 | ang_pos_hor = np.arcsin(np.clip(np.radians(((point1[0] - x) / self.effective_rotation_radius)), -1, 1)) 63 | 64 | ang_pos_ver = np.arcsin(np.clip(np.radians(-(point1[1] - y) / self.effective_rotation_radius), -1, 1)) 65 | 66 | return ang_pos_hor, ang_pos_ver 67 | 68 | def area(self, core): 69 | """ 70 | Computes pupil area based on mathematical eye model. 71 | Method described in article [doi: QQQQ]. 72 | """ 73 | 74 | try: 75 | dataout = core.dataout 76 | except: 77 | # offline; core = offline dataout. 
78 | dataout = core 79 | 80 | try: 81 | width, height = dataout["pupil"][0][0], dataout["pupil"][0][1] 82 | pupil, cornea = dataout["pupil"][1], dataout["cr"][1] 83 | except Exception as e: 84 | raise Exception(e) 85 | 86 | try: 87 | pupil_coordinate = self.to_angular(pupil, cornea) 88 | 89 | angular_width = self.to_angular((pupil[0] + width, pupil[1]), cornea) 90 | angular_height = self.to_angular((pupil[0], pupil[1] + height), cornea) 91 | 92 | extremes = (abs(angular_width[0] - pupil_coordinate[0]), abs(angular_height[1] - pupil_coordinate[1])) 93 | 94 | radius_1 = np.sin(extremes[0]) * self.effective_rotation_radius 95 | 96 | radius_2 = np.sin(extremes[1]) * self.effective_rotation_radius 97 | area_1 = np.pi * radius_1 ** 2  # area derived from the angular width 98 | area_2 = np.pi * radius_2 ** 2  # area derived from the angular height 99 | 100 | return np.nanmean([area_1, area_2]) 101 | except: 102 | return float("nan") 103 | 104 | def coordinates(self, core): 105 | 106 | try: 107 | dataout = core.dataout 108 | except: 109 | # offline; core = offline dataout. 110 | dataout = core 111 | 112 | try: 113 | # print(dataout["pupil_cen"], dataout["cr_cen"]) 114 | pupil, cornea = dataout["pupil"][1], dataout["cr"][1] 115 | 116 | # pupil = self.rotate(pupil, self.angle, self.center) 117 | # cornea = self.rotate(cornea, self.angle, self.center) 118 | 119 | return self.to_angular(pupil, cornea) 120 | except Exception as e: 121 | print(e) 122 | return float("nan") 123 | 124 | # for interface in self.interfaces: 125 | # interface.fetch(ang_pos_hor, ang_pos_ver) 126 | -------------------------------------------------------------------------------- /eyeloop/extractors/frametimer.py: -------------------------------------------------------------------------------- 1 | import time 2 | import threading 3 | import eyeloop.config as config 4 | 5 | class FPS_extractor: 6 | """ 7 | Simple fps-counter. Acts as an extractor: it is included in the base extractor list loaded by run_eyeloop.py. 
8 | """ 9 | 10 | def __init__(self): 11 | self.fetch = lambda _: None 12 | self.activate = lambda: None 13 | 14 | self.last_frame = 0 15 | 16 | self.thread = threading.Timer(1, self.get_fps) 17 | self.thread.start() 18 | 19 | 20 | def get_fps(self): 21 | print(f" Processing {config.importer.frame - self.last_frame} frames per second.") 22 | self.last_frame = config.importer.frame 23 | self.thread = threading.Timer(1, self.get_fps) 24 | self.thread.start() 25 | 26 | 27 | def release(self, core): 28 | self.thread.cancel() 29 | -------------------------------------------------------------------------------- /eyeloop/extractors/open_loop.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import cv2 4 | import numpy as np 5 | 6 | 7 | class Open_Loop_extractor(): 8 | def __init__(self, x: int = 50, y: int = 50, w: int = 50, h: int = 50) -> None: 9 | 10 | self.fetch = lambda x: None 11 | 12 | self.raw = np.ones((h, w), dtype=float) 13 | self.frequency = .01 14 | self.phase = 0 15 | 16 | self.x, self.y = x, y 17 | 18 | self.state = 0 19 | 20 | self.state_dict = { 21 | 1: "open-loop", 22 | 2: "white", 23 | 0: "black" 24 | } 25 | 26 | self.index = 0 27 | 28 | self.protocol = [ 29 | {"t": 6, 30 | "s": 0, 31 | "p": {}}, 32 | 33 | {"t": 60, 34 | "s": 1, 35 | "p": {"frequency": 0.1}}, 36 | 37 | {"t": 6, 38 | "s": 2, 39 | "p": {}}, 40 | 41 | {"t": 60, 42 | "s": 1, 43 | "p": {"frequency": 0.2}}, 44 | 45 | {"t": 6, 46 | "s": 0, 47 | "p": {}}, 48 | 49 | {"t": 60, 50 | "s": 1, 51 | "p": {"frequency": 0.4}}, 52 | 53 | {"t": 6, 54 | "s": 2, 55 | "p": {}}, 56 | 57 | {"t": 60, 58 | "s": 1, 59 | "p": {"frequency": 0.8}}, 60 | 61 | {"t": 6, 62 | "s": 0, 63 | "p": {}}, 64 | 65 | {"t": 60, 66 | "s": 1, 67 | "p": {"frequency": 0.01}}, 68 | 69 | {"t": 6, 70 | "s": 2, 71 | "p": {}}, 72 | 73 | {"t": 60, 74 | "s": 1, 75 | "p": {"frequency": 0.05}}, 76 | 77 | {"t": 6, 78 | "s": 2, 79 | "p": {}} 80 | ] 81 | 82 | self.total_steps = len(self.protocol) 83 | 84 | def activate(self) -> None: 85 | self.start = time.time() 86 | self.step_start = time.time() 87 | self.current = self.start 88 | 89 | self.fetch = self.r_fetch 90 | self.change_parameters(self.protocol[self.index]) 91 | 92 | def timer(self, time) -> float: 93 | return time - self.step_start 94 | 95 | def condition(self, step, time) -> bool: 96 | cond = not (0 < self.current < step["t"]) 97 | if cond: 98 | self.step_start = time #time.time() 99 | return cond 100 | 101 | def release(self): 102 | return 103 | 104 | def change_parameters(self, step) -> None: 105 | self.state = step["s"] 106 | 107 | print( 108 | "\nTransitioning to step {}/{}.\nstate changed to {}\nDuration: {}".format(self.index + 1, self.total_steps, 109 | self.state_dict[self.state], 110 | step["t"]).upper(), "seconds") 111 | for key, value in step["p"].items(): 112 | print(" {} set to {}".format(key, value)) 113 | exec("self." 
+ key + '=' + str(value)) 114 | 115 | def r_fetch(self, engine): 116 | if self.state == 2: 117 | # WHITE 118 | source = self.raw * 255 119 | self.phase = 0 120 | 121 | elif self.state == 0: 122 | # BLACK 123 | source = self.raw * 0 124 | self.phase = 0 125 | 126 | elif self.state == 1: 127 | # OPEN LOOP 128 | source = self.raw * np.sin(self.phase) * .5 + .5 129 | self.phase += self.frequency 130 | 131 | step = self.protocol[self.index] 132 | engine.dataout["open_looptest"] = source[0][0] 133 | engine.dataout["open_loopparam"] = step 134 | 135 | self.current = self.timer(engine.dataout["time"]) 136 | 137 | if self.condition(step,engine.dataout["time"]): 138 | self.index += 1 139 | engine.dataout["trigger"] = 1 140 | if self.index == len(self.protocol): 141 | print("Protocol finished. Terminating Puptrack..") 142 | engine.release() 143 | return 144 | else: 145 | self.change_parameters(self.protocol[self.index]) 146 | else: 147 | engine.dataout["trigger"] = 0 148 | 149 | cv2.imshow("Open-loop", source) 150 | cv2.moveWindow("Open-loop", self.x, self.y) 151 | -------------------------------------------------------------------------------- /eyeloop/extractors/template.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import cv2 4 | import numpy as np 5 | 6 | 7 | class Template_extractor(): 8 | def __init__(self, x: int = 50, y: int = 50, w: int = 50, h: int = 50) -> None: 9 | self.x, self.y, self.width, self.height = x, y, w, h 10 | 11 | def activate(self) -> None: 12 | return 13 | 14 | def release(self): 15 | return 16 | 17 | def fetch(self, engine): 18 | return 19 | -------------------------------------------------------------------------------- /eyeloop/extractors/visstim.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | #import matplotlib.pyplot as plt 3 | import numpy as np 4 | import time 5 | import multiprocessing 6 | ctx = multiprocessing.get_context("spawn") 7 | 8 | import logging 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | class vis_stim: 13 | 14 | def __init__(self): 15 | self.x_ = -1080 16 | self.y_ = -1000 17 | self.height = int(500) 18 | self.width = int(500) 19 | self.wait_fps = 10 20 | self.data_queue = [] 21 | self.online = True 22 | self.queue = ctx.Queue() 23 | 24 | max_px_width = 1920 25 | screen_width = 62 26 | self.origin_=(0, 0) 27 | pixels_per_cm = max_px_width/screen_width 28 | 29 | eye_coord = { 30 | "y" : 20 * pixels_per_cm, #positive: right; negative: left 31 | "z" : 20 * pixels_per_cm, #positve: down; negative: up; 32 | "x" : 10 * pixels_per_cm #away from screen 33 | } 34 | 35 | self.x = eye_coord["x"] 36 | self.resolution = 1 37 | self.square = True 38 | 39 | self.duration = 0 40 | self.protocol_step = 0 41 | self.orientation = 0 42 | self.temporal_freq = 0 43 | self.spatial_freq = 0 44 | self.inter_stim_duration = 1 45 | 46 | 47 | self.initial_pause = .1 48 | 49 | self.cos_or = 0 50 | self.sin_or = 0 51 | self.origin = (self.width, self.height) 52 | 53 | self.stock_mesh = np.meshgrid(np.arange(0, self.width, self.resolution) - eye_coord["y"], np.arange(0, self.height, self.resolution) - eye_coord["z"]) 54 | self.reference = 0 55 | self.timer = 0 56 | self.a = 0 57 | 58 | self.inter_stim_canvas = np.zeros(self.origin).T 59 | 60 | self.PROTOCOL = self.load_protocol() 61 | self.fetch = lambda x: None 62 | 63 | #pg.init() 64 | #self.screen = pg.display.set_mode(self.origin)#, pg.DOUBLEBUF | pg.HWSURFACE | pg.FULLSCREEN) 65 | #self.clock = 
pg.time.Clock() 66 | #self.screen.fill((0,0,0)) 67 | 68 | #cv2.namedWindow("canvas", cv2.WND_PROP_FULLSCREEN) 69 | #cv2.moveWindow("canvas",0,0) 70 | #cv2.namedWindow("canvas", cv2.WND_PROP_FULLSCREEN) 71 | 72 | 73 | 74 | def activate(self): 75 | print("Activating corrected visual stimulus..") 76 | self.reference = time.time() 77 | #cv2.setWindowProperty("canvas", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN) 78 | 79 | #cv2.namedWindow("canvas", cv2.WND_PROP_FULLSCREEN) 80 | #cv2.setWindowProperty("canvas", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN) 81 | #cv2.moveWindow("canvas",self.x_, self.y_) 82 | #cv2.destroyWindow("canvas") 83 | self.fetch = self.ifetch 84 | 85 | 86 | def ifetch(self, core): 87 | 88 | current_time = time.time() 89 | self.timer = time.time() - self.reference 90 | 91 | core.dataout["vstim"] = {"state" : "PRESTIM"} 92 | cv2.namedWindow("canvas") 93 | cv2.moveWindow("canvas",self.x_,self.y_) 94 | cv2.imshow("canvas", self.inter_stim_canvas) 95 | cv2.waitKey(1) 96 | 97 | if self.timer >= self.initial_pause: 98 | print("initial pause ended,\nstarting trial") 99 | self.fetch = self.rfetch 100 | core.dataout["vstim"] = {"state" : "ACTIVATED"} 101 | cv2.destroyWindow("canvas") 102 | 103 | self.stim_thread = ctx.Process(target=self.stim, args=[self.queue]) 104 | 105 | self.stim_thread.start() 106 | #self.stim_thread.join() 107 | #self.stim_thread = threading.Thread(target=self.stim) 108 | # self.stim_thread.daemon = True 109 | # self.stim_thread.start() 110 | 111 | return 112 | 113 | #cv2.imshow("canvas", self.inter_stim_canvas) 114 | 115 | def release(self): 116 | try: 117 | self.online = False 118 | #self.stim_thread.join() 119 | except: 120 | pass 121 | 122 | def stim(self, queue): 123 | #self.screen.fill((30, 30, 30)) 124 | 125 | cv2.namedWindow("canvas") 126 | cv2.moveWindow("canvas",self.x_,self.y_) 127 | while self.online: 128 | 129 | current_time = time.time() 130 | self.timer = time.time() - self.reference 131 | 132 | if self.timer >= self.duration: 133 | if self.timer - self.duration <= self.inter_stim_duration: 134 | 135 | queue.put({"state" : "INTERSTIM", "step" : self.protocol_step}) 136 | 137 | #cv2.imshow("canvas", self.inter_stim_canvas) 138 | #print("B") 139 | cv2.imshow("canvas", self.inter_stim_canvas) 140 | cv2.waitKey(self.wait_fps) 141 | #cv2.waitKey(1) 142 | #self.screen.blit(self.inter_stim_canvas, self.origin_) 143 | #pg.display.flip() 144 | continue 145 | 146 | 147 | try: 148 | params = self.PROTOCOL[self.protocol_step] 149 | except: 150 | logger.info(f"vis-stim protocol ended;") 151 | 152 | return 153 | 154 | self.reference = current_time 155 | self.timer = 0 156 | self.duration = params["duration"] 157 | self.temporal_freq = np.radians(params["temporal_freq"]) 158 | orientation = np.radians(params["orientation"]) 159 | self.spatial_freq = np.degrees(params["spatial_freq"]) 160 | 161 | vertical_, horisontal_ = self.stock_mesh.copy() 162 | 163 | cos_or = np.cos(-orientation) 164 | sin_or = np.sin(-orientation) 165 | 166 | z_ = vertical_ * cos_or - horisontal_ * sin_or 167 | y_ = vertical_ * sin_or + horisontal_ * cos_or 168 | 169 | 170 | self.b = (np.pi/2 - np.arccos(z_/np.sqrt(self.x**2 + y_**2 + z_**2))) #+ np.arctan(-horisontal_/self.x) 171 | self.a = 2 * np.pi * self.spatial_freq 172 | #self.b *= -np.arctan(-horisontal_/self.x) 173 | 174 | #core.dataout["vis_stim_switch"] = params 175 | queue.put({"state" : "STIM", "step" : self.protocol_step, "params" : str(params)}) 176 | 177 | self.protocol_step += 1 178 | 179 | 180 | #t = time.time() 
181 | 182 | S = np.cos(self.a * (self.b - self.timer * self.temporal_freq)) 183 | 184 | S_ = cv2.resize(S, self.origin) 185 | 186 | if self.square: 187 | _, S_ = cv2.threshold(S_, .5, 1, cv2.THRESH_BINARY) 188 | cv2.imshow("canvas", S_) 189 | cv2.waitKey(self.wait_fps) 190 | #ss=time.time() 191 | #S_ = pg.surfarray.make_surface(np.stack((np.array(S_).T * 255,) * 3, axis = -1)) 192 | #print(time.time()-ss) 193 | #self.screen.blit(S_, self.origin_) 194 | #pg.display.flip() 195 | #self.clock.tick(self.fps) 196 | 197 | 198 | 199 | def load_protocol(self, static = True): 200 | if static: 201 | P = [] 202 | or_list = np.arange(8) * 45 203 | for orientation in or_list: 204 | 205 | P.append({ 206 | "orientation" : orientation, #degrees 207 | "temporal_freq" : 5,#5, # degree per second 208 | "spatial_freq" : 0.25,#0.05, #cycles per degree 209 | "duration" : 2 #in secs 210 | }) 211 | else: 212 | P = np.load("file/path/here") 213 | 214 | return P 215 | 216 | def rfetch(self, core = None): 217 | 218 | while not self.queue.empty(): 219 | core.dataout["vstim"] = self.queue.get() 220 | 221 | 222 | #print(f"fps: {int(1/(time.time() - t))}") 223 | 224 | if __name__ == 'visstim': 225 | extractors_add = [vis_stim()] 226 | -------------------------------------------------------------------------------- /eyeloop/guis/README.md: -------------------------------------------------------------------------------- 1 | # Building your first custom graphical user interface # 2 | .. in progress .. 3 | 4 | To integrate a custom graphical user interface, pass it to the engine in eyeloop.py: 5 | ```python 6 | graphical_user_interface = ... 7 | ENGINE = Engine(self, graphical_user_interface, file_manager, arguments) 8 | ``` 9 | The graphical user interface should contain a ```load_engine(self, ENGINE)``` function: 10 | ```python 11 | def load_engine(self, ENGINE) -> None: 12 | self.ENGINE = ENGINE 13 | ... 14 | ``` 15 | 16 | The graphical user interface is responsible for: 17 | - Selecting corneal reflections (array ```ENGINE.cr_processors```). 18 | - Selecting the pupil (Shape object ```ENGINE.pupil_processor```). 19 | - Adjusting binarization and gaussian parameters (via ```ENGINE.cr_processors``` and ```ENGINE.pupil_processor```). 20 | - Rotating the video feed (via ```ENGINE.angle```). 21 | 22 | Additional functions are easily integrated. 
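
To make the outline above concrete, here is a minimal sketch of a custom interface class. Treat it as a template under the assumptions stated here, not as the project's definitive API: the ```arm```/```update```/```release``` hooks mirror those implemented by *minimum-gui* (see ```minimum_gui.py``` below), and the exact ```load_engine```/constructor signature should be checked against the engine version in use.
```python
import cv2


class GUI:
    """Bare-bones custom interface (illustrative sketch)."""

    def load_engine(self, ENGINE) -> None:
        # Keep a handle to the engine so user input can adjust
        # ENGINE.pupil_processor, ENGINE.cr_processors and ENGINE.angle.
        self.ENGINE = ENGINE

    def arm(self, width: int, height: int) -> None:
        # Called once before tracking starts; allocate windows/buffers here.
        self.width, self.height = width, height

    def update(self, img) -> None:
        # Called for every frame; render the feed and poll the keyboard.
        cv2.imshow("Custom GUI", img)
        if cv2.waitKey(1) == ord("q"):
            self.ENGINE.release()  # stop tracking on 'q', as minimum-gui does

    def release(self) -> None:
        cv2.destroyAllWindows()
```
Binarization and blur adjustments can then be bound to key-presses inside ```update()```, for example by incrementing ```ENGINE.pupil_processor.binarythreshold``` as *minimum-gui* does.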
23 | -------------------------------------------------------------------------------- /eyeloop/guis/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/guis/__init__.py -------------------------------------------------------------------------------- /eyeloop/guis/blink_test.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import eyeloop.config as config 3 | import numpy as np 4 | from os.path import dirname, abspath 5 | 6 | class GUI: 7 | def __init__(self) -> None: 8 | self.frames = [] 9 | 10 | self.last_frame = 500 11 | self.pick = np.arange(10, self.last_frame, 50) 12 | self.filter = np.zeros(len(self.pick)) 13 | 14 | def arm(self, width: int, height: int) -> None: 15 | return 16 | 17 | def update(self, img): 18 | if config.importer.frame == self.last_frame: 19 | 20 | stack = np.hstack(tuple(self.frames)) 21 | cv2.imshow("Blink_test", stack) 22 | 23 | 24 | key = cv2.waitKey(1) 25 | _input = input("type the non-blinking frames, split by comma (0 from the left)") 26 | frames = np.array(_input.split(","),dtype=int) 27 | frames_ = np.array(self.frames) 28 | #print(frames) 29 | path = dirname(dirname(abspath(__file__)))+"/blink_.npy" 30 | 31 | data = [np.mean(frames_[frames])] 32 | 33 | np.save(path, data) 34 | print(f"data saved: {data}") 35 | 36 | config.engine.release() 37 | else: 38 | if config.importer.frame in self.pick: 39 | self.frames.append(img) 40 | 41 | # cv2.imshow("Blink_test", img) 42 | # key = cv2.waitKey(1) 43 | #if key == ord("q"): 44 | # config.engine.release() 45 | #self.release() 46 | 47 | 48 | def release(self): 49 | #self.out.release() 50 | cv2.destroyAllWindows() 51 | -------------------------------------------------------------------------------- /eyeloop/guis/minimum/README.md: -------------------------------------------------------------------------------- 1 | # minimum-gui instructions # 2 | *minimum-gui* is EyeLoop's default minimalistic graphical user interface. It operates with bare minimum processing overhead. In the present text, we describe how to utilize *minimum-gui*. 3 | 4 |

5 | 6 |

7 | 8 | **Overview**\ 9 | *minimum-gui* consists of two panels. The left panel contains the source video sequence and an eye-tracking preview. The right panel contains the binary filter of the pupil (top) and corneal reflections (bottom). 10 | 11 | ## Getting started ## 12 | - **A**: Select the corneal reflections by hovering and key-pressing 2, 3 or 4. This initiates the tracking algorithm, which is rendered in the preview panel. Adjust binarization (key-press W/S) and gaussian (key-press E/D) parameters to improve detection. To switch between corneal reflections, key-press the corresponding index (for example, key-press 2 to modify corneal reflection "2"). 13 | - **B**: Select the pupil by hovering and key-pressing 1. Similar to the corneal reflections, this initiates tracking. Adjust binarization (key-press R/F) and gaussian parameters (key-press T/G) for optimal detection. 14 | - **C**: To initiate the eye-tracking trial, key-press Z and confirm by key-pressing Y. 15 | 16 | > Key-press "q" to stop tracking. 17 | 18 | ## Optional ## 19 | ### Rotation ### 20 |

21 | 22 |

23 | Since EyeLoop's conversion algorithm computes the angular coordinates of the eye based on the video sequence, users must align it to the horizontal and vertical real-world axes. To obtain alignment, key-press O or P to rotate the video stream in real-time. 24 | 25 | > Note: EyeLoop's *converter* module contains a corrective function, that transforms the eye-tracking coordinates based on any given angle. This enables users to apply a rotational vector in post hoc analysis. 26 | 27 | ### Markers ### 28 | To crudely remove interfering artefacts, pass command-line argument ```--markers 1``` when initiating EyeLoop. This enables *markers*, which are placed in pairs forming rectangles exempt from the eye-tracking algorithm. In *minimum-gui*, press B to place a marker, and press V to undo. 29 | 30 | -------------------------------------------------------------------------------- /eyeloop/guis/minimum/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/guis/minimum/__init__.py -------------------------------------------------------------------------------- /eyeloop/guis/minimum/graphics/instructions_md/overview.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | -1Asset 1 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | Preview 161 | Binary 162 | Pupil 163 | Cornealreflection 164 | 165 | 166 | 167 | -------------------------------------------------------------------------------- /eyeloop/guis/minimum/graphics/instructions_md/rotation.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | -1Asset 2 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | -------------------------------------------------------------------------------- /eyeloop/guis/minimum/graphics/tip_1_cr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/guis/minimum/graphics/tip_1_cr.png -------------------------------------------------------------------------------- /eyeloop/guis/minimum/graphics/tip_1_cr_error.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/guis/minimum/graphics/tip_1_cr_error.png -------------------------------------------------------------------------------- /eyeloop/guis/minimum/graphics/tip_1_cr_first.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/guis/minimum/graphics/tip_1_cr_first.png 
-------------------------------------------------------------------------------- /eyeloop/guis/minimum/graphics/tip_2_cr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/guis/minimum/graphics/tip_2_cr.png -------------------------------------------------------------------------------- /eyeloop/guis/minimum/graphics/tip_3_pupil.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/guis/minimum/graphics/tip_3_pupil.png -------------------------------------------------------------------------------- /eyeloop/guis/minimum/graphics/tip_3_pupil_error.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/guis/minimum/graphics/tip_3_pupil_error.png -------------------------------------------------------------------------------- /eyeloop/guis/minimum/graphics/tip_4_pupil.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/guis/minimum/graphics/tip_4_pupil.png -------------------------------------------------------------------------------- /eyeloop/guis/minimum/graphics/tip_5_start.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/guis/minimum/graphics/tip_5_start.png -------------------------------------------------------------------------------- /eyeloop/guis/minimum/minimum_gui.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | import numpy as np 5 | 6 | import eyeloop.config as config 7 | from eyeloop.constants.minimum_gui_constants import * 8 | from eyeloop.utilities.general_operations import to_int, tuple_int 9 | import threading 10 | 11 | import logging 12 | logger = logging.getLogger(__name__) 13 | 14 | class GUI: 15 | def __init__(self) -> None: 16 | 17 | 18 | dir_path = os.path.dirname(os.path.realpath(__file__)) 19 | tool_tip_dict = ["tip_1_cr", "tip_2_cr", "tip_3_pupil", "tip_4_pupil", "tip_5_start", "tip_1_cr_error", "", 20 | "tip_3_pupil_error"] 21 | self.first_tool_tip = cv2.imread("{}/graphics/{}.png".format(dir_path, "tip_1_cr_first"), 0) 22 | self.tool_tips = [cv2.imread("{}/graphics/{}.png".format(dir_path, tip), 0) for tip in tool_tip_dict] 23 | 24 | self._state = "adjustment" 25 | self.inquiry = "none" 26 | self.terminate = -1 27 | self.update = self.adj_update#real_update 28 | self.skip = 0 29 | self.first_run = True 30 | 31 | self.pupil_ = lambda _: False 32 | self.cr1_ = lambda _: False 33 | self.cr2_ = lambda _: False 34 | 35 | 36 | 37 | 38 | 39 | def tip_mousecallback(self, event, x: int, y: int, flags, params) -> None: 40 | if event == cv2.EVENT_LBUTTONDOWN: 41 | if 10 < y < 35: 42 | if 20 < x < 209: 43 | x -= 27 44 | x = int(x / 36) + 1 45 | 46 | self.update_tool_tip(x) 47 | 48 | def mousecallback(self, event, x, y, flags, params) -> None: 49 | x = x % self.width 50 | self.cursor = (x, y) 51 | 52 | def release(self): 53 | #self.out.release() 54 | cv2.destroyAllWindows() 55 | 56 | def remove_mousecallback(self) -> None: 
57 | cv2.setMouseCallback("CONFIGURATION", lambda *args: None) 58 | cv2.setMouseCallback("Tool tip", lambda *args: None) 59 | 60 | def update_tool_tip(self, index: int, error: bool = False) -> None: 61 | if error: 62 | cv2.imshow("Tool tip", self.tool_tips[index + 4]) 63 | else: 64 | cv2.imshow("Tool tip", self.tool_tips[index - 1]) 65 | 66 | def key_listener(self, key: int) -> None: 67 | try: 68 | key = chr(key) 69 | except: 70 | return 71 | 72 | if self.inquiry == "track": 73 | if "y" == key: 74 | print("Initiating tracking..") 75 | self.remove_mousecallback() 76 | cv2.destroyWindow("CONFIGURATION") 77 | cv2.destroyWindow("BINARY") 78 | cv2.destroyWindow("Tool tip") 79 | 80 | cv2.imshow("TRACKING", self.bin_stock) 81 | cv2.moveWindow("TRACKING", 100, 100) 82 | 83 | self._state = "tracking" 84 | self.inquiry = "none" 85 | 86 | self.update = self.real_update 87 | 88 | config.engine.activate() 89 | 90 | return 91 | elif "n" == key: 92 | print("Adjustments resumed.") 93 | self._state = "adjustment" 94 | self.inquiry = "none" 95 | return 96 | 97 | if self._state == "adjustment": 98 | if key == "p": 99 | config.engine.angle -= 3 100 | 101 | elif key == "o": 102 | config.engine.angle += 3 103 | 104 | elif "1" == key: 105 | try: 106 | #config.engine.pupil = self.cursor 107 | self.pupil_processor.reset(self.cursor) 108 | self.pupil_ = self.pupil 109 | 110 | self.update_tool_tip(4) 111 | 112 | print("Pupil selected.\nAdjust binarization via R/F (threshold) and T/G (smoothing).") 113 | except Exception as e: 114 | self.update_tool_tip(3, True) 115 | logger.info(f"pupil selection failed; {e}") 116 | 117 | elif "2" == key: 118 | try: 119 | 120 | self.cr_processor_1.reset(self.cursor) 121 | self.cr1_ = self.cr_1 122 | 123 | self.current_cr_processor = self.cr_processor_1 124 | 125 | self.update_tool_tip(2) 126 | 127 | print("Corneal reflex selected.\nAdjust binarization via W/S (threshold) and E/D (smoothing).") 128 | 129 | except Exception as e: 130 | self.update_tool_tip(1, True) 131 | logger.info(f"CR selection failed; {e}") 132 | 133 | elif "3" == key: 134 | try: 135 | self.update_tool_tip(2) 136 | self.cr_processor_2.reset(self.cursor) 137 | self.cr2_ = self.cr_2 138 | 139 | self.current_cr_processor = self.cr_processor_2 140 | 141 | print("\nCorneal reflex selected.") 142 | print("Adjust binarization via W/S (threshold) and E/D (smoothing).") 143 | 144 | except: 145 | self.update_tool_tip(1, True) 146 | print("Hover and click on the corneal reflex, then press 3.") 147 | 148 | 149 | elif "z" == key: 150 | print("Start tracking? (y/n)") 151 | self.inquiry = "track" 152 | 153 | elif "w" == key: 154 | 155 | self.current_cr_processor.binarythreshold += 1 156 | 157 | # print("Corneal reflex binarization threshold increased (%s)." % self.CRProcessor.binarythreshold) 158 | 159 | elif "s" == key: 160 | 161 | self.current_cr_processor.binarythreshold -= 1 162 | # print("Corneal reflex binarization threshold decreased (%s)." % self.CRProcessor.binarythreshold) 163 | 164 | elif "e" == key: 165 | 166 | self.current_cr_processor.blur = tuple([x + 2 for x in self.current_cr_processor.blur]) 167 | # print("Corneal reflex blurring increased (%s)." % self.CRProcessor.blur) 168 | 169 | elif "d" == key: 170 | 171 | if self.current_cr_processor.blur[0] > 1: 172 | self.current_cr_processor.blur -= tuple([x - 2 for x in self.current_cr_processor.blur]) 173 | # print("Corneal reflex blurring decreased (%s)." 
% self.CRProcessor.blur) 174 | 175 | elif "r" == key: 176 | 177 | self.pupil_processor.binarythreshold += 1 178 | # print("Pupil binarization threshold increased (%s)." % self.pupil_processor.binarythreshold) 179 | elif "f" == key: 180 | 181 | self.pupil_processor.binarythreshold -= 1 182 | # print("Pupil binarization threshold decreased (%s)." % self.pupil_processor.binarythreshold) 183 | 184 | elif "t" == key: 185 | 186 | self.pupil_processor.blur = tuple([x + 2 for x in self.pupil_processor.blur]) 187 | 188 | # print("Pupil blurring increased (%s)." % self.pupil_processor.blur) 189 | 190 | elif "g" == key: 191 | if self.pupil_processor.blur[0] > 1: 192 | self.pupil_processor.blur = tuple([x - 2 for x in self.pupil_processor.blur]) 193 | # print("Pupil blurring decreased (%s)." % self.pupil_processor.blur) 194 | 195 | if "q" == key: 196 | # Terminate tracking 197 | config.engine.release() 198 | 199 | def arm(self, width: int, height: int) -> None: 200 | self.fps = np.round(1/config.arguments.fps, 2) 201 | 202 | self.pupil_processor = config.engine.pupil_processor 203 | 204 | self.cr_index = 0 205 | self.current_cr_processor = config.engine.cr_processor_1 # primary corneal reflection 206 | self.cr_processor_1 = config.engine.cr_processor_1 207 | self.cr_processor_2 = config.engine.cr_processor_2 208 | 209 | self.width, self.height = width, height 210 | self.binary_width = max(width, 300) 211 | self.binary_height = max(height, 200) 212 | 213 | fourcc = cv2.VideoWriter_fourcc(*'MPEG') 214 | output_vid = Path(config.file_manager.new_folderpath, "output.avi") 215 | self.out = cv2.VideoWriter(str(output_vid), fourcc, 50.0, (self.width, self.height)) 216 | 217 | self.bin_stock = np.zeros((self.binary_height, self.binary_width)) 218 | self.bin_P = self.bin_stock.copy() 219 | self.bin_CR = self.bin_stock.copy() 220 | #self.CRStock = self.bin_stock.copy() 221 | 222 | self.src_txt = np.zeros((20, width, 3)) 223 | self.prev_txt = self.src_txt.copy() 224 | cv2.putText(self.src_txt, 'Source', (15, 12), font, .7, (255, 255, 255), 0, cv2.LINE_4) 225 | cv2.putText(self.prev_txt, 'Preview', (15, 12), font, .7, (255, 255, 255), 0, cv2.LINE_4) 226 | cv2.putText(self.prev_txt, 'EyeLoop', (width - 50, 12), font, .5, (255, 255, 255), 0, cv2.LINE_8) 227 | 228 | self.bin_stock_txt = np.zeros((20, self.binary_width)) 229 | self.bin_stock_txt_selected = self.bin_stock_txt.copy() 230 | self.crstock_txt = self.bin_stock_txt.copy() 231 | self.crstock_txt[0:1, 0:self.binary_width] = 1 232 | self.crstock_txt_selected = self.crstock_txt.copy() 233 | 234 | cv2.putText(self.bin_stock_txt, 'P | R/F | T/G || bin/blur', (10, 15), font, .7, 1, 0, cv2.LINE_4) 235 | cv2.putText(self.bin_stock_txt_selected, '(*) P | R/F | T/G || bin/blur', (10, 15), font, .7, 1, 0, cv2.LINE_4) 236 | 237 | cv2.putText(self.crstock_txt, 'CR | W/S | E/D || bin/blur', (10, 15), font, .7, 1, 0, cv2.LINE_4) 238 | cv2.putText(self.crstock_txt_selected, '(*) CR | W/S | E/D || bin/blur', (10, 15), font, .7, 1, 0, cv2.LINE_4) 239 | 240 | cv2.imshow("CONFIGURATION", np.hstack((self.bin_stock, self.bin_stock))) 241 | cv2.imshow("BINARY", np.vstack((self.bin_stock, self.bin_stock))) 242 | 243 | cv2.moveWindow("BINARY", 105 + width * 2, 100) 244 | cv2.moveWindow("CONFIGURATION", 100, 100) 245 | 246 | cv2.imshow("Tool tip", self.first_tool_tip) 247 | 248 | cv2.moveWindow("Tool tip", 100, 1000 + height + 100) 249 | try: 250 | cv2.setMouseCallback("CONFIGURATION", self.mousecallback) 251 | cv2.setMouseCallback("Tool tip", self.tip_mousecallback) 252 | except: 
253 | print("Could not bind mouse-buttons.") 254 | 255 | def place_cross(self, source: np.ndarray, point: tuple, color: tuple) -> None: 256 | try: 257 | source[to_int(point[1] - 3):to_int(point[1] + 4), to_int(point[0])] = color 258 | source[to_int(point[1]), to_int(point[0] - 3):to_int(point[0] + 4)] = color 259 | except: 260 | pass 261 | 262 | 263 | def update_record(self, frame_preview) -> None: 264 | cv2.imshow("Recording", frame_preview) 265 | if cv2.waitKey(1) == ord('q'): 266 | config.engine.release() 267 | 268 | def skip_track(self): 269 | self.update = self.real_update 270 | 271 | 272 | def pupil(self, source_rgb): 273 | try: 274 | pupil_center, pupil_width, pupil_height, pupil_angle = self.pupil_processor.fit_model.params 275 | 276 | cv2.ellipse(source_rgb, tuple_int(pupil_center), tuple_int((pupil_width, pupil_height)), pupil_angle, 0, 360, red, 1) 277 | self.place_cross(source_rgb, pupil_center, red) 278 | return True 279 | except Exception as e: 280 | logger.info(f"pupil not found: {e}") 281 | return False 282 | 283 | def cr_1(self, source_rgb): 284 | try: 285 | #cr_center, cr_width, cr_height, cr_angle = params = self.cr_processor_1.fit_model.params 286 | 287 | #cv2.ellipse(source_rgb, tuple_int(cr_center), tuple_int((cr_width, cr_height)), cr_angle, 0, 360, green, 1) 288 | self.place_cross(source_rgb, self.cr_processor_1.center, green) 289 | return True 290 | except Exception as e: 291 | logger.info(f"cr1 func: {e}") 292 | return False 293 | 294 | def cr_2(self, source_rgb): 295 | try: 296 | #cr_center, cr_width, cr_height, cr_angle = params = self.cr_processor_2.fit_model.params 297 | 298 | #cv2.ellipse(source_rgb, tuple_int(cr_center), tuple_int((cr_width, cr_height)), cr_angle, 0, 360, green, 1) 299 | self.place_cross(source_rgb, self.cr_processor_2.center, green) 300 | return True 301 | except Exception as e: 302 | logger.info(f"cr2 func: {e}") 303 | return False 304 | 305 | def adj_update(self, img): 306 | source_rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) 307 | 308 | #if self.pupil_(source_rgb): 309 | self.bin_P = self.bin_stock.copy() 310 | 311 | if self.pupil_(source_rgb): 312 | self.bin_P[0:20, 0:self.binary_width] = self.bin_stock_txt_selected 313 | else: 314 | self.bin_P[0:20, 0:self.binary_width] = self.bin_stock_txt 315 | 316 | #self.bin_CR = self.bin_stock.copy() 317 | 318 | try: 319 | pupil_area = self.pupil_processor.source 320 | 321 | offset_y = int((self.binary_height - pupil_area.shape[0]) / 2) 322 | offset_x = int((self.binary_width - pupil_area.shape[1]) / 2) 323 | self.bin_P[offset_y:min(offset_y + pupil_area.shape[0], self.binary_height), 324 | offset_x:min(offset_x + pupil_area.shape[1], self.binary_width)] = pupil_area 325 | except: 326 | pass 327 | 328 | self.cr1_(source_rgb) 329 | self.cr2_(source_rgb) 330 | 331 | self.bin_CR = self.bin_stock.copy() 332 | 333 | try: 334 | cr_area = self.current_cr_processor.source 335 | offset_y = int((self.binary_height - cr_area.shape[0]) / 2) 336 | offset_x = int((self.binary_width - cr_area.shape[1]) / 2) 337 | self.bin_CR[offset_y:min(offset_y + cr_area.shape[0], self.binary_height), 338 | offset_x:min(offset_x + cr_area.shape[1], self.binary_width)] = cr_area 339 | self.bin_CR[0:20, 0:self.binary_width] = self.crstock_txt_selected 340 | except: 341 | self.bin_CR[0:20, 0:self.binary_width] = self.crstock_txt 342 | pass 343 | 344 | 345 | 346 | 347 | #print(cr_area) 348 | 349 | cv2.imshow("BINARY", np.vstack((self.bin_P, self.bin_CR))) 350 | cv2.imshow("CONFIGURATION", source_rgb) 351 | 
#self.out.write(source_rgb) 352 | 353 | self.key_listener(cv2.waitKey(50)) 354 | if self.first_run: 355 | cv2.destroyAllWindows() 356 | self.first_run = False 357 | 358 | 359 | def real_update(self, img) -> None: 360 | source_rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) 361 | self.pupil_(source_rgb) 362 | self.cr1_(source_rgb) 363 | self.cr2_(source_rgb) 364 | 365 | cv2.imshow("TRACKING", source_rgb) 366 | 367 | threading.Timer(self.fps, self.skip_track).start() #run feed every n secs (n=1) 368 | self.update = lambda _: None 369 | 370 | if cv2.waitKey(1) == ord("q"): 371 | 372 | 373 | config.engine.release() 374 | -------------------------------------------------------------------------------- /eyeloop/importers/README.md: -------------------------------------------------------------------------------- 1 | # Importers # 2 |

3 | 4 |

5 | 6 | To use a video sequence for eye-tracking, we use an *importer* class as a bridge to EyeLoop's engine. The importer fetches the video sequence from the camera, or offline from a directory, and imports it. Briefly, the importer main class ```IMPORTER``` includes functions to rotate, resize and save the video stream. Additionally, it *arms* the engine by passing necessary variables. 7 | 8 | ## Why use an importer? ## 9 | The reason for using an *importer* class, rather than having video importation "*built-in*", is to avoid incompatibilities. For example, while most web-cameras are compatible with opencv (importer *cv*), Vimba-based cameras (Allied Vision cameras) are not. Thus, by modularizing the importation of image frames, EyeLoop is easily integrated in markedly different setups. 10 | 11 | ## Importers ## 12 | 13 | - Most cameras are compatible with the *cv Importer* (default). 14 | - Allied Vision cameras require the Vimba-based *Importer*, *vimba*. 15 | 16 | ## Building your first custom importer ## 17 | To build our first custom importer, we subclass the main ```IMPORTER``` class: 18 | ```python 19 | class Importer(IMPORTER): 20 | def __init__(self) -> None: 21 | self.scale = config.arguments.scale 22 | ``` 23 | Here, we define critical variables, such as scaling. Then, we load the first frame, retrieve its dimensions and, lastly, *arm* the engine: 24 | 25 | ```python 26 | ... 27 | (load image) 28 | width, height = (image dimensions) 29 | self.arm(width, height, image) 30 | ``` 31 | Finally, the ```route()``` function loads the video frames and passes them to the engine sequentially: 32 | ```python 33 | def route(self) -> None: 34 | while True: 35 | image = ... 36 | config.engine.update_feed(image) 37 | self.frame += 1 38 | ``` 39 | Optionally, add a ```release()``` function to control termination of the importation process: 40 | ```python 41 | def release(self) -> None: 42 | terminate() 43 | ``` 44 | 45 | That's it! 46 | > Consider checking out [*cv Importer*](https://github.com/simonarvin/eyeloop/blob/master/importers/cv.py) as a code example. 
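
As a hedged, end-to-end sketch, the skeleton below fills in the snippets above for a hypothetical camera SDK (```my_camera``` is a stand-in name, not a real dependency). Note that the bundled *cv* importer (next file) forwards each frame to ```config.engine.iterate(...)``` inside its ```proceed()``` helper, so verify which engine entry point your EyeLoop version expects before adapting this.
```python
import eyeloop.config as config
from eyeloop.importers.importer import IMPORTER

import my_camera  # hypothetical camera SDK; replace with your camera's driver


class Importer(IMPORTER):
    def __init__(self) -> None:
        super().__init__()  # sets self.scale, self.frame, save/rotate hooks
        self.camera = my_camera.open(0)

    def first_frame(self) -> None:
        image = self.camera.grab()      # one grayscale frame (height x width array)
        height, width = image.shape
        self.arm(width, height, image)  # hands dimensions + first frame to the engine

    def route(self) -> None:
        self.first_frame()
        while self.live:
            image = self.camera.grab()
            if image is None:
                break
            image = self.resize(image)                # set up by arm()
            self.rotate_(image, config.engine.angle)  # optional online rotation
            config.engine.iterate(image)              # online eye-tracking step
            self.save_(image)                         # optional offline copy
            self.frame += 1

    def release(self) -> None:
        self.live = False
        self.camera.close()
        super().release()
```
The base class supplies ```arm```, ```resize```, ```rotate_``` and ```save_``` (see ```importer.py``` below), so only frame acquisition and termination are camera-specific.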
47 | -------------------------------------------------------------------------------- /eyeloop/importers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/importers/__init__.py -------------------------------------------------------------------------------- /eyeloop/importers/cv.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from pathlib import Path 3 | from typing import Optional, Callable 4 | 5 | import cv2 6 | 7 | import eyeloop.config as config 8 | from eyeloop.importers.importer import IMPORTER 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | class Importer(IMPORTER): 14 | 15 | def __init__(self) -> None: 16 | super().__init__() 17 | self.route_frame: Optional[Callable] = None # Dynamically assigned at runtime depending on input type 18 | 19 | def first_frame(self) -> None: 20 | self.vid_path = Path(config.arguments.video) 21 | 22 | 23 | # load first frame 24 | if str(self.vid_path.name) == "0" or self.vid_path.is_file(): # or stream 25 | if str(self.vid_path.name) == "0": 26 | self.capture = cv2.VideoCapture(0) 27 | else: 28 | self.capture = cv2.VideoCapture(str(self.vid_path)) 29 | 30 | self.route_frame = self.route_cam 31 | width = self.capture.get(cv2.CAP_PROP_FRAME_WIDTH) 32 | height = self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT) 33 | 34 | _, image = self.capture.read() 35 | if self.capture.isOpened(): 36 | try: 37 | image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 38 | except: 39 | image = image[..., 0] 40 | else: 41 | raise ValueError( 42 | "Failed to initialize video stream.\n" 43 | "Make sure that the video path is correct, or that your webcam is plugged in and compatible with opencv.") 44 | 45 | elif self.vid_path.is_dir(): 46 | 47 | config.file_manager.input_folderpath = self.vid_path 48 | 49 | config.file_manager.input_folderpath = self.vid_path 50 | 51 | image = config.file_manager.read_image(self.frame) 52 | 53 | try: 54 | height, width, _ = image.shape 55 | image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 56 | self.route_frame = self.route_sequence_sing 57 | except Exception: # TODO fix bare except 58 | logger.exception("first_frame() error: ") 59 | height, width = image.shape 60 | self.route_frame = self.route_sequence_flat 61 | 62 | else: 63 | raise ValueError(f"Video path at {self.vid_path} is not a file or directory!") 64 | 65 | self.arm(width, height, image) 66 | 67 | def route(self) -> None: 68 | self.first_frame() 69 | while True: 70 | if self.route_frame is not None: 71 | self.route_frame() 72 | else: 73 | break 74 | 75 | def proceed(self, image) -> None: 76 | image = self.resize(image) 77 | self.rotate_(image, config.engine.angle) 78 | config.engine.iterate(image) 79 | self.save_(image) 80 | self.frame += 1 81 | 82 | def route_sequence_sing(self) -> None: 83 | 84 | image = config.file_manager.read_image(self.frame) 85 | 86 | self.proceed(image[..., 0]) 87 | 88 | def route_sequence_flat(self) -> None: 89 | 90 | image = config.file_manager.read_image(self.frame) 91 | 92 | self.proceed(image) 93 | 94 | def route_cam(self) -> None: 95 | """ 96 | Routes the capture frame to: 97 | 1: eyeloop for online processing 98 | 2: frame save for offline processing 99 | """ 100 | 101 | _, image = self.capture.read() 102 | if image is not None: 103 | image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 104 | self.proceed(image) 105 | else: 106 | logger.info("No more 
frames to process, exiting.") 107 | self.release() 108 | 109 | def release(self) -> None: 110 | logger.debug(f"cv.Importer.release() called") 111 | if self.capture is not None: 112 | self.capture.release() 113 | 114 | self.route_frame = None 115 | cv2.destroyAllWindows() 116 | super().release() 117 | -------------------------------------------------------------------------------- /eyeloop/importers/importer.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | 4 | import eyeloop.config as config 5 | from eyeloop.utilities.general_operations import tuple_int 6 | 7 | 8 | class IMPORTER: 9 | 10 | def __init__(self): 11 | self.live = True 12 | self.scale = config.arguments.scale 13 | 14 | self.frame = 0 15 | self.vid_path = config.arguments.video 16 | self.capture = None 17 | 18 | if config.arguments.save == 1: 19 | self.save_ = self.save 20 | else: 21 | self.save_ = lambda _: None 22 | 23 | if config.arguments.rotation == 1: 24 | self.rotate_ = self.rotate 25 | else: 26 | self.rotate_ = lambda img, _: None 27 | 28 | def arm(self, width, height, image): 29 | 30 | self.dimensions = tuple_int((width * self.scale, height * self.scale)) 31 | 32 | width, height = self.dimensions 33 | 34 | self.center = (width // 2, height // 2) 35 | 36 | if self.scale == 1: 37 | self.resize = lambda img: img 38 | else: 39 | self.resize = self.resize_image 40 | 41 | self.resize(image) 42 | 43 | # image = self.rotate(image, self.ENGINE.angle) 44 | 45 | config.engine.arm(width, height, image) 46 | 47 | def rotate(self, image: np.ndarray, angle: int) -> np.ndarray: 48 | """ 49 | Performs rotaiton of the image to align visual axes. 50 | """ 51 | 52 | if angle == 0: 53 | return 54 | 55 | M = cv2.getRotationMatrix2D(self.center, angle, 1) 56 | 57 | image[:] = cv2.warpAffine(image, M, self.dimensions, cv2.INTER_NEAREST) 58 | 59 | def resize_image(self, image: np.ndarray) -> np.ndarray: 60 | """ 61 | Resizes image to scale value. -sc 1 (default) 62 | """ 63 | 64 | return cv2.resize(image, None, fx=self.scale, fy=self.scale, interpolation=cv2.INTER_NEAREST) 65 | 66 | def save(self, image: np.ndarray) -> None: 67 | config.file_manager.save_image(image, self.frame) 68 | 69 | def release(self): 70 | self.release = lambda:None 71 | config.engine.release() 72 | -------------------------------------------------------------------------------- /eyeloop/importers/vimba.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from pymba import Frame 4 | from pymba import Vimba 5 | 6 | import eyeloop.config as config 7 | from eyeloop.importers.importer import IMPORTER 8 | 9 | 10 | # For pymba documentation, see: 11 | # https://github.com/morefigs/pymba 12 | 13 | class Importer(IMPORTER): 14 | 15 | def first_frame(self) -> None: 16 | # load first frame 17 | with Vimba() as vimba: 18 | camera = vimba.camera(0) 19 | camera.open() 20 | camera.arm('SingleFrame') 21 | frame = camera.acquire_frame() 22 | camera.disarm() 23 | 24 | image = frame.buffer_data_numpy() 25 | height, width = image.shape 26 | 27 | self.arm(width, height, image) 28 | 29 | def acquire_frame(self, frame: Frame, delay: int = 1) -> None: 30 | """ 31 | Routes the capture frame to two destinations: 32 | 1: EyeLoop for online processing 33 | 2: frame save for offline processing 34 | 35 | :param frame: The frame object to display. 36 | :param delay: Display delay in milliseconds, use 0 for indefinite. 
37 | """ 38 | 39 | image = frame.buffer_data_numpy() 40 | 41 | # image = cv2.cvtColor(image,cv2.COLOR_GRAY2RGB) 42 | 43 | image = self.resize(image) 44 | self.rotate_(image, config.engine.angle) 45 | config.engine.iterate(image) 46 | self.save(image) 47 | 48 | self.frame += 1 49 | 50 | def release(self) -> None: 51 | self.live = False 52 | 53 | def route(self) -> None: 54 | self.first_frame() 55 | 56 | with Vimba() as vimba: 57 | camera = vimba.camera(0) 58 | 59 | camera.open() 60 | 61 | camera.ExposureTime = 200 # play around with this if exposure is too low 62 | camera.ExposureAuto = "Off" 63 | camera.AcquisitionFrameRateMode = 'Basic' 64 | 65 | max_fps = camera.AcquisitionFrameRate 66 | camera.AcquisitionFrameRate = max_fps 67 | 68 | # arm the camera and provide a function to be called upon frame ready 69 | camera.arm('Continuous', self.acquire_frame) 70 | camera.start_frame_acquisition() 71 | 72 | while self.live: 73 | time.sleep(0.1) 74 | 75 | print("Terminating capture...") 76 | 77 | camera.stop_frame_acquisition() 78 | camera.disarm() 79 | 80 | camera.close() 81 | -------------------------------------------------------------------------------- /eyeloop/run_eyeloop.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import logging 3 | import sys 4 | from pathlib import Path 5 | import tkinter as tk 6 | from tkinter import filedialog 7 | import os 8 | import numpy as np 9 | 10 | import eyeloop.config as config 11 | from eyeloop.engine.engine import Engine 12 | from eyeloop.extractors.DAQ import DAQ_extractor 13 | from eyeloop.extractors.frametimer import FPS_extractor 14 | 15 | from eyeloop.utilities.argument_parser import Arguments 16 | from eyeloop.utilities.file_manager import File_Manager 17 | from eyeloop.utilities.format_print import welcome 18 | from eyeloop.utilities.shared_logging import setup_logging 19 | 20 | EYELOOP_DIR = Path(__file__).parent 21 | PROJECT_DIR = EYELOOP_DIR.parent 22 | 23 | logger = logging.getLogger(__name__) 24 | 25 | 26 | class EyeLoop: 27 | """ 28 | EyeLoop is a Python 3-based eye-tracker tailored specifically to dynamic, closed-loop experiments on consumer-grade hardware. 29 | Lead developer: Simon Arvin 30 | Git: https://github.com/simonarvin/eyeloop 31 | """ 32 | 33 | def __init__(self, args, logger=None): 34 | 35 | welcome("Server") 36 | 37 | config.arguments = Arguments(args) 38 | config.file_manager = File_Manager(output_root=config.arguments.output_dir, img_format = config.arguments.img_format) 39 | if logger is None: 40 | logger, logger_filename = setup_logging(log_dir=config.file_manager.new_folderpath, module_name="run_eyeloop") 41 | 42 | #if config.arguments.blink == 0: 43 | self.run() 44 | #else: 45 | # self.test_blink() 46 | 47 | def test_blink(self): 48 | from eyeloop.guis.blink_test import GUI 49 | config.graphical_user_interface = GUI() 50 | config.engine = Engine(self) 51 | self.run_importer() 52 | 53 | def run(self): 54 | #try: 55 | # config.blink = np.load(f"{EYELOOP_DIR}/blink_.npy")[0] * .8 56 | #except: 57 | # print("\n(!) NO BLINK DETECTION. 
Run 'eyeloop --blink 1' to calibrate\n") 58 | 59 | 60 | from eyeloop.guis.minimum.minimum_gui import GUI 61 | config.graphical_user_interface = GUI() 62 | 63 | config.engine = Engine(self) 64 | 65 | fps_counter = FPS_extractor() 66 | data_acquisition = DAQ_extractor(config.file_manager.new_folderpath) 67 | 68 | file_path = config.arguments.extractors 69 | 70 | if file_path == "p": 71 | root = tk.Tk() 72 | root.withdraw() 73 | file_path = filedialog.askopenfilename() 74 | 75 | extractors_add = [] 76 | 77 | if file_path != "": 78 | try: 79 | logger.info(f"including {file_path}") 80 | sys.path.append(os.path.dirname(file_path)) 81 | module_import = os.path.basename(file_path).split(".")[0] 82 | 83 | extractor_module = importlib.import_module(module_import) 84 | extractors_add = extractor_module.extractors_add 85 | 86 | except Exception as e: 87 | logger.info(f"extractors not included, {e}") 88 | 89 | extractors_base = [fps_counter, data_acquisition] 90 | extractors = extractors_add + extractors_base 91 | 92 | config.engine.load_extractors(extractors) 93 | 94 | self.run_importer() 95 | 96 | def run_importer(self): 97 | try: 98 | logger.info(f"Initiating tracking via Importer: {config.arguments.importer}") 99 | importer_module = importlib.import_module(f"eyeloop.importers.{config.arguments.importer}") 100 | config.importer = importer_module.Importer() 101 | config.importer.route() 102 | 103 | # exec(import_command, globals()) 104 | 105 | except ImportError: 106 | logger.exception("Invalid importer selected") 107 | 108 | 109 | def main(): 110 | EyeLoop(sys.argv[1:], logger=None) 111 | 112 | 113 | if __name__ == '__main__': 114 | 115 | main() 116 | -------------------------------------------------------------------------------- /eyeloop/utilities/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/eyeloop/utilities/__init__.py -------------------------------------------------------------------------------- /eyeloop/utilities/argument_parser.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from pathlib import Path 3 | 4 | EYELOOP_DIR = Path(__file__).parent.parent 5 | PROJECT_DIR = EYELOOP_DIR.parent 6 | 7 | 8 | class Arguments: 9 | """ 10 | Parses all command-line arguments and config.pupt parameters. 
11 | """ 12 | 13 | def __init__(self, args) -> None: 14 | self.config = None 15 | self.markers = None 16 | self.video = None 17 | self.output_dir = None 18 | self.importer = None 19 | self.scale = None 20 | self.tracking = None 21 | self.model = None 22 | 23 | self.parsed_args = self.parse_args(args) 24 | self.build_config(parsed_args=self.parsed_args) 25 | 26 | @staticmethod 27 | def parse_args(args): 28 | parser = argparse.ArgumentParser(description='Help list') 29 | parser.add_argument("-v", "--video", default="0", type=str, 30 | help="Input a video sequence for offline processing.") 31 | 32 | parser.add_argument("-o", "--output_dir", default=str(PROJECT_DIR.joinpath("data").absolute()), type=str, 33 | help="Specify output destination.") 34 | parser.add_argument("-c", "--config", default="0", type=str, help="Input a .pupt config file (preset).") 35 | parser.add_argument("-i", "--importer", default="cv", type=str, 36 | help="Set import route of stream (cv, vimba, ...)") 37 | parser.add_argument("-sc", "--scale", default=1, type=float, help="Scale the stream (default: 1; 0-1)") 38 | parser.add_argument("-m", "--model", default="ellipsoid", type=str, 39 | help="Set pupil model type (circular; ellipsoid = default).") 40 | parser.add_argument("-ma", "--markers", default=0, type=int, 41 | help="Enable/disable artifact removing markers (0: disable/default; 1: enable)") 42 | parser.add_argument("-tr", "--tracking", default=1, type=int, 43 | help="Enable/disable tracking (1/enabled: default).") 44 | 45 | parser.add_argument("-ex", "--extractors", default="", type=str, 46 | help="Set file-path of extractor Python file. p = start file prompt.") 47 | 48 | parser.add_argument("-imgf", "--img_format", default="frame_$.jpg", type=str, 49 | help="Set img format for import (default: frame_$.jpg where $ = 1, 2,...)") 50 | 51 | parser.add_argument("-sv", "--save", default=1, type=int, 52 | help="Save video feed or not (yes/no, 1/0; default = 1)") 53 | 54 | parser.add_argument("-rt", "--rotation", default=0, type=int, 55 | help="Enable online rotation (yes/no, 1/0; default = 0)") 56 | 57 | parser.add_argument("-fps", "--framerate", default=1, type=float, 58 | help="How often to update preview window (default = 1/second)") 59 | 60 | parser.add_argument("-cl", "--clear", default=0, type=float, 61 | help="Clear parameters (yes/no, 1/0) - default = 0") 62 | 63 | parser.add_argument("-p", "--params", default="", type=str, 64 | help="Load pupil/cr parameter file (.npy)") 65 | 66 | parser.add_argument("-b", "--blink", default="", type=str, 67 | help="Load blink calibration file (.npy)") 68 | 69 | return parser.parse_args(args) 70 | 71 | def build_config(self, parsed_args): 72 | self.config = parsed_args.config 73 | 74 | if self.config != "0": # config file was set. 
75 |             self.parse_config(self.config)
76 | 
77 |         self.markers = parsed_args.markers
78 |         self.video = Path(parsed_args.video.strip("\'\"")).absolute()  # Handle quotes used in argument
79 |         self.output_dir = Path(parsed_args.output_dir.strip("\'\"")).absolute()
80 |         self.importer = parsed_args.importer.lower()
81 |         self.scale = parsed_args.scale
82 |         self.tracking = parsed_args.tracking
83 |         self.model = parsed_args.model.lower()
84 |         self.extractors = parsed_args.extractors
85 |         self.img_format = parsed_args.img_format
86 |         self.save = parsed_args.save
87 |         self.rotation = parsed_args.rotation
88 |         self.fps = parsed_args.framerate
89 |         self.clear = parsed_args.clear
90 |         self.params = parsed_args.params
91 |         self.blinkcalibration = parsed_args.blink
92 |         # self.blink = parsed_args.blink
93 | 
94 |     def parse_config(self, config: str) -> None:
95 |         with open(config, "r") as content:
96 |             print("Loading config preset: ", config)
97 |             for line in content:
98 |                 split = line.split("=")
99 |                 key = split[0].strip()  # preset name to the left of "="
100 |                 parameter = split[1].rstrip("\n").split("\"")
101 | 
102 |                 if len(parameter) != 1:
103 |                     parameter = parameter[1]
104 |                 else:
105 |                     parameter = parameter[0]
106 | 
107 |                 if key == "video":
108 |                     print("Video preset: ", parameter)
109 |                     self.video = parameter
110 |                 elif key == "dest":
111 |                     print("Destination preset: ", parameter)
112 |                     self.output_dir = Path(parameter).absolute()
113 | 
114 |                 elif key == "import":
115 |                     print("Importer preset: ", parameter)
116 |                     self.importer = parameter
117 |                 elif key == "model":
118 |                     print("Model preset: ", parameter)
119 |                     self.model = parameter
120 |                 elif key == "markers":
121 |                     print("Markers preset: ", parameter)
122 |                     self.markers = parameter
123 |                 elif key == "extractors":
124 |                     print("Extractors preset: ", parameter)
125 |                     self.extractors = parameter
126 |                 elif key == "img_format":
127 |                     print("img_format preset: ", parameter)
128 |                     self.img_format = parameter
129 |                 elif key == "save":
130 |                     print("save preset: ", parameter)
131 |                     self.save = parameter
132 |                 elif key == "rotation":
133 |                     print("rotation preset: ", parameter)
134 |                     self.rotation = parameter
135 |                 elif key == "framerate":
136 |                     print("framerate preset: ", parameter)
137 |                     self.fps = parameter
138 | 
139 |         print("")
140 | 
--------------------------------------------------------------------------------
/eyeloop/utilities/file_manager.py:
--------------------------------------------------------------------------------
1 | import time
2 | from pathlib import Path
3 | from typing import Union
4 | 
5 | import cv2
6 | import numpy as np
7 | 
8 | 
9 | class File_Manager:
10 |     """
11 |     The file manager...
12 |     - Generates a unique, time-stamped folder
13 |     which extractors may access via file_manager.new_folderpath.
14 |     - Reads image sequences for offline analysis.
15 |     - Saves images from camera streams.
16 |     """
17 | 
18 |     def __init__(self, output_root: Union[Path, str], img_format: str) -> None:
19 |         self.output_root = Path(output_root)  # accept str or Path, as the type hint allows
20 |         self.input_folderpath = ""
21 |         self.img_format = img_format
22 | 
23 | 
24 |         self.output_root.mkdir(exist_ok=True, parents=True)
25 | 
26 |         timestamp = time.strftime("%Y%m%d-%H%M%S")
27 |         self.new_folderpath = self.output_root / f"trial_{timestamp}"
28 |         self.new_folderpath.mkdir(exist_ok=True)
29 |         print(f"Outputting data to {self.new_folderpath}")  # TODO convert to logging call
30 | 
31 |     def save_image(self, image: np.ndarray, frame: int) -> None:
32 |         """
33 |         Saves the video sequence to the new folderpath.
34 |         """
35 |         img_pth = Path(self.new_folderpath, self.img_format.replace("$", str(frame), 1))
36 |         cv2.imwrite(str(img_pth), image)
37 | 
38 |     def read_image(self, frame: int) -> np.ndarray:
39 |         """
40 |         Reads the video sequence from the input folderpath.
41 |         Command-line argument -v [dir] sets this vid_path.
42 |         """
43 | 
44 |         img_pth = Path(self.input_folderpath, self.img_format.replace("$", str(frame), 1))
45 |         image = cv2.imread(str(img_pth))
46 | 
47 |         if image is None:
48 |             raise ValueError("No more frames.")
49 | 
50 |         return np.array(image)
51 | 
--------------------------------------------------------------------------------
/eyeloop/utilities/format_print.py:
--------------------------------------------------------------------------------
1 | from os import system, name
2 | 
3 | import eyeloop.config as config
4 | 
5 | tab = " "
6 | linebreak = "\n{}{}\n".format(tab, 30 * "_")
7 | 
8 | journal = "doi: 10.1101/2020.07.03.186387"
9 | git = "repo: https://github.com/simonarvin/eyeloop"
10 | 
11 | 
12 | def clear() -> None:
13 |     # for Windows
14 |     if name == 'nt':
15 |         _ = system('cls')
16 | 
17 |     # for mac and linux (here, os.name is 'posix')
18 |     else:
19 |         _ = system('clear')
20 | 
21 | 
22 | def logo(label="") -> str:
23 |     logo = r"""
24 |     >> {}
25 | 
26 |     ___ ___ __ __ __
27 |     |__ \ / |__ | / \ / \ |__)
28 |     |___ | |___ |___ \__/ \__/ |
29 | 
30 |     v{}
31 |     """.format(label, config.version)
32 |     return logo
33 | 
34 | 
35 | def welcome(label="") -> None:
36 |     clear()
37 |     msg = r"""
38 |     {}
39 |     Developed by Simon Arvin
40 |     Yonehara Laboratory
41 |     Danish Research Institute of
42 |     Translational Neuroscience (DANDRITE)
43 | 
44 |     {}
45 |     {}{}""".format(logo(label), git, journal, linebreak)
46 |     print(msg)
47 | 
--------------------------------------------------------------------------------
/eyeloop/utilities/general_operations.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | 
3 | 
4 | def distance(p1: np.ndarray, p2: np.ndarray) -> float:
5 |     return np.linalg.norm(p1 - p2)
6 | 
7 | 
8 | def to_int(non_int: float) -> int:
9 |     return int(non_int + 0.5)
10 | 
11 | 
12 | def tuple_int(non_int: tuple) -> tuple:
13 |     return tuple(int(element + 0.5) for element in non_int)
14 | 
--------------------------------------------------------------------------------
/eyeloop/utilities/logging_config.yaml:
--------------------------------------------------------------------------------
1 | # Logging config
2 | version: 1
3 | disable_existing_loggers: False
4 | formatters:
5 |   simple:
6 |     format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
7 | 
8 | handlers:
9 |   info_file_handler:
10 |     class: logging.handlers.RotatingFileHandler
11 |     level: INFO
12 |     formatter: simple
13 |     filename: logs/python_logging.log
14 |     encoding: utf8
15 | 
16 |   debug_file_handler:
17 |     class: logging.handlers.RotatingFileHandler
18 |     level: DEBUG
19 |     formatter: simple
20 |     filename: logs/python_logging.log
21 |     encoding: utf8
22 | 
23 |   error_file_handler:
24 |     class: logging.handlers.RotatingFileHandler
25 |     level: ERROR
26 |     formatter: simple
27 |     filename: logs/python_logging.log
28 |     encoding: utf8
29 | 
30 |   console:
31 |     class: logging.StreamHandler
32 |     level: DEBUG
33 |     formatter: simple
34 |     stream: ext://sys.stdout
35 | root:
36 |   level: DEBUG
37 |   handlers: [debug_file_handler, console]
38 |   propagate: True
39 | 
40 | 
--------------------------------------------------------------------------------
/eyeloop/utilities/parser.py:
--------------------------------------------------------------------------------
1 | import json
2 | from tkinter import filedialog
3 | 
4 | import numpy as np
5 | from eyeloop.extractors.converter import Conversion_extractor
6 | 
7 | class Parser:
8 |     data = []
9 |     file_path = ""
10 | 
11 |     def __init__(self, animal: str) -> None:
12 |         self.animal = animal
13 | 
14 |     def load_log(self, file_path="") -> None:
15 |         if file_path == "":
16 |             file_path = filedialog.askopenfilename(filetypes=(("json files", "*.json"), ("all files", "*.*")))
17 | 
18 |         try:
19 |             file = open(file_path, "r")
20 |         except FileNotFoundError:
21 |             raise ValueError("Please select a valid log.")
22 |         self.file_path = file_path
23 |         self.data = []  # reset per load, so repeated loads do not accumulate entries
24 |         for line in file.readlines():
25 |             self.data.append(json.loads(line))
26 |         file.close()
27 | 
28 |     def crop(self, start, end=-1):
29 |         if end != -1:
30 |             self.data = self.data[start:end]
31 |         else:
32 |             self.data = self.data[start:]
33 | 
34 |     def compute_area(self) -> np.ndarray:
35 |         converter = Conversion_extractor("area", self.animal)
36 |         return np.array([converter.fetch(entry) for entry in self.data])
37 | 
38 |     def compute_coordinates(self) -> np.ndarray:
39 |         converter = Conversion_extractor("coordinates", self.animal)
40 |         return np.array([converter.fetch(entry) for entry in self.data])
41 | 
42 |     def extract_time(self) -> np.ndarray:
43 |         return np.array([entry["time"] for entry in self.data])
44 | 
45 |     def extract_frame(self) -> np.ndarray:
46 |         return np.array([entry["frame"] for entry in self.data])
47 | 
48 |     def extract_unique_key(self, key: str) -> np.ndarray:
49 |         extract = []
50 |         for entry in self.data:
51 |             try:
52 |                 extract.append(entry[key])
53 |             except KeyError:
54 |                 pass  # key not in log entry. Skip to next.
55 |         return np.array(extract)
56 | 
57 |     def to_csv(self):
58 |         try:
59 |             import pandas as pd
60 |         except ImportError:
61 |             raise Exception("Please make sure that pandas is installed (pip install pandas).")
62 | 
63 |         file = pd.read_json(self.file_path, lines=True)
64 |         new_path = self.file_path + "_csv"
65 |         file.to_csv(new_path, index=None)
66 |         print("JSON successfully converted to CSV.")
67 |         print("CSV saved at {}".format(new_path))
68 | 
69 | 
70 |     def legacy_to_modern(self):
71 |         mod_path = self.file_path + "_mod"
72 | 
73 |         if "cr_dim" in self.data[0]:  # Legacy 2
74 |             with open(mod_path, "a") as datalog:
75 |                 for i, entry in enumerate(self.data):
76 |                     # {"time": 1590337139.0425687, "frame": 12, "blink": -1, "cr_dim": [-1, -1], "cr_cen": -1, "cr_ang": -1, "pupil_dim": [-1, -1], "pupil_cen": -1, "pupil_ang": -1}
77 | 
78 |                     dataout = {
79 |                         "time": entry["time"],
80 |                         "frame": entry["frame"],
81 |                         "blink": entry["blink"],
82 |                         "cr": (entry["cr_dim"], entry["cr_cen"], entry["cr_ang"]),
83 |                         "pupil": (entry["pupil_dim"], entry["pupil_cen"], entry["pupil_ang"])
84 |                     }
85 | 
86 |                     datalog.write(json.dumps(dataout) + "\n")
87 |         print("Legacy log converted to modern.")
88 | 
--------------------------------------------------------------------------------
/eyeloop/utilities/shared_logging.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import logging.config
3 | import os
4 | from datetime import datetime
5 | from pathlib import Path
6 | 
7 | import yaml
8 | 
9 | 
10 | def setup_logging(log_config_path=f"{Path(__file__).parent}/logging_config.yaml", log_dir="logs", module_name=None) -> \
11 |         (logging.Logger, str):
12 |     """
13 |     Sets up the logging configuration and returns the logger object.
14 | 15 | :param log_config_path: Path to logging config yaml file 16 | :param log_dir: Directory that log files will be written into (relative or full path) 17 | :param module_name: Module name to append to log filename. If none given __name__ will be used. 18 | :returns: Tuple of (the newly created logging object, path to log file (possibly None if no config was found)) 19 | """ 20 | log_filename = None 21 | 22 | # Check for permissions and change log dir if write access isn't granted 23 | 24 | if Path(log_dir).exists() is False: 25 | print(f"log dir not found, Attempting to create dir {log_dir}") 26 | Path(log_dir).mkdir(parents=True, exist_ok=True) 27 | 28 | print(f"Writing log to {Path(log_dir).absolute()}") 29 | 30 | if module_name is None: 31 | module_name = __name__ 32 | 33 | if os.path.exists(log_config_path): 34 | with open(log_config_path, 'rt') as f: 35 | config = yaml.safe_load(f.read()) 36 | 37 | # Set 38 | for handler_name, handler in config["handlers"].items(): 39 | if handler_name != "console": 40 | log_filename = rf"{log_dir}/{datetime.now().strftime('%Y-%m-%d-%H%M%S')}_{module_name}.log" 41 | handler["filename"] = log_filename 42 | 43 | logging.config.dictConfig(config) 44 | 45 | else: 46 | raise ValueError(f"Loading logger config failed from {log_config_path} for module {module_name}") 47 | 48 | new_logger = logging.getLogger(module_name) 49 | 50 | return new_logger, log_filename 51 | -------------------------------------------------------------------------------- /misc/contributors.md: -------------------------------------------------------------------------------- 1 | # Contributions # 2 | 3 |

4 | 5 |

6 | 
7 | EyeLoop is an actively maintained open-source repository based in the Yonehara lab at the Danish Research Institute of Translational Neuroscience.
8 | Contributors are welcome to join in and help improve dynamic eye-tracking tailored toward neuroscientific research.
9 | 
10 | We are continuously improving EyeLoop and adding new features. Artists, programmers, biologists, students, ..., are all welcome.
11 | 
12 | If interested, please do not hesitate to write to us at: sarv@dandrite.au.dk.
13 | 
14 | ## Planned new features ##
15 | 
16 | - [ ] Multi-eye tracking.
17 | - [ ] Ultra-high speed tracking.
18 | 
--------------------------------------------------------------------------------
/misc/imgs/aarhusuniversity.svg:
--------------------------------------------------------------------------------
[SVG markup not captured in this dump; residual text: "-1Asset 1"]
--------------------------------------------------------------------------------
/misc/imgs/constant.svg:
--------------------------------------------------------------------------------
[SVG markup not captured in this dump; residual text: "-1Asset 14"]
--------------------------------------------------------------------------------
/misc/imgs/extractor_scheme.svg:
--------------------------------------------------------------------------------
[SVG markup not captured in this dump; residual text: "-1Asset 21"]
--------------------------------------------------------------------------------
/misc/imgs/logo.svg:
--------------------------------------------------------------------------------
[SVG markup not captured in this dump; residual text: "-1Asset 19"]
--------------------------------------------------------------------------------
/misc/imgs/models.svg:
--------------------------------------------------------------------------------
[SVG markup not captured in this dump; residual text: "-1Asset 80"]
--------------------------------------------------------------------------------
/misc/imgs/sample_1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/misc/imgs/sample_1.gif
--------------------------------------------------------------------------------
/misc/imgs/sample_2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/misc/imgs/sample_2.gif
--------------------------------------------------------------------------------
/misc/imgs/sample_3.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/misc/imgs/sample_3.gif
--------------------------------------------------------------------------------
/misc/imgs/sample_4.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/misc/imgs/sample_4.gif
--------------------------------------------------------------------------------
/misc/imgs/setup.svg:
--------------------------------------------------------------------------------
[SVG markup not captured in this dump; residual text: "-1Asset 79"]
--------------------------------------------------------------------------------
/misc/imgs/yoneharalab.svg:
--------------------------------------------------------------------------------
[SVG markup not captured in this dump; residual text: "-1Asset 10"]
--------------------------------------------------------------------------------
/misc/travis-sample/Frmd7.m4v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/misc/travis-sample/Frmd7.m4v
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | opencv-contrib-python>=4.2.*
2 | pymba==0.3.*
3 | numpy==1.19.*
4 | PyYaml
5 | 
--------------------------------------------------------------------------------
/requirements_examples.txt:
--------------------------------------------------------------------------------
1 | matplotlib
2 | 
--------------------------------------------------------------------------------
/requirements_testing.txt:
--------------------------------------------------------------------------------
1 | pytest
2 | tox
3 | pandas
4 | pytest-html
5 | pytest-cov
6 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from setuptools import setup, find_packages
3 | 
4 | install_requires = []
5 | 
6 | with open('requirements.txt') as f:
7 |     for line in f.readlines():
8 |         req = line.strip()
9 |         if not req or req.startswith('#') or '://' in req:
10 |             continue
11 |         install_requires.append(req)
12 | 
13 | setup(
14 |     name='eyeloop',
15 |     description='EyeLoop is a Python 3-based eye-tracker tailored specifically to dynamic, '
16 |                 'closed-loop experiments on consumer-grade hardware.',
17 |     long_description=open('README.md').read(),
18 |     long_description_content_type="text/markdown",
19 |     url='https://github.com/simonarvin/eyeloop',
20 |     license='GPL',
21 |     license_file='LICENSE',
22 |     platforms='any',
23 |     python_requires='>=3.7',
24 |     version='0.35',
25 |     entry_points={
26 |         'console_scripts': [
27 |             'eyeloop=eyeloop.run_eyeloop:main'
28 |         ]
29 |     },
30 |     packages=find_packages(include=["eyeloop*"]),
31 |     include_package_data=True,
32 |     install_requires=install_requires,
33 |     project_urls={
34 |         "Documentation": "https://github.com/simonarvin/eyeloop",
35 |         "Source": "https://github.com/simonarvin/eyeloop",
36 |         "Tracker": "https://github.com/simonarvin/eyeloop/issues"
37 |     }
38 | )
39 | 
--------------------------------------------------------------------------------
/tests/test_integration.py:
--------------------------------------------------------------------------------
1 | # Basic integration tests
2 | import json
3 | import logging
4 | from pathlib import Path
5 | 
6
import pandas as pd 7 | import pytest 8 | 9 | from eyeloop import run_eyeloop 10 | 11 | TESTDATA_DIR = Path(__file__).parent / "testdata" 12 | 13 | TEST_VIDEOS = { # Basic short videos to use for end to end testing 14 | "short_human_3blink": { 15 | "path": Path(TESTDATA_DIR, "short_human_3blink.mp4").absolute(), 16 | "animal": "human", 17 | "blinks": 3, 18 | "n_frames": 282, 19 | }, 20 | "short_mouse_noblink": { 21 | "path": Path(TESTDATA_DIR, "short_mouse_noblink.m4v").absolute(), 22 | "animal": "mouse", 23 | "blinks": 0, 24 | "n_frames": 307, 25 | } 26 | } 27 | logger = logging.getLogger(__name__) 28 | 29 | 30 | def output_json_parser(json_file: Path) -> pd.DataFrame: 31 | data_log = json_file.read_text().splitlines() 32 | data_dicts = [json.loads(line) for line in data_log] 33 | return pd.DataFrame(data_dicts) 34 | 35 | 36 | class TestIntegration: 37 | @pytest.mark.parametrize("test_video_name", ["short_human_3blink", "short_mouse_noblink"]) 38 | def test_integration(self, tmpdir, test_video_name: str): 39 | test_video = TEST_VIDEOS[test_video_name] 40 | print(f"Running test on video {test_video}") 41 | testargs = ["--video", str(test_video["path"]), 42 | "--output_dir", str(tmpdir)] 43 | eyeloop_obj = run_eyeloop.EyeLoop(args=testargs, logger=logger) 44 | 45 | # Ensure output is expected 46 | data_dir = list(Path(tmpdir).glob("trial_*"))[0] 47 | vid_frames = list(Path(data_dir).glob("frame_*.jpg")) 48 | assert len(vid_frames) == test_video["n_frames"] + 1 # Account for 0-indexing 49 | datalog = Path(data_dir, "datalog.json") 50 | assert datalog.exists() 51 | 52 | #data_df = output_json_parser(datalog) 53 | #assert len(data_df.index) == test_video["n_frames"] 54 | #assert Path(data_dir, "output.avi").exists() 55 | # TODO add assertions based on blink, cr and pupil values 56 | 57 | #def test_no_video_stream_error(self): 58 | # with pytest.raises(ValueError) as excinfo: 59 | # run_eyeloop.EyeLoop(args=[]) 60 | # assert "Failed to initialize video stream" in str(excinfo.value) 61 | 62 | # Tests for each importer 63 | 64 | # TODO Add tests that use that animal tag of the videos 65 | -------------------------------------------------------------------------------- /tests/testdata/short_human_3blink.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/tests/testdata/short_human_3blink.mp4 -------------------------------------------------------------------------------- /tests/testdata/short_mouse_noblink.m4v: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonarvin/eyeloop/cd22fb79e8d2c186ed23e2b15cef57887bbdeffe/tests/testdata/short_mouse_noblink.m4v -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # tox (https://tox.readthedocs.io/) is a tool for running tests 2 | # in multiple virtualenvs. This configuration file will run the 3 | # test suite on all supported python versions. To use it, "pip install tox" 4 | # and then run "tox" from this directory. 
5 | 6 | [tox] 7 | envlist = py38 8 | 9 | [testenv] 10 | # using -Ur to force updating of requirements as the files change doesn't work yet, see https://github.com/tox-dev/tox/issues/149 11 | deps = 12 | pytest 13 | pytest-cov 14 | pytest-html 15 | -Ur{toxinidir}/requirements.txt 16 | -Ur{toxinidir}/requirements_testing.txt 17 | 18 | commands = 19 | pytest --cov=eyeloop --cov-report html:tests/reports/coverage --html=tests/reports/pytest_results.html 20 | 21 | [pytest] 22 | log_cli = True 23 | log_format = %(asctime)s %(levelname)s %(message)s 24 | log_date_format = %Y-%m-%d %H:%M:%S 25 | log_cli_date_format = %Y-%m-%d %H:%M:%S 26 | log_cli_format = %(asctime)s %(levelname)s %(message)s 27 | log_cli_level = DEBUG 28 | 29 | 30 | --------------------------------------------------------------------------------
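
The run_eyeloop.py entry point shown earlier wires the GUI, engine, extractors and importer together before handing control to the importer's route(). As a quick orientation, here is a minimal usage sketch; the video path and output directory are placeholders, and it assumes only the interfaces visible in run_eyeloop.py and tests/test_integration.py (a display is needed for the minimum GUI, as the xvfb service in the Travis config suggests):

# usage_sketch.py — hypothetical example, not part of the repository
from eyeloop.run_eyeloop import EyeLoop

# Same options as the command line:  eyeloop --video /path/to/eye.mp4 --output_dir ./data
args = [
    "--video", "/path/to/eye.mp4",   # -v: offline video file (or a camera index such as "0")
    "--output_dir", "./data",        # -o: a time-stamped trial_* folder is created here
    "--importer", "cv",              # -i: OpenCV import route (default)
    "--model", "ellipsoid",          # -m: pupil model (circular or ellipsoid)
]

# Blocks while frames are processed; the integration test expects frame_*.jpg
# images and a datalog.json inside the generated trial_* folder.
EyeLoop(args, logger=None)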
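
run_eyeloop.py also loads optional user extractors: the file passed with --extractors (or chosen in a file prompt when "-ex p" is given) is imported, and its module-level extractors_add list is appended to the default FPS and DAQ extractors before config.engine.load_extractors(...) is called. A minimal sketch of such a file follows; the filename is hypothetical and the extractor chosen is only meant to illustrate the mechanism:

# my_extractors.py — hypothetical user file, loaded with:  eyeloop --extractors /path/to/my_extractors.py
from eyeloop.extractors.frametimer import FPS_extractor

# run_eyeloop.py reads this module-level list and hands its contents to the engine.
extractors_add = [FPS_extractor()]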
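
Finally, the Parser utility in eyeloop/utilities/parser.py reads a recorded data log (one JSON object per line) back into Python for analysis. A short sketch of typical use, assuming a recorded log; the animal tag and log path are placeholders, and pandas must be installed for the CSV conversion:

# parse_sketch.py — hypothetical example
from eyeloop.utilities.parser import Parser

log_parser = Parser(animal="mouse")  # tag forwarded to the Conversion_extractor
log_parser.load_log("data/trial_20200101-120000/datalog.json")  # placeholder trial folder

timestamps = log_parser.extract_time()   # numpy array of per-entry time stamps
frames = log_parser.extract_frame()      # numpy array of frame indices
log_parser.to_csv()                      # writes "<log path>_csv" next to the log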