├── pyEyeTrack
│   ├── EyeTracking
│   │   ├── __init__.py
│   │   ├── AbstractEyeTrackingClass.py
│   │   ├── PupilBlinkingClass.py
│   │   ├── BlinkingClass.py
│   │   └── PupilTrackingClass.py
│   ├── AudioVideoRecording
│   │   ├── __init__.py
│   │   ├── VideoRecordingClass.py
│   │   └── AudioRecordingClass.py
│   ├── __init__.py
│   ├── DataHandling.py
│   └── PyEyeTrackRunnerClass.py
├── setup.cfg
├── Examples
│   ├── Example_2
│   │   ├── wallpaper.jpg
│   │   ├── Ex_2.py
│   │   ├── README.md
│   │   └── Ex_2_ImageUI.py
│   ├── Example_5
│   │   ├── wallpaper.jpg
│   │   ├── Ex_5.py
│   │   ├── README.md
│   │   └── Ex_5_ImageUI_EscExit.py
│   ├── Example_3
│   │   ├── Ex_3_slot_machine_symbols.png
│   │   ├── Ex_3.py
│   │   ├── README.md
│   │   ├── Ex_3_WidgetMachine.py
│   │   └── Ex_3_SlotsMachine.py
│   ├── Example_1
│   │   ├── Ex_1.py
│   │   ├── README.md
│   │   ├── SampleFile.txt
│   │   └── Ex_1_SampleTextUI.py
│   └── Example_4
│       ├── Ex_4.py
│       └── README.md
├── requirements.txt
├── .gitignore
├── LICENSE
├── README.md
└── setup.py
/pyEyeTrack/EyeTracking/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/pyEyeTrack/AudioVideoRecording/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 |
2 | [metadata]
3 | description-file = README.md
--------------------------------------------------------------------------------
/pyEyeTrack/__init__.py:
--------------------------------------------------------------------------------
1 | from pyEyeTrack.PyEyeTrackRunnerClass import pyEyeTrack
--------------------------------------------------------------------------------
/Examples/Example_2/wallpaper.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/algoasylum/pyEyeTrack/HEAD/Examples/Example_2/wallpaper.jpg
--------------------------------------------------------------------------------
/Examples/Example_5/wallpaper.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/algoasylum/pyEyeTrack/HEAD/Examples/Example_5/wallpaper.jpg
--------------------------------------------------------------------------------
/Examples/Example_3/Ex_3_slot_machine_symbols.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/algoasylum/pyEyeTrack/HEAD/Examples/Example_3/Ex_3_slot_machine_symbols.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | dlib==19.4
2 | keyboard==0.13.3
3 | numpy==1.18.1
4 | opencv==3.3.1
5 | pandas==0.24.0
6 | pyaudio==0.2.11
7 | pyqt==5.6.0
--------------------------------------------------------------------------------
/Examples/Example_3/Ex_3.py:
--------------------------------------------------------------------------------
1 | from pyEyeTrack.PyEyeTrackRunnerClass import pyEyeTrack
2 |
3 |
4 | ptr = pyEyeTrack()
5 | ptr.pyEyeTrack_runner(
6 | UI=True,
7 | UI_file_name='Ex_3_SlotsMachine',
8 | blinkDetection=True,
9 | eyeTrackingLog=False)
10 |
--------------------------------------------------------------------------------
/Examples/Example_2/Ex_2.py:
--------------------------------------------------------------------------------
1 | from pyEyeTrack.PyEyeTrackRunnerClass import pyEyeTrack
2 |
3 | ptr = pyEyeTrack()
4 | ptr.pyEyeTrack_runner(
5 | UI=True,
6 | UI_file_name='Ex_2_ImageUI',
7 | blinkDetection=True,
8 | eyeTrackingLog=True,
9 | eyeTrackingFileName='User_2')
10 |
--------------------------------------------------------------------------------
/Examples/Example_1/Ex_1.py:
--------------------------------------------------------------------------------
1 | from pyEyeTrack.PyEyeTrackRunnerClass import pyEyeTrack
2 |
3 | ptr = pyEyeTrack()
4 | ptr.pyEyeTrack_runner(
5 | UI=True,
6 | UI_file_name='Ex_1_SampleTextUI',
7 | pupilTracking=True,
8 | eyeTrackingLog=True,
9 | eyeTrackingFileName='User_1')
10 |
--------------------------------------------------------------------------------
/Examples/Example_4/Ex_4.py:
--------------------------------------------------------------------------------
1 | from pyEyeTrack.PyEyeTrackRunnerClass import pyEyeTrack
2 |
3 |
4 | ptr = pyEyeTrack()
5 | ptr.pyEyeTrack_runner(
6 | pupilTracking=True,
7 | blinkDetection=True,
8 | video_source=r"#add path to the video file",
9 | eyeTrackingLog=True,
10 | eyeTrackingFileName='User_4')
11 |
--------------------------------------------------------------------------------
/Examples/Example_5/Ex_5.py:
--------------------------------------------------------------------------------
1 | from pyEyeTrack.PyEyeTrackRunnerClass import pyEyeTrack
2 |
3 |
4 | ptr = pyEyeTrack()
5 | ptr.pyEyeTrack_runner(
6 | UI=True,
7 | UI_file_name='Ex_5_ImageUI_EscExit',
8 | audioRecorder=True,
9 | audioName='audio5',
10 | videoName='video5',
11 | videoRecorder=True)
12 |
13 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | dist/
2 | MANIFEST
3 | pyEyeTrack/__pycache__
4 | test.py
5 | pyEyeTrack/EyeTracking/__pycache__
6 | pyEyeTrack/AudioVideoRecording/__pycache__
7 | PyEyeTrack.egg-info
8 | pyEyeTrack/EyeTracking/test.py
9 | shape_predictor_68_face_landmarks.dat
10 | shape_predictor_68_face_landmarks.dat.bz2
11 | Output
12 | Examples/Example_1/__pycache__
13 | Examples/Example_2/__pycache__
14 | Examples/Example_3/__pycache__
15 | Examples/Example_4/__pycache__
16 | Examples/Example_5/__pycache__
--------------------------------------------------------------------------------
/Examples/Example_4/README.md:
--------------------------------------------------------------------------------
1 | ## Pupil Tracking and Blink Detection on video input
2 |
3 | Description: This application/demo tracks eyes and returns the pupil coordinates from a video.
4 |
5 | Working: The library takes video input specified by the user and returns the pupil coordinates.
6 |
7 | Library Function: The application makes use of the pupil tracking and blink detection functionalities of the library.
8 |
9 | Output: PyEyeTrack returns a CSV containing the x and y coordinates and the blink log for both eyes.
10 |
11 |
--------------------------------------------------------------------------------
/Examples/Example_5/README.md:
--------------------------------------------------------------------------------
1 | ## Capturing audio and video along with UI
2 | Description: This application/demo records audio and video while displaying a user-specified UI. The video can later be used for pupil tracking, as in Example 4.
3 |
4 | Working/Library Function: This application/demo uses the audio recording and video recording functionalities of the library. The application can take any user-specified UI as the input.
5 |
6 | Output: The program returns an audio and video file.
7 |
8 | Dependency:
9 | To run this example install [PyQt 5.6.0](https://anaconda.org/conda-forge/pyqt/)
10 |
11 |
--------------------------------------------------------------------------------
/Examples/Example_2/README.md:
--------------------------------------------------------------------------------
1 | ## Handling UI with blinks
2 |
3 | Description: This application/demo showcases the human-computer interaction aspect of eye-tracking. The application detects the blinks of the user. The user can close the image UI with their blinks.
4 |
5 | Working: The application displays an image specified by the user and tracks the user's eyes. On detecting two blinks, the UI closes.
6 |
7 | Library Function: The application makes use of the blink detection functionality along with the user-specified UI.
8 |
9 | Output: None
10 |
11 | Dependency:
12 | To run this example install [PyQt 5.6.0](https://anaconda.org/conda-forge/pyqt/)
13 |
14 |
--------------------------------------------------------------------------------
/Examples/Example_3/README.md:
--------------------------------------------------------------------------------
1 | ## The slots machine game
2 |
3 | Description: This application/demo is a slots machine game.
4 |
5 | Working: The library runs the slots UI and detects the user's blinks. On the keypress of the spacebar, the game starts. When the first blink is detected, the first slot stops spinning. With every blink, the respective slot stops spinning. The goal is to get the same image on all three slots to win the game.
6 |
7 | Library Function: The application makes use of the blink detection functionality along with the user-specified UI.
8 |
9 | Output: None
10 |
11 | Dependency:
12 | To run this example install [PyQt 5.6.0](https://anaconda.org/conda-forge/pyqt/)
13 |
14 |
15 |
--------------------------------------------------------------------------------
/Examples/Example_1/README.md:
--------------------------------------------------------------------------------
1 | ## Pupil Tracking on Text UI
2 |
3 | Description: This application/demo tracks the eyes of the user while the user reads a sample text displayed on the UI.
4 |
5 | Working: As the program runs, a blank PyQt window appears on the screen. Press 'ctrl' to display a line of text on the screen. This timestamp is recorded as the start time of the user reading the line. Press 'shift' after reading the line. This timestamp is recorded as the end time of the user reading the line. On the keypress of Escape, the program terminates.
6 |
7 | Library Function: The program makes use of the pupil tracking functionality of the library.
8 |
9 | Output: The application returns a CSV containing the x and y coordinates of both eyes.
10 |
11 | Dependency:
12 | To run this example install [PyQt 5.6.0](https://anaconda.org/conda-forge/pyqt/)
13 |
14 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 algoasylum
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Examples/Example_1/SampleFile.txt:
--------------------------------------------------------------------------------
1 | Elon Reeve Musk is a South African-born American entrepreneur and businessman.
2 | He founded X.com (which later became PayPal), SpaceX and Tesla Motors.
3 | Musk became a multimillionaire in his twenties when he sold his start-up company.
4 | He bolstered his portfolio with the purchase of SolarCity in the early days.
5 | He cemented his standing as a leader of industry by taking on an advisory role.
6 | His mother is a model and the oldest woman to star in a Covergirl campaign.
7 | When Musk was growing up, she worked five jobs at one point to support her family.
8 | His father, Errol Musk, is a wealthy South African engineer.
9 | Musk spent his early childhood with brother Kimbal and sister Tosca in South Africa.
10 | Musk moved to Canada to attend Queen's University.
11 | Musk left Canada to study business and physics at the University of Pennsylvania.
12 | He graduated with a degree in economics and a second degree in physics.
13 | Musk headed to Stanford University in California to pursue a PhD in energy physics.
14 | Musk released a concept for a new form of transportation called the "Hyperloop".
15 | Press ESC
16 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PyEyeTrack - The Python eYe Tracking Library
2 |
3 | [](https://github.com/algoasylum/pyEyeTrack/blob/master/LICENSE)
4 |
5 | *pyEyeTrack* is a Python-based pupil-tracking library. The library tracks eyes with a commodity webcam and gives a real-time stream of eye coordinates. It provides eye-tracking and blink-detection functionality and encapsulates these in a generic interface that allows clients to use them in a variety of use-cases.
6 |
7 | ## Features
8 | * Real-time Pupil Tracking
9 | * Real-time Blink Detection
10 | * Customizable and modularized design
11 | * Concurrent
12 |
13 | ## Installation Command
14 | `pip install PyEyeTrack`
15 |
16 | Dependencies:
17 | dlib 19.4: `conda install -c conda-forge dlib=19.4`
18 |
19 | You may want to use a conda virtual environment to avoid conflicts with system dependencies.
20 |
21 | ## Documentation
22 | Find the official documentation [here](http://pyeyetrack.algoasylum.com/).
23 |
24 | ## License
25 | MIT
26 |
27 | ## pyEyeTrack Support
28 | For any issues regarding PyEyeTrack, contact the PyEyeTrack support at pyeyetrack@gmail.com
29 |
30 | ## Contributors
31 | Kanchan Sarolkar, Kimaya Badhe, Neha Chaudhari, Samruddhi Kanhed and Shrirang Karandikar.
32 |
--------------------------------------------------------------------------------
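A minimal quick-start sketch to go with the installation notes above (it mirrors Examples/Example_1/Ex_1.py; the CSV file name below is only an illustration):

    from pyEyeTrack.PyEyeTrackRunnerClass import pyEyeTrack

    ptr = pyEyeTrack()
    ptr.pyEyeTrack_runner(
        pupilTracking=True,                    # stream pupil coordinates from the default webcam
        eyeTrackingLog=True,                   # write the coordinates to a CSV when tracking stops
        eyeTrackingFileName='quickstart_log')  # illustrative output file name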
/Examples/Example_5/Ex_5_ImageUI_EscExit.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QGridLayout, QWidget
3 | from PyQt5.QtGui import QPixmap
4 | from PyQt5.QtCore import Qt
5 | import threading
6 | import time
7 |
8 |
9 | class Example(QWidget):
10 | """
11 | This class is used to display an image on a full screen UI.
12 |
13 | Args:
14 | QWidget : The QWidget class provides the basic capability to render
15 | to the screen, and to handle user input events.
16 | """
17 |
18 | def __init__(self, path='wallpaper.jpg'):
19 | super().__init__()
20 |
21 | self.im = QPixmap(path)
22 |
23 | self.label = QLabel()
24 |
25 | self.label.setScaledContents(True)
26 | self.label.setPixmap(self.im)
27 |
28 | self.grid = QGridLayout()
29 | self.grid.addWidget(self.label, 1, 1)
30 | self.setLayout(self.grid)
31 | self.showFullScreen()
32 |
33 | self.setWindowTitle("PyQT show image")
34 |
35 | def keyPressEvent(self, event):
36 | """
37 | This function monitors key press events while the program runs.
38 | On the key press of Escape, the program terminates.
39 | """
40 |
41 | if event.key() == Qt.Key_Escape:
42 | self.close()
43 |
44 |
45 | def main():
46 | """
47 | This function takes path of an image.
48 | The function displays the image on the PyQt window.
49 | The function closes the window when the code detects the escape
50 | key press.
51 | """
52 | #path = input('Enter image path: ')
53 | path = r'Examples\Example_5\wallpaper.jpg'
54 |
55 | app = QApplication(sys.argv)
56 |
57 | ex = Example(path)
58 | ex.show()
59 |
60 | app.exec_()
61 |
--------------------------------------------------------------------------------
/Examples/Example_3/Ex_3_WidgetMachine.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'machine_a_sous.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.11.3
6 | #
7 | # WARNING! All changes made in this file will be lost!
8 |
9 | from PyQt5 import QtCore, QtGui, QtWidgets
10 |
11 |
12 | class Ui_Form(object):
13 | """
14 | This code has been adapted from https://github.com/flvoyer/SlotMachine.
15 | This code is used in the execution of the slots machine UI.
16 | """
17 |
18 | def setupUi(self, Form):
19 | Form.setObjectName("Form")
20 | Form.setEnabled(True)
21 | Form.resize(940, 320)
22 | self.mLabel = QtWidgets.QLabel(Form)
23 | self.mLabel.setGeometry(QtCore.QRect(10, 10, 300, 300))
24 | self.mLabel.setAutoFillBackground(False)
25 | self.mLabel.setStyleSheet("")
26 | self.mLabel.setText("")
27 | self.mLabel.setObjectName("mLabel")
28 | self.mLabel2 = QtWidgets.QLabel(Form)
29 | self.mLabel2.setGeometry(QtCore.QRect(320, 10, 300, 300))
30 | self.mLabel2.setAutoFillBackground(False)
31 | self.mLabel2.setStyleSheet("")
32 | self.mLabel2.setText("")
33 | self.mLabel2.setObjectName("mLabel2")
34 | self.mLabel3 = QtWidgets.QLabel(Form)
35 | self.mLabel3.setGeometry(QtCore.QRect(630, 10, 300, 300))
36 | self.mLabel3.setAutoFillBackground(False)
37 | self.mLabel3.setStyleSheet("")
38 | self.mLabel3.setText("")
39 | self.mLabel3.setObjectName("mLabel3")
40 |
41 | self.retranslateUi(Form)
42 | QtCore.QMetaObject.connectSlotsByName(Form)
43 |
44 | def retranslateUi(self, Form):
45 | _translate = QtCore.QCoreApplication.translate
46 | Form.setWindowTitle(_translate("Form", "Form"))
47 |
48 |
49 | if __name__ == "__main__":
50 | import sys
51 | app = QtWidgets.QApplication(sys.argv)
52 | Form = QtWidgets.QWidget()
53 | ui = Ui_Form()
54 | ui.setupUi(Form)
55 | Form.show()
56 | sys.exit(app.exec_())
57 |
--------------------------------------------------------------------------------
/pyEyeTrack/DataHandling.py:
--------------------------------------------------------------------------------
1 | from queue import Queue
2 |
3 | q = Queue()
4 |
5 |
6 | class QueueHandling():
7 | """
8 | This class is used to handle the queue in real-time.
9 | Methods:
10 | add_data(data)
11 | This function adds the data to the queue.
12 | get_data()
13 | This function returns the next data element from the queue.
14 | is_empty()
15 | This function checks if the queue is empty.
16 | search_element(key)
17 | This function is used to search if a specified element is
18 | present in the queue.
19 | """
20 |
21 | def __init__(self):
22 | global q
23 |
24 | def add_data(self, data):
25 | """
26 | This function adds the data to the queue.
27 |
28 | Args:
29 | data ([tuple]): The tuple consists of the pupil center coordinates
30 | with timestamps.
31 | """
32 | q.put(data)
33 |
34 | def get_data(self):
35 | """
36 | This function returns the next data element from the queue.
37 |
38 | Returns:
39 | tuple: The tuple consists of the pupil center coordinates with
40 | timestamps.
41 | """
42 | return q.get()
43 |
44 | def is_empty(self):
45 | """
46 | This function checks if the queue is empty.
47 |
48 | Returns:
49 | boolean: The function returns True if the queue is empty.
50 | If the queue has data elements, then it returns False.
51 | """
52 | if q.empty():
53 | return True
54 | else:
55 | return False
56 |
57 | def search_element(self, key):
58 | """
59 | This function is used to search if a specified element is present in
60 | the queue.
61 |
62 | Args:
63 | key ([char/integer/float/string]): The key is a data element of
64 | the queue.
65 |
66 | Returns:
67 | boolean : The function returns true if the key exists in the queue,
68 | else returns false.
69 | """
70 | if key in list(q.queue):
71 | return True
72 | return False
73 |
--------------------------------------------------------------------------------
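A minimal sketch of how a client can consume the shared queue (it mirrors the pattern in Examples/Example_2/Ex_2_ImageUI.py; the two-blink threshold is only an example):

    from pyEyeTrack.DataHandling import QueueHandling

    queue_handler = QueueHandling()

    # Wait for two blink events pushed by the tracking thread, then ask the
    # tracker to shut down by enqueueing the 'Stop' sentinel it searches for.
    blinks = 0
    while blinks < 2:
        if queue_handler.get_data():  # get_data() blocks until an element is available
            blinks += 1
    queue_handler.add_data('Stop')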
/pyEyeTrack/AudioVideoRecording/VideoRecordingClass.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import pyaudio
3 | import wave
4 | import threading
5 | import time
6 | import subprocess
7 | import os, errno
8 | import sys
9 |
10 |
11 | class VideoRecorder():
12 | """
13 | VideoRecorder class is used to record video.
14 |
15 | Methods:
16 | record()
17 | The function records video while ret is True.
18 | stop()
19 | The function stops recording video.
20 | All the openCV objects are released.
21 |
22 | """
23 |
24 | def __init__(self, file_name='video'):
25 | self.open = True
26 | self.device_index = 0
27 | self.fps = 6
28 | self.fourcc = "MJPG"
29 | self.frameSize = (640, 480)
30 | self.file_name = file_name + ".avi"
31 | self.video_cap = cv2.VideoCapture(self.device_index)
32 | self.video_writer = cv2.VideoWriter_fourcc(*self.fourcc)
33 | self.video_out = cv2.VideoWriter(
34 | self.file_name,
35 | self.video_writer,
36 | self.fps,
37 | self.frameSize)
38 |
39 | def record(self):
40 | """
41 | The function records video while ret is True.
42 | Frame is written in the video every 160 ms.
43 | """
44 |
45 | while(self.open):
46 | ret, video_frame = self.video_cap.read()
47 | if(ret):
48 | try:
49 | self.video_out.write(video_frame)
50 | time.sleep(0.16)
51 | except OSError as e:
52 | if e.errno == errno.ENOSPC:
53 | print("No space left on device.")
54 | else:
55 | break
56 |
57 | def stop(self):
58 | """
59 | The function stops recording video.
60 | All the openCV objects are released.
61 | """
62 | if self.open:
63 | print("video stop")
64 | self.open = False
65 | self.video_out.release()
66 | self.video_cap.release()
67 | cv2.destroyAllWindows()
68 |
69 | else:
70 | pass
71 |
72 | def main(self):
73 | """
74 | The function launches video recording function as a thread.
75 | """
76 | video_thread = threading.Thread(target=self.record)
77 | video_thread.start()
78 |
--------------------------------------------------------------------------------
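A minimal usage sketch for VideoRecorder, assuming a webcam is available at device index 0 (the five-second duration is arbitrary):

    import time
    from pyEyeTrack.AudioVideoRecording.VideoRecordingClass import VideoRecorder

    recorder = VideoRecorder(file_name='demo_video')  # will write demo_video.avi
    recorder.main()   # record() starts on a background thread
    time.sleep(5)     # keep recording for roughly five seconds
    recorder.stop()   # release the capture device and the writer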
/Examples/Example_2/Ex_2_ImageUI.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QGridLayout, QWidget
3 | from PyQt5.QtGui import QPixmap
4 | from PyQt5.QtCore import Qt
5 | import time
6 | from pyEyeTrack.DataHandling import QueueHandling
7 |
8 |
9 | class Example(QWidget):
10 | """
11 | This class is used to display an image on a full screen UI.
12 |
13 | Args:
14 | QWidget : The QWidget class provides the basic capability to
15 | render to the screen, and to handle user input events.
16 | """
17 |
18 | def __init__(self, path):
19 | super().__init__()
20 |
21 | self.queue_handler = QueueHandling()
22 |
23 | self.im = QPixmap(path)
24 |
25 | self.label = QLabel()
26 |
27 | self.label.setScaledContents(True)
28 | self.label.setPixmap(self.im)
29 |
30 | self.grid = QGridLayout()
31 | self.grid.addWidget(self.label, 1, 1)
32 | self.setLayout(self.grid)
33 | self.showFullScreen()
34 |
35 | self.setWindowTitle("PyQT show image")
36 |
37 | self.blink_count = 0
38 |
39 | # Calls the stopByBlinks function to stop the UI after
40 | # 2 blinks are detected
41 | self.stopByBlinks()
42 |
43 | self.queue_handler.add_data('Stop')
44 |
45 | def stopByBlinks(self):
46 | """
47 | This function monitors the blink count.
48 | It primarily detects if the user has blinked twice.
49 | The function accesses the queue to detect a blink.
50 | It increments the blink counter every time the queue has a new entry.
51 | Once the count reaches two, the function returns control.
52 | """
53 | QApplication.processEvents()
54 |
55 | while self.blink_count < 2:
56 |
57 | queue_element = self.queue_handler.get_data()
58 | if queue_element:
59 | self.blink_count += 1
60 |
61 | return
62 |
63 |
64 | def main():
65 | """
66 | This function takes path of an image.
67 | The function displays the image on the PyQt window and closes it
68 | when two blinks are detected.
69 | """
70 | path = r'Examples\Example_2\wallpaper.jpg'
71 | app = QApplication(sys.argv)
72 |
73 | ex = Example(path)
74 |
75 | ex.show()
76 |
77 | ex.close()
78 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name = 'PyEyeTrack',
5 | packages = find_packages(),
6 | package_data={'': [r'pyEyeTrack\EyeTracking\shape_predictor_68_face_landmarks.dat']},
7 | dependency_links = ['http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'],
8 | include_package_data=True,
9 | version = '1.0.1',
10 | license='MIT',
11 | description = 'PyEyeTrack is a python-based pupil-tracking library. The library tracks eyes with the commodity webcam \
12 | and gives a real-time stream of eye coordinates. It provides the functionality of eye-tracking and \
13 | blink detection and encapsulates these in a generic interface that allows clients to use these \
14 | functionalities in a variety of use-cases.',
15 | long_description='# PyEyeTrack - The Python eYe Tracking Library\
16 | [](https://github.com/algoasylum/pyEyeTrack/blob/master/LICENSE)\
17 | *pyEyeTrack* is a python-based pupil-tracking library. The library tracks eyes with the commodity webcam and gives a real-time stream of eye coordinates. It provides the functionality of eye-tracking and blink detection and encapsulates these in a generic interface that allows clients to use these functionalities in a variety of use-cases.'
18 |
19 | 'Features\
20 | - Real-time Pupil Tracking\
21 | - Real-time Blink Detection\
22 | - Customizable and modularized design\
23 | - Concurrent\
24 | Documentation\
25 | Find the official documentation [here](https://algoasylum.github.io/PyTrack/).',
26 |
27 |
28 | long_description_content_type='text/markdown',
29 | author = 'Kanchan Sarolkar, Kimaya Badhe, Neha Chaudhari, Samruddhi Kanhed and Shrirang Karandikar',
30 | author_email = 'pyeyetrack@gmail.com',
31 | url = 'https://github.com/algoasylum/PyEyeTrack',
32 | download_url = 'https://github.com/algoasylum/pyEyeTrack/archive/v_1_0_1.tar.gz',
33 | keywords = ['Eye Tracking','blink detection','User Interface','Webcamera'],
34 | install_requires=[
35 | 'keyboard==0.13.3',
36 | 'tqdm==4.48.0',
37 | 'numpy==1.18.1',
38 | 'opencv-python>=4.0.*',
39 | 'pandas==0.24.0',
40 | 'pyaudio==0.2.11',
41 | ],
42 | classifiers=[
43 | 'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
44 | 'Intended Audience :: Developers', # Define that your audience are developers
45 | 'Topic :: Software Development :: Build Tools',
46 | 'License :: OSI Approved :: MIT License', # Again, pick a license
47 | 'Programming Language :: Python :: 3.6',
48 | ],
49 | )
50 |
--------------------------------------------------------------------------------
/pyEyeTrack/AudioVideoRecording/AudioRecordingClass.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import pyaudio
3 | import wave
4 | import threading
5 | import time
6 | import subprocess
7 | import os, errno
8 | import shutil
9 | import sys
10 | import re
11 |
12 |
13 | class AudioRecorder():
14 | """
15 | AudioRecorder class is used to record audio.
16 |
17 | Methods:
18 | record()
19 | This function records audio until the open
20 | flag is set to False.
21 | stop()
22 | This function stops recording the audio.
23 | The audio frames are written into
24 | .wav file.
25 |
26 | """
27 |
28 | def __init__(self, file_name='audio'):
29 | self.open = True
30 | self.rate = 44100
31 | self.frames_per_buffer = 1024
32 | self.channels = 2
33 | self.format = pyaudio.paInt16
34 | self.file_name = file_name + ".wav"
35 | self.audio = pyaudio.PyAudio()
36 |
37 | self.stream = self.audio.open(
38 | format = self.format,
39 | channels = self.channels,
40 | rate = self.rate,
41 | input = True,
42 | frames_per_buffer=self.frames_per_buffer)
43 | self.audioframes = []
44 |
45 | def record(self):
46 | """
47 | The function records audio until the open flag
48 | is set to False.
49 | The frames read by the stream are appended
50 | into audio frames.
51 | """
52 | self.stream.start_stream()
53 | while(self.open):
54 | data = self.stream.read(self.frames_per_buffer)
55 | self.audioframes.append(data)
56 | if not self.open:
57 | break
58 |
59 | def stop(self):
60 | """
61 | The function stops recording audio, thereby also
62 | stopping the thread.
63 | Audio frames are written into a .wav file.
64 | The stream is stopped and closed.
65 | """
66 | if self.open:
67 | try:
68 | print("Audio Stop")
69 | self.open = False
70 | self.stream.stop_stream()
71 | self.stream.close()
72 | self.audio.terminate()
73 |
74 | waveFile = wave.open(self.file_name, 'wb')
75 | waveFile.setnchannels(self.channels)
76 | waveFile.setsampwidth(self.audio.get_sample_size(self.format))
77 | waveFile.setframerate(self.rate)
78 | waveFile.writeframes(b''.join(self.audioframes))
79 | waveFile.close()
80 | except OSError as e:
81 | if e.errno == errno.ENOSPC:
82 | print("No space left on device")
83 |
84 | else:
85 | pass
86 |
87 | def main(self):
88 | """
89 | The function launches audio recording function as a thread.
90 | """
91 | audio_thread = threading.Thread(target=self.record,)
92 | audio_thread.start()
93 |
--------------------------------------------------------------------------------
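AudioRecorder follows the same start/stop pattern as VideoRecorder; a minimal sketch (the five-second duration is arbitrary):

    import time
    from pyEyeTrack.AudioVideoRecording.AudioRecordingClass import AudioRecorder

    recorder = AudioRecorder(file_name='demo_audio')  # frames are written to demo_audio.wav on stop()
    recorder.main()   # record() starts on a background thread
    time.sleep(5)     # capture roughly five seconds of audio
    recorder.stop()   # stop the stream and flush the frames to the .wav file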
/Examples/Example_1/Ex_1_SampleTextUI.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from PyQt5.QtWidgets import QApplication, QLabel, QMainWindow, QTextEdit, QScrollArea
3 | from PyQt5.QtCore import Qt
4 | from PyQt5 import QtCore
5 | from PyQt5 import QtGui
6 | from PyQt5.QtGui import QTextCursor
7 | import time
8 | import csv
9 | import pandas as pd
10 |
11 |
12 | class MainWindow(QMainWindow):
13 | """
14 | This class is used to display text line by line provided to
15 | it in the form of a txt file.
16 | The user needs to press Ctrl to display a line.
17 | When the user has finished reading, the shift key should be pressed.
18 | Meanwhile, the library will track the user's eyes as the user
19 | reads the line.
20 |
21 | Args:
22 | QMainWindow : The main window provides a framework for building
23 | an application’s user interface.
24 |
25 | Methods:
26 | openfile(i)
27 | This function opens and reads the file specified by the path.
28 | keyPressEvent(event)
29 | This function monitors key press events while the program runs.
30 | """
31 |
32 | def __init__(self, path, *args, **kwargs):
33 | super(MainWindow, self).__init__(*args, **kwargs)
34 | self.path = path
35 | self.setWindowTitle("Text UI")
36 | self.showFullScreen()
37 | self.i = 0
38 | self.editor = QTextEdit()
39 | f = self.editor.font()
40 | # sets the size to 27
41 | f.setPointSize(27)
42 | self.editor.setFont(f)
43 | self.setCentralWidget(self.editor)
44 | self.cursor = QTextCursor(self.editor.document())
45 | self.cursor.setPosition(0)
46 | self.editor.setTextCursor(self.cursor)
47 | # Opens and writes the timestamps to the CSV
48 | with open('Timestamps_per_line.csv', 'w') as file:
49 | self.fieldnames = ('Start_Time', 'End_Time', 'video_Time')
50 | self.writer = csv.DictWriter(file, fieldnames=self.fieldnames)
51 | self.writer.writeheader()
52 | start = time.time()
53 | self.writer.writerow({'video_Time': start})
54 |
55 | def openfile(self, i):
56 | """
57 | This function opens and reads the file specified by the path.
58 | It returns each line of the file when called.
59 |
60 | Args:
61 | i ([integer]): 'i' specifies the line number
62 |
63 | Returns:
64 | an element of the list 'data': It returns the specified
65 | line from the file.
66 | """
67 | file = open(self.path, "r")
68 | with file:
69 | text = file.read()
70 | data = text.splitlines()
71 | return data[i]
72 |
73 | def keyPressEvent(self, event):
74 | """
75 | This function monitors key press events while the program runs.
76 | On the key press of Escape - the program terminates.
77 |
78 | On the key press of Ctrl - a new line appears on the display.
79 | The timestamp at the time of key press is added to a csv.
80 | This timestamp denotes the start time of the user reading the line.
81 |
82 | On the key press of shift - The timestamp at the time of key press
83 | is added to csv.
84 | This timestamp denotes the end time of the user reading the line.
85 | Args:
86 | event ([key press]): Detects key presses
87 | """
88 |
89 | if event.key() == Qt.Key_Escape:
90 | self.close()
91 |
92 | if event.key() == Qt.Key_Shift:
93 | ts_end = time.time()
94 | with open('Timestamps_per_line.csv', 'a') as file:
95 | writer = csv.DictWriter(file, fieldnames=self.fieldnames)
96 | writer.writerow({'End_Time': ts_end})
97 | self.i = self.i + 1
98 |
99 | if event.key() == Qt.Key_Control:
100 | data = self.openfile(self.i)
101 | ts_start = time.time()
102 | with open('Timestamps_per_line.csv', 'a') as file:
103 | writer = csv.DictWriter(file, fieldnames=self.fieldnames)
104 | writer.writerow({'Start_Time': ts_start})
105 |
106 | if self.i % 8 == 0:
107 | self.cursor.setPosition(0)
108 | self.editor.setTextCursor(self.cursor)
109 | self.editor.setText(data)
110 | self.editor.append("")
111 | else:
112 | self.editor.append(data)
113 | self.editor.append("")
114 |
115 |
116 | def main():
117 | """
118 | This function takes the path of a text file and displays it on a
119 | PyQt window.
120 | """
121 | path = "SampleFile.txt"
122 | app = QApplication(sys.argv)
123 |
124 | window = MainWindow(path)
125 | window.show()
126 | app.exec_()
127 |
--------------------------------------------------------------------------------
/pyEyeTrack/EyeTracking/AbstractEyeTrackingClass.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import keyboard
3 | from abc import ABC, abstractmethod
4 | import dlib
5 |
6 | import sys
7 | import os
8 | import bz2
9 | from functools import partial
10 | from tqdm import tqdm
11 |
12 |
13 | SHAPE_PREDICTOR_FNAME = 'shape_predictor_68_face_landmarks.dat'
14 | SHAPE_PREDICTOR_BZ2_FNAME = SHAPE_PREDICTOR_FNAME + '.bz2'
15 | SHAPE_PREDICTOR_URL = 'http://dlib.net/files/{}'.format(SHAPE_PREDICTOR_BZ2_FNAME)
16 |
17 | def _download_file(url, out_path):
18 | try:
19 | from urllib import urlretrieve # Python 2
20 | except ImportError:
21 | from urllib.request import urlretrieve # Python 3
22 |
23 | def reporthook(t, b=1, bsize=1, tsize=None, last_b=[0]):
24 | if tsize is not None:
25 | t.total = tsize
26 | t.update((b - last_b[0]) * bsize)
27 | last_b[0] = b
28 |
29 | with tqdm(unit='B', unit_scale=True, miniters=1, desc=out_path) as t:
30 | urlretrieve(url, filename=out_path, reporthook=partial(reporthook, t))
31 |
32 | def _bz2_decompress_inplace(path, out_path):
33 | with open(path, 'rb') as source, open(out_path, 'wb') as dest:
34 | dest.write(bz2.decompress(source.read()))
35 |
36 | def check():
37 | print("shape_predictor_68_face_landmarks.dat file is needed.")
38 | print("Press n -if you already have it and place it in the current folder")
39 | print("Press y -file will start downloading.")
40 |
41 | download_input = input()
42 | if download_input == 'y':
43 | script_path = os.path.dirname(os.path.abspath(__file__))
44 |
45 | _download_file(SHAPE_PREDICTOR_URL, SHAPE_PREDICTOR_BZ2_FNAME)
46 | _bz2_decompress_inplace(SHAPE_PREDICTOR_BZ2_FNAME,
47 | SHAPE_PREDICTOR_FNAME)
48 |
49 |
50 | check()
51 |
52 | class EyeTracking(ABC):
53 |
54 | """
55 | EyeTracking is an abstract class that is used to implement
56 | different types of eye-tracking events.
57 | In this library we have used this class to implement
58 | blink detection and pupil-tracking.
59 |
60 | Attributes:
61 | detector: default face detector in dlib
62 | predictor: used to map the facial landmark on the
63 | detected face
64 |
65 | Methods:
66 | csv_writer(file_name)
67 | an abstract method that is to be used for
68 | .csv file generation.
69 | functionality(frame)
70 | an abstract method used to implement type of eye-tracking.
71 | e.g. blinking
72 | start()
73 | method to start eye-tracking
74 | """
75 | file_path = os.path.abspath(SHAPE_PREDICTOR_FNAME)
76 | detector = dlib.get_frontal_face_detector()
77 | predictor = dlib.shape_predictor(file_path)
78 |
79 | def __init__(self, source):
80 |
81 | # acquire the webcam based on device id
82 | self.cap = cv2.VideoCapture(source)
83 | self.frame = 0 # frame from the video or live-stream
84 | self.landmarks = "xx" # variable to store facial landmarks
85 | self.close_flag = False # flag used to close the application
86 |
87 | @abstractmethod
88 | def csv_writer(self, file_name):
89 | """
90 | Implements a writer to write the data dictionary to a .csv file.
91 |
92 | Args:
93 | file_name (string): name of the .csv file to be generated.
94 | """
95 | pass
96 |
97 | @abstractmethod
98 | def functionality(self, frame):
99 | """
100 | Implement the eye-tracking functionality required.
101 | Args:
102 | frame (numpy array): it is the frame in the video or captured by
103 | the camera
104 | """
105 | pass
106 |
107 | def start(self):
108 | """
109 | This function reads the input from the video or the live-stream.
110 | It also processes the acquired frame and detects the face in the frame.
111 | Then the facial landmarks are mapped to the face detected in the frame.
112 | The frame and the facial landmarks are then used by the subclasses to
113 | implement blink detection or pupil tracking.
114 | The application terminates if the 'esc' key is pressed or if the
115 | close_flag is set to 'True'. If the face is not detected for 10 cycles
116 | of the loop the application will terminate.
117 |
118 | """
119 | face_not_detected = 0
120 | while True:
121 |
122 | if keyboard.is_pressed(
123 | 'esc') or self.close_flag or face_not_detected >= 10:
124 | break
125 |
126 |
127 | ret, self.frame = self.cap.read()
128 |
129 | if not ret:
130 | print("Can't receive frame (stream end?). Exiting ...")
131 | break
132 |
133 | self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
134 |
135 | faces, _, _ = self.detector.run(self.frame, 0, 0)
136 |
137 | if len(faces) == 0:
138 | print('Face not detected. Find better lighting.')
139 | face_not_detected += 1
140 | continue
141 |
142 | face_not_detected = 0
143 |
144 | self.landmarks = self.predictor(self.frame, faces[0])
145 |
146 | self.functionality(self.frame)
147 |
148 | self.cap.release()
149 |
--------------------------------------------------------------------------------
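A minimal sketch of what a new subclass has to supply (the frame-timestamp logging below is invented purely for illustration; the real subclasses are BlinkingClass, PupilTrackingClass and PupilBlinkingClass, and the sketch assumes the landmark .dat file is already in place):

    import time
    import pandas as pd
    from pyEyeTrack.EyeTracking.AbstractEyeTrackingClass import EyeTracking


    class FrameLogger(EyeTracking):
        """Hypothetical subclass: records a timestamp for every frame with a detected face."""

        def __init__(self, source):
            super().__init__(source)
            self.timestamps = []

        def functionality(self, frame):
            # called by EyeTracking.start() once per frame in which a face was found
            self.timestamps.append(time.time())

        def csv_writer(self, file_name):
            pd.DataFrame({"Timestamps": self.timestamps}).to_csv(file_name + ".csv")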
/Examples/Example_3/Ex_3_SlotsMachine.py:
--------------------------------------------------------------------------------
1 | from PyQt5 import QtCore
2 | from PyQt5.QtWidgets import *
3 | from PyQt5.QtGui import *
4 | from PyQt5.QtCore import *
5 | import time
6 | import random
7 | import os
8 | import sys
9 | import threading
10 | import subprocess
11 | import Ex_3_WidgetMachine as SlotM
12 | from pyEyeTrack.DataHandling import QueueHandling
13 |
14 |
15 | class MyImageViewerWidget(QFrame):
16 | """
17 | This code has been adapted from https://github.com/flvoyer/SlotMachine.
18 | This code executes the Slots machine UI.
19 |
20 | Args:
21 | QFrame : The QFrame class can also be used directly for creating
22 | simple placeholder frames without any contents.
23 |
24 | Methods:
25 | select_random_image()
26 | This function selects an image randomly and returns it after
27 | cropping it.
28 |
29 | spin()
30 | This function spins the slots of the slot machine.
31 |
32 | """
33 |
34 | def __init__(self, *args):
35 | super(MyImageViewerWidget, self).__init__(*args)
36 | self.setGeometry(0, 0, 800, 600)
37 | self.ui = SlotM.Ui_Form()
38 | self.ui.setupUi(self)
39 | self.games_played = 0
40 | root_directory = os.path.dirname(os.path.abspath(__file__))
41 | path = os.path.join(root_directory, "Ex_3_slot_machine_symbols.png")
42 | self.px = QPixmap(path)
43 |
44 | self.x = [0, 0, 0, 300, 300, 300, 600, 600, 600]
45 | self.y = [0, 300, 600, 0, 300, 600, 0, 300, 600]
46 |
47 | rect = QRect(0, 0, 300, 300)
48 | cropped = self.px.copy(rect)
49 | self.ui.mLabel.setPixmap(cropped)
50 | self.ui.mLabel2.setPixmap(cropped)
51 | self.ui.mLabel3.setPixmap(cropped)
52 | self.queue_handler = QueueHandling()
53 |
54 | def select_random_image(self):
55 | """
56 | This function selects an image randomly and returns it after cropping it.
57 |
58 | Returns:
59 | image : selects and returns a cropped image
60 | """
61 | selected_image_index = random.randint(0, len(self.x) - 1)
62 | self.rect = QRect(
63 | self.x[selected_image_index],
64 | self.y[selected_image_index],
65 | 300,
66 | 300)
67 | cropped = self.px.copy(self.rect)
68 | return cropped, selected_image_index
69 |
70 | def spin(self):
71 | """
72 | This function spins the slots till three blinks are detected.
73 | The function accesses the queue to monitor the blinks.
74 | On the first blink, the first slot stops spinning.
75 | With every subsequent blink, the next slot stops spinning.
76 | The function checks if the images on all three slots are the same.
77 | If so, the user wins the jackpot.
78 | """
79 | blink_count = 0
80 | for _ in range(0, 200):
81 | time.sleep((50 + 25 * 9) / 1000)
82 |
83 | if self.queue_handler.is_empty() == False:
84 | queue_element = self.queue_handler.get_data()
85 | if queue_element:
86 | blink_count += 1
87 | print('blink: ', blink_count)
88 |
89 | if blink_count >= 3:
90 | break
91 |
92 | if blink_count < 3:
93 | cropped, c = self.select_random_image()
94 | self.ui.mLabel3.setPixmap(cropped)
95 |
96 | if blink_count < 2:
97 | cropped, b = self.select_random_image()
98 | self.ui.mLabel2.setPixmap(cropped)
99 |
100 | if blink_count < 1:
101 | cropped, a = self.select_random_image()
102 | self.ui.mLabel.setPixmap(cropped)
103 |
104 | QApplication.processEvents()
105 |
106 | self.games_played += 1
107 | if a == b and c == b:
108 | print("===============")
109 | print("=== JACKPOT ===")
110 | print("===============")
111 |
112 | else:
113 | print("Game Over!")
114 | self.queue_handler.add_data('Stop')
115 |
116 | if self.games_played > 1:
117 | return
118 |
119 |
120 | class MyMainWindow(QMainWindow):
121 | """
122 | This class creates an empty window with the specified parameters.
123 |
124 | Args:
125 | QMainWindow : The main window provides a framework for building
126 | an application’s user interface.
127 |
128 | Methods:
129 | KeyPressEvent(e)
130 | This function detects key presses.
131 |
132 | """
133 |
134 | def __init__(self, parent=None):
135 |
136 | QWidget.__init__(self, parent=parent)
137 | self.setGeometry(500, 450, 940, 320)
138 | self.setFixedSize(940, 320)
139 | self.setWindowTitle('Slot Machine')
140 |
141 | self.mDisplay = MyImageViewerWidget(self)
142 |
143 | def keyPressEvent(self, e):
144 | """
145 | This function detects a key press.
146 | On the detection of key press of space bar, the spin function is
147 | called.
148 | On the key press of space bar, the game starts.
149 | """
150 | if e.key() == QtCore.Qt.Key_Space:
151 | self.mDisplay.spin()
152 |
153 |
154 | def main():
155 | """
156 | Executes the UI
157 | """
158 | app = QApplication(sys.argv)
159 | w = MyMainWindow()
160 | w.show()
161 | app.exec_()
162 |
--------------------------------------------------------------------------------
/pyEyeTrack/EyeTracking/PupilBlinkingClass.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import time
3 | from math import hypot
4 | from pyEyeTrack.DataHandling import QueueHandling
5 | from pyEyeTrack.EyeTracking.BlinkingClass import Blinking
6 | from pyEyeTrack.EyeTracking.PupilTrackingClass import PupilTracking
7 | from pyEyeTrack.EyeTracking.AbstractEyeTrackingClass import EyeTracking
8 |
9 |
10 | class PupilBlinking(Blinking, PupilTracking, EyeTracking):
11 |
12 | """
13 | A subclass of EyeTracking that does blink detection and
14 | pupil-tracking.
15 |
16 | Methods:
17 | functionality(frame)
18 | Implements pupil tracking and blink detection for a
19 | given frame.
20 | csv_writer(file_name)
21 | Generates a .csv file with the timestamp,pupil center and
22 | blink detection for both eyes.
23 | """
24 |
25 | def __init__(self, source):
26 | super().__init__(source)
27 | # dictionary to store the location of the pupil center,
28 | # blink flag and the corresponding timestamp
29 | # for every frame processed
30 | self.eye_data_log = {
31 | "Timestamps": [],
32 | "Left_Eye_X": [],
33 | "Left_Eye_Y": [],
34 | "Right_Eye_X": [],
35 | "Right_Eye_Y": [],
36 | "Blink": []}
37 | # initialized queue to do real-time data transfer
38 | self.queue_handler = QueueHandling()
39 |
40 | def functionality(self, frame):
41 | """
42 | This method overrides the method in the superclass.
43 | This method gets the blink ratios for both the eyes and
44 | calculates the average blink ratio.
45 |
46 | If the value of the average blink ratio is greater than the
47 | BLINK_RATIO_THRESHOLD, we presume that the subject blinked.
48 | Here, we set the value of the 'Blink' field in the dictionary
49 | to 'True'. We add the data to the dictionary as well as the
50 | queue to facilitate real-time data transfer.
51 | The values of the pupil centers are set to 0 when the
52 | subject is blinking.
53 |
54 | If the blink ratio is less than the BLINK_RATIO_THRESHOLD,
55 | we calculate the location of the pupil center for both eyes.
56 | Once the pupil centers are acquired, we append them to the eye_data_log
57 | dictionary along with the timestamp.
58 | Here, we set the 'Blink' field in the dictionary to 'False'.
59 | We also add this data to the queue for real-time data transfer.
60 |
61 | Finally, we also toggle the close_flag if the string 'Stop' is
62 | found in the queue. This can be used by the user to
63 | stop the application.
64 |
65 | Args:
66 | frame (numpy array): it is the frame in the video or
67 | captured by the camera
68 | """
69 |
70 | left_eye_ratio = self.get_blink_ratio(
71 | [36, 37, 38, 39, 40, 41], self.landmarks)
72 | right_eye_ratio = self.get_blink_ratio(
73 | [42, 43, 44, 45, 46, 47], self.landmarks)
74 | blink_ratio = (left_eye_ratio + right_eye_ratio) / 2
75 |
76 | if blink_ratio > self.BLINK_RATIO_THRESHOLD:
77 | timestamp_blinking = time.time()
78 | self.eye_data_log["Timestamps"].append(timestamp_blinking)
79 | self.eye_data_log["Left_Eye_X"].append(0)
80 | self.eye_data_log["Left_Eye_Y"].append(0)
81 | self.eye_data_log["Right_Eye_X"].append(0)
82 | self.eye_data_log["Right_Eye_Y"].append(0)
83 | self.eye_data_log["Blink"].append(True)
84 | blink_data = (timestamp_blinking, 0, 0, 0, 0, True)
85 | self.queue_handler.add_data(blink_data)
86 | else:
87 |
88 | landmarks_coordinates_left_eye = self.detect_eye(
89 | [36, 37, 38, 39, 40, 41], self.landmarks)
90 | landmarks_coordinates_right_eye = self.detect_eye(
91 | [42, 43, 44, 45, 46, 47], self.landmarks)
92 |
93 | pupil_center_left_eye = self.get_pupil_center_coordinates(
94 | landmarks_coordinates_left_eye, 0, frame)
95 | pupil_center_right_eye = self.get_pupil_center_coordinates(
96 | landmarks_coordinates_right_eye, 0, frame)
97 | timestamp_pupil_centers = time.time()
98 |
99 | self.eye_data_log["Timestamps"].append(timestamp_pupil_centers)
100 | self.eye_data_log["Left_Eye_X"].append(pupil_center_left_eye[0])
101 | self.eye_data_log["Left_Eye_Y"].append(pupil_center_left_eye[1])
102 | self.eye_data_log["Right_Eye_X"].append(pupil_center_right_eye[0])
103 | self.eye_data_log["Right_Eye_Y"].append(pupil_center_right_eye[1])
104 | self.eye_data_log["Blink"].append(False)
105 | pupil_center_data = (
106 | timestamp_pupil_centers,
107 | pupil_center_left_eye[0],
108 | pupil_center_left_eye[1],
109 | pupil_center_right_eye[0],
110 | pupil_center_right_eye[1],
111 | False)
112 | self.queue_handler.add_data(pupil_center_data)
113 |
114 | if self.queue_handler.search_element('Stop'):
115 | self.close_flag = True
116 |
117 | def csv_writer(self, file_name):
118 | """
119 | Generates a .csv file with the timestamp, pupil centers and blink
120 | flag, with the given file name.
121 |
122 | Args:
123 | file_name (string): name of the .csv file to be generated.
124 | """
125 | file_name = file_name + ".csv"
126 | DF = pd.DataFrame(self.eye_data_log)
127 | DF.to_csv(file_name)
128 |
129 | def start(self):
130 | return super().start()
131 |
--------------------------------------------------------------------------------
/pyEyeTrack/EyeTracking/BlinkingClass.py:
--------------------------------------------------------------------------------
1 | from pyEyeTrack.EyeTracking.AbstractEyeTrackingClass import EyeTracking
2 | import pandas as pd
3 | import time
4 | from math import hypot
5 | from pyEyeTrack.DataHandling import QueueHandling
6 |
7 |
8 | class Blinking (EyeTracking):
9 |
10 | """
11 | A subclass of EyeTracking that does blink detection.
12 |
13 | Methods:
14 | midpoint(point_1, point2)
15 | Calculates midpoint of two given points.
16 | get_blink_ratio(eye_points, facial_landmarks)
17 | Calculates the blink ratio.
18 | functionality(frame)
19 | Implements the blink detection for a given frame.
20 | csv_writer(file_name)
21 | Generates a .csv file with the timestamp and blink ratio.
22 | """
23 |
24 | def __init__(self, source):
25 |
26 | super().__init__(source) # constructor of the superclass - EyeTracking
27 | self.timestamps = [] #stores timestamps of the blink
28 | self.blink_ratios = [] #stores the blink ratio
29 | self.queue_handler = QueueHandling() #queue for real-time data transfer
30 | self.BLINK_RATIO_THRESHOLD = 5.7 #value of the blink ratio threshold
31 |
32 | def midpoint(self, point_1, point_2):
33 | """
34 | This function calculates the midpoint of two dlib.point
35 | objects and returns the result as a tuple of integers
36 |
37 | Args:
38 | point_1 (dlib.point): first point to calculate the midpoint
39 | point_2 (dlib.point): second point to calculate the midpoint
40 |
41 | Returns:
42 | (int, int): a tuple containing the x and y coordinates
43 | of the midpoint.
44 | """
45 | return (int((point_1.x + point_2.x) / 2),
46 | int((point_1.y + point_2.y) / 2))
47 |
48 | def get_blink_ratio(self, eye_points, facial_landmarks):
49 | """
50 | This function calculates the blink ratio for a single eye.
51 | blink_ratio is the ratio of the horizontal length of the eye
52 | to the vertical length of the eye.
53 | The horizontal and vertical lengths are obtained by calculating
54 | the Euclidean distance between landmarks of the eye.
55 |
56 | Args:
57 | eye_points (list): the list of indices of the facial
58 | landmarks which represent an eye
59 | facial_landmarks (dlib.full_object_detection):
60 | this object helps get the location of
61 | the eye in the frame.
62 |
63 | Returns:
64 | float: returns the blink ratio i.e. ratio of the
65 | horizontal length of the eye to the vertical
66 | length of the eye
67 | """
68 | corner_left = (
69 | facial_landmarks.part(eye_points[0]).x,
70 | facial_landmarks.part(eye_points[0]).y)
71 | corner_right = (
72 | facial_landmarks.part(eye_points[3]).x,
73 | facial_landmarks.part(eye_points[3]).y)
74 | center_top = self.midpoint(
75 | facial_landmarks.part(eye_points[1]),
76 | facial_landmarks.part(eye_points[2]))
77 | center_bottom = self.midpoint(
78 | facial_landmarks.part(eye_points[5]),
79 | facial_landmarks.part(eye_points[4]))
80 |
81 | horizontal_length = hypot(
82 | (corner_left[0] - corner_right[0]),
83 | (corner_left[1] - corner_right[1]))
84 | vertical_length = hypot(
85 | (center_top[0] - center_bottom[0]),
86 | (center_top[1] - center_bottom[1]))
87 |
88 | blink_ratio = horizontal_length / vertical_length
89 | return blink_ratio
90 |
91 | def functionality(self, frame):
92 | """
93 | This method overrides the method in the superclass.
94 | This method gets the blink ratios for both the eyes
95 | and calculates the average blink ratio. If the value of the
96 | average blink ratio is greater than the BLINK_RATIO_THRESHOLD
97 | we presume that the subject blinked.
98 | If the subject blinks we add the timestamp of the blink
99 | and the value of the blink ratio to the respective lists.
100 | We also add True to the queue on blink detection.
101 | This queue can be accessed by the user to see if the subject
102 | blinked in real-time.
103 | Finally, we also toggle the close_flag if the string 'Stop' is found
104 | in the queue. This can be used by the user to stop the application.
105 |
106 | Args:
107 | frame (numpy array): it is the frame in the video or
108 | captured by the camera
109 | """
110 |
111 | left_eye_ratio = self.get_blink_ratio(
112 | [36, 37, 38, 39, 40, 41], self.landmarks)
113 | right_eye_ratio = self.get_blink_ratio(
114 | [42, 43, 44, 45, 46, 47], self.landmarks)
115 | blink_ratio = (left_eye_ratio + right_eye_ratio) / 2
116 |
117 | if blink_ratio > self.BLINK_RATIO_THRESHOLD:
118 | timestamp = time.time()
119 | self.queue_handler.add_data(True)
120 | self.timestamps.append(timestamp)
121 | self.blink_ratios.append(blink_ratio)
122 |
123 |
124 | if self.queue_handler.search_element('Stop'):
125 | self.close_flag = True
126 |
127 | def csv_writer(self, file_name='blink_log'):
128 | """
129 | Generates a .csv file with the timestamp and blink ratio
130 | with the given file name.
131 |
132 | Args:
133 | file_name (string): name of the .csv file to be generated.
134 | """
135 | file_name = file_name + ".csv"
136 | df = pd.DataFrame({"Timestamps": self.timestamps,
137 | "Blink Ratio": self.blink_ratios})
138 | df.to_csv(file_name)
139 |
--------------------------------------------------------------------------------
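For a sense of scale, a worked example of the ratio computed in get_blink_ratio (the pixel distances are made up; only the 5.7 threshold comes from the class):

    from math import hypot

    horizontal_length = hypot(60, 0)  # eye corners roughly 60 px apart
    vertical_length = hypot(0, 9)     # eyelid midpoints roughly 9 px apart as the eye closes
    blink_ratio = horizontal_length / vertical_length  # 60 / 9 ≈ 6.67
    print(blink_ratio > 5.7)          # True -> counted as a blink by BLINK_RATIO_THRESHOLD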
/pyEyeTrack/PyEyeTrackRunnerClass.py:
--------------------------------------------------------------------------------
1 | from pyEyeTrack.EyeTracking.PupilTrackingClass import PupilTracking
2 | from pyEyeTrack.EyeTracking.BlinkingClass import Blinking
3 | from pyEyeTrack.EyeTracking.PupilBlinkingClass import PupilBlinking
4 | from pyEyeTrack.AudioVideoRecording.VideoRecordingClass import VideoRecorder
5 | from pyEyeTrack.AudioVideoRecording.AudioRecordingClass import AudioRecorder
6 | import threading
7 | import importlib
8 | import sys
9 | import os
10 |
11 |
12 | class pyEyeTrack():
13 | """PyEyeTrack is a pupil tracking library, built on top of the
14 | Python programming language. The library provides various
15 | functionalities like pupil tracking, blink detection, video,
16 | and audio recording on the user-specified UI.
17 | """
18 |
19 | def __init__(self):
20 | pass
21 |
22 | def dynamic_import(self, module):
23 | return importlib.import_module(module)
24 |
25 | def pyEyeTrack_runner(
26 | self,
27 | UI=False,
28 | UI_file_name="User_ImageUI_EscExit",
29 | pupilTracking=False,
30 | blinkDetection=False,
31 | video_source=0,
32 | eyeTrackingLog=True,
33 | eyeTrackingFileName='EyeTrackLog',
34 | videoRecorder=False,
35 | videoName='video',
36 | audioRecorder=False,
37 | audioName='audio',
38 | destinationPath='/Output'):
39 | """
40 | This function enables the user to run the functionalities of the
41 | library simultaneously.
42 | Functionalities include running the UI specified by the user,
43 | pupil tracking, blink detection, video recording and audio recording.
44 | The user can set flags to run the combination of these functionalities.
45 | The function also allows the user to name the output file.
46 |
47 | Args:
48 | UI (bool, optional): This parameter enables the user to run UI.
49 | Default: False.
50 |
51 | UI_file_name (str, optional): This parameter takes the file name
52 | of the UI. Default: "User_ImageUI_EscExit".
53 |
54 | pupilTracking (bool, optional): This parameter enables the user to
55 | run pupil tracking. Default: False.
56 |
57 | blinkDetection (bool, optional): This parameter enables the user
58 | to run blink detection. Default: False.
59 |
60 | video_source (int/str, optional): This parameter takes either
61 | device index or a video file as input. Default: 0.
62 |
63 | eyeTrackingLog (bool, optional): This parameter enables the user to
64 | generate a CSV of pupil tracking/ blink detection. Default: True.
65 |
66 | eyeTrackingFileName (str, optional): This parameter takes the file name
67 | for the CSV. Default: 'EyeTrackLog'.
68 |
69 | videoRecorder (bool, optional): This parameter enables the user to
70 | record video. Default: False.
71 |
72 | videoName (str, optional): This parameter enables the user to specify
73 | the filename with which the recorded video is to be saved.
74 | Default: 'video'.
75 |
76 | audioRecorder (bool, optional): This parameter enables the user to
77 | record audio. Default: False.
78 |
79 | audioName (str, optional): This parameter enables the user to specify
80 | the filename with which the recorded audio is to be saved.
81 | Default: 'audio'.
82 |
83 | destinationPath (str, optional): This parameter enables the user to specify
84 | the location of the output files. Default: '/Output'.
85 |
86 | """
87 |
88 | startEyeTracking = False
89 | outputPath = destinationPath
90 |
91 | if os.access(
92 | destinationPath,
93 | os.W_OK) == False and destinationPath != '/Output':
94 | print('You may not have write permission. Try changing the destination path.')
95 | sys.exit()
96 |
97 | if os.path.exists(
98 | destinationPath) == False and destinationPath != '/Output':
99 | os.mkdir(destinationPath)
100 | elif destinationPath == '/Output':
101 | currentPath = os.getcwd()
102 | outputPath = currentPath + '/Output'
103 | if os.path.exists(outputPath) == False:
104 | os.mkdir(outputPath)
105 |
106 | outputPath = outputPath + '/'
107 |
108 | if (pupilTracking or blinkDetection) and videoRecorder:
109 | print('Video recording and eye tracking both require '
110 | 'access to the webcam and therefore cannot be run '
111 | 'simultaneously.')
112 | sys.exit()
113 |
114 |
115 | if pupilTracking or blinkDetection:
116 | startEyeTracking = True
117 |
118 | if video_source != 0:
119 | if os.path.exists(video_source) == False:
120 | print('Please specify a valid path for the video source.')
121 | sys.exit()
122 |
123 | if blinkDetection and pupilTracking:
124 | eyeTracking = PupilBlinking(video_source)
125 | eyeTrackingThread = threading.Thread(target=eyeTracking.start)
126 |
127 | if blinkDetection and pupilTracking == False:
128 | eyeTracking = Blinking(video_source)
129 | eyeTrackingThread = threading.Thread(target=eyeTracking.start)
130 |
131 | if pupilTracking and blinkDetection == False:
132 | eyeTracking = PupilTracking(video_source)
133 | eyeTrackingThread = threading.Thread(target=eyeTracking.start)
134 |
135 | if videoRecorder:
136 | videoOutputPath = outputPath + videoName
137 | videoRecorder = VideoRecorder(videoOutputPath)
138 | videoRecorderThread = threading.Thread(target=videoRecorder.main)
139 |
140 | if audioRecorder:
141 | audioOutputPath = outputPath + audioName
142 | audioRecorder = AudioRecorder(audioOutputPath)
143 | audioRecorderThread = threading.Thread(target=audioRecorder.main)
144 |
145 | if UI:
146 | module = self.dynamic_import(UI_file_name)
147 | if hasattr(module, 'main'):
148 | uiThread = threading.Thread(target=module.main)
149 | else:
150 | print(
151 | 'UI needs a main method. Please refer to the documentation for more information.')
152 | sys.exit()
153 |
154 | if UI:
155 | uiThread.start()
156 |
157 | if startEyeTracking:
158 | eyeTrackingThread.start()
159 |
160 | if videoRecorder:
161 | videoRecorderThread.start()
162 |
163 | if audioRecorder:
164 | audioRecorderThread.start()
165 |
166 | if UI:
167 | uiThread.join()
168 |
169 | if startEyeTracking:
170 | eyeTrackingThread.join()
171 | if eyeTrackingLog:
172 | eyeTrackingOutput = outputPath + eyeTrackingFileName
173 | eyeTracking.csv_writer(eyeTrackingOutput)
174 |
175 | if videoRecorder:
176 | videoRecorderThread.join()
177 | videoRecorder.stop()
178 |
179 | if audioRecorder:
180 | audioRecorderThread.join()
181 | audioRecorder.stop()
182 |
183 |
184 |
--------------------------------------------------------------------------------
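Putting the flags documented above together, a minimal usage sketch of the runner: pupil tracking is switched on explicitly, while the logging values shown are simply the documented defaults.

    from pyEyeTrack.PyEyeTrackRunnerClass import pyEyeTrack

    # Track pupils from the default webcam (video_source=0); once tracking ends,
    # the pupil-center log is written to <current working directory>/Output/EyeTrackLog.csv.
    tracker = pyEyeTrack()
    tracker.pyEyeTrack_runner(
        pupilTracking=True,
        eyeTrackingLog=True,
        eyeTrackingFileName='EyeTrackLog')
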
/pyEyeTrack/EyeTracking/PupilTrackingClass.py:
--------------------------------------------------------------------------------
1 | from pyEyeTrack.EyeTracking.AbstractEyeTrackingClass import EyeTracking
2 | import numpy as np
3 | import pandas as pd
4 | import cv2
5 | import time
6 | from pyEyeTrack.DataHandling import QueueHandling
7 |
8 |
9 | class PupilTracking(EyeTracking):
10 | """
11 | A subclass of EyeTracking that performs pupil tracking,
12 | i.e. it gives the pupil centers for both eyes.
13 |
14 | Methods:
15 | detect_eye(eye_points,facial_landmarks)
16 | Returns the location of the eye in the frame.
17 | get_connected_components(thresholded_pupil_region)
18 | Calculates the pupil center.
19 | get_approximate_pupil_rectangle(eye_landmarks_coordinates,frame)
20 | Returns the part of the frame with only the pupil
21 | get_pupil_center_coordinates(eye_landmarks_coordinates,threshold,
22 | frame)
23 | Returns pupil center for a single eye.
24 | functionality(frame)
25 | Implements pupil tracking for a given frame.
26 | csv_writer(file_name)
27 | Generates a .csv file with the timestamp and pupil center
28 | for both eyes.
29 |
30 | """
31 |
32 | def __init__(self, source):
33 | super().__init__(source)
34 | self.eye_data_log = {"Timestamps": [], "Left_Eye_X": [], "Left_Eye_Y": []
35 | , "Right_Eye_X": [], "Right_Eye_Y": []}
36 | # dictionary to store the location of the pupil center and
37 | # the corresponding timestamp
38 | self.queue_handler = QueueHandling()
39 | # queue initialized for real-time data transfer
40 |
41 | def detect_eye(self, eye_points, facial_landmarks):
42 | """
43 | This function returns a numpy array of the x, y coordinates of the
44 | landmarks that define the eye in the frame.
45 |
46 | Args:
47 | eye_points (list): the list of indices of the facial landmarks
48 | which represent an eye
49 | facial_landmarks (dlib.full_object_detection): this object helps
50 | get the location of the eye in the frame
51 |
52 | Returns:
53 | numpy array: the array of points that define the location of the
54 | eye in the frame.
55 | """
56 |
57 | eye_landmarks_coordinates = np.array(
58 | [(facial_landmarks.part(eye_points[0]).x,
59 | facial_landmarks.part(eye_points[0]).y),
60 | (facial_landmarks.part(eye_points[1]).x,
61 | facial_landmarks.part(eye_points[1]).y),
62 | (facial_landmarks.part(eye_points[2]).x,
63 | facial_landmarks.part(eye_points[2]).y),
64 | (facial_landmarks.part(eye_points[3]).x,
65 | facial_landmarks.part(eye_points[3]).y),
66 | (facial_landmarks.part(eye_points[4]).x,
67 | facial_landmarks.part(eye_points[4]).y),
68 | (facial_landmarks.part(eye_points[5]).x,
69 | facial_landmarks.part(eye_points[5]).y)],
70 | np.int32)
71 | return eye_landmarks_coordinates
72 |
73 | def get_connected_components(self, thresholded_pupil_region):
74 | """
75 | This function returns the pupil center of the eye.
76 | The input parameter is the thresholded pupil region.
77 | The pupil center is the centroid of the connected component
78 | with the largest area. Since we already have the approximate
79 | pupil area, we assume the connected component with the
80 | largest area to be the pupil.
81 |
82 | Args:
83 | thresholded_pupil_region (numpy array): the approximate
84 | pupil area after filtering and thresholding is applied.
85 |
86 | Returns:
87 | (float, float): a tuple with the x, y coordinate of the
88 | pupil center.
89 | """
90 |
91 | _, _, stats, centroids = cv2.connectedComponentsWithStats(
92 | thresholded_pupil_region, 4)
93 |
94 | area = []
95 | index = 0
96 | for stat in stats:
97 | area.append((stat[4], index))
98 | index = index + 1
99 |
100 | maximum_area = max(area)
101 | index_of_maximum_area = maximum_area[1]
102 |
103 | pupil_center = centroids[index_of_maximum_area]
104 |
105 | return pupil_center
106 |
107 | def get_approximate_pupil_rectangle(
108 | self, eye_landmarks_coordinates, frame):
109 | """
110 | In this function we first find the minimum and maximum of the
111 | x coordinate of the location of the eye and similarly for the y coordinate.
112 | Here we have adjusted the values so that cropping the area
113 | gives us only the region inside the eye. This is approximately
114 | the region where the pupil lies.
115 |
116 | Args:
117 | eye_landmarks_coordinates (numpy array): array of the x,y
118 | coordinates of the location of the eye
119 | frame (numpy array): it is the frame in the video or captured
120 | by the camera
121 |
122 | Returns:
123 | numpy array: the area of the eye cropped tightly
124 | """
125 |
126 | eye_landmark_min_x = np.min(eye_landmarks_coordinates[:, 0]) + 10
127 | eye_landmark_max_x = np.max(eye_landmarks_coordinates[:, 0]) - 10
128 | eye_landmark_min_y = np.min(eye_landmarks_coordinates[:, 1]) + 1
129 | eye_landmark_max_y = np.max(eye_landmarks_coordinates[:, 1]) - 1
130 | approximate_pupil_region = frame[eye_landmark_min_y: eye_landmark_max_y,
131 | eye_landmark_min_x: eye_landmark_max_x]
132 |
133 | return approximate_pupil_region
134 |
135 | def get_pupil_center_coordinates(
136 | self,
137 | eye_landmarks_coordinates,
138 | threshold,
139 | frame):
140 | """
141 | This function returns the pupil center for a single eye. First we acquire
142 | the approximate region of the frame in which the pupil lies.
143 | Then we perform thresholding on this cropped part of the frame.
144 | We then send this processed part to the get_connected_components function
145 | which returns the pupil center.
146 |
147 | Args:
148 | eye_landmarks_coordinates (numpy array): array of the x,y coordinates
149 | of the location of the eye
150 | threshold (int): the value that should be used for thresholding
151 | frame (numpy array): it is the frame in the video or captured by the camera
152 |
153 | Returns:
154 | (float, float): a tuple containing x and y coordinates of the pupil center.
155 | """
156 |
157 | approximate_pupil_region = self.get_approximate_pupil_rectangle(
158 | eye_landmarks_coordinates, frame)
159 |
160 | median_blur_filter = cv2.medianBlur(approximate_pupil_region, 5)
161 | _, thresholded_pupil_region = cv2.threshold(
162 | median_blur_filter, threshold, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
163 |
164 | return self.get_connected_components(thresholded_pupil_region)
165 |
166 | def functionality(self, frame):
167 | """
168 | This method overrides the method in the superclass.
169 | This method gets the pupil center for both the eyes in the frame.
170 | Once the pupil centers are acquired we append them to the eye_data_log
171 | dictionary along with the timestamp.
172 | We also add this data to the queue for real-time data transfer.
173 | Finally, we also toggle the close_flag if the string 'Stop' is found
174 | in the queue. This can be used by the user to stop the application.
175 |
176 | Args:
177 | frame (numpy array): it is the frame in the video or captured
178 | by the camera
179 | """
180 |
181 | landmarks_coordinates_left_eye = self.detect_eye(
182 | [36, 37, 38, 39, 40, 41], self.landmarks)
183 | landmarks_coordinates_right_eye = self.detect_eye(
184 | [42, 43, 44, 45, 46, 47], self.landmarks)
185 |
186 | pupil_center_left_eye = self.get_pupil_center_coordinates(
187 | landmarks_coordinates_left_eye, 0, frame)
188 | pupil_center_right_eye = self.get_pupil_center_coordinates(
189 | landmarks_coordinates_right_eye, 0, frame)
190 |
191 | timestamp = time.time()
192 | self.eye_data_log["Timestamps"].append(timestamp)
193 | self.eye_data_log["Left_Eye_X"].append(pupil_center_left_eye[0])
194 | self.eye_data_log["Left_Eye_Y"].append(pupil_center_left_eye[1])
195 | self.eye_data_log["Right_Eye_X"].append(pupil_center_right_eye[0])
196 | self.eye_data_log["Right_Eye_Y"].append(pupil_center_right_eye[1])
197 | pupil_center_data = (
198 | timestamp,
199 | pupil_center_left_eye[0],
200 | pupil_center_left_eye[1],
201 | pupil_center_right_eye[0],
202 | pupil_center_right_eye[1])
203 | self.queue_handler.add_data(pupil_center_data)
204 |
205 | if self.queue_handler.search_element('Stop'):
206 | self.close_flag = True
207 |
208 | def csv_writer(self, file_name):
209 | """
210 | Generates a .csv file with the timestamp and pupil centers with the
211 | given file name.
212 |
213 | Args:
214 | file_name (string): name of the .csv file to be generated.
215 | """
216 | file_name = file_name + ".csv"
217 | df = pd.DataFrame(self.eye_data_log)
218 | df.to_csv(file_name)
219 |
--------------------------------------------------------------------------------
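The docstrings above describe the per-eye pipeline: crop the approximate pupil region, median-blur it, apply an inverted Otsu threshold, and take the centroid of the largest connected component. A condensed, standalone sketch of the same steps on a single-channel eye crop (the helper name and the grayscale-input assumption are illustrative, not part of the library):

    import cv2
    import numpy as np

    def pupil_center(eye_crop_gray):
        # Smooth, then threshold. THRESH_OTSU chooses the threshold automatically,
        # and THRESH_BINARY_INV turns the dark pupil into the white foreground.
        blurred = cv2.medianBlur(eye_crop_gray, 5)
        _, binary = cv2.threshold(
            blurred, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

        # As in get_connected_components above, the centroid of the connected
        # component with the largest area is taken to be the pupil center.
        _, _, stats, centroids = cv2.connectedComponentsWithStats(binary, connectivity=4)
        largest = int(np.argmax(stats[:, cv2.CC_STAT_AREA]))
        return tuple(centroids[largest])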
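PupilTracking can also be driven directly, the same way pyEyeTrack_runner does internally: construct it with a video source, run start() (provided by the EyeTracking superclass), and dump the log afterwards. A minimal sketch, assuming start() blocks until the tracking session ends (for example once close_flag has been set):

    from pyEyeTrack.EyeTracking.PupilTrackingClass import PupilTracking

    # 0 selects the default webcam, as in pyEyeTrack_runner; a video file path also works.
    tracking = PupilTracking(0)
    tracking.start()                  # returns once the tracking session ends
    tracking.csv_writer('PupilLog')   # writes PupilLog.csv with timestamps and pupil centers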