├── run.sh
├── env_setup.txt
├── images
├── human.png
├── logo.png
├── robot_icon.png
├── robot
│ ├── normal.png
│ ├── look_down.png
│ ├── look_left.png
│ ├── look_up.png
│ ├── look_right.png
│ └── mouth_open.png
└── robot_icon_small.png
├── samples
└── people.jpg
├── screenshots
├── s1.png
├── s2.png
├── s3.png
├── s9.png
└── video_demo.png
├── tests
├── samples
│ └── penguin.jpg
├── speak_it.py
├── extensions_checks.py
├── speech_to_text.py
├── image_tests.py
└── recognize_speech.py
├── dataset
└── readme
├── preferences.ini
├── requirements.txt
├── run_face_trainer.sh
├── sample_local_settings.py
├── singleton.py
├── prefs_dialog.py
├── settings.py
├── utils.py
├── about_dialog.py
├── preferences.py
├── README.md
├── .gitignore
├── video_capture.py
├── global_signals.py
├── image_widget.py
├── logger.py
├── cmd_listener.py
├── mouth.py
├── opencv.py
├── ear.py
├── eye_detection.py
├── robot.py
├── face_trainer.py
├── face_recognition.py
├── dashboard.py
├── ocr_credit_card.py
├── app.py
├── about_dialog.ui
├── prefs_dialog.ui
└── LICENSE
/run.sh:
--------------------------------------------------------------------------------
1 | python app.py
2 |
--------------------------------------------------------------------------------
/env_setup.txt:
--------------------------------------------------------------------------------
1 | sudo apt install portaudio19-dev -y
2 |
--------------------------------------------------------------------------------
/images/human.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/images/human.png
--------------------------------------------------------------------------------
/images/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/images/logo.png
--------------------------------------------------------------------------------
/samples/people.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/samples/people.jpg
--------------------------------------------------------------------------------
/screenshots/s1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/screenshots/s1.png
--------------------------------------------------------------------------------
/screenshots/s2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/screenshots/s2.png
--------------------------------------------------------------------------------
/screenshots/s3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/screenshots/s3.png
--------------------------------------------------------------------------------
/screenshots/s9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/screenshots/s9.png
--------------------------------------------------------------------------------
/images/robot_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/images/robot_icon.png
--------------------------------------------------------------------------------
/images/robot/normal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/images/robot/normal.png
--------------------------------------------------------------------------------
/images/robot/look_down.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/images/robot/look_down.png
--------------------------------------------------------------------------------
/images/robot/look_left.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/images/robot/look_left.png
--------------------------------------------------------------------------------
/images/robot/look_up.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/images/robot/look_up.png
--------------------------------------------------------------------------------
/screenshots/video_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/screenshots/video_demo.png
--------------------------------------------------------------------------------
/tests/samples/penguin.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/tests/samples/penguin.jpg
--------------------------------------------------------------------------------
/images/robot/look_right.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/images/robot/look_right.png
--------------------------------------------------------------------------------
/images/robot/mouth_open.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/images/robot/mouth_open.png
--------------------------------------------------------------------------------
/images/robot_icon_small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stoic1979/robovision/HEAD/images/robot_icon_small.png
--------------------------------------------------------------------------------
/tests/speak_it.py:
--------------------------------------------------------------------------------
1 | import pyttsx3
2 | engine = pyttsx3.init()
3 | engine.say('Good morning. I am Neo.')
4 | engine.runAndWait()
5 |
--------------------------------------------------------------------------------
/dataset/readme:
--------------------------------------------------------------------------------
1 | Directory for generated dataset.
2 |
3 | Contents are:
4 |
5 | 1. Generated training data for face trainer - trainer.yml and labels.pickle.
6 |
--------------------------------------------------------------------------------
/preferences.ini:
--------------------------------------------------------------------------------
1 | #
2 | # preferences in .ini format
3 | #
4 | [General]
5 | Nickname=neo
6 |
7 | [Dataset]
8 | OutputDir=.
9 |
10 | [Scheduler]
11 | Key: Value
12 |
13 | [Debug]
14 | EnableDebug=True
15 |
--------------------------------------------------------------------------------
/tests/extensions_checks.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | files = ["a.jpg", "b.c", "r.jpeg", "m.png", "d.txt"]
4 |
5 | for file in files:
6 | extension = os.path.splitext(file)[1]
7 | if extension in [".jpg", ".jpeg", ".png"]:
8 | print("yes: ", extension)
9 | else:
10 | print("no: ", extension)
11 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | flake8==3.5.0
2 | imutils==0.4.6
3 | mccabe==0.6.1
4 | numpy==1.14.4
5 | opencv-contrib-python==3.4.1.15
6 | opencv-python==3.4.1.15
7 | Pillow==5.1.0
8 | PyAudio==0.2.11
9 | pycodestyle==2.3.1
10 | pyflakes==1.6.0
11 | PyQt5==5.10.1
12 | pyttsx3==2.7
13 | sip==4.19.8
14 | SpeechRecognition==3.8.1
15 |
--------------------------------------------------------------------------------
/tests/speech_to_text.py:
--------------------------------------------------------------------------------
1 | #
2 | # a quick test for speech to text
3 | #
4 |
5 | import speech_recognition as sr
6 |
7 |
8 | def main():
9 |
10 | r = sr.Recognizer()
11 |
12 | with sr.Microphone() as source:
13 | print ('say something')
14 | audio = r.listen(source)
15 | print ('done')
16 | try:
17 | text = r.recognize_google(audio)
18 | print('Neo said:\n' + text)
19 | except Exception as e:
20 | print (e)
21 |
22 |
23 | if __name__ == "__main__":
24 | main()
25 |
--------------------------------------------------------------------------------
/run_face_trainer.sh:
--------------------------------------------------------------------------------
1 | #
2 | # convenience script to run face trainer
3 | #
4 |
5 | # remove previous training data
6 | echo "[INFO] removing previously trained dataset"
7 | rm dataset/face_trainer.yml
8 | rm dataset/face_trainer_labels.pickle
9 | echo
10 |
11 | # do the training
12 | echo "[INFO] doing face identification training"
13 | python face_trainer.py
14 | echo
15 |
16 | # generated training data
17 |
18 | echo "[INFO] Dataset generated"
19 | echo
20 | echo "--------------------------------------------------------"
21 | # face trained data
22 | ls dataset/face_trainer.yml -ial
23 | du -sh dataset/face_trainer.yml
24 | echo
25 |
26 | # face labels
27 | ls dataset/face_trainer_labels.pickle -ial
28 | du -sh dataset/face_trainer_labels.pickle
29 | echo "--------------------------------------------------------"
30 | echo
31 |
--------------------------------------------------------------------------------
/tests/image_tests.py:
--------------------------------------------------------------------------------
1 | #
2 | # script for evaluating some image processing features of opencv
3 | #
4 |
5 | import cv2
6 |
7 |
8 | class ImageTests:
9 |
10 |
11 | def __init__(self):
12 | pass
13 |
14 |
15 | def grayscale(self):
16 |
17 | print("[ImageTests] :: testing grayscale image")
18 |
19 | # load an color image in grayscale
20 | img = cv2.imread('./samples/penguin.jpg', 0)
21 |
22 | cv2.imshow('image', img)
23 |
24 | """
25 | The function waits for specified milliseconds for any keyboard event.
26 | If you press any key in that time, the program continues.
27 | If 0 is passed, it waits indefinitely for a key stroke.
28 | """
29 | cv2.waitKey(0)
30 |
31 | # destroy all the windows we created
32 | cv2.destroyAllWindows()
33 |
34 |
35 | if __name__ == "__main__":
36 | it = ImageTests()
37 | it.grayscale()
38 |
--------------------------------------------------------------------------------
/tests/recognize_speech.py:
--------------------------------------------------------------------------------
1 | import speech_recognition as sr
2 |
3 |
4 | def get_audio():
5 | """
6 | Get audio from the microphone.
7 | The SpeechRecognition package is used to automatically stop listening when the user stops speaking.
8 |
9 | function returns the raw binary audio string (PCM)
10 | """
11 | l = sr.Microphone.list_microphone_names()
12 | print (l)
13 |
14 | r = sr.Recognizer()
15 |
16 | di = l.index("default")
17 | print ("di", di)
18 |
19 | with sr.Microphone(device_index=di) as source:
20 | #with sr.Microphone() as source:
21 | print("listening for audio from microphone")
22 | #r.adjust_for_ambient_noise(source)
23 | audio = r.listen(source)
24 | print("listening done")
25 |
26 | # convert audio to raw_data (PCM)
27 | raw_audio = audio.get_raw_data()
28 |
29 | text = r.recognize_google(audio) ## recognize speech using Google Speech Recognition
30 |
31 | return text
32 |
33 | try:
34 | text = get_audio()
35 | print("text: %s" % text)
36 | except Exception as e:
37 | print(e)
38 |
--------------------------------------------------------------------------------
/sample_local_settings.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | #
25 | # This is a sample file for local settings
26 | # rename it to local_settings.py
27 | #
28 | # PLEASE DO NOT commit your local_settings.py to version control !!!
29 | #
30 | # local settings, configs, paths for datasets etc.
31 | #
32 |
33 | FACE_IMAGES_DATASET_DIR = "some absolute local path on your computer"
34 |
--------------------------------------------------------------------------------
/singleton.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | #
25 | # a singleton meta class
26 | #
27 |
28 | class SingletonType(type):
29 | _instances = {}
30 |
31 | def __call__(cls, *args, **kwargs):
32 | if cls not in cls._instances:
33 | cls._instances[cls] = super(
34 | SingletonType, cls).__call__(*args, **kwargs)
35 | return cls._instances[cls]
36 |
--------------------------------------------------------------------------------
/prefs_dialog.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | import os
25 |
26 | from PyQt5 import uic
27 | from PyQt5.QtWidgets import QDialog, QWidget
28 |
29 | DIRPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)))
30 |
31 |
32 | class PrefsDialog(QDialog):
33 |
34 | def __init__(self):
35 | QWidget.__init__(self)
36 |
37 | # loading ui from xml
38 | self.ui = uic.loadUi(os.path.join(DIRPATH, 'prefs_dialog.ui'), self)
39 |
40 | def display(self):
41 | self.ui.show()
42 |
--------------------------------------------------------------------------------
/settings.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | ROBOVISION
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | ###########################################################
25 | # #
26 | # settings/config script for various params/variables etc #
27 | # #
28 | ###########################################################
29 |
30 | UDP_IP = "127.0.0.1"
31 | UDP_PORT = 5005
32 |
33 | #
34 | # packet/buffer size to send each packet during file transfer
35 | #
36 | FILE_PKT_SIZE = 4096
37 |
38 | #
39 | # port for receiving files
40 | #
41 | FILE_RECV_PORT = 8888
42 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | from PyQt5.QtGui import QPixmap
25 | import pyttsx3
26 |
27 | def add_image_to_label(lbl, img_path):
28 | """
29 | Note: to add an image to a PyQt5 window/dialog,
30 | we need to create a label and add an image to that label.
31 |
32 | Also, we can resize the label as per dimensions of image.
33 |
34 | """
35 | pixmap = QPixmap(img_path)
36 | lbl.setPixmap(pixmap)
37 |
38 | # optional, resize window to image size
39 | lbl.resize(pixmap.width(),pixmap.height())
40 |
41 | def speak_text(text):
42 | engine = pyttsx3.init()
43 | engine.say(text)
44 | engine.runAndWait()
45 |
46 |
--------------------------------------------------------------------------------
/about_dialog.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | ROBOVISION
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | import os
25 |
26 | from PyQt5 import uic
27 | from PyQt5.QtWidgets import QDialog, QWidget
28 | from utils import add_image_to_label
29 |
30 | DIRPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)))
31 |
32 |
33 | class AboutDialog(QDialog):
34 |
35 | def __init__(self):
36 | QWidget.__init__(self)
37 |
38 | # loading ui from xml
39 | self.ui = uic.loadUi(os.path.join(DIRPATH, 'about_dialog.ui'), self)
40 | add_image_to_label(self.lblIcon, "./images/robot_icon_small.png")
41 |
42 | # keep the window fixed sized
43 | self.setFixedSize(self.size())
44 |
45 | def display(self):
46 | self.ui.show()
47 |
--------------------------------------------------------------------------------
/preferences.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | from configparser import ConfigParser
25 |
26 |
27 | class Preferences:
28 |
29 | def __init__(self):
30 | self.parser = ConfigParser()
31 | self.parser.read("./preferences.ini")
32 |
33 | def load_default_preferences(self):
34 | """
35 | load default preference if preferences.ini file not found
36 | """
37 | pass
38 |
39 | def set_nickname(self, nickname):
40 | self.parser.set('General', 'Nickname', nickname)
41 |
42 | def get_nickname(self):
43 | return self.parser.get('General', 'Nickname')
44 |
45 |
46 | if __name__ == '__main__':
47 | prefs = Preferences()
48 | prefs.set_nickname("Navi")
49 | print("Nickname: ", prefs.get_nickname())
50 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 | # robovision
3 | AI and machine learning-based computer vision for a robot
4 |
5 | # Motivation
6 | Surveillance cameras are passive; they need humans to watch the footage captured by them and to make decisions or take actions.
7 | Robovision is a smart, trained, machine learning, AI based system which can see, make decisions, listen and speak.
8 |
9 | # Features
10 | - Face detection from an image using Haar cascade classifier
11 | - Eyes detection from an image using cascade classifiers.
12 | - Command line tool (under development) for various image processing features.
13 | - Auto mark attendance of an employee with AI camera software.
14 | - Neo - a well trained speaking robot.
15 |
16 | # A Quick Video Demo
17 | [](https://www.youtube.com/watch?v=CCOtPA1-ITs&t=15s)
18 |
19 |
20 | # Dependencies
21 | - PyQt5
22 | - Opencv 3.4.x
23 |
24 | # Setup/Installation
25 | - Use python 3.x
26 | - pip install -r requirements.txt
27 | - Use run.sh to run the application
28 | - Don't hesitate to report the bugs and issues you face !
29 |
30 | # Coming soon...
31 | - Robot Neo that will be monitoring all activities in the app.
32 | - The Neo will talk or chat with you.
33 | - Neo will accept your audio commands.
34 | - Iris identification.
35 | - Office code of conduct compliance checks eg. check if an employee is talking on phone, when it is not allowed in office.
36 | - Talk to users for several actions, eg. greeting Good Morning when an employee enters an office.
37 | - Train itself on the fly to identify newcomers in the office.
38 |
39 | # Some screenshots
40 | 
41 | 
42 | 
43 | 
44 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 |
107 | ##########################################
108 | # ADDED BY NAVI
109 | ##########################################
110 |
111 | # local settings file for project
112 | local_settings.py
113 | labels.pickle
114 | trainer.yml
115 |
116 |
117 |
--------------------------------------------------------------------------------
/video_capture.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | import cv2
25 | import numpy as np
26 | from PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal, QBasicTimer
27 |
28 | from logger import get_logger
29 | log = get_logger()
30 |
31 |
32 | class VideoCapture(QObject):
33 | """
34 | Class for capturing video from web camera using opencv
35 |
36 | It uses a timer function to emit a Qt signal that contains image data.
37 |
38 | Classes interested in processing this image will need to connect to the signal.
39 | """
40 |
41 | # signal for emitting a frame captured from camera
42 | got_image_data_from_camera = pyqtSignal(np.ndarray)
43 |
44 | def __init__(self, camera_port=0, parent=None):
45 | super().__init__(parent)
46 | self.camera = cv2.VideoCapture(camera_port)
47 | self.timer = QBasicTimer()
48 |
49 | def start(self):
50 | log.info("video capture started")
51 | self.timer.start(0, self)
52 |
53 | def stop(self):
54 | log.info("video capture stopped")
55 | self.timer.stop()
56 |
57 | def timerEvent(self, event):
58 | if (event.timerId() != self.timer.timerId()):
59 | log.warning("Failed to setup timer for video capture")
60 | return
61 |
62 | read, data = self.camera.read()
63 | if read:
64 | self.got_image_data_from_camera.emit(data)
65 |
--------------------------------------------------------------------------------
/global_signals.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | #
25 | # a global singleton signal emitter that can be used by any module
26 | #
27 | # For example, any module can feed text to mouth to speak, by emitting
28 | # an signal. Mouth enqueues the text to be spoken.
29 | #
30 | # Usage:
31 | # g_emitter().emit_signal_to_feed_mouth("hello navi")
32 | #
33 |
34 | from PyQt5.QtCore import QObject, pyqtSignal
35 | from singleton import SingletonType
36 |
37 |
38 | class GlobalSignals(QObject):
39 |
40 | feed_mouth = pyqtSignal('QString')
41 | set_speaking_state = pyqtSignal()
42 | set_idle_state = pyqtSignal()
43 |
44 | def __init__(self, parent=None):
45 | super().__init__(parent)
46 |
47 | def emit_signal_to_feed_mouth(self, text):
48 | self.feed_mouth.emit(text)
49 |
50 | def emit_signal_to_set_speaking_state(self):
51 | self.set_speaking_state.emit()
52 |
53 | def emit_signal_to_set_idle_state(self):
54 | self.set_idle_state.emit()
55 |
56 |
57 |
58 | class GlobalSignalEmitter(object, metaclass=SingletonType):
59 | """
60 | singleton class/wrapper to hold global signals to be emitted
61 | """
62 |
63 | _global_signals = None
64 |
65 | def __init__(self):
66 | self._global_signals = GlobalSignals()
67 |
68 |
69 | def g_emitter():
70 | """
71 | function to return singleton instance of global signal emitter
72 | """
73 | return GlobalSignalEmitter.__call__()._global_signals
74 |
--------------------------------------------------------------------------------
/image_widget.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | import cv2
25 | import numpy as np
26 | from PyQt5.QtWidgets import QWidget
27 | from PyQt5.QtGui import QImage, QPainter
28 |
29 |
30 | class ImageWidget(QWidget):
31 | """
32 | Qt Widget to show an image or a frame from video
33 | """
34 | def __init__(self, parent=None):
35 | super().__init__(parent)
36 | # image to be shown in widget
37 | self.image = QImage()
38 | self._red = (0, 0, 255)
39 | self._width = 2
40 | self._min_size = (30, 30)
41 |
42 | def handle_image_data(self, image_data):
43 | self.image = self.get_qimage(image_data)
44 | if self.image.size() != self.size():
45 | self.setFixedSize(self.image.size())
46 |
47 | # redrawing the image
48 | self.update()
49 |
50 | def get_qimage(self, image: np.ndarray):
51 | height, width, colors = image.shape
52 | bytesPerLine = 3 * width
53 |
54 | # composing image from image data
55 | image = QImage(image.data,
56 | width,
57 | height,
58 | bytesPerLine,
59 | QImage.Format_RGB888)
60 |
61 | image = image.rgbSwapped()
62 | return image
63 |
64 | def reset(self):
65 | # creating an empty image to reset/empty the widget
66 | self.image = QImage()
67 | self.update()
68 |
69 | def paintEvent(self, event):
70 | painter = QPainter(self)
71 | painter.drawImage(0, 0, self.image)
72 | self.image = QImage()
73 |
--------------------------------------------------------------------------------
/logger.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | import logging
25 | import os
26 | import datetime
27 |
28 | from singleton import SingletonType
29 |
30 | #
31 | # A singleton logger that will be used globally by the project
32 | # All the log files are created inside ./logs/ dir with current date
33 | #
34 |
35 |
class Logger(object, metaclass=SingletonType):
    """
    Singleton logger used globally by the project.

    Records are sent both to the console and to ./logs/log_YYYY-MM-DD.log;
    the SingletonType metaclass guarantees handlers are attached once.
    """
    # cached logging.Logger instance
    _logger = None

    def __init__(self):
        self._logger = logging.getLogger("crumbs")
        self._logger.setLevel(logging.DEBUG)

        # creating a logging format
        fmt = "[%(levelname)s] %(asctime)s :: %(filename)s:%(lineno)d -" \
              " %(funcName)s() | %(message)s"
        formatter = logging.Formatter(fmt)

        # ensuring that logs dir exists
        # BUG FIX: os.path.isdir() followed by os.mkdir() is a
        # check-then-create race; makedirs(exist_ok=True) is atomic
        # with respect to the "already exists" case
        now = datetime.datetime.now()
        dirname = "./logs"
        os.makedirs(dirname, exist_ok=True)

        # setting handlers
        fileHandler = logging.FileHandler(
            dirname + "/log_" + now.strftime("%Y-%m-%d")+".log")
        streamHandler = logging.StreamHandler()

        fileHandler.setFormatter(formatter)
        streamHandler.setFormatter(formatter)

        self._logger.addHandler(fileHandler)
        self._logger.addHandler(streamHandler)
65 |
66 |
def get_logger():
    """Return the shared logging.Logger held by the Logger singleton."""
    logger_singleton = Logger.__call__()
    return logger_singleton._logger
69 |
if __name__ == "__main__":
    # smoke test: exercise the singleton logger at several levels;
    # output goes to the console and to ./logs/log_<date>.log
    logger = get_logger()
    logger.debug("some debug log")
    logger.info("Hello Navi")
    logger.warning("here is a warning !!!")
75 |
--------------------------------------------------------------------------------
/cmd_listener.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | ROBOVISION
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 | #
24 | # UDP command listener
25 | #
26 | # It emits/sends cmds recevied via nw to the handler
27 | #
28 | import _thread as thread
29 | import socket
30 | from PyQt5.QtCore import QObject, pyqtSignal
31 |
32 | from settings import *
33 |
34 | from logger import get_logger
35 | log = get_logger()
36 |
37 |
class CommandListener(QObject):
    """
    UDP command listener.

    Binds to UDP_PORT (from settings) on all interfaces and emits each
    received text command to the UI through the cmd_received signal.
    """

    # define signal to inform UI about a received cmd
    cmd_received = pyqtSignal('QString')

    def __init__(self):
        # BUG FIX: the original called super(MessageListener, self) —
        # MessageListener is not defined anywhere in this module, so
        # constructing the listener raised NameError. Use the
        # zero-argument super() form instead.
        super().__init__()

        self.start_msg_receiver()

    def start_msg_receiver(self):
        """
        function starts a thread to receive cmds
        """
        try:
            thread.start_new_thread(
                self.monitor_cmds, ("MsgRecvThread", 2, ))
        except Exception as exp:
            log.warning("Error: unable to start cmd recevier thread")
            log.warning(exp)

    def monitor_cmds(self, thread_name, delay):
        """
        Thread body: loop forever receiving UDP datagrams and emitting
        their UTF-8 decoded payload via cmd_received.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind(('', UDP_PORT))

        while True:
            try:
                # buffer size is 1024 bytes
                org_data, addr = sock.recvfrom(1024)
                data = org_data.decode("utf-8")

                log.info("received cmd: " + data)
                self.cmd_received.emit(data)
            except Exception as exp:
                log.warning("Got exception while monitoring cmds")
                log.warning(exp)
74 |
--------------------------------------------------------------------------------
/mouth.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | #
25 | # a mouth that maintains of queue of text to be spoken
26 | #
27 | # it has a slot to feed text to be spoken by it
28 | #
29 | # this text is added in a queue to be played
30 |
31 | import time
32 | from multiprocessing import Queue
33 | from threading import Thread
34 |
35 | from PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal
36 |
37 | from utils import speak_text
38 |
39 | from global_signals import g_emitter
40 |
41 | from logger import get_logger
42 | log = get_logger()
43 |
44 |
class Mouth(QObject, Thread):
    """
    Speech output queue for the robot.

    Text fed in through the feed_text slot is placed on a queue; the
    thread loop speaks queued items one at a time and keeps the face's
    mouth animation in sync with whether speech is in progress.
    """

    def __init__(self, parent=None):
        super().__init__(parent)

        # max capacity of mouth
        BUF_SIZE = 100
        self.queue = Queue(BUF_SIZE)

    @pyqtSlot('QString')
    def feed_text(self, text):
        """Slot: enqueue a piece of text to be spoken."""
        log.info("Mouth fed with text: %s" % text)
        self.queue.put(text)

    def run(self):
        """Thread body: speak queued texts and update the face state."""
        while True:
            if not self.queue.empty():
                text = self.queue.get()
                log.info("Mouth speaking text: %s" % text)

                # ignore empty/None texts
                if not text:
                    continue

                # BUG FIX: switch the face to its speaking animation
                # BEFORE calling speak_text(); the original emitted this
                # signal only after speak_text() returned, so — assuming
                # speak_text blocks while audio plays (TODO confirm) —
                # the mouth never animated during actual speech.
                g_emitter().emit_signal_to_set_speaking_state()
                speak_text(text)

                time.sleep(.1)
            else:
                # tell face to change mouth animations to idle
                time.sleep(.2)
                g_emitter().emit_signal_to_set_idle_state()
78 |
--------------------------------------------------------------------------------
/opencv.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | #
25 | # command line script for opencv functions
26 | #
27 |
28 | import argparse
29 | import sys
30 | import cv2
31 |
32 |
class Opencv(object):
    """
    Command-line front end for simple OpenCV operations.

    Dispatch pattern: the first positional argument names a method of
    this class, e.g.
        python opencv.py to_grayscale --input a.png --output b.png
    """

    def __init__(self):

        parser = argparse.ArgumentParser(description='Opencv commandline')
        parser.add_argument('command', help='Subcommand to run')

        args = parser.parse_args(sys.argv[1:2])
        if not hasattr(self, args.command):
            print("Unrecognized command")
            parser.print_help()
            # BUG FIX: use sys.exit() — the exit() builtin is injected by
            # the site module and is not guaranteed to exist in scripts
            sys.exit(1)

        # use dispatch pattern to invoke method with same name
        getattr(self, args.command)()

    def to_grayscale(self):
        """Subcommand: read --input image, save a grayscale copy to --output."""
        try:

            parser = argparse.ArgumentParser(
                description='Convert an image to grayscale')

            # arguments for this subcommand
            parser.add_argument('--input', help='input image name/path')
            parser.add_argument('--output', help='output image name/path')

            # parse args for this command
            args = parser.parse_args(sys.argv[2:])

            # reading with flag 0 loads the image already grayscaled;
            # BUG FIX: imread returns None (no exception) on a bad path,
            # so check explicitly instead of failing later in imwrite
            img = cv2.imread(args.input, 0)
            if img is None:
                print("Could not read input image: %s" % args.input)
                return

            # saving grayscaled image to output file
            cv2.imwrite(args.output, img)

            print(
                "Converted '%s' to grayscale and saved in '%s'" %
                (args.input, args.output))
        except Exception as exp:
            print(
                "Got exception while converting image to grayscale: ",
                str(exp))
75 |
76 |
if __name__ == "__main__":
    # constructing Opencv parses sys.argv and dispatches to the subcommand
    Opencv()
79 |
--------------------------------------------------------------------------------
/ear.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | #
25 | # an ear to continuously hear for sounds and make decisions
26 | #
27 |
28 | import time
29 | from multiprocessing import Queue
30 | from threading import Thread
31 |
32 | from PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal
33 |
34 | from utils import speak_text
35 |
36 | from global_signals import g_emitter
37 |
38 | import speech_recognition as sr
39 |
40 | from logger import get_logger
41 | log = get_logger()
42 |
43 |
class Ear(QObject, Thread):
    """
    Continuously listens for sounds from the microphone.

    run() loops forever calling monitor_sounds(); get_audio() records a
    single utterance and returns the text recognized by Google Speech
    Recognition.
    """

    def __init__(self, parent=None):
        super().__init__(parent)

    def get_audio(self):
        """
        Get audio from the microphone.

        The SpeechRecognition package is used to automatically stop listening
        when the user stops speaking.

        Returns the text recognized by Google Speech Recognition
        (the original docstring claimed raw PCM, but the code returns text).
        """
        mic_names = sr.Microphone.list_microphone_names()
        log.debug(mic_names)

        r = sr.Recognizer()

        # raises ValueError if no device is literally named "default"
        di = mic_names.index("default")

        with sr.Microphone(device_index=di) as source:
            # with sr.Microphone() as source:
            log.debug("listening for audio from microphone")
            # r.adjust_for_ambient_noise(source)
            audio = r.listen(source)
            log.debug("listening done")

        # recognize speech using Google Speech Recognition
        text = r.recognize_google(audio)

        return text

    def monitor_sounds(self):
        # TODO check audio sounds and make decisions
        pass

    def run(self):
        """
        thread function to continuously monitor sounds
        """
        while True:
            # BUG FIX: the original called bare monitor_sounds(), a name
            # that does not exist at module level — NameError on the very
            # first loop iteration once the thread started
            self.monitor_sounds()
            time.sleep(1)
91 |
--------------------------------------------------------------------------------
/eye_detection.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | #
25 | # eye detection from a live video
26 | #
27 |
28 | import numpy as np
29 | import cv2 as cv
30 |
31 |
class EyeDetector:
    """
    Live eye detection from the webcam using Haar cascades.

    Constructing the object runs the whole capture/annotate/display
    loop until Esc is pressed or the camera stops producing frames.
    """

    def __init__(self, face_cascade_xml, eye_cascade_xml):

        # train/initialize face classifier
        self.face_cascade = cv.CascadeClassifier(face_cascade_xml)

        # train/initialize eye classifier
        self.eye_cascade = cv.CascadeClassifier(eye_cascade_xml)

        # reading data from webcam
        # for internal webcam on laptop use 0
        # for external webcam on laptop/PC use 1
        cap = cv.VideoCapture(0)

        while True:

            ret, img = cap.read()

            # BUG FIX: cap.read() returns (False, None) when no frame is
            # available (camera unplugged/busy); the original passed None
            # on to cvtColor and crashed
            if not ret:
                break

            self.process_image(img)

            # wait for key for 10 mscs, or continue
            k = cv.waitKey(10) & 0xff

            # if 'esc' pressed, terminate
            if k == 27:
                break

        cap.release()
        cv.destroyAllWindows()

    def process_image(self, img):
        """
        this function detects eyes in a given image,
        and draws an outline rectangle across eyes
        """
        # HAAR cascade will need a grayscaled image to detect faces and eyes
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

        # lets find faces in the image and get their positions
        faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:

            # define roi for eyes detection; ideally, we should detect
            # eyes within the rectangular bounds of a face
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]

            # save face to image for testing purpose
            # NOTE(review): debug leftover — rewrites this file on every
            # frame; consider removing for production use
            cv.imwrite("../my-face.png", roi_gray)

            # drawing rects for eyes
            eyes = self.eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (255, 0, 0), 2)

        font = cv.FONT_HERSHEY_SIMPLEX
        cv.putText(img,'Press Esc to quit',(10,450), font, 1,(255,255,255),1,cv.LINE_AA)

        # showing image
        cv.imshow('Haar Eye Detection', img)
95 |
if __name__ == "__main__":
    # path to Haar face classifier's xml file
    face_cascade_xml = './cascades/haarcascades_cuda/haarcascade_frontalface_default.xml'

    # path to Haar eye classifier's xml file
    eye_cascade_xml = './cascades/haarcascades_cuda/haarcascade_eye.xml'

    # run the eye detector with given classifiers;
    # the constructor blocks until Esc is pressed
    ed = EyeDetector(face_cascade_xml, eye_cascade_xml)
105 |
--------------------------------------------------------------------------------
/robot.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | PyLanMessenger
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 |
25 | import time
26 | from threading import Thread
27 |
28 | from utils import add_image_to_label
29 |
30 | from random import randint
31 |
32 | from PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal
33 |
34 | from logger import get_logger
35 | log = get_logger()
36 |
37 |
class Robot(QObject, Thread):
    """
    robot can have states IDLE and SPEAKING,
    depending upon state the facial images changes
    """

    @pyqtSlot()
    def set_idle_state(self):
        """Slot: put the face animation into the IDLE state."""
        self.become_idle()

    @pyqtSlot()
    def set_speaking_state(self):
        """Slot: put the face animation into the SPEAKING state."""
        self.start_speaking()

    def __init__(self, lbl, parent=None):
        super().__init__(parent)
        # label widget the face images are rendered onto
        self.lbl = lbl

        # actions randomly picked from while idle
        self.idle_actions = {
            "0": self.face_normal,
            "1": self.look_left,
            "2": self.look_right,
            "3": self.look_up,
            "4": self.look_down
        }

        # actions randomly picked from while speaking
        self.speaking_actions = {
            "0": self.face_normal,
            "1": self.mouth_open
        }

        #self.start_speaking()
        self.become_idle()

        log.debug("robot created")

    def become_idle(self):
        self.state = "IDLE"

    def start_speaking(self):
        self.state = "SPEAKING"

    def look_left(self):
        add_image_to_label(self.lbl, "./images/robot/look_left.png")

    def look_right(self):
        add_image_to_label(self.lbl, "./images/robot/look_right.png")

    def look_down(self):
        add_image_to_label(self.lbl, "./images/robot/look_down.png")

    def look_up(self):
        add_image_to_label(self.lbl, "./images/robot/look_up.png")

    def face_normal(self):
        add_image_to_label(self.lbl, "./images/robot/normal.png")

    def mouth_open(self):
        add_image_to_label(self.lbl, "./images/robot/mouth_open.png")

    def set_idle_face_img(self):
        # BUG FIX: the original used randint(0, 2), so the "3" (look_up)
        # and "4" (look_down) entries of idle_actions could never be
        # chosen; derive the upper bound from the action table itself
        s = str(randint(0, len(self.idle_actions) - 1))
        try:
            self.idle_actions[s]()
        except Exception as exp:
            log.warning("robot idle state action failed")
            log.warning("with exception: %s" % str(exp))

    def set_speaking_face_img(self):
        # upper bound derived from the table for consistency
        s = str(randint(0, len(self.speaking_actions) - 1))
        try:
            self.speaking_actions[s]()
        except Exception as exp:
            log.warning("robot speaking state action failed")
            log.warning("with exception: %s" % str(exp))

    def run(self):
        """Thread body: animate the face according to the current state."""
        while True:
            if self.state == "IDLE":
                self.set_idle_face_img()
                time.sleep(randint(1, 4))
                continue

            if self.state == "SPEAKING":
                # NOTE(review): this branch alternates mouth_open/normal
                # directly rather than using set_speaking_face_img()
                self.mouth_open()
                time.sleep(.3)
                self.face_normal()
                time.sleep(.2)
                continue
127 |
--------------------------------------------------------------------------------
/face_trainer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | #
25 | # a trainer for faces, inorder to identify faces with names/labels
26 | #
27 | # reference
28 | # https://www.youtube.com/watch?v=PmZ29Vta7Vc
29 | #
30 |
31 | import os
32 | import numpy as np
33 | from PIL import Image
34 | import cv2 as cv
35 | import pickle
36 |
37 |
38 | from PyQt5.QtCore import QObject, pyqtSignal
39 |
40 | from threading import Thread
41 |
42 | from global_signals import g_emitter
43 |
44 | from logger import get_logger
45 | log = get_logger()
46 |
47 |
class FaceTrainer(QObject, Thread):
    """
    Trains an LBPH face recognizer from a directory tree of face images.

    Each sub-directory name under face_images_dataset_dir becomes the
    label for the images it contains. The trained model is saved to
    dataset/face_trainer.yml and the label-to-id mapping to
    dataset/face_trainer_labels.pickle.
    """

    # signal emitted with (label, path) for each image being processed
    processing_image = pyqtSignal('QString', 'QString')
    face_training_finished = pyqtSignal()

    def __init__(self, face_cascade_xml, face_images_dataset_dir, parent=None):
        super().__init__(parent)

        self.face_cascade = cv.CascadeClassifier(face_cascade_xml)
        self.recognizer = cv.face.LBPHFaceRecognizer_create()

        self.face_images_dataset_dir = face_images_dataset_dir

    def run(self):
        """Thread body: walk the dataset, collect face ROIs, train, save."""
        y_labels = []
        x_train = []
        cur_id = 0
        label_ids = {}

        # fetching images from dataset for training
        for root, dirs, files in os.walk(self.face_images_dataset_dir):

            # FIXME - adding talkative settings in prefs !!!
            # if our robot it too talkative, emit this signal
            g_emitter().emit_signal_to_feed_mouth(
                "checking %s" % os.path.basename(root))

            for file in files:
                # check file extension for image files
                # BUG FIX: compare case-insensitively so files such as
                # photo.JPG are not silently skipped
                extension = os.path.splitext(file)[1].lower()
                if extension in [".jpg", ".jpeg", ".png"]:
                    full_path = os.path.join(root, file)
                    label = os.path.basename(root).replace(" ", "-").lower()

                    # assign a fresh numeric id to each new label
                    if label not in label_ids:
                        label_ids[label] = cur_id
                        cur_id += 1

                    img_id = label_ids[label]
                    log.debug(
                        "FaceTrainer :: %s - %s - %s"
                        % (str(label), str(img_id), str(full_path)))

                    self.processing_image.emit(label, full_path)

                    # convert image to grayscale
                    pil_image = Image.open(full_path).convert("L")

                    # convert grayscale image to numpy array
                    image_array = np.array(pil_image, "uint8")

                    faces = self.face_cascade.detectMultiScale(
                        image_array, 1.3, 5)

                    for (x, y, w, h) in faces:
                        # use only the detected face region as a
                        # training sample
                        roi = image_array[y:y+h, x:x+w]
                        x_train.append(roi)
                        y_labels.append(img_id)

        # save trained labels
        with open("dataset/face_trainer_labels.pickle", 'wb') as f:
            pickle.dump(label_ids, f)

        self.recognizer.train(x_train, np.array(y_labels))
        self.recognizer.save("dataset/face_trainer.yml")

        self.face_training_finished.emit()
120 |
121 |
if __name__ == "__main__":
    # path to Haar face classifier's xml file
    face_cascade_xml = './cascades/haarcascades_cuda/haarcascade_frontalface_default.xml'

    # dataset dir is machine-specific, hence read from local_settings
    from local_settings import FACE_IMAGES_DATASET_DIR
    ft = FaceTrainer(face_cascade_xml, FACE_IMAGES_DATASET_DIR)
    # run the trainer thread and block until training completes
    ft.start()
    ft.join()
130 |
--------------------------------------------------------------------------------
/face_recognition.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | ROBOVISION
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | #
25 | # face detection from an image using Haar cascade classifier
26 | #
27 |
28 | import numpy as np
29 | import cv2 as cv
30 | import pickle
31 |
32 | from logger import get_logger
33 | log = get_logger()
34 |
35 |
class FaceRecognition:
    """
    Live face recognition from the webcam.

    Uses Haar cascades for face/eye detection and a pre-trained LBPH
    recognizer (dataset/face_trainer.yml) to put a name on each face.
    The whole capture/display loop runs inside __init__ until Esc.
    """

    def __init__(self, face_cascade_xml, eye_cascade_xml):

        # train/initialize face classifier
        face_cascade = cv.CascadeClassifier(face_cascade_xml)

        # train/initialize eye classifier
        eye_cascade = cv.CascadeClassifier(eye_cascade_xml)

        # creating a face recognier with pretrained data
        log.info("creating recognier from pre-trained data")
        recognizer = cv.face.LBPHFaceRecognizer_create()
        recognizer.read("dataset/face_trainer.yml")

        # reading data from webcam
        # for internal webcam on laptop use 0
        # for external webcam on laptop/PC use 1
        cap = cv.VideoCapture(0)

        labels = {}

        font = cv.FONT_HERSHEY_SIMPLEX

        # load trained labels, inverted to map numeric id -> name
        with open("dataset/face_trainer_labels.pickle", 'rb') as f:
            org_labels = pickle.load(f)
            labels = {v:k for k, v in org_labels.items()}

        log.info("capturing video data")
        while True:

            ret, img = cap.read()

            # BUG FIX: stop when no frame could be read; the original
            # passed None on to cvtColor and crashed
            if not ret:
                break

            # HAAR cascade will need grayscaled image to detect faces and eyes
            gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

            # lets find faces in the image and get their positions
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:

                # drawing rect for face
                cv.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

                # define roi for eyes detection; ideally, we should detect
                # eyes within the rectangular bounds of a face
                roi_gray = gray[y:y+h, x:x+w]
                roi_color = img[y:y+h, x:x+w]

                # identify the face with recognizer
                index, conf = recognizer.predict(roi_gray)

                if conf > 75 and conf <= 95:
                    name = labels[index]
                    # Hurray, we detected a face !!!
                    # BUG FIX: the module-level logger is named `log`;
                    # the original called the undefined name `logger`,
                    # raising NameError on every successful match
                    log.info(
                        "Identified face: Name: %s, index: %d, confidence level: %d" % (name, index, conf))
                    cv.putText(
                        img, name, (x, y), font, 1, (255, 255, 255), 1, cv.LINE_AA)

                # drawing rects for eyes
                eyes = eye_cascade.detectMultiScale(roi_gray)
                for (ex, ey, ew, eh) in eyes:
                    cv.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)

            cv.putText(
                img, 'Press Esc to quit', (10, 450), font, 1, (255, 255, 255), 1, cv.LINE_AA)

            # showing image
            cv.imshow('Haar Face Detection', img)

            # wait for key for 10 mscs, or continue
            k = cv.waitKey(10) & 0xff

            # if 'esc' pressed, terminate
            if k == 27:
                break

        cap.release()
        cv.destroyAllWindows()
116 |
if __name__ == "__main__":

    # lets use pretrained cascade classifiers of opencv
    # for face and eyes detection

    # path to Haar face classifier's xml file
    face_cascade_xml = './cascades/haarcascades_cuda/haarcascade_frontalface_default.xml'

    # path to Haar eye classifier's xml file
    eye_cascade_xml = './cascades/haarcascades_cuda/haarcascade_eye.xml'

    # run the face recognizer with the given classifiers;
    # the constructor blocks until Esc is pressed
    fr = FaceRecognition(face_cascade_xml, eye_cascade_xml)
130 |
--------------------------------------------------------------------------------
/dashboard.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | RoboVision
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | import sys
25 | from os import path
26 |
27 | import cv2
28 | import numpy as np
29 |
30 | from PyQt5 import QtCore
31 | from PyQt5 import QtWidgets
32 | from PyQt5 import QtGui
33 |
34 |
class RecordVideo(QtCore.QObject):
    """
    Polls the webcam through a zero-interval Qt timer and emits each
    successfully-read frame as a numpy array.
    """

    # emitted with every frame read from the camera
    image_data = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, camera_port=0, parent=None):
        super().__init__(parent)
        self.camera = cv2.VideoCapture(camera_port)
        self.timer = QtCore.QBasicTimer()

    def start_recording(self):
        """Begin emitting frames as fast as the event loop allows."""
        print("[INFO] recording started")
        self.timer.start(0, self)

    def timerEvent(self, event):
        """Grab one frame per tick of our timer; ignore other timers."""
        if event.timerId() != self.timer.timerId():
            return

        ok, frame = self.camera.read()
        if ok:
            self.image_data.emit(frame)
55 |
56 |
class FaceDetectionWidget(QtWidgets.QWidget):
    """
    Widget that displays camera frames with rectangles drawn around
    faces detected by a Haar cascade classifier.
    """
    def __init__(self, haar_cascade_filepath, parent=None):
        super().__init__(parent)
        self.classifier = cv2.CascadeClassifier(haar_cascade_filepath)
        # frame currently displayed
        self.image = QtGui.QImage()
        # rectangle colour (BGR, since rects are drawn with cv2) and width
        self._red = (0, 0, 255)
        self._width = 2
        # smallest face size the classifier will report
        self._min_size = (30, 30)

    def detect_faces(self, image: np.ndarray):
        """Return face bounding boxes (x, y, w, h) found in a frame."""
        # haarclassifiers work better in black and white
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.equalizeHist(gray_image)

        faces = self.classifier.detectMultiScale(gray_image,
                                                 scaleFactor=1.3,
                                                 minNeighbors=4,
                                                 flags=cv2.CASCADE_SCALE_IMAGE,
                                                 minSize=self._min_size)

        return faces

    def image_data_slot(self, image_data):
        """Slot: annotate an incoming frame with face rects and repaint."""
        faces = self.detect_faces(image_data)
        for (x, y, w, h) in faces:
            cv2.rectangle(image_data,
                          (x, y),
                          (x+w, y+h),
                          self._red,
                          self._width)

        self.image = self.get_qimage(image_data)
        if self.image.size() != self.size():
            self.setFixedSize(self.image.size())

        self.update()

    def get_qimage(self, image: np.ndarray):
        """Convert a 3-channel numpy frame (BGR) into a QImage (RGB)."""
        height, width, colors = image.shape
        bytesPerLine = 3 * width
        QImage = QtGui.QImage

        image = QImage(image.data,
                       width,
                       height,
                       bytesPerLine,
                       QImage.Format_RGB888)

        image = image.rgbSwapped()
        return image

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)
        # BUG FIX: the original reset self.image to an empty QImage after
        # every paint, so system-triggered repaints (window re-exposed,
        # resized) blanked the widget until the next camera frame. Keep
        # the last frame so it can be redrawn.
112 |
113 |
class MainWidget(QtWidgets.QWidget):
    """
    Top-level widget: the face-detection display stacked above a
    Start button that begins camera capture.
    """
    def __init__(self, haarcascade_filepath, parent=None):
        super().__init__(parent)

        self.face_detection_widget = FaceDetectionWidget(haarcascade_filepath)
        self.record_video = RecordVideo()

        # route camera frames into the face-detection widget
        self.record_video.image_data.connect(
            self.face_detection_widget.image_data_slot)

        vbox = QtWidgets.QVBoxLayout()

        # widget for camera display & face detection
        vbox.addWidget(self.face_detection_widget)

        # start button kicks off the camera timer
        self.run_button = QtWidgets.QPushButton('Start')
        vbox.addWidget(self.run_button)
        self.run_button.clicked.connect(self.record_video.start_recording)

        self.setLayout(vbox)
136 |
137 |
def main(haar_cascade_filepath):
    """Build the Qt application and run its event loop until exit."""
    app = QtWidgets.QApplication(sys.argv)

    window = QtWidgets.QMainWindow()
    window.setCentralWidget(MainWidget(haar_cascade_filepath))
    window.show()

    sys.exit(app.exec_())
146 |
147 |
if __name__ == '__main__':
    # Resolve the cascade file relative to this script's own directory so
    # the app works regardless of the current working directory (the old
    # CWD-relative path broke when launched from elsewhere, and script_dir
    # was computed but never used).
    script_dir = path.dirname(path.realpath(__file__))
    face_cascade_xml = path.join(
        script_dir, 'cascades', 'haarcascades_cuda',
        'haarcascade_frontalface_default.xml')
    main(face_cascade_xml)
154 |
--------------------------------------------------------------------------------
/ocr_credit_card.py:
--------------------------------------------------------------------------------
#
# Opencv OCR
# reference
# https://www.pyimagesearch.com/2017/07/17/credit-card-ocr-with-opencv-and-python/
#

from imutils import contours
import numpy as np
import argparse
import imutils
import cv2

# define a dictionary that maps the first digit of a credit card
# number to the credit card type
FIRST_NUMBER = {
    "3": "American Express",
    "4": "Visa",
    "5": "MasterCard",
    "6": "Discover Card"
}

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="path to input image")
ap.add_argument("-r", "--reference", required=True,
                help="path to reference OCR-A image")
args = vars(ap.parse_args())


# load the reference OCR-A image from disk, convert it to grayscale,
# and threshold it (inverted), such that the digits appear as *white*
# on a *black* background
ref = cv2.imread(args["reference"])
if ref is None:
    # fail early with a clear message instead of a cryptic cvtColor error
    raise SystemExit("could not read reference image: %s" % args["reference"])
ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
ref = cv2.threshold(ref, 10, 255, cv2.THRESH_BINARY_INV)[1]

# find contours in the OCR-A image (i.e,. the outlines of the digits)
# sort them from left to right, and initialize a dictionary to map
# digit name to the ROI
refCnts = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL,
                           cv2.CHAIN_APPROX_SIMPLE)
# BUGFIX: "refCnts[0] if imutils.is_cv2() else refCnts[1]" selected the
# hierarchy array (not the contours) on OpenCV >= 4, where findContours
# returns a 2-tuple again; grab_contours handles every OpenCV version.
refCnts = imutils.grab_contours(refCnts)
refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]
digits = {}

# loop over the OCR-A reference contours
for (i, c) in enumerate(refCnts):
    # compute the bounding box for the digit, extract it, and resize
    # it to a fixed size
    (x, y, w, h) = cv2.boundingRect(c)
    roi = ref[y:y + h, x:x + w]
    roi = cv2.resize(roi, (57, 88))

    # update the digits dictionary, mapping the digit name to the ROI
    digits[i] = roi

# initialize a rectangular (wider than it is tall) and square
# structuring kernel
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 3))
sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))

# load the input image, resize it, and convert it to grayscale
image = cv2.imread(args["image"])
if image is None:
    raise SystemExit("could not read input image: %s" % args["image"])
image = imutils.resize(image, width=300)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# apply a tophat (whitehat) morphological operator to find light
# regions against a dark background (i.e., the credit card numbers)
tophat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, rectKernel)

# compute the Scharr gradient of the tophat image, then scale
# the rest back into the range [0, 255]
gradX = cv2.Sobel(tophat, ddepth=cv2.CV_32F, dx=1, dy=0,
                  ksize=-1)
gradX = np.absolute(gradX)
(minVal, maxVal) = (np.min(gradX), np.max(gradX))
gradX = (255 * ((gradX - minVal) / (maxVal - minVal)))
gradX = gradX.astype("uint8")


# apply a closing operation using the rectangular kernel to help
# close gaps in between credit card number digits, then apply
# Otsu's thresholding method to binarize the image
gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
thresh = cv2.threshold(gradX, 0, 255,
                       cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# apply a second closing operation to the binary image, again
# to help close gaps between credit card number regions
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, sqKernel)


# find contours in the thresholded image, then initialize the
# list of digit locations
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)  # version-proof contour extraction
locs = []

# loop over the contours
for (i, c) in enumerate(cnts):
    # compute the bounding box of the contour, then use the
    # bounding box coordinates to derive the aspect ratio
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)

    # since credit cards used a fixed size fonts with 4 groups
    # of 4 digits, we can prune potential contours based on the
    # aspect ratio
    if ar > 2.5 and ar < 4.0:
        # contours can further be pruned on minimum/maximum width
        # and height
        if (w > 40 and w < 55) and (h > 10 and h < 20):
            # append the bounding box region of the digits group
            # to our locations list
            locs.append((x, y, w, h))


# sort the digit locations from left-to-right, then initialize the
# list of classified digits
locs = sorted(locs, key=lambda x: x[0])
output = []

# loop over the 4 groupings of 4 digits
for (i, (gX, gY, gW, gH)) in enumerate(locs):
    # initialize the list of group digits
    groupOutput = []

    # extract the group ROI of 4 digits from the grayscale image,
    # then apply thresholding to segment the digits from the
    # background of the credit card. clamp the expanded box at 0:
    # a negative slice start would wrap around in numpy
    group = gray[max(0, gY - 5):gY + gH + 5, max(0, gX - 5):gX + gW + 5]
    group = cv2.threshold(group, 0, 255,
                          cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

    # detect the contours of each individual digit in the group,
    # then sort the digit contours from left to right
    digitCnts = cv2.findContours(group.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
    digitCnts = imutils.grab_contours(digitCnts)
    digitCnts = contours.sort_contours(digitCnts,
                                       method="left-to-right")[0]

    # loop over the digit contours
    for c in digitCnts:
        # compute the bounding box of the individual digit, extract
        # the digit, and resize it to have the same fixed size as
        # the reference OCR-A images
        (x, y, w, h) = cv2.boundingRect(c)
        roi = group[y:y + h, x:x + w]
        roi = cv2.resize(roi, (57, 88))

        # initialize a list of template matching scores
        scores = []

        # loop over the reference digit name and digit ROI
        for (digit, digitROI) in digits.items():
            # apply correlation-based template matching, take the
            # score, and update the scores list
            result = cv2.matchTemplate(roi, digitROI,
                                       cv2.TM_CCOEFF)
            (_, score, _, _) = cv2.minMaxLoc(result)
            scores.append(score)

        # the classification for the digit ROI will be the reference
        # digit name with the *largest* template matching score
        groupOutput.append(str(np.argmax(scores)))

    # draw the digit classifications around the group
    cv2.rectangle(image, (gX - 5, gY - 5),
                  (gX + gW + 5, gY + gH + 5), (0, 0, 255), 2)
    cv2.putText(image, "".join(groupOutput), (gX, gY - 15),
                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)

    # update the output digits list
    output.extend(groupOutput)

# display the output credit card information to the screen; guard
# against no detections / an unknown first digit instead of crashing
# with IndexError / KeyError
if not output:
    raise SystemExit("no credit card digits were detected")
print("Credit Card Type: {}".format(FIRST_NUMBER.get(output[0], "Unknown")))
print("Credit Card #: {}".format("".join(output)))
cv2.imshow("Image", image)
cv2.waitKey(0)
186 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | """
4 | ROBOVISION
5 | ______________
6 |
7 | This program is free software: you can redistribute it and/or modify
8 | it under the terms of the GNU General Public License as published by
9 | the Free Software Foundation, either version 3 of the License, or
10 | (at your option) any later version.
11 |
12 | This program is distributed in the hope that it will be useful,
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | GNU General Public License for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with this program. If not, see .
19 |
20 | Project Author/Architect: Navjot Singh
21 |
22 | """
23 |
24 | import sys
25 | import os
26 | import cv2
27 | import numpy as np
28 |
29 | from PyQt5 import uic
30 | from PyQt5.QtWidgets import (
31 | QApplication, QWidget, QMenu, QMainWindow, QMessageBox, QFileDialog,
32 | QSystemTrayIcon, QStyle, QAction, qApp)
33 |
34 | from PyQt5.QtGui import QIcon
35 |
36 | from PIL import Image
37 |
38 | from about_dialog import AboutDialog
39 | from prefs_dialog import PrefsDialog
40 | from video_capture import VideoCapture
41 | from image_widget import ImageWidget
42 | from face_trainer import FaceTrainer
43 | from robot import Robot
44 | from mouth import Mouth
45 | from utils import speak_text
46 | from global_signals import g_emitter
47 |
48 | from logger import get_logger
49 | log = get_logger()
50 |
51 | DIRPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)))
52 |
53 |
class AppWindow(QMainWindow):
    """
    Main GUI window for the RoboVision application.

    Wires up the widgets loaded from app.ui: video capture/analysis,
    face training, face identification, image analysis, the system
    tray icon, and the talking robot threads.
    """

    def __init__(self):
        QWidget.__init__(self)

        # loading ui from xml
        uic.loadUi(os.path.join(DIRPATH, 'app.ui'), self)

        # FIXME - libpng warning: iCCP: known incorrect sRGB profile
        self.setWindowIcon(QIcon("./images/robot_icon.png"))

        # keep the window fixed sized
        self.setFixedSize(self.size())

        # lazily-built, cached Haar cascade classifier
        # (see detect_face_in_image_data)
        self.classifier = None

        # button event handlers
        self.btnStartCaptureForVideoAnalysis.clicked.connect(
            self.start_capture_for_video_analysis)
        self.btnStopCaptureForVideoAnalysis.clicked.connect(
            self.stop_capture_for_video_analysis)

        self.btnChooseClassifierXML.clicked.connect(
            self.choose_classifier_file)

        self.btnChooseImage.clicked.connect(self.choose_image_for_analysis)

        self.setup_tray_menu()

        # add camera ids
        for i in range(0, 11):
            self.cboxCameraIds.addItem(str(i))
            self.cboxCameraIds1.addItem(str(i))

        # setting up handlers for menubar actions
        self.actionAbout.triggered.connect(self.about)
        self.actionExit.triggered.connect(qApp.quit)
        self.actionPreferences.triggered.connect(self.show_preferences)

        # video analysis image widget
        self.img_widget_vid_analysis = ImageWidget()
        self.hlayoutVideoAnalysis.addWidget(self.img_widget_vid_analysis)

        # face training image widget
        self.img_widget_face_training = ImageWidget()
        self.hlayoutFaceTrainingImg.addWidget(self.img_widget_face_training)

        # face identification image widget
        self.img_widget_identify_face = ImageWidget()
        self.hlayoutIdentifyFace.addWidget(self.img_widget_identify_face)

        # image analysis image widget, seeded with a placeholder image
        self.img_widget_img_analysis = ImageWidget()
        self.hlayoutImageAnalysis.addWidget(self.img_widget_img_analysis)
        img = cv2.imread("images/human.png")
        self.img_widget_img_analysis.handle_image_data(img)

        self.vid_capture = VideoCapture()
        self.vid_capture.got_image_data_from_camera.connect(
            self.process_image_data_from_camera)

        self.highlight_faces = self.chkHighlightFaces.isChecked()
        self.chkHighlightFaces.stateChanged.connect(
            self.highlight_faces_checkbox_changed)
        self.chckGrayscale.stateChanged.connect(
            self.grayscale_checkbox_changed)

        # face trainer dataset browser btn handler
        self.btnBrowseDatasetForFaceTrainer.clicked.connect(
            self.browse_dataset_for_face_trainer)
        self.btnBrowseClassifierForFaceTrainer.clicked.connect(
            self.browse_classifier_file_for_face_trainer)
        self.btnStartFaceTrainer.clicked.connect(self.start_face_trainer)

        self.btnBrowseIdentifyFace.clicked.connect(self.browse_identify_face)

        self.btnTalk.clicked.connect(self.lets_talk)

        # create and start robot
        self.robot = Robot(self.lblRobot)

        self.mouth = Mouth()

        # connect global signals to slots
        g_emitter().feed_mouth.connect(self.mouth.feed_text)
        g_emitter().set_speaking_state.connect(self.robot.set_speaking_state)
        g_emitter().set_idle_state.connect(self.robot.set_idle_state)

        self.robot.start()
        self.mouth.start()

    def lets_talk(self):
        """Feed the text typed in the talk box to the robot's mouth and
        clear the box."""
        text = self.teTalk.toPlainText()
        self.teTalk.setText("")
        g_emitter().emit_signal_to_feed_mouth(text)

    def browse_identify_face(self):
        """Let the user pick an image file and show it in the
        face-identification widget."""
        fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
        self.teIdentifyFace.setText(fname[0])

        img = cv2.imread(fname[0])
        self.img_widget_identify_face.handle_image_data(img)

    def start_face_trainer(self):
        """Start a FaceTrainer thread with the configured classifier xml
        and dataset directory, and hook up its progress signals."""
        dataset_dir = self.teFaceTrainerDataset.toPlainText()
        classifier_xml = self.teFaceTrainerClassifier.toPlainText()
        log.info(
            "starting face trainer with classifier '%s', dataset '%s'" % (
                classifier_xml, dataset_dir))

        ft = FaceTrainer(classifier_xml, dataset_dir)
        ft.processing_image.connect(self.processing_image_for_training)
        ft.face_training_finished.connect(self.face_training_finished)
        ft.start()
        self.lblFaceTrainingStatus.setText("FACE TRAINING UNDER PROGRESS")

    def face_training_finished(self):
        """Slot invoked when the face trainer thread completes."""
        self.lblFaceTrainingStatus.setText("FACE TRAINING FINISHED")
        g_emitter().emit_signal_to_feed_mouth("face training finished")

    def processing_image_for_training(self, label, fname):
        """Slot: display the image currently being learned by the
        trainer; a bad/broken image file is logged, not fatal."""
        log.info("processing image for training: '%s'" % label)
        self.lblFaceTrainerCurImg.setText("Learning face of: '%s' " % label)

        try:
            img = cv2.imread(fname)
            self.img_widget_face_training.handle_image_data(img)
        except Exception as exp:
            log.warning(
                "failed processing image '%s' while training" % fname)
            log.warning("Exception: %s" % str(exp))

    def browse_dataset_for_face_trainer(self):
        """Let the user choose the dataset directory for face training."""
        dataset_dir = str(QFileDialog.getExistingDirectory(
            self, 'Select directory for dataset', '/home'))
        log.info("dataset dir file: %s" % dataset_dir)
        self.teFaceTrainerDataset.setText(dataset_dir)

    def browse_classifier_file_for_face_trainer(self):
        """Let the user choose the classifier xml for face training."""
        classifier_xml = QFileDialog.getOpenFileName(
            self, 'Open file', '/home')
        log.info("classifier xml file: %s" % classifier_xml[0])
        self.teFaceTrainerClassifier.setText(classifier_xml[0])

    def grayscale_checkbox_changed(self):
        """Re-render the analysis image as grayscale or colour when the
        grayscale checkbox is toggled."""
        fname = self.teImage.toPlainText()
        log.debug("grayscale toggled for image: %s" % fname)
        img = cv2.imread(fname)
        if self.chckGrayscale.isChecked():
            # convert image to grayscale
            pil_image = Image.open(fname).convert("L")

            # convert grayscale image to numpy array
            image_array = np.array(pil_image, "uint8")

            # handle_image_data is fed 3-channel BGR frames everywhere
            # else in this class; the raw 2-D grayscale array appears to
            # be what crashed here (the old FIXME), so expand it back to
            # 3 channels before handing it over
            bgr = cv2.cvtColor(image_array, cv2.COLOR_GRAY2BGR)
            self.img_widget_img_analysis.handle_image_data(bgr)
        else:
            self.img_widget_img_analysis.handle_image_data(img)

    def highlight_faces_checkbox_changed(self):
        """Slot: record the new state of the highlight-faces checkbox."""
        if self.chkHighlightFaces.isChecked():
            log.debug("highlight faces: enabled")
        else:
            log.debug("highlight faces: disabled")

    def choose_classifier_file(self):
        """Let the user choose a Haar classifier xml for video analysis."""
        fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
        log.info("chose classfier xml file: %s" % fname[0])
        self.teClassifierXML.setText(fname[0])

    def choose_image_for_analysis(self):
        """Let the user choose an image file and display it in the
        image-analysis widget."""
        fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
        log.info("chose imagefile: %s, for analysis" % fname[0])
        self.teImage.setText(fname[0])

        img = cv2.imread(fname[0])
        self.img_widget_img_analysis.handle_image_data(img)

    def start_capture_for_video_analysis(self):
        """Begin streaming frames from the camera into the video widget."""
        log.debug("start video capture")
        self.vid_capture.start()

    def stop_capture_for_video_analysis(self):
        """Stop camera capture and clear the video widget."""
        # BUGFIX: this previously logged "start video capture"
        log.debug("stop video capture")
        self.vid_capture.stop()
        self.img_widget_vid_analysis.reset()

    def detect_face_in_image_data(self, image_data):
        """
        function detects faces in image data,
        draws rectangle for faces in image data,
        and returns this updated image data with highlighted face/s
        """
        self._red = (0, 0, 255)
        self._width = 2
        self._min_size = (30, 30)

        # haarclassifiers work better in black and white
        gray_image = cv2.cvtColor(image_data, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.equalizeHist(gray_image)

        # build the classifier once and reuse it: the old code rebuilt a
        # CascadeClassifier for every frame, and its path string literal
        # was broken (a mismatched quote left a literal '"' and a line
        # continuation inside the path)
        if getattr(self, 'classifier', None) is None:
            face_cascade_xml = os.path.join(
                DIRPATH, 'cascades', 'haarcascades_cuda',
                'haarcascade_frontalface_default.xml')
            self.classifier = cv2.CascadeClassifier(face_cascade_xml)

        faces = self.classifier.detectMultiScale(gray_image,
                                                 scaleFactor=1.3,
                                                 minNeighbors=4,
                                                 flags=cv2.CASCADE_SCALE_IMAGE,
                                                 minSize=self._min_size)

        for (x, y, w, h) in faces:
            cv2.rectangle(image_data,
                          (x, y),
                          (x+w, y+h),
                          self._red,
                          self._width)

        return image_data

    def process_image_data_from_camera(self, image_data):
        """Slot: optionally highlight faces on an incoming camera frame,
        then display it."""
        if self.chkHighlightFaces.isChecked():
            image_data = self.detect_face_in_image_data(image_data)
        self.img_widget_vid_analysis.handle_image_data(image_data)

    def about(self):
        """Show the About dialog."""
        ad = AboutDialog()
        ad.display()

    def show_preferences(self):
        """Show the Preferences dialog."""
        log.debug("showing preferences dialog")
        pd = PrefsDialog()
        pd.display()

    def setup_tray_menu(self):
        """Create the system tray icon with Show/Hide/Exit actions."""

        # setting up QSystemTrayIcon
        self.tray_icon = QSystemTrayIcon(self)
        self.tray_icon.setIcon(QIcon("./images/robot_icon.png"))

        # tray actions
        show_action = QAction("Show", self)
        quit_action = QAction("Exit", self)
        hide_action = QAction("Hide", self)

        # action handlers
        show_action.triggered.connect(self.show)
        hide_action.triggered.connect(self.hide)
        quit_action.triggered.connect(qApp.quit)

        # tray menu
        tray_menu = QMenu()
        tray_menu.addAction(show_action)
        tray_menu.addAction(hide_action)
        tray_menu.addAction(quit_action)
        self.tray_icon.setContextMenu(tray_menu)
        self.tray_icon.show()

    def closeEvent(self, event):
        """Intercept window close: minimize to tray instead of quitting,
        and stop the robot thread."""
        # NOTE(review): the mouth thread started in __init__ is never
        # stopped here — confirm whether Mouth needs a matching
        # stop()/join() on shutdown.
        try:
            event.ignore()
            self.hide()
            self.tray_icon.showMessage(
                "RoboVision",
                "RoboVision was minimized to Tray",
                QSystemTrayIcon.Information,
                2000
            )
            self.robot.stop()
            self.robot.join()
        except Exception as exp:
            log.warning("app close exp: %s" % str(exp))

    def ok_pressed(self):
        """Demo handler showing an informational message box."""
        log.debug("[AppWindow] :: ok")
        self.show_msgbox("AppWindow", "Its ok")

    def show_msgbox(self, title, text):
        """
        Function for showing error/info message box
        """
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText(text)
        msg.setWindowTitle(title)
        msg.setStandardButtons(QMessageBox.Ok)

        retval = msg.exec_()
        log.debug("Value of pressed message box button: %s" % retval)
345 |
346 |
347 | ##############################################################################
348 | # #
349 | # MAIN #
350 | # #
351 | ##############################################################################
if __name__ == '__main__':
    # Build the Qt application, show the main window, and run the
    # event loop until the user exits.
    qt_app = QApplication(sys.argv)
    main_window = AppWindow()
    main_window.resize(1240, 820)
    main_window.show()
    sys.exit(qt_app.exec_())
359 |
--------------------------------------------------------------------------------
/about_dialog.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | Dialog
4 |
5 |
6 | Qt::WindowModal
7 |
8 |
9 |
10 | 0
11 | 0
12 | 400
13 | 250
14 |
15 |
16 |
17 |
18 | 0
19 | 0
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 | 255
29 | 255
30 | 255
31 |
32 |
33 |
34 |
35 |
36 |
37 | 57
38 | 57
39 | 86
40 |
41 |
42 |
43 |
44 |
45 |
46 | 85
47 | 85
48 | 129
49 |
50 |
51 |
52 |
53 |
54 |
55 | 71
56 | 71
57 | 107
58 |
59 |
60 |
61 |
62 |
63 |
64 | 28
65 | 28
66 | 43
67 |
68 |
69 |
70 |
71 |
72 |
73 | 38
74 | 38
75 | 57
76 |
77 |
78 |
79 |
80 |
81 |
82 | 255
83 | 255
84 | 255
85 |
86 |
87 |
88 |
89 |
90 |
91 | 255
92 | 255
93 | 255
94 |
95 |
96 |
97 |
98 |
99 |
100 | 255
101 | 255
102 | 255
103 |
104 |
105 |
106 |
107 |
108 |
109 | 0
110 | 0
111 | 0
112 |
113 |
114 |
115 |
116 |
117 |
118 | 57
119 | 57
120 | 86
121 |
122 |
123 |
124 |
125 |
126 |
127 | 0
128 | 0
129 | 0
130 |
131 |
132 |
133 |
134 |
135 |
136 | 28
137 | 28
138 | 43
139 |
140 |
141 |
142 |
143 |
144 |
145 | 255
146 | 255
147 | 220
148 |
149 |
150 |
151 |
152 |
153 |
154 | 0
155 | 0
156 | 0
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 | 255
166 | 255
167 | 255
168 |
169 |
170 |
171 |
172 |
173 |
174 | 57
175 | 57
176 | 86
177 |
178 |
179 |
180 |
181 |
182 |
183 | 85
184 | 85
185 | 129
186 |
187 |
188 |
189 |
190 |
191 |
192 | 71
193 | 71
194 | 107
195 |
196 |
197 |
198 |
199 |
200 |
201 | 28
202 | 28
203 | 43
204 |
205 |
206 |
207 |
208 |
209 |
210 | 38
211 | 38
212 | 57
213 |
214 |
215 |
216 |
217 |
218 |
219 | 255
220 | 255
221 | 255
222 |
223 |
224 |
225 |
226 |
227 |
228 | 255
229 | 255
230 | 255
231 |
232 |
233 |
234 |
235 |
236 |
237 | 255
238 | 255
239 | 255
240 |
241 |
242 |
243 |
244 |
245 |
246 | 0
247 | 0
248 | 0
249 |
250 |
251 |
252 |
253 |
254 |
255 | 57
256 | 57
257 | 86
258 |
259 |
260 |
261 |
262 |
263 |
264 | 0
265 | 0
266 | 0
267 |
268 |
269 |
270 |
271 |
272 |
273 | 28
274 | 28
275 | 43
276 |
277 |
278 |
279 |
280 |
281 |
282 | 255
283 | 255
284 | 220
285 |
286 |
287 |
288 |
289 |
290 |
291 | 0
292 | 0
293 | 0
294 |
295 |
296 |
297 |
298 |
299 |
300 |
301 |
302 | 28
303 | 28
304 | 43
305 |
306 |
307 |
308 |
309 |
310 |
311 | 57
312 | 57
313 | 86
314 |
315 |
316 |
317 |
318 |
319 |
320 | 85
321 | 85
322 | 129
323 |
324 |
325 |
326 |
327 |
328 |
329 | 71
330 | 71
331 | 107
332 |
333 |
334 |
335 |
336 |
337 |
338 | 28
339 | 28
340 | 43
341 |
342 |
343 |
344 |
345 |
346 |
347 | 38
348 | 38
349 | 57
350 |
351 |
352 |
353 |
354 |
355 |
356 | 28
357 | 28
358 | 43
359 |
360 |
361 |
362 |
363 |
364 |
365 | 255
366 | 255
367 | 255
368 |
369 |
370 |
371 |
372 |
373 |
374 | 28
375 | 28
376 | 43
377 |
378 |
379 |
380 |
381 |
382 |
383 | 57
384 | 57
385 | 86
386 |
387 |
388 |
389 |
390 |
391 |
392 | 57
393 | 57
394 | 86
395 |
396 |
397 |
398 |
399 |
400 |
401 | 0
402 | 0
403 | 0
404 |
405 |
406 |
407 |
408 |
409 |
410 | 57
411 | 57
412 | 86
413 |
414 |
415 |
416 |
417 |
418 |
419 | 255
420 | 255
421 | 220
422 |
423 |
424 |
425 |
426 |
427 |
428 | 0
429 | 0
430 | 0
431 |
432 |
433 |
434 |
435 |
436 |
437 |
438 | About RoboVision
439 |
440 |
441 | 1.000000000000000
442 |
443 |
444 | false
445 |
446 |
447 | true
448 |
449 |
450 |
451 |
452 | 30
453 | 190
454 | 341
455 | 32
456 |
457 |
458 |
459 | Qt::Horizontal
460 |
461 |
462 | QDialogButtonBox::Cancel|QDialogButtonBox::Ok
463 |
464 |
465 |
466 |
467 |
468 | 10
469 | 30
470 | 381
471 | 171
472 |
473 |
474 |
475 |
476 | 13
477 |
478 |
479 |
480 | RoboVision is a smart vision camera with AI and machine learning capabilities. Under Research and Development by Weavebytes.
481 |
482 |
483 | true
484 |
485 |
486 |
487 |
488 |
489 | 150
490 | 10
491 | 181
492 | 61
493 |
494 |
495 |
496 |
497 |
498 |
499 |
500 |
501 | 85
502 | 170
503 | 255
504 |
505 |
506 |
507 |
508 |
509 |
510 |
511 |
512 | 85
513 | 170
514 | 255
515 |
516 |
517 |
518 |
519 |
520 |
521 |
522 |
523 | 42
524 | 85
525 | 127
526 |
527 |
528 |
529 |
530 |
531 |
532 |
533 |
534 | 20
535 | 75
536 | true
537 | true
538 |
539 |
540 |
541 | RoboVision
542 |
543 |
544 |
545 |
546 |
547 | 20
548 | 10
549 | 68
550 | 17
551 |
552 |
553 |
554 |
555 |
556 |
557 |
558 |
559 |
560 |
561 | buttonBox
562 | accepted()
563 | Dialog
564 | accept()
565 |
566 |
567 | 248
568 | 254
569 |
570 |
571 | 157
572 | 274
573 |
574 |
575 |
576 |
577 | buttonBox
578 | rejected()
579 | Dialog
580 | reject()
581 |
582 |
583 | 316
584 | 260
585 |
586 |
587 | 286
588 | 274
589 |
590 |
591 |
592 |
593 |
594 |
--------------------------------------------------------------------------------
/prefs_dialog.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | PrefsDialog
4 |
5 |
6 |
7 | 0
8 | 0
9 | 508
10 | 430
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 | 255
20 | 255
21 | 255
22 |
23 |
24 |
25 |
26 |
27 |
28 | 85
29 | 85
30 | 127
31 |
32 |
33 |
34 |
35 |
36 |
37 | 213
38 | 234
39 | 255
40 |
41 |
42 |
43 |
44 |
45 |
46 | 149
47 | 202
48 | 255
49 |
50 |
51 |
52 |
53 |
54 |
55 | 42
56 | 85
57 | 127
58 |
59 |
60 |
61 |
62 |
63 |
64 | 56
65 | 113
66 | 170
67 |
68 |
69 |
70 |
71 |
72 |
73 | 255
74 | 255
75 | 255
76 |
77 |
78 |
79 |
80 |
81 |
82 | 255
83 | 255
84 | 255
85 |
86 |
87 |
88 |
89 |
90 |
91 | 255
92 | 255
93 | 255
94 |
95 |
96 |
97 |
98 |
99 |
100 | 255
101 | 255
102 | 255
103 |
104 |
105 |
106 |
107 |
108 |
109 | 85
110 | 85
111 | 127
112 |
113 |
114 |
115 |
116 |
117 |
118 | 0
119 | 0
120 | 0
121 |
122 |
123 |
124 |
125 |
126 |
127 | 170
128 | 212
129 | 255
130 |
131 |
132 |
133 |
134 |
135 |
136 | 255
137 | 255
138 | 220
139 |
140 |
141 |
142 |
143 |
144 |
145 | 0
146 | 0
147 | 0
148 |
149 |
150 |
151 |
152 |
153 |
154 |
155 |
156 | 255
157 | 255
158 | 255
159 |
160 |
161 |
162 |
163 |
164 |
165 | 85
166 | 85
167 | 127
168 |
169 |
170 |
171 |
172 |
173 |
174 | 213
175 | 234
176 | 255
177 |
178 |
179 |
180 |
181 |
182 |
183 | 149
184 | 202
185 | 255
186 |
187 |
188 |
189 |
190 |
191 |
192 | 42
193 | 85
194 | 127
195 |
196 |
197 |
198 |
199 |
200 |
201 | 56
202 | 113
203 | 170
204 |
205 |
206 |
207 |
208 |
209 |
210 | 255
211 | 255
212 | 255
213 |
214 |
215 |
216 |
217 |
218 |
219 | 255
220 | 255
221 | 255
222 |
223 |
224 |
225 |
226 |
227 |
228 | 255
229 | 255
230 | 255
231 |
232 |
233 |
234 |
235 |
236 |
237 | 255
238 | 255
239 | 255
240 |
241 |
242 |
243 |
244 |
245 |
246 | 85
247 | 85
248 | 127
249 |
250 |
251 |
252 |
253 |
254 |
255 | 0
256 | 0
257 | 0
258 |
259 |
260 |
261 |
262 |
263 |
264 | 170
265 | 212
266 | 255
267 |
268 |
269 |
270 |
271 |
272 |
273 | 255
274 | 255
275 | 220
276 |
277 |
278 |
279 |
280 |
281 |
282 | 0
283 | 0
284 | 0
285 |
286 |
287 |
288 |
289 |
290 |
291 |
292 |
293 | 42
294 | 85
295 | 127
296 |
297 |
298 |
299 |
300 |
301 |
302 | 85
303 | 85
304 | 127
305 |
306 |
307 |
308 |
309 |
310 |
311 | 213
312 | 234
313 | 255
314 |
315 |
316 |
317 |
318 |
319 |
320 | 149
321 | 202
322 | 255
323 |
324 |
325 |
326 |
327 |
328 |
329 | 42
330 | 85
331 | 127
332 |
333 |
334 |
335 |
336 |
337 |
338 | 56
339 | 113
340 | 170
341 |
342 |
343 |
344 |
345 |
346 |
347 | 42
348 | 85
349 | 127
350 |
351 |
352 |
353 |
354 |
355 |
356 | 255
357 | 255
358 | 255
359 |
360 |
361 |
362 |
363 |
364 |
365 | 42
366 | 85
367 | 127
368 |
369 |
370 |
371 |
372 |
373 |
374 | 85
375 | 85
376 | 127
377 |
378 |
379 |
380 |
381 |
382 |
383 | 85
384 | 85
385 | 127
386 |
387 |
388 |
389 |
390 |
391 |
392 | 0
393 | 0
394 | 0
395 |
396 |
397 |
398 |
399 |
400 |
401 | 85
402 | 170
403 | 255
404 |
405 |
406 |
407 |
408 |
409 |
410 | 255
411 | 255
412 | 220
413 |
414 |
415 |
416 |
417 |
418 |
419 | 0
420 | 0
421 | 0
422 |
423 |
424 |
425 |
426 |
427 |
428 |
429 | Preferences
430 |
431 |
432 |
433 |
434 | 20
435 | 380
436 | 481
437 | 32
438 |
439 |
440 |
441 | Qt::Horizontal
442 |
443 |
444 | QDialogButtonBox::Cancel|QDialogButtonBox::Ok
445 |
446 |
447 |
448 |
449 |
450 | 10
451 | 10
452 | 491
453 | 351
454 |
455 |
456 |
457 |
458 |
459 |
460 |
461 |
462 | 39
463 | 39
464 | 58
465 |
466 |
467 |
468 |
469 |
470 |
471 |
472 |
473 | 39
474 | 39
475 | 58
476 |
477 |
478 |
479 |
480 |
481 |
482 |
483 |
484 | 39
485 | 39
486 | 58
487 |
488 |
489 |
490 |
491 |
492 |
493 |
494 | 0
495 |
496 |
497 |
498 | General
499 |
500 |
501 |
502 |
503 | 10
504 | 20
505 | 131
506 | 16
507 |
508 |
509 |
510 |
511 | 50
512 | false
513 |
514 |
515 |
516 | Your nickname:
517 |
518 |
519 |
520 |
521 |
522 | 140
523 | 10
524 | 331
525 | 31
526 |
527 |
528 |
529 |
530 |
531 |
532 | Dataset
533 |
534 |
535 |
536 |
537 | 10
538 | 160
539 | 471
540 | 31
541 |
542 |
543 |
544 |
545 |
546 |
547 | 10
548 | 140
549 | 311
550 | 16
551 |
552 |
553 |
554 |
555 | 50
556 | false
557 |
558 |
559 |
560 | Directory For Output Files
561 |
562 |
563 |
564 |
565 |
566 | 400
567 | 210
568 | 71
569 | 31
570 |
571 |
572 |
573 |
574 | 50
575 | false
576 |
577 |
578 |
579 | Choose
580 |
581 |
582 |
583 |
584 |
585 | 10
586 | 40
587 | 281
588 | 22
589 |
590 |
591 |
592 | Play tone for incoming message
593 |
594 |
595 |
596 |
597 |
598 | 10
599 | 90
600 | 321
601 | 22
602 |
603 |
604 |
605 | Play tone when file download is complete
606 |
607 |
608 |
609 |
610 |
611 | Scheduler
612 |
613 |
614 |
615 |
616 | Debug
617 |
618 |
619 |
620 |
621 | 20
622 | 20
623 | 281
624 | 22
625 |
626 |
627 |
628 | Enable debug mode
629 |
630 |
631 |
632 |
633 |
634 |
635 |
636 |
637 | buttonBox
638 | accepted()
639 | PrefsDialog
640 | accept()
641 |
642 |
643 | 248
644 | 254
645 |
646 |
647 | 157
648 | 274
649 |
650 |
651 |
652 |
653 | buttonBox
654 | rejected()
655 | PrefsDialog
656 | reject()
657 |
658 |
659 | 316
660 | 260
661 |
662 |
663 | 286
664 | 274
665 |
666 |
667 |
668 |
669 |
670 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 |     <one line to give the program's name and a brief idea of what it does.>
635 |     Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 |     along with this program.  If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 |     <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------