├── .github
│   └── FUNDING.yml
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── environment.yml
├── media
│   ├── EnvSetup.png
│   ├── EnvironmentActivate.png
│   ├── csv generator.png
│   ├── dlib.png
│   ├── extra notes.png
│   ├── greet.png
│   ├── performance booster.png
│   ├── remarks.png
│   ├── runcmd.png
│   ├── step 1.png
│   ├── step 2.png
│   ├── step 3.png
│   ├── step 4.png
│   └── step 5.png
├── notebooks
│   └── CSVAnnotationGenerator.ipynb
└── src
    ├── CSVAnnotationGenerator.py
    ├── main.py
    └── webcam_test.py

--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
## Details to be updated in a while.

--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
  advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
  address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at hrishabhsuraj52@gmail.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
I love your input! I want to make contributing to this project as easy and transparent as possible, whether it's:

* Reporting a bug
* Discussing the current state of the code
* Submitting a fix
* Proposing new features
* Becoming a maintainer

## Note

When contributing to this repository, please first discuss the change you wish to make with me via an issue,
email, or any other method.

Please note we have a code of conduct; please follow it in all your interactions with the project.

## Pull Request Process

1. Ensure any install or build dependencies are removed before the end of the layer when doing a
   build.
2. Update the README.md with details of changes to the interface; this includes new environment
   variables, useful file locations, etc.
3. Increase the version numbers in any example files and the README.md to the new version that this
   Pull Request would represent.

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc.
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.


  This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

  0. Additional Definitions.

  As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

  "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

  An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

  A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

  The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

  The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

  1. Exception to Section 3 of the GNU GPL.

  You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

  2. Conveying Modified Versions.

  If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

   a) under this License, provided that you make a good faith effort to
   ensure that, in the event an Application does not supply the
   function or data, the facility still operates, and performs
   whatever part of its purpose remains meaningful, or

   b) under the GNU GPL, with none of the additional permissions of
   this License applicable to that copy.

  3. Object Code Incorporating Material from Library Header Files.

  The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

   a) Give prominent notice with each copy of the object code that the
   Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the object code with a copy of the GNU GPL and this license
   document.

  4. Combined Works.

  You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

   a) Give prominent notice with each copy of the Combined Work that
   the Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the Combined Work with a copy of the GNU GPL and this license
   document.

   c) For a Combined Work that displays copyright notices during
   execution, include the copyright notice for the Library among
   these notices, as well as a reference directing the user to the
   copies of the GNU GPL and this license document.

   d) Do one of the following:

       0) Convey the Minimal Corresponding Source under the terms of this
       License, and the Corresponding Application Code in a form
       suitable for, and under terms that permit, the user to
       recombine or relink the Application with a modified version of
       the Linked Version to produce a modified Combined Work, in the
       manner specified by section 6 of the GNU GPL for conveying
       Corresponding Source.

       1) Use a suitable shared library mechanism for linking with the
       Library. A suitable mechanism is one that (a) uses at run time
       a copy of the Library already present on the user's computer
       system, and (b) will operate properly with a modified version
       of the Library that is interface-compatible with the Linked
       Version.

   e) Provide Installation Information, but only if you would otherwise
   be required to provide such information under section 6 of the
   GNU GPL, and only to the extent that such information is
   necessary to install and execute a modified version of the
   Combined Work produced by recombining or relinking the
   Application with a modified version of the Linked Version. (If
   you use option 4d0, the Installation Information must accompany
   the Minimal Corresponding Source and Corresponding Application
   Code. If you use option 4d1, you must provide the Installation
   Information in the manner specified by section 6 of the GNU GPL
   for conveying Corresponding Source.)

  5. Combined Libraries.

  You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

   a) Accompany the combined library with a copy of the same work based
   on the Library, uncombined with any other library facilities,
   conveyed under the terms of this License.

   b) Give prominent notice with the combined library that part of it
   is a work based on the Library, and explaining where to find the
   accompanying uncombined form of the same work.

  6. Revised Versions of the GNU Lesser General Public License.

  The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

  Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

  If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
![greet](media/greet.png)
## Demo Video
Link

## Cognitive Annotation Tool
#### About the Software
* An open-source annotation tool that frees researchers from manually annotating images for computer vision tasks such as object detection and object localisation, by annotating the webcam feed automatically.
* The tool creates annotations in Pascal VOC format, as one .xml file per image, and also exports the annotations as a CSV file.
* The generated annotations can be used directly to train machine learning/deep learning models with the Dlib C++ machine learning library, or RetinaNet-based object detection models, for custom object detection. For training a RetinaNet-based model, the user can refer to this Github Repo.
* The images and their corresponding annotation XML files are saved in a folder named by the user in the GUI prompt during the initialisation of the software.
* If the user wants to train a multi-stage object detector such as Fast RCNN/Faster RCNN or a RetinaNet model for custom object detection, this jupyter notebook can be used to convert the Pascal VOC annotation xml file(s).
* The software can annotate images in bulk, as needed to build high-quality custom machine learning/deep learning object detectors from scratch or via transfer learning.
* This tool can act as an automated version of ImgLab.

## Windows Executable
Download Link :: Coming soon.

## Execute using terminal

### Prerequisite
1. Anaconda/Miniconda installed on a Windows/Linux platform (last checked to work on Windows 10)
2. An integrated webcam (preferably) or an external webcam; check this via ```$ python webcam_test.py```

#### Environment Configuration
1. Create a new environment using the environment.yml (e.g. ```conda env create -f environment.yml```)
![Introduction](media/EnvSetup.png)

2. Activate the environment using the following command (e.g. ```conda activate CognitiveAnnotationTool```)
![Activate](media/EnvironmentActivate.png)

#### Launch the GUI Annotation Tool
![run](media/runcmd.png)
Upon successful execution of the script, a GUI should appear as depicted below.


#### Instructions to follow to annotate custom images using GUI.

Once the GUI of the software appears, follow these steps to get started with the annotation.
![step1](media/step%201.png)
![step2](media/step%202.png)
![step3](media/step%203.png)
![step4](media/step%204.png)
![step5](media/step%205.png)

#### Remarks:
![remarks](media/remarks.png)

#### Extra Notes.

###### 1. Editing the annotation.xml file
![extra](media/extra%20notes.png)

###### A. CSV generator
* XML to CSV Converter(Notebook)
* XML to CSV Converter
![csv generator](media/csv%20generator.png)

###### B. Pascal VOC XML generator
Coming soon: With TFOD compatibility (Tensorflow Object Detection toolbox).
After you execute main.py, you get a single core xml file, which you can easily split into multiple xml files with a few lines of code, for example as sketched below. Nevertheless, I will try to upload the helper script soon :)
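A minimal sketch of that splitting, assuming the imglab-style layout main.py writes (`<image file='...'>` entries carrying `<box top/left/width/height>` children). The `split_annotations` name, the output layout and the paths are illustrative, not part of this repository:

```python
import os
import xml.etree.ElementTree as ET

def split_annotations(core_xml, out_dir):
    """Split a core annotation.xml into one small XML file per image."""
    os.makedirs(out_dir, exist_ok=True)
    root = ET.parse(core_xml).getroot()
    for image in root.iter('image'):
        # build one <annotation> document per <image> entry
        doc = ET.Element('annotation')
        ET.SubElement(doc, 'filename').text = image.get('file')
        for box in image.iter('box'):
            obj = ET.SubElement(doc, 'object')
            bb = ET.SubElement(obj, 'bndbox')
            left, top = int(box.get('left')), int(box.get('top'))
            # convert (top, left, width, height) into corner coordinates
            ET.SubElement(bb, 'xmin').text = str(left)
            ET.SubElement(bb, 'ymin').text = str(top)
            ET.SubElement(bb, 'xmax').text = str(left + int(box.get('width')))
            ET.SubElement(bb, 'ymax').text = str(top + int(box.get('height')))
        name = os.path.splitext(os.path.basename(image.get('file')))[0] + '.xml'
        ET.ElementTree(doc).write(os.path.join(out_dir, name))

split_annotations('annotation.xml', 'per_image_annotations')
```
Note that this emits only a minimal Pascal VOC-style skeleton (filename plus bounding boxes); fields such as image size would still need to be filled in for strict VOC consumers.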
###### C. Yolo versions compatible Annotation generator
Coming soon.

###### D. DLIB DNN-MOD/HOG+SVM COMPATIBLE
Dlib DNN_MMOD
Dlib HOG+SVM

![dlib](media/dlib.png)

###### 2. Steps to follow to get best results from the software
![Performance](media/performance%20booster.png)

#### Python Library on PyPi
```Coming soon!```

#### FAQs.
1. Reason for not using cv2.MultiTracker: I tried and experimented with the various tracking algorithms available in OpenCV's multi-object tracker, but Dlib's correlation tracker outperformed each one of them. If you wish to experiment further, feel free to redirect yourself to the pre-release, where I have already incorporated OpenCV's various multi-object trackers instead of Dlib's correlation tracker.

#### Made with Dlib
Dlib

#### Future Work (contribution appreciated :))
1. Replace the Python backend of the annotation tool with C++ for better realtime performance.
2. Enable support for the annotation formats used by other common object detection architectures, such as Yolo and the architectures in Tensorflow Object Detection, besides the currently supported RetinaNet and Dlib HOG+SVM/DNN_mmod object detector algorithms.

## License
This project is licensed under the LGPLv3.0 License - see the [LICENSE](LICENSE) file for details


### Citation
If you find the tool helpful, please cite my paper.
```My name is not in the BibTex entry (though I am the first author) due to Google Scholar not being able to parse mononymous names :(```
```
@inproceedings{kool2018visual,
  title={Visual machine intelligence for home automation},
  author={Kool, Ish and Kumar, Dharmendra and Barma, Shovan and others},
  booktitle={2018 3rd International Conference on Internet of Things: Smart Innovation and Usages (IoT-SIU)},
  pages={1--6},
  year={2018},
  organization={IEEE}
}
```

--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
name: CognitiveAnnotationTool
channels:
  - conda-forge/label/cf201901
  - defaults
dependencies:
  - blas=1.0=mkl
  - ca-certificates=2018.11.29=ha4d7672_0
  - certifi=2018.8.24=py35_1001
  - dlib=19.9=np19py35_0
  - icc_rt=2019.0.0=h0cc432a_1
  - icu=58.2=ha925a31_3
  - intel-openmp=2019.4=245
  - jpeg=9c=hfa6e2cd_1001
  - libpng=1.6.34=h7602738_2
  - libtiff=4.0.9=h36446d0_1002
  - libwebp=0.5.2=vc14_7
  - mkl=2018.0.3=1
  - numpy=1.9.3=py35h0e52b17_2
  - opencv=3.4.1=py35_200
  - openssl=1.0.2o=vc14_0
  - pip=10.0.1=py35_0
  - python=3.5.6=he025d50_0
  - qt=5.6.2=h2639256_8
  - setuptools=40.2.0=py35_0
  - sqlite=3.26.0=hfa6e2cd_1000
  - vc=14.2=h21ff451_1
  - vs2015_runtime=14.27.29016=h5e58377_2
  - wheel=0.36.2=pyhd3eb1b0_0
  - wincertstore=0.2=py35hfebbdb8_0
  - zlib=1.2.11=vc14_0
prefix: C:\Users\Jarvis\.conda\envs\CognitiveAnnotationTool

--------------------------------------------------------------------------------
/media/EnvSetup.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/EnvSetup.png

--------------------------------------------------------------------------------
/media/EnvironmentActivate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/EnvironmentActivate.png

--------------------------------------------------------------------------------
/media/csv generator.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/csv generator.png

--------------------------------------------------------------------------------
/media/dlib.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/dlib.png

--------------------------------------------------------------------------------
/media/extra notes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/extra notes.png

--------------------------------------------------------------------------------
/media/greet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/greet.png

--------------------------------------------------------------------------------
/media/performance booster.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/performance booster.png

--------------------------------------------------------------------------------
/media/remarks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/remarks.png

--------------------------------------------------------------------------------
/media/runcmd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/runcmd.png

--------------------------------------------------------------------------------
/media/step 1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/step 1.png

--------------------------------------------------------------------------------
/media/step 2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/step 2.png

--------------------------------------------------------------------------------
/media/step 3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/step 3.png

--------------------------------------------------------------------------------
/media/step 4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/step 4.png

--------------------------------------------------------------------------------
/media/step 5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Suraj520/CognitiveAnnotationTool/7d2924d4a418feb53dd37298e60671d2d596be42/media/step 5.png

--------------------------------------------------------------------------------
/notebooks/CSVAnnotationGenerator.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import scipy.ndimage.interpolation as snd\n",
    "import matplotlib.pyplot as plt\n",
    "import glob\n",
    "import os\n",
    "#sudo pip install bs4 in case you hit an import error\n",
    "from bs4 import BeautifulSoup\n",
    "\n",
    "#Author: Suraj\n",
    "#Contact No: +918486656592\n",
    "#Email: hrishabhsuraj52@gmail.com\n",
    "#Website: https://suraj.glitch.me"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#function to parse an xml file so that the data between specific tags can be searched\n",
    "def xml_parse(file):\n",
    "    f = open(file)\n",
    "    xml = f.read()\n",
    "    soup = BeautifulSoup(xml, 'html.parser')\n",
    "    return soup\n",
    "#Please provide the class name down here:\n",
    "#Eg: Dog, Cat, Hand, BasketBall, Car, Plane or any custom class\n",
    "ClassName = \"ClassName\"\n",
    "\n",
    "#Author: Suraj\n",
    "#Contact No: +918486656592\n",
    "#Email: hrishabhsuraj52@gmail.com\n",
    "#Website: https://suraj.glitch.me"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import xml.etree.ElementTree as ET\n",
    "#Provide the path to the xml file which you want to convert down here...\n",
    "tree = ET.parse('/home/dl/Dataset/annotation.xml')\n",
    "root = tree.getroot()\n",
    "labels = []\n",
    "name = []\n",
    "print('Bounding Box:')\n",
    "for element in root.iter(tag='image'):\n",
    "    name.append(element.attrib)\n",
    "    print(element.attrib)\n",
    "\n",
    "for element in root.iter(tag='box'):\n",
    "    labels.append(element.attrib)\n",
    "    print(element.attrib)\n",
    "\n",
    "\n",
    "#Author: Suraj\n",
    "#Contact No: +918486656592\n",
    "#Email: hrishabhsuraj52@gmail.com\n",
    "#Website: https://suraj.glitch.me"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Extracting the image names and box coordinates\n",
    "#Note: this assumes one box per image, matching how main.py writes the file in single-object mode\n",
    "file_names = []\n",
    "coordinates = []\n",
    "for i in range(len(labels)):\n",
    "    file_names.append(name[i]['file'])\n",
    "    #read the attributes directly from the parsed dict; splitting its string repr is fragile and order-dependent\n",
    "    bbox = (labels[i]['left'], labels[i]['top'], labels[i]['width'], labels[i]['height'])\n",
    "    print(bbox)\n",
    "    coordinates.append(bbox)\n",
    "\n",
    "#Expected verbose after successful run of this cell\n",
    "#For eg: ('106', '328', '224', '348')\n",
    "\n",
    "\n",
    "#Author: Suraj\n",
    "#Contact No: +918486656592\n",
    "#Email: hrishabhsuraj52@gmail.com\n",
    "#Website: https://suraj.glitch.me"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "features = []\n",
    "labels_matrix = []\n",
    "for counter in range(len(file_names)):\n",
    "    print(file_names[counter])\n",
    "    for file in glob.glob(file_names[counter]):\n",
    "        #convert (x1, y1, width, height) into (x1, y1, x2, y2)\n",
    "        coordinates[counter] = [coordinates[counter][0], coordinates[counter][1], str(int(coordinates[counter][0]) + int(coordinates[counter][2])), str(int(coordinates[counter][3]) + int(coordinates[counter][1]))]\n",
    "        print(coordinates[counter])\n",
    "        labels_matrix.append(coordinates[counter])\n",
    "\n",
    "\n",
    "#Expected verbose after successful run of this cell.\n",
    "#For eg: /home/dl/Download/images_73.jpg\n",
    "#For eg: ['106', '329', '328', '675']\n",
    "\n",
    "#Author: Suraj\n",
    "#Contact No: +918486656592\n",
    "#Email: hrishabhsuraj52@gmail.com\n",
    "#Website: https://suraj.glitch.me"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Please provide the absolute system path where you wish to generate the Faster RCNN compatible label text file.\n",
    "f = open(\"/home/dl/Dataset/annotations.txt\", \"w+\")\n",
    "for i in range(len(labels_matrix)):\n",
    "    f.write(str(file_names[i]) + \",\" + str(labels_matrix[i][0]) + \",\" + str(labels_matrix[i][1]) + \",\" + str(labels_matrix[i][2]) + \",\" + str(labels_matrix[i][3]) + \",\" + str(ClassName) + \"\\n\")\n",
    "print(\"The annotation file in faster RCNN/retinanet compatible text format was successfully generated in the given path!\")\n",
    "f.close()\n",
    "\n",
    "#Author: Suraj\n",
    "#Contact No: +918486656592\n",
    "#Email: hrishabhsuraj52@gmail.com\n",
    "#Website: https://suraj.glitch.me"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}

--------------------------------------------------------------------------------
/src/CSVAnnotationGenerator.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


import numpy as np
import scipy.ndimage.interpolation as snd
import matplotlib.pyplot as plt
import glob
import os
#sudo pip install bs4 in case you hit an import error
from bs4 import BeautifulSoup

#Author: Suraj
#Contact No: +918486656592
#Email: hrishabhsuraj52@gmail.com
#Website: https://suraj.glitch.me


# In[ ]:


#function to parse an xml file so that the data between specific tags can be searched
def xml_parse(file):
    f = open(file)
    xml = f.read()
    soup = BeautifulSoup(xml, 'html.parser')
    return soup
#Please provide the class name down here:
#Eg: Dog, Cat, Hand, BasketBall, Car, Plane or any custom class
ClassName = "ClassName"

#Author: Suraj
#Contact No: +918486656592
#Email: hrishabhsuraj52@gmail.com
#Website: https://suraj.glitch.me


# In[ ]:


import xml.etree.ElementTree as ET
#Provide the path to the xml file which you want to convert down here...
tree = ET.parse('/home/dl/Dataset/annotation.xml')
root = tree.getroot()
labels = []
name = []
print('Bounding Box:')
for element in root.iter(tag='image'):
    name.append(element.attrib)
    print(element.attrib)

for element in root.iter(tag='box'):
    labels.append(element.attrib)
    print(element.attrib)


#Author: Suraj
#Contact No: +918486656592
#Email: hrishabhsuraj52@gmail.com
#Website: https://suraj.glitch.me


# In[ ]:


#Extracting the image names and box coordinates
#Note: this assumes one box per image, matching how main.py writes the file in single-object mode
file_names = []
coordinates = []
for i in range(len(labels)):
    file_names.append(name[i]['file'])
    #read the attributes directly from the parsed dict; splitting its string repr is fragile and order-dependent
    bbox = (labels[i]['left'], labels[i]['top'], labels[i]['width'], labels[i]['height'])
    print(bbox)
    coordinates.append(bbox)

#Expected verbose after successful run of this cell
#For eg: ('106', '328', '224', '348')


#Author: Suraj
#Contact No: +918486656592
#Email: hrishabhsuraj52@gmail.com
#Website: https://suraj.glitch.me


# In[ ]:


features = []
labels_matrix = []
for counter in range(len(file_names)):
    print(file_names[counter])
    for file in glob.glob(file_names[counter]):
        #convert (x1, y1, width, height) into (x1, y1, x2, y2)
        coordinates[counter] = [coordinates[counter][0], coordinates[counter][1], str(int(coordinates[counter][0]) + int(coordinates[counter][2])), str(int(coordinates[counter][3]) + int(coordinates[counter][1]))]
        print(coordinates[counter])
        labels_matrix.append(coordinates[counter])


#Expected verbose after successful run of this cell.
#For eg: /home/dl/Download/images_73.jpg
#For eg: ['106', '329', '328', '675']

#Author: Suraj
#Contact No: +918486656592
#Email: hrishabhsuraj52@gmail.com
#Website: https://suraj.glitch.me


# In[ ]:


#Please provide the absolute system path where you wish to generate the Faster RCNN compatible label text file.
f = open("/home/dl/Dataset/annotations.txt", "w+")
for i in range(len(labels_matrix)):
    f.write(str(file_names[i]) + "," + str(labels_matrix[i][0]) + "," + str(labels_matrix[i][1]) + "," + str(labels_matrix[i][2]) + "," + str(labels_matrix[i][3]) + "," + str(ClassName) + "\n")
print("The annotation file in faster RCNN/retinanet compatible text format was successfully generated in the given path!")
f.close()

#Author: Suraj
#Contact No: +918486656592
#Email: hrishabhsuraj52@gmail.com
#Website: https://suraj.glitch.me


# In[ ]:




# In[ ]:




--------------------------------------------------------------------------------
/src/main.py:
--------------------------------------------------------------------------------
## All rights reserved to Suraj ####
### Please note the code is intentionally left non-optimized to restrict its reproducibility in this exact form; I may provide the optimized code upon your query for a commercial license.

# If you are reviewing this code for assessment for any applied research/dev position, kindly review https://github.com/Suraj520/Python_developer_track with more weightage.

#importing the modules
import dlib
import cv2
import sys
import os
from distutils.dir_util import mkpath
from tkinter import *

#function for drawing the initial bounding boxes
def run(im, multi=False):
    #duplicate the image: one copy for display and one over which the user draws the annotations
    im_disp = im.copy()
    im_draw = im.copy()
    #naming the image window
    window_name = "Window for Drawing Bounding Box"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    #displaying the image window on which the user can draw
    cv2.imshow(window_name, im_draw)
    #lists containing the top-left and bottom-right coordinates drawn on the image
    pts_1 = []
    pts_2 = []
    rects = []
    #the mouse-down event flag is initially False
    run.mouse_down = False
    #callback for annotating the image in two modes: single-object and multiple-object annotation
    def callback(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            if multi == False and len(pts_2) == 1:
                print("Single tracking mode is enabled; can't select multiple objects.")
                return
            run.mouse_down = True
            pts_1.append((x, y))
        elif event == cv2.EVENT_LBUTTONUP and run.mouse_down == True:
            run.mouse_down = False
            pts_2.append((x, y))
            print("Object Coordinates : [{}, {}]".format(pts_1[-1], pts_2[-1]))
        elif event == cv2.EVENT_MOUSEMOVE and run.mouse_down == True:
            im_draw = im.copy()
            cv2.rectangle(im_draw, pts_1[-1], (x, y), (255, 255, 255), 3)
            cv2.imshow(window_name, im_draw)

    print("First press, then release the mouse around the object that you wish to annotate.\nFor multiple object annotation you can repeat the press and release several times.")
    cv2.setMouseCallback(window_name, callback)

    print("To continue with the selected objects to annotate, press P.")
    print("Press esc, followed by pressing the exit button, to quit.")
    while True:
        #use the mouse to draw the rectangular boxes around the image
        window_name_2 = "Objects to be annotated"
        for pt1, pt2 in zip(pts_1, pts_2):
            rects.append([pt1[0], pt2[0], pt1[1], pt2[1]])
            cv2.rectangle(im_disp, pt1, pt2, (255, 255, 255), 3)
        #displaying the selected regions
        cv2.namedWindow(window_name_2, cv2.WINDOW_NORMAL)
        cv2.imshow(window_name_2, im_disp)
        key = cv2.waitKey(30)
        if key == ord('p'):
            #destroy the window at this point
            cv2.destroyAllWindows()
            point = [(tl + br) for tl, br in zip(pts_1, pts_2)]
            corrected_point = check_point(point)
            return corrected_point
        if cv2.waitKey(1) == 27:
            break  # esc to quit
    cv2.destroyAllWindows()
    point = [(tl + br) for tl, br in zip(pts_1, pts_2)]
    corrected_point = check_point(point)

    return corrected_point
#function for normalising the bounding box points collected from the mouse press and release events
def check_point(points_suraj_tool):
    out = []
    for point in points_suraj_tool:
        #order each (x1, y1, x2, y2) so that left < right and top < bottom,
        #as expected by dlib.rectangle(left, top, right, bottom)
        if point[0] < point[2]:
            minx, maxx = point[0], point[2]
        else:
            minx, maxx = point[2], point[0]
        if point[1] < point[3]:
            miny, maxy = point[1], point[3]
        else:
            miny, maxy = point[3], point[1]
        out.append((minx, miny, maxx, maxy))
    return out

#function bound to the <Submit User Name> button; it creates a folder named after
#the user and switches into it so that the images and annotations land there
def SubmitUserName():
    User = str(UserName.get())
    mkpath(User)
    os.chdir(User)

#function bound to the <Capture Bounding Box(s)> button; it opens the webcam,
#writes the dlib/imglab-style XML header and starts the annotation loop
def CapBBox():
    a = 1
    cam = cv2.VideoCapture(0)
    f = open("annotation.xml", "w")
    suraj_list = []
    suraj_list.append("<?xml version='1.0' encoding='ISO-8859-1'?>")
    suraj_list.append("<?xml-stylesheet type='text/xsl' href='image_metadata_stylesheet.xsl'?>")
    suraj_list.append("<dataset>")
    suraj_list.append("<name>Dlib-C++ compatible dataset</name>")
    suraj_list.append("<comment>Created by Suraj - email : hrishabhsuraj52@gmail.com.,linkedin:https://in.linkedin.com/in/suraj52 </comment>")
    suraj_list.append("<images>")
    for i in range(6):
        f.write(suraj_list[i] + "\n")
    print("Press `p` to start with automatic annotation of selected objects ")
    frame_count = 0
    while True:
        User = str(UserName.get())
        retval, img = cam.read()
        #freeze at frame 1 and prompt for the user annotation; then, according to
        #the number of bounding boxes drawn, initialise one tracker per object
        if frame_count == 0:
            points_suraj_tool = run(img, multi=True)
            if not points_suraj_tool:
                print("ERROR: No object to be annotated")
                exit()
            #create the tracker objects
            tracker = [dlib.correlation_tracker() for _ in range(len(points_suraj_tool))]
            #provide each tracker with the initial position of its object
            [tracker[i].start_track(img, dlib.rectangle(*rect)) for i, rect in enumerate(points_suraj_tool)]
            frame_count += 1
        #write out the current frame
        cv2.imwrite(str(User) + "_" + str(a) + ".jpg", img)
        if not retval:
            print("Device not accessible ")
            exit()
        #update the trackers
        annotate = []
        annotate.append("<image file='" + str(User) + "_" + str(a) + ".jpg'>")
        for i in range(len(tracker)):
            tracker[i].update(img)
            #get the position of the object, draw a bounding box around it and display it
            rect = tracker[i].get_position()
            pt1 = (int(rect.left()), int(rect.top()))
            pt2 = (int(rect.right()), int(rect.bottom()))
            cv2.rectangle(img, pt1, pt2, (255, 255, 255), 2)
            print("Object {} Location [{}, {}] \r".format(i, pt1, pt2))
            annotate.append("<box top='{}' left='{}' width='{}' height='{}'/>".format(pt1[1], pt1[0], pt2[0] - pt1[0], pt2[1] - pt1[1]))

        for i in range(len(points_suraj_tool) + 1):
            f.write(annotate[i] + "\n")
        list1 = []
        list1.append("</image>")
        f.write(list1[0] + "\n")
        a = a + 1
        loc = (int(rect.left()), int(rect.top() - 20))
        txt = "Object tracked at [{}, {}]".format(pt1, pt2)
        cv2.putText(img, txt, loc, cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1)
        cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.imshow("Image", img)
        if cv2.waitKey(1) == 27:
            #close the XML document and release the camera on esc
            suraj_list = []
            suraj_list.append("</images>")
            suraj_list.append("</dataset>")
            for i in range(2):
                f.write(suraj_list[i] + "\n")
            cam.release()
            cv2.destroyAllWindows()
            break
    f.close()
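#For reference, a hedged sketch of what the generated annotation.xml looks like,
#assuming a hypothetical user name "User" and one tracked object (values illustrative,
#taken from the example in notebooks/CSVAnnotationGenerator.ipynb):
#
#  <?xml version='1.0' encoding='ISO-8859-1'?>
#  <?xml-stylesheet type='text/xsl' href='image_metadata_stylesheet.xsl'?>
#  <dataset>
#  <name>Dlib-C++ compatible dataset</name>
#  <comment>Created by Suraj - ...</comment>
#  <images>
#  <image file='User_1.jpg'>
#  <box top='106' left='328' width='224' height='348'/>
#  </image>
#  </images>
#  </dataset>
#
#This is the same imglab-style layout that the CSVAnnotationGenerator notebook/script
#expects when it iterates over the 'image' and 'box' tags.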
def Exit():
    sys.exit()

if __name__ == "__main__":
    root = Tk()
    root.title('Cognitive Annotation Tool')
    Label(root, text="Welcome User to the CognitiveAnnotationTool", fg="blue").grid(row=0, column=0)
    Label(root, text="Enter User Name", fg='blue').grid(row=1, column=0)
    #creating a text field for the user name
    UserName = Entry(root, bd=1)
    UserName.grid(row=1, column=1)
    Button(root, text='Submit User Name', command=SubmitUserName, fg='yellow', bg='black').grid(row=3, column=1, sticky=W, pady=4)
    Button(root, text='Capture Bounding Box(s)', command=CapBBox, fg='yellow', bg='black').grid(row=5, column=0, sticky=W, pady=4)
    Button(root, text='Exit', command=Exit, fg='yellow', bg='black').grid(row=18, column=1, sticky=W, pady=4)
    #instructions to use the software
    Label(root, text=">>>>>>>>>>>>>>>>>>>>>>>>Steps to Use the software :>>>>>>>>>>>>>>>>>>>>>>>>", fg='red').grid(row=6, column=0)
    Label(root, text="1. Enter User Name and Click <Submit User Name> Only Once**", fg='blue').grid(row=7, column=0)
    Label(root, text="2. Click Capture Bounding Box(s)", fg='blue').grid(row=8, column=0)
    Label(root, text="3. Select the `Window for Drawing Bounding Box` and draw the bounding box using the mouse around the ROI(s)", fg='blue').grid(row=9, column=0)
    Label(root, text="4. Press 'p' on your keyboard once the BBox(s) are created to start annotation; close the annotation by pressing the esc button", fg='blue').grid(row=10, column=0)
    Label(root, text="Note : **Please move objects in the BBox slowly to create quality annotations.**", fg='red').grid(row=11, column=0)
    Label(root, text="5. Close the software by pressing the exit button on the GUI.", fg='blue').grid(row=12, column=0)
    Label(root, text="**Created by : Suraj : Linkedin: https://www.linkedin.com/in/suraj52/**", fg='red').grid(row=13, column=0)
    Label(root, text="Only for Research Purposes!", fg='black').grid(row=14, column=0)

    root.mainloop()
    SubmitUserName()
#Support if you like the software :)

--------------------------------------------------------------------------------
/src/webcam_test.py:
--------------------------------------------------------------------------------
#webcam display script to check that the webcam works
import cv2

def show_webcam(mirror=False):
    cam = cv2.VideoCapture(0)
    while True:
        ret_val, img = cam.read()
        if mirror:
            img = cv2.flip(img, 1)
        cv2.imshow('my webcam', img)
        if cv2.waitKey(1) == 27:
            break  # esc to quit
    cv2.destroyAllWindows()


def main():
    show_webcam(mirror=True)


if __name__ == '__main__':
    main()

--------------------------------------------------------------------------------
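As a closing usage sketch (not a file in this repository): the annotation.xml written by main.py can be fed to dlib's HOG+SVM detector trainer, which is the Dlib compatibility the README refers to. `dlib.train_simple_object_detector` and `dlib.test_simple_object_detector` are real dlib APIs; the paths and option values below are illustrative assumptions.

```python
import dlib

# training options for the HOG+SVM detector; values here are illustrative
options = dlib.simple_object_detector_training_options()
options.add_left_right_image_flips = True  # augment with mirrored copies
options.C = 5  # SVM regularisation strength; tune for your data

# train on the imglab-style XML produced by main.py (run from the output folder)
dlib.train_simple_object_detector("annotation.xml", "detector.svm", options)

# report precision/recall/average precision on the training set
print(dlib.test_simple_object_detector("annotation.xml", "detector.svm"))
```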