├── .gitignore ├── docs ├── imgs │ ├── level_panel.png │ ├── colorized_depth.png │ ├── block_annotation.png │ ├── mobaxterm_config.png │ └── annotation_tool_info.png ├── readme-assets │ ├── mosaic.png │ ├── cameras.png │ ├── gazeRegions.png │ ├── dmdStructure.png │ ├── environments.png │ └── participants.png ├── issue_bug_template.md ├── issue_feature_template.md ├── setup_windows.md └── setup_linux.md ├── exploreMaterial-tool ├── config_DEx.json ├── statistics.py ├── group_split_material.py ├── README.md ├── DExTool.py ├── vcd4reader.py ├── accessDMDAnn.py └── Tutorial_DEx_(dataset_explorer_tool).ipynb ├── CHANGELOG.md ├── annotation-tool ├── config_drowsiness.json ├── config_gaze.json ├── config_hands.json ├── config_distraction.json ├── config_statics.json ├── config.json ├── setUp.py ├── README.md └── vcd4parser.py └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | dmd/ 2 | dmd_rgb/ -------------------------------------------------------------------------------- /docs/imgs/level_panel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vicomtech/DMD-Driver-Monitoring-Dataset/HEAD/docs/imgs/level_panel.png -------------------------------------------------------------------------------- /docs/imgs/colorized_depth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vicomtech/DMD-Driver-Monitoring-Dataset/HEAD/docs/imgs/colorized_depth.png -------------------------------------------------------------------------------- /docs/readme-assets/mosaic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vicomtech/DMD-Driver-Monitoring-Dataset/HEAD/docs/readme-assets/mosaic.png -------------------------------------------------------------------------------- /docs/imgs/block_annotation.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vicomtech/DMD-Driver-Monitoring-Dataset/HEAD/docs/imgs/block_annotation.png -------------------------------------------------------------------------------- /docs/imgs/mobaxterm_config.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vicomtech/DMD-Driver-Monitoring-Dataset/HEAD/docs/imgs/mobaxterm_config.png -------------------------------------------------------------------------------- /docs/readme-assets/cameras.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vicomtech/DMD-Driver-Monitoring-Dataset/HEAD/docs/readme-assets/cameras.png -------------------------------------------------------------------------------- /docs/imgs/annotation_tool_info.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vicomtech/DMD-Driver-Monitoring-Dataset/HEAD/docs/imgs/annotation_tool_info.png -------------------------------------------------------------------------------- /docs/readme-assets/gazeRegions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vicomtech/DMD-Driver-Monitoring-Dataset/HEAD/docs/readme-assets/gazeRegions.png -------------------------------------------------------------------------------- /docs/readme-assets/dmdStructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vicomtech/DMD-Driver-Monitoring-Dataset/HEAD/docs/readme-assets/dmdStructure.png -------------------------------------------------------------------------------- /docs/readme-assets/environments.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Vicomtech/DMD-Driver-Monitoring-Dataset/HEAD/docs/readme-assets/environments.png -------------------------------------------------------------------------------- /docs/readme-assets/participants.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Vicomtech/DMD-Driver-Monitoring-Dataset/HEAD/docs/readme-assets/participants.png -------------------------------------------------------------------------------- /exploreMaterial-tool/config_DEx.json: -------------------------------------------------------------------------------- 1 | { 2 | "material": ["videos"], 3 | "streams" : ["body"], 4 | "channels" : ["rgb"], 5 | "annotations" : ["driver_actions/safe_drive", "driver_actions/texting_right", "driver_actions/phonecall_right", "driver_actions/texting_left","driver_actions/phonecall_left","driver_actions/reach_side","driver_actions/radio","driver_actions/drinking"], 6 | "write" : true, 7 | "size" : [224, 224], 8 | "intervalChunk" : 50, 9 | "ignoreSmall" : false, 10 | "asc" : true 11 | } -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to the DMD repository will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ## [1.0.0] - 2020-07-22 11 | 12 | ### Added 13 | 14 | - First version of annotation tool (TaTo). 15 | - New Readme files and steps to run the annotation tool. 16 | - The wiki includes the DMD file structure and annotation instructions for distraction related actions. 
[unreleased]: https://github.com/Vicomtech/DMD-Driver-Monitoring-Dataset/compare/v1.0.0...HEAD 19 | [1.0.0]: https://github.com/Vicomtech/DMD-Driver-Monitoring-Dataset/releases/tag/v1.0.0
11 | 12 | ## Describe alternatives you've considered 13 | A clear and concise description of any alternative solutions or features you've considered. 14 | 15 | ## Teachability, Documentation, Adoption, Migration Strategy 16 | If you can, explain how users will be able to use this and possibly write out a version the docs. 17 | Maybe a screenshot or design? 18 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /annotation-tool/config_drowsiness.json: -------------------------------------------------------------------------------- 1 | { 2 | "0": { 3 | "0": "face_camera", 4 | "1": "body_camera", 5 | "2": "hands_camera", 6 | "99": "--", 7 | "100": "NAN" 8 | }, 9 | "1":{ 10 | "0": "open", 11 | "1": "close", 12 | "2": "opening", 13 | "3": "closing", 14 | "4": "undefined", 15 | "100": "NAN" 16 | }, 17 | "2": { 18 | "0": "blinking", 19 | "99": "--", 20 | "100": "NAN" 21 | }, 22 | "3": { 23 | "0": "Yawning with hand", 24 | "1": "Yawning without hand", 25 | "99": "--", 26 | "100": "NAN" 27 | }, 28 | "level_names": { 29 | "0": "occlusion", 30 | "1": "eyes_state", 31 | "2": "blinks", 32 | "3": "yawning" 33 | }, 34 | "level_types": { 35 | "0": "stream_properties", 36 | "1": "action", 37 | "2": "action", 38 | "3": "action" 39 | }, 40 | "level_defaults": { 41 | "0": 99, 42 | "1": 0, 43 | "2": 99, 44 | "3": 99 45 | 46 | }, 47 | "camera_dependencies": { 48 | "face": [1,2,3], 49 | "body": [], 50 | "hands": [] 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /annotation-tool/config_gaze.json: -------------------------------------------------------------------------------- 1 | { 2 | "2": { 3 | "0": "blinking", 4 | "99": "--", 5 | "100": "NAN" 6 | }, 7 | "1": { 8 | "0": "left_mirror", 9 | "1": "left", 10 | "2": "front", 11 | "3": "center_mirror", 12 | "4": "front_right", 13 | "5": "right_mirror", 14 | "6": "right", 15 | "7": "infotainment", 16 | "8": "steering_wheel", 17 | "9": 
"not_valid", 18 | "100": "NAN" 19 | }, 20 | "0": { 21 | "0": "face_camera", 22 | "1": "body_camera", 23 | "2": "hands_camera", 24 | "99": "--", 25 | "100": "NAN" 26 | }, 27 | "level_names": { 28 | "0": "occlusion", 29 | "1": "gaze_zone", 30 | "2": "blinks" 31 | }, 32 | "level_types": { 33 | "0": "stream_properties", 34 | "1": "action", 35 | "2": "action" 36 | }, 37 | "level_defaults": { 38 | "0": 99, 39 | "1": 2, 40 | "2": 99 41 | }, 42 | "camera_dependencies": { 43 | "face": [ 44 | 1, 45 | 2 46 | ], 47 | "body": [], 48 | "hands": [] 49 | } 50 | } -------------------------------------------------------------------------------- /annotation-tool/config_hands.json: -------------------------------------------------------------------------------- 1 | { 2 | "0": { 3 | "0": "face_camera", 4 | "1": "body_camera", 5 | "2": "hands_camera", 6 | "99": "--", 7 | "100": "NAN" 8 | }, 9 | "1": { 10 | "0": "both_hands", 11 | "1": "only_right", 12 | "2": "only_left", 13 | "3": "none", 14 | "100": "NAN" 15 | }, 16 | "2": { 17 | "0": "taking control", 18 | "1": "giving control", 19 | "99": "--", 20 | "100": "NAN" 21 | }, 22 | "3": { 23 | "0": "moving", 24 | "1": "not_moving", 25 | "100": "NAN" 26 | }, 27 | "level_names": { 28 | "0": "occlusion", 29 | "1": "hands_on_wheel", 30 | "2": "transition", 31 | "3": "moving_hands" 32 | }, 33 | "level_types": { 34 | "0": "stream_properties", 35 | "1": "action", 36 | "2": "action", 37 | "3": "action" 38 | }, 39 | "level_defaults": { 40 | "0": 99, 41 | "1": 0, 42 | "2": 99, 43 | "3": 1 44 | }, 45 | "camera_dependencies": { 46 | "face": [], 47 | "body": [ 48 | 1, 49 | 2, 50 | 3 51 | ], 52 | "hands": [] 53 | } 54 | } -------------------------------------------------------------------------------- /docs/setup_windows.md: -------------------------------------------------------------------------------- 1 | # Setting up the Temporal Annotation Tool (TaTo) - Windows 2 | 3 | ## Dependencies 4 | The TaTo tool has been tested using the following system 
configuration: 5 | 6 | **OS:** Windows 10
7 | **Dependencies:** Python 3.8, OpenCV-Python 4.2.0, VCD 4.3.0. Add: FFMPEG and ffmpeg-python for DExTool. 8 | 9 | ## Environment in Windows 10 | 11 | - Verify pip is installed, if not install: 12 | 13 | - (Optional) It is recommended to create a virtual environment in Python 3 (more info [here](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)): 14 | 15 | - Install dependencies 16 | -opencv-python, numpy, vcd 17 | -FFMPEG and ffmpeg-python for DExTool. 18 | 19 | - Go to [directory](../annotation-tool) that contains the tool scripts. 20 | 21 | ## Launching TaTo 22 | In a terminal window, within the folder [annotation_tool](../annotation-tool) run 23 | 24 | ```python 25 | python TaTo.py 26 | ``` 27 | 28 | The tool will ask you to input the **path** of the mosaic video you want to annotate. Please insert the path following the [DMD file structure](../docs/dmd_file_struct.md). 29 | 30 | The annotation tool TaTo opens with three windows. 31 | 32 | ## Launching DEx 33 | In a terminal window, within the folder [exploreMaterial-tool](../exploreMaterial-tool) run 34 | 35 | ```python 36 | python DExTool.py 37 | ``` 38 | 39 | The tool will ask you to input the **task** you wish to perform. 40 | -------------------------------------------------------------------------------- /docs/setup_linux.md: -------------------------------------------------------------------------------- 1 | # Setting up the Temporal Annotation Tool (TaTo) - Linux 2 | 3 | ## Dependencies 4 | The TaTo tool has been tested using the following system configuration: 5 | 6 | **OS:** Ubuntu 18.04, Windows 10
7 | **Dependencies:** Python 3.8, OpenCV-Python 4.2.0, VCD 4.3.0. Add: FFMPEG and ffmpeg-python for DExTool. 8 | 9 | ## Environment for Ubuntu 10 | - Please make sure you have **Python 3** installed in your system 11 | - Verify pip is installed, if not install: 12 | ```bash 13 | sudo apt-get install python3 python3-pip 14 | ``` 15 | ```bash 16 | pip3 install --upgrade pip 17 | ``` 18 | - (Optional) It is recommended to create a virtual environment in Python 3 (more info [here](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)): 19 | - Configure a new virtual environment: 20 | ```bash 21 | mkdir anntool_py 22 | ``` 23 | ```bash 24 | python3 -m venv anntool_py 25 | ``` 26 | - Activate the virtual environment: 27 | ```bash 28 | source anntool_py/bin/activate 29 | ``` 30 | - Install the dependencies 31 | ``` bash 32 | pip3 install opencv-python numpy vcd 33 | ``` 34 | - For DExTool, install the dependecies: 35 | ``` bash 36 | pip3 install --upgrade setuptools 37 | sudo apt update 38 | sudo apt install ffmpeg 39 | pip3 install ffmpeg-python 40 | ``` 41 | - Go to [directory](../annotation-tool) that contains the tool scripts. 42 | 43 | ## Launching TaTo 44 | In a terminal window within the folder [annotation_tool](../annotation-tool) run: 45 | 46 | ```python 47 | python TaTo.py 48 | ``` 49 | 50 | The tool will ask you to input the **path** of the video you want to annotate. Please insert the path following the [DMD file structure](../docs/dmd_file_struct.md). 51 | 52 | The annotation tool TaTo opens with three windows. 53 | 54 | ## Launching DEx 55 | In a terminal window, within the folder [exploreMaterial-tool](../exploreMaterial-tool) run 56 | 57 | ```python 58 | python DExTool.py 59 | ``` 60 | The tool will ask you to input the **task** you wish to perform. 
61 | -------------------------------------------------------------------------------- /annotation-tool/config_distraction.json: -------------------------------------------------------------------------------- 1 | { 2 | "6": { 3 | "0": "safe_drive", 4 | "1": "texting_right", 5 | "2": "phonecall_right", 6 | "3": "texting_left", 7 | "4": "phonecall_left", 8 | "5": "radio", 9 | "6": "drinking", 10 | "7": "reach_side", 11 | "8": "hair_and_makeup", 12 | "9": "talking_to_passenger", 13 | "10": "reach_backseat", 14 | "11": "change_gear", 15 | "12": "standstill_or_waiting", 16 | "13": "unclassified", 17 | "100": "NAN" 18 | }, 19 | "5": { 20 | "0": "cellphone", 21 | "1": "hair_comb", 22 | "2": "bottle", 23 | "99": "--", 24 | "100": "NAN" 25 | }, 26 | "4": { 27 | "0": "hand_on_gear", 28 | "99": "--", 29 | "100": "NAN" 30 | }, 31 | "3": { 32 | "0": "both", 33 | "1": "only_right", 34 | "2": "only_left", 35 | "3": "none", 36 | "100": "NAN" 37 | }, 38 | "2": { 39 | "0": "talking", 40 | "99": "--", 41 | "100": "NAN" 42 | }, 43 | "1": { 44 | "0": "looking_road", 45 | "1": "not_looking_road", 46 | "100": "NAN" 47 | }, 48 | "0": { 49 | "0": "face_camera", 50 | "1": "body_camera", 51 | "2": "hands_camera", 52 | "99": "--", 53 | "100": "NAN" 54 | }, 55 | "level_names": { 56 | "0": "occlusion", 57 | "1": "gaze_on_road", 58 | "2": "talking", 59 | "3": "hands_using_wheel", 60 | "4": "hand_on_gear", 61 | "5": "objects_in_scene", 62 | "6": "driver_actions" 63 | }, 64 | "level_types": { 65 | "0": "stream_properties", 66 | "1": "action", 67 | "2": "action", 68 | "3": "action", 69 | "4": "action", 70 | "5": "object", 71 | "6": "action" 72 | }, 73 | "level_defaults": { 74 | "0": 99, 75 | "1": 0, 76 | "2": 99, 77 | "3": 0, 78 | "4": 99, 79 | "5": 99, 80 | "6": 0 81 | }, 82 | "camera_dependencies":{ 83 | "face": [1,2], 84 | "body": [5,6], 85 | "hands": [3,4] 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /annotation-tool/config_statics.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "static_dict": { 3 | "0": { 4 | "name": "age", 5 | "text": "Subject Age", 6 | "type": "num", 7 | "parent": { 8 | "element": "object", 9 | "type": "driver" 10 | } 11 | }, 12 | "1": { 13 | "name": "gender", 14 | "text": "Subject Gender", 15 | "type": "text", 16 | "options": { 17 | "0": "Male", 18 | "1": "Female" 19 | }, 20 | "parent": { 21 | "element": "object", 22 | "type": "driver" 23 | } 24 | }, 25 | "2": { 26 | "name": "glasses", 27 | "text": "Is wearing glasses?", 28 | "type": "boolean", 29 | "options": { 30 | "0": "No", 31 | "1": "Yes" 32 | }, 33 | "parent": { 34 | "element": "object", 35 | "type": "driver" 36 | } 37 | }, 38 | "3": { 39 | "name": "drive_freq", 40 | "text": "Driving Frecuency", 41 | "type": "text", 42 | "options": { 43 | "0": "Once a week or less", 44 | "1": "Between 2 or 5 times a week", 45 | "2": "Everyday" 46 | }, 47 | "parent": { 48 | "element": "object", 49 | "type": "driver" 50 | } 51 | }, 52 | "4": { 53 | "name": "experience", 54 | "text": "Driving Experience", 55 | "type": "text", 56 | "options": { 57 | "0": "Less than 1 year", 58 | "1": "Between 1 and 3 years", 59 | "2": "More than 3 years" 60 | }, 61 | "parent": { 62 | "element": "object", 63 | "type": "driver" 64 | } 65 | }, 66 | "5": { 67 | "name": "weather", 68 | "text": "Weather", 69 | "type": "text", 70 | "options": { 71 | "0": "Sunny", 72 | "1": "Rainy", 73 | "2": "Cloudy" 74 | }, 75 | "parent": { 76 | "element": "context", 77 | "type": "recording_context" 78 | } 79 | }, 80 | "6": { 81 | "name": "setup", 82 | "text": "Setup", 83 | "type": "text", 84 | "options": { 85 | "0": "Car Moving", 86 | "1": "Car Stopped", 87 | "2": "Simulator" 88 | }, 89 | "parent": { 90 | "element": "context", 91 | "type": "recording_context" 92 | } 93 | }, 94 | "7": { 95 | "name": "annotatorID", 96 | "text": "Annotator ID", 97 | "type": "num", 98 | "parent": { 99 | "element": "metadata", 100 | "type": "annotator" 
101 | } 102 | } 103 | } 104 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Driver Monitoring Dataset (DMD) 2 | The [Driver Monitoring Dataset](http://dmd.vicomtech.org/) is the largest visual dataset for real driving actions, with footage from synchronized multiple cameras (body, face, hands) and multiple streams (RGB, Depth, IR) recorded in two scenarios (real car, driving simulator). Different annotated labels related to distraction, fatigue and gaze-head pose can be used to train Deep Learning models for Driver Monitor Systems. 3 | 4 | This project includes a tool to annotate the dataset, inspect the annotated data and export training sets. Output annotations are formatted using [OpenLABEL](https://www.asam.net/standards/detail/openlabel/) language [VCD (Video Content Description)](https://vcd.vicomtech.org/). 5 | 6 | ## Dataset details 7 | More details of the recording and video material of DMD can be found at the [official website](http://dmd.vicomtech.org/) 8 | 9 | In addition, this repository [wiki](https://github.com/Vicomtech/DMD-Driver-Monitoring-Dataset/wiki) has useful information about the DMD dataset and the annotation process. 10 | 11 | ## Available tools: 12 | - Temporal Annotation Tool (TaTo) - (more info [here](annotation-tool/README.md)) 13 | - Dataset Explorer Tool (DEx) - (more info [here](exploreMaterial-tool/README.md)) 14 | ### Annotation Instructions 15 | Depending the annotation problem, different annotation criteria should be defined to guarantee all the annotators produce the same output annotations. 
We have defined the following criteria to be used with the tool to produce consistent annotations:
41 | 42 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 43 | -------------------------------------------------------------------------------- /exploreMaterial-tool/statistics.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | from pathlib import Path # To handle paths independent of OS 4 | 5 | # Import local class to parse OpenLABEL content 6 | from vcd4reader import VcdHandler 7 | 8 | #Written by Paola Cañas with <3 9 | 10 | #Script to get statistics of the data (# of frames per class and total # of frames) 11 | class get_statistics(): 12 | 13 | def __init__(self, vcdFile, destinationFile): 14 | 15 | self.vcdFile = vcdFile 16 | self.vcd_handler = VcdHandler(vcd_file=Path(self.vcdFile)) 17 | 18 | self.actionPath = destinationFile.replace(".txt","-actions.txt") 19 | self.framesPath = destinationFile.replace(".txt","-frames.txt") 20 | 21 | # @self.actionList: ["driver_actions/safe_drive", "gaze_on_road/looking_road",.. , ..] 
22 | self.actionList = self.vcd_handler.get_action_type_list() 23 | #Get object list 24 | self.objectList = self.vcd_handler.get_object_type_list() 25 | #from 1 to not get "driver" object 26 | for object in self.objectList: 27 | # Append objects to actionList 28 | if "driver" in object: 29 | #Dont add driver object 30 | continue 31 | self.actionList.append("objects_in_scene/"+object) 32 | 33 | self.countActions() 34 | self.countFrames() 35 | 36 | def countActions(self): 37 | string_txt = [] 38 | 39 | if os.path.exists(self.actionPath): 40 | with open(self.actionPath, "r") as f: 41 | lines = f.readlines() 42 | for line in lines: 43 | string_txt.append(line.split(":")) 44 | 45 | #Delete to avoid redundancy 46 | os.remove(self.actionPath) 47 | 48 | for annotation in self.actionList: 49 | sum = 0 50 | # Check if annotation is an object or an action 51 | if "object" in annotation: 52 | # get object intervals from OpenLABEL 53 | fullIntervals = self.vcd_handler.get_frames_intervals_of_object(annotation) 54 | else: 55 | # get action intervals from OpenLABEL 56 | fullIntervals = self.vcd_handler.get_frames_intervals_of_action(annotation) 57 | 58 | #sum all frames in intervals 59 | for interval in fullIntervals: 60 | sum = sum + int(interval["frame_end"]) - int(interval["frame_start"]) 61 | 62 | found = False 63 | #replace the sum for the new one if annotation found in txt 64 | for num, line in enumerate(string_txt): 65 | if annotation in line[0]: 66 | found = True 67 | string_txt[num][1] = str(sum+int(string_txt[num][1]))+"\n" 68 | 69 | #if not found, add the sum 70 | if not found: 71 | string_txt.append([annotation,str(sum)+"\n"]) 72 | 73 | #write 74 | file = open(self.actionPath, "a+") 75 | for line in string_txt: 76 | file.write(line[0]+":"+line[1]) 77 | file.close() 78 | 79 | def countFrames(self): 80 | sum = 0 81 | if os.path.exists(self.framesPath): 82 | with open(self.framesPath, "r") as f: 83 | lines = f.read() 84 | sum = int(lines.split(":")[1]) 85 | 86 | #Delete 
to avoid redundancy 87 | os.remove(self.framesPath) 88 | 89 | sum = sum + self.vcd_handler.get_frames_number() 90 | 91 | #write 92 | file = open(self.framesPath, "a+") 93 | file.write("total_frames"+":"+str(sum)) 94 | file.close() -------------------------------------------------------------------------------- /annotation-tool/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "tatoConfig": { 3 | "annotation_mode": "distraction", 4 | "dataset": "dmd", 5 | "pre_annotate": 0, 6 | "calculate_time":0 7 | }, 8 | "interfaceText": { 9 | "mainLevelDependency":{ 10 | "distraction":"depending on Driver Actions level!", 11 | "drowsiness":"depending on Eye State (Right) level!" 12 | }, 13 | "levelCompletedToAnnotate":{ 14 | "distraction": "Only do this when you have completed the Driver_Actions Annotations", 15 | "drowsiness": "Only do this when you have completed the Eye State (Right) Annotations" 16 | } 17 | }, 18 | "consoleText": { 19 | "video_path_dmd": { 20 | "True": "PATH of the video (/dmd/.../_.._mosaic.avi): ", 21 | "False": "PATH of the video: " 22 | } 23 | }, 24 | "dimensions":{ 25 | "total-width": 1280, 26 | "total-height":720 27 | 28 | }, 29 | "colors": { 30 | "textColorMain": [ 31 | 60, 32 | 60, 33 | 60 34 | ], 35 | "textColorLabels": [ 36 | 255, 37 | 255, 38 | 255 39 | ], 40 | "textColorInstructions": [ 41 | 60, 42 | 60, 43 | 60 44 | ], 45 | "backgroundColorMain": 255, 46 | "backgroundColorLabels": 40, 47 | "backgroundColorInstructions": 230, 48 | "keyFrameColor": [ 49 | 131, 50 | 255, 51 | 167 52 | ], 53 | "colorDict": { 54 | "0": [ 55 | 223, 56 | 215, 57 | 195 58 | ], 59 | "1": [ 60 | 105, 61 | 237, 62 | 249 63 | ], 64 | "2": [ 65 | 201, 66 | 193, 67 | 63 68 | ], 69 | "3": [ 70 | 6, 71 | 214, 72 | 160 73 | ], 74 | "4": [ 75 | 233, 76 | 187, 77 | 202 78 | ], 79 | "5": [ 80 | 133, 81 | 81, 82 | 252 83 | ], 84 | "6": [ 85 | 0, 86 | 154, 87 | 255 88 | ], 89 | "7": [ 90 | 181, 91 | 107, 92 | 69 93 | ], 94 | "8": 
[ 95 | 137, 96 | 171, 97 | 31 98 | ], 99 | "9": [ 100 | 224, 101 | 119, 102 | 125 103 | ], 104 | "10": [ 105 | 153, 106 | 153, 107 | 255 108 | ], 109 | "11": [ 110 | 83, 111 | 73, 112 | 193 113 | ], 114 | "12": [ 115 | 107, 116 | 79, 117 | 54 118 | ], 119 | "13": [ 120 | 106, 121 | 107, 122 | 131 123 | ], 124 | "99": [ 125 | 245, 126 | 245, 127 | 245 128 | ], 129 | "100": [ 130 | 80, 131 | 80, 132 | 80 133 | ], 134 | "val_0": [ 135 | 245, 136 | 245, 137 | 245 138 | ], 139 | "val_1": [ 140 | 223, 141 | 187, 142 | 185 143 | ], 144 | "val_2": [ 145 | 250, 146 | 210, 147 | 170 148 | ] 149 | } 150 | } 151 | } -------------------------------------------------------------------------------- /exploreMaterial-tool/group_split_material.py: -------------------------------------------------------------------------------- 1 | import random 2 | import shutil 3 | import os 4 | import glob 5 | import sys 6 | from pathlib import Path 7 | # Written by Paola Cañas with <3 8 | 9 | # groupClass(): group dataset by classes ("radio", "drinking"...) 10 | # Dataset in folder must be organized by sessions. (s1,s2,s3...) 
11 | 12 | class groupClass(): 13 | 14 | def __init__(self,materialPath): 15 | 16 | self.materialPath = materialPath 17 | #e.g /mymaterialpath/dmd_rgb/ 18 | if not Path(self.materialPath).exists(): 19 | raise RuntimeError("Material path does not exist") 20 | 21 | #list all sessions folders 22 | session_paths = glob.glob(self.materialPath + '/*') 23 | session_paths.sort() 24 | 25 | for session in session_paths: 26 | # e.g /mymaterialpath/dmd_rgb/s1/ 27 | #For each session folder, list all classes folders 28 | class_paths = glob.glob(session + '/*') 29 | class_paths.sort() 30 | 31 | for classF in class_paths: 32 | #e.g /mymaterialpath/dmd_rgb/s1/driver_actions 33 | # or /mymaterialpath/dmd_rgb/s1/safe_drive 34 | subClass = glob.glob(classF + '/*') 35 | subClass.sort() 36 | dir = Path(subClass[0]).is_dir() 37 | print("dir",dir) 38 | #If theres a level more of folers 39 | if dir: 40 | for subClassF in subClass: 41 | #e.g /mymaterialpath/dmd_rgb/s1/driver_actions/safe_drive 42 | class_name = Path(classF).name 43 | name = Path(subClassF).name 44 | dest = Path(self.materialPath+"/"+class_name+"/"+name) 45 | #e.g /mymaterialpath/dmd_rgb/driver_actions/safe_drive 46 | os.makedirs(str(dest), exist_ok=True) 47 | print("Moving",name, "to", dest) 48 | shutil.copytree(subClassF, str(dest),dirs_exist_ok=True) 49 | 50 | else: 51 | #For each class folder, get the name and make a folder in destination 52 | name = Path(classF).name 53 | dest = Path(self.materialPath+"/"+name) 54 | #e.g /mymaterialpath/dmd_rgb/safe_drive 55 | os.makedirs(str(dest), exist_ok=True) 56 | print("Moving",name) 57 | shutil.copytree(classF, str(dest),dirs_exist_ok=True) 58 | 59 | #Delete session folder 60 | shutil.rmtree(session) 61 | 62 | 63 | 64 | # splitClass(): split dataset into train and test splits. 65 | # Dataset must be organized by classes. A folder containing each class material. ("radio","drinking"...) 
66 | class splitClass(): 67 | 68 | def __init__(self,materialPath,destination,testPercent): 69 | 70 | #@self.materialPath: Path of dmd dataset. (inside must be classes folders) 71 | #@self.destination: Path of where the dataset will be splitted in train and test folders. 72 | #@self.testPercent: Portion of desired material for test split. (e.g. 0.20) 73 | self.materialPath = materialPath 74 | self.destination = destination 75 | self.testPercent = float(testPercent) 76 | if not Path(self.materialPath).exists(): 77 | raise RuntimeError("Material path does not exist") 78 | 79 | if self.testPercent>1.0 or self.testPercent<=0: 80 | raise RuntimeError("Invalid percent for test split. Must be a number 0.0> and <1.0") 81 | 82 | #Create train and test folders in destination 83 | os.makedirs( self.destination + "/train", exist_ok=True) 84 | os.makedirs( self.destination + "/test", exist_ok=True) 85 | 86 | #List all classes folders 87 | label_paths = glob.glob(self.materialPath + '/*') 88 | label_paths.sort() 89 | print("folders: ",label_paths) 90 | for count,cl in enumerate(label_paths): 91 | #For each class folder, list all files 92 | files = glob.glob(str(cl) + '/*') 93 | #split file list in two by @testPercent 94 | train, test = self.partitionFiles(files) 95 | print("Moving ", len(files), " files: ",len(train)," for training and ",len(test)," for testing.") 96 | 97 | #Create class folder in train and test folders 98 | os.makedirs( self.destination + "/train/"+ str(count)+"/", exist_ok=True) 99 | os.makedirs(self.destination + "/test/" + str(count) + "/", exist_ok=True) 100 | 101 | #Move files from each partition to their correspondant folder 102 | for f in train: 103 | shutil.move(f, self.destination + "/train/" + str(count)+"/") 104 | for f in test: 105 | shutil.move(f, self.destination + "/test/" + str(count)+"/") 106 | 107 | def partitionFiles(self,files_list): 108 | #Calculate number of files for test partition 109 | howManyNumbers = int(round(self.testPercent * 
len(files_list))) 110 | shuffled = files_list[:] 111 | random.seed(123) 112 | #shuffle list of files 113 | random.shuffle(shuffled) 114 | #return partitions 115 | return shuffled[howManyNumbers:], shuffled[:howManyNumbers] -------------------------------------------------------------------------------- /exploreMaterial-tool/README.md: -------------------------------------------------------------------------------- 1 | # Dataset Explorer Tool (DEx) 2 | The DMD annotations come in [OpenLABEL](https://www.asam.net/standards/detail/openlabel/) format [link vicomtech](https://vcd.vicomtech.org/), which is compatible with the ASAM OpenLABEL annotation standard. 3 | This language is defined with JSON schemas and supports different types of annotations, being ideal for describing any kind of scenes. 4 | The DMD has spatial and temporal annotations (e.g. Bounding boxes and time intervals), also context and static annotations (e.g. Driver’s and environmental info); with OpenLABEL, it is possible to have all these annotations in one file. VCD is also an API, you can use the library to create and update OpenLABEL files. 5 | 6 | We have developed DEx tool to help access those annotations in the OpenLABEL easily. The main functionality of DEx at the moment is to access the OpenLABEL’s, read annotations and prepare DMD material for training. 7 | 8 | ## Content 9 | - [Dataset Explorer Tool (DEx)](#dataset-explorer-tool-dex) 10 | - [Content](#content) 11 | - [DEx characteristics](#dex-characteristics) 12 | - [Usage Instructions](#usage-instructions) 13 | - [DEx initialization](#dex-initialization) 14 | - [DEx export configuration](#dex-export-configuration) 15 | - [Changelog](#changelog) 16 | 17 | ## Setup and Launching 18 | DEx tool has been tested using the following system configuration: 19 | 20 | **OS:** Ubuntu 18.04, Windows 10
21 | **Dependencies:** Python 3.8, OpenCV-Python 4.2.0, VCD 6.0, FFMPEG and [ffmpeg-python](https://github.com/kkroening/ffmpeg-python) 22 | 23 | For a detailed description on how to configure the environment and launch the tool, check: [Linux](../docs/setup_linux.md) / [Windows](../docs/setup_windows.md) 24 | 25 | ## DEx characteristics 26 | DEx is a python-based tool to access OpenLABEL annotations more easily. You can prepare the DMD material for training by using DEx. The main functionalities of DEx are: exporting material in images or videos by frame intervals from the annotations, group the resulting material into folders organized by classes (only available for DMD) and after the material is organized by classes, the tool can generate a training and a testing split. 27 | 28 | - Get a **list of frame intervals** of a specific activity (or label) from OpenLABEL. 29 | - Take a list of frame intervals and **divide** them into **subintervals** of desired size. This can be done starting from the first frame or from the last frame and back. 30 | - **Export** those frame intervals as **video clips** or **images**. The material can be exported from the 3 camera perspectives videos (only available for DMD). You can also export images or videos in any size, like 224x224. 31 | - **Export** intervals from **IR**, **RGB** or **DEPTH** material. Each material type will be in a different folder: dmd-ir, dmd-rgb, dmd-depth. 32 | - You can choose what material to export: a group's material, a session material or just the material from a specific OpenLABEL annotation. 33 | - If you are working with the DMD, the exported material will be organized in a similar way as the DMD structure: by groups, sessions and subjects. With DEx, you can **group** this material by **classes**. This is only possible with DMD material. 34 | - After you have the data organized by classes, you can **split** the material into a **training** and a **testing** split.
You must provide the testing **ratio or proportion** (e.g: 0.20, 0.25). If the testing ratio is 0.20, the result is a folder named “train” with 80% of the data and a folder named “test” with 20% of the data. 35 | - Get **statistics** of data. This means, get the number of frames per class and the total number of frames from data of a group, session or a single OpenLABEL. 36 | 37 | ## Usage Instructions 38 | ### DEx initialization 39 | You can initialize the tool by executing the python script [DExTool.py](./DExTool.py). This script will guide you to prepare the DMD material. 40 | 41 | If you need something more specific, you can directly implement functions from [accessDMDAnn.py](./accessDMDAnn.py), [vcd4reader.py](./vcd4reader.py), [group_split_material.py](./group_split_material.py). 42 | 43 | ### DEx export configuration 44 | There are some export settings you can change at the __init()__ function of file [accessDMDAnn.py](./accessDMDAnn.py) under “-- CONTROL VARIABLES --“ comment. 45 | - To define the **data format** you wish to export, add “image” and/or “video” to **@material** variable as a list. 46 | - The list of **camera perspectives** to export material from can be defined in **@streams** variable, these are: "face", "body" or "hands" camera. If it is a video from another dataset, it must be "general" 47 | - To choose the channel of information, **RGB**, **IR** or **DEPTH**, you must specify it with the **@channels** variable. You can define a list of channels: ["ir","rgb","depth"]. For videos from other datasets, it must be only ["rgb"]. 48 | - You can choose the final image/video **size**. Set it as "original" or a tuple with a smaller size than the original (width, height). e.g.(224,224). 49 | - You can make a list of the **classes** you want to get the frame intervals of (e.g. [“safe_drive”,"drinking"]) and assign it to the **@annotations** variable. Objects (cellphone, hair comb and bottle) have to be with the 'object_in_scene/__' label before.
The var @self.actionList will get all the classes available in OpenLABEL 50 | - If you want to export and create/write material in a **destination folder**, you must set **@write** variable to True. 51 | - If you wish to **cut** the frame intervals to subintervals, the **size** of the final subintervals can be set in **@intervalChunk** variable. 52 | - Sometimes not all frame intervals can be cutted because they are smaller than the @intervalChunk. To **ignore** and not export these **smaller frame intervals**, set **@ignoreSmall** to True 53 | - To decide where to start cutting the frame intervals, change the **@asc** variable. True to start from the **first frame** and False to start from the **last frame** and go backwards. 54 | 55 | You can read more details about depth data and how to export it on the [DMD-Depth-Material](https://github.com/Vicomtech/DMD-Driver-Monitoring-Dataset/wiki/DMD-Depth-Material) page of the wiki. 56 | 57 | ## Changelog 58 | For a complete list of changes check the [CHANGELOG.md](../CHANGELOG.md) file 59 | 60 | :warning: If you find any bug with the tool or have ideas of new features please open a new issue using the [bug report template](../docs/issue_bug_template.md) or the [feature request template](../docs/issue_feature_template.md) :warning: 61 | -------------------------------------------------------------------------------- /exploreMaterial-tool/DExTool.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Created by Paola Cañas with <3 3 | import glob 4 | import os 5 | import re 6 | from pathlib import Path 7 | from accessDMDAnn import exportClass 8 | from group_split_material import splitClass, groupClass 9 | from statistics import get_statistics 10 | 11 | print("Welcome :)") 12 | opt = int(input("What do you whish to do?: export material for training:[0] group exported material by classes:[1] create train and test split:[2] get statistics:[3] : ")) 13 | 14 | if 
opt == 0: 15 | # export material for training 16 | print("To change export settings go to config_DEx.json and change control variables.") 17 | destination_path = input("Enter destination path: ") 18 | selec = input("How do you want to read annotations, by: Group:[g] Sessions:[f] One OpenLABEL:[v] : ") 19 | 20 | if selec == "g": 21 | #By group 22 | folder_path = input("Enter DMD group's path (../dmd/g#): ") 23 | #e.g /home/pncanas/Desktop/consumption/dmd/gA 24 | selec_session = input("Enter the session you wish to export in this group: all:[0] S1:[1] S2:[2] S3[3] S4[4] S5[5] S6[6] : ") 25 | 26 | subject_paths = glob.glob(folder_path + '/*') 27 | subject_paths.sort() 28 | 29 | for subject in subject_paths: 30 | print(subject) 31 | session_path = glob.glob(subject + '/*') 32 | session_path.sort() 33 | 34 | for session in session_path: 35 | if "s"+str(selec_session) in session or selec_session == "0": 36 | print(session) 37 | annotation_paths = glob.glob(session + '/*.json') 38 | annotation_paths.sort() 39 | 40 | for annotation in annotation_paths: 41 | print(annotation) 42 | dmd_folder=Path(annotation).parents[3] 43 | 44 | exportClass(annotation,str(dmd_folder),destination_path) 45 | 46 | print("Oki :) ----------------------------------------") 47 | 48 | elif selec == "f": 49 | #By session 50 | folder_path = input("Enter root dmd folder path(../dmd): ") 51 | #e.g /home/pncanas/Desktop/dmd/ 52 | selec_session = input("Enter the session you wish to export in this group: all:[0] S1:[1] S2:[2] S3[3] S4[4] S5[5] S6[6] : ") 53 | 54 | group_paths = glob.glob(folder_path + '/*') 55 | group_paths.sort() 56 | 57 | for group in group_paths: 58 | print(group) 59 | subject_paths = glob.glob(group + '/*') 60 | subject_paths.sort() 61 | 62 | for subject in subject_paths: 63 | print(subject) 64 | session_path = glob.glob(subject + '/*') 65 | session_path.sort() 66 | 67 | for session in session_path: 68 | if "s"+str(selec_session) in session or selec_session == "0": 69 | 
print(session) 70 | annotation_paths = glob.glob(session + '/*.json') 71 | annotation_paths.sort() 72 | 73 | for annotation in annotation_paths: 74 | print(annotation) 75 | dmd_folder=Path(annotation).parents[3] 76 | 77 | exportClass(annotation,str(dmd_folder),destination_path) 78 | 79 | print("Oki :) ----------------------------------------") 80 | 81 | elif selec == "v": 82 | 83 | vcd_path = input("Paste the OpenLABEL file path (..._ann.json): ") 84 | # e.g: /Desktop/consumption/dmd/gA/1/s2/gA_1_s2_2019-03-08T09;21;03+01;00_rgb_ann.json 85 | regex_internal = '(?P[1-9]|[1-2][0-9]|[3][0-7])_(?P[a-z]{1,})_'\ 86 | '(?P(?P0[1-9]|1[012])-(?P0[1-9]|[12][0-9]|3[01]))' 87 | regex_external = '(?Pg[A-z]{1,})_(?P[1-9]|[1-2][0-9]|[3][0-7])_'\ 88 | '(?Ps[1-9]{1,})_(?P(?P(?P\d{4})-(?P0[1-9]|1[012])-'\ 89 | '(?P0[1-9]|[12][0-9]|3[01]))T(?P