├── ASLCoordinateDictionary.py ├── LICENSE ├── README.md ├── app.py ├── recordingTest.mp3 ├── static ├── css │ └── styles.css ├── img │ ├── devpost-icon.svg │ ├── favicon.ico │ └── github-icon.svg ├── js │ ├── main.js │ └── postHandler.js └── json │ └── reference.json ├── templates └── index.html ├── wildfireNews.mp3 └── word videos ├── about.mp4 ├── all.mp4 ├── back.mp4 ├── been.mp4 ├── canadian.mp4 ├── country.mp4 ├── from.mp4 ├── getting.mp4 ├── here.mp4 ├── is.mp4 ├── morning.mp4 ├── much.mp4 ├── right.mp4 ├── smoke.mp4 ├── smother.mp4 ├── so.mp4 ├── still.mp4 ├── talk.mp4 ├── that.mp4 ├── the.mp4 ├── this.mp4 ├── to.mp4 ├── we.mp4 ├── weather.mp4 └── wildfire.mp4 /ASLCoordinateDictionary.py: -------------------------------------------------------------------------------- 1 | import mediapipe as mp 2 | import cv2 3 | import numpy as np 4 | import os 5 | import json 6 | 7 | mp_drawing = mp.solutions.drawing_utils 8 | mp_hands = mp.solutions.hands 9 | 10 | # Folder containing video files 11 | video_folder = 'word videos/' 12 | 13 | # Open JSON file for writing 14 | json_file = open('reference.json', 'w') 15 | 16 | with mp_hands.Hands(min_detection_confidence=0.8, min_tracking_confidence=0.5) as hands: 17 | # Create a dictionary to store the hand coordinates 18 | data = {} 19 | 20 | # Iterate over video files in the folder 21 | for idx, filename in enumerate(os.listdir(video_folder)): 22 | if filename.endswith(".mp4"): 23 | video_file = os.path.join(video_folder, filename) 24 | 25 | # Extract the file name without extension 26 | file_name = filename.split('.')[0] 27 | data[file_name] = [] 28 | 29 | # Open video file 30 | cap = cv2.VideoCapture(video_file) 31 | 32 | frame_number = 0 33 | 34 | while cap.isOpened(): 35 | ret, frame = cap.read() 36 | 37 | if not ret: 38 | break 39 | 40 | # Resize the frame to 800x750 41 | frame = cv2.resize(frame, (800, 750)) 42 | 43 | # Convert the frame to RGB 44 | image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) 45 | 46 | # 
Detections 47 | results = hands.process(image_rgb) 48 | 49 | # Check if hand landmarks are detected 50 | if results.multi_hand_landmarks: 51 | # Initialize hand coordinates 52 | hand_coordinates = [] 53 | 54 | for hand_landmarks in results.multi_hand_landmarks: 55 | hand = hand_landmarks.landmark 56 | 57 | # Determine handedness based on landmark positions 58 | if hand[mp_hands.HandLandmark.WRIST].x < hand[mp_hands.HandLandmark.THUMB_CMC].x: 59 | handedness = "Left" 60 | else: 61 | handedness = "Right" 62 | 63 | # Store coordinates and joint index in the hand coordinates list 64 | for joint_id, landmark in enumerate(hand): 65 | x, y, z = landmark.x, landmark.y, landmark.z 66 | joint_data = { 67 | "Joint Index": joint_id, 68 | "Coordinates": [x, y, z] 69 | } 70 | hand_coordinates.append(joint_data) 71 | 72 | # Draw landmarks on the frame 73 | mp_drawing.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS, 74 | mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2, 75 | circle_radius=4), 76 | mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2, 77 | circle_radius=2)) 78 | 79 | # Add frame data to the dictionary 80 | data[file_name].append({ 81 | "Frame": frame_number, 82 | "Left Hand Coordinates": hand_coordinates if handedness == "Left" else [], 83 | "Right Hand Coordinates": hand_coordinates if handedness == "Right" else [] 84 | }) 85 | 86 | # Increment the frame number 87 | frame_number += 1 88 | 89 | # Display the video frame with landmarks 90 | cv2.imshow('Video', frame) 91 | if cv2.waitKey(1) & 0xFF == ord('q'): 92 | break 93 | 94 | cap.release() 95 | 96 | # Print the current word during each loop iteration 97 | print(f"Processing video {idx + 1} of {len(os.listdir(video_folder))}: {file_name}") 98 | 99 | # Serialize the data dictionary to JSON and write it to the file 100 | json.dump(data, json_file) 101 | 102 | # Close the JSON file 103 | json_file.close() 104 | 105 | # Open the JSON file for reading 106 | json_file = open('reference.json', 'r') 
107 | data = json.load(json_file) 108 | json_file.close() 109 | 110 | # Check for large gaps between frames and interpolate 111 | for word in data: 112 | frames = data[word] 113 | num_frames = len(frames) 114 | 115 | if num_frames > 1: 116 | interpolated_frames = [] 117 | 118 | for i in range(num_frames - 1): 119 | current_frame = frames[i] 120 | next_frame = frames[i + 1] 121 | 122 | if next_frame["Frame"] - current_frame["Frame"] > 1: 123 | # Compute the gap between frames 124 | gap = next_frame["Frame"] - current_frame["Frame"] 125 | 126 | # Interpolate hand coordinates for the gap frames 127 | for j in range(1, gap): 128 | interpolation_ratio = j / gap 129 | 130 | interpolated_coordinates = [] 131 | 132 | # Interpolate each joint's coordinates 133 | for joint_data in current_frame["Left Hand Coordinates"]: 134 | current_coordinates = joint_data["Coordinates"] 135 | next_coordinates = next_frame["Left Hand Coordinates"][joint_data["Joint Index"]]["Coordinates"] 136 | 137 | interpolated_coordinates.append({ 138 | "Joint Index": joint_data["Joint Index"], 139 | "Coordinates": [ 140 | current_coordinates[0] + (next_coordinates[0] - current_coordinates[0]) * interpolation_ratio, 141 | current_coordinates[1] + (next_coordinates[1] - current_coordinates[1]) * interpolation_ratio, 142 | current_coordinates[2] + (next_coordinates[2] - current_coordinates[2]) * interpolation_ratio 143 | ] 144 | }) 145 | 146 | # Add interpolated frame to the list 147 | interpolated_frames.append({ 148 | "Frame": current_frame["Frame"] + j, 149 | "Left Hand Coordinates": interpolated_coordinates, 150 | "Right Hand Coordinates": [] 151 | }) 152 | 153 | # Append the interpolated frames to the original frames 154 | frames.extend(interpolated_frames) 155 | 156 | # Serialize the updated data dictionary to JSON and write it to the file 157 | json_file = open('reference.json', 'w') 158 | json.dump(data, json_file) 159 | json_file.close() 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 tan-ad 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SignWave 2 | 3 | [![License](https://img.shields.io/badge/license-MIT-green)](LICENSE.md) 4 | ![GitHub repo size](https://img.shields.io/github/repo-size/tan-ad/SignWave) 5 | ![JamHacks7](https://img.shields.io/badge/event-JAMHacks%207-blueviolet) 6 | 7 | An easy-to-use program that transcribes text or audio files into a sign language animation. 
8 | 
9 | ![image](https://github.com/tan-ad/SignWave/assets/42822671/d652e45e-f72d-475f-bb8c-a04ea8631dc6)
10 | 
11 | ## Inspiration
12 | 
13 | Given how much society has advanced technologically, the fact that there still isn't enough attention given to making communication more accessible for the deaf community is inexcusable. One of our teammates spoke of his first-hand experience with this issue, as his grandfather is a deaf individual who communicates primarily through sign language and visual cues. That's when we had the idea of automating translation to sign language, similar to closed captions on videos. As a result, we have created SignWave, an accessible and convenient translator from English to American Sign Language (ASL).
14 | 
15 | ## What it does and how it can be used
16 | 
17 | SignWave can take two types of input: audio files and text. If an audio file is given, SignWave produces a transcript of the words spoken in the audio file, then presents an animation of the transcript in sign language. When a text input is given, the transcription step is bypassed and the program directly produces an animation of the text in sign language. While useful as a sign language equivalent of closed captions, SignWave also extends as an educational tool. Those looking to learn sign language can use SignWave to teach themselves how to sign various phrases using both the speech-to-sign and text-to-sign functionalities.
18 | 
19 | 
20 | ## Installation
21 | Requirements:
22 | * System: Python (3.11.4)
23 | * Speech-to-text: PyTorch (2.0.1), ffmpeg (6.0), openai-whisper (beta)
24 | * Animation: NumPy (1.21.0), mediapipe (0.8.9.1)
25 | * Misc: Flask (2.3.2), Levenshtein (0.2.1), regex (2023.6.3), opencv-python (4.5.5.61)
26 | 
27 | Instructions:
28 | 1. Clone the repository
29 | ```
30 | $ git clone https://github.com/tan-ad/SignWave.git
31 | ```
32 | 3. Set up Python virtual environment
33 | ```
34 | $ python -m venv myenv
35 | ```
36 | 4. 
Install required dependencies 37 | 5. Run the ```app.py``` file 38 | ``` 39 | $ python app.py 40 | ``` 41 | 42 | ## How we built it 43 | 44 | Our program has three main steps: 45 | * Convert audio to text (when converting text-to-sign, this is skipped) 46 | * Find what movement corresponds to each word 47 | * Animate the movement 48 | 49 | We used OpenAI's [Whisper API](https://openai.com/research/whisper) to recognize and convert speech to text. Once in text form, we used [ASL Sign Language Dictionary](https://www.handspeak.com/word/) to collect video demonstrations of various words in sign language. For each video, we tracked the hand joints using [MediaPipe Hand Landmarker](https://developers.google.com/mediapipe/solutions/vision/hand_landmarker), which gave us the 3D coordinate of each hand joint at each frame of the video. We then created a dictionary, mapping each word to a multidimensional array of coordinates. This then allows us to use three.js to animate the hands as a set of points and edges. Finally, we wrapped it all together into a pleasant and usable interface using HTML, JS, and CSS. 
50 | 
51 | ## Challenges we ran into
52 | 
53 | * The team was almost completely new to Git, so we had to learn to use Git commands, such as add, commit, push, and pull from scratch
54 | * Semantics: Not having the exact translation of every word in the ASL dictionary
55 | * Creating a model that uses both right and left hand, especially when their animations overlap
56 | * Making User Interface design smooth, accommodating both text and audio file inputs
57 | 
58 | ## Accomplishments that we're proud of
59 | 
60 | * Creating a 2D model that accurately maps the movements of both hands
61 | * Transcribing a .mp3 file into .txt, then mapping it to our dictionary of ASL videos
62 | * Accommodating both audio and text input
63 | * Creating a clean and easy-to-use UI
64 | 
65 | ## What we learned
66 | 
67 | * How to use OpenAI's [Whisper API](https://openai.com/research/whisper) to convert speech to text
68 | * Using Python scripts to convert the .txt file into a list of unique strings
69 | * Using Google's [MediaPipe Hand Landmarker](https://developers.google.com/mediapipe/solutions/vision/hand_landmarker) to retrieve the coordinates of each hand
70 | * Using the [ASL Dictionary](https://www.handspeak.com/word/) to map each word to an array of coordinates
71 | * Using [three.js](https://threejs.org/) to animate the set of points
72 | * Using HTML, CSS, JS, and Git to create a website and repository
73 | 
74 | ## What's next for SignWave
75 | * Adding sliders to allow users to control animation speed
76 | * Creating a model with more humanoid hands
77 | * Implementing a reverse translation function of ASL to English by using computer recognition and Machine Learning
78 | * Expanding it into commercial uses (e.g. 
transcribing university lectures for deaf or hard-of-hearing students, captioning for TV shows, etc) 79 | * We're excited to see where SignWave can go in the future 👋 80 | 81 | ## Contact us 82 | * Andrew Gordienko (gordienko.adg@gmail.com) 83 | * Julian Zhang (juleoan123@gmail.com) 84 | * Rally Lin (linrally@gmail.com) 85 | * Adrian Tang (tanad@utschools.ca) 86 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask,render_template, request, jsonify 2 | import numpy as np 3 | from subprocess import CalledProcessError, run 4 | import whisper 5 | 6 | #libraries for text modification 7 | from Levenshtein import ratio 8 | import re 9 | import json 10 | 11 | app = Flask(__name__, template_folder='templates') 12 | 13 | model = whisper.load_model('base') 14 | 15 | SAMPLE_RATE = 16000 16 | def custom_load_audio(byte_data: bytes, sr=SAMPLE_RATE): #converts byte data to what whisper can use (adapted from https://github.com/openai/whisper/blob/main/whisper/audio.py) 17 | cmd = [ 18 | "ffmpeg", 19 | "-nostdin", 20 | "-threads", "0", 21 | "-i", "-", 22 | "-f", "s16le", 23 | "-ac", "1", 24 | "-acodec", "pcm_s16le", 25 | "-ar", str(sr), 26 | "-" 27 | ] 28 | try: 29 | out = run(cmd, input=byte_data, capture_output=True, check=True).stdout 30 | except CalledProcessError as e: 31 | raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e 32 | return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0 33 | 34 | def process_audio(audio): 35 | audio = whisper.pad_or_trim(audio) 36 | 37 | mel = whisper.log_mel_spectrogram(audio).to(model.device) 38 | 39 | options = whisper.DecodingOptions(fp16=False) 40 | result = whisper.decode(model, mel, options) 41 | return result.text 42 | 43 | with open('static/json/reference.json', 'r') as json_file: 44 | reference_data = json.load(json_file) 45 | 46 | def 
modify_words(text): #modifies words so all of them are in the dictionary 47 | words = re.findall(r'\b\w+\b', text.lower().strip()) 48 | filtered_words = [word for word in words if len(word) > 2] 49 | modified_words = [] 50 | for word in filtered_words: 51 | modified_word = None 52 | for reference_word in reference_data: 53 | # Calculate the similarity ratio using Levenshtein distance 54 | similarity = ratio(word, reference_word) 55 | if similarity >= 0.8: # Adjust the threshold as needed 56 | modified_word = reference_word 57 | break 58 | if not modified_word is None: 59 | modified_words.append(modified_word) #we're just removing words that dont match to make it easier (needs to be fixed) 60 | return ' '.join(modified_words) 61 | 62 | @app.route("/") 63 | def home(): 64 | return render_template('index.html') 65 | 66 | @app.route("/", methods=['POST']) #check for empty files or no file updated 67 | def upload_file(): 68 | f = request.files['file'] 69 | rawText = process_audio(custom_load_audio(f.read())) 70 | modText = modify_words(rawText) 71 | return jsonify({'rawText':rawText,'modText': modText}) 72 | 73 | 74 | if __name__ == '__main__': 75 | app.run(port=5001) -------------------------------------------------------------------------------- /recordingTest.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/recordingTest.mp3 -------------------------------------------------------------------------------- /static/css/styles.css: -------------------------------------------------------------------------------- 1 | * { 2 | margin: 0; 3 | padding: 0; 4 | box-sizing: border-box; 5 | font-family: monospace; 6 | } 7 | 8 | body{ 9 | background-color: #253238; 10 | color:#C0C0C0; 11 | font-size: small; 12 | } 13 | 14 | .flex-container{ 15 | display: flex; 16 | height: 100vh; 17 | } 18 | 19 | .left-box { 20 | flex: 1; 21 | border-color: #C0C0C0; 22 | 
border-width: 2px; 23 | border-style: solid; 24 | margin: 20px; 25 | display: flex; 26 | flex-direction: column; 27 | } 28 | 29 | .right-box { 30 | flex: 1; 31 | display: flex; 32 | flex-direction: column; 33 | } 34 | 35 | .box { 36 | flex: 1; 37 | margin: 20px 20px 0px 0px; 38 | border-color: #C0C0C0; 39 | border-width: 2px; 40 | border-style: solid; 41 | } 42 | 43 | .form-container{ 44 | display: flex; 45 | } 46 | 47 | .subsection { 48 | flex: 1; 49 | } 50 | 51 | .box .title{ 52 | text-align: center; 53 | font-size: 15px; 54 | margin-top: 20px; 55 | } 56 | 57 | #label{ 58 | margin-top: 20px; 59 | margin-left: auto; 60 | margin-right: auto; 61 | } 62 | 63 | #container{ 64 | flex: 1; 65 | margin:50px; 66 | } 67 | 68 | button{ 69 | border-width: 2px; 70 | border-style: solid; 71 | border-color: #C0C0C0; 72 | padding: 10px; 73 | width: 250px; 74 | font-weight: bold; 75 | display:block; 76 | margin-top: 20px; 77 | } 78 | button:hover { 79 | background-image: linear-gradient(rgb(0 0 0/40%) 0 0); 80 | } 81 | 82 | label{ 83 | text-align: center; 84 | background-color: #333333; 85 | border-color: #C0C0C0; 86 | color:#C0C0C0; 87 | border-width: 3px; 88 | border-style: solid; 89 | padding: 10px; 90 | width: 250px; 91 | font-weight: bold; 92 | display:block; 93 | margin-top: 20px; 94 | margin-left:auto; 95 | margin-right: 10px; 96 | } 97 | label:hover { 98 | background-image: linear-gradient(rgb(0 0 0/40%) 0 0); 99 | } 100 | 101 | .dark{ 102 | background-color: #333333; 103 | color:#C0C0C0; 104 | } 105 | 106 | .light{ 107 | background-color: #C0C0C0; 108 | color:#333333; 109 | } 110 | 111 | #file-chosen{ 112 | margin-left:auto; 113 | margin-right:auto; 114 | font-size: 10px; 115 | display: block; 116 | margin-top: 5px; 117 | font-weight: normal; 118 | } 119 | 120 | #subtext{ 121 | color:#333333; 122 | margin-left:auto; 123 | margin-right:auto; 124 | font-size: 10px; 125 | display: block; 126 | margin-top: 5px; 127 | font-weight: normal; 128 | } 129 | 130 | textarea{ 131 | 
display: block; 132 | margin-left:auto; 133 | margin-right:auto; 134 | margin-top: 20px; 135 | width:520px; 136 | height: 150px; 137 | padding: 10px; 138 | } 139 | 140 | .socials{ 141 | text-decoration: none; 142 | margin-left: 5px; 143 | margin-right: 5px; 144 | } 145 | 146 | .foot{ 147 | display: flex; 148 | justify-content: center; 149 | align-items: center; 150 | margin-top: 15px; 151 | } -------------------------------------------------------------------------------- /static/img/devpost-icon.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 9 | 10 | 11 | 12 | 13 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /static/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/static/img/favicon.ico -------------------------------------------------------------------------------- /static/img/github-icon.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /static/js/main.js: -------------------------------------------------------------------------------- 1 | import * as THREE from 'https://cdn.skypack.dev/three@0.132.2'; 2 | 3 | var label = document.getElementById("label") 4 | 5 | var container = document.getElementById("container"); 6 | const fov = 75; 7 | const aspect = container.clientWidth / container.clientHeight; 8 | const near = 0.1; 9 | const far = 1000; 10 | 11 | const scene = new THREE.Scene(); 12 | scene.background = new THREE.Color( 0x253238 ); 13 | const camera = new THREE.PerspectiveCamera(fov, aspect, near, far); 14 | 15 | const renderer = new THREE.WebGLRenderer(); 16 | renderer.setSize(container.clientWidth, container.clientHeight); 17 | container.appendChild(renderer.domElement); 18 | 19 | var wordList 
= [] 20 | var wordidx = 0; 21 | var frameidx=0; 22 | 23 | var textForm = document.getElementById("inputForm"); 24 | textForm.addEventListener("submit", function(e) { 25 | e.preventDefault(); 26 | var message = document.getElementById("message").value; 27 | wordList = message.split(" "); 28 | frameidx = 0; 29 | wordidx = 0; 30 | console.log(wordList); 31 | }); 32 | 33 | fetch('static/json/reference.json') //fetches json data (very slow) 34 | .then(response => response.json()) 35 | .then(data => { 36 | function drawPoint(x, y, z){ 37 | const pointRadius = 0.25; 38 | const geometry = new THREE.SphereGeometry( pointRadius, 32, 16 ); 39 | const material = new THREE.MeshBasicMaterial( { color: 0x84FFFF } ); 40 | const sphere = new THREE.Mesh( geometry, material ); scene.add(sphere); 41 | sphere.position.x = x; 42 | sphere.position.y = y; 43 | sphere.position.z = z; 44 | } 45 | 46 | function drawLine(x1, y1, z1, x2, y2, z2){ 47 | const points = []; 48 | points.push (new THREE.Vector3(x1, y1, z1)); 49 | points.push (new THREE.Vector3(x2, y2, z2)); 50 | const geometry = new THREE.BufferGeometry().setFromPoints( points ); 51 | const material = new THREE.LineBasicMaterial( { color: 0xFFFFFF } ); 52 | const line = new THREE.Line( geometry, material ); 53 | scene.add(line); 54 | } 55 | 56 | function redistributeElements(left, right) { //fixes the problem where more than 21 nodes are identified as left and lets the lines be drawn properly 57 | if (left.length > 21) { 58 | const redistributedElements = left.splice(21); 59 | right.push(...redistributedElements); 60 | } else if (right.length > 21) { 61 | const redistributedElements = right.splice(21); 62 | left.push(...redistributedElements); 63 | } 64 | } 65 | 66 | function connectLines(frameidx){ 67 | const edgeList = [[0,1],[1,2], [2,3], [3,4], [0,5], [5,6], [6,7], [7,8], [5,9], [9,10], [10,11], [11,12], [9,13], [13,14], [14,15], [15,16], [13,17], [17,18], [18,19], [19,20], [0,17]]; 68 | var left = 
data[wordList[wordidx]][frameidx]['Left Hand Coordinates']; 69 | var right = data[wordList[wordidx]][frameidx]['Right Hand Coordinates']; 70 | 71 | redistributeElements(left, right); 72 | 73 | edgeList.forEach(function(edge){ 74 | const u = edge[0]; 75 | const v = edge[1]; 76 | if (left[u] && left[v]){ 77 | const l1 = left[u]['Coordinates']; 78 | const l2 = left[v]['Coordinates']; 79 | drawLine(l1[0]*50, l1[1]*-50, l1[2]*50, l2[0]*50, l2[1]*-50, l2[2]*50); 80 | } 81 | if (right[u] && right[v]){ 82 | const r1 = right[u]['Coordinates']; 83 | const r2 = right[v]['Coordinates']; 84 | drawLine(r1[0]*50, r1[1]*-50, r1[2]*50, r2[0]*50, r2[1]*-50, r2[2]*50); 85 | } 86 | }) 87 | } 88 | 89 | let clock = new THREE.Clock(); 90 | let delta = 0; 91 | let interval = 1 / 45; //timer allows us to run at 45 fps 92 | 93 | function render() { 94 | requestAnimationFrame(render); 95 | delta += clock.getDelta(); 96 | 97 | if (delta > interval){ 98 | delta = delta % interval; 99 | 100 | if(wordList.length > 0 && wordidx < wordList.length){ 101 | 102 | label.innerHTML = wordList[wordidx].toUpperCase(); 103 | 104 | var left = data[wordList[wordidx]][frameidx]['Left Hand Coordinates']; 105 | var right = data[wordList[wordidx]][frameidx]['Right Hand Coordinates']; 106 | 107 | left.forEach(function(joint) { 108 | drawPoint(joint['Coordinates'][0]*50, joint['Coordinates'][1]*-50, joint['Coordinates'][2]*50); 109 | }) 110 | right.forEach(function(joint) { 111 | drawPoint(joint['Coordinates'][0]*50, joint['Coordinates'][1]*-50, joint['Coordinates'][2]*50); 112 | }) 113 | connectLines(frameidx); 114 | 115 | frameidx++; 116 | if(frameidx >= data[wordList[wordidx]].length){ 117 | frameidx = 0; 118 | wordidx++; 119 | label.innerHTML = wordList[wordidx].toUpperCase(); 120 | } 121 | } 122 | else{ 123 | label.innerHTML = "N/A"; 124 | } 125 | renderer.render(scene, camera); 126 | scene.remove.apply(scene, scene.children); 127 | } 128 | } 129 | 130 | render(); 131 | }) 132 | 133 | 
camera.position.set(27.5, -30, 25); -------------------------------------------------------------------------------- /static/js/postHandler.js: -------------------------------------------------------------------------------- 1 | $(function(){ 2 | $('#fileForm').submit(function(e) { 3 | e.preventDefault(); 4 | var formData = new FormData($(this)[0]); //extracting the file data 5 | 6 | $.ajax({ //ajax sending a post request to the flask server 7 | url: "/", 8 | data: formData, 9 | type: 'POST', 10 | async: false, 11 | contentType: false, 12 | processData: false, 13 | success: function(response) { //updating the webpage with result 14 | $('#display').text(response.rawText); 15 | $('#message').text(response.modText); 16 | }, 17 | error: function(err) { 18 | console.log(err); 19 | } 20 | }); 21 | }); 22 | }); -------------------------------------------------------------------------------- /templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | SignWave 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |
15 |
16 |

N/A

17 |
18 |
19 |
20 |
21 |
SPEECH 2 TEXT
22 |
23 |
24 |
25 | 26 | 30 |
31 |
32 | 35 |
36 |
37 | 44 |
45 | 46 |
47 |
48 |
TEXT 2 ASL
49 |
50 | 51 | 52 |
53 |
54 | 55 | 56 | 57 |
58 |
59 |
60 |
61 | 62 | 63 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /wildfireNews.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/wildfireNews.mp3 -------------------------------------------------------------------------------- /word videos/about.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/about.mp4 -------------------------------------------------------------------------------- /word videos/all.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/all.mp4 -------------------------------------------------------------------------------- /word videos/back.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/back.mp4 -------------------------------------------------------------------------------- /word videos/been.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/been.mp4 -------------------------------------------------------------------------------- /word videos/canadian.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/canadian.mp4 -------------------------------------------------------------------------------- /word videos/country.mp4: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/country.mp4 -------------------------------------------------------------------------------- /word videos/from.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/from.mp4 -------------------------------------------------------------------------------- /word videos/getting.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/getting.mp4 -------------------------------------------------------------------------------- /word videos/here.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/here.mp4 -------------------------------------------------------------------------------- /word videos/is.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/is.mp4 -------------------------------------------------------------------------------- /word videos/morning.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/morning.mp4 -------------------------------------------------------------------------------- /word videos/much.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/much.mp4 -------------------------------------------------------------------------------- /word 
videos/right.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/right.mp4 -------------------------------------------------------------------------------- /word videos/smoke.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/smoke.mp4 -------------------------------------------------------------------------------- /word videos/smother.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/smother.mp4 -------------------------------------------------------------------------------- /word videos/so.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/so.mp4 -------------------------------------------------------------------------------- /word videos/still.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/still.mp4 -------------------------------------------------------------------------------- /word videos/talk.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/talk.mp4 -------------------------------------------------------------------------------- /word videos/that.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/that.mp4 
-------------------------------------------------------------------------------- /word videos/the.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/the.mp4 -------------------------------------------------------------------------------- /word videos/this.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/this.mp4 -------------------------------------------------------------------------------- /word videos/to.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/to.mp4 -------------------------------------------------------------------------------- /word videos/we.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/we.mp4 -------------------------------------------------------------------------------- /word videos/weather.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/weather.mp4 -------------------------------------------------------------------------------- /word videos/wildfire.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tan-ad/SignWave/278338005310b4cfa54f5a436295618b83a5b4ec/word videos/wildfire.mp4 --------------------------------------------------------------------------------