├── .gitignore ├── LICENSE ├── Procfile ├── README.md ├── adhd_classifier.joblib ├── app.py ├── create_predict_data.py ├── create_train_test_data.py ├── egemaps.py ├── pca.py ├── predict.py ├── railway.json ├── requirements.txt ├── runtime.txt ├── scaler.joblib ├── static ├── css │ ├── analysis-style.css │ └── styles.css ├── img │ ├── bg (1).jpg │ ├── bg (2).jpg │ ├── bg (3).jpg │ ├── bg (4).jpg │ ├── bg (5).jpg │ ├── bg (6).jpg │ ├── bg (7).jpg │ ├── bg.jpg │ ├── bg2.jpg │ └── logo.png └── js │ ├── analysis.js │ ├── chart.js │ ├── recorder.js │ └── upload.js └── templates ├── index.html └── results.html /.gitignore: -------------------------------------------------------------------------------- 1 | #dependencies 2 | /dataset 3 | /venv 4 | /uploads 5 | /processed 6 | # Dataset directory 7 | dataset/ 8 | 9 | # Virtual Environment 10 | venv/ 11 | env/ 12 | ENV/ 13 | 14 | # Python 15 | __pycache__/ 16 | *.py[cod] 17 | *$py.class 18 | *.so 19 | .Python 20 | env/ 21 | build/ 22 | develop-eggs/ 23 | dist/ 24 | downloads/ 25 | eggs/ 26 | .eggs/ 27 | lib/ 28 | lib64/ 29 | parts/ 30 | sdist/ 31 | var/ 32 | wheels/ 33 | *.egg-info/ 34 | .installed.cfg 35 | *.egg 36 | 37 | # IDE specific files 38 | .idea/ 39 | .vscode/ 40 | *.swp 41 | *.swo 42 | svm.py 43 | # Jupyter Notebook 44 | .ipynb_checkpoints 45 | 46 | # Local development settings 47 | .env 48 | .env.local 49 | .env.*.local 50 | 51 | # Generated files 52 | *.csv 53 | !requirements.txt 54 | 55 | # Project specific 56 | *.pth 57 | *.mp3 58 | *.wav -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 ADHD Voice Analysis 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /Procfile: -------------------------------------------------------------------------------- 1 | web: gunicorn app:app -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ADHD Recognition System with Personal Voice 2 | 3 | A machine learning-based system for analyzing audio recordings to detect potential ADHD characteristics using advanced audio processing and machine learning techniques. 
4 | 5 | ## Overview 6 | 7 | This system uses audio processing and machine learning to analyze speech patterns and detect potential ADHD characteristics. It employs the eGeMAPs (extended Geneva Minimalistic Acoustic Parameter Set) feature set for audio analysis and uses Support Vector Machine (SVM) for classification. 8 | 9 | ## Features 10 | 11 | - Audio file processing and segmentation 12 | - eGeMAPs feature extraction 13 | - Principal Component Analysis (PCA) for feature reduction 14 | - SVM-based ADHD classification 15 | - Real-time processing capabilities 16 | - Web interface for easy interaction 17 | - Support for MP3 and WAV audio formats 18 | 19 | ## Technical Stack 20 | 21 | - Python 3.x 22 | - Flask (Web Framework) 23 | - Librosa (Audio Processing) 24 | - OpenSMILE (eGeMAPs Feature Extraction) 25 | - scikit-learn (Machine Learning) 26 | - Pandas & NumPy (Data Processing) 27 | - Matplotlib & Seaborn (Visualization) 28 | 29 | ## Installation 30 | 31 | 1. Clone the repository: 32 | 33 | ```bash 34 | git clone [repository-url] 35 | cd ADHD 36 | ``` 37 | 38 | 2. Create and activate a virtual environment: 39 | 40 | ```bash 41 | python -m venv venv 42 | source venv/bin/activate # On Windows: venv\Scripts\activate 43 | ``` 44 | 45 | 3. Install dependencies: 46 | 47 | ```bash 48 | pip install -r requirements.txt 49 | ``` 50 | 51 | ## Usage 52 | 53 | 1. Start the web server: 54 | 55 | ```bash 56 | python app.py 57 | ``` 58 | 59 | 2. Open your web browser and navigate to `http://localhost:5000` 60 | 61 | 3. Upload an audio file (MP3 or WAV format) through the web interface 62 | 63 | 4. The system will process the audio and provide: 64 | - ADHD probability assessment 65 | - Classification result 66 | - Detailed analysis report 67 | 68 | ## Project Structure 69 | 70 | ``` 71 | ADHD/ 72 | ├── app.py # Main Flask application 73 | ├── create_predict_data.py # Audio processing and feature extraction 74 | ├── predict.py # ADHD prediction module 75 | ├── svm.py # SVM model training and evaluation 76 | ├── pca.py # PCA analysis and visualization 77 | ├── egemaps.py # eGeMAPs feature extraction utilities 78 | ├── static/ # Static files (CSS, JS) 79 | ├── templates/ # HTML templates 80 | ├── uploads/ # Temporary storage for uploaded files 81 | └── process/ # Processing directory for audio files 82 | ``` 83 | 84 | ## Model Training 85 | 86 | To train the model with your own dataset: 87 | 88 | 1. Place your audio files in the `dataset/train_16k` directory 89 | 2. Run the training script: 90 | 91 | ```bash 92 | python create_train_test_data.py 93 | ``` 94 | 95 | ## API Endpoints 96 | 97 | - `GET /`: Main web interface 98 | - `POST /upload_file`: Upload and process audio files 99 | - Accepts: MP3 or WAV files 100 | - Returns: JSON response with analysis results 101 | 102 | ## Performance 103 | 104 | The system uses a combination of: 105 | 106 | - eGeMAPs features for robust audio analysis 107 | - PCA for feature reduction 108 | - SVM with RBF kernel for classification 109 | - Real-time processing capabilities 110 | 111 | ## License 112 | 113 | This project is licensed under the terms of the included LICENSE file. 114 | 115 | ## Contributing 116 | 117 | Contributions are welcome! Please feel free to submit a Pull Request. 
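## Example Request

The `POST /upload_file` endpoint described above responds with a `text/event-stream`: the server first sends an `estimate` event with the expected processing time and then a `result` event containing the analysis. The snippet below is a minimal sketch of a client that streams these events using Python `requests`; it assumes the server is running locally on the default port 5000 and that `sample.wav` is a short recording in the current directory (the filename is only an example, not part of the repository).

```python
import json
import requests

# Upload an audio file and stream the server-sent events as they arrive.
with requests.post(
    "http://localhost:5000/upload_file",
    files={"file": ("sample.wav", open("sample.wav", "rb"), "audio/wav")},
    stream=True,
) as response:
    for raw_line in response.iter_lines():
        # Each event is emitted as a line of the form: data: {...JSON...}
        if raw_line.startswith(b"data: "):
            event = json.loads(raw_line[len(b"data: "):])
            print(event)
```

The first printed event reports the estimated processing time; the final event carries the prediction and the ADHD probability returned by the classifier.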
118 | 119 | ## Acknowledgments 120 | 121 | - OpenSMILE for eGeMAPs feature extraction 122 | - scikit-learn for machine learning capabilities 123 | - Flask for web framework 124 | 125 | 126 | [Telegram](https://t.me/Minato_95) 127 | -------------------------------------------------------------------------------- /adhd_classifier.joblib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/adhd_classifier.joblib -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, request, jsonify, render_template, Response 2 | import os 3 | from werkzeug.utils import secure_filename 4 | import create_predict_data 5 | import predict 6 | import json 7 | import time 8 | import tempfile 9 | 10 | app = Flask(__name__) 11 | 12 | # Use system temp directory for uploads in production 13 | if os.environ.get('RAILWAY_ENVIRONMENT'): 14 | app.config['UPLOAD_FOLDER'] = tempfile.gettempdir() 15 | app.config['PROCESS'] = tempfile.gettempdir() 16 | else: 17 | app.config['UPLOAD_FOLDER'] = 'uploads' 18 | app.config['PROCESS'] = 'process' 19 | os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True) 20 | os.makedirs(app.config['PROCESS'], exist_ok=True) 21 | 22 | app.config['MAX_CONTENT_LENGTH'] = 1000 * 1024 * 1024 # 100MB max file size 23 | app.config['MAX_CONTENT_PATH'] = 255 # Maximum length of file path 24 | 25 | 26 | def send_estimate_time(estimate_time): 27 | return f"data: {json.dumps({'type': 'estimate', 'estimate_time': estimate_time})}\n\n" 28 | 29 | 30 | def send_result(result, success): 31 | return f"data: {json.dumps({'type': 'result','success':success, 'result': result})}\n\n" 32 | 33 | 34 | @app.route('/') 35 | def index(): 36 | return render_template('index.html') 37 | 38 | 39 | @app.route('/upload_file', methods=['POST']) 40 | def upload_file(): 41 | try: 42 | if 'file' not in request.files: 43 | return Response(send_result({ 44 | 'success': False, 45 | 'message': 'No file part' 46 | }), 47 | mimetype='text/event-stream') 48 | 49 | file = request.files['file'] 50 | if file.filename == '': 51 | return Response(send_result({ 52 | 'success': False, 53 | 'message': 'No selected file' 54 | }), 55 | mimetype='text/event-stream') 56 | 57 | # Check file extension 58 | allowed_extensions = {'mp3', 'wav'} 59 | if not '.' in file.filename or file.filename.rsplit( 60 | '.', 1)[1].lower() not in allowed_extensions: 61 | return Response(send_result({ 62 | 'success': 63 | False, 64 | 'message': 65 | 'Invalid file type. Only MP3 and WAV files are allowed.' 66 | }), 67 | mimetype='text/event-stream') 68 | 69 | # Check file size 70 | file_size = len(file.read()) 71 | file.seek(0) # Reset file pointer 72 | if file_size > app.config['MAX_CONTENT_LENGTH']: 73 | return Response(send_result({ 74 | 'success': 75 | False, 76 | 'message': 77 | f'File too large. 
Maximum size is {app.config["MAX_CONTENT_LENGTH"] // (1024*1024)}MB' 78 | }), 79 | mimetype='text/event-stream') 80 | 81 | filename = secure_filename(file.filename) 82 | input_filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename) 83 | output_filepath = os.path.join(app.config['PROCESS']) 84 | # Check if file path is too long 85 | if len(input_filepath) > app.config['MAX_CONTENT_PATH']: 86 | return Response(send_result({ 87 | 'success': False, 88 | 'message': 'File path too long' 89 | }), 90 | mimetype='text/event-stream') 91 | 92 | file.save(input_filepath) 93 | 94 | def generate(): 95 | try: 96 | estimate_time = 2 * ( 97 | create_predict_data.split_number(input_filepath) - 98 | 1) if create_predict_data.split_number( 99 | input_filepath) >= 2 else 3 100 | yield send_estimate_time(estimate_time) 101 | # Process audio file 102 | features_df = create_predict_data.process_audio_files( 103 | input_filepath, output_filepath) 104 | 105 | result = predict.predict_adhd(features_df) 106 | print(result) 107 | 108 | yield send_result(result, True) 109 | 110 | except Exception as e: 111 | yield send_result({ 112 | 'success': False, 113 | 'message': f'Error processing file: {str(e)}' 114 | }) 115 | finally: 116 | # Clean up 117 | if os.path.exists(input_filepath): 118 | os.remove(input_filepath) 119 | 120 | return Response(generate(), mimetype='text/event-stream') 121 | 122 | except Exception as e: 123 | return Response(send_result({ 124 | 'success': False, 125 | 'message': f'Server error: {str(e)}' 126 | }), 127 | mimetype='text/event-stream') 128 | 129 | 130 | if __name__ == '__main__': 131 | port = int(os.environ.get('PORT', 5000)) 132 | app.run(host='0.0.0.0', port=port) 133 | -------------------------------------------------------------------------------- /create_predict_data.py: -------------------------------------------------------------------------------- 1 | import os 2 | import librosa 3 | import soundfile as sf 4 | import numpy as np 5 | from tqdm import tqdm 6 | import opensmile 7 | import pandas as pd 8 | from sklearn.preprocessing import StandardScaler 9 | from sklearn.decomposition import PCA 10 | import matplotlib.pyplot as plt 11 | import seaborn as sns 12 | import shutil 13 | 14 | 15 | def split_number(input_file, segment_length_seconds=60): 16 | y, sr = librosa.load(input_file) 17 | segment_length_samples = int(segment_length_seconds * sr) 18 | total_segments = len(y) // segment_length_samples + ( 19 | 1 if len(y) % segment_length_samples != 0 else 0) 20 | return total_segments 21 | 22 | 23 | def split_audio(input_file, output_dir, segment_length_seconds=60): 24 | """ 25 | Split an audio file into segments of specified length 26 | 27 | Args: 28 | input_file (str): Path to the input audio file 29 | output_dir (str): Directory to save the split audio files 30 | segment_length_seconds (int): Length of each segment in seconds 31 | """ 32 | # Create output directory if it doesn't exist 33 | os.makedirs(output_dir, exist_ok=True) 34 | 35 | # Load the audio file 36 | print(f"Loading audio file: {input_file}") 37 | y, sr = librosa.load(input_file) 38 | 39 | # Calculate segment length in samples 40 | segment_length_samples = int(segment_length_seconds * sr) 41 | 42 | # Calculate number of segments 43 | total_segments = len(y) // segment_length_samples + ( 44 | 1 if len(y) % segment_length_samples != 0 else 0) 45 | 46 | print(f"Total duration: {len(y)/sr:.2f} seconds") 47 | print(f"Number of segments: {total_segments}") 48 | 49 | # Get the file extension 50 | file_extension = 
os.path.splitext(input_file)[1].lower() 51 | 52 | # Split and save segments 53 | for i in tqdm(range(total_segments), desc="Splitting audio"): 54 | start_sample = i * segment_length_samples 55 | end_sample = min((i + 1) * segment_length_samples, len(y)) 56 | 57 | # Extract segment 58 | segment = y[start_sample:end_sample] 59 | # Generate output filename 60 | output_filename = f"segment_{i+1:03d}{file_extension}" 61 | output_path = os.path.join(output_dir, output_filename) 62 | 63 | # Export segment 64 | sf.write(output_path, segment, sr) 65 | 66 | 67 | def resample_audio(input_file, output_file, target_sr=16000): 68 | """ 69 | Resample audio file to target sampling rate 70 | 71 | Args: 72 | input_file (str): Path to the input audio file 73 | output_file (str): Path to save the resampled audio file 74 | target_sr (int): Target sampling rate 75 | """ 76 | # Load audio file 77 | y, sr = librosa.load(input_file) 78 | 79 | # Resample if necessary 80 | if sr != target_sr: 81 | y = librosa.resample(y, orig_sr=sr, target_sr=target_sr) 82 | 83 | # Save resampled audio 84 | sf.write(output_file, y, target_sr) 85 | 86 | 87 | def process_directory(input_dir, output_dir, target_sr=16000): 88 | """ 89 | Process all audio files in a directory and resample them 90 | 91 | Args: 92 | input_dir (str): Directory containing audio files 93 | output_dir (str): Directory to save resampled files 94 | target_sr (int): Target sampling rate 95 | """ 96 | # Create output directory if it doesn't exist 97 | os.makedirs(output_dir, exist_ok=True) 98 | 99 | # Get all audio files 100 | audio_files = [ 101 | f for f in os.listdir(input_dir) if f.endswith(('.mp3', '.wav')) 102 | ] 103 | 104 | print(f"Found {len(audio_files)} audio files") 105 | 106 | # Process each audio file 107 | for audio_file in tqdm(audio_files, desc="Resampling audio files"): 108 | input_path = os.path.join(input_dir, audio_file) 109 | output_path = os.path.join(output_dir, audio_file) 110 | 111 | try: 112 | resample_audio(input_path, output_path, target_sr) 113 | except Exception as e: 114 | print(f"Error processing {audio_file}: {str(e)}") 115 | 116 | 117 | def extract_egemaps(audio_file): 118 | """ 119 | Extract eGeMAPs features from an audio file 120 | 121 | Args: 122 | audio_file (str): Path to the audio file 123 | 124 | Returns: 125 | numpy.ndarray: Array of eGeMAPs features 126 | """ 127 | # Initialize eGeMAPs feature extractor 128 | smile = opensmile.Smile(feature_set=opensmile.FeatureSet.eGeMAPSv02, 129 | feature_level=opensmile.FeatureLevel.Functionals, 130 | sampling_rate=16000) 131 | 132 | # Extract features 133 | features = smile.process_file(audio_file) 134 | return features.values[0] 135 | 136 | 137 | def process_audio_files(input_file, output_dir, segment_length=60): 138 | """ 139 | Process audio file: split, resample, and extract features 140 | 141 | Args: 142 | input_file (str): Path to the input audio file 143 | output_dir (str): Base directory for outputs 144 | segment_length (int): Length of each segment in seconds 145 | """ 146 | # Create necessary directories 147 | split_dir = os.path.join(output_dir, 'split') 148 | resampled_dir = os.path.join(output_dir, 'process') 149 | 150 | # Delete existing directories if they exist 151 | if os.path.exists(split_dir): 152 | print(f"Deleting existing split directory: {split_dir}") 153 | shutil.rmtree(split_dir) 154 | if os.path.exists(resampled_dir): 155 | print(f"Deleting existing resampled directory: {resampled_dir}") 156 | shutil.rmtree(resampled_dir) 157 | 158 | # Create fresh 
directories 159 | # os.makedirs(split_dir, exist_ok=True) 160 | # os.makedirs(resampled_dir, exist_ok=True) 161 | 162 | # Step 1: Split audio 163 | print("\nStep 1: Splitting audio file...") 164 | split_audio(input_file, split_dir, segment_length) 165 | 166 | # Step 2: Resample segments 167 | print("\nStep 2: Resampling segments to 16kHz...") 168 | process_directory(split_dir, resampled_dir, target_sr=16000) 169 | 170 | # Step 3: Extract features 171 | print("\nStep 3: Extracting eGeMAPs features...") 172 | all_features = [] 173 | file_names = [] 174 | 175 | audio_files = [ 176 | f for f in os.listdir(resampled_dir) if f.endswith(('.mp3', '.wav')) 177 | ] 178 | for audio_file in tqdm(audio_files, desc="Extracting features"): 179 | file_path = os.path.join(resampled_dir, audio_file) 180 | try: 181 | features = extract_egemaps(file_path) 182 | all_features.append(features) 183 | file_names.append(audio_file) 184 | except Exception as e: 185 | print(f"Error processing {audio_file}: {str(e)}") 186 | 187 | # Create DataFrame with features 188 | feature_names = opensmile.Smile( 189 | feature_set=opensmile.FeatureSet.eGeMAPSv02, 190 | feature_level=opensmile.FeatureLevel.Functionals, 191 | sampling_rate=16000).feature_names 192 | 193 | df = pd.DataFrame(all_features, index=file_names, columns=feature_names) 194 | 195 | # Save features 196 | features_file = os.path.join(output_dir, 'features.csv') 197 | if os.path.exists(features_file): 198 | print(f"Deleting existing features file: {features_file}") 199 | os.remove(features_file) 200 | df.to_csv(features_file) 201 | print(f"\nFeatures saved to: {features_file}") 202 | 203 | return df 204 | 205 | 206 | # # def perform_pca(features_df, n_components=3): 207 | # """ 208 | # Perform PCA on the features with manual input of number of components 209 | 210 | # Args: 211 | # features_df (pd.DataFrame): DataFrame containing features 212 | # n_components (int, optional): Number of components to keep. If None, will ask for input. 
213 | 214 | # Returns: 215 | # tuple: (PCA object, transformed features, explained variance ratio) 216 | # """ 217 | # # Standardize the features 218 | # scaler = StandardScaler() 219 | # scaled_features = scaler.fit_transform(features_df) 220 | 221 | # # If n_components is not provided, ask for input 222 | # if n_components is None: 223 | # max_components = min(features_df.shape[0], features_df.shape[1]) 224 | # print(f"\nMaximum possible components: {max_components}") 225 | # while True: 226 | # try: 227 | # n_components = 3 228 | # # int(input("Enter the number of components to keep: ")) 229 | # if 1 <= n_components <= max_components: 230 | # break 231 | # else: 232 | # print(f"Please enter a number between 1 and {max_components}") 233 | # except ValueError: 234 | # print("Please enter a valid number") 235 | 236 | # # Perform PCA 237 | # pca = PCA(n_components=n_components) 238 | # pca_result = pca.fit_transform(scaled_features) 239 | 240 | # # Print explained variance information 241 | # print("\nPCA Results:") 242 | # print(f"Number of components: {n_components}") 243 | # print("\nExplained variance ratio for each component:") 244 | # for i, var_ratio in enumerate(pca.explained_variance_ratio_, 1): 245 | # print(f"PC{i}: {var_ratio:.4f} ({var_ratio*100:.2f}%)") 246 | 247 | # print(f"\nCumulative explained variance: {sum(pca.explained_variance_ratio_):.4f} ({sum(pca.explained_variance_ratio_)*100:.2f}%)") 248 | 249 | # return pca, pca_result, pca.explained_variance_ratio_ 250 | 251 | # # def plot_pca_results(pca_result, explained_variance_ratio, output_dir): 252 | # """ 253 | # Plot PCA results including explained variance and component visualization 254 | 255 | # Args: 256 | # pca_result (np.ndarray): PCA transformed features 257 | # explained_variance_ratio (np.ndarray): Explained variance ratio for each component 258 | # output_dir (str): Directory to save plots 259 | # """ 260 | # # Create plots directory if it doesn't exist 261 | # plots_dir = os.path.join(output_dir, 'plots') 262 | # os.makedirs(plots_dir, exist_ok=True) 263 | 264 | # # Plot 1: Explained variance ratio 265 | # plt.figure(figsize=(10, 6)) 266 | # plt.bar(range(1, len(explained_variance_ratio) + 1), explained_variance_ratio) 267 | # plt.title('Explained Variance Ratio by Component') 268 | # plt.xlabel('Principal Component') 269 | # plt.ylabel('Explained Variance Ratio') 270 | # plt.savefig(os.path.join(plots_dir, 'explained_predict_variance.png')) 271 | # plt.close() 272 | 273 | # # Plot 2: Cumulative explained variance 274 | # plt.figure(figsize=(10, 6)) 275 | # cumulative_variance = np.cumsum(explained_variance_ratio) 276 | # plt.plot(range(1, len(cumulative_variance) + 1), cumulative_variance, 'bo-') 277 | # plt.title('Cumulative Explained Variance') 278 | # plt.xlabel('Number of Components') 279 | # plt.ylabel('Cumulative Explained Variance') 280 | # plt.grid(True) 281 | # plt.savefig(os.path.join(plots_dir, 'cumulative_predict_variance.png')) 282 | # plt.close() 283 | 284 | # # Plot 3: First two components scatter plot (if at least 2 components) 285 | # if pca_result.shape[1] >= 2: 286 | # plt.figure(figsize=(10, 6)) 287 | # plt.scatter(pca_result[:, 0], pca_result[:, 1]) 288 | # plt.title('First Two Principal Components') 289 | # plt.xlabel(f'PC1 ({explained_variance_ratio[0]*100:.1f}% variance)') 290 | # plt.ylabel(f'PC2 ({explained_variance_ratio[1]*100:.1f}% variance)') 291 | # plt.grid(True) 292 | # plt.savefig(os.path.join(plots_dir, 'pca_scatter.png')) 293 | # plt.close() 294 | 295 | # def main(): 296 | 
# # Specify your input and output paths 297 | # input_file = r"dataset\predict_non_ADHD.mp3" # Change this to your input file path 298 | # output_dir = r"processed" # Change this to your desired output directory 299 | # segment_length = 60 # Length of each segment in seconds 300 | # try: 301 | # # Process audio files 302 | # print("\nProcessing audio files...") 303 | # features_df = process_audio_files(input_file, output_dir, segment_length) 304 | 305 | # print("\nProcessing completed successfully!") 306 | # print(f"Total segments processed: {len(features_df)}") 307 | # print(f"Number of features extracted: {len(features_df.columns)}") 308 | 309 | # # Perform PCA 310 | # # print("\nPerforming PCA analysis...") 311 | # # pca, pca_result, explained_variance_ratio = perform_pca(features_df) 312 | 313 | # # # Create PCA results DataFrame 314 | # # pca_df = pd.DataFrame( 315 | # # pca_result, 316 | # # columns=[f'PC{i+1}' for i in range(pca_result.shape[1])], 317 | # # index=features_df.index 318 | # # ) 319 | 320 | # # # Save PCA results 321 | # # pca_file = os.path.join(output_dir, 'pca_predict_results.csv') 322 | # # pca_df.to_csv(pca_file) 323 | # # print(f"\nPCA results saved to: {pca_file}") 324 | 325 | # # # Plot PCA results 326 | # # print("\nGenerating PCA plots...") 327 | # # plot_pca_results(pca_result, explained_variance_ratio, output_dir) 328 | # # print(f"Plots saved to: {os.path.join(output_dir, 'plots')}") 329 | 330 | # except Exception as e: 331 | # print(f"An error occurred: {str(e)}") 332 | 333 | # if __name__ == "__main__": 334 | # main() 335 | -------------------------------------------------------------------------------- /create_train_test_data.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import pandas as pd 4 | from sklearn.preprocessing import StandardScaler 5 | from sklearn.decomposition import PCA 6 | import opensmile 7 | import librosa 8 | import matplotlib.pyplot as plt 9 | 10 | def get_feature_names(): 11 | """ 12 | Get feature names directly from opensmile 13 | 14 | Returns: 15 | list: List of feature names 16 | """ 17 | # Initialize eGeMAPs feature extractor 18 | smile = opensmile.Smile( 19 | feature_set=opensmile.FeatureSet.eGeMAPSv02, 20 | feature_level=opensmile.FeatureLevel.Functionals, 21 | sampling_rate=16000 22 | ) 23 | 24 | # Get feature names from opensmile 25 | return smile.feature_names 26 | 27 | def resample_audio(audio_file, target_sr=16000): 28 | """ 29 | Resample audio file to target sampling rate 30 | 31 | Args: 32 | audio_file (str): Path to the audio file 33 | target_sr (int): Target sampling rate (default 16000 Hz) 34 | 35 | Returns: 36 | tuple: (resampled audio data, target sampling rate) 37 | """ 38 | # Load audio file 39 | y, sr = librosa.load(audio_file) 40 | 41 | # Resample if necessary 42 | if sr != target_sr: 43 | y = librosa.resample(y, orig_sr=sr, target_sr=target_sr) 44 | 45 | return y, target_sr 46 | 47 | def extract_egemaps(audio_file): 48 | """ 49 | Extract eGeMAPs features from an audio file 50 | 51 | Args: 52 | audio_file (str): Path to the audio file 53 | 54 | Returns: 55 | numpy.ndarray: Array of eGeMAPs features 56 | """ 57 | # Initialize eGeMAPs feature extractor 58 | smile = opensmile.Smile( 59 | feature_set=opensmile.FeatureSet.eGeMAPSv02, 60 | feature_level=opensmile.FeatureLevel.Functionals, 61 | sampling_rate=16000 62 | ) 63 | 64 | # Extract features 65 | features = smile.process_file(audio_file) 66 | return features.values[0] 67 | def 
get_label(filename): 68 | """ 69 | Get label based on filename (1 for ADHD, 0 for non-ADHD) 70 | 71 | Args: 72 | filename (str): Name of the audio file 73 | 74 | Returns: 75 | int: Label (1 for ADHD, 0 for non-ADHD) 76 | """ 77 | return 1 if 'adhd' in filename.lower() else 0 78 | 79 | def process_audio_directory(input_dir, output_file): 80 | """ 81 | Process all audio files in a directory and extract eGeMAPs features 82 | 83 | Args: 84 | input_dir (str): Directory containing audio files 85 | output_file (str): Path to save the features CSV file 86 | 87 | Returns: 88 | tuple: (DataFrame with features, list of labels) 89 | """ 90 | # Get all audio files 91 | audio_files = [f for f in os.listdir(input_dir) if f.endswith(('.mp3', '.wav'))] 92 | 93 | # Initialize lists to store features, labels, and file names 94 | all_features = [] 95 | all_labels = [] 96 | file_names = [] 97 | 98 | # Process each audio file 99 | for audio_file in audio_files: 100 | print(f"Processing: {audio_file}") 101 | file_path = os.path.join(input_dir, audio_file) 102 | 103 | try: 104 | # Extract eGeMAPs features 105 | features = extract_egemaps(file_path) 106 | label = get_label(audio_file) 107 | 108 | all_features.append(features) 109 | all_labels.append(label) 110 | file_names.append(audio_file) 111 | print(f"Successfully processed: {audio_file}") 112 | except Exception as e: 113 | print(f"Error processing {audio_file}: {str(e)}") 114 | 115 | if not all_features: 116 | raise ValueError("No features were successfully extracted from any files") 117 | 118 | # Get feature names from opensmile 119 | feature_names = get_feature_names() 120 | 121 | # Convert to DataFrame with named columns 122 | df = pd.DataFrame(all_features, index=file_names, columns=feature_names) 123 | 124 | # Add label column to the DataFrame 125 | df['label'] = all_labels 126 | 127 | # Save features to CSV 128 | df.to_csv(output_file) 129 | print(f"\nFeatures saved to: {output_file}") 130 | print("\nDataset Statistics:") 131 | print(f"Total samples: {len(df)}") 132 | print(f"ADHD samples: {sum(all_labels)}") 133 | print(f"Non-ADHD samples: {len(all_labels) - sum(all_labels)}") 134 | 135 | return df, all_labels 136 | 137 | def PCA_analysis(df): 138 | # Load the eGeMAPs features from CSV 139 | 140 | # Separate features and labels 141 | X = df.drop('label', axis=1) # All columns except 'label' 142 | y = df['label'] # The label column 143 | 144 | # Standardize the data (mean=0, variance=1) 145 | scaler = StandardScaler() 146 | X_scaled = scaler.fit_transform(X) 147 | 148 | # Step 3: Apply PCA 149 | pca = PCA() # By default, it keeps all components 150 | X_pca = pca.fit_transform(X_scaled) 151 | 152 | # Get explained variance ratio 153 | explained_variance_ratio = pca.explained_variance_ratio_ 154 | cumulative_explained_variance = np.cumsum(explained_variance_ratio) 155 | 156 | # Step 4: Plot Explained Variance 157 | plt.figure(figsize=(10, 6)) 158 | 159 | # Individual explained variance 160 | plt.bar(range(1, len(explained_variance_ratio) + 1), explained_variance_ratio, alpha=0.5, align='center', label='Individual explained variance') 161 | 162 | # Cumulative explained variance 163 | plt.step(range(1, len(cumulative_explained_variance) + 1), cumulative_explained_variance, where='mid', label='Cumulative explained variance') 164 | 165 | # Highlight the point where cumulative variance reaches 95% 166 | target_variance = 0.95 167 | n_components_95 = np.argmax(cumulative_explained_variance >= target_variance) + 1 168 | plt.axvline(n_components_95, color='r', 
linestyle='--', label=f'{target_variance*100:.0f}% Variance (n_components={n_components_95})') 169 | 170 | plt.xlabel('Number of Principal Components') 171 | plt.ylabel('Explained Variance Ratio') 172 | plt.title('Explained Variance by Principal Components (eGeMAPs Features)') 173 | plt.legend(loc='best') 174 | plt.grid() 175 | plt.savefig('pca_test_variance_plot.png') 176 | plt.show() 177 | 178 | # Step 5: Print Component Information 179 | print("\nPCA Component Information:") 180 | for i, (ratio, cum_ratio) in enumerate(zip(explained_variance_ratio, cumulative_explained_variance)): 181 | print(f"PC{i+1}: {ratio*100:.2f}% (Cumulative: {cum_ratio*100:.2f}%)") 182 | 183 | # Step 6: Retain Selected Components 184 | # Re-fit PCA with the selected number of components 185 | pca_reduced = PCA(n_components=32) 186 | X_pca_reduced = pca_reduced.fit_transform(X_scaled) 187 | 188 | # Create DataFrame with reduced PCA components and labels 189 | pca_df = pd.DataFrame( 190 | X_pca_reduced, 191 | columns=[f'PC{i+1}' for i in range(32)], 192 | index=X.index 193 | ) 194 | pca_df['label'] = y # Add back the labels 195 | 196 | # Save the reduced PCA results 197 | pca_df.to_csv('pca_results.csv') 198 | print(f"\nReduced PCA results saved to: pca_results_reduced.csv") 199 | print(f"Number of components to capture {target_variance*100:.0f}% variance: {n_components_95}") 200 | print(f"Reduced data shape: {X_pca_reduced.shape}") 201 | 202 | # Step 7: Plot PCA Components 203 | plt.figure(figsize=(10, 6)) 204 | plt.scatter(X_pca_reduced[:, 0], X_pca_reduced[:, 1], c=y, cmap='viridis') 205 | plt.xlabel('First Principal Component') 206 | plt.ylabel('Second Principal Component') 207 | plt.title('PCA Components 1 vs 2 (Colored by ADHD Label)') 208 | plt.colorbar(label='ADHD Label') 209 | plt.savefig('pca_test_components_plot.png') 210 | plt.show() 211 | 212 | 213 | 214 | def main(): 215 | # Specify your directories and parameters 216 | input_dir = r"dataset\train_16k" # Directory containing audio files 217 | features_file = "train_feature.csv" # Where to save the features 218 | 219 | try: 220 | # Extract eGeMAPs features and get labels 221 | print("Extracting eGeMAPs features...") 222 | features_df, labels = process_audio_directory(input_dir, features_file) 223 | PCA_analysis(features_df, labels) 224 | except Exception as e: 225 | print(f"An error occurred: {str(e)}") 226 | 227 | if __name__ == "__main__": 228 | main() -------------------------------------------------------------------------------- /egemaps.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import pandas as pd 4 | from sklearn.preprocessing import StandardScaler 5 | from sklearn.decomposition import PCA 6 | import opensmile 7 | import librosa 8 | import soundfile as sf 9 | 10 | def get_feature_names(): 11 | """ 12 | Get feature names directly from opensmile 13 | 14 | Returns: 15 | list: List of feature names 16 | """ 17 | # Initialize eGeMAPs feature extractor 18 | smile = opensmile.Smile( 19 | feature_set=opensmile.FeatureSet.eGeMAPSv02, 20 | feature_level=opensmile.FeatureLevel.Functionals, 21 | sampling_rate=16000 22 | ) 23 | 24 | # Get feature names from opensmile 25 | return smile.feature_names 26 | 27 | def resample_audio(audio_file, target_sr=16000): 28 | """ 29 | Resample audio file to target sampling rate 30 | 31 | Args: 32 | audio_file (str): Path to the audio file 33 | target_sr (int): Target sampling rate (default 16000 Hz) 34 | 35 | Returns: 36 | tuple: 
(resampled audio data, target sampling rate) 37 | """ 38 | # Load audio file 39 | y, sr = librosa.load(audio_file) 40 | 41 | # Resample if necessary 42 | if sr != target_sr: 43 | y = librosa.resample(y, orig_sr=sr, target_sr=target_sr) 44 | 45 | return y, target_sr 46 | 47 | def extract_egemaps(audio_file): 48 | """ 49 | Extract eGeMAPs features from an audio file 50 | 51 | Args: 52 | audio_file (str): Path to the audio file 53 | 54 | Returns: 55 | numpy.ndarray: Array of eGeMAPs features 56 | """ 57 | # Initialize eGeMAPs feature extractor 58 | smile = opensmile.Smile( 59 | feature_set=opensmile.FeatureSet.eGeMAPSv02, 60 | feature_level=opensmile.FeatureLevel.Functionals, 61 | sampling_rate=16000 62 | ) 63 | 64 | # Extract features 65 | features = smile.process_file(audio_file) 66 | return features.values[0] 67 | 68 | # def get_label(filename): 69 | # """ 70 | # Get label based on filename (1 for ADHD, 0 for non-ADHD) 71 | 72 | # Args: 73 | # filename (str): Name of the audio file 74 | 75 | # Returns: 76 | # int: Label (1 for ADHD, 0 for non-ADHD) 77 | # """ 78 | # return 1 if 'adhd' in filename.lower() else 0 79 | 80 | def process_audio_directory(input_dir, output_file): 81 | """ 82 | Process all audio files in a directory and extract eGeMAPs features 83 | 84 | Args: 85 | input_dir (str): Directory containing audio files 86 | output_file (str): Path to save the features CSV file 87 | 88 | Returns: 89 | tuple: (DataFrame with features, list of labels) 90 | """ 91 | # Get all audio files 92 | audio_files = [f for f in os.listdir(input_dir) if f.endswith(('.mp3', '.wav'))] 93 | 94 | # Initialize lists to store features, labels, and file names 95 | all_features = [] 96 | # all_labels = [] 97 | file_names = [] 98 | 99 | # Process each audio file 100 | for audio_file in audio_files: 101 | print(f"Processing: {audio_file}") 102 | file_path = os.path.join(input_dir, audio_file) 103 | 104 | try: 105 | # Extract eGeMAPs features 106 | features = extract_egemaps(file_path) 107 | # label = get_label(audio_file) 108 | 109 | all_features.append(features) 110 | # all_labels.append(label) 111 | file_names.append(audio_file) 112 | print(f"Successfully processed: {audio_file}") 113 | except Exception as e: 114 | print(f"Error processing {audio_file}: {str(e)}") 115 | 116 | if not all_features: 117 | raise ValueError("No features were successfully extracted from any files") 118 | 119 | # Get feature names from opensmile 120 | feature_names = get_feature_names() 121 | 122 | # Convert to DataFrame with named columns 123 | df = pd.DataFrame(all_features, index=file_names, columns=feature_names) 124 | 125 | # Add label column to the DataFrame 126 | # df['label'] = all_labels 127 | 128 | # Save features to CSV 129 | df.to_csv(output_file) 130 | print(f"\nFeatures saved to: {output_file}") 131 | print("\nDataset Statistics:") 132 | print(f"Total samples: {len(df)}") 133 | # print(f"ADHD samples: {sum(all_labels)}") 134 | # print(f"Non-ADHD samples: {len(all_labels) - sum(all_labels)}") 135 | 136 | return df 137 | 138 | def main(): 139 | # Specify your directories and parameters 140 | input_dir = r"dataset\predict_16k" # Directory containing audio files 141 | features_file = "predict_feature.csv" # Where to save the features 142 | 143 | try: 144 | # Extract eGeMAPs features and get labels 145 | print("Extracting eGeMAPs features...") 146 | features_df, labels = process_audio_directory(input_dir, features_file) 147 | 148 | except Exception as e: 149 | print(f"An error occurred: {str(e)}") 150 | 151 | if __name__ 
== "__main__": 152 | main() -------------------------------------------------------------------------------- /pca.py: -------------------------------------------------------------------------------- 1 | # Step 1: Import Required Libraries 2 | import numpy as np 3 | import pandas as pd 4 | from sklearn.decomposition import PCA 5 | from sklearn.preprocessing import StandardScaler 6 | import matplotlib.pyplot as plt 7 | 8 | # Step 2: Load and Prepare Your Data 9 | # Load the eGeMAPs features from CSV 10 | df = pd.read_csv('predict_feature.csv', index_col=0) 11 | 12 | # Separate features and labels 13 | X = df.drop('label', axis=1) # All columns except 'label' 14 | y = df['label'] # The label column 15 | 16 | # Standardize the data (mean=0, variance=1) 17 | scaler = StandardScaler() 18 | X_scaled = scaler.fit_transform(X) 19 | 20 | # Step 3: Apply PCA 21 | pca = PCA() # By default, it keeps all components 22 | X_pca = pca.fit_transform(X_scaled) 23 | 24 | # Get explained variance ratio 25 | explained_variance_ratio = pca.explained_variance_ratio_ 26 | cumulative_explained_variance = np.cumsum(explained_variance_ratio) 27 | 28 | # Step 4: Plot Explained Variance 29 | plt.figure(figsize=(10, 6)) 30 | 31 | # Individual explained variance 32 | plt.bar(range(1, len(explained_variance_ratio) + 1), explained_variance_ratio, alpha=0.5, align='center', label='Individual explained variance') 33 | 34 | # Cumulative explained variance 35 | plt.step(range(1, len(cumulative_explained_variance) + 1), cumulative_explained_variance, where='mid', label='Cumulative explained variance') 36 | 37 | # Highlight the point where cumulative variance reaches 95% 38 | target_variance = 0.95 39 | n_components_95 = np.argmax(cumulative_explained_variance >= target_variance) + 1 40 | plt.axvline(n_components_95, color='r', linestyle='--', label=f'{target_variance*100:.0f}% Variance (n_components={n_components_95})') 41 | 42 | plt.xlabel('Number of Principal Components') 43 | plt.ylabel('Explained Variance Ratio') 44 | plt.title('Explained Variance by Principal Components (eGeMAPs Features)') 45 | plt.legend(loc='best') 46 | plt.grid() 47 | plt.savefig('pca_test_variance_plot.png') 48 | plt.show() 49 | 50 | # Step 5: Print Component Information 51 | print("\nPCA Component Information:") 52 | for i, (ratio, cum_ratio) in enumerate(zip(explained_variance_ratio, cumulative_explained_variance)): 53 | print(f"PC{i+1}: {ratio*100:.2f}% (Cumulative: {cum_ratio*100:.2f}%)") 54 | 55 | # Step 6: Retain Selected Components 56 | # Re-fit PCA with the selected number of components 57 | pca_reduced = PCA(n_components=32) 58 | X_pca_reduced = pca_reduced.fit_transform(X_scaled) 59 | 60 | # Create DataFrame with reduced PCA components and labels 61 | pca_df = pd.DataFrame( 62 | X_pca_reduced, 63 | columns=[f'PC{i+1}' for i in range(32)], 64 | index=X.index 65 | ) 66 | pca_df['label'] = y # Add back the labels 67 | 68 | # Save the reduced PCA results 69 | pca_df.to_csv('pca_results_reduced.csv') 70 | print(f"\nReduced PCA results saved to: pca_results_reduced.csv") 71 | print(f"Number of components to capture {target_variance*100:.0f}% variance: {n_components_95}") 72 | print(f"Reduced data shape: {X_pca_reduced.shape}") 73 | 74 | # Step 7: Plot PCA Components 75 | plt.figure(figsize=(10, 6)) 76 | plt.scatter(X_pca_reduced[:, 0], X_pca_reduced[:, 1], c=y, cmap='viridis') 77 | plt.xlabel('First Principal Component') 78 | plt.ylabel('Second Principal Component') 79 | plt.title('PCA Components 1 vs 2 (Colored by ADHD Label)') 80 | 
plt.colorbar(label='ADHD Label') 81 | plt.savefig('pca_test_components_plot.png') 82 | plt.show() -------------------------------------------------------------------------------- /predict.py: -------------------------------------------------------------------------------- 1 | import joblib 2 | from create_predict_data import process_audio_files 3 | def predict_adhd(features_df): 4 | """ 5 | Predict ADHD from features DataFrame 6 | 7 | Args: 8 | features_df (pd.DataFrame): DataFrame containing features 9 | 10 | Returns: 11 | dict: Dictionary containing prediction results 12 | { 13 | 'prediction': 'ADHD' or 'Non-ADHD', 14 | 'probability': float (0-1), 15 | 'percentage': float (0-100), 16 | 'message': str 17 | } 18 | """ 19 | try: 20 | # Load the trained model and scaler 21 | model = joblib.load('adhd_classifier.joblib') 22 | scaler = joblib.load('scaler.joblib') 23 | 24 | # Scale the features 25 | X_scaled = scaler.transform(features_df) 26 | 27 | # Make predictions 28 | # predictions = model.predict(X_scaled) 29 | probabilities = model.predict_proba(X_scaled) 30 | print(probabilities) 31 | # Calculate average probability for ADHD 32 | avg_probability = probabilities[:, 1].mean() 33 | 34 | # Determine final prediction 35 | final_prediction = 1 if avg_probability >= 0.5 else 0 36 | 37 | # Prepare result 38 | result = { 39 | 'success':True, 40 | 'prediction': f"prediction: {'ADHD' if final_prediction == 1 else 'Non-ADHD'}", 41 | 'probability': f"Probability of ADHD: {avg_probability:.2%}", 42 | 'percentage': float(avg_probability * 100), 43 | } 44 | 45 | return result 46 | 47 | except Exception as e: 48 | return { 49 | 'prediction': 'Error', 50 | 'probability': 0.0, 51 | 'percentage': 0.0, 52 | 'message': f'Error processing features: {str(e)}' 53 | } 54 | 55 | def main(): 56 | # Example usage 57 | # Process audio file 58 | features_df = process_audio_files( 59 | input_file="dataset/predict_non_ADHD.mp3" 60 | ) 61 | # Make prediction 62 | result = predict_adhd(features_df) 63 | 64 | print("\nPrediction Results:") 65 | print(f"Prediction: {result['prediction']}") 66 | print(f"Probability: {result['probability']:.4f}") 67 | print(f"Percentage: {result['percentage']:.2f}%") 68 | print(f"Message: {result['message']}") 69 | 70 | if __name__ == "__main__": 71 | main() -------------------------------------------------------------------------------- /railway.json: -------------------------------------------------------------------------------- 1 | { 2 | "build": { 3 | "builder": "nixpacks", 4 | "config": { 5 | "phases": { 6 | "setup": { 7 | "nixPkgs": ["python3", "ffmpeg-headless", "gcc"] 8 | }, 9 | "install": { 10 | "cmds": [ 11 | "python -m venv --copies /opt/venv", 12 | ". 
/opt/venv/bin/activate", 13 | "pip install -r requirements.txt" 14 | ] 15 | }, 16 | "start": { 17 | "cmds": ["python app.py"] 18 | } 19 | } 20 | } 21 | } 22 | } -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | flask==3.1.0 2 | gunicorn==21.2.0 3 | librosa==0.11.0 4 | soundfile==0.13.1 5 | opensmile==2.5.1 6 | pandas==2.2.3 7 | scikit-learn==1.6.1 8 | joblib==1.3.2 9 | tqdm==4.67.1 10 | matplotlib==3.10.1 11 | seaborn==0.13.2 12 | numpy==2.1.3 -------------------------------------------------------------------------------- /runtime.txt: -------------------------------------------------------------------------------- 1 | python-3.13.2 -------------------------------------------------------------------------------- /scaler.joblib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/scaler.joblib -------------------------------------------------------------------------------- /static/css/analysis-style.css: -------------------------------------------------------------------------------- 1 | .metrics-grid { 2 | display: grid; 3 | grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); 4 | gap: 1.5rem; 5 | margin-bottom: 2rem; 6 | } 7 | 8 | .metric-card { 9 | background: rgba(255, 255, 255, 0.1); 10 | border-radius: 15px; 11 | padding: 1.5rem; 12 | text-align: center; 13 | backdrop-filter: blur(10px); 14 | transition: transform 0.3s; 15 | } 16 | 17 | .metric-card:hover { 18 | transform: translateY(-5px); 19 | } 20 | 21 | .metric-card h3 { 22 | margin: 0 0 1rem 0; 23 | color: rgba(255, 255, 255, 0.8); 24 | } 25 | 26 | .metric-value { 27 | font-size: 2rem; 28 | font-weight: bold; 29 | margin-bottom: 1rem; 30 | color: var(--accent-font-color); 31 | } 32 | 33 | .metric-chart { 34 | width: 100%; 35 | height: 100px; 36 | } 37 | 38 | .back-btn { 39 | background: var(--accent-color); 40 | border: none; 41 | color: white; 42 | padding: 1rem 2rem; 43 | border-radius: 25px; 44 | cursor: pointer; 45 | font-size: 1.1rem; 46 | text-decoration: none; 47 | transition: transform 0.3s, box-shadow 0.3s; 48 | } 49 | 50 | .back-btn:hover { 51 | transform: translateY(-2px); 52 | box-shadow: 0 4px 15px rgba(74,144,226,0.4); 53 | } 54 | -------------------------------------------------------------------------------- /static/css/styles.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --main-color: #02002D; 3 | --accent-color: #4A90E2; 4 | --accent-font-color: #75a8e2; 5 | --font-color: #bbd7f8; 6 | --gradient-start: #02002D; 7 | --gradient-end: #1A1A4F; 8 | } 9 | 10 | body { 11 | margin: 0; 12 | padding: 0; 13 | min-height: 100vh; 14 | font-family: Arial, sans-serif; 15 | background: linear-gradient(135deg, var(--gradient-start), var(--gradient-end)); 16 | color: white; 17 | } 18 | 19 | body::before { 20 | content: ""; /* Required for pseudo-element */ 21 | position: absolute; /* Position absolute to cover the body */ 22 | top: 0; 23 | left: 0; 24 | right: 0; 25 | bottom: 0; 26 | background-image: url("../img/bg\ \(5\).jpg"); /* Path to the background image */ 27 | background-size: cover; /* Cover the entire viewport */ 28 | background-position: center; /* Center the image */ 29 | filter: blur(3px); /* Apply blur effect */ 30 | z-index: -1; /* Send the background behind other content */ 31 | } 32 
| 33 | .container { 34 | display: flex; 35 | flex-direction: column; 36 | align-items: center; 37 | justify-content: center; 38 | min-height: 100vh; 39 | padding: 20px; 40 | text-align: center; 41 | } 42 | 43 | 44 | .chart-container { 45 | background-color: white; 46 | border-radius: 8px; 47 | box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); 48 | padding: 20px; 49 | margin-top: 20px; 50 | } 51 | 52 | h1 { 53 | font-size: 2.5rem; 54 | margin-bottom: 3rem; 55 | color: var(--font-color); 56 | text-shadow: 0 2px 4px rgba(0,0,0,0.2); 57 | } 58 | 59 | .progress-container { 60 | display: flex; 61 | flex-direction: column; 62 | align-items: center; 63 | justify-content: center; 64 | margin: 40px 0; 65 | position: relative; 66 | } 67 | 68 | .progress-circles { 69 | position: relative; 70 | width: 200px; 71 | height: 200px; 72 | margin-bottom: 20px; 73 | } 74 | 75 | .progress-circle { 76 | position: relative; 77 | width: 100%; 78 | height: 100%; 79 | } 80 | 81 | .progress-circle svg { 82 | width: 200px; 83 | height: 200px; 84 | transform: rotate(-90deg); 85 | } 86 | 87 | .progress-circle circle { 88 | fill: none; 89 | stroke-width: 12; 90 | stroke-linecap: round; 91 | } 92 | 93 | .progress-circle .bg { 94 | stroke: rgba(255,255,255,0.1); 95 | } 96 | 97 | .progress-circle .progress { 98 | stroke: var(--accent-color); 99 | stroke-dasharray: 565; 100 | stroke-dashoffset: 565; 101 | stroke-linecap: round; 102 | transition: stroke-dashoffset 0.5s; 103 | } 104 | 105 | .percentage { 106 | position: absolute; 107 | top: 50%; 108 | left: 50%; 109 | transform: translate(-50%, -50%); 110 | font-size: 2.5rem; 111 | font-weight: bold; 112 | color: white; 113 | } 114 | 115 | .timer { 116 | font-size: 1.5rem; 117 | color: rgba(255,255,255,0.8); 118 | font-weight: bold; 119 | margin-top: 10px; 120 | } 121 | 122 | .record-btn { 123 | width: 200px; 124 | height: 200px; 125 | border-radius: 50%; 126 | /* background: var(--accent-color); */ 127 | background: radial-gradient( 128 | circle at center, 129 | /* Start with the light color #5791e5 */ 130 | #5791e5 0%, 131 | /* Transition to an intermediate color */ 132 | #5791e5 50%, 133 | /* End with the dark color #0c3d86 */ 134 | #0043ff 100% 135 | ); 136 | 137 | /* Add a subtle box shadow for depth */ 138 | box-shadow: 0 0 20px rgba(12, 61, 134, 0.3); 139 | border: none; 140 | color: white; 141 | font-size: 1.5rem; 142 | cursor: pointer; 143 | display: flex; 144 | flex-direction: column; 145 | align-items: center; 146 | justify-content: center; 147 | margin: 0 auto 2rem; 148 | transition: transform 0.2s, box-shadow 0.2s; 149 | } 150 | 151 | .results-portal { 152 | display: none; 153 | position: fixed; 154 | top: 50%; 155 | left: 50%; 156 | transform: translate(-50%, -50%); 157 | background: white; 158 | padding: 30px; 159 | border-radius: 10px; 160 | box-shadow: 0 0 20px rgba(0,0,0,0.2); 161 | z-index: 1000; 162 | max-width: 400px; 163 | width: 90%; 164 | color: black; 165 | } 166 | .results-portal h2 { 167 | margin-top: 0; 168 | color: #333; 169 | } 170 | 171 | .results-content { 172 | margin: 20px 0; 173 | } 174 | 175 | .result-item { 176 | margin: 10px 0; 177 | padding: 10px; 178 | background: #f5f5f5; 179 | border-radius: 5px; 180 | } 181 | 182 | .close-results { 183 | background: #4CAF50; 184 | color: white; 185 | border: none; 186 | padding: 10px 20px; 187 | border-radius: 5px; 188 | cursor: pointer; 189 | margin-top: 20px; 190 | } 191 | 192 | .close-results:hover { 193 | background: #45a049; 194 | } 195 | .overlay { 196 | display: none; 197 | position: fixed; 198 | top: 
0; 199 | left: 0; 200 | right: 0; 201 | bottom: 0; 202 | background: rgba(0,0,0,0.5); 203 | z-index: 999; 204 | } 205 | 206 | .file-upload { 207 | display: flex; 208 | flex-direction: column; 209 | align-items: center; 210 | justify-content: center; 211 | } 212 | 213 | /* Hide the default file input */ 214 | #file_input { 215 | display: none; /* Hide the input */ 216 | } 217 | 218 | /* Style for the file name display */ 219 | .file-name { 220 | font-size: 16px; 221 | color: var(--font-color); 222 | } 223 | 224 | .upload-btn { 225 | background: rgba(255,255,255,0.1); 226 | border: 1px solid rgba(255,255,255,0.2); 227 | color: white; 228 | padding: 0.8rem 1.5rem; 229 | border-radius: 25px; 230 | cursor: pointer; 231 | margin-bottom: 1rem; 232 | transition: background 0.3s; 233 | } 234 | 235 | .upload-btn:hover { 236 | background: rgba(255,255,255,0.2); 237 | } 238 | 239 | .analysis-btn { 240 | background: var(--accent-color); 241 | border: none; 242 | color: white; 243 | padding: 1.2rem 3rem; 244 | margin-top: 1rem; 245 | font-size: 1.2rem; 246 | border-radius: 30px; 247 | cursor: pointer; 248 | transition: transform 0.3s, box-shadow 0.3s; 249 | max-width: 400px; 250 | } 251 | 252 | .analysis-btn:hover { 253 | transform: translateY(-2px); 254 | box-shadow: 0 4px 15px rgba(74,144,226,0.4); 255 | } 256 | 257 | .analysis-btn:disabled { 258 | opacity: 0.5; 259 | cursor: not-allowed; 260 | } 261 | 262 | .analysis-btn.loading { 263 | position: relative; 264 | color: transparent; 265 | } 266 | 267 | .analysis-btn.loading::after { 268 | content: ""; 269 | position: absolute; 270 | width: 20px; 271 | height: 20px; 272 | top: 50%; 273 | left: 50%; 274 | transform: translate(-50%, -50%); 275 | border: 2px solid #ffffff; 276 | border-top: 2px solid transparent; 277 | border-radius: 50%; 278 | animation: spin 1s linear infinite; 279 | } 280 | 281 | 282 | .logo { 283 | width: 180px; 284 | height: 100px; 285 | position: absolute; 286 | top: 20px; 287 | left: 20px; 288 | } 289 | /* .logo img { 290 | width: 100%; 291 | height: 100%; 292 | object-fit: contain; 293 | } */ 294 | @keyframes spin { 295 | 0% { transform: translate(-50%, -50%) rotate(0deg); } 296 | 100% { transform: translate(-50%, -50%) rotate(360deg); } 297 | } 298 | 299 | .button-group { 300 | display: flex; 301 | gap: 10px; 302 | justify-content: center; 303 | margin-top: 20px; 304 | } 305 | 306 | .btn { 307 | padding: 10px 20px; 308 | border: none; 309 | border-radius: 5px; 310 | cursor: pointer; 311 | font-size: 16px; 312 | transition: all 0.3s ease; 313 | display: flex; 314 | align-items: center; 315 | gap: 8px; 316 | } 317 | 318 | .btn i { 319 | font-size: 18px; 320 | } 321 | 322 | .btn-primary { 323 | background-color: #4CAF50; 324 | color: white; 325 | } 326 | 327 | .btn-success { 328 | background-color: #2196F3; 329 | color: white; 330 | } 331 | 332 | .btn-info { 333 | background-color: #FF9800; 334 | color: white; 335 | } 336 | 337 | .btn:disabled { 338 | background-color:rgb(99, 96, 96); 339 | cursor: not-allowed; 340 | } 341 | 342 | .btn:hover:not(:disabled) { 343 | transform: translateY(-2px); 344 | box-shadow: 0 2px 5px rgba(0,0,0,0.2); 345 | } -------------------------------------------------------------------------------- /static/img/bg (1).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/static/img/bg (1).jpg -------------------------------------------------------------------------------- 
/static/img/bg (2).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/static/img/bg (2).jpg -------------------------------------------------------------------------------- /static/img/bg (3).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/static/img/bg (3).jpg -------------------------------------------------------------------------------- /static/img/bg (4).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/static/img/bg (4).jpg -------------------------------------------------------------------------------- /static/img/bg (5).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/static/img/bg (5).jpg -------------------------------------------------------------------------------- /static/img/bg (6).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/static/img/bg (6).jpg -------------------------------------------------------------------------------- /static/img/bg (7).jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/static/img/bg (7).jpg -------------------------------------------------------------------------------- /static/img/bg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/static/img/bg.jpg -------------------------------------------------------------------------------- /static/img/bg2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/static/img/bg2.jpg -------------------------------------------------------------------------------- /static/img/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Phoenix-95107/ADHD_recognition/7aed11606c6bb86a5d709839dfc29ac0c0404489/static/img/logo.png -------------------------------------------------------------------------------- /static/js/analysis.js: -------------------------------------------------------------------------------- 1 | document.addEventListener('DOMContentLoaded', () => { 2 | // Simple line chart drawing function 3 | function drawChart(canvas, data) { 4 | const ctx = canvas.getContext('2d'); 5 | const width = canvas.width; 6 | const height = canvas.height; 7 | 8 | // Clear canvas 9 | ctx.clearRect(0, 0, width, height); 10 | 11 | // Draw line 12 | ctx.beginPath(); 13 | ctx.strokeStyle = '#4A90E2'; 14 | ctx.lineWidth = 2; 15 | 16 | data.forEach((value, index) => { 17 | const x = (index / (data.length - 1)) * width; 18 | const y = height - (value / 100 * height); 19 | 20 | if (index === 0) { 21 | ctx.moveTo(x, y); 22 | } else { 23 | ctx.lineTo(x, 
y); 24 | } 25 | }); 26 | 27 | ctx.stroke(); 28 | } 29 | 30 | // Initialize charts 31 | const charts = document.querySelectorAll('.metric-chart'); 32 | charts.forEach(canvas => { 33 | // Set canvas size 34 | canvas.width = canvas.offsetWidth; 35 | canvas.height = canvas.offsetHeight; 36 | 37 | // Generate random data for demonstration 38 | const data = Array.from({length: 20}, () => Math.random() * 100); 39 | drawChart(canvas, data); 40 | }); 41 | 42 | // Handle window resize 43 | window.addEventListener('resize', () => { 44 | charts.forEach(canvas => { 45 | canvas.width = canvas.offsetWidth; 46 | canvas.height = canvas.offsetHeight; 47 | const data = Array.from({length: 20}, () => Math.random() * 100); 48 | drawChart(canvas, data); 49 | }); 50 | }); 51 | }); -------------------------------------------------------------------------------- /static/js/chart.js: -------------------------------------------------------------------------------- 1 | document.addEventListener('DOMContentLoaded', function() { 2 | // Sample data (first few and last few elements from the provided array) 3 | 4 | 5 | // Generate labels (indices) for the x-axis 6 | 7 | const harmonics_value = JSON.parse(document.getElementById("harmonics_chart_value").value); // .value is a property, not a method; assumes the hidden input holds a JSON-encoded array 8 | 9 | const labels = Array.from({ length: harmonics_value.length }, (_, i) => i + 1); 10 | 11 | const ctx = document.getElementById('harmonics_chart').getContext('2d'); 12 | new Chart(ctx, { 13 | type: 'line', 14 | data: { 15 | labels: labels, 16 | datasets: [{ 17 | label: 'Array Values', 18 | data: harmonics_value, 19 | borderColor: '#5791e5', 20 | backgroundColor: 'rgba(87, 145, 229, 0.1)', 21 | borderWidth: 2, 22 | pointRadius: 0, 23 | fill: true, 24 | }] 25 | }, 26 | options: { 27 | responsive: true, 28 | maintainAspectRatio: false, 29 | scales: { 30 | x: { 31 | title: { 32 | display: true, 33 | text: 'Index' 34 | } 35 | }, 36 | y: { 37 | title: { 38 | display: true, 39 | text: 'Value' 40 | }, 41 | ticks: { 42 | callback: function(value, index, values) { 43 | return value.toExponential(2); 44 | } 45 | } 46 | } 47 | }, 48 | plugins: { 49 | legend: { 50 | display: false 51 | }, 52 | tooltip: { 53 | callbacks: { 54 | label: function(context) { 55 | let label = context.dataset.label || ''; 56 | if (label) { 57 | label += ': '; 58 | } 59 | if (context.parsed.y !== null) { 60 | label += context.parsed.y.toExponential(6); 61 | } 62 | return label; 63 | } 64 | } 65 | } 66 | } 67 | } 68 | }); 69 | }); -------------------------------------------------------------------------------- /static/js/recorder.js: -------------------------------------------------------------------------------- 1 | document.addEventListener('DOMContentLoaded', () => { 2 | const recordButton = document.getElementById('recordButton'); 3 | const uploadButton = document.getElementById('uploadButton'); 4 | const fileInput = document.getElementById('file_input'); 5 | const analysisButton = document.getElementById('analysisButton'); 6 | const progressCircle = document.querySelector('.progress'); 7 | const percentageText = document.querySelector('.percentage'); 8 | const timerText = document.querySelector('.timer'); 9 | 10 | let mediaRecorder; 11 | let audioChunks = []; 12 | let startTime; 13 | let timerInterval; 14 | const MAX_DURATION = 60000; // 60 seconds (1 minute) in milliseconds 15 | 16 | // Request microphone access 17 | async function setupRecorder() { 18 | try { 19 | const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); 20 | mediaRecorder = new MediaRecorder(stream); 21 | 22 |
mediaRecorder.ondataavailable = (event) => { 23 | audioChunks.push(event.data); 24 | }; 25 | 26 | mediaRecorder.onstop = () => { 27 | const audioBlob = new Blob(audioChunks, { type: 'audio/wav' }); 28 | analysisButton.disabled = false; 29 | audioChunks = []; 30 | // Confirm before uploading 31 | const confirmUpload = confirm('Do you want to upload the recorded audio?'); 32 | if (confirmUpload) { 33 | uploadAudio(audioBlob); // Call the upload function 34 | } else { 35 | console.log('Upload canceled by user.'); 36 | } 37 | }; 38 | } catch (err) { 39 | console.error('Error accessing microphone:', err); 40 | alert('Unable to access microphone. Please ensure you have granted permission.'); 41 | } 42 | } 43 | 44 | 45 | // Function to upload audio to the backend 46 | async function uploadAudio(audioBlob) { 47 | const formData = new FormData(); 48 | formData.append('audio', audioBlob, 'recording.wav'); // Append the file with a name 49 | 50 | try { 51 | const response = await fetch('/upload', { 52 | method: 'POST', 53 | body: formData, 54 | }); 55 | 56 | if (response.ok) { 57 | const result = await response.json(); // Parse JSON response 58 | console.log('Audio uploaded successfully:', result); 59 | alert('Audio uploaded successfully!'); 60 | } else { 61 | console.error('Failed to upload audio:', response.statusText); 62 | alert('Failed to upload audio. Please try again.'); 63 | } 64 | } catch (error) { 65 | console.error('Error uploading audio:', error); 66 | alert('Error uploading audio. Please check your network connection.'); 67 | } 68 | } 69 | 70 | // Update progress and timer 71 | function updateProgress(elapsed) { 72 | const progress = Math.min((elapsed / MAX_DURATION) * 100, 100); 73 | const dashOffset = 283 - (283 * progress / 100); 74 | progressCircle.style.strokeDashoffset = dashOffset; 75 | percentageText.textContent = `${Math.round(progress)}%`; 76 | 77 | const seconds = Math.floor(elapsed / 1000); 78 | const minutes = Math.floor(seconds / 60); 79 | const remainingSeconds = seconds % 60; 80 | timerText.textContent = 81 | `${minutes.toString().padStart(2, '0')}:${remainingSeconds.toString().padStart(2, '0')}`; 82 | } 83 | 84 | // Record button events 85 | recordButton.addEventListener('mousedown', () => { 86 | setupRecorder().then(() => { 87 | audioChunks = []; 88 | mediaRecorder.start(); 89 | startTime = Date.now(); 90 | 91 | timerInterval = setInterval(() => { 92 | const elapsed = Date.now() - startTime; 93 | updateProgress(elapsed); 94 | 95 | if (elapsed >= MAX_DURATION) { 96 | mediaRecorder.stop(); 97 | clearInterval(timerInterval); 98 | } 99 | }, 100); 100 | }); 101 | }); 102 | 103 | recordButton.addEventListener('mouseup', () => { 104 | if (mediaRecorder && mediaRecorder.state === 'recording') { 105 | mediaRecorder.stop(); 106 | clearInterval(timerInterval); 107 | } 108 | }); 109 | 110 | // File upload handling 111 | uploadButton.addEventListener('click', () => { 112 | fileInput.click(); 113 | }); 114 | 115 | // fileInput.addEventListener('change', (e) => { 116 | // if (e.target.files.length > 0) { 117 | // analysisButton.disabled = false; 118 | // } 119 | // }); 120 | 121 | // // Analysis button 122 | // analysisButton.addEventListener('click', () => { 123 | // window.location.href = 'analysis.html'; 124 | // }); 125 | 126 | // JavaScript to handle file selection 127 | fileInput.addEventListener('change', function(e) { 128 | if (e.target.files.length > 0) { 129 | analysisButton.disabled = false; 130 | const fileName = this.files[0] ? 
this.files[0].name : 'No file chosen'; 131 | document.getElementById('fileName').textContent = fileName; 132 | } 133 | }); 134 | 135 | // Handle form submission 136 | document.getElementById('uploadForm').addEventListener('submit', function(event) { 137 | const fileInput = document.getElementById('file_input'); 138 | if (!fileInput.files.length) { 139 | alert('Please select a file before submitting.'); 140 | event.preventDefault(); return; // Prevent form submission and skip the loading state below 141 | } 142 | 143 | analysisButton.classList.add('loading'); 144 | analysisButton.disabled = true; 145 | 146 | // Simulate analysis process (replace with actual analysis logic) 147 | setTimeout(() => { 148 | analysisButton.classList.remove('loading'); 149 | analysisButton.disabled = false; 150 | // window.location.href = 'analysis.html'; 151 | }, 20000); // Simulating 20 seconds of analysis 152 | }); 153 | 154 | }); 155 | 156 | 157 | 158 | -------------------------------------------------------------------------------- /static/js/upload.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function () { 2 | const fileInput = document.getElementById("file_input"); 3 | const uploadButton = document.getElementById("uploadButton"); 4 | const analysisButton = document.getElementById("analysisButton"); 5 | const resultButton = document.getElementById("resultButton"); 6 | const fileName = document.getElementById("fileName"); 7 | const uploadForm = document.getElementById("uploadForm"); 8 | const progressCircle = document.querySelector(".progress"); 9 | const percentageText = document.querySelector(".percentage"); 10 | const timerElement = document.querySelector(".timer"); 11 | const resultsPortal = document.getElementById("resultsPortal"); 12 | const resultsContent = document.getElementById("resultsContent"); 13 | const overlay = document.getElementById("overlay"); 14 | 15 | let uploadStartTime; 16 | let processingStartTime; 17 | let timerInterval; 18 | let analysisResult = null; 19 | let progressInterval; 20 | let resultsTimeout; 21 | let currentFileName = ""; // Store current file name 22 | 23 | // Reset all UI elements 24 | function resetUI() { 25 | // Reset progress circle 26 | updateProgress(0); 27 | 28 | // Reset timer 29 | stopTimer(); 30 | timerElement.textContent = "00:00"; 31 | 32 | // Reset buttons 33 | analysisButton.disabled = true; 34 | resultButton.style.display = "none"; 35 | 36 | // Reset file input and name 37 | fileInput.value = ""; 38 | fileName.textContent = "No file chosen"; 39 | currentFileName = ""; 40 | 41 | // Clear any existing results 42 | resultsPortal.style.display = "none"; 43 | overlay.style.display = "none"; 44 | 45 | // Clear any existing timeouts and intervals 46 | clearTimeout(resultsTimeout); 47 | clearInterval(progressInterval); 48 | clearInterval(timerInterval); 49 | 50 | // Reset result data 51 | analysisResult = null; 52 | } 53 | 54 | // Update progress circle 55 | function updateProgress(percentage) { 56 | const radius = progressCircle.r.baseVal.value; 57 | const circumference = radius * 2 * Math.PI; 58 | const offset = circumference - (percentage / 100) * circumference; 59 | 60 | progressCircle.style.strokeDasharray = `${circumference} ${circumference}`; 61 | progressCircle.style.strokeDashoffset = offset; 62 | percentageText.textContent = `${Math.round(percentage)}%`; 63 | 64 | // Show results portal when progress reaches 100% 65 | if (percentage >= 100 && analysisResult) { 66 | clearTimeout(resultsTimeout); 67 |
resultsTimeout = setTimeout(() => { 68 | showResults(analysisResult); 69 | }, 1000); // 1-second delay before showing results 70 | stopTimer(); 71 | } 72 | } 73 | 74 | // Smooth progress animation 75 | function startProgressAnimation(estimateTime) { 76 | console.log(estimateTime); 77 | let currentProgress = 0; 78 | const targetProgress = 94; // We'll go up to 94% during processing 79 | const totalSteps = estimateTime * 100; // Convert seconds to 10ms steps (100 steps per second) 80 | const stepSize = targetProgress / totalSteps; 81 | const stepInterval = 10; // Update every 10ms 82 | 83 | clearInterval(progressInterval); 84 | progressInterval = setInterval(() => { 85 | currentProgress += stepSize; 86 | if (currentProgress >= targetProgress) { 87 | currentProgress = targetProgress; 88 | clearInterval(progressInterval); 89 | } 90 | updateProgress(currentProgress); 91 | }, stepInterval); 92 | } 93 | 94 | // Update timer 95 | function updateTimer() { 96 | const currentTime = new Date().getTime(); 97 | const elapsedTime = currentTime - uploadStartTime; 98 | const seconds = Math.floor(elapsedTime / 1000); 99 | const minutes = Math.floor(seconds / 60); 100 | const remainingSeconds = seconds % 60; 101 | timerElement.textContent = `${minutes 102 | .toString() 103 | .padStart(2, "0")}:${remainingSeconds.toString().padStart(2, "0")}`; 104 | } 105 | 106 | // Start timer 107 | function startTimer() { 108 | uploadStartTime = new Date().getTime(); 109 | timerInterval = setInterval(updateTimer, 1000); 110 | } 111 | 112 | // Stop timer 113 | function stopTimer() { 114 | clearInterval(timerInterval); 115 | } 116 | 117 | // Show results in portal 118 | function showResults(result) { 119 | let content = ""; 120 | if (result.success) { 121 | content = ` 122 |