├── .dockerignore
├── Models
│   ├── TensorBoard
│   │   ├── .dockerignore
│   │   ├── app
│   │   │   ├── services
│   │   │   │   ├── __init__.py
│   │   │   │   ├── generate_predictions.py
│   │   │   │   ├── generate_logs.py
│   │   │   │   └── generate_images.py
│   │   │   ├── utils
│   │   │   │   ├── __init__.py
│   │   │   │   └── image_utils.py
│   │   │   ├── __init__.py
│   │   │   └── routes
│   │   │       ├── __init__.py
│   │   │       └── logs.py
│   │   ├── models
│   │   │   └── MobileNet
│   │   │       ├── __init__.py
│   │   │       ├── model.py
│   │   │       └── conv_layers
│   │   │           ├── convscript.py
│   │   │           ├── conv_layer_names.txt
│   │   │           └── all_layers.txt
│   │   ├── requirements.txt
│   │   ├── server.py
│   │   ├── start
│   │   │   ├── get_weights.py
│   │   │   └── start.sh
│   │   └── Dockerfile
│   └── TensorFlow
│       ├── app
│       │   ├── services
│       │   │   ├── __init__.py
│       │   │   ├── generate_predictions.py
│       │   │   └── generate_images.py
│       │   ├── utils
│       │   │   ├── __init__.py
│       │   │   ├── image_utils.py
│       │   │   └── s3_utils.py
│       │   ├── __init__.py
│       │   └── routes
│       │       ├── __init__.py
│       │       ├── predictions.py
│       │       ├── featuremaps.py
│       │       ├── heatmaps.py
│       │       └── preprocess.py
│       ├── models
│       │   └── MobileNet
│       │       ├── __init__.py
│       │       ├── model.py
│       │       └── conv_layers
│       │           ├── convscript.py
│       │           ├── conv_layer_names.txt
│       │           └── all_layers.txt
│       ├── server.py
│       ├── start
│       │   ├── get_weights.py
│       │   └── start.sh
│       ├── Dockerfile
│       └── requirements.txt
├── README.md
├── public
│   ├── logo.jpg
│   ├── logo.png
│   ├── title.png
│   ├── background.jpg
│   ├── loadspinner.gif
│   ├── logo copy.jpeg
│   ├── logoBlack.png
│   ├── backgroundFlare.jpg
│   ├── backgroundFlareBW.avif
│   ├── modal-background-1.jpg
│   ├── modal-background-2.jpg
│   ├── modal-background-3.jpg
│   ├── modal-background-4.jpg
│   ├── xflair_logo_clear.png
│   ├── input_images_comparison.png
│   ├── vercel.svg
│   └── next.svg
├── src
│   └── app
│       ├── favicon.ico
│       ├── globals.css
│       ├── ui
│       │   ├── resizedImage.tsx
│       │   ├── gridMapGif.tsx
│       │   ├── navComponents
│       │   │   ├── AccordionLink.tsx
│       │   │   ├── Icon.tsx
│       │   │   ├── Accordion.tsx
│       │   │   └── Sidenav.tsx
│       │   ├── heatMapGif.tsx
│       │   ├── modal.tsx
│       │   ├── top5Classes.tsx
│       │   ├── dotButton.tsx
│       │   ├── heatmap.tsx
│       │   ├── carousel.tsx
│       │   └── newModal.tsx
│       ├── lib
│       │   ├── definitions.ts
│       │   └── accordionData.ts
│       ├── (routes)
│       │   ├── models
│       │   │   ├── actions
│       │   │   │   ├── logs
│       │   │   │   │   └── route.ts
│       │   │   │   └── image
│       │   │   │       ├── heatmaps
│       │   │   │       │   └── route.ts
│       │   │   │       ├── featmaps
│       │   │   │       │   └── route.ts
│       │   │   │       ├── predict
│       │   │   │       │   └── route.ts
│       │   │   │       ├── upload
│       │   │   │       │   └── route.ts
│       │   │   │       ├── preprocess
│       │   │   │       │   └── route.ts
│       │   │   │       └── gifs
│       │   │   │           └── route.ts
│       │   │   ├── agnostic
│       │   │   │   ├── text
│       │   │   │   │   ├── gpt
│       │   │   │   │   │   └── page.tsx
│       │   │   │   │   └── texttospeech
│       │   │   │   │       └── page.tsx
│       │   │   │   └── images
│       │   │   │       ├── detection
│       │   │   │       │   └── page.tsx
│       │   │   │       ├── generation
│       │   │   │       │   └── page.tsx
│       │   │   │       └── classification
│       │   │   │           └── page.tsx
│       │   │   ├── pytorch
│       │   │   │   ├── text
│       │   │   │   │   ├── gpt
│       │   │   │   │   │   └── page.tsx
│       │   │   │   │   └── texttospeech
│       │   │   │   │       └── page.tsx
│       │   │   │   └── images
│       │   │   │       ├── detection
│       │   │   │       │   └── page.tsx
│       │   │   │       ├── classification
│       │   │   │       │   └── page.tsx
│       │   │   │       └── generation
│       │   │   │           └── page.tsx
│       │   │   └── tensorflow
│       │   │       ├── text
│       │   │       │   ├── gpt
│       │   │       │   │   └── page.tsx
│       │   │       │   └── texttospeech
│       │   │       │       └── page.tsx
│       │   │       └── images
│       │   │           ├── detection
│       │   │           │   └── page.tsx
│       │   │           ├── generation
│       │   │           │   └── page.tsx
│       │   │           └── classification
│       │   │               └── page.tsx
│       │   ├── about-us
│       │   │   └── page.tsx
│       │   ├── libraries
│       │   │   ├── python
│       │   │   │   └── page.tsx
│       │   │   └── react
│       │   │       ├── javascript
│       │   │       │   └── page.tsx
│       │   │       └── typescript
│       │   │           └── page.tsx
│       │   └── documentation
│       │       ├── python
│       │       │   └── page.tsx
│       │       └── javascript-typescript
│       │           └── page.tsx
│       ├── Sidebar.module.css
│       ├── page.tsx
│       ├── layout.tsx
│       └── page.module.css
├── next.config.mjs
├── postcss.config.js
├── .eslintrc.json
├── Dockerfile-dev
├── Dockerfile
├── docker-compose-tensorboard.yml
├── docker-compose-prod.yml
├── docker-compose-dev.yml
├── tsconfig.json
├── tailwind.config.js
├── scripts
│   ├── newHeatmap.ts
│   └── newRenderTest.ts
├── .gitignore
└── package.json
/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | Models
--------------------------------------------------------------------------------
/Models/TensorBoard/.dockerignore:
--------------------------------------------------------------------------------
1 | logs/*
--------------------------------------------------------------------------------
/Models/TensorBoard/app/services/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Models/TensorFlow/app/services/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Models/TensorBoard/models/MobileNet/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Models/TensorFlow/models/MobileNet/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | xFlair is still under development. More info coming soon!
--------------------------------------------------------------------------------
/public/logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/logo.jpg
--------------------------------------------------------------------------------
/public/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/logo.png
--------------------------------------------------------------------------------
/public/title.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/title.png
--------------------------------------------------------------------------------
/src/app/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/src/app/favicon.ico
--------------------------------------------------------------------------------
/src/app/globals.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
--------------------------------------------------------------------------------
/public/background.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/background.jpg
--------------------------------------------------------------------------------
/public/loadspinner.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/loadspinner.gif
--------------------------------------------------------------------------------
/public/logo copy.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/logo copy.jpeg
--------------------------------------------------------------------------------
/public/logoBlack.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/logoBlack.png
--------------------------------------------------------------------------------
/public/backgroundFlare.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/backgroundFlare.jpg
--------------------------------------------------------------------------------
/public/backgroundFlareBW.avif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/backgroundFlareBW.avif
--------------------------------------------------------------------------------
/public/modal-background-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/modal-background-1.jpg
--------------------------------------------------------------------------------
/public/modal-background-2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/modal-background-2.jpg
--------------------------------------------------------------------------------
/public/modal-background-3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/modal-background-3.jpg
--------------------------------------------------------------------------------
/public/modal-background-4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/modal-background-4.jpg
--------------------------------------------------------------------------------
/public/xflair_logo_clear.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/xflair_logo_clear.png
--------------------------------------------------------------------------------
/next.config.mjs:
--------------------------------------------------------------------------------
1 | /** @type {import('next').NextConfig} */
2 | const nextConfig = {};
3 |
4 | export default nextConfig;
5 |
--------------------------------------------------------------------------------
/postcss.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | plugins: {
3 | tailwindcss: {},
4 | autoprefixer: {},
5 | },
6 | }
7 |
--------------------------------------------------------------------------------
/public/input_images_comparison.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oslabs-beta/xflair/HEAD/public/input_images_comparison.png
--------------------------------------------------------------------------------
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "next/core-web-vitals",
3 | "rules": {
4 | "@next/next/no-img-element": "off"
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/Models/TensorBoard/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask
2 | Flask-Cors
3 | tensorflow
4 | numpy
5 | Pillow
6 | opencv-python
7 | boto3
8 | gunicorn
9 | matplotlib
--------------------------------------------------------------------------------
/Models/TensorBoard/server.py:
--------------------------------------------------------------------------------
1 | from app import create_app
2 |
3 | app = create_app()
4 |
5 | if __name__ == '__main__':
6 | app.run(debug=True)
--------------------------------------------------------------------------------
/Models/TensorFlow/server.py:
--------------------------------------------------------------------------------
1 | from app import create_app
2 |
3 | app = create_app()
4 |
5 | if __name__ == '__main__':
6 | app.run(debug=True)
--------------------------------------------------------------------------------
/Models/TensorBoard/start/get_weights.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | def get_model():
4 | return tf.keras.applications.MobileNetV2(weights='imagenet')
5 |
6 | model = get_model()
--------------------------------------------------------------------------------
/Models/TensorFlow/start/get_weights.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | def get_model():
4 | return tf.keras.applications.MobileNetV2(weights='imagenet')
5 |
6 | model = get_model()
--------------------------------------------------------------------------------
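A note on the two get_weights.py scripts above: their only job is to warm the Keras weight cache at container start. tf.keras caches downloaded weights under ~/.keras/models, so the later get_model() calls inside the request handlers reload from disk instead of the network. A minimal sketch of that behavior (the timing print is illustrative only):

import time
import tensorflow as tf

# The first call downloads the ImageNet weights into ~/.keras/models;
# repeat calls in the same container load them from the local cache.
start = time.time()
model = tf.keras.applications.MobileNetV2(weights='imagenet')
print(f"loaded {model.name}: {model.count_params():,} params in {time.time() - start:.1f}s")
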
/Dockerfile-dev:
--------------------------------------------------------------------------------
1 | FROM node:latest
2 |
3 | WORKDIR /usr/src/xflair
4 |
5 | COPY . /usr/src/xflair
6 |
7 | RUN npm install
8 |
9 | EXPOSE 3000
10 |
11 | CMD ["npm", "run", "dev"]
12 |
13 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:latest
2 |
3 | WORKDIR /usr/src/xflair
4 |
5 | COPY . /usr/src/xflair
6 |
7 | RUN npm install
8 |
9 | RUN npm run build
10 |
11 | EXPOSE 3000
12 |
13 | CMD ["npm", "start"]
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/Models/TensorBoard/app/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | # Import any modules or subpackages you want to make available
3 | # when importing the `utils` package
4 |
5 | # For example:
6 | # from .module1 import *
7 | # from .module2 import *
8 | # from .subpackage import *
9 |
--------------------------------------------------------------------------------
/Models/TensorFlow/app/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | # Import any modules or subpackages you want to make available
3 | # when importing the `utils` package
4 |
5 | # For example:
6 | # from .module1 import *
7 | # from .module2 import *
8 | # from .subpackage import *
9 |
--------------------------------------------------------------------------------
/Models/TensorBoard/app/__init__.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 | from app.routes import routes_bp
3 | from flask_cors import CORS
4 |
5 | def create_app():
6 | app = Flask(__name__)
7 | CORS(app)
8 | app.register_blueprint(routes_bp)
9 | return app
10 |
--------------------------------------------------------------------------------
/Models/TensorFlow/app/__init__.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 | from app.routes import routes_bp
3 | from flask_cors import CORS
4 |
5 | def create_app():
6 | app = Flask(__name__)
7 | CORS(app)
8 | app.register_blueprint(routes_bp)
9 | return app
10 |
--------------------------------------------------------------------------------
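Because create_app() follows the Flask application-factory pattern, the service can be exercised in-process without gunicorn or Docker. A minimal sketch using Flask's test client (the base64 payload is a stand-in for a real encoded image):

from app import create_app

app = create_app()

# Drive the app in-process; no server or port binding required.
with app.test_client() as client:
    resp = client.post('/predictions/MobileNet',
                       json={'data': 'PUT-A-REAL-BASE64-IMAGE-HERE'})
    print(resp.status_code, resp.get_json())
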
/Models/TensorBoard/app/routes/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from flask import Blueprint
3 |
4 | # Create a blueprint for the routes
5 | routes_bp = Blueprint('routes', __name__)
6 |
7 |
8 | # Import the routes
9 | from app.routes import logs
10 |
11 | # Register the routes
12 |
13 | routes_bp.register_blueprint(logs.logs)
14 |
15 |
16 |
--------------------------------------------------------------------------------
/Models/TensorFlow/start/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Navigate to the directory containing the model container files
4 | cd /TensorFlow
5 |
6 | # Download the weights by running the get_weights.py script
7 | echo "Downloading MobileNetV2 weights..."
8 | python start/get_weights.py
9 |
10 | # Start Gunicorn with your Flask application
11 | echo "Starting Gunicorn..."
12 | exec gunicorn --workers=9 --timeout 120 --bind 0.0.0.0:5000 server:app
--------------------------------------------------------------------------------
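Once start.sh has gunicorn listening on port 5000, clients post a base64-encoded image to the model routes, mirroring what the Next.js action routes do. A hedged sketch using requests (the file name cat.jpg is a placeholder):

import base64
import requests

# Encode the image the same way the frontend does before proxying it on.
with open('cat.jpg', 'rb') as f:  # placeholder input file
    payload = base64.b64encode(f.read()).decode('utf-8')

resp = requests.post('http://localhost:5000/predictions/MobileNet',
                     json={'data': payload}, timeout=120)
print(resp.json())
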
/Models/TensorFlow/app/routes/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from flask import Blueprint
3 |
4 | # Create a blueprint for the routes
5 | routes_bp = Blueprint('routes', __name__)
6 |
7 |
8 | # Import the routes
9 | from app.routes import featuremaps, heatmaps, preprocess, predictions
10 |
11 | # Register the routes
12 |
13 | routes_bp.register_blueprint(featuremaps.featuremaps)
14 | routes_bp.register_blueprint(heatmaps.heatmaps)
15 | routes_bp.register_blueprint(preprocess.preprocess)
16 | routes_bp.register_blueprint(predictions.predictions)
17 |
18 |
19 |
--------------------------------------------------------------------------------
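The nesting above (feature blueprints registered on routes_bp, which create_app() then registers on the app) still yields a flat URL space: /predictions/<model_name>, /featuremaps/<model_name>, and so on, since neither registration adds a url_prefix. A quick sketch to confirm the resulting routes:

from app import create_app

# Print every rule Flask derived from the nested blueprint registration.
app = create_app()
for rule in app.url_map.iter_rules():
    print(sorted(rule.methods), rule.rule)
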
/Models/TensorBoard/start/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Navigate to the directory containing the model container files
4 | cd /TensorBoard
5 |
6 | # Download the weights by running the get_weights.py script
7 | echo "Downloading MobileNetV2 weights..."
8 | python start/get_weights.py
9 |
10 | # Start TensorBoard in the background
11 | echo "Starting TensorBoard..."
12 | tensorboard --logdir=./logs --bind_all --port=6006 &
13 |
14 | # Start Gunicorn with your Flask application
15 | echo "Starting Gunicorn..."
16 | exec gunicorn --workers=5 --timeout 120 --bind 0.0.0.0:5000 server:app
--------------------------------------------------------------------------------
/public/vercel.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/app/ui/resizedImage.tsx:
--------------------------------------------------------------------------------
1 | import { useEffect, useRef, useState } from 'react';
2 | import Image from 'next/image';
3 | import styles from '../page.module.css';
4 |
5 | interface Props {
6 | preprocessedFilePath: string;
7 | }
8 |
9 | let resizedImage: File | undefined;
10 |
11 | export default function ResizedImage(props: Props) {
12 | return (
13 |
14 |
21 |
22 | );
23 | }
24 |
--------------------------------------------------------------------------------
/docker-compose-tensorboard.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | app:
5 | build:
6 | context: .
7 | dockerfile: Dockerfile-dev
8 | image: xflair/dev:latest
9 | environment:
10 | - mode=logs
11 | ports:
12 | - '3000:3000'
13 | depends_on:
14 | - model
15 |
16 | model:
17 | build:
18 | context: ./Models/TensorBoard
19 | dockerfile: Dockerfile
20 | image: xflair/tensorboard:latest
21 | ports:
22 | - '5000:5000'
23 | - '6006:6006'
24 | volumes:
25 | - ./Models/TensorBoard/logs:/TensorBoard/logs
26 | entrypoint: /TensorBoard/start/start.sh
27 |
--------------------------------------------------------------------------------
/docker-compose-prod.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | app:
5 | build:
6 | context: .
7 | dockerfile: Dockerfile
8 | image: xflair/prod
9 | ports:
10 | - "3000:3000"
11 | environment:
12 | - NODE_ENV=production
13 | - SERVICE_HOST=host.docker.internal
14 | env_file:
15 | - .env
16 | depends_on:
17 | - model
18 |
19 | model:
20 | build:
21 | context: ./Models/TensorFlow
22 | dockerfile: Dockerfile
23 | image: xflair/tensorflow:latest
24 | ports:
25 | - "5000:5000"
26 | env_file:
27 | - ./Models/TensorFlow/.env
28 | entrypoint: /TensorFlow/start/start.sh
--------------------------------------------------------------------------------
/docker-compose-dev.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 |
3 | services:
4 | app:
5 | build:
6 | context: .
7 | dockerfile: Dockerfile-dev
8 | image: xflair/dev:latest
9 | ports:
10 | - '3000:3000'
11 | environment:
12 | - mode=development
13 | - SERVICE_HOST=host.docker.internal
14 | env_file:
15 | - .env
16 | depends_on:
17 | - model
18 |
19 | model:
20 | build:
21 | context: ./Models/TensorFlow
22 | dockerfile: Dockerfile
23 | image: xflair/tensorflow:latest
24 | ports:
25 | - '5000:5000'
26 | env_file:
27 | - ./Models/TensorFlow/.env
28 | entrypoint: /TensorFlow/start/start.sh
29 |
--------------------------------------------------------------------------------
/src/app/lib/definitions.ts:
--------------------------------------------------------------------------------
1 | export type Upload = {
2 | id: string;
3 | name: string;
4 | size: number;
5 | image: Buffer;
6 | };
7 |
8 | export type Heatmaps = {
9 | heatmaps: string[] | null
10 | progressbars: string[] | null
11 | }
12 |
13 | export type Featuremaps = {
14 | featuremaps: string[] | null
15 | progressbars: string[] | null
16 | }
17 |
18 | export interface Top5Obj {
19 | [key: string]: number;
20 | }
21 |
22 | export interface AccordionContent {
23 | type: 'Link' | 'Accordion';
24 | label: string;
25 | path?: string;
26 | id?: string[];
27 | icon?: 'gear' | 'book' | 'pages';
28 | contents?: AccordionContent[];
29 | }
30 |
--------------------------------------------------------------------------------
/Models/TensorBoard/models/MobileNet/model.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import os
3 |
4 | dir = os.path.dirname(__file__)
5 |
6 | def get_model():
7 | return tf.keras.applications.MobileNetV2(weights='imagenet')
8 |
9 | def get_layer_names():
10 | layer_names_file = os.path.join(dir, 'conv_layers', 'conv_layer_names.txt')
11 | with open(layer_names_file, "r") as f:
12 | layer_names = f.read().splitlines()
13 | return layer_names
14 |
15 | def get_all_layer_names():
16 | layer_names_file = os.path.join(dir, 'conv_layers', 'all_layers.txt')
17 | with open(layer_names_file, "r") as f:
18 | layer_names = f.read().splitlines()
19 | return layer_names
20 |
21 |
--------------------------------------------------------------------------------
/Models/TensorFlow/models/MobileNet/model.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import os
3 |
4 | dir = os.path.dirname(__file__)
5 |
6 | def get_model():
7 | return tf.keras.applications.MobileNetV2(weights='imagenet')
8 |
9 | def get_layer_names():
10 | layer_names_file = os.path.join(dir, 'conv_layers', 'conv_layer_names.txt')
11 | with open(layer_names_file, "r") as f:
12 | layer_names = f.read().splitlines()
13 | return layer_names
14 |
15 | def get_all_layer_names():
16 | layer_names_file = os.path.join(dir, 'conv_layers', 'all_layers.txt')
17 | with open(layer_names_file, "r") as f:
18 | layer_names = f.read().splitlines()
19 | return layer_names
20 |
21 |
--------------------------------------------------------------------------------
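The conv layer names that get_layer_names() reads back are what feature-map extraction needs: with the Keras functional API you can build one multi-output model that returns every listed layer's activation in a single forward pass. A minimal sketch, assuming it runs from the service root so the models package is importable:

import tensorflow as tf
from models.MobileNet.model import get_model, get_layer_names

model = get_model()
layer_names = get_layer_names()

# One sub-model whose outputs are the activations of each named layer.
activation_model = tf.keras.Model(
    inputs=model.input,
    outputs=[model.get_layer(name).output for name in layer_names],
)

# Each activations[i] has shape (1, H_i, W_i, C_i) for one 224x224x3 input.
activations = activation_model(tf.zeros((1, 224, 224, 3)))
print(len(activations))
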
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "lib": ["dom", "dom.iterable", "esnext"],
4 | "allowJs": true,
5 | "skipLibCheck": true,
6 | "strict": true,
7 | "noEmit": true,
8 | "esModuleInterop": true,
9 | "module": "esnext",
10 | "moduleResolution": "bundler",
11 | "resolveJsonModule": true,
12 | "isolatedModules": true,
13 | "jsx": "preserve",
14 | "incremental": true,
15 | "plugins": [
16 | {
17 | "name": "next"
18 | }
19 | ],
20 | "paths": {
21 | "@/*": ["./src/*"]
22 | }
23 | },
24 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
25 | "exclude": ["node_modules"]
26 | }
27 |
--------------------------------------------------------------------------------
/tailwind.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | module.exports = {
3 | content: [
4 | "./src/pages/**/*.{js,ts,jsx,tsx,mdx}",
5 | "./src/components/**/*.{js,ts,jsx,tsx,mdx}",
6 | "./src/app/**/*.{js,ts,jsx,tsx,mdx}",
7 | 'node_modules/preline/dist/*.js',
8 | ],
9 | theme: {
10 | extend: {
11 | colors: {
12 | palette: {
13 | light: '#F5F3FF',
14 | primary: '#7C3AED',
15 | dark: '#6D28D9',
16 | },
17 | },
18 | fontFamily: {
19 | primary: ['Poppins'],
20 | secondary: ['Open Sans'],
21 | saira: ['Saira'],
22 | },
23 | },
24 | },
25 | variants: {
26 | extend: {},
27 | },
28 | plugins: [
29 | require('preline/plugin'),
30 | ],
31 | }
32 |
33 |
--------------------------------------------------------------------------------
/Models/TensorFlow/models/MobileNet/conv_layers/convscript.py:
--------------------------------------------------------------------------------
1 | import os
2 | import tensorflow as tf
3 |
4 | # Load MobileNetV2 model
5 | model = tf.keras.applications.MobileNetV2(input_shape=(224, 224, 3),
6 |                                           include_top=True,
7 |                                           weights='imagenet')
8 |
9 | # Save the layer names next to this script rather than to a machine-specific absolute path
10 | file_path = os.path.join(os.path.dirname(__file__), 'conv_layer_names.txt')
11 |
12 | # Open a file in write mode
13 | with open(file_path, 'w') as file:
14 |     # Iterate through the model's layers
15 |     for layer in model.layers:
16 |         # Write the layer name followed by a newline character to the file
17 |         file.write(layer.name + '\n')
18 |
19 | print(f"Layer names saved to {file_path}")
--------------------------------------------------------------------------------
/Models/TensorBoard/models/MobileNet/conv_layers/convscript.py:
--------------------------------------------------------------------------------
1 | import os
2 | import tensorflow as tf
3 |
4 | # Load MobileNetV2 model
5 | model = tf.keras.applications.MobileNetV2(input_shape=(224, 224, 3),
6 |                                           include_top=True,
7 |                                           weights='imagenet')
8 |
9 | # Save the layer names next to this script rather than to a machine-specific absolute path
10 | file_path = os.path.join(os.path.dirname(__file__), 'conv_layer_names.txt')
11 |
12 | # Open a file in write mode
13 | with open(file_path, 'w') as file:
14 |     # Iterate through the model's layers
15 |     for layer in model.layers:
16 |         # Write the layer name followed by a newline character to the file
17 |         file.write(layer.name + '\n')
18 |
19 | print(f"Layer names saved to {file_path}")
--------------------------------------------------------------------------------
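As the committed conv_layer_names.txt below shows, this script actually writes every layer name (batch-norm and ReLU layers included), not just convolutions. If only conv layers were wanted, a filter on layer type would look like this sketch:

import tensorflow as tf

model = tf.keras.applications.MobileNetV2(input_shape=(224, 224, 3),
                                          include_top=True,
                                          weights='imagenet')

# Keep regular and depthwise convolutions only.
conv_types = (tf.keras.layers.Conv2D, tf.keras.layers.DepthwiseConv2D)
conv_names = [layer.name for layer in model.layers
              if isinstance(layer, conv_types)]

with open('conv_layer_names.txt', 'w') as f:
    f.write('\n'.join(conv_names) + '\n')
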
/scripts/newHeatmap.ts:
--------------------------------------------------------------------------------
1 | const fs = require('fs');
2 |
3 | const numRows = 224;
4 | const numCols = 224;
5 |
6 | // Create an array to hold the CSV rows
7 | const csvRows = [];
8 |
9 | // Add the CSV header
10 | csvRows.push('row,column,value');
11 |
12 | // Generate random values and populate the CSV rows
13 | for (let row = 0; row < numRows; row++) {
14 | for (let col = 0; col < numCols; col++) {
15 | const value = Math.floor(Math.random() * 100) + 1; // Random value between 1 and 100
16 | csvRows.push(`r${row},c${col},${value}`);
17 | }
18 | }
19 |
20 | // Join rows with newline characters
21 | const csvContent = csvRows.join('\n');
22 |
23 | // Write CSV content to a file
24 | fs.writeFileSync('heatmap_data.csv', csvContent);
25 |
26 | console.log('Heatmap data saved to heatmap_data.csv');
--------------------------------------------------------------------------------
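The CSV this script emits is long-format: one row per pixel, with prefixed indices (r12, c34). For reference, a sketch of reading it back into a 224x224 array on the Python side (stdlib csv plus numpy, assuming the file sits in the working directory):

import csv
import numpy as np

grid = np.zeros((224, 224), dtype=np.int64)

with open('heatmap_data.csv', newline='') as f:
    for row in csv.DictReader(f):
        # Keys look like 'r12' / 'c34'; strip the prefix to recover indices.
        grid[int(row['row'][1:]), int(row['column'][1:])] = int(row['value'])

print(grid.shape, grid.min(), grid.max())
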
/src/app/(routes)/models/actions/logs/route.ts:
--------------------------------------------------------------------------------
1 | import { type NextRequest } from 'next/server';
2 |
3 | const serviceHost = process.env.SERVICE_HOST || 'localhost'; // Default to localhost if not set
4 | const serviceUrl = `http://${serviceHost}:5000`;
5 |
6 | export async function POST(req: NextRequest) {
7 | try {
8 | const reqParsed = await req.json();
9 | const { data, modelName } = reqParsed;
10 | const response = await fetch(`${serviceUrl}/logs/${modelName}`, {
11 | method: 'POST',
12 | headers: {
13 | 'Content-Type': 'application/json',
14 | },
15 | body: JSON.stringify({ data }),
16 | });
17 | return response;
18 | } catch (err) {
19 | console.error('Error: ', err);
20 | return new Response('Internal Server Error', { status: 500 });
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/actions/image/heatmaps/route.ts:
--------------------------------------------------------------------------------
1 | import { type NextRequest } from 'next/server';
2 |
3 | const serviceHost = process.env.SERVICE_HOST || 'localhost'; // Default to localhost if not set
4 | const serviceUrl = `http://${serviceHost}:5000`;
5 |
6 | export async function POST(req: NextRequest) {
7 | try {
8 | const reqParsed = await req.json();
9 | const { data, modelName } = reqParsed;
10 | const response = await fetch(`${serviceUrl}/heatmaps/${modelName}`, {
11 | method: 'POST',
12 | headers: {
13 | 'Content-Type': 'application/json',
14 | },
15 | body: JSON.stringify({ data }),
16 | });
17 | return response;
18 | } catch (err) {
19 | console.error('Error: ', err);
20 | return new Response('Internal Server Error', { status: 500 });
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/actions/image/featmaps/route.ts:
--------------------------------------------------------------------------------
1 | import { type NextRequest } from 'next/server';
2 |
3 | const serviceHost = process.env.SERVICE_HOST || 'localhost'; // Default to localhost if not set
4 | const serviceUrl = `http://${serviceHost}:5000`;
5 |
6 | export async function POST(req: NextRequest) {
7 | try {
8 | const reqParsed = await req.json();
9 | const { data, modelName } = reqParsed;
10 | const response = await fetch(`${serviceUrl}/featuremaps/${modelName}`, {
11 | method: 'POST',
12 | headers: {
13 | 'Content-Type': 'application/json',
14 | },
15 | body: JSON.stringify({ data }),
16 | });
17 | return response;
18 | } catch (err) {
19 | console.error('Error: ', err);
20 | return new Response('Internal Server Error', { status: 500 });
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/actions/image/predict/route.ts:
--------------------------------------------------------------------------------
1 | import { type NextRequest } from 'next/server';
2 |
3 | const serviceHost = process.env.SERVICE_HOST || 'localhost'; // Default to localhost if not set
4 | const serviceUrl = `http://${serviceHost}:5000`;
5 |
6 | export async function POST(req: NextRequest) {
7 | try {
8 | const reqParsed = await req.json();
9 | const { data, modelName } = reqParsed;
10 |
11 | const response = await fetch(`${serviceUrl}/predictions/${modelName}`, {
12 | method: 'POST',
13 | headers: {
14 | 'Content-Type': 'application/json',
15 | },
16 | body: JSON.stringify({ data }),
17 | });
18 | return response;
19 | } catch (err) {
20 | console.error('Error: ', err);
21 | return new Response('Internal Server Error', { status: 500 });
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/Models/TensorFlow/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.12.2
2 |
3 | WORKDIR /TensorFlow
4 |
5 | COPY . /TensorFlow/
6 |
7 | RUN apt-get update && apt-get install -y \
8 | python3-pip \
9 | python3-dev \
10 | build-essential \
11 | libssl-dev \
12 | libffi-dev \
13 | python3-setuptools \
14 | libgl1-mesa-dev \
15 | libhdf5-dev \
16 | && apt-get clean
17 |
18 | RUN pip install --upgrade pip
19 |
20 | # Install any needed packages specified in requirements.txt
21 | RUN pip install --no-cache-dir -r requirements.txt
22 |
23 | RUN chmod +x /TensorFlow/start/start.sh
24 |
25 | RUN chmod +x /TensorFlow/start/get_weights.py
26 |
27 | # Make port 5000 available to the world outside this container
28 | EXPOSE 5000
29 |
30 | # Run server.py when the container launches
31 | ENTRYPOINT ["/TensorFlow/start/start.sh"]
32 |
33 |
34 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 | .yarn/install-state.gz
8 |
9 | # testing
10 | /coverage
11 |
12 | # next.js
13 | /.next/
14 | /out/
15 |
16 | # production
17 | /build
18 |
19 | # misc
20 | .DS_Store
21 | *.pem
22 |
23 | # debug
24 | npm-debug.log*
25 | yarn-debug.log*
26 | yarn-error.log*
27 |
28 | # local env files
29 | .env*.local
30 |
31 | # vercel
32 | .vercel
33 |
34 | # typescript
35 | *.tsbuildinfo
36 | next-env.d.ts
37 |
38 | # venv
39 | venv/
40 |
41 | # env
42 | /Models/TensorFlow/.env
43 | /Models/TensorBoard/logs/*
44 | /Models/TensorFlow/app/assets/*
45 | /Models/TensorFlow/logs/*
46 |
47 | .env
48 | .vscode
49 | /public/renderTest
50 | /public/RP Data
51 | /public/heatmap_data.csv
52 |
53 |
--------------------------------------------------------------------------------
/Models/TensorBoard/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:latest
2 |
3 | WORKDIR /TensorBoard
4 |
5 | COPY . /TensorBoard/
6 |
7 | RUN apt-get update && apt-get install -y \
8 | python3-pip \
9 | python3-dev \
10 | build-essential \
11 | libssl-dev \
12 | libffi-dev \
13 | python3-setuptools \
14 | libgl1-mesa-dev \
15 | libhdf5-dev \
16 | && apt-get clean
17 |
18 | RUN pip install --upgrade pip
19 |
20 | # Install any needed packages specified in requirements.txt
21 | RUN pip install --no-cache-dir -r requirements.txt
22 |
23 | RUN chmod +x /TensorBoard/start/start.sh
24 |
25 | RUN chmod +x /TensorBoard/start/get_weights.py
26 |
27 | # Make port 5000 available to the world outside this container
28 | EXPOSE 5000
29 |
30 | EXPOSE 6006
31 |
32 | # Run server.py when the container launches
33 | ENTRYPOINT ["/TensorBoard/start/start.sh"]
34 |
35 |
36 |
--------------------------------------------------------------------------------
/Models/TensorBoard/app/services/generate_predictions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from app.utils.image_utils import preprocess_image
4 |
5 |
6 | def generate_predictions(model, base64_image):
7 | preprocessed_image = preprocess_image(base64_image)
8 |
9 | # Make predictions using the provided model
10 | predictions = model.predict(preprocessed_image)
11 | predicted_class = np.argmax(predictions[0]).item() # Convert to Python int for JSON serialization
12 |
13 | # Get the top 5 predictions and their probabilities
14 | top_5 = tf.keras.applications.mobilenet_v2.decode_predictions(predictions, top=5)[0]
15 |
16 | # Return the raw results; note the tensor and numpy float scores here are not JSON-serializable as-is
17 | return {
18 | "predicted_class": predicted_class,
19 | "top_5": top_5,
20 | "preprocessed_image": preprocessed_image
21 | }
--------------------------------------------------------------------------------
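For context on the top_5 value above: decode_predictions returns a list of (class_id, class_name, score) tuples whose scores are numpy float32, so they need converting before jsonify can serialize them (the TensorFlow copy of this service does exactly that). A small sketch of the shape and the conversion, using a dummy prediction vector:

import numpy as np
import tensorflow as tf

# Fake a (1, 1000) prediction vector; index 281 is arbitrary here.
dummy = np.zeros((1, 1000), dtype=np.float32)
dummy[0, 281] = 1.0

top_5 = tf.keras.applications.mobilenet_v2.decode_predictions(dummy, top=5)[0]
# Cast numpy float32 scores to Python floats for JSON serialization.
top_5_jsonable = [(cid, name, float(score)) for cid, name, score in top_5]
print(top_5_jsonable[0])
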
/src/app/ui/gridMapGif.tsx:
--------------------------------------------------------------------------------
1 | import styles from "../page.module.css";
2 | import Image from "next/image";
3 | import { useState, useEffect } from "react";
4 |
5 | interface Props {
6 | fGifURL: string;
7 | }
8 |
9 | export default function GridMapGif(props: Props) {
10 | const [fGifListener, setFGifListener] = useState(false);
11 |
12 | useEffect(() => {
13 | if (props.fGifURL) setFGifListener(true);
14 | }, [props.fGifURL]);
15 |
16 | return (
17 | <>
18 | {!fGifListener && }
19 | {fGifListener && (
20 |
21 |
27 |
28 | )}
29 | >
30 | );
31 | }
32 |
--------------------------------------------------------------------------------
/src/app/ui/navComponents/AccordionLink.tsx:
--------------------------------------------------------------------------------
1 | import Link from 'next/link';
2 |
3 | interface Props {
4 | path: string;
5 | label: string;
6 | }
7 |
8 | export default function AccordionLink(props: Props) {
9 | return (
10 |
11 |
19 | {props.label}
20 |
21 |
22 | );
23 | }
24 |
--------------------------------------------------------------------------------
/src/app/ui/heatMapGif.tsx:
--------------------------------------------------------------------------------
1 | import { image } from 'd3';
2 | import styles from '../page.module.css';
3 | import Image from 'next/image';
4 | import { useState, useEffect } from 'react';
5 |
6 | interface Props {
7 | hGifURL: string;
8 | }
9 |
10 | export default function HeatMapGif(props: Props) {
11 | const [hGifListener, setHGifListener] = useState(false);
12 |
13 |
14 | useEffect(() => {
15 | if (props.hGifURL) setHGifListener(true);
16 | }, [props.hGifURL]);
17 |
18 | return (
19 | <>
20 | {(!hGifListener &&
21 | )}
22 | {hGifListener &&
23 | (
24 |
25 |
31 | )}
32 | >
33 | );
34 | }
--------------------------------------------------------------------------------
/src/app/(routes)/about-us/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
--------------------------------------------------------------------------------
/src/app/(routes)/libraries/python/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
--------------------------------------------------------------------------------
/src/app/(routes)/documentation/python/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
--------------------------------------------------------------------------------
/src/app/(routes)/libraries/react/javascript/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
--------------------------------------------------------------------------------
/src/app/(routes)/libraries/react/typescript/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
--------------------------------------------------------------------------------
/src/app/(routes)/models/agnostic/text/gpt/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/pytorch/text/gpt/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/tensorflow/text/gpt/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/documentation/javascript-typescript/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
--------------------------------------------------------------------------------
/src/app/(routes)/models/pytorch/images/detection/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/agnostic/images/detection/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/agnostic/images/generation/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/agnostic/text/texttospeech/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/pytorch/images/classification/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/pytorch/images/generation/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/pytorch/text/texttospeech/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/tensorflow/images/detection/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/tensorflow/images/generation/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/tensorflow/text/texttospeech/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/agnostic/images/classification/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import Link from "next/link";
4 |
5 | export default async function Home() {
6 | return (
7 |
8 |
13 |
14 | Coming Soon
15 |
16 |
17 | Feature still in progress. We apologize for the inconvenience.
18 |
19 |
23 | Return to Homepage
24 |
25 |
26 | );
27 | }
28 |
--------------------------------------------------------------------------------
/Models/TensorFlow/app/routes/predictions.py:
--------------------------------------------------------------------------------
1 | import os
2 | from flask import Blueprint, request, jsonify
3 | import time
4 |
5 | from app.services.generate_predictions import generate_predictions
6 |
7 |
8 | dir = os.path.dirname(__file__)
9 |
10 | predictions = Blueprint('predictions', __name__)
11 |
12 | @predictions.route('/predictions/<model_name>', methods=['POST'])
13 | def upload_predictions(model_name):
14 |
15 | data = request.get_json()
16 | base64_image = data['data']
17 |
18 | model_module = __import__(f"models.{model_name}.model", fromlist=["get_model"])
19 | model = model_module.get_model()
20 |
21 | start_time = time.time()
22 |
23 | predictions = generate_predictions(model, base64_image)
24 |
25 | end_time = time.time()
26 |
27 | total_time = end_time - start_time
28 |
29 | print(f"Predictions: {predictions}")
30 | print(f"Total time taken: {total_time}")
31 |
32 | return jsonify({
33 | 'predictions': predictions,
34 | 'time': total_time
35 | })
36 |
37 |
--------------------------------------------------------------------------------
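The __import__(..., fromlist=[...]) call above resolves the model package from the URL segment at request time. importlib.import_module is the more idiomatic spelling, and it makes it easy to turn an unknown model name into a 404 instead of a 500; a hedged sketch:

import importlib

from flask import abort

def load_model(model_name):
    # Equivalent to __import__(f"models.{model_name}.model", fromlist=["get_model"]),
    # but importlib returns the leaf module directly.
    try:
        model_module = importlib.import_module(f"models.{model_name}.model")
    except ModuleNotFoundError:
        abort(404, description=f"Unknown model: {model_name}")
    return model_module.get_model()
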
/Models/TensorBoard/app/routes/logs.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint, request, jsonify
2 | import os
3 |
4 | from app.services.generate_logs import handle_tensorboard_logging
5 |
6 | dir = os.path.dirname(__file__)
7 | logs = Blueprint('logs', __name__)
8 |
9 | @logs.route('/logs/<model_name>', methods=['POST'])
10 | def tensorboard_logs(model_name):
11 | data = request.get_json()
12 | base64_image = data['data']
13 |
14 | # Invoke the get_model function from /model
15 | model_module = __import__(f"models.{model_name}.model", fromlist=["get_model"])
16 | model = model_module.get_model()
17 | layer_names_module = __import__(f"models.{model_name}.model", fromlist=["get_all_layer_names"])
18 | layer_names = layer_names_module.get_all_layer_names()
19 |
20 | # Generate TensorBoard logs and obtain the log directory
21 | log_dir = handle_tensorboard_logging(model, base64_image, layer_names, model_name)
22 | # model_summary(model, log_dir)
23 |
24 | return jsonify({"message": "TensorBoard logs generated successfully.", "log_dir": log_dir})
25 |
26 |
--------------------------------------------------------------------------------
/src/app/ui/modal.tsx:
--------------------------------------------------------------------------------
1 | import { Top5Obj } from '../lib/definitions';
2 | import styles from '../page.module.css';
3 | import { EmblaCarousel } from './carousel';
4 |
5 | interface Props {
6 | closeViz: () => void;
7 | hGifURL: string;
8 | fGifURL: string;
9 | top5: Top5Obj;
10 | preprocessFilePath: string;
11 | }
12 |
13 | export default function Modal(props: Props) {
14 | return (
15 |
16 |
17 |
18 |
Analysis
19 |
20 | X
21 |
22 |
23 |
29 |
30 |
31 | Okay
32 |
33 |
34 |
35 | );
36 | }
37 |
--------------------------------------------------------------------------------
/src/app/Sidebar.module.css:
--------------------------------------------------------------------------------
1 | /* ./Sidebar.module.css */
2 |
3 | .sidebar {
4 | background-color: black;
5 | /* border: 1px solid white; */
6 | color: white;
7 | min-height: 100vh;
8 | width: 200px;
9 | display: flex;
10 | flex-direction: column;
11 | padding: 5px;
12 | padding-top: 30px;
13 | }
14 |
15 | .sideBarLogo {
16 | margin-bottom: 30px;
17 | text-align: center;
18 | font-size: 24px;
19 | font-weight: bold;
20 | }
21 |
22 | .menuItem {
23 | border: 1px solid white;
24 | margin: 10px 0px;
25 | padding: 10px;
26 | padding-left: 30px;
27 | cursor: pointer;
28 | border-radius: 1px;
29 | transition: background-color 0.3s ease;
30 | }
31 |
32 | .menuItem:hover {
33 | background-color: #34495e;
34 | }
35 |
36 | .footer {
37 | margin-top: auto;
38 | text-align: center;
39 | font-size: 14px;
40 | }
41 |
42 | .sideBarTitle{
43 | height: 50px;
44 | width: 75px;
45 | }
46 |
47 | .sideBarLogoContainer {
48 | display: flex;
49 | flex-direction: row;
50 | align-items: center;
51 | justify-content: center;
52 | margin-bottom: 30px;
53 | }
54 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "explai",
3 | "version": "0.1.0",
4 | "private": true,
5 | "scripts": {
6 | "dev": "next dev",
7 | "build": "next build",
8 | "start": "next start",
9 | "lint": "next lint"
10 | },
11 | "dependencies": {
12 | "@aws-sdk/client-s3": "^3.540.0",
13 | "@aws-sdk/s3-request-presigner": "^3.540.0",
14 | "@preline/overlay": "^2.0.2",
15 | "@vercel/postgres": "^0.7.2",
16 | "aws-sdk": "^2.1591.0",
17 | "axios": "^1.6.8",
18 | "cloudinary": "^2.1.0",
19 | "cors": "^2.8.5",
20 | "d3": "^7.9.0",
21 | "dotenv": "^16.4.5",
22 | "embla-carousel-react": "^8.0.0",
23 | "next": "14.1.3",
24 | "preline": "^2.0.3",
25 | "react": "^18",
26 | "react-dom": "^18",
27 | "vercel": "^33.5.5",
28 | "zod": "^3.22.4"
29 | },
30 | "devDependencies": {
31 | "@types/d3": "^7.4.3",
32 | "@types/node": "^20",
33 | "@types/react": "^18",
34 | "@types/react-dom": "^18",
35 | "autoprefixer": "^10.4.19",
36 | "eslint": "^8",
37 | "eslint-config-next": "14.1.3",
38 | "postcss": "^8.4.38",
39 | "tailwindcss": "^3.4.3",
40 | "typescript": "^5"
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/src/app/ui/top5Classes.tsx:
--------------------------------------------------------------------------------
1 | import { useEffect, useRef, useState } from 'react';
2 | import styles from '../page.module.css';
3 | import { Top5Obj } from '../lib/definitions';
4 |
5 | interface Props {
6 | top5: Top5Obj;
7 | }
8 |
9 | export default function Top5(props: Props) {
10 | const top5Table = [];
11 |
12 | // Convert object into an array of [key, value] pairs
13 | const sortedEntries = Object.entries(props.top5)
14 | // Sort the array by value in descending order
15 | .sort((a, b) => b[1] - a[1]);
16 |
17 | // Iterate over sorted entries to generate table rows
18 | for (let [key, value] of sortedEntries) {
19 | top5Table.push(
20 |
21 | {key}
22 | {(value * 100).toFixed(2)}%
23 |
24 | );
25 | }
26 |
27 | return (
28 |
29 | {sortedEntries.length > 0 && (
30 |
31 |
32 |
33 | Class
34 | Certainty
35 |
36 |
37 | {top5Table}
38 |
39 | )}
40 |
41 | );
42 | }
43 |
--------------------------------------------------------------------------------
/Models/TensorFlow/requirements.txt:
--------------------------------------------------------------------------------
1 | absl-py==2.1.0
2 | astunparse==1.6.3
3 | blinker==1.7.0
4 | boto3==1.34.78
5 | botocore==1.34.78
6 | certifi==2024.2.2
7 | charset-normalizer==3.3.2
8 | click==8.1.7
9 | contourpy==1.2.1
10 | cycler==0.12.1
11 | Flask==3.0.2
12 | Flask-Cors==4.0.0
13 | flatbuffers==24.3.25
14 | fonttools==4.50.0
15 | gast==0.5.4
16 | google-pasta==0.2.0
17 | grpcio==1.62.1
18 | gunicorn==21.2.0
19 | h5py==3.10.0
20 | idna==3.6
21 | itsdangerous==2.1.2
22 | Jinja2==3.1.3
23 | jmespath==1.0.1
24 | keras==3.1.1
25 | kiwisolver==1.4.5
26 | libclang==18.1.1
27 | Markdown==3.6
28 | markdown-it-py==3.0.0
29 | MarkupSafe==2.1.5
30 | matplotlib==3.8.4
31 | mdurl==0.1.2
32 | ml-dtypes==0.3.2
33 | namex==0.0.7
34 | numpy==1.26.4
35 | opencv-python==4.9.0.80
36 | opt-einsum==3.3.0
37 | optree==0.11.0
38 | packaging==24.0
39 | pillow==10.3.0
40 | protobuf==4.25.3
41 | Pygments==2.17.2
42 | pyparsing==3.1.2
43 | python-dateutil==2.9.0.post0
44 | requests==2.31.0
45 | rich==13.7.1
46 | s3transfer==0.10.1
47 | setuptools==69.1.1
48 | six==1.16.0
49 | tensorboard==2.16.2
50 | tensorboard-data-server==0.7.2
51 | tensorflow==2.16.1
52 | termcolor==2.4.0
53 | typing_extensions==4.10.0
54 | urllib3==2.2.1
55 | Werkzeug==3.0.2
56 | wheel==0.43.0
57 | wrapt==1.16.0
--------------------------------------------------------------------------------
/public/next.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Models/TensorFlow/app/utils/image_utils.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import tensorflow as tf
3 | import numpy as np
4 |
5 | def preprocess_image(base64_image_str):
6 | image_bytes = base64.b64decode(base64_image_str)
7 | image_tensor = tf.image.decode_image(image_bytes, channels=3)
8 | image_tensor = tf.image.resize(image_tensor, (224, 224))
9 | image_tensor = tf.keras.applications.mobilenet_v2.preprocess_input(image_tensor)
10 | image_tensor = tf.expand_dims(image_tensor, 0)
11 | return image_tensor
12 |
13 | def add_progress_bar(image, current_index, total_layers, width, border_color=(255,255,255), progress_color=(50,205,50), remaining_color=(220,220,220)):
14 |     # Note: `width` is the bar's height in pixels; the bar always spans the full image width
15 | img_height, img_width = image.shape[:2]
16 | progress_width = int(((current_index + 1) / total_layers) * img_width)
17 |
18 | # Create a progress bar image
19 | bar_img = np.zeros((width, img_width, 3), dtype=np.uint8)
20 |
21 | # Fill the progress part
22 | bar_img[:, :progress_width] = progress_color
23 |
24 | # Fill the remaining part
25 | bar_img[:, progress_width:] = remaining_color
26 |
27 | # Add a border
28 | bar_img[0, :] = border_color
29 | bar_img[-1, :] = border_color
30 | bar_img[:, 0] = border_color
31 | bar_img[:, -1] = border_color
32 |
33 | # Combine the progress bar with the original image
34 | combined_img = np.vstack((bar_img, image))
35 |
36 | return combined_img
--------------------------------------------------------------------------------
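Since add_progress_bar stacks the bar strip on top of the frame, every output frame grows by `width` rows; GIF assembly downstream has to account for that. A quick sketch exercising it on a dummy frame, assuming it runs from the service root:

import numpy as np
from app.utils.image_utils import add_progress_bar

frame = np.zeros((224, 224, 3), dtype=np.uint8)  # dummy black frame

# Frame index 3 of 10 layers: about 40% of the bar is filled (zero-based index).
out = add_progress_bar(frame, current_index=3, total_layers=10, width=8)
print(frame.shape, '->', out.shape)  # (224, 224, 3) -> (232, 224, 3)
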
/Models/TensorFlow/app/services/generate_predictions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from app.utils.image_utils import preprocess_image
4 |
5 |
6 | def generate_predictions(model, base64_image):
7 | preprocessed_image = preprocess_image(base64_image)
8 |
9 | # Make predictions using the provided model
10 | predictions = model.predict(preprocessed_image)
11 | predicted_class = np.argmax(predictions[0]).item() # Convert to Python int for JSON serialization
12 |
13 | # Get the name of the predicted class
14 | predicted_class_name = tf.keras.applications.mobilenet_v2.decode_predictions(predictions, top=1)[0][0][1]
15 |
16 | # Get the top 5 predictions and their probabilities
17 | top_5 = tf.keras.applications.mobilenet_v2.decode_predictions(predictions, top=5)[0]
18 | class_name_probabilities = {}
19 |
20 | for _, class_name, probability in top_5:
21 | # Convert numpy float to Python float for JSON serialization
22 | class_name_probabilities[class_name] = float(probability)
23 |
24 | # Return a dictionary that's directly serializable to JSON
25 | return {
26 | "predicted_class_name": predicted_class_name,
27 | "predicted_class_probability": predictions[0][predicted_class].item(), # Ensure this is a Python float
28 | "class_name_probabilities": class_name_probabilities,
29 | "predicted_class": predicted_class
30 | }
--------------------------------------------------------------------------------
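A quick way to exercise generate_predictions outside Flask; this assumes MobileNetV2 with ImageNet weights as the model, which matches the decode_predictions calls above, and the image path is a placeholder:

# Sketch: call generate_predictions directly with a Keras MobileNetV2.
# The weights choice and 'dog.jpg' are assumptions for illustration.
import base64
import tensorflow as tf
from app.services.generate_predictions import generate_predictions

model = tf.keras.applications.MobileNetV2(weights='imagenet')

with open('dog.jpg', 'rb') as f:
    b64 = base64.b64encode(f.read()).decode('utf-8')

result = generate_predictions(model, b64)
print(result['predicted_class_name'], result['predicted_class_probability'])
for name, prob in result['class_name_probabilities'].items():
    print(f"{name}: {prob:.3f}")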
/Models/TensorBoard/app/utils/image_utils.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import tensorflow as tf
3 | import numpy as np
4 |
5 | def preprocess_image(base64_image_str):
6 | image_bytes = base64.b64decode(base64_image_str)
7 | image_tensor = tf.image.decode_image(image_bytes, channels=3)
8 | image_tensor = tf.image.resize(image_tensor, (224, 224))
9 | image_tensor = tf.keras.applications.mobilenet_v2.preprocess_input(image_tensor)
10 | image_tensor = tf.expand_dims(image_tensor, 0)
11 | return image_tensor
12 |
13 | def add_progress_bar(image, current_index, total_layers, bar_height=5, border_color=(255,255,255), progress_color=(50,205,50), remaining_color=(220,220,220)):
14 |
15 | img_height, img_width = image.shape[:2]
16 | progress_width = int(((current_index + 1) / total_layers) * img_width)
17 |
18 | # Create a progress bar image
19 | bar_img = np.zeros((bar_height, img_width, 3), dtype=np.uint8)
20 |
21 | # Fill the progress part
22 | bar_img[:, :progress_width] = progress_color
23 |
24 | # Fill the remaining part
25 | bar_img[:, progress_width:] = remaining_color
26 |
27 | # Add a border
28 | bar_img[0, :] = border_color
29 | bar_img[-1, :] = border_color
30 | bar_img[:, 0] = border_color
31 | bar_img[:, -1] = border_color
32 |
33 | # Combine the progress bar with the original image
34 | combined_img = np.vstack((bar_img, image))
35 |
36 | return combined_img
--------------------------------------------------------------------------------
/Models/TensorBoard/models/MobileNet/conv_layers/conv_layer_names.txt:
--------------------------------------------------------------------------------
1 | Conv1
2 | bn_Conv1
3 | Conv1_relu
4 | expanded_conv_depthwise
5 | expanded_conv_depthwise_relu
6 | expanded_conv_project
7 | block_1_expand
8 | block_1_expand_BN
9 | block_1_expand_relu
10 | block_1_pad
11 | block_1_depthwise
12 | block_1_depthwise_BN
13 | block_1_depthwise_relu
14 | block_1_project
15 | block_1_project_BN
16 | block_2_expand
17 | block_2_expand_BN
18 | block_2_expand_relu
19 | block_2_depthwise
20 | block_2_depthwise_BN
21 | block_2_depthwise_relu
22 | block_2_project
23 | block_2_project_BN
24 | block_3_expand
25 | block_3_depthwise_relu
26 | block_3_project
27 | block_4_expand
28 | block_4_depthwise_relu
29 | block_4_project
30 | block_5_expand
31 | block_5_depthwise_relu
32 | block_5_project
33 | block_6_expand
34 | block_6_depthwise_relu
35 | block_6_project
36 | block_7_expand
37 | block_7_depthwise_relu
38 | block_7_project
39 | block_8_expand
40 | block_8_depthwise_relu
41 | block_8_project
42 | block_9_expand
43 | block_9_depthwise_relu
44 | block_9_project
45 | block_10_expand
46 | block_10_depthwise_relu
47 | block_10_project
48 | block_11_expand
49 | block_11_depthwise_relu
50 | block_11_project
51 | block_12_expand
52 | block_12_depthwise_relu
53 | block_12_project
54 | block_13_expand
55 | block_13_depthwise_relu
56 | block_13_project
57 | block_16_expand
58 | block_16_depthwise_relu
59 | block_16_project
60 | Conv_1
61 | Conv_1_bn
62 | out_relu
63 |
--------------------------------------------------------------------------------
/Models/TensorFlow/models/MobileNet/conv_layers/conv_layer_names.txt:
--------------------------------------------------------------------------------
1 | Conv1
2 | bn_Conv1
3 | Conv1_relu
4 | expanded_conv_depthwise
5 | expanded_conv_depthwise_relu
6 | expanded_conv_project
7 | block_1_expand
8 | block_1_expand_BN
9 | block_1_expand_relu
10 | block_1_pad
11 | block_1_depthwise
12 | block_1_depthwise_BN
13 | block_1_depthwise_relu
14 | block_1_project
15 | block_1_project_BN
16 | block_2_expand
17 | block_2_expand_BN
18 | block_2_expand_relu
19 | block_2_depthwise
20 | block_2_depthwise_BN
21 | block_2_depthwise_relu
22 | block_2_project
23 | block_2_project_BN
24 | block_3_expand
25 | block_3_depthwise_relu
26 | block_3_project
27 | block_4_expand
28 | block_4_depthwise_relu
29 | block_4_project
30 | block_5_expand
31 | block_5_depthwise_relu
32 | block_5_project
33 | block_6_expand
34 | block_6_depthwise_relu
35 | block_6_project
36 | block_7_expand
37 | block_7_depthwise_relu
38 | block_7_project
39 | block_8_expand
40 | block_8_depthwise_relu
41 | block_8_project
42 | block_9_expand
43 | block_9_depthwise_relu
44 | block_9_project
45 | block_10_expand
46 | block_10_depthwise_relu
47 | block_10_project
48 | block_11_expand
49 | block_11_depthwise_relu
50 | block_11_project
51 | block_12_expand
52 | block_12_depthwise_relu
53 | block_12_project
54 | block_13_expand
55 | block_13_depthwise_relu
56 | block_13_project
57 | block_16_expand
58 | block_16_depthwise_relu
59 | block_16_project
60 | Conv_1
61 | Conv_1_bn
62 | out_relu
63 |
--------------------------------------------------------------------------------
/src/app/page.tsx:
--------------------------------------------------------------------------------
1 | 'use server';
2 |
3 | import './globals.css';
4 | import Link from 'next/link';
5 |
6 | async function StartPage() {
7 | return (
8 |
9 |
14 |
15 |
16 |
17 |
18 | Enhance the user experience with AI integrations using{' '}
19 | xFlair.
20 |
21 |
22 | Provide meaningful, digestible, and real-time visualizations for AI models, from input to output.
23 |
24 |
25 |
29 | Start
30 |
31 |
32 |
33 |
34 | );
35 | }
36 |
37 | export default StartPage;
38 |
--------------------------------------------------------------------------------
/scripts/newRenderTest.ts:
--------------------------------------------------------------------------------
1 | const file = require('fs');
2 | const path = require('path');
3 |
4 | const x = 50;
5 | const y = 50;
6 | const rate = 4;
7 | let dataset = 1;
8 |
9 | interface GridValues {
10 | [index: string]: number;
11 | }
12 |
13 | const final: GridValues = {};
14 |
15 | for (let row = 1; row <= x; row++) {
16 | for (let col = 1; col <= y; col++) {
17 | const distance = Math.max(row, col) - 1;
18 | const value = 100 - distance * 2;
19 | final[`r${row},c${col}`] = value;
20 | }
21 | }
22 |
23 | // file.writeFileSync(
24 | // path.join(__dirname, `../public/renderTest/dataFINAL.csv`),
25 | // final.join('\n')
26 | // );
27 |
28 | let contents: Array<string> = [];
29 |
30 | for (let row = 1; row <= x; row++) {
31 | for (let col = 1; col <= y; col++) {
32 | const value = Math.floor(Math.random() * 100) + 1;
33 | contents.push(`r${row},c${col},${value}`);
34 | }
35 | }
36 |
37 | file.writeFileSync(
38 | path.join(__dirname, `../public/renderTest/data${dataset}.csv`),
39 | 'row,column,value\n' + contents.join('\n')
40 | );
41 | dataset++;
42 |
43 | for (let i = 2; i <= 30; i++) {
44 |   let temp: Array<string> = [];
45 |
46 | contents.forEach((el) => {
47 |     const data: Array<string> = el.split(',');
48 | let value: number = Number(data.pop() as string);
49 | const key: string = data.join(',');
50 |
51 | if (Math.abs(final[key] - value) < 5) value = final[key];
52 | else if (final[key] > value) value += rate;
53 | else if (final[key] < value) value -= rate;
54 |
55 | temp.push(`${key},${value}`);
56 | });
57 |
58 | contents = temp;
59 |
60 | file.writeFileSync(
61 | path.join(__dirname, `../public/renderTest/data${dataset}.csv`),
62 | 'row,column,value\n' + contents.join('\n')
63 | );
64 | dataset++;
65 | }
66 |
67 | console.log('done');
68 |
--------------------------------------------------------------------------------
/Models/TensorFlow/app/utils/s3_utils.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from botocore.exceptions import ClientError, NoCredentialsError, BotoCoreError
3 | import logging
4 | import time
5 |
6 | def create_s3_client():
7 | """Create an S3 client."""
8 | return boto3.client('s3')
9 |
10 | def upload_file(file_path, bucket_name, object_name):
11 | s3_client = create_s3_client()
12 | try:
13 | s3_client.upload_file(file_path, bucket_name, object_name)
14 | except ClientError as e:
15 | print(f"Error uploading file to S3: {e}")
16 | raise
17 |
18 | def download_file(bucket_name, object_name, file_path):
19 | """Download a file from an S3 bucket."""
20 | s3_client = create_s3_client()
21 | try:
22 | s3_client.download_file(bucket_name, object_name, file_path)
23 | print(f"File {object_name} downloaded from {bucket_name} to {file_path}")
24 | except ClientError as e:
25 | print(f"Error downloading file from S3: {e}")
26 | raise
27 |
28 | def list_objects_in_bucket(bucket_name, prefix=""):
29 | """List objects in an S3 bucket."""
30 | s3_client = create_s3_client()
31 | try:
32 | response = s3_client.list_objects_v2(Bucket=bucket_name, Prefix=prefix)
33 | objects = response.get("Contents", [])
34 | return [obj["Key"] for obj in objects]
35 | except ClientError as e:
36 | print(f"Error listing objects in bucket {bucket_name}: {e}")
37 | raise
38 |
39 | def delete_object_from_bucket(bucket_name, object_name):
40 | """Delete an object from an S3 bucket."""
41 | s3_client = create_s3_client()
42 | try:
43 | s3_client.delete_object(Bucket=bucket_name, Key=object_name)
44 | print(f"Object {object_name} deleted from {bucket_name}")
45 | except ClientError as e:
46 | print(f"Error deleting object from bucket {bucket_name}: {e}")
47 | raise
--------------------------------------------------------------------------------
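The helpers above each build a fresh client per call and rely on boto3's normal credential chain. A minimal round-trip sketch, assuming 'my-bucket' stands in for the real AWS_S3_BUCKET_NAME and credentials come from the environment:

# Sketch: upload, list, download, then delete a test object.
# 'my-bucket' and the key names are placeholders.
from app.utils.s3_utils import (
    upload_file, download_file, list_objects_in_bucket, delete_object_from_bucket
)

bucket = 'my-bucket'
upload_file('local.jpg', bucket, 'tests/local.jpg')
print(list_objects_in_bucket(bucket, prefix='tests/'))  # ['tests/local.jpg', ...]
download_file(bucket, 'tests/local.jpg', 'roundtrip.jpg')
delete_object_from_bucket(bucket, 'tests/local.jpg')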
/src/app/ui/dotButton.tsx:
--------------------------------------------------------------------------------
1 | import React, {
2 | PropsWithChildren,
3 | useCallback,
4 | useEffect,
5 | useState
6 | } from 'react'
7 | import { EmblaCarouselType } from 'embla-carousel'
8 | import styles from '../page.module.css';
9 |
10 | type UseDotButtonType = {
11 | selectedIndex: number
12 | scrollSnaps: number[]
13 | onDotButtonClick: (index: number) => void
14 | }
15 |
16 | export const useDotButton = (
17 | emblaApi: EmblaCarouselType | undefined
18 | ): UseDotButtonType => {
19 | const [selectedIndex, setSelectedIndex] = useState(0)
20 |   const [scrollSnaps, setScrollSnaps] = useState<number[]>([])
21 |
22 | const onDotButtonClick = useCallback(
23 | (index: number) => {
24 | if (!emblaApi) return
25 | emblaApi.scrollTo(index)
26 | },
27 | [emblaApi]
28 | )
29 |
30 | const onInit = useCallback((emblaApi: EmblaCarouselType) => {
31 | setScrollSnaps(emblaApi.scrollSnapList())
32 | }, [])
33 |
34 | const onSelect = useCallback((emblaApi: EmblaCarouselType) => {
35 | setSelectedIndex(emblaApi.selectedScrollSnap())
36 | }, [])
37 |
38 | useEffect(() => {
39 | if (!emblaApi) return
40 |
41 | onInit(emblaApi)
42 | onSelect(emblaApi)
43 | emblaApi.on('reInit', onInit)
44 | emblaApi.on('reInit', onSelect)
45 | emblaApi.on('select', onSelect)
46 | }, [emblaApi, onInit, onSelect])
47 |
48 | return {
49 | selectedIndex,
50 | scrollSnaps,
51 | onDotButtonClick
52 | }
53 | }
54 |
55 | type PropType = PropsWithChildren<
56 | React.DetailedHTMLProps<
57 |     React.ButtonHTMLAttributes<HTMLButtonElement>,
58 | HTMLButtonElement
59 | >
60 | >
61 |
62 | export const DotButton: React.FC<PropType> = (props) => {
63 | const { children, ...restProps } = props
64 |
65 | return (
66 |     <button type="button" {...restProps}>
67 |       {children}
68 |     </button>
69 | )
70 | }
71 |
--------------------------------------------------------------------------------
/src/app/ui/heatmap.tsx:
--------------------------------------------------------------------------------
1 | import { useEffect, useRef, useState } from 'react';
2 | import * as d3 from 'd3';
3 |
4 | interface DataItem {
5 | group: string;
6 | variable: string;
7 | value: string;
8 | }
9 |
10 | export default function Heatmap() {
11 |   const svgRef = useRef<SVGSVGElement | null>(null);
12 |
13 | const [containerSize, setContainerSize] = useState({
14 | width: 650,
15 | height: 650,
16 | });
17 |
18 | useEffect(() => {
19 | const numCells = { x: 224, y: 224 };
20 | const margin = { top: 5, right: 5, bottom: 5, left: 5 };
21 | const width = containerSize.width - margin.left - margin.right;
22 | const height = containerSize.height - margin.top - margin.bottom;
23 |
24 | const cellSize = {
25 | width: Math.min(width / numCells.x, height / numCells.y),
26 | height: Math.min(width / numCells.x, height / numCells.y),
27 | };
28 |
29 | const svg = d3
30 | .select(svgRef.current)
31 | .attr('width', width + margin.left + margin.right)
32 | .attr('height', height + margin.top + margin.bottom)
33 | .append('g')
34 | .attr('transform', `translate(${margin.left}, ${margin.top})`);
35 |
36 | const myColor = d3
37 | .scaleLinear()
38 | .range(['#f5f0f0', '#f20505'])
39 | .domain([1, 100]);
40 |
41 | // d3.csv(`/renderTest/data${nextArray}.csv`).then((data) => {
42 | d3.csv(`/RP Data/gradients.csv`).then((data) => {
43 | svg
44 | .selectAll('rect')
45 | .data(data)
46 | .enter()
47 | .append('rect')
48 | .attr('x', (_, i) => (i % numCells.x) * cellSize.width)
49 | .attr('y', (_, i) => Math.floor(i / numCells.x) * cellSize.height)
50 | .attr('width', cellSize.width)
51 | .attr('height', cellSize.height)
52 | .style('fill', (d) => myColor(+d.value));
53 | });
54 |   }, [containerSize]);
55 |
56 |   return <svg ref={svgRef} />;
57 | }
58 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/actions/image/upload/route.ts:
--------------------------------------------------------------------------------
1 | import { NextRequest } from 'next/server';
2 | import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3';
3 |
4 | export async function POST(req: NextRequest) {
5 |   try {
6 |     // Incoming request is multipart/form-data with 'file' and 'modelName' fields:
7 |     // 'file' is a File and 'modelName' is a string.
8 |
9 |
10 | const formData = await req.formData();
11 |
12 | const file = formData.get('file');
13 | const modelName = formData.get('modelName');
14 |
15 | if (!file || !modelName) {
16 | return new Response('Missing file or modelName', { status: 400 });
17 | }
18 |
19 | const buffer = Buffer.from(await (file as File).arrayBuffer());
20 |
21 | const fileName = `inputimages/${modelName}/${Date.now()}.${(
22 | file as File
23 | ).name
24 | .split('.')
25 | .pop()}`;
26 |
27 | const s3 = new S3Client({
28 | region: process.env.AWS_DEFAULT_REGION as string,
29 | credentials: {
30 | accessKeyId: process.env.AWS_ACCESS_KEY_ID as string,
31 | secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY as string,
32 | },
33 | });
34 |
35 | const uploadParams = {
36 | Bucket: process.env.AWS_S3_BUCKET_NAME as string,
37 | Key: fileName,
38 |       Body: buffer, // File contents, buffered in memory before upload
39 |       ContentType: (file as File).type, // Set the correct content type for the file
40 | };
41 |
42 | const command = new PutObjectCommand(uploadParams);
43 |
44 | await s3.send(command);
45 |
46 | const filePath = `https://${process.env.AWS_S3_BUCKET_NAME}.s3.amazonaws.com/${fileName}`;
47 |
48 | return new Response(JSON.stringify({ filePath }), {
49 | status: 200,
50 | });
51 | } catch (err) {
52 | console.error('Error uploading image:', err);
53 | return new Response('Error uploading image', { status: 500 });
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/src/app/ui/navComponents/Icon.tsx:
--------------------------------------------------------------------------------
1 | interface Props {
2 | name: string;
3 | }
4 |
5 | export default function Icon({ name }: Props) {
6 | if (name === 'gear') {
7 | return (
8 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 | );
33 | }
34 | if (name === 'book') {
35 | return (
36 |
48 |
49 |
50 |
51 | );
52 | }
53 | if (name === 'pages') {
54 | return (
55 |
67 |
68 |
69 |
70 |
71 | );
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/actions/image/preprocess/route.ts:
--------------------------------------------------------------------------------
1 | import { type NextRequest } from 'next/server';
2 | import { S3Client, GetObjectCommand } from '@aws-sdk/client-s3';
3 | import { getSignedUrl } from '@aws-sdk/s3-request-presigner';
4 |
5 | const serviceHost = process.env.SERVICE_HOST || 'localhost'; // Default to localhost if not set
6 | const serviceUrl = `http://${serviceHost}:5000`;
7 |
8 | const s3 = new S3Client({
9 | region: process.env.AWS_DEFAULT_REGION as string,
10 | credentials: {
11 | accessKeyId: process.env.AWS_ACCESS_KEY_ID as string,
12 | secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY as string,
13 | },
14 | });
15 |
16 | const generateSignedUrl = async (
17 | bucketName: string,
18 | objectKey: string,
19 | expiresInSeconds: number
20 | ) => {
21 | const command = new GetObjectCommand({
22 | Bucket: bucketName,
23 | Key: objectKey,
24 | });
25 |
26 | try {
27 | const signedUrl = await getSignedUrl(s3, command, {
28 | expiresIn: expiresInSeconds,
29 | });
30 | return signedUrl;
31 | } catch (error) {
32 | console.error('Error generating signed URL:', error);
33 | throw error; // Rethrow the error for further handling
34 | }
35 | };
36 | export async function POST(req: NextRequest) {
37 | try {
38 | const reqParsed = await req.json();
39 | const { data, filePath, fileType } = reqParsed;
40 |
41 | // Call the preprocess endpoint
42 | const preprocessResponse = await fetch(`${serviceUrl}/preprocess`, {
43 | method: 'POST',
44 | headers: {
45 | 'Content-Type': 'application/json',
46 | },
47 | body: JSON.stringify({ data, filePath, fileType }),
48 | });
49 | const preprocessResult = await preprocessResponse.json();
50 |
51 | const { preprocessed_images } = preprocessResult;
52 | const bucketName = process.env.AWS_S3_BUCKET_NAME as string;
53 | const expiresInSeconds = 60 * 5; // 5 minutes
54 |
55 | // Generate signed URLs for the preprocessed images
56 | const signedUrls = await Promise.all(
57 | preprocessed_images.map(async (image: string) => {
58 | return generateSignedUrl(bucketName, image, expiresInSeconds);
59 | })
60 | );
61 |
62 | // Return the signed URLs
63 | return new Response(JSON.stringify({ urls: signedUrls }), { status: 200 });
64 | } catch (err) {
65 | console.error('Error:', err);
66 | return new Response('Internal Server Error', { status: 500 });
67 | }
68 | }
69 |
--------------------------------------------------------------------------------
/src/app/ui/carousel.tsx:
--------------------------------------------------------------------------------
1 | import React, { useEffect, useCallback, Suspense, lazy } from 'react';
2 | import useEmblaCarousel, {
3 | type UseEmblaCarouselType,
4 | } from 'embla-carousel-react';
5 | import styles from '../page.module.css';
6 | import { DotButton, useDotButton } from './dotButton';
7 | import ResizedImage from './resizedImage';
8 | import HeatMapGif from './heatMapGif';
9 | import GridMapGif from './gridMapGif';
10 | import Top5Classes from './top5Classes';
11 | import { Top5Obj } from '../lib/definitions';
12 |
13 | interface Props {
14 | hGifURL: string;
15 | fGifURL: string;
16 | top5: Top5Obj;
17 | preprocessFilePath: string;
18 | }
19 |
20 | export function EmblaCarousel(props: Props) {
21 | const [emblaRef, emblaApi] = useEmblaCarousel({ loop: false });
22 |
23 | useEffect(() => {
24 | if (emblaApi) {
25 | console.log(emblaApi.slideNodes()); // Access API
26 | }
27 | }, [emblaApi]);
28 |
29 | const { selectedIndex, scrollSnaps, onDotButtonClick } =
30 | useDotButton(emblaApi);
31 |
32 | const scrollPrev = useCallback(() => {
33 | if (emblaApi) emblaApi.scrollPrev();
34 | }, [emblaApi]);
35 | const scrollNext = useCallback(() => {
36 | if (emblaApi) emblaApi.scrollNext();
37 | }, [emblaApi]);
38 |
39 | return (
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 | <
52 |
53 |
54 | {scrollSnaps.map((_, index) => {
55 | const style =
56 | index === selectedIndex ? 'embla__dot--selected' : 'embla__dot';
57 | return (
58 | onDotButtonClick(index)}
61 | className={styles[style]}
62 | />
63 | );
64 | })}
65 |
66 |
67 | >
68 |
69 |
70 |
71 | );
72 | }
73 |
--------------------------------------------------------------------------------
/Models/TensorBoard/app/services/generate_logs.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import os
4 | from datetime import datetime
5 |
6 | from app.services.generate_images import make_gradcam_heatmap
7 | from app.services.generate_predictions import generate_predictions
8 |
9 | dir = os.path.dirname(__file__)
10 |
11 | def log_model_architecture(model, writer):
12 | dummy_data = tf.random.normal(shape=(1, 224, 224, 3))
13 |
14 | # Wrap the model call with tf.function
15 | @tf.function
16 | def model_call(inputs):
17 | return model(inputs)
18 |
19 | # Get the concrete function from the wrapped model call
20 | concrete_func = model_call.get_concrete_function(dummy_data)
21 |
22 |     # Write the model architecture to TensorBoard
23 |     # using the summary writer supplied by the caller
24 | with writer.as_default():
25 | tf.summary.graph(concrete_func.graph)
26 |
27 | def log_comprehensive_information(model, predictions, layer_names, writer, log_dir):
28 | # Generate predictions
29 | top_5 = predictions.top_5
30 | preprocessed_image = predictions.preprocessed_image
31 |
32 | with writer.as_default():
33 | # Log input image
34 | tf.summary.image("Input Image", preprocessed_image, step=0)
35 | # Log top 5 predictions
36 | for _, label, prob in top_5: # Replaced imagenet_id with _
37 | tf.summary.scalar(f"Top 5 Predictions/{label}", prob, step=0)
38 | # Generate and log Grad-CAM heatmaps
39 | for layer_name in layer_names:
40 | heatmap = make_gradcam_heatmap(preprocessed_image, model, layer_name)
41 | if heatmap is not None:
42 | heatmap = np.uint8(255 * heatmap)
43 | heatmap_image = np.expand_dims(np.repeat(heatmap[:, :, np.newaxis], 3, axis=2), axis=0)
44 | tf.summary.image(f"Grad-CAM/{layer_name}", heatmap_image, step=0)
45 |
46 | def handle_tensorboard_logging(model, base64_image, layer_names, model_name):
47 | predictions = generate_predictions(model, base64_image)
48 |
49 | current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
50 | log_dir = os.path.join(dir, f"../../logs/{model_name}/{current_time}")
51 | os.makedirs(log_dir, exist_ok=True)
52 |
53 | writer = tf.summary.create_file_writer(log_dir)
54 |
55 | log_comprehensive_information(model, predictions, layer_names, writer, log_dir)
56 |     log_model_architecture(model, writer)
57 |
58 | writer.flush()
59 | writer.close()
60 |
61 | return log_dir
--------------------------------------------------------------------------------
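A sketch of producing a run and pointing the TensorBoard UI at it. This assumes MobileNetV2 with ImageNet weights and a two-layer list for brevity, and it also assumes the TensorBoard build of generate_predictions returns an object exposing top_5 and preprocessed_image, as log_comprehensive_information above expects:

# Sketch: create a TensorBoard log directory for one image.
# Weights, image path, and layer list are illustrative assumptions.
import base64
import tensorflow as tf
from app.services.generate_logs import handle_tensorboard_logging

model = tf.keras.applications.MobileNetV2(weights='imagenet')
with open('dog.jpg', 'rb') as f:
    b64 = base64.b64encode(f.read()).decode('utf-8')

log_dir = handle_tensorboard_logging(model, b64, ['Conv1', 'out_relu'], 'MobileNet')
print(f"Inspect with: tensorboard --logdir {log_dir}")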
/src/app/layout.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import type { Metadata } from "next";
4 | import { Inter, Saira, Roboto, Roboto_Slab } from "next/font/google";
5 | import "./globals.css";
6 | import Head from "next/head";
7 | import tailwind from "tailwindcss";
8 |
9 | import { ReactNode, useState } from "react";
10 | import Sidenav from "./ui/navComponents/Sidenav";
11 |
12 | interface Props {
13 | children: ReactNode;
14 | }
15 |
16 | const inter = Inter({ subsets: ["latin"] });
17 | const saira = Saira({ subsets: ["latin"] });
18 | const roboto = Roboto({
19 | subsets: ["latin"],
20 | weight: "100",
21 | });
22 | const robotoSlab = Roboto_Slab({
23 | subsets: ["latin"],
24 | weight: "900",
25 | });
26 |
27 | export default function RootLayout({ children }: Props) {
28 | const [sidebarOpen, setSidebarOpen] = useState(false);
29 |
30 | return (
31 |
32 |
33 | xFlair Demo
34 |
35 | {/* */}
36 |
37 |
38 |
39 |
40 |
44 |
49 |
setSidebarOpen(!sidebarOpen)}
53 | >
54 | Toggle Navigation
55 |
62 |
66 |
67 |
68 | {children}
69 |
70 |
71 |
72 |
73 |
74 | );
75 | }
76 |
--------------------------------------------------------------------------------
/Models/TensorFlow/app/routes/featuremaps.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | from PIL import Image
4 | from flask import Blueprint, request, jsonify
5 |
6 | from app.utils.image_utils import preprocess_image
7 | from app.services.generate_images import make_feature_maps
8 | from app.utils.image_utils import add_progress_bar
9 | from app.utils.s3_utils import upload_file
10 |
11 | dir = os.path.dirname(__file__)
12 | featuremaps = Blueprint('featuremaps', __name__)
13 |
14 | @featuremaps.route('/featuremaps/', methods=['POST'])
15 | def upload_featuremaps(model_name):
16 | data = request.get_json()
17 | base64_image = data['data']
18 |
19 | model_module = __import__(f"models.{model_name}.model", fromlist=["get_model"])
20 | model = model_module.get_model()
21 | layer_names_module = __import__(f"models.{model_name}.model", fromlist=["get_all_layer_names"])
22 | layer_names = layer_names_module.get_layer_names()
23 |
24 | preprocessed_image = preprocess_image(base64_image)
25 |
26 | visuals_dir = os.path.join(dir, "../visuals/")
27 | featuremaps_dir = os.path.join(visuals_dir, "featuremaps/")
28 | featuremapspb_dir = os.path.join(featuremaps_dir, "progressbars/")
29 | os.makedirs(visuals_dir, exist_ok=True)
30 | os.makedirs(featuremaps_dir, exist_ok=True)
31 | os.makedirs(featuremapspb_dir, exist_ok=True)
32 |
33 | links = []
34 | progressbar_links = []
35 |
36 | bucket_name = os.getenv('AWS_S3_BUCKET_NAME')
37 | if not bucket_name:
38 | return jsonify({"error": "Server configuration error. AWS S3 Bucket name is missing."}), 500
39 |
40 | for layer_name in layer_names:
41 | output_path = make_feature_maps(preprocessed_image, model, layer_name, featuremaps_dir, layer_names)
42 | if output_path is None:
43 | continue
44 |
45 | file_name = os.path.splitext(os.path.basename(output_path))[0]
46 | upload_file(output_path, bucket_name, f"featuremaps/{os.path.basename(output_path)}")
47 | links.append(f"https://{bucket_name}.s3.amazonaws.com/featuremaps/{file_name}.jpg")
48 |
49 | img_with_progress = Image.open(output_path)
50 | img_array = np.array(img_with_progress)
51 | current_index = layer_names.index(layer_name)
52 | total_layers = len(layer_names)
53 |         img_with_progress_bar = add_progress_bar(img_array, current_index, total_layers, width=20)
54 |
55 | # Resize and save the final image with the progress bar
56 | final_img = Image.fromarray(img_with_progress_bar)
57 | final_img_resized = final_img.resize((500, 500), Image.Resampling.LANCZOS) # Resizing the final image
58 | output_path_pb = os.path.join(featuremapspb_dir, file_name)
59 | final_img_resized.save(output_path_pb, format='JPEG', quality=85) # Saving the resized final image
60 |
61 | upload_file(output_path_pb, bucket_name, f"featuremaps/progressbars/{os.path.basename(output_path_pb)}.jpg")
62 | progressbar_links.append(f"https://{bucket_name}.s3.amazonaws.com/featuremaps/progressbars/{file_name}.jpg")
63 |
64 | return jsonify({'featuremaps': links, 'progressbars': progressbar_links})
65 |
66 |
67 |
68 |
--------------------------------------------------------------------------------
/Models/TensorFlow/app/routes/heatmaps.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint, request, jsonify
2 | import tensorflow as tf
3 | import cv2
4 | import os
5 | import numpy as np
6 |
7 | from app.services.generate_images import make_gradcam_heatmap
8 | from app.utils.image_utils import preprocess_image
9 | from app.services.generate_predictions import generate_predictions
10 | from app.utils.s3_utils import upload_file
11 | from app.utils.image_utils import add_progress_bar
12 |
13 |
14 | dir = os.path.dirname(__file__)
15 | heatmaps = Blueprint('heatmaps', __name__)
16 |
17 | @heatmaps.route('/heatmaps/', methods=['POST'])
18 | def upload_heatmaps(model_name):
19 | data = request.get_json()
20 | base64_image = data['data']
21 |
22 | model_module = __import__(f"models.{model_name}.model", fromlist=["get_model"])
23 | model = model_module.get_model()
24 | layer_names_module = __import__(f"models.{model_name}.model", fromlist=["get_all_layer_names"])
25 | layer_names = layer_names_module.get_layer_names()
26 |
27 | preprocessed_image = preprocess_image(base64_image)
28 | pred_index = generate_predictions(model, base64_image)['predicted_class']
29 |
30 | visuals_dir = os.path.join(dir, "../visuals/")
31 | heatmaps_dir = os.path.join(visuals_dir, "heatmaps/")
32 | heatmapspb_dir = os.path.join(heatmaps_dir, "progressbars/")
33 | os.makedirs(visuals_dir, exist_ok=True)
34 | os.makedirs(heatmaps_dir, exist_ok=True)
35 | os.makedirs(heatmapspb_dir, exist_ok=True)
36 |
37 | links = []
38 |     progressbar_links = []
39 |
40 | for layer_name in layer_names:
41 | heatmap = make_gradcam_heatmap(preprocessed_image, model, layer_name, pred_index)
42 | if heatmap is None:
43 | continue
44 |
45 | img = tf.squeeze(preprocessed_image)
46 | img = (img * 0.5) + 0.5
47 | heatmap_resized = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
48 | heatmap_resized = np.uint8(255 * heatmap_resized)
49 | heatmap_colored = cv2.applyColorMap(heatmap_resized, cv2.COLORMAP_JET)
50 | superimposed_img = heatmap_colored * 0.4 + np.uint8(img * 255)
51 |
52 | file_name = str(layer_names.index(layer_name) + 1).zfill(3) + layer_name
53 | output_path = os.path.join(heatmaps_dir, f"{file_name}.jpg")
54 | cv2.imwrite(output_path, superimposed_img)
55 |
56 | bucket_name = os.getenv('AWS_S3_BUCKET_NAME')
57 | upload_file(output_path, bucket_name, f"heatmaps/{file_name}.jpg")
58 | links.append(f"https://{bucket_name}.s3.amazonaws.com/heatmaps/{file_name}.jpg")
59 |
60 | output_path_pb = os.path.join(heatmapspb_dir, f"{file_name}.jpg")
61 | current_index = layer_names.index(layer_name)
62 | total_layers = len(layer_names)
63 | superimposed_img_with_progress = add_progress_bar(superimposed_img, current_index, total_layers, width=4)
64 |         cv2.imwrite(output_path_pb, superimposed_img_with_progress)  # Save the image with the progress bar
65 |
66 | upload_file(output_path_pb, bucket_name, f"heatmaps/progressbars/{file_name}.jpg")
67 |         progressbar_links.append(f"https://{bucket_name}.s3.amazonaws.com/heatmaps/progressbars/{file_name}.jpg")
68 |
69 |     return jsonify({'heatmaps': links, 'progressbars': progressbar_links})
--------------------------------------------------------------------------------
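The route rescales the [-1, 1] input back to [0, 1], maps the Grad-CAM output through COLORMAP_JET, and blends at 0.4 opacity. A sketch of that blend step in isolation, with dummy arrays standing in for the real image and heatmap and an explicit clip added here to keep the result in uint8 range:

# Sketch of the overlay arithmetic used above, on synthetic data.
import cv2
import numpy as np

img = np.random.rand(224, 224, 3)   # stands in for (x * 0.5) + 0.5
heatmap = np.random.rand(7, 7)      # stands in for a Grad-CAM heatmap

hm = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
hm = np.uint8(255 * hm)
hm_colored = cv2.applyColorMap(hm, cv2.COLORMAP_JET)
overlay = np.uint8(np.clip(hm_colored * 0.4 + img * 255, 0, 255))
cv2.imwrite('overlay.jpg', overlay)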
/src/app/ui/navComponents/Accordion.tsx:
--------------------------------------------------------------------------------
1 | import AccordionLink from './AccordionLink';
2 | import Icon from './Icon';
3 | import { AccordionContent } from '@/app/lib/definitions';
4 |
5 | interface Props {
6 | activeAccordion: string[];
7 | toggleAccordion: (path: string[]) => void;
8 | label: string;
9 | contents: AccordionContent[];
10 | depth: number;
11 | id: string[];
12 | icon?: 'gear' | 'book' | 'pages';
13 | }
14 |
15 | export default function Accordion(props: Props) {
16 | const contents: JSX.Element[] = [];
17 | props.contents.forEach((el) => {
18 | if (el.type === 'Link') {
19 | contents.push(
20 |
21 | );
22 | } else {
23 | contents.push(
24 |
32 | );
33 | }
34 | });
35 |
36 | return (
37 |
38 | props.toggleAccordion(props.id)}
46 | >
47 | {props.icon && }
48 | {props.label}
49 |
61 |
62 |
63 |
71 |
77 |
78 |
79 |
80 |
90 |
91 | );
92 | }
93 |
--------------------------------------------------------------------------------
/Models/TensorFlow/app/routes/preprocess.py:
--------------------------------------------------------------------------------
1 | import os
2 | from flask import Blueprint, request, jsonify
3 | from botocore.exceptions import ClientError
4 | import base64
5 | import glob
6 |
7 | from app.services.generate_images import make_preprocess_image, make_preprocess_images
8 | from app.utils.image_utils import preprocess_image
9 | from app.utils.s3_utils import download_file, upload_file
10 |
11 |
12 | dir = os.path.dirname(__file__)
13 | preprocess = Blueprint('preprocess', __name__)
14 |
15 | @preprocess.route('/preprocess', methods=['POST'])
16 | def upload_preprocess():
17 | data = request.get_json()
18 | print(f"Received data: {data}")
19 | base64_image = data['data']
20 | s3_url = str(data['filePath']) # Convert s3_url to string
21 | image_type = data['fileType']
22 |
23 | print(f"Received image from {s3_url}")
24 | print(f"Image type is {image_type}")
25 |
26 | # Process the base64-encoded image
27 | preprocessed = preprocess_image(base64_image)
28 |
29 | # Prepare directories and filenames
30 | assets_dir = os.path.join(dir, '../assets')
31 | if not os.path.exists(assets_dir):
32 | os.makedirs(assets_dir)
33 |
34 | print(f"Assets directory is {assets_dir}")
35 |
36 | object_name = s3_url.split('.com/')[-1]
37 | print(f"Object name is {object_name}")
38 |
39 |     download_path = os.path.join(assets_dir, f'upload.{image_type}')
40 |
41 | s3_bucket = os.environ.get('AWS_S3_BUCKET_NAME')
42 |
43 | try:
44 | # Attempt to download the file from S3
45 | download_file(s3_bucket, object_name, download_path)
46 | except Exception as e:
47 | # If download fails, fallback to saving the file from base64 data
48 | print(f"Failed to download {object_name} from S3: {e}. Fallback to base64.")
49 | try:
50 | image_data = base64.b64decode(base64_image)
51 | with open(download_path, 'wb') as file:
52 | file.write(image_data)
53 | except Exception as e:
54 | print(f"Failed to save file from base64 data: {e}")
55 | return jsonify({'error': 'Failed to process image.'}), 500
56 |
57 | # Error handling for S3 operations
58 | try:
59 | print(f"Downloaded file from s3://{s3_bucket}/{object_name}")
60 | file_names = make_preprocess_images(base64_image, assets_dir)
61 | # upload_file(upload_path, s3_bucket, 'preprocess/image.jpg')
62 | if os.path.exists(download_path):
63 | os.remove(download_path)
64 | print(f"Deleted upload.jpg from {assets_dir}")
65 |
66 | file_list = glob.glob(os.path.join(assets_dir, '*'))
67 | # Loop through each file and upload it to S3
68 | for file_path in file_list:
69 | try:
70 | # Get the filename from the file path
71 | filename = os.path.basename(file_path)
72 |
73 | # Upload the file to S3
74 | upload_file(file_path, s3_bucket, f'preprocess/{filename}')
75 |
76 | print(f"Uploaded {filename} to s3://{s3_bucket}/preprocess/{filename}")
77 | except ClientError as e:
78 | print(f"Failed to upload {filename} to S3: {e}")
79 |
80 | print(f"Uploaded preprocessed image to s3://{s3_bucket}/preprocess/image.jpg")
81 | object_names = [f"preprocess/{file_name}" for file_name in file_names]
82 | return jsonify({'preprocessed_images': object_names})
83 | except ClientError as e:
84 | return jsonify({'error': str(e)}), 500
--------------------------------------------------------------------------------
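A sketch of exercising this endpoint over HTTP the way the Next.js route does, assuming the Flask app listens on port 5000 (the same default the frontend's SERVICE_HOST logic assumes); the S3 URL is a placeholder, and requests is already pinned in requirements.txt:

# Sketch: POST to /preprocess directly.
import base64
import requests

with open('dog.jpg', 'rb') as f:
    b64 = base64.b64encode(f.read()).decode('utf-8')

resp = requests.post('http://localhost:5000/preprocess', json={
    'data': b64,
    'filePath': 'https://my-bucket.s3.amazonaws.com/inputimages/MobileNet/123.jpg',
    'fileType': 'jpg',
})
print(resp.json())  # {'preprocessed_images': ['preprocess/original_image.jpg', ...]}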
/Models/TensorBoard/models/MobileNet/conv_layers/all_layers.txt:
--------------------------------------------------------------------------------
1 | Conv1
2 | bn_Conv1
3 | Conv1_relu
4 | expanded_conv_depthwise
5 | expanded_conv_depthwise_BN
6 | expanded_conv_depthwise_relu
7 | expanded_conv_project
8 | expanded_conv_project_BN
9 | block_1_expand
10 | block_1_expand_BN
11 | block_1_expand_relu
12 | block_1_pad
13 | block_1_depthwise
14 | block_1_depthwise_BN
15 | block_1_depthwise_relu
16 | block_1_project
17 | block_1_project_BN
18 | block_2_expand
19 | block_2_expand_BN
20 | block_2_expand_relu
21 | block_2_depthwise
22 | block_2_depthwise_BN
23 | block_2_depthwise_relu
24 | block_2_project
25 | block_2_project_BN
26 | block_2_add
27 | block_3_expand
28 | block_3_expand_BN
29 | block_3_expand_relu
30 | block_3_pad
31 | block_3_depthwise
32 | block_3_depthwise_BN
33 | block_3_depthwise_relu
34 | block_3_project
35 | block_3_project_BN
36 | block_4_expand
37 | block_4_expand_BN
38 | block_4_expand_relu
39 | block_4_depthwise
40 | block_4_depthwise_BN
41 | block_4_depthwise_relu
42 | block_4_project
43 | block_4_project_BN
44 | block_4_add
45 | block_5_expand
46 | block_5_expand_BN
47 | block_5_expand_relu
48 | block_5_depthwise
49 | block_5_depthwise_BN
50 | block_5_depthwise_relu
51 | block_5_project
52 | block_5_project_BN
53 | block_5_add
54 | block_6_expand
55 | block_6_expand_BN
56 | block_6_expand_relu
57 | block_6_pad
58 | block_6_depthwise
59 | block_6_depthwise_BN
60 | block_6_depthwise_relu
61 | block_6_project
62 | block_6_project_BN
63 | block_7_expand
64 | block_7_expand_BN
65 | block_7_expand_relu
66 | block_7_depthwise
67 | block_7_depthwise_BN
68 | block_7_depthwise_relu
69 | block_7_project
70 | block_7_project_BN
71 | block_7_add
72 | block_8_expand
73 | block_8_expand_BN
74 | block_8_expand_relu
75 | block_8_depthwise
76 | block_8_depthwise_BN
77 | block_8_depthwise_relu
78 | block_8_project
79 | block_8_project_BN
80 | block_8_add
81 | block_9_expand
82 | block_9_expand_BN
83 | block_9_expand_relu
84 | block_9_depthwise
85 | block_9_depthwise_BN
86 | block_9_depthwise_relu
87 | block_9_project
88 | block_9_project_BN
89 | block_9_add
90 | block_10_expand
91 | block_10_expand_BN
92 | block_10_expand_relu
93 | block_10_depthwise
94 | block_10_depthwise_BN
95 | block_10_depthwise_relu
96 | block_10_project
97 | block_10_project_BN
98 | block_11_expand
99 | block_11_expand_BN
100 | block_11_expand_relu
101 | block_11_depthwise
102 | block_11_depthwise_BN
103 | block_11_depthwise_relu
104 | block_11_project
105 | block_11_project_BN
106 | block_11_add
107 | block_12_expand
108 | block_12_expand_BN
109 | block_12_expand_relu
110 | block_12_depthwise
111 | block_12_depthwise_BN
112 | block_12_depthwise_relu
113 | block_12_project
114 | block_12_project_BN
115 | block_12_add
116 | block_13_expand
117 | block_13_expand_BN
118 | block_13_expand_relu
119 | block_13_pad
120 | block_13_depthwise
121 | block_13_depthwise_BN
122 | block_13_depthwise_relu
123 | block_13_project
124 | block_13_project_BN
125 | block_14_expand
126 | block_14_expand_BN
127 | block_14_expand_relu
128 | block_14_depthwise
129 | block_14_depthwise_BN
130 | block_14_depthwise_relu
131 | block_14_project
132 | block_14_project_BN
133 | block_14_add
134 | block_15_expand
135 | block_15_expand_BN
136 | block_15_expand_relu
137 | block_15_depthwise
138 | block_15_depthwise_BN
139 | block_15_depthwise_relu
140 | block_15_project
141 | block_15_project_BN
142 | block_15_add
143 | block_16_expand
144 | block_16_expand_BN
145 | block_16_expand_relu
146 | block_16_depthwise
147 | block_16_depthwise_BN
148 | block_16_depthwise_relu
149 | block_16_project
150 | block_16_project_BN
151 | Conv_1
152 | Conv_1_bn
153 | out_relu
154 |
--------------------------------------------------------------------------------
/Models/TensorFlow/models/MobileNet/conv_layers/all_layers.txt:
--------------------------------------------------------------------------------
1 | Conv1
2 | bn_Conv1
3 | Conv1_relu
4 | expanded_conv_depthwise
5 | expanded_conv_depthwise_BN
6 | expanded_conv_depthwise_relu
7 | expanded_conv_project
8 | expanded_conv_project_BN
9 | block_1_expand
10 | block_1_expand_BN
11 | block_1_expand_relu
12 | block_1_pad
13 | block_1_depthwise
14 | block_1_depthwise_BN
15 | block_1_depthwise_relu
16 | block_1_project
17 | block_1_project_BN
18 | block_2_expand
19 | block_2_expand_BN
20 | block_2_expand_relu
21 | block_2_depthwise
22 | block_2_depthwise_BN
23 | block_2_depthwise_relu
24 | block_2_project
25 | block_2_project_BN
26 | block_2_add
27 | block_3_expand
28 | block_3_expand_BN
29 | block_3_expand_relu
30 | block_3_pad
31 | block_3_depthwise
32 | block_3_depthwise_BN
33 | block_3_depthwise_relu
34 | block_3_project
35 | block_3_project_BN
36 | block_4_expand
37 | block_4_expand_BN
38 | block_4_expand_relu
39 | block_4_depthwise
40 | block_4_depthwise_BN
41 | block_4_depthwise_relu
42 | block_4_project
43 | block_4_project_BN
44 | block_4_add
45 | block_5_expand
46 | block_5_expand_BN
47 | block_5_expand_relu
48 | block_5_depthwise
49 | block_5_depthwise_BN
50 | block_5_depthwise_relu
51 | block_5_project
52 | block_5_project_BN
53 | block_5_add
54 | block_6_expand
55 | block_6_expand_BN
56 | block_6_expand_relu
57 | block_6_pad
58 | block_6_depthwise
59 | block_6_depthwise_BN
60 | block_6_depthwise_relu
61 | block_6_project
62 | block_6_project_BN
63 | block_7_expand
64 | block_7_expand_BN
65 | block_7_expand_relu
66 | block_7_depthwise
67 | block_7_depthwise_BN
68 | block_7_depthwise_relu
69 | block_7_project
70 | block_7_project_BN
71 | block_7_add
72 | block_8_expand
73 | block_8_expand_BN
74 | block_8_expand_relu
75 | block_8_depthwise
76 | block_8_depthwise_BN
77 | block_8_depthwise_relu
78 | block_8_project
79 | block_8_project_BN
80 | block_8_add
81 | block_9_expand
82 | block_9_expand_BN
83 | block_9_expand_relu
84 | block_9_depthwise
85 | block_9_depthwise_BN
86 | block_9_depthwise_relu
87 | block_9_project
88 | block_9_project_BN
89 | block_9_add
90 | block_10_expand
91 | block_10_expand_BN
92 | block_10_expand_relu
93 | block_10_depthwise
94 | block_10_depthwise_BN
95 | block_10_depthwise_relu
96 | block_10_project
97 | block_10_project_BN
98 | block_11_expand
99 | block_11_expand_BN
100 | block_11_expand_relu
101 | block_11_depthwise
102 | block_11_depthwise_BN
103 | block_11_depthwise_relu
104 | block_11_project
105 | block_11_project_BN
106 | block_11_add
107 | block_12_expand
108 | block_12_expand_BN
109 | block_12_expand_relu
110 | block_12_depthwise
111 | block_12_depthwise_BN
112 | block_12_depthwise_relu
113 | block_12_project
114 | block_12_project_BN
115 | block_12_add
116 | block_13_expand
117 | block_13_expand_BN
118 | block_13_expand_relu
119 | block_13_pad
120 | block_13_depthwise
121 | block_13_depthwise_BN
122 | block_13_depthwise_relu
123 | block_13_project
124 | block_13_project_BN
125 | block_14_expand
126 | block_14_expand_BN
127 | block_14_expand_relu
128 | block_14_depthwise
129 | block_14_depthwise_BN
130 | block_14_depthwise_relu
131 | block_14_project
132 | block_14_project_BN
133 | block_14_add
134 | block_15_expand
135 | block_15_expand_BN
136 | block_15_expand_relu
137 | block_15_depthwise
138 | block_15_depthwise_BN
139 | block_15_depthwise_relu
140 | block_15_project
141 | block_15_project_BN
142 | block_15_add
143 | block_16_expand
144 | block_16_expand_BN
145 | block_16_expand_relu
146 | block_16_depthwise
147 | block_16_depthwise_BN
148 | block_16_depthwise_relu
149 | block_16_project
150 | block_16_project_BN
151 | Conv_1
152 | Conv_1_bn
153 | out_relu
154 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/actions/image/gifs/route.ts:
--------------------------------------------------------------------------------
1 | import { type NextRequest } from 'next/server';
2 | import { v2 as cloudinary } from 'cloudinary';
3 | import { S3Client, GetObjectCommand } from '@aws-sdk/client-s3';
4 | import { getSignedUrl } from '@aws-sdk/s3-request-presigner';
5 | import { ImageAndVideoFormatOptions } from 'cloudinary';
6 |
7 | cloudinary.config({
8 | cloud_name: process.env.CLOUDINARY_CLOUD_NAME,
9 | api_key: process.env.CLOUDINARY_API_KEY,
10 | api_secret: process.env.CLOUDINARY_API_SECRET,
11 | });
12 |
13 | const s3 = new S3Client({
14 | region: process.env.AWS_DEFAULT_REGION as string,
15 | credentials: {
16 | accessKeyId: process.env.AWS_ACCESS_KEY_ID as string,
17 | secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY as string,
18 | },
19 | });
20 |
21 | const generateSignedUrl = async (
22 | bucketName: string,
23 | objectKey: string,
24 | expiresInSeconds: number
25 | ) => {
26 | const command = new GetObjectCommand({
27 | Bucket: bucketName,
28 | Key: objectKey,
29 | });
30 |
31 | // Generate the signed URL
32 | try {
33 | const signedUrl = await getSignedUrl(s3, command, {
34 | expiresIn: expiresInSeconds,
35 | });
36 | return signedUrl;
37 | } catch (error) {
38 | console.error('Error generating signed URL:', error);
39 | throw error; // Rethrow the error for further handling
40 | }
41 | };
42 |
43 | const uploadImage = async (
44 | imagePath: string,
45 | publicId: string,
46 | tag: string
47 | ) => {
48 | return cloudinary.uploader.upload(imagePath, {
49 | folder: `${tag}_images`,
50 | tags: tag,
51 | public_id: publicId,
52 | });
53 | };
54 |
55 | const delay = (ms: number) => {
56 | return new Promise(resolve => setTimeout(resolve, ms));
57 | }
58 |
59 | export async function POST(req: NextRequest) {
60 | try {
61 | const { urls, tag } = await req.json();
62 |
63 | const objectKeys = urls.map((url: string) => {
64 | const startIndex = url.indexOf('.com') + 5;
65 | const endIndex = url.length;
66 | const substring = url.substring(startIndex, endIndex);
67 | return substring;
68 | });
69 |
70 | if (!objectKeys.length || !tag) {
71 |       return new Response('Missing URLs or tag', { status: 400 });
72 | }
73 |
74 | const signedUrls = await Promise.all(
75 | objectKeys.map((objectKey: string) =>
76 | generateSignedUrl(
77 | process.env.AWS_S3_BUCKET_NAME as string,
78 | objectKey,
79 | 180
80 | )
81 | )
82 | );
83 |
84 | // Creating upload promises for all URLs
85 |     const uploadPromises = signedUrls.map((url: string, index: number) => {
86 |       const formattedIndex = String(index + 1).padStart(3, '0');
87 |       return uploadImage(url, `${tag}_${formattedIndex}`, tag);
88 |     });
89 |
90 | // Await all upload promises
91 | await Promise.all(uploadPromises);
92 | console.log('All images uploaded.');
93 |
94 | await delay(1000);
95 |
96 | // Create GIF from uploaded images and cleanup
97 | const gifCreationResult = await cloudinary.uploader.multi(tag as string, {
98 | format: 'gif' as ImageAndVideoFormatOptions,
99 | });
100 |
101 | // Delete uploaded images after creating the GIF
102 |     await cloudinary.api.delete_resources_by_tag(tag);
103 | console.log('Uploaded images cleaned up successfully');
104 |
105 | // Return the URL of the created GIF
106 | return new Response(
107 | JSON.stringify({
108 | url: gifCreationResult.secure_url as string,
109 | tag: tag as string,
110 | }),
111 | { status: 200 }
112 | );
113 | } catch (error) {
114 | console.error('Error:', error);
115 | return new Response('Internal Server Error', { status: 500 });
116 | }
117 | }
118 |
--------------------------------------------------------------------------------
/Models/TensorBoard/app/services/generate_images.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import os
3 | import matplotlib.pyplot as plt
4 | import cv2
5 |
6 | def make_gradcam_heatmap(img_array, model, conv_layer_name, pred_index=None):
7 | # Create a model that maps the input image to the activations of the conv layer
8 | # as well as the output predictions
9 | grad_model = tf.keras.models.Model(inputs=model.inputs, outputs=[model.get_layer(conv_layer_name).output, model.output])
10 |
11 | # Record operations for automatic differentiation to compute gradients later
12 | with tf.GradientTape() as tape:
13 | # Cast the input image to float32 in case it's not
14 | img_array = tf.cast(img_array, tf.float32)
15 | tape.watch(img_array)
16 | # Compute the model's output (including the output of the conv layer)
17 | conv_layer_output, preds = grad_model(img_array)
18 | if pred_index is None:
19 | pred_index = tf.argmax(preds[0])
20 | # Use the gradient of the output neuron (for the predicted class or specified class)
21 | # relative to the conv layer output to compute the gradients
22 | class_channel = preds[:, pred_index]
23 |
24 | # Compute gradients towards the class channel
25 | grads = tape.gradient(class_channel, conv_layer_output)
26 |
27 | # Pooling and normalization of gradients
28 | pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
29 | conv_layer_output = conv_layer_output[0]
30 |
31 | # Weight the channels by the gradients and then sum them
32 | heatmap = tf.matmul(conv_layer_output, pooled_grads[..., tf.newaxis])
33 | heatmap = tf.squeeze(heatmap)
34 |
35 | # For visualization, make sure the heatmap is in a [0, 1] range
36 | heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
37 | return heatmap.numpy()
38 |
39 | def make_feature_maps(preprocessed_image, model, layer_name, output_dir, layer_names, max_feature_maps=16):
40 | intermediate_layer_model = tf.keras.Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
41 | intermediate_output = intermediate_layer_model(preprocessed_image)
42 | num_feature_maps = intermediate_output.shape[-1]
43 | num_feature_maps_to_visualize = min(num_feature_maps, max_feature_maps)
44 |
45 | if num_feature_maps_to_visualize >= 16:
46 | nrows, ncols = 4, 4
47 | fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(20, 20))
48 | axes = axes.flatten()
49 |
50 | for i in range(num_feature_maps_to_visualize):
51 | ax = axes[i]
52 | feature_map = intermediate_output[0, :, :, i]
53 | ax.imshow(feature_map, cmap='viridis')
54 | ax.set_title(f'Feature Map {i+1}')
55 | ax.axis('off')
56 |
57 | file_name = f"{str(layer_names.index(layer_name) + 1).zfill(3) + layer_name}.jpg"
58 | output_path = os.path.join(output_dir, file_name)
59 | plt.savefig(output_path, format='jpeg')
60 | plt.close(fig)
61 | else:
62 | return
63 |
64 | return output_path
65 |
66 |
67 | def make_preprocess_image(original_image_path, preprocessed_image, output_dir):
68 | original_image = cv2.imread(original_image_path)
69 | preprocessed_image_np = preprocessed_image.numpy().squeeze()
70 |
71 | fig, ax = plt.subplots(1, 2, figsize=(10, 5))
72 | ax[0].imshow(cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB))
73 | ax[0].set_title('Original Image')
74 | ax[0].axis('off')
75 |
76 | ax[1].imshow((preprocessed_image_np * 0.5) + 0.5) # Assuming preprocessing includes normalization
77 | ax[1].set_title('Preprocessed Image')
78 | ax[1].axis('off')
79 | file_name = 'input_images_comparison.png'
80 | file_path = os.path.join(output_dir, file_name)
81 |     print(f"Saving comparison figure to {file_path}")
82 | # Save the figure
83 | plt.savefig(file_path)
84 | plt.close(fig) # Close the figure to free memory
--------------------------------------------------------------------------------
/src/app/ui/navComponents/Sidenav.tsx:
--------------------------------------------------------------------------------
1 | // Import necessary libraries and components
2 | import React, { useState, useEffect, useRef } from 'react';
3 | import Link from 'next/link';
4 | import Accordion from './Accordion';
5 | import { AccordionContent } from '@/app/lib/definitions';
6 | import {
7 | modAccordion,
8 | libAccordion,
9 | docAccordion,
10 | } from '@/app/lib/accordionData';
11 |
12 | interface props {
13 | sidebarOpen: boolean;
14 |   setSidebarOpen: React.Dispatch<React.SetStateAction<boolean>>;
15 | }
16 |
17 | // Define the Sidenav component
18 | export default function Sidenav({ sidebarOpen, setSidebarOpen }: props) {
19 | // Define state for sidebar expansion
20 | const [sidebarExpanded, setSidebarExpanded] = useState(false);
21 |   const [activeAccordion, setActiveAccordion] = useState<string[]>([]);
22 |
23 | // Create a reference to the sidebar element
24 |   const sidebar = useRef<HTMLElement | null>(null);
25 |
26 | // Effect to add or remove a className to the body element based on sidebar expansion
27 | useEffect(() => {
28 | if (sidebarExpanded) {
29 | document.body.classList.add('sidebar-expanded');
30 | } else {
31 | document.body.classList.remove('sidebar-expanded');
32 | }
33 | }, [sidebarExpanded]);
34 |
35 | // Function to toggle accordion item
36 | const toggleAccordion = (path: string[]) => {
37 | const newPath = path.slice();
38 | if (activeAccordion.includes(newPath[newPath.length - 1])) newPath.pop();
39 | setActiveAccordion(newPath);
40 | };
41 |
42 | return (
43 |
149 | );
150 | }
151 |
--------------------------------------------------------------------------------
/Models/TensorFlow/app/services/generate_images.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import os
3 | import matplotlib.pyplot as plt
4 | import cv2
5 | import base64
6 | import numpy as np
7 |
8 | def make_gradcam_heatmap(img_array, model, conv_layer_name, pred_index=None):
9 | # Create a model that maps the input image to the activations of the conv layer
10 | # as well as the output predictions
11 | grad_model = tf.keras.models.Model(inputs=model.inputs, outputs=[model.get_layer(conv_layer_name).output, model.output])
12 |
13 | # Record operations for automatic differentiation to compute gradients later
14 | with tf.GradientTape() as tape:
15 | # Cast the input image to float32 in case it's not
16 | img_array = tf.cast(img_array, tf.float32)
17 | tape.watch(img_array)
18 | # Compute the model's output (including the output of the conv layer)
19 | conv_layer_output, preds = grad_model(img_array)
20 | if pred_index is None:
21 | pred_index = tf.argmax(preds[0])
22 | # Use the gradient of the output neuron (for the predicted class or specified class)
23 | # relative to the conv layer output to compute the gradients
24 | class_channel = preds[:, pred_index]
25 |
26 | # Compute gradients towards the class channel
27 | grads = tape.gradient(class_channel, conv_layer_output)
28 |
29 | # Pooling and normalization of gradients
30 | pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
31 | conv_layer_output = conv_layer_output[0]
32 |
33 | # Weight the channels by the gradients and then sum them
34 | heatmap = tf.matmul(conv_layer_output, pooled_grads[..., tf.newaxis])
35 | heatmap = tf.squeeze(heatmap)
36 |
37 | # For visualization, make sure the heatmap is in a [0, 1] range
38 | heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
39 | return heatmap.numpy()
40 |
41 | def make_feature_maps(preprocessed_image, model, layer_name, output_dir, layer_names, max_feature_maps=16):
42 | intermediate_layer_model = tf.keras.Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
43 | intermediate_output = intermediate_layer_model(preprocessed_image)
44 | num_feature_maps = intermediate_output.shape[-1]
45 | num_feature_maps_to_visualize = min(num_feature_maps, max_feature_maps)
46 |
47 | if num_feature_maps_to_visualize >= 16:
48 | nrows, ncols = 4, 4
49 | fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(20, 20))
50 | axes = axes.flatten()
51 |
52 | for i in range(num_feature_maps_to_visualize):
53 | ax = axes[i]
54 | feature_map = intermediate_output[0, :, :, i]
55 | ax.imshow(feature_map, cmap='viridis')
56 | ax.set_title(f'Feature Map {i+1}')
57 | ax.axis('off')
58 |
59 | file_name = f"{str(layer_names.index(layer_name) + 1).zfill(3) + layer_name}.jpg"
60 | output_path = os.path.join(output_dir, file_name)
61 | plt.savefig(output_path, format='jpeg')
62 | plt.close(fig)
63 | else:
64 | return
65 |
66 | return output_path
67 |
68 |
69 | def make_preprocess_image(original_image_path, preprocessed_image, output_dir):
70 | original_image = cv2.imread(original_image_path)
71 | preprocessed_image_np = preprocessed_image.numpy().squeeze()
72 |
73 | fig, ax = plt.subplots(1, 2, figsize=(10, 5))
74 | ax[0].imshow(cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB))
75 | ax[0].set_title('Original Image')
76 | ax[0].axis('off')
77 |
78 | ax[1].imshow((preprocessed_image_np * 0.5) + 0.5) # Assuming preprocessing includes normalization
79 | ax[1].axis('off')
80 |     # Save the comparison figure to a file inside output_dir
81 |     file_path = os.path.join(output_dir, 'input_images_comparison.jpg')
82 |     print(f"Saving comparison figure to {file_path}")
83 |     plt.savefig(file_path, format='jpeg')
84 |     print('saved')
85 | plt.close(fig) # Close the figure to free memory
86 |
87 | def preprocess_image_steps(base64_image_str):
88 | # Decode the image from base64
89 | image_bytes = base64.b64decode(base64_image_str)
90 | # Decode image to tensor and resize
91 | image_tensor = tf.image.decode_image(image_bytes, channels=3)
92 | resized_image_tensor = tf.image.resize(image_tensor, (224, 224))
93 | # Normalize
94 | preprocessed_image_tensor = tf.keras.applications.mobilenet_v2.preprocess_input(resized_image_tensor)
95 | # Add batch dimension
96 | final_image_tensor = tf.expand_dims(preprocessed_image_tensor, 0)
97 |
98 | # Convert tensors to numpy for visualization
99 | original_np = image_tensor.numpy()
100 | resized_np = resized_image_tensor.numpy()
    101 |     preprocessed_np = preprocessed_image_tensor.numpy()  # still normalized to [-1, 1]; rescaled for display later
102 |
103 | return original_np, resized_np, preprocessed_np
104 |
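# Illustrative range check: mobilenet_v2.preprocess_input computes x / 127.5 - 1,
# mapping [0, 255] to [-1, 1]. That is why the display helpers in this module
# rescale with (x * 0.5) + 0.5 before rendering.
def _preprocess_range_demo():
    x = tf.constant([0.0, 127.5, 255.0])
    return tf.keras.applications.mobilenet_v2.preprocess_input(x)  # -> [-1., 0., 1.]
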
105 | def save_image(image_np, output_path):
106 | fig, ax = plt.subplots(figsize=(5, 5))
107 | ax.imshow(image_np)
108 | ax.axis('off') # Remove axes and borders
109 | plt.savefig(output_path, format='jpeg', bbox_inches='tight', pad_inches=0)
110 | plt.close(fig)
111 |
112 | def make_preprocess_images(base64_image_str, output_dir):
113 | original_np, resized_np, preprocessed_np = preprocess_image_steps(base64_image_str)
114 |
115 | original_file_name = 'original_image.jpg'
116 | resized_file_name = 'resized_image.jpg'
117 | preprocessed_file_name = 'preprocessed_image.jpg'
118 | # Adjust these lines to change the format or naming convention
119 | original_output_path = os.path.join(output_dir, original_file_name)
120 | resized_output_path = os.path.join(output_dir, resized_file_name)
121 | preprocessed_output_path = os.path.join(output_dir, preprocessed_file_name)
122 |
    123 |     # Save the images; tf.image.decode_image returns RGB, so no BGR-to-RGB conversion is needed
    124 |     save_image(original_np.astype(np.uint8), original_output_path)
    125 |     save_image(resized_np.astype(np.uint8), resized_output_path)
    126 |     save_image((preprocessed_np * 0.5) + 0.5, preprocessed_output_path)  # rescale [-1, 1] -> [0, 1]
127 |
128 | return [original_file_name, resized_file_name, preprocessed_file_name]
--------------------------------------------------------------------------------
/src/app/lib/accordionData.ts:
--------------------------------------------------------------------------------
1 | import { AccordionContent } from "./definitions";
2 |
3 |
4 | export const modAccordion: AccordionContent = {
5 | type: 'Accordion',
6 | label: 'Models',
7 | id: ['mod-accordion'],
8 | icon: 'gear',
9 | contents: [
10 | {
11 | type: 'Accordion',
12 | label: 'TensorFlow',
13 | id: ['mod-accordion', 'mod-tf-accordion'],
14 | contents: [
15 | {
16 | type: 'Accordion',
17 | label: 'Images',
18 | id: ['mod-accordion', 'mod-tf-accordion', 'mod-tf-img-accordion'],
19 | contents: [
20 | {
21 | type: 'Link',
22 | label: 'Classification',
23 | path: '/models/tensorflow/images/classification',
24 | },
25 | {
26 | type: 'Link',
27 | label: 'Detection',
28 | path: '/models/tensorflow/images/detection',
29 | },
30 | {
31 | type: 'Link',
32 | label: 'Generation',
33 | path: '/models/tensorflow/images/generation',
34 | },
35 | ],
36 | },
37 | {
38 | type: 'Accordion',
39 | label: 'Text',
40 | id: ['mod-accordion', 'mod-tf-accordion', 'mod-tf-txt-accordion'],
41 | contents: [
42 | {
43 | type: 'Link',
44 | label: 'GPTs',
45 | path: '/models/tensorflow/text/gpt',
46 | },
47 | {
48 | type: 'Link',
     49 |             label: 'Text to Speech',
50 | path: '/models/tensorflow/text/texttospeech',
51 | },
52 | ],
53 | },
54 | ],
55 | },
56 | {
57 | type: 'Accordion',
58 | label: 'PyTorch',
59 | id: ['mod-accordion', 'mod-py-accordion'],
60 | contents: [
61 | {
62 | type: 'Accordion',
63 | label: 'Images',
64 | id: ['mod-accordion', 'mod-py-accordion', 'mod-py-img-accordion'],
65 | contents: [
66 | {
67 | type: 'Link',
68 | label: 'Classification',
69 | path: '/models/pytorch/images/classification',
70 | },
71 | {
72 | type: 'Link',
73 | label: 'Detection',
74 | path: '/models/pytorch/images/detection',
75 | },
76 | {
77 | type: 'Link',
78 | label: 'Generation',
79 | path: '/models/pytorch/images/generation',
80 | },
81 | ],
82 | },
83 | {
84 | type: 'Accordion',
85 | label: 'Text',
86 | id: ['mod-accordion', 'mod-py-accordion', 'mod-py-txt-accordion'],
87 | contents: [
88 | {
89 | type: 'Link',
90 | label: 'GPTs',
91 | path: '/models/pytorch/text/gpt',
92 | },
93 | {
94 | type: 'Link',
     95 |             label: 'Text to Speech',
96 | path: '/models/pytorch/text/texttospeech',
97 | },
98 | ],
99 | },
100 | ],
101 | },
102 | {
103 | type: 'Accordion',
104 | label: 'Model Agnostic',
105 | id: ['mod-accordion', 'mod-ag-accordion'],
106 | contents: [
107 | {
108 | type: 'Accordion',
109 | label: 'Images',
110 | id: ['mod-accordion', 'mod-ag-accordion', 'mod-ag-img-accordion'],
111 | contents: [
112 | {
113 | type: 'Link',
114 | label: 'Classification',
115 | path: '/models/agnostic/images/classification',
116 | },
117 | {
118 | type: 'Link',
119 | label: 'Detection',
120 | path: '/models/agnostic/images/detection',
121 | },
122 | {
123 | type: 'Link',
124 | label: 'Generation',
125 | path: '/models/agnostic/images/generation',
126 | },
127 | ],
128 | },
129 | {
130 | type: 'Accordion',
131 | label: 'Text',
132 | id: ['mod-accordion', 'mod-ag-accordion', 'mod-ag-txt-accordion'],
133 | contents: [
134 | {
135 | type: 'Link',
136 | label: 'GPTs',
137 | path: '/models/agnostic/text/gpt',
138 | },
139 | {
140 | type: 'Link',
    141 |               label: 'Text to Speech',
142 | path: '/models/agnostic/text/texttospeech',
143 | },
144 | ],
145 | },
146 | ],
147 | },
148 | ],
149 | };
150 |
151 | export const libAccordion: AccordionContent = {
152 | type: 'Accordion',
153 | label: 'Libraries',
154 | id: ['lib-accordion'],
155 | icon: 'book',
156 | contents: [
157 | {
158 | type: 'Link',
159 | label: 'Python',
160 | path: '/libraries/python',
161 | },
162 | {
163 | type: 'Accordion',
164 | label: 'React',
165 | id: ['lib-accordion', 'lib-react-accordion'],
166 | contents: [
167 | {
168 | type: 'Link',
    169 |           label: 'JavaScript',
170 | path: '/libraries/react/javascript',
171 | },
172 | {
173 | type: 'Link',
    174 |           label: 'TypeScript',
175 | path: '/libraries/react/typescript',
176 | },
177 | ],
178 | },
179 | ],
180 | };
181 |
182 | export const docAccordion: AccordionContent = {
183 | type: 'Accordion',
184 | label: 'Documentation',
185 | id: ['doc-accordion'],
186 | icon: 'pages',
187 | contents: [
188 | {
189 | type: 'Link',
190 | label: 'Python',
191 | path: '/documentation/python',
192 | },
193 | {
194 | type: 'Link',
    195 |       label: 'JavaScript / TypeScript',
196 | path: '/documentation/javascript-typescript',
197 | },
198 | ],
199 | };
--------------------------------------------------------------------------------
/src/app/ui/newModal.tsx:
--------------------------------------------------------------------------------
1 | import { Top5Obj } from '../lib/definitions';
2 | import styles from '../page.module.css';
3 | import HeatMapGif from './heatMapGif';
4 | import GridMapGif from './gridMapGif';
5 | import Top5 from './top5Classes';
6 |
7 | interface Props {
8 | closeViz: () => void;
9 | hGifURL: string;
10 | fGifURL: string;
11 | top5: Top5Obj;
12 | preprocessFilePath: string[];
13 | }
14 |
15 | export default function NewModal(props: Props) {
16 | return (
17 | // modal main container
18 |
19 | {/* exit button */}
20 |
21 |
22 |
27 |
28 |
29 | {/* intro slide */}
30 |
31 | {/* title */}
32 |
33 |
The Path Between Input and Output
34 |
35 |
36 |
37 |
Unraveling AI: A Visual Journey
38 | Discover how artificial intelligence
39 | perceives and interprets your image.
40 |
41 |
42 |
43 | This immersive experience will guide you through the fascinating
44 | process of AI image analysis. From preprocessing to feature
45 | extraction and classification, you'll gain a deeper
46 | understanding of how AI models work behind the scenes.
47 |
48 |
49 |
50 | {/*
Scroll Down For More Information
*/}
51 |
59 |
63 |
64 |
65 |
66 |
67 |
70 |
71 |
72 | {/*preprocess image explanation*/}
73 |
74 |
75 |
Preparing Your Image
76 |
77 | The essential steps that prepare your image for AI analysis.
78 |
79 |
80 |
81 |
     82 |                 Before an AI model can analyze your image, it needs to be
     83 |                 preprocessed. This crucial step converts the image into
     84 |                 the array of values that the neural network expects. In
     85 |                 this slide, notice how your image is transformed through resizing,
     86 |                 normalization, and other preprocessing techniques. These steps are
     87 |                 essential for the AI model to properly interpret your image.
88 |
89 |
90 |
91 | {/*
Scroll Down For More Information
*/}
92 |
100 |
104 |
105 |
106 |
107 |
108 |
109 |
110 | {/*preprocess image*/}
111 |
112 |
Initial Formatting
113 |
119 |
120 |
121 |
122 |
Image Resizing
123 |
124 |
130 |
131 |
132 |
133 |
Image Tensor
134 |
135 |
141 |
142 |
143 |
144 |
145 | {/*explain gifs*/}
146 |
147 |
148 |
The Brain of AI
149 | Your image entering the complex world of the neural network
150 |
151 |
152 |
153 | In the slide below, we show how your
154 | preprocessed image is analyzed by the AI model's neural
155 | network. The sequences showcase activation heatmaps at different
156 | layers, representing the regions of the image that the AI focuses
157 | on. As your image flows through the layers, the network assigns
158 | weights to different regions, ultimately leading to a
159 | classification decision.
160 |
161 |
162 |
163 | {/*
Scroll Down For More Information
*/}
164 |
172 |
176 |
177 |
178 |
179 |
180 |
181 |
182 |
183 |
184 |
185 |
Heat Map
186 |
187 |
188 |
189 |
Feature Map
190 |
191 |
192 |
193 |
    194 |                 On the left side, you can see which pixels the AI model focuses on
    195 |                 as it moves through the layers of the neural network. The network
    196 |                 assigns weights to important regions, and the larger the weight,
    197 |                 the hotter the region appears. The right side visualizes how the
    198 |                 neural network interprets different features of the image. Feature
    199 |                 detection is broad in the earlier layers and becomes focused
    200 |                 on smaller areas towards the end.
201 |
202 |
203 |
204 |
205 |
206 | );
207 | }
208 |
--------------------------------------------------------------------------------
/src/app/(routes)/models/tensorflow/images/classification/page.tsx:
--------------------------------------------------------------------------------
1 | 'use client';
2 |
  3 | import React, { useState, useEffect, ChangeEvent } from 'react';
4 | import axios from 'axios';
5 | import NewModal from '@/app/ui/newModal';
6 | import { Heatmaps, Featuremaps } from '@/app/lib/definitions';
7 | import { Top5Obj } from '@/app/lib/definitions';
8 |
9 | let inputImage: File | undefined;
10 | let count = 0;
11 |
12 | let modelName: string = 'MobileNet';
13 |
14 | export default function Home() {
15 | console.log(`===== RENDER ${count++} ======`);
16 | const states = [
17 | 'original',
18 | 'preprocessing',
19 | 'preprocessed',
20 | 'loadingMesh',
21 | 'featureMap',
22 | 'resizing',
23 | 'classifying',
24 | 'output',
25 | ];
26 |
27 | const [imgName, setImgName] = useState('Browse...');
28 | const [imgURL, setImgURL] = useState('');
29 | const [vizState, setVizState] = useState(false);
30 | const [predictionName, setPredictionName] = useState('');
31 | const [viz, openViz] = useState(false);
32 | const [hGifURL, setHGifURL] = useState('');
33 | const [fGifURL, setFGifURL] = useState('');
34 | const [time, setTime] = useState(0);
35 | const [top5, setTop5] = useState({} as Top5Obj);
 36 |   const [preprocessFilePath, setPreprocessFilePath] = useState<string[]>([]);
37 | const [filePath, setFilePath] = useState('');
38 | const [buttonState, setButtonState] = useState(0);
39 | const [heatmapLinks, setHeatmapLinks] = useState({} as Heatmaps);
40 | const [featuremapLinks, setFeaturemapLinks] = useState({} as Featuremaps);
41 | const [data, setData] = useState('');
42 | const [fileType, setFileType] = useState('');
43 | const [top5Formatted, setTop5Formatted] = useState({} as Top5Obj);
44 |
 45 |   const browse = (e: ChangeEvent<HTMLInputElement>) => {
46 | inputImage = e.currentTarget.files?.[0];
47 | setImgName(`File Name: ${inputImage?.name}`);
48 | if (inputImage) {
49 | setImgURL(URL.createObjectURL(inputImage));
50 | setButtonState(1);
51 | }
52 | };
53 |
54 | async function imgUpload(inputImage: File) {
55 | const formData = new FormData();
56 | formData.append('file', inputImage as File);
57 | formData.append('modelName', modelName);
58 | axios
59 | .post('/models/actions/image/upload', formData, {
60 | headers: {
61 | 'Content-Type': 'multipart/form-data',
62 | },
63 | })
64 | .then((response) => {
65 | console.log('response:', response.data);
66 | setFilePath(response.data.filePath as string);
67 | })
68 | .catch((error) => {
69 | console.error(error);
70 | });
71 | }
72 |
73 | async function predict(data: string, modelName: string) {
 74 |     return fetch('/models/actions/image/predict', {
75 | method: 'POST',
76 | headers: {
77 | 'Content-Type': 'application/json',
78 | },
79 | body: JSON.stringify({ data, modelName }),
80 | })
81 | .then((response) => response.json())
82 | .then((response) => {
83 | console.log('response:', response);
84 | setPredictionName(response.predictions.predicted_class_name);
85 | setTop5(response.predictions.class_name_probabilities);
86 | setTime(response.time);
87 | })
88 | .catch((error) => {
89 | console.error(error);
90 | });
91 | }
92 |
93 | function preprocess(data: string, filePath: string, fileType: string) {
94 | fetch('/models/actions/image/preprocess', {
95 | method: 'POST',
96 | headers: {
97 | 'Content-Type': 'application/json',
98 | },
99 | body: JSON.stringify({ data, filePath, fileType }),
100 | })
101 | .then((response) => response.json())
102 | .then((response) => {
103 | console.log('response:', response);
104 | setPreprocessFilePath(response.urls as string[]);
105 | })
106 | .catch((error) => {
107 | console.error(error);
108 | });
109 | }
110 |
111 | function heatmaps(data: string, modelName: string) {
112 | fetch('/models/actions/image/heatmaps', {
113 | method: 'POST',
114 | headers: {
115 | 'Content-Type': 'application/json',
116 | },
117 | body: JSON.stringify({ data, modelName }),
118 | })
119 | .then((response) => response.json())
120 | .then((response) => {
121 | console.log('response:', response);
122 | setHeatmapLinks(response);
123 | })
124 | .catch((error) => {
125 | console.error(error);
126 | });
127 | }
128 |
129 | function gifs(urls: string[], tag: string) {
130 | fetch('/models/actions/image/gifs', {
131 | method: 'POST',
132 | headers: {
133 | 'Content-Type': 'application/json',
134 | },
135 | body: JSON.stringify({ urls, tag }),
136 | })
137 | .then((response) => response.json())
138 | .then((response) => {
139 | console.log('response:', response);
140 | if (tag === 'heatmap_gif') {
141 | setHGifURL(response.url);
142 | } else {
143 | setFGifURL(response.url);
144 | }
145 | })
146 | .catch((error) => {
147 | console.error(error);
148 | });
149 | }
150 |
151 | function featureMaps(data: string, modelName: string) {
152 | fetch('/models/actions/image/featmaps', {
153 | method: 'POST',
154 | headers: {
155 | 'Content-Type': 'application/json',
156 | },
157 | body: JSON.stringify({ data, modelName }),
158 | })
159 | .then((response) => response.json())
160 | .then((response) => {
161 | console.log('response:', response);
162 | setFeaturemapLinks(response);
163 | })
164 | .catch((error) => {
165 | console.error(error);
166 | });
167 | }
168 |
169 | function logs(data: string, modelName: string) {
170 | fetch('/models/actions/logs', {
171 | method: 'POST',
172 | headers: {
173 | 'Content-Type': 'application/json',
174 | },
175 | body: JSON.stringify({ data, modelName }),
176 | })
177 | .then((response) => response.json())
178 | .then((response) => {
179 | console.log('response:', response);
180 | })
181 | .then(() => {
182 | setVizState(true);
183 | setButtonState(2);
184 | })
185 | .catch((error) => {
186 | console.error(error);
187 | });
188 | }
189 |
190 | function formatString(inputString: string): string {
191 | return inputString
192 | .split('_')
193 | .map((word) => word.charAt(0).toUpperCase() + word.slice(1).toLowerCase())
194 | .join(' ');
195 | }
196 |
197 | const uploadClick = async () => {
198 | if (inputImage) {
199 | const reader = new FileReader();
200 | reader.readAsDataURL(inputImage as File);
201 |
202 | reader.onloadend = () => {
203 | const data = (reader.result as string).split(',')[1];
204 | setData(data as string);
205 | const fileType = inputImage?.type.split('/')[1];
206 | setFileType(fileType as string);
207 | if (process.env.mode === 'logs') {
208 | logs(data, modelName);
209 | } else {
210 | heatmaps(data, modelName);
211 | featureMaps(data, modelName);
212 | imgUpload(inputImage as File);
213 | predict(data, modelName)
214 | .then(() => {
215 | setVizState(true);
216 | setButtonState(2);
217 | })
218 | .catch((error) => {
219 | console.error(error);
220 | });
221 | }
222 | };
223 | }
224 | };
225 |
226 | useEffect(() => {
227 | if (filePath) {
228 | preprocess(data, filePath, fileType);
229 | }
230 | }, [filePath, data, fileType]);
231 |
232 | useEffect(() => {
233 | if (heatmapLinks.progressbars && !hGifURL) {
234 | gifs(heatmapLinks.progressbars as string[], 'heatmap_gif');
235 | }
236 | }, [heatmapLinks, hGifURL]);
237 |
238 | useEffect(() => {
239 | if (featuremapLinks.progressbars && hGifURL && !fGifURL) {
240 | gifs(featuremapLinks.progressbars as string[], 'featuremap_gif');
241 | }
242 | }, [hGifURL, featuremapLinks, fGifURL]);
243 |
244 | useEffect(() => {
245 | if (top5) {
246 | const formattedTop5: Top5Obj = {};
247 | for (const key in top5) {
248 | const formattedKey = formatString(key);
249 | formattedTop5[formattedKey] = top5[key];
250 | }
251 | setTop5Formatted(formattedTop5);
252 | }
253 | }, [top5]);
254 |
255 |   useEffect(() => {
256 |     // Re-set the heatmap GIF URL once the feature-map GIF is ready, likely to restart both loops in sync
257 |     if (fGifURL) {
258 |       const hGif = hGifURL;
259 |       setHGifURL('');
260 |       setHGifURL(hGif);
261 |     }
262 |   }, [fGifURL, hGifURL]);
263 |
264 |
265 | const clearClick = () => {
266 | inputImage = undefined;
267 | setImgName('Browse...');
268 | setImgURL('');
269 | setFilePath('');
270 | setHGifURL('');
271 | setFGifURL('');
272 | setVizState(false);
273 | setButtonState(0);
274 | setHeatmapLinks({} as Heatmaps);
275 | setFeaturemapLinks({} as Featuremaps);
276 | setPredictionName('');
277 | setTime(0);
278 | };
279 |
280 | const vizClick = () => {
281 | openViz(true);
282 | };
283 |
284 | const closeViz = () => {
285 | openViz(false);
286 | };
287 |
288 | return (
289 |
290 |
291 |
292 |
293 |
294 | {/* Your image and logo components here */}
295 |
296 | {/*
*/}
297 |
302 | {/*
*/}
303 |
304 |
305 |
306 | {viz && (
307 |
314 | )}{' '}
315 |
316 | {imgURL && (
317 |
322 | )}
323 |
324 | {vizState && (
325 | <>
326 |
327 | Class: {formatString(predictionName)}
328 |
329 | {time > 0 && (
330 |
331 | Time: {time.toFixed(2)} seconds
332 |
333 | )}
334 | >
335 | )}
336 |
337 |
338 |
339 |
344 |
345 | {!vizState && (
346 |
347 | Analysis Visualization
348 |
349 | )}
350 | {vizState && (
351 |
355 | {' '}
356 | Analysis Visualization
357 |
358 | )}
359 |
360 |
364 | Clear
365 |
366 |
367 |
368 | {/*
*/}
369 |
370 | {/*
*/}
371 |
372 | {buttonState === 0 && (
373 |
374 | {imgName}
375 |
381 |
382 | )}
383 | {buttonState === 1 && (
384 |
388 | Upload
389 |
390 | )}
391 | {buttonState === 2 && (
392 |
396 | Reset
397 |
398 | )}
399 |
400 |
401 | );
402 | }
403 |
--------------------------------------------------------------------------------
/src/app/page.module.css:
--------------------------------------------------------------------------------
1 | .main {
2 | display: flex;
3 | flex-direction: row;
4 | justify-content: space-evenly;
5 | align-items: center;
6 | background-color: black;
7 | /* border: 1px solid white; */
8 | /* padding: ; */
9 | /* min-height: 100vh; */
10 | /* background-image: url(/public/ai-generated-7914562_1920.jpg); */
11 | }
12 |
13 | .Logo {
14 | display: flex;
15 | flex-direction: column;
16 | object-fit: contain;
17 | align-items: center;
18 | }
19 |
20 | .column {
21 | display: flex;
22 | flex-direction: column;
23 | flex: 20%;
24 | align-items: center;
25 | justify-content: center;
26 | /* border: 1px solid white; */
27 | height: 30vh;
28 | }
29 |
30 | .majorDiv {
31 | display: flex;
32 | flex-direction: column;
33 | align-items: center;
34 | background-color: transparent;
35 | /* border: 1px solid white; */
36 | height: 100px;
37 | width: 1vh;
38 | margin-left: 5%;
39 | }
40 |
41 | .title {
42 | font-size: xx-large;
43 | display: flex;
44 | flex-direction: row;
45 | /* align-items: row; */
46 | background-color: #f3ec78;
47 | background-image: linear-gradient(15deg, #af4261, #f3ec78);
48 | background-size: 50%;
49 | -webkit-background-clip: text;
50 | -moz-background-clip: text;
51 | -webkit-text-fill-color: transparent;
52 | -moz-text-fill-color: transparent;
53 | /* border: 1px solid white; */
54 | margin-bottom: 2%;
55 | }
56 |
57 | .titleImg {
58 | height: 200px;
59 | width: 300px;
60 | object-fit: contain;
61 | }
62 |
63 | .inputBox {
64 | display: flex;
65 | align-items: center;
66 | justify-content: center;
67 | height: 500px; /* This height must be enough to contain the fixed-size background */
68 | /* width: 100%; Adjust width as needed */
69 | /* border: 1px solid white; */
70 | background-image: url(/backgroundFlareBW.avif);
71 | background-position: -6px;
72 | background-repeat: no-repeat;
 73 |   background-size: cover; /* fill the entire container */
74 | /* border-radius: 5px; */
75 | /* background-image: linear-gradient(
76 | 130deg,
77 | hsl(28deg 52% 53%) 0%,
78 | hsl(26deg 51% 53%) 8%,
79 | hsl(24deg 48% 54%) 15%,
80 | hsl(21deg 46% 54%) 22%,
81 | hsl(19deg 44% 54%) 28%,
82 | hsl(17deg 41% 54%) 34%,
83 | hsl(15deg 38% 54%) 40%,
84 | hsl(13deg 35% 53%) 45%,
85 | hsl(11deg 32% 53%) 50%,
86 | hsl(9deg 29% 52%) 55%,
87 | hsl(7deg 26% 52%) 59%,
88 | hsl(6deg 25% 51%) 63%,
89 | hsl(5deg 24% 52%) 67%,
90 | hsl(4deg 23% 52%) 71%,
91 | hsl(4deg 22% 52%) 75%,
92 | hsl(3deg 21% 52%) 78%,
93 | hsl(3deg 20% 52%) 82%,
94 | hsl(2deg 19% 52%) 86%,
95 | hsl(2deg 18% 52%) 89%,
96 | hsl(1deg 17% 52%) 93%,
97 | hsl(1deg 16% 53%) 96%,
98 | hsl(0deg 15% 53%) 100%
99 | ); */
100 | /* box-shadow: 0px 0px 10px white; */
101 | /* height: 60vh; */
102 | /* padding: 5%; */
103 | width: 70vw;
104 |
105 | min-height: 500px;
106 | }
107 |
108 | .hide {
109 | /* background-image: url(/public/backgroundFlareBW.avif); */
110 | display: none;
111 | }
112 |
113 | .grid-item {
114 | display: flex;
115 | flex-direction: column; /* Stack items vertically */
116 | align-items: center; /* Center items horizontally */
117 | justify-content: center; /* Center items vertically if there's extra space */
118 | margin: 10px; /* Add some space around each grid item */
119 | }
120 |
121 | .grid-item img {
122 | width: 100%;
123 | height: auto;
124 | }
125 |
126 | .gridItem div {
127 | margin-bottom: 10px; /* Space between title and image */
128 | color: white; /* Assuming you want the text white as per modal theme */
129 | font-size: 1em; /* Adjust based on your design */
130 | }
131 |
132 | .placeholderText {
133 | font-size: 30px;
134 | font-weight: bolder;
135 | color: white;
136 | /* background-color: #f3ec78;
137 | background-image: linear-gradient(
138 | 180deg,
139 | hsl(0deg 100% 100%) 0%,
140 | hsl(17deg 100% 78%) 36%,
141 | hsl(26deg 83% 53%) 59%,
142 | hsl(15deg 95% 47%) 78%,
143 | hsl(2deg 80% 53%) 92%,
144 | hsl(0deg 75% 58%) 100%
145 | );
146 | background-size: 100%;
147 | -webkit-background-clip: text;
148 | -moz-background-clip: text;
149 | -webkit-text-fill-color: transparent;
150 | -moz-text-fill-color: transparent; */
151 | padding-top: 5%;
152 | margin-bottom: 2%;
153 | }
154 |
155 | .primaryBtn:active {
156 | color: white;
157 | background-color: rgba(100, 100, 100, 0.5);
158 | }
159 |
160 | .altBtn {
161 | display: inline-block;
162 | font-family: 'Hanken Grotesk', sans-serif;
163 | text-align: left;
164 | color: white;
165 | font-size: medium;
166 | background-color: transparent;
167 | border: none;
168 | border-radius: 50%;
169 | height: 100px;
170 | width: 100px;
171 | /* background-image: linear-gradient(
172 | 220deg,
173 | hsl(0deg 100% 100%) 0%,
174 | hsl(17deg 100% 78%) 18%,
175 | hsl(26deg 83% 53%) 59%,
176 | hsl(15deg 95% 47%) 78%,
177 | hsl(2deg 80% 53%) 92%,
178 | hsl(0deg 75% 58%) 100%
179 | ); */
180 | padding: 10px;
181 | margin: 10px;
182 | }
183 |
184 | .altBtn:hover {
185 | color: white;
186 | /* background-color: rgba(200, 200, 200, 0.5); */
187 | background-image: linear-gradient(
188 | 220deg,
189 | hsl(0deg 100% 100%) 0%,
190 | hsl(17deg 100% 78%) 18%,
191 | hsl(26deg 83% 53%) 59%,
192 | hsl(15deg 95% 47%) 78%,
193 | hsl(2deg 80% 53%) 92%,
194 | hsl(0deg 75% 58%) 100%
195 | );
196 | }
197 |
198 | .altBtn:active {
199 | color: white;
200 | /* background-color: rgba(100, 100, 100, 0.5); */
201 | background-image: linear-gradient(
202 | 220deg,
203 | hsl(0deg 100% 100%) 0%,
204 | hsl(17deg 100% 78%) 18%,
205 | hsl(26deg 83% 53%) 59%,
206 | hsl(15deg 95% 47%) 78%,
207 | hsl(2deg 80% 53%) 92%,
208 | hsl(0deg 75% 58%) 100%
209 | );
210 | }
211 |
212 | .uploadImg {
213 | height: 150px;
214 | width: 150px;
215 | object-fit: contain;
216 | }
217 |
218 | .imgInput,
219 | .primaryBtn {
220 | display: flex;
221 | justify-content: center;
222 | align-items: center;
223 | text-align: center;
224 | border-radius: 50%;
225 | height: 11em;
226 | width: 11em;
227 | cursor: pointer;
228 | background-color: transparent;
229 | border: none;
230 | padding: 10px;
231 | font-family: 'Hanken Grotesk', sans-serif;
232 | font-size: medium;
233 | position: relative;
234 | }
235 |
236 | .imgInput:hover,
237 | .primaryBtn:hover {
238 | box-shadow: 0 0 0 5px white inset;
239 | }
240 |
241 | .browseText {
242 | color: white;
243 | }
244 |
245 | /* .imgInput {
246 | /* border: 1px solid #ccc;
247 | display: inline-block;
248 | cursor: pointer;
249 | color: white;
250 | border-radius: 50%;
251 | height: 100px;
252 | width: 100px;
253 | /* background-color: transparent; */
254 | /* background-image: url(/backgroundFlareBW.avif);
255 | padding: 10px;
256 | margin: 10px;
257 |
258 | font-family: 'Hanken Grotesk', sans-serif;
259 | font-size: medium;
260 | text-align: center;
261 | }
262 | .imgInput:hover {
263 | color: rgb(255, 255, 255);
264 | background-color: rgba(200, 200, 200, 0.5);
265 | background-image: radial-gradient(
266 | #f5f5f5 100%,
267 | #6b6b6b 90%,
268 | rgb(42, 42, 42) 50%,
269 | rgb(0, 0%, 0%) 00%,
270 | );
271 | background-image: linear-gradient(
272 | 220deg,
273 | hsl(0deg 100% 100%) 0%,
274 | hsl(17deg 100% 78%) 18%,
275 | hsl(26deg 83% 53%) 59%,
276 | hsl(15deg 95% 47%) 78%,
277 | hsl(2deg 80% 53%) 92%,
278 | hsl(0deg 75% 58%) 100%
279 | );
280 | } */
281 |
282 | /* .imgInput:active {
283 | color: white;
284 | background-color: rgba(100, 100, 100, 0.5);
285 | } */
286 |
287 | .buttonBox {
288 | display: flex;
289 | flex-direction: column; /* Stack buttons vertically */
290 | align-items: center; /* Center buttons horizontally */
291 |   justify-content: center; /* Center buttons vertically within the box */
292 |   border-radius: 50%;
293 | width: 100%; /* Take full width to center content properly */
294 | }
295 |
296 | /* MODAL */
297 | .exitmodalbutton {
298 |   background-color: black;
299 | border: none;
300 | position: sticky;
301 | top: 5%;
302 | left: 95%;
303 | color: white;
304 | max-height: 1rem;
305 | max-width: 1rem;
306 | text-align: center;
307 | text-decoration: none;
308 | display: inline-block;
309 | font-size: 16px;
310 | /* margin: 4px 2px; */
311 | z-index: 20;
312 | cursor: pointer;
313 | transition-duration: 0.4s;
314 | }
315 |
316 | .exitmodalbutton:hover {
317 | background-color: #e92a04;
318 | }
319 |
320 | .custom-button:hover {
321 | background-color: #45a049;
322 | }
323 |
324 | .modalclosed {
325 | display: none;
326 | }
327 |
328 | .modalcontainer {
329 | color: white;
330 | display: flex;
331 | flex-direction: column;
332 | align-items: center;
333 | justify-content: space-around;
334 | position: absolute;
335 | max-width: 80vw;
336 | max-height: 100vh;
337 | top: 55%;
338 | left: 50%;
339 | transform: translate(-50%, -50%);
340 | background-image: url(../../public/modal-background-1.jpg);
341 | background-size: cover;
342 | background-position: -15px center;
343 | z-index: 10;
344 | box-shadow: inset 0 0 16px 16px #000000;
345 | overflow: scroll;
346 | /* box-sizing: border-box; */
347 | }
348 |
349 | .slide {
350 | border: 1px solid goldenrod;
351 | display: flex;
352 | flex-direction: column;
353 | align-items: center;
354 | justify-content: center;
355 | margin-top: 20px;
356 | position: relative;
357 | top: 0;
358 | background-color: rgba(0, 0, 0, 0.75);
359 | color: white;
360 | width: 80vw;
361 | min-height: 80vh;
362 | max-height: 80vh;
363 | /* padding: 20px; */
364 | font-family: 'Roboto', sans-serif;
365 | }
366 |
367 | .slideRow {
368 | margin-top: 20px;
369 | border: 1px solid goldenrod;
370 | display: flex;
371 | flex-direction: row;
372 | align-items: center;
373 | justify-content: center;
374 | position: relative;
375 | top: 0;
376 | background-color: rgba(0, 0, 0, 0.5);
377 | color: white;
378 | width: 80vw;
379 | min-height: 80vh;
380 | max-height: 80vh;
381 | /* padding: 20px; */
382 | font-family: 'Roboto', sans-serif;
383 | }
384 |
385 | .modalcontainertext {
386 | background-color: rgba(0, 0, 0, 0.5);
387 | color: white;
388 | width: 40vw;
389 | height: 45vh;
390 | display: flex;
391 | flex-direction: column;
392 | align-items: center;
393 | justify-content: center;
394 | padding: 20px;
395 | font-family: 'Roboto', sans-serif;
396 | }
397 |
398 | .modalcontainertext h1 {
399 | font-size: 1.75em;
400 | padding-bottom: 20px;
401 | padding-top: 20px;
402 | font-family: 'RobotoSlab', sans-serif;
403 | }
404 |
405 | .modalcontainertext h2 {
406 | font-size: 1.25em;
407 | }
408 |
409 | .modalcontainertext p {
410 | font-size: 1em;
411 | }
412 |
413 | /* Style for Chrome, Safari, and Opera */
414 | .modalcontainer::-webkit-scrollbar {
415 | width: 12px; /* Adjust the width of the scrollbar */
416 | }
417 |
418 | .modalcontainer::-webkit-scrollbar-track {
419 | background: #000; /* Track color */
420 | }
421 |
422 | .modalcontainer::-webkit-scrollbar-thumb {
423 | background-color: #555; /* Thumb color */
424 | border-radius: 6px; /* Roundness of the scrollbar thumb */
425 | border: 3px solid #000; /* Creates padding around the scroll thumb */
426 | }
427 |
428 | /* Style for Firefox */
429 | .modalcontainer {
430 | scrollbar-width: thin; /* Can be 'auto', 'thin', or 'none' */
431 | scrollbar-color: #555 #000; /* Thumb and track color */
432 | }
433 |
434 | /* Hide scrollbar for IE, Edge, and Firefox (where applicable) */
435 | .modalcontainer {
436 | -ms-overflow-style: none; /* IE and Edge */
437 | scrollbar-width: none; /* Firefox */
438 | }
439 |
440 | .modalcontainer::-webkit-scrollbar {
441 | display: none; /* Chrome, Safari, and Opera */
442 | }
443 |
444 | .modalheader {
445 | display: flex;
446 | flex-direction: column;
447 | align-items: center;
448 | justify-content: space-between;
449 | text-align: center;
450 | }
451 |
452 | .modalbody {
453 | display: flex;
454 | flex-direction: column;
455 | align-items: center;
456 | text-align: center;
457 | justify-content: space-between;
458 | padding: 20px;
459 | }
460 |
461 | /* .modalcontainer {
462 | border: 1px solid blue;
463 | position: fixed !important;
464 | background: rgba(20, 20, 20, 0.98);
465 | color: white;
466 | z-index: 10;
467 | top: 50% !important;
468 | left: 50% !important;
469 | transform: translate(-50%, -50%);
470 | width: 80vw;
471 | height: 100vh;
472 | padding: 20px;
473 | overflow: none;
474 | display: flex;
475 | flex-direction: column;
476 | align-items: center;
477 | justify-content: space-between;
478 | border-radius: 5px;
479 | box-shadow: inset 0 0 30px 10px gray;
480 | } */
481 |
482 | /* .modalcontents {
483 | border: 1px solid green;
484 | overflow: scroll;
485 | } */
486 |
487 | .modaltitlecontainer {
488 | display: flex;
489 | flex-direction: row;
490 | align-items: center;
491 | justify-content: center;
492 | margin-bottom: 25px;
493 | width: 100%;
494 | padding: 10px;
495 | }
496 |
497 | .startJourneyButton {
498 | color: white;
499 | border: none;
500 | padding: 10px 20px;
501 | text-align: center;
502 | text-decoration: none;
503 | display: flex;
504 | align-items: center;
505 | justify-content: center;
506 | font-size: 16px;
507 | margin: 4px 2px;
508 | cursor: pointer;
509 | }
510 |
511 | .modaltitle {
512 |   display: flex;
513 |   align-items: center;
514 |   position: sticky;
515 |   font-size: 2.5em;
516 |   font-weight: 1000;
517 |   font-family: 'RobotoSlab', sans-serif;
518 | }
519 |
520 |
521 | /* CAROUSEL */
522 |
523 | .embla {
524 | overflow: hidden;
525 | }
526 | .embla__container {
527 | display: flex;
528 | width: 50vw;
529 | height: 50vh;
530 | }
531 | .embla__slide {
532 | flex: 0 0 80%;
533 | min-width: 0;
534 | margin-right: 10px;
535 | margin-left: 10px;
536 | border: 1px solid white;
537 | }
538 |
539 | .embla__controls {
540 | display: flex;
541 | flex-direction: row;
542 | justify-content: center;
543 | align-items: center;
544 | }
545 |
546 | .embla__dots {
547 | display: flex;
548 | flex-wrap: wrap;
549 | justify-content: flex-end;
550 | align-items: center;
551 | /* margin-right: calc((2.6rem - 1.4rem) / 2 * -1); */
552 | }
553 |
554 | .embla__dot {
555 | touch-action: manipulation;
556 | display: inline-flex;
557 | text-decoration: none;
558 | cursor: pointer;
559 | border: 0;
560 | padding: 0;
561 | margin: 0.5rem;
562 | width: 1.5rem;
563 | height: 1.5rem;
564 | display: flex;
565 | align-items: center;
566 | justify-content: center;
567 | border-radius: 50%;
568 | background-color: black;
569 | /* background-image: url(/public/ai-generated-7914562_1920.jpg); */
570 | border: 0.2rem solid gray;
571 | }
572 | .embla__dot:hover {
573 | border-color: white;
574 | }
575 | .embla__dot--selected {
576 | touch-action: manipulation;
577 | display: inline-flex;
578 | text-decoration: none;
579 | cursor: pointer;
580 | border: 0;
581 | padding: 0;
582 | margin: 0.5rem;
583 | width: 1.5rem;
584 | height: 1.5rem;
585 | display: flex;
586 | align-items: center;
587 | justify-content: center;
588 | border-radius: 50%;
589 | background-color: white;
590 | border: 0.2rem solid white;
591 | }
592 |
593 | /* .test {
594 | border: 1px solid white;
595 | } */
596 |
597 | .otherImg {
598 | height: 350px;
599 |   width: 350px;
600 | object-fit: contain;
601 | }
602 |
603 | .table {
604 |   width: 60%; /* 60% of the container */
605 | border-collapse: collapse; /* Collapses the border */
606 | margin: 20px 0; /* Adds some space around the table */
607 | font-size: 1.1em; /* Adjusts the font size */
608 | font-family: sans-serif; /* Uses a sans-serif font for a modern look */
609 | min-width: 400px; /* Ensures it doesn’t get too small */
610 | box-shadow: 0 0 20px rgba(0, 0, 0, 0.15); /* Adds a subtle shadow for depth */
611 | }
612 |
613 | .table th,
614 | .table td {
615 | padding: 12px 15px; /* Adds space inside each cell */
616 | text-align: left; /* Aligns text to the left */
617 | border-bottom: 1px solid #dddddd; /* Adds a light line between rows */
618 | }
619 |
620 | .table th {
621 |   background-color: goldenrod; /* goldenrod header background */
622 | color: #ffffff; /* White text for contrast */
623 | text-transform: uppercase; /* Makes the header text uppercase */
624 | }
625 |
626 | .table tbody tr:last-of-type {
627 |   border-bottom: 2px solid goldenrod; /* A thicker goldenrod line under the last row */
628 | }
629 |
630 | .table th:first-child,
631 | .table td:first-child {
632 | width: 60%; /* Adjusts the width of the first column */
633 | }
634 |
635 | .table th:nth-child(2),
636 | .table td:nth-child(2) {
637 | text-align: center;
638 | width: 40%; /* Adjusts the width of the second column, ensuring the total does not exceed 100% */
639 | }
640 | .gifContainer {
641 | display: flex;
642 | flex-direction: column;
643 | align-items: center;
644 | justify-content: center;
645 | background-color: rgba(0, 0, 0, 0.5);
646 | }
647 |
648 | .gifDisplay {
649 | width: 100%;
650 | display: flex;
651 | flex-direction: row;
652 | align-items: center;
653 | justify-content: space-around;
654 | }
655 |
656 | .gifSlide {
657 | position: absolute;
658 | height: 100%;
659 | width: 100%;
660 | display: flex;
661 | flex-direction: column;
662 | align-items: center;
663 | justify-content: space-around;
664 | }
665 |
666 | .gifTitle {
667 | margin: 10%;
668 | font-size: x-large;
669 | font-weight: 400;
670 | }
671 |
672 | .gifDescription {
673 | background-color: rgba(0, 0, 0, 0.5);
674 | color: white;
675 | display: flex;
676 | flex-direction: column;
677 | align-items: center;
678 | justify-content: center;
679 | text-align: center;
680 | padding: 20px;
681 | margin-left: 5%;
682 | margin-right: 5%;
683 | font-family: 'Roboto', sans-serif;
684 | }
685 |
--------------------------------------------------------------------------------