├── backend
├── assistants.json
├── .DS_Store
├── dump.rdb
├── api
│ ├── __init__.py
│ ├── user.py
│ ├── routes.py
│ ├── in_class.py
│ └── webhook.py
├── audio_cache
│ └── output.mp3
├── __pycache__
│ ├── chatbot.cpython-313.pyc
│ ├── context_manager.cpython-313.pyc
│ └── s3_context_manager.cpython-313.pyc
├── utils
│ ├── __pycache__
│ │ ├── s3_utils.cpython-313.pyc
│ │ ├── user_utils.cpython-313.pyc
│ │ ├── socket_utils.cpython-313.pyc
│ │ ├── course_manager.cpython-313.pyc
│ │ ├── firebase_admin.cpython-313.pyc
│ │ └── load_and_process_index.cpython-313.pyc
│ ├── socket_utils.py
│ ├── firebase_admin.py
│ ├── user_utils.py
│ └── load_and_process_index.py
├── functions
│ ├── __pycache__
│ │ ├── slides_navigation.cpython-313.pyc
│ │ └── get_detailed_content.cpython-313.pyc
│ ├── get_detailed_content.py
│ └── slides_navigation.py
├── user_files_utils.py
├── routes
│ ├── delete_routes.py
│ ├── delete_course_routes.py
│ ├── upload_routes.py
│ ├── course_info_routes.py
│ ├── voice_routes.py
│ └── aiTutor_routes.py
└── app.py
├── app
├── vpi-test
│ └── [title]
│ │ └── page.tsx
├── favicon.ico
├── fonts
│ ├── GeistVF.woff
│ └── GeistMonoVF.woff
├── page.tsx
├── my-publish
│ ├── page.tsx
│ ├── data-center
│ │ └── page.tsx
│ ├── publish-details
│ │ └── page.tsx
│ ├── my-channel
│ │ └── page.tsx
│ ├── help-center
│ │ └── page.tsx
│ └── monetization-center
│ │ └── page.tsx
├── courses
│ ├── layout.tsx
│ ├── [id]
│ │ ├── types.ts
│ │ ├── components
│ │ │ ├── SlideViewer.tsx
│ │ │ └── ChatHistory.tsx
│ │ ├── hooks
│ │ │ ├── useConversation.ts
│ │ │ └── useSpeech.ts
│ │ └── page.tsx
│ └── page.tsx
├── dashboard
│ └── layout.tsx
├── services
│ └── chatService.ts
├── layout.tsx
├── welcome
│ └── page.tsx
├── my-uploads
│ └── page.tsx
├── course-preview
│ └── [id]
│ │ └── page.tsx
├── ai-tutor
│ └── page.tsx
├── schedule
│ └── page.tsx
├── globals.css
├── my-courses
│ └── page.tsx
└── vapi-test
│ ├── page.tsx
│ └── [title]
│ └── page.tsx
├── next.config.mjs
├── postcss.config.mjs
├── next-env.d.ts
├── .gitignore
├── package.json
├── tsconfig.json
├── middleware.ts
├── tailwind.config.ts
└── README.md
/backend/assistants.json:
--------------------------------------------------------------------------------
1 | {}
--------------------------------------------------------------------------------
/app/vpi-test/[title]/page.tsx:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/app/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/app/favicon.ico
--------------------------------------------------------------------------------
/backend/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/.DS_Store
--------------------------------------------------------------------------------
/backend/dump.rdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/dump.rdb
--------------------------------------------------------------------------------
/app/fonts/GeistVF.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/app/fonts/GeistVF.woff
--------------------------------------------------------------------------------
/app/fonts/GeistMonoVF.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/app/fonts/GeistMonoVF.woff
--------------------------------------------------------------------------------
/next.config.mjs:
--------------------------------------------------------------------------------
1 | /** @type {import('next').NextConfig} */
2 | const nextConfig = {}; // no custom Next.js configuration — framework defaults apply
3 |
4 | export default nextConfig;
5 |
--------------------------------------------------------------------------------
/backend/api/__init__.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint
2 |
3 | api = Blueprint('api', __name__)  # parent blueprint; child blueprints attach to it in routes.py
4 |
5 | from .routes import *  # deliberately imported after 'api' exists to avoid a circular import
6 |
--------------------------------------------------------------------------------
/backend/audio_cache/output.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/audio_cache/output.mp3
--------------------------------------------------------------------------------
/app/page.tsx:
--------------------------------------------------------------------------------
1 | import { redirect } from 'next/navigation'
2 |
3 | export default function HomePage() { // root route of the app
4 | redirect('/welcome') // '/' always forwards to the welcome screen
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/backend/__pycache__/chatbot.cpython-313.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/__pycache__/chatbot.cpython-313.pyc
--------------------------------------------------------------------------------
/app/my-publish/page.tsx:
--------------------------------------------------------------------------------
1 | import { redirect } from "next/navigation"
2 |
3 | export default function MyPublishRedirect() { // index page for /my-publish
4 | redirect("/my-publish/my-channel") // default tab: land on the channel view
5 | }
--------------------------------------------------------------------------------
/backend/utils/__pycache__/s3_utils.cpython-313.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/utils/__pycache__/s3_utils.cpython-313.pyc
--------------------------------------------------------------------------------
/backend/__pycache__/context_manager.cpython-313.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/__pycache__/context_manager.cpython-313.pyc
--------------------------------------------------------------------------------
/backend/utils/__pycache__/user_utils.cpython-313.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/utils/__pycache__/user_utils.cpython-313.pyc
--------------------------------------------------------------------------------
/backend/__pycache__/s3_context_manager.cpython-313.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/__pycache__/s3_context_manager.cpython-313.pyc
--------------------------------------------------------------------------------
/backend/utils/__pycache__/socket_utils.cpython-313.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/utils/__pycache__/socket_utils.cpython-313.pyc
--------------------------------------------------------------------------------
/backend/utils/__pycache__/course_manager.cpython-313.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/utils/__pycache__/course_manager.cpython-313.pyc
--------------------------------------------------------------------------------
/backend/utils/__pycache__/firebase_admin.cpython-313.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/utils/__pycache__/firebase_admin.cpython-313.pyc
--------------------------------------------------------------------------------
/postcss.config.mjs:
--------------------------------------------------------------------------------
1 | /** @type {import('postcss-load-config').Config} */
2 | const config = {
3 | plugins: {
4 | tailwindcss: {}, // Tailwind is the only PostCSS plugin; no options needed
5 | },
6 | };
7 |
8 | export default config;
9 |
--------------------------------------------------------------------------------
/backend/functions/__pycache__/slides_navigation.cpython-313.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/functions/__pycache__/slides_navigation.cpython-313.pyc
--------------------------------------------------------------------------------
/backend/utils/__pycache__/load_and_process_index.cpython-313.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/utils/__pycache__/load_and_process_index.cpython-313.pyc
--------------------------------------------------------------------------------
/backend/functions/__pycache__/get_detailed_content.cpython-313.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bobhaotian/speech-driven-lessons/HEAD/backend/functions/__pycache__/get_detailed_content.cpython-313.pyc
--------------------------------------------------------------------------------
/next-env.d.ts:
--------------------------------------------------------------------------------
1 | ///
2 | ///
3 |
4 | // NOTE: This file should not be edited
5 | // see https://nextjs.org/docs/app/building-your-application/configuring/typescript for more information.
6 |
--------------------------------------------------------------------------------
/backend/api/user.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint
2 | import utils.user_utils as user_utils  # shared auth/user helpers
3 |
4 | user = Blueprint('user', __name__)  # mounted under /user by api/routes.py
5 |
6 | @user.route('/verify-user', methods=['POST', 'OPTIONS'])
7 | def verify_user():
8 | """Route handler for verifying user token and initializing S3"""
9 | return user_utils.handle_verify_user()  # all verification logic delegated to user_utils
--------------------------------------------------------------------------------
/app/courses/layout.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import React from 'react'
4 | import { CourseProvider } from "@/lib/course-context"
5 |
6 | export default function CoursesLayout({ children }: { children: React.ReactNode }) { // shared layout for all /courses routes
7 | return ( // NOTE(review): JSX markup appears stripped by extraction — presumably wraps {children} in <CourseProvider>; confirm against the repo
8 |
9 | {children}
10 |
11 | )
12 | }
13 |
--------------------------------------------------------------------------------
/app/dashboard/layout.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import React from 'react'
4 | import { CourseProvider } from "@/lib/course-context"
5 |
6 | export default function DashboardLayout({ children }: { children: React.ReactNode }) { // shared layout for /dashboard routes
7 | return ( // NOTE(review): JSX markup appears stripped by extraction — presumably wraps {children} in <CourseProvider>; confirm against the repo
8 |
9 | {children}
10 |
11 | )
12 | }
13 |
--------------------------------------------------------------------------------
/backend/user_files_utils.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | import os
3 |
4 | # Set up AWS credentials and S3 resource
5 | ACCESS_KEY = os.getenv("AWS_ACCESS_KEY_ID")  # None if unset; boto3 then falls back to its own credential chain
6 | SECRET_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
7 | REGION_NAME = "ca-central-1" # e.g., 'us-east-1'
8 |
9 | # Initialize S3 client
10 | s3_client = boto3.client(
11 | 's3',
12 | aws_access_key_id=ACCESS_KEY,
13 | aws_secret_access_key=SECRET_KEY,
14 | region_name=REGION_NAME
15 | )
16 |
17 | bucket_name = "jasmintechs-tutorion"  # NOTE(review): same bucket name is hard-coded in several route modules — consider centralizing
--------------------------------------------------------------------------------
/app/services/chatService.ts:
--------------------------------------------------------------------------------
1 | // app/services/chatService.ts
2 | export const initializeChatbot = async (courseTitle: string) => { // POSTs the course title to the backend bootstrap endpoint
3 | const response = await fetch("http://localhost:5000/api/initialize-chatbot", { // NOTE(review): hard-coded dev URL — consider an env-based base URL
4 | method: "POST",
5 | headers: { "Content-Type": "application/json" },
6 | body: JSON.stringify({ course_title: courseTitle }),
7 | });
8 | return response.json(); // no response.ok check; callers receive whatever JSON the server returned
9 | };
10 |
11 | export const getAIResponse = async (input: string) => { // stub — unimplemented; currently resolves to undefined
12 | // API call implementation
13 | };
14 |
14 |
--------------------------------------------------------------------------------
/backend/functions/get_detailed_content.py:
--------------------------------------------------------------------------------
1 | from s3_context_manager import ContextManager as S3ContextManager
2 | import utils.s3_utils as s3_utils
3 | from dotenv import load_dotenv
4 | import os
5 |
6 | load_dotenv()  # pull OPENAI_API_KEY (and friends) from backend/.env
7 |
8 | API_KEY = os.getenv("OPENAI_API_KEY")
9 |
10 | s3_bucket = "jasmintechs-tutorion"  # NOTE(review): unused in this module — s3_utils is imported but never called here
11 |
12 | def get_detailed_content(course_title, user, user_query):
13 | s3_context_manager = S3ContextManager(user, course_title, api_key=API_KEY)  # fresh context manager per call
14 | s3_context_manager.load_saved_indices()  # presumably loads prebuilt indices from S3 — see s3_context_manager
15 | return s3_context_manager.get_relevant_chunks(user_query)  # chunks relevant to the user's query
16 |
--------------------------------------------------------------------------------
/app/layout.tsx:
--------------------------------------------------------------------------------
1 |
2 | import './globals.css'
3 | import type { Metadata } from 'next'
4 | import { Inter } from 'next/font/google'
5 |
6 | const inter = Inter({ subsets: ['latin'] })
7 |
8 | export const metadata: Metadata = { // App Router metadata applied to every page
9 | title: 'Student Dashboard',
10 | description: 'A comprehensive learning platform with AI tutors',
11 | }
12 |
13 | export default function RootLayout({
14 | children,
15 | }: {
16 | children: React.ReactNode
17 | }) {
18 | return ( // NOTE(review): <html>/<body> tags appear stripped by extraction; 'inter' is defined above but its usage is not visible here — confirm against the repo
19 |
20 |
21 | {children}
22 |
23 |
24 | )
25 | }
26 |
27 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Dependencies
2 | /node_modules
3 | /.pnp
4 | .pnp.js
5 |
6 | # Testing
7 | /coverage
8 |
9 | # Next.js
10 | /.next/
11 | /out/
12 |
13 | # Production
14 | /build
15 |
16 | # Misc
17 | .DS_Store
18 | *.pem
19 |
20 | # Logs
21 | npm-debug.log*
22 | yarn-debug.log*
23 | yarn-error.log*
24 |
25 | # Local .env files
26 | .env
27 | .env.local
28 | .env.development.local
29 | .env.test.local
30 | .env.production.local
31 |
32 | # Python
33 | __pycache__/
34 | *.pyc
35 | *.pyo
36 | *.pyd
37 | .Python
38 | env/
39 | venv/
40 | pip-selfcheck.json
41 | *.egg-info/
42 | dist/
43 | build/
44 |
45 | # Backend .env
46 | backend/.env
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "speech-driven-lessons",
3 | "version": "0.1.0",
4 | "private": true,
5 | "scripts": {
6 | "dev": "next dev",
7 | "build": "next build",
8 | "start": "next start",
9 | "lint": "next lint"
10 | },
11 | "dependencies": {
12 | "next": "14.2.4",
13 | "react": "^18",
14 | "react-dom": "^18"
15 | },
16 | "devDependencies": {
17 | "@types/node": "^20",
18 | "@types/react": "^18",
19 | "@types/react-dom": "^18",
20 | "postcss": "^8",
21 | "tailwindcss": "^3.4.1",
22 | "typescript": "^5"
23 | }
24 | }
--------------------------------------------------------------------------------
/backend/api/routes.py:
--------------------------------------------------------------------------------
1 | from .webhook import webhook
2 | from .assistant import assistant
3 | from .in_class import in_class
4 | from .course import course
5 | from .course_generation import course_generation
6 | from .user import user
7 | from flask import request, jsonify  # NOTE(review): unused in this module — likely leftover
8 | from . import api  # the parent blueprint created in api/__init__.py
9 |
10 | # Register blueprints
11 | api.register_blueprint(webhook, url_prefix='/webhook')
12 | api.register_blueprint(assistant, url_prefix='/assistant')
13 | api.register_blueprint(in_class, url_prefix='/in-class')
14 |
15 | api.register_blueprint(course, url_prefix='/course')
16 | api.register_blueprint(user, url_prefix='/user')
17 | api.register_blueprint(course_generation, url_prefix='/course_generation')  # NOTE(review): underscore prefix is inconsistent with hyphenated '/in-class'
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "lib": ["dom", "dom.iterable", "esnext"],
4 | "allowJs": true,
5 | "skipLibCheck": true,
6 | "strict": true,
7 | "noEmit": true,
8 | "esModuleInterop": true,
9 | "module": "esnext",
10 | "moduleResolution": "bundler",
11 | "resolveJsonModule": true,
12 | "isolatedModules": true,
13 | "jsx": "preserve",
14 | "incremental": true,
15 | "plugins": [
16 | {
17 | "name": "next"
18 | }
19 | ],
20 | "paths": {
21 | "@/*": ["./*"]
22 | }
23 | },
24 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
25 | "exclude": ["node_modules"]
26 | }
27 |
--------------------------------------------------------------------------------
/app/welcome/page.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useEffect } from 'react';
4 | import { useRouter } from 'next/navigation';
5 | import WelcomeAnimation from "@/components/animations/WelcomeAnimation";
6 | import { useAuth } from "@/auth/firebase";
7 |
8 | export default function WelcomePage() {
9 | const router = useRouter();
10 | const { userEmail, loading, error } = useAuth(); // NOTE(review): none of these are used below — wire them up or drop the hook call
11 |
12 | useEffect(() => {
13 | // Automatically redirect to dashboard after animation completes
14 | const timer = setTimeout(() => {
15 | router.push('/dashboard');
16 | }, 2600); // 2.6 s — presumably matches the WelcomeAnimation duration; confirm
17 |
18 | return () => clearTimeout(timer); // cancel the pending redirect if the page unmounts early
19 | }, [router]);
20 |
21 | return ( // NOTE(review): JSX appears stripped by extraction — presumably renders <WelcomeAnimation />; confirm against the repo
22 |
23 |
24 |
25 | );
26 | }
27 |
28 |
29 |
--------------------------------------------------------------------------------
/backend/utils/socket_utils.py:
--------------------------------------------------------------------------------
1 | # Create this new file
2 |
3 | # This module will hold a reference to the socketio instance
4 | # to avoid circular imports
5 | _socketio = None  # set once at startup via init_socketio()
6 |
7 | def init_socketio(socketio_instance):
8 | """Initialize the socketio reference"""
9 | global _socketio
10 | _socketio = socketio_instance
11 |
12 | def emit_slide_change(assistant_id, position):
13 | """Emit slide change event to a specific room"""
14 | if _socketio:
15 | _socketio.emit('slide_changed', {'position': position}, room=assistant_id)  # room name doubles as the assistant id
16 | else:
17 | print("Warning: socketio not initialized yet")  # best-effort: drop the event rather than crash
18 |
19 | def emit_assistant_activity(assistant_id):
20 | """Emit assistant activity event to reset inactivity timer"""
21 | if _socketio:
22 | _socketio.emit('assistant_activity', room=assistant_id)
23 | else:
24 | print("Warning: socketio not initialized yet")  # same best-effort policy as emit_slide_change
--------------------------------------------------------------------------------
/backend/routes/delete_routes.py:
--------------------------------------------------------------------------------
from flask import Blueprint, request, jsonify
import utils.user_utils as user_utils
import utils.s3_utils as s3_utils
import os  # NOTE(review): unused here; kept in case other code imports it transitively

delete_bp = Blueprint('delete', __name__)
UPLOAD_FOLDER = "../uploads"

@delete_bp.route('/delete-file', methods=['POST'])
def delete_file():
    """Delete one uploaded file from the user's course folder in S3.

    Expects a JSON body: {"filename": ..., "coursename": ...}.
    Returns 401 when unauthenticated, 400 on missing fields,
    404 when the object is not found, 200 on success.
    """
    username = user_utils.get_current_user(request)
    if not username:
        return jsonify({'error': 'Unauthorized'}), 401

    # Guard against a missing/non-JSON body.
    data = request.json or {}
    filename = data.get('filename')
    coursename = data.get('coursename')

    # Bug fix: the original called .replace() on the raw .get() result BEFORE
    # this check, so a missing filename raised AttributeError (500) instead of
    # returning the intended 400.
    if not filename or not coursename:
        return jsonify({'error': 'filename and coursename are required'}), 400

    # Uploads are stored with underscores in place of spaces.
    filename = filename.replace(" ", "_")

    file_path = s3_utils.get_s3_file_path(username, coursename, filename)
    response = s3_utils.delete_file_from_s3("jasmintechs-tutorion", file_path)

    if response:
        return jsonify({'message': 'File deleted successfully'})
    else:
        return jsonify({'error': 'File not found'}), 404
--------------------------------------------------------------------------------
/backend/routes/delete_course_routes.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint, request, jsonify
2 | import utils.user_utils as user_utils # Assuming this utility gets the current username
3 | import utils.s3_utils as s3_utils # Assuming this utility interacts with S3
4 |
5 | delete_course_bp = Blueprint('delete_course', __name__)
6 |
7 | @delete_course_bp.route('/delete-course', methods=['POST'])
8 | def delete_course():
9 | username = user_utils.get_current_user(request)
10 | if not username:
11 | return jsonify({'error': 'Unauthorized'}), 401
12 |
13 | data = request.json  # NOTE(review): None when the request has no JSON body — the .get() calls below would then raise
14 | course_id = data.get('id')
15 | title = data.get('title')
16 |
17 | print("course_id: ", course_id)  # NOTE(review): debug prints left in — consider proper logging
18 | print("title: ", title)
19 |
20 | if not course_id or not title:
21 | return jsonify({'error': 'id and title are required'}), 400
22 |
23 | response = s3_utils.delete_folder_from_s3("jasmintechs-tutorion", s3_utils.get_course_s3_folder(username, title))  # deletes the whole course folder; course_id is validated above but never used
24 |
25 | if response:
26 | return jsonify({'message': 'Course deleted successfully'})
27 | else:
28 | return jsonify({'error': 'Course not found'}), 404
29 |
--------------------------------------------------------------------------------
/backend/routes/upload_routes.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint, request, jsonify
2 | from werkzeug.utils import secure_filename
3 | import utils.user_utils as user_utils
4 | import utils.s3_utils as s3_utils
5 |
6 | upload_bp = Blueprint('upload', __name__)
7 | UPLOAD_FOLDER = "../uploads"  # NOTE(review): unused in this module — uploads go straight to S3
8 |
9 | @upload_bp.route('/upload-files', methods=['POST'])
10 | def upload_files():
11 | username = user_utils.get_current_user(request)
12 | if not username:
13 | return jsonify({'error': 'Unauthorized'}), 401
14 |
15 | if 'files' not in request.files:
16 | return jsonify({'error': 'No files part'}), 400
17 |
18 | if 'coursename' not in request.form:
19 | return jsonify({'error': 'No course name'}), 400
20 |
21 | coursename = request.form['coursename']
22 | files = request.files.getlist('files')
23 | uploaded_files = []
24 |
25 | for file in files:
26 | if file.filename:  # skip empty file inputs
27 | filename = secure_filename(file.filename)  # sanitize to a safe S3 key component
28 | file.seek(0)  # rewind in case the stream was already read
29 | s3_utils.upload_file_to_s3(file, "jasmintechs-tutorion",
30 | s3_utils.get_s3_file_path(username, coursename, filename))
31 | uploaded_files.append(filename)
32 |
33 | return jsonify({'message': 'Files uploaded successfully', 'files': uploaded_files})
34 |
--------------------------------------------------------------------------------
/backend/routes/course_info_routes.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint, request, jsonify
2 | import os  # NOTE(review): unused in this module
3 | import json  # NOTE(review): unused in this module
4 | import utils.user_utils as user_utils # Assuming this utility gets the current username
5 | import utils.s3_utils as s3_utils # Assuming this utility interacts with S3
6 |
7 | course_info_bp = Blueprint('course_info', __name__)
8 |
9 | @course_info_bp.route('/course_info', methods=['POST'])
10 | def course_info():
11 | username = user_utils.get_current_user(request)
12 | if not username:
13 | return jsonify({'error': 'Unauthorized'}), 401
14 |
15 | try:
16 | # Get course info from S3
17 | s3_course_info = s3_utils.get_s3_user_courses_info(username)
18 |
19 | # Ensure we're returning a list of courses
20 | if not isinstance(s3_course_info, list):
21 | s3_course_info = []
22 |
23 | # Process each course, backfilling defaults so the frontend always sees these keys
24 | processed_courses = []
25 | for course in s3_course_info:
26 | processed_course = {
27 | 'id': course.get('id'),
28 | 'title': course.get('title', 'Untitled Course'),
29 | 'progress': course.get('progress', 0),
30 | 'hoursCompleted': course.get('hoursCompleted', 0),
31 | 'author': course.get('author', 'Unknown Instructor')
32 | }
33 | processed_courses.append(processed_course)
34 |
35 | return jsonify({'courses': processed_courses})
36 |
37 | except Exception as e:
38 | return jsonify({'error': str(e)}), 500  # NOTE(review): exposes internal error text to the client; consider logging server-side instead
39 |
--------------------------------------------------------------------------------
/app/courses/[id]/types.ts:
--------------------------------------------------------------------------------
1 | // app/courses/[id]/types.ts
2 | export interface Message { // one chat message in the conversation history
3 | id: number
4 | sender: "user" | "ai"
5 | text: string
6 | timestamp: Date
7 | slides?: { title: string; content: string }[] // optional slide deck attached to an AI reply
8 | }
9 |
10 | export interface File { // uploaded-file metadata; size/uploadedAt are preformatted display strings
11 | id: string
12 | name: string
13 | size: string
14 | type: string
15 | uploadedAt: string
16 | }
17 |
18 | declare global {
19 | interface Window { // Web Speech API constructors: unprefixed and webkit-prefixed variants
20 | SpeechRecognition: {
21 | new(): SpeechRecognition
22 | }
23 | webkitSpeechRecognition: {
24 | new(): SpeechRecognition
25 | }
26 | }
27 | }
28 |
29 | export interface SpeechRecognition extends EventTarget { // minimal hand-rolled typings for the Web Speech API recognizer
30 | continuous: boolean
31 | interimResults: boolean
32 | lang: string
33 | maxAlternatives: number
34 | onend: ((this: SpeechRecognition, ev: Event) => any) | null
35 | onerror: ((this: SpeechRecognition, ev: Event) => any) | null
36 | onresult: ((this: SpeechRecognition, ev: SpeechRecognitionEvent) => any) | null
37 | onstart: ((this: SpeechRecognition, ev: Event) => any) | null
38 | start: () => void
39 | stop: () => void
40 | abort: () => void
41 | }
42 |
43 | export interface SpeechRecognitionEvent extends Event { // payload delivered to onresult
44 | results: SpeechRecognitionResultList
45 | resultIndex: number
46 | }
47 |
48 | export interface SpeechRecognitionResultList { // array-like collection of results
49 | length: number
50 | item(index: number): SpeechRecognitionResult
51 | [index: number]: SpeechRecognitionResult
52 | }
53 |
54 | export interface SpeechRecognitionResult { // array-like collection of alternatives for one utterance
55 | isFinal: boolean
56 | length: number
57 | item(index: number): SpeechRecognitionAlternative
58 | [index: number]: SpeechRecognitionAlternative
59 | }
60 |
61 | export interface SpeechRecognitionAlternative {
62 | confidence: number
63 | transcript: string
64 | }
--------------------------------------------------------------------------------
/app/courses/[id]/components/SlideViewer.tsx:
--------------------------------------------------------------------------------
1 | // app/courses/[id]/components/SlideViewer.tsx — NOTE(review): JSX markup appears corrupted by extraction (inner lines 20-26 and 32-33, 42-43 are missing); do not treat the markup below as authoritative — recover from the repo
2 | 'use client'
3 |
4 | import { Button } from "@/components/ui/button"
5 | import { Slide } from "@/components/slide"
6 |
7 | export const SlideViewer = ({
8 | slides,
9 | currentSlideIndex,
10 | setCurrentSlideIndex
11 | }: {
12 | slides: { title: string; content: string }[],
13 | currentSlideIndex: number,
14 | setCurrentSlideIndex: (index: number) => void
15 | }) => {
16 | return (
17 |
18 |
19 | {slides.map((slide, index) => (
20 |
27 | ))}
28 |
29 |
30 |
31 | setCurrentSlideIndex(Math.max(0, currentSlideIndex - 1))}
34 | disabled={currentSlideIndex === 0}
35 | >
36 | Previous
37 |
38 |
39 | Slide {currentSlideIndex + 1} of {slides.length}
40 |
41 | setCurrentSlideIndex(Math.min(slides.length - 1, currentSlideIndex + 1))}
44 | disabled={currentSlideIndex === slides.length - 1}
45 | >
46 | Next
47 |
48 |
49 |
50 | )
51 | }
--------------------------------------------------------------------------------
/app/courses/[id]/components/ChatHistory.tsx:
--------------------------------------------------------------------------------
1 | // app/courses/[id]/components/ChatHistory.tsx — NOTE(review): JSX markup appears corrupted by extraction (stripped tags, stray unnumbered fragments like '{ message.text }'); do not treat the markup below as authoritative — recover from the repo
2 | 'use client'
3 |
4 | import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar"
5 | import { ScrollArea } from "@/components/ui/scroll-area"
6 | import { Bot } from 'lucide-react'
7 | import { Message } from "../types"
8 |
9 | export const ChatHistory = ({ messages, isTyping }: { messages: Message[], isTyping: boolean }) => {
10 | return (
11 |
12 |
13 | {
14 | messages.map(message => (
15 |
16 | {
17 | message.sender === "ai" ? (
18 |
19 |
20 |
21 |
22 | ) : (
23 |
24 |
25 | U
26 |
27 | )}
28 |
29 |
{ message.text }
30 | < span className = "text-xs text-gray-500 dark:text-gray-400 mt-1 block" >
31 | { message.timestamp.toLocaleTimeString() }
32 |
33 |
34 |
35 | ))}
36 | {
37 | isTyping && (
38 |
39 |
40 |
41 |
42 |
43 | < div className = "flex-1 bg-blue-100 dark:bg-blue-900 p-3 rounded-lg" >
44 |
Typing...
45 |
46 |
47 | )
48 | }
49 |
50 |
51 | )
52 | }
--------------------------------------------------------------------------------
/app/courses/page.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import { MainNav } from "@/components/main-nav"
4 | import { CourseCard } from "@/components/course-card"
5 | import { CreateCourseModal } from "@/components/create-course-modal"
6 | import { Button } from "@/components/ui/button"
7 | import { Plus } from 'lucide-react'
8 | import { useState } from "react"
9 | import { useCourses } from "@/lib/course-context"
10 |
11 | export default function CoursesPage() {
12 | const [createModalOpen, setCreateModalOpen] = useState(false)
13 | const [courseToEdit, setCourseToEdit] = useState(null);
14 | const { courses, removeCourse, addCourse } = useCourses()
15 |
16 | const handleCustomize = (course) => { // course: untyped — presumably an item from useCourses().courses
17 | setCourseToEdit(course); // set the course to edit
18 | setCreateModalOpen(true); // open the modal
19 | };
20 |
21 | const handleAddCourse = () => {
22 | setCourseToEdit(null); // clear the course being edited
23 | setCreateModalOpen(true); // open the modal
24 | };
25 |
26 | return ( // NOTE(review): JSX markup partially stripped by extraction (inner lines 40-47 and 53-55 missing); recover from the repo
27 |
28 |
29 |
30 |
31 |
My Courses
32 |
33 |
34 | Add Course
35 |
36 |
37 |
38 | {courses.map((course) => (
39 | handleCustomize(course)} // pass the current course
48 | onRemove={() => removeCourse(course.id, course.title)}
49 | />
50 | ))}
51 |
52 | {
56 | addCourse(courseData);
57 | setCreateModalOpen(false);
58 | }}
59 | courseToEdit={courseToEdit} // pass the course to edit
60 | />
61 |
62 |
63 | );
64 | }
65 |
66 |
--------------------------------------------------------------------------------
/backend/routes/voice_routes.py:
--------------------------------------------------------------------------------
from flask import Blueprint, request, jsonify, send_file
import edge_tts
import asyncio
import os
import tempfile
import openai

voice_bp = Blueprint('voice', __name__)
AUDIO_DIR = "audio_cache"
os.makedirs(AUDIO_DIR, exist_ok=True)

client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))


@voice_bp.route('/api/list-voices', methods=['GET'])
def list_voices():
    """Return the full edge-tts voice catalogue as JSON."""
    try:
        # edge-tts is async-only; bridge into the sync Flask view.
        voices = asyncio.run(edge_tts.list_voices())
        return jsonify(voices)
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@voice_bp.route('/api/generate-audio', methods=['POST'])
def generate_audio():  # sync view; async TTS work is wrapped below
    """Synthesize speech for the posted text and return it as an MP3 attachment.

    JSON body: {"text": ..., "voice": optional edge-tts voice name}.
    """
    data = request.json or {}
    text = data.get("text", "")
    voice = data.get("voice", "en-US-AvaMultilingualNeural")

    if not text:
        return jsonify({"error": "No text provided"}), 400

    # Single shared output path — concurrent requests overwrite each other.
    # NOTE(review): kept for interface compatibility; consider per-request file names.
    audio_file = os.path.join(AUDIO_DIR, "output.mp3")
    try:
        # Wrap the async edge-tts operations so they can run from this sync view.
        async def generate():
            communicate = edge_tts.Communicate(text, voice)
            await communicate.save(audio_file)

        asyncio.run(generate())
        return send_file(audio_file, as_attachment=True)
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@voice_bp.route('/api/recognize-openai', methods=['POST'])
def recognize_with_openai():
    """Transcribe an uploaded 'audio' file via the OpenAI Whisper API.

    Returns {"text": transcription} on success, 400/500 with an error otherwise.
    """
    if 'audio' not in request.files:
        return jsonify({"error": "No audio file uploaded"}), 400

    audio_file = request.files['audio']
    audio_path = None  # tracked so cleanup can run on every exit path

    try:
        # Persist the upload to a temp file; delete=False because we reopen it below.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_file:
            audio_path = temp_file.name
            audio_file.save(audio_path)

        # Use OpenAI Whisper API
        with open(audio_path, "rb") as file:
            response = client.audio.transcriptions.create(
                model="whisper-1",
                file=file,
                response_format="verbose_json",
                timestamp_granularities=["segment", "word"]
            )

        print(f"response: {response}")
        print(f"user prompt: {response.text}")

        # Return the transcription text
        return jsonify({"text": response.text})
    except Exception as e:
        print(f"Error in recognize_with_openai: {str(e)}")
        return jsonify({"error": str(e)}), 500
    finally:
        # Bug fix: the original removed the temp file only on the success path,
        # leaking one file per failed transcription request.
        if audio_path and os.path.exists(audio_path):
            os.remove(audio_path)
--------------------------------------------------------------------------------
/backend/api/in_class.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from flask import Blueprint, request, jsonify
4 | import utils.user_utils as user_utils
5 | import utils.s3_utils as s3_utils
6 | from dotenv import load_dotenv
7 | from functions.slides_navigation import get_slides, get_current_slide, set_current_slide
8 | from utils.socket_utils import emit_slide_change
9 | import datetime
10 |
11 | load_dotenv()
12 |
13 | S3_BUCKET_NAME = "jasmintechs-tutorion"
14 |
15 | in_class = Blueprint('in-class', __name__)
16 |
@in_class.route('/next-slide', methods=['POST'])
def next_slide():
    """Advance the assistant's slide position by one and return the next transcript.

    Returns 401 when unauthenticated, 400 on a missing assistant_id,
    404 when the assistant has no stored user data.
    """
    username = user_utils.get_current_user(request)
    if not username:
        return jsonify({'error': 'Unauthorized'}), 401

    request_data = request.get_json()
    assistant_id = request_data.get('assistant_id', '')

    # Validate BEFORE touching S3: the original fetched user data first and
    # only then checked the id, so an empty id still cost an S3 round-trip
    # and could raise inside load_assistant_user_from_s3.
    if not assistant_id:
        return jsonify({'error': 'Assistant ID is required'}), 400

    user_course_data = s3_utils.load_assistant_user_from_s3(assistant_id)
    if not user_course_data:
        return jsonify({'error': 'Assistant not found'}), 404
    course_id = user_course_data['course_id']

    print(f"Going to next slide for {assistant_id}")
    slides = get_slides(course_id, username)
    if not slides:
        return "No slides found for this course."
    current_position = get_current_slide(assistant_id)

    new_position = current_position + 1
    if new_position < len(slides):
        set_current_slide(assistant_id, new_position)
        # Emit event to frontend via Socket.IO so the viewer follows along
        emit_slide_change(assistant_id, new_position)
        return "Here is the transcript you would like to read for the next slide: " + slides[new_position]['transcript']
    else:
        return "You're already at the last slide."
46 |
@in_class.route('/save-position', methods=['POST'])
def save_assistant_position():
    """Persist an assistant's last slide position for a course to S3.

    Expects JSON: assistant_id, course_id, position.
    """
    username = user_utils.get_current_user(request)
    if not username:
        return jsonify({'error': 'Unauthorized'}), 401

    payload = request.get_json()
    assistant_id = payload.get('assistant_id', '')
    course_id = payload.get('course_id', '')
    position = payload.get('position')

    # All three fields are mandatory; position may legitimately be 0.
    if not assistant_id or position is None or not course_id:
        return jsonify({'error': 'Missing required parameters'}), 400

    try:
        # Confirm the assistant exists before writing anything.
        user_data = s3_utils.load_assistant_user_from_s3(assistant_id)
        if not user_data:
            return jsonify({'error': 'Assistant not found'}), 404

        # Build the position record and store it under the user's course folder.
        s3_path = s3_utils.get_s3_file_path(username, course_id, "assistant_position.json")
        record = {
            "assistant_id": assistant_id,
            "course_id": course_id,
            "last_position": position,
            "timestamp": str(datetime.datetime.now()),
        }
        s3_utils.upload_json_to_s3(record, s3_utils.S3_BUCKET_NAME, s3_path)

        return jsonify({'success': True, 'message': 'Position saved successfully'}), 200
    except Exception as e:
        print(f"Error saving assistant position: {e}")
        return jsonify({'error': f'Failed to save position: {str(e)}'}), 500
82 |
83 |
84 |
--------------------------------------------------------------------------------
/backend/utils/firebase_admin.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 | import firebase_admin
4 | from firebase_admin import credentials, auth
5 | from dotenv import load_dotenv
6 |
7 | # Configure logging
8 | logging.basicConfig(level=logging.INFO)
9 | logger = logging.getLogger(__name__)
10 |
11 | # Load environment variables
12 | load_dotenv()
13 |
def initialize_firebase_admin():
    """Initialize the Firebase Admin SDK from environment variables.

    Returns:
        True when the SDK is ready (newly initialized or already set up),
        False when any required configuration is missing or setup fails.
    """
    try:
        # .env files store the key with literal "\n" sequences; restore them.
        private_key = os.getenv("FIREBASE_PRIVATE_KEY", "").replace("\\n", "\n")
        if not private_key:
            raise ValueError("FIREBASE_PRIVATE_KEY environment variable is not set")

        # Assemble the service-account payload the SDK normally reads from a
        # JSON credentials file.
        service_account_info = {
            "type": "service_account",
            "project_id": os.getenv("FIREBASE_PROJECT_ID"),
            "private_key_id": os.getenv("FIREBASE_PRIVATE_KEY_ID"),
            "private_key": private_key,
            "client_email": os.getenv("FIREBASE_CLIENT_EMAIL"),
            "client_id": os.getenv("FIREBASE_CLIENT_ID"),
            "auth_uri": "https://accounts.google.com/o/oauth2/auth",
            "token_uri": "https://oauth2.googleapis.com/token",
            "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
            "client_x509_cert_url": os.getenv("FIREBASE_CLIENT_CERT_URL")
        }

        # Fail fast with a precise message when any env-derived field is absent.
        required = ("project_id", "private_key_id", "private_key",
                    "client_email", "client_id", "client_x509_cert_url")
        missing = [name for name in required if not service_account_info.get(name)]
        if missing:
            raise ValueError(f"Missing required Firebase configuration fields: {', '.join(missing)}")

        # firebase_admin tracks initialized apps globally; avoid double init.
        if firebase_admin._apps:
            logger.info("Firebase Admin SDK already initialized")
        else:
            cred = credentials.Certificate(service_account_info)
            firebase_admin.initialize_app(cred)
            logger.info("Firebase Admin SDK initialized successfully")

        return True
    except Exception as exc:
        logger.error(f"Error initializing Firebase Admin SDK: {exc}")
        return False
54 |
def verify_firebase_token(token):
    """Verify a Firebase ID token.

    Args:
        token: raw ID token, optionally prefixed with "Bearer ".
    Returns:
        The decoded token claims dict, or None when missing/invalid.
    """
    try:
        if not token:
            logger.warning("No token provided")
            return None

        # Accept "Bearer <token>" as well as a bare token string.
        if token.startswith('Bearer '):
            token = token[7:]

        try:
            decoded = auth.verify_id_token(token)
            logger.info(f"Token verified successfully for user: {decoded.get('email')}")
            return decoded
        except Exception as e:
            # Expired/forged/garbled tokens land here; treat as unauthenticated.
            logger.error(f"Error verifying token: {str(e)}")
            return None
    except Exception as e:
        # Defensive outer catch (e.g. a non-string token argument).
        logger.error(f"Error in verify_firebase_token: {str(e)}")
        return None
77 |
78 | # Initialize Firebase Admin when the module is imported
79 | initialize_firebase_admin()
80 |
--------------------------------------------------------------------------------
/backend/routes/aiTutor_routes.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, request, jsonify, Response, send_file, Blueprint
2 | import openai
3 | import os
4 | from langgraph.checkpoint.memory import MemorySaver
5 | from langgraph.graph import START, MessagesState, StateGraph
6 | from langchain_core.messages import HumanMessage, AIMessage
7 |
8 |
# Flask blueprint exposing the AI-tutor chat endpoint.
aitutor_bp = Blueprint('aitutor', __name__)

# Initialize OpenAI client (reads OPENAI_API_KEY from the environment).
client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Define a new LangChain graph; MessagesState carries the running chat history.
workflow = StateGraph(state_schema=MessagesState)
16 |
17 | # Define the function that calls the OpenAI model
# Define the function that calls the OpenAI model
def call_model(state: MessagesState):
    """LangGraph node: send the conversation state to OpenAI and return the reply.

    Returns {"messages": [AIMessage(...)]} so LangGraph appends the reply to
    the checkpointed history; on failure a canned error message is returned.
    """
    try:
        # Prepare system message and state messages for the conversation
        system_message = {
            "role": "system",
            "content": '''You are a helpful AI assistant for the AI Tutor platform 'Tutorion'. Students will ask you some questions after or before their online courses.
            Make sure your response is concise and to the point, don't be too formal and don't be too long.
            ---
            Some of our information: your role is to help students with their online courses. Go to courses tab if they want to add their customized courses, modify their courses or go and start their classes.
            Go to schedule tab if they want to see their schedule.
            Go to profile tab if they want to see their profile.
            Go to progress tab if they want to see their customized courses related data.
            '''
        }

        # Preserve each message's role: the original flattened EVERY stored
        # message to role "user", so the model saw its own prior replies as
        # if the student had said them.
        history = [
            {"role": "assistant" if isinstance(msg, AIMessage) else "user",
             "content": msg.content}
            for msg in state["messages"]
        ]
        messages = [system_message] + history

        # Call OpenAI model
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            max_tokens=400,
            temperature=0.9
        )

        # Extract AI's response
        ai_content = response.choices[0].message.content

        # Return the messages including the AI response
        return {"messages": [AIMessage(ai_content)]}
    except Exception as e:
        print(f"Error in call_model: {str(e)}")
        return {"messages": [AIMessage("An error occurred while processing your request.")]}
52 |
# Define the (single) node in the graph: START -> "model" (call_model).
workflow.add_edge(START, "model")
workflow.add_node("model", call_model)

# Add memory: MemorySaver checkpoints per-thread conversation state so each
# thread_id resumes its own history across requests.
memory = MemorySaver()
app_workflow = workflow.compile(checkpointer=memory)
60 |
61 | # API Endpoint for handling user input
# API Endpoint for handling user input
@aitutor_bp.route('/api/aitutor-response', methods=['POST'])
def get_ai_response():
    """Run the user's input through the LangGraph workflow and return the
    model's reply as plain text.

    Expects JSON: {"input": str, "thread_id": optional str} — each thread_id
    keeps its own checkpointed conversation memory.
    """
    try:
        data = request.get_json()
        if not data:
            return {'error': 'No data provided'}, 400

        user_input = data.get('input')
        thread_id = data.get('thread_id', 'default_thread')  # per-thread memory key
        if not user_input:
            return {'error': 'No input provided'}, 400

        # Invoke the LangChain workflow with the new human turn.
        config = {"configurable": {"thread_id": thread_id}}
        output = app_workflow.invoke({"messages": [HumanMessage(user_input)]}, config)

        # The last message in the returned state is the model's newest reply.
        return output["messages"][-1].content
        #return {'response': ai_response}
    except Exception as e:
        print(f"Error in get_ai_response: {str(e)}")
        return {'error': str(e)}, 500
--------------------------------------------------------------------------------
/app/my-publish/data-center/page.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import { HeaderOnlyLayout } from "@/components/layout/HeaderOnlyLayout"
4 | import { ScrollArea } from "@/components/ui/scroll-area"
5 | import { PublishSidebar } from "@/components/my-publish/publish-page-sidebar"
6 | import { BarChart, LineChart, PieChart } from "lucide-react"
7 |
8 | export default function DataCenterPage() {
9 | return (
10 |
11 |
12 | {/* Left Sidebar */}
13 |
14 |
15 | {/* Main Content Area */}
16 |
17 |
18 |
19 | {/* Page Header */}
20 |
21 |
Data Center
22 |
Analytics and insights for your content
23 |
24 |
25 | {/* Dashboard Cards */}
26 |
27 |
28 |
29 |
Total Views
30 |
31 |
32 |
6,620
33 |
↑ 12% from last month
34 |
35 |
36 |
37 |
38 |
Watch Time
39 |
40 |
41 |
248 hrs
42 |
↑ 8% from last month
43 |
44 |
45 |
46 |
50 |
824
51 |
↑ 15% from last month
52 |
53 |
54 |
55 | {/* Charts Placeholder */}
56 |
57 |
58 |
Views Over Time
59 |
60 |
Chart visualization would go here
61 |
62 |
63 |
64 |
65 |
Popular Content
66 |
67 |
Chart visualization would go here
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 | )
77 | }
--------------------------------------------------------------------------------
/middleware.ts:
--------------------------------------------------------------------------------
1 | // import { NextResponse } from 'next/server';
2 | // import type { NextRequest } from 'next/server';
3 | //
4 | // // Predefined user credentials
5 | // const USERS = {
6 | // user1: 'password1',
7 | // user2: 'password2',
8 | // };
9 | //
10 | // export function middleware(request: NextRequest) {
11 | // const authHeader = request.headers.get('authorization');
12 | //
13 | // // If no authorization header is provided, trigger the browser's login popup
14 | // if (!authHeader) {
15 | // return new Response('Unauthorized', {
16 | // status: 401,
17 | // headers: { 'WWW-Authenticate': 'Basic realm="Login Required"' },
18 | // });
19 | // }
20 | //
21 | // // Decode the Authorization header
22 | // const credentials = Buffer.from(authHeader.split(' ')[1], 'base64').toString();
23 | // const [username, password] = credentials.split(':');
24 | //
25 | // // Validate the username and password
26 | // if (!(username in USERS && USERS[username] === password)) {
27 | // return new Response('Unauthorized', {
28 | // status: 401,
29 | // headers: { 'WWW-Authenticate': 'Basic realm="Login Required"' },
30 | // });
31 | // }
32 | //
33 | // // Pass the authenticated username to the next step
34 | // request.headers.set('x-authenticated-user', username);
35 | //
36 | // // Allow the request to continue to the app
37 | // return NextResponse.next();
38 | // }
39 |
40 | import { NextResponse } from 'next/server';
41 | import type { NextRequest } from 'next/server';
42 | import { parse } from 'cookie';
43 |
// Predefined user credentials
// NOTE(review): plaintext credentials committed to source — acceptable for a
// demo gate only; replace with a real user store / hashed secrets before
// production use.
const USERS = {
  user1: 'password1',
  user2: 'password2',
};
49 |
50 | function isAuthenticated(request: NextRequest) {
51 | const cookieHeader = request.headers.get('cookie');
52 | console.log('Cookie Header:', cookieHeader);
53 |
54 | if (!cookieHeader) return false;
55 |
56 | const cookies = parse(cookieHeader);
57 | console.log('Parsed Cookies:', cookies);
58 |
59 | // Check for auth cookie (predefined user credentials)
60 | const authCookie = cookies.auth;
61 | if (authCookie) {
62 | const credentials = Buffer.from(authCookie, 'base64').toString();
63 | const [username, password] = credentials.split(':');
64 | if (username in USERS && (USERS as {[key: string]: string})[username] === password) {
65 | return true;
66 | }
67 | }
68 |
69 | // Check for email cookie (Google authentication)
70 | const emailCookie = cookies.user_email;
71 | if (emailCookie) {
72 | console.log('Authenticated via Google with email:', emailCookie);
73 | return true;
74 | }
75 |
76 | console.log('No valid authentication found');
77 | return false;
78 | }
79 |
80 | // Middleware function
81 | export function middleware(request: NextRequest) {
82 | const authenticated = isAuthenticated(request);
83 |
84 | console.log('authenticated', authenticated);
85 | console.log('request.nextUrl.pathname', request.nextUrl.pathname);
86 |
87 | const pathname = request.nextUrl.pathname;
88 |
89 | // Exclude requests for static files and assets
90 | if (pathname.startsWith('/_next') || pathname.startsWith('/static')) {
91 | return NextResponse.next();
92 | }
93 |
94 | if (!authenticated && !request.nextUrl.pathname.startsWith('/login')) {
95 | console.log('redirecting to /login');
96 | return NextResponse.redirect(new URL('/login', request.url));
97 | }
98 |
99 | if (authenticated && request.nextUrl.pathname === '/login') {
100 | return NextResponse.redirect(new URL('/dashboard', request.url));
101 | }
102 |
103 | // Add authenticated username to headers
104 | const response = NextResponse.next();
105 | const authHeader = request.headers.get('authorization');
106 | if (authHeader) {
107 | const credentials = Buffer.from(authHeader.split(' ')[1], 'base64').toString();
108 | const [username] = credentials.split(':');
109 | response.headers.set('x-authenticated-user', username);
110 | }
111 |
112 | return response;
113 | }
114 |
115 |
116 |
--------------------------------------------------------------------------------
/app/my-uploads/page.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import { useState, useEffect } from "react"
4 | import { MainLayout } from "@/components/layout/MainLayout"
5 | import { ScrollArea } from "@/components/ui/scroll-area"
6 | import { CustomizeCourseModal } from "@/components/my-uploads/create-course-modal-v2"
7 | import { MyCoursesHeader } from "@/components/my-uploads/my-courses-section-header"
8 | import { CourseCard } from "@/components/my-uploads/CourseCard"
9 | import { FullscreenButton } from "@/components/layout/fullscreen-button"
10 |
11 | // Sample courses data without course codes
12 | // TODO: Replace with API call to fetch courses
13 | // This is temporary mock data that will be replaced with actual data from the backend
const courses = [
  // Each entry: id (numeric key), title (card heading), hoursCompleted
  // (content hours), enrolled (student count), views (total view count),
  // isPublished (whether students can see the course).
  {
    id: 1,
    title: "Introduction to AI",
    hoursCompleted: 7.5,
    enrolled: 485,
    views: 1250,
    isPublished: true
  },
  {
    id: 2,
    title: "Machine Learning Fundamentals",
    hoursCompleted: 4.5,
    enrolled: 320,
    views: 890,
    isPublished: true
  },
  {
    id: 3,
    title: "Deep Learning with Python",
    hoursCompleted: 2,
    enrolled: 156,
    views: 430,
    isPublished: false
  },
  {
    // Intentionally long title to exercise truncation/wrapping in the card UI.
    id: 4,
    title: "Natural Language Processing and Contextual Understanding in Modern Applications",
    hoursCompleted: 6,
    enrolled: 278,
    views: 615,
    isPublished: true
  },
  {
    id: 5,
    title: "CV", // Very short title to test that case
    hoursCompleted: 3,
    enrolled: 92,
    views: 205,
    isPublished: false
  },
  {
    id: 6,
    title: "Frontend Development with React and TypeScript",
    hoursCompleted: 5,
    enrolled: 347,
    views: 780,
    isPublished: false
  }
]
64 |
65 | export default function CoursesPage() {
66 | const [isFullScreen, setIsFullScreen] = useState(false)
67 | // TODO: Add state for courses
68 | // const [courses, setCourses] = useState([])
69 | // const [isLoading, setIsLoading] = useState(true)
70 | // const [error, setError] = useState(null)
71 |
72 | // TODO: API endpoint - Fetch courses from backend
73 | // useEffect(() => {
74 | // async function fetchCourses() {
75 | // try {
76 | // setIsLoading(true);
77 | // // const response = await fetch('/api/courses');
78 | // // if (!response.ok) throw new Error('Failed to fetch courses');
79 | // // const data = await response.json();
80 | // // setCourses(data);
81 | // } catch (error) {
82 | // // setError(error.message);
83 | // console.error('Error fetching courses:', error);
84 | // } finally {
85 | // // setIsLoading(false);
86 | // }
87 | // }
88 | // fetchCourses();
89 | // }, []);
90 |
91 | // Function to toggle fullscreen mode
92 | const toggleFullScreen = () => {
93 | if (!document.fullscreenElement) {
94 | document.documentElement.requestFullscreen().catch(err => {
95 | console.log(`Error attempting to enable fullscreen: ${err.message}`);
96 | });
97 | setIsFullScreen(true);
98 | } else {
99 | if (document.exitFullscreen) {
100 | document.exitFullscreen();
101 | setIsFullScreen(false);
102 | }
103 | }
104 | };
105 |
106 | // Listen for fullscreen change events
107 | useEffect(() => {
108 | const handleFullscreenChange = () => {
109 | setIsFullScreen(!!document.fullscreenElement);
110 | };
111 |
112 | document.addEventListener('fullscreenchange', handleFullscreenChange);
113 | return () => {
114 | document.removeEventListener('fullscreenchange', handleFullscreenChange);
115 | };
116 | }, []);
117 |
118 | return (
119 |
120 |
121 |
122 |
123 |
124 |
125 |
129 |
130 |
131 |
135 |
136 |
137 |
138 |
139 |
140 | {courses.map((course) => (
141 |
142 | ))}
143 |
144 |
145 |
146 |
147 |
148 | )
149 | }
150 |
151 |
--------------------------------------------------------------------------------
/app/course-preview/[id]/page.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import { useState, useEffect } from "react"
4 | import { useRouter } from "next/navigation"
5 | import { Button } from "@/components/ui/button"
6 | import { ArrowLeft, Maximize, Minimize } from "lucide-react"
7 | import { LightCourseLayout } from "@/components/layout/LightCourseLayout"
8 | import { SlideViewer } from "@/components/creator-edit/slide-viewer"
9 |
10 | export default function CoursePreviewPage({ params }: { params: { id: string } }) {
11 | const courseId = params.id
12 | const router = useRouter()
13 | const [currentSlide, setCurrentSlide] = useState(0)
14 | const [slides, setSlides] = useState([])
15 | const [isFullScreen, setIsFullScreen] = useState(false)
16 | const [courseTitle, setCourseTitle] = useState("Course Preview")
17 |
18 | // Load course data from localStorage on mount
19 | useEffect(() => {
20 | const savedData = localStorage.getItem('previewCourseData')
21 | if (savedData) {
22 | const data = JSON.parse(savedData)
23 | setSlides(data.slides || [])
24 | setCurrentSlide(data.currentSlide || 0)
25 | setCourseTitle(`Preview: ${data.slides[data.currentSlide]?.title || 'Course'}`)
26 | } else {
27 | // Redirect back if no data is found
28 | router.push(`/creator-edit/${courseId}`)
29 | }
30 | }, [courseId, router])
31 |
32 | // Function to toggle fullscreen mode
33 | const toggleFullScreen = () => {
34 | if (!document.fullscreenElement) {
35 | document.documentElement.requestFullscreen().catch(err => {
36 | console.log(`Error attempting to enable fullscreen: ${err.message}`)
37 | })
38 | setIsFullScreen(true)
39 | } else {
40 | document.exitFullscreen()
41 | setIsFullScreen(false)
42 | }
43 | }
44 |
45 | // Add fullscreen listener
46 | useEffect(() => {
47 | const handleFullscreenChange = () => {
48 | setIsFullScreen(!!document.fullscreenElement)
49 | }
50 |
51 | document.addEventListener('fullscreenchange', handleFullscreenChange)
52 | return () => {
53 | document.removeEventListener('fullscreenchange', handleFullscreenChange)
54 | }
55 | }, [])
56 |
57 | // Navigation functions
58 | const goToPrevSlide = () => {
59 | if (currentSlide > 0) {
60 | setCurrentSlide(currentSlide - 1)
61 | setCourseTitle(`Preview: ${slides[currentSlide - 1]?.title || 'Course'}`)
62 | }
63 | }
64 |
65 | const goToNextSlide = () => {
66 | if (currentSlide < slides.length - 1) {
67 | setCurrentSlide(currentSlide + 1)
68 | setCourseTitle(`Preview: ${slides[currentSlide + 1]?.title || 'Course'}`)
69 | }
70 | }
71 |
72 | return (
73 |
74 | {/* Control buttons */}
75 |
76 |
router.push(`/creator-edit/${courseId}`)}
78 | className="bg-emerald-600 hover:bg-emerald-700 text-white shadow-md rounded-full w-10 h-10 flex items-center justify-center"
79 | title="Back to editor"
80 | >
81 |
82 |
83 |
84 |
85 |
90 | {isFullScreen ? (
91 |
92 | ) : (
93 |
94 | )}
95 |
96 |
97 | {/* Main content */}
98 |
99 |
100 | {/* Slide viewer */}
101 | {slides.length > 0 && (
102 |
107 | )}
108 |
109 | {/* Navigation controls */}
110 |
111 |
116 | Previous
117 |
118 |
119 | Slide {currentSlide + 1} of {slides.length}
120 |
121 |
126 | Next
127 |
128 |
129 |
130 |
131 |
132 | )
133 | }
--------------------------------------------------------------------------------
/app/courses/[id]/hooks/useConversation.ts:
--------------------------------------------------------------------------------
1 | // app/courses/[id]/hooks/useConversation.ts
2 | 'use client'
3 |
4 | import { useState, useEffect, useRef } from "react"
5 | import { Message } from "../types"
6 | import { useRouter } from 'next/navigation'
7 | import { useSpeech } from './useSpeech'
8 |
9 | export const useConversation = (courseId: string) => {
10 | const { speakResponse } = useSpeech(async () => null)
11 | const router = useRouter()
12 |
13 | const [messages, setMessages] = useState([
14 | {
15 | id: 1,
16 | sender: "ai",
17 | text: "Hello! I'm your AI tutor for this course. What would you like to learn today?",
18 | timestamp: new Date(),
19 | },
20 | ])
21 | const [isTyping, setIsTyping] = useState(false)
22 | const [currentSlideIndex, setCurrentSlideIndex] = useState(0)
23 |
24 | const debounceTimeoutRef = useRef(null)
25 | const lastMessageRef = useRef("")
26 |
27 | useEffect(() => {
28 | const initializeChatbot = async () => {
29 | try {
30 | const decodedTitle = decodeURIComponent(courseId)
31 | const response = await fetch("http://localhost:5000/api/initialize-chatbot", {
32 | method: "POST",
33 | headers: { "Content-Type": "application/json" },
34 | credentials: "include",
35 | body: JSON.stringify({ course_title: decodedTitle }),
36 | })
37 |
38 | if (!response.ok) throw new Error('Failed to initialize chatbot')
39 |
40 | const data = await response.json()
41 | console.log("Chatbot initialized:", data)
42 | } catch (error) {
43 | console.error("Error initializing chatbot:", error)
44 | }
45 | }
46 |
47 | initializeChatbot()
48 | }, [courseId])
49 |
50 | const handleConversationCycle = async (userInput: string): Promise => {
51 | if (!userInput.trim()) return null
52 |
53 | setMessages(prev => [...prev, {
54 | id: Date.now(),
55 | sender: "user",
56 | text: userInput,
57 | timestamp: new Date()
58 | }])
59 | setIsTyping(true)
60 |
61 | let aiMessageId = Date.now()
62 | let accumulatedText = ""
63 | let lastSpokenLength = 0
64 |
65 | setMessages(prev => [...prev, {
66 | id: aiMessageId,
67 | sender: "ai",
68 | text: "",
69 | timestamp: new Date(),
70 | slides: []
71 | }])
72 |
73 | try {
74 | const response = await fetch("http://localhost:5000/api/get-ai-response", {
75 | method: "POST",
76 | headers: { "Content-Type": "application/json" },
77 | credentials: "include",
78 | body: JSON.stringify({ input: userInput })
79 | })
80 |
81 | if (!response.body) throw new Error("No response body received")
82 |
83 | const reader = response.body.getReader()
84 | const decoder = new TextDecoder()
85 |
86 | while (true) {
87 | const { done, value } = await reader.read()
88 | if (done) break
89 |
90 | const chunk = decoder.decode(value, { stream: true })
91 | const parsedChunks = chunk.trim().split("\n").map(line => JSON.parse(line))
92 |
93 | for (const { text } of parsedChunks) {
94 | if (text) {
95 | accumulatedText += text
96 | setMessages(prev =>
97 | prev.map(msg => msg.id === aiMessageId ? { ...msg, text: accumulatedText } : msg)
98 | )
99 |
100 | // Speak new content when we have a complete sentence or significant new content
101 | const newContent = accumulatedText.slice(lastSpokenLength)
102 | if (newContent.match(/[.!?]+/) || newContent.length > 100) {
103 | await speakResponse(newContent, true)
104 | lastSpokenLength = accumulatedText.length
105 | }
106 | }
107 | }
108 | }
109 |
110 | // Handle any remaining text that hasn't been spoken
111 | const remainingText = accumulatedText.slice(lastSpokenLength)
112 | if (remainingText.trim()) {
113 | await speakResponse(remainingText, true)
114 | }
115 |
116 | // After streaming is done, request slides
117 | const slidesResponse = await fetch("http://localhost:5000/api/get-slides", {
118 | method: "POST",
119 | headers: { "Content-Type": "application/json" },
120 | credentials: "include",
121 | body: JSON.stringify({ input: userInput })
122 | })
123 |
124 | const slidesData = await slidesResponse.json()
125 |
126 | // Update the AI message with slides if available
127 | setMessages(prev =>
128 | prev.map(msg => msg.id === aiMessageId ? { ...msg, slides: slidesData.slides } : msg)
129 | )
130 |
131 | if (slidesData.slides?.length) setCurrentSlideIndex(0)
132 |
133 | return accumulatedText || "" // Ensure it never returns undefined
134 | } catch (error) {
135 | console.error("Error:", error)
136 | setMessages(prev => prev.map(msg =>
137 | msg.id === aiMessageId ? { ...msg, text: "Sorry, I encountered an error. Please try again." } : msg
138 | ))
139 | return null
140 | } finally {
141 | setIsTyping(false)
142 | }
143 | }
144 |
145 |
146 | return {
147 | messages,
148 | isTyping,
149 | handleConversationCycle,
150 | currentSlideIndex,
151 | setCurrentSlideIndex
152 | };
153 |
154 | }
--------------------------------------------------------------------------------
/backend/functions/slides_navigation.py:
--------------------------------------------------------------------------------
1 | import json
2 | import redis
3 | from utils.socket_utils import emit_slide_change
4 | import utils.s3_utils as s3_utils
5 |
6 | # Initialize Redis connection
7 | redis_client = redis.Redis(host='localhost', port=6379, db=0)
8 |
9 |
def get_slides(course_id, username):
    """Fetch the slide deck (slides.json) for a course from the user's S3 folder."""
    key = s3_utils.get_s3_file_path(username, course_id, "slides.json")
    return s3_utils.get_json_from_s3("jasmintechs-tutorion", key)
15 |
16 |
def get_current_slide(assistant_id):
    """Return this assistant/session's current slide index (0 when unset)."""
    stored = redis_client.get(f"slide_position:{assistant_id}")
    if stored:
        return int(stored)
    return 0
21 |
22 |
def set_current_slide(assistant_id, position):
    """Persist the assistant's slide index in Redis with a 24-hour expiry."""
    key = f"slide_position:{assistant_id}"
    redis_client.setex(key, 86400, position)
26 |
27 |
def update_viewing_slide(assistant_id, position):
    """Record which slide the user is currently looking at (24-hour TTL)."""
    print(f"Updating viewing slide position for {assistant_id} to {position}")
    key = f"viewing_slide_position:{assistant_id}"
    redis_client.setex(key, 86400, position)
32 |
def go_to_next_slide(assistant_id, course_id, username):
    """Advance one slide, sync Redis and the frontend, and return the transcript."""
    print(f"Going to next slide for {assistant_id}")
    slides = get_slides(course_id, username)
    if not slides:
        return "No slides found for this course."

    target = get_current_slide(assistant_id) + 1
    if target >= len(slides):
        return "You're already at the last slide."

    set_current_slide(assistant_id, target)
    emit_slide_change(assistant_id, target)  # keep the viewer in sync via Socket.IO
    return "Here is the transcript you would like to read for the next slide: " + slides[target]['transcript']
49 |
50 |
def go_to_previous_slide(assistant_id, course_id, username):
    """Step back one slide, sync Redis and the frontend, and return the transcript."""
    print(f"Going to previous slide for {assistant_id}")
    slides = get_slides(course_id, username)
    if not slides:
        return "No slides found for this course."

    current = get_current_slide(assistant_id)
    if current <= 0:
        return "You're already at the first slide."

    target = max(0, current - 1)
    set_current_slide(assistant_id, target)
    emit_slide_change(assistant_id, target)  # keep the viewer in sync via Socket.IO
    return "Here is the transcript you would like to read for the previous slide: " + slides[target]['transcript']
67 |
68 |
def go_to_specified_slide(assistant_id, course_id, username, slide_number):
    """Jump to a 1-based slide number and return that slide's transcript."""
    index = slide_number - 1  # callers speak in 1-based slide numbers
    print(f"Going to slide {index} for {assistant_id}")
    slides = get_slides(course_id, username)
    if not slides:
        return "No slides found for this course."
    if not (0 <= index < len(slides)):
        return "Invalid slide number."
    set_current_slide(assistant_id, index)
    emit_slide_change(assistant_id, index)
    return "Here is the transcript you would like to read for the specified slide: " + slides[index]['transcript']
80 |
81 |
def go_to_viewing_slide(assistant_id, course_id, username):
    """Sync the assistant to the slide the user is currently viewing.

    Reads the user's viewing position from Redis, stores it as the
    assistant's current slide, notifies the frontend, and returns the
    transcript for that slide.
    """
    print(f"Going to viewing slide for {assistant_id}")
    slides = get_slides(course_id, username)
    if not slides:
        return "No slides found for this course."

    # Get the viewing slide position (0 when never recorded).
    raw = redis_client.get(f"viewing_slide_position:{assistant_id}")
    viewing_position = int(raw) if raw else 0

    # Bug fix: a stale position from a longer deck made
    # slides[viewing_position] raise IndexError; clamp to the first slide,
    # mirroring the guard in go_to_starting_slide.
    if viewing_position >= len(slides):
        viewing_position = 0

    # Update the assistant's current slide position.
    set_current_slide(assistant_id, viewing_position)

    # Emit event to frontend via Socket.IO.
    emit_slide_change(assistant_id, viewing_position)

    return "Here is the transcript for the slide you're currently viewing: " + slides[viewing_position]['transcript']
99 |
100 |
def go_to_starting_slide(assistant_id, course_id, username):
    """Open the course at the learner's last saved slide (or the first slide).

    The last position comes from S3 and may be invalid for this deck
    (course re-uploaded with fewer slides, missing value), so it is
    validated before being used as an index.
    """
    print(f"Going to starting slide for {assistant_id}")
    slides = get_slides(course_id, username)
    if not slides:
        return "No slides found for this course."

    # Get the last saved position from S3
    starting_position = s3_utils.get_assistant_last_position(username, course_id)

    # Ensure position is valid for this course. The original only checked
    # the upper bound; None or negative values from storage would crash or
    # index from the end of the deck.
    if starting_position is None or starting_position < 0 or starting_position >= len(slides):
        starting_position = 0  # Default to first slide if position is invalid

    # Save the current position in Redis
    set_current_slide(assistant_id, starting_position)

    # Emit event to frontend via Socket.IO
    emit_slide_change(assistant_id, starting_position)

    # Customize the greeting: fresh start vs. resuming a previous session.
    if starting_position == 0:
        return f"Welcome to this course. Let's start with the first slide: {slides[starting_position]['transcript']}"
    else:
        return f"Welcome back to the course. Let's continue from where you left off. Slide {starting_position + 1}: {slides[starting_position]['transcript']}"
125 |
126 |
--------------------------------------------------------------------------------
/backend/utils/user_utils.py:
--------------------------------------------------------------------------------
1 | from base64 import b64decode
2 | import os
3 | import firebase_admin
4 | from firebase_admin import credentials, auth
5 | import logging
6 | from typing import Optional, Tuple, Dict, Any
7 | from .firebase_admin import verify_firebase_token
8 | from flask import request, jsonify, make_response
9 | import utils.s3_utils as s3_utils
10 |
11 | logger = logging.getLogger(__name__)
12 |
def get_current_user(request):
    """Resolve the requesting user's email, or None.

    Resolution order: CORS preflight requests short-circuit to None,
    then the 'user_email' cookie, then a Firebase ID token taken from
    the Authorization header. Any failure yields None.
    """
    try:
        # CORS preflight carries no credentials; let it through.
        if request.method == 'OPTIONS':
            return None

        # Cheapest path: the email cookie set during verification.
        cookie_email = request.cookies.get('user_email')
        if cookie_email:
            return cookie_email

        # Fall back to the Authorization header.
        auth_header = request.headers.get('Authorization')
        if not auth_header:
            logger.warning("No authorization header provided")
            return None

        # Strip an optional 'Bearer ' scheme prefix.
        token = auth_header[7:] if auth_header.startswith('Bearer ') else auth_header

        # Validate the Firebase token and pull the email claim out of it.
        decoded_token = verify_firebase_token(token)
        if not decoded_token:
            logger.warning("Invalid token provided")
            return None

        token_email = decoded_token.get('email')
        if not token_email:
            logger.warning("No email found in token")
            return None

        return token_email
    except Exception as e:
        logger.error(f"Error getting current user: {str(e)}")
        return None
52 |
def get_user_folder(upload_folder, username):
    """Return the per-user directory path inside the upload folder."""
    user_folder = os.path.join(upload_folder, username)
    return user_folder
55 |
def _apply_cors_headers(response):
    """Attach the CORS headers this endpoint needs for credentialed requests."""
    response.headers.add('Access-Control-Allow-Origin', request.headers.get('Origin', 'http://localhost:3000'))
    response.headers.add('Access-Control-Allow-Methods', 'POST, OPTIONS')
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type, Authorization')
    response.headers.add('Access-Control-Allow-Credentials', 'true')
    return response


def handle_verify_user():
    """
    Handle the verify-user route request.

    Verifies the Firebase ID token from the Authorization header,
    provisions the user's S3 folder, and returns a response that sets
    the 'user_email' cookie later read by get_current_user.

    Returns:
        Flask response object
    """
    # CORS preflight: reply with headers only. (Header construction was
    # previously duplicated here and below; now shared via _apply_cors_headers.)
    if request.method == 'OPTIONS':
        return _apply_cors_headers(make_response())

    try:
        # Get the token from the Authorization header
        auth_header = request.headers.get('Authorization')
        if not auth_header:
            logger.warning("No authorization header provided")
            return jsonify({'error': 'No authorization header provided'}), 401

        # Remove 'Bearer ' prefix if present
        if auth_header.startswith('Bearer '):
            token = auth_header[7:]
        else:
            token = auth_header

        # Verify Firebase token
        decoded_token = verify_firebase_token(token)
        if not decoded_token:
            logger.warning("Invalid token provided")
            return jsonify({'error': 'Invalid token'}), 401

        user_email = decoded_token.get('email')
        if not user_email:
            logger.warning("No email found in token")
            return jsonify({'error': 'No email found in token'}), 401

        # Create user folder in S3 directly using s3_utils
        try:
            logger.info(f"Creating S3 folder for user {user_email}")
            success = s3_utils.check_and_create_user_folder(user_email)
            if not success:
                logger.error(f"Failed to create S3 folder for user {user_email}")
                return jsonify({'error': 'Failed to initialize user storage'}), 500
            logger.info(f"Successfully created S3 folder for user {user_email}")
        except Exception as e:
            logger.error(f"Error creating S3 folder for user {user_email}: {str(e)}")
            return jsonify({'error': f'Error creating user storage: {str(e)}'}), 500

        # Prepare the response data
        response_data = {
            'message': 'User verified successfully',
            'email': user_email,
            'courses': []  # Return empty list since we're not fetching courses
        }

        # Build the response and attach the shared CORS headers.
        response = _apply_cors_headers(make_response(jsonify(response_data)))

        # Set the email cookie read back by get_current_user.
        # NOTE(review): consider secure=True once the app is HTTPS-only.
        response.set_cookie(
            'user_email',
            user_email,
            httponly=True,
            samesite='Lax',
            path='/',
            max_age=86400  # 24 hours
        )

        logger.info(f"Returning response with cookie for {user_email}")
        return response

    except Exception as e:
        logger.error(f"Error in verify_user: {str(e)}")
        return jsonify({'error': str(e)}), 500
139 |
140 |
141 |
142 |
--------------------------------------------------------------------------------
/backend/utils/load_and_process_index.py:
--------------------------------------------------------------------------------
1 | import tiktoken
2 | import openai
3 | # import faiss # Comment out the direct import
4 | import numpy as np
5 | from difflib import SequenceMatcher
6 | import time
7 | import io
8 | import json
9 | import boto3
10 | from utils.s3_utils import (
11 | get_course_s3_folder,
12 | upload_json_to_s3,
13 | upload_faiss_index_to_s3,
14 | ACCESS_KEY,
15 | SECRET_KEY,
16 | REGION_NAME
17 | )
18 |
19 | # Try to import faiss, make it optional
20 | try:
21 | import faiss
22 | FAISS_AVAILABLE = True
23 | except ImportError:
24 | print("Warning: FAISS not available in load_and_process_index. Vector index functionality will be disabled.")
25 | FAISS_AVAILABLE = False
26 | faiss = None
27 |
28 |
def process_course_context_s3(bucket_name, username, coursename, api_key, max_tokens=2000):
    """Standalone function to process course files from S3 and upload indices back to S3.

    Pipeline: read every .txt under the course prefix, split the combined text
    into <= max_tokens token chunks, embed the chunks into a FAISS vector index
    (when FAISS is installed), build a quoted-line -> chunk inverted index, and
    upload chunks/FAISS index/inverted index back to the course prefix.

    Returns:
        True on success, False when the course files cannot be loaded.
    """
    start_time = time.time()

    # Initialize S3 client
    s3 = boto3.client('s3',
                      aws_access_key_id=ACCESS_KEY,
                      aws_secret_access_key=SECRET_KEY,
                      region_name=REGION_NAME)

    # 1. Load and combine text files from S3
    course_prefix = get_course_s3_folder(username, coursename)
    all_text = []

    try:
        # List and read text files.
        # NOTE(review): list_objects_v2 returns at most 1000 keys per call;
        # courses with more objects would need paginator support.
        listing = s3.list_objects_v2(Bucket=bucket_name, Prefix=course_prefix)
        for obj in listing.get('Contents', []):
            if obj['Key'].endswith('.txt'):
                file_obj = s3.get_object(Bucket=bucket_name, Key=obj['Key'])
                all_text.append(file_obj['Body'].read().decode('utf-8'))

        if not all_text:
            raise ValueError("No text files found in course directory")

        combined_text = '\n'.join(all_text)
        del all_text  # Free memory early

    except Exception as e:
        print(f"Error loading files from S3: {str(e)}")
        return False

    # 2. Split into chunks with memory efficiency
    encoder = tiktoken.encoding_for_model("gpt-4")
    chunks = []
    current_chunk = []
    current_token_count = 0

    for line in combined_text.split('\n'):
        line_tokens = len(encoder.encode(line + '\n'))
        if current_token_count + line_tokens > max_tokens:
            if current_chunk:
                chunks.append('\n'.join(current_chunk))
                current_chunk = []
                current_token_count = 0
            # Handle long lines that exceed max_tokens by repeatedly halving
            # (character-wise, so each emitted half is only approximately
            # within the token budget).
            while line_tokens > max_tokens:
                chunks.append(line[:len(line) // 2])
                line = line[len(line) // 2:]
                line_tokens = len(encoder.encode(line + '\n'))
            current_chunk.append(line)
            current_token_count = line_tokens
        else:
            current_chunk.append(line)
            current_token_count += line_tokens

    if current_chunk:
        chunks.append('\n'.join(current_chunk))
    del combined_text  # Free memory

    # 3. Generate embeddings and build FAISS index (only if FAISS is available)
    faiss_index = None
    if FAISS_AVAILABLE:
        dimension = 3072  # text-embedding-3-large dimension
        faiss_index = faiss.IndexFlatL2(dimension)
        embeddings = []

        openai_client = openai.OpenAI(api_key=api_key)

        # Process chunks in batches to control memory usage
        batch_size = 100
        for i in range(0, len(chunks), batch_size):
            batch = chunks[i:i + batch_size]
            try:
                embed_response = openai_client.embeddings.create(
                    model="text-embedding-3-large",
                    input=batch
                )
                embeddings.extend(e.embedding for e in embed_response.data)
                # Release the API response before the next batch.
                del embed_response
            except Exception as e:
                # BUGFIX: the previous version ran `del response` unconditionally
                # after this try/except, which deleted the earlier S3 listing on
                # the first failed API call and raised NameError on later ones.
                # Failed batches are padded with zero vectors instead.
                print(f"Error generating embeddings: {str(e)}")
                embeddings.extend([np.zeros(dimension).tolist()] * len(batch))

            # Clear memory between batches
            del batch

        # Convert to numpy array and add to FAISS
        embeddings_np = np.array(embeddings).astype('float32')
        faiss_index.add(embeddings_np)
        del embeddings
        del embeddings_np
    else:
        print("Warning: FAISS not available. Skipping vector index creation.")

    # 4. Build inverted index mapping quoted lines to their chunk index
    # (later occurrences of the same quote overwrite earlier ones).
    inverted_index = {}
    for i, chunk in enumerate(chunks):
        quotes = [line for line in chunk.split('\n') if line.startswith('"')]
        for quote in quotes:
            inverted_index[quote.lower()] = i

    # 5. Upload all artifacts to S3
    base_key = get_course_s3_folder(username, coursename)

    # Upload chunks
    upload_json_to_s3(chunks, bucket_name, f"{base_key}chunks.json")
    del chunks

    # Upload FAISS index (only if available)
    if FAISS_AVAILABLE and faiss_index is not None:
        upload_faiss_index_to_s3(faiss_index, bucket_name, f"{base_key}faiss.index")
        del faiss_index
    else:
        print("Warning: FAISS index not created or FAISS not available. Skipping FAISS index upload.")

    # Upload inverted index
    upload_json_to_s3(inverted_index, bucket_name, f"{base_key}inverted_index.json")
    del inverted_index

    print(f"Total processing time: {time.time() - start_time:.2f} seconds")
    return True
152 |
--------------------------------------------------------------------------------
/app/ai-tutor/page.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import { useState, useRef, useEffect } from "react"
4 | import { MainNav } from "@/components/main-nav"
5 | import { Button } from "@/components/ui/button"
6 | import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"
7 | import { Input } from "@/components/ui/input"
8 | import { Textarea } from "@/components/ui/textarea"
9 | import { Bot, Send } from 'lucide-react'
10 | import { ScrollArea } from "@/components/ui/scroll-area"
11 |
/** One chat bubble in the AI-tutor conversation. */
interface Message {
  // Unique per message; Date.now() is used when appending new messages.
  id: number
  // Message author: the human learner or the AI tutor.
  sender: "user" | "ai"
  text: string
  // Client-side creation time; rendered via toLocaleTimeString().
  timestamp: Date
}
18 |
19 | export default function AITutorPage() {
20 | const [messages, setMessages] = useState([
21 | {
22 | id: 1,
23 | sender: "ai",
24 | text: "Hello! I'm your AI tutor. How can I assist you today?",
25 | timestamp: new Date()
26 | }
27 | ]);
28 | const [inputMessage, setInputMessage] = useState("");
29 | const [isTyping, setIsTyping] = useState(false);
30 |
31 | const handleSendMessage = async () => {
32 | if (!inputMessage.trim()) return;
33 |
34 | // Add user message to chat
35 | const userMessage: Message = {
36 | id: Date.now(),
37 | sender: "user",
38 | text: inputMessage.trim(),
39 | timestamp: new Date()
40 | };
41 | setMessages(prev => [...prev, userMessage]);
42 | setInputMessage(""); // Clear input
43 | setIsTyping(true);
44 |
45 | try {
46 | // Send message to AI tutor endpoint
47 | const response = await fetch("http://127.0.0.1:5000/api/aitutor-response", {
48 | method: "POST",
49 | headers: { "Content-Type": "application/json" },
50 | body: JSON.stringify({ input: userMessage.text }),
51 | });
52 |
53 | if (!response.ok) {
54 | throw new Error('Network response was not ok');
55 | }
56 |
57 | const data = await response.text(); // Changed from response.json() since your endpoint returns text directly
58 |
59 | // Add AI response to chat
60 | const aiMessage: Message = {
61 | id: Date.now(),
62 | sender: "ai",
63 | text: data,
64 | timestamp: new Date()
65 | };
66 | setMessages(prev => [...prev, aiMessage]);
67 | } catch (error) {
68 | console.error("Error:", error);
69 | // Add error message to chat
70 | const errorMessage: Message = {
71 | id: Date.now(),
72 | sender: "ai",
73 | text: "Sorry, I encountered an error. Please try again.",
74 | timestamp: new Date()
75 | };
76 | setMessages(prev => [...prev, errorMessage]);
77 | } finally {
78 | setIsTyping(false);
79 | }
80 | };
81 |
82 | return (
83 |
84 |
85 |
86 | AI Tutor
87 |
88 |
89 |
90 | Chat with AI Tutor
91 |
92 |
93 |
94 |
95 | {messages.map((message) => (
96 |
101 | {message.sender === "ai" && (
102 |
103 | )}
104 |
108 |
{message.text}
109 |
110 | {message.timestamp.toLocaleTimeString()}
111 |
112 |
113 |
114 | ))}
115 | {isTyping && (
116 |
122 | )}
123 |
124 |
125 |
126 | setInputMessage(e.target.value)}
130 | onKeyDown={(e) => {
131 | if (e.key === 'Enter' && !e.shiftKey) {
132 | e.preventDefault();
133 | handleSendMessage();
134 | }
135 | }}
136 | />
137 |
142 |
143 | Send message
144 |
145 |
146 |
147 |
148 |
149 |
150 | Study Notes
151 |
152 |
153 |
157 |
158 |
159 |
160 |
161 |
162 | );
163 | }
--------------------------------------------------------------------------------
/app/my-publish/publish-details/page.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useRouter } from "next/navigation";
4 | import { BookOpen, PlusCircle, ChevronRight } from "lucide-react";
5 | import { HeaderOnlyLayout } from "@/components/layout/HeaderOnlyLayout";
6 | import { PublishSidebar } from "@/components/my-publish/publish-page-sidebar";
7 | import { ScrollArea } from "@/components/ui/scroll-area";
8 | import { motion } from "framer-motion";
9 |
10 | export default function PublishDetailsPage() {
11 | const router = useRouter();
12 |
13 | const handleExistingCoursesClick = () => {
14 | router.push("/my-publish/publish-details/publish-from-existing-courses");
15 | };
16 |
17 | const handleCreateCourseClick = () => {
18 | // For now, no redirection - will be implemented later
19 | console.log("Create new course clicked");
20 | };
21 |
22 | return (
23 |
24 |
25 | {/* Left Sidebar */}
26 |
27 |
28 | {/* Main Content Area */}
29 |
30 |
31 |
32 |
33 |
Publish Content
34 |
Choose how you want to publish your AI courses online. We support multiple publication methods to fit your needs.
35 |
36 |
37 |
38 | {/* Option 1: Publish from existing courses - Fancy Design */}
39 |
45 |
46 |
47 |
48 |
53 |
54 |
55 | Publish from Existing Courses
56 |
57 |
58 | Already have courses created? You can select from your existing courses and publish to the platform. This is perfect for repurposing your best material.
59 |
60 |
61 | Browse your courses
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 | {/* Option 2: Create new course - Fancy Design */}
71 |
77 |
78 |
79 |
80 |
85 |
86 |
87 | Create a New Course
88 |
89 |
90 | Are you a teacher looking to create fresh content? Get started with our intuitive course creation tools. Build engaging lessons, add interactive elements, and publish with ease.
91 |
92 |
93 | Start creating
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 | );
108 | }
109 |
--------------------------------------------------------------------------------
/app/my-publish/my-channel/page.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import { HeaderOnlyLayout } from "@/components/layout/HeaderOnlyLayout"
4 | import { ScrollArea } from "@/components/ui/scroll-area"
5 | import { Input } from "@/components/ui/input"
6 | import { Button } from "@/components/ui/button"
7 | import { Search, Edit, Trash2, Eye, BookOpen, BarChart } from "lucide-react"
8 | import { PublishSidebar } from "@/components/my-publish/publish-page-sidebar"
9 |
10 | // Sample course data with added topics
const courses = [
  {
    id: 1,
    title: "Introduction to AI",
    // Enrolled-student count shown in the stats row.
    students: 2845,
    author: "John Doe",
    // ISO "yyyy-MM-dd"; formatted for display by formatDate().
    createdAt: "2024-01-15",
    tags: ["AI", "Machine Learning", "Artificial Intelligence"]
  },
  {
    id: 2,
    title: "Machine Learning Fundamentals",
    students: 1256,
    author: "Jane Smith",
    createdAt: "2024-02-20",
    tags: ["Machine Learning", "Data Science", "Artificial Intelligence"]
  },
  {
    id: 3,
    title: "Deep Learning with Python",
    students: 987,
    author: "David Johnson",
    createdAt: "2024-03-10",
    tags: ["Deep Learning", "Python", "Artificial Intelligence"]
  },
  {
    id: 4,
    title: "Neural Networks and Applications",
    students: 1532,
    author: "Emily Wilson",
    createdAt: "2024-04-05",
    tags: ["Neural Networks", "Computer Science", "Artificial Intelligence"]
  }
]
45 |
46 | export default function MyChannelPage() {
47 | // Function to get initials from title
48 | const getInitials = (title: string): string => {
49 | return title
50 | .split(' ')
51 | .slice(0, 2)
52 | .map(word => word[0])
53 | .join('')
54 | .toUpperCase();
55 | };
56 |
57 | // Function to format date
58 | const formatDate = (dateString: string): string => {
59 | const date = new Date(dateString);
60 | return date.toLocaleDateString('en-US', {
61 | year: 'numeric',
62 | month: 'short',
63 | day: 'numeric'
64 | });
65 | };
66 |
67 | // Function to handle tags display (show max 2 tags + count for others)
68 | const renderTags = (tags: string[]) => {
69 | if (tags.length <= 2) {
70 | return tags.map((tag, idx) => (
71 |
72 | {tag}
73 |
74 | ));
75 | } else {
76 | return (
77 | <>
78 | {tags.slice(0, 2).map((tag, idx) => (
79 |
80 | {tag}
81 |
82 | ))}
83 |
84 | +{tags.length - 2} more
85 |
86 | >
87 | );
88 | }
89 | };
90 |
91 | return (
92 |
93 |
94 | {/* Left Sidebar */}
95 |
96 |
97 | {/* Main Content Area */}
98 |
99 |
100 |
101 | {/* Search and Filter Row */}
102 |
103 |
My Channel
104 |
105 |
106 |
110 |
111 |
112 |
113 | {/* Course List */}
114 |
115 | {courses.map(course => (
116 |
117 |
118 | {/* Thumbnail (full height) */}
119 |
120 |
121 |
122 |
123 | {/* Content */}
124 |
125 | {/* Title and date */}
126 |
127 |
128 |
{course.title}
129 | {formatDate(course.createdAt)}
130 |
131 |
132 |
133 | {renderTags(course.tags)}
134 |
135 |
136 |
137 | {/* Stats and actions */}
138 |
139 |
140 |
141 |
142 | {course.students}
143 |
144 |
|
145 |
146 | {course.author}
147 |
148 |
149 |
150 |
151 |
152 |
153 |
154 |
155 |
156 |
157 |
158 |
159 |
160 |
161 |
162 | ))}
163 |
164 |
165 |
166 |
167 |
168 |
169 | )
170 | }
--------------------------------------------------------------------------------
/app/schedule/page.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import { useState } from "react"
4 | import { ChevronLeft, ChevronRight, Plus, CalendarIcon } from 'lucide-react'
5 | import { format, addDays, subDays, parse, differenceInMinutes, startOfDay } from "date-fns"
6 |
7 | import { MainNav } from "@/components/main-nav"
8 | import { CourseDetails } from "@/components/course-details"
9 | import { Button } from "@/components/ui/button"
10 | import { ScrollArea } from "@/components/ui/scroll-area"
11 | import { Input } from "@/components/ui/input"
12 | import { Calendar } from "@/components/ui/calendar"
13 | import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover"
14 |
15 | // Time slots from 8:00 AM to 8:00 PM
16 | const timeSlots = Array.from({ length: 13 }, (_, i) => {
17 | const hour = i + 8
18 | return `${hour.toString().padStart(2, "0")}:00`
19 | })
20 |
// Demo appointment: seeds the schedule state and is spread as the template
// for new appointments in handleNewAppointment.
const sampleCourse = {
  id: "1",
  title: "CS240 AI Tutor",
  tutor: "AI Tutor",
  email: "cs240.tutor@example.com",
  zoomLink: "https://zoom.us/j/123456789",
  date: "2024-01-20",
  startTime: "08:00",
  endTime: "09:10",
  description: "We will talk about AVL trees, sorting algorithms, and binary search trees.",
}
32 |
/** A scheduled tutoring session displayed on the day view. */
interface Appointment {
  id: string
  title: string
  tutor: string
  email: string
  zoomLink: string
  // "yyyy-MM-dd" — compared against the selected day via date-fns format().
  date: string
  // "HH:mm" 24-hour times within the 08:00–20:00 grid.
  startTime: string
  endTime: string
  description: string
}
44 |
45 | export default function SchedulePage() {
46 | const [selectedCourse, setSelectedCourse] = useState(null)
47 | const [selectedDate, setSelectedDate] = useState(new Date())
48 | const [appointments, setAppointments] = useState([sampleCourse])
49 |
  // Step the visible day backward/forward by one calendar day.
  const handlePrevDay = () => setSelectedDate(subDays(selectedDate, 1))
  const handleNextDay = () => setSelectedDate(addDays(selectedDate, 1))
52 |
53 | const handleNewAppointment = () => {
54 | const newAppointment: Appointment = {
55 | ...sampleCourse,
56 | id: `appointment-${appointments.length + 1}`,
57 | title: `New Appointment ${appointments.length + 1}`,
58 | date: format(selectedDate, "yyyy-MM-dd"),
59 | startTime: "10:00",
60 | endTime: "11:00",
61 | }
62 | setAppointments([...appointments, newAppointment])
63 | }
64 |
65 | const getAppointmentStyle = (appointment: Appointment) => {
66 | const startTime = parse(appointment.startTime, "HH:mm", new Date())
67 | const endTime = parse(appointment.endTime, "HH:mm", new Date())
68 | const dayStart = parse("08:00", "HH:mm", new Date())
69 |
70 | const startMinutes = differenceInMinutes(startTime, dayStart)
71 | const duration = differenceInMinutes(endTime, startTime)
72 |
73 | const topPercentage = (startMinutes / (12 * 60)) * 100
74 | const heightPercentage = (duration / (12 * 60)) * 100
75 |
76 | return {
77 | top: `${topPercentage}%`,
78 | height: `${heightPercentage}%`,
79 | }
80 | }
81 |
82 | return (
83 |
84 |
85 |
86 |
87 |
88 |
89 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 | {format(selectedDate, "MMMM d, yyyy")}
102 |
103 |
104 |
105 | date && setSelectedDate(date)}
109 | initialFocus
110 | />
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 | New Appointment
121 |
122 |
123 |
124 |
125 | {timeSlots.map((timeSlot, index) => (
126 |
131 |
132 | {timeSlot}
133 |
134 |
135 | ))}
136 | {appointments
137 | .filter(appointment => appointment.date === format(selectedDate, "yyyy-MM-dd"))
138 | .map((appointment) => (
139 |
setSelectedCourse(appointment)}
142 | className="absolute left-24 right-4 rounded-lg bg-primary/10 p-2 text-left hover:bg-primary/20"
143 | style={getAppointmentStyle(appointment)}
144 | >
145 | {appointment.title}
146 |
147 | {appointment.startTime} - {appointment.endTime}
148 |
149 |
150 | ))
151 | }
152 |
153 |
154 |
155 | {selectedCourse && (
156 |
setSelectedCourse(null)}
160 | />
161 | )}
162 |
163 |
164 | )
165 | }
166 |
167 |
--------------------------------------------------------------------------------
/app/globals.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 | @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800;900&display=swap');
5 |
/* Global typeface (Inter, imported above) and horizontal-scroll guard. */
body {
  font-family: 'Inter', sans-serif;
  overflow-x: hidden;
}
10 |
11 | @layer utilities {
12 | .text-balance {
13 | text-wrap: balance;
14 | }
15 |
16 | .glass-panel {
17 | @apply bg-white/20 backdrop-blur-[8px] border border-white/20 shadow-md;
18 | }
19 |
20 | .dark-glass-panel {
21 | @apply bg-gray-900/20 backdrop-blur-[8px] border border-gray-700/20 shadow-md;
22 | }
23 |
24 | .emerald-glow {
25 | text-shadow: 0 0 10px rgba(16, 185, 129, 0.6);
26 | }
27 |
28 | .emerald-progress {
29 | filter: drop-shadow(0 0 5px rgba(16, 185, 129, 0.4));
30 | }
31 |
32 | .progress-ring {
33 | transition: stroke-dashoffset 0.5s ease;
34 | }
35 |
36 | .bg-circuit-pattern {
37 | background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 304 304' width='304' height='304'%3E%3Cpath fill='%23000000' fill-opacity='0.08' d='M44.1 224a5 5 0 1 1 0 2H0v-2h44.1zm160 48a5 5 0 1 1 0 2H82v-2h122.1zm57.8-46a5 5 0 1 1 0-2H304v2h-42.1zm0 16a5 5 0 1 1 0-2H304v2h-42.1zm6.2-114a5 5 0 1 1 0 2h-86.2a5 5 0 1 1 0-2h86.2zm-256-48a5 5 0 1 1 0 2H0v-2h12.1zm185.8 34a5 5 0 1 1 0-2h86.2a5 5 0 1 1 0 2h-86.2zM258 12.1a5 5 0 1 1-2 0V0h2v12.1zm-64 208a5 5 0 1 1-2 0v-54.2a5 5 0 1 1 2 0v54.2zm48-198.2V80h62v2h-64V21.9a5 5 0 1 1 2 0zm16 16V64h46v2h-48V37.9a5 5 0 1 1 2 0zm-128 96V208h16v12.1a5 5 0 1 1-2 0V210h-16v-76.1a5 5 0 1 1 2 0zm-5.9-21.9a5 5 0 1 1 0 2H114v48H85.9a5 5 0 1 1 0-2H112v-48h12.1zm-6.2 130a5 5 0 1 1 0-2H176v-74.1a5 5 0 1 1 2 0V242h-60.1zm-16-64a5 5 0 1 1 0-2H114v48h10.1a5 5 0 1 1 0 2H112v-48h-10.1zM66 284.1a5 5 0 1 1-2 0V274H50v30h-2v-32h18v12.1zM236.1 176a5 5 0 1 1 0 2H226v94h48v32h-2v-30h-48v-98h12.1zm25.8-30a5 5 0 1 1 0-2H274v44.1a5 5 0 1 1-2 0V146h-10.1zm-64 96a5 5 0 1 1 0-2H208v-80h16v-14h-42.1a5 5 0 1 1 0-2H226v18h-16v80h-12.1zm86.2-210a5 5 0 1 1 0 2H272V0h2v32h10.1zM98 101.9V146H53.9a5 5 0 1 1 0-2H96v-42.1a5 5 0 1 1 2 0zM53.9 34a5 5 0 1 1 0-2H80V0h2v34H53.9zm60.1 3.9V66H82v64H69.9a5 5 0 1 1 0-2H80V64h32V37.9a5 5 0 1 1 2 0zM101.9 82a5 5 0 1 1 0-2H128V37.9a5 5 0 1 1 2 0V82h-28.1zm16-64a5 5 0 1 1 0-2H146v44.1a5 5 0 1 1-2 0V18h-26.1zm102.2 270a5 5 0 1 1 0 2H98v14h-2v-16h124.1zM242 149.9V160h16v34h-16v62h48v48h-2v-46h-48v-66h16v-30h-16v-12.1a5 5 0 1 1 2 0zM53.9 18a5 5 0 1 1 0-2H64V2H48V0h18v18H53.9zm112 32a5 5 0 1 1 0-2H192V0h50v2h-48v48h-28.1zm-48-48a5 5 0 0 1-9.8-2h2.07a3 3 0 1 0 5.66 0H178v34h-18V21.9a5 5 0 1 1 2 0V32h14V2h-58.1zm0 96a5 5 0 1 1 0-2H137l32-32h39V21.9a5 5 0 1 1 2 0V66h-40.17l-32 32H117.9zm28.1 90.1a5 5 0 1 1-2 0v-76.51L175.59 80H224V21.9a5 5 0 1 1 2 0V82h-49.59L146 112.41v75.69zm16 32a5 5 0 1 1-2 0v-99.51L184.59 96H300.1a5 5 0 0 1 3.9-3.9v2.07a3 3 0 0 0 0 5.66v2.07a5 5 0 0 1-3.9-3.9H185.41L162 
121.41v98.69zm-144-64a5 5 0 1 1-2 0v-3.51l48-48V48h32V0h2v50H66v55.41l-48 48v2.69zM50 53.9v43.51l-48 48V208h26.1a5 5 0 1 1 0 2H0v-65.41l48-48V53.9a5 5 0 1 1 2 0zm-16 16V89.41l-34 34v-2.82l32-32V69.9a5 5 0 1 1 2 0zM12.1 32a5 5 0 1 1 0 2H9.41L0 43.41V40.6L8.59 32h3.51zm265.8 18a5 5 0 1 1 0-2h18.69l7.41-7.41v2.82L297.41 50H277.9zm-16 160a5 5 0 1 1 0-2H288v-71.41l16-16v2.82l-14 14V210h-28.1zm-208 32a5 5 0 1 1 0-2H64v-22.59L40.59 194H21.9a5 5 0 1 1 0-2H41.41L66 216.59V242H53.9zm150.2 14a5 5 0 1 1 0 2H96v-56.6L56.6 162H37.9a5 5 0 1 1 0-2h19.5L98 200.6V256h106.1zm-150.2 2a5 5 0 1 1 0-2H80v-46.59L48.59 178H21.9a5 5 0 1 1 0-2H49.41L82 208.59V258H53.9zM34 39.8v1.61L9.41 66H0v-2h8.59L32 40.59V0h2v39.8zM2 300.1a5 5 0 0 1 3.9 3.9H3.83A3 3 0 0 0 0 302.17V256h18v48h-2v-46H2v42.1zM34 241v63h-2v-62H0v-2h34v1zM17 18H0v-2h16V0h2v18h-1zm273-2h14v2h-16V0h2v16zm-32 273v15h-2v-14h-14v14h-2v-16h18v1zM0 92.1A5.02 5.02 0 0 1 6 97a5 5 0 0 1-6 4.9v-2.07a3 3 0 1 0 0-5.66V92.1zM80 272h2v32h-2v-32zm37.9 32h-2.07a3 3 0 0 0-5.66 0h-2.07a5 5 0 0 1 9.8 0zM5.9 0A5.02 5.02 0 0 1 0 5.9V3.83A3 3 0 0 0 3.83 0H5.9zm294.2 0h2.07A3 3 0 0 0 304 3.83V5.9a5 5 0 0 1-3.9-5.9zm3.9 300.1v2.07a3 3 0 0 0-1.83 1.83h-2.07a5 5 0 0 1 3.9-3.9zM97 100a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0-16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-48 32a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm32 48a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-16 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm32-16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0-32a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16 32a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm32 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0-16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-16-64a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16 0a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16 96a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16-144a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0 32a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16-32a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16-16a3 3 0 1 0 0-6 3 3 
0 0 0 0 6zm-96 0a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16-32a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm96 0a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-16-64a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16-16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-32 0a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0-16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-16 0a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-16 0a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-16 0a3 3 0 1 0 0-6 3 3 0 0 0 0 6zM49 36a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-32 0a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm32 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zM33 68a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16-48a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0 240a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16 32a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-16-64a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-16-32a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm80-176a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16 0a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-16-16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm32 48a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16-16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0-32a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm112 176a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm-16 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zM17 180a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0 16a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm0-32a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16 0a3 3 0 1 0 0-6 3 3 0 0 0 0 6zM17 84a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm32 64a3 3 0 1 0 0-6 3 3 0 0 0 0 6zm16-16a3 3 0 1 0 0-6 3 3 0 0 0 0 6z'%3E%3C/path%3E%3C/svg%3E");
38 | }
39 | }
40 |
@layer base {
  /*
   * Design tokens consumed by tailwind.config.ts via hsl(var(--token)).
   * Values are bare HSL triples (hue saturation% lightness%) with no
   * hsl() wrapper, so Tailwind can compose them with opacity modifiers.
   */
  :root {
    --background: 0 0% 98%;
    --foreground: 222 47% 11%;
    --card: 0 0% 100%;
    --card-foreground: 222 47% 11%;
    --popover: 0 0% 100%;
    --popover-foreground: 222 47% 11%;
    --primary: 160 84% 39%;
    --primary-foreground: 210 40% 98%;
    --secondary: 210 40% 96.1%;
    --secondary-foreground: 222 47% 11%;
    --muted: 210 40% 96.1%;
    --muted-foreground: 215.4 16.3% 46.9%;
    --accent: 210 40% 96.1%;
    --accent-foreground: 222 47% 11%;
    --destructive: 0 84.2% 60.2%;
    --destructive-foreground: 210 40% 98%;
    --border: 214.3 31.8% 91.4%;
    --input: 214.3 31.8% 91.4%;
    --ring: 221.2 83.2% 53.3%;
    /* Base corner radius; the borderRadius scale in tailwind.config.ts
       derives lg/md/sm from this value. */
    --radius: 0.5rem;
    /* Accent cycle — presumably consumed by the `rainbow` animation
       utilities in tailwind.config.ts; confirm usage. */
    --color-1: 0 100% 63%;
    --color-2: 270 100% 63%;
    --color-3: 210 100% 63%;
    --color-4: 195 100% 63%;
    --color-5: 90 100% 63%;
  }
  /* Dark-theme overrides, activated by adding the `dark` class to an
     ancestor (tailwind.config.ts uses darkMode: ["class"]). */
  .dark {
    --background: 222 47% 11%;
    --foreground: 210 40% 98%;
    --card: 222 47% 11%;
    --card-foreground: 210 40% 98%;
    --popover: 222 47% 11%;
    --popover-foreground: 210 40% 98%;
    --primary: 160 84% 39%;
    --primary-foreground: 222 47% 11%;
    --secondary: 217.2 32.6% 17.5%;
    --secondary-foreground: 210 40% 98%;
    --muted: 217.2 32.6% 17.5%;
    --muted-foreground: 215 20.2% 65.1%;
    --accent: 217.2 32.6% 17.5%;
    --accent-foreground: 210 40% 98%;
    --destructive: 0 62.8% 30.6%;
    --destructive-foreground: 210 40% 98%;
    --border: 217.2 32.6% 17.5%;
    --input: 217.2 32.6% 17.5%;
    --ring: 224.3 76.3% 48%;
    --color-1: 0 100% 63%;
    --color-2: 270 100% 63%;
    --color-3: 210 100% 63%;
    --color-4: 195 100% 63%;
    --color-5: 90 100% 63%;
  }
}
96 |
@layer base {
  /* Give every element the themed border color by default, and apply the
     themed background/foreground colors to the page body. */
  * {
    @apply border-border;
  }
  body {
    @apply bg-background text-foreground;
  }
}
105 |
--------------------------------------------------------------------------------
/app/my-publish/help-center/page.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import { HeaderOnlyLayout } from "@/components/layout/HeaderOnlyLayout"
4 | import { ScrollArea } from "@/components/ui/scroll-area"
5 | import { PublishSidebar } from "@/components/my-publish/publish-page-sidebar"
6 | import { Button } from "@/components/ui/button"
7 | import { Input } from "@/components/ui/input"
8 | import { Search, BookOpen, MessageCircle, PlayCircle, ChevronRight, HelpCircle } from "lucide-react"
9 |
10 | export default function HelpCenterPage() {
11 | return (
12 |
13 |
14 | {/* Left Sidebar */}
15 |
16 |
17 | {/* Main Content Area */}
18 |
19 |
20 |
21 | {/* Page Header */}
22 |
23 |
Help Center
24 |
Find answers to your questions and learn how to get the most out of your creator experience
25 |
26 | {/* Search Bar */}
27 |
28 |
29 |
33 |
34 | Search
35 |
36 |
37 |
38 |
39 | {/* Help Categories */}
40 |
41 |
42 |
43 |
44 |
45 |
Getting Started
46 |
Learn the basics of creating and publishing your content
47 |
48 | View Guides
49 |
50 |
51 |
52 |
53 |
56 |
Video Tutorials
57 |
Watch step-by-step tutorials for creating content
58 |
59 | Watch Videos
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
Contact Support
68 |
Get help from our support team for specific issues
69 |
70 | Get Help
71 |
72 |
73 |
74 |
75 | {/* FAQ Section */}
76 |
77 |
78 |
Frequently Asked Questions
79 |
Quick answers to common questions
80 |
81 |
82 |
83 | {[
84 | {
85 | question: "How do I publish my first course?",
86 | answer: "To publish your first course, go to the My Channel section, click on 'Create New Course', and follow the setup wizard to upload your content, set pricing, and publish."
87 | },
88 | {
89 | question: "When will I receive payment for my sales?",
90 | answer: "Payments are processed on the 1st of each month for the previous month's earnings, with a minimum payout threshold of $50. Funds typically arrive within 3-5 business days."
91 | },
92 | {
93 | question: "How can I promote my courses?",
94 | answer: "You can promote your courses through social media sharing, email marketing to your subscribers, collaborating with other creators, and utilizing our affiliate program."
95 | },
96 | {
97 | question: "What file formats are supported for uploads?",
98 | answer: "We support video uploads in MP4, MOV, and AVI formats with a maximum file size of 2GB per video. For documents, we support PDF, DOCX, and PPTX formats."
99 | },
100 | ].map((faq, index) => (
101 |
102 |
103 |
104 |
105 |
{faq.question}
106 |
{faq.answer}
107 |
108 |
109 |
110 | ))}
111 |
112 |
113 |
114 |
115 | View All FAQs
116 |
117 |
118 |
119 |
120 | {/* Contact Card */}
121 |
122 |
Still Need Help?
123 |
Our support team is ready to assist you with any questions
124 |
125 |
126 | Contact Support
127 |
128 |
129 | Submit a Request
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 | )
139 | }
--------------------------------------------------------------------------------
/app/my-publish/monetization-center/page.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import { HeaderOnlyLayout } from "@/components/layout/HeaderOnlyLayout"
4 | import { ScrollArea } from "@/components/ui/scroll-area"
5 | import { PublishSidebar } from "@/components/my-publish/publish-page-sidebar"
6 | import { Button } from "@/components/ui/button"
7 | import { DollarSign, TrendingUp, CreditCard, Wallet, Plus } from "lucide-react"
8 |
9 | export default function MonetizationCenterPage() {
10 | return (
11 |
12 |
13 | {/* Left Sidebar */}
14 |
15 |
16 | {/* Main Content Area */}
17 |
18 |
19 |
20 | {/* Page Header */}
21 |
22 |
Monetization Center
23 |
Manage your earnings and payment options
24 |
25 |
26 | {/* Revenue Overview */}
27 |
28 |
29 |
30 |
Revenue Overview
31 |
32 |
33 |
34 |
Current Balance
35 |
$1,245.89
36 |
37 |
38 |
This Month
39 |
$582.30
40 |
41 |
42 | 12% from last month
43 |
44 |
45 |
46 |
Next Payout
47 |
$1,245.89
48 |
Scheduled for Jun 30, 2023
49 |
50 |
51 |
52 |
53 |
54 | Withdraw Funds
55 |
56 |
57 |
58 |
59 | {/* Revenue Sources */}
60 |
61 |
Revenue Sources
62 |
63 |
64 |
65 |
66 |
Course Sales
67 |
68 |
69 | $845.40
70 | (68%)
71 |
72 |
73 |
74 |
75 |
76 |
Affiliate Commission
77 |
78 |
79 | $246.20
80 | (20%)
81 |
82 |
83 |
84 |
85 |
86 |
Consultation Fees
87 |
88 |
89 | $154.29
90 | (12%)
91 |
92 |
93 |
94 |
95 |
96 | {/* Payment Methods */}
97 |
98 |
99 |
Payment Methods
100 |
101 |
102 | Add New
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
Visa ending in 4242
113 |
Expires 05/2025
114 |
115 |
116 |
117 |
Edit
118 |
119 |
Default
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
PayPal
129 |
example@email.com
130 |
131 |
132 |
133 | Edit
134 | Make Default
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 | )
145 | }
--------------------------------------------------------------------------------
/app/my-courses/page.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import { useState, useEffect } from "react"
4 | import { MainLayout } from "@/components/layout/MainLayout"
5 | import { ScrollArea } from "@/components/ui/scroll-area"
6 | import { MyCoursesHeader } from "@/components/my-courses/my-courses-section-header"
7 | import { CourseCard } from "@/components/my-courses/CourseCard"
8 | import { LoadingCourseCard } from "@/components/my-courses/LoadingCourseCard"
9 | import { FullscreenButton } from "@/components/layout/fullscreen-button"
10 | import { CreateCourseCard } from "@/components/my-courses/create-course-card"
11 | import { StatusMessage, StatusType } from "@/components/ui/status-message"
12 | import { requestAssistant } from "@/components/my-courses/utils/vapi-api-endpoints"
13 | import { fetchCoursesAndDrafts } from "@/components/my-courses/utils/course-api-endpoints"
14 | import { useAuth } from "@/auth/firebase"
15 | import { useRouter } from "next/navigation"
16 |
/**
 * Shape of a course (or draft) entry rendered on the My Courses page.
 * Populated from fetchCoursesAndDrafts(); published courses and drafts
 * share this shape and are distinguished by `isDraft`.
 */
interface CourseData {
  /** Backend identifier for the course. */
  id: string;
  /** Display title shown on the course card. */
  title: string;
  /** Completion progress — presumably 0-100; TODO confirm with backend. */
  progress: number;
  /** Hours of course content completed so far. */
  hoursCompleted: number;
  /** Course author's display name. */
  author: string;
  /** Next section to resume, when the backend provides one. */
  nextSection?: string;
  /** True when the entry is an unpublished draft. */
  isDraft: boolean;
  /** Creation timestamp — presumably ISO 8601; TODO confirm format. */
  createdAt?: string;
}
27 |
28 | export default function CoursesPage() {
29 | const router = useRouter();
30 | const { user, loading: authLoading } = useAuth();
31 | const [isFullScreen, setIsFullScreen] = useState(false)
32 | const [courses, setCourses] = useState([])
33 | const [drafts, setDrafts] = useState([])
34 | const [isLoading, setIsLoading] = useState(true)
35 | const [error, setError] = useState(null)
36 | const [assistantRequests, setAssistantRequests] = useState>({})
37 | const [statusMessage, setStatusMessage] = useState(null)
38 | const [statusType, setStatusType] = useState('info')
39 | const [creatingCourse, setCreatingCourse] = useState<{ title: string, timestamp: number } | null>(null)
40 |
41 | // Fetch courses from backend
42 | const loadCourses = async () => {
43 | if (authLoading) {
44 | return; // Wait for auth state to be determined
45 | }
46 |
47 | if (!user) {
48 | router.push('/login');
49 | return;
50 | }
51 |
52 | try {
53 | setIsLoading(true);
54 | const idToken = await user.getIdToken();
55 | const coursesData = await fetchCoursesAndDrafts(idToken);
56 | setCourses(coursesData.courses);
57 | setDrafts(coursesData.drafts);
58 | } catch (error) {
59 | setError(error instanceof Error ? error.message : 'An unknown error occurred');
60 | console.error('Error fetching courses:', error);
61 | } finally {
62 | setIsLoading(false);
63 | }
64 | };
65 |
66 | // Initial load of courses
67 | useEffect(() => {
68 | loadCourses();
69 | }, [user, authLoading]);
70 |
71 | // Function to request an assistant for a course
72 | const handleRequestAssistant = async (courseId: string, courseTitle: string): Promise => {
73 | if (!user) {
74 | router.push('/login');
75 | return null;
76 | }
77 |
78 | // Mark this course as having a pending assistant request
79 | setAssistantRequests(prev => ({ ...prev, [courseId]: true }));
80 |
81 | try {
82 | // Show status message
83 | setStatusMessage("Preparing your AI assistant...");
84 | setStatusType('info');
85 | console.log("Preparing AI assistant for course:", courseTitle);
86 |
87 | const idToken = await user.getIdToken();
88 | // Request assistant from backend
89 | const assistantId = await requestAssistant(courseId, courseTitle, idToken);
90 |
91 | if (!assistantId) {
92 | throw new Error('Failed to create assistant');
93 | }
94 |
95 | // Clear status message
96 | setStatusMessage(null);
97 |
98 | // Return the assistant ID
99 | return assistantId;
100 | } catch (error) {
101 | console.error('Error creating assistant:', error);
102 | setStatusMessage("Failed to create AI assistant. You can still access the course.");
103 | setStatusType('error');
104 | return null;
105 | } finally {
106 | // Clear the pending state
107 | setAssistantRequests(prev => ({ ...prev, [courseId]: false }));
108 | }
109 | };
110 |
111 | // Function to toggle fullscreen mode
112 | const toggleFullScreen = () => {
113 | if (!document.fullscreenElement) {
114 | document.documentElement.requestFullscreen().catch(err => {
115 | console.log(`Error attempting to enable fullscreen: ${err.message}`);
116 | });
117 | setIsFullScreen(true);
118 | } else {
119 | if (document.exitFullscreen) {
120 | document.exitFullscreen();
121 | setIsFullScreen(false);
122 | }
123 | }
124 | };
125 |
126 | // Listen for fullscreen change events
127 | useEffect(() => {
128 | const handleFullscreenChange = () => {
129 | setIsFullScreen(!!document.fullscreenElement);
130 | };
131 |
132 | document.addEventListener('fullscreenchange', handleFullscreenChange);
133 | return () => {
134 | document.removeEventListener('fullscreenchange', handleFullscreenChange);
135 | };
136 | }, []);
137 |
138 | // Show loading state while auth is being determined
139 | if (authLoading) {
140 | return (
141 |
142 |
147 |
148 | );
149 | }
150 |
151 | return (
152 |
153 |
154 | {/* Status message display */}
155 |
setStatusMessage(null)}
160 | />
161 |
162 |
163 |
164 |
165 |
169 |
173 |
174 |
175 | {isLoading ? (
176 |
177 |
Loading courses...
178 |
179 | ) : error ? (
180 |
181 |
Error: {error}
182 |
183 | ) : (
184 |
185 |
186 | {creatingCourse && (
187 |
191 | )}
192 | {drafts.map((draft) => (
193 |
197 | ))}
198 | {courses.map((course) => (
199 |
204 | ))}
205 | { !creatingCourse && drafts.length === 0 && courses.length === 0 && (
206 |
207 |
You haven't created any courses yet.
208 |
209 | )}
210 |
211 | )}
212 |
213 |
214 |
215 |
216 | )
217 | }
218 |
219 |
--------------------------------------------------------------------------------
/tailwind.config.ts:
--------------------------------------------------------------------------------
import type { Config } from "tailwindcss";

/**
 * Tailwind configuration.
 *
 * Semantic color tokens resolve CSS custom properties through
 * hsl(var(--token)); the variables themselves are declared in
 * app/globals.css, so the palette switches at runtime when the `dark`
 * class is toggled.
 */
const config: Config = {
  // Class-based theme switching (`.dark` on an ancestor), not media-query.
  darkMode: ["class"],
  // Globs scanned for class names at build time.
  content: [
    "./pages/**/*.{js,ts,jsx,tsx,mdx}",
    "./components/**/*.{js,ts,jsx,tsx,mdx}",
    "./app/**/*.{js,ts,jsx,tsx,mdx}",
  ],
  theme: {
    // Declared outside `extend`, so this REPLACES Tailwind's default
    // breakpoint scale; it mirrors the defaults and adds `xs`.
    screens: {
      xs: '475px',
      sm: '640px',
      md: '768px',
      lg: '1024px',
      xl: '1280px',
      '2xl': '1536px'
    },
    extend: {
      // Font-size scale in Tailwind's tuple form: [size, { lineHeight }].
      fontSize: {
        xs: [
          '0.75rem',
          {
            lineHeight: '1rem'
          }
        ],
        sm: [
          '0.875rem',
          {
            lineHeight: '1.25rem'
          }
        ],
        base: [
          '1rem',
          {
            lineHeight: '1.5rem'
          }
        ],
        lg: [
          '1.125rem',
          {
            lineHeight: '1.75rem'
          }
        ],
        xl: [
          '1.25rem',
          {
            lineHeight: '1.75rem'
          }
        ],
        '2xl': [
          '1.5rem',
          {
            lineHeight: '2rem'
          }
        ],
        '3xl': [
          '1.875rem',
          {
            lineHeight: '2.25rem'
          }
        ],
        '4xl': [
          '2.25rem',
          {
            lineHeight: '2.5rem'
          }
        ],
        '5xl': [
          '3rem',
          {
            lineHeight: '1.15'
          }
        ],
        '6xl': [
          '3.75rem',
          {
            lineHeight: '1.1'
          }
        ],
        '7xl': [
          '4.5rem',
          {
            lineHeight: '1.05'
          }
        ],
        '8xl': [
          '6rem',
          {
            lineHeight: '1'
          }
        ],
        '9xl': [
          '8rem',
          {
            lineHeight: '1'
          }
        ]
      },
      // Semantic colors backed by the CSS variables from globals.css.
      // NOTE(review): `chart` and `neon` reference --chart-* / --neon
      // variables that are not defined in the globals.css shown alongside
      // this config — confirm they are declared elsewhere, otherwise these
      // utilities resolve to invalid colors.
      colors: {
        background: 'hsl(var(--background))',
        foreground: 'hsl(var(--foreground))',
        card: {
          DEFAULT: 'hsl(var(--card))',
          foreground: 'hsl(var(--card-foreground))'
        },
        popover: {
          DEFAULT: 'hsl(var(--popover))',
          foreground: 'hsl(var(--popover-foreground))'
        },
        primary: {
          DEFAULT: 'hsl(var(--primary))',
          foreground: 'hsl(var(--primary-foreground))'
        },
        secondary: {
          DEFAULT: 'hsl(var(--secondary))',
          foreground: 'hsl(var(--secondary-foreground))'
        },
        muted: {
          DEFAULT: 'hsl(var(--muted))',
          foreground: 'hsl(var(--muted-foreground))'
        },
        accent: {
          DEFAULT: 'hsl(var(--accent))',
          foreground: 'hsl(var(--accent-foreground))'
        },
        destructive: {
          DEFAULT: 'hsl(var(--destructive))',
          foreground: 'hsl(var(--destructive-foreground))'
        },
        border: 'hsl(var(--border))',
        input: 'hsl(var(--input))',
        ring: 'hsl(var(--ring))',
        chart: {
          '1': 'hsl(var(--chart-1))',
          '2': 'hsl(var(--chart-2))',
          '3': 'hsl(var(--chart-3))',
          '4': 'hsl(var(--chart-4))',
          '5': 'hsl(var(--chart-5))'
        },
        neon: 'hsl(var(--neon))',
        'color-1': 'hsl(var(--color-1))',
        'color-2': 'hsl(var(--color-2))',
        'color-3': 'hsl(var(--color-3))',
        'color-4': 'hsl(var(--color-4))',
        'color-5': 'hsl(var(--color-5))'
      },
      // Radius scale derived from the --radius variable in globals.css.
      borderRadius: {
        lg: 'var(--radius)',
        md: 'calc(var(--radius) - 2px)',
        sm: 'calc(var(--radius) - 4px)'
      },
      backdropBlur: {
        xs: '2px'
      },
      // @tailwindcss/typography overrides so `prose` content inherits the
      // theme's foreground/primary colors instead of the plugin defaults.
      typography: {
        DEFAULT: {
          css: {
            maxWidth: 'none',
            color: 'hsl(var(--foreground))',
            h1: {
              color: 'hsl(var(--primary))'
            },
            h2: {
              color: 'hsl(var(--primary))'
            },
            h3: {
              color: 'hsl(var(--primary))'
            },
            strong: {
              color: 'hsl(var(--primary))'
            },
            a: {
              color: 'hsl(var(--primary))',
              '&:hover': {
                color: 'hsl(var(--primary))'
              }
            }
          }
        }
      },
      // Named animation utilities; the custom keyframes are defined below.
      // Several use a --speed CSS variable supplied by the consuming
      // component (`rainbow` falls back to 2s).
      animation: {
        'accordion-down': 'accordion-down 0.2s ease-out',
        'accordion-up': 'accordion-up 0.2s ease-out',
        shimmer: 'shimmer 2s infinite',
        'pulse-slow': 'pulse 3s cubic-bezier(0.4, 0, 0.6, 1) infinite',
        'pulse-gradient': 'pulseGradient 1.5s ease-in-out infinite',
        'bounce-dot': 'bounceDot 1.4s infinite ease-in-out both',
        'bounce-dot-delay-1': 'bounceDot 1.4s infinite ease-in-out both 0.2s',
        'bounce-dot-delay-2': 'bounceDot 1.4s infinite ease-in-out both 0.4s',
        'shimmer-slide': 'shimmer-slide var(--speed) ease-in-out infinite alternate',
        'spin-around': 'spin-around calc(var(--speed) * 2) infinite linear',
        rainbow: 'rainbow var(--speed, 2s) infinite linear',
        aurora: 'aurora 8s ease-in-out infinite alternate'
      },
      keyframes: {
        // Expand/collapse to the measured Radix accordion content height.
        'accordion-down': {
          from: {
            height: '0'
          },
          to: {
            height: 'var(--radix-accordion-content-height)'
          }
        },
        'accordion-up': {
          from: {
            height: 'var(--radix-accordion-content-height)'
          },
          to: {
            height: '0'
          }
        },
        shimmer: {
          '0%': {
            transform: 'translateX(-100%)'
          },
          '100%': {
            transform: 'translateX(100%)'
          }
        },
        pulseGradient: {
          '0%, 100%': {
            backgroundPosition: '0% 50%'
          },
          '50%': {
            backgroundPosition: '100% 50%'
          }
        },
        bounceDot: {
          '0%, 80%, 100%': {
            transform: 'translateY(0)'
          },
          '40%': {
            transform: 'translateY(-4px)'
          }
        },
        'shimmer-slide': {
          to: {
            transform: 'translate(calc(100cqw - 100%), 0)'
          }
        },
        'spin-around': {
          '0%': {
            transform: 'translateZ(0) rotate(0)'
          },
          '15%, 35%': {
            transform: 'translateZ(0) rotate(90deg)'
          },
          '65%, 85%': {
            transform: 'translateZ(0) rotate(270deg)'
          },
          '100%': {
            transform: 'translateZ(0) rotate(360deg)'
          }
        },
        rainbow: {
          '0%': {
            'background-position': '0%'
          },
          '100%': {
            'background-position': '200%'
          }
        },
        aurora: {
          '0%': {
            backgroundPosition: '0% 50%',
            transform: 'rotate(-5deg) scale(0.9)'
          },
          '25%': {
            backgroundPosition: '50% 100%',
            transform: 'rotate(5deg) scale(1.1)'
          },
          '50%': {
            backgroundPosition: '100% 50%',
            transform: 'rotate(-3deg) scale(0.95)'
          },
          '75%': {
            backgroundPosition: '50% 0%',
            transform: 'rotate(3deg) scale(1.05)'
          },
          '100%': {
            backgroundPosition: '0% 50%',
            transform: 'rotate(-5deg) scale(0.9)'
          }
        }
      },
      backgroundImage: {
        'grid-pattern': 'linear-gradient(to right, rgba(0, 0, 0, 0.05) 1px, transparent 1px), linear-gradient(to bottom, rgba(0, 0, 0, 0.05) 1px, transparent 1px)'
      }
    }
  },
  plugins: [
    require("tailwindcss-animate"),
    require("@tailwindcss/typography")
  ],
};

export default config;
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Speech-Driven-Lessons
2 | If you'd like to view the full codebase, please contact robertadm1n10@gmail.com
3 |
4 | 🌟Realtime Interactive Lesson Page Demo (with Sound!):
5 |
6 | https://github.com/user-attachments/assets/e34477f9-a241-49af-856e-16cb9b35e57c
7 |
8 | ✨Workflow Demo:
9 |
10 | https://github.com/user-attachments/assets/d4a19b38-0fb4-47a9-86c8-d560b265ad40
11 |
12 |
13 | ## Backend Architecture and Workflow
14 |
15 | ## 1. Overview
16 |
17 | This document provides a detailed description of the backend architecture for the AI Tutor platform. The backend is responsible for handling course creation, content generation, AI-powered chat interactions, and real-time communication for in-class experiences.
18 |
19 | The backend is built using a modern Python technology stack:
20 |
21 | - **Web Framework:** Flask
22 | - **Real-time Communication:** Flask-SocketIO
23 | - **AI/ML:** OpenAI (for chat), Google Gemini (for content generation)
24 | - **Data Storage:** AWS S3 for file storage (course materials, conversation history) and a vector store (likely FAISS, stored on S3) for Retrieval-Augmented Generation (RAG).
25 | - **Authentication:** Firebase Authentication
26 |
27 | ## 2. Architecture
28 |
29 | The backend is designed with a modular architecture, with clear separation of concerns. The main components are:
30 |
31 | - **Flask Application (`app.py`):** The main entry point of the backend. It initializes the Flask app, configures CORS, registers API blueprints, and sets up Socket.IO for real-time communication.
32 | - **API Endpoints (`api/` and `routes/`):** These directories define the RESTful API for the application. The endpoints are organized into blueprints based on functionality (e.g., `course_generation`, `ai_tutor`, `uploads`).
33 | - **Chat Logic (`chatbot.py`):** The `ChatBot` class encapsulates the logic for handling chat interactions. It works with the `S3ContextManager` to retrieve relevant context and interacts with the OpenAI API to generate responses.
34 | - **Content Generation (`course_content_generation/`):** This module is responsible for generating course content using Google's Gemini models. It can generate course outlines and detailed slide-by-slide content with accompanying speech.
35 | - **Data and Context Management (`s3_context_manager.py`, `utils/`):** These modules handle all interactions with AWS S3, including storing and retrieving files, managing course context for the RAG system, and handling user data.
36 | - **Real-time Server (`chatServer.py`):** Although not fully integrated into the main Flask app in the provided code, its presence suggests a separate server or component for handling more complex real-time chat scenarios, possibly over WebSockets.
37 |
38 | The following diagram illustrates the typical workflow for an AI chat interaction:
39 |
40 | ```mermaid
41 | sequenceDiagram
42 | participant User
43 | participant Frontend
44 | participant "Flask Backend (app.py)"
45 | participant ChatBot
46 | participant S3ContextManager
47 | participant "Vector Store (on S3)"
48 | participant "OpenAI API"
49 | participant "S3 (for history)"
50 |
51 | User->>+Frontend: Sends message
52 | Frontend->>+"Flask Backend (app.py)": POST /api/get-ai-response
53 | "Flask Backend (app.py)"->>+"Flask Backend (app.py)": Authenticate user
54 | "Flask Backend (app.py)"->>+ChatBot: Create instance
55 | ChatBot->>+S3ContextManager: Create instance for course
56 | S3ContextManager->>+"Vector Store (on S3)": Load course index/context
57 | "Vector Store (on S3)"-->>-S3ContextManager: Returns context
58 | ChatBot->>+ChatBot: process_message(input)
59 | ChatBot->>S3ContextManager: get_context(input)
60 | S3ContextManager-->>ChatBot: returns relevant documents
61 | ChatBot->>+"OpenAI API": Generate response with context
62 | "OpenAI API"-->>-ChatBot: Returns AI response
63 | ChatBot-->>-"Flask Backend (app.py)": Returns response
64 | "Flask Backend (app.py)"->>+"S3 (for history)": Save conversation history
65 | "S3 (for history)"-->>-"Flask Backend (app.py)": Confirms save
66 | "Flask Backend (app.py)"-->>-Frontend: Returns AI response
67 | Frontend-->>-User: Displays response
68 | ```
69 |
70 | ## 3. Core Workflows
71 |
72 | ### Course Creation & Content Generation
73 |
74 | 1. **Upload Initial Documents:** The user uploads initial course materials (e.g., a syllabus, notes) through the frontend. The `upload_routes.py` endpoint on the backend receives these files and stores them in a dedicated folder for the course on AWS S3.
75 | 2. **Generate Course Outline:** The frontend triggers the course outline generation. The backend's `course_generation_routes.py` calls the `gemini_course_outline_generator.py` module. This module uses the Google Gemini API to read the initial documents and generate a structured course outline in JSON format.
76 | 3. **Generate Slides and Speech:** Once the outline is approved, the backend uses `gemini_slide_speech_generator.py` to generate the detailed content for each slide, including the text and a script for text-to-speech conversion.
77 | 4. **Create Vector Index:** After the content is generated, the backend processes the text content of the slides. It chunks the text, generates embeddings (using an OpenAI model), and creates a vector index (e.g., using FAISS). This index is then saved to S3. This index is crucial for the RAG system.
78 |
79 | ### Chat Interaction (Retrieval-Augmented Generation - RAG)
80 |
81 | The chat functionality allows the AI to answer questions based on the specific content of a course.
82 |
83 | 1. **Initialize Chatbot:** When a user starts a chat for a course, the frontend calls the `/api/initialize-chatbot` endpoint. The backend creates an instance of `S3ContextManager`, which loads the pre-computed vector index for that course from S3 into memory.
84 | 2. **User Sends Message:** The user types a message in the chat interface. The frontend sends this message to the `/api/get-ai-response` endpoint.
85 | 3. **Retrieve Relevant Context:** The `ChatBot` instance receives the user's message. It uses the `S3ContextManager` to query the vector index. The query finds the most relevant chunks of text from the course materials based on the user's question.
86 | 4. **Generate Response:** The retrieved text chunks (the context) are prepended to the user's message and a system prompt, and this combined text is sent to the OpenAI API (e.g., `gpt-4`). This technique, known as Retrieval-Augmented Generation (RAG), allows the AI to provide answers that are grounded in the course content, reducing hallucinations and improving accuracy.
87 | 5. **Save History and Return Response:** The AI's response is sent back to the user. The backend also saves the user's question and the AI's response in a JSON file (`course_history.json`) on S3 for future reference.
88 |
89 | ### Real-time Features
90 |
91 | The backend uses Flask-SocketIO to provide real-time functionality for the in-class experience.
92 |
93 | - **Rooms:** When a user joins a course, they join a dedicated "room" keyed by that course's assistant ID. This allows the backend to send targeted messages to all users in a specific class.
94 | - **Slide Navigation:** When the instructor (or the AI) changes the slide, the backend emits an `update_viewing_slide` event to all users in the room, ensuring that everyone's view is synchronized.
95 | - **Live Interaction:** The Socket.IO connection can be used for other live interactions, such as polls, Q&A sessions, and real-time subtitles.
96 |
97 | ## 4. Data Management
98 |
99 | - **AWS S3:** S3 is the primary data store. Each user has a dedicated "folder" in the S3 bucket. Inside each user's folder, there are sub-folders for each course they have created. A typical course folder contains:
100 | - `course_config.json`: Configuration for the course, including the system prompt for the AI.
101 | - `course_history.json`: The chat history for the course.
102 | - The vector index files (e.g., `faiss_index.bin`).
103 | - The original uploaded documents.
104 | - Generated course content (slides, etc.).
105 | - **Redis (`dump.rdb`):** The presence of `dump.rdb` suggests that Redis is used, likely for caching, session management, or as a message broker for Socket.IO, improving performance and scalability.
106 |
107 | ## 5. Authentication
108 |
109 | User authentication is handled through Firebase.
110 |
111 | 1. **Login on Frontend:** The user logs in on the frontend using Firebase Authentication.
112 | 2. **Send Token to Backend:** For requests that require authentication, the frontend includes the user's Firebase ID token in the `Authorization` header.
113 | 3. **Verify Token on Backend:** The `user_utils.py` module on the backend contains a function that verifies the ID token with Firebase. If the token is valid, the backend processes the request. If not, it returns an "Unauthorized" error.
114 |
115 | This architecture provides a robust and scalable foundation for the AI Tutor platform, enabling advanced features like on-demand content generation, context-aware AI chat, and real-time collaborative learning experiences.
116 |
--------------------------------------------------------------------------------
/backend/app.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, request, jsonify, make_response
2 | from flask_socketio import SocketIO, emit, join_room
3 | from flask_cors import CORS
4 | from routes.upload_routes import upload_bp
5 | from routes.delete_routes import delete_bp
6 | from routes.customize_routes import customize_bp
7 | from routes.voice_routes import voice_bp
8 | from routes.aiTutor_routes import aitutor_bp
9 | from routes.course_info_routes import course_info_bp
10 | from routes.delete_course_routes import delete_course_bp
11 | from api import api as api_blueprint
12 | import utils.user_utils as user_utils
13 | from utils.load_and_process_index import process_course_context_s3
14 | from s3_context_manager import ContextManager as S3ContextManager
15 | import utils.s3_utils as s3_utils
16 | from chatbot import ChatBot
17 | import os
18 | from utils.socket_utils import init_socketio
19 | from functions.slides_navigation import update_viewing_slide, go_to_starting_slide
20 |
# Flask application setup: CORS for the Next.js dev server, Socket.IO for
# real-time in-class events, and registration of all route blueprints.
app = Flask(__name__)
CORS(app,
    resources={r"/*": {
        "origins": ["http://localhost:3000"],
        "methods": ["GET", "POST", "PUT", "DELETE", "OPTIONS"],
        "allow_headers": ["Content-Type", "Authorization", "X-API-Key"],
        "supports_credentials": True,
        "expose_headers": ["Content-Type", "Authorization"]
    }},
    supports_credentials=True)
# Session-signing key (was previously assigned twice with the same value;
# set it once here). NOTE(review): the "supersecretkey" fallback must not
# be used in production.
app.secret_key = os.getenv("FLASK_SECRET_KEY", "supersecretkey")
socketio = SocketIO(app, cors_allowed_origins="*")
init_socketio(socketio)  # Pass socketio instance to the utility module

s3_bucket = "jasmintechs-tutorion"
# Retrieve API key from environment variables; fail fast if missing so the
# app never starts without OpenAI access.
API_KEY = os.getenv("OPENAI_API_KEY")
if not API_KEY:
    raise ValueError("OPENAI_API_KEY environment variable must be set.")

# Register routes
app.register_blueprint(upload_bp)
app.register_blueprint(delete_bp)
app.register_blueprint(customize_bp)
app.register_blueprint(voice_bp)
app.register_blueprint(aitutor_bp)
app.register_blueprint(course_info_bp)
app.register_blueprint(delete_course_bp)
app.register_blueprint(api_blueprint, url_prefix='/api')
51 |
@app.route('/api/initialize-chatbot', methods=['POST'])
def initialize_chatbot():
    """Prepare a course-scoped chatbot for the authenticated user.

    Expects a JSON body with ``course_title``. Loads (or builds) the
    course's vector indices from S3, reads ``course_config.json`` to get
    the system prompt, and instantiates a ChatBot configured with it.

    Returns:
        200 with the course title and system prompt on success,
        401 when the Firebase token is missing or invalid,
        500 on configuration or processing errors.
    """
    # Removed redundant function-local imports: request/jsonify are already
    # imported at module level, and json/os were unused here.
    username = user_utils.get_current_user(request)
    if not username:
        return jsonify({'error': 'Unauthorized'}), 401

    try:
        data = request.get_json()
        course_title = data.get('course_title', '')

        # Context manager scoped to this user's course folder on S3.
        new_context_manager = S3ContextManager(api_key=API_KEY, user=username, course_title=course_title)

        # Reuse pre-computed indices when available; otherwise build them
        # from the course materials stored on S3.
        if new_context_manager.load_saved_indices():
            print(f"Successfully loaded saved indices for course: {course_title}")
        else:
            print(f"No saved indices found for course: {course_title}")
            process_course_context_s3(new_context_manager.s3_bucket, username, course_title, API_KEY)

        # The system prompt lives in course_config.json next to the content.
        try:
            config_json = s3_utils.get_json_from_s3(new_context_manager.s3_bucket,
                                                    s3_utils.get_s3_file_path(username, course_title, "course_config.json"))
            system_prompt = config_json.get("system_prompt")
            if not system_prompt:
                raise ValueError("No system prompt found in course configuration")
        except Exception as e:
            print(f"Error loading course configuration: {str(e)}")
            return jsonify({'error': 'Failed to load course configuration'}), 500

        # Chatbot instance for this session.
        # NOTE(review): the instance is not stored anywhere, so
        # /api/get-ai-response rebuilds one per request — confirm intended.
        chatbot = ChatBot(context_manager=new_context_manager, api_key=API_KEY)

        # Apply the course-specific system prompt.
        chatbot.update_system_prompt(system_prompt)

        return jsonify({
            'message': 'Chatbot initialized successfully',
            'course': course_title,
            'system_prompt': system_prompt
        })

    except Exception as e:
        print(f"Error initializing chatbot: {str(e)}")
        return jsonify({'error': str(e)}), 500
110 |
@app.route('/api/get-ai-response', methods=['POST'])
def get_ai_response():
    """Answer a single user message for a course and persist the exchange.

    Expects JSON {'input': str, 'course_title': str}. Rebuilds a ChatBot
    backed by the course's S3 context on every request (stateless), asks it
    for a response, then best-effort appends the exchange to the course's
    conversation history in S3. History failures never block the response.
    """
    from flask import request, jsonify
    from datetime import datetime
    import time

    start = time.time()

    username = user_utils.get_current_user(request)
    if not username:
        return jsonify({'error': 'Unauthorized'}), 401

    data = request.get_json(silent=True) or {}
    user_input = data.get('input', '')
    if not user_input:
        return jsonify({'error': 'input required in request'}), 400

    # Client must send course_title in request (no server-side session state).
    course_title = data.get('course_title')
    if not course_title:
        return jsonify({'error': 'course_title required in request'}), 400

    # Recreate the chatbot from the persisted course data on every call.
    chatbot = ChatBot(context_manager=S3ContextManager(user=username, course_title=course_title, api_key=API_KEY))

    try:
        response = chatbot.process_message(user_input)
    except Exception as e:
        print(f"Error generating AI response: {str(e)}")
        return jsonify({'error': 'Failed to generate response'}), 500

    print(f"AI response time: {time.time() - start:.2f}s")

    history_entry = {
        "timestamp": datetime.now().isoformat(),
        "user_input": user_input,
        "ai_response": response
    }

    start = time.time()

    try:
        # History is stored next to the other per-course artifacts in S3.
        history_s3_key = s3_utils.get_s3_file_path(username, course_title, "course_history.json")

        # NOTE(review): 's3_bucket' is a module-level name here, unlike the
        # per-context bucket used in initialize_chatbot — confirm they match.
        history = s3_utils.get_json_from_s3(s3_bucket, history_s3_key)
        if history is None:
            print("No existing conversation history found")
            history = {"conversations": []}

        # Append the new exchange and write the whole document back.
        history["conversations"].append(history_entry)
        s3_utils.upload_json_to_s3(history, s3_bucket, history_s3_key)
        print(f"Conversation history updated successfully at {history_s3_key}")

        print(f"History saving time: {time.time() - start:.2f}s")
        return jsonify(response)

    except Exception as e:
        print(f"Error saving conversation history: {str(e)}")
        # Still return the response even if saving history fails.
        return jsonify(response)
172 |
173 |
# Socket.IO event handlers
@socketio.on('connect')
def handle_connect():
    # Lifecycle logging only; room membership is handled by 'join_course'.
    print('Client connected')
178 |
@socketio.on('join_course')
def handle_join(data):
    """Subscribe the connected client to the Socket.IO room for its assistant."""
    room = data.get('assistant_id')
    if not room:
        return
    join_room(room)
    print(f"User joined course room: {room}")
186 |
@socketio.on('disconnect')
def handle_disconnect():
    # Lifecycle logging only.
    print('Client disconnected')
190 |
@socketio.on('update_viewing_slide')
def handle_viewing_slide_update(data):
    """Relay a client's current slide position into the shared course state."""
    room = data.get('assistant_id')
    slide = data.get('position')
    print(f"Received slide update for {room}: {slide}")
    # Position 0 is valid, so only reject an actual None.
    if not room or slide is None:
        return
    update_viewing_slide(room, slide)
198 |
@socketio.on('welcome_block_start')
def handle_welcome_block_start(data):
    """Kick off a course session by navigating to its starting slide."""
    assistant_id = data.get('assistant_id')
    if not assistant_id:
        return
    print(f"Welcome block start requested for {assistant_id}")
    # Resolve which user/course this assistant belongs to.
    user_course_data = s3_utils.load_assistant_user_from_s3(assistant_id)
    if not user_course_data:
        return
    result = go_to_starting_slide(
        assistant_id,
        user_course_data['course_id'],
        user_course_data['username']
    )
    print(f"Starting slide response: {result}")
214 |
if __name__ == '__main__':
    # Dev entry point. NOTE(review): debug=True combined with host 0.0.0.0
    # exposes the Werkzeug debugger to the whole network — confirm this
    # never runs in production.
    socketio.run(app, debug=True, host='0.0.0.0', port=5000)
217 |
--------------------------------------------------------------------------------
/backend/api/webhook.py:
--------------------------------------------------------------------------------
1 | from flask import Blueprint, request, jsonify
2 | import utils.s3_utils as s3_utils
3 | import functions.get_detailed_content as get_detailed_content
4 | import functions.slides_navigation as slides_navigation
5 |
6 | webhook = Blueprint('webhook', __name__)
7 |
8 |
@webhook.route('/', methods=['POST'])
async def webhook_route():
    """Dispatch an incoming webhook to the handler for its event type.

    The webhook body carries the event under the 'message' key; its 'type'
    field selects the handler. Unknown types are acknowledged with an empty
    body so the sender does not retry.
    """
    request_data = request.get_json(silent=True)
    payload = (request_data or {}).get('message')
    if not payload or 'type' not in payload:
        # Malformed webhook body — reject instead of raising a KeyError.
        return jsonify({'error': "missing 'message' payload"}), 400

    if payload['type'] == "function-call":
        response = await function_call_handler(payload)
        return jsonify(response), 201
    elif payload['type'] == "status-update":
        response = await status_update_handler(payload)
        return jsonify(response), 201
    elif payload['type'] == "speech-update":
        print("speech-update called")
        # speech_update_handler is synchronous — no await.
        response = speech_update_handler(payload)
        return jsonify(response), 201
    elif payload['type'] == "conversation-update":
        print("conversation-update called")
        response = await conversation_update_handler(payload)
        return jsonify(response), 201
    elif payload['type'] == "assistant-request":
        response = await assistant_request_handler(payload)
        return jsonify(response), 201
    elif payload['type'] == "end-of-call-report":
        await end_of_call_report_handler(payload)
        return jsonify({}), 201
    elif payload['type'] == "transcript":
        response = await transcript_handler(payload)
        return jsonify(response), 201
    elif payload['type'] == "hang":
        response = await hang_event_handler(payload)
        return jsonify(response), 201
    else:
        return jsonify({}), 201
44 |
45 |
async def function_call_handler(payload):
    """Execute the tool function requested by the assistant.

    The payload carries a 'functionCall' with the function name and its
    parameters, plus the calling assistant's id. Slide-navigation functions
    share one signature and are dispatched via a table; unknown names
    return None so the route responds with an empty body.

    Raises:
        ValueError: when the payload has no 'functionCall', or the assistant
            id cannot be resolved to a user/course mapping.
    """
    function_call = payload.get('functionCall')
    if not function_call:
        raise ValueError("Invalid Request.")

    name = function_call.get('name')
    parameters = function_call.get('parameters')
    assistant_id = payload.get("assistant", {}).get("id")

    print(f"{assistant_id} - {name} - {parameters}")

    user_course_data = s3_utils.load_assistant_user_from_s3(assistant_id)
    print(user_course_data)
    if not user_course_data:
        # Without the mapping we cannot resolve course/user for any function.
        raise ValueError(f"No user/course mapping found for assistant {assistant_id}")

    print(f"function call handler called: {name} - {parameters}")

    course_id = user_course_data['course_id']
    username = user_course_data['username']

    if name == 'getDetailedContent':
        return get_detailed_content.get_detailed_content(course_id, username,
                                                         parameters['userQuery'])

    # Navigation functions that take only (assistant, course, user).
    navigation = {
        'goToStartingSlide': slides_navigation.go_to_starting_slide,
        'goToNextSlide': slides_navigation.go_to_next_slide,
        'goToPreviousSlide': slides_navigation.go_to_previous_slide,
        'goToViewingSlide': slides_navigation.go_to_viewing_slide,
    }
    if name in navigation:
        return navigation[name](assistant_id, course_id, username)

    if name == "goToSpecifiedSlide":
        return slides_navigation.go_to_specified_slide(
            assistant_id, course_id, username, parameters['slideNumber'])

    return None
118 |
119 |
async def status_update_handler(payload):
    """
    Placeholder: invoked for "status-update" webhooks (see webhook_route).
    Sent during a call whenever the status of the call has changed.
    Possible statuses are: "queued","ringing","in-progress","forwarding","ended".
    You can have certain logic or handlers based on the call status.
    You can also store the information in your database. For example whenever the call gets forwarded.
    """
    return {}
129 |
130 |
def speech_update_handler(payload):
    """
    Placeholder: invoked (synchronously) for "speech-update" webhooks.
    NOTE(review): the original docstring described call-status updates; this
    handler actually receives 'speech-update' events (see webhook_route).
    """
    # Debugging aids left over from development:
    # print(payload)
    # with open('speech_update.txt', 'a') as f:
    #     f.write(payload["message"]["artifact"]["messages"])
    # print(payload["artifact"]["messages"][-1]["message"])

    return {}
145 |
146 |
async def conversation_update_handler(payload):
    """Log the newest message of an updated conversation.

    Sent during a call whenever the conversation changes. Tolerates payloads
    with a missing or empty 'conversation' list instead of raising.
    """
    print("conversation_update_handler called")
    conversation = payload.get('conversation') or []
    if conversation:
        # Only the most recent entry is of interest for logging.
        print(conversation[-1].get('content'))
    return {}
156 |
157 |
async def end_of_call_report_handler(payload):
    """
    Placeholder: invoked for "end-of-call-report" webhooks.
    You can store the information like summary, transcript, recordingUrl or even the full messages list in the database.
    """
    return
164 |
165 |
async def transcript_handler(payload):
    """Acknowledge a transcript-chunk webhook.

    Sent during a call whenever the transcript is available for a chunk of
    the stream. Returns an empty dict (not None) so webhook_route's
    jsonify(response) yields a JSON object, matching the other handlers.
    """
    return {}
173 |
174 |
async def hang_event_handler(payload):
    """Acknowledge a hang webhook (user terminated the call).

    Returns an empty dict (not None) so webhook_route's jsonify(response)
    yields a JSON object, matching the other handlers.
    """
    return {}
182 |
183 |
async def assistant_request_handler(payload):
    """
    Handle Business logic here.
    You can fetch your database to see if there is an existing assistant associated with this call. If yes, return the assistant.
    You can also fetch some params from your database to create the assistant and return it.
    You can have various predefined static assistant here and return them based on the call details.
    """

    if payload and 'call' in payload:
        # Static demo assistant: an email-drafting persona with one tool.
        assistant = {
            'name': 'Paula',
            'model': {
                'provider': 'openai',
                'model': 'gpt-3.5-turbo',
                'temperature': 0.7,
                'systemPrompt': "You're Paula, an AI assistant who can help user draft beautiful emails to their clients based on the user requirements. Then Call sendEmail function to actually send the email.",
                'functions': [
                    {
                        'name': 'sendEmail',
                        'description': 'Send email to the given email address and with the given content.',
                        'parameters': {
                            'type': 'object',
                            'properties': {
                                'email': {
                                    'type': 'string',
                                    'description': 'Email to which we want to send the content.'
                                },
                                'content': {
                                    'type': 'string',
                                    'description': 'Actual Content of the email to be sent.'
                                }
                            },
                            # Only 'email' is mandatory; 'content' may be omitted.
                            'required': ['email']
                        }
                    }
                ]
            },
            'voice': {
                'provider': '11labs',
                'voiceId': 'paula'
            },
            'firstMessage': "Hi, I'm Paula, your personal email assistant."
        }
        return {'assistant': assistant}

    raise ValueError('Invalid call details provided.')
230 |
--------------------------------------------------------------------------------
/app/courses/[id]/hooks/useSpeech.ts:
--------------------------------------------------------------------------------
1 | // app/courses/[id]/hooks/useSpeech.ts
2 | 'use client'
3 |
4 | import { useRef, useState } from "react"
5 | import { SpeechRecognition, SpeechRecognitionEvent } from "../types"
6 |
7 | export const useSpeech = (handleConversationCycle: (text: string) => Promise) => {
8 | const [isListening, setIsListening] = useState(false)
9 | const [isMuted, setIsMuted] = useState(false)
10 | const [isVideoOff, setIsVideoOff] = useState(false)
11 |
12 | const shouldContinuousListenRef = useRef(false)
13 | const isSpeakingRef = useRef(false)
14 | const recognitionRef = useRef(null)
15 | const mediaRecorderRef = useRef(null)
16 | const audioChunksRef = useRef([])
17 | const streamRef = useRef(null)
18 |
19 | const startListening = async () => {
20 | console.log("Starting listening attempt...")
21 | console.log("isSpeaking:", isSpeakingRef.current)
22 | console.log("isListening:", isListening)
23 |
24 | if (isSpeakingRef.current || isListening) {
25 | console.log("Blocked by guards, returning")
26 | return
27 | }
28 |
29 | // Clean up any existing instances
30 | if (recognitionRef.current) {
31 | recognitionRef.current.stop()
32 | recognitionRef.current = null
33 | }
34 | if (streamRef.current) {
35 | streamRef.current.getTracks().forEach(track => track.stop())
36 | streamRef.current = null
37 | }
38 |
39 | try {
40 | const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
41 | streamRef.current = stream
42 |
43 | const mediaRecorder = new MediaRecorder(stream)
44 | mediaRecorderRef.current = mediaRecorder
45 | audioChunksRef.current = []
46 |
47 | mediaRecorder.ondataavailable = (event) => {
48 | audioChunksRef.current.push(event.data)
49 | }
50 |
51 | mediaRecorder.onstart = () => {
52 | setIsListening(true)
53 | console.log(isListening)
54 | console.log("Recorder has started!!!!!!")
55 | }
56 |
57 | mediaRecorder.onstop = async () => {
58 | console.log(isListening)
59 | console.log("Recorder has ended!!!!!!")
60 | const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/mp3' })
61 | const formData = new FormData()
62 | formData.append('audio', audioBlob)
63 |
64 | try {
65 | const recognizeResponse = await fetch('http://localhost:5000/api/recognize-openai', {
66 | method: 'POST',
67 | credentials: "include",
68 | body: formData,
69 | })
70 | const recognizeData = await recognizeResponse.json()
71 |
72 | if (recognizeData.text) {
73 | const aiResponse = await handleConversationCycle(recognizeData.text)
74 | //if (aiResponse) await speakResponse(aiResponse)
75 | }
76 | } finally {
77 | stream.getTracks().forEach(track => track.stop())
78 | recognitionRef.current = null
79 | }
80 | }
81 |
82 | const SpeechRecognitionConstructor = (window.SpeechRecognition || window.webkitSpeechRecognition) as any
83 | const recognition = new SpeechRecognitionConstructor()
84 | recognitionRef.current = recognition
85 | recognition.lang = "en-US"
86 | recognition.continuous = false
87 | recognition.interimResults = false
88 |
89 | recognition.onresult = () => {
90 | if (mediaRecorderRef.current?.state === 'recording') {
91 | mediaRecorderRef.current.stop()
92 | }
93 | }
94 |
95 | recognition.onstart = () => {
96 | console.log("Recognition has started!!!!!!")
97 | setIsListening(true)
98 | // You can set a flag here if needed
99 | // e.g., setRecognitionStarted(true)
100 | }
101 |
102 | recognition.onend = () => {
103 | console.log("Recognition has stopped!!!!!!!")
104 | setIsListening(false)
105 | recognitionRef.current = null
106 | mediaRecorderRef.current?.stop()
107 | }
108 |
109 | recognition.onerror = (event: any) => {
110 | console.error("Speech recognition error:", event)
111 | setIsListening(false)
112 | recognitionRef.current = null
113 | }
114 |
115 | setIsListening(true)
116 | console.log("Starting recognition and recording")
117 | recognition.start()
118 | mediaRecorder.start()
119 |
120 | console.log("Recognition and recording started")
121 |
122 |
123 |
124 | shouldContinuousListenRef.current = true
125 | } catch (error) {
126 | console.error("Microphone access error:", error)
127 | }
128 | }
129 |
130 | const stopListening = () => {
131 | shouldContinuousListenRef.current = false
132 | recognitionRef.current?.stop()
133 | mediaRecorderRef.current?.stop()
134 | streamRef.current?.getTracks().forEach(track => track.stop())
135 | setIsListening(false)
136 | }
137 |
138 | const speakResponse = async (text: string, isStreaming: boolean = false) => {
139 | if (!text || isSpeakingRef.current) return
140 |
141 | isSpeakingRef.current = true
142 | shouldContinuousListenRef.current = false
143 |
144 | try {
145 | if (isStreaming) {
146 | try {
147 | const sentences = text.match(/[^.!?]+[.!?]+/g) || [text]
148 | for (const sentence of sentences) {
149 | if (!sentence.trim()) continue
150 |
151 | const audioResponse = await fetch('http://localhost:5000/api/generate-audio', {
152 | method: 'POST',
153 | credentials: "include",
154 | headers: { 'Content-Type': 'application/json' },
155 | body: JSON.stringify({ text: sentence, voice: "en-US-AvaMultilingualNeural" }),
156 | })
157 |
158 | const audioBlob = await audioResponse.blob()
159 | const audioUrl = URL.createObjectURL(audioBlob)
160 | const audio = new Audio(audioUrl)
161 |
162 | await new Promise((resolve) => {
163 | audio.onended = () => {
164 | URL.revokeObjectURL(audioUrl)
165 | resolve(null)
166 | }
167 | audio.play().catch(resolve) // Handle play() rejection
168 | })
169 | }
170 | } finally {
171 | console.log("Cleaning up after streaming")
172 | isSpeakingRef.current = false
173 | shouldContinuousListenRef.current = true
174 | // Force cleanup of any existing instances
175 | if (recognitionRef.current) {
176 | recognitionRef.current.stop()
177 | recognitionRef.current = null
178 | }
179 | if (streamRef.current) {
180 | streamRef.current.getTracks().forEach(track => track.stop())
181 | streamRef.current = null
182 | }
183 | setIsListening(false)
184 | // Increase timeout and add logging
185 | setTimeout(() => {
186 | console.log("Attempting to restart listening")
187 | startListening()
188 | }, 1000)
189 | }
190 | } else {
191 | const audioResponse = await fetch('http://localhost:5000/api/generate-audio', {
192 | method: 'POST',
193 | credentials: "include",
194 | headers: { 'Content-Type': 'application/json' },
195 | body: JSON.stringify({ text, voice: "en-US-AvaMultilingualNeural" }),
196 | })
197 |
198 | const audioBlob = await audioResponse.blob()
199 | const audioUrl = URL.createObjectURL(audioBlob)
200 | const audio = new Audio(audioUrl)
201 |
202 | audio.onended = () => {
203 | isSpeakingRef.current = false
204 | URL.revokeObjectURL(audioUrl)
205 | shouldContinuousListenRef.current = true
206 | setTimeout(() => startListening(), 500)
207 | }
208 |
209 | await audio.play()
210 | }
211 | } catch (error) {
212 | console.error("Audio generation error:", error)
213 | isSpeakingRef.current = false
214 | }
215 | }
216 |
217 | return {
218 | isListening,
219 | isMuted,
220 | isVideoOff,
221 | startListening,
222 | stopListening,
223 | setIsMuted,
224 | setIsVideoOff,
225 | speakResponse
226 | }
227 | }
--------------------------------------------------------------------------------
/app/courses/[id]/page.tsx:
--------------------------------------------------------------------------------
1 | "use client"
2 |
3 | import { useConversation } from "./hooks/useConversation"
4 | import { useSpeech } from "./hooks/useSpeech"
5 | import { SidebarProvider, Sidebar, SidebarContent } from "@/components/ui/sidebar"
6 | import { Button } from "@/components/ui/button"
7 | import { Input } from "@/components/ui/input"
8 | import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip"
9 | import { Mic, MicOff, Video, VideoOff, Send, Menu, Subtitles, Bot, ChevronLeft } from "lucide-react"
10 | import { useState } from "react"
11 | import { ChatHistory } from "./components/ChatHistory"
12 | import { SlideViewer } from "./components/SlideViewer"
13 | import { Card, CardHeader, CardTitle, CardContent } from "@/components/ui/card"
14 | import Image from "next/image"
15 | import CameraComponent from "@/components/camera-component"
16 | import Link from "next/link"
17 |
18 | export default function CoursePage({ params }: { params: { id: string } }) {
19 | const { messages, isTyping, handleConversationCycle, currentSlideIndex, setCurrentSlideIndex } = useConversation(
20 | params.id,
21 | )
22 |
23 | const { isListening, isMuted, isVideoOff, startListening, stopListening, setIsMuted, setIsVideoOff } =
24 | useSpeech(handleConversationCycle)
25 |
26 | const [inputMessage, setInputMessage] = useState("")
27 | const [isSidebarHidden, setIsSidebarHidden] = useState(false)
28 |
29 | const currentAIMessage = messages.filter((m) => m.sender === "ai").slice(-1)[0]
30 |
31 | return (
32 |
33 |
34 | {/* Back to Courses Button */}
35 |
39 |
40 |
Back to Courses
41 |
42 |
43 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 | {/* Video Grid */}
54 |
55 |
56 |
57 |
58 | AI Tutor
59 |
60 |
67 |
68 |
69 |
70 |
71 | You
72 |
73 |
74 |
75 |
76 |
77 | {/* AI Response Card */}
78 |
79 |
80 | AI Tutor Response
81 |
82 |
83 | {currentAIMessage?.slides ? (
84 |
89 | ) : (
90 |
91 |
92 | {currentAIMessage?.text || "Let's start our lesson! Feel free to ask any questions."}
93 |
94 |
95 | )}
96 |
97 |
98 |
99 |
100 | {/* Control Bar */}
101 |
102 |
103 |
104 |
105 |
106 | (isListening ? stopListening() : startListening())}
111 | >
112 | {isListening ? : }
113 |
114 |
115 |
116 | {isListening ? "Stop Listening" : "Start Listening"}
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 | setIsVideoOff(!isVideoOff)}
129 | >
130 | {isVideoOff ? : }
131 |
132 |
133 |
134 | {isVideoOff ? "Turn Video On" : "Turn Video Off"}
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
147 |
148 |
149 |
150 |
151 | Toggle Subtitles
152 |
153 |
154 |
155 |
156 |
157 |
158 |
setInputMessage(e.target.value)}
162 | onKeyDown={(e) => {
163 | if (e.key === "Enter" && !e.shiftKey) {
164 | e.preventDefault()
165 | handleConversationCycle(inputMessage)
166 | setInputMessage("")
167 | }
168 | }}
169 | className="flex-1 bg-white border-[#1B4D3E]/20 focus:border-[#1B4D3E] focus:ring-[#1B4D3E] placeholder-[#1B4D3E]/40"
170 | />
171 |
172 |
173 |
174 |
175 | {
178 | handleConversationCycle(inputMessage)
179 | setInputMessage("")
180 | }}
181 | disabled={isTyping || !inputMessage.trim()}
182 | className="rounded-full h-10 w-10 bg-[#1B4D3E] hover:bg-[#2C5F2D] text-white"
183 | >
184 |
185 |
186 |
187 |
188 | Send message
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 |
197 | setIsSidebarHidden(!isSidebarHidden)}
202 | >
203 |
204 |
205 |
206 |
207 | {isSidebarHidden ? "Show Chat History" : "Hide Chat History"}
208 |
209 |
210 |
211 |
212 |
213 |
214 |
215 | )
216 | }
217 |
218 |
--------------------------------------------------------------------------------
/app/vapi-test/page.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useState, useEffect, useRef } from "react";
4 | import Vapi from "@vapi-ai/web";
5 | import { Button } from "@/components/ui/button";
6 | import { Card, CardContent, CardFooter, CardHeader, CardTitle } from "@/components/ui/card";
7 |
8 | const BASE_URL = "http://localhost:5000/api/assistant"; // Flask API base URL
9 |
10 | export default function VoiceChat() {
11 | const [messages, setMessages] = useState>([]);
12 | const [isCallActive, setIsCallActive] = useState(false);
13 | const [transcript, setTranscript] = useState(null);
14 | const [summary, setSummary] = useState(null);
15 | const [recordingUrl, setRecordingUrl] = useState(null);
16 | const [assistantId, setAssistantId] = useState(null); // Store assistant ID
17 | const vapiRef = useRef(null);
18 |
19 | useEffect(() => {
20 | if (typeof window !== "undefined") {
21 | const apiKey = "9d761ee5-d393-4aab-9d43-3265b8d66a66";
22 | vapiRef.current = new Vapi(apiKey);
23 | setupVapiListeners();
24 | }
25 |
26 | // --- Cleanup on unmount or page navigation (best effort).
27 | return () => {
28 | if (vapiRef.current) {
29 | vapiRef.current.stop();
30 | }
31 | // If call still active when unmounting, delete assistant
32 | if (isCallActive && assistantId) {
33 | deleteAssistant(assistantId);
34 | }
35 | };
36 | // eslint-disable-next-line react-hooks/exhaustive-deps
37 | }, []);
38 |
39 | /**
40 | * Optionally: Listen for browser/tab closing or refresh
41 | * to trigger best-effort deletion.
42 | */
43 | useEffect(() => {
44 | const handleBeforeUnload = (event: BeforeUnloadEvent) => {
45 | // If a call is active, do a best-effort delete
46 | if (isCallActive && assistantId) {
47 | // You can use the Beacon API for a non-blocking request
48 | // or a synchronous XHR here. E.g.:
49 | navigator.sendBeacon &&
50 | navigator.sendBeacon(`${BASE_URL}/delete`, JSON.stringify({ assistant_id: assistantId }));
51 | // Or a synchronous fetch/XHR:
52 | // const xhr = new XMLHttpRequest();
53 | // xhr.open("POST", `${BASE_URL}/delete`, false /* synchronous */);
54 | // xhr.setRequestHeader("Content-Type", "application/json");
55 | // xhr.send(JSON.stringify({ assistant_id: assistantId }));
56 | }
57 | };
58 |
59 | window.addEventListener("beforeunload", handleBeforeUnload);
60 | return () => {
61 | window.removeEventListener("beforeunload", handleBeforeUnload);
62 | };
63 | }, [assistantId, isCallActive]);
64 |
  // Wire Vapi SDK events into component state.
  // NOTE(review): this is registered once on mount, so the "call-end"
  // handler closes over the initial `assistantId` (null) and its delete
  // likely never fires — consider holding the current id in a ref.
  const setupVapiListeners = () => {
    if (!vapiRef.current) return;

    vapiRef.current.on("call-start", () => {
      console.log("Call has started");
      setIsCallActive(true);
      setMessages([]); // Clear previous messages
      setTranscript(null);
      setSummary(null);
      setRecordingUrl(null);
    });

    vapiRef.current.on("call-end", async () => {
      console.log("Call has ended");
      setIsCallActive(false);
      await fetchEndOfCallReport();

      // On normal call-end, also delete the assistant from the backend
      if (assistantId) {
        await deleteAssistant(assistantId);
      }
    });

    vapiRef.current.on("message", (message) => {
      console.log("Message from assistant:", message);
      setMessages((prev) => [...prev, { role: "assistant", content: message.text }]);
    });

    vapiRef.current.on("error", (e) => {
      console.error("Vapi error:", e);
    });

    vapiRef.current.on("speech-start", () => {
      console.log("Assistant speech has started.");
    });

    vapiRef.current.on("speech-end", () => {
      console.log("Assistant speech has ended.");
    });
  };
105 |
106 | // Fetch End-of-Call Report when the call ends
107 | const fetchEndOfCallReport = async () => {
108 | try {
109 | const response = await fetch("/api/vapi-end-report"); // Example path to your backend
110 | if (!response.ok) throw new Error("Failed to fetch end-of-call report");
111 |
112 | const data = await response.json();
113 | console.log("End-of-Call Report:", data);
114 |
115 | setTranscript(data.message.transcript || "No transcript available");
116 | setSummary(data.message.summary || "No summary available");
117 | setRecordingUrl(data.message.recordingUrl || null);
118 |
119 | if (data.message.messages) {
120 | setMessages((prev) => [
121 | ...prev,
122 | ...data.message.messages.map((m: any) => ({ role: m.role, content: m.message })),
123 | ]);
124 | }
125 | } catch (error) {
126 | console.error("Error fetching End-of-Call Report:", error);
127 | }
128 | };
129 |
130 | // Create the assistant via Flask
131 | const createAssistant = async (): Promise => {
132 | try {
133 | const response = await fetch(`${BASE_URL}/create`, {
134 | credentials: "include",
135 | method: "POST",
136 | credentials: "include",
137 | headers: { "Content-Type": "application/json" },
138 | });
139 | if (!response.ok) throw new Error("Failed to create assistant");
140 |
141 | const data = await response.json();
142 | console.log("Assistant created:", data);
143 | setAssistantId(data.assistant_id);
144 | return data.assistant_id;
145 | } catch (error) {
146 | console.error("Error creating assistant:", error);
147 | return null;
148 | }
149 | };
150 |
151 | // Delete the assistant
152 | const deleteAssistant = async (id: string) => {
153 | try {
154 | // If we never actually created an assistant, skip
155 | if (!id) return;
156 |
157 | const response = await fetch(`${BASE_URL}/delete`, {
158 | credentials: "include",
159 | method: "POST",
160 | headers: { "Content-Type": "application/json" },
161 | body: JSON.stringify({ assistant_id: id }),
162 | });
163 |
164 | if (!response.ok) throw new Error("Failed to delete assistant");
165 |
166 | console.log("Assistant deleted successfully");
167 | setAssistantId(null);
168 | } catch (error) {
169 | console.error("Error deleting assistant:", error);
170 | }
171 | };
172 |
  // Start the call by first creating an assistant
  const startCall = async () => {
    if (!vapiRef.current) return;

    // Provision a fresh backend assistant for this session; bail on failure.
    const newAssistantId = await createAssistant();
    if (!newAssistantId) return;

    try {
      // With Vapi Web SDK v2, you can pass just the assistantId
      await vapiRef.current.start(newAssistantId);
      console.log("Call started");
    } catch (error) {
      console.error("Error starting call:", error);
    }
  };
188 |
189 | // Stop the call - triggers "call-end" event in the VAPI SDK
190 | const stopCall = () => {
191 | if (vapiRef.current) {
192 | vapiRef.current.stop();
193 | console.log("Call stopped");
194 | deleteAssistant(assistantId || "").then(r => {
195 | console.log("Assistant deleted");
196 | })
197 | }
198 | };
199 |
200 | return (
201 |
202 |
203 |
204 | Voice Chat with VAPI AI
205 |
206 |
207 | {messages.map((m, index) => (
208 |
209 |
214 | {m.content}
215 |
216 |
217 | ))}
218 |
219 | {transcript && (
220 |
221 |
Full Transcript:
222 |
{transcript}
223 |
224 | )}
225 |
226 | {summary && (
227 |
228 |
Summary:
229 |
{summary}
230 |
231 | )}
232 |
233 | {recordingUrl && (
234 |
240 | )}
241 |
242 |
243 |
244 | Start Call
245 |
246 |
247 | Stop Call
248 |
249 |
250 |
251 |
252 | );
253 | }
254 |
--------------------------------------------------------------------------------
/app/vapi-test/[title]/page.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 |
3 | import { useState, useEffect, useRef } from "react";
4 | import { useParams } from "next/navigation";
5 | import Vapi from "@vapi-ai/web";
6 | import { Button } from "@/components/ui/button";
7 | import { Card, CardContent, CardFooter, CardHeader, CardTitle } from "@/components/ui/card";
8 |
9 | const BASE_URL = "http://localhost:5000/api/assistant"; // Flask API base URL
10 |
export default function VoiceChat() {
  const params = useParams();
  // Route segment /vapi-test/[title]; decode because titles may be URL-encoded.
  const dynamicTitle = decodeURIComponent((params.title as string) || "");

  // Per-call UI state. Generic arguments below were restored — the extracted
  // source had them stripped (e.g. `useState>([])`), which does not compile.
  // The message shape {role, content} matches what the listeners append.
  const [messages, setMessages] = useState<Array<{ role: string; content: string }>>([]);
  const [isCallActive, setIsCallActive] = useState(false);
  const [transcript, setTranscript] = useState<string | null>(null);
  const [summary, setSummary] = useState<string | null>(null);
  const [recordingUrl, setRecordingUrl] = useState<string | null>(null);
  const [assistantId, setAssistantId] = useState<string | null>(null);
  // Single Vapi client instance, created once on mount.
  const vapiRef = useRef<Vapi | null>(null);

  console.log("dynamicTitle", dynamicTitle);
24 |
  // Initialize the Vapi client once on mount (client-side only) and register
  // its event listeners. The API key is a public, browser-exposed env var.
  useEffect(() => {
    if (typeof window !== "undefined") {
      const apiKey = process.env.NEXT_PUBLIC_VAPI_API_KEY || "";
      vapiRef.current = new Vapi(apiKey);
      setupVapiListeners();
    }

    // --- Cleanup on unmount or page navigation (best effort).
    return () => {
      if (vapiRef.current) {
        vapiRef.current.stop();
      }
      // If call still active when unmounting, delete assistant
      // NOTE(review): with an empty dependency array this cleanup closes over
      // the *initial* isCallActive (false) and assistantId (null), so this
      // branch can never fire. To make it work, mirror the latest values into
      // refs and read those here.
      if (isCallActive && assistantId) {
        deleteAssistant(assistantId);
      }
    };
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);
44 |
45 | /**
46 | * Optionally: Listen for browser/tab closing or refresh
47 | * to trigger best-effort deletion.
48 | */
49 | useEffect(() => {
50 | const handleBeforeUnload = (event: BeforeUnloadEvent) => {
51 | // If a call is active, do a best-effort delete
52 | if (isCallActive && assistantId) {
53 | // You can use the Beacon API for a non-blocking request
54 | // or a synchronous XHR here. E.g.:
55 | navigator.sendBeacon &&
56 | navigator.sendBeacon(`${BASE_URL}/delete`, JSON.stringify({ assistant_id: assistantId }));
57 | // Or a synchronous fetch/XHR:
58 | // const xhr = new XMLHttpRequest();
59 | // xhr.open("POST", `${BASE_URL}/delete`, false /* synchronous */);
60 | // xhr.setRequestHeader("Content-Type", "application/json");
61 | // xhr.send(JSON.stringify({ assistant_id: assistantId }));
62 | }
63 | };
64 |
65 | window.addEventListener("beforeunload", handleBeforeUnload);
66 | return () => {
67 | window.removeEventListener("beforeunload", handleBeforeUnload);
68 | };
69 | }, [assistantId, isCallActive]);
70 |
  // Register event handlers on the freshly constructed Vapi client.
  const setupVapiListeners = () => {
    if (!vapiRef.current) return;

    // A new call begins: reset all per-call UI state.
    vapiRef.current.on("call-start", () => {
      console.log("Call has started");
      setIsCallActive(true);
      setMessages([]); // Clear previous messages
      setTranscript(null);
      setSummary(null);
      setRecordingUrl(null);
    });

    vapiRef.current.on("call-end", async () => {
      console.log("Call has ended");
      setIsCallActive(false);
      await fetchEndOfCallReport();

      // On normal call-end, also delete the assistant from the backend
      // NOTE(review): this handler is registered once on mount, when
      // assistantId is still null; the closure never sees later values, so
      // this delete likely never fires. stopCall performs the real deletion.
      if (assistantId) {
        await deleteAssistant(assistantId);
      }
    });

    vapiRef.current.on("message", (message) => {
      console.log("Message from assistant:", message);
      // NOTE(review): assumes the event payload exposes `.text` — verify
      // against the Vapi SDK message shape (transcript events use other fields).
      setMessages((prev) => [...prev, { role: "assistant", content: message.text }]);
    });

    vapiRef.current.on("error", (e) => {
      console.error("Vapi error:", e);
    });

    vapiRef.current.on("speech-start", () => {
      console.log("Assistant speech has started.");
    });

    vapiRef.current.on("speech-end", () => {
      console.log("Assistant speech has ended.");
    });
  };
111 |
112 | // Fetch End-of-Call Report when the call ends
113 | const fetchEndOfCallReport = async () => {
114 | try {
115 | const response = await fetch("/api/vapi-end-report"); // Example path to your backend
116 | if (!response.ok) throw new Error("Failed to fetch end-of-call report");
117 |
118 | const data = await response.json();
119 | console.log("End-of-Call Report:", data);
120 |
121 | setTranscript(data.message.transcript || "No transcript available");
122 | setSummary(data.message.summary || "No summary available");
123 | setRecordingUrl(data.message.recordingUrl || null);
124 |
125 | if (data.message.messages) {
126 | setMessages((prev) => [
127 | ...prev,
128 | ...data.message.messages.map((m: any) => ({ role: m.role, content: m.message })),
129 | ]);
130 | }
131 | } catch (error) {
132 | console.error("Error fetching End-of-Call Report:", error);
133 | }
134 | };
135 |
136 | // Create the assistant via Flask
137 | const createAssistant = async (): Promise => {
138 | try {
139 | const response = await fetch(`${BASE_URL}/create`, {
140 | method: "POST",
141 | credentials: "include",
142 | headers: { "Content-Type": "application/json" },
143 | body: JSON.stringify({
144 | course_title: dynamicTitle
145 | })
146 | });
147 | if (!response.ok) throw new Error("Failed to create assistant");
148 |
149 | const data = await response.json();
150 | console.log("Assistant created:", data);
151 | setAssistantId(data.assistant_id);
152 | return data.assistant_id;
153 | } catch (error) {
154 | console.error("Error creating assistant:", error);
155 | return null;
156 | }
157 | };
158 |
159 | // Delete the assistant
160 | const deleteAssistant = async (id: string) => {
161 | try {
162 | // If we never actually created an assistant, skip
163 | if (!id) return;
164 |
165 | const response = await fetch(`${BASE_URL}/delete`, {
166 | credentials: "include",
167 | method: "POST",
168 | headers: { "Content-Type": "application/json" },
169 | body: JSON.stringify({ assistant_id: id }),
170 | });
171 |
172 | if (!response.ok) throw new Error("Failed to delete assistant");
173 |
174 | console.log("Assistant deleted successfully");
175 | setAssistantId(null);
176 | } catch (error) {
177 | console.error("Error deleting assistant:", error);
178 | }
179 | };
180 |
181 | // Start the call by first creating an assistant
182 | const startCall = async () => {
183 | if (!vapiRef.current) return;
184 |
185 | const newAssistantId = await createAssistant();
186 | if (!newAssistantId) return;
187 |
188 | try {
189 | // With Vapi Web SDK v2, you can pass just the assistantId
190 | await vapiRef.current.start(newAssistantId);
191 | console.log("Call started");
192 | } catch (error) {
193 | console.error("Error starting call:", error);
194 | }
195 | };
196 |
197 | // Stop the call - triggers "call-end" event in the VAPI SDK
198 | const stopCall = () => {
199 | if (vapiRef.current) {
200 | vapiRef.current.stop();
201 | console.log("Call stopped");
202 | deleteAssistant(assistantId || "").then(r => {
203 | console.log("Assistant deleted");
204 | })
205 | }
206 | };
207 |
208 | return (
209 |
210 |
211 |
212 | Voice Chat with VAPI AI
213 |
214 |
215 | {messages.map((m, index) => (
216 |
217 |
221 | {m.content}
222 |
223 |
224 | ))}
225 |
226 | {transcript && (
227 |
228 |
Full Transcript:
229 |
{transcript}
230 |
231 | )}
232 |
233 | {summary && (
234 |
235 |
Summary:
236 |
{summary}
237 |
238 | )}
239 |
240 | {recordingUrl && (
241 |
247 | )}
248 |
249 |
250 |
251 | Start Call
252 |
253 |
254 | Stop Call
255 |
256 |
257 |
258 |
259 | );
260 | }
--------------------------------------------------------------------------------