├── .gitignore
├── .DS_Store
├── public
│   ├── favicon.ico
│   ├── vercel.svg
│   ├── window.svg
│   ├── file.svg
│   ├── globe.svg
│   └── next.svg
├── jsconfig.json
├── next.config.mjs
├── pages
│   ├── api
│   │   └── hello.js
│   ├── _document.js
│   ├── _app.js
│   ├── index.js
│   ├── landing.js
│   ├── pip-log.js
│   └── interview.js
├── store.js
├── redux
│   ├── historySlice.js
│   ├── transcriptionSlice.js
│   └── aiResponseSlice.js
├── reducers.js
├── Dockerfile
├── package.json
├── styles
│   ├── globals.css
│   └── Home.module.css
├── utils
│   └── config.js
├── theme.js
├── README.md
└── components
    └── SettingsDialog.js
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 |
--------------------------------------------------------------------------------
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hariiprasad/interviewcopilot/HEAD/.DS_Store
--------------------------------------------------------------------------------
/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hariiprasad/interviewcopilot/HEAD/public/favicon.ico
--------------------------------------------------------------------------------
/jsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "paths": {
4 | "@/*": ["./*"]
5 | }
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
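The `@/*` entry above aliases the project root, so any file can use root-relative imports. A minimal sketch (the first import is the real usage in `pages/_app.js`; the second is illustrative):

```js
// Both resolve through the "@/*" -> "./*" mapping in jsconfig.json
import "@/styles/globals.css";              // used verbatim in pages/_app.js
import { getConfig } from "@/utils/config"; // illustrative; interview.js actually uses a relative path
```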
/public/vercel.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/next.config.mjs:
--------------------------------------------------------------------------------
1 | /** @type {import('next').NextConfig} */
2 | const nextConfig = {
3 | reactStrictMode: true,
4 | };
5 |
6 | export default nextConfig;
7 |
--------------------------------------------------------------------------------
/pages/api/hello.js:
--------------------------------------------------------------------------------
1 | // Next.js API route support: https://nextjs.org/docs/api-routes/introduction
2 |
3 | export default function handler(req, res) {
4 | res.status(200).json({ name: "John Doe" });
5 | }
6 |
--------------------------------------------------------------------------------
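A quick way to exercise the route above from any client-side code; this is a generic fetch sketch, not something the app itself depends on:

```js
// GET /api/hello responds with { name: "John Doe" }
const res = await fetch('/api/hello');
const data = await res.json();
console.log(data.name); // "John Doe"
```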
/store.js:
--------------------------------------------------------------------------------
1 | import { configureStore } from '@reduxjs/toolkit';
2 | import rootReducer from './reducers';
3 |
4 | const store = configureStore({
5 | reducer: rootReducer,
6 | });
7 |
8 | export default store;
9 |
--------------------------------------------------------------------------------
/pages/_document.js:
--------------------------------------------------------------------------------
1 | import { Html, Head, Main, NextScript } from "next/document";
2 |
3 | export default function Document() {
4 | return (
5 |     <Html lang="en">
6 |       <Head />
7 |       <body>
8 |         <Main />
9 |         <NextScript />
10 |       </body>
11 |     </Html>
12 | );
13 | }
14 |
--------------------------------------------------------------------------------
/public/window.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/public/file.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/redux/historySlice.js:
--------------------------------------------------------------------------------
1 | import { createSlice } from '@reduxjs/toolkit';
2 |
3 | const historySlice = createSlice({
4 | name: 'history',
5 | initialState: [],
6 | reducers: {
7 | addToHistory: (state, action) => {
8 | state.push(action.payload);
9 | },
10 | },
11 | });
12 |
13 | export const { addToHistory } = historySlice.actions;
14 | export default historySlice.reducer;
15 |
--------------------------------------------------------------------------------
/reducers.js:
--------------------------------------------------------------------------------
1 | import { combineReducers } from 'redux';
2 | import transcriptionReducer from './redux/transcriptionSlice';
3 | import aiResponseReducer from './redux/aiResponseSlice';
4 | import historyReducer from './redux/historySlice';
5 |
6 | const rootReducer = combineReducers({
7 | transcription: transcriptionReducer,
8 | aiResponse: aiResponseReducer,
9 | history: historyReducer,
10 | });
11 |
12 | export default rootReducer;
13 |
--------------------------------------------------------------------------------
/redux/transcriptionSlice.js:
--------------------------------------------------------------------------------
1 | import { createSlice } from '@reduxjs/toolkit';
2 |
3 | const transcriptionSlice = createSlice({
4 | name: 'transcription',
5 | initialState: '',
6 | reducers: {
7 | setTranscription: (state, action) => action.payload,
8 | clearTranscription: () => ''
9 | },
10 | });
11 |
12 | export const { setTranscription, clearTranscription } = transcriptionSlice.actions;
13 | export default transcriptionSlice.reducer;
14 |
--------------------------------------------------------------------------------
/redux/aiResponseSlice.js:
--------------------------------------------------------------------------------
1 | import { createSlice } from '@reduxjs/toolkit';
2 |
3 | export const aiResponseSlice = createSlice({
4 | name: 'aiResponse',
5 | initialState: '',
6 | reducers: {
7 | setAIResponse: (state, action) => action.payload,
8 | appendAIResponse: (state, action) => state + action.payload,
9 | },
10 | });
11 |
12 | export const { setAIResponse, appendAIResponse } = aiResponseSlice.actions;
13 | export default aiResponseSlice.reducer;
--------------------------------------------------------------------------------
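Taken together with `store.js` and `reducers.js`, the three slices above give components a single dispatch/select surface. A minimal consumer sketch (the component is hypothetical; the actions, state keys, and the `{ type: 'question', text }` history shape are the real ones used in `pages/interview.js`):

```js
import { useDispatch, useSelector } from 'react-redux';
import { setTranscription } from '@/redux/transcriptionSlice';
import { appendAIResponse } from '@/redux/aiResponseSlice';
import { addToHistory } from '@/redux/historySlice';

function ExampleConsumer() { // hypothetical component
  const dispatch = useDispatch();
  // Keys match the combineReducers() map in reducers.js
  const transcription = useSelector(state => state.transcription);
  const aiResponse = useSelector(state => state.aiResponse);

  const onRecognized = (text) => {
    dispatch(setTranscription(text));                   // replace the live transcript
    dispatch(addToHistory({ type: 'question', text })); // shape used by interview.js
  };
  const onAIChunk = (chunk) => dispatch(appendAIResponse(chunk)); // stream AI text

  return null; // rendering omitted; transcription/aiResponse would be displayed here
}
```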
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use an official Node.js runtime as the base image
2 | FROM node:18-alpine
3 |
4 | # Set the working directory inside the container
5 | WORKDIR /app
6 |
7 | # Copy package.json and package-lock.json first to leverage Docker layer caching
8 | COPY package*.json ./
9 |
10 | # Install dependencies
11 | RUN npm install
12 |
13 | # Copy the rest of the application code into the container
14 | COPY . .
15 |
16 | # Ensure all files have the correct permissions
17 | RUN chmod -R 755 /app
18 |
19 | # Build the Next.js application
20 | RUN npm run build
21 |
22 | # Expose the port your app will run on
23 | EXPOSE 3000
24 |
25 | # Define the command to start your app
26 | CMD ["npm", "start"]
27 |
--------------------------------------------------------------------------------
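A typical build-and-run sequence for the image above (standard Docker commands; the tag name is arbitrary):

```bash
docker build -t interviewcopilot .
docker run -p 3000:3000 interviewcopilot
# next start listens on 3000, matching the EXPOSE above
```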
/pages/_app.js:
--------------------------------------------------------------------------------
1 | import { Provider } from 'react-redux';
2 | import "@/styles/globals.css"; // Ensure this is imported before theme for overrides to work correctly
3 | import store from '../store';
4 | import { ThemeProvider } from '@mui/material/styles';
5 | import CssBaseline from '@mui/material/CssBaseline';
6 | import theme from '../theme'; // Import the custom theme
7 |
8 | export default function App({ Component, pageProps }) {
9 | return (
10 |     <Provider store={store}>
11 |       <ThemeProvider theme={theme}>
12 |         {/* CssBaseline kickstarts an elegant, consistent, and simple baseline to build upon. */}
13 |         <CssBaseline />
14 |         <Component {...pageProps} />
15 |       </ThemeProvider>
16 |     </Provider>
17 | );
18 | }
19 |
--------------------------------------------------------------------------------
/pages/index.js:
--------------------------------------------------------------------------------
1 | // final/pages/index.js
2 | import { useEffect } from 'react';
3 | import { useRouter } from 'next/router';
4 | import CircularProgress from '@mui/material/CircularProgress';
5 | import Box from '@mui/material/Box';
6 |
7 | // This page will simply redirect to the landing page.
8 | // It can also be used for initial auth checks or loading states in the future.
9 | export default function Index() {
10 | const router = useRouter();
11 |
12 | useEffect(() => {
13 | router.replace('/landing'); // Redirect to the landing page
14 | }, [router]);
15 |
16 | // Optional: Show a loading indicator while redirecting
17 | return (
18 |     <Box
19 |       sx={{
20 |         display: 'flex',
21 |         justifyContent: 'center',
22 |         alignItems: 'center',
23 |         minHeight: '100vh',
24 |       }}
25 |     >
26 |       <CircularProgress />
27 |     </Box>
28 |
29 | );
30 | }
31 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "latest",
3 | "version": "0.1.0",
4 | "private": true,
5 | "scripts": {
6 | "dev": "next dev",
7 | "build": "next build",
8 | "start": "next start",
9 | "lint": "next lint"
10 | },
11 | "dependencies": {
12 | "@emotion/react": "^11.14.0",
13 | "@emotion/styled": "^11.14.0",
14 | "@google/generative-ai": "^0.23.0",
15 | "@mui/icons-material": "^6.4.1",
16 | "@mui/material": "^6.4.1",
17 | "@reduxjs/toolkit": "^2.5.1",
18 | "highlight.js": "^11.11.1",
19 | "html2canvas": "^1.4.1",
20 | "lodash.throttle": "^4.1.1",
21 | "microsoft-cognitiveservices-speech-sdk": "^1.42.0",
22 | "next": "15.1.6",
23 | "openai": "^4.80.1",
24 | "react": "^19.0.0",
25 | "react-dom": "^19.0.0",
26 | "react-markdown": "^9.0.3",
27 | "react-redux": "^9.2.0",
28 | "react-scroll-to-bottom": "^4.2.0",
29 | "react-syntax-highlighter": "^15.6.1"
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/public/globe.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/public/next.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/styles/globals.css:
--------------------------------------------------------------------------------
1 | /* final/styles/globals.css */
2 | html,
3 | body {
4 | padding: 0;
5 | margin: 0;
6 | font-family: "Roboto", "Helvetica", "Arial", sans-serif; /* Default font from theme */
7 | -webkit-font-smoothing: antialiased;
8 | -moz-osx-font-smoothing: grayscale;
9 | height: 100%; /* Ensure html and body take full height */
10 | }
11 |
12 | #__next { /* Next.js wrapper div */
13 | height: 100%;
14 | display: flex;
15 | flex-direction: column;
16 | }
17 |
18 | a {
19 | color: inherit;
20 | text-decoration: none;
21 | }
22 |
23 | * {
24 | box-sizing: border-box;
25 | }
26 |
27 | /*
28 | Any styles previously in Home.module.css that were specific to the interview page
29 | should be reviewed. If they are general layout or component styles, they should ideally
30 | be handled by the Material UI theme or inline sx props in interview.js.
31 | If they are very specific and complex, they could remain in a module.css file imported
32 | into interview.js, but the goal is to rely more on the MUI theme for consistency.
33 | */
34 |
35 | /* Example: If you had specific code block styling in Home.module.css */
36 | /*
37 | .code-block {
38 | background-color: #f5f5f5;
39 | padding: 1em;
40 | border-radius: 4px;
41 | overflow-x: auto;
42 | font-family: 'Courier New', Courier, monospace;
43 | }
44 | .inline-code {
45 | background-color: #e0e0e0;
46 | padding: 0.2em 0.4em;
47 | border-radius: 3px;
48 | font-family: 'Courier New', Courier, monospace;
49 | }
50 | */
51 | /* The above would now be handled by ReactMarkdown component's styling or hljs theme */
52 |
53 |
--------------------------------------------------------------------------------
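As the comment above notes, code styling now comes from ReactMarkdown plus the imported highlight.js theme. A hedged sketch of what that wiring can look like (the `Markdown` wrapper is hypothetical; `pages/pip-log.js` and `pages/interview.js` contain the app's real renderers):

```jsx
import ReactMarkdown from 'react-markdown';
import hljs from 'highlight.js';
import 'highlight.js/styles/atom-one-dark.css';

// Sketch: route fenced code blocks through highlight.js so they pick up
// the atom-one-dark theme imported above.
function Markdown({ text }) { // hypothetical wrapper component
  return (
    <ReactMarkdown
      components={{
        code({ className, children, ...props }) {
          const lang = /language-(\w+)/.exec(className || '')?.[1];
          if (lang && hljs.getLanguage(lang)) {
            const html = hljs.highlight(String(children), { language: lang }).value;
            return <code className={`hljs ${className}`} dangerouslySetInnerHTML={{ __html: html }} />;
          }
          return <code className={className} {...props}>{children}</code>; // inline code
        },
      }}
    >
      {text}
    </ReactMarkdown>
  );
}
```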
/utils/config.js:
--------------------------------------------------------------------------------
1 | export const builtInModelGroups = [
2 | {
3 | name: "OpenAI Models",
4 | models: [
5 | { value: "gpt-3.5-turbo", label: "GPT-3.5 Turbo" },
6 | { value: "gpt-4", label: "GPT-4" },
7 | { value: "gpt-4-turbo-preview", label: "GPT-4 Turbo Preview" },
8 | { value: "gpt-4o", label: "GPT-4o (Omni)" },
9 | ]
10 | },
11 | {
12 | name: "Gemini Models",
13 | models: [
14 | { value: "gemini-1.5-flash", label: "Gemini 1.5 Flash" },
15 | { value: "gemini-1.5-pro", label: "Gemini 1.5 Pro" },
16 | { value: "gemini-2.0-flash", label: "Gemini 2.0 Flash " },
17 | { value: "gemini-2.0-pro", label: "Gemini 2.0 Pro " },
18 | { value: "gemini-2.5-flash-preview-05-20", label: "Gemini 2.5 Flash Preview (05-20)" },
19 | { value: "gemini-2.5-pro-preview-05-06", label: "Gemini 2.5 Pro Preview (05-06)" },
20 | ]
21 | }
22 | ];
23 |
24 |
25 | const defaultConfig = {
26 | openaiKey: '',
27 | geminiKey: '',
28 | aiModel: 'gpt-3.5-turbo', // Default to a common one
29 | silenceTimerDuration: 1.2,
30 | responseLength: 'medium',
31 | gptSystemPrompt: `You are an AI interview assistant. Your role is to:
32 | - Highlight key points in responses
33 | - Suggest related technical concepts to explore
34 | - Maintain professional tone`,
35 | azureToken: '',
36 | azureRegion: 'eastus',
37 | azureLanguage: 'en-US',
38 | customModels: [], // Array for user-added models { value: 'model-id', label: 'Display Name', type: 'openai' | 'gemini' }
39 | systemAutoMode: true,
40 | isManualMode: false,
41 | };
42 |
43 | export function getConfig() {
44 | if (typeof window !== 'undefined') {
45 | const storedConfig = localStorage.getItem('interviewCopilotConfig');
46 | let parsed = storedConfig ? JSON.parse(storedConfig) : {};
47 |
48 | // Migrate old config format for aiModel if gptModel exists
49 | if (parsed.gptModel && !parsed.aiModel) {
50 | parsed.aiModel = parsed.gptModel;
51 | delete parsed.gptModel;
52 | }
53 | // Ensure customModels is an array
54 | if (!Array.isArray(parsed.customModels)) {
55 | parsed.customModels = [];
56 | }
57 |
58 | return { ...defaultConfig, ...parsed };
59 | }
60 | return defaultConfig;
61 | }
62 |
63 | export function setConfig(config) {
64 | if (typeof window !== 'undefined') {
65 | // Ensure customModels is an array before saving
66 | const configToSave = {
67 | ...config,
68 | customModels: Array.isArray(config.customModels) ? config.customModels : []
69 | };
70 | localStorage.setItem('interviewCopilotConfig', JSON.stringify(configToSave));
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
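A short sketch of persisting settings through the helpers above (the custom-model entry is illustrative; its shape matches the `customModels` comment in `defaultConfig`):

```js
import { getConfig, setConfig } from '@/utils/config';

// Merged view: defaults overlaid with whatever is in localStorage
const config = getConfig();

// Register a user-added model and select it, then persist the result
setConfig({
  ...config,
  customModels: [
    ...config.customModels,
    { value: 'gpt-4o-mini', label: 'GPT-4o Mini', type: 'openai' }, // illustrative entry
  ],
  aiModel: 'gpt-4o-mini',
});
```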
/styles/Home.module.css:
--------------------------------------------------------------------------------
1 | .page {
2 | --gray-rgb: 0, 0, 0;
3 | --gray-alpha-200: rgba(var(--gray-rgb), 0.08);
4 | --gray-alpha-100: rgba(var(--gray-rgb), 0.05);
5 |
6 | --button-primary-hover: #383838;
7 | --button-secondary-hover: #f2f2f2;
8 |
9 | display: grid;
10 | grid-template-rows: 20px 1fr 20px;
11 | align-items: center;
12 | justify-items: center;
13 | min-height: 100svh;
14 | padding: 80px;
15 | gap: 64px;
16 | font-family: var(--font-geist-sans);
17 | }
18 |
19 | @media (prefers-color-scheme: dark) {
20 | .page {
21 | --gray-rgb: 255, 255, 255;
22 | --gray-alpha-200: rgba(var(--gray-rgb), 0.145);
23 | --gray-alpha-100: rgba(var(--gray-rgb), 0.06);
24 |
25 | --button-primary-hover: #ccc;
26 | --button-secondary-hover: #1a1a1a;
27 | }
28 | }
29 |
30 | .main {
31 | display: flex;
32 | flex-direction: column;
33 | gap: 32px;
34 | grid-row-start: 2;
35 | }
36 |
37 | .main ol {
38 | font-family: var(--font-geist-mono);
39 | padding-left: 0;
40 | margin: 0;
41 | font-size: 14px;
42 | line-height: 24px;
43 | letter-spacing: -0.01em;
44 | list-style-position: inside;
45 | }
46 |
47 | .main li:not(:last-of-type) {
48 | margin-bottom: 8px;
49 | }
50 |
51 | .main code {
52 | font-family: inherit;
53 | background: var(--gray-alpha-100);
54 | padding: 2px 4px;
55 | border-radius: 4px;
56 | font-weight: 600;
57 | }
58 |
59 | .ctas {
60 | display: flex;
61 | gap: 16px;
62 | }
63 |
64 | .ctas a {
65 | appearance: none;
66 | border-radius: 128px;
67 | height: 48px;
68 | padding: 0 20px;
69 | border: none;
70 | border: 1px solid transparent;
71 | transition:
72 | background 0.2s,
73 | color 0.2s,
74 | border-color 0.2s;
75 | cursor: pointer;
76 | display: flex;
77 | align-items: center;
78 | justify-content: center;
79 | font-size: 16px;
80 | line-height: 20px;
81 | font-weight: 500;
82 | }
83 |
84 | a.primary {
85 | background: var(--foreground);
86 | color: var(--background);
87 | gap: 8px;
88 | }
89 |
90 | a.secondary {
91 | border-color: var(--gray-alpha-200);
92 | min-width: 180px;
93 | }
94 |
95 | .footer {
96 | grid-row-start: 3;
97 | display: flex;
98 | gap: 24px;
99 | }
100 |
101 | .footer a {
102 | display: flex;
103 | align-items: center;
104 | gap: 8px;
105 | }
106 |
107 | .footer img {
108 | flex-shrink: 0;
109 | }
110 |
111 | /* Enable hover only on non-touch devices */
112 | @media (hover: hover) and (pointer: fine) {
113 | a.primary:hover {
114 | background: var(--button-primary-hover);
115 | border-color: transparent;
116 | }
117 |
118 | a.secondary:hover {
119 | background: var(--button-secondary-hover);
120 | border-color: transparent;
121 | }
122 |
123 | .footer a:hover {
124 | text-decoration: underline;
125 | text-underline-offset: 4px;
126 | }
127 | }
128 |
129 | @media (max-width: 600px) {
130 | .page {
131 | padding: 32px;
132 | padding-bottom: 80px;
133 | }
134 |
135 | .main {
136 | align-items: center;
137 | }
138 |
139 | .main ol {
140 | text-align: center;
141 | }
142 |
143 | .ctas {
144 | flex-direction: column;
145 | }
146 |
147 | .ctas a {
148 | font-size: 14px;
149 | height: 40px;
150 | padding: 0 16px;
151 | }
152 |
153 | a.secondary {
154 | min-width: auto;
155 | }
156 |
157 | .footer {
158 | flex-wrap: wrap;
159 | align-items: center;
160 | justify-content: center;
161 | }
162 | }
163 |
164 | @media (prefers-color-scheme: dark) {
165 | .logo {
166 | filter: invert();
167 | }
168 | }
169 |
--------------------------------------------------------------------------------
/theme.js:
--------------------------------------------------------------------------------
1 | import { createTheme } from '@mui/material/styles';
2 | import { red, grey } from '@mui/material/colors';
3 |
4 | // Create a theme instance.
5 | const theme = createTheme({
6 | palette: {
7 | primary: {
8 | main: '#1976d2', // A professional blue
9 | light: '#42a5f5',
10 | dark: '#1565c0',
11 | contrastText: '#ffffff',
12 | },
13 | secondary: {
14 | main: '#dc004e', // A vibrant pink/red for accents
15 | light: '#ff4081',
16 | dark: '#9a0036',
17 | contrastText: '#ffffff',
18 | },
19 | error: {
20 | main: red.A400,
21 | },
22 | background: {
23 | default: grey[100], // A very light grey for the app background
24 | paper: '#ffffff', // White for paper elements
25 | },
26 | text: {
27 | primary: grey[900], // Dark grey for primary text
28 | secondary: grey[700], // Medium grey for secondary text
29 | }
30 | },
31 | typography: {
32 | fontFamily: '"Roboto", "Helvetica", "Arial", sans-serif',
33 | h4: {
34 | fontWeight: 700,
35 | color: '#333333', // Darker color for main headings
36 | marginBottom: '0.75em',
37 | },
38 | h5: {
39 | fontWeight: 600,
40 | color: '#444444',
41 | marginBottom: '0.5em',
42 | },
43 | h6: {
44 | fontWeight: 600,
45 | color: '#555555', // Slightly lighter for subheadings
46 | marginBottom: '0.5em',
47 | },
48 | button: {
49 | textTransform: 'none', // Keep button text case as is
50 | fontWeight: 500,
51 | letterSpacing: '0.5px',
52 | },
53 | body1: {
54 | lineHeight: 1.6,
55 | },
56 | caption: {
57 | color: grey[600],
58 | }
59 | },
60 | shape: {
61 | borderRadius: 8, // Default border radius for components
62 | },
63 | components: {
64 | MuiPaper: {
65 | styleOverrides: {
66 | root: {
67 | // borderRadius: 12, // Slightly more rounded corners for paper
68 | boxShadow: '0px 5px 15px rgba(0,0,0,0.08)', // Softer, more modern shadow
69 | }
70 | }
71 | },
72 | MuiButton: {
73 | styleOverrides: {
74 | root: {
75 | // borderRadius: 8, // Consistent rounded corners for buttons
76 | padding: '10px 20px', // More generous padding
77 | boxShadow: 'none', // Remove default button shadow for a flatter look, can add on hover
78 | '&:hover': {
79 | boxShadow: '0px 2px 8px rgba(0,0,0,0.1)', // Subtle shadow on hover
80 | }
81 | },
82 | containedPrimary: {
83 | '&:hover': {
84 | backgroundColor: '#1565c0', // Darken primary on hover
85 | }
86 | },
87 | containedSecondary: {
88 | '&:hover': {
89 | backgroundColor: '#9a0036', // Darken secondary on hover
90 | }
91 | },
92 | }
93 | },
94 | MuiTextField: {
95 | styleOverrides: {
96 | root: {
97 | '& .MuiOutlinedInput-root': {
98 | // borderRadius: 8, // Rounded corners for text fields
99 | '& fieldset': {
100 | // borderColor: 'rgba(0, 0, 0, 0.23)',
101 | },
102 | '&:hover fieldset': {
103 | borderColor: '#1976d2', // Primary color border on hover
104 | },
105 | // '&.Mui-focused fieldset': {
106 | // borderColor: '#1976d2', // Primary color border when focused
107 | // },
108 | },
109 | '& .MuiInputLabel-root.Mui-focused': {
110 | color: '#1976d2', // Primary color for label when focused
111 | }
112 | }
113 | }
114 | },
115 | MuiIconButton: {
116 | styleOverrides: {
117 | root: {
118 | '&:hover': {
119 | backgroundColor: 'rgba(0, 0, 0, 0.06)' // Standard hover for icon buttons
120 | }
121 | }
122 | }
123 | },
124 | MuiChip: {
125 | styleOverrides: {
126 | root: {
127 | // borderRadius: 16, // More rounded chips
128 | fontWeight: 500,
129 | }
130 | }
131 | },
132 | MuiAppBar: {
133 | styleOverrides: {
134 | root: {
135 | boxShadow: '0px 2px 4px -1px rgba(0,0,0,0.06), 0px 4px 5px 0px rgba(0,0,0,0.04), 0px 1px 10px 0px rgba(0,0,0,0.03)', // Softer app bar shadow
136 | }
137 | }
138 | },
139 | MuiList: {
140 | styleOverrides: {
141 | root: {
142 | '& .MuiListItem-root': {
143 | borderRadius: 8, // Rounded list items if they are interactive
144 | }
145 | }
146 | }
147 | },
148 | MuiCard: {
149 | styleOverrides: {
150 | root: {
151 | // borderRadius: 12,
152 | // boxShadow: '0px 5px 15px rgba(0,0,0,0.08)',
153 | }
154 | }
155 | }
156 | }
157 | });
158 |
159 | export default theme;
--------------------------------------------------------------------------------
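Components receive this theme through the `ThemeProvider` in `pages/_app.js`. A small sketch of consuming it (hypothetical component; the palette keys and 8px radius come from the theme above):

```jsx
import { useTheme } from '@mui/material/styles';
import Box from '@mui/material/Box';

function ThemedPanel() { // hypothetical
  const theme = useTheme();
  return (
    <Box sx={{ bgcolor: 'background.default', p: 2, borderRadius: `${theme.shape.borderRadius}px` }}>
      Styled by the palette and shape defined in theme.js
    </Box>
  );
}
```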
/pages/landing.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import Head from 'next/head';
3 | import { Box, Button, Container, Grid, Paper, Typography, Avatar } from '@mui/material';
4 | import { styled } from '@mui/material/styles';
5 | import ArrowForwardIcon from '@mui/icons-material/ArrowForward';
6 | import MicIcon from '@mui/icons-material/Mic';
7 | import RecordVoiceOverIcon from '@mui/icons-material/RecordVoiceOver';
8 | import CodeIcon from '@mui/icons-material/Code';
9 | import QuestionAnswerIcon from '@mui/icons-material/QuestionAnswer';
10 | import SettingsIcon from '@mui/icons-material/Settings';
11 | import SpeedIcon from '@mui/icons-material/Speed';
12 | import Link from 'next/link'; // Import Next.js Link
13 |
14 | const HeroSection = styled(Box)(({ theme }) => ({
15 | backgroundColor: theme.palette.primary.main, // Use primary color from theme
16 | color: theme.palette.primary.contrastText,
17 | padding: theme.spacing(12, 2),
18 | textAlign: 'center',
19 | display: 'flex',
20 | flexDirection: 'column',
21 | alignItems: 'center',
22 | justifyContent: 'center',
23 | minHeight: '70vh', // Make hero section taller
24 | clipPath: 'ellipse(150% 100% at 50% 0%)', // Soft curve at the bottom
25 | }));
26 |
27 | const FeaturePaper = styled(Paper)(({ theme }) => ({
28 | padding: theme.spacing(3),
29 | textAlign: 'center',
30 | height: '100%',
31 | display: 'flex',
32 | flexDirection: 'column',
33 | alignItems: 'center',
34 | justifyContent: 'flex-start', // Align items to the top
35 | boxShadow: theme.shadows[3], // Use a subtle shadow from theme
36 | transition: 'transform 0.3s ease-in-out, box-shadow 0.3s ease-in-out',
37 | '&:hover': {
38 | transform: 'translateY(-5px)',
39 | boxShadow: theme.shadows[6],
40 | }
41 | }));
42 |
43 | const FeatureIcon = styled(Avatar)(({ theme }) => ({
44 | backgroundColor: theme.palette.secondary.main,
45 | color: theme.palette.secondary.contrastText,
46 | width: theme.spacing(7),
47 | height: theme.spacing(7),
48 | marginBottom: theme.spacing(2),
49 | }));
50 |
51 | const Section = styled(Box)(({ theme }) => ({
52 | padding: theme.spacing(8, 2),
53 | }));
54 |
55 | const Footer = styled(Box)(({ theme }) => ({
56 | backgroundColor: theme.palette.background.paper,
57 | color: theme.palette.text.secondary,
58 | padding: theme.spacing(4, 2),
59 | textAlign: 'center',
60 | borderTop: `1px solid ${theme.palette.divider}`,
61 | }));
62 |
63 | const features = [
64 | {
65 | icon: <RecordVoiceOverIcon />,
66 | title: 'Real-time Transcription',
67 | description: 'Accurate voice-to-text for both interviewer and candidate, powered by Azure Cognitive Services.',
68 | },
69 | {
70 | icon: <QuestionAnswerIcon />,
71 | title: 'AI-Powered Insights',
72 | description: 'Intelligent responses and suggestions with conversational context awareness using OpenAI/Gemini models.',
73 | },
74 | {
75 | icon: <CodeIcon />,
76 | title: 'Code Formatting',
77 | description: 'Clear syntax highlighting for technical discussions, making code easy to read and understand.',
78 | },
79 | {
80 | icon: <SpeedIcon />,
81 | title: 'Silence Detection',
82 | description: 'Automatically submit questions or responses after a configurable period of silence for a smoother flow.',
83 | },
84 | {
85 | icon: <SettingsIcon />,
86 | title: 'Customizable Settings',
87 | description: 'Tailor AI models, API keys, and behavior to your specific needs and preferences.',
88 | },
89 | ];
90 |
91 | export default function LandingPage() {
92 | return (
93 | <>
94 |
95 | Interview Copilot - Your AI-Powered Interview Assistant
96 |
97 | {/* Remember to add a favicon */}
98 |
99 |
100 |
101 |
102 |
103 | Interview Copilot
104 |
105 |
106 | Elevate your technical interviews with AI-powered real-time transcription, intelligent suggestions, and seamless assistance. Focus on the conversation, let us handle the notes.
107 |
108 |
109 | }
114 | sx={{
115 | padding: '12px 30px',
116 | fontSize: '1.1rem',
117 | boxShadow: '0px 4px 15px rgba(0,0,0,0.2)',
118 | '&:hover': {
119 | boxShadow: '0px 6px 20px rgba(0,0,0,0.25)',
120 | transform: 'translateY(-2px)'
121 | }
122 | }}
123 | >
124 | Start Assisting
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 | Why Choose Interview Copilot?
134 |
135 |
136 | {features.map((feature) => (
137 |
138 | {/* Use elevation for subtle shadow */}
139 | {feature.icon}
140 |
141 | {feature.title}
142 |
143 |
144 | {feature.description}
145 |
146 |
147 |
148 | ))}
149 |
150 |
151 |
152 |
153 |
154 |
155 |
156 | About the Tool
157 |
158 |
159 | Interview Copilot is designed to be an indispensable assistant for technical interviews. Whether you're conducting interviews and need to capture key details, or you're a candidate wanting to review your performance, our tool provides the support you need.
160 |
161 |
162 | Our mission is to make interviews more productive and insightful by leveraging the power of AI, allowing participants to focus on what truly matters: the skills, experience, and potential being discussed.
163 |
164 |
165 |
166 |
167 |
175 | >
176 | );
177 | }
178 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Interview Copilot 🚀
2 |
3 | ## [aicopilot.chat](https://aicopilot.chat/)
4 |
5 | An AI-powered interview assistant that provides real-time transcription and intelligent responses during technical interviews, now supporting both OpenAI and the latest Gemini models.
6 |
7 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
8 |
9 | ## Features ✨
10 |
11 | - 🎙️ **Real-time Transcription**: High-accuracy voice-to-text for both the interviewer and candidate using Azure Cognitive Services.
12 | - 🤖 **AI-Powered Insights**: Get intelligent suggestions with conversational context awareness, powered by leading models from OpenAI and Google.
13 | - 🖼️ **Picture-in-Picture (PiP) Mode**: Keep an eye on the AI log in a separate, floating window so you can focus on the interview.
14 | - 💻 **Code Formatting**: Clear syntax highlighting for technical discussions makes code easy to read and understand.
15 | - ✨ **Enhanced UI**: A refreshed and more intuitive user interface for a seamless experience.
16 | - 🔄 **Latest AI Models**: Support for the newest models, including **Gemini 2.5 Pro** and **Gemini 2.5 Flash**.
17 | - 📚 **Question History**: Combine multiple questions from the history to ask the AI for a comprehensive analysis.
18 | - ⏱️ **Silence Detection**: Automatically submits recognized speech after a configurable period of silence for a smoother workflow.
19 | - ⚙️ **Highly Configurable**: Tailor AI models, API keys, response length, and system prompts to your exact needs.
20 |
21 | ## Technologies Used 🛠️
22 |
23 | - **Frontend**: React, Redux, Material-UI
24 | - **AI Services**: OpenAI GPT, Google Gemini, Azure Cognitive Services (Speech)
25 | - **Build Tools**: npm
26 | - **Other Libraries**: React Markdown, Highlight.js, Microsoft Cognitive Services Speech SDK
27 |
28 | ## Getting Started 🚀
29 |
30 | ### Prerequisites
31 |
32 | - Node.js (v18+)
33 | - npm (v9+)
34 | - **OpenAI API key**: Get your key from [OpenAI](https://platform.openai.com/docs/overview).
35 | - **Gemini API key**: Get your key from [Google AI Studio](https://aistudio.google.com/app/apikey).
36 | - **Azure Speech Service key**: Get a free trial key from [Microsoft Azure](https://azure.microsoft.com/en-us/pricing/purchase-options/azure-account).
37 |
38 | ### Installation
39 |
40 | 1. **Clone the repository**
41 |
42 | ```bash
43 | git clone https://github.com/hariiprasad/interviewcopilot.git
44 | cd interviewcopilot
45 | ```
46 |
47 | 2. **Install dependencies**
48 |
49 | ```bash
50 | npm install
51 | ```
52 |
53 | 3. **Run the development server**
54 |
55 | ```bash
56 | npm run dev
57 | ```
58 |
59 | 4. **Access the application**
60 | Open your browser to `http://localhost:3000`
61 |
62 | ## Configuration ⚙️
63 |
64 | 1. Open the **Settings** dialog (⚙️ icon in the header).
65 | 2. Enter your API credentials:
66 | - OpenAI API Key (for OpenAI models)
67 | - Gemini API Key (for Gemini models)
68 | - Azure Speech Service Key
69 | - Azure Region
70 | 3. Configure your preferences:
71 | - AI Model (Choose from OpenAI or Gemini models)
72 | - AI System Prompt
73 | - Auto-Submit & Manual modes
74 | - AI Response Length (concise, medium, lengthy)
75 | - Silence Timer Duration
76 |
77 | ## Usage 🖥️
78 |
79 | ### Main Interface Components
80 |
81 | 1. **System Audio Panel (Left)**
82 |
83 | - Start/Stop system audio capture for the interviewer.
84 | - View and edit the transcribed questions.
85 | - Manage and combine questions from history.
86 |
87 | 2. **AI Assistant Log (Center)**
88 |
89 | - View real-time AI responses.
90 | - Benefit from code formatting and syntax highlighting.
91 | - Access all previous response history.
92 | - Toggle auto-scroll and open the PiP window.
93 |
94 | 3. **Your Mic Panel (Right)**
95 |
96 | - Start/Stop your microphone for candidate audio.
97 | - Toggle manual input mode.
98 | - Manually submit your responses to the AI.
99 |
100 | ## Troubleshooting 🛠️
101 |
102 | **Common Issues:**
103 |
104 | 1. **Audio Permissions**: Ensure your browser has microphone access. If permissions were denied, refresh the page and allow access when prompted.
105 | 2. **API Errors**:
106 | - Double-check that your API keys in settings are correct.
107 | - Verify your internet connection.
108 | - Ensure the correct API key is provided for the selected AI model (e.g., Gemini key for Gemini models).
109 | 3. **Transcription Issues**: For best results, speak clearly with minimal background noise and verify your Azure Speech Service subscription is active.
110 |
111 | ## Contributing 🤝
112 |
113 | We welcome contributions! Please follow these steps:
114 |
115 | 1. Fork the repository.
116 | 2. Create your feature branch (`git checkout -b feature/AmazingFeature`).
117 | 3. Commit your changes (`git commit -m 'Add some AmazingFeature'`).
118 | 4. Push to the branch (`git push origin feature/AmazingFeature`).
119 | 5. Open a Pull Request.
120 |
121 | ## License 📄
122 |
123 | This project is licensed under the MIT License.
124 |
125 | ## Acknowledgments 🙏
126 |
127 | - OpenAI for their GPT models.
128 | - Google for the Gemini models.
129 | - Microsoft Azure for Cognitive Services.
130 | - The Material-UI team and the broader React community for their fantastic tools.
--------------------------------------------------------------------------------
/pages/pip-log.js:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect, useCallback, useRef } from 'react';
2 | import Head from 'next/head';
3 | import { Box, Typography, List, ListItem, Paper, Avatar, CircularProgress } from '@mui/material';
4 | import { ThemeProvider, createTheme, styled } from '@mui/material/styles';
5 | import CssBaseline from '@mui/material/CssBaseline';
6 | import ReactMarkdown from 'react-markdown';
7 | import hljs from 'highlight.js';
8 | import 'highlight.js/styles/atom-one-dark.css';
9 | import SmartToyIcon from '@mui/icons-material/SmartToy';
10 | import dynamic from 'next/dynamic';
11 |
12 |
13 | const ScrollToBottom = dynamic(() => import('react-scroll-to-bottom'), {
14 | ssr: false,
15 | });
16 |
17 |
18 | const pipTheme = createTheme({
19 |
20 | palette: { mode: 'light', background: { paper: '#ffffff', default: '#f0f0f0', }, text: { primary: '#111111', secondary: '#555555', }, primary: { main: '#1976d2', contrastText: '#ffffff', }, secondary: { light: '#ff80ab', main: '#f50057', contrastText: '#fff' }, grey: { 400: '#bdbdbd', 500: '#9e9e9e' } }, typography: { fontFamily: '"Roboto", "Helvetica", "Arial", sans-serif', body1: { fontSize: '13px', lineHeight: 1.5 }, body2: { fontSize: '12px', lineHeight: 1.45 }, caption: { fontSize: '11px' }, subtitle2: { fontSize: '13px', fontWeight: 'bold' }, }, components: { MuiPaper: { styleOverrides: { root: { padding: '6px 10px', marginBottom: '6px', borderRadius: '4px', boxShadow: '0 1px 2px rgba(0,0,0,0.1)', overflowWrap: 'break-word', } } }, MuiListItem: { styleOverrides: { root: { paddingTop: '3px', paddingBottom: '3px', alignItems: 'flex-start', } } }, MuiAvatar: { styleOverrides: { root: { width: 26, height: 26, marginRight: '8px', } } } }
21 | });
22 |
23 |
24 | const RootBox = styled(Box)(({ theme }) => ({
25 | height: '100%',
26 | position: 'relative',
27 | backgroundColor: theme.palette.background.default,
28 | overflow: 'hidden',
29 | }));
30 |
31 |
32 | export default function PipLogPage() {
33 | const [historicalResponses, setHistoricalResponses] = useState([]);
34 | const [currentStreamingText, setCurrentStreamingText] = useState('');
35 | const [isCurrentlyProcessing, setIsCurrentlyProcessing] = useState(false);
36 | const [sortOrder, setSortOrder] = useState('newestAtTop');
37 | const theme = pipTheme;
38 | const rootBoxRef = useRef(null);
39 |
40 | const formatAndDisplayResponse = useCallback((response) => {
41 | if (!response) return null;
42 | return (
43 |
49 |
50 |
51 | ) : (
52 | {children}
53 | );
54 | },
55 | p: ({node, ...props}) => ,
56 | strong: ({node, ...props}) => ,
57 | em: ({node, ...props}) => ,
58 | ul: ({node, ...props}) => ,
59 | ol: ({node, ...props}) => ,
60 | li: ({node, ...props}) => ,
61 | }}
62 | >
63 | {response}
64 |
65 | );
66 | }, []);
67 |
68 |
69 | useEffect(() => {
70 | const handleMessage = (event) => {
71 | const { type, payload } = event.data;
72 |
73 | if (type === 'AI_LOG_DATA') {
74 | const data = payload;
75 | setHistoricalResponses(Array.isArray(data.historicalResponses) ? data.historicalResponses : []);
76 | setCurrentStreamingText(data.currentStreamingText || '');
77 | setIsCurrentlyProcessing(data.isProcessing || false);
78 | if (data.sortOrder) {
79 | setSortOrder(data.sortOrder);
80 | }
81 | } else if (type === 'PIP_RESIZE') {
82 | if (rootBoxRef.current && payload && payload.height) {
83 | rootBoxRef.current.style.height = `${payload.height}px`;
84 | }
85 | }
86 | };
87 |
88 | window.addEventListener('message', handleMessage);
89 |
90 | const readyMessage = { type: window.opener ? 'PIP_WINDOW_READY' : 'PIP_IFRAME_READY' };
91 | const target = window.opener || window.parent;
92 | if (target && target !== window) {
93 | target.postMessage(readyMessage, '*');
94 | }
95 |
96 | return () => {
97 | window.removeEventListener('message', handleMessage);
98 | };
99 | }, []);
100 |
101 | let itemsToRender = [...historicalResponses];
102 | if (sortOrder === 'newestAtTop') {
103 | itemsToRender.reverse();
104 | }
105 |
106 | const streamingItemForRender = (isCurrentlyProcessing && currentStreamingText) ?
107 | { text: currentStreamingText, timestamp: 'Streaming...', type: 'current_streaming' } : null;
108 |
109 | if (streamingItemForRender) {
110 | if (sortOrder === 'newestAtTop') {
111 | itemsToRender.unshift(streamingItemForRender);
112 | } else {
113 | itemsToRender.push(streamingItemForRender);
114 | }
115 | }
116 |
117 |
118 | return (
119 |
120 |
121 |
122 | AI Log
123 |
124 |
125 |
131 |
132 | {itemsToRender.map((item, index) => (
133 |
134 |
135 |
136 |
137 |
138 |
139 | AI Assistant
140 | {item.timestamp !== 'Streaming...' && {item.timestamp}}
141 |
142 | {formatAndDisplayResponse(item.text)}
143 | {item.type === 'current_streaming' && }
144 |
145 |
146 | ))}
147 | {itemsToRender.length === 0 && !isCurrentlyProcessing && ( Waiting for AI responses... )}
148 | {isCurrentlyProcessing && itemsToRender.length === 0 && ( AI is thinking... )}
149 |
150 |
151 |
152 |
190 |
191 | );
192 | }
--------------------------------------------------------------------------------
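The page above expects its opener (or parent frame) to feed it data over `postMessage`. A hedged sketch of the sending side, assuming the parent answers the `PIP_WINDOW_READY`/`PIP_IFRAME_READY` handshake the page emits (the real sender lives in `pages/interview.js`; the `logState` object is an illustrative stand-in):

```js
// Parent-side sketch: reply to the PiP page's ready handshake, then push log data.
const logState = { // illustrative stand-in for the parent's Redux-backed state
  historicalResponses: [{ text: 'Hello from the AI', timestamp: '10:15:02' }],
  currentStreamingText: '',
  isProcessing: false,
  sortOrder: 'newestAtTop',
};

window.addEventListener('message', (event) => {
  const { type } = event.data || {};
  if (type === 'PIP_WINDOW_READY' || type === 'PIP_IFRAME_READY') {
    // event.source is the PiP window or iframe that announced itself
    event.source.postMessage({ type: 'AI_LOG_DATA', payload: logState }, '*');
  }
});
```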
/components/SettingsDialog.js:
--------------------------------------------------------------------------------
1 | import { useState, useEffect } from 'react';
2 | import PropTypes from 'prop-types';
3 | import {
4 | Dialog,
5 | DialogTitle,
6 | DialogContent,
7 | DialogActions,
8 | Button,
9 | TextField,
10 | Select,
11 | MenuItem,
12 | FormControl,
13 | InputLabel,
14 | ListSubheader,
15 | Typography,
16 | Box,
17 | Divider,
18 | IconButton,
19 | Tooltip,
20 | Chip,
21 | Grid,
22 | RadioGroup,
23 | FormControlLabel,
24 | Radio
25 | } from '@mui/material';
26 | import CloseIcon from '@mui/icons-material/Close';
27 | import SaveIcon from '@mui/icons-material/Save';
28 | import AddCircleOutlineIcon from '@mui/icons-material/AddCircleOutline';
29 | import DeleteIcon from '@mui/icons-material/Delete';
30 | import { getConfig, setConfig, builtInModelGroups } from '../utils/config'; // Import builtInModelGroups
31 |
32 | export default function SettingsDialog({ open, onClose, onSave }) {
33 | const [settings, setSettings] = useState(getConfig());
34 | const [newModelName, setNewModelName] = useState('');
35 | const [newModelId, setNewModelId] = useState('');
36 | const [newModelType, setNewModelType] = useState('openai'); // 'openai' or 'gemini'
37 |
38 | useEffect(() => {
39 | if (open) {
40 | setSettings(getConfig());
41 | // Reset new model fields when dialog opens
42 | setNewModelName('');
43 | setNewModelId('');
44 | setNewModelType('openai');
45 | }
46 | }, [open]);
47 |
48 | const handleChange = (e) => {
49 | setSettings({ ...settings, [e.target.name]: e.target.value });
50 | };
51 |
52 | const handleAddNewModel = () => {
53 | if (!newModelName.trim() || !newModelId.trim()) {
54 | alert('Please provide both a display name and an ID for the new model.');
55 | return;
56 | }
57 | const newModel = { label: newModelName.trim(), value: newModelId.trim(), type: newModelType };
58 | const updatedCustomModels = [...(settings.customModels || []), newModel];
59 | setSettings({ ...settings, customModels: updatedCustomModels });
60 | setNewModelName('');
61 | setNewModelId('');
62 | // Keep newModelType for potentially adding another of the same type
63 | };
64 |
65 | const handleRemoveCustomModel = (indexToRemove) => {
66 | const updatedCustomModels = (settings.customModels || []).filter((_, index) => index !== indexToRemove);
67 | // If the currently selected model was the one removed, reset to a default
68 | let currentAiModel = settings.aiModel;
69 | if (settings.customModels[indexToRemove]?.value === currentAiModel) {
70 | currentAiModel = builtInModelGroups[0]?.models[0]?.value || 'gpt-3.5-turbo'; // Fallback
71 | }
72 | setSettings({ ...settings, customModels: updatedCustomModels, aiModel: currentAiModel });
73 | };
74 |
75 |
76 | const handleSave = () => {
77 | // Validate model-key pairing
78 | const selectedModelValue = settings.aiModel;
79 | let selectedModelIsGemini = selectedModelValue.startsWith('gemini');
80 |
81 | // Check if the selected model is a custom Gemini model
82 | const customGeminiModel = (settings.customModels || []).find(m => m.value === selectedModelValue && m.type === 'gemini');
83 | if (customGeminiModel) {
84 | selectedModelIsGemini = true;
85 | }
86 | // Check if the selected model is a custom OpenAI-type model; the key checks below cover it
87 | const customOpenAIModel = (settings.customModels || []).find(m => m.value === selectedModelValue && m.type === 'openai');
91 |
92 |
93 | if (selectedModelIsGemini && !settings.geminiKey) {
94 | alert('Selected Gemini model requires a Gemini API key. Please enter a key or select a different model.');
95 | return;
96 | }
97 | if (!selectedModelIsGemini && !customOpenAIModel && !settings.openaiKey && !selectedModelValue.startsWith('gemini')) { // It's a built-in OpenAI or non-Gemini custom
98 | alert('Selected OpenAI model requires an OpenAI API key. Please enter a key or select a different model.');
99 | return;
100 | }
101 | if (customOpenAIModel && !settings.openaiKey) {
102 | alert('Selected custom OpenAI-type model requires an OpenAI API key.');
103 | return;
104 | }
105 |
106 |
107 | if (!settings.azureToken || !settings.azureRegion) {
108 | alert('Azure Speech Token and Region are required for voice transcription.');
109 | }
110 |
111 | setConfig(settings); // Uses the setConfig from utils/config.js
112 | if (onSave) onSave();
113 | onClose();
114 | };
115 |
116 |
117 | return (
118 |
259 | );
260 | }
261 |
262 | SettingsDialog.propTypes = {
263 | open: PropTypes.bool.isRequired,
264 | onClose: PropTypes.func.isRequired,
265 | onSave: PropTypes.func
266 | };
267 |
--------------------------------------------------------------------------------
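Per the prop-types above, a consumer owns the `open` state and passes `onClose`/`onSave`. A sketch mirroring how `pages/interview.js` wires the dialog (the wrapper component is hypothetical):

```jsx
import { useState } from 'react';
import SettingsDialog from '@/components/SettingsDialog';

function SettingsLauncher({ onSettingsSaved }) { // hypothetical wrapper
  const [open, setOpen] = useState(false);
  return (
    <>
      <button onClick={() => setOpen(true)}>Settings</button>
      <SettingsDialog
        open={open}
        onClose={() => setOpen(false)}
        onSave={onSettingsSaved} // interview.js re-reads getConfig() here
      />
    </>
  );
}
```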
/pages/interview.js:
--------------------------------------------------------------------------------
1 | import { useCallback, useEffect, useRef, useState } from 'react';
2 |
3 | import Head from 'next/head';
4 | import { useRouter } from 'next/router';
5 | import { useDispatch, useSelector } from 'react-redux';
6 |
7 | // MUI Components
8 | import {
9 | Alert,
10 | AppBar,
11 | Avatar,
12 | Box,
13 | Button,
14 | Card,
15 | CardContent,
16 | CardHeader,
17 | Checkbox,
18 | Chip,
19 | CircularProgress,
20 | Container,
21 | Divider,
22 | FormControlLabel,
23 | Grid,
24 | IconButton,
25 | List,
26 | ListItem,
27 | ListItemText,
28 | Paper,
29 | Snackbar,
30 | Switch,
31 | TextField,
32 | Toolbar,
33 | Tooltip,
34 | Typography,
35 | useTheme
36 | } from '@mui/material';
37 |
38 | // MUI Icons
39 | import ArrowDownwardIcon from '@mui/icons-material/ArrowDownward';
40 | import ArrowUpwardIcon from '@mui/icons-material/ArrowUpward';
41 | import DeleteSweepIcon from '@mui/icons-material/DeleteSweep';
42 | import HearingIcon from '@mui/icons-material/Hearing';
43 | import MicIcon from '@mui/icons-material/Mic';
44 | import MicOffIcon from '@mui/icons-material/MicOff';
45 | import PersonIcon from '@mui/icons-material/Person';
46 | import PictureInPictureAltIcon from '@mui/icons-material/PictureInPictureAlt';
47 | import PlaylistAddCheckIcon from '@mui/icons-material/PlaylistAddCheck';
48 | import ScreenShareIcon from '@mui/icons-material/ScreenShare';
49 | import SendIcon from '@mui/icons-material/Send';
50 | import SettingsIcon from '@mui/icons-material/Settings';
51 | import SmartToyIcon from '@mui/icons-material/SmartToy';
52 | import SpeakerNotesIcon from '@mui/icons-material/SpeakerNotes';
53 | import StopScreenShareIcon from '@mui/icons-material/StopScreenShare';
54 | import SwapVertIcon from '@mui/icons-material/SwapVert';
55 |
56 | // Third-party Libraries
57 | import { GoogleGenerativeAI } from '@google/generative-ai';
58 | import hljs from 'highlight.js';
59 | import 'highlight.js/styles/atom-one-dark.css';
60 | import throttle from 'lodash.throttle';
61 | import * as SpeechSDK from 'microsoft-cognitiveservices-speech-sdk';
62 | import OpenAI from 'openai';
63 | import ReactMarkdown from 'react-markdown';
64 | import ScrollToBottom from 'react-scroll-to-bottom';
65 |
66 | // Local Imports
67 | import SettingsDialog from '../components/SettingsDialog';
68 | import { setAIResponse } from '../redux/aiResponseSlice';
69 | import { addToHistory } from '../redux/historySlice';
70 | import { clearTranscription, setTranscription } from '../redux/transcriptionSlice';
71 | import { getConfig, setConfig as saveConfig } from '../utils/config';
72 |
73 |
74 |
75 | function debounce(func, timeout = 100) {
76 | let timer;
77 | return (...args) => {
78 | clearTimeout(timer);
79 | timer = setTimeout(() => {
80 | func(...args); // arrow scope has no own `this`, so spread the args directly
81 | }, timeout);
82 | };
83 | }
84 |
85 |
86 | export default function InterviewPage() {
87 | const dispatch = useDispatch();
88 | const transcriptionFromStore = useSelector(state => state.transcription);
89 | const aiResponseFromStore = useSelector(state => state.aiResponse);
90 | const history = useSelector(state => state.history);
91 | const theme = useTheme();
92 |
93 | const [appConfig, setAppConfig] = useState(getConfig());
94 |
95 | const [systemRecognizer, setSystemRecognizer] = useState(null);
96 | const [micRecognizer, setMicRecognizer] = useState(null);
97 | const [systemAutoMode, setSystemAutoMode] = useState(appConfig.systemAutoMode !== undefined ? appConfig.systemAutoMode : true);
98 | const [openAI, setOpenAI] = useState(null);
99 | const [settingsOpen, setSettingsOpen] = useState(false);
100 | const [isMicrophoneActive, setIsMicrophoneActive] = useState(false);
101 | const [isSystemAudioActive, setIsSystemAudioActive] = useState(false);
102 | const [snackbarOpen, setSnackbarOpen] = useState(false);
103 | const [snackbarMessage, setSnackbarMessage] = useState('');
104 | const [snackbarSeverity, setSnackbarSeverity] = useState('info');
105 | const [selectedQuestions, setSelectedQuestions] = useState([]);
106 | const [isManualMode, setIsManualMode] = useState(appConfig.isManualMode !== undefined ? appConfig.isManualMode : false);
107 | const [micTranscription, setMicTranscription] = useState('');
108 | const [isProcessing, setIsProcessing] = useState(false);
109 | const [isAILoading, setIsAILoading] = useState(true);
110 | const [autoScroll, setAutoScroll] = useState(true);
111 | const [aiResponseSortOrder, setAiResponseSortOrder] = useState('newestAtTop');
112 | const [isPipWindowActive, setIsPipWindowActive] = useState(false);
113 |
114 | const pipWindowRef = useRef(null);
115 | const documentPipWindowRef = useRef(null);
116 | const documentPipIframeRef = useRef(null);
117 | const systemInterimTranscription = useRef('');
118 | const micInterimTranscription = useRef('');
119 | const silenceTimer = useRef(null);
120 | const finalTranscript = useRef({ system: '', microphone: '' });
121 | const isManualModeRef = useRef(isManualMode);
122 | const systemAutoModeRef = useRef(systemAutoMode);
123 | const throttledDispatchSetAIResponseRef = useRef(null);
124 |
125 | const showSnackbar = useCallback((message, severity = 'info') => {
126 | setSnackbarMessage(message);
127 | setSnackbarSeverity(severity);
128 | setSnackbarOpen(true);
129 | }, []);
130 |
131 | const handleSettingsSaved = () => {
132 | const newConfig = getConfig();
133 | setAppConfig(newConfig);
134 | setIsAILoading(true);
135 | setSystemAutoMode(newConfig.systemAutoMode !== undefined ? newConfig.systemAutoMode : true);
136 | setIsManualMode(newConfig.isManualMode !== undefined ? newConfig.isManualMode : false);
137 | };
138 |
139 | useEffect(() => {
140 | const currentConfig = appConfig;
141 | const initializeAI = () => {
142 | try {
143 | if (currentConfig.aiModel.startsWith('gemini')) {
144 | if (!currentConfig.geminiKey) {
145 | showSnackbar('Gemini API key required. Please set it in Settings.', 'error');
146 | setOpenAI(null);
147 | return;
148 | }
149 | const genAI = new GoogleGenerativeAI(currentConfig.geminiKey);
150 | setOpenAI(genAI);
151 | } else {
152 | if (!currentConfig.openaiKey) {
153 | showSnackbar('OpenAI API key required. Please set it in Settings.', 'error');
154 | setOpenAI(null);
155 | return;
156 | }
157 | const openaiClient = new OpenAI({
158 | apiKey: currentConfig.openaiKey,
159 | dangerouslyAllowBrowser: true
160 | });
161 | setOpenAI(openaiClient);
162 | }
163 | } catch (error) {
164 | console.error('Error initializing AI client:', error);
165 | showSnackbar('Error initializing AI client: ' + error.message, 'error');
166 | setOpenAI(null);
167 | } finally {
168 | setIsAILoading(false);
169 | }
170 | };
171 | if (isAILoading) initializeAI();
172 | }, [appConfig, isAILoading, showSnackbar]);
173 |
174 | useEffect(() => { isManualModeRef.current = isManualMode; }, [isManualMode]);
175 | useEffect(() => { systemAutoModeRef.current = systemAutoMode; }, [systemAutoMode]);
176 |
177 | useEffect(() => {
178 | throttledDispatchSetAIResponseRef.current = throttle((payload) => {
179 | dispatch(setAIResponse(payload));
180 | }, 250, { leading: true, trailing: true });
181 |
182 | return () => {
183 | if (throttledDispatchSetAIResponseRef.current && typeof throttledDispatchSetAIResponseRef.current.cancel === 'function') {
184 | throttledDispatchSetAIResponseRef.current.cancel();
185 | }
186 | };
187 | }, [dispatch]);
188 |
189 | const handleSnackbarClose = () => setSnackbarOpen(false);
190 |
191 | const stopRecording = async (source) => {
192 | const recognizer = source === 'system' ? systemRecognizer : micRecognizer;
193 | if (recognizer && typeof recognizer.stopContinuousRecognitionAsync === 'function') {
194 | try {
195 | await recognizer.stopContinuousRecognitionAsync();
196 | if (recognizer.audioConfig && recognizer.audioConfig.privSource && recognizer.audioConfig.privSource.privStream) {
197 | const stream = recognizer.audioConfig.privSource.privStream;
198 | if (stream instanceof MediaStream) {
199 | stream.getTracks().forEach(track => {
200 | track.stop();
201 | });
202 | }
203 | }
204 | if (recognizer.audioConfig && typeof recognizer.audioConfig.close === 'function') {
205 | recognizer.audioConfig.close();
206 | }
207 | } catch (error) {
208 | console.error(`Error stopping ${source} recognition:`, error);
209 | showSnackbar(`Error stopping ${source} audio: ${error.message}`, 'error');
210 | } finally {
211 | if (source === 'system') {
212 | setIsSystemAudioActive(false);
213 | setSystemRecognizer(null);
214 | } else {
215 | setIsMicrophoneActive(false);
216 | setMicRecognizer(null);
217 | }
218 | }
219 | }
220 | };
221 |
222 | const handleClearSystemTranscription = () => {
223 | finalTranscript.current.system = '';
224 | systemInterimTranscription.current = '';
225 | dispatch(clearTranscription());
226 | };
227 |
228 | const handleClearMicTranscription = () => {
229 | finalTranscript.current.microphone = '';
230 | micInterimTranscription.current = '';
231 | setMicTranscription('');
232 | };
233 |
234 | const handleTranscriptionEvent = (text, source) => {
235 | const cleanText = text.replace(/\s+/g, ' ').trim();
236 | if (!cleanText) return;
237 |
238 | finalTranscript.current[source] += cleanText + ' ';
239 |
240 | if (source === 'system') {
241 | dispatch(setTranscription(finalTranscript.current.system + systemInterimTranscription.current));
242 | } else {
243 | setMicTranscription(finalTranscript.current.microphone + micInterimTranscription.current);
244 | }
245 |
246 | const currentConfig = getConfig();
247 | const currentSilenceTimerDuration = currentConfig.silenceTimerDuration;
248 |
249 | if ((source === 'system' && systemAutoModeRef.current) || (source === 'microphone' && !isManualModeRef.current)) {
250 | clearTimeout(silenceTimer.current);
251 | silenceTimer.current = setTimeout(() => {
252 | askOpenAI(finalTranscript.current[source].trim(), source);
253 | }, currentSilenceTimerDuration * 1000);
254 | }
255 | };
256 |
257 | const handleManualInputChange = (value, source) => {
258 | if (source === 'system') {
259 | dispatch(setTranscription(value));
260 | finalTranscript.current.system = value;
261 | } else {
262 | setMicTranscription(value);
263 | finalTranscript.current.microphone = value;
264 | }
265 | };
266 |
267 | const handleManualSubmit = (source) => {
268 | const textToSubmit = source === 'system' ? transcriptionFromStore : micTranscription;
269 | if (textToSubmit.trim()) {
270 | askOpenAI(textToSubmit.trim(), source);
271 | } else {
272 | showSnackbar('Input is empty.', 'warning');
273 | }
274 | };
275 |
276 | const handleKeyPress = (e, source) => {
277 | if (e.key === 'Enter' && !e.shiftKey) {
278 | e.preventDefault();
279 | handleManualSubmit(source);
280 | }
281 | };
282 |
283 | const handleCombineAndSubmit = () => {
284 | if (selectedQuestions.length === 0) {
285 | showSnackbar('No questions selected to combine.', 'warning');
286 | return;
287 | }
288 | const questionHistory = history.filter(e => e.type === 'question').slice().reverse();
289 | const questionTexts = selectedQuestions.map(selectedIndexInReversedArray => {
290 | return questionHistory[selectedIndexInReversedArray]?.text;
291 | }).filter(text => text);
292 |
293 | if (questionTexts.length === 0) {
294 | showSnackbar('Could not retrieve selected question texts.', 'warning');
295 | return;
296 | }
297 |
298 | const combinedText = questionTexts.join('\n\n---\n\n');
299 | askOpenAI(combinedText, 'combined');
300 | setSelectedQuestions([]);
301 | };
302 |
303 | const createRecognizer = async (mediaStream, source) => {
304 | const currentConfig = getConfig();
305 | if (!currentConfig.azureToken || !currentConfig.azureRegion) {
306 | showSnackbar('Azure Speech credentials missing. Please set them in Settings.', 'error');
307 | mediaStream.getTracks().forEach(track => track.stop());
308 | return null;
309 | }
310 |
311 | let audioConfig;
312 | try {
313 | audioConfig = SpeechSDK.AudioConfig.fromStreamInput(mediaStream);
314 | } catch (configError) {
315 | console.error(`Error creating AudioConfig for ${source}:`, configError);
316 | showSnackbar(`Error setting up audio for ${source}: ${configError.message}`, 'error');
317 | mediaStream.getTracks().forEach(track => track.stop());
318 | return null;
319 | }
320 |
321 | const speechConfig = SpeechSDK.SpeechConfig.fromSubscription(currentConfig.azureToken, currentConfig.azureRegion);
322 | speechConfig.speechRecognitionLanguage = currentConfig.azureLanguage;
323 |
324 | const recognizer = new SpeechSDK.SpeechRecognizer(speechConfig, audioConfig);
325 |
326 | recognizer.recognizing = (s, e) => {
327 | if (e.result.reason === SpeechSDK.ResultReason.RecognizingSpeech) {
328 | const interimText = e.result.text;
329 | if (source === 'system') {
330 | systemInterimTranscription.current = interimText;
331 | dispatch(setTranscription(finalTranscript.current.system + interimText));
332 | } else {
333 | micInterimTranscription.current = interimText;
334 | setMicTranscription(finalTranscript.current.microphone + interimText);
335 | }
336 | }
337 | };
338 |
339 | recognizer.recognized = (s, e) => {
340 | if (e.result.reason === SpeechSDK.ResultReason.RecognizedSpeech && e.result.text) {
341 | if (source === 'system') systemInterimTranscription.current = '';
342 | else micInterimTranscription.current = '';
343 | handleTranscriptionEvent(e.result.text, source);
344 | } else if (e.result.reason === SpeechSDK.ResultReason.NoMatch) {
345 | // console.log(`NOMATCH: Speech could not be recognized for ${source}.`);
346 | }
347 | };
348 |
349 | recognizer.canceled = (s, e) => {
350 | console.log(`CANCELED: Reason=${e.reason} for ${source}`);
351 | if (e.reason === SpeechSDK.CancellationReason.Error) {
352 | console.error(`CANCELED: ErrorCode=${e.errorCode}`);
353 | console.error(`CANCELED: ErrorDetails=${e.errorDetails}`);
354 | showSnackbar(`Speech recognition error for ${source}: ${e.errorDetails}`, 'error');
355 | }
356 | stopRecording(source);
357 | };
358 |
359 | recognizer.sessionStopped = (s, e) => {
360 | console.log(`Session stopped event for ${source}.`);
361 | stopRecording(source);
362 | };
363 |
364 | try {
365 | await new Promise((resolve, reject) => recognizer.startContinuousRecognitionAsync(resolve, reject)); // promisify the callback-based SDK call so failures actually reach the catch block
366 | return recognizer;
367 | } catch (error) {
368 | console.error(`Error starting ${source} continuous recognition:`, error);
369 | showSnackbar(`Failed to start ${source} recognition: ${error.message}`, 'error');
370 | if (audioConfig && typeof audioConfig.close === 'function') audioConfig.close();
371 | mediaStream.getTracks().forEach(track => track.stop());
372 | return null;
373 | }
374 | };
375 |
376 | const startSystemAudioRecognition = async () => {
377 | if (isSystemAudioActive) {
378 | await stopRecording('system');
379 | return;
380 | }
381 |
382 | if (!navigator.mediaDevices || !navigator.mediaDevices.getDisplayMedia) {
383 | showSnackbar('Screen sharing is not supported by your browser.', 'error');
384 | setIsSystemAudioActive(false);
385 | return;
386 | }
387 |
388 | try {
389 | const mediaStream = await navigator.mediaDevices.getDisplayMedia({
390 | audio: true,
391 | video: {
392 | displaySurface: 'browser',
393 | logicalSurface: true
394 | }
395 | });
396 |
397 | const audioTracks = mediaStream.getAudioTracks();
398 | if (audioTracks.length === 0) {
399 | showSnackbar('No audio track detected. Please ensure you share a tab with audio.', 'warning');
400 | mediaStream.getTracks().forEach(track => track.stop());
401 | return;
402 | }
403 |
404 | if (systemRecognizer) {
405 | await stopRecording('system');
406 | }
407 |
408 | const recognizerInstance = await createRecognizer(mediaStream, 'system');
409 | if (recognizerInstance) {
410 | setSystemRecognizer(recognizerInstance);
411 | setIsSystemAudioActive(true);
412 | showSnackbar('System audio recording started.', 'success');
413 | mediaStream.getTracks().forEach(track => {
414 | track.onended = () => {
415 | showSnackbar('Tab sharing ended.', 'info');
416 | stopRecording('system');
417 | };
418 | });
419 | } else {
420 | mediaStream.getTracks().forEach(track => track.stop());
421 | }
422 | } catch (error) {
423 | console.error('System audio capture error:', error);
424 | if (error.name === "NotAllowedError") {
425 | showSnackbar('Permission denied for screen recording. Please allow access.', 'error');
426 | } else if (error.name === "NotFoundError") {
427 | showSnackbar('No suitable tab/window found to share.', 'error');
428 | } else if (error.name === "NotSupportedError") {
429 | showSnackbar('System audio capture not supported by your browser.', 'error');
430 | } else {
431 | showSnackbar(`Failed to start system audio capture: ${error.message || 'Unknown error'}`, 'error');
432 | }
433 | setIsSystemAudioActive(false);
434 | }
435 | };
436 |
437 | const startMicrophoneRecognition = async () => {
438 | if (isMicrophoneActive) {
439 | await stopRecording('microphone');
440 | return;
441 | }
442 | try {
443 | const mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true });
444 | if (micRecognizer) await stopRecording('microphone');
445 |
446 | const recognizerInstance = await createRecognizer(mediaStream, 'microphone');
447 | if (recognizerInstance) {
448 | setMicRecognizer(recognizerInstance);
449 | setIsMicrophoneActive(true);
450 | showSnackbar('Microphone recording started.', 'success');
451 | } else {
452 | mediaStream.getTracks().forEach(track => track.stop());
453 | }
454 | } catch (error) {
455 | console.error('Microphone capture error:', error);
456 | if (error.name === "NotAllowedError" || error.name === "NotFoundError") {
457 | showSnackbar('Permission denied for microphone. Please allow access.', 'error');
458 | } else {
459 | showSnackbar(`Failed to access microphone: ${error.message || 'Unknown error'}`, 'error');
460 | }
461 | setIsMicrophoneActive(false);
462 | }
463 | };
464 |
465 | const askOpenAI = async (text, source) => {
466 | if (!text.trim()) {
467 | showSnackbar('No input text to process.', 'warning');
468 | return;
469 | }
470 | if (!openAI || isAILoading) {
471 | showSnackbar('AI client is not ready. Please wait or check settings.', 'warning');
472 | return;
473 | }
474 |
475 | const currentConfig = getConfig();
476 | const lengthSettings = {
477 | concise: { temperature: 0.4, maxTokens: 250 },
478 | medium: { temperature: 0.6, maxTokens: 500 },
479 | lengthy: { temperature: 0.8, maxTokens: 1000 }
480 | };
481 | const { temperature, maxTokens } = lengthSettings[currentConfig.responseLength] || lengthSettings.medium; // fall back to medium for missing or unrecognized values
482 |
483 | setIsProcessing(true);
484 | const timestamp = new Date().toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
485 | let streamedResponse = '';
486 |
487 | dispatch(addToHistory({ type: 'question', text, timestamp, source, status: 'pending' }));
488 | dispatch(setAIResponse(''));
489 |
490 | try {
491 | const conversationHistoryForAPI = history
492 | .filter(e => e.text && (e.type === 'question' || e.type === 'response') && e.status !== 'pending')
493 | .slice(-6)
494 | .map(event => ({
495 | role: event.type === 'question' ? 'user' : 'assistant',
496 | content: event.text,
497 | }));
498 |
499 | if (currentConfig.aiModel.startsWith('gemini')) {
500 | const model = openAI.getGenerativeModel({
501 | model: currentConfig.aiModel,
502 | generationConfig: { temperature, maxOutputTokens: maxTokens },
503 | systemInstruction: { parts: [{ text: currentConfig.gptSystemPrompt }] }
504 | });
505 | // Gemini requires chat history to begin with a 'user' turn, so drop any leading assistant entries
506 | const firstUserIdx = conversationHistoryForAPI.findIndex(m => m.role === 'user');
507 | const chat = model.startChat({
508 | history: (firstUserIdx === -1 ? [] : conversationHistoryForAPI.slice(firstUserIdx))
509 | .map(msg => ({ role: msg.role === 'user' ? 'user' : 'model', parts: [{ text: msg.content }] })),
510 | });
511 | const result = await chat.sendMessageStream(text);
512 | for await (const chunk of result.stream) {
513 | if (chunk && typeof chunk.text === 'function') {
514 | const chunkText = chunk.text();
515 | streamedResponse += chunkText;
516 | if (throttledDispatchSetAIResponseRef.current) {
517 | throttledDispatchSetAIResponseRef.current(streamedResponse);
518 | }
519 | }
520 | }
521 | } else {
522 | const messages = [
523 | { role: 'system', content: currentConfig.gptSystemPrompt },
524 | ...conversationHistoryForAPI,
525 | { role: 'user', content: text }
526 | ];
527 | const stream = await openAI.chat.completions.create({
528 | model: currentConfig.aiModel,
529 | messages,
530 | temperature,
531 | max_tokens: maxTokens,
532 | stream: true,
533 | });
534 | for await (const chunk of stream) {
535 | const chunkText = chunk.choices[0]?.delta?.content || '';
536 | streamedResponse += chunkText;
537 | if (throttledDispatchSetAIResponseRef.current) {
538 | throttledDispatchSetAIResponseRef.current(streamedResponse);
539 | }
540 | }
541 | }
542 | if (throttledDispatchSetAIResponseRef.current && typeof throttledDispatchSetAIResponseRef.current.cancel === 'function') {
543 | throttledDispatchSetAIResponseRef.current.cancel();
544 | }
545 | dispatch(setAIResponse(streamedResponse));
546 |
547 | const finalTimestamp = new Date().toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
548 | dispatch(addToHistory({ type: 'response', text: streamedResponse, timestamp: finalTimestamp, status: 'completed' }));
549 |
550 | } catch (error) {
551 | console.error("AI request error:", error);
552 | const errorMessage = `AI request failed: ${error.message || 'Unknown error'}`;
553 | showSnackbar(errorMessage, 'error');
554 | dispatch(setAIResponse(`Error: ${errorMessage}`));
555 | dispatch(addToHistory({ type: 'response', text: `Error: ${errorMessage}`, timestamp: new Date().toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' }), status: 'error' }));
556 | } finally {
557 | if ((source === 'system' && systemAutoModeRef.current) || (source === 'microphone' && !isManualModeRef.current)) {
558 | finalTranscript.current[source] = '';
559 | if (source === 'system') {
560 | systemInterimTranscription.current = '';
561 | dispatch(setTranscription(''));
562 | } else {
563 | micInterimTranscription.current = '';
564 | setMicTranscription('');
565 | }
566 | }
567 | setIsProcessing(false);
568 | }
569 | };
570 |
571 | const formatAndDisplayResponse = useCallback((response) => {
572 | // renders AI markdown output, with fenced code blocks passed through a syntax highlighter
573 | if (!response) return null;
574 | return (
575 | <ReactMarkdown
576 | components={{
577 | code({ node, inline, className, children, ...props }) {
578 | const match = /language-(\w+)/.exec(className || '');
579 | return !inline && match ? (
580 | <SyntaxHighlighter
581 | language={match[1]}
582 | PreTag="div"
583 | wrapLongLines
584 | customStyle={{
585 | borderRadius: 8,
586 | fontSize: '0.85rem',
587 | margin: '8px 0'
588 | }}
589 | {...props}
590 | >
591 | {String(children).replace(/\n$/, '')}
592 | </SyntaxHighlighter>
593 | ) : (
594 | <Box
595 | component="code"
596 | sx={{
597 | backgroundColor: 'action.hover',
598 | borderRadius: 1,
599 | px: 0.5,
600 | py: 0.25,
601 | fontFamily: 'monospace',
602 | fontSize: '0.85rem'
603 | }}
604 | {...props}
605 | >
606 | {children}
607 | </Box>
608 | );
609 | },
610 | p: ({ node, ...props }) => <Typography variant="body2" paragraph {...props} />,
611 | strong: ({ node, ...props }) => <Box component="strong" sx={{ fontWeight: 600 }} {...props} />,
612 | em: ({ node, ...props }) => <Box component="em" {...props} />,
613 | ul: ({ node, ...props }) => <Box component="ul" sx={{ pl: 3, my: 1 }} {...props} />,
614 | ol: ({ node, ...props }) => <Box component="ol" sx={{ pl: 3, my: 1 }} {...props} />,
615 | li: ({ node, ...props }) => <Box component="li" sx={{ mb: 0.5 }} {...props} />,
616 | }}
617 | >
618 | {response}
619 | </ReactMarkdown>
620 | );
621 | }, []);
622 |
623 | const renderHistoryItem = (item, index) => {
624 | if (item.type !== 'response' && item.type !== 'current_streaming') return null; // 'current_streaming' is the in-flight entry pushed by getAiResponsesToDisplay
625 | const Icon = SmartToyIcon;
626 | const title = 'AI Assistant';
627 | const avatarBgColor = theme.palette.secondary.light;
628 |
629 | return (
630 | <ListItem key={index} alignItems="flex-start" sx={{ px: 1, py: 1.5 }}>
631 | <ListItemAvatar>
632 | <Avatar sx={{ bgcolor: avatarBgColor, width: 32, height: 32 }}>
633 | <Icon fontSize="small" />
634 | </Avatar>
635 | </ListItemAvatar>
636 | <Box sx={{ flexGrow: 1, minWidth: 0 }}>
637 | <Typography variant="subtitle2" component="span">{title}</Typography>
638 | <Typography variant="caption" color="text.secondary" sx={{ ml: 1 }}>{item.timestamp}</Typography>
639 | <Box sx={{ mt: 0.5 }}>{formatAndDisplayResponse(item.text)}</Box>
640 | </Box>
641 | </ListItem>
642 | );
643 | };
644 |
645 | const renderQuestionHistoryItem = (item, index) => {
646 | const Icon = item.source === 'system' ? HearingIcon : PersonIcon;
647 | const title = item.source === 'system' ? 'Interviewer' : 'Candidate';
648 | const avatarBgColor = item.source === 'system' ? theme.palette.info.light : theme.palette.success.light;
649 |
650 | return (
651 | <ListItem
652 | key={index}
653 | secondaryAction={
654 | <Checkbox
655 | edge="end"
656 | checked={selectedQuestions.includes(index)}
657 | onChange={() => {
658 | setSelectedQuestions(prev =>
659 | prev.includes(index) ? prev.filter(x => x !== index) : [...prev, index]
660 | );
661 | }}
662 | color="secondary"
663 | size="small"
664 | />
665 | }
666 | disablePadding
667 | sx={{ py: 0.5, display: 'flex', alignItems: 'center' }}
668 | >
669 | <ListItemAvatar sx={{ minWidth: 40 }}>
670 | <Avatar sx={{ bgcolor: avatarBgColor, width: 28, height: 28 }}><Icon fontSize="small" /></Avatar>
671 | </ListItemAvatar>
672 | <ListItemText
673 | primary={
674 | <Typography variant="body2" sx={{ wordBreak: 'break-word' }}>
675 | {item.text}
676 | </Typography>
677 | }
678 | secondary={`${title} - ${item.timestamp}`}
679 | />
680 | </ListItem>
681 | );
682 | };
683 |
684 | const handleSortOrderToggle = () => {
685 | setAiResponseSortOrder(prev => prev === 'newestAtBottom' ? 'newestAtTop' : 'newestAtBottom');
686 | };
687 |
688 | const getAiResponsesToDisplay = () => {
689 | let responses = history.filter(item => item.type === 'response').slice();
690 | const currentStreamingText = aiResponseFromStore;
691 |
692 | if (isProcessing && currentStreamingText && currentStreamingText.trim() !== '') {
693 | responses.push({ text: currentStreamingText, timestamp: 'Streaming...', type: 'current_streaming' });
694 | }
695 |
696 | if (aiResponseSortOrder === 'newestAtTop') {
697 | return responses.reverse();
698 | }
699 | return responses;
700 | };
701 |
702 | const togglePipWindow = async () => {
703 | if (isPipWindowActive) {
704 | if (documentPipWindowRef.current && typeof documentPipWindowRef.current.close === 'function') {
705 | try {
706 | await documentPipWindowRef.current.close();
707 | } catch (e) { console.error("Error closing document PiP window:", e); }
708 | } else if (pipWindowRef.current && !pipWindowRef.current.closed) {
709 | pipWindowRef.current.close();
710 | }
711 | return; // State update will be handled by pagehide/interval listeners
712 | }
713 |
714 | const addResizeListener = (pipWindow) => {
715 | const handlePipResize = debounce(() => {
716 | if (!pipWindow || (pipWindow.closed)) return;
717 | const target = documentPipIframeRef.current ? documentPipIframeRef.current.contentWindow : pipWindow;
718 | if (target) {
719 | target.postMessage({
720 | type: 'PIP_RESIZE',
721 | payload: {
722 | width: pipWindow.innerWidth,
723 | height: pipWindow.innerHeight
724 | }
725 | }, '*');
726 | }
727 | }, 50);
728 |
729 | pipWindow.addEventListener('resize', handlePipResize);
730 | return () => pipWindow.removeEventListener('resize', handlePipResize); // Return a cleanup function
731 | };
732 |
733 | if (window.documentPictureInPicture && typeof window.documentPictureInPicture.requestWindow === 'function') {
734 | try {
735 | const pipOptions = { width: 400, height: 300 };
736 | const requestedPipWindow = await window.documentPictureInPicture.requestWindow(pipOptions);
737 | documentPipWindowRef.current = requestedPipWindow;
738 | setIsPipWindowActive(true);
739 |
740 | const iframe = documentPipWindowRef.current.document.createElement('iframe');
741 | iframe.src = '/pip-log';
742 | iframe.style.width = '100%';
743 | iframe.style.height = '100%';
744 | iframe.style.border = 'none';
745 | documentPipWindowRef.current.document.body.style.margin = '0';
746 | documentPipWindowRef.current.document.body.style.overflow = 'hidden';
747 | documentPipWindowRef.current.document.body.append(iframe);
748 | documentPipIframeRef.current = iframe;
749 |
750 | const removeResizeListener = addResizeListener(documentPipWindowRef.current);
751 |
752 | iframe.onload = () => {
753 | if (documentPipIframeRef.current && documentPipIframeRef.current.contentWindow) {
754 | documentPipIframeRef.current.contentWindow.postMessage({
755 | type: 'AI_LOG_DATA',
756 | payload: {
757 | historicalResponses: history.filter(item => item.type === 'response'),
758 | currentStreamingText: isProcessing ? aiResponseFromStore : '',
759 | isProcessing: isProcessing,
760 | sortOrder: aiResponseSortOrder
761 | }
762 | }, '*');
763 | }
764 | };
765 |
766 | documentPipWindowRef.current.addEventListener('pagehide', () => {
767 | removeResizeListener();
768 | setIsPipWindowActive(false);
769 | documentPipWindowRef.current = null;
770 | documentPipIframeRef.current = null;
771 | });
772 |
773 | showSnackbar('Native PiP window opened.', 'success');
774 | return;
775 |
776 | } catch (err) {
777 | console.error('Document Picture-in-Picture API error:', err);
778 | showSnackbar(`Native PiP not available or failed. Trying popup. (${err.message})`, 'warning');
779 | }
780 | }
781 |
782 | pipWindowRef.current = window.open('/pip-log', 'AIResponsePiP', 'width=400,height=550,resizable=yes,scrollbars=yes,status=no,toolbar=no,menubar=no,location=no,popup=yes'); // 'noopener'/'noreferrer' omitted deliberately: they make window.open return null, which would break the postMessage bridge below
783 |
784 | if (pipWindowRef.current) {
785 | setIsPipWindowActive(true);
786 | const removeResizeListener = addResizeListener(pipWindowRef.current);
787 |
788 | pipWindowRef.current.onload = () => {
789 | if (pipWindowRef.current && !pipWindowRef.current.closed) {
790 | pipWindowRef.current.postMessage({
791 | type: 'AI_LOG_DATA',
792 | payload: {
793 | historicalResponses: history.filter(item => item.type === 'response'),
794 | currentStreamingText: isProcessing ? aiResponseFromStore : '',
795 | isProcessing: isProcessing,
796 | sortOrder: aiResponseSortOrder
797 | }
798 | }, '*');
799 | }
800 | };
801 | const pipCheckInterval = setInterval(() => {
802 | if (pipWindowRef.current && pipWindowRef.current.closed) {
803 | clearInterval(pipCheckInterval);
804 | removeResizeListener();
805 | setIsPipWindowActive(false);
806 | pipWindowRef.current = null;
807 | }
808 | }, 500);
809 | if (pipWindowRef.current) pipWindowRef.current._pipIntervalId = pipCheckInterval;
810 | } else {
811 | showSnackbar('Failed to open PiP window. Please check popup blocker settings.', 'error');
812 | setIsPipWindowActive(false);
813 | }
814 | };
815 |
816 | useEffect(() => {
817 | return () => {
818 | if (pipWindowRef.current && pipWindowRef.current._pipIntervalId) {
819 | clearInterval(pipWindowRef.current._pipIntervalId);
820 | }
821 | if (documentPipWindowRef.current && typeof documentPipWindowRef.current.close === 'function') {
822 | try { documentPipWindowRef.current.close(); } catch (e) { /*ignore*/ }
823 | }
824 | };
825 | }, []);
826 |
827 | useEffect(() => {
828 | let targetWindowForMessage = null;
829 |
830 | if (documentPipWindowRef.current && documentPipIframeRef.current && documentPipIframeRef.current.contentWindow) {
831 | targetWindowForMessage = documentPipIframeRef.current.contentWindow;
832 | } else if (pipWindowRef.current && !pipWindowRef.current.closed) {
833 | targetWindowForMessage = pipWindowRef.current;
834 | }
835 |
836 | if (isPipWindowActive && targetWindowForMessage) {
837 | try {
838 | targetWindowForMessage.postMessage({
839 | type: 'AI_LOG_DATA',
840 | payload: {
841 | historicalResponses: history.filter(item => item.type === 'response'),
842 | currentStreamingText: isProcessing ? aiResponseFromStore : '',
843 | isProcessing: isProcessing,
844 | sortOrder: aiResponseSortOrder
845 | }
846 | }, '*');
847 | } catch (e) {
848 | console.warn("Could not post message to PiP window:", e);
849 | }
850 | }
851 | }, [history, aiResponseFromStore, isPipWindowActive, aiResponseSortOrder, isProcessing]);
852 |
853 | return (
854 | <>
855 | <Head>
856 | <title>Interview Copilot - Active Session</title>
857 | </Head>
858 |
859 | <AppBar position="static" elevation={1}>
860 | <Toolbar variant="dense">
861 | <SmartToyIcon sx={{ mr: 1.5 }} />
862 | <Typography variant="h6" component="h1" sx={{ flexGrow: 1 }}>
863 | Interview Copilot
864 | </Typography>
865 | <IconButton color="inherit" onClick={() => setSettingsOpen(true)} aria-label="settings">
866 | <SettingsIcon />
867 | </IconButton>
868 | </Toolbar>
869 | </AppBar>
870 |
871 |
872 | <Grid container spacing={2} sx={{ p: 2, height: 'calc(100vh - 64px)', overflow: 'hidden' }}>
873 |
874 |
875 | {/* Left Panel */}
876 | <Grid item xs={12} md={3} sx={{ height: '100%', display: 'flex', flexDirection: 'column' }}>
877 | <Card sx={{ mb: 2 }}>
878 | <CardHeader title="Interviewer Audio" avatar={<HearingIcon color="info" />} sx={{ pb: 1 }} />
879 | <CardContent sx={{ pt: 0 }}>
880 | <FormControlLabel
881 | control={<Switch checked={systemAutoMode} onChange={(e) => setSystemAutoMode(e.target.checked)} color="primary" />}
882 | label="Auto-Submit Question"
883 | sx={{ mb: 1 }}
884 | />
885 | <TextField
886 | fullWidth
887 | multiline
888 | minRows={3}
889 | maxRows={6}
890 | value={transcriptionFromStore}
891 | onChange={(e) => handleManualInputChange(e.target.value, 'system')}
892 | onKeyDown={(e) => handleKeyPress(e, 'system')}
893 | placeholder="Interviewer's speech..."
894 | sx={{ mb: 2 }}
895 | />
896 | <Box sx={{ display: 'flex', mb: 1 }}>
897 | <Button
898 | variant="contained"
899 | color={isSystemAudioActive ? 'error' : 'primary'}
900 | onClick={startSystemAudioRecognition}
901 | startIcon={isSystemAudioActive ? <StopScreenShareIcon /> : <ScreenShareIcon />}
902 | sx={{ flexGrow: 1 }}
903 | >
904 | {isSystemAudioActive ? 'Stop System Audio' : 'Record System Audio'}
905 | </Button>
906 | </Box>
907 | <Typography variant="caption" color="text.secondary" sx={{ display: 'block', mb: 1 }}>
908 | {isSystemAudioActive ? 'Recording system audio...' : 'Select "Chrome Tab" and check "Share audio" when prompted.'}
909 | </Typography>
910 |
911 |
912 | {!systemAutoMode && (
913 | <Button
914 | fullWidth
915 | variant="outlined"
916 | onClick={() => handleManualSubmit('system')}
917 | disabled={isProcessing}
918 | startIcon={<SendIcon />}
919 | >
920 | Ask AI
921 | </Button>
922 | )}
923 |
924 | </CardContent>
925 | </Card>
926 | <Card sx={{ flexGrow: 1, display: 'flex', flexDirection: 'column', overflow: 'hidden' }}>
927 | <CardHeader
928 | title="Question History"
929 | titleTypographyProps={{ variant: 'subtitle1' }}
930 | action={
931 | <Button
932 | size="small"
933 | variant="contained"
934 | onClick={handleCombineAndSubmit}
935 | disabled={selectedQuestions.length === 0 || isProcessing}
936 | startIcon={isProcessing ? <CircularProgress size={16} color="inherit" /> : <SendIcon />}
937 | >
938 | Ask Combined
939 | </Button>
940 | }
941 | sx={{ pb: 1, borderBottom: `1px solid ${theme.palette.divider}` }}
942 | />
943 | <CardContent sx={{ flexGrow: 1, overflowY: 'auto', py: 0 }}>
944 |
945 | <List dense disablePadding>
946 | {history.filter(e => e.type === 'question').slice().reverse().map(renderQuestionHistoryItem)}
947 | </List>
948 |
949 | </CardContent>
950 | </Card>
951 | </Grid>
952 |
953 | {/* Center Panel */}
954 | <Grid item xs={12} md={6} sx={{ height: '100%' }}>
955 | <Card sx={{ height: '100%', display: 'flex', flexDirection: 'column' }}>
956 | <CardHeader
957 | title="AI Copilot"
958 | avatar={<SmartToyIcon color="secondary" />}
959 | action={
960 | <>
961 | <IconButton onClick={togglePipWindow} size="small" aria-label="toggle picture-in-picture log">
962 | <PictureInPictureAltIcon fontSize="small" />
963 | </IconButton>
964 |
965 |
966 |
967 | <IconButton onClick={handleSortOrderToggle} size="small" aria-label="toggle sort order">
968 | {aiResponseSortOrder === 'newestAtTop' ? <ArrowDownwardIcon fontSize="small" /> : <ArrowUpwardIcon fontSize="small" />}
969 | </IconButton>
970 |
971 | <Typography variant="caption" sx={{ alignSelf: 'center', mx: 0.5 }}>
972 | {aiResponseSortOrder === 'newestAtTop' ? "Newest First" : "Oldest First"}
973 | </Typography>
974 | <FormControlLabel
975 | control={<Switch checked={autoScroll} onChange={(e) => setAutoScroll(e.target.checked)} color="primary" />}
976 | label="Auto Scroll"
977 | sx={{ ml: 1 }}
978 | />
979 | </>
980 | }
981 | sx={{ borderBottom: `1px solid ${theme.palette.divider}` }}
982 | />
983 |
984 | <CardContent
985 | sx={{
986 | flexGrow: 1,
987 | overflowY: 'auto',
988 | p: 2,
989 | }}>
990 | {getAiResponsesToDisplay().map(renderHistoryItem)}
991 | {isProcessing && (
992 | <Box sx={{ display: 'flex', alignItems: 'center', gap: 1, py: 1 }}>
993 | <CircularProgress size={18} color="secondary" />
994 | <Typography variant="body2" color="text.secondary">AI is thinking...</Typography>
995 | </Box>
996 | )}
997 | </CardContent>
998 |
999 | </Card>
1000 | </Grid>
1001 |
1002 |
1003 | {/* Right Panel */}
1004 | <Grid item xs={12} md={3} sx={{ height: '100%' }}>
1005 | <Card sx={{ height: '100%', display: 'flex', flexDirection: 'column' }}>
1006 | <CardHeader title="Your Input" avatar={<PersonIcon color="success" />} sx={{ pb: 1 }} />
1007 | <CardContent sx={{ pt: 0, flexGrow: 1, display: 'flex', flexDirection: 'column' }}>
1008 | <FormControlLabel
1009 | control={<Switch checked={isManualMode} onChange={(e) => setIsManualMode(e.target.checked)} color="primary" />}
1010 | label="Manual Input Mode"
1011 | sx={{ mb: 1 }}
1012 | />
1013 | <TextField
1014 | fullWidth
1015 | multiline
1016 | minRows={4}
1017 | maxRows={10}
1018 | value={micTranscription}
1019 | onChange={(e) => handleManualInputChange(e.target.value, 'microphone')}
1020 | onKeyDown={(e) => handleKeyPress(e, 'microphone')}
1021 | placeholder="Your speech or manual input..."
1022 | sx={{ mb: 2, flexGrow: 1 }}
1023 | />
1024 | <Box sx={{ display: 'flex', mb: 1 }}>
1025 | <Button
1026 | variant="contained"
1027 | color={isMicrophoneActive ? 'error' : 'primary'}
1028 | onClick={startMicrophoneRecognition}
1029 | startIcon={isMicrophoneActive ? <MicOffIcon /> : <MicIcon />}
1030 | sx={{ flexGrow: 1 }}
1031 | >
1032 | {isMicrophoneActive ? 'Stop Mic' : 'Start Mic'}
1033 | </Button>
1034 | </Box>
1035 |
1036 |
1037 | {isManualMode && (
1038 | <Button
1039 | fullWidth
1040 | variant="outlined"
1041 | onClick={() => handleManualSubmit('microphone')}
1042 | disabled={isProcessing}
1043 | startIcon={<SendIcon />}
1044 | >
1045 | Submit to AI
1046 | </Button>
1047 | )}
1048 | </CardContent>
1049 | </Card>
1050 | </Grid>
1051 | </Grid>
1052 |
1053 |
1054 |
1055 | <SettingsDialog
1056 | open={settingsOpen}
1057 | onClose={() => setSettingsOpen(false)}
1058 | onSave={handleSettingsSaved}
1059 | />
1060 | <Snackbar
1061 | open={snackbarOpen}
1062 | autoHideDuration={6000}
1063 | onClose={handleSnackbarClose}
1064 | anchorOrigin={{ vertical: 'bottom', horizontal: 'center' }}
1065 | >
1066 | <Alert onClose={handleSnackbarClose} severity={snackbarSeverity} variant="filled" sx={{ width: '100%' }}>
1067 | {snackbarMessage}
1068 | </Alert>
1069 | </Snackbar>
1070 |
1071 |
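1072 | {/*
1073 | The PiP windows opened by togglePipWindow are driven entirely by postMessage:
1074 | this page pushes AI_LOG_DATA on every history/streaming update and PIP_RESIZE
1075 | when the PiP window is resized. A minimal sketch of the listener /pip-log is
1076 | expected to register (illustrative only -- the real pages/pip-log.js may
1077 | differ, and renderLog/applySize are hypothetical helper names):
1078 |
1079 | window.addEventListener('message', (event) => {
1080 | const { type, payload } = event.data || {};
1081 | if (type === 'AI_LOG_DATA') {
1082 | // payload: { historicalResponses, currentStreamingText, isProcessing, sortOrder }
1083 | renderLog(payload);
1084 | } else if (type === 'PIP_RESIZE') {
1085 | // payload: { width, height } of the host PiP window
1086 | applySize(payload);
1087 | }
1088 | });
1089 | */}
1090 |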
1101 | </>
1102 | );
1103 | }
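1104 |
1105 | /*
1106 | Reference sketch of the object returned by getConfig() (utils/config.js),
1107 | reconstructed from how this page consumes it. See utils/config.js for the
1108 | authoritative definition; the example model names below are assumptions:
1109 |
1110 | {
1111 | aiModel: string, // e.g. 'gpt-4o' or 'gemini-1.5-pro'
1112 | openaiKey: string, // required for OpenAI models
1113 | geminiKey: string, // required for Gemini models
1114 | azureToken: string, // Azure Speech subscription key
1115 | azureRegion: string,
1116 | azureLanguage: string, // e.g. 'en-US'
1117 | silenceTimerDuration: number, // seconds of silence before auto-submit
1118 | responseLength: 'concise' | 'medium' | 'lengthy',
1119 | gptSystemPrompt: string,
1120 | systemAutoMode: boolean,
1121 | isManualMode: boolean
1122 | }
1123 | */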
--------------------------------------------------------------------------------