├── src
├── content
│ ├── app.css
│ ├── index.mjs
│ ├── info.mjs
│ ├── stream-async-iterable.mjs
│ ├── fetch-sse.mjs
│ ├── index.html
│ └── audio.mjs
├── assets
│ ├── logo.png
│ ├── logo_handling.png
│ └── logo_recording.png
├── background
│ └── index.mjs
├── components
│ ├── Callout.jsx
│ ├── TriggerInput.jsx
│ ├── Settings.jsx
│ ├── VoiceDropdown.jsx
│ ├── Popup.jsx
│ └── Info.jsx
├── popup
│ ├── index.mjs
│ └── index.html
└── manifest.json
├── .prettierrc.yaml
├── .gitignore
├── .github
└── workflows
│ └── release.yml
├── package.json
├── LICENSE
└── README.md
/src/content/app.css:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.prettierrc.yaml:
--------------------------------------------------------------------------------
1 | semi: false
2 | singleQuote: true
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 | build/
3 | background.js
4 | .DS_Store
5 | *.zip
--------------------------------------------------------------------------------
/src/content/index.mjs:
--------------------------------------------------------------------------------
1 | import * as info from './info.mjs';
2 | import * as audio from './audio.mjs';
--------------------------------------------------------------------------------
/src/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/idosal/assistant-chat-gpt/HEAD/src/assets/logo.png
--------------------------------------------------------------------------------
/src/assets/logo_handling.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/idosal/assistant-chat-gpt/HEAD/src/assets/logo_handling.png
--------------------------------------------------------------------------------
/src/assets/logo_recording.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/idosal/assistant-chat-gpt/HEAD/src/assets/logo_recording.png
--------------------------------------------------------------------------------
/src/background/index.mjs:
--------------------------------------------------------------------------------
// Service-worker entry point.
// Logs browser-startup events for debugging, then opens the options page
// (which hosts the persistent listening tab) whenever the worker loads.
chrome.runtime.onStartup.addListener(function () {
  console.log('onStartup')
})

chrome.runtime.openOptionsPage()
--------------------------------------------------------------------------------
/src/components/Callout.jsx:
--------------------------------------------------------------------------------
import React from 'react'

// Presentational wrapper used by Info.jsx to highlight notices.
// NOTE(review): the JSX markup of this component was stripped from this dump —
// as written, `return` on its own line returns undefined (ASI) and the
// `{children}` line is unreachable. Restore the original element from VCS
// history; `type` presumably selects the callout's styling — confirm.
export default function Callout({ type, children }) {
  return
{children}
}
--------------------------------------------------------------------------------
/src/content/info.mjs:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import ReactDOM from "react-dom";
3 | import Info from "../components/Info";
4 |
5 | ReactDOM.createRoot(document.getElementById("root")).render(
6 | React.createElement(Info)
7 | );
8 |
--------------------------------------------------------------------------------
/src/popup/index.mjs:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import ReactDOM from 'react-dom';
3 | import Popup from "../components/Popup";
4 |
5 | ReactDOM.createRoot(document.getElementById("app")).render(
6 | React.createElement(Popup)
7 | );
--------------------------------------------------------------------------------
/src/content/stream-async-iterable.mjs:
--------------------------------------------------------------------------------
/**
 * Wrap a ReadableStream in an async generator so it can be consumed with
 * `for await...of`.
 *
 * The reader's lock is always released — on normal completion, on an early
 * `break` by the consumer, and on a thrown error alike.
 *
 * @param {ReadableStream} stream - stream to iterate (e.g. a fetch body)
 * @yields {*} each chunk produced by the stream's reader
 */
export async function* streamAsyncIterable(stream) {
  const reader = stream.getReader()
  try {
    let result = await reader.read()
    while (!result.done) {
      yield result.value
      result = await reader.read()
    }
  } finally {
    reader.releaseLock()
  }
}
15 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 | on: push
3 |
4 | jobs:
5 | build:
6 | runs-on: ubuntu-latest
7 | steps:
8 | - name: Checkout
9 | uses: actions/checkout@v3
10 | - name: Build
11 | uses: actions/setup-node@v3
12 | with:
13 | node-version: 18
14 | - run: npm install
15 | - run: npm run build
16 | - name: Release
17 | uses: softprops/action-gh-release@v1
18 | if: startsWith(github.ref, 'refs/tags/')
19 | with:
20 | files: build/chrome.zip
21 | generate_release_notes: true
--------------------------------------------------------------------------------
/src/content/fetch-sse.mjs:
--------------------------------------------------------------------------------
1 | import { createParser } from 'eventsource-parser'
2 | import { streamAsyncIterable } from './stream-async-iterable.mjs'
3 |
// Fetch `resource` as a server-sent-event stream and invoke options.onMessage
// with each event's data payload. All other options are forwarded to fetch().
//
// Fix: reuse a single TextDecoder in streaming mode. Creating a fresh decoder
// and calling decode() per chunk corrupts multi-byte UTF-8 sequences that are
// split across chunk boundaries; `{ stream: true }` buffers the partial
// sequence until the next chunk arrives.
export async function fetchSSE(resource, options) {
  const { onMessage, ...fetchOptions } = options
  const resp = await fetch(resource, fetchOptions)
  const parser = createParser((event) => {
    if (event.type === 'event') {
      onMessage(event.data)
    }
  })
  const decoder = new TextDecoder()
  for await (const chunk of streamAsyncIterable(resp.body)) {
    parser.feed(decoder.decode(chunk, { stream: true }))
  }
}
17 |
--------------------------------------------------------------------------------
/src/components/TriggerInput.jsx:
--------------------------------------------------------------------------------
import React, { useState } from 'react';
import { setTriggerPhrase } from "../content/audio.mjs";

// Text input letting the user customize the wake phrase (default "Hey girl").
// NOTE(review): the component is named VoiceDropdown but lives in
// TriggerInput.jsx and manages the trigger phrase — presumably a copy/paste
// leftover; consider renaming to TriggerInput.
// NOTE(review): the JSX in the return was stripped from this dump; it
// presumably renders a labeled text input wired to handleChange — restore
// from VCS history.
const VoiceDropdown = () => {
  const [trigger, setTrigger] = useState('Hey girl');

  // Keep local state in sync and push the new phrase to the audio module.
  const handleChange = (event) => {
    setTrigger(event.target.value);
    setTriggerPhrase(event.target.value);
  };

  return (


Trigger phrase



  );
};

export default VoiceDropdown ;
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "chassistant-gpt",
3 | "version": "1.0.0",
4 | "main": "background.js",
5 | "keywords": [],
6 | "author": "idosal",
7 | "license": "ISC",
8 | "scripts": {
9 | "build": "node build.mjs",
10 | "lint": "eslint --ext .js,.mjs .",
11 | "lint:fix": "eslint --ext .js,.mjs . --fix"
12 | },
13 | "dependencies": {
14 | "@chatscope/chat-ui-kit-react": "^1.9.8",
15 | "@chatscope/chat-ui-kit-styles": "^1.4.0",
16 | "archiver": "^5.3.1",
17 | "esbuild": "^0.15.17",
18 | "esbuild-sass-plugin": "^2.4.3",
19 | "eventsource-parser": "^0.0.5",
20 | "punycode": "^2.1.1",
21 | "react": "^18.2.0",
22 | "react-dom": "^18.2.0",
23 | "react-toggle": "^4.1.3",
24 | "uuid": "^9.0.0"
25 | },
26 | "devDependencies": {
27 | "eslint": "^8.29.0",
28 | "prettier": "^2.8.0"
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/src/components/Settings.jsx:
--------------------------------------------------------------------------------
import React, { useState } from 'react';
import { setFillerEnabled, setTriggerPhrase } from "../content/audio.mjs";
import Toggle from 'react-toggle'
import "react-toggle/style.css" // for ES6 modules

// Settings panel toggle for the "natural conversation" filler utterances.
// NOTE(review): setTriggerPhrase is imported but never used here — confirm
// whether it belongs in this component or can be dropped from the import.
// NOTE(review): the JSX in the return was stripped from this dump (the
// Toggle element is missing); restore from VCS history.
const Settings = () => {
  const [isFillerEnabled, setIsFillerEnabled] = useState(true);
  // Mirror the toggle state locally and forward it to the audio module.
  const handleChange = (event) => {
    setIsFillerEnabled(event.target.checked);
    setFillerEnabled(event.target.checked);
  };

  return (


Natural conversation

  );
};

export default Settings;
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Ido Salomon
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/src/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ChassistantGPT",
3 | "description": "Use ChatGPT as your personal assistant",
4 | "version": "1.6.1",
5 | "manifest_version": 3,
6 | "icons": {
7 | "16": "assets/logo.png",
8 | "32": "assets/logo.png",
9 | "48": "assets/logo.png",
10 | "128": "assets/logo.png"
11 | },
12 | "commands": {
13 | "stop-playback": {
14 | "suggested_key": {
15 | "windows": "Ctrl+B",
16 | "mac": "Command+B",
17 | "chromeos": "Ctrl+B",
18 | "linux": "Ctrl+B"
19 | },
20 | "description": "Stop assistant playback"
21 | },
22 | "start-listening": {
23 | "suggested_key": {
24 | "windows": "Ctrl+Shift+E",
25 | "mac": "Command+Shift+E",
26 | "chromeos": "Ctrl+Shift+E",
27 | "linux": "Ctrl+Shift+E"
28 | },
29 | "description": "Push to toggle listening to voice commands"
30 | }
31 | },
32 | "host_permissions": ["https://chat.openai.com/*"],
33 | "background": {
34 | "service_worker": "background/index.js"
35 | },
36 | "action": {
37 | "default_popup": "popup/index.html",
38 | "default_icon": {
39 | "16": "assets/logo.png",
40 | "32": "assets/logo.png",
41 | "48": "assets/logo.png",
42 | "128": "assets/logo.png"
43 | },
44 | "default_title": "ChassistantGPT"
45 | },
46 | "options_ui": {
47 | "open_in_tab": true,
48 | "page": "content/index.html"
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/src/components/VoiceDropdown.jsx:
--------------------------------------------------------------------------------
import React, { useState, useEffect } from 'react';
import { setVoice, testVoice } from "../content/audio.mjs";

// Dropdown of the browser's speech-synthesis voices plus a "test voice"
// button. Selecting an entry forwards the matching SpeechSynthesisVoice to
// the audio module.
// NOTE(review): the JSX in the return was stripped from this dump — restore
// the select/options/button markup from VCS history.
const VoiceDropdown = () => {
  const [voices, setVoices] = useState([]);
  const [selectedVoice, setSelectedVoice] = useState('Google US English');

  // getVoices() returns [] until the async voice list loads, so fall back to
  // the 'voiceschanged' event.
  // NOTE(review): three nested bindings are all named `voices` (state, the
  // promise, and the promise's resolution) — shadowing works but hurts
  // readability; consider distinct names.
  useEffect(() => {
    const voices = new Promise(function (resolve) {
      let voices = window.speechSynthesis.getVoices()
      if (voices.length !== 0) {
        resolve(voices)
      } else {
        window.speechSynthesis.addEventListener('voiceschanged', function () {
          voices = window.speechSynthesis.getVoices()
          resolve(voices)
        })
      }
    })

    voices.then((voices) => {
      setVoices(voices);
    })
  }, []);

  // Look up the full voice object by name and hand it to the audio module.
  // NOTE(review): find() can return undefined for a stale option; setVoice
  // must tolerate that.
  const handleChange = (event) => {
    setSelectedVoice(event.target.value);
    setVoice(voices.find(voice => voice.name === event.target.value));
  };

  return (


Voice


{voices.map((voice) => (

{voice.name} ({voice.lang})

))}

🔊 Test Voice

  );
};

export default VoiceDropdown ;
--------------------------------------------------------------------------------
/src/components/Popup.jsx:
--------------------------------------------------------------------------------
1 | import React, { useEffect, useRef, useState } from "react";
2 | import {
3 | MessageList,
4 | Message,
5 | MessageSeparator
6 | } from "@chatscope/chat-ui-kit-react";
7 | import styles from "@chatscope/chat-ui-kit-styles/dist/default/styles.min.css";
8 |
9 | export default function Popup() {
10 | const msgListRef = useRef();
11 | const [history, setHistory] = useState([])
12 |
13 | function handleHistory(response) {
14 | if (!response?.length) {
15 | return;
16 | }
17 |
18 | if (response.length > history?.length) {
19 | msgListRef.current.scrollToBottom("smooth");
20 | }
21 |
22 | setHistory(response);
23 | }
24 |
25 | useEffect(() => {
26 | chrome.runtime.sendMessage({ type: 'getHistory' }, function (response) {
27 | handleHistory(response.history);
28 | })
29 | window.setInterval(() => {
30 | chrome.runtime.sendMessage({ type: 'getHistory' }, function (response) {
31 | handleHistory(response.history);
32 | })
33 | }, 1000)
34 | }, [])
35 |
36 | return
37 |
38 |
43 | { history?.length ? : null }
44 | { history.map(message => )
49 | }
50 |
51 |
52 | }
53 |
--------------------------------------------------------------------------------
/src/popup/index.html:
--------------------------------------------------------------------------------
1 |
2 |
60 |
61 |
62 |
63 | ChassistantGPT
64 |
65 |
66 |
70 |
71 |
72 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ChassistantGPT
2 |
3 | A **Chrome** browser extension that embeds ChatGPT as a hands-free voice assistant in the background. Any ChatGPT prompt is a quick "Hey girl" away!
4 |
5 | ChassistantGPT **supports over 60 languages and dialects**. Pick your native language, and a **custom trigger phrase** (configurable in the tab). For example: who needs "Hey girl" when you can rock "Coucou Skynet" and converse with ChatGPT in French?
6 |
Instead of the voice trigger, you can also press Ctrl/Cmd + Shift + E from anywhere in the browser. Pressing it while ChassistantGPT is already listening for an instruction will stop it.
8 |
9 | https://user-images.githubusercontent.com/18148989/206920857-a726a8f5-7330-44c1-b97f-9af67f4b67f6.mp4
10 |
11 | Please read the contents of the tab that will open when you install the extension. It contains important information about how to use the extension.
12 | To keep the extension free to use, the speech capabilities are only supported in Chrome (excluding other Chromium-based browsers). However, the code is functional on all modern browsers.
13 |
14 |
15 |
16 | ## Installation
17 |
18 | #### Install from Chrome Web Store (recommended)
19 |
20 | [Download from the Chrome Web Store](https://chrome.google.com/webstore/detail/chassistantgpt/kepjfakbnekbifkjnfhmoilbbnncnfjc)
21 |
22 | #### Local Install
23 |
24 | 1. Download `chrome.zip` from [Releases](https://github.com/idosal/assistant-chat-gpt/releases).
25 | 2. Unzip the file.
26 | 3. In Chrome, go to the extensions page (`chrome://extensions`).
27 | 4. Enable Developer Mode.
28 | 5. Drag the unzipped folder anywhere on the page to import it (do not delete the folder afterwards).
29 |
30 | ## Build from source
31 |
32 | 1. Clone the repo
33 | 2. Install dependencies with `npm install`
34 | 3. Run `npm run build`
35 | 4. Follow the steps in the "Local Install" section above (with the resulting `/build/chrome.zip`).
36 |
37 | ## Roadmap
38 | - [X] Turn popup into a chat with the session history.
39 | - [X] Support more languages.
40 | - [X] Customize trigger phrase.
41 | - [X] Add push-to-talk.
42 | - [ ] Improve code playbacks.
43 | - [ ] Beautify tab UI.
44 |
45 | ## Contribution
46 | Pull requests and suggestions are welcome.
47 |
48 | Many thanks to the brilliant @talkor for improving the UI!
49 |
This project's template is based on the very cool [wong2/chat-gpt-google-extension](https://github.com/wong2/chat-gpt-google-extension)
51 |
52 |
--------------------------------------------------------------------------------
/src/content/index.html:
--------------------------------------------------------------------------------
1 |
2 |
140 |
141 |
142 |
143 | ChassistantGPT
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
--------------------------------------------------------------------------------
/src/components/Info.jsx:
--------------------------------------------------------------------------------
1 | import React, { useEffect, useState } from 'react'
2 | import Callout from './Callout'
3 | import VoiceDropdown from "./VoiceDropdown";
4 | import TriggerInput from "./TriggerInput";
5 | import Settings from "./Settings";
6 |
// Options/content page component: onboarding instructions, microphone
// permission status, and settings (voice dropdown, trigger phrase, fillers).
// NOTE(review): nearly all JSX markup in the return below was stripped in
// this dump — only the text content survives. Restore from VCS history.
export default function Info() {
  const [isMicrophoneEnabled, setIsMicrophoneEnabled] = useState(false)
  const isChrome = checkIsChrome()

  // Request microphone access and flip the status banner on success.
  // NOTE(review): passing an async function to useEffect is an anti-pattern
  // (React treats the return value as a cleanup function, not a Promise);
  // the `async` is also unnecessary since only .then() is used. A rejection
  // (permission denied) is silently ignored — presumably the "requesting
  // access" banner just stays up; confirm intended.
  useEffect(async () => {
    const permissions = navigator.mediaDevices.getUserMedia({
      audio: true,
      video: false,
    })
    permissions.then(() => {
      setIsMicrophoneEnabled(true)
    })
  }, [])

  return (

Welcome to ChassistantGPT!
{!isChrome &&
Uh oh, it looks like you're not using Chrome. Unfortunately, ChassistantGPT is not supported by other browsers
at this time. If you are using Chrome, please carry on.
}

If you wish to use ChassistantGPT in this browsing session,{' '}
please keep this tab open . Otherwise, ChassistantGPT won't be able to hear you.

Status
{isMicrophoneEnabled ? (


ChassistantGPT is ready for your
voice commands


) : (

ChassistantGPT is requesting access to your{' '}
microphone so it may hear your voice commands

)}
Settings





What is ChassistantGPT?

ChassistantGPT is a ChatGPT voice assistant.

How To Use


Say "Hey girl" followed by your prompt. For
example: "Hey girl, what is love?" or{' '}
"Hey girl... Tell me a joke" .


ChassistantGPT supports 60+ languages and dialects . You can change the voice (and language) in the dropdown above. You can also change the trigger phrase to any string in any language using the input field.
When defined, use your custom trigger phrase instead of "Hey girl", and converse with the assistant in your chosen language.


If you prefer not to use the voice trigger phrase , you can press Ctrl/Cmd + Shift + E to activate ChassistantGPT. For example: *Ctrl+Shift+E* "What is love?" .


If a prompt follows as part of the same sentence, ChassistantGPT will
forward the prompt directly to ChatGPT. If not, a "beep" sound will
follow (accompanied by switching of the popup icon to red), signifying
ChassistantGPT is waiting for input.


Before sending to ChatGPT, ChassistantGPT will say "OK, coming up".
While waiting for a response from ChatGPT, the popup icon will turn
green.


In addition to the voice response from ChatGPT, you can view the full conversation
at any time by clicking on ChassistantGPT's popup.


You may stop ChassistantGPT's voice playback at any time by pressing{' '}
Cmd/Ctrl + B.



Privacy

ChassistantGPT relies on your existing session with ChatGPT . If you
aren't logged in, please do so at{' '}

https://chat.openai.com/chat

.


The extension does not store any data.{' '}
It does not transmit data from your device, except for the sentence that directly follows the trigger phrase, which is sent straight to ChatGPT.


)
}
107 |
// Heuristic Chrome detection: a Chromium engine exposes window.chrome, and
// this check additionally requires exactly three entries in the UA Client
// Hints brand list.
// NOTE(review): the three-brand assumption is fragile (other Chromium
// browsers may also report three brands) — confirm against current UA-CH
// behavior.
function checkIsChrome() {
  const hasChromeGlobal = !!window.chrome
  const brandCount = window.navigator?.userAgentData?.brands?.length
  return hasChromeGlobal && brandCount === 3
}
113 |
114 |
--------------------------------------------------------------------------------
/src/content/audio.mjs:
--------------------------------------------------------------------------------
1 | import { v4 as uuidv4 } from 'uuid'
2 | import { fetchSSE } from './fetch-sse.mjs'
3 |
// Prompt for microphone access up front (webkit-prefixed callback API);
// both success and failure callbacks are no-ops — only the permission
// prompt matters here.
navigator.webkitGetUserMedia(
  { audio: true },
  () => {},
  () => {},
)

// --- Module-level conversation and playback state ---
let conversationId = ''        // current ChatGPT conversation id ('' = new thread)
let parentMessageId = ''       // last ChatGPT message id, for threading replies
let lastMessage = ''           // most recent (possibly partial) answer text
let lastPartIndex = 1          // count of sentence fragments already emitted
let lastPart = ''              // trailing, not-yet-spoken sentence fragment
let shouldStop = false         // set by the stop-playback keyboard command
let lastInstruction = ''       // most recent user instruction
let voice                      // selected SpeechSynthesisVoice (may be undefined)
let triggerPhrase = 'hey girl' // lowercased wake phrase
let pauseHandler               // timeout id for the short pause filler
let longPauseHandler           // timeout id for the long pause filler
let isFillerEnabled = true;    // whether filler utterances are spoken
const history = []             // conversation log served to the popup
const voiceTestText = 'The quick brown fox jumps over the lazy dog.';

// Continuous speech recognition; the language is switched in recognition.onend
// to follow the selected voice.
const recognition = new webkitSpeechRecognition()
recognition.lang = 'en-US'
recognition.continuous = true
28 |
// Toggle the conversational filler utterances (acknowledgement and pause
// sounds) played while waiting for ChatGPT; driven by the Settings toggle.
export function setFillerEnabled(enabled) {
  isFillerEnabled = enabled;
}
32 |
// Resolves with the browser's speech-synthesis voices. getVoices() returns
// [] until the voice list has loaded asynchronously, so fall back to the
// 'voiceschanged' event in that case.
const allVoicesObtained = new Promise(function (resolve) {
  let voices = window.speechSynthesis.getVoices()
  if (voices.length !== 0) {
    resolve(voices)
  } else {
    window.speechSynthesis.addEventListener('voiceschanged', function () {
      voices = window.speechSynthesis.getVoices()
      resolve(voices)
    })
  }
})

// Default to the 'Google US English' voice when it is available.
allVoicesObtained.then((voices) => {
  const usVoice = voices.find((voice) => voice.name === 'Google US English')
  if (usVoice) {
    setVoice(usVoice)
  }
})
51 |
// Return the currently selected speech-synthesis voice. May be undefined
// until allVoicesObtained resolves or the user picks a voice.
export function getVoice() {
  console.log('get voice', voice)
  return voice
}
56 |
// Select the speech-synthesis voice used for playback. When the new voice's
// language differs from the current recognition language, recognition is
// stopped; the onend handler restarts it with the new language.
//
// Fix: guard against a nullish voice — VoiceDropdown calls
// setVoice(voices.find(...)), which can yield undefined and previously threw
// a TypeError on `v.lang` here.
export function setVoice(v) {
  console.log('setVoice', v)
  if (!v) {
    return
  }

  if (recognition.lang !== v.lang) {
    recognition.stop();
  }

  voice = v;
}
65 |
// Update the wake phrase used to trigger the assistant. Falsy input (empty
// string, undefined) restores the default. The stored phrase is lowercased
// because matching is done against lowercased transcripts.
export function setTriggerPhrase(t) {
  console.log('setPhrase', t)

  if (t) {
    triggerPhrase = t.toLowerCase()
  } else {
    triggerPhrase = 'hey girl'
  }
}
71 |
// Flag checked by the streaming-answer callback to abandon playback of any
// remaining answer fragments (set by the stop-playback keyboard command).
function stopAnswer() {
  shouldStop = true
}
75 |
// Swap the extension's toolbar icon; url is a path relative to the extension
// root (e.g. 'assets/logo_recording.png').
function setIcon(url) {
  chrome.action.setIcon({
    path: chrome.runtime.getURL(url),
  })
}
81 |
// Replace the text of the most recent history entry — used while an answer
// streams in, so the popup sees the message grow in place.
function updateHistory(message) {
  if (history.length === 0) {
    return;
  }

  history[history.length - 1].text = message;
}
89 |
// Append a message to the conversation history served to the popup.
// ChatGPT messages are marked 'incoming' and remembered as the last answer;
// user messages are 'outgoing' and remembered as the last instruction.
function addToHistory(message, isChatGPT) {
  const direction = isChatGPT ? 'incoming' : 'outgoing'
  if (isChatGPT) {
    lastMessage = message
  } else {
    lastInstruction = message
  }

  history.push({ text: message, time: new Date(), direction })
}
101 |
// Send `question` to ChatGPT's (unofficial) conversation endpoint and stream
// the answer back sentence-by-sentence through `callback`.
//
// callback receives: each completed '.'-terminated fragment as it arrives,
// then the final trailing fragment, then the literal '[DONE]' marker.
//
// Side effects: records both question and answer in `history`, and threads
// follow-up questions via the module-level conversationId/parentMessageId.
// On failure: clears fillers, logs an error history entry, speaks an
// apology, and restores the idle icon.
async function getAnswerFromChatGPT(question, callback) {
  try {
    addToHistory(question, false)
    lastMessage = ''
    const accessToken = await getAccessToken()
    const body = {
      action: 'next',
      messages: [
        {
          id: uuidv4(),
          role: 'user',
          content: {
            content_type: 'text',
            parts: [question],
          },
        },
      ],
      model: 'text-davinci-002-render',
      parent_message_id: uuidv4(),
    }
    // Continue the existing thread when we have one; this overwrites the
    // random parent_message_id set above.
    if (conversationId) {
      body.conversation_id = conversationId
    }
    if (parentMessageId) {
      body.parent_message_id = parentMessageId
    }

    await fetchSSE('https://chat.openai.com/backend-api/conversation', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${accessToken}`,
      },
      body: JSON.stringify(body),
      onMessage(message) {
        if (message === '[DONE]') {
          // Flush the held-back trailing fragment, signal completion, and
          // reset the fragment counter for the next question.
          callback(lastPart)
          callback(message)
          lastPartIndex = 1
          return
        }

        const data = JSON.parse(message)
        conversationId = data.conversation_id
        parentMessageId = data.message.id
        const text = data.message?.content?.parts?.[0]
        if (text) {
          // First chunk creates the history entry; later chunks grow it.
          if (lastMessage) {
            updateHistory(text)
          } else {
            // NOTE(review): lastMessage is '' at this point, so an empty
            // entry is pushed and then filled by later updateHistory calls —
            // presumably intentional (placeholder), but confirm.
            addToHistory(lastMessage, true)
          }

          lastMessage = text

          // Emit one new sentence per chunk once more '.'-separated parts
          // exist than have been spoken; the last (possibly incomplete)
          // piece is held in lastPart until the next chunk or '[DONE]'.
          const split = data.message?.content?.parts?.[0].split('.')
          if (split?.length > 1 && split?.length > lastPartIndex) {
            callback(lastPart)
            lastPartIndex++
          }

          lastPart = split[split.length - 1]
        }
      },
    })
  } catch (e) {
    clearPauseFillers();
    const historySuffix = e.message === 'UNAUTHORIZED' ? '. Please authenticate at https://chat.openai.com/chat' : ''
    addToHistory("Error from ChatGPT: " + e.message + historySuffix, true);
    const voiceSuffix = e.message === 'UNAUTHORIZED' ? ' Please authenticate at chat.openai.com' : ''
    const utterance = new SpeechSynthesisUtterance('I\'m sorry. Chat G P T returned an error.' + voiceSuffix)
    utterance.volume = 0.5
    if (getVoice()) {
      utterance.voice = getVoice()
    }
    speechSynthesis.speak(utterance)
    setIcon('assets/logo.png')
    console.error(e)
  }
}
182 |
// Fetch the ChatGPT access token from the active browser session.
// Throws Error('UNAUTHORIZED') when the user is not logged in or the request
// fails — callers match on that exact message.
//
// Cleanup: straight async/await instead of mixing `await` with .then/.catch
// chains; network and JSON-parse failures are treated the same as a missing
// token (as before).
async function getAccessToken() {
  let session = {}
  try {
    const resp = await fetch('https://chat.openai.com/api/auth/session', {})
    session = await resp.json()
  } catch {
    // Best-effort: any failure falls through to the UNAUTHORIZED throw below.
  }
  if (!session.accessToken) {
    throw new Error('UNAUTHORIZED')
  }
  return session.accessToken
}
192 |
// Schedule two filler utterances to cover long waits for a ChatGPT answer:
// a short "thinking" sound after 5s and a longer one after 12s. Both timers
// are cancelled by clearPauseFillers() once the answer starts (or errors).
function addPauseFillers() {
  const speakFiller = (text, rate) => {
    const utterance = new SpeechSynthesisUtterance(text)
    utterance.volume = 0.5
    utterance.rate = rate
    if (getVoice()) {
      utterance.voice = getVoice()
    }
    speechSynthesis.speak(utterance)
  }

  pauseHandler = window.setTimeout(() => speakFiller('ummm, lets see', 0.8), 5000)
  longPauseHandler = window.setTimeout(() => speakFiller('uhmmm...', 0.6), 12000)
}
214 |
// Speak a short acknowledgement right after an instruction is captured, so
// the user knows it was heard before the answer arrives.
function addAckFiller() {
  const utterance = new SpeechSynthesisUtterance('okay. coming up')
  utterance.volume = 0.5
  utterance.rate = 0.9
  if (getVoice()) {
    utterance.voice = getVoice()
  }
  speechSynthesis.speak(utterance)
}
224 |
// Kick off a ChatGPT request for `question` and speak the answer as it
// streams in. The request is deliberately not awaited so the icon switch and
// filler sounds happen immediately; results flow through the callback.
async function getAnswer(question) {
  getAnswerFromChatGPT(question, (answer) => {
    if (answer === '[DONE]') {
      // Stream finished: re-arm playback for the next question.
      shouldStop = false
      return
    }

    if (shouldStop) {
      return
    }

    if (answer) {
      processAnswer(answer)
    }
  })
  setIcon('assets/logo_handling.png')

  // Fillers are English-only. Fix: optional chaining guards the case where
  // no voice has been selected yet — getVoice() may return undefined, and
  // `getVoice().lang` previously threw a TypeError here.
  if (getVoice()?.lang === 'en-US' && isFillerEnabled) {
    addAckFiller();
    addPauseFillers();
  }
}
247 |
// Cancel any scheduled pause-filler utterances (the answer has started
// playing, or the request failed).
function clearPauseFillers() {
  window.clearTimeout(pauseHandler)
  window.clearTimeout(longPauseHandler)
  pauseHandler = undefined
  longPauseHandler = undefined
}
254 |
// Preview the currently selected voice by speaking the fixed pangram in
// voiceTestText; triggered from the settings UI.
export function testVoice() {
  const utterance = new SpeechSynthesisUtterance(voiceTestText)
  utterance.rate = 0.9
  if (getVoice()) {
    utterance.voice = getVoice()
  }
  speechSynthesis.speak(utterance)
}
264 |
// Speak one streamed answer fragment: cancel pending fillers, restore the
// idle icon, and read the fragment with the selected voice.
function processAnswer(answer) {
  clearPauseFillers();
  setIcon('assets/logo.png')
  const utterance = new SpeechSynthesisUtterance(answer.trimStart())
  utterance.rate = 0.9
  if (getVoice()) {
    utterance.voice = getVoice()
  }
  speechSynthesis.speak(utterance)
}
275 |
// Ping the ChatGPT session endpoint every 4 minutes so the auth session
// stays warm. Fix: swallow the rejection — getAccessToken throws
// UNAUTHORIZED when logged out, which previously produced an unhandled
// promise rejection on every cycle; this is a best-effort keep-alive.
function sessionKeepAlive() {
  window.setInterval(() => getAccessToken().catch(() => {}), 4 * 60 * 1000)
}
279 |
// Signal that the assistant is now waiting for an instruction: switch to the
// red "recording" icon and play a short synthesized "beep!".
async function notifyStartListening() {
  setIcon('assets/logo_recording.png')
  const utterance = new SpeechSynthesisUtterance('beep!')
  utterance.rate = 2
  utterance.pitch = 1.5
  if (getVoice()) {
    utterance.voice = getVoice()
  }
  speechSynthesis.speak(utterance)
}
290 |
// Wire up speech recognition, keyboard commands, and popup messaging.
// Wrapped in try/catch so a missing API (e.g. outside Chrome) does not kill
// the whole content script.
try {
  // True while waiting for a follow-up instruction after a bare trigger
  // phrase (or after the start-listening command).
  let isActive = false

  function startListening() {
    notifyStartListening();
    isActive = true
  }

  // Fix: take the SpeechRecognitionEvent as a parameter. The original arrow
  // function declared no parameter and read the deprecated implicit global
  // `window.event`, which is undefined in strict/module contexts.
  recognition.addEventListener('result', async (event) => {
    const transcript = event.results[event.results?.length - 1][0].transcript
    console.log(transcript)
    if (isActive) {
      // Already listening: the whole utterance is the instruction; strip the
      // trigger phrase if the user repeated it.
      let instruction = transcript
      if (transcript.trimStart().startsWith(triggerPhrase)) {
        instruction = transcript.trimStart().substring(triggerPhrase.length)
      }
      isActive = false
      getAnswer(instruction)
      return
    }

    const trimmed = transcript.trimStart().trimEnd().toLowerCase()
    if (trimmed.startsWith(triggerPhrase)) {
      const instruction = trimmed.substring(triggerPhrase.length)
      if (instruction && instruction?.length > 2) {
        // Trigger phrase plus a prompt in one sentence: answer directly.
        getAnswer(instruction)
      } else {
        // Bare trigger phrase: beep and wait for the follow-up instruction.
        startListening()
      }
    }
  })

  recognition.addEventListener('error', (event) => {
    console.log(event)
    setIcon('assets/logo.png')
  })

  // Chrome stops continuous recognition periodically; restart it right away,
  // picking up the language of the currently selected voice.
  recognition.onend = function () {
    setIcon('assets/logo.png')
    recognition.lang = getVoice()?.lang || 'en-US'
    recognition.start()
  }

  recognition.start()

  chrome.commands.onCommand.addListener(function (command) {
    if (command === 'stop-playback') {
      speechSynthesis.cancel()
      stopAnswer()
    }

    if (command === 'start-listening') {
      // Toggle: a second press cancels the pending listen.
      if (isActive) {
        isActive = false
        setIcon('assets/logo.png')
      } else {
        startListening()
      }
    }
  })

  // Listen for history query from the popup
  chrome.runtime.onMessage.addListener(function (message, sender, sendResponse) {
    if (message.type === 'getHistory') {
      sendResponse({ history })
    }

    return false
  })

  tryToPreventClose()
  sessionKeepAlive()
} catch (e) {
  console.error(e)
}
366 |
// Arm a beforeunload confirmation after the first user gesture (browsers
// ignore beforeunload prompts without prior interaction).
// Fixes: register the mousedown handler with { once: true } so a duplicate
// beforeunload listener is no longer added on every click (the original
// accumulated listeners indefinitely), and correct the product name in the
// user-facing message ("AssistantGPT" -> "ChassistantGPT").
function tryToPreventClose() {
  window.addEventListener(
    'mousedown',
    () => {
      window.addEventListener('beforeunload', function (e) {
        // Show a confirmation message
        const confirmationMessage =
          "If you leave this page, ChassistantGPT won't work until it is restarted. Are you sure you want to leave?"
        e.returnValue = confirmationMessage // Gecko, Trident, Chrome 34+
        return confirmationMessage // Gecko, WebKit, Chrome <34
      })
    },
    { once: true },
  )
}
378 |
--------------------------------------------------------------------------------