├── .gitignore
├── LICENSE
├── README.en.md
├── README.md
├── app.js
├── icon-192x192.png
├── index.html
├── main.css
├── manifest.json
├── prompts.json
└── sw.js
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Qiang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.en.md:
--------------------------------------------------------------------------------
1 | [中文](README.md) | [English](README.en.md)
2 |
3 | # chatgpt
4 |
5 | Build your own OpenAI ChatGPT website.
6 |
7 | Voice recognition defaults to the local (browser) recognizer. When local recognition fails, the app automatically falls back to `OpenAI Whisper`; you can also force Whisper with the `Only use Whisper` setting.
8 |
9 | Note: because of how Android works, Android phones cannot run local speech recognition and the recording service at the same time, so mobile users should enable `Only use Whisper` mode.
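
A minimal sketch of this fallback (the `recordAndTranscribe` helper, which would record audio and post it to Whisper, is hypothetical here; the real wiring lives in `app.js`):

```js
// Sketch: try the browser's local speech recognition first; on any
// error, fall back to recording audio and sending it to OpenAI Whisper.
const Recognition = window.SpeechRecognition || window.webkitSpeechRecognition;

function recognizeWithFallback(onText, recordAndTranscribe) {
  if (!Recognition) return recordAndTranscribe(); // no local recognizer at all
  const rec = new Recognition();
  rec.lang = navigator.language;
  rec.onresult = (e) => onText(e.results[0][0].transcript);
  rec.onerror = () => recordAndTranscribe(); // local recognition failed
  rec.start();
}
```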
10 |
11 | - Custom OpenAI domain: requests go directly to it, not through anyone else's server, so there is no need to worry about key leakage;
12 | - Your own API key;
13 | - All data is stored locally;
14 | - Model selection;
15 | - Configurable assistant prompt;
16 | - Multiple pre-installed prompts;
17 | - Conversation history (stored locally);
18 | - Configurable `temperature` parameter;
19 | - SSE support, i.e. the `stream` option in the OpenAI API;
20 | - Automatic reading aloud of replies (TTS);
21 | - Voice input (ASR);
22 | - `OpenAI Whisper` recognition (local voice recognition is used by default).
23 |
24 | Everyone is welcome to improve this [prompt list](https://github.com/excing/chatgpt/blob/main/prompts.json).
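
For reference, here is a sketch of the kind of request these settings produce (`YOUR_API_KEY` is a placeholder; the endpoint and fields follow the public OpenAI chat completions API):

```js
// Sketch: a single chat completion request using the temperature and
// stream options listed above. Set stream: true to receive SSE chunks.
const resp = await fetch("https://api.openai.com/v1/chat/completions", {
  method: "POST",
  headers: {
    "Authorization": "Bearer YOUR_API_KEY", // placeholder
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    model: "gpt-3.5-turbo",
    messages: [
      { role: "system", content: "You are a helpful assistant." },
      { role: "user", content: "Hello!" },
    ],
    temperature: 0.5,
    stream: false,
  }),
});
console.log((await resp.json()).choices[0].message.content);
```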
25 |
26 | ## Deployment
27 |
28 | Fork this project, then enable GitHub Pages for your fork.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [中文](README.md) | [English](README.en.md)
2 |
3 | # chatgpt
4 | 构建你的 OpenAI ChatGPT Web 站点
5 |
6 | 语音识别功能,默认使用本地语音识别模式,当本地语音识别失败时,会自动使用 `OpenAI Whisper` 开始识别,也可以设置为 `仅使用 Whisper` 进行识别。
7 |
8 | > 注:由于 Android 手机用户无法同时进行本地语音识别和录音服务(和 Android 系统特性有关),所以手机用户请开启 `仅使用 Whisper` 模式。
9 |
10 | - 自定义 OpenAI 域名,直连,不经过他人服务器,无需担心 Key 泄露;
11 | - 自己的 API Key;
12 | - 所有数据都在本地存储;
13 | - 模型选择;
14 | - 可设置助手 `prompt`;
15 | - 预置多个 `prompt`;
16 | - 会话历史记录(本地存储);
17 | - 支持设置 `temperature` 参数;
18 | - 支持 `sse`,即 OpenAI API 中的 `stream`;
19 | - 支持自动播放文本(TTS);
20 | - 支持语音录入(ASR);
21 | - 支持 `OpenAI Whisper` 识别(默认使用本地语音识别)。
22 |
23 | 欢迎更多人来完善这个 [prompt list](https://github.com/excing/chatgpt/blob/main/prompts.json)。
24 |
25 | ## 部署
26 |
27 | Fork 此项目,然后开启你的 GitHub Pages 即可。
28 |
29 | 如果你的 OpenAI 不可访问,可以尝试使用这个方案:[使用 Cloudflare Workers 让 OpenAI API 绕过 GFW 且避免被封禁](https://github.com/noobnooc/noobnooc/discussions/9)
30 |
31 | 省流版:创建一个 Cloudflare Worker,粘贴以下 Worker JS 并发布:
32 |
33 | 其中 `YOUR_OPENAI_API_KEY` 为占位符,填写你的 OpenAI API key 即可实现客户端无 key 使用。
36 |
37 | ```js
38 | addEventListener('fetch', event => {
39 | event.respondWith(fetchAndApply(event.request));
40 | })
41 |
42 | async function fetchAndApply(request) {
43 |
44 | let response = null;
45 | let method = request.method;
46 |
47 | let url = new URL(request.url);
48 | let url_hostname = url.hostname;
49 | url.protocol = 'https:';
50 | url.host = 'api.openai.com';
51 |
52 | let request_headers = request.headers;
53 | let new_request_headers = new Headers(request_headers);
54 | new_request_headers.set('Host', url.host);
55 | new_request_headers.set('Referer', url.protocol + '//' + url_hostname);
56 |   new_request_headers.set('Authorization', 'Bearer YOUR_OPENAI_API_KEY'); // 占位符:替换为你的 OpenAI API key
57 |
58 | let original_response = await fetch(url.href, {
59 | method: method,
60 | headers: new_request_headers,
61 | body: request.body
62 | })
63 |
64 | // let original_response_clone = original_response.clone();
65 | let original_text = null;
66 | let response_headers = original_response.headers;
67 | let new_response_headers = new Headers(response_headers);
68 | let status = original_response.status;
69 |
70 | new_response_headers.set('Cache-Control', 'no-store');
71 | new_response_headers.set('access-control-allow-origin', '*');
72 | new_response_headers.set('access-control-allow-credentials', true);
73 | new_response_headers.delete('content-security-policy');
74 | new_response_headers.delete('content-security-policy-report-only');
75 | new_response_headers.delete('clear-site-data');
76 |
77 | original_text = original_response.body
78 | response = new Response(original_text, {
79 | status,
80 | headers: new_response_headers
81 | })
82 |
83 | return response
84 | }
85 |
86 | async function replace_response_text(response, upstream_domain, host_name) { // 注:未被调用,且依赖此处未定义的 replace_dict,保留自原方案
87 | let text = await response.text()
88 |
89 | var i, j;
90 | for (i in replace_dict) {
91 | j = replace_dict[i]
92 | if (i == '$upstream') {
93 | i = upstream_domain
94 | } else if (i == '$custom_domain') {
95 | i = host_name
96 | }
97 |
98 | if (j == '$upstream') {
99 | j = upstream_domain
100 | } else if (j == '$custom_domain') {
101 | j = host_name
102 | }
103 |
104 | let re = new RegExp(i, 'g')
105 | text = text.replace(re, j);
106 | }
107 | return text;
108 | }
109 | ```
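
部署完成后,在本站点的设置中将 OpenAI 域名改为你的 Worker 地址(例如 `https://openai.example.workers.dev`,此处仅为示例域名)即可。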
110 |
111 |
--------------------------------------------------------------------------------
/app.js:
--------------------------------------------------------------------------------
1 | window.addEventListener("keydown", (e) => {
2 | if (e.key === "Escape") {
3 | showSettings(false)
4 | showHistory(false)
5 | }
6 | if ((e.ctrlKey || e.altKey)) {
7 | // console.log(e.key);
8 | switch (e.key) {
9 | case "i":
10 | e.preventDefault()
11 | reset()
12 | break;
13 | case ",":
14 | e.preventDefault()
15 | showSettings(true)
16 | break;
17 | case "h":
18 | e.preventDefault()
19 | showHistory(true)
20 | break;
21 | case ";":
22 | e.preventDefault()
23 | config.multi = !config.multi
24 | addItem("system", "Long conversation checked: " + config.multi)
25 | break;
26 | case "b":
27 | e.preventDefault()
28 | speechToText()
29 | break;
30 |
31 | default:
32 | break;
33 | }
34 | }
35 | }, { passive: false })
36 |
37 | line.addEventListener("keydown", (e) => {
38 | if (e.key == "Enter" && (e.ctrlKey || e.altKey)) {
39 | e.preventDefault()
40 | onSend()
41 | }
42 | })
43 |
44 | line.addEventListener("paste", (e) => {
45 | e.preventDefault()
46 |
47 | let clipboardData = (e.clipboardData || window.clipboardData)
48 | let paste = clipboardData.getData("text/plain")
49 | .toString()
50 | .replaceAll("\r\n", "\n")
51 | line.focus()
52 | document.execCommand("insertText", false, paste)
53 | }, { passive: false })
54 |
55 | function onSend() {
56 | var value = (line.value || line.innerText).trim()
57 |
58 | if (!value) return
59 |
60 | addItem("user", value)
61 | postLine(value)
62 |
63 | line.value = ""
64 | line.innerText = ""
65 | }
66 |
67 | function addItem(type, content) {
68 | let request = document.createElement("div")
69 | request.className = type
70 | request.innerText = content
71 | box.appendChild(request)
72 |
73 | window.scrollTo({
74 | top: document.body.scrollHeight, behavior: "auto",
75 | })
76 | line.focus()
77 |
78 | return request
79 | }
80 |
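// Trim the context before sending: keep the system prompt (messages[0])
// plus only the most recent turns, so long conversations stay within the
// model's context limit.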
81 | function postLine(line) {
82 | saveConv({ role: "user", content: line })
83 | let reqMsgs = []
84 | if (messages.length < 10) {
85 | reqMsgs.push(...messages)
86 | } else {
87 | reqMsgs.push(messages[0])
88 | reqMsgs.push(...messages.slice(messages.length - 7, messages.length))
89 | }
90 | if (config.model === "gpt-3.5-turbo") {
91 | chat(reqMsgs)
92 | } else {
93 | completions(reqMsgs)
94 | }
95 | }
96 |
97 | var convId;
98 | var messages = [];
99 | function chat(reqMsgs) {
100 | let assistantElem = addItem('', '')
101 | let _message = reqMsgs
102 |   if (!config.multi) { // single-turn mode: send only the system prompt and the latest message
103 | _message = [reqMsgs[0], reqMsgs[reqMsgs.length - 1]]
104 | }
105 | send(`${config.domain}/v1/chat/completions`, {
106 | "model": "gpt-3.5-turbo",
107 | "messages": _message,
108 | "max_tokens": config.maxTokens,
109 | "stream": config.stream,
110 | "temperature": config.temperature,
111 | }, (data) => {
112 | let msg = data.choices[0].delta || data.choices[0].message || {}
113 | assistantElem.className = 'assistant'
114 | assistantElem.innerText += msg.content || ""
115 |   }, () => onSucceeded(assistantElem))
116 | }
117 | function completions(reqMsgs) {
118 | let assistantElem = addItem('', '')
119 | let _prompt = ""
120 | if (config.multi) {
121 | reqMsgs.forEach(msg => {
122 | _prompt += `${msg.role}: ${msg.content}\n`
123 | });
124 | } else {
125 | _prompt += `${reqMsgs[0].role}: ${reqMsgs[0].content}\n`
126 | let lastMessage = reqMsgs[reqMsgs.length - 1]
127 | _prompt += `${lastMessage.role}: ${lastMessage.content}\n`
128 | }
129 | _prompt += "assistant: "
130 | send(`${config.domain}/v1/completions`, {
131 | "model": config.model,
132 | "prompt": _prompt,
133 | "max_tokens": config.maxTokens,
134 | "temperature": 0,
135 | "stop": ["\nuser: ", "\nassistant: "],
136 | "stream": config.stream,
137 | "temperature": config.temperature,
138 | }, (data) => {
139 | assistantElem.className = 'assistant'
140 | assistantElem.innerText += data.choices[0].text
141 |   }, () => onSucceeded(assistantElem))
142 | }
143 | function onSucceeded(assistantElem) {
144 | let msg = assistantElem.innerText
145 | saveConv({ role: "assistant", content: msg })
146 | if (config.tts) {
147 | textToSpeech(msg)
148 | }
149 | }
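// Send the request either as an SSE stream (rendering tokens as they
// arrive) or as a single fetch. When TTS is enabled the full reply is
// needed before it can be spoken, so streaming is disabled in that case.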
150 | function send(reqUrl, body, onMessage, onSuccess) {
151 | loader.hidden = false
152 | let onError = (data) => {
153 | console.error(data);
154 | loader.hidden = true
155 | if (!data) {
156 | addItem("system", `Unable to access OpenAI, please check your network.`)
157 | } else {
158 | try {
159 | let openai = JSON.parse(data)
160 | addItem("system", `${openai.error.message}`)
161 | } catch (error) {
162 | addItem("system", `${data}`)
163 | }
164 | }
165 | }
166 | if (!config.tts) {
167 | body.stream = true
168 | var source = new SSE(
169 | reqUrl, {
170 | headers: {
171 | "Authorization": "Bearer " + config.apiKey,
172 | "Content-Type": "application/json",
173 | },
174 | method: "POST",
175 | payload: JSON.stringify(body),
176 | });
177 |
178 | source.addEventListener("message", function (e) {
179 | if (e.data == "[DONE]") {
180 | loader.hidden = true
181 |         onSuccess()
182 | } else {
183 | try {
184 | onMessage(JSON.parse(e.data))
185 | } catch (error) {
186 | onError(error)
187 | }
188 | }
189 | });
190 |
191 | source.addEventListener("error", function (e) {
192 | onError(e.data)
193 | });
194 |
195 | source.stream();
196 | } else {
197 | body.stream = false
198 | fetch(reqUrl, {
199 | method: "POST",
200 | headers: {
201 | "Authorization": "Bearer " + config.apiKey,
202 | "Content-Type": "application/json",
203 | },
204 | body: JSON.stringify(body),
205 | }).then((resp) => {
206 | return resp.json()
207 | }).then((data) => {
208 | loader.hidden = true
209 | if (data.error) {
210 | throw new Error(`${data.error.code}: ${data.error.message}`)
211 | }
212 | onMessage(data)
213 |     onSuccess()
214 | }).catch(onError)
215 | }
216 | }
217 |
218 | function reset() {
219 | box.innerHTML = ''
220 |   convId = uuidv4(); // uuidv4 is expected to be provided by a page-level script; it is not defined in this file
221 | messages = [config.firstPrompt]
222 | addItem(config.firstPrompt.role, config.firstPrompt.content)
223 | }
224 |
225 | const convKey = "conversations_"
226 | const convNameKey = "conversationName_"
227 | function saveConv(message) {
228 | messages.push(message)
229 | localStorage.setItem(`${convKey}${convId}`, JSON.stringify(messages))
230 | }
231 |
232 | function switchConv(key) {
233 | if (key == null) {
234 | addItem("system", "No conversations")
235 | return
236 | }
237 | box.innerHTML = ''
238 | messages = JSON.parse(localStorage.getItem(key))
239 | messages.forEach(msg => {
240 | addItem(msg.role, msg.content)
241 | });
242 | convId = key.substring(convKey.length);
243 | systemPromptInput.value = messages[0].content;
244 | saveSettings();
245 | }
246 |
247 | function deleteConv(key) {
248 | localStorage.removeItem(key)
249 | }
250 |
251 | function deleteAllHistory() {
252 |   // Iterate backwards: deleting while looping forward skips entries.
253 |   for (let index = localStorage.length - 1; 0 <= index; index--) {
254 |     let key = localStorage.key(index);
255 |     if (key.substring(0, convKey.length) != convKey) { continue }
256 |     deleteConv(key)
257 |   }
258 |   showHistory(true)
259 | }
259 |
260 | function saveConvName(key) {
261 | let input = document.getElementById(`input_${key}`)
262 | localStorage.setItem(`${convNameKey}${key}`, input.value)
263 | showHistory(true)
264 | }
265 |
266 | function updateConvName(key) {
267 | let name = document.getElementById(`name_${key}`)
268 | let input = document.getElementById(`input_${key}`)
269 | let update = document.getElementById(`update_${key}`)
270 | let del = document.getElementById(`delete_${key}`)
271 | input.hidden = false
272 | name.hidden = true
273 | del.hidden = true
274 | update.innerHTML = "📝"
275 | update.onclick = () => {
276 | saveConvName(key)
277 | }
278 | }
279 |
280 | function showHistory(ok = true) {
281 | if (ok) {
282 | historyModal.style.display = ''
283 | historyList.innerHTML = ''
284 | for (let index = 0; index < localStorage.length; index++) {
285 | let key = localStorage.key(index);
286 | if (key.substring(0, convKey.length) != convKey) { continue }
287 | let itemJson = localStorage.getItem(key)
288 | let itemData;
289 | try {
290 | itemData = JSON.parse(itemJson)
291 | } catch (error) {
292 | continue
293 | }
294 | let itemName = localStorage.getItem(`${convNameKey}${key}`)
295 | if (itemName) {
296 |         // NOTE: the original markup of these templates was lost in
297 |         //   extraction; the tags below are reconstructed from the element
298 |         //   IDs that updateConvName() and saveConvName() expect.
299 |         historyList.innerHTML += `
300 |           <div class="history-item">
301 |             <span id="name_${key}" onclick="switchConv('${key}')">${itemName} (${itemData.length}+)</span>
302 |             <input id="input_${key}" value="${itemName}" hidden>
303 |             <button id="update_${key}" class="icon" onclick="updateConvName('${key}')">✏️</button>
304 |             <button id="delete_${key}" class="icon" onclick="deleteConv('${key}'); showHistory(true)">🗑️</button>
305 |           </div>`
306 |       } else {
307 |         historyList.innerHTML += `
308 |           <div class="history-item">
309 |             <span id="name_${key}" onclick="switchConv('${key}')">
310 |               SYST: ${itemData[0].content.replace(/<[^>]+>/g, '')}<br>
311 |               USER: ${itemData[1].content.replace(/<[^>]+>/g, '')} (${itemData.length}+)
312 |             </span>
313 |             <input id="input_${key}" hidden>
314 |             <button id="update_${key}" class="icon" onclick="updateConvName('${key}')">✏️</button>
315 |             <button id="delete_${key}" class="icon" onclick="deleteConv('${key}'); showHistory(true)">🗑️</button>
316 |           </div>`
317 |       }
318 |     }
319 |     if (0 == localStorage.length) {
320 |       historyList.innerHTML = `<p class="system">There are no past conversations yet.</p>`
321 |     }
322 | } else {
323 | historyModal.style.display = 'none'
324 | }
325 | }
326 |
327 | function showSettings(ok = true) {
328 | if (ok) {
329 | settingsModal.style.display = ''
330 | setSettingInput(config)
331 | } else {
332 | settingsModal.style.display = 'none'
333 | }
334 | }
335 |
336 | function setSettingInput(config) {
337 | domainInput.placeholder = "https://api.openai.com"
338 | maxTokensInput.placeholder = config.maxTokens
339 | systemPromptInput.placeholder = "You are a helpful assistant."
340 | temperatureInput.placeholder = config.temperature
341 |
342 | apiKeyInput.value = config.apiKey
343 |
344 | if (!config.domain) {
345 | config.domain = domainInput.placeholder
346 | } else {
347 | domainInput.value = config.domain
348 | }
349 | if (!config.maxTokens) {
350 | config.maxTokens = parseInt(maxTokensInput.placeholder)
351 | } else {
352 | maxTokensInput.value = config.maxTokens
353 | }
354 | if (!config.temperature) {
355 |     config.temperature = parseFloat(temperatureInput.placeholder)
356 | } else {
357 | temperatureInput.value = config.temperature
358 | }
359 | if (!config.model) {
360 | config.model = "gpt-3.5-turbo"
361 | }
362 | modelInput.value = config.model
363 | if (!config.firstPrompt) {
364 | config.firstPrompt = { role: "system", content: systemPromptInput.placeholder }
365 | } else {
366 | systemPromptInput.value = config.firstPrompt.content
367 | }
368 | multiConvInput.checked = config.multi
369 | ttsInput.checked = config.tts
370 | whisperInput.checked = config.onlyWhisper
371 | }
372 |
373 | var config = {
374 | domain: "",
375 | apiKey: "",
376 | maxTokens: 500,
377 | model: "",
378 | firstPrompt: null,
379 | multi: true,
380 | stream: true,
381 | prompts: [],
382 | temperature: 0.5,
383 | tts: false,
384 | onlyWhisper: false,
385 | }
386 | function saveSettings() {
387 | if (!apiKeyInput.value) {
388 |     alert('OpenAI API key cannot be empty')
389 | return
390 | }
391 | config.domain = domainInput.value || domainInput.placeholder
392 | config.apiKey = apiKeyInput.value
393 | config.maxTokens = parseInt(maxTokensInput.value || maxTokensInput.placeholder)
394 |   config.temperature = parseFloat(temperatureInput.value || temperatureInput.placeholder)
395 | config.model = modelInput.value
396 | if (systemPromptInput.value) {
397 | config.firstPrompt = {
398 | role: "system",
399 | content: (systemPromptInput.value || systemPromptInput.placeholder)
400 | }
401 | }
402 | messages[0] = config.firstPrompt
403 | config.multi = multiConvInput.checked
404 | config.tts = ttsInput.checked
405 | config.onlyWhisper = whisperInput.checked
406 | box.firstChild.innerHTML = config.firstPrompt.content
407 | localStorage.setItem("conversation_config", JSON.stringify(config))
408 | showSettings(false)
409 |   addItem('system', 'Update succeeded')
410 | }
411 |
412 | function onSelectPrompt(index) {
413 | let prompt = config.prompts[index]
414 | systemPromptInput.value = prompt.content
415 | multiConvInput.checked = prompt.multi
416 | promptDetails.open = false
417 | }
418 |
419 | function init() {
420 | let configJson = localStorage.getItem("conversation_config")
421 | let _config = JSON.parse(configJson)
422 | if (_config) {
423 | let ck = Object.keys(config)
424 | ck.forEach(key => {
425 | config[key] = _config[key] || config[key]
426 | });
427 | setSettingInput(config)
428 | } else {
429 | showSettings(true)
430 | }
431 | recogLangInput.value = navigator.language
432 | if (!('speechSynthesis' in window)) {
433 | ttsInput.disabled = false
434 | ttsInput.onclick = () => {
435 | alert("The current browser does not support text-to-speech");
436 | }
437 | }
438 |
439 | fetch("./prompts.json").then(resp => {
440 | if (!resp.ok) {
441 | throw new Error(resp.statusText)
442 | }
443 | return resp.json()
444 | }).then(data => {
445 | config.prompts = data
446 | for (let index = 0; index < data.length; index++) {
447 | const prompt = data[index];
448 | promptList.innerHTML += promptDiv(index, prompt)
449 | }
450 | })
451 |
452 | reset()
453 | }
454 |
455 | window.scrollTo(0, document.body.clientHeight)
456 | init()
457 |
458 | const promptDiv = (index, prompt) => {
459 |   // NOTE: the original markup here was lost in extraction; reconstructed
460 |   //   so that clicking a prompt's content selects it via onSelectPrompt().
461 |   return `
462 |     <details>
463 |       <summary>${prompt.title}</summary>
464 |       <p onclick="onSelectPrompt(${index})">${prompt.content}</p>
465 |     </details>`
466 | }
470 |
471 | const textToSpeech = async (text, options = {}) => {
472 | loader.hidden = false
473 | const synth = window.speechSynthesis;
474 |
475 | // Check if Web Speech API is available
476 | if (!('speechSynthesis' in window)) {
477 | loader.hidden = true
478 | alert("The current browser does not support text-to-speech");
479 | return;
480 | }
481 |
482 | // Detect language using franc library
483 | const { franc } = await import("https://cdn.jsdelivr.net/npm/franc@6.1.0/+esm");
484 | let lang = franc(text);
485 | if (lang === "" || lang === "und") {
486 | lang = navigator.language
487 | }
488 | if (lang === "cmn") {
489 | lang = "zh-CN"
490 | }
491 |
492 |   // Get available voices (the list may be empty until the browser has
493 |   //   fired "voiceschanged") and find one matching the detected language.
494 |   const voices = synth.getVoices();
497 |   let voice = voices.find(v => langEq(v.lang, lang) && !v.localService); // let: may be reassigned below
498 | if (!voice) {
499 | voice = voices.find(v => langEq(v.lang, navigator.language) && !v.localService);
500 | }
501 |
502 | // Create a new SpeechSynthesisUtterance object and set its parameters
503 | const utterance = new SpeechSynthesisUtterance(text);
504 | utterance.voice = voice;
505 | utterance.rate = options.rate || 1.0;
506 | utterance.pitch = options.pitch || 1.0;
507 | utterance.volume = options.volume || 1.0;
508 |
509 | // Speak the text
510 | synth.speak(utterance);
511 | utterance.addEventListener('boundary', (event) => {
512 | const { charIndex, elapsedTime } = event;
513 | const progress = charIndex / utterance.text.length;
514 |     // console.log(`TTS progress: ${progress * 100}%, elapsed: ${elapsedTime}`);
515 | loader.hidden = true
516 | });
517 | };
518 |
519 | const regionNamesInEnglish = new Intl.DisplayNames(['en'], { type: 'language' });
520 | // Loose language match: compare English display names so that related
521 | //   tags (e.g. "zh-CN" and "zh", or "en" and "en-US") compare equal.
522 | const langEq = (lang1, lang2) => {
521 | let langStr1 = regionNamesInEnglish.of(lang1)
522 | let langStr2 = regionNamesInEnglish.of(lang2)
523 | if (langStr1.indexOf(langStr2) !== -1) return true
524 | if (langStr2.indexOf(langStr1) !== -1) return true
525 | return langStr1 === langStr2
526 | }
527 |
528 | // Unused helper kept from development: resolves with the voice list once
529 | //   the browser has loaded it.
530 | const getVoices = () => {
531 |   return new Promise(resolve => {
532 |     window.speechSynthesis.onvoiceschanged = () => {
533 |       resolve(window.speechSynthesis.getVoices());
534 |     };
535 |   });
536 | }
536 |
537 | var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
538 | // var SpeechGrammarList = SpeechGrammarList || window.webkitSpeechGrammarList
539 | // var SpeechRecognitionEvent = SpeechRecognitionEvent || webkitSpeechRecognitionEvent
540 | var recognition = null;
541 | // Unused earlier variant; speechToText() below is the live code path.
542 | const _speechToText = () => {
542 | loader.hidden = false
543 | // const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition || window.mozSpeechRecognition || window.msSpeechRecognition)();
544 | if (!recognition) {
545 | recognition = new SpeechRecognition();
546 |
547 | recognition.continuous = false;
548 | recognition.lang = recogLangInput.value;
549 | recognition.interimResults = false;
550 | recognition.maxAlternatives = 1;
551 |
552 | recognition.onresult = (event) => {
553 | loader.hidden = true
554 | try {
555 | const speechResult = event.results[0][0].transcript;
556 | line.innerText = speechResult;
557 | // onSend()
558 | } catch (error) {
559 |         addItem('system', `Speech recognition result failed: ${error.message}`)
560 | }
561 | };
562 |
563 | recognition.onspeechend = function () {
564 | loader.hidden = true
565 | recognition.stop();
566 | };
567 |
568 | recognition.onnomatch = function (event) {
569 | loader.hidden = true
570 |       addItem('system', `Speech recognition match failed: ${event.error}`)
571 | }
572 |
573 | recognition.onerror = (event) => {
574 | loader.hidden = true
575 |       addItem('system', `Speech recognition error: ${event.error}`)
576 | };
577 | }
578 |
579 |   try {
580 |     recognition.start();
581 |   } catch (error) {
582 |     loader.hidden = true
583 |     addItem('system', `Speech error: ${error}`) // was onError(), which is undefined here
584 |   }
584 | }
585 |
586 | // Unused earlier variant of speechToText(): records five seconds of
587 | //   audio, plays it back, then sends it to Whisper.
588 | function _speechToText1() {
589 |   loader.hidden = false
590 |   // Capture the microphone stream
591 |   navigator.mediaDevices.getUserMedia({ audio: true })
592 |     .then(function (stream) {
593 |       const mediaRecorder = new MediaRecorder(stream);
594 |       // Route the stream through an AudioContext. Note that
595 |       //   MediaRecorder.stream is read-only, so the assignment below has
596 |       //   no effect: the original microphone stream is what is recorded.
597 |       const audioContext = new AudioContext();
598 |       const source = audioContext.createMediaStreamSource(stream);
599 |       const destination = audioContext.createMediaStreamDestination();
600 |       source.connect(destination);
601 |       mediaRecorder.stream = destination.stream;
602 |       // Collected audio chunks
603 |       let chunks = [];
604 |       mediaRecorder.start();
605 |       mediaRecorder.addEventListener('dataavailable', function (event) {
606 |         chunks.push(event.data);
607 |       });
608 |       mediaRecorder.addEventListener('stop', function () {
609 |         // Merge the chunks, play the recording back, then transcribe it
610 |         const blob = new Blob(chunks, { type: 'audio/mp3' });
611 |         const audio = new Audio();
612 |         audio.src = URL.createObjectURL(blob);
613 |         audio.play();
614 |         transcriptions(getRecordFile(chunks, mediaRecorder.mimeType))
615 |       });
616 |       // Stop recording after 5 seconds
617 |       setTimeout(function () {
618 |         mediaRecorder.stop();
619 |         stream.getTracks().forEach(track => track.stop());
620 |       }, 5000);
621 |     })
622 |     .catch(function (error) {
623 |       console.error(error);
624 |     });
625 | }
636 |
637 | const transcriptions = (file) => {
638 | const formData = new FormData();
639 | formData.append("model", "whisper-1");
640 | formData.append("file", file);
641 | formData.append("response_format", "json");
642 | fetch(`${config.domain}/v1/audio/transcriptions`, {
643 | method: "POST",
644 | headers: {
645 | "Authorization": "Bearer " + config.apiKey,
646 | },
647 | body: formData,
648 | }).then((resp) => {
649 | return resp.json()
650 | }).then((data) => {
651 | loader.hidden = true
652 | if (data.error) {
653 | throw new Error(`${data.error.code}: ${data.error.message}`)
654 | }
655 | line.innerText = data.text
656 | line.focus()
657 | }).catch(e => {
658 | loader.hidden = true
659 | addItem("system", e)
660 | })
661 | }
662 |
663 | const getRecordFile = (chunks, mimeType) => {
664 | const dataType = mimeType.split(';')[0];
665 | const fileType = dataType.split('/')[1];
666 | const blob = new Blob(chunks, { type: dataType });
667 | const name = `input.${fileType}`
668 | return new File([blob], name, { type: dataType })
669 | }
670 |
671 | const speechToText = () => {
672 |   loader.hidden = false
673 |   // Capture the microphone stream
674 |   navigator.mediaDevices.getUserMedia({ audio: true })
675 |     .then(function (stream) {
676 |       const mediaRecorder = new MediaRecorder(stream);
677 |       // Route the stream through an AudioContext. MediaRecorder.stream is
678 |       //   read-only, so the assignment below has no effect: the original
679 |       //   microphone stream is what is recorded.
680 |       const audioContext = new AudioContext();
681 |       const source = audioContext.createMediaStreamSource(stream);
682 |       const destination = audioContext.createMediaStreamDestination();
683 |       source.connect(destination);
684 |       mediaRecorder.stream = destination.stream;
685 |       let chunks = [];
686 |       mediaRecorder.start();
687 |       mediaRecorder.addEventListener('dataavailable', function (event) {
688 |         chunks.push(event.data);
689 |       });
690 |       mediaRecorder.addEventListener('stop', function () {
691 |         console.log("stop record");
692 |         const audiofile = getRecordFile(chunks, mediaRecorder.mimeType)
693 |         // Play the recording back
694 |         const audio = new Audio();
695 |         audio.src = URL.createObjectURL(audiofile);
696 |         audio.play();
697 |         // In Whisper-only mode, transcribe directly
698 |         if (config.onlyWhisper) {
699 |           transcriptions(audiofile)
700 |         }
701 |       });
702 |       if (config.onlyWhisper) {
703 |         // Whisper-only: stop once silence is detected
704 |         detectStopRecording(stream, 0.38, () => {
705 |           if (mediaRecorder.state === 'recording') {
706 |             mediaRecorder.stop();
707 |           }
708 |           stream.getTracks().forEach(track => track.stop());
709 |         })
710 |       } else {
711 |         // Local recognition first; fall back to Whisper on no-match or error
712 |         asr(
713 |           () => {
714 |             addItem("system", `Stopped recording: read ${chunks.length} "${mediaRecorder.mimeType}" blob(s), starting recognition`);
715 |             if (mediaRecorder.state === 'recording') {
716 |               mediaRecorder.stop();
717 |             }
718 |             stream.getTracks().forEach(track => track.stop());
719 |           },
720 |           () => {
721 |             transcriptions(getRecordFile(chunks, mediaRecorder.mimeType))
722 |           },
723 |           () => {
724 |             transcriptions(getRecordFile(chunks, mediaRecorder.mimeType))
725 |           })
726 |       }
727 |     })
728 |     .catch(function (error) {
729 |       console.error(error);
730 |       addItem("system", error);
731 |     });
732 | }
744 |
745 | const asr = (onstop, onnomatch, onerror) => {
746 | const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
747 | const recognition = new SpeechRecognition()
748 |
749 | recognition.continuous = false;
750 | recognition.lang = recogLangInput.value;
751 | recognition.interimResults = false;
752 | recognition.maxAlternatives = 1;
753 |
754 | recognition.onresult = (event) => {
755 | loader.hidden = true
756 | try {
757 | const speechResult = event.results[0][0].transcript;
758 | line.innerText = speechResult;
759 | // onSend()
760 | } catch (error) {
761 |       addItem('system', `Speech recognition result failed: ${error.message}`)
762 | }
763 | };
764 |
765 | recognition.onspeechend = function () {
766 | recognition.stop();
767 | onstop();
768 | };
769 |
770 | recognition.onnomatch = onnomatch
771 |
772 | recognition.onerror = onerror
773 |
774 | try {
775 | recognition.start();
776 | } catch (error) {
777 | onerror()
778 | }
779 | }
780 |
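// Crude voice-activity detection: sample the analyser's frequency data on
// each animation frame; a max-bin amplitude above maxThreshold counts as
// speech, and the callback fires once about a second has passed since
// speech was last heard.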
781 | function detectStopRecording(stream, maxThreshold, callback) {
782 | const audioContext = new AudioContext();
783 | const sourceNode = audioContext.createMediaStreamSource(stream);
784 | const analyzerNode = audioContext.createAnalyser();
785 | analyzerNode.fftSize = 2048;
786 | analyzerNode.smoothingTimeConstant = 0.8;
787 | sourceNode.connect(analyzerNode);
788 | const frequencyData = new Uint8Array(analyzerNode.frequencyBinCount);
789 | var startTime = null;
790 | const check = () => {
791 | analyzerNode.getByteFrequencyData(frequencyData);
792 | const amplitude = Math.max(...frequencyData) / 255;
793 | console.log(`amplitude: ${amplitude}`);
794 | if (amplitude >= maxThreshold) {
795 | console.log("speeching");
796 | startTime = new Date().getTime();
797 | requestAnimationFrame(check);
798 | } else if (startTime && (new Date().getTime() - startTime) > 1000) {
799 | callback('stop');
800 | } else {
801 | console.log("no speech");
802 | requestAnimationFrame(check);
803 | }
804 | };
805 | requestAnimationFrame(check);
806 | }
807 |
808 | if ('serviceWorker' in navigator) {
809 | window.addEventListener('load', function () {
810 | navigator.serviceWorker.register('./sw.js').then(function (registration) {
811 | console.log('ServiceWorker registration successful with scope: ', registration.scope);
812 | }, function (err) {
813 | console.error('ServiceWorker registration failed: ', err);
814 | });
815 | });
816 | }
--------------------------------------------------------------------------------
/icon-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/excing/chatgpt/5a58d88d7fc08e57a72a1035ae7e4c4534b4fb18/icon-192x192.png
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
[NOTE: The markup of this file was lost in extraction; only stray text
fragments survived. Recoverable structure, from those fragments and the
element IDs referenced in app.js: an HTML page titled "Chat GPT Simple"
with PWA manifest/icon links; the #box message list; the #line input with
a "TOP" link and a "ctrl(alt)+enter" send hint; a "Setting" modal
(#settingsModal) with inputs for domain, API key, model, max tokens,
temperature, system prompt, recognition language, long-conversation / TTS
/ "Only use Whisper" toggles, and an "Assistant Public Prompts" list
(#promptList); and a "History" modal (#historyModal with #historyList).]
/main.css:
--------------------------------------------------------------------------------
1 | html {
2 | height: 100%;
3 | }
4 |
5 | body {
6 | min-height: 100%;
7 | max-width : 720px;
8 | margin : auto;
9 | padding : 0 8px;
10 | display : flex;
11 | flex-flow : column;
12 | }
13 |
14 | #input {
15 | width : 100%;
16 | display : flex;
17 | align-items : end;
18 | border-radius: 8px;
19 | border : crimson outset 1px;
20 | margin-bottom: 5px;
21 | }
22 |
23 | #input:focus-within {
24 | border-width: 2px;
25 | }
26 |
27 | #line {
28 | flex : 1;
29 | padding : 8px 0 8px 8px;
30 | outline : medium;
31 | font-size : 1.2rem;
32 | border-radius: 8px;
33 | border : none;
34 | background : none;
35 | white-space : pre-wrap;
36 | word-wrap : break-word;
37 | word-break : break-word;
38 | }
39 |
40 | input[type="submit"],
41 | button.icon {
42 | padding : 4px 8px;
43 | color : green;
44 | border-radius: 8px;
45 | background : none;
46 | border : none;
47 | font-size : 1.5rem;
48 | cursor : pointer;
49 | }
50 |
51 | /* CSS LOADER */
52 | .loader {
53 | border : 6px solid #efefef;
54 | border-top : 6px solid #111;
55 | border-radius: 50%;
56 | animation : spin 1.2s linear infinite;
57 | margin : 4px auto;
58 | width : 20px;
59 | height : 20px;
60 | }
61 |
62 | @keyframes spin {
63 | 0% {
64 | transform: rotate(0deg);
65 | }
66 |
67 | 100% {
68 | transform: rotate(360deg);
69 | }
70 | }
71 |
72 | #box {
73 | width : 100%;
74 | flex : auto;
75 | display : flex;
76 | flex-direction: column;
77 | }
78 |
79 | .modal {
80 | background : rgba(0, 0, 0, 0.30);
81 | position : absolute;
82 | top : 0;
83 | left : 0;
84 | right : 0;
85 | bottom : 0;
86 | display : flex;
87 | justify-content: center;
88 | align-items : center;
89 | }
90 |
91 | .model-body {
92 | width : 100%;
93 | max-width : 720px;
94 | max-height : 85vh;
95 | background-color: white;
96 | border-radius : 20px;
97 | padding : 20px 30px;
98 | margin : 20px;
99 | overflow : auto;
100 | }
101 |
102 | .model-body input,
103 | .model-body textarea {
104 | width : calc(100% - 20px);
105 | padding : 10px;
106 | font-size : 1.0rem;
107 | border-radius: 4px;
108 | background : none;
109 | white-space : pre-wrap;
110 | word-wrap : break-word;
111 | word-break : break-word;
112 | }
113 |
114 | .model-body label {
115 | font-size: 1.2rem;
116 | }
117 |
118 | .model-body label.label {
119 | display: block;
120 | margin : 20px 0 5px 0;
121 | }
122 |
123 | .model-body button {
124 | font-size: 1.1rem;
125 | padding : 10px 30px;
126 | }
127 |
128 | .system,
129 | .assistant,
130 | .user {
131 | max-width : 80%;
132 | width : fit-content;
133 | padding : 12px 18px;
134 | margin : 8px 0;
135 | font-size : 1.1rem;
136 | white-space: pre-wrap;
137 | }
138 |
139 | .system {
140 | align-self: center;
141 | color : #555;
142 | font-size : 1.0rem;
143 | }
144 |
145 | .assistant {
146 | background-color: bisque;
147 | border-radius : 36px 8px 8px 0;
148 | }
149 |
150 | .user {
151 | background-color: darkcyan;
152 | color : aliceblue;
153 | border-radius : 8px 36px 0 8px;
154 | align-self : end;
155 | margin-left : auto;
156 | }
157 |
158 | .history-item {
159 | max-width : calc(100% - 20px);
160 | width : calc(100% - 20px);
161 | padding : 12px 18px;
162 | font-size : 1.0rem;
163 | cursor : pointer;
164 | }
165 |
166 | .history-item:nth-child(even) {
167 | background-color: antiquewhite;
168 | }
169 |
170 | .history-item:nth-child(odd) {
171 | background-color: aliceblue;
172 | }
--------------------------------------------------------------------------------
/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Chat GPT Simple APP",
3 | "short_name": "Chat GPT",
4 | "icons": [
5 | {
6 | "src": "icon-192x192.png",
7 | "sizes": "180x180",
8 | "type": "image/png"
9 | }
10 | ],
11 | "start_url": "./index.html",
12 | "theme_color": "#ffffff",
13 | "background_color": "#ffffff",
14 | "display": "standalone"
15 | }
--------------------------------------------------------------------------------
/prompts.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "title": "友好",
4 | "content": "你是一个友善的助手。",
5 | "multi": true
6 | },
7 | {
8 | "title": "润色",
9 | "content": "你是一个方案助手,润色下面这段话,修正其中的语法错误,使用正式的文风。",
10 | "multi": false
11 | },
12 | {
13 | "title": "文本翻译",
14 | "content": "你是一个翻译助手,翻译下面这段话,如果这段话非中文则翻译为中文,如果这段话为中文,则翻译为英文。",
15 | "multi": false
16 | },
17 | {
18 | "title": "输出图像",
19 | "content": "你是一个 ASCII art 画像生成助手,为下面这段话生成一个 ASCII art 画像。",
20 | "multi": false
21 | },
22 | {
23 | "title": "夸夸我",
24 | "content": "你是一个夸夸助手。",
25 | "multi": true
26 | },
27 | {
28 | "title": "中英词典",
29 | "content": "你是一个中英词典助手,将英文单词转换为包括中文翻译、英文释义和一个例句的完整解释。请检查所有信息是否准确,并在回答时保持简洁,不需要任何其他反馈。",
30 | "multi": false
31 | },
32 | {
33 | "title": "写小说",
34 | "content": "你是一个小说作者,请根据我的要求和描述写一个小说。",
35 | "multi": true
36 | },
37 | {
38 | "title": "前端智能助手",
39 | "content": "你是一个前端开发专家,用户将提供一些关于 Js、Node、CSS、HTML 等前端代码问题的具体信息,而你的工作就是想出为用户解决问题的策略。这可能包括建议代码、代码逻辑思路策略。",
40 | "multi": true
41 | },
42 | {
43 | "title": "面试官",
44 | "content": "你是一个专业的面试官,具体的职业用户会告诉你。用户将成为候选人,您将向用户询问该职位的面试问题。我希望你只作为面试官回答。不要一次写出所有的问题。我希望你只对用户进行采访。问用户问题,等待用户的回答。不要写解释。像面试官一样一个一个问用户,等用户回答。",
45 | "multi": true
46 | },
47 | {
48 | "title": "JavaScript 控制台",
49 | "content": "你是一个 JavaScript 控制台。用户将键入命令,您将回复 javascript 控制台应显示的内容。我希望您只在一个唯一的代码块内回复终端输出,而不是其他任何内容。不要写解释。除非用户指示您这样做。否则不要键入命令。当用户需要告诉你一些事情时,用户会把文字放在中括号内[就像这样]。",
50 | "multi": false
51 | },
52 | {
53 | "title": "终端",
54 | "content": "你是一个 Linux 终端助手,用户将输入命令,您将回复终端应显示的内容。我希望您只在一个唯一的代码块内回复终端输出,而不是其他任何内容。不要写解释。除非用户指示您这样做,否则不要键入命令。当用户需要告诉你一些事情时,用户会把文字放在中括号内[就像这样]。",
55 | "multi": false
56 | },
62 | {
63 | "title": "写作导师",
64 | "content": "我希望你能充当一个AI写作导师。我会提供一个需要帮助提高写作能力的学生,你的任务是使用人工智能工具,如自然语言处理,为学生提供反馈,告诉他们如何改进他们的作文。你还应该利用你的修辞知识和经验,提出学生可以更好地表达他们的思想和想法的写作技巧建议。",
65 | "multi": false
66 | },
67 | {
68 | "title": "网络安全专家",
69 | "content": "我希望你扮演一个网络安全专家的角色。我会提供一些关于数据存储和共享的具体信息,你的工作就是制定保护这些数据免受恶意行为者攻击的策略。这可能包括建议加密方法、创建防火墙或实施将某些活动标记为可疑的政策。",
70 | "multi": false
71 | },
72 | {
73 | "title": "评论员",
74 | "content": "我希望你能担任评论员的角色。我会提供与新闻相关的故事或话题,你将撰写一篇见解深刻的评论文章。你应该运用自己的经验,深入解释为什么某件事很重要,用事实支持论点,并讨论任何故事中出现的问题的潜在解决方案。",
75 | "multi": false
76 | },
77 | {
78 | "title": "魔术师",
79 | "content": "我要你扮演魔术师。我将为您提供观众和一些可以执行的技巧建议。您的目标是以最有趣的方式表演这些技巧,利用您的欺骗和误导技巧让观众惊叹不已。",
80 | "multi": false
81 | },
82 | {
83 | "title": "英语发音助手",
84 | "content": "我希望你能成为汉语使用者的英语发音助手。我会给你写句子,你只需要回答它们的发音,不需要其他任何内容。回复不能是我的句子的翻译,只能是发音。发音应该使用汉语拼音表示音标。回复中不要写解释。",
85 | "multi": false
86 | },
87 | {
88 | "title": "英语口语教师",
89 | "content": "我希望你能充当口语英语教师和提高者。我会用英语和你交谈,你会用英语回答我以练习我的口语英语。我希望你的回答简洁明了,限制回答在100个单词以内。我希望你严格纠正我的语法错误、打字错误和事实错误。我希望你在回答中问我一个问题。现在让我们开始练习,你可以先问我一个问题。记住,我希望你严格纠正我的语法错误、打字错误和事实错误。",
90 | "multi": false
91 | },
92 | {
93 | "title": "旅游指南",
94 | "content": "我希望你能充当一名旅游指南。我会告诉你我的位置,然后你会建议我附近可以参观的地方。在某些情况下,我还会告诉你我想参观的地方类型。你还会向我推荐与我第一个位置相似类型的地方。",
95 | "multi": false
96 | },
97 | {
98 | "title": "抄袭检查员",
99 | "content": "我希望你能充当一名抄袭检查员。我会给你写句子,你只需要用给定语言回复不被检测到抄袭,仅此而已。不要在回复中写解释。",
100 | "multi": false
101 | },
102 | {
103 | "title": "广告商",
104 | "content": "我希望你扮演广告商的角色。你将创建一个宣传活动,以推广你选择的产品或服务。你将选择目标受众,制定关键信息和口号,选择宣传的媒体渠道,并决定需要采取的任何其他活动以达到你的目标。",
105 | "multi": false
106 | },
107 | {
108 | "title": "数学老师",
109 | "content": "我希望你扮演一位数学老师的角色。我会提供一些数学方程或概念,你的工作就是用易于理解的术语来解释它们。这可能包括提供解决问题的逐步说明,用视觉展示各种技巧或建议在线资源进行进一步学习。",
110 | "multi": false
111 | }
112 | ]
--------------------------------------------------------------------------------
/sw.js:
--------------------------------------------------------------------------------
1 | const CACHE_NAME = 'my-pwa-app-cache-v1';
2 | const urlsToCache = [
3 | './',
4 | './index.html',
5 | './app.js',
6 | './main.css',
7 | './prompts.json'
8 | ];
9 |
10 | self.addEventListener('install', function (event) {
11 | event.waitUntil(
12 | caches.open(CACHE_NAME)
13 | .then(function (cache) {
14 | console.log('Opened cache');
15 | return cache.addAll(urlsToCache);
16 | })
17 | );
18 | });
19 |
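// Cache-first strategy: serve pre-cached assets from the cache and fall
// back to the network for anything not cached at install time.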
20 | self.addEventListener('fetch', function (event) {
21 | event.respondWith(
22 | caches.match(event.request)
23 | .then(function (response) {
24 | if (response) {
25 | return response;
26 | }
27 | return fetch(event.request);
28 | })
29 | );
30 | });
--------------------------------------------------------------------------------