├── .env
├── README.md
├── openai.js
├── package.json
├── server.js
├── azure.js
└── views
    └── index.ejs

/.env:
--------------------------------------------------------------------------------
AZURE_KEY=AZURE_KEY
AZURE_REGION=eastus
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yunuscode/yunusai/HEAD/README.md
--------------------------------------------------------------------------------
/openai.js:
--------------------------------------------------------------------------------
const { Configuration, OpenAIApi } = require("openai");

// Read the key from the environment rather than hardcoding it
// (set OPENAI_API_KEY in .env or in your shell).
const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY,
});
const openai = new OpenAIApi(configuration);

// Sends the (English) question to the completions endpoint and returns
// the response body, whose .choices array holds the generated text.
async function getResponseFromBigBrother(question) {
  const completion = await openai.createCompletion({
    model: "text-davinci-003",
    prompt: question,
    max_tokens: 256,
  });

  return completion.data;
}

module.exports = getResponseFromBigBrother;
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "yunusai",
  "version": "1.0.0",
  "description": "",
  "main": "server.js",
  "scripts": {
    "start": "node server.js",
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "dependencies": {
    "axios": "^1.3.4",
    "cors": "^2.8.5",
    "dotenv": "^16.0.3",
    "ejs": "^3.1.8",
    "express": "^4.18.2",
    "microsoft-cognitiveservices-speech-sdk": "^1.25.1",
    "openai": "^3.1.0"
  }
}
--------------------------------------------------------------------------------
/server.js:
--------------------------------------------------------------------------------
// Assumed: dotenv loads the values in .env (and OPENAI_API_KEY) into process.env.
require("dotenv").config();

const express = require("express");
const cors = require("cors");
const getTranslation = require("./translate"); // not present in this dump; see the sketch below
const getResponseFromBigBrother = require("./openai");

const PORT = 3000;
const app = express();

app.set("view engine", "ejs");

app.use(cors());
app.use(express.urlencoded({ extended: true }));
app.use(express.json());

app.get("/", (req, res) => {
  res.render("index");
});

app.post("/getResponse", async (req, res) => {
  const { question } = req.body;

  console.log(question);

  try {
    // Translate the Uzbek question into English before querying the model.
    const response = await getTranslation("uz", "en", question);
    const translatedText = response[0][0][0];
    if (!translatedText) {
      return res.status(400).json({ error: "Translation failed." });
    }

    const bbresponse = await getResponseFromBigBrother(translatedText);

    // Translate the English completion back into Uzbek.
    const uzbResponse = await getTranslation(
      "en",
      "uz",
      bbresponse.choices.map((item) => item.text).join("\n")
    );

    const lresponse = uzbResponse[0].map((item) => item[0]).join("\n");

    res.json({
      response: lresponse,
    });
  } catch (err) {
    console.error(err);
    res.status(500).json({ error: "Something went wrong." });
  }
});

// Start listening once all middleware and routes are registered.
app.listen(PORT);
--------------------------------------------------------------------------------
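(server.js requires "./translate", but no translate.js appears in the tree above. The response[0][0][0] indexing in server.js matches the nested-array body returned by Google's unofficial translate_a/single endpoint, so the missing module may have looked roughly like the sketch below; the endpoint, the client=gtx parameter, and every name in it are assumptions, not the author's confirmed code.)

/translate.js (hypothetical sketch, not in the original repo):
--------------------------------------------------------------------------------
const axios = require("axios");

// Fetch a translation and return the raw nested-array body; the caller
// reads data[0][0][0] for the first translated segment.
async function getTranslation(from, to, text) {
  const { data } = await axios.get(
    "https://translate.googleapis.com/translate_a/single",
    {
      params: {
        client: "gtx", // the unauthenticated web client (assumption)
        sl: from, // source language, e.g. "uz"
        tl: to, // target language, e.g. "en"
        dt: "t", // request translated-text segments
        q: text,
      },
    }
  );

  return data;
}

module.exports = getTranslation;
--------------------------------------------------------------------------------

(With a module like this in place, the route can be exercised with, for example:
curl -X POST http://localhost:3000/getResponse -H "Content-Type: application/json" -d '{"question": "Salom, qandaysan?"}')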
"SPEECH_REGION" 9 | const speechConfig = sdk.SpeechConfig.fromSubscription( 10 | `AZURE_API`, 11 | `eastus` 12 | ); 13 | const audioConfig = sdk.AudioConfig.fromDefaultSpeakerOutput(audioFile); 14 | 15 | // The language of the voice that speaks. 16 | speechConfig.speechSynthesisVoiceName = "uz-UZ-MadinaNeural"; 17 | 18 | // Create the speech synthesizer. 19 | var synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig); 20 | 21 | var rl = readline.createInterface({ 22 | input: process.stdin, 23 | output: process.stdout, 24 | }); 25 | 26 | rl.question( 27 | "Enter some text that you want to speak >\n> ", 28 | function (text) { 29 | rl.close(); 30 | // Start the synthesizer and wait for a result. 31 | synthesizer.speakTextAsync( 32 | text, 33 | function (result) { 34 | if ( 35 | result.reason === 36 | sdk.ResultReason.SynthesizingAudioCompleted 37 | ) { 38 | console.log("synthesis finished."); 39 | } else { 40 | console.error( 41 | "Speech synthesis canceled, " + 42 | result.errorDetails + 43 | "\nDid you set the speech resource key and region values?" 44 | ); 45 | } 46 | synthesizer.close(); 47 | synthesizer = null; 48 | }, 49 | function (err) { 50 | console.trace("err - " + err); 51 | synthesizer.close(); 52 | synthesizer = null; 53 | } 54 | ); 55 | console.log("Now synthesizing to: " + audioFile); 56 | } 57 | ); 58 | })(); 59 | -------------------------------------------------------------------------------- /views/index.ejs: -------------------------------------------------------------------------------- 1 | 2 | 3 |
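(The original markup in this file was not captured by the dump; only blank numbered lines survived. server.js renders this view at GET / and expects a JSON POST of {"question": "..."} to /getResponse, so a minimal stand-in consistent with that contract could look like the following; all markup and element ids below are assumptions, not the author's original page.)

<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8" />
    <title>yunusai</title>
  </head>
  <body>
    <form id="ask">
      <input id="question" placeholder="Type your question" />
      <button type="submit">Ask</button>
    </form>
    <pre id="answer"></pre>

    <script>
      // POST the question as JSON, matching the /getResponse handler in server.js.
      document.getElementById("ask").addEventListener("submit", async (event) => {
        event.preventDefault();
        const question = document.getElementById("question").value;
        const res = await fetch("/getResponse", {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify({ question }),
        });
        const data = await res.json();
        document.getElementById("answer").textContent = data.response;
      });
    </script>
  </body>
</html>
--------------------------------------------------------------------------------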