├── .env.example
├── .gitignore
├── .vscode
│   └── settings.json
├── LICENSE
├── README.md
├── app
│   ├── action.tsx
│   ├── components
│   │   ├── AttributionComponent.tsx
│   │   ├── InputComponent.tsx
│   │   ├── Mobile.tsx
│   │   ├── Settings.tsx
│   │   └── tools
│   │       ├── Clock.tsx
│   │       ├── Spotify.tsx
│   │       └── Weather.tsx
│   ├── config.tsx
│   ├── favicon-.ico
│   ├── globals.css
│   ├── layout.tsx
│   ├── page.tsx
│   └── utils
│       ├── answerEngine.tsx
│       ├── chatCompletionWithTools.tsx
│       ├── generateChatCompletion.tsx
│       ├── generateTTS.tsx
│       ├── processImage.tsx
│       ├── rateLimiting.tsx
│       ├── tools
│       │   ├── getSpotify.tsx
│       │   ├── getTime.tsx
│       │   └── getWeather.tsx
│       └── transcribeAudio.tsx
├── bun.lockb
├── index.ts
├── next-env.d.ts
├── next.config.mjs
├── package-lock.json
├── package.json
├── postcss.config.mjs
├── public
│   ├── checking.mp3
│   └── og.jpeg
├── tailwind.config.ts
└── tsconfig.json
/.env.example:
--------------------------------------------------------------------------------
1 | # REQUIRED # https://console.groq.com/keys
2 | GROQ_API_KEY=APIKEYGOESHERE
3 | # RECOMMENDED # https://platform.openai.com/account/api-keys
4 | OPENAI_API_KEY=APIKEYGOESHERE
5 | # RECOMMENDED # https://serper.dev/
6 | SERPER_API=APIKEYGOESHERE
7 | # OPTIONAL for tracing with Langchain: https://smith.langchain.com/
8 | LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
9 | LANGCHAIN_API_KEY=API_KEY_GOES_HERE
10 | LANGCHAIN_PROJECT=project-name-goes-here
11 | LANGCHAIN_CALLBACKS_BACKGROUND=true
12 | LANGCHAIN_TRACING_V2=false # Enable to use Langchain tracing
13 | # OPTIONAL - Rate Limiting: https://console.upstash.com/redis
14 | UPSTASH_REDIS_REST_URL=https://EXAMPLE.upstash.io
15 | UPSTASH_REDIS_REST_TOKEN=APIKEYGOESHERE
16 | # OPTIONAL for Spotify: https://developer.spotify.com/documentation/web-api
17 | SPOTIFY_CLIENT_ID=CLIENT_ID_GOES_HERE
18 | SPOTIFY_CLIENT_SECRET=CLIENT_SECRET_GOES_HERE
19 | # OPTIONAL for FAL.AI Llava image model: https://fal.ai
20 | FAL_KEY=API_KEY_GOES_HERE
21 | # OPTIONAL for Gemini Flash (Vision): https://aistudio.google.com/
22 | GEMINI_API_KEY=API_KEY_GOES_HERE
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore
2 |
3 | # Logs
4 |
5 | logs
6 | *.log
7 | npm-debug.log*
8 | yarn-debug.log*
9 | yarn-error.log*
10 | lerna-debug.log*
11 | .pnpm-debug.log*
12 |
13 | # Caches
14 |
15 | .cache
16 |
17 |
18 | # Diagnostic reports (https://nodejs.org/api/report.html)
19 |
20 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
21 |
22 | # Runtime data
23 |
24 | pids
25 | *.pid
26 | *.seed
27 | *.pid.lock
28 |
29 | # Directory for instrumented libs generated by jscoverage/JSCover
30 |
31 | lib-cov
32 |
33 | # Coverage directory used by tools like istanbul
34 |
35 | coverage
36 | *.lcov
37 |
38 | # nyc test coverage
39 |
40 | .nyc_output
41 |
42 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
43 |
44 | .grunt
45 |
46 | # Bower dependency directory (https://bower.io/)
47 |
48 | bower_components
49 |
50 | # node-waf configuration
51 |
52 | .lock-wscript
53 |
54 | # Compiled binary addons (https://nodejs.org/api/addons.html)
55 |
56 | build/Release
57 |
58 | # Dependency directories
59 |
60 | node_modules/
61 | jspm_packages/
62 |
63 | # Snowpack dependency directory (https://snowpack.dev/)
64 |
65 | web_modules/
66 |
67 | # TypeScript cache
68 |
69 | *.tsbuildinfo
70 |
71 | # Optional npm cache directory
72 |
73 | .npm
74 |
75 | # Optional eslint cache
76 |
77 | .eslintcache
78 |
79 | # Optional stylelint cache
80 |
81 | .stylelintcache
82 |
83 | # Microbundle cache
84 |
85 | .rpt2_cache/
86 | .rts2_cache_cjs/
87 | .rts2_cache_es/
88 | .rts2_cache_umd/
89 |
90 | # Optional REPL history
91 |
92 | .node_repl_history
93 |
94 | # Output of 'npm pack'
95 |
96 | *.tgz
97 |
98 | # Yarn Integrity file
99 |
100 | .yarn-integrity
101 |
102 | # dotenv environment variable files
103 |
104 | .env
105 | .env.development.local
106 | .env.test.local
107 | .env.production.local
108 | .env.local
109 |
110 | # parcel-bundler cache (https://parceljs.org/)
111 |
112 | .parcel-cache
113 |
114 | # Next.js build output
115 |
116 | .next
117 | out
118 |
119 | # Nuxt.js build / generate output
120 |
121 | .nuxt
122 | dist
123 |
124 | # Gatsby files
125 |
126 | # Comment in the public line in if your project uses Gatsby and not Next.js
127 |
128 | # https://nextjs.org/blog/next-9-1#public-directory-support
129 |
130 | # public
131 |
132 | # vuepress build output
133 |
134 | .vuepress/dist
135 |
136 | # vuepress v2.x temp and cache directory
137 |
138 | .temp
139 |
140 | # Docusaurus cache and generated files
141 |
142 | .docusaurus
143 |
144 | # Serverless directories
145 |
146 | .serverless/
147 |
148 | # FuseBox cache
149 |
150 | .fusebox/
151 |
152 | # DynamoDB Local files
153 |
154 | .dynamodb/
155 |
156 | # TernJS port file
157 |
158 | .tern-port
159 |
160 | # Stores VSCode versions used for testing VSCode extensions
161 |
162 | .vscode-test
163 |
164 | # yarn v2
165 |
166 | .yarn/cache
167 | .yarn/unplugged
168 | .yarn/build-state.yml
169 | .yarn/install-state.gz
170 | .pnp.*
171 |
172 | # IntelliJ based IDEs
173 | .idea
174 |
175 | # Finder (MacOS) folder config
176 | .DS_Store
177 | .vercel
178 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "dotenv.enableAutocloaking": false
3 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Developers Digest
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | AI Device Template
2 |
12 | Now supports gpt-4o and gemini-1.5-flash-latest for Vision Inference
13 |
24 | YouTube Tutorial
25 |
32 | This project is an AI-powered voice assistant utilizing various AI models and services to provide intelligent responses to user queries. It supports voice input, transcription, text-to-speech, image processing, and function calling with conditionally rendered UI components. This was inspired by the recent trend of AI Devices such as the Humane AI Pin and the Rabbit R1.
33 |
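At a high level, each request runs through `app/action.tsx`: the recorded audio is transcribed, a response is generated (optionally grounded in internet results), and the reply is optionally spoken back with TTS. A condensed, illustrative sketch (simplified from `app/action.tsx`; rate limiting, vision, and the tool-driven UI components are omitted):

```typescript
// Illustrative sketch only — simplified from app/action.tsx.
import { transcribeAudio } from './utils/transcribeAudio';
import { generateChatCompletion } from './utils/generateChatCompletion';
import { answerEngine } from './utils/answerEngine';
import { generateTTS } from './utils/generateTTS';

async function respond(audioBlob: Blob, useInternet: boolean, useTTS: boolean) {
  const transcription = await transcribeAudio(audioBlob, Date.now()); // Whisper via Groq or OpenAI
  const responseText = (useInternet
    ? await answerEngine(transcription)             // Serper results summarized by the inference model
    : await generateChatCompletion(transcription))  // plain chat completion
    ?? '';
  const audio = useTTS ? await generateTTS(responseText) : undefined; // base64 MP3 data URL
  return { transcription, responseText, audio };
}
```
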
34 | ## Features
35 |
36 | - **Voice input and transcription:** Using Whisper models from Groq or OpenAI
37 | - **Text-to-speech output:** Using OpenAI's TTS models
38 | - **Image processing:** Using OpenAI's GPT-4 Vision or Fal.ai's Llava-Next models
39 | - **Function calling and conditionally rendered UI components:** Using OpenAI's GPT-3.5-Turbo model
40 | - **Customizable UI settings:** Includes response times, settings toggle, text-to-speech toggle, internet results toggle, and photo upload toggle
41 | - **(Optional) Rate limiting:** Using Upstash
42 | - **(Optional) Tracing:** With Langchain's LangSmith for function execution
43 |
44 | ## Setup
45 |
46 | ### 1. Clone the repository
47 | ```bash
48 | git clone https://github.com/developersdigest/ai-devices.git
49 | ```
50 |
51 | ### 2. Install dependencies
52 | ```bash
53 | npm install
54 | # or
55 | bun install
56 | ```
57 |
58 | ### 3. Add API Keys
59 |
60 | To use this AI-powered voice assistant, you need to provide the necessary API keys for the selected AI models and services.
61 |
62 | ### Required for core functionality
63 | - **Groq API Key** for Llama + Whisper
64 | - **OpenAI API Key** for TTS and Vision + Whisper
65 | - **Serper API Key** for Internet Results
66 |
67 | ### Optional for advanced configuration
68 | - **Langchain Tracing** for function execution tracing
69 | - **Upstash Redis** for IP-based rate limiting
70 | - **Spotify** for Spotify API interactions
71 | - **Fal.AI (Llava Image Model)** Alternative vision model to GPT-4-Vision
72 |
73 | Copy `.env.example` to `.env` and replace each 'API_KEY_GOES_HERE' placeholder with your actual API key for that service.
74 |
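For reference, `app/action.tsx` loads these with `dotenv`, and the utilities read them from `process.env` (the Groq and OpenAI SDK clients pick up `GROQ_API_KEY` and `OPENAI_API_KEY` from the environment automatically). A minimal sketch, with variable names matching `.env.example`:

```typescript
// Minimal sketch of how the keys are consumed at runtime (see app/utils/*).
import dotenv from 'dotenv';
dotenv.config(); // called once on the server in app/action.tsx

const serperKey = process.env.SERPER_API || '';   // app/utils/answerEngine.tsx (internet results)
const geminiKey = process.env.GEMINI_API_KEY;     // app/utils/processImage.tsx (Gemini vision)
const spotifyId = process.env.SPOTIFY_CLIENT_ID;  // app/utils/tools/getSpotify.tsx
if (!process.env.GROQ_API_KEY) {
  console.warn('GROQ_API_KEY is required for the default Groq Llama + Whisper setup');
}
```
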
75 | ### 4. Start the development server
76 | ```bash
77 | npm run dev
78 | # or
79 | bun dev
80 | ```
81 |
82 | Access the application at `http://localhost:3000` or through the provided URL.
83 |
84 | ### 5. Deployment
85 |
86 | [Deploy with Vercel](https://vercel.com/new/developersdigests-projects/clone?repository-url=https%3A%2F%2Fgithub.com%2Fdevelopersdigest%2Fai-devices&env=GROQ_API_KEY&env=OPENAI_API_KEY&project-name=ai-devices&repository-name=ai-devices)
87 |
88 | ## Configuration
89 |
90 | Modify `app/config.tsx` to adjust settings and configurations for the AI-powered voice assistant. Here’s an overview of the available options:
91 |
92 | ```typescript
93 | export const config = {
94 | // Inference settings
95 | inferenceModelProvider: 'groq', // 'groq' or 'openai'
96 | inferenceModel: 'llama3-8b-8192', // Groq: 'llama3-70b-8192' or 'llama3-8b-8192'. OpenAI: 'gpt-4-turbo', etc.
97 |
98 | // BELOW OPTIONAL are some options for the app to use
99 |
100 | // Whisper settings
101 | whisperModelProvider: 'openai', // 'groq' or 'openai'
102 | whisperModel: 'whisper-1', // Groq: 'whisper-large-v3' OpenAI: 'whisper-1'
103 |
104 | // TTS settings
105 | ttsModelProvider: 'openai', // only openai supported for now...
106 | ttsModel: 'tts-1', // only openai supported for now...
107 | ttsvoice: 'alloy', // only openai supported for now... [alloy, echo, fable, onyx, nova, and shimmer]
108 |
109 | // OPTIONAL:Vision settings
110 | visionModelProvider: 'google', // 'openai' or 'fal.ai' or 'google'
111 | visionModel: 'gemini-1.5-flash-latest', // OpenAI: 'gpt-4o' or Fal.ai: 'llava-next' or Google: 'gemini-1.5-flash-latest'
112 |
113 | // Function calling + conditionally rendered UI
114 | functionCallingModelProvider: 'openai', // currently 'openai' only
115 | functionCallingModel: 'gpt-3.5-turbo', // OpenAI: 'gpt-3.5-turbo'
116 |
117 | // UI settings
118 | enableResponseTimes: false, // Display response times for each message
119 | enableSettingsUIToggle: true, // Display the settings UI toggle
120 | enableTextToSpeechUIToggle: true, // Display the text to speech UI toggle
121 | enableInternetResultsUIToggle: true, // Display the internet results UI toggle
122 | enableUsePhotUIToggle: true, // Display the use photo UI toggle
123 | enabledRabbitMode: true, // Enable the rabbit mode UI toggle
124 | enabledLudicrousMode: true, // Enable the ludicrous mode UI toggle
125 | useAttributionComponent: true, // Use the attribution component to display the attribution of the AI models/services used
126 |
127 | // Rate limiting settings
128 | useRateLimiting: false, // Use Upstash rate limiting to limit the number of requests per user
129 |
130 | // Tracing with Langchain
131 | useLangSmith: true, // Use LangSmith by Langchain to trace function execution; set to true to enable.
132 | };
133 | ```
134 |
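For example, to run inference and transcription entirely on OpenAI instead of Groq, you could change just the provider and model fields (a sketch; model names taken from the comments above):

```typescript
// Sketch of a provider swap — keep every other option from the default config above.
export const config = {
  inferenceModelProvider: 'openai',
  inferenceModel: 'gpt-4-turbo',
  whisperModelProvider: 'openai',
  whisperModel: 'whisper-1',
  // ...remaining options unchanged
};
```
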
135 | ## Contributing
136 |
137 | Contributions are welcome! If you find any issues or have suggestions for improvements, please open an issue or submit a pull request.
138 |
139 | I'm the developer behind Developers Digest. If you find my work helpful or enjoy what I do, consider supporting me. Here are a few ways you can do that:
140 |
141 | - **Patreon**: Support me on Patreon at [patreon.com/DevelopersDigest](https://www.patreon.com/DevelopersDigest)
142 | - **Buy Me A Coffee**: You can buy me a coffee at [buymeacoffee.com/developersdigest](https://www.buymeacoffee.com/developersdigest)
143 | - **Website**: Check out my website at [developersdigest.tech](https://developersdigest.tech)
144 | - **Github**: Follow me on GitHub at [github.com/developersdigest](https://github.com/developersdigest)
145 | - **Twitter**: Follow me on Twitter at [twitter.com/dev__digest](https://twitter.com/dev__digest)
--------------------------------------------------------------------------------
/app/action.tsx:
--------------------------------------------------------------------------------
1 | import 'server-only';
2 | import { createAI, createStreamableValue, createStreamableUI } from 'ai/rsc';
3 | import { config } from './config';
4 | import dotenv from 'dotenv';
5 | dotenv.config();
6 | // Rate limiting
7 | import { Ratelimit } from "@upstash/ratelimit";
8 | import { Redis } from "@upstash/redis";
9 | import { headers } from 'next/headers'
10 | let ratelimit: Ratelimit | undefined;
11 | if (config.useRateLimiting) {
12 | ratelimit = new Ratelimit({
13 | redis: Redis.fromEnv(),
14 | limiter: Ratelimit.slidingWindow(10, "10 m") // 10 requests per 10 minutes
15 | });
16 | }
17 | // Rate limiting
18 | import { transcribeAudio } from './utils/transcribeAudio';
19 | import { generateTTS } from './utils/generateTTS';
20 | import { processImageWithGPT4o, processImageWithLllavaOnFalAI, processImageWithGoogleGenerativeAI } from './utils/processImage';
21 | import { generateChatCompletion } from './utils/generateChatCompletion';
22 | import { answerEngine } from './utils/answerEngine';
23 | import { chatCompletionWithTools } from './utils/chatCompletionWithTools';
24 | import { initializeRateLimit, checkRateLimit } from './utils/rateLimiting';
25 |
26 | async function action(obj: FormData): Promise<any> {
27 | "use server";
28 | const streamable = createStreamableValue();
29 | (async () => {
30 | if (config.useRateLimiting) {
31 | const identifier = headers().get('x-forwarded-for') || headers().get('x-real-ip') || headers().get('cf-connecting-ip') || headers().get('client-ip') || "";
32 | initializeRateLimit();
33 | if (!await checkRateLimit(identifier)) return streamable.done({ 'result': 'Rate Limit Reached. Please try again later.' });
34 | }
35 | const formData = obj;
36 | const audioBlob = formData.get('audio');
37 | const useTTS = formData.get('useTTS') === 'true';
38 | const useInternet = formData.get('useInternet') === 'true';
39 | const usePhotos = formData.get('usePhotos') === 'true';
40 | const useLudicrousMode = formData.get('useLudicrousMode') === 'true';
41 | if (!(audioBlob instanceof Blob)) throw new Error('No audio detected');
42 |
43 | const timestamp = Date.now();
44 | const transcription = await transcribeAudio(audioBlob, timestamp);
45 | streamable.update({ 'transcription': transcription });
46 |
47 | let responseText = '';
48 | if (useLudicrousMode) {
49 | const result = await generateChatCompletion(transcription);
50 | if (result !== undefined) {
51 | responseText = result;
52 | }
53 | } else {
54 | if (usePhotos) {
55 | const image = formData.get('image');
56 | if (image instanceof File) {
57 | if (config.visionModelProvider === 'fal.ai') {
58 | responseText = await processImageWithLllavaOnFalAI(image, transcription);
59 | } else if (config.visionModelProvider === 'google') {
60 | responseText = await processImageWithGoogleGenerativeAI(image, transcription);
61 | } else {
62 | responseText = await processImageWithGPT4o(image, transcription);
63 | }
64 | } else {
65 | responseText = 'You might have forgotten to upload an image';
66 | }
67 | } else {
68 | let result;
69 | if (useInternet) {
70 | result = await answerEngine(transcription);
71 | } else {
72 | result = await generateChatCompletion(transcription);
73 | }
74 |
75 | if (result !== undefined) {
76 | responseText = result;
77 | }
78 |
79 | const tool_results = await chatCompletionWithTools(responseText);
80 | if (tool_results?.uiComponent) {
81 | if (tool_results.uiComponent.component === 'weather') {
82 | streamable.update({ 'weather': tool_results.uiComponent.data });
83 | } else if (tool_results.uiComponent.component === 'spotify') {
84 | streamable.update({ 'spotify': tool_results.uiComponent.data });
85 | } else if (tool_results.uiComponent.component === 'time') {
86 | responseText = tool_results.uiComponent.data;
87 | streamable.update({ 'time': tool_results.uiComponent.data });
88 | }
89 | } else {
90 | streamable.update({ 'message': tool_results?.message });
91 | }
92 | }
93 | }
94 |
95 | streamable.update({ 'result': responseText });
96 | useTTS && streamable.update({ 'audio': await generateTTS(responseText) });
97 | streamable.done({ status: 'done' });
98 | })();
99 | return streamable.value;
100 | }
101 |
102 | const initialAIState: {
103 | role: 'user' | 'assistant' | 'system' | 'function';
104 | content: string;
105 | id?: string;
106 | name?: string;
107 | }[] = [];
108 |
109 | const initialUIState: {
110 | text: string;
111 | id?: string;
112 | }[] = [];
113 |
114 | export const AI = createAI({
115 | actions: { action },
116 | initialAIState,
117 | initialUIState
118 | });
--------------------------------------------------------------------------------
/app/components/AttributionComponent.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { config } from '../config';
3 |
4 | interface AttributionComponentProps {
5 | usePhotos: boolean;
6 | useInternet: boolean;
7 | useTTS: boolean;
8 | useRateLimiting: boolean;
9 | }
10 |
11 | export const AttributionComponent: React.FC<AttributionComponentProps> = ({ usePhotos, useInternet, useTTS, useRateLimiting }) => {
12 | const {
13 | whisperModelProvider,
14 | whisperModel,
15 | inferenceModelProvider,
16 | inferenceModel,
17 | ttsModelProvider,
18 | visionModelProvider,
19 | visionModel,
20 | useLangSmith,
21 | } = config;
22 |
23 | return (
24 |
25 | speech recognition: {whisperModel}: {whisperModelProvider}
26 | {usePhotos && `, vision: ${visionModel}: ${visionModelProvider}`}
27 | {!usePhotos && `, inference: ${inferenceModel}: ${inferenceModelProvider}`}
28 | {useTTS && `, tts: ${ttsModelProvider}`}
29 | {useLangSmith && ', observability: langsmith'}
30 | {useInternet && ', internet search: serper'}
31 | {useRateLimiting && ', rate limiting: upstash redis'}
32 |
33 | );
34 | };
--------------------------------------------------------------------------------
/app/components/InputComponent.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | import { useState, useRef } from 'react';
3 | import { useDropzone, DropzoneOptions } from 'react-dropzone';
4 |
5 | interface InputComponentProps {
6 | onSubmit: (formData: FormData) => void;
7 | useTTS: boolean;
8 | useInternet: boolean;
9 | usePhotos: boolean;
10 | useLudicrousMode: boolean;
11 | useRabbitMode: boolean;
12 | }
13 |
14 | const InputComponent: React.FC<InputComponentProps> = ({
15 | onSubmit,
16 | useTTS,
17 | useInternet,
18 | usePhotos,
19 | useLudicrousMode,
20 | useRabbitMode,
21 | }) => {
22 | const [selectedImage, setSelectedImage] = useState<File | null>(null);
23 | const [recording, setRecording] = useState(false);
24 | const mediaRecorderRef = useRef<MediaRecorder | null>(null);
25 | const chunksRef = useRef<Blob[]>([]);
26 |
27 | const { getRootProps, getInputProps, isDragActive } = useDropzone({
28 | onDrop: (acceptedFiles) => {
29 | setSelectedImage(acceptedFiles[0]);
30 | },
31 | accept: {
32 | 'image/png': ['.png'],
33 | 'image/jpeg': ['.jpeg', '.jpg'],
34 | 'image/webp': ['.webp'],
35 | 'image/gif': ['.gif'],
36 | },
37 | } as DropzoneOptions);
38 |
39 | const removeImage = () => {
40 | setSelectedImage(null);
41 | };
42 |
43 | const handleRecording = () => {
44 | if (recording) {
45 | stopRecording();
46 | } else {
47 | startRecording();
48 | }
49 | setRecording(!recording);
50 | };
51 |
52 | const startRecording = () => {
53 | navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
54 | const options = { mimeType: 'audio/webm' };
55 | mediaRecorderRef.current = new MediaRecorder(stream, options);
56 | mediaRecorderRef.current.addEventListener('dataavailable', (event: BlobEvent) => {
57 | chunksRef.current.push(event.data);
58 | });
59 | mediaRecorderRef.current.start();
60 | });
61 | };
62 |
63 | const stopRecording = async () => {
64 | if (mediaRecorderRef.current) {
65 | mediaRecorderRef.current.stop();
66 | mediaRecorderRef.current.addEventListener('stop', async () => {
67 | const audioBlob = new Blob(chunksRef.current, { type: 'audio/webm' });
68 | const formData = new FormData();
69 | formData.append('audio', audioBlob);
70 | formData.append('useTTS', String(useTTS));
71 | formData.append('useInternet', String(useInternet));
72 | formData.append('usePhotos', String(usePhotos));
73 | formData.append('useLudicrousMode', String(useLudicrousMode));
74 | if (selectedImage) {
75 | formData.append('image', selectedImage, selectedImage.name);
76 | }
77 | onSubmit(formData);
78 | chunksRef.current = [];
79 | });
80 | }
81 | };
82 |
83 | return (
84 |
85 |
86 | {useRabbitMode ? (
87 |
98 | ) : (
99 |

108 | )}
109 | {recording && (
110 |
113 | )}
114 | {usePhotos && (
115 |
116 |
121 |
122 | {selectedImage ? (
123 |
})
128 | ) : (
129 |
130 |
Drag and drop an image here
131 |
.png, .jpg, .jpeg, .webp, .gif
132 |
133 | )}
134 |
135 | {selectedImage && (
136 |
142 | )}
143 |
144 | )}
145 |
146 |
147 | );
148 | };
149 |
150 | export default InputComponent;
--------------------------------------------------------------------------------
/app/components/Mobile.tsx:
--------------------------------------------------------------------------------
1 | export const MobileNotSupported: React.FC = () => {
2 | return (
3 |
4 |
5 |
Only Desktop Supported Currently
6 |
Mobile devices coming soon...
7 |
8 |
16 |
17 | );
18 | };
19 |
--------------------------------------------------------------------------------
/app/components/Settings.tsx:
--------------------------------------------------------------------------------
1 | import { config } from '../config';
2 |
3 | interface SettingsProps {
4 | useTTS: boolean;
5 | useInternet: boolean;
6 | usePhotos: boolean;
7 | useLudicrousMode: boolean;
8 | useRabbitMode: boolean;
9 | onTTSToggle: () => void;
10 | onInternetToggle: () => void;
11 | onPhotosToggle: () => void;
12 | onLudicrousModeToggle: () => void;
13 | onRabbitModeToggle: () => void;
14 | setTTS: (useTTS: boolean) => void;
15 | setInternet: (useInternet: boolean) => void;
16 | setPhotos: (usePhotos: boolean) => void;
17 | }
18 |
19 | export const Settings: React.FC<SettingsProps> = ({
20 | useTTS,
21 | useInternet,
22 | usePhotos,
23 | useLudicrousMode,
24 | useRabbitMode,
25 | onTTSToggle,
26 | onInternetToggle,
27 | onPhotosToggle,
28 | onLudicrousModeToggle,
29 | onRabbitModeToggle,
30 | setTTS,
31 | setInternet,
32 | setPhotos,
33 | }) => {
34 | const handleLudicrousModeToggle = () => {
35 | onLudicrousModeToggle();
36 | if (!useLudicrousMode) {
37 | setTTS(false);
38 | setInternet(false);
39 | setPhotos(false);
40 | }
41 | };
42 |
43 | return (
44 |
45 | {config.enabledLudicrousMode && (
46 | <>
47 |
48 |
62 |
63 |
(Groq Llama3 + Groq Whisper only)
64 | >
65 | )}
66 | {config.enableTextToSpeechUIToggle && (
67 |
68 |
83 |
84 | )}
85 | {config.enableInternetResultsUIToggle && (
86 |
87 |
102 |
103 | )}
104 | {config.enableUsePhotUIToggle && (
105 |
106 |
121 |
122 | )}
123 |
124 |
140 |
141 |
142 | );
143 | };
--------------------------------------------------------------------------------
/app/components/tools/Clock.tsx:
--------------------------------------------------------------------------------
1 | import { useEffect, useState } from 'react';
2 | import Clock from 'react-clock';
3 |
4 | export function ClockComponent() {
5 | const [value, setValue] = useState(new Date());
6 |
7 |
8 | useEffect(() => {
9 | const interval = setInterval(() => setValue(new Date()), 1000);
10 | return () => {
11 | clearInterval(interval);
12 | };
13 | }, []);
14 |
15 | return (
16 | <Clock value={value} />
17 | );
18 | }
--------------------------------------------------------------------------------
/app/components/tools/Spotify.tsx:
--------------------------------------------------------------------------------
1 | export const SpotifyTrack = ({ trackId, width = 300, height = 80 }: { trackId: string; width?: number; height?: number }) => {
2 | if (!trackId) {
3 | return null; // or return a loading state or error message
4 | }
5 |
6 | return (
7 | <iframe
8 |   src={`https://open.spotify.com/embed/track/${trackId}`}
9 |   width={width}
10 |   height={height}
11 |   frameBorder="0"
12 |   allow="encrypted-media"
13 | />
14 | );
15 | };
--------------------------------------------------------------------------------
/app/components/tools/Weather.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { WiDaySunny, WiCloudy, WiRain, WiSnow } from 'react-icons/wi';
3 |
4 | interface WeatherDataItem {
5 | time: string;
6 | temperature: number;
7 | }
8 |
9 | const getWeatherIcon = (temperature: number) => {
10 | // mock icons for now
11 | if (temperature >= 25) {
12 | return <WiDaySunny />;
13 | } else if (temperature >= 10) {
14 | return <WiCloudy />;
15 | } else if (temperature >= 0) {
16 | return <WiRain />;
17 | } else {
18 | return <WiSnow />;
19 | }
20 | };
21 |
22 | const formatTime = (time: string) => {
23 | const date = new Date(time);
24 | const hours = date.getHours();
25 | const minutes = date.getMinutes();
26 | const ampm = hours >= 12 ? 'pm' : 'am';
27 | const formattedHours = hours % 12 || 12;
28 | const formattedMinutes = minutes.toString().padStart(2, '0');
29 | const formattedTime = `${formattedHours}:${formattedMinutes}${ampm}`;
30 | return formattedTime;
31 | };
32 |
33 | interface WeatherDataProps {
34 | data: WeatherDataItem[];
35 | }
36 |
37 | export const WeatherData: React.FC<WeatherDataProps> = ({ data }) => {
38 | const currentTime = new Date();
39 | const currentHour = currentTime.getHours();
40 |
41 | // Find the index of the current hour in the data array
42 | const currentIndex = data.findIndex((item) => {
43 | const itemTime = new Date(item.time);
44 | return itemTime.getHours() === currentHour;
45 | });
46 |
47 | // Slice the data array starting from the current index
48 | const slicedData = data.slice(currentIndex);
49 |
50 | return (
51 |
52 |
53 | {slicedData.map((item, index) => (
54 |
55 |
{formatTime(item.time)}
56 |
{getWeatherIcon(item.temperature)}
57 |
{item.temperature}°C
58 |
59 | ))}
60 |
61 |
62 |
63 | );
64 | };
--------------------------------------------------------------------------------
/app/config.tsx:
--------------------------------------------------------------------------------
1 | export const config = {
2 | // Inference settings
3 | inferenceModelProvider: 'groq', // 'groq' or 'openai'
4 | inferenceModel: 'llama3-8b-8192', // Groq: 'llama3-70b-8192' or 'llama3-8b-8192'. OpenAI: 'gpt-4-turbo', etc.
5 |
6 | // Whisper settings
7 | whisperModelProvider: 'openai', // 'groq' or 'openai'
8 | whisperModel: 'whisper-1', // Groq: 'whisper-large-v3' OpenAI: 'whisper-1'
9 |
10 | // TTS settings
11 | ttsModelProvider: 'openai', // only openai supported for now...
12 | ttsModel: 'tts-1', // only openai supported for now...
13 | ttsvoice: 'alloy', // only openai supported for now... [alloy, echo, fable, onyx, nova, and shimmer]
14 |
15 | // OPTIONAL:Vision settings
16 | visionModelProvider: 'google', // 'openai' or 'fal.ai' or 'google'
17 | visionModel: 'gemini-1.5-flash-latest', // OpenAI: 'gpt-4o' or Fal.ai: 'llava-next' or Google: 'gemini-1.5-flash-latest'
18 |
19 | // Function calling + conditionally rendered UI
20 | functionCallingModelProvider: 'openai', // currently 'openai' only
21 | functionCallingModel: 'gpt-3.5-turbo', // OpenAI: 'gpt-3.5-turbo'
22 |
23 | // UI settings
24 | enableResponseTimes: false, // Display response times for each message
25 | enableSettingsUIToggle: true, // Display the settings UI toggle
26 | enableTextToSpeechUIToggle: true, // Display the text to speech UI toggle
27 | enableInternetResultsUIToggle: true, // Display the internet results UI toggle
28 | enableUsePhotUIToggle: true, // Display the use photo UI toggle
29 | enabledRabbitMode: true, // Enable the rabbit mode UI toggle
30 | enabledLudicrousMode: true, // Enable the ludicrous mode UI toggle
31 | useAttributionComponent: true, // Use the attribution component to display the attribution of the AI models/services used
32 |
33 | // Rate limiting settings
34 | useRateLimiting: false, // Use Upstash rate limiting to limit the number of requests per user
35 |
36 | // Tracing with Langchain
37 | useLangSmith: true, // Use LangSmith by Langchain to trace function execution; set to true to enable.
38 | };
--------------------------------------------------------------------------------
/app/favicon-.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developersdigest/ai-devices/8fa8afa217f950b4a8f8046f2095f27cf05c2db5/app/favicon-.ico
--------------------------------------------------------------------------------
/app/globals.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
5 | :root {
6 | --foreground-rgb: 0, 0, 0;
7 | --background-start-rgb: 214, 219, 220;
8 | --background-end-rgb: 255, 255, 255;
9 | }
10 |
11 | @media (prefers-color-scheme: dark) {
12 | :root {
13 | --foreground-rgb: 255, 255, 255;
14 | --background-start-rgb: 0, 0, 0;
15 | --background-end-rgb: 0, 0, 0;
16 | }
17 | }
18 |
19 | body {
20 | color: rgb(var(--foreground-rgb));
21 | background: linear-gradient(to bottom,
22 | transparent,
23 | rgb(var(--background-end-rgb))) rgb(var(--background-start-rgb));
24 | }
25 |
26 | @layer utilities {
27 | .text-balance {
28 | text-wrap: balance;
29 | }
30 | }
31 |
32 | .prevent-image-drag {
33 | user-drag: none;
34 | -webkit-user-drag: none;
35 | user-select: none;
36 | -moz-user-select: none;
37 | -webkit-user-select: none;
38 | -ms-user-select: none;
39 | }
40 |
41 | @keyframes slideInRight {
42 | 0% {
43 | transform: translateX(100%);
44 | opacity: 0;
45 | }
46 |
47 | 100% {
48 | transform: translateX(0);
49 | opacity: 1;
50 | }
51 | }
52 |
53 | .animate-slide-in-right {
54 | animation: slideInRight 2s ease-out;
55 | }
56 |
57 |
58 | @keyframes scaleUp {
59 | 0% {
60 | transform: scale(0.01);
61 | }
62 |
63 | 100% {
64 | transform: scale(20);
65 | }
66 | }
67 |
68 | .animate-scale-up {
69 | animation: scaleUp 100s linear forwards;
70 | }
71 |
72 |
73 |
74 | /* clock */
75 | .react-clock {
76 | display: block;
77 | position: relative;
78 | }
79 |
80 | .react-clock,
81 | .react-clock *,
82 | .react-clock *:before,
83 | .react-clock *:after {
84 | -moz-box-sizing: border-box;
85 | -webkit-box-sizing: border-box;
86 | box-sizing: border-box;
87 | }
88 |
89 | .react-clock__face {
90 | position: absolute;
91 | top: 0;
92 | bottom: 0;
93 | left: 0;
94 | right: 0;
95 | border: 1px solid white;
96 | border-radius: 50%;
97 | box-shadow: 0 0 5px #000, 0 0 10px #000, 0 0 20px #000, 0 0 40px #000;
98 | }
99 |
100 | .react-clock__hand {
101 | position: absolute;
102 | top: 0;
103 | bottom: 0;
104 | left: 50%;
105 | right: 50%;
106 | }
107 |
108 | .react-clock__hand__body {
109 | position: absolute;
110 | background-color: white;
111 | transform: translateX(-50%);
112 | box-shadow: 0 0 5px #000, 0 0 10px #000, 0 0 20px #000, 0 0 40px #000;
113 | }
114 |
115 | .react-clock__mark {
116 | position: absolute;
117 | top: 0;
118 | bottom: 0;
119 | left: 50%;
120 | right: 50%;
121 | }
122 |
123 | .react-clock__mark__body {
124 | position: absolute;
125 | background-color: white;
126 | transform: translateX(-50%);
127 | box-shadow: 0 0 5px #000, 0 0 10px #000, 0 0 20px #000, 0 0 40px #000;
128 | }
129 |
130 | .react-clock__mark__number {
131 | position: absolute;
132 | left: -40px;
133 | width: 80px;
134 | text-align: center;
135 | }
136 |
137 | .react-clock__second-hand__body {
138 | background-color: white;
139 | box-shadow: 0 0 5px #000, 0 0 10px #000, 0 0 20px #000, 0 0 40px #000;
140 | }
141 |
142 |
143 | /* clock */
144 |
145 | body {
146 | overflow: hidden!important;
147 | /* Hide scrollbars */
148 | }
--------------------------------------------------------------------------------
/app/layout.tsx:
--------------------------------------------------------------------------------
1 | import type { Metadata } from "next";
2 | import { Inter } from "next/font/google";
3 | import "./globals.css";
4 | import { AI } from "./action";
5 |
6 | const inter = Inter({ subsets: ["latin"] });
7 |
8 | export const metadata: Metadata = {
9 | title: "pin",
10 | description: "Created by Developers Digest",
11 | };
12 |
13 | export default function RootLayout({
14 | children,
15 | }: Readonly<{
16 | children: React.ReactNode;
17 | }>) {
18 | return (
19 |
20 |
21 | {children}
22 |
23 |
24 | );
25 | }
--------------------------------------------------------------------------------
/app/page.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | import { useState, useEffect } from 'react';
3 | import { useActions, readStreamableValue } from 'ai/rsc';
4 | import { type AI } from './action';
5 | import { Settings } from './components/Settings';
6 | import { AttributionComponent } from './components/AttributionComponent';
7 | import { MobileNotSupported } from './components/Mobile';
8 | import InputComponent from './components/InputComponent';
9 | import { WeatherData } from './components/tools/Weather';
10 | import { SpotifyTrack } from './components/tools/Spotify';
11 | import { ClockComponent } from './components/tools/Clock';
12 | import { config } from './config';
13 |
14 | interface Message {
15 | rateLimitReached: any;
16 | transcription?: string;
17 | audio?: string;
18 | result?: string;
19 | weather?: string;
20 | spotify?: string;
21 | time?: string;
22 | }
23 |
24 | interface UIComponent {
25 | component: string;
26 | data: any;
27 | }
28 |
29 | const Main = () => {
30 | const { action } = useActions();
31 | const [useLudicrousMode, setUseLudicrousMode] = useState(true);
32 | const [useTTS, setUseTTS] = useState(false);
33 | const [useInternet, setUseInternet] = useState(false);
34 | const [usePhotos, setUsePhotos] = useState(false);
35 | const [useRabbitMode, setuseRabbitMode] = useState(false);
36 | const [useSpotify, setUseSpotify] = useState('');
37 | const [currentTranscription, setCurrentTranscription] = useState<{ transcription: string, responseTime: number } | null>(null);
38 | const [totalResponseTime, setTotalResponseTime] = useState<number | null>(null);
39 | const [currentUIComponent, setCurrentUIComponent] = useState<UIComponent | null>(null);
40 | const [message, setMessage] = useState<{ message: string; responseTime: number } | null>(null);
41 | const [isMobile, setIsMobile] = useState(false);
42 | const [showSettings, setShowSettings] = useState(false);
43 |
44 | const handleSettingsClick = () => {
45 | setShowSettings(!showSettings);
46 | };
47 |
48 | const handleTTSToggle = () => {
49 | setUseTTS(!useTTS);
50 | };
51 |
52 | const handleInternetToggle = () => {
53 | setUseInternet(!useInternet);
54 | };
55 |
56 | const handleLudicrousModeToggle = () => {
57 | setUseLudicrousMode(!useLudicrousMode);
58 | };
59 |
60 | const handleRabbitModeToggle = () => {
61 | setuseRabbitMode(!useRabbitMode);
62 | };
63 | const handleSubmit = async (formData: FormData) => {
64 | const startTime = Date.now();
65 | const streamableValue = await action(formData);
66 | let transcriptionResponseTime;
67 | let transcriptionCompletionTime;
68 | let messageResponseTime;
69 | let audioResponseTime;
70 | setCurrentUIComponent(null);
71 | setMessage(null);
72 | for await (const message of readStreamableValue(streamableValue)) {
73 | if (message && message.rateLimitReached && typeof message.rateLimitReached === 'string') {
74 | setMessage({ message: message.rateLimitReached, responseTime: 0 });
75 | }
76 | if (message && message.time && typeof message.time === 'string') {
77 | setCurrentUIComponent({ component: 'time', data: message.time });
78 | }
79 | if (message && message.transcription && typeof message.transcription === 'string') {
80 | transcriptionResponseTime = (Date.now() - startTime) / 1000;
81 | transcriptionCompletionTime = Date.now();
82 | setCurrentTranscription({ transcription: message.transcription, responseTime: transcriptionResponseTime });
83 | }
84 | if (message && message.weather && typeof message.weather === 'string') {
85 | setCurrentUIComponent({ component: 'weather', data: JSON.parse(message.weather) });
86 | }
87 | if (message && message.result && typeof message.result === 'string') {
88 | messageResponseTime = (Date.now() - (transcriptionCompletionTime || startTime)) / 1000;
89 | setMessage({ message: message.result, responseTime: messageResponseTime });
90 | }
91 | if (message && message.audio && typeof message.audio === 'string') {
92 | audioResponseTime = (Date.now() - (transcriptionCompletionTime || startTime)) / 1000;
93 | const audio = new Audio(message.audio);
94 | audio.play();
95 | }
96 | if (message && message.spotify && typeof message.spotify === 'string') {
97 | setUseSpotify(message.spotify);
98 | }
99 | }
100 | let totalResponseTime = 0;
101 | if (transcriptionResponseTime) {
102 | totalResponseTime += transcriptionResponseTime;
103 | }
104 | if (messageResponseTime) {
105 | totalResponseTime += messageResponseTime;
106 | }
107 | if (audioResponseTime) {
108 | totalResponseTime += audioResponseTime;
109 | }
110 | setTotalResponseTime(totalResponseTime);
111 | };
112 | useEffect(() => {
113 | const checkMobile = () => {
114 | const isMobileDevice = window.innerWidth <= 768; // Adjust the breakpoint as needed
115 | setIsMobile(isMobileDevice);
116 | };
117 | checkMobile();
118 | window.addEventListener('resize', checkMobile); // Check on window resize
119 | return () => {
120 | window.removeEventListener('resize', checkMobile); // Cleanup the event listener
121 | };
122 | }, []);
123 | return (
124 |
125 | {isMobile ? (
126 |
127 | ) : (
128 | <>
129 |
135 |
140 |
141 |
149 | {currentTranscription && (
150 |
151 |
{currentTranscription.transcription}
152 | {config.enableResponseTimes && (
153 |
Transcription response time: +{currentTranscription.responseTime.toFixed(2)} seconds
154 | )}
155 |
156 | )}
157 |
158 |
159 |
160 | {useRabbitMode ? (
161 |

166 | ) : (
167 |

172 | )}
173 | {useSpotify && (
174 |
175 |
176 |
177 | )}
178 | {message && message.message && !currentUIComponent && (
179 |
186 |
{message.message}
187 |
188 | )}
189 | {currentUIComponent && currentUIComponent.component === 'weather' && (
190 |
191 |
192 |
193 | )}
194 | {currentUIComponent && currentUIComponent.component === 'time' && (
195 |
196 |
197 |
198 | )}
199 | {message && message.message && (
200 |
201 | {config.enableResponseTimes && (
202 |
Message response time: +{message.responseTime.toFixed(2)} seconds
203 | )}
204 |
205 | )}
206 |
207 |
208 |
209 | >
210 | )}
211 | {config.enableSettingsUIToggle && (
212 |
216 |

221 |
222 | )}
223 | {showSettings && (
224 |
setUsePhotos(!usePhotos)}
234 | onRabbitModeToggle={handleRabbitModeToggle}
235 | setTTS={setUseTTS}
236 | setInternet={setUseInternet}
237 | setPhotos={setUsePhotos}
238 | />
239 | )}
240 | {config.useAttributionComponent && (
241 |
242 | )}
243 |
244 | );
245 | };
246 | export default Main;
--------------------------------------------------------------------------------
/app/utils/answerEngine.tsx:
--------------------------------------------------------------------------------
1 | import Groq from 'groq-sdk';
2 | import { config } from '../config';
3 | import { traceable } from "langsmith/traceable";
4 |
5 | const groq = new Groq();
6 |
7 | export const answerEngine = traceable(async (query: string) => {
8 | async function rephraseInput(inputString: string) {
9 | const groqResponse = await groq.chat.completions.create({
10 | model: config.inferenceModel,
11 | messages: [
12 | { role: "system", content: "You are a rephraser and always respond with a rephrased version of the input that is given to a search engine API. Always be succinct and use the same words as the input. ONLY RETURN THE REPHRASED VERSION OF THE INPUT." },
13 | { role: "user", content: inputString },
14 | ],
15 | });
16 | return groqResponse.choices[0].message.content;
17 | }
18 |
19 | async function searchEngineForSources(message: string) {
20 | const rephrasedMessage = await rephraseInput(message);
21 | const data = JSON.stringify({
22 | "q": rephrasedMessage
23 | });
24 | try {
25 | const response = await fetch('https://google.serper.dev/search', {
26 | method: 'POST',
27 | headers: {
28 | 'X-API-KEY': process.env.SERPER_API || "",
29 | 'Content-Type': 'application/json'
30 | },
31 | body: data
32 | });
33 | if (response.status === 403) {
34 | throw new Error('Forbidden');
35 | }
36 | const docs = await response.json();
37 | return docs;
38 | } catch (error) {
39 | throw error;
40 | }
41 | }
42 |
43 | const docs = await searchEngineForSources(query);
44 | if (!docs) {
45 | return "Please obtain an API key to enable the search functionality.";
46 | }
47 |
48 | let sources = JSON.stringify(docs);
49 |
50 | const chatCompletion = await groq.chat.completions.create({
51 | messages: [
52 | {
53 | role: "system",
54 | content: `- Here is my query "${query}", only respond back with the answer in ONE SENTENCE. Never mention the system message.`
55 | },
56 | { role: "user", content: `RETURN ANSWER IN ONE SENTENCE ONLY. ${sources}.` },
57 | ],
58 | model: config.inferenceModel,
59 | });
60 | return chatCompletion.choices[0].message.content;
61 | }, { name: 'answerEngine' });
--------------------------------------------------------------------------------
/app/utils/chatCompletionWithTools.tsx:
--------------------------------------------------------------------------------
1 | import { ChatOpenAI } from "@langchain/openai";
2 | import { ChatGroq } from "@langchain/groq";
3 |
4 | // tools
5 | import { getCurrentWeather } from "./tools/getWeather";
6 | import { searchSong } from "./tools/getSpotify";
7 | import { getTime } from "./tools/getTime";
8 | import { config } from '../config';
9 |
10 | const tool_calls = new ChatOpenAI({
11 | model: "gpt-3.5-turbo",
12 | }).bind({
13 | tools: [
14 | {
15 | type: "function",
16 | function: {
17 | name: "getCurrentWeather",
18 | description: "Retrieves the current weather conditions for a given city based on its latitude and longitude coordinates.",
19 | parameters: {
20 | type: "object",
21 | properties: {
22 | latitude: {
23 | type: "number",
24 | description: "The latitude coordinate of the city in decimal degrees.",
25 | },
26 | longitude: {
27 | type: "number",
28 | description: "The longitude coordinate of the city in decimal degrees.",
29 | },
30 | },
31 | required: ["latitude", "longitude"]
32 | },
33 | },
34 | },
35 | {
36 | type: "function",
37 | function: {
38 | name: "searchSong",
39 | description: "Searches for a song on Spotify based on the provided search query and returns the track ID.",
40 | parameters: {
41 | type: "object",
42 | properties: {
43 | query: {
44 | type: "string",
45 | description: "The search query to find a song on Spotify, such as the song title or artist name.",
46 | },
47 | },
48 | required: ["query"],
49 | },
50 | },
51 | },
52 | {
53 | type: "function",
54 | function: {
55 | name: "getTime",
56 | description: "Retrieves the current time in the local time zone.",
57 | parameters: {
58 | type: "object",
59 | properties: {},
60 | required: []
61 | },
62 | },
63 | }
64 | ],
65 | tool_choice: "auto",
66 | });
67 |
68 |
69 | export const chatCompletionWithTools = async (query: string) => {
70 | const res = await tool_calls.invoke([
71 | ["system", "You are a helpful assistant and only responds in one sentence. Use the tools included only if they are relevant to the query. ."],
72 | ["human", query],
73 | ]);
74 | const toolCalls = res.additional_kwargs.tool_calls;
75 | if (toolCalls && toolCalls.length > 0) {
76 | if (toolCalls[0].function.name === "getCurrentWeather") {
77 | const { function: { arguments: argString } } = toolCalls[0];
78 | const { latitude, longitude } = JSON.parse(argString);
79 | const weatherData = await getCurrentWeather(latitude, longitude);
80 | (weatherData);
81 | return {
82 | uiComponent: {
83 | component: 'weather',
84 | data: weatherData
85 | }
86 | };
87 | } else if (toolCalls[0].function.name === "searchSong") {
88 | const { function: { arguments: argString } } = toolCalls[0];
89 | const { query } = JSON.parse(argString);
90 | const trackId = await searchSong(query);
91 | return {
92 | uiComponent: {
93 | component: 'spotify',
94 | data: trackId
95 | }
96 | };
97 | } else if (toolCalls[0].function.name === "getTime") {
98 | const time = await getTime();
99 | return {
100 | uiComponent: {
101 | component: 'time',
102 | data: time
103 | }
104 | };
105 | }
106 | } else {
107 | return { message: res?.lc_kwargs?.content };
108 | }
109 | };
110 |
111 |
--------------------------------------------------------------------------------
/app/utils/generateChatCompletion.tsx:
--------------------------------------------------------------------------------
1 | import { HumanMessage } from "@langchain/core/messages";
2 | import { ChatOpenAI } from "@langchain/openai";
3 | import { ChatGroq } from "@langchain/groq";
4 | import Groq from 'groq-sdk';
5 | import { config } from '../config';
6 | import { traceable } from "langsmith/traceable";
7 |
8 | const groq = new Groq();
9 |
10 | export const generateChatCompletion = traceable(async (responseText: string) => {
11 | let completion;
12 | if (config.inferenceModelProvider === 'openai') {
13 | const chat = new ChatOpenAI({
14 | model: config.inferenceModel,
15 | maxTokens: 1024,
16 | });
17 | const message = new HumanMessage(responseText);
18 | completion = await chat.invoke([message]);
19 | responseText = completion?.lc_kwargs?.content || "No information available.";
20 | } else if (config.inferenceModelProvider === 'groq') {
21 | completion = await groq.chat.completions.create({
22 | messages: [
23 | { role: "system", content: "You are a helpful assistant and only responds in one sentence. If you don't know the answer, rephrase the question that will be passed to the next model." },
24 | { role: "user", content: responseText },
25 | ],
26 | model: config.inferenceModel,
27 | });
28 | responseText = completion.choices[0].message.content;
29 | } else {
30 | throw new Error('Invalid inference model provider');
31 | }
32 | return responseText;
33 | }, { name: 'generateChatCompletion' });
--------------------------------------------------------------------------------
/app/utils/generateTTS.tsx:
--------------------------------------------------------------------------------
1 | import OpenAI from 'openai';
2 | import { config } from '../config';
3 | import { traceable } from "langsmith/traceable";
4 |
5 | const openai = new OpenAI();
6 |
7 | export const generateTTS = traceable(async (responseText: string) => {
8 | const mp3 = await openai.audio.speech.create({
9 | model: config.ttsModel,
10 | voice: config.ttsvoice as "alloy" | "echo" | "fable" | "onyx" | "nova" | "shimmer",
11 | input: responseText,
12 | });
13 | const buffer = Buffer.from(await mp3.arrayBuffer());
14 | const base64Audio = buffer.toString('base64');
15 | return `data:audio/mpeg;base64,${base64Audio}`;
16 | }, { name: 'generateTTS' });
--------------------------------------------------------------------------------
/app/utils/processImage.tsx:
--------------------------------------------------------------------------------
1 | import { HumanMessage, AIMessage } from "@langchain/core/messages";
2 | import { ChatOpenAI } from "@langchain/openai";
3 | import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
4 | import { config } from '../config';
5 | import { traceable } from "langsmith/traceable";
6 | import * as fal from "@fal-ai/serverless-client";
7 |
8 | export const processImageWithGPT4o = async (imageFile: File, text: string): Promise<string> => {
9 | const imageData = await imageFile.arrayBuffer();
10 | const imageBase64 = Buffer.from(imageData).toString('base64');
11 | const imageMessage = new HumanMessage({
12 | content: [
13 | {
14 | type: "text",
15 | text: `You are a helpful assistant and only respond in one sentence based on this image and the following text. You will not respond with anything else. ${text}`,
16 | },
17 | {
18 | type: "image_url",
19 | image_url: {
20 | "url": `data:image/jpeg;base64,${imageBase64}`,
21 | }
22 | },
23 | ],
24 | });
25 | const chat = new ChatOpenAI({
26 | model: config.visionModel,
27 | });
28 | const res = await chat.invoke([imageMessage]);
29 | return res?.lc_kwargs?.content || "Sorry, I can't do that yet.";
30 | };
31 |
32 | export const processImageWithLllavaOnFalAI = traceable(async (imageFile: File, text: string): Promise<string> => {
33 | const imageData = await imageFile.arrayBuffer();
34 | const imageBase64 = Buffer.from(imageData).toString('base64');
35 | const result: { output: string } = await fal.subscribe(`fal-ai/${config.visionModel}`, {
36 | input: {
37 | image_url: `data:image/jpeg;base64,${imageBase64}`,
38 | prompt: `Respond in one sentence based on this image and the following text and image ${text}`
39 | },
40 | logs: true,
41 | onQueueUpdate: (update) => {
42 | if (update.status === "IN_PROGRESS" && update.logs) {
43 | update.logs.map((log) => log.message).forEach(console.log);
44 | }
45 | },
46 | });
47 | return result.output;
48 | }, { name: 'processImageWithLllavaOnFalAI' });
49 |
50 | export const processImageWithGoogleGenerativeAI = async (imageFile: File, text: string): Promise<string> => {
51 | const imageData = await imageFile.arrayBuffer();
52 | const imageBase64 = Buffer.from(imageData).toString('base64');
53 | const visionModel = new ChatGoogleGenerativeAI({
54 | apiKey: process.env.GEMINI_API_KEY,
55 | model: config.visionModel,
56 | });
57 | const input2 = [
58 | new AIMessage({
59 | content: [
60 | {
61 | type: "text",
62 | text: `You are a helpful assistant and only respond in one sentence based on this image and the following text. You will not respond with anything else.`,
63 | },
64 | ],
65 | }),
66 | new HumanMessage({
67 | content: [
68 | {
69 | type: "text",
70 | text: text,
71 | },
72 | {
73 | type: "image_url",
74 | image_url: `data:image/jpeg;base64,${imageBase64}`,
75 | },
76 | ],
77 | }),
78 | ];
79 |
80 | const res = await visionModel.invoke(input2);
81 |
82 | return String(res.content);
83 | };
--------------------------------------------------------------------------------
/app/utils/rateLimiting.tsx:
--------------------------------------------------------------------------------
1 | // utils/rateLimit.ts
2 | import { Ratelimit } from "@upstash/ratelimit";
3 | import { Redis } from "@upstash/redis";
4 |
5 | let ratelimit: Ratelimit | undefined;
6 |
7 | export function initializeRateLimit() {
8 | ratelimit = new Ratelimit({
9 | redis: Redis.fromEnv(),
10 | limiter: Ratelimit.slidingWindow(250, "1 h"),
11 | });
12 | }
13 |
14 | export async function checkRateLimit(identifier: string) {
15 | const { success } = (await ratelimit?.limit(identifier)) ?? { success: true };
16 | return success;
17 | }
--------------------------------------------------------------------------------
/app/utils/tools/getSpotify.tsx:
--------------------------------------------------------------------------------
1 | import { SpotifyApi } from "@spotify/web-api-ts-sdk";
2 |
3 | const api = SpotifyApi.withClientCredentials(
4 | process.env.SPOTIFY_CLIENT_ID as string,
5 | process.env.SPOTIFY_CLIENT_SECRET as string
6 | );
7 |
8 | export async function searchSong(query: string): Promise<string> {
9 | const items = await api.search(query, ["track"]);
10 | const track = items.tracks.items[0];
11 | if (track) {
12 | const trackId = track.uri.replace('spotify:track:', '');
13 | return trackId;
14 | } else {
15 | return "No matching song found.";
16 | }
17 | }
--------------------------------------------------------------------------------
/app/utils/tools/getTime.tsx:
--------------------------------------------------------------------------------
1 | import { traceable } from "langsmith/traceable";
2 | export const getTime = traceable(async () => {
3 |
4 | return 'The time is now being displayed on your palm.';
5 | }, { name: 'getTime' });
--------------------------------------------------------------------------------
/app/utils/tools/getWeather.tsx:
--------------------------------------------------------------------------------
1 | export async function getCurrentWeather(latitude: number, longitude: number): Promise<string> {
2 | const params = {
3 | latitude: String(latitude),
4 | longitude: String(longitude),
5 | hourly: "temperature_2m",
6 | format: "json",
7 | };
8 |
9 | const url = "https://api.open-meteo.com/v1/forecast";
10 |
11 | const response = await fetch(url + "?" + new URLSearchParams(params));
12 | const data = await response.json();
13 |
14 | const parsedData = JSON.parse(JSON.stringify(data));
15 |
16 | const weatherData = parsedData.hourly.time.map((time: string, index: number) => ({
17 | time,
18 | temperature: parsedData.hourly.temperature_2m[index],
19 | }));
20 |
21 | const formattedData = JSON.stringify(weatherData);
22 |
23 | return formattedData;
24 | }
--------------------------------------------------------------------------------
/app/utils/transcribeAudio.tsx:
--------------------------------------------------------------------------------
1 | import { toFile } from 'openai';
2 | import Groq from 'groq-sdk';
3 | import OpenAI from 'openai';
4 | import { config } from '../config';
5 | import { traceable } from "langsmith/traceable";
6 | const groq = new Groq();
7 | const openai = new OpenAI();
8 | export const transcribeAudio = traceable(async (audioBlob: Blob, timestamp: number) => {
9 | try {
10 | let transcription;
11 | if (config.whisperModelProvider === 'openai') {
12 | transcription = await openai.audio.transcriptions.create({
13 | file: await toFile(audioBlob, `audio-${timestamp}.wav`),
14 | model: config.whisperModel,
15 | });
16 | } else if (config.whisperModelProvider === 'groq') {
17 | transcription = await groq.audio.transcriptions.create({
18 | file: await toFile(audioBlob, `audio-${timestamp}.wav`),
19 | model: config.whisperModel,
20 | });
21 | } else {
22 | throw new Error('Invalid whisper model');
23 | }
24 | return transcription.text;
25 | } catch (error) {
26 | console.error('Error transcribing audio:', error);
27 | return 'Error transcribing audio. Please try again later.';
28 | }
29 | }, { name: 'transcribeAudio' });
--------------------------------------------------------------------------------
/bun.lockb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developersdigest/ai-devices/8fa8afa217f950b4a8f8046f2095f27cf05c2db5/bun.lockb
--------------------------------------------------------------------------------
/index.ts:
--------------------------------------------------------------------------------
1 | console.log("Hello via Bun!");
--------------------------------------------------------------------------------
/next-env.d.ts:
--------------------------------------------------------------------------------
1 | /// <reference types="next" />
2 | /// <reference types="next/image-types/global" />
3 |
4 | // NOTE: This file should not be edited
5 | // see https://nextjs.org/docs/basic-features/typescript for more information.
6 |
--------------------------------------------------------------------------------
/next.config.mjs:
--------------------------------------------------------------------------------
1 | /** @type {import('next').NextConfig} */
2 | const nextConfig = {};
3 |
4 | export default nextConfig;
5 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ai-devices",
3 | "version": "0.1.0",
4 | "private": true,
5 | "author": "Developers Digest",
6 | "scripts": {
7 | "dev": "next dev",
8 | "build": "next build",
9 | "start": "next start",
10 | "lint": "next lint"
11 | },
12 | "dependencies": {
13 | "@fal-ai/serverless-client": "^0.9.3",
14 | "@fal-ai/serverless-proxy": "^0.7.4",
15 | "@langchain/community": "^0.0.52",
16 | "@langchain/google-genai": "^0.0.13",
17 | "@langchain/groq": "^0.0.8",
18 | "@langchain/openai": "^0.0.28",
19 | "@spotify/web-api-ts-sdk": "^1.2.0",
20 | "@types/react-scroll": "^1.8.10",
21 | "@upstash/ratelimit": "^1.1.2",
22 | "@upstash/redis": "^1.30.0",
23 | "ai": "latest",
24 | "dotenv": "^16.4.5",
25 | "groq-sdk": "^0.3.2",
26 | "langchain": "^0.1.34",
27 | "langsmith": "^0.1.20",
28 | "next": "14.2.2",
29 | "openai": "^4.38.2",
30 | "openmeteo": "^1.1.3",
31 | "react": "^18",
32 | "react-clock": "^4.6.0",
33 | "react-dom": "^18",
34 | "react-dropzone": "^14.2.3",
35 | "react-icons": "^5.1.0",
36 | "react-scroll": "^1.9.0",
37 | "react-spotify-player": "^1.0.4",
38 | "react-spotify-web-playback": "^0.14.4",
39 | "zod": "^3.23.4"
40 | },
41 | "devDependencies": {
42 | "@types/node": "^20",
43 | "@types/react": "^18",
44 | "@types/react-dom": "^18",
45 | "eslint": "^8",
46 | "eslint-config-next": "14.2.2",
47 | "postcss": "^8",
48 | "tailwindcss": "^3.4.1",
49 | "typescript": "^5"
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/postcss.config.mjs:
--------------------------------------------------------------------------------
1 | /** @type {import('postcss-load-config').Config} */
2 | const config = {
3 | plugins: {
4 | tailwindcss: {},
5 | },
6 | };
7 |
8 | export default config;
9 |
--------------------------------------------------------------------------------
/public/checking.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developersdigest/ai-devices/8fa8afa217f950b4a8f8046f2095f27cf05c2db5/public/checking.mp3
--------------------------------------------------------------------------------
/public/og.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/developersdigest/ai-devices/8fa8afa217f950b4a8f8046f2095f27cf05c2db5/public/og.jpeg
--------------------------------------------------------------------------------
/tailwind.config.ts:
--------------------------------------------------------------------------------
1 | import type { Config } from "tailwindcss";
2 |
3 | const config: Config = {
4 | content: [
5 | "./pages/**/*.{js,ts,jsx,tsx,mdx}",
6 | "./components/**/*.{js,ts,jsx,tsx,mdx}",
7 | "./app/**/*.{js,ts,jsx,tsx,mdx}",
8 | ],
9 | theme: {
10 | extend: {
11 | backgroundImage: {
12 | "gradient-radial": "radial-gradient(var(--tw-gradient-stops))",
13 | "gradient-conic":
14 | "conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))",
15 | },
16 | },
17 | },
18 | plugins: [],
19 | };
20 | export default config;
21 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "lib": ["dom", "dom.iterable", "esnext"],
4 | "allowJs": true,
5 | "skipLibCheck": true,
6 | "strict": true,
7 | "noEmit": true,
8 | "esModuleInterop": true,
9 | "module": "esnext",
10 | "moduleResolution": "bundler",
11 | "resolveJsonModule": true,
12 | "isolatedModules": true,
13 | "jsx": "preserve",
14 | "incremental": true,
15 | "plugins": [
16 | {
17 | "name": "next"
18 | }
19 | ],
20 | "paths": {
21 | "@/*": ["./*"]
22 | }
23 | },
24 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts", "app/utils/tools"],
25 | "exclude": ["node_modules"]
26 | }
27 |
--------------------------------------------------------------------------------