├── .env.example
├── .gitignore
├── README.md
├── app
│   ├── api
│   │   ├── translate-image
│   │   │   ├── route.ts
│   │   │   └── solved-route.ts
│   │   └── translate
│   │       ├── route.ts
│   │       └── solved-route.ts
│   ├── components
│   │   ├── Chat-solved.tsx
│   │   ├── Chat.tsx
│   │   ├── Icons.tsx
│   │   ├── Image-solved.tsx
│   │   ├── Image.tsx
│   │   ├── LanguageSelector.tsx
│   │   ├── Nav.tsx
│   │   ├── SelectLanguage.tsx
│   │   ├── TextCounter.tsx
│   │   ├── TranslateImageInput.tsx
│   │   ├── TranslateTextInput.tsx
│   │   └── TranslateTextOutput.tsx
│   ├── consts.ts
│   ├── documents
│   │   └── page.tsx
│   ├── favicon.ico
│   ├── globals.css
│   ├── images
│   │   └── page.tsx
│   ├── layout.tsx
│   ├── page.tsx
│   ├── utils
│   │   └── index.ts
│   └── websites
│       └── page.tsx
├── next.config.mjs
├── package.json
├── pnpm-lock.yaml
├── postcss.config.mjs
├── public
│   ├── drag_and_drop.png
│   ├── next.svg
│   └── vercel.svg
├── tailwind.config.ts
└── tsconfig.json

/.env.example:
--------------------------------------------------------------------------------
GOOGLE_GENERATIVE_AI_API_KEY="..."
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

# dependencies
/node_modules
/.pnp
.pnp.js
.yarn/install-state.gz

# testing
/coverage

# next.js
/.next/
/out/

# production
/build

# misc
.DS_Store
*.pem

# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# local env files
.env*.local

# vercel
.vercel

# typescript
*.tsbuildinfo
next-env.d.ts
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# To do

1. Explain the project in general
   - Get the project running
   - Explain the structure and components

2. Text translation
   -> /app/components/Chat.tsx
   -> /app/api/translate/route.ts

3. Image-to-text translation
   -> /app/components/Image.tsx
   -> /app/api/translate-image/route.ts
--------------------------------------------------------------------------------
/app/api/translate-image/route.ts:
--------------------------------------------------------------------------------
import { streamText } from 'ai'
import { google } from '@ai-sdk/google'
import { object, picklist, safeParse, string } from 'valibot'
import { base64ToUint8Array } from '@/app/utils'

const RequestSchema = object({
  prompt: string(),
  image: string(),
  from: picklist(['Auto', 'English', 'Español']),
  to: picklist(['English', 'Español', 'Japanese'])
})

export async function POST (req: Request) {
  // Extract the message, from and to from the request
  const { success, output, issues } = safeParse(RequestSchema, await req.json())
  if (!success) {
    return new Response(
      JSON.stringify({ issues }),
      { status: 400, headers: { 'Content-Type': 'application/json' } }
    )
  }

  const { from, to, image } = output

  // 1. Get google model gemini-pro-vision
  // check available models:
  // https://ai.google.dev/gemini-api/docs/models/gemini?hl=es-419

  // 2. Transform image base64 to ArrayBuffer Uint8

  // 3. Call streamText, passing the model, imageArray, from and to and get the response

  // 4. Respond with the stream response
}
--------------------------------------------------------------------------------
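For reference, this is how the validation step that is already written for you behaves. The snippet below is illustrative only and is not a file in the repository; it repeats the schema so it can run on its own:

import { object, picklist, safeParse, string } from 'valibot'

// Same shape as the RequestSchema above, repeated here so the snippet stands alone
const Schema = object({
  prompt: string(),
  image: string(),
  from: picklist(['Auto', 'English', 'Español']),
  to: picklist(['English', 'Español', 'Japanese'])
})

// A valid payload: success is true and output is the typed, validated value
const ok = safeParse(Schema, { prompt: 'hola', image: 'iVBORw0KGgo...', from: 'Auto', to: 'English' })
console.log(ok.success) // true

// An invalid payload: success is false and issues describes the failing field
const bad = safeParse(Schema, { prompt: 'hola', image: '', from: 'French', to: 'English' })
console.log(bad.success, bad.issues) // false, with a picklist issue for "from"

safeParse never throws, which is why the route can simply branch on success and answer with a 400 plus the issues when validation fails.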
/app/api/translate-image/solved-route.ts:
--------------------------------------------------------------------------------
import { streamText } from 'ai'
import { google } from '@ai-sdk/google'
import { object, picklist, safeParse, string } from 'valibot'
import { base64ToUint8Array } from '@/app/utils'
import { FROM_LANGUAGES, TO_LANGUAGES } from '@/app/consts'

const RequestSchema = object({
  prompt: string(),
  image: string(),
  from: picklist(FROM_LANGUAGES),
  to: picklist(TO_LANGUAGES)
})

export async function POST (req: Request) {
  // Extract the message, from and to from the request
  const { success, output, issues } = safeParse(RequestSchema, await req.json())
  if (!success) {
    return new Response(
      JSON.stringify({ issues }),
      { status: 400, headers: { 'Content-Type': 'application/json' } }
    )
  }

  const { from, to, image } = output

  // 1. Get google model gemini-pro-vision
  const model = google('models/gemini-pro-vision')

  // 2. Transform image base64 to ArrayBuffer Uint8
  const formattedImage = base64ToUint8Array(image)

  // 3. Call streamText, passing the model, the image and the languages
  const result = await streamText({
    model,
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: `Translate the following text from ${from} to ${to}. If "Auto" is the from language, then try to detect the original language automatically after reading the text from the image. If no text is detected in the image, return an empty string. Always return directly the translated text. Do not include the prompt in the response.` },
          { type: 'image', image: formattedImage }
        ]
      }
    ],
    maxTokens: 4096,
    temperature: 0.7
  })

  // 4. Respond with the stream response
  return result.toAIStreamResponse()
}
--------------------------------------------------------------------------------
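/app/consts.ts appears in the tree but is not reproduced in this listing. Judging from the picklists in route.ts and the FROM_LANGUAGES[0] / TO_LANGUAGES[0] defaults used by the components further down, it presumably looks roughly like this sketch (an assumption, not the actual file):

// Hypothetical sketch of app/consts.ts; the real file is not shown in this listing
export const FROM_LANGUAGES = ['Auto', 'English', 'Español']
export const TO_LANGUAGES = ['English', 'Español', 'Japanese']

Keeping the language lists in one module lets the valibot picklists on the server and the selectors on the client stay in sync.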
/app/api/translate/route.ts:
--------------------------------------------------------------------------------
import { object, picklist, safeParse, string } from 'valibot'
import { generateText, streamText } from 'ai'
import { google } from '@ai-sdk/google'

const RequestSchema = object({
  prompt: string(),
  from: picklist(['Auto', 'English', 'Español']),
  to: picklist(['English', 'Español', 'Japanese'])
})

export async function POST (req: Request) {
  // Extract the message, from and to from the request
  const { success, output, issues } = safeParse(RequestSchema, await req.json())
  if (!success) {
    return new Response(
      JSON.stringify({ issues }),
      { status: 400, headers: { 'Content-Type': 'application/json' } }
    )
  }

  const { prompt, from, to } = output

  // TODO
  // 1. Get the Google language model
  // 2. Call generateText with the model, prompt, system message, maxTokens and temperature
  // 3. Return the response text
  // 4. Use streamText and toAIStreamResponse to improve performance and UX
}
--------------------------------------------------------------------------------
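The TODO above can be tackled in two stages. Steps 1 to 3 only need a plain generateText call; the sketch below would sit inside POST after the validation block and uses a deliberately shortened system prompt (the repo's exact wording, and step 4's switch to streaming, are in solved-route.ts below):

// Sketch of TODO steps 1-3 (non-streaming); solved-route.ts below swaps this for streamText
const model = google('models/gemini-pro')

const { text } = await generateText({
  model,
  prompt,
  system: `Translate the following text from ${from} to ${to}.`,
  maxTokens: 4096,
  temperature: 0.7
})

return new Response(text, { headers: { 'Content-Type': 'text/plain' } })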
/app/api/translate/solved-route.ts:
--------------------------------------------------------------------------------
import { object, picklist, safeParse, string } from 'valibot'
import { streamText } from 'ai'
import { google } from '@ai-sdk/google'

const RequestSchema = object({
  prompt: string(),
  from: picklist(['Auto', 'English', 'Español']),
  to: picklist(['English', 'Español', 'Japanese'])
})

export async function POST (req: Request) {
  // Extract the message, from and to from the request
  const { success, output, issues } = safeParse(RequestSchema, await req.json())
  if (!success) {
    return new Response(
      JSON.stringify({ issues }),
      { status: 400, headers: { 'Content-Type': 'application/json' } }
    )
  }

  const { prompt, from, to } = output

  // 1. Get the Google language model
  const model = google('models/gemini-pro')

  // 2. Call generateText with the model, prompt, system message, maxTokens and temperature
  const result = await streamText({
    model,
    prompt,
    system: `Translate the following text from ${from} to ${to}. If "Auto" is the from language, then try to detect the original language automatically after reading the text. Return directly the translated text. Do not include the prompt in the response.`,
    maxTokens: 4096,
    temperature: 0.7
  })

  // 3. Return the response text
  // 4. Use streamText and toAIStreamResponse to improve performance and UX
  return result.toAIStreamResponse()
}
--------------------------------------------------------------------------------
/app/components/Chat-solved.tsx:
--------------------------------------------------------------------------------
'use client'

import { useCompletion } from 'ai/react'
import { useEffect, useState } from 'react'
import { useDebounce } from '@uidotdev/usehooks'
import { TranslateTextOutput } from './TranslateTextOutput'

import { FROM_LANGUAGES, TO_LANGUAGES } from '../consts'
import { LanguageSelector } from './LanguageSelector'
import { TranslateTextInput } from './TranslateTextInput'

export function Chat () {
  const [from, setFrom] = useState(FROM_LANGUAGES[0])
  const [to, setTo] = useState(TO_LANGUAGES[0])
  const [text, setText] = useState('')
  const debouncedSearchTerm = useDebounce(text, 300)

  const { completion, complete, isLoading } = useCompletion({
    api: '/api/translate',
    body: { from, to }
  })

  useEffect(() => {
    if (debouncedSearchTerm === '') return
    complete(debouncedSearchTerm, { body: { from, to } })
  }, [debouncedSearchTerm, from, to])

  return (
    <>
      {/* The JSX below is reconstructed: the original tags and classNames were lost when this listing was generated */}
      <LanguageSelector from={from} setFrom={setFrom} to={to} setTo={setTo} />

      <div>
        <TranslateTextInput text={text} onChange={(newText) => { setText(newText) }} />
        <TranslateTextOutput completion={completion} isLoading={isLoading} />
      </div>
    </>
  )
}
--------------------------------------------------------------------------------
/app/components/Chat.tsx:
--------------------------------------------------------------------------------
'use client'

import { useCompletion } from 'ai/react'
import { useEffect, useState } from 'react'
import { useDebounce } from '@uidotdev/usehooks'

import { TranslateTextOutput } from './TranslateTextOutput'
import { LanguageSelector } from './LanguageSelector'
import { TranslateTextInput } from './TranslateTextInput'

import { FROM_LANGUAGES, TO_LANGUAGES } from '../consts'

export function Chat () {
  const [from, setFrom] = useState(FROM_LANGUAGES[0])
  const [to, setTo] = useState(TO_LANGUAGES[0])

  // 1. Create state to store user text
  // 2. Pass text and onChange callback to TranslateTextInput
  // 3. Add useCompletion hook to call to `/api/translate`
  // 4. Call `complete` on changing the input
  // 5. Call `complete` with useEffect when changing from or to
  // 6. Add useDebounce to improve performance
  // 7. Add useEffect to call complete when param changes

  return (
    <>
      {/* The JSX below is reconstructed: the original tags and classNames were lost when this listing was generated */}
      <LanguageSelector from={from} setFrom={setFrom} to={to} setTo={setTo} />

      <div>
        {/* TODO steps 1-2: wire real state into these placeholder props */}
        <TranslateTextInput text='' onChange={() => {}} />
        <TranslateTextOutput completion='' isLoading={false} />
      </div>
    </>
  )
}
--------------------------------------------------------------------------------
/app/components/Icons.tsx:
--------------------------------------------------------------------------------
// NOTE: the original SVG markup of these three icons was lost when this listing was
// generated; the elements below are empty placeholders, not the real icons.
export const ArrowsIcon = () => (
  <svg viewBox='0 0 24 24' />
)

export const ClipboardIcon = () => (
  <svg viewBox='0 0 24 24' />
)

export const SpeakerIcon = () => (
  <svg viewBox='0 0 24 24' />
)
--------------------------------------------------------------------------------
/app/components/Image-solved.tsx:
--------------------------------------------------------------------------------
'use client'

import { useEffect, useState } from 'react'
import { useCompletion } from 'ai/react'

import { TranslateTextOutput } from './TranslateTextOutput'
import { LanguageSelector } from './LanguageSelector'

import { fileToBase64 } from '../utils'
import { FROM_LANGUAGES, TO_LANGUAGES } from '../consts'
import { TranslateImageInput } from './TranslateImageInput'

export function Image () {
  const [from, setFrom] = useState(FROM_LANGUAGES[0])
  const [to, setTo] = useState(TO_LANGUAGES[0])
  // The type argument is an assumption: the original annotation was lost when this listing was generated
  const [file, setFile] = useState<File | null>(null)

  const { completion, complete, isLoading } = useCompletion({
    api: '/api/translate-image'
  })

  const handleDrop = async (acceptedFiles: File[]) => {
    setFile(acceptedFiles[0])
  }

  useEffect(() => {
    async function run () {
      if (file === null) return
      const image = await fileToBase64(file)
      complete('', { body: { from, to, image } })
    }

    run()
  }, [from, to, file])

  const image = file != null ? URL.createObjectURL(file) : null

  return (
    <>
      {/* The JSX below is reconstructed: the original tags and classNames were lost when this listing was generated */}
      <LanguageSelector from={from} setFrom={setFrom} to={to} setTo={setTo} />

      <div>
        <TranslateImageInput
          image={image}
          onDrop={handleDrop}
          onClose={() => { setFile(null) }}
        />
        <TranslateTextOutput completion={completion} isLoading={isLoading} />
      </div>
    </>
  )
}
--------------------------------------------------------------------------------
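Both API routes and the image components import helpers from /app/utils, which is listed in the tree but not reproduced here. Based only on how they are called (fileToBase64 produces the base64 string sent in the request body, and base64ToUint8Array turns it back into bytes for the model), a minimal sketch of those helpers might look like this (an assumption, not the actual file):

// Hypothetical sketch of app/utils/index.ts; the real file is not shown in this listing

// Reads a File (e.g. a dropped image) as a base64 data URL
export function fileToBase64 (file: File): Promise<string> {
  return new Promise((resolve, reject) => {
    const reader = new FileReader()
    reader.onload = () => { resolve(reader.result as string) }
    reader.onerror = reject
    reader.readAsDataURL(file)
  })
}

// Turns a base64 string (with or without a data URL prefix) into a Uint8Array
export function base64ToUint8Array (base64: string): Uint8Array {
  const data = base64.includes(',') ? base64.split(',')[1] : base64
  const binary = atob(data)
  const bytes = new Uint8Array(binary.length)
  for (let i = 0; i < binary.length; i++) {
    bytes[i] = binary.charCodeAt(i)
  }
  return bytes
}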
/app/components/Image.tsx:
--------------------------------------------------------------------------------
'use client'

import { useEffect, useState } from 'react'
import { useCompletion } from 'ai/react'

import { TranslateTextOutput } from './TranslateTextOutput'
import { LanguageSelector } from './LanguageSelector'

import { fileToBase64 } from '../utils'
import { FROM_LANGUAGES, TO_LANGUAGES } from '../consts'
import { TranslateImageInput } from './TranslateImageInput'

export function Image () {
  const [from, setFrom] = useState(FROM_LANGUAGES[0])
  const [to, setTo] = useState(TO_LANGUAGES[0])

  // 1. Create state to store the file
  // 2. Create handleDrop function to set the file
  // 3. Create `image` variable to show the image
  // 4. Pass all the necessary props to TranslateImageInput
  // 5. Add `onClose` to remove the file on clicking X

  // 6. Add useCompletion hook to call to `/api/translate-image`
  // 7. useEffect: `complete` on changing the file, from or to fields
  // 7a. Transform image to base64 and pass it to `complete` body
  // 8. Pass `completion` and `isLoading` to TranslateTextOutput

  const image = null

  return (
    <>
      {/* The JSX below is reconstructed: the original tags and classNames were lost when this listing was generated */}
      <LanguageSelector from={from} setFrom={setFrom} to={to} setTo={setTo} />

      <div>
        {/* TODO steps 4-8: wire the real props */}
        <TranslateImageInput image={image} onDrop={() => {}} onClose={() => {}} />
        <TranslateTextOutput completion='' isLoading={false} />
      </div>
    </>
  )
}
--------------------------------------------------------------------------------
/app/components/LanguageSelector.tsx:
--------------------------------------------------------------------------------
import { FROM_LANGUAGES, TO_LANGUAGES } from '../consts'
import { SelectLanguage } from './SelectLanguage'

export const LanguageSelector: React.FC<{ from: string, setFrom: (language: string) => void, to: string, setTo: (language: string) => void }> = ({ from, setFrom, to, setTo }) => {
  return (
    // The JSX below is reconstructed: the original wrapper markup and classNames were lost when this listing was generated
    <div>
      <SelectLanguage languages={FROM_LANGUAGES} selected={from} setSelected={setFrom} />
      <SelectLanguage languages={TO_LANGUAGES} selected={to} setSelected={setTo} />
    </div>
  )
}
--------------------------------------------------------------------------------
/app/components/Nav.tsx:
--------------------------------------------------------------------------------
'use client'

import Link from 'next/link'
import { usePathname } from 'next/navigation'
import { MdDocumentScanner, MdImage, MdTranslate, MdWeb } from 'react-icons/md'

// NOTE: the icon elements in TABS and the JSX markup below are reconstructed; the
// original tags and classNames were lost when this listing was generated.
const TABS = [{
  path: '/',
  label: 'Texto',
  icon: <MdTranslate />
}, {
  path: '/images',
  label: 'Imágenes',
  icon: <MdImage />
}, {
  path: '/documents',
  label: 'Documentos',
  icon: <MdDocumentScanner />
}, {
  path: '/websites',
  label: 'Sitios Web',
  icon: <MdWeb />
}]

export function Nav () {
  const pathname = usePathname()

  return (
    <nav>
      <div>
        {
          TABS.map(({ path, label, icon }) => (
            <NavButton key={path} href={path} className={pathname === path ? 'active' : ''}>
              {icon}
              {label}
            </NavButton>
          ))
        }
      </div>
    </nav>
  )
}

const NavButton: React.FC<{ children: React.ReactNode, href: string, className?: string }> = ({ children, href, className = '' }) => {
  return (
    <Link
      href={href}
      className={className}
    >
      {children}
    </Link>
  )
}
--------------------------------------------------------------------------------
/app/components/SelectLanguage.tsx:
--------------------------------------------------------------------------------
export const SelectLanguage: React.FC<{ languages: string[], selected: string, setSelected: (language: string) => void }> = ({ languages, selected, setSelected }) => {
  return (
    // The select markup is reconstructed: the original element and its classNames were lost when this listing was generated
    <select
      value={selected}
      onChange={(event) => { setSelected(event.target.value) }}
    >
      {languages.map((language) => (
        <option key={language} value={language}>
          {language}
        </option>
      ))}
    </select>
  )
}
--------------------------------------------------------------------------------
/app/components/TextCounter.tsx:
--------------------------------------------------------------------------------
export const TextCounter = ({ text }: { text: string }) => {
  return (
    // The wrapper markup is reconstructed: the original tags and classNames were lost when this listing was generated
    <div>
      <span>{text.length}/5000</span>
    </div>
  )
}
--------------------------------------------------------------------------------
/app/components/TranslateImageInput.tsx:
--------------------------------------------------------------------------------
import Dropzone from 'react-dropzone'
import { MdClose } from 'react-icons/md'

export function TranslateImageInput (
  { image, onClose, onDrop }:
  { image: string | null, onClose: () => void, onDrop: (acceptedFiles: File[]) => void }
) {
  return (
    // The JSX below is reconstructed: the original tags and classNames were lost when this listing was generated
    <div>
      {
        image !== null
          ? (
            <div>
              <button onClick={onClose}>
                <MdClose />
              </button>
              <img src={image} alt='Imagen seleccionada' />
            </div>
            )
          : (
            <Dropzone onDrop={onDrop}>
              {({ getRootProps, getInputProps, isDragActive }) => (
                <div {...getRootProps()}>
                  <input {...getInputProps()} />
                  <div>
                    <img src='/drag_and_drop.png' alt='Arrastrar y soltar' />
                    {isDragActive ? 'Suelta tu imagen aquí...' : 'Arrastra y suelta tu imagen aquí...'}
                  </div>
                </div>
              )}
            </Dropzone>
            )
      }
    </div>
  )
}
--------------------------------------------------------------------------------
/app/components/TranslateTextInput.tsx:
--------------------------------------------------------------------------------
import { TextCounter } from './TextCounter'

export function TranslateTextInput ({ onChange, text = '' }: { onChange: (text: string) => void, text: string }) {
  // The event type argument is an assumption: the original annotation was lost when this
  // listing was generated, but e.target.value implies a textarea change handler
  const handleChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
    onChange(e.target.value)
  }

  return (
    // The JSX below is reconstructed from context (the original markup was lost when this
    // listing was generated), and the listing itself is cut off at this point
    <div>
      <textarea value={text} onChange={handleChange} />
      <TextCounter text={text} />