├── frontend
│   ├── requirements_streamlit.txt
│   ├── run_streamlit.py
│   └── streamlit_app.py
├── app
│   ├── __pycache__
│   │   └── main.cpython-311.pyc
│   ├── main.py
│   └── templates
│       └── index.html
├── requirements.txt
├── docker-compose.yml
├── Dockerfile
├── LICENSE
└── README.md

/frontend/requirements_streamlit.txt:
--------------------------------------------------------------------------------
 1 | streamlit==1.29.0
 2 | openai==0.28.1
 3 | requests==2.31.0
 4 | python-dotenv==1.0.0
 5 | numpy>=1.21.0
--------------------------------------------------------------------------------
/app/__pycache__/main.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sezindartar/-language-learning-assistant/HEAD/app/__pycache__/main.cpython-311.pyc
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
 1 | fastapi==0.104.1
 2 | uvicorn==0.24.0
 3 | openai==0.28.1
 4 | pydantic==2.5.0
 5 | python-multipart==0.0.6
 6 | jinja2==3.1.2
 7 | aiofiles==23.2.1
 8 | python-dotenv==1.0.0
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
 1 | version: '3.8'
 2 | 
 3 | services:
 4 |   language-assistant:
 5 |     build: .
 6 |     ports:
 7 |       - "8001:8000"  # host port 8001 -> container port 8000
 8 |     environment:
 9 |       - OPENAI_API_KEY=${OPENAI_API_KEY}
10 |     volumes:
11 |       - ./app:/app
12 |     command: uvicorn main:app --host 0.0.0.0 --port 8000 --reload
13 | 
14 | volumes:
15 |   app_data:
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
 1 | FROM python:3.11-slim
 2 | 
 3 | WORKDIR /app
 4 | 
 5 | # Install dependencies
 6 | COPY requirements.txt .
 7 | RUN pip install --no-cache-dir -r requirements.txt
 8 | 
 9 | # Copy application code
10 | COPY app/ .
11 | 
12 | # Create templates directory if it doesn't exist
13 | RUN mkdir -p templates
14 | 
15 | EXPOSE 8000
16 | 
17 | CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (c) 2025 Sezin
 4 | 
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/frontend/run_streamlit.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python3
 2 | """
 3 | Streamlit Runner Script
 4 | Run this to start the Streamlit app
 5 | """
 6 | import openai
 7 | import subprocess
 8 | import sys
 9 | import os
10 | 
11 | 
12 | openai.api_key = os.getenv("OPENAI_API_KEY")
13 | def main():
14 |     # Set environment variables if needed
15 |     if not os.getenv("OPENAI_API_KEY"):
16 |         print("⚠️ Warning: OPENAI_API_KEY environment variable not set!")
17 |         print("Please set it before running the app:")
18 |         print("export OPENAI_API_KEY='your-api-key-here'")
19 |         print()
20 | 
21 |     # Run streamlit
22 |     try:
23 |         print("🚀 Starting Streamlit Language Learning Assistant...")
24 |         print("📱 The app will open in your browser automatically")
25 |         print("🔗 Usually at: http://localhost:8501")
26 |         print()
27 | 
28 |         subprocess.run([
29 |             sys.executable, "-m", "streamlit", "run", "streamlit_app.py",
30 |             "--server.port", "8501",
31 |             "--server.address", "0.0.0.0"
32 |         ])
33 |     except KeyboardInterrupt:
34 |         print("\n👋 Shutting down Streamlit app...")
35 |     except Exception as e:
36 |         print(f"❌ Error running Streamlit: {e}")
37 | 
38 | if __name__ == "__main__":
39 |     main()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # 🤖 AI Language Learning Assistant
 2 | 
 3 | An intelligent language learning application powered by OpenAI GPT-4, available in both **Streamlit** and **FastAPI** versions with Docker support.
4 | 5 | ## 🌟 Features 6 | 7 | - **Multi-language Support**: Turkish, English, German, French, Italian 8 | - **Automatic Level Detection**: CEFR levels (A1-C2) detected after 3 messages 9 | - **Personalized Responses**: AI adapts to your language level 10 | - **Real-time Conversation**: Interactive chat interface 11 | - **Progress Tracking**: Session management and conversation history 12 | - **Level-appropriate Learning**: Responses tailored to your proficiency 13 | - **Docker Support**: Easy deployment with containerization 14 | 15 | ## 🚀 Quick Start 16 | 17 | ### Prerequisites 18 | 19 | - Python 3.8+ 20 | - OpenAI API Key 21 | - Docker (optional, for containerized deployment) 22 | 23 | ### Installation 24 | 25 | 1. **Clone the repository** 26 | ```bash 27 | git clone https://github.com/YOUR_USERNAME/ai-language-assistant.git 28 | cd ai-language-assistant 29 | ``` 30 | 31 | 2. **Create virtual environment** 32 | ```bash 33 | python -m venv venv 34 | 35 | # Windows 36 | venv\Scripts\activate 37 | 38 | # macOS/Linux 39 | source venv/bin/activate 40 | ``` 41 | 42 | 3. **Install dependencies** 43 | ```bash 44 | pip install -r requirements.txt 45 | ``` 46 | 47 | 4. **Set up environment variables** 48 | 49 | Create a `.env` file in the project root: 50 | ``` 51 | OPENAI_API_KEY=your_openai_api_key_here 52 | ``` 53 | 54 | ## 🐳 Docker Deployment 55 | 56 | ### Option 1: Streamlit Version 57 | ```bash 58 | # Build the image 59 | docker build -t ai-language-assistant-streamlit . 60 | 61 | # Run the container 62 | docker run -p 8501:8501 --env-file .env ai-language-assistant-streamlit 63 | ``` 64 | 65 | ### Option 2: FastAPI Version 66 | ```bash 67 | # Build the FastAPI image 68 | docker build -f Dockerfile.fastapi -t ai-language-assistant-fastapi . 
69 | 70 | # Run the container 71 | docker run -p 8000:8000 --env-file .env ai-language-assistant-fastapi 72 | ``` 73 | 74 | ### Option 3: Docker Compose (Both versions) 75 | ```bash 76 | # Start all services 77 | docker-compose up -d 78 | 79 | # Stop all services 80 | docker-compose down 81 | ``` 82 | 83 | Access the applications: 84 | - **Streamlit**: http://localhost:8501 85 | - **FastAPI**: http://localhost:8000 86 | 87 | ## 🖥️ Local Development 88 | 89 | ### Streamlit Version 90 | ```bash 91 | streamlit run streamlit_app.py 92 | ``` 93 | Open: http://localhost:8501 94 | 95 | ### FastAPI Version 96 | ```bash 97 | uvicorn main:app --reload --host 0.0.0.0 --port 8000 98 | ``` 99 | Open: http://localhost:8000 100 | 101 | ## 🎯 How to Use 102 | 103 | ### Streamlit Interface 104 | 1. **Start the Application**: Open your browser and navigate to `http://localhost:8501` 105 | 2. **Choose Your Language**: Start writing in any supported language 106 | 3. **Automatic Detection**: After 3 messages, the system detects language and level 107 | 4. **Practice**: Continue conversations with tailored AI responses 108 | 5. **Track Progress**: Monitor your session in the sidebar 109 | 110 | ### FastAPI Interface 111 | 1. **Start the Application**: Open your browser and navigate to `http://localhost:8000` 112 | 2. **Interactive Chat**: Use the web interface for real-time conversations 113 | 3. **API Endpoints**: Access programmatically via REST API 114 | 4. 
**Session Management**: Each session maintains conversation history 115 | 116 | ## 🌍 Supported Languages 117 | 118 | - 🇹🇷 **Turkish** (Türkçe) 119 | - 🇬🇧 **English** 120 | - 🇩🇪 **German** (Deutsch) 121 | - 🇫🇷 **French** (Français) 122 | - 🇮🇹 **Italian** (Italiano) 123 | 124 | ## 📊 CEFR Levels 125 | 126 | - **A1**: Beginner 127 | - **A2**: Elementary 128 | - **B1**: Intermediate 129 | - **B2**: Upper Intermediate 130 | - **C1**: Advanced 131 | - **C2**: Proficient 132 | 133 | ## 🏗️ Architecture 134 | 135 | ### Two Implementation Options 136 | 137 | #### 1. Streamlit Version (`streamlit_app.py`) 138 | - **Frontend**: Streamlit web interface 139 | - **State Management**: Streamlit session state 140 | - **Deployment**: Single file, easy to run 141 | - **Best for**: Quick prototyping, simple deployment 142 | 143 | #### 2. FastAPI Version (`main.py`) 144 | - **Backend**: FastAPI REST API 145 | - **Frontend**: HTML templates with JavaScript 146 | - **State Management**: In-memory sessions 147 | - **Deployment**: Production-ready web service 148 | - **Best for**: Scalable applications, API integrations 149 | 150 | ## 🔧 Technical Details 151 | 152 | ### Tech Stack 153 | - **Backend**: FastAPI / Streamlit 154 | - **AI Model**: OpenAI GPT-4 155 | - **Language**: Python 3.8+ 156 | - **Containerization**: Docker 157 | - **Orchestration**: Docker Compose 158 | 159 | ### API Endpoints (FastAPI Version) 160 | - `GET /` - Main chat interface 161 | - `POST /chat` - Send message and get response 162 | - `GET /new-session` - Create new session 163 | 164 | ## 📝 Project Structure 165 | 166 | ``` 167 | ai-language-assistant/ 168 | ├── streamlit_app.py # Streamlit version 169 | ├── main.py # FastAPI version 170 | ├── templates/ # HTML templates (FastAPI) 171 | │ └── index.html 172 | ├── static/ # Static files (FastAPI) 173 | │ ├── style.css 174 | │ └── script.js 175 | ├── requirements.txt # Python dependencies 176 | ├── Dockerfile # Streamlit Docker image 177 | ├── Dockerfile.fastapi # 
FastAPI Docker image 178 | ├── docker-compose.yml # Multi-container setup 179 | ├── .env # Environment variables 180 | ├── .gitignore # Git ignore file 181 | └── README.md # This file 182 | ``` 183 | 184 | ## 📋 Requirements 185 | 186 | Create `requirements.txt`: 187 | ``` 188 | fastapi>=0.104.0 189 | uvicorn[standard]>=0.24.0 190 | streamlit>=1.28.0 191 | openai>=0.28.0 192 | python-dotenv>=0.19.0 193 | jinja2>=3.1.0 194 | python-multipart>=0.0.6 195 | aiofiles>=23.0.0 196 | requests>=2.28.0 197 | ``` 198 | 199 | ## 🐳 Docker Configuration 200 | 201 | ### Dockerfile (Streamlit) 202 | ```dockerfile 203 | FROM python:3.9-slim 204 | 205 | WORKDIR /app 206 | COPY requirements.txt . 207 | RUN pip install --no-cache-dir -r requirements.txt 208 | 209 | COPY . . 210 | 211 | EXPOSE 8501 212 | 213 | CMD ["streamlit", "run", "streamlit_app.py", "--server.port=8501", "--server.address=0.0.0.0"] 214 | ``` 215 | 216 | ### Dockerfile.fastapi 217 | ```dockerfile 218 | FROM python:3.9-slim 219 | 220 | WORKDIR /app 221 | COPY requirements.txt . 222 | RUN pip install --no-cache-dir -r requirements.txt 223 | 224 | COPY . . 225 | 226 | EXPOSE 8000 227 | 228 | CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"] 229 | ``` 230 | 231 | ### docker-compose.yml 232 | ```yaml 233 | version: '3.8' 234 | 235 | services: 236 | streamlit: 237 | build: . 238 | ports: 239 | - "8501:8501" 240 | env_file: 241 | - .env 242 | volumes: 243 | - .:/app 244 | 245 | fastapi: 246 | build: 247 | context: . 248 | dockerfile: Dockerfile.fastapi 249 | ports: 250 | - "8000:8000" 251 | env_file: 252 | - .env 253 | volumes: 254 | - .:/app 255 | ``` 256 | 257 | ## 🚀 Deployment Options 258 | 259 | ### 1. Local Development 260 | ```bash 261 | # Streamlit 262 | streamlit run streamlit_app.py 263 | 264 | # FastAPI 265 | uvicorn main:app --reload 266 | ``` 267 | 268 | ### 2. 
Docker Single Container 269 | ```bash 270 | # Streamlit 271 | docker run -p 8501:8501 --env-file .env ai-language-assistant 272 | 273 | # FastAPI 274 | docker run -p 8000:8000 --env-file .env ai-language-assistant-fastapi 275 | ``` 276 | 277 | ### 3. Docker Compose 278 | ```bash 279 | docker-compose up -d 280 | ``` 281 | 282 | ### 4. Cloud Deployment 283 | - **Heroku**: Use `Procfile` for deployment 284 | - **AWS/GCP**: Use container services 285 | - **Streamlit Cloud**: Direct GitHub integration for Streamlit version 286 | 287 | ## 🤝 Contributing 288 | 289 | 1. Fork the repository 290 | 2. Create a feature branch (`git checkout -b feature/amazing-feature`) 291 | 3. Commit your changes (`git commit -m 'Add amazing feature'`) 292 | 4. Push to the branch (`git push origin feature/amazing-feature`) 293 | 5. Open a Pull Request 294 | 295 | ## 🔐 Environment Variables 296 | 297 | ```bash 298 | # Required 299 | OPENAI_API_KEY=your_openai_api_key_here 300 | 301 | # Optional 302 | DEBUG=True 303 | PORT=8000 304 | HOST=0.0.0.0 305 | ``` 306 | 307 | ## 🆘 Troubleshooting 308 | 309 | ### Common Issues 310 | 311 | 1. **OpenAI API Key not found** 312 | - Ensure `.env` file exists with correct API key 313 | - Check environment variable is loaded 314 | 315 | 2. **Docker build fails** 316 | - Ensure Docker is running 317 | - Check Dockerfile syntax 318 | - Verify requirements.txt exists 319 | 320 | 3. **Port already in use** 321 | - Change port in docker-compose.yml 322 | - Kill existing processes: `docker-compose down` 323 | 324 | 4. 
**Session not persisting** 325 | - FastAPI uses in-memory storage (resets on restart) 326 | - For production, consider Redis or database 327 | 328 | ## 📄 License 329 | 330 | This project is licensed under the MIT License 331 | 332 | -------------------------------------------------------------------------------- /app/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, Request, HTTPException 2 | from fastapi.templating import Jinja2Templates 3 | from fastapi.staticfiles import StaticFiles 4 | from fastapi.responses import HTMLResponse 5 | from pydantic import BaseModel 6 | import openai 7 | import os 8 | import json 9 | from typing import Dict, List, Optional 10 | import asyncio 11 | 12 | app = FastAPI(title="AI Dil Öğrenme Asistanı") 13 | 14 | # OpenAI setup - Yeni API yapısı 15 | openai.api_key = os.getenv("OPENAI_API_KEY") 16 | 17 | # Templates setup 18 | templates = Jinja2Templates(directory="templates") 19 | app.mount("/static", StaticFiles(directory="static"), name="static") 20 | 21 | # In-memory session storage (cleared on server restart) 22 | sessions: Dict[str, Dict] = {} 23 | 24 | class ChatMessage(BaseModel): 25 | message: str 26 | session_id: str 27 | 28 | class SessionData(BaseModel): 29 | messages: List[Dict] = [] 30 | detected_language: Optional[str] = None 31 | user_level: Optional[str] = None 32 | message_count: int = 0 33 | conversation_active: bool = False 34 | 35 | # Language detection and level assessment 36 | LANGUAGE_CODES = { 37 | "turkish": "tr", 38 | "english": "en", 39 | "german": "de", 40 | "french": "fr", 41 | "italian": "it" 42 | } 43 | 44 | LEVELS = ["A1", "A2", "B1", "B2", "C1", "C2"] 45 | 46 | async def detect_language_and_level(text: str) -> Dict: 47 | """Detect language and assess level from user text""" 48 | 49 | prompt = f""" 50 | Analyze this combined text from multiple messages and provide: 51 | 1. 
Language detection (turkish, english, german, french, italian) 52 | 2. CEFR level assessment (A1, A2, B1, B2, C1, C2) 53 | 54 | Combined text from user messages: "{text}" 55 | 56 | Please assess the ACTUAL level based on: 57 | - Grammar complexity and accuracy 58 | - Vocabulary range and sophistication 59 | - Sentence structure complexity 60 | - Language fluency indicators 61 | 62 | Do NOT default to A1. Assess the real level based on the text complexity. 63 | 64 | Respond in JSON format: 65 | {{ 66 | "language": "detected_language", 67 | "level": "assessed_level", 68 | "confidence": "high/medium/low" 69 | }} 70 | """ 71 | 72 | try: 73 | response = await openai.ChatCompletion.acreate( 74 | model="gpt-4", 75 | messages=[{"role": "user", "content": prompt}], 76 | temperature=0.1 77 | ) 78 | 79 | result = json.loads(response.choices[0].message.content) 80 | return result 81 | except Exception as e: 82 | # Fallback - but don't assume A1 83 | return {"language": "english", "level": "B1", "confidence": "low"} 84 | 85 | async def generate_response(user_message: str, language: str, level: str, conversation_history: List[Dict]) -> str: 86 | """Generate appropriate response based on user's language and level""" 87 | 88 | level_descriptions = { 89 | "A1": "very basic, simple sentences, present tense, common vocabulary", 90 | "A2": "basic, simple past/future, everyday topics, familiar vocabulary", 91 | "B1": "intermediate, various tenses, personal experiences, some complex vocabulary", 92 | "B2": "upper-intermediate, complex grammar, abstract topics, advanced vocabulary", 93 | "C1": "advanced, sophisticated language, nuanced expressions, professional vocabulary", 94 | "C2": "proficient, native-like fluency, complex discourse, specialized vocabulary" 95 | } 96 | 97 | history_context = "\n".join([f"{msg['role']}: {msg['content']}" for msg in conversation_history[-6:]]) 98 | 99 | prompt = f""" 100 | You are a language learning assistant. 
The user is learning {language} at {level} level. 101 | 102 | Level characteristics: {level_descriptions[level]} 103 | 104 | Recent conversation: 105 | {history_context} 106 | 107 | User's new message: "{user_message}" 108 | 109 | Respond in {language} at exactly {level} level. Be engaging, encouraging, and continue the conversation naturally. 110 | Ask follow-up questions to keep the user practicing. Gently correct major errors by modeling correct usage. 111 | 112 | Keep responses conversational and appropriate for {level} level learners. 113 | """ 114 | 115 | try: 116 | response = await openai.ChatCompletion.acreate( 117 | model="gpt-4", 118 | messages=[{"role": "user", "content": prompt}], 119 | temperature=0.7, 120 | max_tokens=200 121 | ) 122 | 123 | return response.choices[0].message.content.strip() 124 | except Exception as e: 125 | return f"Sorry, I'm having trouble responding right now. Please try again. (Error: {str(e)})" 126 | 127 | async def generate_level_suggestions(language: str, current_level: str, conversation_history: List[Dict]) -> str: 128 | """Generate suggestions for progressing to next level""" 129 | 130 | current_index = LEVELS.index(current_level) 131 | if current_index >= len(LEVELS) - 1: 132 | next_level = "C2+ (Advanced proficiency)" 133 | else: 134 | next_level = LEVELS[current_index + 1] 135 | 136 | history_text = "\n".join([msg['content'] for msg in conversation_history if msg['role'] == 'user']) 137 | 138 | prompt = f""" 139 | Based on this {language} conversation at {current_level} level, provide specific suggestions for progressing to {next_level} level. 140 | 141 | User's messages in this conversation: 142 | {history_text} 143 | 144 | Provide 3-4 specific, actionable suggestions in Turkish for improving from {current_level} to {next_level} level in {language}. 145 | Focus on grammar, vocabulary, and practice activities. 146 | 147 | Format as a friendly, encouraging message in Turkish. 
148 | """ 149 | 150 | try: 151 | response = await openai.ChatCompletion.acreate( 152 | model="gpt-4", 153 | messages=[{"role": "user", "content": prompt}], 154 | temperature=0.7, 155 | max_tokens=300 156 | ) 157 | 158 | return response.choices[0].message.content.strip() 159 | except Exception as e: 160 | return f"Bir sonraki seviyeye geçmek için daha fazla pratik yapmanızı öneririm. ({current_level} → {next_level})" 161 | 162 | @app.get("/", response_class=HTMLResponse) 163 | async def home(request: Request): 164 | return templates.TemplateResponse("index.html", {"request": request}) 165 | 166 | @app.post("/chat") 167 | async def chat(message_data: ChatMessage): 168 | session_id = message_data.session_id 169 | user_message = message_data.message.strip() 170 | 171 | if not user_message: 172 | raise HTTPException(status_code=400, detail="Message cannot be empty") 173 | 174 | # Initialize session if not exists 175 | if session_id not in sessions: 176 | sessions[session_id] = SessionData().dict() 177 | 178 | session = sessions[session_id] 179 | 180 | # Handle end conversation command 181 | if user_message.lower() in ["konuşmayı bitir", "end conversation", "finish", "bitir"]: 182 | if session["conversation_active"] and len(session["messages"]) > 0: 183 | suggestions = await generate_level_suggestions( 184 | session["detected_language"], 185 | session["user_level"], 186 | session["messages"] 187 | ) 188 | 189 | # Reset session for new conversation 190 | sessions[session_id] = SessionData().dict() 191 | 192 | return { 193 | "response": f"Konuşma tamamlandı! 🎉\n\n{suggestions}\n\nYeni bir konuşma başlatmak için mesaj yazabilirsiniz.", 194 | "suggestions": True, 195 | "detected_language": None, 196 | "user_level": None 197 | } 198 | else: 199 | return {"response": "Henüz aktif bir konuşma yok. 
Bir şeyler yazarak başlayabilirsiniz!", "suggestions": False} 200 | 201 | # Add user message to history 202 | session["messages"].append({"role": "user", "content": user_message}) 203 | session["message_count"] += 1 204 | 205 | # Detect language and level after exactly 3 messages 206 | if session["message_count"] == 3 and not session["detected_language"]: 207 | all_user_messages = " ".join([msg["content"] for msg in session["messages"] if msg["role"] == "user"]) 208 | detection_result = await detect_language_and_level(all_user_messages) 209 | 210 | session["detected_language"] = detection_result["language"] 211 | session["user_level"] = detection_result["level"] 212 | session["conversation_active"] = True 213 | 214 | # Generate response 215 | if session["detected_language"] and session["user_level"]: 216 | bot_response = await generate_response( 217 | user_message, 218 | session["detected_language"], 219 | session["user_level"], 220 | session["messages"] 221 | ) 222 | 223 | # Add bot response to history 224 | session["messages"].append({"role": "assistant", "content": bot_response}) 225 | 226 | # Update session 227 | sessions[session_id] = session 228 | 229 | return { 230 | "response": bot_response, 231 | "detected_language": session["detected_language"], 232 | "user_level": session["user_level"], 233 | "message_count": session["message_count"], 234 | "suggestions": False 235 | } 236 | else: 237 | # Before detection is complete - respond naturally without level assessment 238 | if session["message_count"] < 3: 239 | # Generate a natural response in detected language or universally 240 | try: 241 | simple_prompt = f""" 242 | Respond naturally and encouragingly to this message: "{user_message}" 243 | 244 | Be friendly and ask a follow-up question to continue the conversation. 245 | Respond in the same language as the user's message. 246 | Keep it conversational and engaging. 
247 | """ 248 | 249 | response = await openai.ChatCompletion.acreate( 250 | model="gpt-4", 251 | messages=[{"role": "user", "content": simple_prompt}], 252 | temperature=0.7, 253 | max_tokens=150 254 | ) 255 | 256 | bot_response = response.choices[0].message.content.strip() 257 | session["messages"].append({"role": "assistant", "content": bot_response}) 258 | sessions[session_id] = session 259 | 260 | return { 261 | "response": bot_response, 262 | "detected_language": None, 263 | "user_level": None, 264 | "message_count": session["message_count"], 265 | "suggestions": False 266 | } 267 | except Exception as e: 268 | bot_response = "Thank you for your message! Please continue writing so I can better understand your language level." 269 | 270 | else: 271 | # After 3 messages, still detecting 272 | bot_response = "I'm analyzing your language level... Please continue writing!" 273 | 274 | session["messages"].append({"role": "assistant", "content": bot_response}) 275 | sessions[session_id] = session 276 | return { 277 | "response": bot_response, 278 | "detected_language": None, 279 | "user_level": None, 280 | "message_count": session["message_count"], 281 | "suggestions": False 282 | } 283 | 284 | @app.get("/new-session") 285 | async def new_session(): 286 | """Create a new session ID""" 287 | import uuid 288 | session_id = str(uuid.uuid4()) 289 | return {"session_id": session_id} 290 | 291 | if __name__ == "__main__": 292 | import uvicorn 293 | uvicorn.run(app, host="0.0.0.0", port=8000) -------------------------------------------------------------------------------- /app/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | AI Dil Öğrenme Asistanı 7 | 282 | 283 | 284 |
285 |
286 |

🤖 AI Language Learning Assistant

287 |

Learn Turkish, English, German, French, Italian

288 |
289 | Language: Detecting... 290 | Level: Assessing... 291 | 292 |
293 |
294 | 295 |
296 |
297 |
298 |
299 |

🇹🇷 Türkçe

300 |

Merhaba! Hangi dilde konuşmak istiyorsun? Sadece yazmaya başla, 3 mesaj sonra seviyeni tespit edeceğim.

301 |
302 | 303 |
304 |

🇬🇧 English

305 |

Hello! Which language would you like to practice? Just start writing, I'll detect your level after 3 messages.

306 |
307 | 308 |
309 |

🇩🇪 Deutsch

310 |

Hallo! In welcher Sprache möchtest du üben? Fang einfach an zu schreiben, ich erkenne dein Niveau nach 3 Nachrichten.

311 |
312 | 313 |
314 |

🇫🇷 Français

315 |

Salut ! Dans quelle langue veux-tu pratiquer ? Commence à écrire, je détecterai ton niveau après 3 messages.

316 |
317 | 318 |
319 |

🇮🇹 Italiano

320 |

Ciao! In che lingua vuoi praticare? Inizia a scrivere, rileverò il tuo livello dopo 3 messaggi.

321 |
322 |
323 |
324 | 325 |
326 |
327 | 328 | 329 | 330 |
331 |
332 |
333 | 334 |
335 |
336 | 341 |
342 | 345 |
346 |
347 | 348 | 515 | 516 | -------------------------------------------------------------------------------- /frontend/streamlit_app.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | import openai 3 | import os 4 | import json 5 | import uuid 6 | from typing import Dict, List, Optional 7 | import asyncio 8 | import requests 9 | from datetime import datetime 10 | 11 | # Load environment variables from .env file 12 | try: 13 | from dotenv import load_dotenv 14 | load_dotenv() 15 | except ImportError: 16 | st.warning("python-dotenv not installed. Install with: pip install python-dotenv") 17 | 18 | # Streamlit page config 19 | st.set_page_config( 20 | page_title="🤖 AI Language Learning Assistant", 21 | page_icon="🗣️", 22 | layout="wide", 23 | initial_sidebar_state="expanded" 24 | ) 25 | 26 | # Custom CSS 27 | st.markdown(""" 28 | 99 | """, unsafe_allow_html=True) 100 | 101 | # Initialize OpenAI 102 | openai.api_key = os.getenv("OPENAI_API_KEY") 103 | 104 | # Language mappings 105 | LANGUAGE_CODES = { 106 | "turkish": "tr", 107 | "english": "en", 108 | "german": "de", 109 | "french": "fr", 110 | "italian": "it" 111 | } 112 | 113 | LANGUAGE_NAMES = { 114 | 'turkish': 'Türkçe 🇹🇷', 115 | 'english': 'English 🇬🇧', 116 | 'german': 'Deutsch 🇩🇪', 117 | 'french': 'Français 🇫🇷', 118 | 'italian': 'Italiano 🇮🇹' 119 | } 120 | 121 | LEVELS = ["A1", "A2", "B1", "B2", "C1", "C2"] 122 | 123 | # Initialize session state 124 | if 'session_id' not in st.session_state: 125 | st.session_state.session_id = str(uuid.uuid4()) 126 | st.session_state.messages = [] 127 | st.session_state.detected_language = None 128 | st.session_state.user_level = None 129 | st.session_state.message_count = 0 130 | st.session_state.conversation_active = False 131 | 132 | async def detect_language_and_level(text: str) -> Dict: 133 | """Detect language and assess level from user text""" 134 | 135 | prompt = f""" 136 | Analyze this combined text from 
multiple messages and provide: 137 | 1. Language detection (turkish, english, german, french, italian) 138 | 2. CEFR level assessment (A1, A2, B1, B2, C1, C2) 139 | 140 | Combined text from user messages: "{text}" 141 | 142 | Please assess the ACTUAL level based on: 143 | - Grammar complexity and accuracy 144 | - Vocabulary range and sophistication 145 | - Sentence structure complexity 146 | - Language fluency indicators 147 | 148 | Do NOT default to A1. Assess the real level based on the text complexity. 149 | 150 | Respond in JSON format: 151 | {{ 152 | "language": "detected_language", 153 | "level": "assessed_level", 154 | "confidence": "high/medium/low" 155 | }} 156 | """ 157 | 158 | try: 159 | response = await openai.ChatCompletion.acreate( 160 | model="gpt-4", 161 | messages=[{"role": "user", "content": prompt}], 162 | temperature=0.1 163 | ) 164 | 165 | result = json.loads(response.choices[0].message.content) 166 | return result 167 | except Exception as e: 168 | return {"language": "english", "level": "B1", "confidence": "low"} 169 | 170 | async def generate_response(user_message: str, language: str, level: str, conversation_history: List[Dict]) -> str: 171 | """Generate appropriate response based on user's language and level""" 172 | 173 | level_descriptions = { 174 | "A1": "very basic, simple sentences, present tense, common vocabulary", 175 | "A2": "basic, simple past/future, everyday topics, familiar vocabulary", 176 | "B1": "intermediate, various tenses, personal experiences, some complex vocabulary", 177 | "B2": "upper-intermediate, complex grammar, abstract topics, advanced vocabulary", 178 | "C1": "advanced, sophisticated language, nuanced expressions, professional vocabulary", 179 | "C2": "proficient, native-like fluency, complex discourse, specialized vocabulary" 180 | } 181 | 182 | history_context = "\n".join([f"{msg['role']}: {msg['content']}" for msg in conversation_history[-6:]]) 183 | 184 | prompt = f""" 185 | You are a language learning 
assistant. The user is learning {language} at {level} level. 186 | 187 | Level characteristics: {level_descriptions[level]} 188 | 189 | Recent conversation: 190 | {history_context} 191 | 192 | User's new message: "{user_message}" 193 | 194 | Respond in {language} at exactly {level} level. Be engaging, encouraging, and continue the conversation naturally. 195 | Ask follow-up questions to keep the user practicing. Gently correct major errors by modeling correct usage. 196 | 197 | Keep responses conversational and appropriate for {level} level learners. 198 | """ 199 | 200 | try: 201 | response = await openai.ChatCompletion.acreate( 202 | model="gpt-4", 203 | messages=[{"role": "user", "content": prompt}], 204 | temperature=0.7, 205 | max_tokens=200 206 | ) 207 | 208 | return response.choices[0].message.content.strip() 209 | except Exception as e: 210 | return f"Sorry, I'm having trouble responding right now. Please try again." 211 | 212 | async def generate_natural_response(user_message: str) -> str: 213 | """Generate natural response before level detection""" 214 | 215 | simple_prompt = f""" 216 | Respond naturally and encouragingly to this message: "{user_message}" 217 | 218 | Be friendly and ask a follow-up question to continue the conversation. 219 | Respond in the same language as the user's message. 220 | Keep it conversational and engaging. 221 | """ 222 | 223 | try: 224 | response = await openai.ChatCompletion.acreate( 225 | model="gpt-4", 226 | messages=[{"role": "user", "content": simple_prompt}], 227 | temperature=0.7, 228 | max_tokens=150 229 | ) 230 | 231 | return response.choices[0].message.content.strip() 232 | except Exception as e: 233 | return "Thank you for your message! Please continue writing so I can better understand your language level." 
async def generate_level_suggestions(language: str, current_level: str, conversation_history: List[Dict]) -> str:
    """Suggest (in Turkish) how to progress from *current_level* to the next CEFR level.

    Args:
        language: Target language of the conversation.
        current_level: Assessed CEFR level, expected to be one of LEVELS.
        conversation_history: Full transcript; only user messages are used.

    Returns:
        An encouraging Turkish message with 3-4 concrete suggestions, or a
        short canned fallback if the OpenAI call fails.
    """
    # Fix: the original called LEVELS.index(current_level) unguarded (and
    # outside the try block below), so any unexpected level value would
    # raise ValueError and crash the "End Conversation" flow. Treat unknown
    # levels as the lowest level instead.
    try:
        current_index = LEVELS.index(current_level)
    except ValueError:
        current_index = 0
    if current_index >= len(LEVELS) - 1:
        next_level = "C2+ (Advanced proficiency)"
    else:
        next_level = LEVELS[current_index + 1]

    # Only the learner's own messages are relevant for assessing progress.
    history_text = "\n".join(
        msg["content"] for msg in conversation_history if msg["role"] == "user"
    )

    prompt = f"""
Based on this {language} conversation at {current_level} level, provide specific suggestions for progressing to {next_level} level.

User's messages in this conversation:
{history_text}

Provide 3-4 specific, actionable suggestions in Turkish for improving from {current_level} to {next_level} level in {language}.
Focus on grammar, vocabulary, and practice activities.

Format as a friendly, encouraging message in Turkish.
"""

    try:
        response = await openai.ChatCompletion.acreate(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
            max_tokens=300,
        )
        return response.choices[0].message.content.strip()
    except Exception:
        # Best-effort fallback so ending a conversation never crashes.
        return f"Bir sonraki seviyeye geçmek için daha fazla pratik yapmanızı öneririm. ({current_level} → {next_level})"

# Main App
#
# NOTE(review): the HTML fragments below were reconstructed from a source
# dump in which the markup tags had been stripped. The visible text content
# is preserved verbatim, but the tag structure and class names are assumed
# to match the CSS injected earlier in this file — confirm against the
# original streamlit_app.py before relying on exact rendering.


def _render_sidebar() -> None:
    """Sidebar: session status boxes plus the end/reset controls."""
    with st.sidebar:
        st.markdown("### 📊 Session Status")

        # Language status box — shows the detected language once known.
        if st.session_state.detected_language:
            language_display = LANGUAGE_NAMES.get(
                st.session_state.detected_language, st.session_state.detected_language
            )
            st.markdown(
                f"""
                <div class="status-box">
                    <b>Language:</b><br>
                    {language_display}
                </div>
                """,
                unsafe_allow_html=True,
            )
        else:
            st.markdown(
                """
                <div class="status-box">
                    <b>Language:</b><br>
                    Detecting...
                </div>
                """,
                unsafe_allow_html=True,
            )

        st.markdown("<br>", unsafe_allow_html=True)

        # Level status box — shows the assessed CEFR level once known.
        if st.session_state.user_level:
            st.markdown(
                f"""
                <div class="status-box">
                    <b>Level:</b><br>
                    {st.session_state.user_level}
                </div>
                """,
                unsafe_allow_html=True,
            )
        else:
            st.markdown(
                """
                <div class="status-box">
                    <b>Level:</b><br>
                    Assessing...
                </div>
                """,
                unsafe_allow_html=True,
            )

        st.markdown("<br>", unsafe_allow_html=True)
        st.markdown(f"**Messages:** {st.session_state.message_count}")
        st.markdown(f"**Session ID:** {st.session_state.session_id[:8]}...")

        st.markdown("---")

        # End conversation: generate progression suggestions, then reset the
        # detection state. The transcript is kept so the suggestions message
        # stays visible.
        if st.button("🔚 End Conversation", type="secondary", use_container_width=True):
            if st.session_state.conversation_active and len(st.session_state.messages) > 0:
                suggestions = asyncio.run(generate_level_suggestions(
                    st.session_state.detected_language,
                    st.session_state.user_level,
                    st.session_state.messages,
                ))

                st.session_state.messages.append({
                    "role": "suggestions",
                    "content": (
                        "Konuşma tamamlandı! 🎉\n\n"
                        f"{suggestions}\n\n"
                        "Yeni bir konuşma başlatmak için mesaj yazabilirsiniz."
                    ),
                })

                # Reset per-session detection state for the next conversation.
                st.session_state.session_id = str(uuid.uuid4())
                st.session_state.detected_language = None
                st.session_state.user_level = None
                st.session_state.message_count = 0
                st.session_state.conversation_active = False

                st.rerun()
            else:
                st.warning("Henüz aktif bir konuşma yok!")

        # Full reset: unlike "End Conversation", this also clears the transcript.
        if st.button("🔄 New Session", type="primary", use_container_width=True):
            st.session_state.session_id = str(uuid.uuid4())
            st.session_state.messages = []
            st.session_state.detected_language = None
            st.session_state.user_level = None
            st.session_state.message_count = 0
            st.session_state.conversation_active = False
            st.rerun()


def _render_welcome() -> None:
    """Per-language greeting cards shown while the transcript is still empty."""
    st.markdown('<div class="welcome-section">', unsafe_allow_html=True)
    st.markdown("### 🌍 Welcome! Choose your language and start practicing:")

    languages = [
        ("🇹🇷 Türkçe", "Merhaba! Hangi dilde konuşmak istiyorsun? Sadece yazmaya başla, 3 mesaj sonra seviyeni tespit edeceğim."),
        ("🇬🇧 English", "Hello! Which language would you like to practice? Just start writing, I'll detect your level after 3 messages."),
        ("🇩🇪 Deutsch", "Hallo! In welcher Sprache möchtest du üben? Fang einfach an zu schreiben, ich erkenne dein Niveau nach 3 Nachrichten."),
        ("🇫🇷 Français", "Salut ! Dans quelle langue veux-tu pratiquer ? Commence à écrire, je détecterai ton niveau après 3 messages."),
        ("🇮🇹 Italiano", "Ciao! In che lingua vuoi praticare? Inizia a scrivere, rileverò il tuo livello dopo 3 messaggi."),
    ]

    for lang_name, message in languages:
        st.markdown(
            f"""
            <div class="language-card">
                <b>{lang_name}</b><br>
                {message}
            </div>
            """,
            unsafe_allow_html=True,
        )
    st.markdown('</div>', unsafe_allow_html=True)


def _render_conversation() -> None:
    """Render the transcript, styling user / assistant / suggestions roles differently."""
    st.markdown("### 💬 Conversation")

    for msg in st.session_state.messages:
        if msg["role"] == "user":
            st.markdown(
                f"""
                <div class="user-message">
                    <b>You:</b> {msg["content"]}
                </div>
                """,
                unsafe_allow_html=True,
            )
        elif msg["role"] == "assistant":
            st.markdown(
                f"""
                <div class="bot-message">
                    <b>AI Assistant:</b> {msg["content"]}
                </div>
                """,
                unsafe_allow_html=True,
            )
        elif msg["role"] == "suggestions":
            st.markdown(
                f"""
                <div class="suggestions-message">
                    {msg["content"]}
                </div>
                """,
                unsafe_allow_html=True,
            )


def _handle_user_message(user_input: str) -> None:
    """Append the user's message, run detection/response, then rerun the app.

    Args:
        user_input: The stripped, non-empty message text.
    """
    st.session_state.messages.append({"role": "user", "content": user_input})
    st.session_state.message_count += 1

    with st.spinner("🤔 Thinking..."):
        # Language + level detection runs on exactly the 3rd user message
        # (and only once per session).
        if st.session_state.message_count == 3 and not st.session_state.detected_language:
            all_user_messages = " ".join(
                msg["content"] for msg in st.session_state.messages if msg["role"] == "user"
            )
            detection_result = asyncio.run(detect_language_and_level(all_user_messages))

            st.session_state.detected_language = detection_result["language"]
            st.session_state.user_level = detection_result["level"]
            st.session_state.conversation_active = True

        if st.session_state.detected_language and st.session_state.user_level:
            # Level is known: answer in the target language at that level.
            bot_response = asyncio.run(generate_response(
                user_input,
                st.session_state.detected_language,
                st.session_state.user_level,
                st.session_state.messages,
            ))
        elif st.session_state.message_count < 3:
            # Detection has not been attempted yet: just chat naturally.
            bot_response = asyncio.run(generate_natural_response(user_input))
        else:
            # Detection attempted but incomplete (e.g. API failure).
            bot_response = "I'm analyzing your language level... Please continue writing!"

    st.session_state.messages.append({"role": "assistant", "content": bot_response})

    st.rerun()


def _render_tips_column() -> None:
    """Static right-hand column: learning tips, supported languages, CEFR legend."""
    st.markdown("### 📚 Learning Tips")

    tips = [
        "💡 **Tip 1:** Write naturally - don't worry about making mistakes!",
        "🎯 **Tip 2:** The system analyzes your level after 3 messages",
        "🔄 **Tip 3:** Practice regularly for best results",
        "📈 **Tip 4:** Challenge yourself with complex topics",
        "🗣️ **Tip 5:** Use the language you want to practice",
    ]
    for tip in tips:
        st.markdown(tip)

    st.markdown("---")
    st.markdown("### 🌟 Supported Languages")
    # Only the display names are shown; the language codes are not needed here.
    for lang_name in LANGUAGE_NAMES.values():
        st.markdown(f"• {lang_name}")

    st.markdown("---")
    st.markdown("### 📊 CEFR Levels")
    level_descriptions = {
        "A1": "Beginner",
        "A2": "Elementary",
        "B1": "Intermediate",
        "B2": "Upper Intermediate",
        "C1": "Advanced",
        "C2": "Proficient",
    }
    for level, desc in level_descriptions.items():
        st.markdown(f"• **{level}**: {desc}")


def main():
    """Render the Streamlit page and drive the chat loop.

    Decomposed from a single ~225-line function into per-section helpers;
    the entry-point signature and behavior are unchanged.
    """
    # Header banner.
    st.markdown(
        """
        <div class="main-header">
            <h1>🤖 AI Language Learning Assistant</h1>
            <p>Learn Turkish, English, German, French, Italian with AI</p>
        </div>
        """,
        unsafe_allow_html=True,
    )

    _render_sidebar()

    # Main content area: chat on the left, tips on the right.
    col1, col2 = st.columns([2, 1])

    with col1:
        if not st.session_state.messages:
            _render_welcome()
        else:
            _render_conversation()

        # Message input + actions.
        st.markdown("---")
        user_input = st.text_area(
            "✍️ Write your message in any language:",
            height=100,
            placeholder="Start typing to begin your language learning journey...",
        )

        col_send, col_clear = st.columns([1, 1])
        with col_send:
            send_button = st.button("📤 Send Message", type="primary", use_container_width=True)
        with col_clear:
            clear_button = st.button("🗑️ Clear Input", use_container_width=True)

        if clear_button:
            # NOTE(review): the text_area has no key, so a bare rerun does not
            # actually clear its contents — confirm whether this is intended.
            st.rerun()

        if send_button and user_input.strip():
            _handle_user_message(user_input.strip())

    with col2:
        _render_tips_column()


if __name__ == "__main__":
    # Fail fast with a clear UI message when the OpenAI key is missing.
    if not os.getenv("OPENAI_API_KEY"):
        st.error("🚨 OpenAI API key not found! Please set the OPENAI_API_KEY environment variable.")
        st.info("💡 Create a .env file in your project directory with: OPENAI_API_KEY=your_api_key_here")
        st.stop()

    main()