├── cookbook ├── starter-apps │ ├── playground │ │ ├── .gitignore │ │ ├── requirements.txt │ │ ├── utils │ │ │ └── models.py │ │ ├── readme.md │ │ └── pages │ │ │ ├── 6_🎴_Flash Card.py │ │ │ ├── 1_🧠_Generate_Questions.py │ │ │ ├── 3_🎥_YouTube_to_Questions.py │ │ │ ├── 4_🔮_Doubt Solver.py │ │ │ ├── 7_PYQ to Pre Tool.py │ │ │ ├── 5_📝_Lesson Plan.py │ │ │ └── 2 📄_Generate From Text-PDF-URL.py │ ├── Educhain_pedagogy │ │ ├── Backend │ │ │ ├── .env_sample │ │ │ ├── requirements.txt │ │ │ ├── .gitignore │ │ │ ├── pyproject.toml │ │ │ ├── app │ │ │ │ ├── models │ │ │ │ │ └── pedagogy_models.py │ │ │ │ ├── api │ │ │ │ │ └── routes.py │ │ │ │ └── services │ │ │ │ │ └── educhain_services.py │ │ │ └── main.py │ │ ├── frontend │ │ │ ├── jsconfig.json │ │ │ ├── postcss.config.mjs │ │ │ ├── next.config.mjs │ │ │ ├── src │ │ │ │ ├── app │ │ │ │ │ ├── favicon.ico │ │ │ │ │ ├── layout.js │ │ │ │ │ ├── page.js │ │ │ │ │ └── globals.css │ │ │ │ ├── pages │ │ │ │ │ ├── _app.jsx │ │ │ │ │ └── pedagogy │ │ │ │ │ │ └── [name].jsx │ │ │ │ └── components │ │ │ │ │ ├── PedagogyCard.jsx │ │ │ │ │ └── ParamForm.jsx │ │ │ ├── public │ │ │ │ ├── vercel.svg │ │ │ │ ├── window.svg │ │ │ │ ├── file.svg │ │ │ │ ├── globe.svg │ │ │ │ └── next.svg │ │ │ ├── eslint.config.mjs │ │ │ ├── components.json │ │ │ ├── .gitignore │ │ │ └── package.json │ │ └── README.md │ ├── Origami_tutorial_generator │ │ ├── requirements.txt │ │ ├── solver.py │ │ ├── README.md │ │ └── app.py │ ├── Consultancy-Prep │ │ ├── requirements.txt │ │ ├── README.md │ │ └── c-app.py │ ├── Jee_problem_solver_and_analyzer │ │ ├── requirements.txt │ │ ├── README.md │ │ └── app.py │ ├── multilingual_chatbot │ │ ├── requirements.txt │ │ ├── readme.md │ │ └── app.py │ ├── AI CourtRoom │ │ ├── requirements.txt │ │ ├── README.md │ │ └── app.py │ └── flashcard_generator │ │ ├── requirements.txt │ │ └── readme.md ├── readme.md └── use-cases │ ├── Long_PDFs_to_Quiz.ipynb │ └── generate_quiz_on_latest_news.ipynb ├── educhain ├── utils │ ├── __init__.py │ ├── loaders.py │ └── output_formatter.py ├── core │ ├── __init__.py │ ├── config.py │ └── educhain.py ├── engines │ └── __init__.py ├── __init__.py └── models │ ├── __init__.py │ └── base_models.py ├── images ├── new.png ├── new1.png ├── educhain_diagram.png ├── educhain.svg └── educhain-comparison-svg.svg ├── .gitattributes ├── docs ├── getting-started │ ├── installation.md │ └── quick-start.md ├── features │ ├── mcq_generation.md │ └── mcq_from_data.md └── index.md ├── LICENSE ├── CONTRIBUTING.md ├── setup.py ├── archive ├── utils.py ├── models.py └── content_engine.py └── .gitignore /cookbook/starter-apps/playground/.gitignore: -------------------------------------------------------------------------------- 1 | .env -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/Backend/.env_sample: -------------------------------------------------------------------------------- 1 | OPENAI_API_KEY= -------------------------------------------------------------------------------- /educhain/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .loaders import PdfFileLoader, UrlLoader 2 | -------------------------------------------------------------------------------- /images/new.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/satvik314/educhain/HEAD/images/new.png 
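The `educhain/utils/__init__.py` entry above re-exports `PdfFileLoader` and `UrlLoader` (their `load_data` methods appear in `educhain/utils/loaders.py` further down in this dump). A minimal usage sketch based on those methods — the file path and URL are placeholders:

```python
from educhain.utils import PdfFileLoader, UrlLoader

# Both loaders return cleaned plain text (whitespace collapsed by clean_string).
pdf_text = PdfFileLoader().load_data("path/to/document.pdf")      # placeholder path
web_text = UrlLoader().load_data("https://example.com/article")   # placeholder URL

print(pdf_text[:200])
print(web_text[:200])
```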
-------------------------------------------------------------------------------- /images/new1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/satvik314/educhain/HEAD/images/new1.png -------------------------------------------------------------------------------- /educhain/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .educhain import Educhain 2 | from .config import LLMConfig -------------------------------------------------------------------------------- /educhain/engines/__init__.py: -------------------------------------------------------------------------------- 1 | from .qna_engine import QnAEngine 2 | from .content_engine import ContentEngine -------------------------------------------------------------------------------- /images/educhain_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/satvik314/educhain/HEAD/images/educhain_diagram.png -------------------------------------------------------------------------------- /cookbook/starter-apps/Origami_tutorial_generator/requirements.txt: -------------------------------------------------------------------------------- 1 | openai 2 | educhain 3 | langchain-openai 4 | streamlit -------------------------------------------------------------------------------- /cookbook/starter-apps/Consultancy-Prep/requirements.txt: -------------------------------------------------------------------------------- 1 | streamlit>=1.28.0 2 | educhain>=0.1.7 3 | langchain-google-genai>=0.0.7 4 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Jee_problem_solver_and_analyzer/requirements.txt: -------------------------------------------------------------------------------- 1 | streamlit 2 | openai 3 | educhain 4 | Pillow 5 | langchain_openai -------------------------------------------------------------------------------- /cookbook/starter-apps/multilingual_chatbot/requirements.txt: -------------------------------------------------------------------------------- 1 | streamlit>=1.30.0 2 | langchain>=0.1.0 3 | langchain-openai>=0.0.5 4 | openai>=1.10.0 5 | -------------------------------------------------------------------------------- /cookbook/starter-apps/AI CourtRoom/requirements.txt: -------------------------------------------------------------------------------- 1 | streamlit 2 | educhain 3 | cerebras-cloud-sdk 4 | langchain-cerebras 5 | python-dotenv 6 | requests 7 | -------------------------------------------------------------------------------- /educhain/__init__.py: -------------------------------------------------------------------------------- 1 | from educhain.core.educhain import Educhain 2 | from educhain.core.config import LLMConfig 3 | 4 | __all__ = ['Educhain', 'LLMConfig'] -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/jsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "paths": { 4 | "@/*": ["./src/*"] 5 | } 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/postcss.config.mjs: -------------------------------------------------------------------------------- 1 | const config = { 2 | plugins: ["@tailwindcss/postcss"], 3 | }; 4 | 5 | 
export default config; 6 | -------------------------------------------------------------------------------- /cookbook/starter-apps/flashcard_generator/requirements.txt: -------------------------------------------------------------------------------- 1 | streamlit>=1.30.0 2 | pydantic>=2.5.0 3 | langchain>=0.1.0 4 | langchain-openai>=0.0.5 5 | openai>=1.10.0 6 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/Backend/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | uvicorn 3 | pydantic 4 | dotenv 5 | git+https://github.com/satvik314/educhain.git@ai-dev 6 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/next.config.mjs: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = {}; 3 | 4 | export default nextConfig; 5 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/src/app/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/satvik314/educhain/HEAD/cookbook/starter-apps/Educhain_pedagogy/frontend/src/app/favicon.ico -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/public/vercel.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cookbook/starter-apps/playground/requirements.txt: -------------------------------------------------------------------------------- 1 | PyPDF2>=3.0.1 2 | educhain>=0.1.29 3 | langchain-google-genai>=0.0.8 4 | google-generativeai>=0.3.2 5 | python-dotenv>=1.0.1 6 | streamlit>=1.34.0 7 | fpdf==1.7.2 -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/src/pages/_app.jsx: -------------------------------------------------------------------------------- 1 | import "../app/globals.css"; 2 | 3 | export default function MyApp({ Component, pageProps }) { 4 | return ; 5 | } 6 | 7 | 8 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/Backend/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore PyCharm project files 2 | .idea/ 3 | 4 | # Ignore Python cache files 5 | __pycache__/ 6 | 7 | # Ignore environment files 8 | .env 9 | *.env 10 | 11 | # Ignore virtual environment if any 12 | .venv/ 13 | 14 | # Ignore OS files 15 | .DS_Store 16 | 17 | # Ignore lock files (optional) 18 | *.lock 19 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/public/window.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/public/file.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /educhain/models/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .base_models import BaseQuestion, QuestionList 2 | from .qna_models import (MultipleChoiceQuestion, ShortAnswerQuestion, 3 | TrueFalseQuestion, FillInBlankQuestion, MCQList, 4 | ShortAnswerQuestionList, TrueFalseQuestionList, 5 | FillInBlankQuestionList, Option, MCQMath, MCQListMath) 6 | from .content_models import ContentElement, SubTopic, MainTopic, LessonPlan,Flashcard, FlashcardSet 7 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/Backend/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "backend" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.13" 7 | dependencies = [ 8 | "dotenv>=0.9.9", 9 | "educhain", 10 | "fastapi>=0.116.1", 11 | "pydantic>=2.11.7", 12 | "uvicorn>=0.35.0", 13 | ] 14 | 15 | [tool.uv.sources] 16 | educhain = { git = "https://github.com/satvik314/educhain.git", rev = "ai-dev" } 17 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/eslint.config.mjs: -------------------------------------------------------------------------------- 1 | import { dirname } from "path"; 2 | import { fileURLToPath } from "url"; 3 | import { FlatCompat } from "@eslint/eslintrc"; 4 | 5 | const __filename = fileURLToPath(import.meta.url); 6 | const __dirname = dirname(__filename); 7 | 8 | const compat = new FlatCompat({ 9 | baseDirectory: __dirname, 10 | }); 11 | 12 | const eslintConfig = [...compat.extends("next/core-web-vitals")]; 13 | 14 | export default eslintConfig; 15 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/components.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://ui.shadcn.com/schema.json", 3 | "style": "new-york", 4 | "rsc": true, 5 | "tsx": false, 6 | "tailwind": { 7 | "config": "", 8 | "css": "src/app/globals.css", 9 | "baseColor": "neutral", 10 | "cssVariables": true, 11 | "prefix": "" 12 | }, 13 | "aliases": { 14 | "components": "@/components", 15 | "utils": "@/lib/utils", 16 | "ui": "@/components/ui", 17 | "lib": "@/lib", 18 | "hooks": "@/hooks" 19 | }, 20 | "iconLibrary": "lucide" 21 | } -------------------------------------------------------------------------------- /cookbook/starter-apps/playground/utils/models.py: -------------------------------------------------------------------------------- 1 | from educhain import Educhain, LLMConfig 2 | import google.generativeai as genai 3 | from langchain_google_genai import ChatGoogleGenerativeAI 4 | import os 5 | import dotenv 6 | dotenv.load_dotenv() 7 | 8 | def client_model(): 9 | GOOGLE_API_KEY = os.getenv("GEMINI_KEY") 10 | genai.configure(api_key=GOOGLE_API_KEY) 11 | gemini_flash = ChatGoogleGenerativeAI( 12 | model="gemini-2.0-flash", 13 | google_api_key=GOOGLE_API_KEY 14 | ) 15 | flash_config = LLMConfig(custom_model=gemini_flash) 16 | return Educhain(flash_config) -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/Backend/app/models/pedagogy_models.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | from typing import Dict, Any, Optional 3 | 
4 | class ContentRequest(BaseModel): 5 | topic: str 6 | pedagogy: str 7 | params: Dict[str, Any] = {} 8 | 9 | class ContentResponse(BaseModel): 10 | pedagogy: str 11 | topic: str 12 | content: Any 13 | 14 | class PedagogyInfo(BaseModel): 15 | description: str 16 | parameters: Dict[str, str] 17 | 18 | class BloomsTaxonomyParams(BaseModel): 19 | grade_level: str = "High School" 20 | target_level: str = "Intermediate" 21 | 22 | 23 | -------------------------------------------------------------------------------- /educhain/models/base_models.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, Field 2 | from typing import List, Optional 3 | 4 | class BaseQuestion(BaseModel): 5 | question: str 6 | answer: str 7 | explanation: Optional[str] = None 8 | 9 | def show(self): 10 | print(f"Question: {self.question}") 11 | print(f"Answer: {self.answer}") 12 | if self.explanation: 13 | print(f"Explanation: {self.explanation}") 14 | print() 15 | 16 | class QuestionList(BaseModel): 17 | questions: List[BaseQuestion] 18 | 19 | def show(self): 20 | for i, question in enumerate(self.questions, 1): 21 | print(f"Question {i}:") 22 | question.show() -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.* 7 | .yarn/* 8 | !.yarn/patches 9 | !.yarn/plugins 10 | !.yarn/releases 11 | !.yarn/versions 12 | 13 | # testing 14 | /coverage 15 | 16 | # next.js 17 | /.next/ 18 | /out/ 19 | 20 | # production 21 | /build 22 | 23 | # misc 24 | .DS_Store 25 | *.pem 26 | 27 | # debug 28 | npm-debug.log* 29 | yarn-debug.log* 30 | yarn-error.log* 31 | .pnpm-debug.log* 32 | 33 | # env files (can opt-in for committing if needed) 34 | .env* 35 | 36 | # vercel 37 | .vercel 38 | 39 | # typescript 40 | *.tsbuildinfo 41 | next-env.d.ts 42 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # tells git to handle line endings automatically for all files 2 | * text=auto 3 | 4 | # prevents jupyter notebooks from being counted in GitHub language stats 5 | *.ipynb linguist-detectable=false 6 | 7 | # ensures Python files are detected correctly and have proper diff handling 8 | *.py text diff=python 9 | 10 | # handles common data files with Unix-style line endings 11 | *.json text eol=lf 12 | *.yml text eol=lf 13 | 14 | # marks documentation files for proper diff viewing 15 | *.md text diff=markdown 16 | *.sh text eol=lf 17 | *.bash text eol=lf 18 | 19 | # Exclude vendored code from language statistics 20 | vendor/* linguist-vendored 21 | third_party/* linguist-vendored 22 | 23 | # Auto-detect text files 24 | * text=auto eol=lf 25 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/Backend/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI 2 | from fastapi.middleware.cors import CORSMiddleware 3 | from app.api.routes import router as api_router 4 | import logging 5 | 6 | logging.basicConfig(level=logging.INFO) 7 | logger = logging.getLogger(__name__) 8 | 9 | app = FastAPI(title="Educhain 
Pedagogy Backend") 10 | 11 | # CORS setup 12 | app.add_middleware( 13 | CORSMiddleware, 14 | allow_origins=["*"], # Change to frontend domain in production 15 | allow_credentials=True, 16 | allow_methods=["*"], 17 | allow_headers=["*"], 18 | ) 19 | 20 | # Routes 21 | app.include_router(api_router) 22 | 23 | @app.get("/") 24 | def root(): 25 | return {"message": "Educhain backend is running 🚀"} 26 | 27 | 28 | # uv run uvicorn main:app -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/src/app/layout.js: -------------------------------------------------------------------------------- 1 | import { Geist, Geist_Mono } from "next/font/google"; 2 | import "./globals.css"; 3 | 4 | const geistSans = Geist({ 5 | variable: "--font-geist-sans", 6 | subsets: ["latin"], 7 | }); 8 | 9 | const geistMono = Geist_Mono({ 10 | variable: "--font-geist-mono", 11 | subsets: ["latin"], 12 | }); 13 | 14 | export const metadata = { 15 | title: "Create Next App", 16 | description: "Generated by create next app", 17 | }; 18 | 19 | export default function RootLayout({ children }) { 20 | return ( 21 | 22 | 25 | {children} 26 | 27 | 28 | ); 29 | } 30 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "frontend", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "next dev", 7 | "build": "next build", 8 | "start": "next start", 9 | "lint": "next lint" 10 | }, 11 | "dependencies": { 12 | "axios": "^1.11.0", 13 | "class-variance-authority": "^0.7.1", 14 | "clsx": "^2.1.1", 15 | "lucide-react": "^0.540.0", 16 | "next": "15.4.7", 17 | "react": "19.1.0", 18 | "react-dom": "19.1.0", 19 | "tailwind-merge": "^3.3.1" 20 | }, 21 | "devDependencies": { 22 | "@eslint/eslintrc": "^3", 23 | "@tailwindcss/postcss": "^4", 24 | "eslint": "^9", 25 | "eslint-config-next": "15.4.7", 26 | "tailwindcss": "^4", 27 | "tw-animate-css": "^1.3.7" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /educhain/core/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Optional, Any 3 | 4 | class LLMConfig: 5 | def __init__( 6 | self, 7 | api_key: Optional[str] = None, 8 | model_name: str = "gpt-4o-mini", 9 | max_tokens: int = 1500, 10 | temperature: float = 0.7, 11 | custom_model: Optional[Any] = None, 12 | base_url: Optional[str] = None, 13 | default_headers: Optional[dict] = None 14 | ): 15 | # If no API key is provided, try to get it from environment variables 16 | if api_key is None: 17 | api_key = os.getenv("OPENAI_API_KEY") 18 | 19 | self.api_key = api_key 20 | self.model_name = model_name 21 | self.max_tokens = max_tokens 22 | self.temperature = temperature 23 | self.custom_model = custom_model 24 | self.base_url = base_url 25 | self.default_headers = default_headers -------------------------------------------------------------------------------- /educhain/utils/loaders.py: -------------------------------------------------------------------------------- 1 | from PyPDF2 import PdfReader 2 | from bs4 import BeautifulSoup 3 | import re 4 | import requests 5 | 6 | class PdfFileLoader: 7 | def load_data(self, file_path): 8 | reader = PdfReader(file_path) 9 | all_content = [] 10 | 11 | for page in reader.pages: 12 | content = 
page.extract_text() 13 | content = self.clean_string(content) 14 | all_content.append(content) 15 | 16 | return " ".join(all_content) 17 | 18 | def clean_string(self, text): 19 | text = re.sub(r'\s+', ' ', text) 20 | return text.strip() 21 | 22 | class UrlLoader: 23 | def load_data(self, url): 24 | response = requests.get(url) 25 | soup = BeautifulSoup(response.content, 'html.parser') 26 | content = soup.get_text() 27 | return self.clean_string(content) 28 | 29 | def clean_string(self, text): 30 | text = re.sub(r'\s+', ' ', text) 31 | return text.strip() -------------------------------------------------------------------------------- /docs/getting-started/installation.md: -------------------------------------------------------------------------------- 1 | ## 📥 getting-started/installation.md 2 | ```markdown 3 | # 📥 Installation 4 | 5 | Getting Educhain up and running is a breeze! 🌬️ 6 | 7 | ## 🚀 Quick Install 8 | 9 | ```bash 10 | pip install educhain 11 | ``` 12 | 13 | ## 📋 Requirements 14 | 15 | - Python 3.7+ 16 | - OpenAI API key 17 | 18 | ## 🔧 Detailed Setup 19 | 20 | 1. **Create a virtual environment** (optional but recommended): 21 | ```bash 22 | python -m venv educhain-env 23 | source educhain-env/bin/activate # On Windows, use `educhain-env\Scripts\activate` 24 | ``` 25 | 26 | 2. **Install Educhain**: 27 | ```bash 28 | pip install educhain 29 | ``` 30 | 31 | 3. **Set up your API key**: 32 | ```bash 33 | export OPENAI_API_KEY='your-api-key-here' 34 | ``` 35 | 36 | ## 🎉 Next Steps 37 | 38 | - [🏃‍♂️ Quick Start Guide](quick-start.md) 39 | - [⚙️ Configuration Options](configuration.md) 40 | 41 | Need help? Check our [❓ FAQ](../resources/faq.md) or join our [💬 Discord community](https://discord.gg/educhain)! 42 | ``` 43 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/public/globe.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024-2025 Educhain (https://educhain.in) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/Backend/app/api/routes.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, HTTPException 2 | from app.models.pedagogy_models import ContentRequest, ContentResponse, PedagogyInfo 3 | from app.services.educhain_services import generate_content, get_pedagogies 4 | import logging 5 | from typing import Dict 6 | 7 | router = APIRouter() 8 | logger = logging.getLogger(__name__) 9 | 10 | @router.post("/generate-content", response_model=ContentResponse) 11 | def generate_content_route(req: ContentRequest): 12 | try: 13 | result = generate_content(req.topic, req.pedagogy, req.params) 14 | return ContentResponse( 15 | pedagogy=req.pedagogy, 16 | topic=req.topic, 17 | content=result 18 | ) 19 | except Exception as e: 20 | logger.error(f"Error generating content: {e}") 21 | raise HTTPException(status_code=500, detail=str(e)) 22 | 23 | @router.get("/available-pedagogies", response_model=Dict[str, PedagogyInfo]) 24 | def get_available_pedagogies_route(): 25 | try: 26 | return get_pedagogies() 27 | except Exception as e: 28 | logger.error(f"Error fetching pedagogies: {e}") 29 | raise HTTPException(status_code=500, detail=str(e)) 30 | 31 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/public/next.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cookbook/starter-apps/playground/readme.md: -------------------------------------------------------------------------------- 1 | Buildfast with AI
2 | 3 | # Educhain Playground 4 | 5 | Playground 6 | 7 | A Python + Streamlit powered web application that showcases various core features of **Educhain** — built as a playground for testing, exploration, and building new use-cases for developers. 8 | 9 | Live Demo Linke to Playground : [PlaygroundLink](https://educhain-playground-sgrbz8rwmyhefwqcvnpekj.streamlit.app/) 10 | 11 | --- 12 | 13 | ## 📦 Installation 14 | Install all required dependencies using [requirements.txt](.\requirements.txt): 15 | ``` pip install -r requirements.txt ``` 16 | - educhain 17 | - langchain-google-genai 18 | - google-generativeai 19 | - python-dotenv 20 | - streamlit 21 | - PyPDF2 22 | - fpdf 23 | 24 | ## 🚀 Running Educhain Playground Locally 25 | 1. Setup your Gemini or your prefered LLM API KEY 26 | - For use of other LLMs refer to [educhain_llms.txt](../../educhain_llms.txt) 27 | 2. Follow the steps: 28 | > ```` cd educhain\cookbook\starter-apps\playground ``` 29 | 3. Use the command ``` Streamlit run Home.py ``` In CLI to run the Streamlit application. 30 | 31 | ## 🎯 You're Ready! 32 | Now explore all the powerful tools of Educhain: 33 | ✅ Question Generator · 📚 Lesson Planner · 🌐 Web/YT Input · 🌍 Indic MCQs · 🧠 Doubt Solver · 📘 PYQ-to-Prep and more! 34 | 35 | -------------------------------------------------------------------------------- /images/educhain.svg: -------------------------------------------------------------------------------- 1 | 2 | 45 | 46 | 47 | 48 | 49 | educhain 50 | Revolutionizing education through AI 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Educhain 2 | 3 | Thank you for your interest in contributing to Educhain! We value your input and are excited to collaborate. Here’s how you can get involved: 4 | 5 | ## Reporting Issues 6 | 7 | - Use the GitHub issue tracker to report bugs or suggest enhancements. 8 | - Provide a detailed description, including steps to reproduce the issue. 9 | - Share your environment details (OS, Python version, etc.) to help us resolve issues faster. 10 | 11 | ## Submitting Changes 12 | 13 | 1. **Fork** the repository. 14 | 2. **Create a new branch** for your feature or fix: 15 | ``` 16 | git checkout -b feature/AmazingFeature 17 | ``` 18 | 3. **Make your changes**. 19 | 4. **Commit** with a clear message: 20 | ``` 21 | git commit -m 'Add some AmazingFeature' 22 | ``` 23 | 5. **Push** your branch: 24 | ``` 25 | git push origin feature/AmazingFeature 26 | ``` 27 | 6. **Open a Pull Request** describing your changes and their motivation. 28 | 29 | ## Coding Conventions 30 | 31 | - Follow the [PEP 8](https://pep8.org/) style guide. 32 | - Write clear, well-commented code. 33 | - Include unit tests for any new features or bug fixes. 34 | 35 | ## Documentation 36 | 37 | - Update `README.md` with any interface changes. 38 | - Update the `docs/` folder for substantial modifications or new features. 39 | 40 | ## Questions? 41 | 42 | We’re here to help! Reach out anytime: 43 | 44 | - **Email:** satvik@buildfastwithai.com | shubham@buildfastwithai.com 45 | - **Website:** [@educhain_in](https://educhain.in) 46 | 47 | Thank you for helping make Educhain better! 
48 | Made with ❤️ by the Educhain Team 49 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/src/components/PedagogyCard.jsx: -------------------------------------------------------------------------------- 1 | const ICONS = { 2 | blooms_taxonomy: "🎓", 3 | socratic_questioning: "❓", 4 | project_based_learning: "🧩", 5 | flipped_classroom: "🔁", 6 | inquiry_based_learning: "🔎", 7 | constructivist: "🏗️", 8 | gamification: "🎮", 9 | peer_learning: "🤝", 10 | }; 11 | 12 | function toTitleCase(text) { 13 | return String(text) 14 | .replace(/_/g, " ") 15 | .replace(/\b\w/g, (m) => m.toUpperCase()); 16 | } 17 | 18 | export default function PedagogyCard({ name, description, onClick }) { 19 | const pretty = toTitleCase(name); 20 | const icon = ICONS[name] || "📚"; 21 | 22 | return ( 23 |
    {/* The original markup of this return block (its tags and class names) was
        stripped when this dump was generated; the element below is a minimal
        reconstruction that keeps the recoverable pieces: a clickable card wired
        to onClick, showing {icon}, {pretty}, {description}, and an "Explore" label. */}
    <button type="button" onClick={onClick}>
      <div>
        <span>{icon}</span>
        <div>
          <h3>{pretty}</h3>
          <p>{description}</p>
        </div>
      </div>
      <span>Explore</span>
    </button>
46 | ); 47 | } 48 | -------------------------------------------------------------------------------- /docs/features/mcq_generation.md: -------------------------------------------------------------------------------- 1 | 2 | # 🧠 Educhain Usage 3 | 4 | Leverage Educhain's flexibility to generate customized content with ease! 🌟 5 | 6 | --- 7 | 8 | ## 🚀 Basic Usage 9 | 10 | ```python 11 | from educhain import Educhain 12 | 13 | # Initialize Educhain client 14 | client = Educhain() 15 | 16 | # Generate "Fill in the Blank" questions with custom instructions 17 | ques = client.qna_engine.generate_questions( 18 | topic="Psychology", 19 | num=10, 20 | question_type="Fill in the Blank", 21 | custom_instructions="Only basic questions" 22 | ) 23 | 24 | # Supported question types: "Multiple Choice" (default), "True/False", "Fill in the Blank", "Short Answer" 25 | print(ques) 26 | ``` 27 | 28 | --- 29 | 30 | ## 🌟 Advanced LLM Integration 31 | 32 | Use custom LLM models like Google's Gemini with Educhain for enhanced content generation! 🚀✨ 33 | 34 | ```python 35 | from langchain_google_genai import ChatGoogleGenerativeAI 36 | from educhain import Educhain, LLMConfig 37 | 38 | # Configure Gemini model 39 | gemini_flash = ChatGoogleGenerativeAI( 40 | model="gemini-1.5-flash-exp-0827", 41 | google_api_key="GOOGLE_API_KEY" 42 | ) 43 | 44 | # Set up LLM configuration 45 | flash_config = LLMConfig(custom_model=gemini_flash) 46 | 47 | # Initialize Educhain with Gemini 48 | client = Educhain(flash_config) 49 | 50 | # Generate questions using Gemini-powered Educhain 51 | ques = client.qna_engine.generate_questions( 52 | topic="Psychology", 53 | num=10 54 | ) 55 | 56 | print(ques) 57 | ``` 58 | 59 | --- 60 | 61 | ## 🌟 Pro Tips 62 | 63 | - Experiment with `custom_instructions` to tailor questions to specific needs. 64 | - Use `LLMConfig` to integrate third-party models for diverse use cases. 65 | 66 | Ready to take Educhain to the next level? Start exploring today! 
🚀✨ 67 | ``` 68 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="educhain", 5 | version="0.4.0", 6 | packages=find_packages(), 7 | install_requires=[ 8 | "langchain>=1.0.0", 9 | "langchain-openai>=1.1.0", 10 | "langchain-community>=0.4.1", 11 | "pydantic>=2.0,<3.0", 12 | "langchain-text-splitters", 13 | "langchain-google-genai", 14 | "openai", 15 | "python-dotenv", 16 | "reportlab", 17 | "PyPDF2", 18 | "beautifulsoup4", 19 | "youtube-transcript-api", 20 | "requests", 21 | "chromadb", 22 | "protobuf", 23 | "pillow", 24 | "dataframe-image", 25 | "pandas", 26 | "ipython", 27 | "matplotlib", 28 | "numpy", 29 | "gtts", # Google Text-to-Speech 30 | "pydub", # Audio processing 31 | "mutagen", # Audio metadata handling 32 | ], 33 | extras_require={ 34 | "dev": [ 35 | "pytest", 36 | "black", 37 | "flake8", 38 | ], 39 | }, 40 | author="Satvik Paramkusham", 41 | author_email="satvik@buildfastwithai.com", 42 | description="A Python package for generating educational content using Generative AI", 43 | long_description=open("README.md").read(), 44 | long_description_content_type="text/markdown", 45 | url="https://github.com/satvik314/educhain", 46 | classifiers=[ 47 | "Development Status :: 3 - Alpha", 48 | "Intended Audience :: Developers", 49 | "License :: OSI Approved :: MIT License", 50 | "Programming Language :: Python :: 3", 51 | "Programming Language :: Python :: 3.10", 52 | "Programming Language :: Python :: 3.11", 53 | "Programming Language :: Python :: 3.12", 54 | ], 55 | python_requires='>=3.10', 56 | ) 57 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Origami_tutorial_generator/solver.py: -------------------------------------------------------------------------------- 1 | # solver.py 2 | from educhain import Educhain, LLMConfig 3 | from langchain_openai import ChatOpenAI 4 | import os 5 | 6 | def setup_educhain(api_key): 7 | """Set up Educhain with Horizon Alpha model""" 8 | 9 | horizon_alpha = ChatOpenAI( 10 | openai_api_base="https://openrouter.ai/api/v1", 11 | openai_api_key=api_key, 12 | model_name="openrouter/horizon-beta" 13 | ) 14 | config = LLMConfig(custom_model=horizon_alpha) 15 | return Educhain(config) 16 | 17 | def generate_origami_steps(image_path, educhain_client): 18 | """Generate tutorial from uploaded image""" 19 | prompt = ( 20 | "This is an origami object. Generate a complete, easy-to-follow, step-by-step folding guide to recreate it.\n\n" 21 | "Make sure to include the following in each step:\n" 22 | "🟢 Step number and a friendly instruction\n" 23 | "📄 Paper size to start with (like 'Start with a square paper – 15cm by 15cm')\n" 24 | "✨ What fold to do (like 'Fold the paper in half like a sandwich')\n" 25 | "🔍 What it should look like after the fold (like 'You should see a triangle now')\n" 26 | "🎯 Little tips or checks (like 'Make sure the corners match!' or 'Press the fold neatly')\n" 27 | "🎨 Use emojis and simple words so even a child can understand\n" 28 | "📷 If you can, include simple drawings\n\n" 29 | "Keep it very beginner-friendly, creative, and encouraging. Imagine you're writing it for a 10-year-old doing origami for the first time!" 
30 | ) 31 | 32 | result = educhain_client.qna_engine.solve_doubt( 33 | image_source=image_path, 34 | prompt=prompt, 35 | detail_level="High" 36 | ) 37 | # 38 | # if isinstance(result, dict) and "steps" in result: 39 | # explanation = result.get("explanation", "") 40 | # steps = result["steps"] 41 | # notes = result.get("additional_notes", "") 42 | # return explanation, steps, notes 43 | # else: 44 | # return "", [str(result)], "" 45 | return result 46 | -------------------------------------------------------------------------------- /educhain/core/educhain.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Any, Dict, List 2 | from educhain.core.config import LLMConfig 3 | from educhain.engines.qna_engine import QnAEngine 4 | from educhain.engines.content_engine import ContentEngine 5 | 6 | class Educhain: 7 | def __init__(self, config: Optional[LLMConfig] = None): 8 | if config is None: 9 | config = LLMConfig() 10 | self.llm_config = config 11 | self.qna_engine = QnAEngine(config) 12 | self.content_engine = ContentEngine(config) 13 | self.components: Dict[str, Any] = { 14 | "qna_engine": self.qna_engine, 15 | "content_engine": self.content_engine 16 | } 17 | 18 | def get_qna_engine(self) -> QnAEngine: 19 | return self.qna_engine 20 | 21 | def get_content_engine(self) -> ContentEngine: 22 | return self.content_engine 23 | 24 | def get_config(self) -> LLMConfig: 25 | return self.llm_config 26 | 27 | def update_config(self, new_config: LLMConfig) -> None: 28 | self.llm_config = new_config 29 | self.qna_engine = QnAEngine(new_config) 30 | self.content_engine = ContentEngine(new_config) 31 | self.components["qna_engine"] = self.qna_engine 32 | self.components["content_engine"] = self.content_engine 33 | 34 | def add_component(self, component_name: str, component: Any) -> None: 35 | self.components[component_name] = component 36 | setattr(self, component_name, component) 37 | 38 | def get_component(self, component_name: str) -> Any: 39 | return self.components.get(component_name) 40 | 41 | def remove_component(self, component_name: str) -> None: 42 | if component_name in self.components: 43 | del self.components[component_name] 44 | delattr(self, component_name) 45 | 46 | def get_available_components(self) -> List[str]: 47 | return list(self.components.keys()) 48 | 49 | def __str__(self) -> str: 50 | return f"Educhain(config={self.llm_config}, components={self.get_available_components()})" 51 | 52 | def __repr__(self) -> str: 53 | return self.__str__() -------------------------------------------------------------------------------- /cookbook/starter-apps/multilingual_chatbot/readme.md: -------------------------------------------------------------------------------- 1 | # 🌍 SUTRA Multilingual Chatbot 2 | 3 | A powerful multilingual chatbot built with Streamlit and the SUTRA AI model. This app demonstrates how to create a conversational interface that supports multiple languages, particularly Indian languages. 
4 | 5 | ![Multilingual Chatbot Demo](https://framerusercontent.com/images/3Ca34Pogzn9I3a7uTsNSlfs9Bdk.png) 6 | 7 | ## ✨ Features 8 | 9 | - 🌍 Support for multiple languages including Hindi, Marathi, Telugu, Tamil, and more 10 | - 💬 Interactive chat interface with streaming responses 11 | - 🔄 Start new conversations with a single click 12 | - 🔑 Secure API key handling 13 | - 💬 Language-specific welcome messages 14 | 15 | ## 🚀 Getting Started 16 | 17 | ### Prerequisites 18 | 19 | - Python 3.8 or higher 20 | - A SUTRA API key from [Two AI](https://www.two.ai/sutra/api) 21 | 22 | ### Installation 23 | 24 | 1. Clone the repository: 25 | ```bash 26 | git clone https://github.com/satvik314/educhain.git 27 | cd educhain/cookbook/starter-apps/multilingual_chatbot 28 | ``` 29 | 30 | 2. Install the required dependencies: 31 | ```bash 32 | pip install -r requirements.txt 33 | ``` 34 | 35 | 3. Run the Streamlit app: 36 | ```bash 37 | streamlit run app.py 38 | ``` 39 | 40 | ## 🔧 Usage 41 | 42 | 1. Enter your SUTRA API key in the sidebar 43 | 2. Select your preferred language from the dropdown menu 44 | 3. Start chatting with the AI assistant 45 | 4. Use the "Start New Chat" button to reset the conversation 46 | 47 | ## 🌐 Supported Languages 48 | 49 | The app currently supports the following languages: 50 | 51 | - English 52 | - Hindi 53 | - Marathi 54 | - Telugu 55 | - Tamil 56 | - Bengali 57 | - Gujarati 58 | - Kannada 59 | - Malayalam 60 | - Punjabi 61 | - French 62 | - Spanish 63 | 64 | ## 🛠️ Customization 65 | 66 | You can customize the app by: 67 | 68 | - Adding more languages to the `LANGUAGES` dictionary 69 | - Creating language-specific welcome messages 70 | - Modifying the system prompt to change the assistant's behavior 71 | - Customizing the UI with Streamlit components 72 | 73 | ## 📄 License 74 | 75 | This project is part of the Educhain library and is licensed under the MIT License - see the LICENSE file for details. 76 | 77 | ## 🔗 Links 78 | 79 | - [SUTRA AI](https://www.two.ai/sutra/api) 80 | - [Educhain GitHub Repository](https://github.com/satvik314/educhain) 81 | - [Educhain Documentation](https://github.com/satvik314/educhain/blob/main/README.md) 82 | 83 | --- 84 | 85 | Built with ❤️ using [SUTRA AI](https://www.two.ai/sutra/api) and [Streamlit](https://streamlit.io) -------------------------------------------------------------------------------- /cookbook/starter-apps/Consultancy-Prep/README.md: -------------------------------------------------------------------------------- 1 | # 🧩 Consulting Interview Prep App 2 | 3 | An AI-powered Streamlit application designed to simplify and personalize consulting interview preparation. This app leverages the **Educhain SDK** and **Gemini LLM** to generate tailored MCQs, consulting frameworks, and guesstimate problems from manual prompts, PDFs, or website URLs. 
4 | 5 | --- 6 | 7 | ## 🚀 Features 8 | 9 | * **Multiple Input Options:** Manual Prompt, PDF Upload, Website URL 10 | * **MCQ Generation:** Auto-generated Multiple Choice Questions with difficulty levels 11 | * **Consulting Framework Generation:** Structured problem-solving frameworks via Gemini LLM 12 | * **Guesstimate Problem Generator:** Realistic guesstimate cases and solution approach 13 | * **Difficulty Selector:** Beginner, Intermediate, Advanced 14 | * **Streamlit Frontend:** Clean, interactive user interface 15 | 16 | --- 17 | 18 | ## 🔧 Tech Stack 19 | 20 | * **Python 3.9+** 21 | * [Streamlit](https://streamlit.io/) 22 | * [Educhain SDK](https://pypi.org/project/educhain/) 23 | * [Langchain Google Gemini](https://pypi.org/project/langchain-google-genai/) 24 | 25 | --- 26 | 27 | ## ⚙️ Setup Instructions 28 | 29 | ### 1. Clone this Repository 30 | 31 | ```bash 32 | git clone 33 | cd cookbook/starter-apps/Consultancy-Prep 34 | ``` 35 | 36 | ### 2. Create Virtual Environment (Optional but Recommended) 37 | 38 | ```bash 39 | python -m venv venv 40 | source venv/bin/activate # Linux/Mac 41 | venv\Scripts\activate # Windows 42 | ``` 43 | 44 | ### 3. Install Dependencies 45 | 46 | ```bash 47 | pip install -r requirements.txt 48 | ``` 49 | 50 | ### 4. Configure Google API Key 51 | 52 | * Add your **Google Gemini API key** in Streamlit Cloud Secrets or `.env` if running locally: 53 | 54 | ``` 55 | GOOGLE_API_KEY=your-api-key-here 56 | ``` 57 | 58 | Or use Streamlit Cloud's Secrets Manager. 59 | 60 | ### 5. Run the App 61 | 62 | ```bash 63 | streamlit run c-app.py 64 | ``` 65 | 66 | App will be available at `http://localhost:8501`. 67 | 68 | --- 69 | 70 | ## 📄 Input Options 71 | 72 | * **Manual Prompt:** Enter any business case prompt. 73 | * **PDF Upload:** Upload a casebook or document. 74 | * **Website URL:** Provide a valid URL for case extraction. 75 | 76 | --- 77 | 78 | ## 💡 Future Development 79 | 80 | * Auto MCQ scoring and evaluation 81 | * AI-powered mock interviews 82 | * Case library by consulting firms (McKinsey, BCG, Bain) 83 | * Mobile-friendly version 84 | * Real-time data integration for fresh market cases 85 | 86 | --- 87 | 88 | ## 🤝 Credits 89 | 90 | Built with ❤️ using **Educhain SDK**, **Gemini LLM**, and **Streamlit**. 91 | 92 | --- 93 | -------------------------------------------------------------------------------- /images/educhain-comparison-svg.svg: -------------------------------------------------------------------------------- 1 | 2 | 13 | 14 | 15 | 16 | 17 | Educhain vs Standard Libraries 18 | 19 | 20 | 21 | Educhain 22 | 23 | from educhain import qna_engine 24 | 25 | questions = qna_engine.generate_mcq( 26 | topic="Python basics", 27 | level="Beginner", 28 | num=5 29 | ) 30 | 31 | 32 | Simple API 33 | AI-powered 34 | Structured output 35 | 36 | 37 | 38 | Standard Libraries 39 | 40 | import random 41 | import json 42 | 43 | def generate_question(topic): 44 | # Complex logic here 45 | ... 46 | 47 | questions = [] 48 | for _ in range(5): 49 | q = generate_question("Python basics") 50 | questions.append(q) 51 | 52 | 53 | Complex implementation 54 | Limited to predefined questions 55 | Manual formatting required 56 | 57 | -------------------------------------------------------------------------------- /cookbook/starter-apps/flashcard_generator/readme.md: -------------------------------------------------------------------------------- 1 | # 📚 Educhain Flashcard Generator 2 | 3 | A powerful AI-powered flashcard generator built with Streamlit and the Educhain library. 
Create customized flashcards on any topic to enhance your learning experience. 4 | 5 | ![Flashcard Generator Demo](https://github.com/satvik314/educhain/raw/main/images/flashcard_demo.png) 6 | 7 | ## ✨ Features 8 | 9 | - 🤖 Powered by OpenAI's GPT models through the Educhain library 10 | - 🎯 Generate flashcards on any topic with a single click 11 | - 🏷️ Color-coded card types (Concept, Definition, Fact, Process, Example, Comparison) 12 | - 📝 Detailed explanations for each flashcard 13 | - 🔢 Customize the number of flashcards generated 14 | - 🔑 Bring your own OpenAI API key 15 | 16 | ## 🚀 Getting Started 17 | 18 | ### Prerequisites 19 | 20 | - Python 3.8 or higher 21 | - An OpenAI API key 22 | 23 | ### Installation 24 | 25 | 1. Clone the repository: 26 | ```bash 27 | git clone https://github.com/satvik314/educhain.git 28 | cd educhain/cookbook/starter-apps/flashcard_generator 29 | ``` 30 | 31 | 2. Install the required dependencies: 32 | ```bash 33 | pip install -r requirements.txt 34 | ``` 35 | 36 | 3. Run the Streamlit app: 37 | ```bash 38 | streamlit run app.py 39 | ``` 40 | 41 | ## 🔧 Usage 42 | 43 | 1. Enter your OpenAI API key in the sidebar 44 | 2. Type a topic for your flashcards (e.g., "Python Programming", "Machine Learning", "World History") 45 | 3. Select the number of flashcards you want to generate (1-20) 46 | 4. Click "Generate Flashcards" 47 | 5. Explore your flashcards by clicking on each expandable card 48 | 49 | ## 🧩 Card Types 50 | 51 | The flashcard generator creates different types of cards to enhance your learning: 52 | 53 | - **Concept** (Red): Core ideas and principles 54 | - **Definition** (Teal): Precise meanings of terms 55 | - **Fact** (Yellow): Specific pieces of information 56 | - **Process** (Purple): Step-by-step procedures 57 | - **Example** (Green): Practical instances or applications 58 | - **Comparison** (Blue): Contrasting related concepts 59 | 60 | ## 🛠️ Customization 61 | 62 | You can customize the app by modifying the following: 63 | 64 | - Change the model used by updating the `model_name` parameter in the `LLMConfig` class 65 | - Adjust the maximum number of flashcards by changing the `max_value` parameter in the number input 66 | - Modify the prompt template to generate different types of content 67 | 68 | ## 📄 License 69 | 70 | This project is part of the Educhain library and is licensed under the MIT License - see the LICENSE file for details. 71 | 72 | ## 🔗 Links 73 | 74 | - [Educhain GitHub Repository](https://github.com/satvik314/educhain) 75 | - [Educhain Documentation](https://github.com/satvik314/educhain/blob/main/README.md) 76 | 77 | --- 78 | 79 | Built with ❤️ using [Educhain](https://github.com/satvik314/educhain) and [Streamlit](https://streamlit.io) -------------------------------------------------------------------------------- /cookbook/starter-apps/Jee_problem_solver_and_analyzer/README.md: -------------------------------------------------------------------------------- 1 | # 📚 JEE Advanced Problem Solver & Analyzer 2 | A lightning-fast, AI-powered assistant that dissects **JEE Advanced** problems straight from an image, powered by **GPT-5 + [Educhain](https://github.com/satvik314/educhain)**. 
3 | 4 | ![Python 3.13+](https://img.shields.io/badge/python-3.13+-blue.svg) 5 | ![Streamlit](https://img.shields.io/badge/Built%20with-Streamlit-FF4B4B.svg) 6 | ![OpenAI](https://img.shields.io/badge/Powered%20by-OpenAI-black.svg) 7 | ![Educhain](https://img.shields.io/badge/Integrated-Educhain-24A148.svg) 8 | ![License](https://img.shields.io/badge/license-MIT-green.svg) 9 | 10 | --- 11 | 12 | ## 🌟 Features 13 | - 📸 **Image Upload**: Drop a JEE Advanced image 14 | - 🔍 **Topic Extraction**: Instantly see all concepts involved 15 | - 🧮 **Step-by-Step Solution**: High-detail, exam-grade explanations 16 | - 🐳 **Similar practice problems**: 5 new problems which use similar concept as the given problem 17 | - ⚙️ **GPT-5 Engine**: state-of-the-art reasoning 18 | --- 19 | 20 | ## 🚀 Quick Start 21 | 22 | ### 1. Clone & Enter 23 | ```bash 24 | git clone https://github.com//jee-gpt5-solver.git 25 | cd jee-gpt5-solver 26 | ``` 27 | 28 | ### 2. Install Dependencies 29 | ```bash 30 | # Using pip 31 | pip install -r requirements.txt 32 | 33 | # or modern Python 34 | pip install . 35 | ``` 36 | 37 | > Requirements are **Python ≥3.13**. 38 | > `uv venv` or standard `venv` is recommended. 39 | 40 | ### 3. Launch Streamlit 41 | ```bash 42 | streamlit run app.py 43 | ``` 44 | Your browser will open at *http://localhost:8501*. 45 | 46 | --- 47 | 48 | ## 🔐 Configure OpenAI Key 49 | - In the sidebar paste your **OpenAI API Key** (Have credits ready; GPT-5 usage applies). 50 | - The key is **never stored**—it only lives in memory during the session. 51 | 52 | --- 53 | 54 | ## 📷 How It Works 55 | 1. **Menu (left)**: Enter API key 56 | 2. **Center**: Drop an image (`jpg`, `png`, `jpeg`) 57 | 3. **Click** “Analyze Problem” 58 | → Topics appear as 🟢 bullets 59 | → Complete solution auto-expands below 60 | 61 | --- 62 | 63 | ## 🛠️ Tech Stack 64 | 65 | | Layer | Tech | 66 | |-----------------|-----------------------------| 67 | | LLM Engine | GPT-5 via `langchain-openai`| 68 | | Orchestration | Educhain (`educhain`) | 69 | | UI | Streamlit (responsive, light & dark modes) | 70 | | Image Support | Pillow (PIL) | 71 | | Packaging | `pyproject.toml` → `pip`,`uv` | 72 | 73 | --- 74 | 75 | 76 | ## 🤝 Contributing 77 | Contributions welcome! 78 | 1. Fork the repo 79 | 2. Create a feature branch 80 | 3. `poetry run pytest` (if tests exist) 81 | 4. Open a pull request 🎉 82 | 83 | If you spot bugs, open an [Issue](https://github.com//jee-gpt5-solver/issues) — attach sample images for faster triage. 84 | 85 | --- 86 | 87 | ## 📜 License 88 | MIT © 2024 Build Fast with AI. 89 | 90 | --- 91 | 92 |

93 | Built with ❤️ by Build Fast with AI 94 |

95 | -------------------------------------------------------------------------------- /docs/getting-started/quick-start.md: -------------------------------------------------------------------------------- 1 | ## 🏃‍♂️ getting-started/quick-start.md 2 | 3 | ```markdown 4 | # 🏃‍♂️ Quick Start Guide 5 | 6 | Get up and running with Educhain in minutes! 🚀 7 | 8 | ## 📚 Basic Usage 9 | 10 | Here's a simple example to generate multiple-choice questions: 11 | 12 | ```python 13 | from educhain import qna_engine 14 | 15 | questions = qna_engine.generate_mcq( 16 | topic="Python Programming", 17 | level="Beginner", 18 | num=5 19 | ) 20 | 21 | for i, q in enumerate(questions, 1): 22 | print(f"Question {i}: {q['question']}") 23 | for j, option in enumerate(q['options'], 1): 24 | print(f" {j}. {option}") 25 | print(f"Correct Answer: {q['correct_answer']}\n") 26 | ``` 27 | 28 | ## 🔧 Customization 29 | 30 | Customize your questions with additional parameters: 31 | 32 | ```python 33 | questions = qna_engine.generate_mcq( 34 | topic="Machine Learning", 35 | level="Intermediate", 36 | num=3, 37 | question_type="conceptual", 38 | language="English" 39 | ) 40 | ``` 41 | 42 | ## 📊 Generating Lesson Plans 43 | 44 | Create comprehensive lesson plans with ease: 45 | 46 | ```python 47 | from educhain import content_engine 48 | 49 | lesson_plan = content_engine.generate_lesson_plan( 50 | topic="World War II", 51 | grade_level="High School", 52 | duration="60 minutes" 53 | ) 54 | 55 | print(lesson_plan) 56 | ``` 57 | 58 | ## 🎉 Next Steps 59 | 60 | - Explore [📝 MCQ Generation](../features/mcq-generation.md) in depth 61 | - Learn about [📊 Lesson Plan Generation](../features/lesson-plans.md) 62 | - Check out [🔢 Different Question Types](../features/question-types.md) 63 | 64 | Happy learning with Educhain! 🎓✨ 65 | ``` 66 | 67 | ## ⚙️ getting-started/configuration.md 68 | 69 | ```markdown 70 | # ⚙️ Configuration 71 | 72 | Customize Educhain to fit your needs perfectly! 🎛️ 73 | 74 | ## 🔑 API Key Configuration 75 | 76 | Set your OpenAI API key: 77 | 78 | ```python 79 | import educhain 80 | 81 | educhain.api_key = "your-api-key-here" 82 | ``` 83 | 84 | Or use an environment variable: 85 | 86 | ```bash 87 | export EDUCHAIN_API_KEY="your-api-key-here" 88 | ``` 89 | 90 | ## 🌐 Language Model Selection 91 | 92 | Choose your preferred language model: 93 | 94 | ```python 95 | from educhain import qna_engine 96 | 97 | qna_engine.set_model("gpt-4") # Default is "gpt-3.5-turbo" 98 | ``` 99 | 100 | ## 🎨 Customizing Prompt Templates 101 | 102 | Define your own prompt templates: 103 | 104 | ```python 105 | from educhain import qna_engine 106 | 107 | custom_template = """ 108 | Generate {num} multiple-choice questions about {topic} at {level} level. 109 | Each question should have 4 options and one correct answer. 110 | """ 111 | 112 | qna_engine.set_prompt_template(custom_template) 113 | ``` 114 | 115 | 116 | 117 | ## 🎉 Next Steps 118 | 119 | - Explore [🔬 Advanced Usage](../advanced-usage/custom-prompts.md) 120 | - Learn about [🤖 Different LLM Models](../advanced-usage/llm-models.md) 121 | - Check out our [💡 Best Practices](../guides/best-practices.md) 122 | 123 | Need more help? Join our [💬 Discord community](https://discord.gg/educhain)! 
124 | ``` 125 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/Backend/app/services/educhain_services.py: -------------------------------------------------------------------------------- 1 | from educhain import Educhain , LLMConfig 2 | from typing import Any, Dict 3 | import logging 4 | from dotenv import load_dotenv 5 | load_dotenv() 6 | 7 | 8 | logger = logging.getLogger(__name__) 9 | custom_config = LLMConfig(model_name="gpt-4o") 10 | 11 | # Initialize the Educhain client with the custom configuration 12 | client = Educhain(custom_config) 13 | #client = Educhain() # Global client 14 | 15 | def generate_content(topic: str, pedagogy: str, params: Dict[str, Any]) -> Any: 16 | try: 17 | # Ensure params is a dictionary 18 | if not isinstance(params, dict): 19 | params = {} 20 | 21 | # Add default values for all pedagogy parameters 22 | default_params = get_default_params(pedagogy) 23 | for key, default_value in default_params.items(): 24 | if not params.get(key): 25 | params[key] = default_value 26 | 27 | logger.info(f"Generating content for {pedagogy} with topic '{topic}' and params: {params}") 28 | 29 | result = client.content_engine.generate_pedagogy_content( 30 | topic=topic, 31 | pedagogy=pedagogy, 32 | **params 33 | ) 34 | 35 | logger.info(f"Generated content result: {result}") 36 | return result 37 | except Exception as e: 38 | logger.error(f"Educhain error: {e}") 39 | raise 40 | 41 | def get_default_params(pedagogy: str) -> Dict[str, str]: 42 | """Get default parameters for each pedagogy""" 43 | defaults = { 44 | "blooms_taxonomy": { 45 | "grade_level": "High School", 46 | "target_level": "Intermediate" 47 | }, 48 | "socratic_questioning": { 49 | "depth_level": "Intermediate", 50 | "student_level": "High School" 51 | }, 52 | "project_based_learning": { 53 | "project_duration": "4-6 weeks", 54 | "team_size": "3-4 students", 55 | "industry_focus": "General" 56 | }, 57 | "flipped_classroom": { 58 | "class_duration": "50 minutes", 59 | "prep_time": "30-45 minutes", 60 | "technology_level": "Moderate" 61 | }, 62 | "inquiry_based_learning": { 63 | "inquiry_type": "Guided", 64 | "investigation_scope": "Moderate", 65 | "student_autonomy": "Balanced" 66 | }, 67 | "constructivist": { 68 | "prior_knowledge_level": "Mixed", 69 | "social_interaction_focus": "High", 70 | "reflection_emphasis": "Strong" 71 | }, 72 | "gamification": { 73 | "game_mechanics": "Points, badges, levels", 74 | "competition_level": "Moderate", 75 | "technology_platform": "Web-based" 76 | }, 77 | "peer_learning": { 78 | "group_size": "3-4 students", 79 | "collaboration_type": "Mixed", 80 | "skill_diversity": "Moderate" 81 | } 82 | } 83 | return defaults.get(pedagogy, {}) 84 | 85 | 86 | def get_pedagogies() -> Dict[str, Dict[str, Any]]: 87 | try: 88 | return client.content_engine.get_available_pedagogies() 89 | except Exception as e: 90 | logger.error(f"Failed to fetch pedagogies: {e}") 91 | raise 92 | -------------------------------------------------------------------------------- /archive/utils.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from .models import MCQList ### 3 | from reportlab.lib.pagesizes import letter 4 | from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, PageBreak 5 | from reportlab.lib.styles import getSampleStyleSheet 6 | from typing import List, Optional 7 | 8 | def to_csv(quiz_data : MCQList, file_name): 9 | """ 10 | Generate a CSV file from a 
Quiz object. 11 | 12 | Args: 13 | quiz_data (Quiz): Instance of the Quiz class containing a list of Question objects. 14 | file_name (str): Name of the CSV file to be created. 15 | """ 16 | mcq_data = [] 17 | 18 | for question in quiz_data.questions: 19 | mcq_data.append({ 20 | 'question': question.question, 21 | 'option_1': question.options[0], 22 | 'option_2': question.options[1], 23 | 'option_3': question.options[2], 24 | 'option_4': question.options[3], 25 | 'correct_answer': question.correct_answer 26 | }) 27 | 28 | df = pd.DataFrame(mcq_data) 29 | df.to_csv(file_name, index=False) 30 | 31 | 32 | def to_json(quiz_data : MCQList, file_name=None): 33 | 34 | """ 35 | Convert a list of Question objects to JSON and create a JSON file. 36 | 37 | Args: 38 | questions (list): List of Question objects. 39 | file_name (str): Name of the JSON file to be created. 40 | """ 41 | data = [{"question": question.question, "options": question.options, "correct_answer": question.correct_answer} for question in quiz_data.questions] 42 | 43 | df = pd.DataFrame(data) 44 | 45 | if file_name: 46 | df.to_json(file_name, orient='records', indent=4) 47 | 48 | return data 49 | 50 | 51 | def to_pdf(quiz_data : MCQList, file_name, heading=None, subheading=None): 52 | """ 53 | Create a PDF file from a list of MCQ (Multiple Choice Questions). 54 | 55 | Args: 56 | questions (list): List of Question objects. 57 | file_name (str): Name of the PDF file to be created. 58 | heading (str): Heading for the PDF document. (optional) 59 | subheading (str): Subheading for the PDF document. (optional) 60 | """ 61 | styles = getSampleStyleSheet() 62 | 63 | doc = SimpleDocTemplate(file_name, pagesize=letter) 64 | elements = [] 65 | 66 | if heading: 67 | elements.append(Paragraph(heading, styles["Heading1"])) 68 | 69 | if subheading: 70 | elements.append(Paragraph(subheading, styles["Heading2"])) 71 | elements.append(Spacer(1, 12)) 72 | 73 | for i, question in enumerate(quiz_data.questions, start=1): 74 | question_text = f"{i}. {question.question}" 75 | elements.append(Paragraph(question_text, styles["BodyText"])) 76 | 77 | for j, option in enumerate(question.options, start=97): 78 | option_text = f"{chr(j)}) {option}" 79 | elements.append(Paragraph(option_text, styles["BodyText"])) 80 | 81 | elements.append(Spacer(1, 12)) 82 | 83 | elements.append(PageBreak()) # Add a page break before the answers 84 | elements.append(Paragraph("Answers", styles["Heading1"])) 85 | 86 | for i, question in enumerate(quiz_data.questions, start=1): 87 | correct_answer_text = f"{i}. {chr(question.options.index(question.correct_answer) + 97)}) {question.correct_answer}" 88 | elements.append(Paragraph(correct_answer_text, styles["BodyText"])) 89 | 90 | doc.build(elements) 91 | -------------------------------------------------------------------------------- /docs/features/mcq_from_data.md: -------------------------------------------------------------------------------- 1 | # 🖋️ Multiple Choice Question (MCQ) Generation from Data 2 | 3 | Generate engaging MCQs from various data sources using AI! 
🧠✨ 4 | 5 | ## 🚀 Basic Usage 6 | 7 | ```python 8 | from educhain import Educhain 9 | 10 | client = Educhain() 11 | 12 | # From URL 13 | url_questions = client.qna_engine.generate_questions_from_data( 14 | source="https://example.com/article", 15 | source_type="url", 16 | num=3 17 | ) 18 | 19 | # From PDF 20 | pdf_questions = client.qna_engine.generate_questions_from_data( 21 | source="path/to/document.pdf", 22 | source_type="pdf", 23 | num=3 24 | ) 25 | 26 | # From Text File 27 | text_questions = client.qna_engine.generate_questions_from_data( 28 | source="path/to/content.txt", 29 | source_type="text", 30 | num=3 31 | ) 32 | ``` 33 | 34 | ## 🎡 Function Parameters 35 | 36 | | Parameter | Description | Example Values | 37 | |-----------|-------------|----------------| 38 | | `source` | Data source for question generation | PDF file path, URL, or text content | 39 | | `source_type` | Type of the data source | "pdf", "url", "text" | 40 | | `num` | Number of questions to generate | 5, 10, 20 | 41 | | `question_type` | Type of questions to generate | "Multiple Choice", "True/False" | 42 | | `prompt_template` | Custom prompt template (optional) | "Generate questions about {topic}..." | 43 | | `custom_instructions` | Additional instructions for question generation (optional) | "Focus on technical details." | 44 | | `response_model` | Custom response model (optional) | CustomModelClass | 45 | | `output_format` | Format for the output questions (optional) | "JSON", "PDF", "Text" | 46 | 47 | ## 🖋️ Example Output 48 | 49 | ```python 50 | MCQList( 51 | questions=[ 52 | MCQ( 53 | question="What is artificial intelligence primarily concerned with?", 54 | options=[ 55 | "Creating intelligent machines", 56 | "Developing faster computers", 57 | "Improving internet connectivity", 58 | "Designing user interfaces" 59 | ], 60 | correct_answer="Creating intelligent machines", 61 | explanation="Artificial intelligence focuses on creating machines that can perform tasks requiring human-like intelligence." 62 | ), 63 | # More questions... 64 | ] 65 | ) 66 | ``` 67 | 68 | ## 🌍 Supported Data Sources 69 | 70 | 1. **PDF Files** 📄: Provide a file path to generate questions from PDF content. 71 | 2. **URLs** 🌐: Input a web page URL to create questions from online content. 72 | 3. **Text Files** 🖋️: Provide text files for generating questions from custom content. 73 | 74 | ## ✨ Advanced Customization 75 | 76 | Enhance your MCQ generation with additional customization: 77 | 78 | - **Custom Prompt Templates:** Use the `prompt_template` parameter to provide specific instructions. 79 | - **Fine-Tune Outputs:** Leverage `custom_instructions` to focus on particular aspects of the source content. 80 | - **Flexible Output Formats:** Choose between JSON, PDF, or plain text for your generated questions. 81 | 82 | 83 | ## 📊 Pro Tips 84 | 85 | - **Refine the Source Content:** Use specific URLs or curated text for targeted question generation. 86 | - **Optimize Learning Objectives:** Adjust the `learning_objective` to align with your educational goals. 87 | - **Experiment with Difficulty Levels:** Tailor `difficulty_level` to your audience, ranging from "Beginner" to "Advanced." 88 | 89 | Ready to create high-quality MCQs? Dive in and let Educhain streamline your educational content creation! 
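As a quick illustration, here is a hedged sketch that combines a few of the options above. It uses only parameters documented in the table, plus the `show()` helper used elsewhere in these docs; treat it as a starting point rather than a definitive recipe:

```python
from educhain import Educhain

client = Educhain()

# Combine a data source with custom instructions to steer the output.
# "question_type" and "custom_instructions" come from the parameter table above.
focused_questions = client.qna_engine.generate_questions_from_data(
    source="https://example.com/article",   # any public article URL
    source_type="url",
    num=5,
    question_type="Multiple Choice",
    custom_instructions="Focus on key definitions and core concepts; avoid trivia.",
)

focused_questions.show()  # display the generated questions
```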
🚀📚 90 | 91 | -------------------------------------------------------------------------------- /cookbook/starter-apps/playground/pages/6_🎴_Flash Card.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from utils.models import client_model 3 | client = client_model() 4 | 5 | st.set_page_config(page_title="🧠 Flashcard Generator", layout="wide") 6 | st.markdown("

<h1>🃏 AI Flashcard Generator</h1>", unsafe_allow_html=True) 7 | st.markdown("<p>Generate clean, effective flashcards instantly using Gemini Flash + EduChain ⚡</p>
", unsafe_allow_html=True) 8 | st.divider() 9 | 10 | st.subheader("📋 Topic for Flashcards") 11 | 12 | with st.form(key="flashcard_form"): 13 | col1, col2 = st.columns([3, 1]) 14 | with col1: 15 | topic = st.text_input("🔍 Enter Topic", placeholder="e.g., Python Basics") 16 | with col2: 17 | num_flashcards = st.slider("🃏 No. of Flashcards", min_value=1, max_value=20, value=5) 18 | 19 | submit = st.form_submit_button("🚀 Generate Flashcards") 20 | 21 | if submit and topic: 22 | with st.spinner("Generating Flashcards using Gemini Flash..."): 23 | try: 24 | flashcards = client.content_engine.generate_flashcards( 25 | topic=topic, 26 | num=num_flashcards 27 | ) 28 | 29 | st.success("✅ Flashcards Generated Successfully!") 30 | st.markdown("---") 31 | for i, card in enumerate(flashcards.flashcards, start=1): 32 | st.markdown(f"### 🃏 Card {i}") 33 | st.markdown(f"**Q:** {card.front}") 34 | st.markdown(f"**A:** {card.back}") 35 | st.markdown("---") 36 | 37 | except Exception as e: 38 | st.error(f"❌ Error generating flashcards:\n\n{e}") 39 | 40 | st.caption("Crafted with ❤️ by EduChain + Gemini Flash ✨") 41 | 42 | with st.popover("Open popover"): 43 | st.markdown("Turn On Developer Mode?") 44 | Developer_Mode = st.checkbox("Check 'On' to Turn-on Developer Mode") 45 | 46 | if Developer_Mode: 47 | st.write("Welcome Developers!! Here is an in-depth explanation of all of the tools used here.") 48 | st.page_link("https://github.com/satvik314/educhain/blob/main/cookbook/features/generate_flashcards_with_educhain.ipynb", label="GitHub", icon="🔗") 49 | st.markdown(""" 50 | 📦 Key Initialization 51 | ----------------------------------- 52 | from educhain import Educhain, LLMConfig 53 | from langchain_google_genai import ChatGoogleGenerativeAI 54 | 55 | # Step 1: Setup the Gemini Flash LLM 56 | gemini_flash = ChatGoogleGenerativeAI( 57 | model="gemini-2.0-flash", 58 | google_api_key=GOOGLE_API_KEY 59 | ) 60 | 61 | # Step 2: Wrap the model in LLMConfig 62 | flash_config = LLMConfig(custom_model=gemini_flash) 63 | 64 | # Step 3: Create Educhain client using the model config 65 | client = Educhain(flash_config) 66 | 67 | 🧠 What Does client.content_engine.generate_flashcards() Do? 68 | -------------------------------------------------------------- 69 | This is the core function responsible for generating flashcards from a given topic. 70 | 71 | Example: 72 | flashcards = client.content_engine.generate_flashcards( 73 | topic="Python Basics", 74 | num=5 75 | ) 76 | 77 | 🔍 It likely does the following: 78 | - Crafts a structured flashcard generation prompt 79 | - Sends it to Gemini Flash 80 | - Parses and returns clean Q&A flashcards 81 | 82 | ✅ Sample Output: 83 | [ 84 | { 85 | front: "What is a variable in Python?", 86 | back: "A storage location for data with a name." 87 | }, 88 | ... 89 | ] 90 | 91 | ❤️ Summary 92 | Educhain makes flashcard generation effortless using LLMs. 93 | It's perfect for revision, study sessions, and spaced repetition. 
94 | """) -------------------------------------------------------------------------------- /cookbook/starter-apps/Origami_tutorial_generator/README.md: -------------------------------------------------------------------------------- 1 | # 📐 Paperfold.ai 2 | 3 | ![Python](https://img.shields.io/badge/python-≥3.13-blue) 4 | ![Streamlit](https://img.shields.io/badge/Streamlit-%F0%9F%8E%88-FF4B4B) 5 | ![License](https://img.shields.io/badge/License-MIT-green) 6 | [![Powered by OpenRouter](https://img.shields.io/badge/Powered%20By-OpenRouter-4F46E5)](https://openrouter.ai) 7 | 8 | > **⭐ Horizon Beta | 🌸 Make amazing origami just by uploading a picture! 🌸** 9 | 10 | PaperFold.AI is an intuitive, AI-powered web app that turns any image of an origami object into a **child-friendly, step-by-step folding guide**. Drop a photo, click **“Generate Tutorial”**, and watch as the Horizon Beta AI model explains where every fold should go—perfect for beginners, educators, and crafty learners of all ages. 11 | 12 | --- 13 | 14 | ## ✨ Features 15 | * 📷 **Upload-and-Create** – simple drag-and-drop or browse for your photo 16 | * 🧠 **AI-Powered Steps** – each fold described in beginner language with emojis 17 | * 🖥️ **Streamlined UI** – built with modern Streamlit for a smooth user experience 18 | * 🔑 **OpenRouter Integration** – configurable API key and gold-standard Horizon Beta model 19 | 20 | --- 21 | 22 | ## 📦 Quickstart 23 | 24 | ### 1. Clone the repo 25 | ```bash 26 | git clone https://github.com/your-org/paperfold.ai.git 27 | cd paperfold.ai 28 | ``` 29 | 30 | ### 2. Set up Python ≥ 3.13 31 | Using **uv**, pipenv, or vanilla venv: 32 | ```bash 33 | # uv (blazing-fast) 34 | uv venv 35 | source .venv/bin/activate 36 | uv pip install -r requirements.txt 37 | 38 | # or traditional 39 | python -m venv .venv 40 | source .venv/bin/activate 41 | pip install -r requirements.txt 42 | ``` 43 | 44 | By default, PaperFold.ai will install `educhain`, `langchain-openai`, and `streamlit`. 45 | 46 | ### 3. Get your OpenRouter API key 47 | 1. Register at [openrouter.ai](https://openrouter.ai/settings/keys) 48 | 2. Create a new key and copy it to the sidebar when you launch the app. 49 | 50 | ### 4. Launch locally 51 | ```bash 52 | streamlit run app.py 53 | # Your browser opens at http://localhost:8501 54 | ``` 55 | 56 | --- 57 | 58 | ## 🪧 Usage 59 | 1. **Start the App** – `streamlit run app.py` 60 | 2. **Enter API Key** – paste your OpenRouter API key in the left sidebar 61 | 3. **Upload Image** – drag a JPG/PNG/PNG of any origami piece into the uploader 62 | 4. **Generate Guide** – click **“✨ Generate Origami Tutorial”** 63 | 5. **Follow Steps** – easy, emoji-rich instructions appear dynamically 64 | 65 | --- 66 | 67 | ## 🏗️ Tech Stack 68 | | Layer | Technology | 69 | |-------|------------| 70 | | LLM & Reasoning | OpenRouter (`horizon-beta`) & Educhain | 71 | | Chat Model | `langchain-openai` ChatOpenAI wrapper | 72 | | UI & Frontend | Streamlit (Python) | 73 | | Requirements | uv/pip (see `pyproject.toml`) | 74 | | OS | macOS, Linux, Windows (Python ≥3.13 recommended) | 75 | 76 | --- 77 | 78 | ## 🧑‍💻 Contributing 79 | We ❤️ contributions! Here’s how to jump in: 80 | 81 | 1. **Fork & branch** 82 | `git checkout -b feature/your-awesome-feature` 83 | 84 | 2. **Install dev tools** 85 | ``` 86 | pip install ruff black pytest 87 | ``` 88 | 89 | 3. **Run checks** 90 | ``` 91 | ruff check . # lint 92 | black . --check # auto-format check 93 | pytest # tests (when added) 94 | ``` 95 | 96 | 4. 
**Commit with love** 97 | Use [Conventional Commits](https://www.conventionalcommits.org): 98 | `feat(sidebar): dark-mode switch` 99 | 100 | 5. **Open a Pull Request** – describe the problem, the fix, and add screenshots/GIFs for UI changes. 101 | 102 | --- 103 | 104 | ## 📜 License 105 | MIT © 2024 Build Fast with AI community 106 | 107 | --- 108 | 109 | Made with ❤️ by [**Build Fast with AI**](https://buildfastwithai.com) -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/src/pages/pedagogy/[name].jsx: -------------------------------------------------------------------------------- 1 | import { useRouter } from "next/router"; 2 | import dynamic from "next/dynamic"; 3 | import { useEffect, useState } from "react"; 4 | import { getPedagogies, generateContent } from "../../lib/api"; 5 | import ParamForm from "../../components/ParamForm"; 6 | import OutputRenderer from "../../components/OutputRenderer"; 7 | 8 | function toTitleCase(text) { 9 | return String(text || "") 10 | .replace(/_/g, " ") 11 | .replace(/\b\w/g, (m) => m.toUpperCase()); 12 | } 13 | 14 | function PedagogyPageInner() { 15 | const router = useRouter(); 16 | const { name, topic } = router.query; 17 | const [paramsDef, setParamsDef] = useState({}); 18 | const [output, setOutput] = useState(null); 19 | const [loading, setLoading] = useState(false); 20 | const [error, setError] = useState(""); 21 | 22 | useEffect(() => { 23 | if (name) { 24 | getPedagogies().then((data) => { 25 | setParamsDef(data[name]?.parameters || {}); 26 | }); 27 | } 28 | }, [name]); 29 | 30 | const handleGenerate = async (params) => { 31 | setError(""); 32 | const trimmedTopic = (topic || "").trim(); 33 | if (!trimmedTopic || trimmedTopic.length < 3) { 34 | setError("Please provide a valid topic (at least 3 characters) on the home page and try again."); 35 | return; 36 | } 37 | try { 38 | setLoading(true); 39 | console.log("Sending params:", params); // Debug log 40 | const result = await generateContent(trimmedTopic, name, params); 41 | console.log("Received result:", result); // Debug log 42 | setOutput(result.content); 43 | } catch (e) { 44 | const message = 45 | e?.response?.data?.detail || 46 | e?.message || 47 | "Failed to generate content. Please try again."; 48 | setError(String(message)); 49 | } finally { 50 | setLoading(false); 51 | } 52 | }; 53 | 54 | const displayName = toTitleCase(name); 55 | 56 | return ( 57 |
58 |
59 |
60 |

61 | {displayName || "Pedagogy"} 62 |

63 |

Topic: {topic}

64 | 65 | {!output ? ( 66 |
67 | {error && ( 68 |
69 | {error} 70 |
71 | )} 72 |
73 |

74 | Configure {displayName} Parameters 75 |

76 |

77 | Select the appropriate options for your learning context. All fields will use sensible defaults if not specified. 78 |

79 |
80 | 81 |
82 | ) : ( 83 |
84 | 85 |
86 | )} 87 |
88 |
89 | ); 90 | } 91 | 92 | export default dynamic(() => Promise.resolve(PedagogyPageInner), { ssr: false }); 93 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | plan.md 10 | test_pedagogy.py 11 | PEDAGOGY_USAGE_GUIDE.md 12 | CLAUDE.md 13 | CONTENT_ENHANCED_EXAMPLE.md 14 | test_content_fields.py 15 | 16 | .pypirc 17 | 18 | # Distribution / packaging 19 | .Python 20 | build/ 21 | develop-eggs/ 22 | dist/ 23 | downloads/ 24 | eggs/ 25 | .eggs/ 26 | lib/ 27 | lib64/ 28 | parts/ 29 | sdist/ 30 | var/ 31 | wheels/ 32 | share/python-wheels/ 33 | *.egg-info/ 34 | .installed.cfg 35 | *.egg 36 | MANIFEST 37 | 38 | # PyInstaller 39 | # Usually these files are written by a python script from a template 40 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 41 | *.manifest 42 | *.spec 43 | 44 | # Installer logs 45 | pip-log.txt 46 | pip-delete-this-directory.txt 47 | 48 | # Unit test / coverage reports 49 | htmlcov/ 50 | .tox/ 51 | .nox/ 52 | .coverage 53 | .coverage.* 54 | .cache 55 | nosetests.xml 56 | coverage.xml 57 | *.cover 58 | *.py,cover 59 | .hypothesis/ 60 | .pytest_cache/ 61 | cover/ 62 | 63 | # Translations 64 | *.mo 65 | *.pot 66 | 67 | # Django stuff: 68 | *.log 69 | local_settings.py 70 | db.sqlite3 71 | db.sqlite3-journal 72 | 73 | # Flask stuff: 74 | instance/ 75 | .webassets-cache 76 | 77 | # Scrapy stuff: 78 | .scrapy 79 | 80 | # Sphinx documentation 81 | docs/_build/ 82 | 83 | # PyBuilder 84 | .pybuilder/ 85 | target/ 86 | 87 | # Jupyter Notebook 88 | .ipynb_checkpoints 89 | 90 | # IPython 91 | profile_default/ 92 | ipython_config.py 93 | 94 | # pyenv 95 | # For a library or package, you might want to ignore these files since the code is 96 | # intended to run in multiple environments; otherwise, check them in: 97 | # .python-version 98 | 99 | # pipenv 100 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 101 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 102 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 103 | # install all needed dependencies. 104 | #Pipfile.lock 105 | 106 | # poetry 107 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 108 | # This is especially recommended for binary packages to ensure reproducibility, and is more 109 | # commonly ignored for libraries. 110 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 111 | #poetry.lock 112 | 113 | # pdm 114 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 115 | #pdm.lock 116 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 117 | # in version control. 118 | # https://pdm.fming.dev/#use-with-ide 119 | .pdm.toml 120 | 121 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 122 | __pypackages__/ 123 | 124 | # Celery stuff 125 | celerybeat-schedule 126 | celerybeat.pid 127 | 128 | # SageMath parsed files 129 | *.sage.py 130 | 131 | # Environments 132 | .env 133 | .venv 134 | env/ 135 | venv/ 136 | ENV/ 137 | env.bak/ 138 | venv.bak/ 139 | 140 | # Spyder project settings 141 | .spyderproject 142 | .spyproject 143 | 144 | # Rope project settings 145 | .ropeproject 146 | 147 | # mkdocs documentation 148 | /site 149 | 150 | # mypy 151 | .mypy_cache/ 152 | .dmypy.json 153 | dmypy.json 154 | 155 | # Pyre type checker 156 | .pyre/ 157 | 158 | # pytype static type analyzer 159 | .pytype/ 160 | 161 | # Cython debug symbols 162 | cython_debug/ 163 | 164 | # PyCharm 165 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 166 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 167 | # and can be added to the global gitignore or merged into this file. For a more nuclear 168 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 169 | #.idea/ 170 | 171 | Desktop.ini 172 | .vscode -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/README.md: -------------------------------------------------------------------------------- 1 | # 🧠 Educhain Pedagogy 2 | 3 | [![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) 4 | [![Python](https://img.shields.io/badge/Python-3.13+-3776ab.svg)](https://www.python.org/downloads/) 5 | [![JavaScript](https://img.shields.io/badge/TypeScript-Next.js-3178c6.svg)](https://nextjs.org/docs) 6 | [![FastAPI](https://img.shields.io/badge/-FastAPI-009485.svg?logo=fastapi)](https://fastapi.tiangolo.com) 7 | [![TailwindCSS](https://img.shields.io/badge/TailwindCSS-4-06b6d4.svg)](https://tailwindcss.com) 8 | 9 | 10 | Generate tailored, style-specific learning experiences across 8 pedagogical approaches—powered by Educhain + GPT-4o. 11 | 12 |
13 | 14 | --- 15 | 16 | ## 🧩 Pedagogies Supported 17 | - Blooms Taxonomy 🎓 18 | - Socratic Questioning 🧠 19 | - Project Based Learning 🧩 20 | - Flipped Classroom 🔁 21 | - Inquiry Based Learning 🔎 22 | - Constructivist 🏗️ 23 | - Gamification 🎮 24 | - Peer Learning 🤝 25 | - Game-Based Learning 🎮 26 | 27 | 28 | --- 29 | 30 | ## ⚡ Quickstart 31 | 32 | ### 1. Prerequisites 33 | - **Python 3.13+** & **uv** (or pip) 34 | - Node.js ≥ 20 & **pnpm** or **npm** 35 | 36 | ```bash 37 | # install uv if you don’t have it 38 | pip install uv 39 | ``` 40 | 41 | ### 2. Clone & install 42 | 43 | ```bash 44 | git clone https://github.com/YOUR_ORG/educhain-pedagogy.git 45 | cd educhain-pedagogy 46 | 47 | # backend 48 | cd backend 49 | `uv add -r requirements.txt ` or `pip install -r requirements.txt` 50 | 51 | de # or `npm install` 52 | ``` 53 | 54 | > The project uses **git-locked Educhain** (`ai-dev` branch). No extra config needed. 55 | 56 | ### 3. Run locally 57 | 58 | #### Backend 59 | ```bash 60 | uv run uvicorn main:app --reload # http://localhost:8000 61 | ``` 62 | 63 | #### Frontend 64 | ```bash 65 | npm run dev # http://localhost:3000 66 | ``` 67 | 68 | ### 4. Environment 69 | A `.env` in `backend/` is automatically loaded via `dotenv`. 70 | Only required key: 71 | ``` 72 | OPENAI_API_KEY=sk-XXXXXXXX 73 | ``` 74 | ### 5 . Backend url in frontend 75 | In `frontend/src/lib/_app.jsx`, set the backend URL of your backend deployment (or `http://localhost:8000` for local dev): 76 | ``` 77 | 78 | --- 79 | 80 | ## 🛠️ Tech Stack 81 | 82 | | Layer | Stack | 83 | |--------------|-----------------------------------------------------------| 84 | | Backend | Python, FastAPI, Educhain (GPT-4o), Pydantic | 85 | | Frontend | Next.js 15, TailwindCSS 4, React 19, Axios, Lucide Icons | 86 | | Package Mgmt | uv (Python) & npm (Node) | 87 | | Deployment | Render (free tier) , Vercel | 88 | 89 | --- 90 | 91 | ## 📘 API Usage Examples 92 | 93 | ### 1. List available pedagogies 94 | ```bash 95 | curl https://educhain-pedagogy.onrender.com/available-pedagogies 96 | ``` 97 | 98 | ### 2. Generate content 99 | ```bash 100 | curl -X POST https://educhain-pedagogy.onrender.com/generate-content \ 101 | -H "Content-Type: application/json" \ 102 | -d '{ 103 | "topic": "Photosynthesis in Grade 8", 104 | "pedagogy": "blooms_taxonomy", 105 | "params": { 106 | "grade_level": "8th Grade", 107 | "target_level": "Intermediate" 108 | } 109 | }' 110 | ``` 111 | 112 | --- 113 | 114 | ## 📁 Project Structure 115 | 116 | ``` 117 | educhain-pedagogy/ 118 | ├── backend/ 119 | │ ├── main.py 120 | │ ├── app/ 121 | │ │ ├── api/ 122 | │ │ ├── models/ 123 | │ │ └── services/ 124 | │ └── pyproject.toml 125 | ├── frontend/ 126 | │ ├── src/ 127 | │ │ ├── components/ 128 | │ │ ├── lib/ 129 | │ │ └── pages/ 130 | │ └── next.config.js 131 | └── README.md 132 | ``` 133 | 134 | --- 135 | 136 | ## 🤝 Contributing 137 | 138 | 1. Fork the repository 139 | 2. Create a feature branch: `git checkout -b feat/` 140 | 3. Commit & push: `git commit -m 'feat: added ____'` 141 | 4. 
Open a Pull Request 🎉 142 | 143 | 144 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/src/app/page.js: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import { useEffect, useRef, useState } from "react"; 4 | import { getPedagogies } from "../lib/api"; 5 | import PedagogyCard from "../components/PedagogyCard"; 6 | import { useRouter } from "next/navigation"; 7 | 8 | export default function Home() { 9 | const [pedagogies, setPedagogies] = useState({}); 10 | const [topic, setTopic] = useState(""); 11 | const [topicError, setTopicError] = useState(""); 12 | const topicRef = useRef(null); 13 | const router = useRouter(); 14 | 15 | useEffect(() => { 16 | getPedagogies().then(setPedagogies); 17 | }, []); 18 | 19 | return ( 20 |
21 | {/* Decorative background */} 22 |
23 | 24 |
25 | {/* Hero */} 26 |
27 |

28 | Educhain Pedagogy 29 |

30 |

31 | Generate tailored learning experiences across pedagogical styles. Choose a pedagogy, set your topic, and explore. 32 |

33 |
34 | 35 | {/* Topic input */} 36 |
37 | { 44 | setTopic(e.target.value); 45 | if (topicError) setTopicError(""); 46 | }} 47 | className={`w-full p-3 rounded-lg bg-black/40 border text-orange-100 placeholder-orange-200/50 focus:outline-none focus:ring-2 ${ 48 | topicError 49 | ? "border-red-500/60 focus:ring-red-500/40" 50 | : "border-orange-500/30 focus:ring-orange-500/40" 51 | }`} 52 | /> 53 | {topicError ? ( 54 |
{topicError}
55 | ) : ( 56 |
Tip: Be specific for richer outputs (e.g., “Photosynthesis for grade 8”).
57 | )} 58 |
59 | 60 | {/* Section title */} 61 |
62 |
63 | Choose a pedagogy 64 |
65 |
66 | 67 | {/* Cards grid */} 68 |
69 | {Object.entries(pedagogies).map(([name, info]) => ( 70 | { 75 | const trimmed = topic.trim(); 76 | if (!trimmed) { 77 | setTopicError("Topic is required."); 78 | topicRef.current?.focus(); 79 | return; 80 | } 81 | if (trimmed.length < 3) { 82 | setTopicError("Enter at least 3 characters."); 83 | topicRef.current?.focus(); 84 | return; 85 | } 86 | router.push(`/pedagogy/${name}?topic=${encodeURIComponent(trimmed)}`); 87 | }} 88 | /> 89 | ))} 90 |
91 |
92 |
93 | ); 94 | } 95 | -------------------------------------------------------------------------------- /cookbook/starter-apps/AI CourtRoom/README.md: -------------------------------------------------------------------------------- 1 | # ⚖️ AI Courtroom 2 | 3 | [![Build Status](https://img.shields.io/badge/Build-Passing-brightgreen.svg)](https://github.com/your-org/ai-courtroom) 4 | [![Streamlit](https://img.shields.io/badge/Powered%20by-Streamlit-FF4B4B.svg?logo=streamlit)](https://streamlit.io) 5 | [![Cerebras](https://img.shields.io/badge/-Cerebras-000000.svg?logo=fastapi)](https://cerebras.ai) 6 | [![Python](https://img.shields.io/badge/Python-3.13+-3776ab.svg?logo=python&logoColor=white)](https://python.org) 7 | [![License](https://img.shields.io/badge/License-MIT-red.svg)](LICENSE) 8 | 9 | --- 10 | 11 | ### 🔍 What is AI Courtroom? 12 | AI Courtroom is a **fully-automated mock-trial simulator** that ingests real legal cases from the web, extracts key facts using **Educhain + Cerebras AI**, and role-plays a multi-character courtroom (Judge ⚖️ → Prosecutor ⚔️ → Defense 🛡️ → Defendant 👤 → Verdict 🔨) in the style you choose—**Serious**, **Dramatic**, or outright **Comedic**. 13 | 14 | Just paste a Wikipedia article, news report, blog post, or any other case URL and watch the AI attorneys battle it out in seconds. 15 | 16 | --- 17 | 18 | ## 🚀 Quick Start 19 | 20 | | Requirement | Command | 21 | |-------------|---------| 22 | | Python | ≥ 3.13 | 23 | | OS | macOS / Linux / Windows (WSL) | 24 | 25 | ### 1. Clone & enter the repo 26 | ```bash 27 | git clone https://github.com/your-org/ai-courtroom.git 28 | cd ai-courtroom 29 | ``` 30 | 31 | ### 2. Create & activate a virtual environment 32 | ```bash 33 | python -m venv venv 34 | source venv/bin/activate # Windows: venv\Scripts\activate 35 | ``` 36 | 37 | ### 3. Install dependencies 38 | ```bash 39 | pip install -r requirements.txt # pip 40 | # OR (if you have pyproject.toml): 41 | # poetry install 42 | ``` 43 | 44 | ### 4. Get a Cerebras API key 45 | 1. Sign up at [Cerebras AI](https://cerebras.ai) 46 | 2. Copy your key from the dashboard 47 | 3. (Optional) Store it as an env variable: 48 | ```bash 49 | export CEREBRAS_API_KEY="cks_××××××" 50 | ``` 51 | 52 | ### 5. Launch the app 53 | ```bash 54 | streamlit run app.py 55 | ``` 56 | A browser tab will open at `http://localhost:8501`. 57 | 58 | --- 59 | 60 | ## 🖥️ Usage 61 | 1. **Sidebar** → paste your **Cerebras API key** 62 | 2. Pick **Courtroom style** (Serious, Dramatic or Comedic) 63 | 3. Set the **number of facts/questions** to extract (1 – 10) 64 | 4. **Paste the URL** to any public legal case (Wikipedia, news, court filings, etc.) 65 | 5. Click **“Ingest case & generate facts”** 66 | 6. Sit back & watch the AI roles rehearse the case live. 67 | 68 | --- 69 | 70 | ## 🧱 Tech Stack 71 | 72 | | Layer | Tech | Purpose | 73 | |-------|------|---------| 74 | | **Frontend** | Streamlit | Interactive web UI | 75 | | **LLM Core** | Cerebras `gpt-oss-120b` | Lightning-fast inference | 76 | | **NLP Chain** | EduChain | Auto-Q&A & fact extraction | 77 | | **Python Version** | ≥ 3.13 | All major deps compiled for it | 78 | | **Dependency Mgmt** | `requirements.txt` & `pyproject.toml` | Pip or Poetry ready | 79 | 80 | --- 81 | 82 | ## 🛠️ Development & Contribution 83 | 84 | ### Pull-Request flow 85 | 1. Fork the repo 86 | 2. Create a feature branch: `git checkout -b feat/amazing-idea` 87 | 3. Commit meaningful messages (`feat:`, `fix:`, `chore:` prefixes) 88 | 4. 
Run the lint & test helpers (if any are added) 89 | ```bash 90 | pre-commit run --all-files 91 | pytest # optional future addition 92 | ``` 93 | 5. Push & open a PR against `main`. 94 | Every PR is auto-checked by the status badge at the top of this file. 95 | 96 | ### Local dev tips 97 | - Use `streamlit run app.py --server.port=3000` to bind a custom port. 98 | - Set `STREAMLIT_THEME_BASE="dark"` in `.env` for a slick dark mode. 99 | 100 | --- 101 | 102 | ## 📦 Project Structure 103 | ``` 104 | ai-courtroom 105 | ├─ app.py # Main Streamlit entry 106 | ├─ requirements.txt # Quick install with pip 107 | ├─ pyproject.toml # Poetry / PEP-621 compliant spec 108 | └─ README.md # This file 109 | ``` 110 | 111 | --- 112 | 113 | ## 📄 License 114 | MIT © [Build Fast with AI](https://buildfastwithai.com) 115 | 116 | --- 117 | 118 | **Want to showcase this?** 119 | The repo is ready for **Render**/**Railway** deployments using one-click buttons—just switch the runtime version to `3.13-slim` and inject `CEREBRAS_API_KEY` as an **environment variable** in the deployment dashboard. -------------------------------------------------------------------------------- /cookbook/readme.md: -------------------------------------------------------------------------------- 1 | # 📘 Educhain Cookbook Repository 2 | 3 | Welcome to the **Educhain Cookbook Repository**! Your one-stop resource for creating quizzes, study guides, and more using AI. Below is a quick-access table to navigate through the categories. 👇 4 | 5 | ----- 6 | 7 | ## 🗂️ Quick Navigation 8 | 9 | | **Category** | **Description** | **Dropdown** | 10 | |--------------------------|-----------------------------------------|-------------------------------------| 11 | | 🔥 **Features** | Advanced features of Educhain. | [Explore Features](#features) | 12 | | 🛠️ **Use Cases** | Practical applications and examples. | [Explore Use Cases](#use-cases) | 13 | | 🌟 **Getting Started** | Beginner-friendly guides to get started. | [Explore Getting Started](#getting-started) | 14 | | 🛡️ **Providers** | AI model integrations. 
| [Explore Providers](#providers) | 15 | 16 | --- 17 | 18 | ## 🔥 Features 19 | 20 | Explore Educhain's powerful tools to enhance your learning experience: 21 | 22 | - [Generate MCQs from Data](features/educhain_generate_mcqs_from_data.ipynb) 23 | - [Generate Flashcards (Basics)](features/generate_flashcards_basics.ipynb) 24 | - [Bulk Question Generation](features/bulk_question_generation_using_educhain.ipynb) 25 | - [Visual Question Generation](features/visual_question_generation_using_educhain.ipynb) 26 | - [Generate Questions from YouTube](features/generate_questions_from_youtube.ipynb) 27 | - [Career Connection](features/educhain_career_connection.ipynb) 28 | - [Generate Lesson Plans](features/educhain_generate_lesson_plan.ipynb) 29 | - [Generate Study Guides](features/educhain_generate_study_guide.ipynb) 30 | 31 | ## 🛠️ Use Cases 32 | 33 | Real-world applications of Educhain: 34 | 35 | - [Convert Webpages to Quizzes](use-cases/convert_any_webpage_to_quiz.ipynb) 36 | - [Generate Quizzes from Transcripts](use-cases/generate_quiz_using_transcripts_and_educhain.ipynb) 37 | - [Process Long PDFs into Quizzes](use-cases/long_pdfs_to_quiz.ipynb) 38 | - [Multilingual MCQ Generation with Sutra](use-cases/multilingual_mcq_generation_using_sutra.ipynb) 39 | - [Quiz on Latest News](use-cases/quiz_on_latest_news.ipynb) 40 | - [World's Fastest Quiz](use-cases/educhain_worlds_fastest_quiz.ipynb) 41 | - [Flashcard Use Case Examples](use-cases/generate_flashcard_usecase_examples.ipynb) 42 | - [Llama4 Integration with Groq](use-cases/educhain_with_llama4_using_groq.ipynb) 43 | 44 | ## 🌟 Getting Started 45 | 46 | New to Educhain? Start here: 47 | 48 | - [Educhain Starter Guide](getting-started/educhain_starter_guide.ipynb) 49 | 50 | ## 🛡️ Providers 51 | 52 | Integrate Educhain with various AI providers: 53 | 54 | - [Educhain with Gemini 2.0](providers/educhain_with_gemini.ipynb) 55 | - [Educhain with Groq](providers/educhain_with_groq.ipynb) 56 | - [Educhain with Mistral](providers/educhain_with_mistral.ipynb) 57 | - [Educhain with NVIDIA](providers/educhain_with_nvidia.ipynb) 58 | - [Educhain with OpenRouter](providers/educhain_with_openrouter.ipynb) 59 | - [Educhain with SambaNova Cloud](providers/educhain_with_sambanova.ipynb) 60 | - [Educhain with Together AI](providers/educhain_with_togetherai.ipynb) 61 | - [Educhain with Cohere](providers/educhain_with_cohere.ipynb) 62 | - [Educhain with Claude 3.5 Sonnet](providers/educhain_with_claude.ipynb) 63 | 64 | --- 65 | 66 | ## 🚀 Getting Started 67 | 68 | 1. Clone the repository: 69 | ```bash 70 | git clone https://github.com/satvik314/educhain 71 | ``` 72 | 2. Navigate to the folder: 73 | ```bash 74 | cd educhain/cookbook 75 | ``` 76 | 3. Open the desired `.ipynb` file in your Jupyter Notebook environment and start creating! 77 | 78 | --- 79 | 80 | ## ✨ Features Highlights 81 | - **Comprehensive Quiz Creation**: Generate quizzes from PDFs, web pages, YouTube videos, and transcripts. 82 | - **Custom Flashcards and Study Guides**: Tools tailored for personalized learning. 83 | - **Fast and Efficient**: World’s fastest quiz generation engine included. 84 | - **AI Integration**: Seamless integration with Claude 3.5 Sonnet and more. 85 | 86 | 87 | 88 | --- 89 | 90 | Happy learning! 🎉 91 | 92 | ### Features of This Format: 93 | 1. **Dropdown Table:** Provides a quick glance and direct links to sections. 94 | 2. **Detailed Sections Below:** Each category is elaborated for more context. 95 | 3. 
**Dynamic Navigation:** Easy to update the table or the corresponding sections. 96 | 4. **Clean Design:** User-friendly, visually structured, and easy to maintain. 97 | 98 | Let me know if you’d like further tweaks! 😊 99 | -------------------------------------------------------------------------------- /cookbook/starter-apps/multilingual_chatbot/app.py: -------------------------------------------------------------------------------- 1 | from langchain_openai import ChatOpenAI 2 | from langchain.schema import HumanMessage, SystemMessage, AIMessage 3 | import streamlit as st 4 | import os 5 | 6 | # Set up Streamlit page 7 | st.title("🌍 SUTRA Multilingual Chatbot") 8 | st.write("⚡ Powered by SUTRA AI with support for multiple languages") 9 | 10 | # Language options 11 | LANGUAGES = { 12 | "English": "en", 13 | "Hindi": "hi", 14 | "Marathi": "mr", 15 | "Telugu": "te", 16 | "Tamil": "ta", 17 | "Bengali": "bn", 18 | "Gujarati": "gu", 19 | "Kannada": "kn", 20 | "Malayalam": "ml", 21 | "Punjabi": "pa", 22 | "French": "fr", 23 | "Spanish": "es" 24 | } 25 | 26 | # Sidebar UI 27 | st.sidebar.image("https://framerusercontent.com/images/3Ca34Pogzn9I3a7uTsNSlfs9Bdk.png", use_column_width="auto") 28 | st.sidebar.title("Settings") 29 | 30 | st.sidebar.markdown("🔑 Get your API key from [Two AI Sutra](https://www.two.ai/sutra/api)") 31 | 32 | # API key input 33 | st.session_state.sutra_api_key = st.sidebar.text_input("Enter your SUTRA API Key", type="password") 34 | 35 | # Language selection 36 | selected_lang = st.sidebar.selectbox( 37 | "Select language for responses:", 38 | options=list(LANGUAGES.keys()), 39 | index=0 40 | ) 41 | st.session_state.language = LANGUAGES[selected_lang] 42 | 43 | # Model details 44 | st.sidebar.divider() 45 | st.sidebar.markdown("**Model Details**") 46 | st.sidebar.caption("Running: `sutra-v2`") 47 | st.sidebar.caption("Supports multiple Indian and international languages") 48 | 49 | # New chat button 50 | st.sidebar.divider() 51 | if st.sidebar.button("🔄 Start New Chat", use_container_width=True): 52 | st.session_state.messages = [ 53 | SystemMessage(content=f"You are a helpful AI assistant. Respond in {selected_lang} language when appropriate.") 54 | ] 55 | st.rerun() 56 | 57 | # Initialize chat history with system message 58 | if "messages" not in st.session_state: 59 | st.session_state.messages = [ 60 | SystemMessage(content="You are a helpful AI assistant. Respond in English by default.") 61 | ] 62 | 63 | # Display welcome message in selected language 64 | welcome_messages = { 65 | "en": "Hello! How can I help you today?", 66 | "hi": "नमस्ते! मैं आपकी कैसे मदद कर सकता हूँ?", 67 | "mr": "नमस्कार! मी तुमची कशी मदत करू शकतो?", 68 | "te": "హలో! నేను మీకు ఎలా సహాయం చేయగలను?", 69 | "ta": "வணக்கம்! நான் உங்களுக்கு எப்படி உதவ முடியும்?", 70 | "fr": "Bonjour ! Comment puis-je vous aider aujourd'hui ?", 71 | # Add more language greetings as needed 72 | } 73 | 74 | with st.chat_message("assistant"): 75 | st.write(welcome_messages.get(st.session_state.language, "Hello! 
How can I help you today?")) 76 | 77 | # Display chat history 78 | for message in st.session_state.messages[1:]: # Skip system message 79 | if isinstance(message, HumanMessage): 80 | with st.chat_message("user"): 81 | st.write(message.content) 82 | else: 83 | with st.chat_message("assistant"): 84 | st.write(message.content) 85 | 86 | # Chat input 87 | if prompt := st.chat_input("Type your message here..."): 88 | # Validate API key 89 | if not st.session_state.sutra_api_key: 90 | st.error("Please enter your Sutra API key in the sidebar") 91 | st.stop() 92 | 93 | # Initialize the ChatOpenAI model with Sutra 94 | try: 95 | chat = ChatOpenAI( 96 | api_key=st.session_state.sutra_api_key, 97 | base_url="https://api.two.ai/v2", 98 | model="sutra-v2" 99 | ) 100 | 101 | # Add user message to chat history 102 | st.session_state.messages.append(HumanMessage(content=prompt)) 103 | 104 | # Display user message 105 | with st.chat_message("user"): 106 | st.write(prompt) 107 | 108 | # Get AI response 109 | with st.chat_message("assistant"): 110 | message_placeholder = st.empty() 111 | full_response = "" 112 | 113 | # Stream the response 114 | for chunk in chat.stream(st.session_state.messages): 115 | if chunk.content: 116 | full_response += chunk.content 117 | message_placeholder.write(full_response) 118 | 119 | # Update with final response 120 | message_placeholder.write(full_response) 121 | 122 | # Add AI response to chat history 123 | st.session_state.messages.append(AIMessage(content=full_response)) 124 | 125 | except Exception as e: 126 | st.error(f"An error occurred: {str(e)}") -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/src/app/globals.css: -------------------------------------------------------------------------------- 1 | @import "tailwindcss"; 2 | @import "tw-animate-css"; 3 | 4 | @custom-variant dark (&:is(.dark *)); 5 | 6 | @theme inline { 7 | --color-background: var(--background); 8 | --color-foreground: var(--foreground); 9 | --font-sans: var(--font-geist-sans); 10 | --font-mono: var(--font-geist-mono); 11 | --color-sidebar-ring: var(--sidebar-ring); 12 | --color-sidebar-border: var(--sidebar-border); 13 | --color-sidebar-accent-foreground: var(--sidebar-accent-foreground); 14 | --color-sidebar-accent: var(--sidebar-accent); 15 | --color-sidebar-primary-foreground: var(--sidebar-primary-foreground); 16 | --color-sidebar-primary: var(--sidebar-primary); 17 | --color-sidebar-foreground: var(--sidebar-foreground); 18 | --color-sidebar: var(--sidebar); 19 | --color-chart-5: var(--chart-5); 20 | --color-chart-4: var(--chart-4); 21 | --color-chart-3: var(--chart-3); 22 | --color-chart-2: var(--chart-2); 23 | --color-chart-1: var(--chart-1); 24 | --color-ring: var(--ring); 25 | --color-input: var(--input); 26 | --color-border: var(--border); 27 | --color-destructive: var(--destructive); 28 | --color-accent-foreground: var(--accent-foreground); 29 | --color-accent: var(--accent); 30 | --color-muted-foreground: var(--muted-foreground); 31 | --color-muted: var(--muted); 32 | --color-secondary-foreground: var(--secondary-foreground); 33 | --color-secondary: var(--secondary); 34 | --color-primary-foreground: var(--primary-foreground); 35 | --color-primary: var(--primary); 36 | --color-popover-foreground: var(--popover-foreground); 37 | --color-popover: var(--popover); 38 | --color-card-foreground: var(--card-foreground); 39 | --color-card: var(--card); 40 | --radius-sm: calc(var(--radius) - 4px); 41 | --radius-md: 
calc(var(--radius) - 2px); 42 | --radius-lg: var(--radius); 43 | --radius-xl: calc(var(--radius) + 4px); 44 | } 45 | 46 | :root { 47 | --radius: 0.625rem; 48 | --background: oklch(1 0 0); 49 | --foreground: oklch(0.145 0 0); 50 | --card: oklch(1 0 0); 51 | --card-foreground: oklch(0.145 0 0); 52 | --popover: oklch(1 0 0); 53 | --popover-foreground: oklch(0.145 0 0); 54 | --primary: oklch(0.205 0 0); 55 | --primary-foreground: oklch(0.985 0 0); 56 | --secondary: oklch(0.97 0 0); 57 | --secondary-foreground: oklch(0.205 0 0); 58 | --muted: oklch(0.97 0 0); 59 | --muted-foreground: oklch(0.556 0 0); 60 | --accent: oklch(0.97 0 0); 61 | --accent-foreground: oklch(0.205 0 0); 62 | --destructive: oklch(0.577 0.245 27.325); 63 | --border: oklch(0.922 0 0); 64 | --input: oklch(0.922 0 0); 65 | --ring: oklch(0.708 0 0); 66 | --chart-1: oklch(0.646 0.222 41.116); 67 | --chart-2: oklch(0.6 0.118 184.704); 68 | --chart-3: oklch(0.398 0.07 227.392); 69 | --chart-4: oklch(0.828 0.189 84.429); 70 | --chart-5: oklch(0.769 0.188 70.08); 71 | --sidebar: oklch(0.985 0 0); 72 | --sidebar-foreground: oklch(0.145 0 0); 73 | --sidebar-primary: oklch(0.205 0 0); 74 | --sidebar-primary-foreground: oklch(0.985 0 0); 75 | --sidebar-accent: oklch(0.97 0 0); 76 | --sidebar-accent-foreground: oklch(0.205 0 0); 77 | --sidebar-border: oklch(0.922 0 0); 78 | --sidebar-ring: oklch(0.708 0 0); 79 | } 80 | 81 | .dark { 82 | --background: oklch(0.145 0 0); 83 | --foreground: oklch(0.985 0 0); 84 | --card: oklch(0.205 0 0); 85 | --card-foreground: oklch(0.985 0 0); 86 | --popover: oklch(0.205 0 0); 87 | --popover-foreground: oklch(0.985 0 0); 88 | --primary: oklch(0.922 0 0); 89 | --primary-foreground: oklch(0.205 0 0); 90 | --secondary: oklch(0.269 0 0); 91 | --secondary-foreground: oklch(0.985 0 0); 92 | --muted: oklch(0.269 0 0); 93 | --muted-foreground: oklch(0.708 0 0); 94 | --accent: oklch(0.269 0 0); 95 | --accent-foreground: oklch(0.985 0 0); 96 | --destructive: oklch(0.704 0.191 22.216); 97 | --border: oklch(1 0 0 / 10%); 98 | --input: oklch(1 0 0 / 15%); 99 | --ring: oklch(0.556 0 0); 100 | --chart-1: oklch(0.488 0.243 264.376); 101 | --chart-2: oklch(0.696 0.17 162.48); 102 | --chart-3: oklch(0.769 0.188 70.08); 103 | --chart-4: oklch(0.627 0.265 303.9); 104 | --chart-5: oklch(0.645 0.246 16.439); 105 | --sidebar: oklch(0.205 0 0); 106 | --sidebar-foreground: oklch(0.985 0 0); 107 | --sidebar-primary: oklch(0.488 0.243 264.376); 108 | --sidebar-primary-foreground: oklch(0.985 0 0); 109 | --sidebar-accent: oklch(0.269 0 0); 110 | --sidebar-accent-foreground: oklch(0.985 0 0); 111 | --sidebar-border: oklch(1 0 0 / 10%); 112 | --sidebar-ring: oklch(0.556 0 0); 113 | } 114 | 115 | @layer base { 116 | * { 117 | @apply border-border outline-ring/50; 118 | } 119 | body { 120 | @apply bg-background text-foreground; 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Origami_tutorial_generator/app.py: -------------------------------------------------------------------------------- 1 | # app.py 2 | import streamlit as st 3 | import tempfile 4 | from solver import setup_educhain, generate_origami_steps 5 | 6 | st.set_page_config(page_title="📐 PaperFold.AI", layout="centered", page_icon="🧻") 7 | 8 | st.title("📐 PaperFold.AI") 9 | st.subheader("⭐ Horizon Beta ✂️ Educhain ⭐") 10 | st.markdown("🌸 Make amazing origami just by uploading a picture! 
🌸") 11 | 12 | # --- API Key --- 13 | with st.sidebar: 14 | st.markdown( 15 | "", unsafe_allow_html=True 25 | ) 26 | 27 | st.header("🔐 API Settings") 28 | api_key = st.text_input("Enter your OpenRouter API Key", type="password") 29 | st.markdown("---") 30 | st.markdown("Model: `Horizon Beta`") 31 | st.markdown("---") 32 | st.markdown(""" """, unsafe_allow_html=True) 35 | 36 | if not api_key: 37 | st.warning("Please enter your OpenRouter API key in the sidebar and press enter to continue.") 38 | 39 | # --- Image Upload Only --- 40 | uploaded_file = st.file_uploader("📷 Upload an image of your origami object", type=["jpg", "jpeg", "png"]) 41 | 42 | image_path = None 43 | if uploaded_file: 44 | st.image(uploaded_file, caption="🖼️ Your Origami", use_container_width=False, width=250) 45 | with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp: 46 | tmp.write(uploaded_file.read()) 47 | image_path = tmp.name 48 | 49 | 50 | # --- Generate Button --- 51 | if image_path and st.button("✨ Generate Origami Tutorial"): 52 | with st.spinner("🧠 Thinking..."): 53 | try: 54 | educhain_client = setup_educhain(api_key) 55 | result = generate_origami_steps(image_path, educhain_client) 56 | 57 | # Check if the result is a dictionary or a SolvedDoubt object 58 | if isinstance(result, dict): 59 | explanation = result.get("explanation", "") 60 | steps = result.get("steps", []) 61 | notes = result.get("additional_notes", "") 62 | else: 63 | explanation = getattr(result, "explanation", "") 64 | steps = getattr(result, "steps", []) 65 | notes = getattr(result, "additional_notes", "") 66 | 67 | # 📋 Steps 68 | st.markdown( 69 | """ 70 |
71 |

📋 Step-by-step Folding Guide

72 |
    73 | """, unsafe_allow_html=True 74 | ) 75 | 76 | for step in steps: 77 | cleaned_step = step.strip().replace("\n", "
    ") 78 | st.markdown( 79 | f""" 80 |
  • 81 | {cleaned_step} 82 |
  • 83 | """, unsafe_allow_html=True 84 | ) 85 | 86 | st.markdown("
", unsafe_allow_html=True) 87 | 88 | # # 📝 Additional Notes 89 | # if notes: 90 | # st.markdown( 91 | # f""" 92 | #
93 | #

📝 Keep in mind:

94 | #

{notes}

95 | #
96 | # """, unsafe_allow_html=True 97 | # ) 98 | 99 | except Exception as e: 100 | st.error(f"❌ Failed to generate tutorial: {str(e)}") -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # 🎓 Educhain Documentation 2 | 3 | Welcome to the Educhain documentation! 🚀 Educhain is a powerful Python package that leverages Generative AI to create engaging and personalized educational content. 4 | 5 | https://www.buildfastwithai.com/ 6 | 7 | ## 🚀 Quick Links 8 | 9 | | 📚 Getting Started | 🌟 Features | 🛠️ Advanced | 🤝 Community | 10 | |:----------------:|:---------:|:----------:|:-----------:| 11 | | [🔧 Installation](getting-started/installation.md) | [📝 MCQ Generation](features/mcq_generation.md) | [🎨 Custom Prompts](advanced-usage/custom-prompts.md) | [👥 Contributing](contributing.md) | 12 | | [🏃‍♂️ Quick Start](getting-started/quick-start.md) | [📊 MCQ_from_data](features/mcq_from_data.md) | [🤖 LLM Models](advanced-usage/llm-models.md) | [💬 Discord](https://discord.gg/educhain) | 13 | | [⚙️ Configuration](getting-started/configuration.md) | [📤 Export Options](features/export-options.md) | [📚 Data Sources](advanced-usage/data-sources.md) | [🌐 Website](https://educhain.in) | 14 | 15 | ## 📊 Why Educhain? 16 | 17 | Educhain consistently outperforms traditional methods in content generation speed and quality. Our AI-powered platform enables educators to create high-quality learning materials in minutes instead of hours. [Learn more about our performance](resources/case-studies.md) 18 | 19 | ## 🌟 Key Features
20 | 21 | ### Content Generation 22 | - 📝 Generate Multiple Choice Questions (MCQs) with explanations 23 | - 📚 Create flashcards for effective studying 24 | - 📊 Build comprehensive lesson plans with objectives and activities 25 | - 📗 Generate study guides and educational summaries 26 | 27 | ### Technical Capabilities 28 | - 🤖 Support for various LLM models (Gemini, GPT-4, Claude, etc.) 29 | - 🌐 Multilingual content generation and preservation 30 | - 📷 Visual question generation from images 31 | - 📹 Content extraction from YouTube videos 32 | 33 | ### Integration & Export 34 | - 📁 Export to multiple formats (JSON, PDF, CSV, DOCX) 35 | - 🔗 Generate questions from URLs, PDFs, and text 36 | - 🎨 Customizable prompt templates 37 | - 🔥 Streamlit integration for building educational apps 38 | 39 | ## 🚀 Get Started in Minutes 40 | 41 | ```python 42 | from educhain import Educhain 43 | 44 | client = Educhain() 45 | questions = client.qna_engine.generate_questions( 46 | topic="Indian History", 47 | custom_instructions="Include questions about Maharana Pratap", 48 | num=5 49 | ) 50 | 51 | questions.show() 52 | ``` 53 | 54 | [🏃‍♂️ See our Quick Start guide for more](getting-started/quick-start.md) 55 | 56 | ## 📈 Educhain in Action 57 | 58 | Educators worldwide are using Educhain to transform their teaching. Check out our [success stories](resources/case-studies.md) to see how Educhain is making a difference in classrooms around the globe. 59 | 60 | ## 📚 Starter Apps 61 | 62 | Explore our ready-to-use educational applications built with Educhain: 63 | 64 | - **📚 Flashcard Generator**: Create customized flashcards on any topic with color-coded card types 65 | - **🌍 Multilingual Chatbot**: Educational assistant that supports multiple languages 66 | - **📝 Quiz Creator**: Generate interactive quizzes with explanations 67 | - **📖 Lesson Planner**: Build comprehensive lesson plans with objectives and activities 68 | 69 | Check out our [cookbook directory](/cookbook/starter-apps/) for code examples and deployment instructions. 70 | 71 | ## 💸 Roadmap 72 | 73 | We're constantly improving Educhain! Here's what's coming soon: 74 | 75 | - [x] **Flashcard Generation** to simplify learning 76 | - [x] **Multilingual Support** for global education 77 | - [ ] **Interactive Assessment Tools** for real-time feedback 78 | - [ ] **High-Accuracy Math Questions** with step-by-step solutions 79 | - [ ] **Personalized Learning Paths** based on student performance 80 | - [ ] **Try it out on our [website](https://educhain.in)** for on-the-go content creation 🚀 81 | 82 | 83 | ## 🤝 Contributing 84 | 85 | We welcome contributions from the community! Whether you're fixing bugs, adding new features, or improving documentation, your help is appreciated. 86 | 87 | [🤝 Learn how to contribute](contributing.md) 88 | 89 | ## 📬 Stay Connected 90 | 91 | - 📰 [Blog](https://blog.educhain.in) 92 | - 🐦 [Twitter](https://twitter.com/educhain_ai) 93 | - 💼 [LinkedIn](https://www.linkedin.com/company/educhain-ai) 94 | - 💬 [Discord Community](https://discord.gg/educhain) 95 | 96 | ## 📄 License 97 | 98 | Educhain is open source software [licensed as MIT](https://github.com/educhain/educhain/blob/main/LICENSE). 
99 | 100 | --- 101 | 102 | Educhain Banner 103 | 104 | Made with ❤️ by Buildfastwithai 105 | 106 | [www.educhain.in](https://educhain.in) 107 | -------------------------------------------------------------------------------- /cookbook/starter-apps/playground/pages/1_🧠_Generate_Questions.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from utils.models import client_model 3 | client = client_model() 4 | 5 | st.set_page_config(page_title="🧠 Generate Questions", layout="wide") 6 | st.markdown("

<h1>🧠 AI-Powered Question Generator</h1>", unsafe_allow_html=True) 7 | st.markdown("<p>Generate smart, quality questions instantly using Gemini Flash + EduChain ⚡</p>
", unsafe_allow_html=True) 8 | st.divider() 9 | st.subheader("📋 Topic & Options") 10 | 11 | with st.form(key="question_form"): 12 | col1, col2 = st.columns([3, 1]) 13 | with col1: 14 | topic = st.text_input("🔍 Enter Topic", placeholder="e.g., Thermodynamics") 15 | with col2: 16 | num_questions = st.slider("📊 No. of Questions", min_value=1, max_value=20, value=5) 17 | 18 | qtype = st.selectbox("❓ Question Type", [ 19 | "Multiple Choice", 20 | "True/False", 21 | "Fill in the Blank", 22 | "Short Answer" 23 | ]) 24 | instructions = st.text_area("📝 Custom Instructions (Optional)", placeholder="e.g., Focus on beginner-level concepts") 25 | 26 | submit = st.form_submit_button("🚀 Generate Questions") 27 | 28 | if submit and topic: 29 | with st.spinner("Thinking with Gemini Flash..."): 30 | try: 31 | result = client.qna_engine.generate_questions( 32 | topic=topic, 33 | num=num_questions, 34 | question_type=qtype, 35 | custom_instructions=instructions 36 | ) 37 | 38 | st.success("✅ Questions Generated Successfully!") 39 | st.markdown("---") 40 | for i, q in enumerate(result.questions, start=1): 41 | st.markdown(f"### Q{i}. {q.question}") 42 | if hasattr(q, "options") and q.options: 43 | for j, opt in enumerate(q.options): 44 | st.markdown(f"- **{chr(65+j)}.** {opt}") 45 | st.markdown(f"✅ **Correct Answer:** `{q.answer}`") 46 | elif hasattr(q, "answer"): 47 | st.markdown(f"✅ **Answer:** `{q.answer}`") 48 | if hasattr(q, "blank_word") and q.blank_word: 49 | st.caption(f"✏️ Fill in: `{q.blank_word}`") 50 | if getattr(q, "explanation", None): 51 | st.info(f"💡 {q.explanation}") 52 | st.markdown("---") 53 | 54 | except Exception as e: 55 | st.error(f"❌ Error generating questions:\n\n{e}") 56 | 57 | st.caption("Crafted with ❤️ by EduChain + Gemini Flash ✨") 58 | 59 | 60 | with st.popover("Open popover"): 61 | st.markdown(" Turn On Developer Mode? ") 62 | Developer_Mode = st.checkbox("Check 'On' to Turn-on Developer Mode") 63 | 64 | if Developer_Mode == True: 65 | st.write("Welcome Developers!! Here is an in-depth explanation of all of the tools used here.") 66 | st.page_link("https://github.com/satvik314/educhain/blob/main/cookbook/features/Generate_MCQs_from_Data_Educhain_v3.ipynb", label="GitHub", icon = "🔗") 67 | st.markdown(""" 68 | 📦 Key Initialization 69 | ----------------------------------- 70 | from educhain import Educhain, LLMConfig 71 | from langchain_google_genai import ChatGoogleGenerativeAI 72 | 73 | # Step 1: Setup the Gemini Flash LLM 74 | gemini_flash = ChatGoogleGenerativeAI( 75 | model="gemini-2.0-flash", 76 | google_api_key=GOOGLE_API_KEY 77 | ) 78 | 79 | # Step 2: Wrap the model in LLMConfig 80 | flash_config = LLMConfig(custom_model=gemini_flash) 81 | 82 | # Step 3: Create Educhain client using the model config 83 | client = Educhain(flash_config) 84 | 85 | 🧠 What Does client.qna_engine.generate_questions() Do? 86 | ---------------------------------------------------------- 87 | This is the core function responsible for generating questions from a given topic. 
88 | 89 | Example: 90 | result = client.qna_engine.generate_questions( 91 | topic=topic, 92 | num=num_questions, 93 | question_type=qtype, 94 | custom_instructions=instructions 95 | ) 96 | 97 | 🔍 It likely does the following: 98 | - Builds a prompt using topic, number, type, and instructions 99 | - Calls the Gemini Flash model with the prompt 100 | - Parses and returns a structured list of question objects 101 | 102 | ✅ Sample Output: 103 | [ 104 | { 105 | question: "What is the first law of thermodynamics?", 106 | options: ["Energy cannot be created", "Energy can be destroyed", ...], 107 | answer: "A", 108 | explanation: "The first law states...", 109 | blank_word: "energy" # For fill-in-the-blank type 110 | }, 111 | ... 112 | ] 113 | 114 | 🧪 What are Custom Instructions? 115 | ------------------------------------ 116 | This is an optional input that enhances control over output. Examples: 117 | - "Beginner level" 118 | - "Only fact-based MCQs" 119 | - "Include short explanations" 120 | These get incorporated in the prompt to guide the LLM better. 121 | 122 | 🖼️ Output Rendering in Streamlit 123 | ------------------------------------- 124 | After generation, each question is displayed: 125 | - With options (for MCQ) 126 | - Answer is shown clearly 127 | - Explanation is highlighted 128 | - Fill-in-the-blank terms are captioned 129 | 130 | ❤️ Summary 131 | Educhain simplifies interaction with LLMs like Gemini Flash by: 132 | - Abstracting prompt engineering 133 | - Managing model interaction 134 | - Parsing the result into clean Q&A format 135 | 136 | It gives you a plug-and-play question generation engine, perfect for educational tools. 137 | """) 138 | -------------------------------------------------------------------------------- /cookbook/use-cases/Long_PDFs_to_Quiz.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "\n", 8 | "\n", 9 | "Educhain is a powerful Python package that leverages Generative AI to create engaging and personalized educational content. From generating multiple-choice questions to crafting comprehensive lesson plans, Educhain makes it easy to apply AI in various educational scenarios." 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "id": "KzHK2RLHcw_E" 16 | }, 17 | "source": [ 18 | "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1xV9PZiEFTwTZJUtttk2bEvX6NKIGJzBd?usp=sharing)" 19 | ] 20 | }, 21 | { 22 | "cell_type": "markdown", 23 | "metadata": { 24 | "id": "Eh0egNoRdb6F" 25 | }, 26 | "source": [ 27 | "## Generate MCQs from Data using [Educhain](https://github.com/satvik314/educhain)\n", 28 | "\n", 29 | "\n", 30 | "Explore the power of AI-driven education with Educhain! This notebook demonstrates how to create high-quality Multiple Choice Questions (MCQs) from various data sources using the Educhain package. ✅\n", 31 | "\n", 32 | "Key Features:\n", 33 | "- Generate MCQs from PDF files, web pages, and plain text\n", 34 | "- Customize difficulty levels and learning objectives\n", 35 | "- Leverage advanced language models for question generation\n", 36 | "\n", 37 | "Perfect for educators, content creators, and e-learning developers looking to automate and enhance their question creation process. 
Dive in to revolutionize your approach to educational content generation!\n", 38 | "\n" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "metadata": { 45 | "colab": { 46 | "base_uri": "https://localhost:8080/" 47 | }, 48 | "id": "iVsx0ZrTcw08", 49 | "outputId": "d81b7976-59ae-4252-cac3-b8344974fdcb" 50 | }, 51 | "outputs": [], 52 | "source": [ 53 | "!pip install -qU educhain langchain-google-genai" 54 | ] 55 | }, 56 | { 57 | "cell_type": "markdown", 58 | "metadata": { 59 | "id": "W1AOh8qd9wbq" 60 | }, 61 | "source": [ 62 | "### Initiating Educhain with Gemini Pro 002" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "metadata": { 69 | "id": "kkM9e1xM93hS" 70 | }, 71 | "outputs": [], 72 | "source": [ 73 | "from langchain_google_genai import ChatGoogleGenerativeAI\n", 74 | "from educhain import Educhain, LLMConfig\n", 75 | "from google.colab import userdata\n", 76 | "\n", 77 | "gemini = ChatGoogleGenerativeAI(\n", 78 | " model=\"gemini-2.0-flash\",\n", 79 | " api_key=userdata.get(\"GOOGLE_API_KEY\"))\n", 80 | "\n", 81 | "gemini_config = LLMConfig(custom_model=gemini)\n", 82 | "\n", 83 | "client = Educhain(gemini_config)" 84 | ] 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "metadata": { 89 | "id": "pLKufnaS9qIv" 90 | }, 91 | "source": [ 92 | "### Generating MCQs from a PDF" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "metadata": { 99 | "colab": { 100 | "base_uri": "https://localhost:8080/" 101 | }, 102 | "id": "zcMOtAdlAlxH", 103 | "outputId": "621b500c-9546-4dd6-a151-6152815a285f" 104 | }, 105 | "outputs": [], 106 | "source": [ 107 | "!wget https://arxiv.org/pdf/2306.05499.pdf" 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": null, 113 | "metadata": { 114 | "colab": { 115 | "base_uri": "https://localhost:8080/" 116 | }, 117 | "id": "ABbLIaYiArLb", 118 | "outputId": "7a1b29ae-d746-4dbd-c8b6-dd03a80a181a" 119 | }, 120 | "outputs": [], 121 | "source": [ 122 | "%%time\n", 123 | "mcqs_from_url = client.qna_engine.generate_questions_from_data(\n", 124 | " source=\"2306.05499.pdf\",\n", 125 | " source_type=\"pdf\",\n", 126 | " num=10\n", 127 | " )\n", 128 | "\n", 129 | "mcqs_from_url.show()" 130 | ] 131 | }, 132 | { 133 | "cell_type": "markdown", 134 | "metadata": { 135 | "id": "oTbvK5QkBR2n" 136 | }, 137 | "source": [ 138 | "### It also supports URLs " 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": null, 144 | "metadata": { 145 | "colab": { 146 | "base_uri": "https://localhost:8080/" 147 | }, 148 | "id": "qHIPmn5c9txq", 149 | "outputId": "b68bf577-7927-4e9e-e6c1-122de1871c97" 150 | }, 151 | "outputs": [], 152 | "source": [ 153 | "mcqs_from_url = client.qna_engine.generate_questions_from_data(\n", 154 | " source=\"https://en.wikipedia.org/wiki/Butterfly_effect\",\n", 155 | " source_type=\"url\",\n", 156 | " num=5\n", 157 | " )\n", 158 | "\n", 159 | "mcqs_from_url.show()" 160 | ] 161 | } 162 | ], 163 | "metadata": { 164 | "colab": { 165 | "provenance": [] 166 | }, 167 | "kernelspec": { 168 | "display_name": "Python 3", 169 | "name": "python3" 170 | }, 171 | "language_info": { 172 | "name": "python" 173 | } 174 | }, 175 | "nbformat": 4, 176 | "nbformat_minor": 0 177 | } 178 | -------------------------------------------------------------------------------- /cookbook/starter-apps/playground/pages/3_🎥_YouTube_to_Questions.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 
from utils.models import client_model 3 | client = client_model() 4 | 5 | # Title and instructions 6 | st.markdown("<h1 style='text-align: center;'>🎥 YouTube to Questions</h1>", unsafe_allow_html=True) 7 | st.markdown("<h3 style='text-align: center;'>Generate smart, quality questions instantly using Gemini Flash + EduChain ⚡</h3>
", unsafe_allow_html=True) 8 | st.markdown(""" 9 | Paste a YouTube video URL and generate questions from the content! 10 | Make sure the video has subtitles or clear speech for best results. 11 | """) 12 | 13 | # Input controls 14 | video_url = st.text_input("Enter YouTube Video URL") 15 | num_questions = st.slider("Number of Questions", 1, 20, 5) 16 | question_type = st.selectbox("Question Type", ["Multiple Choice", "True/False", "Fill in the Blank", "Short Answer"]) 17 | difficulty = st.selectbox("Difficulty Level", ["Beginner", "Intermediate", "Advanced"]) 18 | custom_instr = st.text_area("Custom Instructions (Optional)", "", height=68) 19 | 20 | # Function to display results 21 | def show_result(result): 22 | st.success("✅ Questions Generated!") 23 | for i, q in enumerate(result.questions, 1): 24 | st.markdown(f"### Q{i}. {q.question}") 25 | if hasattr(q, "options") and q.options: 26 | for j, opt in enumerate(q.options): 27 | st.markdown(f"- **{chr(65+j)}.** {opt}") 28 | st.markdown(f"✅ **Answer:** `{q.answer}`") 29 | elif hasattr(q, "answer"): 30 | st.markdown(f"✅ **Answer:** `{q.answer}`") 31 | if hasattr(q, "blank_word") and q.blank_word: 32 | st.caption(f"✏️ Fill in: `{q.blank_word}`") 33 | if getattr(q, "explanation", None): 34 | st.info(f"💡 {q.explanation}") 35 | st.markdown("---") 36 | 37 | # Button action 38 | if st.button("🚀 Generate from YouTube") and video_url: 39 | with st.spinner("Processing video and generating questions..."): 40 | result = client.qna_engine.generate_questions_from_youtube( 41 | url=video_url, 42 | num=num_questions, 43 | question_type=question_type, 44 | difficulty_level=difficulty, 45 | custom_instructions=custom_instr 46 | ) 47 | show_result(result) 48 | 49 | st.markdown("---") 50 | st.caption("Powered by EduChain QnA Engine · Gemini Flash ✨") 51 | 52 | with st.popover("Open popover"): 53 | st.markdown(" Turn On Developer Mode? ") 54 | Developer_Mode = st.checkbox("Check 'On' to Turn-on Developer Mode") 55 | 56 | if Developer_Mode == True: 57 | st.write("Welcome Developers!! Here is an in-depth explanation of all of the tools used here.") 58 | st.page_link("https://github.com/satvik314/educhain/blob/main/cookbook/features/Generate_questions_from_youtube.ipynb", label="GitHub", icon = "🔗") 59 | st.markdown(""" 60 | 🔧 Overview: 61 | ------------ 62 | This Streamlit app allows users to input a YouTube video link and automatically generate structured questions from its content using Gemini Flash via Educhain. 63 | 64 | It is ideal for turning educational videos, lectures, or tutorials into practice material — assuming the video has subtitles or clear speech. 
65 | 66 | 📦 Initialization and Setup: 67 | ----------------------------- 68 | from educhain import Educhain, LLMConfig 69 | from langchain_google_genai import ChatGoogleGenerativeAI 70 | 71 | # Load Gemini API key from .env 72 | GOOGLE_API_KEY = os.getenv("GEMINI_KEY") 73 | 74 | # Create Gemini Flash model and wrap it 75 | gemini_flash = ChatGoogleGenerativeAI( 76 | model="gemini-2.0-flash", 77 | google_api_key=GOOGLE_API_KEY 78 | ) 79 | 80 | # Configure and initialize Educhain 81 | flash_config = LLMConfig(custom_model=gemini_flash) 82 | client = Educhain(flash_config) 83 | 84 | 📝 User Inputs: 85 | ---------------- 86 | - YouTube Video URL 87 | - Number of questions to generate 88 | - Question type (MCQ, True/False, Fill in the Blank, Short Answer) 89 | - Difficulty level (Beginner, Intermediate, Advanced) 90 | - Optional instructions for tone/style/focus 91 | 92 | 🚀 Main Function: 93 | ------------------ 94 | The core function triggered on clicking the button: 95 | 96 | client.qna_engine.generate_questions_from_youtube( 97 | url=video_url, 98 | num=num_questions, 99 | question_type=question_type, 100 | difficulty_level=difficulty, 101 | custom_instructions=custom_instr 102 | ) 103 | 104 | ✨ Internally, this likely performs: 105 | - Downloading/transcribing audio or extracting captions from the YouTube video 106 | - Summarizing or chunking the video content 107 | - Prompting Gemini Flash with structured instructions 108 | - Parsing the response to generate clear, formatted Q&A objects 109 | 110 | 📤 Display Logic: 111 | ------------------ 112 | Results are rendered using the `show_result()` function: 113 | - Displays each question in numbered format 114 | - Lists options for MCQs with the correct answer 115 | - Shows answer directly for other types 116 | - Explanation is shown in an info box if available 117 | - Fill-in-the-blank word is displayed for that type 118 | 119 | Example: 120 | 121 | ### Q1. What is the boiling point of water? 122 | - A. 50°C 123 | - B. 100°C 124 | - C. 150°C 125 | ✅ Answer: B 126 | 💡 Explanation: Water boils at 100°C under standard pressure. 127 | 128 | 🧠 Benefits: 129 | ------------ 130 | - Great for educators converting lectures to quizzes 131 | - Students can auto-generate practice material from videos 132 | - Supports multiple formats and difficulty tuning 133 | 134 | ⚠️ Tip: 135 | -------- 136 | - Videos must have clear speech or captions for best accuracy. 137 | - For noisy, silent, or music-based videos, question quality may drop. 138 | 139 | ❤️ Summary: 140 | ------------- 141 | This YouTube-powered question generation app uses: 142 | - Gemini Flash for fast LLM responses 143 | - Educhain to handle transcription, prompting, and parsing 144 | - Streamlit for a clean user interface 145 | 146 | Together, they provide a powerful way to generate assessments from video learning content. 147 | """) -------------------------------------------------------------------------------- /cookbook/starter-apps/playground/pages/4_🔮_Doubt Solver.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from pydantic import BaseModel, Field 3 | from typing import List, Optional 4 | 5 | from utils.models import client_model 6 | client = client_model() 7 | 8 | class SolvedDoubt(BaseModel): 9 | explanation: str 10 | steps: Optional[List[str]] = Field(default_factory=list) 11 | additional_notes: Optional[str] = None 12 | 13 | st.markdown("
<h1 style='text-align: center;'>🔮 Visual Doubt Solver</h1>", unsafe_allow_html=True) 14 | st.markdown("<h3 style='text-align: center;'>Solve doubts from images instantly using Gemini Flash + EduChain ⚡</h3>
", unsafe_allow_html=True) 15 | st.markdown(""" 16 | Upload a question image or diagram and receive a detailed step-by-step explanation. 17 | You can optionally add a specific prompt to guide the response. 18 | """) 19 | 20 | image_file = st.file_uploader("Upload Image of the Doubt (JPG/PNG)", type=["jpg", "jpeg", "png"]) 21 | prompt_text = st.text_area("Add a Custom Prompt (Optional)", "Explain this image in detail.") 22 | detail_level = st.selectbox("Explanation Detail Level", ["High", "Medium", "Low"], index=0) 23 | 24 | def show_doubt_solution(result: SolvedDoubt): 25 | st.success("✅ Doubt Solved!") 26 | 27 | st.markdown("### 📄 Explanation") 28 | st.markdown(result.explanation) 29 | 30 | if result.steps: 31 | st.markdown("### 🔹 Steps:") 32 | for i, step in enumerate(result.steps, 1): 33 | st.markdown(f"**{i}.** {step}") 34 | 35 | if result.additional_notes: 36 | st.markdown("### 📄 Additional Notes") 37 | st.markdown(result.additional_notes) 38 | 39 | if st.button("🚀 Solve Doubt") and image_file: 40 | with st.spinner("Solving your visual doubt with AI..."): 41 | img_path = "temp_doubt_image.png" 42 | with open(img_path, "wb") as f: 43 | f.write(image_file.read()) 44 | 45 | raw_result = client.qna_engine.solve_doubt( 46 | image_source=img_path, 47 | prompt=prompt_text, 48 | detail_level=detail_level 49 | ) 50 | parsed_result = raw_result 51 | show_doubt_solution(parsed_result) 52 | 53 | 54 | st.markdown("---") 55 | st.caption("Powered by EduChain Doubt Solver · Gemini Flash 🌟") 56 | 57 | with st.popover("Open popover"): 58 | st.markdown(" Turn On Developer Mode? ") 59 | Developer_Mode = st.checkbox("Check 'On' to Turn-on Developer Mode") 60 | 61 | if Developer_Mode == True: 62 | st.write("Welcome Developers!! Here is an in-depth explanation of all of the tools used here.") 63 | st.markdown(""" Code Use: 64 | from educhain import Educhain 65 | 66 | client = Educhain() #Default is 4o-mini (make sure to use a multimodal LLM!) 67 | 68 | question = client.qna_engine.solve_doubt( 69 | image_source="https://i.ytimg.com/vi/OQjkFQAIOck/maxresdefault.jpg", 70 | prompt="Explain the diagram in detail", 71 | detail_level = "High" 72 | ) 73 | 74 | print(question) 75 | """) 76 | st.markdown(""" 77 | 📷 Overview: 78 | ------------- 79 | This Streamlit app allows users to upload an image (question diagram, handwritten math problem, etc.) and receive an AI-generated detailed explanation. 80 | It uses the Educhain `solve_doubt()` engine powered by Gemini Flash to interpret and respond intelligently to visual content. 81 | 82 | 📦 Initialization and Setup: 83 | ----------------------------- 84 | from educhain import Educhain, LLMConfig 85 | from langchain_google_genai import ChatGoogleGenerativeAI 86 | 87 | # Load API key 88 | GOOGLE_API_KEY = os.getenv("GEMINI_KEY") 89 | 90 | # Create Gemini Flash wrapper and config 91 | gemini_flash = ChatGoogleGenerativeAI( 92 | model="gemini-2.0-flash", 93 | google_api_key=GOOGLE_API_KEY 94 | ) 95 | 96 | # Setup Educhain client 97 | flash_config = LLMConfig(custom_model=gemini_flash) 98 | client = Educhain(flash_config) 99 | 100 | 🧠 Model Definition: 101 | --------------------- 102 | Using Pydantic for structured parsing of Educhain's `SolvedDoubt` output: 103 | 104 | class SolvedDoubt(BaseModel): 105 | explanation: str 106 | steps: Optional[List[str]] = Field(default_factory=list) 107 | additional_notes: Optional[str] = None 108 | 109 | This ensures the response from the AI is structured and easily renderable in Streamlit. 
110 | 111 | 📝 User Inputs: 112 | ---------------- 113 | - Image upload: PNG, JPG, or JPEG 114 | - Custom prompt: Optional input to guide explanation (e.g., "Explain this in the context of algebra") 115 | - Detail level: High / Medium / Low, influencing how comprehensive the answer will be 116 | 117 | 🚀 Main Function Call: 118 | ----------------------- 119 | Upon clicking “Solve Doubt”, the app: 120 | 121 | 1. Saves the uploaded image temporarily. 122 | 2. Calls: 123 | client.qna_engine.solve_doubt( 124 | image_source=img_path, 125 | prompt=prompt_text, 126 | detail_level=detail_level 127 | ) 128 | 129 | This triggers: 130 | - Image understanding (possibly OCR or visual LLM parsing) 131 | - Prompt fusion (merging the image with your optional instruction) 132 | - LLM-based reasoning and response generation 133 | 134 | 📤 Output Rendering: 135 | ---------------------- 136 | The AI-generated explanation is shown in sections: 137 | 138 | - 📄 Explanation: Main concept or answer derived from the image 139 | - 🔹 Steps: If present, a breakdown of logical/mathematical steps 140 | - 📄 Additional Notes: Extra insights, tips, or warnings (optional) 141 | 142 | Example: 143 | 144 | 📄 Explanation: 145 | "This is a graph of a quadratic function with roots at x = 1 and x = 3..." 146 | 147 | 🔹 Steps: 148 | 1. Identify the function structure. 149 | 2. Note key points and curvature. 150 | 3. Solve for x-intercepts using the factorized form. 151 | 152 | 📄 Additional Notes: 153 | "The vertex lies at the midpoint of the roots: x = 2." 154 | 155 | 🧠 Benefits: 156 | ------------- 157 | - Converts visual academic content into detailed understanding 158 | - Great for solving diagrams, geometry, physics questions, etc. 159 | - Helps students get clarity without typing the entire question 160 | 161 | ❤️ Summary: 162 | ------------- 163 | The Visual Doubt Solver is powered by: 164 | - EduChain’s visual QnA engine 165 | - Gemini Flash for fast, rich language understanding 166 | - Streamlit for interactive UX 167 | 168 | It creates a seamless experience to turn images into structured, step-by-step learning. 169 | """ 170 | ) 171 | -------------------------------------------------------------------------------- /archive/models.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | #import fitz # PyMuPDF for handling PDF files 3 | import re 4 | import requests 5 | from bs4 import BeautifulSoup 6 | from typing import List, Optional, Dict, Any, Literal 7 | # from langchain_core.pydantic_v1 import BaseModel, Field 8 | from pydantic import BaseModel, Field 9 | from langchain_openai import ChatOpenAI 10 | from langchain.prompts import PromptTemplate 11 | from langchain.output_parsers import PydanticOutputParser 12 | import os 13 | import json 14 | from PyPDF2 import PdfReader 15 | 16 | 17 | # Pydantic models 18 | class BaseQuestion(BaseModel): 19 | question: str 20 | answer: str 21 | explanation: Optional[str] = None 22 | 23 | def show(self): 24 | print(f"Question: {self.question}") 25 | print(f"Answer: {self.answer}") 26 | if self.explanation: 27 | print(f"Explanation: {self.explanation}") 28 | print() 29 | 30 | class MultipleChoiceQuestion(BaseQuestion): 31 | options: List[str] 32 | 33 | def show(self): 34 | print(f"Question: {self.question}") 35 | options_str = "\n".join(f" {chr(65 + i)}. 
{option}" for i, option in enumerate(self.options)) 36 | print(f"Options:\n{options_str}") 37 | print(f"\nCorrect Answer: {self.answer}") 38 | if self.explanation: 39 | print(f"Explanation: {self.explanation}") 40 | print() 41 | 42 | class ShortAnswerQuestion(BaseQuestion): 43 | keywords: List[str] = Field(default_factory=list) 44 | 45 | def show(self): 46 | super().show() 47 | if self.keywords: 48 | print(f"Keywords: {', '.join(self.keywords)}") 49 | print() 50 | 51 | class TrueFalseQuestion(BaseQuestion): 52 | answer: bool 53 | 54 | def show(self): 55 | super().show() 56 | print(f"True/False: {self.answer}") 57 | print() 58 | 59 | class FillInBlankQuestion(BaseQuestion): 60 | blank_word: Optional[str] = None 61 | 62 | def show(self): 63 | super().show() 64 | print(f"Word to fill: {self.blank_word or self.answer}") 65 | print() 66 | 67 | class QuestionList(BaseModel): 68 | questions: List[BaseQuestion] 69 | 70 | def show(self): 71 | for i, question in enumerate(self.questions, 1): 72 | print(f"Question {i}:") 73 | question.show() 74 | 75 | class MCQList(QuestionList): 76 | questions: List[MultipleChoiceQuestion] 77 | 78 | class ShortAnswerQuestionList(QuestionList): 79 | questions: List[ShortAnswerQuestion] 80 | 81 | class TrueFalseQuestionList(QuestionList): 82 | questions: List[TrueFalseQuestion] 83 | 84 | class FillInBlankQuestionList(QuestionList): 85 | questions: List[FillInBlankQuestion] 86 | 87 | class MCQ(MultipleChoiceQuestion): 88 | """A class representing a multiple choice question.""" 89 | correct_answer: str 90 | 91 | def show(self): 92 | super().show() 93 | print(f"Correct Answer: {self.correct_answer}") 94 | print() 95 | 96 | class LessonPlan(BaseModel): 97 | """A class representing a lesson plan.""" 98 | topic: str 99 | objectives: List[str] 100 | introduction: str 101 | content: str 102 | assessment: str 103 | conclusion: str 104 | 105 | def show(self): 106 | print(f"Topic: {self.topic}") 107 | print("Objectives:") 108 | for objective in self.objectives: 109 | print(f"- {objective}") 110 | print(f"Introduction: {self.introduction}") 111 | print(f"Content: {self.content}") 112 | print(f"Assessment: {self.assessment}") 113 | print(f"Conclusion: {self.conclusion}\n") 114 | 115 | class QuestionPaper(BaseModel): 116 | """A class representing a question paper.""" 117 | subject: str 118 | grade_level: int 119 | num_questions: int 120 | question_types: List[str] 121 | time_limit: Optional[int] 122 | difficulty_level: Optional[str] 123 | topics: Optional[List[str]] 124 | questions: List[BaseQuestion] 125 | 126 | def show(self): 127 | print(f"Subject: {self.subject}") 128 | print(f"Grade Level: {self.grade_level}") 129 | print(f"Number of Questions: {self.num_questions}") 130 | print(f"Question Types: {', '.join(self.question_types)}") 131 | print(f"Time Limit: {self.time_limit} minutes" if self.time_limit else "No time limit") 132 | print(f"Difficulty Level: {self.difficulty_level}" if self.difficulty_level else "Not specified") 133 | print(f"Topics: {', '.join(self.topics)}" if self.topics else "Not specified") 134 | print("\nQuestions:") 135 | for i, question in enumerate(self.questions, 1): 136 | print(f"Question {i}:") 137 | question.show() 138 | 139 | 140 | # Loader classes 141 | class PdfFileLoader: 142 | def load_data(self, file_path): 143 | reader = PdfReader(file_path) 144 | all_content = [] 145 | 146 | for page in reader.pages: 147 | content = page.extract_text() 148 | content = self.clean_string(content) 149 | all_content.append(content) 150 | 151 | return " 
".join(all_content) 152 | 153 | def clean_string(self, text): 154 | text = re.sub(r'\s+', ' ', text) 155 | return text.strip() 156 | 157 | class UrlLoader: 158 | def load_data(self, url): 159 | response = requests.get(url) 160 | soup = BeautifulSoup(response.content, 'html.parser') 161 | content = soup.get_text() 162 | return self.clean_string(content) 163 | 164 | def clean_string(self, text): 165 | text = re.sub(r'\s+', ' ', text) 166 | return text.strip() 167 | 168 | # Vision Doubt Solving 169 | class LLMConfig(BaseModel): 170 | model_name: str 171 | api_key_name: str 172 | max_tokens: int = 1000 173 | 174 | model_config = { 175 | 'protected_namespaces': () 176 | } 177 | 178 | class DoubtSolverConfig(BaseModel): 179 | gpt4: LLMConfig = LLMConfig(model_name="gpt-4o-mini", api_key_name="OPENAI_API_KEY") 180 | 181 | class SolvedDoubt(BaseModel): 182 | explanation: str 183 | steps: Optional[List[str]] = Field(default_factory=list) 184 | additional_notes: Optional[str] = None 185 | 186 | def show(self): 187 | print("Explanation:") 188 | print(self.explanation) 189 | print("\nSteps:") 190 | for i, step in enumerate(self.steps, 1): 191 | print(f"{i}. {step}") 192 | if self.additional_notes: 193 | print("\nAdditional Notes:") 194 | print(self.additional_notes) 195 | -------------------------------------------------------------------------------- /educhain/utils/output_formatter.py: -------------------------------------------------------------------------------- 1 | # educhain/utils/output_formatter.py 2 | 3 | from typing import Any, Optional, List, Dict 4 | import pandas as pd 5 | from reportlab.lib import colors 6 | from reportlab.lib.pagesizes import letter 7 | from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle 8 | from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle 9 | from reportlab.pdfbase import pdfmetrics 10 | from reportlab.pdfbase.ttfonts import TTFont 11 | import json 12 | from datetime import datetime 13 | 14 | class OutputFormatter: 15 | @staticmethod 16 | def _convert_to_dict_list(data: Any) -> List[Dict]: 17 | """Convert Pydantic model data to a list of dictionaries""" 18 | if hasattr(data, 'questions'): 19 | # If it's a question list model 20 | return [q.model_dump() for q in data.questions] 21 | elif isinstance(data, list): 22 | # If it's already a list 23 | return [item.model_dump() if hasattr(item, 'model_dump') else item for item in data] 24 | else: 25 | # Single item 26 | return [data.model_dump() if hasattr(data, 'model_dump') else data] 27 | 28 | @staticmethod 29 | def to_csv(data: Any, filename: Optional[str] = None) -> str: 30 | """Convert data to CSV format""" 31 | if filename is None: 32 | filename = f"questions_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv" 33 | 34 | dict_list = OutputFormatter._convert_to_dict_list(data) 35 | df = pd.DataFrame(dict_list) 36 | 37 | # Handle nested structures (like options in MCQs) 38 | for col in df.columns: 39 | if isinstance(df[col].iloc[0], (list, dict)): 40 | df[col] = df[col].apply(json.dumps) 41 | 42 | df.to_csv(filename, index=False) 43 | return filename 44 | 45 | @staticmethod 46 | def _format_question(question: Dict, styles: Dict) -> List: 47 | """Format a single question for PDF output""" 48 | elements = [] 49 | 50 | # Question number and text 51 | question_text = Paragraph(f"Q{question.get('id', '')}: {question.get('question', '')}", 52 | styles['Question']) 53 | elements.append(question_text) 54 | elements.append(Spacer(1, 12)) 55 | 56 | # Options (if present) 57 | if 
'options' in question: 58 | options = question['options'] 59 | if isinstance(options, str): 60 | try: 61 | options = json.loads(options) 62 | except: 63 | options = [options] 64 | 65 | for i, opt in enumerate(options): 66 | if isinstance(opt, dict): 67 | opt_text = opt.get('text', '') 68 | is_correct = opt.get('correct', 'false') == 'true' 69 | else: 70 | opt_text = str(opt) 71 | is_correct = False 72 | 73 | option_style = styles['CorrectOption'] if is_correct else styles['Option'] 74 | option_text = Paragraph(f"{chr(65+i)}. {opt_text}", option_style) 75 | elements.append(option_text) 76 | elements.append(Spacer(1, 6)) 77 | 78 | # Correct Answer (if not MCQ) 79 | if 'answer' in question: 80 | answer_text = Paragraph(f"Correct Answer: {question['answer']}", 81 | styles['CorrectAnswer']) 82 | elements.append(answer_text) 83 | elements.append(Spacer(1, 6)) 84 | 85 | # Explanation 86 | if question.get('explanation'): 87 | explanation_text = Paragraph(f"Explanation: {question['explanation']}", 88 | styles['Explanation']) 89 | elements.append(explanation_text) 90 | 91 | elements.append(Spacer(1, 20)) 92 | return elements 93 | 94 | @staticmethod 95 | def to_pdf(data: Any, filename: Optional[str] = None) -> str: 96 | """Convert data to PDF format using ReportLab""" 97 | if filename is None: 98 | filename = f"questions_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf" 99 | 100 | # Create the PDF document 101 | doc = SimpleDocTemplate( 102 | filename, 103 | pagesize=letter, 104 | rightMargin=72, 105 | leftMargin=72, 106 | topMargin=72, 107 | bottomMargin=72 108 | ) 109 | 110 | # Define styles 111 | styles = getSampleStyleSheet() 112 | styles.add(ParagraphStyle( 113 | name='Question', 114 | parent=styles['Normal'], 115 | fontSize=12, 116 | spaceAfter=6, 117 | fontName='Helvetica-Bold' 118 | )) 119 | styles.add(ParagraphStyle( 120 | name='Option', 121 | parent=styles['Normal'], 122 | fontSize=11, 123 | leftIndent=20, 124 | fontName='Helvetica' 125 | )) 126 | styles.add(ParagraphStyle( 127 | name='CorrectOption', 128 | parent=styles['Normal'], 129 | fontSize=11, 130 | leftIndent=20, 131 | textColor=colors.green, 132 | fontName='Helvetica-Bold' 133 | )) 134 | styles.add(ParagraphStyle( 135 | name='CorrectAnswer', 136 | parent=styles['Normal'], 137 | fontSize=11, 138 | textColor=colors.green, 139 | fontName='Helvetica-Bold' 140 | )) 141 | styles.add(ParagraphStyle( 142 | name='Explanation', 143 | parent=styles['Normal'], 144 | fontSize=10, 145 | leftIndent=20, 146 | textColor=colors.gray, 147 | fontName='Helvetica-Oblique' 148 | )) 149 | 150 | # Build the document content 151 | elements = [] 152 | 153 | # Add title 154 | title = Paragraph("Generated Questions", styles['Title']) 155 | elements.append(title) 156 | elements.append(Spacer(1, 30)) 157 | 158 | # Process questions 159 | dict_list = OutputFormatter._convert_to_dict_list(data) 160 | for i, question in enumerate(dict_list, 1): 161 | question['id'] = i 162 | elements.extend(OutputFormatter._format_question(question, styles)) 163 | 164 | # Generate the PDF 165 | doc.build(elements) 166 | return filename -------------------------------------------------------------------------------- /cookbook/starter-apps/playground/pages/7_PYQ to Pre Tool.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from PyPDF2 import PdfReader 3 | from utils.models import client_model 4 | client = client_model() 5 | 6 | st.set_page_config(page_title="📘 PYQ-to-Prep", layout="wide") 7 | st.markdown("
<h1 style='text-align: center;'>📘 PYQ-to-Prep: Smarter Practice from Past Papers</h1>", unsafe_allow_html=True) 8 | st.markdown("<h3 style='text-align: center;'>Upload PYQs or Type Your Own to Auto-Generate Practice Questions with AI ⚡</h3>
", unsafe_allow_html=True) 9 | st.divider() 10 | 11 | st.subheader("🎛️ Select Your Input Mode") 12 | mode = st.radio("Input Method", ["Upload PYQ PDF", "Paste Text Content", "Mock Practice (AI-Generated)"]) 13 | 14 | def clear_quiz_session_state(): 15 | for key in list(st.session_state.keys()): 16 | if key.startswith("q") and isinstance(st.session_state[key], str): 17 | del st.session_state[key] 18 | 19 | def display_questions(result): 20 | st.success("🎯 Smart Questions Ready!") 21 | score = 0 22 | 23 | for i, q in enumerate(result.questions, 1): 24 | st.markdown(f"### Q{i}. {q.question}") 25 | answer_key = f"q{i}" 26 | 27 | if hasattr(q, "options") and q.options: 28 | selected = st.radio("Choose an answer:", q.options, key=answer_key) 29 | correct_ans = q.answer 30 | if selected == correct_ans: 31 | score += 1 32 | else: 33 | st.markdown(f"✅ **Answer:** `{q.answer}`") 34 | 35 | if getattr(q, "explanation", None): 36 | st.info(f"💡 {q.explanation}") 37 | st.markdown("---") 38 | 39 | if result.questions: 40 | st.success(f"Your Score: {score}/{len(result.questions)}") 41 | 42 | if mode == "Upload PYQ PDF": 43 | uploaded_file = st.file_uploader("📄 Upload PYQ PDF File", type=["pdf"]) 44 | doubt = st.text_input("❓ Got Doubt on Specific Portion? Mention It Here (Optional)") 45 | num_q = st.slider("🔢 Number of Practice Questions", 5, 30, 10) 46 | 47 | if st.button("⚡ Generate from PDF") and uploaded_file: 48 | with st.spinner("Reading your PYQ and preparing questions..."): 49 | clear_quiz_session_state() 50 | reader = PdfReader(uploaded_file) 51 | text = " ".join([page.extract_text() or "" for page in reader.pages]) 52 | text = " ".join(text.split()) 53 | 54 | prompt = doubt if doubt else "Generate diverse questions from this PYQ" 55 | result = client.qna_engine.generate_questions_from_data( 56 | source=text, 57 | source_type="text", 58 | num=num_q, 59 | custom_instructions="Generate a mix of MCQs, True/False, Short and Long Answer questions based on this content. Add Bloom's taxonomy & difficulty levels where relevant." 60 | ) 61 | st.session_state["pdf_result"] = result 62 | clear_quiz_session_state() 63 | 64 | if "pdf_result" in st.session_state and mode == "Upload PYQ PDF": 65 | display_questions(st.session_state["pdf_result"]) 66 | 67 | elif mode == "Paste Text Content": 68 | user_text = st.text_area("📝 Paste Your PYQ Text Here", height=300) 69 | doubt = st.text_input("❓ Any Specific Doubt to Focus On? 
(Optional)") 70 | num_q = st.slider("🔢 Number of Practice Questions", 5, 30, 10) 71 | 72 | if st.button("📘 Generate from Text") and user_text.strip(): 73 | with st.spinner("Analyzing text and building questions..."): 74 | clear_quiz_session_state() 75 | prompt = doubt if doubt else "Create useful questions from this content" 76 | result = client.qna_engine.generate_questions_from_data( 77 | source=user_text, 78 | source_type="text", 79 | num=num_q, 80 | custom_instructions="Generate a mix of MCQs, True/False, Short and Long Answer questions based on this content.", 81 | ) 82 | st.session_state["text_result"] = result 83 | clear_quiz_session_state() 84 | 85 | if "text_result" in st.session_state and mode == "Paste Text Content": 86 | display_questions(st.session_state["text_result"]) 87 | 88 | 89 | elif mode == "Mock Practice (AI-Generated)": 90 | exam_type = st.selectbox("🎯 Choose Mock Exam Style", ["NEET", "JEE", "Class 10", "Class 12"]) 91 | subject = st.text_input("📘 Enter Subject", placeholder="e.g., Biology") 92 | topic = st.text_input("📚 Optional Topic", placeholder="e.g., Genetics") 93 | num_q = st.slider("🎯 Number of Mock Questions", 5, 30, 10) 94 | 95 | if st.button("🎲 Generate Mock PYQs") and subject: 96 | with st.spinner("Generating fresh questions with Gemini..."): 97 | topic_query = f"{exam_type} {subject} {topic}" 98 | result = client.qna_engine.generate_questions( 99 | topic=topic_query, 100 | num=num_q, 101 | custom_instructions="Generate diverse PYQ-style MCQ, TF, Short and Long answer questions with explanations, Bloom's levels, and difficulty rating." 102 | ) 103 | st.session_state["mock_result"] = result 104 | clear_quiz_session_state() 105 | 106 | if "mock_result" in st.session_state and mode == "Mock Practice (AI-Generated)": 107 | display_questions(st.session_state["mock_result"]) 108 | 109 | st.divider() 110 | st.subheader("🧠 Doubt Solver") 111 | doubt_img = st.file_uploader("📷 Upload Image of Your Doubt", type=["jpg", "jpeg", "png"]) 112 | doubt_prompt = st.text_input("📝 Enter Specific Doubt Prompt (Optional)", placeholder="Explain this diagram in detail") 113 | 114 | if st.button("🤖 Solve Doubt") and doubt_img: 115 | with st.spinner("Analyzing your doubt..."): 116 | img_path = "temp_doubt.png" 117 | with open(img_path, "wb") as f: 118 | f.write(doubt_img.read()) 119 | 120 | explanation = client.qna_engine.solve_doubt( 121 | image_source=img_path, 122 | prompt=doubt_prompt or "Explain this image in detail", 123 | detail_level="High" 124 | ) 125 | 126 | st.success("✅ Doubt Solved!") 127 | st.markdown(f"**Explanation:**\n{explanation.explanation}") 128 | if explanation.steps: 129 | st.markdown("**Steps:**") 130 | for i, step in enumerate(explanation.steps, 1): 131 | st.markdown(f"{i}. 
{step}") 132 | if explanation.additional_notes: 133 | st.markdown(f"**Additional Notes:**\n{explanation.additional_notes}") 134 | 135 | st.caption("✨ PYQ-to-Prep powered by EduChain + Gemini Flash · With Interactive Quizzes & Doubt Solver") 136 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Jee_problem_solver_and_analyzer/app.py: -------------------------------------------------------------------------------- 1 | from langchain_openai import ChatOpenAI 2 | import streamlit as st 3 | from educhain import Educhain, LLMConfig 4 | from PIL import Image 5 | import os 6 | 7 | # Initialize Educhain client 8 | def initialize_educhain(api_key): 9 | openai_model = ChatOpenAI( 10 | model_name="gpt-5", # Use GPT-5 model 11 | openai_api_key=api_key, 12 | temperature=1 # Adjust temperature if needed 13 | ) 14 | openai_config = LLMConfig(custom_model=openai_model) 15 | client = Educhain(openai_config) 16 | return client 17 | 18 | # Main Streamlit app 19 | def main(): 20 | st.set_page_config(page_title="JEE GPT-5 Solver", layout="wide") 21 | st.title("📚 JEE Advanced Problem Solver and Analyzer") 22 | st.subheader("⭐ GPT-5 X Educhain ⭐") 23 | 24 | # Sidebar 25 | with st.sidebar: 26 | st.markdown( 27 | "", unsafe_allow_html=True 37 | ) 38 | 39 | st.header("🔐 API Settings") 40 | api_key = st.text_input("Enter your OpenAI API Key", type="password") 41 | st.markdown("---") 42 | st.markdown("⭐ Model: `GPT-5`") 43 | st.markdown("---") 44 | st.markdown(""" """, unsafe_allow_html=True) 47 | 48 | if not api_key: 49 | st.warning("Please enter your OpenAI API Key in the sidebar.") 50 | st.stop() 51 | 52 | # Initialize Educhain client 53 | client = initialize_educhain(api_key) 54 | 55 | # File upload section 56 | st.header("📷 Upload JEE Advanced Problem Image") 57 | uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"]) 58 | 59 | if uploaded_file is not None: 60 | # Save the uploaded image to a temporary file 61 | temp_image_path = "temp_image.jpg" 62 | with open(temp_image_path, "wb") as f: 63 | f.write(uploaded_file.getvalue()) 64 | 65 | image = Image.open(temp_image_path) 66 | st.image(image, caption="Problem Image", use_container_width=True) 67 | 68 | if st.button("Analyze Problem"): 69 | with st.spinner("Analyzing the problem..."): 70 | try: 71 | # Step 1: Extract topics 72 | topics_response = client.qna_engine.solve_doubt( 73 | image_source=temp_image_path, 74 | prompt="List all the topics used in this JEE Advanced problem.", 75 | detail_level="High" 76 | ) 77 | raw_topics = getattr(topics_response, 'explanation', "No topics found.") 78 | st.subheader("📚 Topics Involved") 79 | 80 | # Clean topic output into bullet list 81 | topic_lines = [t.strip("-•\n ") for t in raw_topics.splitlines() if t.strip()] 82 | if topic_lines: 83 | for topic in topic_lines: 84 | st.markdown(f"- {topic}") 85 | else: 86 | st.markdown("No topics found.") 87 | 88 | # Step 2: Generate full solution 89 | solution_response = client.qna_engine.solve_doubt( 90 | image_source=temp_image_path, 91 | prompt="Provide a detailed solution for this JEE Advanced problem.", 92 | detail_level="High" 93 | ) 94 | solution = getattr(solution_response, 'explanation', "No solution found.") 95 | steps = getattr(solution_response, 'steps', []) 96 | notes = getattr(solution_response, 'additional_notes', "No additional notes.") 97 | st.markdown("---") 98 | st.subheader("🧠 Detailed Solution") 99 | st.markdown(solution) 100 | 101 | st.markdown("#### 📌 Step-by-Step Breakdown") 
102 | if steps: 103 | for i, step in enumerate(steps, start=1): 104 | st.markdown(f"**Step {i}:** {step}") 105 | else: 106 | st.markdown("_No individual steps found._") 107 | 108 | st.markdown("#### 🗒️ Additional Notes") 109 | st.markdown(notes) 110 | 111 | # Step 3: Generate practice questions 112 | practice_questions_response = client.qna_engine.generate_questions( 113 | topic=raw_topics, 114 | num=5, 115 | question_type="Multiple Choice", 116 | custom_instructions="Generate practice questions based on the same concept." 117 | ) 118 | questions = getattr(practice_questions_response, 'questions', []) 119 | st.markdown("---") 120 | st.subheader("📝 Practice Questions") 121 | for idx, question in enumerate(questions, start=1): 122 | st.markdown(f"**Question {idx}:** {question.question}") 123 | 124 | st.markdown("**Options:**") 125 | for option in question.options: 126 | st.markdown(f"- {option}") 127 | 128 | st.markdown(f"**Answer:** {question.answer}") 129 | st.markdown("**Explanation:**") 130 | st.markdown(question.explanation) 131 | st.markdown("---") 132 | 133 | except Exception as e: 134 | st.error(f"❌ An error occurred:\n\n{e}") 135 | 136 | # Clean up temporary image 137 | os.remove(temp_image_path) 138 | 139 | 140 | if __name__ == "__main__": 141 | main() 142 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Educhain_pedagogy/frontend/src/components/ParamForm.jsx: -------------------------------------------------------------------------------- 1 | import { useState, useEffect } from "react"; 2 | 3 | export default function ParamForm({ paramsDef, onSubmit, isSubmitting = false, pedagogy }) { 4 | const [formData, setFormData] = useState({}); 5 | 6 | // Set default values for all pedagogies 7 | useEffect(() => { 8 | const defaultValues = getDefaultValues(pedagogy); 9 | setFormData(defaultValues); 10 | }, [pedagogy]); 11 | 12 | const getDefaultValues = (pedagogyName) => { 13 | const defaults = { 14 | blooms_taxonomy: { 15 | grade_level: "High School", 16 | target_level: "Intermediate" 17 | }, 18 | socratic_questioning: { 19 | depth_level: "Intermediate", 20 | student_level: "High School" 21 | }, 22 | project_based_learning: { 23 | project_duration: "4-6 weeks", 24 | team_size: "3-4 students", 25 | industry_focus: "General" 26 | }, 27 | flipped_classroom: { 28 | class_duration: "50 minutes", 29 | prep_time: "30-45 minutes", 30 | technology_level: "Moderate" 31 | }, 32 | inquiry_based_learning: { 33 | inquiry_type: "Guided", 34 | investigation_scope: "Moderate", 35 | student_autonomy: "Balanced" 36 | }, 37 | constructivist: { 38 | prior_knowledge_level: "Mixed", 39 | social_interaction_focus: "High", 40 | reflection_emphasis: "Strong" 41 | }, 42 | gamification: { 43 | game_mechanics: "Points, badges, levels", 44 | competition_level: "Moderate", 45 | technology_platform: "Web-based" 46 | }, 47 | peer_learning: { 48 | group_size: "3-4 students", 49 | collaboration_type: "Mixed", 50 | skill_diversity: "Moderate" 51 | } 52 | }; 53 | return defaults[pedagogyName] || {}; 54 | }; 55 | 56 | const handleChange = (key, value) => { 57 | setFormData({ ...formData, [key]: value }); 58 | }; 59 | 60 | const humanize = (text) => 61 | String(text) 62 | .replace(/_/g, " ") 63 | .replace(/\b\w/g, (m) => m.toUpperCase()); 64 | 65 | const getInputType = (key, pedagogyName) => { 66 | // All parameters will be dropdowns for better UX 67 | return "select"; 68 | }; 69 | 70 | const getOptions = (key, pedagogyName) => { 71 | const options = { 72 | // Blooms 
Taxonomy 73 | grade_level: ["Elementary", "Middle School", "High School", "College", "University"], 74 | target_level: ["Beginner", "Intermediate", "Advanced", "Expert"], 75 | 76 | // Socratic Questioning 77 | depth_level: ["Basic", "Intermediate", "Advanced", "Expert"], 78 | student_level: ["Elementary", "Middle School", "High School", "College", "University"], 79 | 80 | // Project Based Learning 81 | project_duration: ["1-2 weeks", "2-4 weeks", "4-6 weeks", "6-8 weeks", "8+ weeks"], 82 | team_size: ["Individual", "2 students", "3-4 students", "5-6 students", "7+ students"], 83 | industry_focus: ["General", "Technology", "Healthcare", "Education", "Business", "Arts", "Science", "Engineering"], 84 | 85 | // Flipped Classroom 86 | class_duration: ["30 minutes", "45 minutes", "50 minutes", "60 minutes", "90 minutes", "120 minutes"], 87 | prep_time: ["15-20 minutes", "20-30 minutes", "30-45 minutes", "45-60 minutes", "60+ minutes"], 88 | technology_level: ["Basic", "Moderate", "Advanced", "Expert"], 89 | 90 | // Inquiry Based Learning 91 | inquiry_type: ["Structured", "Guided", "Open", "Free"], 92 | investigation_scope: ["Limited", "Moderate", "Extensive", "Comprehensive"], 93 | student_autonomy: ["Low", "Balanced", "High", "Complete"], 94 | 95 | // Constructivist 96 | prior_knowledge_level: ["None", "Basic", "Mixed", "Advanced", "Expert"], 97 | social_interaction_focus: ["Low", "Medium", "High", "Essential"], 98 | reflection_emphasis: ["Minimal", "Moderate", "Strong", "Critical"], 99 | 100 | // Gamification 101 | game_mechanics: ["Points, badges, levels", "Leaderboards", "Achievements", "Quests", "Story-based", "Competition", "Collaboration"], 102 | competition_level: ["None", "Low", "Moderate", "High", "Intense"], 103 | technology_platform: ["Web-based", "Mobile app", "Desktop software", "Mixed reality", "Board games", "Hybrid"], 104 | 105 | // Peer Learning 106 | group_size: ["2 students", "3-4 students", "5-6 students", "7-8 students", "9+ students"], 107 | collaboration_type: ["Individual", "Pairs", "Small groups", "Large groups", "Mixed"], 108 | skill_diversity: ["Low", "Moderate", "High", "Mixed", "Random"] 109 | }; 110 | 111 | return options[key] || ["Option 1", "Option 2", "Option 3"]; 112 | }; 113 | 114 | // For all pedagogies, show the required parameters with dropdowns 115 | const getParametersToShow = () => { 116 | if (pedagogy && paramsDef) { 117 | return paramsDef; 118 | } 119 | // Fallback if no paramsDef provided 120 | return getDefaultValues(pedagogy); 121 | }; 122 | 123 | const parametersToShow = getParametersToShow(); 124 | 125 | return ( 126 |
{ 128 | e.preventDefault(); 129 | // Ensure we always send the default values for the specific pedagogy 130 | const finalData = { ...getDefaultValues(pedagogy), ...formData }; 131 | onSubmit(finalData); 132 | }} 133 | className="space-y-6" 134 | > 135 | {Object.entries(parametersToShow).map(([key, desc]) => ( 136 |
137 | 140 | 153 | {desc && ( 154 |

{desc}

155 | )} 156 |
157 | ))} 158 | 159 | 170 |
171 | ); 172 | } 173 | -------------------------------------------------------------------------------- /cookbook/starter-apps/Consultancy-Prep/c-app.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from educhain import Educhain, LLMConfig 3 | from langchain_google_genai import ChatGoogleGenerativeAI 4 | 5 | # ------------------------- Gemini Model Initialization ------------------------- 6 | gemini_flash = ChatGoogleGenerativeAI( 7 | model="gemini-1.5-flash", 8 | google_api_key = st.secrets["GOOGLE_API_KEY"] # Replace with your actual Google API Key 9 | ) 10 | 11 | Gemini_config = LLMConfig(custom_model=gemini_flash) 12 | client = Educhain(Gemini_config) # Initialize Educhain with Gemini config 13 | 14 | # ------------------------- Framework Generator using Gemini ------------------------- 15 | def generate_framework_with_gemini(prompt): 16 | """ 17 | Generates a structured consulting framework for the provided prompt using Gemini. 18 | """ 19 | query = f""" 20 | You are a management consulting expert. Based on the following business case or problem, suggest a structured framework that can be used to approach and solve the case: 21 | 22 | {prompt} 23 | 24 | Provide the framework in clear bullet points. 25 | """ 26 | response = gemini_flash.invoke(query) 27 | return response.content # Extract text content from Gemini response 28 | 29 | # ------------------------- Guesstimate Generator using Gemini ------------------------- 30 | def generate_guesstimate_with_gemini(prompt): 31 | """ 32 | Generates a consulting guesstimate problem and approach using Gemini. 33 | """ 34 | query = f""" 35 | You are helping a candidate prepare for consulting interviews. Generate a guesstimate problem based on this prompt: 36 | 37 | {prompt} 38 | 39 | Also, provide a structured approach to solve this guesstimate. 40 | """ 41 | response = gemini_flash.invoke(query) 42 | return response.content # Extract text content from Gemini response 43 | 44 | # ------------------------- Streamlit App Interface ------------------------- 45 | st.title("🧩 Consulting Interview Prep App") 46 | st.write("Generate practice questions, guesstimates, and frameworks for management consultancy interviews.") 47 | 48 | # User selects input type and difficulty level 49 | input_type = st.selectbox("Choose Input Type", ["Manual Prompt", "Upload PDF File", "Website URL"]) 50 | difficulty_type = st.selectbox("Choose Difficulty Level", ["Beginner", "Intermediate", "Advanced"]) 51 | 52 | user_prompt = None 53 | input_source_type = None # To keep track of source type for Educhain 54 | 55 | # ------------------------- Input Handling ------------------------- 56 | if input_type == "Manual Prompt": 57 | user_prompt = st.text_area("Enter your case prompt:", "Profitability case for an e-commerce company") 58 | if user_prompt and len(user_prompt.strip()) < 10: 59 | st.warning("Please provide a more detailed prompt (at least 10 characters).") 60 | user_prompt = None 61 | 62 | elif input_type == "Upload PDF File": 63 | uploaded_file = st.file_uploader("Upload a PDF Casebook:", type="pdf") 64 | if uploaded_file: 65 | if uploaded_file.size > 10 * 1024 * 1024: # 10MB limit 66 | st.error("File size too large. 
Please upload a file smaller than 10MB.") 67 | uploaded_file = None 68 | else: 69 | user_prompt = uploaded_file # File object 70 | input_source_type = "pdf" 71 | 72 | elif input_type == "Website URL": 73 | url = st.text_input("Enter Website URL to extract cases:") 74 | if url: 75 | import re 76 | url_pattern = re.compile(r'^https?://.+') 77 | if not url_pattern.match(url): 78 | st.error("Please enter a valid URL starting with http:// or https://") 79 | url = None 80 | else: 81 | user_prompt = url # URL string 82 | input_source_type = "url" 83 | 84 | # ------------------------- Content Generation Trigger ------------------------- 85 | if st.button("Generate Interview Prep Content"): 86 | if user_prompt: 87 | with st.spinner('Generating content...'): 88 | 89 | # MCQ Generation: Manual Prompt vs File/URL 90 | if input_type == "Manual Prompt": 91 | mcq_list = client.qna_engine.generate_questions( 92 | topic=user_prompt, 93 | num=3, 94 | difficulty_level=difficulty_type, 95 | question_type="Multiple Choice" 96 | ) 97 | questions = mcq_list.questions 98 | else: 99 | mcq_list = client.qna_engine.generate_questions_from_data( 100 | source=user_prompt, 101 | source_type=input_source_type, 102 | num=3, 103 | question_type="Multiple Choice", 104 | difficulty_level=difficulty_type, 105 | custom_instructions="Generate consulting related MCQs" 106 | ) 107 | questions = mcq_list.questions 108 | 109 | # Framework & Guesstimate Generation using Gemini 110 | if input_type == "Manual Prompt": 111 | framework_prompt = user_prompt 112 | guesstimate_prompt = user_prompt 113 | else: 114 | framework_prompt = f"Based on the uploaded {'PDF' if input_type == 'Upload PDF File' else 'website'} content, identify a business case and create a structured consulting framework to approach it." 115 | guesstimate_prompt = f"Based on the uploaded {'PDF' if input_type == 'Upload PDF File' else 'website'} content, create a relevant guesstimate problem that would be appropriate for a consulting interview." 116 | 117 | framework = generate_framework_with_gemini( 118 | framework_prompt 119 | ) 120 | guesstimate = generate_guesstimate_with_gemini( 121 | guesstimate_prompt 122 | ) 123 | 124 | # ------------------------- Display Generated Content ------------------------- 125 | st.subheader("🔍 Multiple Choice Questions (MCQs)") 126 | for idx, q in enumerate(questions, 1): 127 | st.write(f"{idx}. {q.question}") 128 | for opt_idx, opt in enumerate(q.options, 1): 129 | # Handle both string options and object options 130 | if isinstance(opt, str): 131 | st.write(f" - {chr(64+opt_idx)}. {opt}") 132 | else: 133 | st.write(f" {chr(64+opt_idx)}. 
{opt.text}") # fallback for Option object 134 | 135 | # Display correct answer if available 136 | if hasattr(q, 'answer') and q.answer: 137 | with st.expander("Show Answer"): 138 | st.write(f"**Correct Answer:** {q.answer}") 139 | if hasattr(q, 'explanation') and q.explanation: 140 | st.write(f"**Explanation:** {q.explanation}") 141 | st.write("---") 142 | 143 | st.subheader("📝 Suggested Framework") 144 | st.write(framework) 145 | 146 | st.subheader("📊 Guesstimate Problem") 147 | st.write(guesstimate) 148 | else: 149 | st.warning("Please provide valid input to generate content.") 150 | else: 151 | st.info("Provide input and click 'Generate Interview Prep Content' to start.") 152 | -------------------------------------------------------------------------------- /cookbook/starter-apps/playground/pages/5_📝_Lesson Plan.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from pydantic import BaseModel, Field 3 | from typing import List, Optional 4 | from fpdf import FPDF 5 | from utils.models import client_model 6 | 7 | client = client_model() 8 | 9 | class MainTopic(BaseModel): 10 | title: str 11 | description: str 12 | activities: List[str] 13 | 14 | class LessonPlan(BaseModel): 15 | title: str = Field(..., description="The overall title of the lesson plan.") 16 | subject: str = Field(..., description="The subject area of the lesson.") 17 | learning_objectives: List[str] = Field(..., description="Learning objectives.") 18 | lesson_introduction: str = Field(..., description="Introduction to the topic.") 19 | main_topics: List[MainTopic] = Field(..., description="Topics and activities.") 20 | learning_adaptations: Optional[str] = None 21 | real_world_applications: Optional[str] = None 22 | ethical_considerations: Optional[str] = None 23 | 24 | def show(self): 25 | st.markdown(f"## 📘 {self.title}") 26 | st.markdown(f"**Subject:** {self.subject}") 27 | 28 | st.markdown("### 🎯 Learning Objectives") 29 | for obj in self.learning_objectives: 30 | st.markdown(f"- {obj}") 31 | 32 | st.markdown("### 🧠 Introduction") 33 | st.markdown(self.lesson_introduction) 34 | 35 | st.markdown("### 📚 Main Topics") 36 | for idx, topic in enumerate(self.main_topics, 1): 37 | st.markdown(f"#### {idx}. {topic.title}") 38 | st.markdown(topic.description) 39 | st.markdown("**Activities:**") 40 | for act in topic.activities: 41 | st.markdown(f"- {act}") 42 | 43 | if self.learning_adaptations: 44 | st.markdown("### 🔄 Learning Adaptations") 45 | st.markdown(self.learning_adaptations) 46 | 47 | if self.real_world_applications: 48 | st.markdown("### 🌐 Real-World Applications") 49 | st.markdown(self.real_world_applications) 50 | 51 | if self.ethical_considerations: 52 | st.markdown("### ⚖️ Ethical Considerations") 53 | st.markdown(self.ethical_considerations) 54 | 55 | def to_pdf(self, path="lesson_plan.pdf", watermark: bool = False): 56 | pdf = FPDF() 57 | pdf.add_page() 58 | pdf.set_font("Arial", size=12) 59 | pdf.set_auto_page_break(auto=True, margin=15) 60 | 61 | def write(text): 62 | pdf.multi_cell(0, 10, text) 63 | 64 | write(f"Lesson Plan - {self.title}") 65 | write(f"\nSubject: {self.subject}\n") 66 | write("Objectives:") 67 | for obj in self.learning_objectives: 68 | write(f"- {obj}") 69 | 70 | write("\nIntroduction:\n" + self.lesson_introduction) 71 | 72 | for idx, topic in enumerate(self.main_topics, 1): 73 | write(f"\n{idx}. 
{topic.title}\n{topic.description}") 74 | write("Activities:") 75 | for act in topic.activities: 76 | write(f"- {act}") 77 | 78 | if self.learning_adaptations: 79 | write("\nLearning Adaptations:\n" + self.learning_adaptations) 80 | 81 | if self.real_world_applications: 82 | write("\nReal-World Applications:\n" + self.real_world_applications) 83 | 84 | if self.ethical_considerations: 85 | write("\nEthical Considerations:\n" + self.ethical_considerations) 86 | 87 | if watermark: 88 | pdf.set_text_color(150, 150, 150) 89 | pdf.set_xy(60, 270) 90 | pdf.set_font("Arial", size=10, style="I") 91 | pdf.cell(0, 10, "EduChain · AI-Powered Learning", align="C") 92 | 93 | pdf.output(path) 94 | return path 95 | 96 | st.markdown("
<h1 style='text-align: center;'>📘 AI-Powered Lesson Plan Generator</h1>", unsafe_allow_html=True) 97 | st.markdown("<h3 style='text-align: center;'>Generate complete academic lesson plans using Gemini Flash + EduChain ⚡</h3>
", unsafe_allow_html=True) 98 | lesson_topic = st.text_input("🔎 Enter a Topic for the Lesson Plan") 99 | add_watermark = st.checkbox("Add Educhain Watermark to PDF", value=True) 100 | 101 | if st.button("📖 Generate Lesson Plan") and lesson_topic: 102 | with st.spinner("Generating your lesson plan..."): 103 | try: 104 | result = client.content_engine.generate_lesson_plan( 105 | topic=lesson_topic, 106 | response_model = LessonPlan 107 | ) 108 | result.show() 109 | 110 | pdf_path = result.to_pdf(watermark=add_watermark) 111 | with open(pdf_path, "rb") as f: 112 | st.download_button("📥 Download Lesson Plan as PDF", f, file_name="lesson_plan.pdf", mime="application/pdf") 113 | 114 | except Exception as e: 115 | st.error("❌ Failed to parse the lesson plan. The topic might be too short or malformed.") 116 | st.exception(e) 117 | 118 | st.markdown("---") 119 | st.caption("Built with ❤️ using EduChain · Gemini Flash 🌟") 120 | 121 | 122 | with st.popover("Open popover"): 123 | st.markdown(" Turn On Developer Mode? ") 124 | Developer_Mode = st.checkbox("Check 'On' to Turn-on Developer Mode") 125 | 126 | if Developer_Mode == True: 127 | st.write("Welcome Developers!! Here is an in-depth explanation of all of the tools used here.") 128 | st.page_link("https://github.com/satvik314/educhain/blob/main/cookbook/features/educhain_generate_lesson_plan.ipynb", label="GitHub", icon = "🔗") 129 | st.markdown(""" 130 | 🧠 Overview: 131 | ------------- 132 | This Streamlit app allows educators to **automatically generate full lesson plans** using a single topic input. It utilizes EduChain’s content engine with Gemini Flash for generating structured educational plans including: 133 | 134 | - Objectives 135 | - Introduction 136 | - Main content 137 | - Assessment ideas 138 | - Conclusion 139 | 140 | You can also **download the plan as a PDF**, optionally branded with a watermark. 141 | 142 | 📦 Setup and Configuration: 143 | ----------------------------- 144 | from educhain import Educhain, LLMConfig 145 | from langchain_google_genai import ChatGoogleGenerativeAI 146 | 147 | # Load the Gemini API key 148 | GOOGLE_API_KEY = os.getenv("GEMINI_KEY") 149 | 150 | # Configure Gemini Flash and Educhain 151 | gemini_flash = ChatGoogleGenerativeAI(model="gemini-2.0-flash", google_api_key=GOOGLE_API_KEY) 152 | flash_config = LLMConfig(custom_model=gemini_flash) 153 | client = Educhain(flash_config) 154 | 155 | 📐 LessonPlan Model: 156 | ---------------------- 157 | This app defines a Pydantic model `LessonPlan` that represents the expected structure of the lesson plan returned by Educhain’s API. 158 | 159 | It includes fields for: 160 | - topic 161 | - objectives (list of strings) 162 | - introduction 163 | - content 164 | - assessment 165 | - conclusion 166 | 167 | Two custom methods: 168 | - `.show()` → Renders the plan in the Streamlit UI. 169 | - `.to_pdf(path, watermark)` → Exports the plan to a downloadable PDF. 170 | 171 | 🚀 Main Logic: 172 | ---------------- 173 | When the user inputs a lesson topic and clicks the generate button, the following happens: 174 | 175 | 1. 
The topic is sent to: 176 | ```python 177 | client.content_engine.generate_lesson_plan( 178 | topic=lesson_topic, 179 | response_model=LessonPlan 180 | ) 181 | """ 182 | ) 183 | -------------------------------------------------------------------------------- /archive/content_engine.py: -------------------------------------------------------------------------------- 1 | from langchain_openai import ChatOpenAI 2 | from langchain.prompts import PromptTemplate 3 | from langchain.chains import LLMChain 4 | from langchain.output_parsers import PydanticOutputParser 5 | from .models import LessonPlan,QuestionPaper , DoubtSolverConfig, SolvedDoubt 6 | from typing import List, Optional ,Any 7 | from langchain.schema import HumanMessage, SystemMessage 8 | import os 9 | import base64 10 | 11 | # Generated Lesson Plan 12 | def generate_lesson_plan(topic, llm=None, response_model=None, prompt_template=None, **kwargs): 13 | if response_model == None: 14 | parser = PydanticOutputParser(pydantic_object=LessonPlan) 15 | format_instructions = parser.get_format_instructions() 16 | else: 17 | parser = PydanticOutputParser(pydantic_object=response_model) 18 | format_instructions = parser.get_format_instructions() 19 | 20 | if prompt_template is None: 21 | prompt_template = """ 22 | Generate a comprehensive lesson plan for the given topic and duration. 23 | Include the following details in the lesson plan: 24 | - Objectives: List the learning objectives of the lesson. 25 | - Introduction: Provide an engaging introduction to the lesson. 26 | - Content Outline: Outline the main points or sections of the lecture content. 27 | - Assessment: Describe how the students' understanding will be assessed. 28 | - Conclusion: Summarize the key takeaways and provide a conclusion for the lesson. 29 | 30 | Topic: {topic} 31 | """ 32 | 33 | # Append the JSON format instruction line to the custom prompt template 34 | prompt_template += "\nThe response should be in JSON format. \n {format_instructions}" 35 | 36 | lesson_plan_prompt = PromptTemplate( 37 | input_variables=["topic"], 38 | template=prompt_template, 39 | partial_variables={"format_instructions": format_instructions} 40 | ) 41 | 42 | if llm: 43 | llm = llm 44 | else: 45 | llm = ChatOpenAI(model="gpt-3.5-turbo") 46 | 47 | lesson_plan_chain = lesson_plan_prompt | llm 48 | 49 | results = lesson_plan_chain.invoke( 50 | {"topic": topic, **kwargs}, 51 | ) 52 | results = results.content 53 | structured_output = parser.parse(results) 54 | return structured_output 55 | 56 | def generate_question_paper( 57 | subject: str, 58 | grade_level: int, 59 | num_questions: int, 60 | question_types: List[str] = ['multiple_choice'], 61 | time_limit: Optional[int] = None, 62 | difficulty_level: Optional[str] = None, 63 | topics: Optional[List[str]] = None, 64 | llm=None, 65 | response_model=None, 66 | prompt_template=None, 67 | **kwargs 68 | ): 69 | if response_model is None: 70 | parser = PydanticOutputParser(pydantic_object=QuestionPaper) 71 | format_instructions = parser.get_format_instructions() 72 | else: 73 | parser = PydanticOutputParser(pydantic_object=response_model) 74 | format_instructions = parser.get_format_instructions() 75 | 76 | if prompt_template is None: 77 | prompt_template = """ 78 | Generate a {num_questions}-question multiple-choice {subject} assessment for grade {grade_level}. 79 | 80 | The assessment should have a time limit of {time_limit} minutes if provided, and a difficulty level of {difficulty_level} if provided. 
81 | The assessment should cover the following topics if provided: {topics} 82 | 83 | The response should be in JSON format. 84 | {format_instructions} 85 | """ 86 | 87 | QP_prompt = PromptTemplate( 88 | input_variables=["subject", "grade_level", "num_questions", "time_limit", "difficulty_level", "topics"], 89 | template=prompt_template, 90 | partial_variables={"format_instructions": format_instructions} 91 | ) 92 | 93 | if llm: 94 | llm = llm 95 | else: 96 | llm = ChatOpenAI(model="gpt-3.5-turbo") 97 | 98 | QP_chain = QP_prompt | llm 99 | 100 | results = QP_chain.invoke( 101 | { 102 | "subject": subject, 103 | "grade_level": grade_level, 104 | "num_questions": num_questions, 105 | "question_types": question_types, 106 | "time_limit": time_limit, 107 | "difficulty_level": difficulty_level, 108 | "topics": topics, 109 | **kwargs 110 | } 111 | ) 112 | 113 | structured_output = parser.parse(results.content) 114 | 115 | return structured_output 116 | 117 | # Vision Class 118 | class DoubtSolver: 119 | def __init__(self, config: DoubtSolverConfig = DoubtSolverConfig()): 120 | self.config = config 121 | 122 | def solve(self, 123 | img_path: str, 124 | prompt: str = "Explain how to solve this problem", 125 | llm: Optional[Any] = None, 126 | custom_instructions: Optional[str] = None, 127 | **kwargs) -> Optional[SolvedDoubt]: 128 | 129 | if not img_path: 130 | raise ValueError("Image path or URL is required") 131 | 132 | image_content = self._get_image_content(img_path) 133 | 134 | parser = PydanticOutputParser(pydantic_object=SolvedDoubt) 135 | format_instructions = parser.get_format_instructions() 136 | 137 | system_message = SystemMessage(content="You are a helpful assistant that responds in Markdown. Help with math homework.") 138 | 139 | human_message_content = f""" 140 | Analyze the image and {prompt} 141 | 142 | Provide: 143 | 1. A detailed explanation 144 | 2. Step-by-step solution (if applicable) 145 | 3. 
Any additional notes or tips 146 | 147 | {custom_instructions or ''} 148 | 149 | {format_instructions} 150 | """ 151 | 152 | human_message = HumanMessage(content=[ 153 | {"type": "text", "text": human_message_content}, 154 | {"type": "image_url", "image_url": {"url": image_content}} 155 | ]) 156 | 157 | if llm is None: 158 | llm = self._get_chat_model() 159 | 160 | try: 161 | response = llm([system_message, human_message]) 162 | result = parser.parse(response.content) 163 | return result 164 | except Exception as e: 165 | print(f"Error in solve: {type(e).__name__}: {str(e)}") 166 | return None 167 | 168 | def _get_chat_model(self) -> ChatOpenAI: 169 | config = self.config.gpt4 170 | return ChatOpenAI( 171 | model_name=config.model_name, 172 | api_key=os.getenv(config.api_key_name), 173 | max_tokens=config.max_tokens, 174 | temperature=0, 175 | ) 176 | 177 | @staticmethod 178 | def _get_image_content(img_path: str) -> str: 179 | try: 180 | if img_path.startswith(('http://', 'https://')): 181 | return img_path 182 | elif img_path.startswith('data:image'): 183 | return img_path 184 | elif os.path.isfile(img_path): 185 | with open(img_path, "rb") as image_file: 186 | image_data = image_file.read() 187 | base64_image = base64.b64encode(image_data).decode('utf-8') 188 | return f"data:image/jpeg;base64,{base64_image}" 189 | else: 190 | raise ValueError("Invalid image path or URL") 191 | except Exception as e: 192 | print(f"Error in _get_image_content: {type(e).__name__}: {str(e)}") 193 | raise 194 | -------------------------------------------------------------------------------- /cookbook/starter-apps/playground/pages/2 📄_Generate From Text-PDF-URL.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from utils.models import client_model 3 | from PyPDF2 import PdfReader 4 | client = client_model() 5 | 6 | st.markdown("

📄 Text / PDF / URL to Question Bank", unsafe_allow_html=True) 7 | st.markdown("Generate smart, quality questions instantly using Gemini Flash + EduChain ⚡
", unsafe_allow_html=True) 8 | st.markdown(""" 9 | Easily create questions from a block of text, an online article, or an academic PDF. 10 | Select your data source below and customize the question type and difficulty. 11 | """) 12 | st.divider() 13 | 14 | source_type = st.selectbox("Choose Source Type", ["Text", "URL", "PDF"], index=0) 15 | num = st.slider("Number of Questions", 1, 20, 5) 16 | question_type = st.selectbox("Question Type", ["Multiple Choice", "True/False", "Fill in the Blank", "Short Answer"]) 17 | difficulty = st.selectbox("Difficulty Level", ["Beginner", "Intermediate", "Advanced"]) 18 | custom_instr = st.text_area("Custom Instructions (Optional)", "", height=68) 19 | 20 | def show_result(result): 21 | st.success("✅ Questions Generated!") 22 | for i, q in enumerate(result.questions, 1): 23 | st.markdown(f"### Q{i}. {q.question}") 24 | if hasattr(q, "options") and q.options: 25 | for j, opt in enumerate(q.options): 26 | st.markdown(f"- **{chr(65+j)}.** {opt}") 27 | st.markdown(f"✅ **Answer:** `{q.answer}`") 28 | elif hasattr(q, "answer"): 29 | st.markdown(f"✅ **Answer:** `{q.answer}`") 30 | if hasattr(q, "blank_word") and q.blank_word: 31 | st.caption(f"✏️ Fill in: `{q.blank_word}`") 32 | if getattr(q, "explanation", None): 33 | st.info(f"💡 {q.explanation}") 34 | st.markdown("---") 35 | 36 | if source_type == "Text": 37 | text_input = st.text_area("Paste your content here:", height=200) 38 | if st.button("🚀 Generate from Text") and text_input: 39 | with st.spinner("Generating questions from text..."): 40 | result = client.qna_engine.generate_questions_from_data( 41 | source=text_input, 42 | source_type="text", 43 | num=num, 44 | question_type=question_type, 45 | difficulty_level=difficulty, 46 | custom_instructions=custom_instr 47 | ) 48 | show_result(result) 49 | 50 | elif source_type == "URL": 51 | url_input = st.text_input("Enter the URL of an article or webpage:") 52 | if st.button("🌐 Generate from URL") and url_input: 53 | with st.spinner("Fetching and processing URL..."): 54 | result = client.qna_engine.generate_questions_from_data( 55 | source=url_input, 56 | source_type="url", 57 | num=num, 58 | question_type=question_type, 59 | difficulty_level=difficulty, 60 | custom_instructions=custom_instr 61 | ) 62 | show_result(result) 63 | 64 | elif source_type == "PDF": 65 | uploaded_file = st.file_uploader("Upload a PDF file", type=["pdf"]) 66 | if uploaded_file and st.button("📄 Generate from PDF"): 67 | with st.spinner("Extracting text and generating questions..."): 68 | reader = PdfReader(uploaded_file) 69 | pdf_text = " ".join([page.extract_text() or "" for page in reader.pages]) 70 | pdf_text = " ".join(pdf_text.split()) 71 | 72 | result = client.qna_engine.generate_questions_from_data( 73 | source=pdf_text, 74 | source_type="text", 75 | num=num, 76 | question_type=question_type, 77 | difficuly_level = difficulty, 78 | custom_instructions=custom_instr 79 | ) 80 | show_result(result) 81 | 82 | st.markdown("---") 83 | st.caption("Built with ❤️ using EduChain · Gemini Flash ✨") 84 | 85 | with st.popover("Open popover"): 86 | st.markdown(" Turn On Developer Mode? ") 87 | Developer_Mode = st.checkbox("Check 'On' to Turn-on Developer Mode") 88 | 89 | if Developer_Mode == True: 90 | st.write("Welcome Developers!! 
Here is an in-depth explanation of all of the tools used here.") 91 | st.page_link("https://github.com/satvik314/educhain/blob/main/cookbook/features/Bulk_Question_Generation_Using_Educhain.ipynb", label="GitHub", icon = "🔗") 92 | st.markdown(""" 93 | 🔧 Overview: 94 | ------------ 95 | This app lets users generate intelligent, structured questions from three types of data sources: 96 | 1. Raw Text 97 | 2. Web URL (articles, blogs, etc.) 98 | 3. Uploaded PDF documents 99 | 100 | It uses the Educhain library with Gemini Flash (via LangChain) to extract and convert content into question banks. 101 | 102 | 💡 Initialization and Setup: 103 | ----------------------------- 104 | from educhain import Educhain, LLMConfig 105 | from langchain_google_genai import ChatGoogleGenerativeAI 106 | 107 | # Load Gemini API key from .env 108 | GOOGLE_API_KEY = os.getenv("GEMINI_KEY") 109 | 110 | # Create Gemini Flash model and config 111 | gemini_flash = ChatGoogleGenerativeAI(model="gemini-2.0-flash", google_api_key=GOOGLE_API_KEY) 112 | flash_config = LLMConfig(custom_model=gemini_flash) 113 | 114 | # Initialize Educhain client 115 | client = Educhain(flash_config) 116 | 117 | 📥 User Input: 118 | -------------- 119 | - Source type: Choose between Text / URL / PDF 120 | - Number of questions 121 | - Question type: MCQ / T/F / Fill-in / Short Answer 122 | - Difficulty: Beginner / Intermediate / Advanced 123 | - Optional: Custom instructions to guide the model (e.g., “focus on factual questions”) 124 | 125 | 🔍 How Generation Works: 126 | -------------------------- 127 | For each data type, the app calls (a minimal runnable sketch is included at the end of this explanation): 128 | 129 | client.qna_engine.generate_questions_from_data( 130 | source=..., # raw text, extracted PDF text, or a URL 131 | source_type="text|url", # PDFs are converted to plain text in the app first 132 | num=..., # number of questions 133 | question_type=..., # question format 134 | difficulty_level=..., # difficulty 135 | custom_instructions=... # optional text 136 | ) 137 | 138 | Educhain handles: 139 | - Reading and preprocessing the source (e.g., scraping the URL) 140 | - Prompting Gemini Flash with structured prompts 141 | - Extracting Q&A from the response 142 | 143 | 📤 Streamlit Display: 144 | ---------------------- 145 | A helper function `show_result()` displays the generated questions: 146 | - Numbered question titles 147 | - Answer options (for MCQ) 148 | - Final answer (highlighted) 149 | - Explanation or fill-in-the-blank word, if present 150 | 151 | Example output display: 152 | 153 | ### Q1. What is photosynthesis? 154 | - A. Energy from the moon 155 | - B. Conversion of light to chemical energy 156 | ✅ Answer: B 157 | 💡 Explanation: Photosynthesis is the conversion of light energy into chemical energy by plants. 158 | 159 | 📁 Special Notes on PDF Handling: 160 | ----------------------------------- 161 | - The uploaded PDF is read in the app with PyPDF2's `PdfReader` 162 | - The extracted, whitespace-normalized text is passed to Educhain with source_type="text", so Educhain never receives the PDF file itself 163 | 164 | ❤️ Summary: 165 | ------------- 166 | This Streamlit app provides a seamless way to generate question banks from multiple content formats. The combination of: 167 | - LangChain + Gemini Flash for fast LLM response 168 | - Educhain for educational logic 169 | - Streamlit for clean UI 170 | 171 | ...makes it a powerful tool for teachers, students, and edtech developers alike. 
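🧪 Minimal Sketch (for reference):
-----------------------------------
The snippet below is a rough, self-contained sketch of the same call this page makes — not a copy of the app's code. It assumes a valid GEMINI_KEY is set in your environment and that educhain and langchain-google-genai are installed; the sample text, counts, and instructions are placeholders.

```python
import os

from educhain import Educhain, LLMConfig
from langchain_google_genai import ChatGoogleGenerativeAI

# Build the Gemini Flash-backed Educhain client (same setup described above)
gemini_flash = ChatGoogleGenerativeAI(model="gemini-2.0-flash", google_api_key=os.getenv("GEMINI_KEY"))
client = Educhain(LLMConfig(custom_model=gemini_flash))

# Generate three beginner-level MCQs from a short text snippet
result = client.qna_engine.generate_questions_from_data(
    source="Photosynthesis converts light energy into chemical energy in plants.",
    source_type="text",
    num=3,
    question_type="Multiple Choice",
    difficulty_level="Beginner",
    custom_instructions="Focus on factual recall."
)

# Each generated question exposes .question and .answer (plus .options for MCQs),
# which is what show_result() reads when rendering this page.
for q in result.questions:
    print(q.question, "->", q.answer)
```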
172 | """) 173 | -------------------------------------------------------------------------------- /cookbook/use-cases/generate_quiz_on_latest_news.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "provenance": [] 7 | }, 8 | "kernelspec": { 9 | "name": "python3", 10 | "display_name": "Python 3" 11 | }, 12 | "language_info": { 13 | "name": "python" 14 | } 15 | }, 16 | "cells": [ 17 | { 18 | "cell_type": "markdown", 19 | "source": [ 20 | "\n", 21 | "\n", 22 | "Educhain is a powerful Python package that leverages Generative AI to create\n", 23 | "engaging and personalized educational content. From generating multiple-choice questions to crafting comprehensive lesson plans, Educhain makes it easy to apply AI in various educational scenarios." 24 | ], 25 | "metadata": { 26 | "id": "lQJLfaE-EszB" 27 | } 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "source": [ 32 | "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gfcqUjJKuPNmPRoP5y0u6B3kSZWhZKha?usp=sharing)" 33 | ], 34 | "metadata": { 35 | "id": "Wo6WLPxvU0og" 36 | } 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "source": [ 41 | "## Generating MCQs on Latest News" 42 | ], 43 | "metadata": { 44 | "id": "K3OpxF6KTf5W" 45 | } 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "source": [ 50 | "This Colab notebook demonstrates how to generate multiple-choice questions (MCQs)\n", 51 | "based on the latest news using AI-powered tools.\n", 52 | "\n", 53 | "Key features:\n", 54 | "1. Fetches recent news on a specified topic using Perplexity's Sonar API\n", 55 | "2. Generates MCQs from the fetched news content using Educhain's qna_engine\n", 56 | "3. 
Customizable number of questions and topic selection\n" 57 | ], 58 | "metadata": { 59 | "id": "gqUYIUkVw8-w" 60 | } 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": null, 65 | "metadata": { 66 | "id": "5P_SfI0SNKfF" 67 | }, 68 | "outputs": [], 69 | "source": [ 70 | "!pip install -qU educhain --quiet" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "source": [ 76 | "import os\n", 77 | "from google.colab import userdata\n", 78 | "\n", 79 | "os.environ[\"OPENAI_API_KEY\"] = userdata.get('OPENAI_API_KEY')" 80 | ], 81 | "metadata": { 82 | "id": "yzzBu9RWPT8p" 83 | }, 84 | "execution_count": 3, 85 | "outputs": [] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "source": [ 90 | "### Using Perplexity Online APIs to fetch latest news" 91 | ], 92 | "metadata": { 93 | "id": "mK-xA8GKUgzk" 94 | } 95 | }, 96 | { 97 | "cell_type": "code", 98 | "source": [ 99 | "from langchain_openai import ChatOpenAI\n", 100 | "from google.colab import userdata\n", 101 | "\n", 102 | "sonar = ChatOpenAI(model = \"perplexity/llama-3.1-sonar-large-128k-online\",\n", 103 | " openai_api_key = userdata.get(\"OPENROUTER_API_KEY\"),\n", 104 | " openai_api_base = \"https://openrouter.ai/api/v1\"\n", 105 | "\n", 106 | ")\n", 107 | "\n", 108 | "response = sonar.invoke(\"Give me the latest upates on AI\")\n", 109 | "print(response.content)" 110 | ], 111 | "metadata": { 112 | "id": "4H0l7i6oNRKW" 113 | }, 114 | "execution_count": null, 115 | "outputs": [] 116 | }, 117 | { 118 | "cell_type": "markdown", 119 | "source": [ 120 | "### Generating questions with Educhain on Latest news content" 121 | ], 122 | "metadata": { 123 | "id": "VWwFbjX8VJqx" 124 | } 125 | }, 126 | { 127 | "cell_type": "code", 128 | "source": [ 129 | "from educhain import Educhain\n", 130 | "\n", 131 | "client = Educhain()\n", 132 | "\n", 133 | "response = sonar.invoke(\"Give me the latest news on US Presidential Elections\")\n", 134 | "\n", 135 | "news_content = response.content\n", 136 | "\n", 137 | "news_mcq = client.qna_engine.generate_questions_from_data(\n", 138 | " source=news_content,\n", 139 | " source_type=\"text\",\n", 140 | " num=5,\n", 141 | " )\n", 142 | "\n", 143 | "news_mcq.show()" 144 | ], 145 | "metadata": { 146 | "id": "cCn40vfeOqMA" 147 | }, 148 | "execution_count": null, 149 | "outputs": [] 150 | }, 151 | { 152 | "cell_type": "markdown", 153 | "source": [ 154 | "###Generate Quiz on Given Topic Using Prompt template" 155 | ], 156 | "metadata": { 157 | "id": "Wb0ExmBBETfu" 158 | } 159 | }, 160 | { 161 | "cell_type": "code", 162 | "source": [ 163 | "from educhain import Educhain\n", 164 | "\n", 165 | "client = Educhain()\n", 166 | "\n", 167 | "def generate_news_mcqs(topic, num_questions=5):\n", 168 | " \"\"\"\n", 169 | " Generate multiple-choice questions based on current news about a given topic.\n", 170 | "\n", 171 | " Args:\n", 172 | " topic (str): The news topic to fetch and generate questions about.\n", 173 | " num_questions (int): The number of questions to generate (default is 5).\n", 174 | "\n", 175 | " Returns:\n", 176 | " None: Prints the generated questions.\n", 177 | " \"\"\"\n", 178 | " try:\n", 179 | " # Create a prompt template for Sonar\n", 180 | " sonar_prompt = f\"\"\"Fetch and summarize the latest news articles about {topic}.\n", 181 | " Focus on the most significant events and developments.\n", 182 | " Provide a concise summary of 3-5 key points.\"\"\"\n", 183 | "\n", 184 | " # Fetch news using Sonar\n", 185 | " response = sonar.invoke(sonar_prompt)\n", 186 | " news_content = response.content\n", 187 | 
"\n", 188 | " print(f\"Fetched news about {topic}:\")\n", 189 | " print(news_content)\n", 190 | " print(\"\\nGenerating questions based on the news...\\n\")\n", 191 | "\n", 192 | " # Generate MCQs using the qna_engine\n", 193 | " mcq_list = client.qna_engine.generate_questions_from_data(\n", 194 | " source=news_content,\n", 195 | " source_type=\"text\",\n", 196 | " num=num_questions,\n", 197 | " )\n", 198 | "\n", 199 | " # Display the generated questions\n", 200 | " print(f\"Generated {num_questions} questions on the latest news about {topic}:\\n\")\n", 201 | " mcq_list.show()\n", 202 | " except Exception as e:\n", 203 | " print(f\"An error occurred: {str(e)}\")" 204 | ], 205 | "metadata": { 206 | "id": "E4mwsKsQUfmy" 207 | }, 208 | "execution_count": 7, 209 | "outputs": [] 210 | }, 211 | { 212 | "cell_type": "markdown", 213 | "source": [ 214 | "###Usage" 215 | ], 216 | "metadata": { 217 | "id": "B5EdlGrsEeCt" 218 | } 219 | }, 220 | { 221 | "cell_type": "code", 222 | "source": [ 223 | "generate_news_mcqs(\"US Presidential Elections\", num_questions=5)" 224 | ], 225 | "metadata": { 226 | "id": "d-5pqKfLUzPS" 227 | }, 228 | "execution_count": null, 229 | "outputs": [] 230 | } 231 | ] 232 | } -------------------------------------------------------------------------------- /cookbook/starter-apps/AI CourtRoom/app.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | from educhain import Educhain, LLMConfig 3 | from langchain_cerebras import ChatCerebras 4 | from cerebras.cloud.sdk import Cerebras 5 | import os 6 | import json 7 | 8 | 9 | st.set_page_config(page_title="AI Courtroom — Mock Trials with Cerebras & Educhain", layout="wide") 10 | 11 | st.title("⚖️ AI Courtroom ⚖️") 12 | st.markdown("

👨‍⚖️ Judge, ⚔️ Prosecutor, 🛡️ Defense, 👤 Defendant and 📜 The Verdict 🔨
", unsafe_allow_html=True) 13 | st.markdown("_Use a real case URL from Wikipedia, news, or a court report to begin._") 14 | st.divider() 15 | # Sidebar: API key input 16 | with st.sidebar: 17 | st.markdown( 18 | "", unsafe_allow_html=True 28 | ) 29 | st.header("Configuration") 30 | CEREBRAS_API_KEY = st.text_input("Enter your Cerebras API Key", type="password") 31 | use_comedic = st.selectbox("Courtroom style", ["Serious", "Dramatic", "Comedic"]) 32 | num_facts = st.slider("Number of case facts/questions (Educhain)", min_value=1, max_value=10, value=9) 33 | st.markdown(" Model: `gpt-oss-120b` (Cerebras)") 34 | st.markdown("---") 35 | st.markdown(""" """, unsafe_allow_html=True) 38 | # Initialize Educhain only after user provides API key — we will initialize the ChatCerebras wrapper for Educhain 39 | if CEREBRAS_API_KEY: 40 | # Initialize Cerebras SDK client for direct calls (feedback, verdict structured output) 41 | cerebras_client = Cerebras(api_key=CEREBRAS_API_KEY) 42 | 43 | # Initialize Educhain with a Cerebras-backed LLM via langchain_cerebras 44 | # NOTE: this requires the `langchain_cerebras` adapter and Educhain to accept LLMConfig 45 | try: 46 | llm = ChatCerebras(model="gpt-oss-120b", api_key=CEREBRAS_API_KEY) 47 | except TypeError: 48 | # Some wrappers take api key via environment 49 | os.environ["CEREBRAS_API_KEY"] = CEREBRAS_API_KEY 50 | llm = ChatCerebras(model="gpt-oss-120b") 51 | 52 | cerebras_llm_config = LLMConfig(custom_model=llm) 53 | client = Educhain(cerebras_llm_config) 54 | else: 55 | cerebras_client = None 56 | client = None 57 | 58 | # Main UI: case URL input 59 | st.subheader("1. Paste a case report URL") 60 | case_url = st.text_input("Case report URL (Wikipedia / news link / case study link)",) 61 | 62 | if st.button("Ingest case & generate facts"): 63 | if not case_url: 64 | st.error("Please paste a valid URL first.") 65 | elif not client: 66 | st.error("Enter your Cerebras API key in the sidebar so the app can initialize Cerebras + Educhain.") 67 | else: 68 | with st.spinner("Fetching case and generating facts via Educhain..."): 69 | try: 70 | custom_instructions = ( 71 | "Generate {num} concise factual statements or True/False style facts about the legal case at the given URL. " 72 | "These facts should be usable as evidence or arguments in a courtroom roleplay. Keep them intermediate difficulty and numbered." 73 | ) 74 | 75 | # Use Educhain to extract a small set of facts / Qs from the URL 76 | facts = client.qna_engine.generate_questions_from_data( 77 | source=case_url, 78 | source_type="url", 79 | num=num_facts, 80 | question_type="True/False", 81 | difficulty_level="Intermediate", 82 | custom_instructions=custom_instructions 83 | ) 84 | 85 | try: 86 | facts_list = facts.model_dump_json() 87 | except Exception: 88 | # fallback: try casting to str 89 | facts_list = str(facts) 90 | 91 | st.success("Facts generated by Educhain — used as case evidence below.") 92 | st.subheader("Case Evidence (Educhain output)") 93 | st.code(facts_list, language="json") 94 | 95 | try: 96 | parsed_facts = json.loads(facts_list) 97 | formatted_facts = json.dumps(parsed_facts, indent=2) 98 | except Exception: 99 | formatted_facts = str(facts_list) 100 | 101 | st.session_state["case_facts"] = formatted_facts 102 | 103 | st.session_state["case_url"] = case_url 104 | 105 | except Exception as e: 106 | st.error(f"Educhain failed to generate facts: {e}") 107 | 108 | # Show quick action: Run Mock Trial 109 | st.subheader("2. 
Run the Trial") 110 | proceed = st.button("Start Trial (Cerebras will roleplay)") 111 | if proceed: 112 | if "case_facts" not in st.session_state: 113 | st.error("Please ingest a case first using the button above.") 114 | elif not cerebras_client: 115 | st.error("Provide Cerebras API key in the sidebar.") 116 | else: 117 | facts_text = st.session_state["case_facts"] 118 | court_style = use_comedic 119 | 120 | # Prompt: Markdown output for entire trial 121 | system_prompt = ( 122 | "You are a courtroom simulation engine. Roles: Judge, Prosecutor, Defense Lawyer, Defendant, and Jury. " 123 | f"Tone: {court_style}.\n" 124 | "Use the provided case facts as admissible evidence. Play out a full trial including:\n" 125 | "- Opening statements\n" 126 | "- Witness/evidence discussion (use provided facts as evidence)\n" 127 | "- Cross-examination\n" 128 | "- Closing statements\n" 129 | "- Final verdict announcement\n\n" 130 | "IMPORTANT:\n" 131 | "1. Output EVERYTHING in well-formatted **Markdown**.\n" 132 | "2. Use headings for sections (## Opening Statements, ## Evidence, etc.).\n" 133 | "3. Bold speaker names (e.g. **Judge:**) at the start of each dialogue line.\n" 134 | "4. Use bullet points for lists of evidence.\n" 135 | "5. At the very end, include a '## Final Verdict' section with:\n" 136 | " - Verdict: ...\n" 137 | " - Sentence: ...\n" 138 | " - Reasoning Summary: ...\n" 139 | ) 140 | 141 | user_prompt = ( 142 | f"Case URL: {st.session_state.get('case_url', '(unknown)')}\n\n" 143 | f"Case Facts / Evidence (JSON format):\n{facts_text}\n\n" 144 | "Each fact has a 'question' (statement) and an 'explanation'. " 145 | "Use the 'question' for courtroom statements, and the 'explanation' for reasoning when presenting evidence.\n" 146 | "Begin the trial now. Judge speaks first." 147 | ) 148 | 149 | with st.spinner("Cerebras is simulating the courtroom..."): 150 | try: 151 | trial_resp = cerebras_client.chat.completions.create( 152 | model="gpt-oss-120b", 153 | messages=[ 154 | {"role": "system", "content": system_prompt}, 155 | {"role": "user", "content": user_prompt} 156 | ], 157 | reasoning_effort="high", 158 | max_completion_tokens=7000, 159 | temperature=0.9, 160 | top_p=1.0 161 | ) 162 | 163 | # Get Markdown transcript 164 | transcript_md = trial_resp.choices[0].message.content.strip() 165 | 166 | st.subheader("Courtroom Transcript (Markdown)") 167 | st.markdown(transcript_md) 168 | 169 | # Save to session state 170 | st.session_state["last_transcript"] = transcript_md 171 | 172 | except Exception as e: 173 | st.error(f"Cerebras call failed: {e}") 174 | --------------------------------------------------------------------------------