├── .github
└── workflows
│ └── export.yml
├── .gitignore
├── LICENSE
├── README.md
├── agentic_rag
├── .gitignore
├── LICENSE
├── README.md
├── chat
│ ├── __init__.py
│ ├── chat.py
│ └── components
│ │ └── chat.py
├── db
│ ├── 22229130-76fd-4c4e-9e96-510bde4eb589
│ │ ├── data_level0.bin
│ │ ├── header.bin
│ │ ├── length.bin
│ │ └── link_lists.bin
│ └── chroma.sqlite3
├── requirements.txt
└── rxconfig.py
├── ai_stock_analyst_agent
├── .gitignore
├── LICENSE
├── README.md
├── agent
│ ├── __init__.py
│ └── agent.py
├── requirements.txt
└── rxconfig.py
├── browser_use_locally
├── .gitignore
├── LICENSE
├── README.md
├── assets
│ └── chakra_color_mode_provider.js
├── browser_agent
│ ├── __init__.py
│ └── browser_agent.py
├── requirements.txt
├── rxconfig.py
└── uploaded_files
│ └── Attention is all you need.pdf
├── chat_with_github
├── .gitignore
├── LICENSE
├── README.md
├── chat
│ ├── __init__.py
│ └── chat.py
├── requirements.txt
└── rxconfig.py
├── chat_with_pdf_locally
├── .github
│ └── workflows
│ │ └── repository_dispatch.yml
├── .gitignore
├── LICENSE
├── README.md
├── assets
│ └── chakra_color_mode_provider.js
├── chat
│ ├── __init__.py
│ ├── chat.py
│ └── components
│ │ └── chat.py
├── requirements.txt
├── rxconfig.py
└── uploaded_files
│ └── Attention is all you need.pdf
├── deepseek_r1_chatui
├── .github
│ └── workflows
│ │ └── repository_dispatch.yml
├── .gitignore
├── LICENSE
├── README.md
├── assets
│ ├── deepseek_logo.png
│ └── favicon.ico
├── chat
│ ├── __init__.py
│ ├── chat.py
│ ├── components
│ │ ├── __init__.py
│ │ ├── chat.py
│ │ ├── loading_icon.py
│ │ ├── modal.py
│ │ └── navbar.py
│ └── state.py
├── requirements.txt
└── rxconfig.py
├── deepseek_r1_rag
├── .gitignore
├── LICENSE
├── README.md
├── assets
│ └── chakra_color_mode_provider.js
├── chat
│ ├── __init__.py
│ ├── chat.py
│ └── components
│ │ └── chat.py
├── requirements.txt
├── rxconfig.py
└── uploaded_files
│ └── Attention is all you need.pdf
├── multi_modal_ai_agent
├── .DS_Store
├── .gitignore
├── LICENSE
├── README.md
├── __pycache__
│ ├── rxconfig.cpython-310.pyc
│ └── rxconfig.cpython-311.pyc
├── multi_modal_agent
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-310.pyc
│ │ ├── __init__.cpython-311.pyc
│ │ ├── chat.cpython-310.pyc
│ │ ├── multi_modal_agent.cpython-311.pyc
│ │ ├── news_agent.cpython-311.pyc
│ │ ├── state.cpython-310.pyc
│ │ └── utils.cpython-311.pyc
│ ├── multi_modal_agent.py
│ └── multi_modal_agent_agno.py
├── requirements.txt
└── rxconfig.py
├── multi_modal_medical_agent
├── LICENSE
├── README.md
├── agent
│ ├── __init__.py
│ └── agent.py
├── requirements.txt
├── rxconfig.py
└── uploaded_files
│ └── image (16).png
├── news_agent
├── .DS_Store
├── .env
├── .gitignore
├── LICENSE
├── README.md
├── news_agent
│ ├── __init__.py
│ └── news_agent.py
├── requirements.txt
└── rxconfig.py
├── open_deep_researcher
├── .gitignore
├── LICENSE
├── README.md
├── requirements.txt
├── researcher
│ ├── __init__.py
│ └── researcher.py
└── rxconfig.py
├── rag_app
├── .gitignore
├── LICENSE
├── __init__.py
├── assets
│ └── favicon.ico
├── rag_app
│ ├── __init__.py
│ ├── rag
│ │ ├── __init__.py
│ │ ├── main.py
│ │ ├── shared
│ │ │ ├── __init__.py
│ │ │ ├── chat.py
│ │ │ ├── navigation.py
│ │ │ ├── profile.py
│ │ │ ├── profile_components.py
│ │ │ └── style.py
│ │ ├── state.py
│ │ ├── style.py
│ │ └── wrappers
│ │ │ ├── __init__.py
│ │ │ ├── item.py
│ │ │ └── style.py
│ └── rag_app.py
├── requirements.txt
└── rxconfig.py
└── rag_with_docling
├── .gitignore
├── LICENSE
├── README.md
├── chat
├── __init__.py
└── chat.py
├── requirements.txt
└── rxconfig.py
/.github/workflows/export.yml:
--------------------------------------------------------------------------------
1 | name: Check Export
2 |
3 | env:
4 | TELEMETRY_ENABLED: false
5 | on:
6 | push:
7 | branches: [main]
8 | pull_request:
9 | branches: [main]
10 |
11 | jobs:
12 | find-folders:
13 | runs-on: ubuntu-latest
14 | outputs:
15 | folders: ${{ steps.find-rxconfig.outputs.folders }}
16 | steps:
17 |       - uses: actions/checkout@v4
18 |
19 | - name: Find folders with rxconfig.py
20 | id: find-rxconfig
21 | run: |
22 | FOLDERS=$(find . -maxdepth 2 -type f -name "rxconfig.py" | xargs dirname | sed 's|^\./||' | jq -R -s -c 'split("\n")[:-1]')
23 | echo "folders=$FOLDERS" >> $GITHUB_OUTPUT
24 | echo "Found folders: $FOLDERS"
25 |
26 | check-export:
27 | needs: find-folders
28 | strategy:
29 | matrix:
30 | folder: ${{ fromJson(needs.find-folders.outputs.folders) }}
31 | runs-on: ubuntu-latest
32 | steps:
33 | - uses: actions/checkout@v4
34 | - uses: astral-sh/setup-uv@v6
35 | with:
36 | python-version: 3.12
37 | activate-environment: true
38 | - name: Install dependencies
39 | working-directory: ${{ matrix.folder }}
40 | run: uv pip install -r requirements.txt
41 | - name: Run export
42 | working-directory: ${{ matrix.folder }}
43 | run: uv run reflex export
44 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .web
2 | *.py[cod]
3 | assets/external/
4 | *.db
5 | .DS_Store
6 | .idea/
7 | __pycache__/
8 | .venv
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Advanced LLM Applications Collection
2 |
3 | A curated repository of AI Apps built with Reflex, showcasing practical use cases of Large Language Models (LLMs) from providers such as Google, Anthropic, OpenAI, and self-hosted open-source models.
4 |
5 | This collection highlights:
6 | - AI agents and their usecases
7 | - RAG (Retrieval-Augmented Generation) implementations
8 | - Best practices for building scalable AI-powered solutions
--------------------------------------------------------------------------------
/agentic_rag/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[cod]
2 | __pycache__/
3 | *.db
4 | .web
5 | assets/external/
6 |
--------------------------------------------------------------------------------
/agentic_rag/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/agentic_rag/README.md:
--------------------------------------------------------------------------------
1 | # Agentic RAG with Gemini 2.0 Flash
2 |
3 | This is not just a simple "Chat with PDF" app. It's an **Agentic RAG (Retrieval Augmented Generation)** system powered by **Gemini 2.0 Flash**. The app first searches through the uploaded document for relevant information. If the required information is not found in the document, it seamlessly searches the web and returns a comprehensive response.
4 |
5 | ---
6 |
7 | ## Features
8 | - **Upload PDF Documents:** Easily upload any PDF document to start querying.
9 | - **Agentic RAG Workflow:** Combines document retrieval with web search for accurate and comprehensive answers.
10 | - **Interactive Q&A:** Ask questions about the content of the uploaded PDF or general queries.
11 | - **Powered by Gemini 2.0 Flash:** Utilizes Google's Gemini 2.0 Flash model for fast and accurate responses.
12 | - **Web Search Integration:** If the document doesn't contain the required information, the app searches the web and provides relevant results.
13 |
14 | ---
15 |
16 | ## Getting Started
17 |
18 | ### 1. Clone the Repository
19 | Clone the GitHub repository to your local machine:
20 | ```bash
21 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
22 | cd reflex-llm-examples/agentic_rag
23 | ```
24 |
25 | ### 2. Install Dependencies
26 | Install the required dependencies:
27 | ```bash
28 | pip install -r requirements.txt
29 | ```
30 |
31 | ### 3. Set Up Gemini API Key
32 | To use the Gemini 2.0 Flash model, you need a **Google API Key**. Follow these steps:
33 | Go to [Google AI Studio](https://aistudio.google.com/apikey), get your API Key, and set it as an environment variable:
34 | ```bash
35 | export GOOGLE_API_KEY="your-api-key-here"
36 | ```
37 |
38 | ### 4. Set Up PgVector
39 | The app uses **PgVector** for vector storage and retrieval. Follow these steps to set it up:
40 |
41 | 1. **Install Docker Desktop:** Ensure Docker Desktop is installed on your machine.
42 | 2. **Run the PgVector Docker Container:** Use the following command to start the PgVector container:
43 | ```bash
44 | docker run -d \
45 | -e POSTGRES_DB=ai \
46 | -e POSTGRES_USER=ai \
47 | -e POSTGRES_PASSWORD=ai \
48 | -e PGDATA=/var/lib/postgresql/data/pgdata \
49 | -v pgvolume:/var/lib/postgresql/data \
50 | -p 5532:5432 \
51 | --name pgvector \
52 | agnohq/pgvector:16
53 | ```
54 |
55 | 3. **Verify the Container is Running:**
56 | Run the following command to confirm that the PgVector container is running:
57 | ```bash
58 | docker ps
59 | ```
60 | You should see an entry for `pgvector` with port `5532` listed.
61 |
62 | 4. **Connect to the Database:**
63 | Access the PostgreSQL shell inside the container with this command:
64 | ```bash
65 | docker exec -it pgvector psql -U ai -d ai
66 | ```
67 |
68 | 5. **Create a New Table with Vector(768):**
69 | To set up a new table with an embedding column of dimension 768, run the following SQL command:
70 | ```sql
71 | CREATE TABLE pdf_documents_v2 (
72 | id character varying PRIMARY KEY,
73 | name character varying,
74 | meta_data jsonb DEFAULT '{}'::jsonb,
75 | filters jsonb DEFAULT '{}'::jsonb,
76 | content text,
77 | embedding vector(768), -- Embedding column with 768 dimensions
78 | usage jsonb DEFAULT '{}'::jsonb,
79 | content_hash VARCHAR
80 | );
81 | ```
82 |
83 | ### 5. Run the Reflex App
84 | Start the application to begin interacting with your PDF:
85 | ```bash
86 | reflex run
87 | ```
88 |
89 | ---
90 |
91 | ## How It Works
92 | 1. **Upload a PDF:** The app processes the document and creates a searchable knowledge base.
93 | 2. **Ask Questions:** The app first searches the uploaded document for relevant information.
94 | 3. **Web Search Fallback:** If the document doesn't contain the required information, the app searches the web using **DuckDuckGo** and returns the most relevant results.
95 | 4. **Comprehensive Responses:** The app combines information from the document and the web to provide accurate and detailed answers.
96 |
97 | ---
98 |
99 | ## Why Agentic RAG?
100 | - **Document-Centric:** Focuses on extracting information from the uploaded PDF.
101 | - **Web-Augmented:** Ensures no query goes unanswered by leveraging web search when needed.
102 | - **Efficient and Accurate:** Combines the best of both worlds for a seamless experience.
103 |
104 | ---
105 |
106 | ## Troubleshooting
107 | - **Gemini API Key Not Set:** Ensure the `GOOGLE_API_KEY` environment variable is set correctly.
108 | - **PgVector Not Running:** Verify that the PgVector Docker container is running and accessible on port `5532`.
109 |
110 | ---
111 |
112 | ## Contributing
113 | Contributions are welcome! Feel free to open issues or submit pull requests to improve the app.
114 |
115 | ---
--------------------------------------------------------------------------------
/agentic_rag/chat/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/agentic_rag/chat/__init__.py
--------------------------------------------------------------------------------
/agentic_rag/chat/chat.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | from chat.components.chat import State, chat, action_bar, sidebar
3 |
4 |
def index() -> rx.Component:
    """The main page: fixed sidebar plus the chat column."""
    # Page header with the title and a button to start a fresh chat.
    header = rx.hstack(
        rx.heading("Agentic RAG using Gemini 2.0 Flash 🔥"),
        rx.button(
            "New Chat",
            on_click=State.create_new_chat,
            margin_left="auto",
        ),
    )
    # Main column, offset to the right of the 300px sidebar.
    main_area = rx.box(
        rx.vstack(
            header,
            chat(),
            action_bar(),
            spacing="4",
            align_items="center",
            height="100vh",
            padding="4em",
        ),
        margin_left="300px",
        width="calc(100% - 300px)",
    )
    return rx.box(
        sidebar(),
        main_area,
        width="100%",
        height="100vh",
        background_color=rx.color("mauve", 1),
    )


app = rx.App()
app.add_page(index)
37 |
--------------------------------------------------------------------------------
/agentic_rag/db/22229130-76fd-4c4e-9e96-510bde4eb589/header.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/agentic_rag/db/22229130-76fd-4c4e-9e96-510bde4eb589/header.bin
--------------------------------------------------------------------------------
/agentic_rag/db/22229130-76fd-4c4e-9e96-510bde4eb589/length.bin:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/agentic_rag/db/22229130-76fd-4c4e-9e96-510bde4eb589/link_lists.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/agentic_rag/db/22229130-76fd-4c4e-9e96-510bde4eb589/link_lists.bin
--------------------------------------------------------------------------------
/agentic_rag/db/chroma.sqlite3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/agentic_rag/db/chroma.sqlite3
--------------------------------------------------------------------------------
/agentic_rag/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex==0.7.11
2 | agno
3 | google-generativeai
4 | bs4
5 | duckduckgo-search
6 | qdrant-client
7 | pgvector
8 | psycopg[binary]
9 | pypdf
10 | sqlalchemy
11 | google-genai
--------------------------------------------------------------------------------
/agentic_rag/rxconfig.py:
--------------------------------------------------------------------------------
import reflex as rx


# Reflex project configuration; `app_name` must match the app package
# directory name ("chat") so `reflex run` can locate the app module.
config = rx.Config(
    app_name="chat",
)
7 |
--------------------------------------------------------------------------------
/ai_stock_analyst_agent/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.py[cod]
3 | .web
4 | assets/external/
5 | *.db
6 |
--------------------------------------------------------------------------------
/ai_stock_analyst_agent/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/ai_stock_analyst_agent/README.md:
--------------------------------------------------------------------------------
1 | # AI Stock Analyst Agent using Gemini 2.0 Flash (exp)
2 |
3 | The **AI Stock Analyst Agent** leverages **Reflex**, **Agno** and **Gemini 2.0 Flash (exp)** to provide advanced financial analysis. It allows users to get comprehensive insights into stock market performance by analyzing individual stocks and their metrics. The app queries relevant data from stock sources, including historical prices, market analysis, and recommendations, to answer user queries about stocks and financial performance.
4 |
5 | ## Note
6 |
7 | Educational Purpose Only: This project is intended for educational purposes only to demonstrate the power of AI in stock analysis.
8 |
9 | ---
10 |
11 | ## Features
12 |
13 | - **Stock Analysis:** Analyze individual stocks, including key metrics like P/E ratio, market cap, EPS, and 52-week highs and lows.
14 | - **Watchlist Management:** Add or remove stocks from your personalized watchlist for easy monitoring.
15 | - **Gemini 2.0 Flash Integration:** Utilizes Google's Gemini 2.0 Flash for fast, accurate, and dynamic responses.
16 | - **Real-Time Market Data:** Get live stock data, analyst recommendations, and company news from reliable sources like Yahoo Finance.
17 | - **Custom Financial Reports:** In-depth analysis, including executive summaries, professional insights, and risk disclosures.
18 |
19 | ---
20 |
21 | ## Getting Started
22 |
23 | ### 1. Clone the Repository
24 | Clone the GitHub repository to your local machine:
25 | ```bash
26 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
27 | cd reflex-llm-examples/ai_stock_analyst_agent
28 | ```
29 |
30 | ### 2. Install Dependencies
31 | Install the required dependencies:
32 | ```bash
33 | pip install -r requirements.txt
34 | ```
35 |
36 | ### 3. Set Up Gemini API Key
37 | To use the Gemini 2.0 Flash model, you need a **Google API Key**. Follow these steps:
38 | Go to [Google AI Studio](https://aistudio.google.com/apikey), get your API Key, and set it as an environment variable:
39 | ```bash
40 | export GOOGLE_API_KEY="your-api-key-here"
41 | ```
42 |
43 | ### 4. Run the Reflex App
44 | Start the application:
45 | ```bash
46 | reflex run
47 | ```
48 |
49 | ---
50 |
51 | ## How It Works
52 |
53 | 1. **Stock Query:** Ask questions like "Analyze AAPL's performance" or any other stock symbol.
54 | 2. **Gemini 2.0 Flash:** The app generates a detailed report with metrics like the latest stock price, P/E ratio, market cap, analyst recommendations, and more.
55 | 3. **Real-Time Data:** The app integrates with Yahoo Finance and other tools to get real-time market insights.
56 | 4. **Watchlist:** Add stocks to your watchlist for easy monitoring and analysis over time.
57 |
58 | ---
59 |
60 | ## Why AI Stock Agent?
61 |
62 | - **Real-Time Data Access:** Provides live stock information, analyst insights, and historical data to give you a full picture of stock performance.
63 | - **Smart Financial Analysis:** The agent uses the power of Gemini 2.0 Flash and Yahoo Finance tools to give you comprehensive, accurate financial reports.
64 | - **User-Friendly:** Seamless user experience with easy stock addition/removal, and clear, actionable insights.
65 |
66 | ---
67 |
68 | ## Contributing
69 |
70 | We welcome contributions! Feel free to open issues or submit pull requests to improve the app.
71 |
72 | ---
73 |
--------------------------------------------------------------------------------
/ai_stock_analyst_agent/agent/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/ai_stock_analyst_agent/agent/__init__.py
--------------------------------------------------------------------------------
/ai_stock_analyst_agent/agent/agent.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | from typing import List
3 | from dataclasses import dataclass
4 | import asyncio
5 | from textwrap import dedent
6 |
7 | from agno.agent import Agent
8 | from agno.models.google import Gemini
9 | from agno.tools.yfinance import YFinanceTools
10 |
11 |
# Data Models
@dataclass
class QA:
    """A question and answer pair."""

    question: str  # the user's question text
    answer: str  # the model's answer; filled incrementally while streaming
19 |
20 |
# Custom Loading Icon
class LoadingIcon(rx.Component):
    """A custom loading icon component.

    Wraps the `SpinningCircles` spinner from the `react-loading-icons`
    npm package as a Reflex component.
    """

    library = "react-loading-icons"  # npm package providing the React component
    tag = "SpinningCircles"  # React component name within the package
    # Pass-through SVG/animation props, exposed as Reflex Vars.
    stroke: rx.Var[str]
    stroke_opacity: rx.Var[str]
    fill: rx.Var[str]
    fill_opacity: rx.Var[str]
    stroke_width: rx.Var[str]
    speed: rx.Var[str]
    height: rx.Var[str]

    def get_event_triggers(self) -> dict:
        # Expose the component's `on_change` event, forwarding its single
        # status argument to the handler.
        return {"on_change": lambda status: [status]}


# Shorthand constructor, matching Reflex's `Component.create` convention.
loading_icon = LoadingIcon.create
40 |
# Styles
# Shared bubble styling for both question and answer messages.
message_style = dict(
    display="inline-block",
    padding="1em",
    border_radius="8px",
    # Responsive max widths, one per breakpoint.
    max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
)

# Fixed, full-height sidebar pinned to the left edge of the viewport.
SIDEBAR_STYLE = dict(
    width="300px",
    height="100vh",
    position="fixed",
    left=0,
    top=0,
    padding="2em",
    background_color=rx.color("blue", 2),
    border_right=f"1px solid {rx.color('blue', 3)}",
)

# Outline-style buttons used in the watchlist rows and the add-stock form.
STOCK_BUTTON_STYLE = dict(
    color=rx.color("blue", 12),
    bg="transparent",
    border=f"1px solid {rx.color('blue', 6)}",
    _hover={"bg": rx.color("blue", 3)},
)

# Styling for the "×" remove control next to each watchlist entry.
REMOVE_ICON_STYLE = {
    "color": "gray.400",
    "cursor": "pointer",
    "_hover": {"bg": rx.color("red", 3)},
    "font_size": "lg",
    "font_weight": "bold",
    "margin_right": "2",
}
75 |
76 |
# Application State
class State(rx.State):
    """The app state.

    Holds the chat transcripts, the processing flag used to lock the UI
    while a question is being answered, and the user's stock watchlist.
    """

    # All chat sessions; each chat is a list of QA exchanges.
    chats: List[List[QA]] = [[]]
    # Index into `chats` of the chat currently displayed.
    current_chat: int = 0
    # True while a question is being processed (disables input/submit).
    processing: bool = False
    # Ticker symbols pinned in the sidebar.
    watchlist: List[str] = ["AAPL", "MSFT", "GOOGL", "AMZN", "META"]

    def _create_agent(self) -> Agent:
        """Create a fresh agent instance for each interaction."""
        return Agent(
            model=Gemini(id="gemini-2.0-flash-exp"),
            tools=[
                YFinanceTools(
                    stock_price=True,
                    analyst_recommendations=True,
                    stock_fundamentals=True,
                    historical_prices=True,
                    company_info=True,
                    company_news=True,
                )
            ],
            instructions=dedent("""\
                You are a seasoned Wall Street analyst with deep expertise in market analysis! 📊

                Follow these steps for comprehensive financial analysis:
                1. Market Overview
                - Latest stock price
                - 52-week high and low
                2. Financial Deep Dive
                - Key metrics (P/E, Market Cap, EPS)
                3. Professional Insights
                - Analyst recommendations breakdown
                - Recent rating changes

                4. Market Context
                - Industry trends and positioning
                - Competitive analysis
                - Market sentiment indicators

                Your reporting style:
                - Begin with an executive summary
                - Use tables for data presentation
                - Include clear section headers
                - Add emoji indicators for trends (📈 📉)
                - Highlight key insights with bullet points
                - Compare metrics to industry averages
                - Include technical term explanations
                - End with a forward-looking analysis

                Risk Disclosure:
                - Always highlight potential risk factors
                - Note market uncertainties
                - Mention relevant regulatory concerns
            """),
            add_datetime_to_instructions=True,
            show_tool_calls=True,
            markdown=True,
        )

    @rx.event(background=True)
    async def process_question(self, form_data: dict):
        """Process a financial analysis question.

        Streams the agent's answer into the last QA of the current chat,
        yielding after each chunk so the UI updates incrementally.
        """
        # Ignore duplicate submissions and empty questions.
        if self.processing or not form_data.get("question"):
            return

        question = form_data["question"]

        async with self:
            self.processing = True
            self.chats[self.current_chat].append(QA(question=question, answer=""))
            yield

        try:
            agent = self._create_agent()
            response = agent.run(question, stream=True)

            async with self:
                answer_content = ""
                for chunk in response:  # Process each chunk of the response
                    # Agent chunks normally carry a `content` attribute;
                    # fall back to str() for anything else.
                    if hasattr(chunk, "content"):
                        answer_content += chunk.content
                    else:
                        answer_content += str(chunk)

                    # Update the UI with the latest chunk.
                    self.chats[self.current_chat][-1].answer = answer_content
                    # Reassign to notify Reflex that the nested list changed.
                    self.chats = self.chats
                    yield
                    # BUG FIX: the coroutine was previously created but never
                    # awaited, so the intended brief pause between UI updates
                    # never actually happened.
                    await asyncio.sleep(0.05)

        except Exception as e:
            answer_content = f"Error processing question: {str(e)}"

        async with self:
            self.chats[self.current_chat][-1].answer = answer_content
            self.chats = self.chats
            self.processing = False
            yield

    def add_to_watchlist(self, form_data: dict[str, str]):
        """Add a stock symbol (from the sidebar form) to the watchlist."""
        # Normalize before the membership check so e.g. " aapl " matches an
        # existing "AAPL" entry (the original also double-applied .upper()).
        symbol = form_data.get("symbol", "").strip().upper()
        if symbol and symbol not in self.watchlist:
            self.watchlist.append(symbol)

    def remove_from_watchlist(self, symbol: str):
        """Remove a stock from the watchlist if present."""
        if symbol in self.watchlist:
            self.watchlist.remove(symbol)

    def create_new_chat(self):
        """Start a new, empty chat and switch to it."""
        self.chats.append([])
        self.current_chat = len(self.chats) - 1
195 |
196 |
# UI Components
def message(qa: QA) -> rx.Component:
    """Render one QA exchange: right-aligned question, left-aligned answer."""
    question_bubble = rx.box(
        rx.markdown(
            qa.question,
            background_color=rx.color("blue", 4),
            color=rx.color("blue", 12),
            **message_style,
        ),
        text_align="right",
        margin_top="1em",
    )
    answer_bubble = rx.box(
        rx.markdown(
            qa.answer,
            background_color=rx.color("green", 4),
            color=rx.color("green", 12),
            **message_style,
        ),
        text_align="left",
        padding_top="1em",
    )
    return rx.box(question_bubble, answer_bubble, width="100%")
222 |
223 |
def chat() -> rx.Component:
    """Scrollable message list for the currently selected chat."""
    return rx.vstack(
        rx.box(rx.foreach(State.chats[State.current_chat], message), width="100%"),
        py="8",
        flex="1",
        width="100%",
        max_width="50em",
        padding_x="4px",
        align_self="center",
        overflow_y="auto",
        padding_bottom="5em",  # keeps the last message clear of the fixed action bar
    )
236 |
237 |
def action_bar() -> rx.Component:
    """Fixed bottom input bar for submitting analysis questions."""
    return rx.box(
        rx.vstack(
            rx.form(
                rx.hstack(
                    rx.input(
                        placeholder="Ask about any stock (e.g., 'Analyze AAPL's performance')",
                        id="question",
                        # Responsive widths, one per breakpoint.
                        width=["15em", "20em", "45em", "50em", "50em", "50em"],
                        disabled=State.processing,
                        border_color=rx.color("blue", 6),
                        _focus={"border_color": rx.color("blue", 8)},
                        background_color="transparent",
                    ),
                    rx.button(
                        # Swap the label for a spinner while processing.
                        rx.cond(
                            State.processing,
                            loading_icon(height="1em"),
                            rx.text("Analyze"),
                        ),
                        type_="submit",
                        disabled=State.processing,
                        bg=rx.color("green", 9),
                        color="white",
                        _hover={"bg": rx.color("green", 10)},
                    ),
                    align_items="center",
                    spacing="3",
                ),
                on_submit=State.process_question,
                width="100%",
                reset_on_submit=True,  # clear the input after each submission
            ),
            align_items="center",
            width="100%",
        ),
        position="fixed",
        bottom="0",
        left="0",
        # NOTE(review): 350px presumably offsets the 300px fixed sidebar plus
        # breathing room — confirm against SIDEBAR_STYLE if the layout changes.
        padding_x="350px",
        padding_y="16px",
        backdrop_filter="auto",
        backdrop_blur="lg",
        background_color=rx.color("mauve", 2),
        border_top=f"1px solid {rx.color('blue', 3)}",
        width="100%",
    )
285 |
286 |
def sidebar() -> rx.Component:
    """Fixed left sidebar: the watchlist with per-symbol actions, plus a
    form to add new symbols."""
    return rx.box(
        rx.vstack(
            rx.text(
                "Your Personal Market Analyst",
                color=rx.color("blue", 11),
                font_size="sm",
                margin_bottom="2em",
            ),
            rx.heading("Watchlist", size="4", margin_bottom="1em"),
            rx.foreach(
                State.watchlist,
                lambda symbol: rx.hstack(
                    rx.text(
                        "×",  # Using × symbol as remove icon
                        on_click=lambda: State.remove_from_watchlist(symbol),
                        **REMOVE_ICON_STYLE,
                    ),
                    rx.text(symbol, font_size="sm"),
                    rx.button(
                        "Analyze",
                        on_click=lambda: State.process_question(
                            {"question": f"Analyze {symbol}'s performance"}
                        ),
                        size="2",
                        **STOCK_BUTTON_STYLE,
                    ),
                    width="100%",
                    justify_content="space-between",
                ),
            ),
            rx.form(
                rx.hstack(
                    rx.input(
                        placeholder="Add stock (e.g., AAPL)",
                        id="symbol",
                        size="2",
                    ),
                    rx.button(
                        "Add",
                        type_="submit",
                        size="2",
                        **STOCK_BUTTON_STYLE,
                    ),
                ),
                # BUG FIX: pass the event handler itself, not the result of
                # calling it — `State.add_to_watchlist()` invoked the handler
                # with no form data at component-build time instead of wiring
                # it up to run on submit.
                on_submit=State.add_to_watchlist,
                width="100%",
                margin_top="2em",
            ),
            align_items="stretch",
            height="100%",
        ),
        **SIDEBAR_STYLE,
    )
341 |
342 |
def index() -> rx.Component:
    """The main app: fixed sidebar plus the chat column offset to its right."""
    header_row = rx.hstack(
        rx.heading("📊 AI Finance Agent 📈", size="8"),
        rx.button(
            "New Chat",
            on_click=State.create_new_chat,
            margin_left="auto",
        ),
    )
    main_column = rx.vstack(
        header_row,
        chat(),
        action_bar(),
        spacing="4",
        align_items="center",
        height="100vh",
        padding="4em",
    )
    return rx.box(
        sidebar(),
        rx.box(
            main_column,
            margin_left="300px",
            width="calc(100% - 300px)",
        ),
        width="100%",
        height="100vh",
        background_color=rx.color("mauve", 1),
    )
371 |
372 |
# Create the Reflex app and register the main page at the root route.
app = rx.App()
app.add_page(index)
375 |
--------------------------------------------------------------------------------
/ai_stock_analyst_agent/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex==0.7.11
2 | agno
3 | google-generativeai
4 | duckduckgo-search
5 | yfinance
6 | google-genai
--------------------------------------------------------------------------------
/ai_stock_analyst_agent/rxconfig.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 |
# Reflex project configuration; "agent" is the app's module directory.
config = rx.Config(
    app_name="agent",
)
7 |
--------------------------------------------------------------------------------
/browser_use_locally/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | assets/external/
3 | *.py[cod]
4 | *.db
5 | .web
6 |
--------------------------------------------------------------------------------
/browser_use_locally/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/browser_use_locally/README.md:
--------------------------------------------------------------------------------
1 | # Browser Use Task Automation
2 |
3 | Browser Use Task Automation is an Open Source Version of Operator released by Open AI that leverages **Browser Use** and **Ollama** to automate various browser-based tasks. The app allows you to input any task description and automates browsing, searching, summarizing, or any web-based interaction you require.
4 |
5 | ---
6 |
7 | ## Features
8 | - **Custom Task Input:** Enter any browser-based task description for automation.
9 | - **Task Automation:** Automate tasks like browsing, searching, summarizing, or interacting with the web.
10 | - **Flexible and Scalable:** Use the app for various tasks across different websites or categories.
11 |
12 | ---
13 |
14 | ## Getting Started
15 |
16 | ### 1. Clone the Repository
17 | Clone the GitHub repository to your local machine:
18 | ```bash
19 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
20 | cd reflex-llm-examples/browser_use_locally
21 | ```
22 |
23 | ### 2. Install Dependencies
24 | Install the required dependencies:
25 | ```bash
26 | pip install -r requirements.txt
27 | ```
28 |
29 | ### 3. Install Playwright
30 | Playwright is required for browser automation. Install it by running:
31 | ```bash
32 | python -m playwright install
33 | ```
34 |
35 | ### 4. Pull and Run Qwen 2.5 Using Ollama
36 | Download and set up the **Qwen 2.5** model locally via Ollama:
37 | ```bash
38 | ollama pull qwen2.5:latest
39 | ```
40 |
41 | ### 5. Run the Reflex App
42 | Run the application to start automating browser tasks:
43 | ```bash
44 | reflex run
45 | ```
46 |
47 | ---
48 |
49 | ## Usage
50 |
51 | Once the app is running, you can enter any browser task description (e.g., "Search for AI research papers on arXiv" or "Find the latest tech news") and click **Run Task**. The app will use **Browser Use** to execute the task as described and return the results or summaries.
52 |
53 | ---
54 |
55 | ## Example Task Description
56 |
57 | Here’s an example of a task you can provide:
58 |
59 | ```
60 | 1. Go to https://arxiv.org
61 | 2. Search for "Artificial Intelligence" or browse AI-related categories
62 | 3. Identify the latest papers (published in the last 7 days)
63 | 4. Summarize the title, authors, abstract, and publication date for each paper
64 | ```
65 |
66 | Or, feel free to create your own custom task descriptions for a wide range of automation needs!
67 | --
--------------------------------------------------------------------------------
/browser_use_locally/assets/chakra_color_mode_provider.js:
--------------------------------------------------------------------------------
1 | import { useColorMode as chakraUseColorMode } from "@chakra-ui/react";
2 | import { useTheme } from "next-themes";
3 | import { useEffect, useState } from "react";
4 | import { ColorModeContext, defaultColorMode } from "/utils/context.js";
5 |
// Bridges Chakra UI's color mode with next-themes: keeps Chakra's colorMode
// in sync with the resolved next-themes theme and exposes a validated setter.
export default function ChakraColorModeProvider({ children }) {
  const { theme, resolvedTheme, setTheme } = useTheme();
  const { colorMode, toggleColorMode } = chakraUseColorMode();
  const [resolvedColorMode, setResolvedColorMode] = useState(colorMode);

  // Whenever next-themes resolves a new theme, flip Chakra's mode to match.
  useEffect(() => {
    if (colorMode != resolvedTheme) {
      toggleColorMode();
    }
    setResolvedColorMode(resolvedTheme);
  }, [theme, resolvedTheme]);

  const rawColorMode = colorMode;
  // Setter that rejects unknown modes, logging and falling back to the default.
  const setColorMode = (mode) => {
    const allowedModes = ["light", "dark", "system"];
    if (!allowedModes.includes(mode)) {
      console.error(
        `Invalid color mode "${mode}". Defaulting to "${defaultColorMode}".`
      );
      mode = defaultColorMode;
    }
    setTheme(mode);
  };
  // NOTE(review): the JSX tags wrapping {children} (presumably
  // <ColorModeContext.Provider value={...}> using rawColorMode/resolvedColorMode/
  // setColorMode) appear to have been lost in extraction — confirm against the
  // original source before editing.
  return (

    {children}

  );
}
37 |
--------------------------------------------------------------------------------
/browser_use_locally/browser_agent/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/browser_use_locally/browser_agent/__init__.py
--------------------------------------------------------------------------------
/browser_use_locally/browser_agent/browser_agent.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | from langchain_ollama import ChatOllama
3 | from browser_use import Agent
4 | import asyncio
5 |
6 |
7 | # Reflex App State
class State(rx.State):
    """App state: the task text, the agent's final output, and a busy flag."""

    task_description: str = ""  # User-entered description of the browser task.
    output: str = ""  # Final result text rendered in the output panel.
    is_loading: bool = False  # Drives the Run button's loading spinner.

    @rx.event(background=True)
    async def execute_task(self):
        """Run the browser task using Ollama and update the output.

        Runs as a background event; state is only mutated inside
        ``async with self`` blocks so the UI stays responsive.
        """
        async with self:
            self.is_loading = True
            self.output = ""
            yield
        await asyncio.sleep(1)

        result = await self.run_search()
        async with self:
            # run_search() now always returns a plain string, so it can be
            # assigned directly (see note in run_search).
            self.output = result
            self.is_loading = False
            yield

    async def run_search(self) -> str:
        """Execute the browser agent and return its final result as text.

        Returns:
            The agent's final result string, or an ``"Error: ..."`` message
            if the agent raised.
        """
        try:
            agent = Agent(
                task=self.task_description,
                llm=ChatOllama(
                    model="qwen2.5:latest",
                    num_ctx=32000,  # Context length for the model
                ),
                max_actions_per_step=1,
            )
            result = await agent.run()
            # Extract the final text here. Previously this returned the raw
            # history object and the caller invoked .final_result() on it,
            # which crashed with an AttributeError whenever the except branch
            # below returned a plain error string. It also makes the declared
            # `-> str` return type true on the success path.
            return result.final_result()
        except Exception as e:
            return f"Error: {str(e)}"
42 |
43 |
44 | # Reflex UI
def index():
    """Page UI: task input, run button, and a conditional output panel."""
    output_panel = rx.cond(
        State.output != "",
        rx.box(
            rx.text("Output:", font_weight="bold"),
            rx.text(State.output, font_size="sm"),
            border="1px solid #e2e8f0",
            padding="1rem",
            width="100%",
            height="300px",
            overflow_y="auto",
            bg="gray.50",
            rounded="md",
        ),
    )
    return rx.container(
        rx.vstack(
            rx.heading("Open Source Operator using Browser Use 🔥", size="8"),
            rx.text("Enter your task description below:"),
            rx.text_area(
                placeholder="Task description...",
                value=State.task_description,
                on_change=State.set_task_description,
                width="100%",
                height="200px",
                resize="vertical",
            ),
            rx.button(
                "Run Task",
                on_click=State.execute_task,
                loading=State.is_loading,
                width="100%",
            ),
            rx.divider(),
            output_panel,
            spacing="4",
            align="center",
            width="100%",
            max_width="800px",
            padding="2rem",
        )
    )
86 |
87 |
88 | # Run the Reflex App
# Create the Reflex app and register the main page at the root route.
app = rx.App()
app.add_page(index)
91 |
--------------------------------------------------------------------------------
/browser_use_locally/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex==0.7.11
2 | browser-use
3 | langchain-ollama==0.2.2
4 | ollama
5 | playwright
--------------------------------------------------------------------------------
/browser_use_locally/rxconfig.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
# Reflex configuration: the in-memory state manager keeps per-session state
# in this process only (no Redis/disk persistence).
config = rx.Config(
    app_name="browser_agent", state_manager_mode=rx.constants.StateManagerMode.MEMORY
)
6 |
--------------------------------------------------------------------------------
/browser_use_locally/uploaded_files/Attention is all you need.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/browser_use_locally/uploaded_files/Attention is all you need.pdf
--------------------------------------------------------------------------------
/chat_with_github/.gitignore:
--------------------------------------------------------------------------------
1 | *.db
2 | assets/external/
3 | .web
4 | __pycache__/
5 | *.py[cod]
6 |
--------------------------------------------------------------------------------
/chat_with_github/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/chat_with_github/README.md:
--------------------------------------------------------------------------------
1 | # Chat with Github
2 |
3 | Chat with GitHub is an LLM app that utilizes **Retrieval Augmented Generation (RAG)** to enable meaningful interaction with GitHub repositories. Powered by **Llama 3.2** running locally, the app provides accurate answers to your questions based on the repository's content.
4 |
5 | ---
6 |
7 | ## Getting Started
8 |
9 | ### 1. Clone the Repository
10 | Clone the GitHub repository to your local machine:
11 | ```bash
12 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
13 | cd reflex-llm-examples/chat_with_github
14 | ```
15 |
16 | ### 2. Install Dependencies
17 | Install the required dependencies:
18 | ```bash
19 | pip install -r requirements.txt
20 | ```
21 |
22 | ### 3. Get your GitHub Access Token
23 | Get your GitHub [Personal Access Token](https://docs.github.com/en/enterprise-server@3.6/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token) with the necessary permissions and set it as environment variable to access any GitHub repository.
24 |
25 | ### 4. Pull and Run Llama 3.2 Using Ollama
26 | Download and set up the Llama 3.2 model locally:
27 | ```bash
28 | ollama pull llama3.2
29 | ```
30 |
31 | ### 5. Run the Reflex App
32 | Run the application to start chatting with the GitHub repository:
33 | ```bash
34 | reflex run
35 | ```
36 |
--------------------------------------------------------------------------------
/chat_with_github/chat/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/chat_with_github/chat/__init__.py
--------------------------------------------------------------------------------
/chat_with_github/chat/chat.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | from typing import List
3 | from dataclasses import dataclass
4 | import tempfile
5 | import asyncio
6 | import os
7 | from embedchain import App
8 | from embedchain.loaders.github import GithubLoader
9 |
# GitHub personal access token read from the environment; required by the
# GithubLoader to fetch repository contents.
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")

# Styles from the reference code
# Bubble styling shared by question and answer markdown messages.
message_style = dict(
    display="inline-block",
    padding="1em",
    border_radius="8px",
    max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
)
19 |
20 |
@dataclass
class QA:
    """A question and answer pair."""

    question: str  # The user's question as submitted from the form.
    answer: str  # The model's answer; empty until processing completes.
27 |
28 |
class LoadingIcon(rx.Component):
    """A custom loading icon component.

    Wraps the ``SpinningCircles`` spinner from the third-party
    ``react-loading-icons`` package as a Reflex component.
    """

    library = "react-loading-icons"
    tag = "SpinningCircles"
    stroke: rx.Var[str]
    stroke_opacity: rx.Var[str]
    fill: rx.Var[str]
    fill_opacity: rx.Var[str]
    stroke_width: rx.Var[str]
    speed: rx.Var[str]
    height: rx.Var[str]

    def get_event_triggers(self) -> dict:
        """Declare the component's event triggers (on_change passes status)."""
        return {"on_change": lambda status: [status]}


# Convenience constructor, mirroring Reflex's ``Component.create`` pattern.
loading_icon = LoadingIcon.create
47 |
48 |
class State(rx.State):
    """The app state."""

    chats: List[List[QA]] = [[]]  # One message list per chat session.
    current_chat: int = 0  # Index of the active chat in ``chats``.
    processing: bool = False  # True while a question is being answered.
    db_path: str = tempfile.mkdtemp()  # Scratch dir for the Chroma vector DB.
    upload_status: str = ""  # Status line shown below the Process button.
    is_loading: bool = False  # True while a repository is being ingested.
    repo: str = ""  # GitHub repository identifier entered by the user.

    # Class-level cache so every handler reuses a single embedchain App.
    _app_instance = None

    def get_app(self):
        """Get or create the cached embedchain app instance.

        Configured for the locally served Llama 3.2 model for both the LLM
        and the embedder, matching the README's ``ollama pull llama3.2``
        step and the UI heading ("Llama-3.2 running with Ollama"); the
        previous "llama3:instruct" model name did not match the documented
        setup, so chats failed unless that model happened to be pulled.
        """
        if State._app_instance is None:
            State._app_instance = App.from_config(
                config={
                    "llm": {
                        "provider": "ollama",
                        "config": {
                            "model": "llama3.2:latest",
                            "max_tokens": 250,
                            "temperature": 0.5,
                            "stream": True,
                            "base_url": "http://localhost:11434",
                        },
                    },
                    "vectordb": {"provider": "chroma", "config": {"dir": self.db_path}},
                    "embedder": {
                        "provider": "ollama",
                        "config": {
                            "model": "llama3.2:latest",
                            "base_url": "http://localhost:11434",
                        },
                    },
                }
            )
        return State._app_instance

    def get_loader(self):
        """Return a GithubLoader authenticated with the env GITHUB_TOKEN."""
        return GithubLoader(config={"token": GITHUB_TOKEN})

    @rx.event(background=True)
    async def process_question(self, form_data: dict):
        """Process a question and update the chat.

        Runs as a background event; state is only mutated inside
        ``async with self`` blocks.
        """
        # Ignore submissions while busy or when the input was empty.
        if self.processing or not form_data.get("question"):
            return

        question = form_data["question"]

        async with self:
            self.processing = True
            # Append a placeholder QA so the question renders immediately.
            self.chats[self.current_chat].append(QA(question=question, answer=""))
            yield
        await asyncio.sleep(1)

        # Query the knowledge base outside the state lock (may be slow).
        app = self.get_app()
        answer = app.chat(question)

        async with self:
            self.chats[self.current_chat][-1].answer = answer
            self.processing = False
            self.chats = self.chats  # Reassign to trigger a state update.
            yield
        await asyncio.sleep(1)

    @rx.event(background=True)
    async def handle_repo_input(self):
        """Handle repository addition."""
        if self.repo == "":
            return

        async with self:
            self.is_loading = True
            yield
        await asyncio.sleep(1)

        try:
            app = self.get_app()
            loader = self.get_loader()
            # embedchain's GitHub data type expects a
            # "repo:<owner/name> type:repo" query string.
            app.add(f"repo:{self.repo} type:repo", data_type="github", loader=loader)

            async with self:
                self.upload_status = f"Added {self.repo} to knowledge base!"
                yield
        except Exception as e:
            async with self:
                self.upload_status = f"Error: {str(e)}"
        finally:
            async with self:
                self.processing = False
                self.is_loading = False
                yield

    def update_repo(self, repo: str):
        """Update the repo"""
        self.repo = repo
147 |
148 |
def message(qa: QA) -> rx.Component:
    """Render one Q/A exchange: question right-aligned, answer left-aligned."""
    question_bubble = rx.box(
        rx.markdown(
            qa.question,
            background_color=rx.color("mauve", 4),
            color=rx.color("mauve", 12),
            **message_style,
        ),
        text_align="right",
        margin_top="1em",
    )
    answer_bubble = rx.box(
        rx.markdown(
            qa.answer,
            background_color=rx.color("accent", 4),
            color=rx.color("accent", 12),
            **message_style,
        ),
        text_align="left",
        padding_top="1em",
    )
    return rx.box(question_bubble, answer_bubble, width="100%")
174 |
175 |
def chat() -> rx.Component:
    """List all messages of the currently selected conversation."""
    messages = rx.box(
        rx.foreach(State.chats[State.current_chat], message),
        width="100%",
    )
    return rx.vstack(
        messages,
        py="8",
        flex="1",
        width="100%",
        max_width="50em",
        padding_x="4px",
        align_self="center",
        overflow="hidden",
        padding_bottom="5em",
    )
189 |
190 |
def action_bar() -> rx.Component:
    """Sticky bottom bar with the question form (input + Send button)."""
    question_input = rx.input(
        placeholder="Ask about the repository...",
        id="question",
        width=["15em", "20em", "45em", "50em", "50em", "50em"],
        disabled=State.processing,
        border_color=rx.color("mauve", 6),
        _focus={"border_color": rx.color("mauve", 8)},
        background_color="transparent",
    )
    send_button = rx.button(
        # Swap the label for a spinner while a question is in flight.
        rx.cond(
            State.processing,
            loading_icon(height="1em"),
            rx.text("Send"),
        ),
        type_="submit",
        disabled=State.processing,
        variant="surface",
        cursor="pointer",
    )
    question_form = rx.form(
        rx.hstack(
            question_input,
            send_button,
            align_items="center",
            spacing="3",
        ),
        on_submit=State.process_question,
        width="100%",
        reset_on_submit=True,
    )
    return rx.box(
        rx.vstack(
            question_form,
            align_items="center",
            width="100%",
        ),
        position="sticky",
        bottom="0",
        left="0",
        padding_y="16px",
        backdrop_filter="auto",
        backdrop_blur="lg",
        width="100%",
    )
237 |
238 |
def nav_icon(component: rx.Component) -> rx.badge:
    """Wrap a component in a small, square, unstyled navbar badge."""
    badge_props = dict(
        color_scheme="gray",
        variant="soft",
        width="21px",
        height="21px",
        display="flex",
        align_items="center",
        justify_content="center",
        background="none",
    )
    return rx.badge(component, **badge_props)
251 |
252 |
# Navbar color-mode toggle: shows a moon icon in light mode and a sun icon
# in dark mode; clicking toggles the app's color mode.
theme = nav_icon(
    rx.el.button(
        rx.color_mode.icon(
            light_component=rx.icon(
                "moon",
                size=14,
                color=rx.color("slate", 12),
            ),
            dark_component=rx.icon(
                "sun",
                size=14,
                color=rx.color("slate", 12),
            ),
        ),
        on_click=rx.toggle_color_mode,
    ),
)
270 |
271 |
def index():
    """Full page layout: absolute top navbar plus the centered chat column."""
    navbar = rx.hstack(
        rx.hstack(
            rx.heading("Chat with GitHub Repository", size="2", weight="medium"),
        ),
        rx.hstack(theme),
        border_bottom=f"1px solid {rx.color('gray', 5)}",
        width="100%",
        height="3em",
        bg=rx.color("gray", 2),
        position="absolute",
        top="0",
        left="0",
        align="center",
        justify="between",
        padding="1em",
    )
    titles = rx.vstack(
        rx.heading(
            "Chat with GitHub Repositories",
            size="7",
            weight="medium",
            align="center",
            width="100%",
        ),
        rx.heading(
            "Llama-3.2 running with Ollama",
            size="5",
            weight="medium",
            align="center",
            width="100%",
            color=rx.color("slate", 11),
        ),
        width="100%",
        spacing="1",
    )
    repo_row = rx.hstack(
        rx.text(
            "Github Repo",
            size="1",
            weight="bold",
            color=rx.color("slate", 10),
            width="120px",
            border_right="1px solid gray",
            margin_right="0.5em",
        ),
        rx.input(
            value=State.repo,
            on_change=State.update_repo,
            width="100%",
            variant="soft",
            bg="transparent",
            outline="none",
        ),
        align="center",
        width="100%",
        border_bottom=f"0.75px solid {rx.color('gray', 4)}",
    )
    process_button = rx.button(
        "Process",
        on_click=State.handle_repo_input,
        loading=State.is_loading,
        width="100%",
        variant="surface",
        cursor="pointer",
    )
    conversation = rx.vstack(
        rx.text(State.upload_status, size="1", align="center", width="100%"),
        chat(),
        action_bar(),
        width="100%",
    )
    return rx.vstack(
        navbar,
        rx.vstack(
            titles,
            rx.divider(height="2em", opacity="0"),
            repo_row,
            rx.divider(height="0.5em", opacity="0"),
            process_button,
            rx.divider(height="2em", opacity="0"),
            conversation,
            width="100%",
            max_width="30em",
        ),
        width="100%",
        height="100vh",
        align="center",
        justify="center",
    )
359 |
360 |
# Create the app and register the chat page at the root route with metadata.
app = rx.App()
app.add_page(
    index,
    title="GitHub Repository Chat",
    description="Chat with GitHub repositories using AI",
    route="/",
)
368 |
--------------------------------------------------------------------------------
/chat_with_github/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex==0.7.11
2 | embedchain
3 | ollama
--------------------------------------------------------------------------------
/chat_with_github/rxconfig.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 |
# Reflex project configuration; "chat" is the app's module directory.
config = rx.Config(
    app_name="chat",
)
7 |
--------------------------------------------------------------------------------
/chat_with_pdf_locally/.github/workflows/repository_dispatch.yml:
--------------------------------------------------------------------------------
1 | name: reflex-chat-repository-dispatch
2 | on:
3 | push:
4 | branches: ['main']
5 | jobs:
6 | test:
7 | name: reflex-chat-repository-dispatch
8 | runs-on: ubuntu-latest
9 | steps:
10 | - name: Dispatch
11 | uses: peter-evans/repository-dispatch@v3
12 | with:
13 | token: ${{ secrets.HOSTING_REPOSITORY_DISPATCH }}
14 | repository: reflex-dev/reflex-hosting
15 | event-type: push
16 | client-payload: '{"repo": "${{ github.repository }}", "sha": "${{ github.sha }}", "deployment-key": "chat"}'
17 |
--------------------------------------------------------------------------------
/chat_with_pdf_locally/.gitignore:
--------------------------------------------------------------------------------
1 | **/*.db
2 | **/*.ipynb
3 | **/*.pyc
4 | **/*.swp
5 | **/.DS_Store
6 | **/.web
7 | **/node_modules/**
8 | **/package-lock.json
9 | **/package.json
10 | *.db
11 | *.py[cod]
12 | .vscode
13 | .web
14 | __pycache__/
15 | assets/external/
16 | backend.zip
17 | bun.lockb
18 | dist/*
19 | frontend.zip
20 | poetry.lock
21 | venv/
22 |
--------------------------------------------------------------------------------
/chat_with_pdf_locally/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/chat_with_pdf_locally/README.md:
--------------------------------------------------------------------------------
1 | # Chat with PDF
2 |
3 | Chat with PDF is an LLM app that utilizes **Retrieval Augmented Generation (RAG)** to enable meaningful interaction with PDF files. Powered by **Llama 3.2** running locally, the app provides accurate answers to your questions based on the content of the uploaded PDF.
4 |
5 | ---
6 |
7 | ## Features
8 | - **Upload PDF Documents:** Easily upload any PDF document to start querying.
9 | - **Interactive Q&A:** Ask questions about the content of the uploaded PDF.
10 | - **Accurate Answers:** Get precise responses using RAG and the Llama 3.2 model.
11 |
12 | ---
13 |
14 | ## Getting Started
15 |
16 | ### 1. Clone the Repository
17 | Clone the GitHub repository to your local machine:
18 | ```bash
19 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
20 | cd reflex-llm-examples/chat_with_pdf_locally
21 | ```
22 |
23 | ### 2. Install Dependencies
24 | Install the required dependencies:
25 | ```bash
26 | pip install -r requirements.txt
27 | ```
28 |
29 | ### 3. Pull and Run Llama 3.2 Using Ollama
30 | Download and set up the Llama 3.2 model locally:
31 | ```bash
32 | ollama pull llama3.2
33 | ```
34 |
35 | ### 4. Run the Reflex App
36 | Run the application to start chatting with your PDF:
37 | ```bash
38 | reflex run
39 | ```
--------------------------------------------------------------------------------
/chat_with_pdf_locally/assets/chakra_color_mode_provider.js:
--------------------------------------------------------------------------------
1 | import { useColorMode as chakraUseColorMode } from "@chakra-ui/react";
2 | import { useTheme } from "next-themes";
3 | import { useEffect, useState } from "react";
4 | import { ColorModeContext, defaultColorMode } from "/utils/context.js";
5 |
// Bridges Chakra UI's color mode with next-themes: keeps Chakra's colorMode
// in sync with the resolved next-themes theme and exposes a validated setter.
export default function ChakraColorModeProvider({ children }) {
  const { theme, resolvedTheme, setTheme } = useTheme();
  const { colorMode, toggleColorMode } = chakraUseColorMode();
  const [resolvedColorMode, setResolvedColorMode] = useState(colorMode);

  // Whenever next-themes resolves a new theme, flip Chakra's mode to match.
  useEffect(() => {
    if (colorMode != resolvedTheme) {
      toggleColorMode();
    }
    setResolvedColorMode(resolvedTheme);
  }, [theme, resolvedTheme]);

  const rawColorMode = colorMode;
  // Setter that rejects unknown modes, logging and falling back to the default.
  const setColorMode = (mode) => {
    const allowedModes = ["light", "dark", "system"];
    if (!allowedModes.includes(mode)) {
      console.error(
        `Invalid color mode "${mode}". Defaulting to "${defaultColorMode}".`
      );
      mode = defaultColorMode;
    }
    setTheme(mode);
  };
  // NOTE(review): the JSX tags wrapping {children} (presumably
  // <ColorModeContext.Provider value={...}> using rawColorMode/resolvedColorMode/
  // setColorMode) appear to have been lost in extraction — confirm against the
  // original source before editing.
  return (

    {children}

  );
}
37 |
--------------------------------------------------------------------------------
/chat_with_pdf_locally/chat/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/chat_with_pdf_locally/chat/__init__.py
--------------------------------------------------------------------------------
/chat_with_pdf_locally/chat/chat.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | from chat.components.chat import State, chat, action_bar, sidebar
3 |
4 |
def index() -> rx.Component:
    """The main app: fixed sidebar plus the chat column offset to its right."""
    header = rx.hstack(
        rx.heading("Chat with PDF using Llama 3.2 💬"),
        rx.button(
            "New Chat",
            on_click=State.create_new_chat,
            margin_left="auto",
        ),
    )
    content = rx.box(
        rx.vstack(
            header,
            chat(),
            action_bar(),
            spacing="4",
            align_items="center",
            height="100vh",
            padding="4em",
        ),
        margin_left="300px",
        width="calc(100% - 300px)",
    )
    return rx.box(
        sidebar(),
        content,
        width="100%",
        height="100vh",
        background_color=rx.color("mauve", 1),
    )
33 |
34 |
# Create the Reflex app and register the main page at the root route.
app = rx.App()
app.add_page(index)
37 |
--------------------------------------------------------------------------------
/chat_with_pdf_locally/chat/components/chat.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | from typing import List
3 | from dataclasses import dataclass
4 | import tempfile
5 | import base64
6 | import asyncio
7 |
8 | from embedchain import App
9 |
# Styles
# Bubble styling shared by question and answer markdown messages.
message_style = dict(
    display="inline-block",
    padding="1em",
    border_radius="8px",
    max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
)

# Fixed left sidebar container styling.
SIDEBAR_STYLE = dict(
    width="300px",
    height="100vh",
    position="fixed",
    left=0,
    top=0,
    padding="2em",
    background_color=rx.color("mauve", 2),
    border_right=f"1px solid {rx.color('mauve', 3)}",
)

# Outline-style upload button matching the mauve palette.
UPLOAD_BUTTON_STYLE = dict(
    color=rx.color("mauve", 12),
    bg="transparent",
    border=f"1px solid {rx.color('mauve', 6)}",
    margin_y="1em",
    _hover={"bg": rx.color("mauve", 3)},
)
36 |
37 |
@dataclass
class QA:
    """A question and answer pair."""

    question: str  # The user's question as submitted from the form.
    answer: str  # The model's answer; empty until processing completes.
44 |
45 |
class LoadingIcon(rx.Component):
    """A custom loading icon component.

    Wraps the ``SpinningCircles`` spinner from the third-party
    ``react-loading-icons`` package as a Reflex component.
    """

    library = "react-loading-icons"
    tag = "SpinningCircles"
    stroke: rx.Var[str]
    stroke_opacity: rx.Var[str]
    fill: rx.Var[str]
    fill_opacity: rx.Var[str]
    stroke_width: rx.Var[str]
    speed: rx.Var[str]
    height: rx.Var[str]

    def get_event_triggers(self) -> dict:
        """Declare the component's event triggers (on_change passes status)."""
        return {"on_change": lambda status: [status]}


# Convenience constructor, mirroring Reflex's ``Component.create`` pattern.
loading_icon = LoadingIcon.create
64 |
65 |
class State(rx.State):
    """The app state."""

    chats: List[List[QA]] = [[]]  # One message list per chat session.
    base64_pdf: str = ""  # Base64-encoded bytes of the uploaded PDF.
    uploading: bool = False  # True while a file upload is being processed.
    current_chat: int = 0  # Index of the active chat in ``chats``.
    processing: bool = False  # True while a question is being answered.
    db_path: str = tempfile.mkdtemp()  # Scratch dir for the Chroma vector DB.
    pdf_filename: str = ""  # Name of the most recently uploaded PDF.
    knowledge_base_files: List[str] = []  # Filenames added to the knowledge base.
    upload_status: str = ""  # Status message shown after an upload.
78 |
79 | def get_app(self):
80 | return App.from_config(
81 | config={
82 | "llm": {
83 | "provider": "ollama",
84 | "config": {
85 | "model": "llama3.2:latest",
86 | "max_tokens": 250,
87 | "temperature": 0.5,
88 | "stream": True,
89 | "base_url": "http://localhost:11434",
90 | },
91 | },
92 | "vectordb": {"provider": "chroma", "config": {"dir": self.db_path}},
93 | "embedder": {
94 | "provider": "ollama",
95 | "config": {
96 | "model": "llama3.2:latest",
97 | "base_url": "http://localhost:11434",
98 | },
99 | },
100 | }
101 | )
102 |
103 | @rx.event(background=True)
104 | async def process_question(self, form_data: dict):
105 | """Process a question and update the chat."""
106 | if self.processing or not form_data.get("question"):
107 | return
108 |
109 | question = form_data["question"]
110 |
111 | async with self:
112 | self.processing = True
113 | self.chats[self.current_chat].append(QA(question=question, answer=""))
114 | yield
115 | await asyncio.sleep(1)
116 |
117 | app = self.get_app()
118 | answer = app.chat(question)
119 |
120 | async with self:
121 | self.chats[self.current_chat][-1].answer = answer
122 | self.processing = False
123 | self.chats = self.chats
124 | yield
125 | await asyncio.sleep(1)
126 |
127 | async def handle_upload(self, files: List[rx.UploadFile]):
128 | """Handle file upload and processing."""
129 | if not files:
130 | self.upload_status = "No file uploaded!"
131 | return
132 | yield
133 |
134 | self.uploading = True
135 | yield
136 |
137 | file = files[0]
138 | upload_data = await file.read()
139 | outfile = rx.get_upload_dir() / file.filename
140 | self.pdf_filename = file.filename
141 |
142 | with outfile.open("wb") as file_object:
143 | file_object.write(upload_data)
144 |
145 | # Base64 encode the PDF content
146 | base64_pdf = base64.b64encode(upload_data).decode("utf-8")
147 |
148 | self.base64_pdf = base64_pdf
149 |
150 | app = self.get_app()
151 | app.add(str(outfile), data_type="pdf_file")
152 | self.knowledge_base_files.append(self.pdf_filename)
153 | self.upload_status = f"Added {self.pdf_filename} to knowledge base"
154 |
155 | self.uploading = False
156 | yield
157 |
158 | def create_new_chat(self):
159 | """Create a new chat."""
160 | self.chats.append([])
161 | self.current_chat = len(self.chats) - 1
162 |
163 |
def pdf_preview() -> rx.Component:
    """PDF preview component.

    Fix: the inline HTML for the preview was truncated in the source;
    restore an <iframe> that renders the uploaded PDF from its base64
    data URI (State.base64_pdf is set by State.handle_upload).
    """
    return rx.box(
        rx.heading("PDF Preview", size="4", margin_bottom="1em"),
        rx.cond(
            State.base64_pdf != "",
            rx.html(
                f"""
                <iframe
                    src="data:application/pdf;base64,{State.base64_pdf}"
                    width="100%"
                    height="410px"
                    style="border: none;">
                </iframe>
                """
            ),
            rx.text("No PDF uploaded yet", color="red"),
        ),
        width="100%",
        margin_top="1em",
        border_radius="md",
        overflow="hidden",
    )
187 |
188 |
def message(qa: QA) -> rx.Component:
    """Render one question/answer exchange as a pair of chat bubbles."""
    question_bubble = rx.box(
        rx.markdown(
            qa.question,
            background_color=rx.color("mauve", 4),
            color=rx.color("mauve", 12),
            **message_style,
        ),
        text_align="right",
        margin_top="1em",
    )
    answer_bubble = rx.box(
        rx.markdown(
            qa.answer,
            background_color=rx.color("accent", 4),
            color=rx.color("accent", 12),
            **message_style,
        ),
        text_align="left",
        padding_top="1em",
    )
    return rx.box(question_bubble, answer_bubble, width="100%")
214 |
215 |
def chat() -> rx.Component:
    """Scrollable column showing every message of the active conversation."""
    history = rx.box(
        rx.foreach(State.chats[State.current_chat], message),
        width="100%",
    )
    return rx.vstack(
        history,
        py="8",
        flex="1",
        width="100%",
        max_width="50em",
        padding_x="4px",
        align_self="center",
        overflow="hidden",
        padding_bottom="5em",
    )
229 |
230 |
def action_bar() -> rx.Component:
    """Sticky bottom bar with the question input and send button."""
    question_input = rx.input(
        placeholder="Ask about the PDF...",
        id="question",
        width=["15em", "20em", "45em", "50em", "50em", "50em"],
        disabled=State.processing,
        border_color=rx.color("mauve", 6),
        _focus={"border_color": rx.color("mauve", 8)},
        background_color="transparent",
    )
    # Button shows a spinner while an answer is being generated.
    send_button = rx.button(
        rx.cond(
            State.processing,
            loading_icon(height="1em"),
            rx.text("Send"),
        ),
        type_="submit",
        disabled=State.processing,
        bg=rx.color("accent", 9),
        color="white",
        _hover={"bg": rx.color("accent", 10)},
    )
    question_form = rx.form(
        rx.hstack(
            question_input,
            send_button,
            align_items="center",
            spacing="3",
        ),
        on_submit=State.process_question,
        width="100%",
        reset_on_submit=True,
    )
    return rx.box(
        rx.vstack(
            question_form,
            align_items="center",
            width="100%",
        ),
        position="sticky",
        bottom="0",
        left="0",
        padding_y="16px",
        backdrop_filter="auto",
        backdrop_blur="lg",
        border_top=f"1px solid {rx.color('mauve', 3)}",
        background_color=rx.color("mauve", 2),
        width="100%",
    )
278 |
279 |
def sidebar() -> rx.Component:
    """Fixed left panel: PDF upload, preview, and knowledge-base file list."""
    upload_area = rx.upload(
        rx.vstack(
            rx.button(
                "Browse files",
                **UPLOAD_BUTTON_STYLE,
            ),
            rx.text(
                "Drag and drop PDF file here",
                font_size="sm",
                color=rx.color("mauve", 11),
            ),
        ),
        border=f"1px dashed {rx.color('mauve', 6)}",
        padding="2em",
        border_radius="md",
        accept={".pdf": "application/pdf"},
        max_files=1,
        multiple=False,
    )
    add_button = rx.button(
        "Add to Knowledge Base",
        on_click=State.handle_upload(rx.upload_files()),
        loading=State.uploading,
        **UPLOAD_BUTTON_STYLE,
    )
    kb_file_list = rx.foreach(
        State.knowledge_base_files,
        lambda file: rx.box(
            rx.text(file, font_size="sm"),
            padding="0.5em",
            border_radius="md",
            width="100%",
        ),
    )
    return rx.box(
        rx.vstack(
            rx.heading("PDF Upload", size="6", margin_bottom="1em"),
            upload_area,
            add_button,
            rx.cond(
                State.pdf_filename != "",
                pdf_preview(),
            ),
            kb_file_list,
            rx.text(State.upload_status, color=rx.color("mauve", 11), font_size="sm"),
            align_items="stretch",
            height="100%",
        ),
        **SIDEBAR_STYLE,
    )
329 |
--------------------------------------------------------------------------------
/chat_with_pdf_locally/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex==0.7.11
2 | embedchain
3 | ollama
--------------------------------------------------------------------------------
/chat_with_pdf_locally/rxconfig.py:
--------------------------------------------------------------------------------
import reflex as rx


# Reflex project configuration; `app_name` must match the app package name.
config = rx.Config(
    app_name="chat",
)
7 |
--------------------------------------------------------------------------------
/chat_with_pdf_locally/uploaded_files/Attention is all you need.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/chat_with_pdf_locally/uploaded_files/Attention is all you need.pdf
--------------------------------------------------------------------------------
/deepseek_r1_chatui/.github/workflows/repository_dispatch.yml:
--------------------------------------------------------------------------------
1 | name: reflex-chat-repository-dispatch
2 | on:
3 | push:
4 | branches: ['main']
5 | jobs:
6 | test:
7 | name: reflex-chat-repository-dispatch
8 | runs-on: ubuntu-latest
9 | steps:
10 | - name: Dispatch
11 | uses: peter-evans/repository-dispatch@v3
12 | with:
13 | token: ${{ secrets.HOSTING_REPOSITORY_DISPATCH }}
14 | repository: reflex-dev/reflex-hosting
15 | event-type: push
16 | client-payload: '{"repo": "${{ github.repository }}", "sha": "${{ github.sha }}", "deployment-key": "chat"}'
17 |
--------------------------------------------------------------------------------
/deepseek_r1_chatui/.gitignore:
--------------------------------------------------------------------------------
1 | **/*.db
2 | **/*.ipynb
3 | **/*.pyc
4 | **/*.swp
5 | **/.DS_Store
6 | **/.web
7 | **/node_modules/**
8 | **/package-lock.json
9 | **/package.json
10 | *.db
11 | *.py[cod]
12 | .vscode
13 | .web
14 | __pycache__/
15 | assets/external/
16 | backend.zip
17 | bun.lockb
18 | dist/*
19 | frontend.zip
20 | poetry.lock
21 | venv/
22 |
--------------------------------------------------------------------------------
/deepseek_r1_chatui/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/deepseek_r1_chatui/README.md:
--------------------------------------------------------------------------------
1 | # Chat with DeepSeek-r1 Locally
2 |
3 | A user-friendly and highly customizable Python web app designed to demonstrate DeepSeek-r1 in a ChatGPT-style interface, running locally.
4 |
5 | ---
6 |
7 | ## Getting Started
8 |
9 | ### 1. Clone the Repository
10 | Clone the GitHub repository to your local machine:
11 | ```bash
12 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
13 | cd reflex-llm-examples/chat_with_deepseek_r1_locally/deepseek_r1_chatui
14 | ```
15 |
16 | ### 2. Install Dependencies
17 | Install the required dependencies:
18 | ```bash
19 | pip install -r requirements.txt
20 | ```
21 |
22 | ### 3. Pull and Run DeepSeek-r1 Using Ollama
23 | Download and set up the DeepSeek-r1 model locally:
24 | ```bash
25 | ollama pull deepseek-r1:1.5b
26 | ```
27 |
28 | ### 4. Run the Reflex App
30 | Run the application to start chatting with DeepSeek-r1:
30 | ```bash
31 | reflex run
32 | ```
--------------------------------------------------------------------------------
/deepseek_r1_chatui/assets/deepseek_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/deepseek_r1_chatui/assets/deepseek_logo.png
--------------------------------------------------------------------------------
/deepseek_r1_chatui/assets/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/deepseek_r1_chatui/assets/favicon.ico
--------------------------------------------------------------------------------
/deepseek_r1_chatui/chat/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/deepseek_r1_chatui/chat/__init__.py
--------------------------------------------------------------------------------
/deepseek_r1_chatui/chat/chat.py:
--------------------------------------------------------------------------------
1 | """The main Chat app."""
2 |
3 | import reflex as rx
4 |
5 | from chat.components import chat, navbar
6 |
7 |
def index() -> rx.Component:
    """The main app."""
    # Stacked layout: navbar on top, then the conversation and the input bar.
    page_sections = [navbar(), chat.chat(), chat.action_bar()]
    return rx.vstack(
        *page_sections,
        background_color=rx.color("mauve", 1),
        color=rx.color("mauve", 12),
        min_height="100vh",
        align_items="stretch",
        spacing="0",
    )
20 |
21 |
# Add state and page to the app.
app = rx.App(
    theme=rx.theme(
        appearance="dark",  # dark appearance by default
        accent_color="violet",
    ),
)
app.add_page(index)
30 |
--------------------------------------------------------------------------------
/deepseek_r1_chatui/chat/components/__init__.py:
--------------------------------------------------------------------------------
1 | from .loading_icon import loading_icon as loading_icon
2 | from .navbar import navbar as navbar
3 |
--------------------------------------------------------------------------------
/deepseek_r1_chatui/chat/components/chat.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 | from chat.components import loading_icon
4 | from chat.state import QA, State
5 |
6 |
# Shared bubble styling for question/answer markdown; max_width is
# responsive (one value per Reflex breakpoint).
message_style = dict(
    display="inline-block",
    padding="1em",
    border_radius="8px",
    max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
)
13 |
14 |
def message(qa: QA) -> rx.Component:
    """A single question/answer message.

    Args:
        qa: The question/answer pair.

    Returns:
        A component displaying the question/answer pair.
    """
    question_bubble = rx.box(
        rx.markdown(
            qa.question,
            background_color=rx.color("mauve", 4),
            color=rx.color("mauve", 12),
            **message_style,
        ),
        text_align="right",
        margin_top="1em",
    )
    answer_bubble = rx.box(
        rx.markdown(
            qa.answer,
            background_color=rx.color("accent", 4),
            color=rx.color("accent", 12),
            **message_style,
        ),
        text_align="left",
        padding_top="1em",
    )
    return rx.box(question_bubble, answer_bubble, width="100%")
47 |
48 |
def chat() -> rx.Component:
    """List all the messages in a single conversation."""
    message_list = rx.box(
        rx.foreach(State.chats[State.current_chat], message),
        width="100%",
    )
    return rx.vstack(
        message_list,
        py="8",
        flex="1",
        width="100%",
        max_width="50em",
        padding_x="4px",
        align_self="center",
        overflow="hidden",
        padding_bottom="5em",
    )
62 |
63 |
def action_bar() -> rx.Component:
    """Sticky bottom bar with the message input and send button."""
    message_input = rx.input(
        placeholder="Type something...",
        id="question",
        width=["15em", "20em", "45em", "50em", "50em", "50em"],
        disabled=State.processing,
        border_color=rx.color("mauve", 6),
        _focus={"border_color": rx.color("mauve", 8)},
        background_color="transparent",
    )
    # Button shows a spinner while a response is streaming in.
    send_button = rx.button(
        rx.cond(
            State.processing,
            loading_icon(height="1em"),
            rx.text("Send"),
        ),
        type_="submit",
        disabled=State.processing,
        bg=rx.color("accent", 9),
        color="white",
        _hover={"bg": rx.color("accent", 10)},
    )
    message_form = rx.form(
        rx.hstack(
            message_input,
            send_button,
            align_items="center",
            spacing="3",
        ),
        on_submit=State.process_question,
        width="100%",
        reset_on_submit=True,
    )
    return rx.box(
        rx.vstack(
            message_form,
            align_items="center",
            width="100%",
        ),
        position="sticky",
        bottom="0",
        left="0",
        padding_x="450px",
        padding_y="16px",
        backdrop_filter="auto",
        backdrop_blur="lg",
        border_top=f"1px solid {rx.color('mauve', 3)}",
        background_color=rx.color("mauve", 2),
        width="100%",
    )
112 |
--------------------------------------------------------------------------------
/deepseek_r1_chatui/chat/components/loading_icon.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 |
class LoadingIcon(rx.Component):
    """A custom loading icon component.

    Wraps the `SpinningCircles` spinner from the `react-loading-icons`
    npm package; each rx.Var below maps 1:1 to a React prop.
    """

    library = "react-loading-icons"
    tag = "SpinningCircles"
    stroke: rx.Var[str]
    stroke_opacity: rx.Var[str]
    fill: rx.Var[str]
    fill_opacity: rx.Var[str]
    stroke_width: rx.Var[str]
    speed: rx.Var[str]
    height: rx.Var[str]

    def get_event_triggers(self) -> dict:
        # Forward the raw status value to any on_change handler.
        return {"on_change": lambda status: [status]}


# Convenience factory so callers can write `loading_icon(...)`.
loading_icon = LoadingIcon.create
22 |
--------------------------------------------------------------------------------
/deepseek_r1_chatui/chat/components/modal.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | from chat.state import State
3 |
4 |
def modal() -> rx.Component:
    """A modal to create a new chat."""
    # Dimmed full-screen backdrop behind the dialog.
    backdrop = rx.box(
        position="fixed",
        top="0",
        left="0",
        right="0",
        bottom="0",
        background_color="rgba(0, 0, 0, 0.6)",
        z_index="1000",
    )
    # Title row with a close ("✕") button.
    header = rx.hstack(
        rx.text("Create new chat", font_size="lg", color="white"),
        rx.button(
            "✕",
            on_click=State.toggle_modal,
            color="rgba(255, 255, 255, 0.8)",
            background_color="transparent",
            _hover={"color": "white"},
            cursor="pointer",
            padding="0",
        ),
        justify_content="space-between",
        align_items="center",
        width="100%",
        padding_x="4",
        padding_y="3",
    )
    # Name input; committed on blur.
    body = rx.box(
        rx.input(
            placeholder="Type something...",
            on_blur=State.set_new_chat_name,
            background_color="#222",
            border_color="rgba(255, 255, 255, 0.2)",
            color="white",
            _placeholder={"color": "rgba(255, 255, 255, 0.7)"},
            padding="2",
            width="100%",
            border_radius="md",
        ),
        width="100%",
        padding_x="4",
        padding_y="2",
    )
    footer = rx.box(
        rx.button(
            "Create",
            background_color="#5535d4",
            color="white",
            padding_x="4",
            padding_y="2",
            border_radius="md",
            _hover={"background_color": "#4c2db3"},
            on_click=State.create_chat,
        ),
        width="100%",
        padding="4",
        display="flex",
        justify_content="flex-end",
    )
    # Centered dialog box above the backdrop.
    dialog = rx.vstack(
        header,
        body,
        footer,
        position="fixed",
        top="50%",
        left="50%",
        transform="translate(-50%, -50%)",
        width="90%",
        max_width="400px",
        background_color="#222",
        border_radius="md",
        z_index="1001",
        spacing="0",
    )
    # Only rendered while State.modal_open is set.
    return rx.cond(
        State.modal_open,
        rx.box(
            backdrop,
            dialog,
            width="100vw",
            height="100vh",
            position="fixed",
            top="0",
            left="0",
            z_index="1000",
        ),
    )
93 |
--------------------------------------------------------------------------------
/deepseek_r1_chatui/chat/components/navbar.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | from chat.state import State
3 |
4 |
def sidebar_chat(chat: str) -> rx.Component:
    """A sidebar chat item.

    Args:
        chat: The chat item.
    """
    return rx.drawer.close(
        rx.hstack(
            rx.button(
                chat,
                on_click=lambda: State.set_chat(chat),
                width="80%",
                variant="surface",
            ),
            rx.button(
                rx.icon(
                    tag="trash",
                    # NOTE(review): delete_chat removes State.current_chat,
                    # not necessarily this row's `chat` — confirm intended.
                    on_click=State.delete_chat,
                    stroke_width=1,
                ),
                width="20%",
                variant="surface",
                color_scheme="red",
            ),
            width="100%",
        )
    )
32 |
33 |
def sidebar(trigger) -> rx.Component:
    """Left drawer listing all chats; opened by the given trigger component."""
    chat_list = rx.vstack(
        rx.heading("Chats", color=rx.color("mauve", 11)),
        rx.divider(),
        rx.foreach(State.chat_titles, lambda chat: sidebar_chat(chat)),
        align_items="stretch",
        width="100%",
    )
    drawer_panel = rx.drawer.content(
        chat_list,
        top="auto",
        right="auto",
        height="100%",
        width="20em",
        padding="2em",
        background_color=rx.color("mauve", 2),
        outline="none",
    )
    return rx.drawer.root(
        rx.drawer.trigger(trigger),
        rx.drawer.overlay(),
        rx.drawer.portal(drawer_panel),
        direction="left",
    )
59 |
60 |
def modal(trigger) -> rx.Component:
    """A modal to create a new chat, opened by the given trigger component."""
    name_input = rx.input(
        placeholder="Type something...",
        on_blur=State.set_new_chat_name,
        width=["15em", "20em", "30em", "30em", "30em", "30em"],
    )
    create_button = rx.dialog.close(
        rx.button(
            "Create chat",
            on_click=State.create_chat,
        ),
    )
    return rx.dialog.root(
        rx.dialog.trigger(trigger),
        rx.dialog.content(
            rx.hstack(
                name_input,
                create_button,
                background_color=rx.color("mauve", 1),
                spacing="2",
                width="100%",
            ),
        ),
    )
84 |
85 |
def navbar():
    """Sticky top bar: title, current-chat badge, and chat controls."""
    title_group = rx.hstack(
        rx.avatar(fallback="R1", size="4", variant="solid"),
        rx.heading("Chat with DeepSeek-r1 Locally"),
        rx.desktop_only(
            rx.badge(
                State.current_chat,
                rx.tooltip(
                    rx.icon("info", size=14),
                    content="The current selected chat.",
                ),
                variant="soft",
            )
        ),
        align_items="center",
    )
    controls_group = rx.hstack(
        modal(rx.button("+ New chat")),
        sidebar(
            rx.button(
                rx.icon(
                    tag="messages-square",
                    color=rx.color("mauve", 12),
                ),
                background_color=rx.color("mauve", 6),
            )
        ),
        align_items="center",
    )
    return rx.box(
        rx.hstack(
            title_group,
            controls_group,
            justify_content="space-between",
            align_items="center",
        ),
        backdrop_filter="auto",
        backdrop_blur="lg",
        padding="12px",
        border_bottom=f"1px solid {rx.color('mauve', 3)}",
        background_color=rx.color("mauve", 2),
        position="sticky",
        top="0",
        z_index="100",
        align_items="center",
    )
130 |
--------------------------------------------------------------------------------
/deepseek_r1_chatui/chat/state.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import AsyncGenerator
3 | import reflex as rx
4 | from ollama import AsyncClient
5 | from langchain.prompts import PromptTemplate
6 |
7 | ollama_client = AsyncClient()
8 |
9 |
class QA(rx.Base):
    """A question and answer pair."""

    # The user's question text.
    question: str
    # The assistant's answer; filled incrementally while streaming.
    answer: str
15 |
16 |
# Initial chat mapping (title -> message list). NOTE: this is shared module
# state — assign a copy when resetting, never the object itself, or later
# appends will mutate the "default".
DEFAULT_CHATS = {
    "Intros": [],
}
20 |
21 |
class State(rx.State):
    """The app state."""

    # Mapping of chat title -> list of QA pairs for that conversation.
    chats: dict[str, list[QA]] = DEFAULT_CHATS
    # Title of the conversation currently displayed.
    current_chat: str = "Intros"
    question: str = ""
    # True while a response is streaming in.
    processing: bool = False
    # Buffer for the "create chat" modal input.
    new_chat_name: str = ""

    def create_chat(self):
        """Create a new chat named after `new_chat_name` and switch to it."""
        if self.new_chat_name.strip():
            self.current_chat = self.new_chat_name
            self.chats[self.new_chat_name] = []
            self.new_chat_name = ""

    def delete_chat(self):
        """Delete the current chat.

        Fix: reset with a *fresh copy* of the default chats instead of
        assigning the module-level DEFAULT_CHATS dict itself — aliasing it
        meant later messages were appended into the shared default,
        corrupting every subsequent reset.
        """
        del self.chats[self.current_chat]
        if len(self.chats) == 0:
            self.chats = {title: [] for title in DEFAULT_CHATS}
        self.current_chat = list(self.chats.keys())[0]

    def set_chat(self, chat_name: str):
        """Set the name of the current chat."""
        self.current_chat = chat_name

    @rx.var(cache=True)
    def chat_titles(self) -> list[str]:
        """Get the list of chat titles."""
        return list(self.chats.keys())

    def _get_chat_history(self) -> str:
        """Get formatted chat history for the current chat."""
        history = []
        for qa in self.chats[self.current_chat][:-1]:  # Exclude the current question
            history.extend([f"Human: {qa.question}", f"Assistant: {qa.answer}"])
        return "\n".join(history)

    @rx.event(background=True)
    async def process_question(self, form_data: dict[str, str]) -> AsyncGenerator:
        """Process a question and get streaming response from Ollama.

        Runs as a background event: all state writes happen inside
        `async with self`, with yields so the UI updates incrementally.
        """
        # Get and validate question
        question = form_data.get("question", "").strip()
        if not question:
            return

        # Add the question to the list of questions
        async with self:
            qa = QA(question=question, answer="")
            self.chats[self.current_chat].append(qa)
            self.processing = True
            yield
            await asyncio.sleep(0.1)

        try:
            # Create prompt template
            prompt_template = PromptTemplate(
                input_variables=["chat_history", "question"],
                template="""You are a helpful AI assistant. Use the following chat history and question to provide a helpful response:

Chat History:
{chat_history}

Current Question: {question}

Please provide a detailed and helpful response.""",
            )

            # Generate prompt with chat history
            prompt = prompt_template.format(
                chat_history=self._get_chat_history(), question=question
            )

            # Stream response from Ollama, appending each chunk to the answer.
            async for chunk in await ollama_client.chat(
                model="deepseek-r1:1.5b",
                messages=[{"role": "user", "content": prompt}],
                stream=True,
            ):
                async with self:
                    if "message" in chunk and "content" in chunk["message"]:
                        self.chats[self.current_chat][-1].answer += chunk["message"][
                            "content"
                        ]
                        # Reassign so Reflex notices the nested mutation.
                        self.chats = self.chats
                        yield
                        await asyncio.sleep(0.05)

        except Exception as e:
            # Surface the failure in the chat instead of hanging silently.
            async with self:
                self.chats[self.current_chat][-1].answer = f"Error: {str(e)}"
                self.chats = self.chats

        finally:
            async with self:
                self.processing = False
                yield
120 |
--------------------------------------------------------------------------------
/deepseek_r1_chatui/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex>=0.7.11
2 | langchain
3 | ollama
--------------------------------------------------------------------------------
/deepseek_r1_chatui/rxconfig.py:
--------------------------------------------------------------------------------
import reflex as rx


# Reflex project configuration; `app_name` matches the `chat` package.
config = rx.Config(
    app_name="chat",
)
7 |
--------------------------------------------------------------------------------
/deepseek_r1_rag/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.db
3 | .web
4 | *.py[cod]
5 | assets/external/
6 |
--------------------------------------------------------------------------------
/deepseek_r1_rag/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/deepseek_r1_rag/README.md:
--------------------------------------------------------------------------------
1 | # Chat with PDF
2 |
3 | Chat with PDF is an LLM app that utilizes **Retrieval Augmented Generation (RAG)** to enable meaningful interaction with PDF files. Powered by **DeepSeek-r1** running locally, the app provides accurate answers to your questions based on the content of the uploaded PDF.
4 |
5 | ---
6 |
7 | ## Features
8 | - **Upload PDF Documents:** Easily upload any PDF document to start querying.
9 | - **Interactive Q&A:** Ask questions about the content of the uploaded PDF.
10 | - **Accurate Answers:** Get precise responses using RAG and the DeepSeek-r1 model.
11 |
12 | ---
13 |
14 | ## Getting Started
15 |
16 | ### 1. Clone the Repository
17 | Clone the GitHub repository to your local machine:
18 | ```bash
19 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
20 | cd reflex-llm-examples/chat_with_deepseek_r1_locally/deepseek_r1_rag
21 | ```
22 |
23 | ### 2. Install Dependencies
24 | Install the required dependencies:
25 | ```bash
26 | pip install -r requirements.txt
27 | ```
28 |
29 | ### 3. Pull and Run DeepSeek-r1 Using Ollama
30 | Download and set up the DeepSeek-r1 model locally:
31 | ```bash
32 | ollama pull deepseek-r1:1.5b
33 | ```
34 |
35 | ### 4. Run the Reflex App
36 | Run the application to start chatting with your PDF:
37 | ```bash
38 | reflex run
39 | ```
40 |
--------------------------------------------------------------------------------
/deepseek_r1_rag/assets/chakra_color_mode_provider.js:
--------------------------------------------------------------------------------
1 | import { useColorMode as chakraUseColorMode } from "@chakra-ui/react";
2 | import { useTheme } from "next-themes";
3 | import { useEffect, useState } from "react";
4 | import { ColorModeContext, defaultColorMode } from "/utils/context.js";
5 |
6 | export default function ChakraColorModeProvider({ children }) {
7 | const { theme, resolvedTheme, setTheme } = useTheme();
8 | const { colorMode, toggleColorMode } = chakraUseColorMode();
9 | const [resolvedColorMode, setResolvedColorMode] = useState(colorMode);
10 |
11 | useEffect(() => {
12 | if (colorMode != resolvedTheme) {
13 | toggleColorMode();
14 | }
15 | setResolvedColorMode(resolvedTheme);
16 | }, [theme, resolvedTheme]);
17 |
18 | const rawColorMode = colorMode;
19 | const setColorMode = (mode) => {
20 | const allowedModes = ["light", "dark", "system"];
21 | if (!allowedModes.includes(mode)) {
22 | console.error(
23 | `Invalid color mode "${mode}". Defaulting to "${defaultColorMode}".`
24 | );
25 | mode = defaultColorMode;
26 | }
27 | setTheme(mode);
28 | };
29 | return (
30 |
33 | {children}
34 |
35 | );
36 | }
37 |
--------------------------------------------------------------------------------
/deepseek_r1_rag/chat/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/deepseek_r1_rag/chat/__init__.py
--------------------------------------------------------------------------------
/deepseek_r1_rag/chat/chat.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | from chat.components.chat import State, chat, action_bar, sidebar
3 |
4 |
def index() -> rx.Component:
    """The main app."""
    header = rx.hstack(
        rx.heading("Chat with DeepSeek-R1 💬"),
        rx.button(
            "New Chat",
            on_click=State.create_new_chat,
            margin_left="auto",
        ),
    )
    main_column = rx.vstack(
        header,
        chat(),
        action_bar(),
        spacing="4",
        align_items="center",
        height="100vh",
        padding="4em",
    )
    # Shift content right so it clears the fixed 300px sidebar.
    content_area = rx.box(
        main_column,
        margin_left="300px",
        width="calc(100% - 300px)",
    )
    return rx.box(
        sidebar(),
        content_area,
        width="100%",
        height="100vh",
        background_color=rx.color("mauve", 1),
    )


app = rx.App()
app.add_page(index)
37 |
--------------------------------------------------------------------------------
/deepseek_r1_rag/chat/components/chat.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | from typing import List
3 | from dataclasses import dataclass
4 | import tempfile
5 | import base64
6 | from pathlib import Path
7 | import asyncio
8 |
9 | from llama_index.core import VectorStoreIndex, Settings, SimpleDirectoryReader
10 | from llama_index.llms.ollama import Ollama
11 | from llama_index.core import PromptTemplate
12 | from llama_index.embeddings.huggingface import HuggingFaceEmbedding
13 |
# Shared styling applied to every chat message bubble.
message_style = {
    "display": "inline-block",
    "padding": "1em",
    "border_radius": "8px",
    # Responsive max widths, one per breakpoint.
    "max_width": ["30em", "30em", "50em", "50em", "50em", "50em"],
}

# Container style for the fixed left sidebar.
SIDEBAR_STYLE = {
    "width": "300px",
    "height": "100vh",
    "position": "fixed",
    "left": 0,
    "top": 0,
    "padding": "2em",
    "background_color": rx.color("mauve", 2),
    "border_right": f"1px solid {rx.color('mauve', 3)}",
}

# Style shared by the upload-related buttons in the sidebar.
UPLOAD_BUTTON_STYLE = {
    "color": rx.color("mauve", 12),
    "bg": "transparent",
    "border": f"1px solid {rx.color('mauve', 6)}",
    "margin_y": "1em",
    "_hover": {"bg": rx.color("mauve", 3)},
}
40 |
41 |
@dataclass
class QA:
    """One chat exchange: the user's question and the model's answer."""

    # Question text as submitted by the user.
    question: str
    # Answer text; updated incrementally while the response streams in.
    answer: str
48 |
49 |
class LoadingIcon(rx.Component):
    """A custom loading icon component.

    Thin Reflex wrapper around the "SpinningCircles" spinner from the
    react-loading-icons npm package.
    """

    library = "react-loading-icons"
    tag = "SpinningCircles"
    # Props forwarded verbatim to the underlying React component.
    stroke: rx.Var[str]
    stroke_opacity: rx.Var[str]
    fill: rx.Var[str]
    fill_opacity: rx.Var[str]
    stroke_width: rx.Var[str]
    speed: rx.Var[str]
    height: rx.Var[str]

    def get_event_triggers(self) -> dict:
        # Expose on_change; the raw status payload is passed through unchanged.
        return {"on_change": lambda status: [status]}


# Convenience constructor, mirroring how built-in Reflex components are used.
loading_icon = LoadingIcon.create
68 |
69 |
class State(rx.State):
    """Application state: conversations, upload status, and the RAG engine.

    Public vars drive the UI; underscore-prefixed vars are backend-only and
    never serialized to the client.
    """

    # One message list per conversation; each entry is a QA pair.
    chats: List[List[QA]] = [[]]
    # Base64-encoded bytes of the last uploaded PDF, used by pdf_preview().
    base64_pdf: str = ""
    # True while a PDF is being uploaded and indexed.
    uploading: bool = False
    # Index into `chats` of the conversation currently displayed.
    current_chat: int = 0
    # True while a question is being answered (disables input and button).
    processing: bool = False
    # NOTE(review): temp dir is created at class-definition (import) time, and
    # `db_path` does not appear to be read anywhere in this file — confirm
    # before removing.
    db_path: str = tempfile.mkdtemp()
    # Name of the most recently uploaded PDF.
    pdf_filename: str = ""
    # Filenames listed in the sidebar "knowledge base" section.
    knowledge_base_files: List[str] = []
    # Status line rendered at the bottom of the sidebar.
    upload_status: str = ""

    # Backend-only: lazily built LlamaIndex streaming query engine.
    _query_engine = None
    # Backend-only: temp directory where uploaded PDFs are written.
    _temp_dir = None

    def setup_llamaindex(self):
        """Setup LlamaIndex with models and prompt template.

        Builds the query engine once: local Ollama LLM plus HuggingFace
        embeddings, indexes every PDF under the temp dir, and installs a
        custom QA prompt. No-op if the engine already exists or nothing has
        been uploaded yet.
        """
        if self._query_engine is None and self._temp_dir:
            # Setup LLM (local Ollama; generous timeout for slow first load)
            llm = Ollama(model="deepseek-r1:1.5b", request_timeout=120.0)

            # Setup embedding model
            embed_model = HuggingFaceEmbedding(
                model_name="BAAI/bge-large-en-v1.5", trust_remote_code=True
            )

            # Configure settings (module-level globals used by LlamaIndex)
            Settings.embed_model = embed_model
            Settings.llm = llm

            # Load documents (every .pdf under the temp dir, recursively)
            loader = SimpleDirectoryReader(
                input_dir=self._temp_dir, required_exts=[".pdf"], recursive=True
            )
            docs = loader.load_data()

            # Create index and query engine
            index = VectorStoreIndex.from_documents(docs, show_progress=True)

            # Setup streaming query engine with custom prompt
            # NOTE(review): "incase case" is a typo in the prompt text; left
            # untouched here because changing it alters the runtime prompt.
            qa_prompt_tmpl_str = (
                "Context information is below.\n"
                "---------------------\n"
                "{context_str}\n"
                "---------------------\n"
                "Given the context information above I want you to think step by step to answer the query in a crisp manner, incase case you don't know the answer say 'I don't know!'.\n"
                "Query: {query_str}\n"
                "Answer: "
            )
            qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)

            self._query_engine = index.as_query_engine(streaming=True)
            self._query_engine.update_prompts(
                {"response_synthesizer:text_qa_template": qa_prompt_tmpl}
            )

    @rx.event(background=True)
    async def process_question(self, form_data: dict):
        """Process a question and update the chat.

        Background event: all state mutations happen inside `async with
        self`, and each `yield` pushes a partial update to the client so the
        answer streams in visibly.
        """
        # Ignore submits while busy, empty questions, or before any upload.
        if self.processing or not form_data.get("question") or not self._query_engine:
            return

        question = form_data["question"]

        async with self:
            self.processing = True
            self.chats[self.current_chat].append(QA(question=question, answer=""))
            yield
            await asyncio.sleep(0.1)

        # Get streaming response from LlamaIndex
        streaming_response = self._query_engine.query(question)
        answer = ""

        # Process the streaming response
        async with self:
            for chunk in streaming_response.response_gen:
                answer += chunk
                # Mutate the last QA in place, then reassign `chats` so the
                # framework registers the change.
                self.chats[self.current_chat][-1].answer = answer
                self.chats = self.chats
                yield
                await asyncio.sleep(0.05)

            self.processing = False
            yield

    async def handle_upload(self, files: List[rx.UploadFile]):
        """Handle file upload and processing.

        Writes the PDF into a backend temp dir, keeps a base64 copy for the
        preview pane, and (re)builds the LlamaIndex engine.
        """
        if not files:
            # NOTE(review): this handler is a generator; the early return
            # relies on Reflex applying the mutation when the generator is
            # consumed — confirm the status message actually renders.
            self.upload_status = "No file uploaded!"
            return
        yield

        self.uploading = True
        yield

        file = files[0]
        upload_data = await file.read()

        # Create temporary directory if not exists
        if self._temp_dir is None:
            self._temp_dir = tempfile.mkdtemp()

        outfile = Path(self._temp_dir) / file.filename
        self.pdf_filename = file.filename

        with outfile.open("wb") as file_object:
            file_object.write(upload_data)

        # Base64 encode the PDF content (for the inline preview pane)
        base64_pdf = base64.b64encode(upload_data).decode("utf-8")
        self.base64_pdf = base64_pdf

        # Setup LlamaIndex over everything uploaded so far
        self.setup_llamaindex()

        self.knowledge_base_files.append(self.pdf_filename)
        self.upload_status = f"Added {self.pdf_filename} to knowledge base"

        self.uploading = False
        yield

    def create_new_chat(self):
        """Create a new chat and switch the view to it."""
        self.chats.append([])
        self.current_chat = len(self.chats) - 1
197 |
198 |
def pdf_preview() -> rx.Component:
    """Inline preview pane shown once a PDF has been uploaded.

    NOTE(review): the f-string below appears to have lost its body (the
    embedded-viewer HTML, presumably fed from State.base64_pdf) — confirm
    against the original file before shipping.
    """
    return rx.box(
        rx.heading("PDF Preview", size="4", margin_bottom="1em"),
        rx.cond(
            State.base64_pdf != "",
            rx.html(
                f"""
            """
            ),
            rx.text("No PDF uploaded yet", color="red"),
        ),
        width="100%",
        margin_top="1em",
        border_radius="md",
        overflow="hidden",
    )
222 |
223 |
def message(qa: QA) -> rx.Component:
    """Render one exchange: question bubble (right), answer bubble (left)."""

    def _bubble(text, palette, align, **spacing):
        # One chat bubble; `palette` selects the Radix color scale.
        return rx.box(
            rx.markdown(
                text,
                background_color=rx.color(palette, 4),
                color=rx.color(palette, 12),
                **message_style,
            ),
            text_align=align,
            **spacing,
        )

    return rx.box(
        _bubble(qa.question, "mauve", "right", margin_top="1em"),
        _bubble(qa.answer, "accent", "left", padding_top="1em"),
        width="100%",
    )
249 |
250 |
def chat() -> rx.Component:
    """Scrollable column holding every message of the active conversation."""
    messages = rx.box(
        rx.foreach(State.chats[State.current_chat], message),
        width="100%",
    )
    return rx.vstack(
        messages,
        py="8",
        flex="1",
        width="100%",
        max_width="50em",
        padding_x="4px",
        align_self="center",
        overflow_y="auto",
        # Leave room so the sticky action bar never covers the last message.
        padding_bottom="5em",
    )
264 |
265 |
def action_bar() -> rx.Component:
    """Sticky bottom bar with the question input and the send button."""
    question_input = rx.input(
        placeholder="Ask about the PDF...",
        id="question",
        width=["15em", "20em", "45em", "50em", "50em", "50em"],
        disabled=State.processing,
        border_color=rx.color("mauve", 6),
        _focus={"border_color": rx.color("mauve", 8)},
        background_color="transparent",
    )
    send_button = rx.button(
        # Show a spinner in place of the label while a question is running.
        rx.cond(
            State.processing,
            loading_icon(height="1em"),
            rx.text("Send"),
        ),
        type_="submit",
        disabled=State.processing,
        bg=rx.color("accent", 9),
        color="white",
        _hover={"bg": rx.color("accent", 10)},
    )
    question_form = rx.form(
        rx.hstack(
            question_input,
            send_button,
            align_items="center",
            spacing="3",
        ),
        on_submit=State.process_question,
        width="100%",
        reset_on_submit=True,
    )
    return rx.box(
        rx.vstack(
            question_form,
            align_items="center",
            width="100%",
        ),
        position="sticky",
        bottom="0",
        left="0",
        padding_y="16px",
        backdrop_filter="auto",
        backdrop_blur="lg",
        border_top=f"1px solid {rx.color('mauve', 3)}",
        background_color=rx.color("mauve", 2),
        width="100%",
    )
313 |
314 |
def sidebar() -> rx.Component:
    """Fixed left panel: PDF dropzone, ingest button, preview, file list."""
    dropzone = rx.upload(
        rx.vstack(
            rx.button(
                "Browse files",
                **UPLOAD_BUTTON_STYLE,
            ),
            rx.text(
                "Drag and drop PDF file here",
                font_size="sm",
                color=rx.color("mauve", 11),
            ),
        ),
        border=f"1px dashed {rx.color('mauve', 6)}",
        padding="2em",
        border_radius="md",
        accept={".pdf": "application/pdf"},
        max_files=1,
        multiple=False,
    )
    ingest_button = rx.button(
        "Add to Knowledge Base",
        on_click=State.handle_upload(rx.upload_files()),
        loading=State.uploading,
        **UPLOAD_BUTTON_STYLE,
    )
    # Filenames already ingested into the knowledge base.
    file_list = rx.foreach(
        State.knowledge_base_files,
        lambda file: rx.box(
            rx.text(file, font_size="sm"),
            padding="0.5em",
            border_radius="md",
            width="100%",
        ),
    )
    return rx.box(
        rx.vstack(
            rx.heading("PDF Upload", size="6", margin_bottom="1em"),
            dropzone,
            ingest_button,
            rx.cond(
                State.pdf_filename != "",
                pdf_preview(),
            ),
            file_list,
            rx.text(State.upload_status, color=rx.color("mauve", 11), font_size="sm"),
            align_items="stretch",
            height="100%",
        ),
        **SIDEBAR_STYLE,
    )
364 |
--------------------------------------------------------------------------------
/deepseek_r1_rag/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex>=0.7.11
2 | ollama
3 | llama_index
4 | llama-index-embeddings-huggingface
5 | llama-index-llms-ollama
6 |
--------------------------------------------------------------------------------
/deepseek_r1_rag/rxconfig.py:
--------------------------------------------------------------------------------
import reflex as rx


# Reflex project configuration: `app_name` must match the package that
# contains the app module (here, `chat`).
config = rx.Config(
    app_name="chat",
)
7 |
--------------------------------------------------------------------------------
/deepseek_r1_rag/uploaded_files/Attention is all you need.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/deepseek_r1_rag/uploaded_files/Attention is all you need.pdf
--------------------------------------------------------------------------------
/multi_modal_ai_agent/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_ai_agent/.DS_Store
--------------------------------------------------------------------------------
/multi_modal_ai_agent/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[cod]
2 | __pycache__/
3 | *.db
4 | .web
5 | assets/external/
6 |
--------------------------------------------------------------------------------
/multi_modal_ai_agent/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/multi_modal_ai_agent/README.md:
--------------------------------------------------------------------------------
1 | # Multimodal AI Agent with Gemini and Reflex
2 |
3 | An intelligent video analysis agent that also performs web searches. This application, built with Reflex and Google's Gemini 2.0 Flash, enables users to upload videos and ask questions about them. By combining advanced video analysis with web research capabilities, it provides comprehensive, context-aware responses.
4 |
5 | ## Features
6 | - **Video Upload**: Supports multiple formats, including MP4, MOV, and AVI.
7 | - **Real-Time Video Analysis**: Utilizes Google's Gemini 2.0 Flash model.
8 | - **Web Research Integration**: Powered by DuckDuckGo for enhanced context.
9 | - **Interactive Q&A System**: Allows dynamic interaction for tailored responses.
10 | - **Responsive UI**: Clean and user-friendly interface for seamless usage.
11 |
12 | ## Installation
13 |
14 | 1. **Clone the repository**:
15 | ```bash
16 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
17 | cd reflex-llm-examples/multi_modal_ai_agent
18 | ```
19 |
20 | 2. **Set up a virtual environment** (optional but recommended):
21 | ```bash
22 | python -m venv venv
23 | source venv/bin/activate # On Windows, use `venv\Scripts\activate`
24 | ```
25 |
26 | 3. **Install dependencies**:
27 | ```bash
28 | pip install -r requirements.txt
29 | ```
30 |
31 | 4. **Obtain the Google Gemini API Key**:
32 | - Sign up for a Google AI Studio account and generate an API key [here](https://aistudio.google.com/apikey).
33 | - Set up your API key as an environment variable:
34 | ```bash
35 | export GOOGLE_API_KEY=your_api_key_here
36 | ```
37 |
38 | 5. **Run the Reflex application**:
39 | Start the Reflex server with:
40 | ```bash
41 | reflex run
42 | ```
43 |
44 | ## Usage
45 |
46 | 1. **Upload a Video**: Use the drag-and-drop interface to upload your video.
47 | 2. **Ask a Question**: Enter your query about the video in the provided text area.
48 | 3. **Analyze & Research**: Click the "Analyze & Research" button to process the video and generate AI-driven insights.
49 | 4. **View Results**: Access detailed responses combining video analysis and web research.
--------------------------------------------------------------------------------
/multi_modal_ai_agent/__pycache__/rxconfig.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_ai_agent/__pycache__/rxconfig.cpython-310.pyc
--------------------------------------------------------------------------------
/multi_modal_ai_agent/__pycache__/rxconfig.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_ai_agent/__pycache__/rxconfig.cpython-311.pyc
--------------------------------------------------------------------------------
/multi_modal_ai_agent/multi_modal_agent/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_ai_agent/multi_modal_agent/__init__.py
--------------------------------------------------------------------------------
/multi_modal_ai_agent/multi_modal_agent/__pycache__/__init__.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_ai_agent/multi_modal_agent/__pycache__/__init__.cpython-310.pyc
--------------------------------------------------------------------------------
/multi_modal_ai_agent/multi_modal_agent/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_ai_agent/multi_modal_agent/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/multi_modal_ai_agent/multi_modal_agent/__pycache__/chat.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_ai_agent/multi_modal_agent/__pycache__/chat.cpython-310.pyc
--------------------------------------------------------------------------------
/multi_modal_ai_agent/multi_modal_agent/__pycache__/multi_modal_agent.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_ai_agent/multi_modal_agent/__pycache__/multi_modal_agent.cpython-311.pyc
--------------------------------------------------------------------------------
/multi_modal_ai_agent/multi_modal_agent/__pycache__/news_agent.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_ai_agent/multi_modal_agent/__pycache__/news_agent.cpython-311.pyc
--------------------------------------------------------------------------------
/multi_modal_ai_agent/multi_modal_agent/__pycache__/state.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_ai_agent/multi_modal_agent/__pycache__/state.cpython-310.pyc
--------------------------------------------------------------------------------
/multi_modal_ai_agent/multi_modal_agent/__pycache__/utils.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_ai_agent/multi_modal_agent/__pycache__/utils.cpython-311.pyc
--------------------------------------------------------------------------------
/multi_modal_ai_agent/multi_modal_agent/multi_modal_agent.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | import google.generativeai as genai
3 | import asyncio
4 |
5 |
class State(rx.State):
    """State for the multimodal AI agent application.

    Holds upload/processing flags, the uploaded video's server-side path,
    the user's question, and the model's markdown answer.
    """

    processing: bool = False  # True while the model call is in flight
    upload_status: str = ""  # feedback line under the Upload button
    result: str = ""  # markdown answer rendered in the UI
    video_filename: str = ""  # original filename (used for the player URL)
    video: str = ""  # server-side path of the saved upload
    question: str = ""  # bound to the question textarea

    @rx.event
    async def handle_upload(self, files: list[rx.UploadFile]):
        """Save the selected video into Reflex's upload directory."""
        if not files:
            self.upload_status = "Please select a video file."
            return

        try:
            file = files[0]
            upload_data = await file.read()

            filename = file.filename
            outfile = rx.get_upload_dir() / filename

            # Save the file
            with outfile.open("wb") as file_object:
                file_object.write(upload_data)

            self.video_filename = filename
            # Fix: store as str — the var is declared `video: str`; keeping a
            # Path object here breaks state serialization.
            self.video = str(outfile)
            self.upload_status = "Video uploaded successfully!"

        except Exception as e:
            self.upload_status = f"Error uploading video: {str(e)}"

    @rx.event(background=True)
    async def analyze_video(self):
        """Upload the video to Gemini and answer the user's question.

        Background event: every state mutation happens inside
        `async with self`; `yield` pushes the "Analyzing..." update early.
        """
        if not self.question:
            async with self:
                self.result = "Please enter your question."
            return

        if not self.video:
            async with self:
                self.result = "Please upload a video first."
            return

        async with self:
            self.processing = True
            self.result = "Analyzing Video..."
            yield
        await asyncio.sleep(1)

        try:
            # Fix: use the module-level File API of google.generativeai (the
            # package this file imports and requirements.txt installs). The
            # previous genai.Client()/client.files/client.models calls belong
            # to the separate `google-genai` SDK and raise AttributeError.
            video_file = genai.upload_file(str(self.video))
            while video_file.state.name == "PROCESSING":
                # Poll until Gemini finishes server-side processing.
                await asyncio.sleep(2)
                video_file = genai.get_file(video_file.name)

            model = genai.GenerativeModel("gemini-2.0-flash")
            # Fix: answer the user's actual question instead of the
            # hard-coded "Describe this video." prompt.
            response = model.generate_content([video_file, self.question])

            async with self:
                self.result = response.text
                self.processing = False

        except Exception as e:
            async with self:
                self.processing = False
                self.result = f"An error occurred: {str(e)}"
85 |
86 |
def index() -> rx.Component:
    """Page layout: gradient header, upload panel, then video + Q&A panel."""
    header = rx.el.div(
        rx.el.h1(
            "Multimodal AI Agent 🕵️‍♀️ 💬",
            class_name="text-5xl font-bold text-white mb-4",
        ),
        class_name="w-full p-12 bg-gradient-to-r from-blue-600 to-blue-800 rounded-lg shadow-lg mb-8 text-center",
    )
    dropzone = rx.upload(
        rx.el.div(
            rx.el.button(
                "Select a Video File",
                class_name="bg-white text-blue-600 px-6 py-3 rounded-lg font-semibold border-2 border-blue-600 hover:bg-blue-50 transition-colors",
            ),
            rx.el.p(
                "Drag and drop or click to select",
                class_name="text-gray-500 mt-2",
            ),
            class_name="text-center",
        ),
        accept={".mp4", ".mov", ".avi"},
        max_files=1,
        class_name="border-2 border-dashed border-gray-300 rounded-lg p-8 bg-gray-50 hover:bg-gray-100 transition-colors",
        id="upload1",
    )
    # Echo the selected filename (or an empty placeholder) under the dropzone.
    selected_file_label = rx.cond(
        rx.selected_files("upload1"),
        rx.el.p(
            rx.selected_files("upload1")[0], class_name="text-gray-600 mt-2"
        ),
        rx.el.p("", class_name="mt-2"),
    )
    upload_section = rx.el.div(
        dropzone,
        selected_file_label,
        rx.el.button(
            "Upload",
            on_click=State.handle_upload(rx.upload_files(upload_id="upload1")),
            class_name="w-full bg-blue-600 text-white px-6 py-3 rounded-lg font-semibold hover:bg-blue-700 transition-colors mt-4",
        ),
        rx.el.p(State.upload_status, class_name="text-gray-600 mt-2"),
        class_name="mb-8 p-6 bg-white rounded-lg shadow-lg",
    )
    # Shown only after the agent has produced an answer.
    response_panel = rx.cond(
        State.result != "",
        rx.el.div(
            rx.el.h2(
                "🤖 Agent Response",
                class_name="text-2xl font-bold text-gray-800 mb-4",
            ),
            rx.markdown(
                State.result, class_name="prose prose-blue max-w-none"
            ),
            class_name="mt-8 p-6 bg-white rounded-lg shadow-lg",
        ),
    )
    # Shown only after a video has been uploaded.
    analysis_section = rx.cond(
        State.video_filename != "",
        rx.el.div(
            rx.el.div(
                rx.video(
                    url=rx.get_upload_url(State.video_filename),
                    controls=True,
                    class_name="w-full rounded-lg shadow-lg",
                ),
                class_name="mb-6",
            ),
            rx.el.textarea(
                placeholder="Ask any question related to the video - the AI Agent will analyze it",
                value=State.question,
                on_change=State.set_question,
                class_name="w-full p-4 border-2 border-gray-300 rounded-lg focus:border-blue-600 focus:ring-1 focus:ring-blue-600 h-32 resize-none",
            ),
            rx.el.button(
                "Analyze & Research",
                on_click=State.analyze_video,
                loading=State.processing,
                class_name="w-full bg-blue-600 text-white px-6 py-3 rounded-lg font-semibold hover:bg-blue-700 transition-colors mt-4",
            ),
            response_panel,
            class_name="space-y-6",
        ),
    )
    return rx.el.div(
        rx.el.div(
            header,
            upload_section,
            analysis_section,
            class_name="max-w-3xl mx-auto px-4",
        ),
        class_name="min-h-screen bg-gray-50 py-12",
    )
176 |
177 |
# Instantiate the app and serve the index page at the root route.
app = rx.App()
app.add_page(index)
180 |
--------------------------------------------------------------------------------
/multi_modal_ai_agent/multi_modal_agent/multi_modal_agent_agno.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | import google.generativeai as genai
3 | from agno.agent import Agent
4 | from agno.models.google import Gemini
5 | from agno.tools.duckduckgo import DuckDuckGoTools
6 | import asyncio
7 |
8 |
class State(rx.State):
    """State for the multimodal AI agent application (agno variant).

    Holds upload/processing flags, the uploaded video's server-side path,
    the user's question, and the agent's markdown answer.
    """

    processing: bool = False  # True while the agent is running
    upload_status: str = ""  # feedback line under the Upload button
    result: str = ""  # markdown answer rendered in the UI
    video_filename: str = ""  # original filename (used for the player URL)
    video: str = ""  # server-side path of the saved upload
    question: str = ""  # bound to the question textarea

    async def handle_upload(self, files: list[rx.UploadFile]):
        """Save the selected video into Reflex's upload directory."""
        if not files:
            # Fix: surface feedback instead of failing silently (matches the
            # non-agno variant of this app).
            self.upload_status = "Please select a video file."
            return

        try:
            file = files[0]
            upload_data = await file.read()

            filename = file.filename
            outfile = rx.get_upload_dir() / filename

            # Save the file
            with outfile.open("wb") as file_object:
                file_object.write(upload_data)

            self.video_filename = filename
            # Fix: store as str — the var is declared `video: str`; keeping a
            # Path object here breaks state serialization.
            self.video = str(outfile)
            self.upload_status = "Video uploaded successfully!"

        except Exception as e:
            self.upload_status = f"Error uploading video: {str(e)}"

    @rx.event(background=True)
    async def analyze_video(self):
        """Run the agno agent over the uploaded video plus web research.

        Background event: every state mutation happens inside
        `async with self`; `yield` pushes the busy state to the client early.
        """
        if not self.question:
            async with self:
                self.result = "Please enter your question."
            return

        if not self.video:
            # Fix: guard against analyzing before any upload has completed
            # (the sibling non-agno variant already has this check).
            async with self:
                self.result = "Please upload a video first."
            return

        async with self:
            self.processing = True
            yield
        await asyncio.sleep(1)

        try:
            agent = Agent(
                name="Multimodal Video Analyst",
                model=Gemini(id="gemini-2.0-flash-exp"),
                tools=[DuckDuckGoTools()],
                markdown=True,
            )

            # Upload to Gemini and poll until server-side processing finishes.
            video_file = genai.upload_file(str(self.video))
            while video_file.state.name == "PROCESSING":
                await asyncio.sleep(2)
                video_file = genai.get_file(video_file.name)

            prompt = f"""
            First analyze this video and then answer the following question using both
            the video analysis and web research: {self.question}
            Provide a comprehensive response focusing on practical, actionable information.
            """

            result = agent.run(prompt, videos=[video_file])

            async with self:
                self.result = result.content
                self.processing = False

        except Exception as e:
            async with self:
                self.processing = False
                self.result = f"An error occurred: {str(e)}"
84 |
85 |
# Accent color used for the upload button styling.
color = "rgb(107,99,246)"
88 |
def index():
    """Page layout: heading, upload controls, then video + analysis panel."""
    upload_section = rx.vstack(
        rx.upload(
            rx.vstack(
                rx.button(
                    "Select a Video File",
                    color=color,
                    bg="white",
                    border=f"1px solid {color}",
                ),
                rx.text("Drag and drop or click to select"),
            ),
            accept={".mp4", ".mov", ".avi"},
            max_files=1,
            border="1px dashed",
            padding="20px",
            id="upload1",
        ),
        # Echo the selected filename (or nothing) under the dropzone.
        rx.cond(
            rx.selected_files("upload1"),
            rx.text(rx.selected_files("upload1")[0]),
            rx.text(""),
        ),
        rx.button(
            "Upload",
            on_click=State.handle_upload(rx.upload_files(upload_id="upload1")),
        ),
        rx.text(State.upload_status),
        spacing="4",
    )
    # Shown only after a video has been uploaded.
    analysis_section = rx.cond(
        State.video_filename != "",
        rx.vstack(
            rx.video(
                url=rx.get_upload_url(State.video_filename),
                width="50%",
                controls=True,
            ),
            rx.text_area(
                placeholder="Ask any question related to the video - the AI Agent will analyze it and search the web if needed",
                value=State.question,
                on_change=State.set_question,
                width="600px",
                size="2",
            ),
            rx.button(
                "Analyze & Research",
                on_click=State.analyze_video,
                loading=State.processing,
            ),
            rx.cond(
                State.result != "",
                rx.vstack(
                    rx.heading("🤖 Agent Response", size="4"),
                    rx.markdown(State.result),
                ),
            ),
            width="100%",
            spacing="4",
        ),
    )
    return rx.container(
        rx.vstack(
            rx.heading("Multimodal AI Agent 🕵️‍♀️ 💬", size="8", mb="6"),
            upload_section,
            analysis_section,
            width="100%",
            max_width="800px",
            spacing="6",
            padding="4",
        ),
        max_width="600px",
        margin="auto",
        padding="40px",
    )
165 |
166 |
# Instantiate the app and serve the index page at the root route.
app = rx.App()
app.add_page(index)
169 |
--------------------------------------------------------------------------------
/multi_modal_ai_agent/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex==0.7.11
2 | phidata
3 | google-generativeai
4 | duckduckgo-search
--------------------------------------------------------------------------------
/multi_modal_ai_agent/rxconfig.py:
--------------------------------------------------------------------------------
import reflex as rx

# Reflex project configuration: `app_name` must match the app package
# (here, `multi_modal_agent`).
config = rx.Config(
    app_name="multi_modal_agent",
)
6 |
--------------------------------------------------------------------------------
/multi_modal_medical_agent/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/multi_modal_medical_agent/README.md:
--------------------------------------------------------------------------------
1 | # AI Medical Agent using Gemini 2.0 Flash
2 |
3 | The **AI Medical Agent** leverages **Reflex**, **Agno**, and **Gemini 2.0 Flash** to provide detailed medical analysis on the provided images. It enables users to obtain comprehensive insights into medical conditions by analyzing images and simultaneously searching the web for additional information. The app generates detailed reports that assist in understanding possible diagnoses, conditions, and medical recommendations.
4 |
5 | ## Note
6 |
7 | **Educational Purpose Only:** This project is intended for educational purposes only to demonstrate the power of AI in medical image analysis. It is **not** a substitute for professional medical advice, diagnosis, or treatment.
8 |
9 | ---
10 |
11 | ## Features
12 |
13 | - **Medical Image Analysis:** Analyze images to detect potential medical conditions and provide insights based on AI-powered evaluation.
14 | - **Symptom & Condition Insights:** Extract information related to possible conditions based on image analysis and web data retrieval.
15 | - **Gemini 2.0 Flash Integration:** Utilizes Google's Gemini 2.0 Flash for fast, accurate, and dynamic responses.
16 | - **Web Search & Data Aggregation:** Cross-checks image analysis results with trusted medical sources for enhanced accuracy.
17 | - **Detailed Medical Reports:** Generates in-depth analysis, including professional insights, condition explanations, and potential next steps.
18 |
19 | ---
20 |
21 | ## Getting Started
22 |
23 | ### 1. Clone the Repository
24 | Clone the GitHub repository to your local machine:
25 | ```bash
26 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
27 | cd reflex-llm-examples/multi_modal_medical_agent
28 | ```
29 |
30 | ### 2. Install Dependencies
31 | Install the required dependencies:
32 | ```bash
33 | pip install -r requirements.txt
34 | ```
35 |
36 | ### 3. Set Up Gemini API Key
37 | To use the Gemini 2.0 Flash model, you need a **Google API Key**. Follow these steps:
38 | Go to [Google AI Studio](https://aistudio.google.com/apikey), get your API Key, and set it as an environment variable:
39 | ```bash
40 | export GOOGLE_API_KEY="your-api-key-here"
41 | ```
42 |
43 | ### 4. Run the Reflex App
44 | Start the application:
45 | ```bash
46 | reflex run
47 | ```
48 |
49 | ---
50 |
51 | ## How It Works
52 |
53 | 1. **Medical Image Upload:** Upload an image for analysis.
54 | 2. **Gemini 2.0 Flash Processing:** The app analyzes the image and cross-references web data to provide a detailed report.
55 | 3. **Condition Insights:** The report includes potential conditions, symptom explanations, and possible next steps.
56 | 4. **Trusted Sources:** The app retrieves data from verified medical sources to enhance accuracy.
57 |
58 | ---
59 |
60 | ## Why AI Medical Agent?
61 |
62 | - **AI-Powered Medical Insights:** Provides advanced image analysis with AI to assist in medical understanding.
63 | - **Real-Time Data Access:** Retrieves relevant medical information from trusted sources for enhanced accuracy.
64 | - **User-Friendly:** Simple and intuitive experience, enabling easy image uploads and report generation.
65 |
66 | ---
67 |
68 | ## Contributing
69 |
70 | We welcome contributions! Feel free to open issues or submit pull requests to improve the app.
71 |
72 | ---
73 |
74 |
--------------------------------------------------------------------------------
/multi_modal_medical_agent/agent/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_medical_agent/agent/__init__.py
--------------------------------------------------------------------------------
/multi_modal_medical_agent/agent/agent.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | import asyncio
3 | from phi.agent import Agent
4 | from phi.model.google import Gemini
5 | from phi.tools.duckduckgo import DuckDuckGo
6 | import os
7 | from PIL import Image
8 |
9 | # Set Google API Key from environment
10 | GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
11 |
12 |
13 | class MedicalState(rx.State):
14 | """State for the medical imaging analysis application."""
15 |
16 | processing: bool = False
17 | upload_status: str = ""
18 | analysis_result: str = ""
19 | image_filename: str = ""
20 | _temp_image_path: str = ""
21 |
22 | query = """
23 | You are a highly skilled medical imaging expert with extensive knowledge in radiology and diagnostic imaging. Analyze the patient's medical image and structure your response as follows:
24 |
25 | ### 1. Image Type & Region
26 | - Specify imaging modality (X-ray/MRI/CT/Ultrasound/etc.)
27 | - Identify the patient's anatomical region and positioning
28 | - Comment on image quality and technical adequacy
29 |
30 | ### 2. Key Findings
31 | - List primary observations systematically
32 | - Note any abnormalities in the patient's imaging with precise descriptions
33 | - Include measurements and densities where relevant
34 | - Describe location, size, shape, and characteristics
35 | - Rate severity: Normal/Mild/Moderate/Severe
36 |
37 | ### 3. Diagnostic Assessment
38 | - Provide primary diagnosis with confidence level
39 | - List differential diagnoses in order of likelihood
40 | - Support each diagnosis with observed evidence from the patient's imaging
41 | - Note any critical or urgent findings
42 |
43 | ### 4. Patient-Friendly Explanation
44 | - Explain the findings in simple, clear language that the patient can understand
45 | - Avoid medical jargon or provide clear definitions
46 | - Include visual analogies if helpful
47 | - Address common patient concerns related to these findings
48 |
49 | ### 5. Research Context
50 | IMPORTANT: Use the DuckDuckGo search tool to:
51 | - Find recent medical literature about similar cases
52 | - Search for standard treatment protocols
53 | - Provide a list of relevant medical links of them too
54 | - Research any relevant technological advances
55 | - Include 2-3 key references to support your analysis
56 |
57 | Format your response using clear markdown headers and bullet points. Be concise yet thorough.
58 | """
59 |
60 | @rx.event
61 | async def handle_upload(self, files: list[rx.UploadFile]):
62 | """Handle medical image upload."""
63 | if not files:
64 | return
65 |
66 | try:
67 | file = files[0]
68 | upload_data = await file.read()
69 |
70 | filename = file.filename
71 | outfile = rx.get_upload_dir() / filename
72 |
73 | # Save the file
74 | with outfile.open("wb") as file_object:
75 | file_object.write(upload_data)
76 |
77 | self.image_filename = filename
78 | self._temp_image_path = str(outfile)
79 | self.upload_status = "Image uploaded successfully!"
80 |
81 | except Exception as e:
82 | self.upload_status = f"Error uploading image: {str(e)}"
83 |
84 | @rx.var
85 | def medical_agent(self) -> Agent | None:
86 | if GOOGLE_API_KEY:
87 | return Agent(
88 | model=Gemini(api_key=GOOGLE_API_KEY, id="gemini-2.0-flash-exp"),
89 | tools=[DuckDuckGo()],
90 | markdown=True,
91 | )
92 | return None
93 |
94 | @rx.event(background=True)
95 | async def analyze_image(self):
96 | """Process image using medical AI agent."""
 97 |         if not self.medical_agent:
 98 |             async with self:
 99 |                 self.analysis_result = "API Key not configured in environment"
100 |             return
100 |
101 | async with self:
102 | self.processing = True
103 | self.analysis_result = ""
104 | yield
105 | await asyncio.sleep(1)
106 |
107 | try:
108 | # Process image
109 | with Image.open(self._temp_image_path) as img:
110 | width, height = img.size
111 | aspect_ratio = width / height
112 | new_width = 500
113 | new_height = int(new_width / aspect_ratio)
114 | resized_img = img.resize((new_width, new_height))
115 | resized_img.save(self._temp_image_path)
116 |
117 | # Run analysis
118 | result = self.medical_agent.run(self.query, images=[self._temp_image_path])
119 |
120 | async with self:
121 | self.analysis_result = result.content
122 | self.processing = False
123 |
124 | except Exception as e:
125 | async with self:
126 | self.processing = False
127 | self.analysis_result = f"An error occurred: {str(e)}"
128 | finally:
129 | if os.path.exists(self._temp_image_path):
130 | os.remove(self._temp_image_path)
131 |
132 |
133 | def medical_header() -> rx.Component:
134 | return rx.el.div(
135 | rx.el.div(
136 | rx.el.h1(
137 | "Medical Imaging Analysis Agent 🏥",
138 | class_name="text-3xl md:text-4xl font-bold text-transparent bg-clip-text bg-gradient-to-r from-blue-600 to-cyan-600",
139 | ),
140 | rx.el.p(
141 | "Advanced AI-powered medical image analysis using Gemini 2.0 Flash",
142 | class_name="text-gray-600 mt-2 text-lg",
143 | ),
144 | class_name="text-center space-y-2",
145 | ),
146 | class_name="w-full py-8 bg-gradient-to-r from-blue-50 to-cyan-50 border-b border-blue-100",
147 | )
148 |
149 |
150 | def upload_section() -> rx.Component:
151 | return rx.el.div(
152 | rx.el.div(
153 | rx.upload(
154 | rx.el.div(
155 | rx.el.div(
156 | rx.el.i(class_name="fas fa-upload text-3xl text-blue-500 mb-4"),
157 | rx.el.p(
158 | "Drop your medical image here",
159 | class_name="text-lg font-semibold text-gray-700 mb-2",
160 | ),
161 | rx.el.p(
162 | "or click to browse", class_name="text-sm text-gray-500"
163 | ),
164 | rx.el.p(
165 | "Supported formats: JPG, PNG",
166 | class_name="text-xs text-gray-400 mt-2",
167 | ),
168 | class_name="text-center",
169 | ),
170 | class_name="p-8 border-2 border-dashed border-blue-200 rounded-xl hover:border-blue-400 transition-colors duration-300",
171 | ),
172 | max_files=1,
173 |                 accept={"image/jpeg": [".jpg", ".jpeg"], "image/png": [".png"]},
174 | id="medical_upload",
175 | class_name="cursor-pointer",
176 | ),
177 | rx.cond(
178 | MedicalState.upload_status != "",
179 | rx.el.p(
180 | MedicalState.upload_status,
181 | class_name="mt-4 text-sm text-center text-blue-600",
182 | ),
183 | ),
184 | rx.el.button(
185 | "Upload Image",
186 | on_click=lambda: MedicalState.handle_upload(
187 | rx.upload_files(upload_id="medical_upload")
188 | ),
189 | class_name="mt-4 w-full py-2 px-4 bg-gradient-to-r from-blue-500 to-cyan-500 text-white rounded-lg hover:from-blue-600 hover:to-cyan-600 transition-all duration-300 shadow-md hover:shadow-lg",
190 | ),
191 | class_name="w-full max-w-md mx-auto",
192 | ),
193 | class_name="w-full bg-white p-6 rounded-xl shadow-md",
194 | )
195 |
196 |
197 | def analysis_section() -> rx.Component:
198 | return rx.el.div(
199 | rx.cond(
200 | MedicalState.image_filename != "",
201 | rx.el.div(
202 | rx.el.div(
203 | rx.el.img(
204 | src=rx.get_upload_url(MedicalState.image_filename),
205 | class_name="mx-auto my-4 max-w-2xl h-auto rounded-lg shadow-lg border border-gray-200",
206 | ),
207 | class_name="mb-6",
208 | ),
209 | rx.el.div(
210 | rx.cond(
211 | MedicalState.processing,
212 | rx.el.div(
213 | rx.el.div(
214 | class_name="w-8 h-8 border-4 border-blue-500 border-t-transparent rounded-full animate-spin"
215 | ),
216 | rx.el.p(
217 | "Analyzing image...",
218 | class_name="mt-2 text-sm text-gray-600",
219 | ),
220 | class_name="flex flex-col items-center justify-center p-4",
221 | ),
222 | rx.el.button(
223 | "Analyze Image",
224 | on_click=MedicalState.analyze_image,
226 |                             disabled=MedicalState.processing,
226 | class_name="w-full py-2 px-4 bg-gradient-to-r from-blue-500 to-cyan-500 text-white rounded-lg hover:from-blue-600 hover:to-cyan-600 transition-all duration-300 shadow-md hover:shadow-lg",
227 | ),
228 | ),
229 | ),
230 | rx.cond(
231 | MedicalState.analysis_result != "",
232 | rx.el.div(
233 | rx.markdown(
234 | MedicalState.analysis_result,
235 | class_name="mt-4 p-4 bg-blue-50 text-blue-700 rounded-lg border border-blue-100",
236 | ),
237 | ),
238 | ),
239 | class_name="space-y-4",
240 | ),
241 | ),
242 | class_name="w-full bg-white p-6 rounded-xl shadow-md mt-6",
243 | )
244 |
245 |
246 | def index() -> rx.Component:
247 | return rx.el.div(
248 | medical_header(),
249 | rx.el.div(
250 | rx.el.div(
251 | upload_section(),
252 | analysis_section(),
253 | class_name="max-w-4xl mx-auto px-4 space-y-6",
254 | ),
255 | class_name="py-8 bg-gray-50 min-h-screen",
256 | ),
257 | class_name="min-h-screen bg-gray-50",
258 | )
259 |
260 |
261 | app = rx.App()
262 | app.add_page(index)
263 |
--------------------------------------------------------------------------------
/multi_modal_medical_agent/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex==0.7.11
2 | agno
3 | google-generativeai
4 | duckduckgo-search
5 | phidata
6 | pillow
--------------------------------------------------------------------------------
/multi_modal_medical_agent/rxconfig.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 |
4 | config = rx.Config(
5 | app_name="agent",
6 | )
7 |
--------------------------------------------------------------------------------
/multi_modal_medical_agent/uploaded_files/image (16).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/multi_modal_medical_agent/uploaded_files/image (16).png
--------------------------------------------------------------------------------
/news_agent/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/news_agent/.DS_Store
--------------------------------------------------------------------------------
/news_agent/.env:
--------------------------------------------------------------------------------
1 | OPENAI_BASE_URL=http://localhost:11434/v1
2 | OPENAI_API_KEY=fake-key
--------------------------------------------------------------------------------
/news_agent/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.py[cod]
3 | assets/external/
4 | *.db
5 | .web
6 |
--------------------------------------------------------------------------------
/news_agent/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/news_agent/README.md:
--------------------------------------------------------------------------------
1 | # AI News Agent using OpenAI Swarm and Llama 3.2 running locally
2 | 
3 | An intelligent research and writing tool built with Reflex, OpenAI Swarm and Llama 3.2 running locally using Ollama. This allows users to research a topic and generate a concise, well-structured summary using advanced AI-powered research and writing agents.
4 |
5 | ## Features
6 | - Perform in-depth research on any topic
7 | - Utilize multiple AI agents for comprehensive information gathering
8 | - Generate structured, markdown-formatted research outputs
9 | - User-friendly web interface
10 | - Real-time processing with async functionality
11 |
12 | ## Installation
13 |
14 | 1. **Clone the repository**:
15 | ```bash
16 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
17 | cd reflex-llm-examples/news_agent
18 | ```
19 |
20 | 2. **Set up a virtual environment** (optional but recommended):
21 | ```bash
22 | python -m venv venv
23 | source venv/bin/activate # On Windows, use `venv\Scripts\activate`
24 | ```
25 |
26 | 3. **Install dependencies**:
27 | ```bash
28 | pip install -r requirements.txt
29 | ```
30 |
31 | 4. **Pull the Llama 3.2 model using Ollama**:
32 | ```bash
33 | # Pull the model
34 | ollama pull llama3.2
35 | ```
36 |
37 | 5. **Run the Reflex application**:
38 | Start the Reflex server with:
39 | ```bash
40 | reflex run
41 | ```
--------------------------------------------------------------------------------
/news_agent/news_agent/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/news_agent/news_agent/__init__.py
--------------------------------------------------------------------------------
/news_agent/news_agent/news_agent.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 | from duckduckgo_search import DDGS
3 | from swarm import Swarm, Agent
4 | from datetime import datetime
5 | from dotenv import load_dotenv
6 | import asyncio
7 |
8 | # Load environment variables
9 | load_dotenv()
10 |
11 | # Initialize Swarm and set model
12 |
13 | MODEL = "llama3.2"
14 | client = Swarm()
15 |
16 |
17 | def fetch_latest_news(topic):
18 | """Retrieve the latest news articles related to a given topic using DuckDuckGo."""
19 |
20 | query = f"{topic} news {datetime.now().strftime('%Y-%m')}"
21 |
22 | with DDGS() as search_engine:
23 | articles = search_engine.text(query, max_results=3)
24 |
25 | if articles:
26 | formatted_results = "\n\n".join(
27 | f"Title: {article['title']}\nURL: {article['href']}\nSummary: {article['body']}"
28 | for article in articles
29 | )
30 | return formatted_results
31 |
32 | return f"No news articles found on the topic: {topic}."
33 |
34 |
35 | # Create specialized agents
36 | search_agent = Agent(
37 | name="News Searcher",
38 | instructions="""
39 | You are an expert in news discovery. Your role involves:
40 | 1. Identifying the latest and most pertinent news articles on the provided topic.
41 | 2. Ensuring all sources are credible and trustworthy.
42 | 3. Presenting the raw search results in a clear and organized manner.
43 | """,
44 | functions=[fetch_latest_news],
45 | model=MODEL,
46 | )
47 |
48 | summary_agent = Agent(
49 | name="Comprehensive News Synthesizer",
50 | instructions="""
51 | You are a skilled news analyst, proficient in synthesizing multiple sources and crafting engaging, concise summaries. Your responsibilities include:
52 |
53 | **Synthesis and Analysis:**
54 | 1. Review the provided news articles thoroughly, extracting key insights and essential details.
55 | 2. Merge information from various sources into a unified narrative, ensuring factual accuracy and journalistic neutrality.
56 | 3. Highlight the main event, key players, significant data, and context to ensure a comprehensive overview.
57 |
58 | **Writing Style and Delivery:**
59 | 4. Write in a clear, active, and accessible tone that balances professionalism with readability.
60 | 5. Simplify complex concepts for a broader audience while maintaining depth and accuracy.
61 | 6. Use specifics over generalities, ensuring that each word adds value.
62 | 7. Craft a synthesis of the main points in a structured format:
63 | - **Main Event:** Clearly introduce the core topic or event.
64 | - **Key Details/Data:** Provide supporting information, such as statistics, facts, or context.
65 | - **Relevance/Implications:** Explain its significance and potential effects.
66 |
67 | **Deliverable:**
68 | Compose an engaging, multi-paragraph summary (300-400 words) with the following structure:
69 | - Start with the most critical development, including key players and their actions.
70 | - Follow with context and supporting details drawn from multiple sources.
71 | - Conclude with the immediate relevance, significance, and any potential short-term implications.
72 |
73 | **IMPORTANT NOTE:** Deliver the content as polished news analysis only. Avoid labels, introductions, or meta-comments. Begin directly with the story, ensuring neutrality and factual accuracy throughout.
74 | """,
75 | model=MODEL,
76 | )
77 |
78 |
79 | class State(rx.State):
80 | """Manage the application state."""
81 |
82 | topic: str = "AI Agents"
83 | raw_news: str = ""
84 | final_summary: str = ""
85 | is_loading: bool = False
86 | error_message: str = ""
87 |
88 | @rx.event(background=True)
89 | async def process_news(self):
90 | """Asynchronous news processing workflow using Swarm agents"""
91 | # Reset previous state
92 | async with self:
93 | self.is_loading = True
94 | self.error_message = ""
95 | self.raw_news = ""
96 | self.final_summary = ""
97 |
98 | yield
99 | await asyncio.sleep(1)
100 |
101 | try:
102 | # Search news using search agent
103 | search_response = client.run(
104 | agent=search_agent,
105 | messages=[
106 | {"role": "user", "content": f"Find recent news about {self.topic}"}
107 | ],
108 | )
109 | async with self:
110 | self.raw_news = search_response.messages[-1]["content"]
111 |
112 | # Synthesize and Generate summary using summary agent
113 | summary_response = client.run(
114 | agent=summary_agent,
115 | messages=[
116 | {
117 | "role": "user",
118 | "content": f"Synthesize these news articles and summarize the synthesis:\n{self.raw_news}",
119 | }
120 | ],
121 | )
122 |
123 | async with self:
124 | self.final_summary = summary_response.messages[-1]["content"]
125 | self.is_loading = False
126 |
127 | except Exception as e:
128 | async with self:
129 | self.error_message = f"An error occurred: {str(e)}"
130 | self.is_loading = False
131 |
132 | def update_topic(self, topic: str):
133 | """Update the search topic"""
134 | self.topic = topic
135 |
136 |
137 | def news_page() -> rx.Component:
138 | """Render the main news processing page"""
139 | return rx.box(
140 | rx.section(
141 | rx.heading("📰 AI News Agent", size="8"),
142 | rx.input(
143 | placeholder="Enter the news topic",
144 | value=State.topic,
145 | on_change=State.update_topic,
146 | width="300px",
147 | ),
148 | rx.button(
149 | "Process News",
150 | on_click=State.process_news,
151 | color_scheme="blue",
152 | loading=State.is_loading,
153 | width="fit-content",
154 | ),
155 | display="flex",
156 | flex_direction="column",
157 | gap="1rem",
158 | ),
159 | # Results Section
160 | rx.cond(
161 | State.final_summary != "",
162 | rx.vstack(
163 | rx.heading("📝 News Summary", size="4"),
164 | rx.markdown(State.final_summary),
165 | rx.button(
166 | "Copy the Summary",
167 | on_click=[
168 | rx.set_clipboard(State.final_summary),
169 | rx.toast.info("Summary copied"),
170 | ],
171 | ),
172 | spacing="4",
173 | width="100%",
174 | ),
175 | ),
176 | spacing="4",
177 | max_width="800px",
178 | margin="auto",
179 | padding="20px",
180 | )
181 |
182 |
183 | app = rx.App(theme=rx.theme(appearance="light", accent_color="blue"))
184 | app.add_page(news_page, route="/")
185 |
--------------------------------------------------------------------------------
/news_agent/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex==0.7.11
2 | git+https://github.com/openai/swarm.git
3 | duckduckgo-search
4 | python-dotenv
--------------------------------------------------------------------------------
/news_agent/rxconfig.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 |
4 | config = rx.Config(
5 | app_name="news_agent",
6 | )
7 |
--------------------------------------------------------------------------------
/open_deep_researcher/.gitignore:
--------------------------------------------------------------------------------
1 | *.db
2 | assets/external/
3 | *.py[cod]
4 | .web
5 | __pycache__/
6 |
--------------------------------------------------------------------------------
/open_deep_researcher/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/open_deep_researcher/README.md:
--------------------------------------------------------------------------------
1 | # OpenDeepResearcher
2 |
3 | This project is based on the [OpenDeepResearcher](https://github.com/mshumer/OpenDeepResearcher) repository and includes an AI researcher that continuously searches for information based on a user query until the system is confident that it has gathered all the necessary details. Built with [Reflex](https://reflex.dev/) for seamless user interaction. It makes use of several services to do so:
4 |
5 | ### Services Used:
6 | - **SERPAPI**: To perform Google searches.
7 | - **Jina**: To fetch and extract webpage content.
8 | - **Google Gemini**: To interact with a LLM for generating search queries, evaluating page relevance, and extracting context.
9 |
10 | ### Features:
11 | - **Iterative Research Loop**: The system refines its search queries iteratively until no further queries are required.
12 | - **Asynchronous Processing**: Searches, webpage fetching, evaluation, and context extraction are performed concurrently to improve speed.
13 | - **Duplicate Filtering**: Aggregates and deduplicates links within each round, ensuring that the same link isn’t processed twice.
14 | - **LLM-Powered Decision Making**: Uses Google Gemini to generate new search queries, decide on page usefulness, extract relevant context, and produce a final comprehensive report.
15 |
16 | ### Requirements:
17 | API access and keys for:
18 | - Google Gemini API
19 | - SERPAPI API
20 | - Jina API
21 |
22 | ### Setup:
23 |
24 | 1. **Clone or Open the Notebook**:
25 | - Download the notebook file or open it directly in Google Colab.
26 |
27 | 2. **Install nest_asyncio**:
28 | - Run the first cell to set up nest_asyncio.
29 |
30 | 3. **Configure API Keys**:
31 | - Replace the placeholder values in the notebook for `GOOGLE_GEMINI_API_KEY`, `SERPAPI_API_KEY`, and `JINA_API_KEY` with your actual API keys.
32 |
33 | ---
34 |
35 | ### Getting Started
36 |
37 | 1. **Clone the Repository**
38 | Clone the GitHub repository to your local machine:
39 | ```bash
40 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
41 | cd reflex-llm-examples/open_deep_researcher
42 | ```
43 |
44 | 2. **Install Dependencies**
45 | Install the required dependencies:
46 | ```bash
47 | pip install -r requirements.txt
48 | ```
49 |
50 | 3. **Set Up API Keys**
51 | To use the Gemini 2.0 Flash model, SERPAPI, and Jina, you need API keys for each service. Follow these steps:
52 |
53 | - **Google Gemini API Key**:
54 |    Go to [Google AI Studio](https://aistudio.google.com/apikey), get your API Key, and set it as an environment variable:
55 | ```bash
56 | export GOOGLE_API_KEY="your-api-key-here"
57 | ```
58 |
59 | - **SERPAPI API Key**:
60 | Go to [SERPAPI](https://serpapi.com/), sign up, and obtain your API key. Set it as an environment variable:
61 | ```bash
62 | export SERPAPI_API_KEY="your-serpapi-api-key-here"
63 | ```
64 |
65 | - **Jina API Key**:
66 | Go to [Jina AI](https://jina.ai/), create an account, and obtain your API key. Set it as an environment variable:
67 | ```bash
68 | export JINA_API_KEY="your-jina-api-key-here"
69 | ```
70 |
71 | 4. **Run the Reflex App**
72 | Start the application:
73 | ```bash
74 | reflex run
75 | ```
76 |
77 | ---
78 |
79 | ### How It Works:
80 | 1. **Input & Query Generation**:
81 | - The user enters a research topic, and Google Gemini generates up to four distinct search queries.
82 |
83 | 2. **Concurrent Search & Processing**:
84 | - **SERPAPI**: Each search query is sent to SERPAPI concurrently.
85 | - **Deduplication**: All retrieved links are aggregated and deduplicated within the current iteration.
86 | - **Jina & Google Gemini**: Each unique link is processed concurrently to fetch webpage content via Jina, evaluate its usefulness with Google Gemini, and extract relevant information if the page is deemed useful.
87 |
88 | 3. **Iterative Refinement**:
89 | - The system passes the aggregated context to Google Gemini to determine if further search queries are needed. New queries are generated if required; otherwise, the loop terminates.
90 |
91 | 4. **Final Report Generation**:
92 | - All gathered context is compiled and sent to Google Gemini to produce a final, comprehensive report addressing the original query.
93 |
94 | ---
--------------------------------------------------------------------------------
/open_deep_researcher/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex==0.7.11
2 | google-generativeai
3 | aiohttp
--------------------------------------------------------------------------------
/open_deep_researcher/researcher/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/open_deep_researcher/researcher/__init__.py
--------------------------------------------------------------------------------
/open_deep_researcher/rxconfig.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 |
4 | config = rx.Config(
5 | app_name="researcher",
6 | )
7 |
--------------------------------------------------------------------------------
/rag_app/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.db
3 | assets/external/
4 | .web
5 | *.py[cod]
6 | .idea/
7 | .DS_Store
8 |
--------------------------------------------------------------------------------
/rag_app/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/rag_app/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/rag_app/__init__.py
--------------------------------------------------------------------------------
/rag_app/assets/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/rag_app/assets/favicon.ico
--------------------------------------------------------------------------------
/rag_app/rag_app/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/rag_app/rag_app/__init__.py
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/rag_app/rag_app/rag/__init__.py
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/main.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 | from .shared.chat import chat_area
4 | from .shared.profile import app_profile_panel
5 |
6 |
def rag_ai_app() -> rx.tabs:
    """Top-level tabbed layout: profile form on tab 1, chat UI on tab 2."""
    profile_tab = rx.tabs.content(app_profile_panel(), value="1", bg=rx.color("gray", 2))
    chat_tab = rx.tabs.content(chat_area(), value="2", bg=rx.color("gray", 2))
    return rx.tabs.root(
        rx.tabs.list(
            rx.tabs.trigger("User Profile Data", value="1", flex="1"),
            rx.tabs.trigger("gemini-1.5-flash", value="2", flex="1"),
        ),
        profile_tab,
        chat_tab,
        default_value="1",
        width="100%",
    )
18 |
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/shared/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/rag_app/rag_app/rag/shared/__init__.py
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/shared/chat.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 | from ..state import State
4 | from ..style import Style, Typography
5 | from .style import ChatAreaStyle
6 |
7 |
def chat_message(data: dict[str, str]):
    """Render one chat entry: a bold role label above the message body."""
    role_label = rx.text(data["role"], size="1", weight="bold", **Typography.passive)
    body = rx.text(
        data["message"],
        size="2",
        weight="medium",
        line_height="1.75em",
        **Typography.active,
    )
    return rx.vstack(role_label, body, spacing="2", width="100%")
21 |
22 |
def chat_box():
    """Scrollable chat history stacked above the prompt input row."""
    history = rx.vstack(
        rx.foreach(State.chat_history, chat_message),
        **ChatAreaStyle.chat_session_style,
    )
    return rx.vstack(history, chat_prompt(), **ChatAreaStyle.chat_box)
33 |
34 |
def chat_prompt():
    """Prompt input and send button pinned to the bottom of the chat box."""
    prompt_input = rx.box(
        rx.input(value=State.prompt, on_change=State.set_prompt, width="100%"),
        width="100%",
    )
    send_button = rx.button(
        "send", on_click=State.send_prompt, loading=State.is_generating
    )
    return rx.hstack(
        prompt_input,
        send_button,
        width="100%",
        bottom="0",
        left="0",
        position="absolute",
        padding="1em 2em",
    )
48 |
49 |
def chat_area() -> rx.vstack:
    """Chat tab content: a model badge above the chat box."""
    model_badge = rx.badge(
        rx.text("Using Google's gemini-1.5-flash model.", size="1", weight="bold"),
        **ChatAreaStyle.model_tag,
    )
    return rx.vstack(model_badge, chat_box(), **Style.chat_area_base)
59 |
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/shared/navigation.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 | from ..style import Style, Typography
4 |
# App title shown on the left of the navigation bar.
title: rx.Component = rx.text("getFit.", weight="bold", size="1", **Typography.active)
# Light/dark color-mode toggle button shown on the right.
theme: rx.Component = rx.color_mode.button(size="1", **Typography.active)
7 |
8 |
def app_navigation_bar() -> rx.Component:
    """Top navigation bar: title on the left, theme toggle on the right.

    Note: the root component is an ``rx.badge`` used as a full-width bar,
    so the return annotation is the generic ``rx.Component`` (the previous
    ``rx.hstack`` annotation did not match the actual return value).
    """
    return rx.badge(
        rx.hstack(title, theme, **Style.navigation_child),
        **Style.navigation_parent,
    )
14 |
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/shared/profile.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 | from ..shared.profile_components import (
4 | profile_item_activity_stats,
5 | profile_item_physical_stats,
6 | profile_item_unit,
7 | )
8 | from ..state import State
9 | from ..style import Style
10 | from ..wrappers.item import app_profile_item_wrapper
11 |
# --- Physical stats row (height / weight / age inputs) -----------------------
physical_stats = rx.hstack(
    profile_item_physical_stats(State.height, "height", State.set_height),
    profile_item_physical_stats(State.weight, "weight", State.set_weight),
    profile_item_physical_stats(State.age, "age", State.set_age),
    spacing="6",
    padding="5px 0px",
    display="grid",
    width="100%",
    # Responsive grid: one column on small screens, three on large ones.
    grid_template_columns=[f"repeat({i}, minmax(0, 1fr))" for i in [1, 1, 1, 3, 3, 3]],
)

# Select options for the lifestyle / activity questions.
L1 = ["sedentary", "active", "moderately active", "very active", "super active"]
# Fix: pluralize "day"/"hour" rather than the trailing noun — the previous
# expressions rendered e.g. "2 day per weeks" and "2 hour per nights".
L2 = [f"{i + 1} day" + ("s" if i > 0 else "") + " per week" for i in range(7)]
L3 = ["light", "moderate", "intense"]
L4 = [f"{i + 1} hour" + ("s" if i > 0 else "") + " per night" for i in range(9)]

# --- Lifestyle & activity section: two rows of paired select boxes -----------
activity_stats = rx.vstack(
    rx.hstack(
        profile_item_activity_stats("Occupation Type", L1),
        profile_item_activity_stats("Exercise Frequency", L2),
        **Style.profile_activity_stat_hstack,
    ),
    rx.hstack(
        profile_item_activity_stats("Exercise Intensity", L3),
        profile_item_activity_stats("Sleep Pattern", L4),
        **Style.profile_activity_stat_hstack,
    ),
    width="100%",
    # Responsive gap: tighter on small breakpoints, wider on large ones.
    gap=["12px" if i <= 3 else "32px" for i in range(6)],
)


# Select options for the health-goal questions.
H1 = ["weight loss", "muscle gain", "maintenance"]
H2 = [f"{i} month" + ("s" if i > 1 else "") for i in [1, 3, 6, 12]]


# --- Health goals section ----------------------------------------------------
health_goals = rx.vstack(
    rx.hstack(
        profile_item_activity_stats("Primary Goal", H1),
        profile_item_activity_stats("Timeframe", H2),
        **Style.profile_activity_stat_hstack,
    ),
    width="100%",
    gap=["12px" if i <= 3 else "32px" for i in range(6)],
)

# Select options for the dietary questions.
D1 = [
    "vegetarian",
    "vegan",
    "gluten-free",
    "paleo",
    "ketogenic",
    "low-carb",
    "dairy-free",
    "none",
]

D2 = ["nuts", "shellfish", "dairy", "gluten", "soy", "eggs", "wheat", "none"]


# --- Dietary preferences section ---------------------------------------------
diet_restrictions = rx.vstack(
    rx.hstack(
        profile_item_activity_stats("Dietary Restrictions", D1),
        profile_item_activity_stats("Food Allergies", D2),
        **Style.profile_activity_stat_hstack,
    ),
    width="100%",
    gap=["12px" if i <= 3 else "32px" for i in range(6)],
)
81 |
82 |
def app_profile_panel() -> rx.vstack:
    """Profile tab content: a vertical stack of wrapped form sections."""
    sections = [
        app_profile_item_wrapper(
            "Select the unit of measurements for your data.",
            "Unit Measurement",
            [profile_item_unit()],
        ),
        app_profile_item_wrapper(
            "Enter details about your physical characteristics.",
            "Physical Stats",
            [physical_stats],
        ),
        app_profile_item_wrapper(
            "Help us understand your daily lifestyle and activity level.",
            "Lifestyle & Activity Level",
            [activity_stats],
        ),
        app_profile_item_wrapper(
            "Tell us about your health and fitness goals.",
            "Health Goals",
            [health_goals],
        ),
        app_profile_item_wrapper(
            "Let us know your dietary preferences and restrictions.",
            "Dietary Preferences",
            [diet_restrictions],
        ),
    ]
    return rx.vstack(
        rx.divider(height="2em", opacity="0"),
        rx.box(
            rx.vstack(*sections, **Style.profile_inner_content),
            padding=["0em 4em" if i >= 5 else "0em 2em" for i in range(6)],
            width="100%",
        ),
        **Style.profile_base,
    )
120 |
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/shared/profile_components.py:
--------------------------------------------------------------------------------
1 | from typing import Callable
2 |
3 | import reflex as rx
4 |
5 | from ..state import State
6 | from ..style import Typography
7 | from .style import ProfileComponentStyle
8 |
9 |
def profile_item_unit():
    """Radio group for choosing between metric and imperial units."""
    unit_choices = ["metric", "imperial"]
    return rx.radio(
        unit_choices,
        default_value="metric",
        on_change=State.set_units,
        direction="row",
    )
17 |
18 |
def profile_item_physical_stats(value: str, unit: str, fn: Callable):
    """Input row for one physical stat with its unit label on the right."""
    stat_input = rx.input(
        value=value,
        on_change=fn,
        **ProfileComponentStyle.profile_item_input,
    )
    unit_label = rx.hstack(
        rx.divider(orientation="vertical", width="2px", height="20px"),
        rx.text(
            State.units[State.selected_unit][unit],
            **ProfileComponentStyle.profile_item_input_unit,
            **Typography.passive,
        ),
    )
    return rx.hstack(
        stat_input,
        unit_label,
        **ProfileComponentStyle.profile_item_input_parent,
    )
36 |
37 |
def profile_item_activity_stats(title: str, options: list[str]):
    """Labelled select box for one lifestyle/goal/diet question."""
    heading = rx.text(title, size="1", weight="bold", **Typography.passive)
    selector = rx.select(
        options,
        placeholder="Select an option",
        # The question title doubles as the key stored in State.data.
        on_change=lambda e: State.set_profile_stats([title, e]),
        **ProfileComponentStyle.profile_item_activity,
    )
    return rx.vstack(heading, selector, spacing="2", width="100%")
50 |
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/shared/style.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 |
3 | import reflex as rx
4 |
5 |
@dataclass
class ProfileComponentStyle:
    """Style dictionaries for the profile form components.

    Each field is a keyword-argument dict splatted into a Reflex component.
    """

    # Outer hstack wrapping a stat input and its unit label.
    profile_item_input_parent: dict[str, str] = field(
        default_factory=lambda: {
            "align": "center",
            "border_bottom": f"1px solid {rx.color('slate')}",
            "width": "100%",
            "justify": "between",
        },
    )

    # Small fixed-width unit caption (e.g. "cm", "kg").
    profile_item_input_unit: dict[str, str] = field(
        default_factory=lambda: {
            "size": "1",
            "weight": "bold",
            "width": "30px",
            "align": "center",
        },
    )

    # Borderless text input used for height/weight/age.
    profile_item_input: dict[str, str] = field(
        default_factory=lambda: {
            "radius": "none",
            "variant": "soft",
            "outline": "none",
            "background": "none",
            "width": "100%",
        },
    )

    # Select box used by the activity/goal/diet questions.
    profile_item_activity: dict[str, str] = field(
        default_factory=lambda: {
            "size": "2",
            "variant": "soft",
            "width": "100%",
            "color_scheme": "gray",
        },
    )
44 |
45 |
@dataclass
class ChatAreaStyle:
    """Style dictionaries for the chat tab components."""

    # Relative container hosting the session list and the absolute prompt bar.
    chat_box: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "height": "100%",
            "border_radius": "8px",
            "overflow": "hidden",
            "position": "relative",
        },
    )

    # Absolute top banner naming the model in use.
    model_tag: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "height": "30px",
            "radius": "none",
            "padding": "0em 2em",
            "top": "0",
            "left": "0",
            "position": "absolute",
            "color_scheme": "gray",
            "background": rx.color("blue", 3),
            "z_index": "20",
        },
    )

    # Scrollable message list; the mask fades content out at the bottom edge.
    chat_session_style: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "padding": "3em 2em 4em 2em",
            "height": "85vh",
            "overflow": "auto",
            "mask": "linear-gradient(to bottom, hsl(0, 0%, 0%, 1) 85%, hsl(0, 0%, 0%, 0) 100%)",
        },
    )
82 |
83 |
# Shadow the classes with module-level singletons so call sites can splat
# e.g. **ChatAreaStyle.chat_box directly without instantiating.
ProfileComponentStyle: ProfileComponentStyle = ProfileComponentStyle()
ChatAreaStyle: ChatAreaStyle = ChatAreaStyle()
86 |
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/state.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 |
4 | import google.generativeai as genai
5 | import reflex as rx
6 |
# Google Generative AI API key, read from the KEY environment variable.
# NOTE(review): if KEY is unset this only fails at request time — confirm
# the variable is defined in the deployment environment.
key = os.getenv("KEY")
genai.configure(api_key=key)

# Sampling / decoding settings for the Gemini model.
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 250,
    "response_mime_type": "text/plain",
}

model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config,
)

# Single module-level chat session shared by every user of this process.
chat_session = model.start_chat()
24 |
25 |
class State(rx.State):
    """App state: the profile form data plus the Gemini chat UI state."""

    # ... unit of measurement
    units: dict[str, dict[str, str]] = {
        "metric": {"height": "cm", "weight": "kg", "age": "yrs"},
        "imperial": {"height": "ft", "weight": "lbs", "age": "yrs"},
    }
    selected_unit: str = "metric"
    # ... physical stats vars
    height: str
    weight: str
    age: str
    # ... form data (question title -> answer, plus height/weight/age)
    data: dict[str, str]
    # ... user prompt
    prompt: str
    # ... chat history as {"role": ..., "message": ...} dicts
    chat_history: list[dict[str, str]]
    # ... other chat vars
    is_generating: bool = False

    async def set_units(self, unit: str) -> None:
        """Switch between the metric and imperial unit systems."""
        self.selected_unit = unit

    async def set_profile_stats(self, info: list[str]) -> None:
        """Store one [title, value] answer and refresh the physical stats.

        Fix: the age entry previously appended the hard-coded string
        "years" (ignoring the unit table used for height and weight); it
        now uses the same selected-unit mapping as the other stats.
        """
        unit_table = self.units[self.selected_unit]
        self.data["height"], self.data["weight"], self.data["age"] = (
            self.height + unit_table["height"],
            self.weight + unit_table["weight"],
            self.age + unit_table["age"],
        )

        self.data[info[0]] = info[1]

    async def check_form_if_complete(self) -> bool:
        """True once all 8 expected profile fields have been filled in."""
        return len(self.data) == 8

    @rx.var
    def track_profil_stat_changes(self) -> dict[str, str]:
        """Keep the chat session's leading context message in sync with data.

        NOTE(review): this mutates the module-level chat session from a
        computed var. The name keeps its original spelling ("profil")
        because the frontend may reference it by that name.
        """
        if chat_session.history:
            chat_session.history.pop(0)

        chat_session.history.insert(
            0,
            {
                "role": "user",
                "parts": [
                    f"Take into account the following details when generating your answer {self.data}",
                ],
            },
        )

        return self.data

    async def send_prompt(self):
        """Send the prompt to Gemini and stream the reply into the history.

        Yields after each word so the UI re-renders incrementally.
        """
        if self.prompt:
            self.is_generating = True

            yield
            self.chat_history.append({"role": "user", "message": self.prompt})
            yield
            self.chat_history.append({"role": "gemini-1.5-flash", "message": ""})

            response = await self.send_message_to_chat(self.prompt)

            for word in response.split():
                for char in word:
                    self.chat_history[-1]["message"] += char
                    await asyncio.sleep(0.009)
                self.chat_history[-1]["message"] += " "
                yield

            self.prompt = ""
            self.is_generating = False

    async def send_message_to_chat(self, message):
        """Send one message on the shared chat session; return the reply text.

        NOTE(review): send_message is a blocking network call inside an
        async handler — confirm this is acceptable for your event loop.
        """
        response = chat_session.send_message(message)
        return response.text
102 |
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/style.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 |
3 | import reflex as rx
4 |
5 |
@dataclass
class Typography:
    """Text color presets keyed to the slate color scale."""

    # High-contrast color for primary text.
    active: dict[str, str] = field(
        default_factory=lambda: {"color": rx.color("slate", 12)},
    )

    # Muted color for secondary labels.
    passive: dict[str, str] = field(
        default_factory=lambda: {"color": rx.color("slate", 10)},
    )
15 |
16 |
@dataclass
class Style:
    """Layout style dictionaries shared across the rag app's pages."""

    # Full-viewport page base.
    base: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "min_height": "100vh",
        },
    )

    # Outer navigation bar, absolutely positioned across the top.
    navigation_parent: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "height": "30px",
            "radius": "none",
            "padding": "0em 5em",
            "top": "0",
            "left": "0",
            "position": "absolute",
        },
    )

    # Inner hstack of the navigation bar (title left, toggle right).
    navigation_child: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "justify": "between",
            "align": "center",
        },
    )

    # Responsive content grid: 1 column on small breakpoints, 2 on large.
    content: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "height": "100%",
            "align": "center",
            "display": "grid",
            "grid_template_columns": [
                f"repeat({i}, minmax(0, 1fr))" for i in [1, 1, 1, 1, 2, 2]
            ],
        },
    )

    # Profile tab root container.
    profile_base: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "height": "100%",
            "align": "center",
        },
    )

    # Inner profile column; the left border forms the section timeline.
    profile_inner_content: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "height": "100%",
            "position": "relative",
            "padding": "0em 24px",
            "spacing": "8",
            "border_left": f"1px solid {rx.color('gray', 6)}",
        },
    )

    # Responsive grid for paired select boxes: 1 column small, 2 large.
    profile_activity_stat_hstack: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "display": "grid",
            "grid_template_columns": [
                f"repeat({i}, minmax(0, 1fr))" for i in [1, 1, 2, 2, 2, 2]
            ],
            "gap": ["12px" if i <= 3 else "32px" for i in range(6)],
        },
    )

    # Chat tab root container.
    chat_area_base: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "height": "50vh",
            "align": "center",
            "padding": "0em 24px",
        },
    )
96 |
97 |
# Module-level singletons: shadow the classes so call sites can splat
# e.g. **Typography.active without instantiating anything themselves.
Typography: Typography = Typography()

Style: Style = Style()
101 |
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/wrappers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/rag_app/rag_app/rag/wrappers/__init__.py
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/wrappers/item.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 | from .style import AppProfileWrapperStyle
4 |
5 |
def blip():
    """Small circular info marker shown on the profile section timeline."""
    info_icon = rx.icon(tag="info", size=11, color=rx.color("slate", 10))
    return rx.box(info_icon, **AppProfileWrapperStyle.blip)
11 |
12 |
def app_profile_item_wrapper(
    title: str,
    date: str,
    components: list[rx.Component] | None = None,
):
    """Wrap one profile section: blip + caption + heading above *components*.

    Args:
        title: Section heading text.
        date: Small caption rendered next to the blip marker.
        components: Child components rendered under the heading.

    Fix: the default was a shared mutable list (``components=[]``); it is
    now ``None`` and replaced with a fresh empty list per call, which is
    backward compatible for all callers.
    """
    if components is None:
        components = []
    return rx.hstack(
        rx.vstack(
            rx.vstack(
                rx.hstack(
                    blip(),
                    rx.text(date, size="1", weight="bold", color=rx.color("slate", 10)),
                    align="center",
                ),
                rx.text(title, size="3", weight="bold", color=rx.color("slate", 11)),
                spacing="1",
            ),
            *components,
            width="100%",
        ),
        **AppProfileWrapperStyle.wrapper,
    )
34 |
--------------------------------------------------------------------------------
/rag_app/rag_app/rag/wrappers/style.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 |
3 | import reflex as rx
4 |
5 |
@dataclass
class AppProfileWrapperStyle:
    """Style dictionaries for the profile section wrapper."""

    # Circular marker centered on the timeline's left border.
    blip: dict[str, str] = field(
        default_factory=lambda: {
            "width": "24px",
            "height": "24px",
            "border_radius": "24px",
            "background": rx.color("gray", 3),
            "border": f"1.25px solid {rx.color('gray', 6)}",
            "position": "absolute",
            "left": "-12px",
            "align_items": "center",
            "justify_content": "center",
            "display": "flex",
        },
    )

    # Outer wrapper for one profile section.
    wrapper: dict[str, str] = field(
        default_factory=lambda: {
            "width": "100%",
            "align": "start",
            "justify": "start",
            "padding_left": "5px",
            "border_radius": "0px 5px 5px 0px",
        },
    )
32 |
33 |
# Singleton shadowing the class so call sites can splat its fields directly.
AppProfileWrapperStyle: AppProfileWrapperStyle = AppProfileWrapperStyle()
35 |
--------------------------------------------------------------------------------
/rag_app/rag_app/rag_app.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
3 | from .rag.main import rag_ai_app
4 |
5 |
# !update UI for easier demoing
def index():
    """Root page: renders the tabbed RAG app."""
    return rag_ai_app()


# App entry point; the index page is served at "/".
app = rx.App()
app.add_page(index)
13 |
--------------------------------------------------------------------------------
/rag_app/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex==0.7.11
2 | google-generativeai
--------------------------------------------------------------------------------
/rag_app/rxconfig.py:
--------------------------------------------------------------------------------
1 | import reflex as rx
2 |
# Reflex project configuration; app_name must match the package directory.
config = rx.Config(
    app_name="rag_app",
)
6 |
--------------------------------------------------------------------------------
/rag_with_docling/.gitignore:
--------------------------------------------------------------------------------
1 | .web
2 | assets/external/
3 | *.db
4 | *.py[cod]
5 | __pycache__/
6 |
--------------------------------------------------------------------------------
/rag_with_docling/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Pynecone, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/rag_with_docling/README.md:
--------------------------------------------------------------------------------
1 | # Chat with Excel Files using Docling and DeepSeek-R1
2 |
3 | Chat with Excel is an LLM app that utilizes **Retrieval Augmented Generation (RAG)** to enable meaningful interaction with Excel files. Powered by **DeepSeek-r1** running locally and [Docling Library](https://github.com/DS4SD/docling), the app provides accurate answers to your questions based on the content of the uploaded Excel file.
4 |
5 | ---
6 |
7 | ## Features
8 | - **Upload Excel Documents:** Easily upload any Excel document to start querying.
9 | - **Interactive Q&A:** Ask questions about the content of the uploaded Excel file.
10 | - **Accurate Answers:** Get precise responses using RAG and the DeepSeek-r1 model.
11 |
12 | ---
13 |
14 | ## Getting Started
15 |
16 | ### 1. Clone the Repository
17 | Clone the GitHub repository to your local machine:
18 | ```bash
19 | git clone https://github.com/reflex-dev/reflex-llm-examples.git
20 | cd reflex-llm-examples/rag_with_docling
21 | ```
22 |
23 | ### 2. Install Dependencies
24 | Install the required dependencies:
25 | ```bash
26 | pip install -r requirements.txt
27 | ```
28 |
29 | ### 3. Pull and Run DeepSeek-r1 Using Ollama
30 | Download and set up the DeepSeek-r1 model locally:
31 | ```bash
32 | ollama pull deepseek-r1:1.5b
33 | ```
34 |
35 | ### 4. Run the Reflex App
36 | Run the application to start chatting with your Excel File:
37 | ```bash
38 | reflex run
39 | ```
40 |
--------------------------------------------------------------------------------
/rag_with_docling/chat/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reflex-dev/reflex-llm-examples/0aa66e0026785cf26c5218c30b428df7681a5fe1/rag_with_docling/chat/__init__.py
--------------------------------------------------------------------------------
/rag_with_docling/chat/chat.py:
--------------------------------------------------------------------------------
1 | import os
2 | import uuid
3 | import tempfile
4 | import gc
5 | import pandas as pd
6 | from dataclasses import dataclass
7 | from typing import Optional
8 |
9 | import asyncio
10 |
11 | from llama_index.core import (
12 | Settings,
13 | VectorStoreIndex,
14 | SimpleDirectoryReader,
15 | PromptTemplate,
16 | )
17 | from llama_index.llms.ollama import Ollama
18 | from llama_index.embeddings.huggingface import HuggingFaceEmbedding
19 | from llama_index.readers.docling import DoclingReader
20 | from llama_index.core.node_parser import MarkdownNodeParser
21 | import reflex as rx
22 |
23 |
24 | # Data Models
@dataclass
class QA:
    """A question and answer pair."""

    # The user's question text.
    question: str
    # The model's answer; filled incrementally while streaming.
    answer: str
31 |
32 |
33 | # Custom Loading Icon
class LoadingIcon(rx.Component):
    """A custom loading icon component."""

    # Wraps the SpinningCircles spinner from the react-loading-icons package.
    library = "react-loading-icons"
    tag = "SpinningCircles"
    # Styling props forwarded to the underlying React component.
    stroke: rx.Var[str]
    stroke_opacity: rx.Var[str]
    fill: rx.Var[str]
    fill_opacity: rx.Var[str]
    stroke_width: rx.Var[str]
    speed: rx.Var[str]
    height: rx.Var[str]

    def get_event_triggers(self) -> dict:
        """Expose an on_change trigger that passes the status through as-is."""
        return {"on_change": lambda status: [status]}
49 |
50 |
# Convenience constructor for the spinner component.
loading_icon = LoadingIcon.create

# Styles
# Shared bubble styling for the question/answer markdown blocks.
message_style = dict(
    display="inline-block",
    padding="1em",
    border_radius="8px",
    max_width=["30em", "30em", "50em", "50em", "50em", "50em"],
)

# Fixed left sidebar that hosts the upload controls.
SIDEBAR_STYLE = dict(
    width="300px",
    height="100vh",
    position="fixed",
    left=0,
    top=0,
    padding="2em",
    background_color=rx.color("blue", 2),
    border_right=f"1px solid {rx.color('blue', 3)}",
)

# Shared styling for the upload / ingest buttons.
UPLOAD_BUTTON_STYLE = dict(
    color=rx.color("mauve", 12),
    bg="transparent",
    border=f"1px solid {rx.color('mauve', 6)}",
    margin_y="1em",
    _hover={"bg": rx.color("mauve", 3)},
)
79 |
80 |
81 | # Application State
class State(rx.State):
    """Application state: upload/indexing, chat sessions, and querying."""

    # One message list per chat session.
    chats: list[list[QA]] = [[]]
    uploaded_file: Optional[str] = None
    uploading: bool = False
    processing: bool = False
    current_chat: int = 0
    # Maps "{session_id}-{file_name}" -> query engine, to skip re-indexing.
    file_cache: dict = {}
    session_id: str = str(uuid.uuid4())
    upload_status: str = ""
    preview_df: list = []
    preview_columns: list = []

    # Backend-only (underscore prefix): the active llama-index query engine.
    _query_engine = None

    def load_llm(self):
        """Build the Ollama client for the locally served DeepSeek-r1 model."""
        return Ollama(model="deepseek-r1:1.5b", request_timeout=120.0)

    async def handle_upload(self, files: list[rx.UploadFile]):
        """Read an uploaded Excel file, index it, and cache a query engine.

        Also loads a preview of the sheet into preview_df/preview_columns.
        Progress and errors are surfaced via upload_status.

        Fix: the cache-hit branch previously never reset `uploading`,
        leaving the ingest button in a permanent loading state.
        """
        if not files:
            self.upload_status = "No file selected, Please select a file to continue"
            return
        yield

        self.uploading = True
        yield

        try:
            file = files[0]
            upload_data = await file.read()
            file_name = file.filename

            with tempfile.TemporaryDirectory() as temp_dir:
                file_path = os.path.join(temp_dir, file_name)
                with open(file_path, "wb") as f:
                    f.write(upload_data)

                file_key = f"{self.session_id}-{file_name}"

                if file_key not in self.file_cache:
                    reader = DoclingReader()
                    loader = SimpleDirectoryReader(
                        input_dir=temp_dir,
                        file_extractor={".xlsx": reader},
                    )
                    docs = loader.load_data()

                    llm = self.load_llm()
                    embed_model = HuggingFaceEmbedding(
                        model_name="BAAI/bge-large-en-v1.5", trust_remote_code=True
                    )

                    Settings.embed_model = embed_model
                    node_parser = MarkdownNodeParser()
                    index = VectorStoreIndex.from_documents(
                        documents=docs,
                        transformations=[node_parser],
                        show_progress=True,
                    )

                    Settings.llm = llm
                    query_engine = index.as_query_engine(streaming=True)

                    # Fix: prompt typo "incase case" -> "in case".
                    qa_prompt_tmpl_str = """
                    Context information is below.
                    ---------------------
                    {context_str}
                    ---------------------
                    Given the context information above I want you to think step by step to answer
                    the query in a highly precise and crisp manner focused on the final answer,
                    in case you don't know the answer say 'I don't know!'.
                    Query: {query_str}
                    Answer:
                    """
                    qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)
                    query_engine.update_prompts(
                        {"response_synthesizer:text_qa_template": qa_prompt_tmpl}
                    )

                    self.file_cache[file_key] = query_engine
                    self._query_engine = query_engine
                    df = pd.read_excel(file_path)
                    self.preview_columns = [
                        {"field": col, "header": col} for col in df.columns
                    ]
                    self.preview_df = df.to_dict(orient="records")
                    self.upload_status = f"Uploaded {file_name} successfully"
                    self.uploading = False
                    yield

                else:
                    # Cache hit: reuse the existing engine for this file.
                    self._query_engine = self.file_cache[file_key]
                    self.upload_status = f"Uploaded {file_name} successfully"
                    self.uploading = False

                yield
        except Exception as e:
            self.uploading = False
            self.upload_status = f"Error uploading file: {str(e)}"
            yield

    def create_new_chat(self):
        """Create a new chat."""
        self.chats.append([])
        self.current_chat = len(self.chats) - 1

    def reset_chat(self):
        """Clear the messages of the currently selected chat.

        Fix: previously assigned to a nonexistent `self.messages` var;
        it now clears the active chat's message list.
        """
        self.chats[self.current_chat] = []
        gc.collect()

    @rx.event(background=True)
    async def process_query(self, form_data: dict):
        """Background handler: stream the engine's answer into the chat."""
        if self.processing or not form_data.get("question") or not self._query_engine:
            return

        question = form_data.get("question")
        if not question:
            return

        async with self:
            self.processing = True
            self.chats[self.current_chat].append(QA(question=question, answer=""))
            yield
            await asyncio.sleep(0.1)

        try:
            streaming_response = self._query_engine.query(question)
            answer = ""

            async with self:
                for chunk in streaming_response.response_gen:
                    answer += chunk
                    self.chats[self.current_chat][-1].answer = answer
                    # Reassign to trigger reactive updates on the nested list.
                    self.chats = self.chats
                    yield
                    await asyncio.sleep(0.05)

                self.processing = False
                yield

        except Exception as e:
            async with self:
                self.chats[self.current_chat][
                    -1
                ].answer = f"Error processing query: {str(e)}"
                self.processing = False
                yield
226 |
227 |
def excel_preview() -> rx.Component:
    """Paginated, searchable preview table of the uploaded Excel sheet.

    Fix: removed the dead `State.preview_df is None` guard — at component
    build time `State.preview_df` is a reflex Var object, never the Python
    `None` singleton, so the branch could never fire (and the state var is
    initialized to `[]`, not `None`). Behavior is unchanged.
    """
    return rx.box(
        rx.heading("Excel Preview", size="4"),
        rx.data_table(
            data=State.preview_df,
            columns=State.preview_columns,
            pagination=True,
            search=True,
            sort=True,
        ),
        padding="1em",
        border_radius="8px",
        border=f"1px solid {rx.color('blue', 3)}",
        margin_top="-2em",
        margin_bottom="2em",
    )
247 |
248 |
def message(qa: QA) -> rx.Component:
    """Render one Q/A pair: question right-aligned, answer left-aligned."""
    question_bubble = rx.box(
        rx.markdown(
            qa.question,
            background_color=rx.color("blue", 4),
            color=rx.color("blue", 12),
            **message_style,
        ),
        text_align="right",
        margin_top="1em",
    )
    answer_bubble = rx.box(
        rx.markdown(
            qa.answer,
            background_color=rx.color("green", 4),
            color=rx.color("green", 12),
            **message_style,
        ),
        text_align="left",
        padding_top="1em",
    )
    return rx.box(question_bubble, answer_bubble, width="100%")
273 |
274 |
def action_bar() -> rx.Component:
    """Fixed bottom bar holding the question form (input + submit button)."""
    question_input = rx.input(
        placeholder="Ask your question here...",
        id="question",
        width=["15em", "20em", "45em", "50em", "50em", "50em"],
        disabled=State.processing,
        border_color=rx.color("blue", 6),
        _focus={"border_color": rx.color("blue", 8)},
        background_color="transparent",
    )
    submit_button = rx.button(
        rx.cond(
            State.processing,
            loading_icon(height="1em"),
            rx.text("Process"),
        ),
        type_="submit",
        disabled=State.processing,
        bg=rx.color("green", 9),
        color="white",
        _hover={"bg": rx.color("green", 10)},
    )
    query_form = rx.form(
        rx.hstack(
            question_input,
            submit_button,
            align_items="center",
            spacing="3",
        ),
        on_submit=State.process_query,
        width="100%",
        reset_on_submit=True,
    )
    return rx.box(
        rx.vstack(query_form, align_items="center", width="100%"),
        position="fixed",
        bottom="0",
        left="0",
        padding_x="20em",
        padding_y="16px",
        backdrop_filter="auto",
        backdrop_blur="lg",
        background_color=rx.color("mauve", 2),
        border_top=f"1px solid {rx.color('blue', 3)}",
        width="100%",
    )
322 |
323 |
def sidebar() -> rx.Component:
    """The sidebar component: Excel upload dropzone, ingest button, status line."""
    # Drag-and-drop zone restricted to a single Excel file.
    dropzone = rx.upload(
        rx.vstack(
            rx.button("Select Excel File", **UPLOAD_BUTTON_STYLE),
            rx.text(
                "Drag and drop file here",
                font_size="sm",
                color=rx.color("mauve", 11),
            ),
        ),
        border=f"1px dashed {rx.color('mauve', 6)}",
        padding="2em",
        border_radius="md",
        accept={
            ".xls": "application/vnd.ms-excel",
            ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        },
        max_files=1,
        multiple=False,
    )
    # Sends the selected file(s) to the state handler for ingestion.
    ingest_button = rx.button(
        "Add to Knowledge Base",
        on_click=State.handle_upload(rx.upload_files()),
        loading=State.uploading,
        **UPLOAD_BUTTON_STYLE,
    )
    status_line = rx.text(
        State.upload_status, color=rx.color("mauve", 11), font_size="sm"
    )
    return rx.box(
        rx.vstack(
            rx.heading("Upload Document", size="6", margin_bottom="1em"),
            dropzone,
            ingest_button,
            status_line,
            align_items="stretch",
            height="100%",
        ),
        **SIDEBAR_STYLE,
    )
363 |
364 |
def chat() -> rx.Component:
    """Scrollable column showing every Q/A message of the current chat."""
    current_history = State.chats[State.current_chat]
    return rx.vstack(
        rx.box(rx.foreach(current_history, message), width="100%"),
        py="8",
        flex="1",
        width="100%",
        max_width="50em",
        padding_x="4px",
        align_self="center",
        overflow_y="auto",
        padding_bottom="5em",
    )
377 |
378 |
def index() -> rx.Component:
    """The main app: sidebar on the left, chat column with action bar on the right."""
    header = rx.hstack(
        rx.heading("Chat with Excel using DeepSeek-R1 💬", margin_right="4em"),
        rx.button(
            "New Chat",
            on_click=State.create_new_chat,
            margin_left="auto",
        ),
    )
    # Main panel is offset by the fixed 300px sidebar width.
    main_panel = rx.box(
        rx.vstack(
            header,
            chat(),
            action_bar(),
            spacing="4",
            align_items="center",
            height="100vh",
            padding="4em",
        ),
        margin_left="300px",
        width="calc(100% - 300px)",
    )
    return rx.box(
        sidebar(),
        main_panel,
        width="100%",
        height="100vh",
        background_color=rx.color("mauve", 1),
    )
409 |
410 |
# Instantiate the Reflex app and register the chat page at the root route.
app = rx.App()
app.add_page(index)
413 |
--------------------------------------------------------------------------------
/rag_with_docling/requirements.txt:
--------------------------------------------------------------------------------
1 | reflex==0.7.11
2 | agno
3 | llama_index
4 | llama-index-llms-ollama
5 | llama-index-embeddings-huggingface
6 | llama-index-readers-docling
--------------------------------------------------------------------------------
/rag_with_docling/rxconfig.py:
--------------------------------------------------------------------------------
import reflex as rx


# Reflex project configuration; app_name must match the app's package directory.
config = rx.Config(
    app_name="chat",
)
7 |
--------------------------------------------------------------------------------