├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── agent.png ├── app.png ├── backend ├── .env.example ├── .gitignore ├── LICENSE ├── Makefile ├── langgraph.json ├── pyproject.toml ├── src │ └── agent │ │ ├── __init__.py │ │ ├── app.py │ │ ├── configuration.py │ │ ├── graph.py │ │ ├── prompts.py │ │ ├── state.py │ │ ├── tools_and_schemas.py │ │ └── utils.py └── test-agent.ipynb ├── docker-compose.yml └── frontend ├── .gitignore ├── components.json ├── eslint.config.js ├── index.html ├── package-lock.json ├── package.json ├── public └── vite.svg ├── src ├── App.tsx ├── components │ ├── ActivityTimeline.tsx │ ├── ChatMessagesView.tsx │ ├── InputForm.tsx │ ├── WelcomeScreen.tsx │ └── ui │ │ ├── badge.tsx │ │ ├── button.tsx │ │ ├── card.tsx │ │ ├── input.tsx │ │ ├── scroll-area.tsx │ │ ├── select.tsx │ │ ├── tabs.tsx │ │ └── textarea.tsx ├── global.css ├── lib │ └── utils.ts ├── main.tsx └── vite-env.d.ts ├── tsconfig.json ├── tsconfig.node.json └── vite.config.ts /.gitignore: -------------------------------------------------------------------------------- 1 | # Node / Frontend 2 | node_modules/ 3 | frontend/dist/ 4 | frontend/.vite/ 5 | frontend/coverage/ 6 | .DS_Store 7 | *.local 8 | 9 | # Logs 10 | logs 11 | *.log 12 | npm-debug.log* 13 | yarn-debug.log* 14 | yarn-error.log* 15 | pnpm-debug.log* 16 | lerna-debug.log* 17 | 18 | # OS generated files 19 | .DS_Store 20 | .DS_Store? 21 | ._* 22 | .Spotlight-V100 23 | .Trashes 24 | ehthumbs.db 25 | Thumbs.db 26 | 27 | # IDE files 28 | .idea/ 29 | .vscode/ 30 | *.suo 31 | *.ntvs* 32 | *.njsproj 33 | *.sln 34 | *.sw? 
35 | 36 | # Optional backend venv (if created in root) 37 | #.venv/ 38 | 39 | # Byte-compiled / optimized / DLL files 40 | __pycache__/ 41 | *.py[cod] 42 | *$py.class 43 | uv.lock 44 | 45 | # C extensions 46 | *.so 47 | 48 | # Distribution / packaging 49 | .Python 50 | build/ 51 | develop-eggs/ 52 | dist/ 53 | downloads/ 54 | eggs/ 55 | .eggs/ 56 | lib64/ 57 | parts/ 58 | sdist/ 59 | var/ 60 | wheels/ 61 | share/python-wheels/ 62 | *.egg-info/ 63 | .installed.cfg 64 | *.egg 65 | MANIFEST 66 | 67 | # PyInstaller 68 | # Usually these files are written by a python script from a template 69 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 70 | *.manifest 71 | *.spec 72 | 73 | # Installer logs 74 | pip-log.txt 75 | pip-delete-this-directory.txt 76 | 77 | # Unit test / coverage reports 78 | htmlcov/ 79 | .tox/ 80 | .nox/ 81 | .coverage 82 | .coverage.* 83 | .cache 84 | nosetests.xml 85 | coverage.xml 86 | *.cover 87 | *.py,cover 88 | .hypothesis/ 89 | .pytest_cache/ 90 | cover/ 91 | 92 | # Translations 93 | *.mo 94 | *.pot 95 | 96 | # Django stuff: 97 | *.log 98 | local_settings.py 99 | db.sqlite3 100 | db.sqlite3-journal 101 | 102 | # Flask stuff: 103 | instance/ 104 | .webassets-cache 105 | 106 | # Scrapy stuff: 107 | .scrapy 108 | 109 | # Sphinx documentation 110 | docs/_build/ 111 | 112 | # PyBuilder 113 | .pybuilder/ 114 | target/ 115 | 116 | # Jupyter Notebook 117 | .ipynb_checkpoints 118 | 119 | # IPython 120 | profile_default/ 121 | ipython_config.py 122 | 123 | # pyenv 124 | # For a library or package, you might want to ignore these files since the code is 125 | # intended to run in multiple environments; otherwise, check them in: 126 | # .python-version 127 | 128 | # pipenv 129 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
130 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 131 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 132 | # install all needed dependencies. 133 | #Pipfile.lock 134 | 135 | # poetry 136 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 137 | # This is especially recommended for binary packages to ensure reproducibility, and is more 138 | # commonly ignored for libraries. 139 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 140 | #poetry.lock 141 | 142 | # pdm 143 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 144 | #pdm.lock 145 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 146 | # in version control. 147 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 148 | .pdm.toml 149 | .pdm-python 150 | .pdm-build/ 151 | 152 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 153 | __pypackages__/ 154 | 155 | # Celery stuff 156 | celerybeat-schedule 157 | celerybeat.pid 158 | 159 | # SageMath parsed files 160 | *.sage.py 161 | 162 | # Environments 163 | .env 164 | .venv 165 | env/ 166 | venv/ 167 | ENV/ 168 | env.bak/ 169 | venv.bak/ 170 | 171 | # Spyder project settings 172 | .spyderproject 173 | .spyproject 174 | 175 | # Rope project settings 176 | .ropeproject 177 | 178 | # mkdocs documentation 179 | /site 180 | 181 | # mypy 182 | .mypy_cache/ 183 | .dmypy.json 184 | dmypy.json 185 | 186 | # Pyre type checker 187 | .pyre/ 188 | 189 | # pytype static type analyzer 190 | .pytype/ 191 | 192 | # Cython debug symbols 193 | cython_debug/ 194 | 195 | # PyCharm 196 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 197 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 198 | # and can be added to the global gitignore or merged into this file. For a more nuclear 199 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
200 | #.idea/ 201 | 202 | backend/.langgraph_api -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Stage 1: Build React Frontend 2 | FROM node:20-alpine AS frontend-builder 3 | 4 | # Set working directory for frontend 5 | WORKDIR /app/frontend 6 | 7 | # Copy frontend package files and install dependencies 8 | COPY frontend/package.json ./ 9 | COPY frontend/package-lock.json ./ 10 | # If you use yarn or pnpm, adjust accordingly (e.g., copy yarn.lock or pnpm-lock.yaml and use yarn install or pnpm install) 11 | RUN npm install 12 | 13 | # Copy the rest of the frontend source code 14 | COPY frontend/ ./ 15 | 16 | # Build the frontend 17 | RUN npm run build 18 | 19 | # Stage 2: Python Backend 20 | FROM docker.io/langchain/langgraph-api:3.11 21 | 22 | # -- Install UV -- 23 | # First install curl, then install UV using the standalone installer 24 | RUN apt-get update && apt-get install -y curl && \ 25 | curl -LsSf https://astral.sh/uv/install.sh | sh && \ 26 | apt-get clean && rm -rf /var/lib/apt/lists/* 27 | ENV PATH="/root/.local/bin:$PATH" 28 | # -- End of UV installation -- 29 | 30 | # -- Copy built frontend from builder stage -- 31 | # The app.py expects the frontend build to be at ../frontend/dist relative to its own location. 32 | # If app.py is at /deps/backend/src/agent/app.py, then ../frontend/dist resolves to /deps/frontend/dist. 33 | COPY --from=frontend-builder /app/frontend/dist /deps/frontend/dist 34 | # -- End of copying built frontend -- 35 | 36 | # -- Adding local package . -- 37 | ADD backend/ /deps/backend 38 | # -- End of local package . 
-- 39 | 40 | # -- Installing all local dependencies using UV -- 41 | # First, we need to ensure pip is available for UV to use 42 | RUN uv pip install --system pip setuptools wheel 43 | # Install dependencies with UV, respecting constraints 44 | RUN cd /deps/backend && \ 45 | PYTHONDONTWRITEBYTECODE=1 UV_SYSTEM_PYTHON=1 uv pip install --system -c /api/constraints.txt -e . 46 | # -- End of local dependencies install -- 47 | ENV LANGGRAPH_HTTP='{"app": "/deps/backend/src/agent/app.py:app"}' 48 | ENV LANGSERVE_GRAPHS='{"agent": "/deps/backend/src/agent/graph.py:graph"}' 49 | 50 | # -- Ensure user deps didn't inadvertently overwrite langgraph-api 51 | # Create all required directories that the langgraph-api package expects 52 | RUN mkdir -p /api/langgraph_api /api/langgraph_runtime /api/langgraph_license /api/langgraph_storage && \ 53 | touch /api/langgraph_api/__init__.py /api/langgraph_runtime/__init__.py /api/langgraph_license/__init__.py /api/langgraph_storage/__init__.py 54 | # Use pip for this specific package as it has poetry-based build requirements 55 | RUN PYTHONDONTWRITEBYTECODE=1 pip install --no-cache-dir --no-deps -e /api 56 | # -- End of ensuring user deps didn't inadvertently overwrite langgraph-api -- 57 | # -- Removing pip from the final image (but keeping UV) -- 58 | RUN uv pip uninstall --system pip setuptools wheel && \ 59 | rm -rf /usr/local/lib/python*/site-packages/pip* /usr/local/lib/python*/site-packages/setuptools* /usr/local/lib/python*/site-packages/wheel* && \ 60 | find /usr/local/bin -name "pip*" -delete 61 | # -- End of pip removal -- 62 | 63 | WORKDIR /deps/backend 64 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: help dev-frontend dev-backend dev 2 | 3 | help: 4 | @echo "Available commands:" 5 | @echo " make dev-frontend - Starts the frontend development server (Vite)" 6 | @echo " make dev-backend - Starts the backend development server (Uvicorn with reload)" 7 | @echo " make dev - Starts both frontend and backend development servers" 8 | 9 | dev-frontend: 10 | @echo "Starting frontend development server..." 11 | @cd frontend && npm run dev 12 | 13 | dev-backend: 14 | @echo "Starting backend development server..." 15 | @cd backend && langgraph dev 16 | 17 | # Run frontend and backend concurrently 18 | dev: 19 | @echo "Starting both frontend and backend development servers..." 
20 | @make dev-frontend & make dev-backend -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Gemini Fullstack LangGraph Quickstart 2 | 3 | This project demonstrates a fullstack application using a React frontend and a LangGraph-powered backend agent. The agent is designed to perform comprehensive research on a user's query by dynamically generating search terms, querying the web using Google Search, reflecting on the results to identify knowledge gaps, and iteratively refining its search until it can provide a well-supported answer with citations. This application serves as an example of building research-augmented conversational AI using LangGraph and Google's Gemini models. 4 | 5 | ![Gemini Fullstack LangGraph](./app.png) 6 | 7 | ## Features 8 | 9 | - 💬 Fullstack application with a React frontend and LangGraph backend. 10 | - 🧠 Powered by a LangGraph agent for advanced research and conversational AI. 11 | - 🔍 Dynamic search query generation using Google Gemini models. 12 | - 🌐 Integrated web research via Google Search API. 13 | - 🤔 Reflective reasoning to identify knowledge gaps and refine searches. 14 | - 📄 Generates answers with citations from gathered sources. 15 | - 🔄 Hot-reloading for both frontend and backend development during development. 16 | 17 | ## Project Structure 18 | 19 | The project is divided into two main directories: 20 | 21 | - `frontend/`: Contains the React application built with Vite. 22 | - `backend/`: Contains the LangGraph/FastAPI application, including the research agent logic. 23 | 24 | ## Getting Started: Development and Local Testing 25 | 26 | Follow these steps to get the application running locally for development and testing. 27 | 28 | **1. Prerequisites:** 29 | 30 | - Node.js and npm (or yarn/pnpm) 31 | - Python 3.8+ 32 | - **`GEMINI_API_KEY`**: The backend agent requires a Google Gemini API key. 
33 | 1. Navigate to the `backend/` directory. 34 | 2. Create a file named `.env` by copying the `backend/.env.example` file. 35 | 3. Open the `.env` file and add your Gemini API key: `GEMINI_API_KEY="YOUR_ACTUAL_API_KEY"` 36 | 37 | **2. Install Dependencies:** 38 | 39 | **Backend:** 40 | 41 | ```bash 42 | cd backend 43 | pip install . 44 | ``` 45 | 46 | **Frontend:** 47 | 48 | ```bash 49 | cd frontend 50 | npm install 51 | ``` 52 | 53 | **3. Run Development Servers:** 54 | 55 | **Backend & Frontend:** 56 | 57 | ```bash 58 | make dev 59 | ``` 60 | This will run the backend and frontend development servers. Open your browser and navigate to the frontend development server URL (e.g., `http://localhost:5173/app`). 61 | 62 | _Alternatively, you can run the backend and frontend development servers separately. For the backend, open a terminal in the `backend/` directory and run `langgraph dev`. The backend API will be available at `http://127.0.0.1:2024`. It will also open a browser window to the LangGraph UI. For the frontend, open a terminal in the `frontend/` directory and run `npm run dev`. The frontend will be available at `http://localhost:5173`._ 63 | 64 | ## How the Backend Agent Works (High-Level) 65 | 66 | The core of the backend is a LangGraph agent defined in `backend/src/agent/graph.py`. It follows these steps: 67 | 68 | ![Agent Flow](./agent.png) 69 | 70 | 1. **Generate Initial Queries:** Based on your input, it generates a set of initial search queries using a Gemini model. 71 | 2. **Web Research:** For each query, it uses the Gemini model with the Google Search API to find relevant web pages. 72 | 3. **Reflection & Knowledge Gap Analysis:** The agent analyzes the search results to determine if the information is sufficient or if there are knowledge gaps. It uses a Gemini model for this reflection process. 73 | 4. 
**Iterative Refinement:** If gaps are found or the information is insufficient, it generates follow-up queries and repeats the web research and reflection steps (up to a configured maximum number of loops). 74 | 5. **Finalize Answer:** Once the research is deemed sufficient, the agent synthesizes the gathered information into a coherent answer, including citations from the web sources, using a Gemini model. 75 | 76 | ## Deployment 77 | 78 | In production, the backend server serves the optimized static frontend build. LangGraph requires a Redis instance and a Postgres database. Redis is used as a pub-sub broker to enable streaming real time output from background runs. Postgres is used to store assistants, threads, runs, persist thread state and long term memory, and to manage the state of the background task queue with 'exactly once' semantics. For more details on how to deploy the backend server, take a look at the [LangGraph Documentation](https://langchain-ai.github.io/langgraph/concepts/deployment_options/). Below is an example of how to build a Docker image that includes the optimized frontend build and the backend server and run it via `docker-compose`. 79 | 80 | _Note: For the docker-compose.yml example you need a LangSmith API key, you can get one from [LangSmith](https://smith.langchain.com/settings)._ 81 | 82 | _Note: If you are not running the docker-compose.yml example or exposing the backend server to the public internet, you should update the `apiUrl` in the `frontend/src/App.tsx` file to your host. Currently the `apiUrl` is set to `http://localhost:8123` for docker-compose or `http://localhost:2024` for development._ 83 | 84 | **1. Build the Docker Image:** 85 | 86 | Run the following command from the **project root directory**: 87 | ```bash 88 | docker build -t gemini-fullstack-langgraph -f Dockerfile . 89 | ``` 90 | **2. 
Run the Production Server:** 91 | 92 | ```bash 93 | GEMINI_API_KEY= LANGSMITH_API_KEY= docker-compose up 94 | ``` 95 | 96 | Open your browser and navigate to `http://localhost:8123/app/` to see the application. The API will be available at `http://localhost:8123`. 97 | 98 | ## Technologies Used 99 | 100 | - [React](https://reactjs.org/) (with [Vite](https://vitejs.dev/)) - For the frontend user interface. 101 | - [Tailwind CSS](https://tailwindcss.com/) - For styling. 102 | - [Shadcn UI](https://ui.shadcn.com/) - For components. 103 | - [LangGraph](https://github.com/langchain-ai/langgraph) - For building the backend research agent. 104 | - [Google Gemini](https://ai.google.dev/models/gemini) - LLM for query generation, reflection, and answer synthesis. 105 | 106 | ## License 107 | 108 | This project is licensed under the Apache License 2.0. See the [LICENSE](LICENSE) file for details. -------------------------------------------------------------------------------- /agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google-gemini/gemini-fullstack-langgraph-quickstart/fddf107e0a9f6c1f3c45f70031b65a7138a281b1/agent.png -------------------------------------------------------------------------------- /app.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google-gemini/gemini-fullstack-langgraph-quickstart/fddf107e0a9f6c1f3c45f70031b65a7138a281b1/app.png -------------------------------------------------------------------------------- /backend/.env.example: -------------------------------------------------------------------------------- 1 | # GEMINI_API_KEY= -------------------------------------------------------------------------------- /backend/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 
| uv.lock 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | cover/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | db.sqlite3-journal 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | .pybuilder/ 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | # For a library or package, you might want to ignore these files since the code is 88 | # intended to run in multiple environments; otherwise, check them in: 89 | # .python-version 90 | 91 | # pipenv 92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 95 | # install all needed dependencies. 
96 | #Pipfile.lock 97 | 98 | # poetry 99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 100 | # This is especially recommended for binary packages to ensure reproducibility, and is more 101 | # commonly ignored for libraries. 102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 103 | #poetry.lock 104 | 105 | # pdm 106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 107 | #pdm.lock 108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 109 | # in version control. 110 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 111 | .pdm.toml 112 | .pdm-python 113 | .pdm-build/ 114 | 115 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 116 | __pypackages__/ 117 | 118 | # Celery stuff 119 | celerybeat-schedule 120 | celerybeat.pid 121 | 122 | # SageMath parsed files 123 | *.sage.py 124 | 125 | # Environments 126 | .env 127 | .venv 128 | env/ 129 | venv/ 130 | ENV/ 131 | env.bak/ 132 | venv.bak/ 133 | 134 | # Spyder project settings 135 | .spyderproject 136 | .spyproject 137 | 138 | # Rope project settings 139 | .ropeproject 140 | 141 | # mkdocs documentation 142 | /site 143 | 144 | # mypy 145 | .mypy_cache/ 146 | .dmypy.json 147 | dmypy.json 148 | 149 | # Pyre type checker 150 | .pyre/ 151 | 152 | # pytype static type analyzer 153 | .pytype/ 154 | 155 | # Cython debug symbols 156 | cython_debug/ 157 | 158 | # PyCharm 159 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 161 | # and can be added to the global gitignore or merged into this file. For a more nuclear 162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
163 | #.idea/ 164 | -------------------------------------------------------------------------------- /backend/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Philipp Schmid 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /backend/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all format lint test tests test_watch integration_tests docker_tests help extended_tests 2 | 3 | # Default target executed when no arguments are given to make. 4 | all: help 5 | 6 | # Define a variable for the test file path. 7 | TEST_FILE ?= tests/unit_tests/ 8 | 9 | test: 10 | uv run --with-editable . pytest $(TEST_FILE) 11 | 12 | test_watch: 13 | uv run --with-editable . ptw --snapshot-update --now . 
-- -vv tests/unit_tests 14 | 15 | test_profile: 16 | uv run --with-editable . pytest -vv tests/unit_tests/ --profile-svg 17 | 18 | extended_tests: 19 | uv run --with-editable . pytest --only-extended $(TEST_FILE) 20 | 21 | 22 | ###################### 23 | # LINTING AND FORMATTING 24 | ###################### 25 | 26 | # Define a variable for Python and notebook files. 27 | PYTHON_FILES=src/ 28 | MYPY_CACHE=.mypy_cache 29 | lint format: PYTHON_FILES=. 30 | lint_diff format_diff: PYTHON_FILES=$(shell git diff --name-only --diff-filter=d main | grep -E '\.py$$|\.ipynb$$') 31 | lint_package: PYTHON_FILES=src 32 | lint_tests: PYTHON_FILES=tests 33 | lint_tests: MYPY_CACHE=.mypy_cache_test 34 | 35 | lint lint_diff lint_package lint_tests: 36 | uv run ruff check . 37 | [ "$(PYTHON_FILES)" = "" ] || uv run ruff format $(PYTHON_FILES) --diff 38 | [ "$(PYTHON_FILES)" = "" ] || uv run ruff check --select I $(PYTHON_FILES) 39 | [ "$(PYTHON_FILES)" = "" ] || uv run mypy --strict $(PYTHON_FILES) 40 | [ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && uv run mypy --strict $(PYTHON_FILES) --cache-dir $(MYPY_CACHE) 41 | 42 | format format_diff: 43 | uv run ruff format $(PYTHON_FILES) 44 | uv run ruff check --select I --fix $(PYTHON_FILES) 45 | 46 | spell_check: 47 | codespell --toml pyproject.toml 48 | 49 | spell_fix: 50 | codespell --toml pyproject.toml -w 51 | 52 | ###################### 53 | # HELP 54 | ###################### 55 | 56 | help: 57 | @echo '----' 58 | @echo 'format - run code formatters' 59 | @echo 'lint - run linters' 60 | @echo 'test - run unit tests' 61 | @echo 'tests - run unit tests' 62 | @echo 'test TEST_FILE= - run all tests in file' 63 | @echo 'test_watch - run unit tests in watch mode' 64 | 65 | -------------------------------------------------------------------------------- /backend/langgraph.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": ["."], 3 | "graphs": { 4 | "agent": 
"./src/agent/graph.py:graph" 5 | }, 6 | "http": { 7 | "app": "./src/agent/app.py:app" 8 | }, 9 | "env": ".env" 10 | } 11 | -------------------------------------------------------------------------------- /backend/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "agent" 3 | version = "0.0.1" 4 | description = "Backend for the LangGraph agent" 5 | authors = [ 6 | { name = "Philipp Schmid", email = "schmidphilipp1995@gmail.com" }, 7 | ] 8 | readme = "README.md" 9 | license = { text = "MIT" } 10 | requires-python = ">=3.11,<4.0" 11 | dependencies = [ 12 | "langgraph>=0.2.6", 13 | "langchain>=0.3.19", 14 | "langchain-google-genai", 15 | "python-dotenv>=1.0.1", 16 | "langgraph-sdk>=0.1.57", 17 | "langgraph-cli", 18 | "langgraph-api", 19 | "fastapi", 20 | "google-genai", 21 | ] 22 | 23 | 24 | [project.optional-dependencies] 25 | dev = ["mypy>=1.11.1", "ruff>=0.6.1"] 26 | 27 | [build-system] 28 | requires = ["setuptools>=73.0.0", "wheel"] 29 | build-backend = "setuptools.build_meta" 30 | 31 | [tool.ruff] 32 | lint.select = [ 33 | "E", # pycodestyle 34 | "F", # pyflakes 35 | "I", # isort 36 | "D", # pydocstyle 37 | "D401", # First line should be in imperative mood 38 | "T201", 39 | "UP", 40 | ] 41 | lint.ignore = [ 42 | "UP006", 43 | "UP007", 44 | # We actually do want to import from typing_extensions 45 | "UP035", 46 | # Relax the convention by _not_ requiring documentation for every function parameter. 
47 | "D417", 48 | "E501", 49 | ] 50 | [tool.ruff.lint.per-file-ignores] 51 | "tests/*" = ["D", "UP"] 52 | [tool.ruff.lint.pydocstyle] 53 | convention = "google" 54 | 55 | [dependency-groups] 56 | dev = [ 57 | "langgraph-cli[inmem]>=0.1.71", 58 | "pytest>=8.3.5", 59 | ] 60 | -------------------------------------------------------------------------------- /backend/src/agent/__init__.py: -------------------------------------------------------------------------------- 1 | from agent.graph import graph 2 | 3 | __all__ = ["graph"] 4 | -------------------------------------------------------------------------------- /backend/src/agent/app.py: -------------------------------------------------------------------------------- 1 | # mypy: disable - error - code = "no-untyped-def,misc" 2 | import pathlib 3 | from fastapi import FastAPI, Request, Response 4 | from fastapi.staticfiles import StaticFiles 5 | import fastapi.exceptions 6 | 7 | # Define the FastAPI app 8 | app = FastAPI() 9 | 10 | 11 | def create_frontend_router(build_dir="../frontend/dist"): 12 | """Creates a router to serve the React frontend. 13 | 14 | Args: 15 | build_dir: Path to the React build directory relative to this file. 16 | 17 | Returns: 18 | A Starlette application serving the frontend. 19 | """ 20 | build_path = pathlib.Path(__file__).parent.parent.parent / build_dir 21 | static_files_path = build_path / "assets" # Vite uses 'assets' subdir 22 | 23 | if not build_path.is_dir() or not (build_path / "index.html").is_file(): 24 | print( 25 | f"WARN: Frontend build directory not found or incomplete at {build_path}. Serving frontend will likely fail." 26 | ) 27 | # Return a dummy router if build isn't ready 28 | from starlette.routing import Route 29 | 30 | async def dummy_frontend(request): 31 | return Response( 32 | "Frontend not built. 
Run 'npm run build' in the frontend directory.", 33 | media_type="text/plain", 34 | status_code=503, 35 | ) 36 | 37 | return Route("/{path:path}", endpoint=dummy_frontend) 38 | 39 | build_dir = pathlib.Path(build_dir) 40 | 41 | react = FastAPI(openapi_url="") 42 | react.mount( 43 | "/assets", StaticFiles(directory=static_files_path), name="static_assets" 44 | ) 45 | 46 | @react.get("/{path:path}") 47 | async def handle_catch_all(request: Request, path: str): 48 | fp = build_path / path 49 | if not fp.exists() or not fp.is_file(): 50 | fp = build_path / "index.html" 51 | return fastapi.responses.FileResponse(fp) 52 | 53 | return react 54 | 55 | 56 | # Mount the frontend under /app to not conflict with the LangGraph API routes 57 | app.mount( 58 | "/app", 59 | create_frontend_router(), 60 | name="frontend", 61 | ) 62 | -------------------------------------------------------------------------------- /backend/src/agent/configuration.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pydantic import BaseModel, Field 3 | from typing import Any, Optional 4 | 5 | from langchain_core.runnables import RunnableConfig 6 | 7 | 8 | class Configuration(BaseModel): 9 | """The configuration for the agent.""" 10 | 11 | query_generator_model: str = Field( 12 | default="gemini-2.0-flash", 13 | metadata={ 14 | "description": "The name of the language model to use for the agent's query generation." 15 | }, 16 | ) 17 | 18 | reflection_model: str = Field( 19 | default="gemini-2.5-flash-preview-04-17", 20 | metadata={ 21 | "description": "The name of the language model to use for the agent's reflection." 22 | }, 23 | ) 24 | 25 | answer_model: str = Field( 26 | default="gemini-2.5-pro-preview-05-06", 27 | metadata={ 28 | "description": "The name of the language model to use for the agent's answer." 
29 | }, 30 | ) 31 | 32 | number_of_initial_queries: int = Field( 33 | default=3, 34 | metadata={"description": "The number of initial search queries to generate."}, 35 | ) 36 | 37 | max_research_loops: int = Field( 38 | default=2, 39 | metadata={"description": "The maximum number of research loops to perform."}, 40 | ) 41 | 42 | @classmethod 43 | def from_runnable_config( 44 | cls, config: Optional[RunnableConfig] = None 45 | ) -> "Configuration": 46 | """Create a Configuration instance from a RunnableConfig.""" 47 | configurable = ( 48 | config["configurable"] if config and "configurable" in config else {} 49 | ) 50 | 51 | # Get raw values from environment or config 52 | raw_values: dict[str, Any] = { 53 | name: os.environ.get(name.upper(), configurable.get(name)) 54 | for name in cls.model_fields.keys() 55 | } 56 | 57 | # Filter out None values 58 | values = {k: v for k, v in raw_values.items() if v is not None} 59 | 60 | return cls(**values) 61 | -------------------------------------------------------------------------------- /backend/src/agent/graph.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from agent.tools_and_schemas import SearchQueryList, Reflection 4 | from dotenv import load_dotenv 5 | from langchain_core.messages import AIMessage 6 | from langgraph.types import Send 7 | from langgraph.graph import StateGraph 8 | from langgraph.graph import START, END 9 | from langchain_core.runnables import RunnableConfig 10 | from google.genai import Client 11 | 12 | from agent.state import ( 13 | OverallState, 14 | QueryGenerationState, 15 | ReflectionState, 16 | WebSearchState, 17 | ) 18 | from agent.configuration import Configuration 19 | from agent.prompts import ( 20 | get_current_date, 21 | query_writer_instructions, 22 | web_searcher_instructions, 23 | reflection_instructions, 24 | answer_instructions, 25 | ) 26 | from langchain_google_genai import ChatGoogleGenerativeAI 27 | from agent.utils import 
( 28 | get_citations, 29 | get_research_topic, 30 | insert_citation_markers, 31 | resolve_urls, 32 | ) 33 | 34 | load_dotenv() 35 | 36 | if os.getenv("GEMINI_API_KEY") is None: 37 | raise ValueError("GEMINI_API_KEY is not set") 38 | 39 | # Used for Google Search API 40 | genai_client = Client(api_key=os.getenv("GEMINI_API_KEY")) 41 | 42 | 43 | # Nodes 44 | def generate_query(state: OverallState, config: RunnableConfig) -> QueryGenerationState: 45 | """LangGraph node that generates a search queries based on the User's question. 46 | 47 | Uses Gemini 2.0 Flash to create an optimized search query for web research based on 48 | the User's question. 49 | 50 | Args: 51 | state: Current graph state containing the User's question 52 | config: Configuration for the runnable, including LLM provider settings 53 | 54 | Returns: 55 | Dictionary with state update, including search_query key containing the generated query 56 | """ 57 | configurable = Configuration.from_runnable_config(config) 58 | 59 | # check for custom initial search query count 60 | if state.get("initial_search_query_count") is None: 61 | state["initial_search_query_count"] = configurable.number_of_initial_queries 62 | 63 | # init Gemini 2.0 Flash 64 | llm = ChatGoogleGenerativeAI( 65 | model=configurable.query_generator_model, 66 | temperature=1.0, 67 | max_retries=2, 68 | api_key=os.getenv("GEMINI_API_KEY"), 69 | ) 70 | structured_llm = llm.with_structured_output(SearchQueryList) 71 | 72 | # Format the prompt 73 | current_date = get_current_date() 74 | formatted_prompt = query_writer_instructions.format( 75 | current_date=current_date, 76 | research_topic=get_research_topic(state["messages"]), 77 | number_queries=state["initial_search_query_count"], 78 | ) 79 | # Generate the search queries 80 | result = structured_llm.invoke(formatted_prompt) 81 | return {"query_list": result.query} 82 | 83 | 84 | def continue_to_web_research(state: QueryGenerationState): 85 | """LangGraph node that sends the search queries 
to the web research node. 86 | 87 | This is used to spawn n number of web research nodes, one for each search query. 88 | """ 89 | return [ 90 | Send("web_research", {"search_query": search_query, "id": int(idx)}) 91 | for idx, search_query in enumerate(state["query_list"]) 92 | ] 93 | 94 | 95 | def web_research(state: WebSearchState, config: RunnableConfig) -> OverallState: 96 | """LangGraph node that performs web research using the native Google Search API tool. 97 | 98 | Executes a web search using the native Google Search API tool in combination with Gemini 2.0 Flash. 99 | 100 | Args: 101 | state: Current graph state containing the search query and research loop count 102 | config: Configuration for the runnable, including search API settings 103 | 104 | Returns: 105 | Dictionary with state update, including sources_gathered, research_loop_count, and web_research_results 106 | """ 107 | # Configure 108 | configurable = Configuration.from_runnable_config(config) 109 | formatted_prompt = web_searcher_instructions.format( 110 | current_date=get_current_date(), 111 | research_topic=state["search_query"], 112 | ) 113 | 114 | # Uses the google genai client as the langchain client doesn't return grounding metadata 115 | response = genai_client.models.generate_content( 116 | model=configurable.query_generator_model, 117 | contents=formatted_prompt, 118 | config={ 119 | "tools": [{"google_search": {}}], 120 | "temperature": 0, 121 | }, 122 | ) 123 | # resolve the urls to short urls for saving tokens and time 124 | resolved_urls = resolve_urls( 125 | response.candidates[0].grounding_metadata.grounding_chunks, state["id"] 126 | ) 127 | # Gets the citations and adds them to the generated text 128 | citations = get_citations(response, resolved_urls) 129 | modified_text = insert_citation_markers(response.text, citations) 130 | sources_gathered = [item for citation in citations for item in citation["segments"]] 131 | 132 | return { 133 | "sources_gathered": sources_gathered, 
134 | "search_query": [state["search_query"]], 135 | "web_research_result": [modified_text], 136 | } 137 | 138 | 139 | def reflection(state: OverallState, config: RunnableConfig) -> ReflectionState: 140 | """LangGraph node that identifies knowledge gaps and generates potential follow-up queries. 141 | 142 | Analyzes the current summary to identify areas for further research and generates 143 | potential follow-up queries. Uses structured output to extract 144 | the follow-up query in JSON format. 145 | 146 | Args: 147 | state: Current graph state containing the running summary and research topic 148 | config: Configuration for the runnable, including LLM provider settings 149 | 150 | Returns: 151 | Dictionary with state update, including search_query key containing the generated follow-up query 152 | """ 153 | configurable = Configuration.from_runnable_config(config) 154 | # Increment the research loop count and get the reasoning model 155 | state["research_loop_count"] = state.get("research_loop_count", 0) + 1 156 | reasoning_model = state.get("reasoning_model") or configurable.reasoning_model 157 | 158 | # Format the prompt 159 | current_date = get_current_date() 160 | formatted_prompt = reflection_instructions.format( 161 | current_date=current_date, 162 | research_topic=get_research_topic(state["messages"]), 163 | summaries="\n\n---\n\n".join(state["web_research_result"]), 164 | ) 165 | # init Reasoning Model 166 | llm = ChatGoogleGenerativeAI( 167 | model=reasoning_model, 168 | temperature=1.0, 169 | max_retries=2, 170 | api_key=os.getenv("GEMINI_API_KEY"), 171 | ) 172 | result = llm.with_structured_output(Reflection).invoke(formatted_prompt) 173 | 174 | return { 175 | "is_sufficient": result.is_sufficient, 176 | "knowledge_gap": result.knowledge_gap, 177 | "follow_up_queries": result.follow_up_queries, 178 | "research_loop_count": state["research_loop_count"], 179 | "number_of_ran_queries": len(state["search_query"]), 180 | } 181 | 182 | 183 | def 
evaluate_research( 184 | state: ReflectionState, 185 | config: RunnableConfig, 186 | ) -> OverallState: 187 | """LangGraph routing function that determines the next step in the research flow. 188 | 189 | Controls the research loop by deciding whether to continue gathering information 190 | or to finalize the summary based on the configured maximum number of research loops. 191 | 192 | Args: 193 | state: Current graph state containing the research loop count 194 | config: Configuration for the runnable, including max_research_loops setting 195 | 196 | Returns: 197 | String literal indicating the next node to visit ("web_research" or "finalize_summary") 198 | """ 199 | configurable = Configuration.from_runnable_config(config) 200 | max_research_loops = ( 201 | state.get("max_research_loops") 202 | if state.get("max_research_loops") is not None 203 | else configurable.max_research_loops 204 | ) 205 | if state["is_sufficient"] or state["research_loop_count"] >= max_research_loops: 206 | return "finalize_answer" 207 | else: 208 | return [ 209 | Send( 210 | "web_research", 211 | { 212 | "search_query": follow_up_query, 213 | "id": state["number_of_ran_queries"] + int(idx), 214 | }, 215 | ) 216 | for idx, follow_up_query in enumerate(state["follow_up_queries"]) 217 | ] 218 | 219 | 220 | def finalize_answer(state: OverallState, config: RunnableConfig): 221 | """LangGraph node that finalizes the research summary. 222 | 223 | Prepares the final output by deduplicating and formatting sources, then 224 | combining them with the running summary to create a well-structured 225 | research report with proper citations. 
226 | 227 | Args: 228 | state: Current graph state containing the running summary and sources gathered 229 | 230 | Returns: 231 | Dictionary with state update, including running_summary key containing the formatted final summary with sources 232 | """ 233 | configurable = Configuration.from_runnable_config(config) 234 | reasoning_model = state.get("reasoning_model") or configurable.reasoning_model 235 | 236 | # Format the prompt 237 | current_date = get_current_date() 238 | formatted_prompt = answer_instructions.format( 239 | current_date=current_date, 240 | research_topic=get_research_topic(state["messages"]), 241 | summaries="\n---\n\n".join(state["web_research_result"]), 242 | ) 243 | 244 | # init Reasoning Model, default to Gemini 2.5 Flash 245 | llm = ChatGoogleGenerativeAI( 246 | model=reasoning_model, 247 | temperature=0, 248 | max_retries=2, 249 | api_key=os.getenv("GEMINI_API_KEY"), 250 | ) 251 | result = llm.invoke(formatted_prompt) 252 | 253 | # Replace the short urls with the original urls and add all used urls to the sources_gathered 254 | unique_sources = [] 255 | for source in state["sources_gathered"]: 256 | if source["short_url"] in result.content: 257 | result.content = result.content.replace( 258 | source["short_url"], source["value"] 259 | ) 260 | unique_sources.append(source) 261 | 262 | return { 263 | "messages": [AIMessage(content=result.content)], 264 | "sources_gathered": unique_sources, 265 | } 266 | 267 | 268 | # Create our Agent Graph 269 | builder = StateGraph(OverallState, config_schema=Configuration) 270 | 271 | # Define the nodes we will cycle between 272 | builder.add_node("generate_query", generate_query) 273 | builder.add_node("web_research", web_research) 274 | builder.add_node("reflection", reflection) 275 | builder.add_node("finalize_answer", finalize_answer) 276 | 277 | # Set the entrypoint as `generate_query` 278 | # This means that this node is the first one called 279 | builder.add_edge(START, "generate_query") 280 | # Add 
conditional edge to continue with search queries in a parallel branch 281 | builder.add_conditional_edges( 282 | "generate_query", continue_to_web_research, ["web_research"] 283 | ) 284 | # Reflect on the web research 285 | builder.add_edge("web_research", "reflection") 286 | # Evaluate the research 287 | builder.add_conditional_edges( 288 | "reflection", evaluate_research, ["web_research", "finalize_answer"] 289 | ) 290 | # Finalize the answer 291 | builder.add_edge("finalize_answer", END) 292 | 293 | graph = builder.compile(name="pro-search-agent") 294 | -------------------------------------------------------------------------------- /backend/src/agent/prompts.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | 4 | # Get current date in a readable format 5 | def get_current_date(): 6 | return datetime.now().strftime("%B %d, %Y") 7 | 8 | 9 | query_writer_instructions = """Your goal is to generate sophisticated and diverse web search queries. These queries are intended for an advanced automated web research tool capable of analyzing complex results, following links, and synthesizing information. 10 | 11 | Instructions: 12 | - Always prefer a single search query, only add another query if the original question requests multiple aspects or elements and one query is not enough. 13 | - Each query should focus on one specific aspect of the original question. 14 | - Don't produce more than {number_queries} queries. 15 | - Queries should be diverse, if the topic is broad, generate more than 1 query. 16 | - Don't generate multiple similar queries, 1 is enough. 17 | - Query should ensure that the most current information is gathered. The current date is {current_date}. 
18 | 19 | Format: 20 | - Format your response as a JSON object with ALL three of these exact keys: 21 | - "rationale": Brief explanation of why these queries are relevant 22 | - "query": A list of search queries 23 | 24 | Example: 25 | 26 | Topic: What revenue grew more last year apple stock or the number of people buying an iphone 27 | ```json 28 | {{ 29 | "rationale": "To answer this comparative growth question accurately, we need specific data points on Apple's stock performance and iPhone sales metrics. These queries target the precise financial information needed: company revenue trends, product-specific unit sales figures, and stock price movement over the same fiscal period for direct comparison.", 30 | "query": ["Apple total revenue growth fiscal year 2024", "iPhone unit sales growth fiscal year 2024", "Apple stock price growth fiscal year 2024"], 31 | }} 32 | ``` 33 | 34 | Context: {research_topic}""" 35 | 36 | 37 | web_searcher_instructions = """Conduct targeted Google Searches to gather the most recent, credible information on "{research_topic}" and synthesize it into a verifiable text artifact. 38 | 39 | Instructions: 40 | - Query should ensure that the most current information is gathered. The current date is {current_date}. 41 | - Conduct multiple, diverse searches to gather comprehensive information. 42 | - Consolidate key findings while meticulously tracking the source(s) for each specific piece of information. 43 | - The output should be a well-written summary or report based on your search findings. 44 | - Only include the information found in the search results, don't make up any information. 45 | 46 | Research Topic: 47 | {research_topic} 48 | """ 49 | 50 | reflection_instructions = """You are an expert research assistant analyzing summaries about "{research_topic}". 51 | 52 | Instructions: 53 | - Identify knowledge gaps or areas that need deeper exploration and generate a follow-up query. (1 or multiple). 
54 | - If provided summaries are sufficient to answer the user's question, don't generate a follow-up query. 55 | - If there is a knowledge gap, generate a follow-up query that would help expand your understanding. 56 | - Focus on technical details, implementation specifics, or emerging trends that weren't fully covered. 57 | 58 | Requirements: 59 | - Ensure the follow-up query is self-contained and includes necessary context for web search. 60 | 61 | Output Format: 62 | - Format your response as a JSON object with these exact keys: 63 | - "is_sufficient": true or false 64 | - "knowledge_gap": Describe what information is missing or needs clarification 65 | - "follow_up_queries": Write a specific question to address this gap 66 | 67 | Example: 68 | ```json 69 | {{ 70 | "is_sufficient": true, // or false 71 | "knowledge_gap": "The summary lacks information about performance metrics and benchmarks", // "" if is_sufficient is true 72 | "follow_up_queries": ["What are typical performance benchmarks and metrics used to evaluate [specific technology]?"] // [] if is_sufficient is true 73 | }} 74 | ``` 75 | 76 | Reflect carefully on the Summaries to identify knowledge gaps and produce a follow-up query. Then, produce your output following this JSON format: 77 | 78 | Summaries: 79 | {summaries} 80 | """ 81 | 82 | answer_instructions = """Generate a high-quality answer to the user's question based on the provided summaries. 83 | 84 | Instructions: 85 | - The current date is {current_date}. 86 | - You are the final step of a multi-step research process, don't mention that you are the final step. 87 | - You have access to all the information gathered from the previous steps. 88 | - You have access to the user's question. 89 | - Generate a high-quality answer to the user's question based on the provided summaries and the user's question. 90 | - you MUST include all the citations from the summaries in the answer correctly. 
91 | 92 | User Context: 93 | - {research_topic} 94 | 95 | Summaries: 96 | {summaries}""" 97 | -------------------------------------------------------------------------------- /backend/src/agent/state.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from dataclasses import dataclass, field 4 | from typing import TypedDict 5 | 6 | from langgraph.graph import add_messages 7 | from typing_extensions import Annotated 8 | 9 | 10 | import operator 11 | from dataclasses import dataclass, field 12 | from typing_extensions import Annotated 13 | 14 | 15 | class OverallState(TypedDict): 16 | messages: Annotated[list, add_messages] 17 | search_query: Annotated[list, operator.add] 18 | web_research_result: Annotated[list, operator.add] 19 | sources_gathered: Annotated[list, operator.add] 20 | initial_search_query_count: int 21 | max_research_loops: int 22 | research_loop_count: int 23 | reasoning_model: str 24 | 25 | 26 | class ReflectionState(TypedDict): 27 | is_sufficient: bool 28 | knowledge_gap: str 29 | follow_up_queries: Annotated[list, operator.add] 30 | research_loop_count: int 31 | number_of_ran_queries: int 32 | 33 | 34 | class Query(TypedDict): 35 | query: str 36 | rationale: str 37 | 38 | 39 | class QueryGenerationState(TypedDict): 40 | query_list: list[Query] 41 | 42 | 43 | class WebSearchState(TypedDict): 44 | search_query: str 45 | id: str 46 | 47 | 48 | @dataclass(kw_only=True) 49 | class SearchStateOutput: 50 | running_summary: str = field(default=None) # Final report 51 | -------------------------------------------------------------------------------- /backend/src/agent/tools_and_schemas.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from pydantic import BaseModel, Field 3 | 4 | 5 | class SearchQueryList(BaseModel): 6 | query: List[str] = Field( 7 | description="A list of search queries to be used for web 
research." 8 | ) 9 | rationale: str = Field( 10 | description="A brief explanation of why these queries are relevant to the research topic." 11 | ) 12 | 13 | 14 | class Reflection(BaseModel): 15 | is_sufficient: bool = Field( 16 | description="Whether the provided summaries are sufficient to answer the user's question." 17 | ) 18 | knowledge_gap: str = Field( 19 | description="A description of what information is missing or needs clarification." 20 | ) 21 | follow_up_queries: List[str] = Field( 22 | description="A list of follow-up queries to address the knowledge gap." 23 | ) 24 | -------------------------------------------------------------------------------- /backend/src/agent/utils.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List 2 | from langchain_core.messages import AnyMessage, AIMessage, HumanMessage 3 | 4 | 5 | def get_research_topic(messages: List[AnyMessage]) -> str: 6 | """ 7 | Get the research topic from the messages. 8 | """ 9 | # check if request has a history and combine the messages into a single string 10 | if len(messages) == 1: 11 | research_topic = messages[-1].content 12 | else: 13 | research_topic = "" 14 | for message in messages: 15 | if isinstance(message, HumanMessage): 16 | research_topic += f"User: {message.content}\n" 17 | elif isinstance(message, AIMessage): 18 | research_topic += f"Assistant: {message.content}\n" 19 | return research_topic 20 | 21 | 22 | def resolve_urls(urls_to_resolve: List[Any], id: int) -> Dict[str, str]: 23 | """ 24 | Create a map of the vertex ai search urls (very long) to a short url with a unique id for each url. 25 | Ensures each original URL gets a consistent shortened form while maintaining uniqueness. 
26 | """ 27 | prefix = f"https://vertexaisearch.cloud.google.com/id/" 28 | urls = [site.web.uri for site in urls_to_resolve] 29 | 30 | # Create a dictionary that maps each unique URL to its first occurrence index 31 | resolved_map = {} 32 | for idx, url in enumerate(urls): 33 | if url not in resolved_map: 34 | resolved_map[url] = f"{prefix}{id}-{idx}" 35 | 36 | return resolved_map 37 | 38 | 39 | def insert_citation_markers(text, citations_list): 40 | """ 41 | Inserts citation markers into a text string based on start and end indices. 42 | 43 | Args: 44 | text (str): The original text string. 45 | citations_list (list): A list of dictionaries, where each dictionary 46 | contains 'start_index', 'end_index', and 47 | 'segment_string' (the marker to insert). 48 | Indices are assumed to be for the original text. 49 | 50 | Returns: 51 | str: The text with citation markers inserted. 52 | """ 53 | # Sort citations by end_index in descending order. 54 | # If end_index is the same, secondary sort by start_index descending. 55 | # This ensures that insertions at the end of the string don't affect 56 | # the indices of earlier parts of the string that still need to be processed. 57 | sorted_citations = sorted( 58 | citations_list, key=lambda c: (c["end_index"], c["start_index"]), reverse=True 59 | ) 60 | 61 | modified_text = text 62 | for citation_info in sorted_citations: 63 | # These indices refer to positions in the *original* text, 64 | # but since we iterate from the end, they remain valid for insertion 65 | # relative to the parts of the string already processed. 
66 | end_idx = citation_info["end_index"] 67 | marker_to_insert = "" 68 | for segment in citation_info["segments"]: 69 | marker_to_insert += f" [{segment['label']}]({segment['short_url']})" 70 | # Insert the citation marker at the original end_idx position 71 | modified_text = ( 72 | modified_text[:end_idx] + marker_to_insert + modified_text[end_idx:] 73 | ) 74 | 75 | return modified_text 76 | 77 | 78 | def get_citations(response, resolved_urls_map): 79 | """ 80 | Extracts and formats citation information from a Gemini model's response. 81 | 82 | This function processes the grounding metadata provided in the response to 83 | construct a list of citation objects. Each citation object includes the 84 | start and end indices of the text segment it refers to, and a string 85 | containing formatted markdown links to the supporting web chunks. 86 | 87 | Args: 88 | response: The response object from the Gemini model, expected to have 89 | a structure including `candidates[0].grounding_metadata`. 90 | It also relies on a `resolved_map` being available in its 91 | scope to map chunk URIs to resolved URLs. 92 | 93 | Returns: 94 | list: A list of dictionaries, where each dictionary represents a citation 95 | and has the following keys: 96 | - "start_index" (int): The starting character index of the cited 97 | segment in the original text. Defaults to 0 98 | if not specified. 99 | - "end_index" (int): The character index immediately after the 100 | end of the cited segment (exclusive). 101 | - "segments" (list[str]): A list of individual markdown-formatted 102 | links for each grounding chunk. 103 | - "segment_string" (str): A concatenated string of all markdown- 104 | formatted links for the citation. 105 | Returns an empty list if no valid candidates or grounding supports 106 | are found, or if essential data is missing. 
107 | """ 108 | citations = [] 109 | 110 | # Ensure response and necessary nested structures are present 111 | if not response or not response.candidates: 112 | return citations 113 | 114 | candidate = response.candidates[0] 115 | if ( 116 | not hasattr(candidate, "grounding_metadata") 117 | or not candidate.grounding_metadata 118 | or not hasattr(candidate.grounding_metadata, "grounding_supports") 119 | ): 120 | return citations 121 | 122 | for support in candidate.grounding_metadata.grounding_supports: 123 | citation = {} 124 | 125 | # Ensure segment information is present 126 | if not hasattr(support, "segment") or support.segment is None: 127 | continue # Skip this support if segment info is missing 128 | 129 | start_index = ( 130 | support.segment.start_index 131 | if support.segment.start_index is not None 132 | else 0 133 | ) 134 | 135 | # Ensure end_index is present to form a valid segment 136 | if support.segment.end_index is None: 137 | continue # Skip if end_index is missing, as it's crucial 138 | 139 | # Add 1 to end_index to make it an exclusive end for slicing/range purposes 140 | # (assuming the API provides an inclusive end_index) 141 | citation["start_index"] = start_index 142 | citation["end_index"] = support.segment.end_index 143 | 144 | citation["segments"] = [] 145 | if ( 146 | hasattr(support, "grounding_chunk_indices") 147 | and support.grounding_chunk_indices 148 | ): 149 | for ind in support.grounding_chunk_indices: 150 | try: 151 | chunk = candidate.grounding_metadata.grounding_chunks[ind] 152 | resolved_url = resolved_urls_map.get(chunk.web.uri, None) 153 | citation["segments"].append( 154 | { 155 | "label": chunk.web.title.split(".")[:-1][0], 156 | "short_url": resolved_url, 157 | "value": chunk.web.uri, 158 | } 159 | ) 160 | except (IndexError, AttributeError, NameError): 161 | # Handle cases where chunk, web, uri, or resolved_map might be problematic 162 | # For simplicity, we'll just skip adding this particular segment link 163 | # In 
a production system, you might want to log this. 164 | pass 165 | citations.append(citation) 166 | return citations 167 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | volumes: 2 | langgraph-data: 3 | driver: local 4 | services: 5 | langgraph-redis: 6 | image: docker.io/redis:6 7 | healthcheck: 8 | test: redis-cli ping 9 | interval: 5s 10 | timeout: 1s 11 | retries: 5 12 | langgraph-postgres: 13 | image: docker.io/postgres:16 14 | ports: 15 | - "5433:5432" 16 | environment: 17 | POSTGRES_DB: postgres 18 | POSTGRES_USER: postgres 19 | POSTGRES_PASSWORD: postgres 20 | volumes: 21 | - langgraph-data:/var/lib/postgresql/data 22 | healthcheck: 23 | test: pg_isready -U postgres 24 | start_period: 10s 25 | timeout: 1s 26 | retries: 5 27 | interval: 5s 28 | langgraph-api: 29 | image: gemini-fullstack-langgraph 30 | ports: 31 | - "8123:8000" 32 | depends_on: 33 | langgraph-redis: 34 | condition: service_healthy 35 | langgraph-postgres: 36 | condition: service_healthy 37 | environment: 38 | GEMINI_API_KEY: ${GEMINI_API_KEY} 39 | LANGSMITH_API_KEY: ${LANGSMITH_API_KEY} 40 | REDIS_URI: redis://langgraph-redis:6379 41 | POSTGRES_URI: postgres://postgres:postgres@langgraph-postgres:5432/postgres?sslmode=disable 42 | -------------------------------------------------------------------------------- /frontend/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | pnpm-debug.log* 8 | lerna-debug.log* 9 | 10 | node_modules 11 | dist 12 | dist-ssr 13 | *.local 14 | 15 | # Editor directories and files 16 | .vscode/* 17 | !.vscode/extensions.json 18 | .idea 19 | .DS_Store 20 | *.suo 21 | *.ntvs* 22 | *.njsproj 23 | *.sln 24 | *.sw? 
25 | -------------------------------------------------------------------------------- /frontend/components.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://ui.shadcn.com/schema.json", 3 | "style": "new-york", 4 | "rsc": false, 5 | "tsx": true, 6 | "tailwind": { 7 | "config": "", 8 | "css": "src/app.css", 9 | "baseColor": "neutral", 10 | "cssVariables": true, 11 | "prefix": "" 12 | }, 13 | "aliases": { 14 | "components": "@/components", 15 | "utils": "@/lib/utils", 16 | "ui": "@/components/ui", 17 | "lib": "@/lib", 18 | "hooks": "@/hooks" 19 | }, 20 | "iconLibrary": "lucide" 21 | } -------------------------------------------------------------------------------- /frontend/eslint.config.js: -------------------------------------------------------------------------------- 1 | import js from '@eslint/js' 2 | import globals from 'globals' 3 | import reactHooks from 'eslint-plugin-react-hooks' 4 | import reactRefresh from 'eslint-plugin-react-refresh' 5 | import tseslint from 'typescript-eslint' 6 | 7 | export default tseslint.config( 8 | { ignores: ['dist'] }, 9 | { 10 | extends: [js.configs.recommended, ...tseslint.configs.recommended], 11 | files: ['**/*.{ts,tsx}'], 12 | languageOptions: { 13 | ecmaVersion: 2020, 14 | globals: globals.browser, 15 | }, 16 | plugins: { 17 | 'react-hooks': reactHooks, 18 | 'react-refresh': reactRefresh, 19 | }, 20 | rules: { 21 | ...reactHooks.configs.recommended.rules, 22 | 'react-refresh/only-export-components': [ 23 | 'warn', 24 | { allowConstantExport: true }, 25 | ], 26 | }, 27 | }, 28 | ) 29 | -------------------------------------------------------------------------------- /frontend/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Vite + React + TS 8 | 9 | 10 |
11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "frontend", 3 | "private": true, 4 | "version": "0.0.0", 5 | "type": "module", 6 | "scripts": { 7 | "dev": "vite", 8 | "build": "tsc -b && vite build", 9 | "lint": "eslint .", 10 | "preview": "vite preview" 11 | }, 12 | "dependencies": { 13 | "@langchain/core": "^0.3.55", 14 | "@langchain/langgraph-sdk": "^0.0.74", 15 | "@radix-ui/react-scroll-area": "^1.2.8", 16 | "@radix-ui/react-select": "^2.2.4", 17 | "@radix-ui/react-slot": "^1.2.2", 18 | "@radix-ui/react-tabs": "^1.1.11", 19 | "@radix-ui/react-tooltip": "^1.2.6", 20 | "@tailwindcss/vite": "^4.1.5", 21 | "class-variance-authority": "^0.7.1", 22 | "clsx": "^2.1.1", 23 | "lucide-react": "^0.508.0", 24 | "react": "^19.0.0", 25 | "react-dom": "^19.0.0", 26 | "react-markdown": "^9.0.3", 27 | "react-router-dom": "^7.5.3", 28 | "tailwind-merge": "^3.2.0", 29 | "tailwindcss": "^4.1.5" 30 | }, 31 | "devDependencies": { 32 | "@eslint/js": "^9.22.0", 33 | "@types/node": "^22.15.17", 34 | "@types/react": "^19.1.2", 35 | "@types/react-dom": "^19.1.3", 36 | "@vitejs/plugin-react-swc": "^3.9.0", 37 | "eslint": "^9.22.0", 38 | "eslint-plugin-react-hooks": "^5.2.0", 39 | "eslint-plugin-react-refresh": "^0.4.19", 40 | "globals": "^16.0.0", 41 | "tw-animate-css": "^1.2.9", 42 | "typescript": "~5.7.2", 43 | "typescript-eslint": "^8.26.1", 44 | "vite": "^6.3.4" 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /frontend/public/vite.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/App.tsx: -------------------------------------------------------------------------------- 1 | import { useStream } from 
"@langchain/langgraph-sdk/react"; 2 | import type { Message } from "@langchain/langgraph-sdk"; 3 | import { useState, useEffect, useRef, useCallback } from "react"; 4 | import { ProcessedEvent } from "@/components/ActivityTimeline"; 5 | import { WelcomeScreen } from "@/components/WelcomeScreen"; 6 | import { ChatMessagesView } from "@/components/ChatMessagesView"; 7 | 8 | export default function App() { 9 | const [processedEventsTimeline, setProcessedEventsTimeline] = useState< 10 | ProcessedEvent[] 11 | >([]); 12 | const [historicalActivities, setHistoricalActivities] = useState< 13 | Record 14 | >({}); 15 | const scrollAreaRef = useRef(null); 16 | const hasFinalizeEventOccurredRef = useRef(false); 17 | 18 | const thread = useStream<{ 19 | messages: Message[]; 20 | initial_search_query_count: number; 21 | max_research_loops: number; 22 | reasoning_model: string; 23 | }>({ 24 | apiUrl: import.meta.env.DEV 25 | ? "http://localhost:2024" 26 | : "http://localhost:8123", 27 | assistantId: "agent", 28 | messagesKey: "messages", 29 | onFinish: (event: any) => { 30 | console.log(event); 31 | }, 32 | onUpdateEvent: (event: any) => { 33 | let processedEvent: ProcessedEvent | null = null; 34 | if (event.generate_query) { 35 | processedEvent = { 36 | title: "Generating Search Queries", 37 | data: event.generate_query.query_list.join(", "), 38 | }; 39 | } else if (event.web_research) { 40 | const sources = event.web_research.sources_gathered || []; 41 | const numSources = sources.length; 42 | const uniqueLabels = [ 43 | ...new Set(sources.map((s: any) => s.label).filter(Boolean)), 44 | ]; 45 | const exampleLabels = uniqueLabels.slice(0, 3).join(", "); 46 | processedEvent = { 47 | title: "Web Research", 48 | data: `Gathered ${numSources} sources. Related to: ${ 49 | exampleLabels || "N/A" 50 | }.`, 51 | }; 52 | } else if (event.reflection) { 53 | processedEvent = { 54 | title: "Reflection", 55 | data: event.reflection.is_sufficient 56 | ? 
"Search successful, generating final answer." 57 | : `Need more information, searching for ${event.reflection.follow_up_queries.join( 58 | ", " 59 | )}`, 60 | }; 61 | } else if (event.finalize_answer) { 62 | processedEvent = { 63 | title: "Finalizing Answer", 64 | data: "Composing and presenting the final answer.", 65 | }; 66 | hasFinalizeEventOccurredRef.current = true; 67 | } 68 | if (processedEvent) { 69 | setProcessedEventsTimeline((prevEvents) => [ 70 | ...prevEvents, 71 | processedEvent!, 72 | ]); 73 | } 74 | }, 75 | }); 76 | 77 | useEffect(() => { 78 | if (scrollAreaRef.current) { 79 | const scrollViewport = scrollAreaRef.current.querySelector( 80 | "[data-radix-scroll-area-viewport]" 81 | ); 82 | if (scrollViewport) { 83 | scrollViewport.scrollTop = scrollViewport.scrollHeight; 84 | } 85 | } 86 | }, [thread.messages]); 87 | 88 | useEffect(() => { 89 | if ( 90 | hasFinalizeEventOccurredRef.current && 91 | !thread.isLoading && 92 | thread.messages.length > 0 93 | ) { 94 | const lastMessage = thread.messages[thread.messages.length - 1]; 95 | if (lastMessage && lastMessage.type === "ai" && lastMessage.id) { 96 | setHistoricalActivities((prev) => ({ 97 | ...prev, 98 | [lastMessage.id!]: [...processedEventsTimeline], 99 | })); 100 | } 101 | hasFinalizeEventOccurredRef.current = false; 102 | } 103 | }, [thread.messages, thread.isLoading, processedEventsTimeline]); 104 | 105 | const handleSubmit = useCallback( 106 | (submittedInputValue: string, effort: string, model: string) => { 107 | if (!submittedInputValue.trim()) return; 108 | setProcessedEventsTimeline([]); 109 | hasFinalizeEventOccurredRef.current = false; 110 | 111 | // convert effort to, initial_search_query_count and max_research_loops 112 | // low means max 1 loop and 1 query 113 | // medium means max 3 loops and 3 queries 114 | // high means max 10 loops and 5 queries 115 | let initial_search_query_count = 0; 116 | let max_research_loops = 0; 117 | switch (effort) { 118 | case "low": 119 | 
initial_search_query_count = 1; 120 | max_research_loops = 1; 121 | break; 122 | case "medium": 123 | initial_search_query_count = 3; 124 | max_research_loops = 3; 125 | break; 126 | case "high": 127 | initial_search_query_count = 5; 128 | max_research_loops = 10; 129 | break; 130 | } 131 | 132 | const newMessages: Message[] = [ 133 | ...(thread.messages || []), 134 | { 135 | type: "human", 136 | content: submittedInputValue, 137 | id: Date.now().toString(), 138 | }, 139 | ]; 140 | thread.submit({ 141 | messages: newMessages, 142 | initial_search_query_count: initial_search_query_count, 143 | max_research_loops: max_research_loops, 144 | reasoning_model: model, 145 | }); 146 | }, 147 | [thread] 148 | ); 149 | 150 | const handleCancel = useCallback(() => { 151 | thread.stop(); 152 | window.location.reload(); 153 | }, [thread]); 154 | 155 | return ( 156 |
157 |
158 |
163 | {thread.messages.length === 0 ? ( 164 | 169 | ) : ( 170 | 179 | )} 180 |
181 |
182 |
183 | ); 184 | } 185 | -------------------------------------------------------------------------------- /frontend/src/components/ActivityTimeline.tsx: -------------------------------------------------------------------------------- 1 | import { 2 | Card, 3 | CardContent, 4 | CardDescription, 5 | CardHeader, 6 | } from "@/components/ui/card"; 7 | import { ScrollArea } from "@/components/ui/scroll-area"; 8 | import { 9 | Loader2, 10 | Activity, 11 | Info, 12 | Search, 13 | TextSearch, 14 | Brain, 15 | Pen, 16 | ChevronDown, 17 | ChevronUp, 18 | } from "lucide-react"; 19 | import { useEffect, useState } from "react"; 20 | 21 | export interface ProcessedEvent { 22 | title: string; 23 | data: any; 24 | } 25 | 26 | interface ActivityTimelineProps { 27 | processedEvents: ProcessedEvent[]; 28 | isLoading: boolean; 29 | } 30 | 31 | export function ActivityTimeline({ 32 | processedEvents, 33 | isLoading, 34 | }: ActivityTimelineProps) { 35 | const [isTimelineCollapsed, setIsTimelineCollapsed] = 36 | useState(false); 37 | const getEventIcon = (title: string, index: number) => { 38 | if (index === 0 && isLoading && processedEvents.length === 0) { 39 | return ; 40 | } 41 | if (title.toLowerCase().includes("generating")) { 42 | return ; 43 | } else if (title.toLowerCase().includes("thinking")) { 44 | return ; 45 | } else if (title.toLowerCase().includes("reflection")) { 46 | return ; 47 | } else if (title.toLowerCase().includes("research")) { 48 | return ; 49 | } else if (title.toLowerCase().includes("finalizing")) { 50 | return ; 51 | } 52 | return ; 53 | }; 54 | 55 | useEffect(() => { 56 | if (!isLoading && processedEvents.length !== 0) { 57 | setIsTimelineCollapsed(true); 58 | } 59 | }, [isLoading, processedEvents]); 60 | 61 | return ( 62 | 63 | 64 | 65 |
setIsTimelineCollapsed(!isTimelineCollapsed)} 68 | > 69 | Research 70 | {isTimelineCollapsed ? ( 71 | 72 | ) : ( 73 | 74 | )} 75 |
76 |
77 |
78 | {!isTimelineCollapsed && ( 79 | 80 | 81 | {isLoading && processedEvents.length === 0 && ( 82 |
83 |
84 |
85 | 86 |
87 |
88 |

89 | Searching... 90 |

91 |
92 |
93 | )} 94 | {processedEvents.length > 0 ? ( 95 |
96 | {processedEvents.map((eventItem, index) => ( 97 |
98 | {index < processedEvents.length - 1 || 99 | (isLoading && index === processedEvents.length - 1) ? ( 100 |
101 | ) : null} 102 |
103 | {getEventIcon(eventItem.title, index)} 104 |
105 |
106 |

107 | {eventItem.title} 108 |

109 |

110 | {typeof eventItem.data === "string" 111 | ? eventItem.data 112 | : Array.isArray(eventItem.data) 113 | ? (eventItem.data as string[]).join(", ") 114 | : JSON.stringify(eventItem.data)} 115 |

116 |
117 |
118 | ))} 119 | {isLoading && processedEvents.length > 0 && ( 120 |
121 |
122 | 123 |
124 |
125 |

126 | Searching... 127 |

128 |
129 |
130 | )} 131 |
132 | ) : !isLoading ? ( // Only show "No activity" if not loading and no events 133 |
134 | 135 |

No activity to display.

136 |

137 | Timeline will update during processing. 138 |

139 |
140 | ) : null} 141 | 142 | 143 | )} 144 | 145 | ); 146 | } 147 | -------------------------------------------------------------------------------- /frontend/src/components/ChatMessagesView.tsx: -------------------------------------------------------------------------------- 1 | import type React from "react"; 2 | import type { Message } from "@langchain/langgraph-sdk"; 3 | import { ScrollArea } from "@/components/ui/scroll-area"; 4 | import { Loader2, Copy, CopyCheck } from "lucide-react"; 5 | import { InputForm } from "@/components/InputForm"; 6 | import { Button } from "@/components/ui/button"; 7 | import { useState, ReactNode } from "react"; 8 | import ReactMarkdown from "react-markdown"; 9 | import { cn } from "@/lib/utils"; 10 | import { Badge } from "@/components/ui/badge"; 11 | import { 12 | ActivityTimeline, 13 | ProcessedEvent, 14 | } from "@/components/ActivityTimeline"; // Assuming ActivityTimeline is in the same dir or adjust path 15 | 16 | // Markdown component props type from former ReportView 17 | type MdComponentProps = { 18 | className?: string; 19 | children?: ReactNode; 20 | [key: string]: any; 21 | }; 22 | 23 | // Markdown components (from former ReportView.tsx) 24 | const mdComponents = { 25 | h1: ({ className, children, ...props }: MdComponentProps) => ( 26 |

27 | {children} 28 |

29 | ), 30 | h2: ({ className, children, ...props }: MdComponentProps) => ( 31 |

32 | {children} 33 |

34 | ), 35 | h3: ({ className, children, ...props }: MdComponentProps) => ( 36 |

37 | {children} 38 |

39 | ), 40 | p: ({ className, children, ...props }: MdComponentProps) => ( 41 |

42 | {children} 43 |

44 | ), 45 | a: ({ className, children, href, ...props }: MdComponentProps) => ( 46 | 47 | 54 | {children} 55 | 56 | 57 | ), 58 | ul: ({ className, children, ...props }: MdComponentProps) => ( 59 |
    60 | {children} 61 |
62 | ), 63 | ol: ({ className, children, ...props }: MdComponentProps) => ( 64 |
    65 | {children} 66 |
67 | ), 68 | li: ({ className, children, ...props }: MdComponentProps) => ( 69 |
  • 70 | {children} 71 |
  • 72 | ), 73 | blockquote: ({ className, children, ...props }: MdComponentProps) => ( 74 |
    81 | {children} 82 |
    83 | ), 84 | code: ({ className, children, ...props }: MdComponentProps) => ( 85 | 92 | {children} 93 | 94 | ), 95 | pre: ({ className, children, ...props }: MdComponentProps) => ( 96 |
    103 |       {children}
    104 |     
    105 | ), 106 | hr: ({ className, ...props }: MdComponentProps) => ( 107 |
    108 | ), 109 | table: ({ className, children, ...props }: MdComponentProps) => ( 110 |
    111 | 112 | {children} 113 |
    114 |
    115 | ), 116 | th: ({ className, children, ...props }: MdComponentProps) => ( 117 | 124 | {children} 125 | 126 | ), 127 | td: ({ className, children, ...props }: MdComponentProps) => ( 128 | 132 | {children} 133 | 134 | ), 135 | }; 136 | 137 | // Props for HumanMessageBubble 138 | interface HumanMessageBubbleProps { 139 | message: Message; 140 | mdComponents: typeof mdComponents; 141 | } 142 | 143 | // HumanMessageBubble Component 144 | const HumanMessageBubble: React.FC = ({ 145 | message, 146 | mdComponents, 147 | }) => { 148 | return ( 149 |
    152 | 153 | {typeof message.content === "string" 154 | ? message.content 155 | : JSON.stringify(message.content)} 156 | 157 |
    158 | ); 159 | }; 160 | 161 | // Props for AiMessageBubble 162 | interface AiMessageBubbleProps { 163 | message: Message; 164 | historicalActivity: ProcessedEvent[] | undefined; 165 | liveActivity: ProcessedEvent[] | undefined; 166 | isLastMessage: boolean; 167 | isOverallLoading: boolean; 168 | mdComponents: typeof mdComponents; 169 | handleCopy: (text: string, messageId: string) => void; 170 | copiedMessageId: string | null; 171 | } 172 | 173 | // AiMessageBubble Component 174 | const AiMessageBubble: React.FC = ({ 175 | message, 176 | historicalActivity, 177 | liveActivity, 178 | isLastMessage, 179 | isOverallLoading, 180 | mdComponents, 181 | handleCopy, 182 | copiedMessageId, 183 | }) => { 184 | // Determine which activity events to show and if it's for a live loading message 185 | const activityForThisBubble = 186 | isLastMessage && isOverallLoading ? liveActivity : historicalActivity; 187 | const isLiveActivityForThisBubble = isLastMessage && isOverallLoading; 188 | 189 | return ( 190 |
    191 | {activityForThisBubble && activityForThisBubble.length > 0 && ( 192 |
    193 | 197 |
    198 | )} 199 | 200 | {typeof message.content === "string" 201 | ? message.content 202 | : JSON.stringify(message.content)} 203 | 204 | 219 |
    220 | ); 221 | }; 222 | 223 | interface ChatMessagesViewProps { 224 | messages: Message[]; 225 | isLoading: boolean; 226 | scrollAreaRef: React.RefObject; 227 | onSubmit: (inputValue: string, effort: string, model: string) => void; 228 | onCancel: () => void; 229 | liveActivityEvents: ProcessedEvent[]; 230 | historicalActivities: Record; 231 | } 232 | 233 | export function ChatMessagesView({ 234 | messages, 235 | isLoading, 236 | scrollAreaRef, 237 | onSubmit, 238 | onCancel, 239 | liveActivityEvents, 240 | historicalActivities, 241 | }: ChatMessagesViewProps) { 242 | const [copiedMessageId, setCopiedMessageId] = useState(null); 243 | 244 | const handleCopy = async (text: string, messageId: string) => { 245 | try { 246 | await navigator.clipboard.writeText(text); 247 | setCopiedMessageId(messageId); 248 | setTimeout(() => setCopiedMessageId(null), 2000); // Reset after 2 seconds 249 | } catch (err) { 250 | console.error("Failed to copy text: ", err); 251 | } 252 | }; 253 | 254 | return ( 255 |
    256 | 257 |
    258 | {messages.map((message, index) => { 259 | const isLast = index === messages.length - 1; 260 | return ( 261 |
    262 |
    267 | {message.type === "human" ? ( 268 | 272 | ) : ( 273 | 283 | )} 284 |
    285 |
    286 | ); 287 | })} 288 | {isLoading && 289 | (messages.length === 0 || 290 | messages[messages.length - 1].type === "human") && ( 291 |
    292 | {" "} 293 | {/* AI message row structure */} 294 |
    295 | {liveActivityEvents.length > 0 ? ( 296 |
    297 | 301 |
    302 | ) : ( 303 |
    304 | 305 | Processing... 306 |
    307 | )} 308 |
    309 |
    310 | )} 311 |
    312 |
    313 | 0} 318 | /> 319 |
    320 | ); 321 | } 322 | -------------------------------------------------------------------------------- /frontend/src/components/InputForm.tsx: -------------------------------------------------------------------------------- 1 | import { useState } from "react"; 2 | import { Button } from "@/components/ui/button"; 3 | import { SquarePen, Brain, Send, StopCircle, Zap, Cpu } from "lucide-react"; 4 | import { Textarea } from "@/components/ui/textarea"; 5 | import { 6 | Select, 7 | SelectContent, 8 | SelectItem, 9 | SelectTrigger, 10 | SelectValue, 11 | } from "@/components/ui/select"; 12 | 13 | // Updated InputFormProps 14 | interface InputFormProps { 15 | onSubmit: (inputValue: string, effort: string, model: string) => void; 16 | onCancel: () => void; 17 | isLoading: boolean; 18 | hasHistory: boolean; 19 | } 20 | 21 | export const InputForm: React.FC = ({ 22 | onSubmit, 23 | onCancel, 24 | isLoading, 25 | hasHistory, 26 | }) => { 27 | const [internalInputValue, setInternalInputValue] = useState(""); 28 | const [effort, setEffort] = useState("medium"); 29 | const [model, setModel] = useState("gemini-2.5-flash-preview-04-17"); 30 | 31 | const handleInternalSubmit = (e?: React.FormEvent) => { 32 | if (e) e.preventDefault(); 33 | if (!internalInputValue.trim()) return; 34 | onSubmit(internalInputValue, effort, model); 35 | setInternalInputValue(""); 36 | }; 37 | 38 | const handleInternalKeyDown = ( 39 | e: React.KeyboardEvent 40 | ) => { 41 | if (e.key === "Enter" && !e.shiftKey) { 42 | e.preventDefault(); 43 | handleInternalSubmit(); 44 | } 45 | }; 46 | 47 | const isSubmitDisabled = !internalInputValue.trim() || isLoading; 48 | 49 | return ( 50 |
    54 |
    59 |