├── .dockerignore ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ ├── feature_request.yml │ └── question.yml ├── pull_request_template.md └── workflows │ └── linting.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── README-zh.md ├── README.assets ├── b2aaf634151b4706892693ffb43d9093.png └── iShot_2025-03-23_12.40.08.png ├── README.md ├── assets └── logo.png ├── config.ini.example ├── docker-compose.yml ├── docs ├── Algorithm.md └── DockerDeployment.md ├── env.example ├── examples ├── .env.oai.example ├── batch_eval.py ├── copy_llm_cache_to_another_storage.py ├── generate_query.py ├── get_all_edges_nx.py ├── graph_visual_with_html.py ├── graph_visual_with_neo4j.py ├── insert_custom_kg.py ├── lightrag_azure_openai_demo.py ├── lightrag_bedrock_demo.py ├── lightrag_gemini_demo.py ├── lightrag_gemini_demo_no_tiktoken.py ├── lightrag_gemini_track_token_demo.py ├── lightrag_hf_demo.py ├── lightrag_llamaindex_direct_demo.py ├── lightrag_llamaindex_litellm_demo.py ├── lightrag_lmdeploy_demo.py ├── lightrag_multi_model_all_modes_demo.py ├── lightrag_nvidia_demo.py ├── lightrag_ollama_age_demo.py ├── lightrag_ollama_demo.py ├── lightrag_ollama_gremlin_demo.py ├── lightrag_ollama_neo4j_milvus_mongo_demo.py ├── lightrag_openai_compatible_demo.py ├── lightrag_openai_compatible_demo_embedding_cache.py ├── lightrag_openai_demo.py ├── lightrag_openai_mongodb_graph_demo.py ├── lightrag_openai_neo4j_milvus_redis_demo.py ├── lightrag_siliconcloud_demo.py ├── lightrag_siliconcloud_track_token_demo.py ├── lightrag_tidb_demo.py ├── lightrag_zhipu_demo.py ├── lightrag_zhipu_postgres_demo.py ├── openai_README.md ├── openai_README_zh.md ├── query_keyword_separation_example.py ├── test.py ├── test_chromadb.py ├── test_faiss.py ├── test_neo4j.py ├── test_postgres.py └── vram_management_demo.py ├── lightrag-api ├── lightrag.service.example ├── lightrag ├── __init__.py ├── api │ ├── .env.aoi.example │ ├── 
.gitignore │ ├── README-zh.md │ ├── README.assets │ │ ├── image-20250323122538997.png │ │ ├── image-20250323122754387.png │ │ ├── image-20250323123011220.png │ │ └── image-20250323194750379.png │ ├── README.md │ ├── __init__.py │ ├── auth.py │ ├── config.py │ ├── docs │ │ └── LightRagWithPostGRESQL.md │ ├── gunicorn_config.py │ ├── lightrag_server.py │ ├── requirements.txt │ ├── routers │ │ ├── __init__.py │ │ ├── document_routes.py │ │ ├── graph_routes.py │ │ ├── ollama_api.py │ │ └── query_routes.py │ ├── run_with_gunicorn.py │ ├── utils_api.py │ └── webui │ │ ├── assets │ │ ├── _basePickBy-DknajloF.js │ │ ├── _baseUniq-CAP6r7gD.js │ │ ├── architectureDiagram-IEHRJDOE-D0cK9xQH.js │ │ ├── blockDiagram-JOT3LUYC-Cw4HkC9Z.js │ │ ├── c4Diagram-VJAJSXHY-BAi4HsbA.js │ │ ├── chunk-4BMEZGHF-DpCyXA5Y.js │ │ ├── chunk-A2AXSNBT-CuE3TSRI.js │ │ ├── chunk-AEK57VVT-WVCu-Vg-.js │ │ ├── chunk-D6G4REZN-Dtz_FViS.js │ │ ├── chunk-RZ5BOZE2-S9T1dBtP.js │ │ ├── chunk-XZIHB7SX-CsCFuiVk.js │ │ ├── classDiagram-GIVACNV2-Cc9VoQHS.js │ │ ├── classDiagram-v2-COTLJTTW-Cc9VoQHS.js │ │ ├── clone-ChJdlG_Y.js │ │ ├── cytoscape.esm-CfBqOv7Q.js │ │ ├── dagre-OKDRZEBW-EaSrFqm9.js │ │ ├── diagram-SSKATNLV-BftWRpGp.js │ │ ├── diagram-VNBRO52H-Cxox9wsZ.js │ │ ├── erDiagram-Q7BY3M3F-DVDK5YlR.js │ │ ├── feature-documents-2vh9tFSJ.js │ │ ├── feature-graph-B_AW4YHh.js │ │ ├── feature-graph-BipNuM18.css │ │ ├── feature-retrieval-BGi6fXB_.js │ │ ├── flowDiagram-4HSFHLVR-2Ie6oHBG.js │ │ ├── ganttDiagram-APWFNJXF-2ggMU9nf.js │ │ ├── gitGraphDiagram-7IBYFJ6S-BFd9vKRJ.js │ │ ├── graph-BZstOaAa.js │ │ ├── graph-vendor-B-X5JegA.js │ │ ├── index-DsHQCgEh.css │ │ ├── index-OgaeGQbi.js │ │ ├── infoDiagram-PH2N3AL5-bBE-CQNo.js │ │ ├── journeyDiagram-U35MCT3I-ChW6RTD3.js │ │ ├── kanban-definition-NDS4AKOZ-DScuzVwU.js │ │ ├── layout-CrYOZt5o.js │ │ ├── markdown-vendor-BBaHfVvE.js │ │ ├── mermaid-vendor-BUVhGKJz.js │ │ ├── mindmap-definition-ALO5MXBD-6rJVQCdz.js │ │ ├── pieDiagram-IB7DONF6-DLQwfe3c.js │ │ ├── 
quadrantDiagram-7GDLP6J5-C3ZI3EgJ.js │ │ ├── radar-MK3ICKWK-G3-HZCUi.js │ │ ├── react-vendor-DEwriMA6.js │ │ ├── requirementDiagram-KVF5MWMF-C1u-ylie.js │ │ ├── sankeyDiagram-QLVOVGJD-BM2DhXmK.js │ │ ├── sequenceDiagram-X6HHIX6F-DN0k27_p.js │ │ ├── stateDiagram-DGXRK772-CEAzabU9.js │ │ ├── stateDiagram-v2-YXO3MK2T-2rq8ws-h.js │ │ ├── timeline-definition-BDJGKUSR-BJI44F8t.js │ │ ├── ui-vendor-CeCm8EER.js │ │ ├── utils-vendor-BysuhMZA.js │ │ └── xychartDiagram-VJFVF3MP-DvmEcKlh.js │ │ ├── index.html │ │ └── logo.png ├── base.py ├── exceptions.py ├── kg │ ├── __init__.py │ ├── age_impl.py │ ├── chroma_impl.py │ ├── faiss_impl.py │ ├── gremlin_impl.py │ ├── json_doc_status_impl.py │ ├── json_kv_impl.py │ ├── milvus_impl.py │ ├── mongo_impl.py │ ├── nano_vector_db_impl.py │ ├── neo4j_impl.py │ ├── networkx_impl.py │ ├── postgres_impl.py │ ├── qdrant_impl.py │ ├── redis_impl.py │ ├── shared_storage.py │ └── tidb_impl.py ├── lightrag.py ├── llm.py ├── llm │ ├── Readme.md │ ├── __init__.py │ ├── anthropic.py │ ├── azure_openai.py │ ├── bedrock.py │ ├── hf.py │ ├── jina.py │ ├── llama_index_impl.py │ ├── lmdeploy.py │ ├── lollms.py │ ├── nvidia_openai.py │ ├── ollama.py │ ├── openai.py │ ├── siliconcloud.py │ └── zhipu.py ├── namespace.py ├── operate.py ├── prompt.py ├── tools │ ├── __init__.py │ └── lightrag_visualizer │ │ ├── README-zh.md │ │ ├── README.md │ │ ├── __init__.py │ │ ├── assets │ │ ├── Geist-Regular.ttf │ │ ├── LICENSE - Geist.txt │ │ ├── LICENSE - SmileySans.txt │ │ ├── SmileySans-Oblique.ttf │ │ └── place_font_here │ │ ├── graph_visualizer.py │ │ └── requirements.txt ├── types.py ├── utils.py └── utils_graph.py ├── lightrag_webui ├── .gitignore ├── .prettierrc.json ├── README.md ├── bun.lock ├── components.json ├── env.development.smaple ├── env.local.sample ├── eslint.config.js ├── index.html ├── package.json ├── public │ └── logo.png ├── src │ ├── App.tsx │ ├── AppRouter.tsx │ ├── api │ │ └── lightrag.ts │ ├── components │ │ ├── ApiKeyAlert.tsx │ │ ├── 
AppSettings.tsx │ │ ├── LanguageToggle.tsx │ │ ├── Root.tsx │ │ ├── ThemeProvider.tsx │ │ ├── ThemeToggle.tsx │ │ ├── documents │ │ │ ├── ClearDocumentsDialog.tsx │ │ │ ├── PipelineStatusDialog.tsx │ │ │ └── UploadDocumentsDialog.tsx │ │ ├── graph │ │ │ ├── EditablePropertyRow.tsx │ │ │ ├── FocusOnNode.tsx │ │ │ ├── FullScreenControl.tsx │ │ │ ├── GraphControl.tsx │ │ │ ├── GraphLabels.tsx │ │ │ ├── GraphSearch.tsx │ │ │ ├── LayoutsControl.tsx │ │ │ ├── Legend.tsx │ │ │ ├── LegendButton.tsx │ │ │ ├── PropertiesView.tsx │ │ │ ├── PropertyEditDialog.tsx │ │ │ ├── PropertyRowComponents.tsx │ │ │ ├── Settings.tsx │ │ │ ├── SettingsDisplay.tsx │ │ │ └── ZoomControl.tsx │ │ ├── retrieval │ │ │ ├── ChatMessage.tsx │ │ │ └── QuerySettings.tsx │ │ ├── status │ │ │ ├── StatusCard.tsx │ │ │ ├── StatusDialog.tsx │ │ │ └── StatusIndicator.tsx │ │ └── ui │ │ │ ├── Alert.tsx │ │ │ ├── AlertDialog.tsx │ │ │ ├── AsyncSearch.tsx │ │ │ ├── AsyncSelect.tsx │ │ │ ├── Badge.tsx │ │ │ ├── Button.tsx │ │ │ ├── Card.tsx │ │ │ ├── Checkbox.tsx │ │ │ ├── Command.tsx │ │ │ ├── DataTable.tsx │ │ │ ├── Dialog.tsx │ │ │ ├── EmptyCard.tsx │ │ │ ├── FileUploader.tsx │ │ │ ├── Input.tsx │ │ │ ├── NumberInput.tsx │ │ │ ├── Popover.tsx │ │ │ ├── Progress.tsx │ │ │ ├── ScrollArea.tsx │ │ │ ├── Select.tsx │ │ │ ├── Separator.tsx │ │ │ ├── TabContent.tsx │ │ │ ├── Table.tsx │ │ │ ├── Tabs.tsx │ │ │ ├── Text.tsx │ │ │ └── Tooltip.tsx │ ├── contexts │ │ ├── TabVisibilityProvider.tsx │ │ ├── context.ts │ │ ├── types.ts │ │ └── useTabVisibility.ts │ ├── features │ │ ├── ApiSite.tsx │ │ ├── DocumentManager.tsx │ │ ├── GraphViewer.tsx │ │ ├── LoginPage.tsx │ │ ├── RetrievalTesting.tsx │ │ └── SiteHeader.tsx │ ├── hooks │ │ ├── useDebounce.tsx │ │ ├── useLightragGraph.tsx │ │ ├── useRandomGraph.tsx │ │ └── useTheme.tsx │ ├── i18n.ts │ ├── index.css │ ├── lib │ │ ├── constants.ts │ │ └── utils.ts │ ├── locales │ │ ├── ar.json │ │ ├── en.json │ │ ├── fr.json │ │ ├── zh.json │ │ └── zh_TW.json │ ├── main.tsx │ 
├── services │ │ └── navigation.ts │ ├── stores │ │ ├── graph.ts │ │ ├── settings.ts │ │ └── state.ts │ └── vite-env.d.ts ├── tailwind.config.js ├── tsconfig.json └── vite.config.ts ├── reproduce ├── Step_0.py ├── Step_1.py ├── Step_1_openai_compatible.py ├── Step_2.py ├── Step_3.py └── Step_3_openai_compatible.py ├── requirements.txt ├── setup.py └── tests ├── test_graph_storage.py └── test_lightrag_ollama_chat.py /.dockerignore: -------------------------------------------------------------------------------- 1 | # Python-related files and directories 2 | __pycache__ 3 | .cache 4 | 5 | # Virtual environment directories 6 | *.venv 7 | 8 | # Env 9 | env/ 10 | *.env* 11 | .env_example 12 | 13 | # Distribution / build files 14 | site 15 | dist/ 16 | build/ 17 | .eggs/ 18 | *.egg-info/ 19 | *.tgz 20 | *.tar.gz 21 | 22 | # Exclude siles and folders 23 | *.yml 24 | .dockerignore 25 | Dockerfile 26 | Makefile 27 | 28 | # Exclude other projects 29 | /tests 30 | /scripts 31 | 32 | # Python version manager file 33 | .python-version 34 | 35 | # Reports 36 | *.coverage/ 37 | *.log 38 | log/ 39 | *.logfire 40 | 41 | # Cache 42 | .cache/ 43 | .mypy_cache 44 | .pytest_cache 45 | .ruff_cache 46 | .gradio 47 | .logfire 48 | temp/ 49 | 50 | # MacOS-related files 51 | .DS_Store 52 | 53 | # VS Code settings (local configuration files) 54 | .vscode 55 | 56 | # file 57 | TODO.md 58 | 59 | # Exclude Git-related files 60 | .git 61 | .github 62 | .gitignore 63 | .pre-commit-config.yaml 64 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | lightrag/api/webui/** binary 2 | lightrag/api/webui/** linguist-generated 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Bug Report 2 | description: File a 
bug report 3 | title: "[Bug]:" 4 | labels: ["bug", "triage"] 5 | 6 | body: 7 | - type: checkboxes 8 | id: existingcheck 9 | attributes: 10 | label: Do you need to file an issue? 11 | description: Please help us manage our time by avoiding duplicates and common bugs with the steps below. 12 | options: 13 | - label: I have searched the existing issues and this bug is not already filed. 14 | - label: I believe this is a legitimate bug, not just a question or feature request. 15 | - type: textarea 16 | id: description 17 | attributes: 18 | label: Describe the bug 19 | description: A clear and concise description of what the bug is. 20 | placeholder: What went wrong? 21 | - type: textarea 22 | id: reproduce 23 | attributes: 24 | label: Steps to reproduce 25 | description: Steps to reproduce the behavior. 26 | placeholder: How can we replicate the issue? 27 | - type: textarea 28 | id: expected_behavior 29 | attributes: 30 | label: Expected Behavior 31 | description: A clear and concise description of what you expected to happen. 32 | placeholder: What should have happened? 33 | - type: textarea 34 | id: configused 35 | attributes: 36 | label: LightRAG Config Used 37 | description: The LightRAG configuration used for the run. 38 | placeholder: The settings content or LightRAG configuration 39 | value: | 40 | # Paste your config here 41 | - type: textarea 42 | id: screenshotslogs 43 | attributes: 44 | label: Logs and screenshots 45 | description: If applicable, add screenshots and logs to help explain your problem. 46 | placeholder: Add logs and screenshots here 47 | - type: textarea 48 | id: additional_information 49 | attributes: 50 | label: Additional Information 51 | description: | 52 | - LightRAG Version: e.g., v0.1.1 53 | - Operating System: e.g., Windows 10, Ubuntu 20.04 54 | - Python Version: e.g., 3.8 55 | - Related Issues: e.g., #1 56 | - Any other relevant information. 
57 | value: | 58 | - LightRAG Version: 59 | - Operating System: 60 | - Python Version: 61 | - Related Issues: 62 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: File a feature request 3 | labels: ["enhancement"] 4 | title: "[Feature Request]:" 5 | 6 | body: 7 | - type: checkboxes 8 | id: existingcheck 9 | attributes: 10 | label: Do you need to file a feature request? 11 | description: Please help us manage our time by avoiding duplicates and common feature request with the steps below. 12 | options: 13 | - label: I have searched the existing feature request and this feature request is not already filed. 14 | - label: I believe this is a legitimate feature request, not just a question or bug. 15 | - type: textarea 16 | id: feature_request_description 17 | attributes: 18 | label: Feature Request Description 19 | description: A clear and concise description of the feature request you would like. 20 | placeholder: What this feature request add more or improve? 21 | - type: textarea 22 | id: additional_context 23 | attributes: 24 | label: Additional Context 25 | description: Add any other context or screenshots about the feature request here. 
26 | placeholder: Any additional information 27 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.yml: -------------------------------------------------------------------------------- 1 | name: Question 2 | description: Ask a general question 3 | labels: ["question"] 4 | title: "[Question]:" 5 | 6 | body: 7 | - type: checkboxes 8 | id: existingcheck 9 | attributes: 10 | label: Do you need to ask a question? 11 | description: Please help us manage our time by avoiding duplicates and common questions with the steps below. 12 | options: 13 | - label: I have searched the existing question and discussions and this question is not already answered. 14 | - label: I believe this is a legitimate question, not just a bug or feature request. 15 | - type: textarea 16 | id: question 17 | attributes: 18 | label: Your Question 19 | description: A clear and concise description of your question. 20 | placeholder: What is your question? 21 | - type: textarea 22 | id: context 23 | attributes: 24 | label: Additional Context 25 | description: Provide any additional context or details that might help us understand your question better. 26 | placeholder: Add any relevant information here 27 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 10 | 11 | ## Description 12 | 13 | [Briefly describe the changes made in this pull request.] 14 | 15 | ## Related Issues 16 | 17 | [Reference any related issues or tasks addressed by this pull request.] 18 | 19 | ## Changes Made 20 | 21 | [List the specific changes made in this pull request.] 
22 | 23 | ## Checklist 24 | 25 | - [ ] Changes tested locally 26 | - [ ] Code reviewed 27 | - [ ] Documentation updated (if necessary) 28 | - [ ] Unit tests added (if applicable) 29 | 30 | ## Additional Notes 31 | 32 | [Add any additional notes or context for the reviewer(s).] 33 | -------------------------------------------------------------------------------- /.github/workflows/linting.yaml: -------------------------------------------------------------------------------- 1 | name: Linting and Formatting 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | lint-and-format: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - name: Checkout code 17 | uses: actions/checkout@v2 18 | 19 | - name: Set up Python 20 | uses: actions/setup-python@v2 21 | with: 22 | python-version: '3.x' 23 | 24 | - name: Install dependencies 25 | run: | 26 | python -m pip install --upgrade pip 27 | pip install pre-commit 28 | 29 | - name: Run pre-commit 30 | run: pre-commit run --all-files --show-diff-on-failure 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python-related files 2 | __pycache__/ 3 | *.py[cod] 4 | *.egg-info/ 5 | .eggs/ 6 | *.tgz 7 | *.tar.gz 8 | *.ini 9 | 10 | # Virtual Environment 11 | .venv/ 12 | env/ 13 | venv/ 14 | *.env* 15 | .env_example 16 | 17 | # Build / Distribution 18 | dist/ 19 | build/ 20 | site/ 21 | 22 | # Logs / Reports 23 | *.log 24 | *.log.* 25 | *.logfire 26 | *.coverage/ 27 | log/ 28 | 29 | # Caches 30 | .cache/ 31 | .mypy_cache/ 32 | .pytest_cache/ 33 | .ruff_cache/ 34 | .gradio/ 35 | .history/ 36 | temp/ 37 | 38 | # IDE / Editor Files 39 | .idea/ 40 | .vscode/ 41 | .vscode/settings.json 42 | 43 | # Framework-specific files 44 | local_neo4jWorkDir/ 45 | neo4jWorkDir/ 46 | 47 | # Data & Storage 48 | inputs/ 49 | rag_storage/ 50 | examples/input/ 51 | 
examples/output/ 52 | 53 | # Miscellaneous 54 | .DS_Store 55 | TODO.md 56 | ignore_this.txt 57 | *.ignore.* 58 | 59 | # Project-specific files 60 | dickens*/ 61 | book.txt 62 | lightrag-dev/ 63 | gui/ 64 | 65 | # unit-test files 66 | test_* 67 | 68 | # Cline files 69 | memory-bank/ 70 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v5.0.0 4 | hooks: 5 | - id: trailing-whitespace 6 | exclude: ^lightrag/api/webui/ 7 | - id: end-of-file-fixer 8 | exclude: ^lightrag/api/webui/ 9 | - id: requirements-txt-fixer 10 | exclude: ^lightrag/api/webui/ 11 | 12 | 13 | - repo: https://github.com/astral-sh/ruff-pre-commit 14 | rev: v0.6.4 15 | hooks: 16 | - id: ruff-format 17 | exclude: ^lightrag/api/webui/ 18 | - id: ruff 19 | args: [--fix, --ignore=E402] 20 | exclude: ^lightrag/api/webui/ 21 | 22 | 23 | - repo: https://github.com/mgedmin/check-manifest 24 | rev: "0.49" 25 | hooks: 26 | - id: check-manifest 27 | stages: [manual] 28 | exclude: ^lightrag/api/webui/ 29 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build stage 2 | FROM python:3.11-slim AS builder 3 | 4 | WORKDIR /app 5 | 6 | # Install Rust and required build dependencies 7 | RUN apt-get update && apt-get install -y \ 8 | curl \ 9 | build-essential \ 10 | pkg-config \ 11 | && rm -rf /var/lib/apt/lists/* \ 12 | && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \ 13 | && . $HOME/.cargo/env 14 | 15 | # Copy only requirements files first to leverage Docker cache 16 | COPY requirements.txt . 
17 | COPY lightrag/api/requirements.txt ./lightrag/api/ 18 | 19 | # Install dependencies 20 | ENV PATH="/root/.cargo/bin:${PATH}" 21 | RUN pip install --user --no-cache-dir -r requirements.txt 22 | RUN pip install --user --no-cache-dir -r lightrag/api/requirements.txt 23 | 24 | # Final stage 25 | FROM python:3.11-slim 26 | 27 | WORKDIR /app 28 | 29 | # Copy only necessary files from builder 30 | COPY --from=builder /root/.local /root/.local 31 | COPY ./lightrag ./lightrag 32 | COPY setup.py . 33 | 34 | RUN pip install . 35 | # Make sure scripts in .local are usable 36 | ENV PATH=/root/.local/bin:$PATH 37 | 38 | # Create necessary directories 39 | RUN mkdir -p /app/data/rag_storage /app/data/inputs 40 | 41 | # Docker data directories 42 | ENV WORKING_DIR=/app/data/rag_storage 43 | ENV INPUT_DIR=/app/data/inputs 44 | 45 | # Expose the default port 46 | EXPOSE 9621 47 | 48 | # Set entrypoint 49 | ENTRYPOINT ["python", "-m", "lightrag.api.lightrag_server"] 50 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 LightRAG Team 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include lightrag/api/webui * 2 | -------------------------------------------------------------------------------- /README.assets/b2aaf634151b4706892693ffb43d9093.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/README.assets/b2aaf634151b4706892693ffb43d9093.png -------------------------------------------------------------------------------- /README.assets/iShot_2025-03-23_12.40.08.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/README.assets/iShot_2025-03-23_12.40.08.png -------------------------------------------------------------------------------- /assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/assets/logo.png -------------------------------------------------------------------------------- /config.ini.example: -------------------------------------------------------------------------------- 1 | [neo4j] 2 | uri = neo4j+s://xxxxxxxx.databases.neo4j.io 3 | username = neo4j 4 | password = 
your-password 5 | 6 | [mongodb] 7 | uri = mongodb+srv://name:password@your-cluster-address 8 | database = lightrag 9 | 10 | [redis] 11 | uri=redis://localhost:6379/1 12 | 13 | [qdrant] 14 | uri = http://localhost:16333 15 | 16 | [postgres] 17 | host = localhost 18 | port = 5432 19 | user = your_username 20 | password = your_password 21 | database = your_database 22 | workspace = default # 可选,默认为default 23 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | lightrag: 3 | build: . 4 | ports: 5 | - "${PORT:-9621}:9621" 6 | volumes: 7 | - ./data/rag_storage:/app/data/rag_storage 8 | - ./data/inputs:/app/data/inputs 9 | - ./config.ini:/app/config.ini 10 | - ./.env:/app/.env 11 | env_file: 12 | - .env 13 | restart: unless-stopped 14 | extra_hosts: 15 | - "host.docker.internal:host-gateway" 16 | 17 | neo4j: 18 | image: neo4j:5.26.4-community 19 | container_name: lightrag-server_neo4j-community 20 | restart: always 21 | ports: 22 | - "7474:7474" 23 | - "7687:7687" 24 | environment: 25 | - NEO4J_AUTH=${NEO4J_USERNAME}/${NEO4J_PASSWORD} 26 | - NEO4J_apoc_export_file_enabled=true 27 | - NEO4J_server_bolt_listen__address=0.0.0.0:7687 28 | - NEO4J_server_bolt_advertised__address=neo4j:7687 29 | volumes: 30 | - ./neo4j/plugins:/var/lib/neo4j/plugins 31 | - lightrag_neo4j_import:/var/lib/neo4j/import 32 | - lightrag_neo4j_data:/data 33 | - lightrag_neo4j_backups:/backups 34 | extra_hosts: 35 | - "host.docker.internal:host-gateway" 36 | 37 | volumes: 38 | lightrag_neo4j_import: 39 | lightrag_neo4j_data: 40 | lightrag_neo4j_backups: 41 | -------------------------------------------------------------------------------- /docs/Algorithm.md: -------------------------------------------------------------------------------- 1 | ![LightRAG Indexing 
Flowchart](https://learnopencv.com/wp-content/uploads/2024/11/LightRAG-VectorDB-Json-KV-Store-Indexing-Flowchart-scaled.jpg) 2 | *Figure 1: LightRAG Indexing Flowchart - Img Caption : [Source](https://learnopencv.com/lightrag/)* 3 | ![LightRAG Retrieval and Querying Flowchart](https://learnopencv.com/wp-content/uploads/2024/11/LightRAG-Querying-Flowchart-Dual-Level-Retrieval-Generation-Knowledge-Graphs-scaled.jpg) 4 | *Figure 2: LightRAG Retrieval and Querying Flowchart - Img Caption : [Source](https://learnopencv.com/lightrag/)* 5 | -------------------------------------------------------------------------------- /examples/.env.oai.example: -------------------------------------------------------------------------------- 1 | AZURE_OPENAI_API_VERSION=2024-08-01-preview 2 | AZURE_OPENAI_DEPLOYMENT=gpt-4o 3 | AZURE_OPENAI_API_KEY=myapikey 4 | AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com 5 | 6 | AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large 7 | AZURE_EMBEDDING_API_VERSION=2023-05-15 8 | -------------------------------------------------------------------------------- /examples/copy_llm_cache_to_another_storage.py: -------------------------------------------------------------------------------- 1 | """ 2 | Sometimes you need to switch a storage solution, but you want to save LLM token and time. 3 | This handy script helps you to copy the LLM caches from one storage solution to another. 
4 | (Not all the storage impl are supported) 5 | """ 6 | 7 | import asyncio 8 | import logging 9 | import os 10 | from dotenv import load_dotenv 11 | 12 | from lightrag.kg.postgres_impl import PostgreSQLDB, PGKVStorage 13 | from lightrag.kg.json_kv_impl import JsonKVStorage 14 | from lightrag.namespace import NameSpace 15 | 16 | load_dotenv() 17 | ROOT_DIR = os.environ.get("ROOT_DIR") 18 | WORKING_DIR = f"{ROOT_DIR}/dickens" 19 | 20 | logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO) 21 | 22 | if not os.path.exists(WORKING_DIR): 23 | os.mkdir(WORKING_DIR) 24 | 25 | # AGE 26 | os.environ["AGE_GRAPH_NAME"] = "chinese" 27 | 28 | postgres_db = PostgreSQLDB( 29 | config={ 30 | "host": "localhost", 31 | "port": 15432, 32 | "user": "rag", 33 | "password": "rag", 34 | "database": "r2", 35 | } 36 | ) 37 | 38 | 39 | async def copy_from_postgres_to_json(): 40 | await postgres_db.initdb() 41 | 42 | from_llm_response_cache = PGKVStorage( 43 | namespace=NameSpace.KV_STORE_LLM_RESPONSE_CACHE, 44 | global_config={"embedding_batch_num": 6}, 45 | embedding_func=None, 46 | db=postgres_db, 47 | ) 48 | 49 | to_llm_response_cache = JsonKVStorage( 50 | namespace=NameSpace.KV_STORE_LLM_RESPONSE_CACHE, 51 | global_config={"working_dir": WORKING_DIR}, 52 | embedding_func=None, 53 | ) 54 | 55 | kv = {} 56 | for c_id in await from_llm_response_cache.all_keys(): 57 | print(f"Copying {c_id}") 58 | workspace = c_id["workspace"] 59 | mode = c_id["mode"] 60 | _id = c_id["id"] 61 | postgres_db.workspace = workspace 62 | obj = await from_llm_response_cache.get_by_mode_and_id(mode, _id) 63 | if mode not in kv: 64 | kv[mode] = {} 65 | kv[mode][_id] = obj[_id] 66 | print(f"Object {obj}") 67 | await to_llm_response_cache.upsert(kv) 68 | await to_llm_response_cache.index_done_callback() 69 | print("Mission accomplished!") 70 | 71 | 72 | async def copy_from_json_to_postgres(): 73 | await postgres_db.initdb() 74 | 75 | from_llm_response_cache = JsonKVStorage( 76 | 
namespace=NameSpace.KV_STORE_LLM_RESPONSE_CACHE, 77 | global_config={"working_dir": WORKING_DIR}, 78 | embedding_func=None, 79 | ) 80 | 81 | to_llm_response_cache = PGKVStorage( 82 | namespace=NameSpace.KV_STORE_LLM_RESPONSE_CACHE, 83 | global_config={"embedding_batch_num": 6}, 84 | embedding_func=None, 85 | db=postgres_db, 86 | ) 87 | 88 | for mode in await from_llm_response_cache.all_keys(): 89 | print(f"Copying {mode}") 90 | caches = await from_llm_response_cache.get_by_id(mode) 91 | for k, v in caches.items(): 92 | item = {mode: {k: v}} 93 | print(f"\tCopying {item}") 94 | await to_llm_response_cache.upsert(item) 95 | 96 | 97 | if __name__ == "__main__": 98 | asyncio.run(copy_from_json_to_postgres()) 99 | -------------------------------------------------------------------------------- /examples/generate_query.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | 3 | # os.environ["OPENAI_API_KEY"] = "" 4 | 5 | 6 | def openai_complete_if_cache( 7 | model="gpt-4o-mini", prompt=None, system_prompt=None, history_messages=[], **kwargs 8 | ) -> str: 9 | openai_client = OpenAI() 10 | 11 | messages = [] 12 | if system_prompt: 13 | messages.append({"role": "system", "content": system_prompt}) 14 | messages.extend(history_messages) 15 | messages.append({"role": "user", "content": prompt}) 16 | 17 | response = openai_client.chat.completions.create( 18 | model=model, messages=messages, **kwargs 19 | ) 20 | return response.choices[0].message.content 21 | 22 | 23 | if __name__ == "__main__": 24 | description = "" 25 | prompt = f""" 26 | Given the following description of a dataset: 27 | 28 | {description} 29 | 30 | Please identify 5 potential users who would engage with this dataset. For each user, list 5 tasks they would perform with this dataset. Then, for each (user, task) combination, generate 5 questions that require a high-level understanding of the entire dataset. 
31 | 32 | Output the results in the following structure: 33 | - User 1: [user description] 34 | - Task 1: [task description] 35 | - Question 1: 36 | - Question 2: 37 | - Question 3: 38 | - Question 4: 39 | - Question 5: 40 | - Task 2: [task description] 41 | ... 42 | - Task 5: [task description] 43 | - User 2: [user description] 44 | ... 45 | - User 5: [user description] 46 | ... 47 | """ 48 | 49 | result = openai_complete_if_cache(model="gpt-4o-mini", prompt=prompt) 50 | 51 | file_path = "./queries.txt" 52 | with open(file_path, "w") as file: 53 | file.write(result) 54 | 55 | print(f"Queries written to {file_path}") 56 | -------------------------------------------------------------------------------- /examples/get_all_edges_nx.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | 3 | G = nx.read_graphml("./dickensTestEmbedcall/graph_chunk_entity_relation.graphml") 4 | 5 | 6 | def get_all_edges_and_nodes(G): 7 | # Get all edges and their properties 8 | edges_with_properties = [] 9 | for u, v, data in G.edges(data=True): 10 | edges_with_properties.append( 11 | { 12 | "start": u, 13 | "end": v, 14 | "label": data.get( 15 | "label", "" 16 | ), # Assuming 'label' is used for edge type 17 | "properties": data, 18 | "start_node_properties": G.nodes[u], 19 | "end_node_properties": G.nodes[v], 20 | } 21 | ) 22 | 23 | return edges_with_properties 24 | 25 | 26 | # Example usage 27 | if __name__ == "__main__": 28 | # Assume G is your NetworkX graph loaded from Neo4j 29 | 30 | all_edges = get_all_edges_and_nodes(G) 31 | 32 | # Print all edges and node properties 33 | for edge in all_edges: 34 | print(f"Edge Label: {edge['label']}") 35 | print(f"Edge Properties: {edge['properties']}") 36 | print(f"Start Node: {edge['start']}") 37 | print(f"Start Node Properties: {edge['start_node_properties']}") 38 | print(f"End Node: {edge['end']}") 39 | print(f"End Node Properties: {edge['end_node_properties']}") 40 | print("---") 
41 | -------------------------------------------------------------------------------- /examples/graph_visual_with_html.py: -------------------------------------------------------------------------------- 1 | import pipmaster as pm 2 | 3 | if not pm.is_installed("pyvis"): 4 | pm.install("pyvis") 5 | if not pm.is_installed("networkx"): 6 | pm.install("networkx") 7 | 8 | import networkx as nx 9 | from pyvis.network import Network 10 | import random 11 | 12 | # Load the GraphML file 13 | G = nx.read_graphml("./dickens/graph_chunk_entity_relation.graphml") 14 | 15 | # Create a Pyvis network 16 | net = Network(height="100vh", notebook=True) 17 | 18 | # Convert NetworkX graph to Pyvis network 19 | net.from_nx(G) 20 | 21 | 22 | # Add colors and title to nodes 23 | for node in net.nodes: 24 | node["color"] = "#{:06x}".format(random.randint(0, 0xFFFFFF)) 25 | if "description" in node: 26 | node["title"] = node["description"] 27 | 28 | # Add title to edges 29 | for edge in net.edges: 30 | if "description" in edge: 31 | edge["title"] = edge["description"] 32 | 33 | # Save and display the network 34 | net.show("knowledge_graph.html") 35 | -------------------------------------------------------------------------------- /examples/lightrag_bedrock_demo.py: -------------------------------------------------------------------------------- 1 | """ 2 | LightRAG meets Amazon Bedrock ⛰️ 3 | """ 4 | 5 | import os 6 | import logging 7 | 8 | from lightrag import LightRAG, QueryParam 9 | from lightrag.llm.bedrock import bedrock_complete, bedrock_embed 10 | from lightrag.utils import EmbeddingFunc 11 | from lightrag.kg.shared_storage import initialize_pipeline_status 12 | 13 | import asyncio 14 | import nest_asyncio 15 | 16 | nest_asyncio.apply() 17 | 18 | logging.getLogger("aiobotocore").setLevel(logging.WARNING) 19 | 20 | WORKING_DIR = "./dickens" 21 | if not os.path.exists(WORKING_DIR): 22 | os.mkdir(WORKING_DIR) 23 | 24 | 25 | async def initialize_rag(): 26 | rag = LightRAG( 27 | 
working_dir=WORKING_DIR, 28 | llm_model_func=bedrock_complete, 29 | llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock", 30 | embedding_func=EmbeddingFunc( 31 | embedding_dim=1024, max_token_size=8192, func=bedrock_embed 32 | ), 33 | ) 34 | 35 | await rag.initialize_storages() 36 | await initialize_pipeline_status() 37 | 38 | return rag 39 | 40 | 41 | def main(): 42 | rag = asyncio.run(initialize_rag()) 43 | 44 | with open("./book.txt", "r", encoding="utf-8") as f: 45 | rag.insert(f.read()) 46 | 47 | for mode in ["naive", "local", "global", "hybrid"]: 48 | print("\n+-" + "-" * len(mode) + "-+") 49 | print(f"| {mode.capitalize()} |") 50 | print("+-" + "-" * len(mode) + "-+\n") 51 | print( 52 | rag.query( 53 | "What are the top themes in this story?", param=QueryParam(mode=mode) 54 | ) 55 | ) 56 | 57 | 58 | if __name__ == "__main__": 59 | main() 60 | -------------------------------------------------------------------------------- /examples/lightrag_gemini_demo.py: -------------------------------------------------------------------------------- 1 | # pip install -q -U google-genai to use gemini as a client 2 | 3 | import os 4 | import numpy as np 5 | from google import genai 6 | from google.genai import types 7 | from dotenv import load_dotenv 8 | from lightrag.utils import EmbeddingFunc 9 | from lightrag import LightRAG, QueryParam 10 | from sentence_transformers import SentenceTransformer 11 | from lightrag.kg.shared_storage import initialize_pipeline_status 12 | 13 | import asyncio 14 | import nest_asyncio 15 | 16 | # Apply nest_asyncio to solve event loop issues 17 | nest_asyncio.apply() 18 | 19 | load_dotenv() 20 | gemini_api_key = os.getenv("GEMINI_API_KEY") 21 | 22 | WORKING_DIR = "./dickens" 23 | 24 | if os.path.exists(WORKING_DIR): 25 | import shutil 26 | 27 | shutil.rmtree(WORKING_DIR) 28 | 29 | os.mkdir(WORKING_DIR) 30 | 31 | 32 | async def llm_model_func( 33 | prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs 34 | 
) -> str: 35 | # 1. Initialize the GenAI Client with your Gemini API Key 36 | client = genai.Client(api_key=gemini_api_key) 37 | 38 | # 2. Combine prompts: system prompt, history, and user prompt 39 | if history_messages is None: 40 | history_messages = [] 41 | 42 | combined_prompt = "" 43 | if system_prompt: 44 | combined_prompt += f"{system_prompt}\n" 45 | 46 | for msg in history_messages: 47 | # Each msg is expected to be a dict: {"role": "...", "content": "..."} 48 | combined_prompt += f"{msg['role']}: {msg['content']}\n" 49 | 50 | # Finally, add the new user prompt 51 | combined_prompt += f"user: {prompt}" 52 | 53 | # 3. Call the Gemini model 54 | response = client.models.generate_content( 55 | model="gemini-1.5-flash", 56 | contents=[combined_prompt], 57 | config=types.GenerateContentConfig(max_output_tokens=500, temperature=0.1), 58 | ) 59 | 60 | # 4. Return the response text 61 | return response.text 62 | 63 | 64 | async def embedding_func(texts: list[str]) -> np.ndarray: 65 | model = SentenceTransformer("all-MiniLM-L6-v2") 66 | embeddings = model.encode(texts, convert_to_numpy=True) 67 | return embeddings 68 | 69 | 70 | async def initialize_rag(): 71 | rag = LightRAG( 72 | working_dir=WORKING_DIR, 73 | llm_model_func=llm_model_func, 74 | embedding_func=EmbeddingFunc( 75 | embedding_dim=384, 76 | max_token_size=8192, 77 | func=embedding_func, 78 | ), 79 | ) 80 | 81 | await rag.initialize_storages() 82 | await initialize_pipeline_status() 83 | 84 | return rag 85 | 86 | 87 | def main(): 88 | # Initialize RAG instance 89 | rag = asyncio.run(initialize_rag()) 90 | file_path = "story.txt" 91 | with open(file_path, "r") as file: 92 | text = file.read() 93 | 94 | rag.insert(text) 95 | 96 | response = rag.query( 97 | query="What is the main theme of the story?", 98 | param=QueryParam(mode="hybrid", top_k=5, response_type="single line"), 99 | ) 100 | 101 | print(response) 102 | 103 | 104 | if __name__ == "__main__": 105 | main() 106 | 
-------------------------------------------------------------------------------- /examples/lightrag_hf_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from lightrag import LightRAG, QueryParam 4 | from lightrag.llm.hf import hf_model_complete, hf_embed 5 | from lightrag.utils import EmbeddingFunc 6 | from transformers import AutoModel, AutoTokenizer 7 | from lightrag.kg.shared_storage import initialize_pipeline_status 8 | 9 | import asyncio 10 | import nest_asyncio 11 | 12 | nest_asyncio.apply() 13 | 14 | WORKING_DIR = "./dickens" 15 | 16 | if not os.path.exists(WORKING_DIR): 17 | os.mkdir(WORKING_DIR) 18 | 19 | 20 | async def initialize_rag(): 21 | rag = LightRAG( 22 | working_dir=WORKING_DIR, 23 | llm_model_func=hf_model_complete, 24 | llm_model_name="meta-llama/Llama-3.1-8B-Instruct", 25 | embedding_func=EmbeddingFunc( 26 | embedding_dim=384, 27 | max_token_size=5000, 28 | func=lambda texts: hf_embed( 29 | texts, 30 | tokenizer=AutoTokenizer.from_pretrained( 31 | "sentence-transformers/all-MiniLM-L6-v2" 32 | ), 33 | embed_model=AutoModel.from_pretrained( 34 | "sentence-transformers/all-MiniLM-L6-v2" 35 | ), 36 | ), 37 | ), 38 | ) 39 | 40 | await rag.initialize_storages() 41 | await initialize_pipeline_status() 42 | 43 | return rag 44 | 45 | 46 | def main(): 47 | rag = asyncio.run(initialize_rag()) 48 | 49 | with open("./book.txt", "r", encoding="utf-8") as f: 50 | rag.insert(f.read()) 51 | 52 | # Perform naive search 53 | print( 54 | rag.query( 55 | "What are the top themes in this story?", param=QueryParam(mode="naive") 56 | ) 57 | ) 58 | 59 | # Perform local search 60 | print( 61 | rag.query( 62 | "What are the top themes in this story?", param=QueryParam(mode="local") 63 | ) 64 | ) 65 | 66 | # Perform global search 67 | print( 68 | rag.query( 69 | "What are the top themes in this story?", param=QueryParam(mode="global") 70 | ) 71 | ) 72 | 73 | # Perform hybrid search 74 | print( 75 | rag.query( 
76 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 77 | ) 78 | ) 79 | 80 | 81 | if __name__ == "__main__": 82 | main() 83 | -------------------------------------------------------------------------------- /examples/lightrag_lmdeploy_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from lightrag import LightRAG, QueryParam 4 | from lightrag.llm.lmdeploy import lmdeploy_model_if_cache 5 | from lightrag.llm.hf import hf_embed 6 | from lightrag.utils import EmbeddingFunc 7 | from transformers import AutoModel, AutoTokenizer 8 | from lightrag.kg.shared_storage import initialize_pipeline_status 9 | 10 | import asyncio 11 | import nest_asyncio 12 | 13 | nest_asyncio.apply() 14 | 15 | WORKING_DIR = "./dickens" 16 | 17 | if not os.path.exists(WORKING_DIR): 18 | os.mkdir(WORKING_DIR) 19 | 20 | 21 | async def lmdeploy_model_complete( 22 | prompt=None, 23 | system_prompt=None, 24 | history_messages=[], 25 | keyword_extraction=False, 26 | **kwargs, 27 | ) -> str: 28 | model_name = kwargs["hashing_kv"].global_config["llm_model_name"] 29 | return await lmdeploy_model_if_cache( 30 | model_name, 31 | prompt, 32 | system_prompt=system_prompt, 33 | history_messages=history_messages, 34 | ## please specify chat_template if your local path does not follow original HF file name, 35 | ## or model_name is a pytorch model on huggingface.co, 36 | ## you can refer to https://github.com/InternLM/lmdeploy/blob/main/lmdeploy/model.py 37 | ## for a list of chat_template available in lmdeploy. 38 | chat_template="llama3", 39 | # model_format ='awq', # if you are using awq quantization model. 40 | # quant_policy=8, # if you want to use online kv cache, 4=kv int4, 8=kv int8. 
41 | **kwargs, 42 | ) 43 | 44 | 45 | async def initialize_rag(): 46 | rag = LightRAG( 47 | working_dir=WORKING_DIR, 48 | llm_model_func=lmdeploy_model_complete, 49 | llm_model_name="meta-llama/Llama-3.1-8B-Instruct", # please use definite path for local model 50 | embedding_func=EmbeddingFunc( 51 | embedding_dim=384, 52 | max_token_size=5000, 53 | func=lambda texts: hf_embed( 54 | texts, 55 | tokenizer=AutoTokenizer.from_pretrained( 56 | "sentence-transformers/all-MiniLM-L6-v2" 57 | ), 58 | embed_model=AutoModel.from_pretrained( 59 | "sentence-transformers/all-MiniLM-L6-v2" 60 | ), 61 | ), 62 | ), 63 | ) 64 | 65 | await rag.initialize_storages() 66 | await initialize_pipeline_status() 67 | 68 | return rag 69 | 70 | 71 | def main(): 72 | # Initialize RAG instance 73 | rag = asyncio.run(initialize_rag()) 74 | 75 | # Insert example text 76 | with open("./book.txt", "r", encoding="utf-8") as f: 77 | rag.insert(f.read()) 78 | 79 | # Test different query modes 80 | print("\nNaive Search:") 81 | print( 82 | rag.query( 83 | "What are the top themes in this story?", param=QueryParam(mode="naive") 84 | ) 85 | ) 86 | 87 | print("\nLocal Search:") 88 | print( 89 | rag.query( 90 | "What are the top themes in this story?", param=QueryParam(mode="local") 91 | ) 92 | ) 93 | 94 | print("\nGlobal Search:") 95 | print( 96 | rag.query( 97 | "What are the top themes in this story?", param=QueryParam(mode="global") 98 | ) 99 | ) 100 | 101 | print("\nHybrid Search:") 102 | print( 103 | rag.query( 104 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 105 | ) 106 | ) 107 | 108 | 109 | if __name__ == "__main__": 110 | main() 111 | -------------------------------------------------------------------------------- /examples/lightrag_multi_model_all_modes_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | from lightrag import LightRAG, QueryParam 4 | from lightrag.llm.openai import 
gpt_4o_mini_complete, gpt_4o_complete, openai_embed 5 | from lightrag.kg.shared_storage import initialize_pipeline_status 6 | 7 | WORKING_DIR = "./lightrag_demo" 8 | 9 | if not os.path.exists(WORKING_DIR): 10 | os.mkdir(WORKING_DIR) 11 | 12 | 13 | async def initialize_rag(): 14 | rag = LightRAG( 15 | working_dir=WORKING_DIR, 16 | embedding_func=openai_embed, 17 | llm_model_func=gpt_4o_mini_complete, # Default model for queries 18 | ) 19 | 20 | await rag.initialize_storages() 21 | await initialize_pipeline_status() 22 | 23 | return rag 24 | 25 | 26 | def main(): 27 | # Initialize RAG instance 28 | rag = asyncio.run(initialize_rag()) 29 | 30 | # Load the data 31 | with open("./book.txt", "r", encoding="utf-8") as f: 32 | rag.insert(f.read()) 33 | 34 | # Query with naive mode (default model) 35 | print("--- NAIVE mode ---") 36 | print( 37 | rag.query( 38 | "What are the main themes in this story?", param=QueryParam(mode="naive") 39 | ) 40 | ) 41 | 42 | # Query with local mode (default model) 43 | print("\n--- LOCAL mode ---") 44 | print( 45 | rag.query( 46 | "What are the main themes in this story?", param=QueryParam(mode="local") 47 | ) 48 | ) 49 | 50 | # Query with global mode (default model) 51 | print("\n--- GLOBAL mode ---") 52 | print( 53 | rag.query( 54 | "What are the main themes in this story?", param=QueryParam(mode="global") 55 | ) 56 | ) 57 | 58 | # Query with hybrid mode (default model) 59 | print("\n--- HYBRID mode ---") 60 | print( 61 | rag.query( 62 | "What are the main themes in this story?", param=QueryParam(mode="hybrid") 63 | ) 64 | ) 65 | 66 | # Query with mix mode (default model) 67 | print("\n--- MIX mode ---") 68 | print( 69 | rag.query( 70 | "What are the main themes in this story?", param=QueryParam(mode="mix") 71 | ) 72 | ) 73 | 74 | # Query with a custom model (gpt-4o) for a more complex question 75 | print("\n--- Using custom model for complex analysis ---") 76 | print( 77 | rag.query( 78 | "How does the character development reflect 
Victorian-era attitudes?", 79 | param=QueryParam( 80 | mode="global", 81 | model_func=gpt_4o_complete, # Override default model with more capable one 82 | ), 83 | ) 84 | ) 85 | 86 | 87 | if __name__ == "__main__": 88 | main() 89 | -------------------------------------------------------------------------------- /examples/lightrag_ollama_age_demo.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import nest_asyncio 3 | 4 | nest_asyncio.apply() 5 | 6 | import inspect 7 | import logging 8 | import os 9 | 10 | from lightrag import LightRAG, QueryParam 11 | from lightrag.llm.ollama import ollama_embed, ollama_model_complete 12 | from lightrag.utils import EmbeddingFunc 13 | from lightrag.kg.shared_storage import initialize_pipeline_status 14 | 15 | WORKING_DIR = "./dickens_age" 16 | 17 | logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO) 18 | 19 | if not os.path.exists(WORKING_DIR): 20 | os.mkdir(WORKING_DIR) 21 | 22 | # AGE 23 | os.environ["AGE_POSTGRES_DB"] = "postgresDB" 24 | os.environ["AGE_POSTGRES_USER"] = "postgresUser" 25 | os.environ["AGE_POSTGRES_PASSWORD"] = "postgresPW" 26 | os.environ["AGE_POSTGRES_HOST"] = "localhost" 27 | os.environ["AGE_POSTGRES_PORT"] = "5455" 28 | os.environ["AGE_GRAPH_NAME"] = "dickens" 29 | 30 | 31 | async def initialize_rag(): 32 | rag = LightRAG( 33 | working_dir=WORKING_DIR, 34 | llm_model_func=ollama_model_complete, 35 | llm_model_name="llama3.1:8b", 36 | llm_model_max_async=4, 37 | llm_model_max_token_size=32768, 38 | llm_model_kwargs={ 39 | "host": "http://localhost:11434", 40 | "options": {"num_ctx": 32768}, 41 | }, 42 | embedding_func=EmbeddingFunc( 43 | embedding_dim=768, 44 | max_token_size=8192, 45 | func=lambda texts: ollama_embed( 46 | texts, embed_model="nomic-embed-text", host="http://localhost:11434" 47 | ), 48 | ), 49 | graph_storage="AGEStorage", 50 | ) 51 | 52 | await rag.initialize_storages() 53 | await initialize_pipeline_status() 
54 | 55 | return rag 56 | 57 | 58 | async def print_stream(stream): 59 | async for chunk in stream: 60 | print(chunk, end="", flush=True) 61 | 62 | 63 | def main(): 64 | # Initialize RAG instance 65 | rag = asyncio.run(initialize_rag()) 66 | 67 | # Insert example text 68 | with open("./book.txt", "r", encoding="utf-8") as f: 69 | rag.insert(f.read()) 70 | 71 | # Test different query modes 72 | print("\nNaive Search:") 73 | print( 74 | rag.query( 75 | "What are the top themes in this story?", param=QueryParam(mode="naive") 76 | ) 77 | ) 78 | 79 | print("\nLocal Search:") 80 | print( 81 | rag.query( 82 | "What are the top themes in this story?", param=QueryParam(mode="local") 83 | ) 84 | ) 85 | 86 | print("\nGlobal Search:") 87 | print( 88 | rag.query( 89 | "What are the top themes in this story?", param=QueryParam(mode="global") 90 | ) 91 | ) 92 | 93 | print("\nHybrid Search:") 94 | print( 95 | rag.query( 96 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 97 | ) 98 | ) 99 | 100 | # stream response 101 | resp = rag.query( 102 | "What are the top themes in this story?", 103 | param=QueryParam(mode="hybrid", stream=True), 104 | ) 105 | 106 | if inspect.isasyncgen(resp): 107 | asyncio.run(print_stream(resp)) 108 | else: 109 | print(resp) 110 | 111 | 112 | if __name__ == "__main__": 113 | main() 114 | -------------------------------------------------------------------------------- /examples/lightrag_ollama_demo.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import nest_asyncio 3 | 4 | nest_asyncio.apply() 5 | import os 6 | import inspect 7 | import logging 8 | from lightrag import LightRAG, QueryParam 9 | from lightrag.llm.ollama import ollama_model_complete, ollama_embed 10 | from lightrag.utils import EmbeddingFunc 11 | from lightrag.kg.shared_storage import initialize_pipeline_status 12 | 13 | WORKING_DIR = "./dickens" 14 | 15 | 
logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO) 16 | 17 | if not os.path.exists(WORKING_DIR): 18 | os.mkdir(WORKING_DIR) 19 | 20 | 21 | async def initialize_rag(): 22 | rag = LightRAG( 23 | working_dir=WORKING_DIR, 24 | llm_model_func=ollama_model_complete, 25 | llm_model_name="gemma2:2b", 26 | llm_model_max_async=4, 27 | llm_model_max_token_size=32768, 28 | llm_model_kwargs={ 29 | "host": "http://localhost:11434", 30 | "options": {"num_ctx": 32768}, 31 | }, 32 | embedding_func=EmbeddingFunc( 33 | embedding_dim=768, 34 | max_token_size=8192, 35 | func=lambda texts: ollama_embed( 36 | texts, embed_model="nomic-embed-text", host="http://localhost:11434" 37 | ), 38 | ), 39 | ) 40 | 41 | await rag.initialize_storages() 42 | await initialize_pipeline_status() 43 | 44 | return rag 45 | 46 | 47 | async def print_stream(stream): 48 | async for chunk in stream: 49 | print(chunk, end="", flush=True) 50 | 51 | 52 | def main(): 53 | # Initialize RAG instance 54 | rag = asyncio.run(initialize_rag()) 55 | 56 | # Insert example text 57 | with open("./book.txt", "r", encoding="utf-8") as f: 58 | rag.insert(f.read()) 59 | 60 | # Test different query modes 61 | print("\nNaive Search:") 62 | print( 63 | rag.query( 64 | "What are the top themes in this story?", param=QueryParam(mode="naive") 65 | ) 66 | ) 67 | 68 | print("\nLocal Search:") 69 | print( 70 | rag.query( 71 | "What are the top themes in this story?", param=QueryParam(mode="local") 72 | ) 73 | ) 74 | 75 | print("\nGlobal Search:") 76 | print( 77 | rag.query( 78 | "What are the top themes in this story?", param=QueryParam(mode="global") 79 | ) 80 | ) 81 | 82 | print("\nHybrid Search:") 83 | print( 84 | rag.query( 85 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 86 | ) 87 | ) 88 | 89 | # stream response 90 | resp = rag.query( 91 | "What are the top themes in this story?", 92 | param=QueryParam(mode="hybrid", stream=True), 93 | ) 94 | 95 | if inspect.isasyncgen(resp): 
96 | asyncio.run(print_stream(resp)) 97 | else: 98 | print(resp) 99 | 100 | 101 | if __name__ == "__main__": 102 | main() 103 | -------------------------------------------------------------------------------- /examples/lightrag_ollama_neo4j_milvus_mongo_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | from lightrag import LightRAG, QueryParam 3 | from lightrag.llm.ollama import ollama_model_complete, ollama_embed 4 | from lightrag.utils import EmbeddingFunc 5 | import asyncio 6 | import nest_asyncio 7 | 8 | nest_asyncio.apply() 9 | from lightrag.kg.shared_storage import initialize_pipeline_status 10 | 11 | # WorkingDir 12 | ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) 13 | WORKING_DIR = os.path.join(ROOT_DIR, "myKG") 14 | if not os.path.exists(WORKING_DIR): 15 | os.mkdir(WORKING_DIR) 16 | print(f"WorkingDir: {WORKING_DIR}") 17 | 18 | # mongo 19 | os.environ["MONGO_URI"] = "mongodb://root:root@localhost:27017/" 20 | os.environ["MONGO_DATABASE"] = "LightRAG" 21 | 22 | # neo4j 23 | BATCH_SIZE_NODES = 500 24 | BATCH_SIZE_EDGES = 100 25 | os.environ["NEO4J_URI"] = "bolt://localhost:7687" 26 | os.environ["NEO4J_USERNAME"] = "neo4j" 27 | os.environ["NEO4J_PASSWORD"] = "neo4j" 28 | 29 | # milvus 30 | os.environ["MILVUS_URI"] = "http://localhost:19530" 31 | os.environ["MILVUS_USER"] = "root" 32 | os.environ["MILVUS_PASSWORD"] = "root" 33 | os.environ["MILVUS_DB_NAME"] = "lightrag" 34 | 35 | 36 | async def initialize_rag(): 37 | rag = LightRAG( 38 | working_dir=WORKING_DIR, 39 | llm_model_func=ollama_model_complete, 40 | llm_model_name="qwen2.5:14b", 41 | llm_model_max_async=4, 42 | llm_model_max_token_size=32768, 43 | llm_model_kwargs={ 44 | "host": "http://127.0.0.1:11434", 45 | "options": {"num_ctx": 32768}, 46 | }, 47 | embedding_func=EmbeddingFunc( 48 | embedding_dim=1024, 49 | max_token_size=8192, 50 | func=lambda texts: ollama_embed( 51 | texts=texts, embed_model="bge-m3:latest", 
host="http://127.0.0.1:11434" 52 | ), 53 | ), 54 | kv_storage="MongoKVStorage", 55 | graph_storage="Neo4JStorage", 56 | vector_storage="MilvusVectorDBStorage", 57 | ) 58 | 59 | await rag.initialize_storages() 60 | await initialize_pipeline_status() 61 | 62 | return rag 63 | 64 | 65 | def main(): 66 | # Initialize RAG instance 67 | rag = asyncio.run(initialize_rag()) 68 | 69 | # Insert example text 70 | with open("./book.txt", "r", encoding="utf-8") as f: 71 | rag.insert(f.read()) 72 | 73 | # Test different query modes 74 | print("\nNaive Search:") 75 | print( 76 | rag.query( 77 | "What are the top themes in this story?", param=QueryParam(mode="naive") 78 | ) 79 | ) 80 | 81 | print("\nLocal Search:") 82 | print( 83 | rag.query( 84 | "What are the top themes in this story?", param=QueryParam(mode="local") 85 | ) 86 | ) 87 | 88 | print("\nGlobal Search:") 89 | print( 90 | rag.query( 91 | "What are the top themes in this story?", param=QueryParam(mode="global") 92 | ) 93 | ) 94 | 95 | print("\nHybrid Search:") 96 | print( 97 | rag.query( 98 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 99 | ) 100 | ) 101 | 102 | 103 | if __name__ == "__main__": 104 | main() 105 | -------------------------------------------------------------------------------- /examples/lightrag_openai_mongodb_graph_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | from lightrag import LightRAG, QueryParam 4 | from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed 5 | from lightrag.utils import EmbeddingFunc 6 | import numpy as np 7 | from lightrag.kg.shared_storage import initialize_pipeline_status 8 | 9 | ######### 10 | # Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert() 11 | # import nest_asyncio 12 | # nest_asyncio.apply() 13 | ######### 14 | WORKING_DIR = "./mongodb_test_dir" 15 | if not os.path.exists(WORKING_DIR): 16 | 
os.mkdir(WORKING_DIR) 17 | 18 | 19 | os.environ["OPENAI_API_KEY"] = "sk-" 20 | os.environ["MONGO_URI"] = "mongodb://0.0.0.0:27017/?directConnection=true" 21 | os.environ["MONGO_DATABASE"] = "LightRAG" 22 | os.environ["MONGO_KG_COLLECTION"] = "MDB_KG" 23 | 24 | # Embedding Configuration and Functions 25 | EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large") 26 | EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192)) 27 | 28 | 29 | async def embedding_func(texts: list[str]) -> np.ndarray: 30 | return await openai_embed( 31 | texts, 32 | model=EMBEDDING_MODEL, 33 | ) 34 | 35 | 36 | async def get_embedding_dimension(): 37 | test_text = ["This is a test sentence."] 38 | embedding = await embedding_func(test_text) 39 | return embedding.shape[1] 40 | 41 | 42 | async def create_embedding_function_instance(): 43 | # Get embedding dimension 44 | embedding_dimension = await get_embedding_dimension() 45 | # Create embedding function instance 46 | return EmbeddingFunc( 47 | embedding_dim=embedding_dimension, 48 | max_token_size=EMBEDDING_MAX_TOKEN_SIZE, 49 | func=embedding_func, 50 | ) 51 | 52 | 53 | async def initialize_rag(): 54 | embedding_func_instance = await create_embedding_function_instance() 55 | 56 | rag = LightRAG( 57 | working_dir=WORKING_DIR, 58 | llm_model_func=gpt_4o_mini_complete, 59 | embedding_func=embedding_func_instance, 60 | graph_storage="MongoGraphStorage", 61 | log_level="DEBUG", 62 | ) 63 | 64 | await rag.initialize_storages() 65 | await initialize_pipeline_status() 66 | 67 | return rag 68 | 69 | 70 | def main(): 71 | # Initialize RAG instance 72 | rag = asyncio.run(initialize_rag()) 73 | 74 | with open("./book.txt", "r", encoding="utf-8") as f: 75 | rag.insert(f.read()) 76 | 77 | # Perform naive search 78 | print( 79 | rag.query( 80 | "What are the top themes in this story?", param=QueryParam(mode="naive") 81 | ) 82 | ) 83 | 84 | # Perform local search 85 | print( 86 | rag.query( 87 | "What are the top 
themes in this story?", param=QueryParam(mode="local") 88 | ) 89 | ) 90 | 91 | # Perform global search 92 | print( 93 | rag.query( 94 | "What are the top themes in this story?", param=QueryParam(mode="global") 95 | ) 96 | ) 97 | 98 | # Perform hybrid search 99 | print( 100 | rag.query( 101 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 102 | ) 103 | ) 104 | 105 | 106 | if __name__ == "__main__": 107 | main() 108 | -------------------------------------------------------------------------------- /examples/lightrag_openai_neo4j_milvus_redis_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | from lightrag import LightRAG, QueryParam 4 | from lightrag.llm.ollama import ollama_embed, openai_complete_if_cache 5 | from lightrag.utils import EmbeddingFunc 6 | from lightrag.kg.shared_storage import initialize_pipeline_status 7 | 8 | # WorkingDir 9 | ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) 10 | WORKING_DIR = os.path.join(ROOT_DIR, "myKG") 11 | if not os.path.exists(WORKING_DIR): 12 | os.mkdir(WORKING_DIR) 13 | print(f"WorkingDir: {WORKING_DIR}") 14 | 15 | # redis 16 | os.environ["REDIS_URI"] = "redis://localhost:6379" 17 | 18 | # neo4j 19 | BATCH_SIZE_NODES = 500 20 | BATCH_SIZE_EDGES = 100 21 | os.environ["NEO4J_URI"] = "bolt://117.50.173.35:7687" 22 | os.environ["NEO4J_USERNAME"] = "neo4j" 23 | os.environ["NEO4J_PASSWORD"] = "12345678" 24 | 25 | # milvus 26 | os.environ["MILVUS_URI"] = "http://117.50.173.35:19530" 27 | os.environ["MILVUS_USER"] = "root" 28 | os.environ["MILVUS_PASSWORD"] = "Milvus" 29 | os.environ["MILVUS_DB_NAME"] = "lightrag" 30 | 31 | 32 | async def llm_model_func( 33 | prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs 34 | ) -> str: 35 | return await openai_complete_if_cache( 36 | "deepseek-chat", 37 | prompt, 38 | system_prompt=system_prompt, 39 | history_messages=history_messages, 40 | api_key="", 41 
| base_url="", 42 | **kwargs, 43 | ) 44 | 45 | 46 | embedding_func = EmbeddingFunc( 47 | embedding_dim=768, 48 | max_token_size=512, 49 | func=lambda texts: ollama_embed( 50 | texts, embed_model="shaw/dmeta-embedding-zh", host="http://117.50.173.35:11434" 51 | ), 52 | ) 53 | 54 | 55 | async def initialize_rag(): 56 | rag = LightRAG( 57 | working_dir=WORKING_DIR, 58 | llm_model_func=llm_model_func, 59 | llm_model_max_token_size=32768, 60 | embedding_func=embedding_func, 61 | chunk_token_size=512, 62 | chunk_overlap_token_size=256, 63 | kv_storage="RedisKVStorage", 64 | graph_storage="Neo4JStorage", 65 | vector_storage="MilvusVectorDBStorage", 66 | doc_status_storage="RedisKVStorage", 67 | ) 68 | 69 | await rag.initialize_storages() 70 | await initialize_pipeline_status() 71 | 72 | return rag 73 | 74 | 75 | def main(): 76 | # Initialize RAG instance 77 | rag = asyncio.run(initialize_rag()) 78 | 79 | with open("./book.txt", "r", encoding="utf-8") as f: 80 | rag.insert(f.read()) 81 | 82 | # Perform naive search 83 | print( 84 | rag.query( 85 | "What are the top themes in this story?", param=QueryParam(mode="naive") 86 | ) 87 | ) 88 | 89 | # Perform local search 90 | print( 91 | rag.query( 92 | "What are the top themes in this story?", param=QueryParam(mode="local") 93 | ) 94 | ) 95 | 96 | # Perform global search 97 | print( 98 | rag.query( 99 | "What are the top themes in this story?", param=QueryParam(mode="global") 100 | ) 101 | ) 102 | 103 | # Perform hybrid search 104 | print( 105 | rag.query( 106 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 107 | ) 108 | ) 109 | 110 | 111 | if __name__ == "__main__": 112 | main() 113 | -------------------------------------------------------------------------------- /examples/lightrag_siliconcloud_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | from lightrag import LightRAG, QueryParam 4 | from lightrag.llm.openai import 
openai_complete_if_cache 5 | from lightrag.llm.siliconcloud import siliconcloud_embedding 6 | from lightrag.utils import EmbeddingFunc 7 | import numpy as np 8 | from lightrag.kg.shared_storage import initialize_pipeline_status 9 | 10 | WORKING_DIR = "./dickens" 11 | 12 | if not os.path.exists(WORKING_DIR): 13 | os.mkdir(WORKING_DIR) 14 | 15 | 16 | async def llm_model_func( 17 | prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs 18 | ) -> str: 19 | return await openai_complete_if_cache( 20 | "Qwen/Qwen2.5-7B-Instruct", 21 | prompt, 22 | system_prompt=system_prompt, 23 | history_messages=history_messages, 24 | api_key=os.getenv("SILICONFLOW_API_KEY"), 25 | base_url="https://api.siliconflow.cn/v1/", 26 | **kwargs, 27 | ) 28 | 29 | 30 | async def embedding_func(texts: list[str]) -> np.ndarray: 31 | return await siliconcloud_embedding( 32 | texts, 33 | model="netease-youdao/bce-embedding-base_v1", 34 | api_key=os.getenv("SILICONFLOW_API_KEY"), 35 | max_token_size=512, 36 | ) 37 | 38 | 39 | # function test 40 | async def test_funcs(): 41 | result = await llm_model_func("How are you?") 42 | print("llm_model_func: ", result) 43 | 44 | result = await embedding_func(["How are you?"]) 45 | print("embedding_func: ", result) 46 | 47 | 48 | asyncio.run(test_funcs()) 49 | 50 | 51 | async def initialize_rag(): 52 | rag = LightRAG( 53 | working_dir=WORKING_DIR, 54 | llm_model_func=llm_model_func, 55 | embedding_func=EmbeddingFunc( 56 | embedding_dim=768, max_token_size=512, func=embedding_func 57 | ), 58 | ) 59 | 60 | await rag.initialize_storages() 61 | await initialize_pipeline_status() 62 | 63 | return rag 64 | 65 | 66 | def main(): 67 | # Initialize RAG instance 68 | rag = asyncio.run(initialize_rag()) 69 | 70 | with open("./book.txt", "r", encoding="utf-8") as f: 71 | rag.insert(f.read()) 72 | 73 | # Perform naive search 74 | print( 75 | rag.query( 76 | "What are the top themes in this story?", param=QueryParam(mode="naive") 77 | ) 78 | ) 79 | 
80 | # Perform local search 81 | print( 82 | rag.query( 83 | "What are the top themes in this story?", param=QueryParam(mode="local") 84 | ) 85 | ) 86 | 87 | # Perform global search 88 | print( 89 | rag.query( 90 | "What are the top themes in this story?", param=QueryParam(mode="global") 91 | ) 92 | ) 93 | 94 | # Perform hybrid search 95 | print( 96 | rag.query( 97 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 98 | ) 99 | ) 100 | 101 | 102 | if __name__ == "__main__": 103 | main() 104 | -------------------------------------------------------------------------------- /examples/lightrag_siliconcloud_track_token_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | from lightrag import LightRAG, QueryParam 4 | from lightrag.llm.openai import openai_complete_if_cache 5 | from lightrag.llm.siliconcloud import siliconcloud_embedding 6 | from lightrag.utils import EmbeddingFunc 7 | from lightrag.utils import TokenTracker 8 | import numpy as np 9 | from lightrag.kg.shared_storage import initialize_pipeline_status 10 | from dotenv import load_dotenv 11 | 12 | load_dotenv() 13 | 14 | token_tracker = TokenTracker() 15 | WORKING_DIR = "./dickens" 16 | 17 | if not os.path.exists(WORKING_DIR): 18 | os.mkdir(WORKING_DIR) 19 | 20 | 21 | async def llm_model_func( 22 | prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs 23 | ) -> str: 24 | return await openai_complete_if_cache( 25 | "Qwen/Qwen2.5-7B-Instruct", 26 | prompt, 27 | system_prompt=system_prompt, 28 | history_messages=history_messages, 29 | api_key=os.getenv("SILICONFLOW_API_KEY"), 30 | base_url="https://api.siliconflow.cn/v1/", 31 | token_tracker=token_tracker, 32 | **kwargs, 33 | ) 34 | 35 | 36 | async def embedding_func(texts: list[str]) -> np.ndarray: 37 | return await siliconcloud_embedding( 38 | texts, 39 | model="BAAI/bge-m3", 40 | api_key=os.getenv("SILICONFLOW_API_KEY"), 41 | 
max_token_size=512, 42 | ) 43 | 44 | 45 | # function test 46 | async def test_funcs(): 47 | # Context Manager Method 48 | with token_tracker: 49 | result = await llm_model_func("How are you?") 50 | print("llm_model_func: ", result) 51 | 52 | 53 | asyncio.run(test_funcs()) 54 | 55 | 56 | async def initialize_rag(): 57 | rag = LightRAG( 58 | working_dir=WORKING_DIR, 59 | llm_model_func=llm_model_func, 60 | embedding_func=EmbeddingFunc( 61 | embedding_dim=1024, max_token_size=512, func=embedding_func 62 | ), 63 | ) 64 | 65 | await rag.initialize_storages() 66 | await initialize_pipeline_status() 67 | 68 | return rag 69 | 70 | 71 | def main(): 72 | # Initialize RAG instance 73 | rag = asyncio.run(initialize_rag()) 74 | 75 | # Reset tracker before processing queries 76 | token_tracker.reset() 77 | 78 | with open("./book.txt", "r", encoding="utf-8") as f: 79 | rag.insert(f.read()) 80 | 81 | print( 82 | rag.query( 83 | "What are the top themes in this story?", param=QueryParam(mode="naive") 84 | ) 85 | ) 86 | 87 | print( 88 | rag.query( 89 | "What are the top themes in this story?", param=QueryParam(mode="local") 90 | ) 91 | ) 92 | 93 | print( 94 | rag.query( 95 | "What are the top themes in this story?", param=QueryParam(mode="global") 96 | ) 97 | ) 98 | 99 | print( 100 | rag.query( 101 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 102 | ) 103 | ) 104 | 105 | # Display final token usage after main query 106 | print("Token usage:", token_tracker.get_usage()) 107 | 108 | 109 | if __name__ == "__main__": 110 | main() 111 | -------------------------------------------------------------------------------- /examples/lightrag_tidb_demo.py: -------------------------------------------------------------------------------- 1 | ########################################### 2 | # TiDB storage implementation is deprecated 3 | ########################################### 4 | 5 | import asyncio 6 | import os 7 | 8 | import numpy as np 9 | 10 | from lightrag 
import LightRAG, QueryParam 11 | from lightrag.llm import siliconcloud_embedding, openai_complete_if_cache 12 | from lightrag.utils import EmbeddingFunc 13 | from lightrag.kg.shared_storage import initialize_pipeline_status 14 | 15 | WORKING_DIR = "./dickens" 16 | 17 | # We use SiliconCloud API to call LLM on Oracle Cloud 18 | # More docs here https://docs.siliconflow.cn/introduction 19 | BASE_URL = "https://api.siliconflow.cn/v1/" 20 | APIKEY = "" 21 | CHATMODEL = "" 22 | EMBEDMODEL = "" 23 | 24 | os.environ["TIDB_HOST"] = "" 25 | os.environ["TIDB_PORT"] = "" 26 | os.environ["TIDB_USER"] = "" 27 | os.environ["TIDB_PASSWORD"] = "" 28 | os.environ["TIDB_DATABASE"] = "lightrag" 29 | 30 | if not os.path.exists(WORKING_DIR): 31 | os.mkdir(WORKING_DIR) 32 | 33 | 34 | async def llm_model_func( 35 | prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs 36 | ) -> str: 37 | return await openai_complete_if_cache( 38 | CHATMODEL, 39 | prompt, 40 | system_prompt=system_prompt, 41 | history_messages=history_messages, 42 | api_key=APIKEY, 43 | base_url=BASE_URL, 44 | **kwargs, 45 | ) 46 | 47 | 48 | async def embedding_func(texts: list[str]) -> np.ndarray: 49 | return await siliconcloud_embedding( 50 | texts, 51 | # model=EMBEDMODEL, 52 | api_key=APIKEY, 53 | ) 54 | 55 | 56 | async def get_embedding_dim(): 57 | test_text = ["This is a test sentence."] 58 | embedding = await embedding_func(test_text) 59 | embedding_dim = embedding.shape[1] 60 | return embedding_dim 61 | 62 | 63 | async def initialize_rag(): 64 | # Detect embedding dimension 65 | embedding_dimension = await get_embedding_dim() 66 | print(f"Detected embedding dimension: {embedding_dimension}") 67 | 68 | # Initialize LightRAG 69 | # We use TiDB DB as the KV/vector 70 | rag = LightRAG( 71 | enable_llm_cache=False, 72 | working_dir=WORKING_DIR, 73 | chunk_token_size=512, 74 | llm_model_func=llm_model_func, 75 | embedding_func=EmbeddingFunc( 76 | embedding_dim=embedding_dimension, 77 | 
max_token_size=512, 78 | func=embedding_func, 79 | ), 80 | kv_storage="TiDBKVStorage", 81 | vector_storage="TiDBVectorDBStorage", 82 | graph_storage="TiDBGraphStorage", 83 | ) 84 | 85 | await rag.initialize_storages() 86 | await initialize_pipeline_status() 87 | 88 | return rag 89 | 90 | 91 | async def main(): 92 | try: 93 | # Initialize RAG instance 94 | rag = await initialize_rag() 95 | 96 | with open("./book.txt", "r", encoding="utf-8") as f: 97 | rag.insert(f.read()) 98 | 99 | # Perform search in different modes 100 | modes = ["naive", "local", "global", "hybrid"] 101 | for mode in modes: 102 | print("=" * 20, mode, "=" * 20) 103 | print( 104 | await rag.aquery( 105 | "What are the top themes in this story?", 106 | param=QueryParam(mode=mode), 107 | ) 108 | ) 109 | print("-" * 100, "\n") 110 | 111 | except Exception as e: 112 | print(f"An error occurred: {e}") 113 | 114 | 115 | if __name__ == "__main__": 116 | asyncio.run(main()) 117 | -------------------------------------------------------------------------------- /examples/lightrag_zhipu_demo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import asyncio 4 | 5 | 6 | from lightrag import LightRAG, QueryParam 7 | from lightrag.llm.zhipu import zhipu_complete, zhipu_embedding 8 | from lightrag.utils import EmbeddingFunc 9 | from lightrag.kg.shared_storage import initialize_pipeline_status 10 | 11 | WORKING_DIR = "./dickens" 12 | 13 | logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO) 14 | 15 | if not os.path.exists(WORKING_DIR): 16 | os.mkdir(WORKING_DIR) 17 | 18 | api_key = os.environ.get("ZHIPUAI_API_KEY") 19 | if api_key is None: 20 | raise Exception("Please set ZHIPU_API_KEY in your environment") 21 | 22 | 23 | async def initialize_rag(): 24 | rag = LightRAG( 25 | working_dir=WORKING_DIR, 26 | llm_model_func=zhipu_complete, 27 | llm_model_name="glm-4-flashx", # Using the most cost/performance balance model, but 
you can change it here. 28 | llm_model_max_async=4, 29 | llm_model_max_token_size=32768, 30 | embedding_func=EmbeddingFunc( 31 | embedding_dim=2048, # Zhipu embedding-3 dimension 32 | max_token_size=8192, 33 | func=lambda texts: zhipu_embedding(texts), 34 | ), 35 | ) 36 | 37 | await rag.initialize_storages() 38 | await initialize_pipeline_status() 39 | 40 | return rag 41 | 42 | 43 | def main(): 44 | # Initialize RAG instance 45 | rag = asyncio.run(initialize_rag()) 46 | 47 | with open("./book.txt", "r", encoding="utf-8") as f: 48 | rag.insert(f.read()) 49 | 50 | # Perform naive search 51 | print( 52 | rag.query( 53 | "What are the top themes in this story?", param=QueryParam(mode="naive") 54 | ) 55 | ) 56 | 57 | # Perform local search 58 | print( 59 | rag.query( 60 | "What are the top themes in this story?", param=QueryParam(mode="local") 61 | ) 62 | ) 63 | 64 | # Perform global search 65 | print( 66 | rag.query( 67 | "What are the top themes in this story?", param=QueryParam(mode="global") 68 | ) 69 | ) 70 | 71 | # Perform hybrid search 72 | print( 73 | rag.query( 74 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 75 | ) 76 | ) 77 | 78 | 79 | if __name__ == "__main__": 80 | main() 81 | -------------------------------------------------------------------------------- /examples/openai_README.md: -------------------------------------------------------------------------------- 1 | 2 | ## API Server Implementation 3 | 4 | LightRAG also provides a FastAPI-based server implementation for RESTful API access to RAG operations. This allows you to run LightRAG as a service and interact with it through HTTP requests. 5 | 6 | ### Setting up the API Server 7 |
8 | Click to expand setup instructions 9 | 10 | 1. First, ensure you have the required dependencies: 11 | ```bash 12 | pip install fastapi uvicorn pydantic 13 | ``` 14 | 15 | 2. Set up your environment variables: 16 | ```bash 17 | export RAG_DIR="your_index_directory" # Optional: Defaults to "index_default" 18 | export OPENAI_BASE_URL="Your OpenAI API base URL" # Optional: Defaults to "https://api.openai.com/v1" 19 | export OPENAI_API_KEY="Your OpenAI API key" # Required 20 | export LLM_MODEL="Your LLM model" # Optional: Defaults to "gpt-4o-mini" 21 | export EMBEDDING_MODEL="Your embedding model" # Optional: Defaults to "text-embedding-3-large" 22 | ``` 23 | 24 | 3. Run the API server: 25 | ```bash 26 | python examples/lightrag_api_openai_compatible_demo.py 27 | ``` 28 | 29 | The server will start on `http://0.0.0.0:8020`. 30 |
31 | 32 | ### API Endpoints 33 | 34 | The API server provides the following endpoints: 35 | 36 | #### 1. Query Endpoint 37 |
38 | Click to view Query endpoint details 39 | 40 | - **URL:** `/query` 41 | - **Method:** POST 42 | - **Body:** 43 | ```json 44 | { 45 | "query": "Your question here", 46 | "mode": "hybrid", // Can be "naive", "local", "global", or "hybrid" 47 | "only_need_context": true // Optional: Defaults to false, if true, only the referenced context will be returned, otherwise the llm answer will be returned 48 | } 49 | ``` 50 | - **Example:** 51 | ```bash 52 | curl -X POST "http://127.0.0.1:8020/query" \ 53 | -H "Content-Type: application/json" \ 54 | -d '{"query": "What are the main themes?", "mode": "hybrid"}' 55 | ``` 56 |
57 | 58 | #### 2. Insert Text Endpoint 59 |
60 | Click to view Insert Text endpoint details 61 | 62 | - **URL:** `/insert` 63 | - **Method:** POST 64 | - **Body:** 65 | ```json 66 | { 67 | "text": "Your text content here" 68 | } 69 | ``` 70 | - **Example:** 71 | ```bash 72 | curl -X POST "http://127.0.0.1:8020/insert" \ 73 | -H "Content-Type: application/json" \ 74 | -d '{"text": "Content to be inserted into RAG"}' 75 | ``` 76 |
77 | 78 | #### 3. Insert File Endpoint 79 |
80 | Click to view Insert File endpoint details 81 | 82 | - **URL:** `/insert_file` 83 | - **Method:** POST 84 | - **Body:** 85 | ```json 86 | { 87 | "file_path": "path/to/your/file.txt" 88 | } 89 | ``` 90 | - **Example:** 91 | ```bash 92 | curl -X POST "http://127.0.0.1:8020/insert_file" \ 93 | -H "Content-Type: application/json" \ 94 | -d '{"file_path": "./book.txt"}' 95 | ``` 96 |
97 | 98 | #### 4. Health Check Endpoint 99 |
100 | Click to view Health Check endpoint details 101 | 102 | - **URL:** `/health` 103 | - **Method:** GET 104 | - **Example:** 105 | ```bash 106 | curl -X GET "http://127.0.0.1:8020/health" 107 | ``` 108 |
109 | 110 | ### Configuration 111 | 112 | The API server can be configured using environment variables: 113 | - `RAG_DIR`: Directory for storing the RAG index (default: "index_default") 114 | - API keys and base URLs should be configured in the code for your specific LLM and embedding model providers 115 | -------------------------------------------------------------------------------- /examples/openai_README_zh.md: -------------------------------------------------------------------------------- 1 | 2 | ## API 服务器实现 3 | 4 | LightRAG also provides a FastAPI-based server implementation for RESTful API access to RAG operations. This allows you to run LightRAG as a service and interact with it through HTTP requests. 5 | LightRAG 还提供基于 FastAPI 的服务器实现,用于对 RAG 操作进行 RESTful API 访问。这允许您将 LightRAG 作为服务运行并通过 HTTP 请求与其交互。 6 | 7 | ### 设置 API 服务器 8 |
9 | 单击展开设置说明 10 | 11 | 1. 首先,确保您具有所需的依赖项: 12 | ```bash 13 | pip install fastapi uvicorn pydantic 14 | ``` 15 | 16 | 2. 设置您的环境变量: 17 | ```bash 18 | export RAG_DIR="your_index_directory" # Optional: Defaults to "index_default" 19 | export OPENAI_BASE_URL="Your OpenAI API base URL" # Optional: Defaults to "https://api.openai.com/v1" 20 | export OPENAI_API_KEY="Your OpenAI API key" # Required 21 | export LLM_MODEL="Your LLM model" # Optional: Defaults to "gpt-4o-mini" 22 | export EMBEDDING_MODEL="Your embedding model" # Optional: Defaults to "text-embedding-3-large" 23 | ``` 24 | 25 | 3. 运行API服务器: 26 | ```bash 27 | python examples/lightrag_api_openai_compatible_demo.py 28 | ``` 29 | 30 | 服务器将启动于 `http://0.0.0.0:8020`. 31 |
32 | 33 | ### API端点 34 | 35 | API服务器提供以下端点: 36 | 37 | #### 1. 查询端点 38 |
39 | 点击查看查询端点详情 40 | 41 | - **URL:** `/query` 42 | - **Method:** POST 43 | - **Body:** 44 | ```json 45 | { 46 | "query": "Your question here", 47 | "mode": "hybrid", // Can be "naive", "local", "global", or "hybrid" 48 | "only_need_context": true // Optional: Defaults to false, if true, only the referenced context will be returned, otherwise the llm answer will be returned 49 | } 50 | ``` 51 | - **Example:** 52 | ```bash 53 | curl -X POST "http://127.0.0.1:8020/query" \ 54 | -H "Content-Type: application/json" \ 55 | -d '{"query": "What are the main themes?", "mode": "hybrid"}' 56 | ``` 57 |
58 | 59 | #### 2. 插入文本端点 60 |
61 | 单击可查看插入文本端点详细信息 62 | 63 | - **URL:** `/insert` 64 | - **Method:** POST 65 | - **Body:** 66 | ```json 67 | { 68 | "text": "Your text content here" 69 | } 70 | ``` 71 | - **Example:** 72 | ```bash 73 | curl -X POST "http://127.0.0.1:8020/insert" \ 74 | -H "Content-Type: application/json" \ 75 | -d '{"text": "Content to be inserted into RAG"}' 76 | ``` 77 |
78 | 79 | #### 3. 插入文件端点 80 |
81 | 单击查看插入文件端点详细信息 82 | 83 | - **URL:** `/insert_file` 84 | - **Method:** POST 85 | - **Body:** 86 | ```json 87 | { 88 | "file_path": "path/to/your/file.txt" 89 | } 90 | ``` 91 | - **Example:** 92 | ```bash 93 | curl -X POST "http://127.0.0.1:8020/insert_file" \ 94 | -H "Content-Type: application/json" \ 95 | -d '{"file_path": "./book.txt"}' 96 | ``` 97 |
98 | 99 | #### 4. 健康检查端点 100 |
101 | 点击查看健康检查端点详细信息 102 | 103 | - **URL:** `/health` 104 | - **Method:** GET 105 | - **Example:** 106 | ```bash 107 | curl -X GET "http://127.0.0.1:8020/health" 108 | ``` 109 |
110 | 111 | ### 配置 112 | 113 | 可以使用环境变量配置API服务器: 114 | - `RAG_DIR`: 存放RAG索引的目录 (default: "index_default") 115 | - 应在代码中为您的特定 LLM 和嵌入模型提供商配置 API 密钥和基本 URL 116 | -------------------------------------------------------------------------------- /examples/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | from lightrag import LightRAG, QueryParam 4 | from lightrag.llm.openai import gpt_4o_mini_complete 5 | from lightrag.kg.shared_storage import initialize_pipeline_status 6 | ######### 7 | # Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert() 8 | # import nest_asyncio 9 | # nest_asyncio.apply() 10 | ######### 11 | 12 | WORKING_DIR = "./dickens" 13 | 14 | if not os.path.exists(WORKING_DIR): 15 | os.mkdir(WORKING_DIR) 16 | 17 | 18 | async def initialize_rag(): 19 | rag = LightRAG( 20 | working_dir=WORKING_DIR, 21 | llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model 22 | # llm_model_func=gpt_4o_complete # Optionally, use a stronger model 23 | ) 24 | 25 | await rag.initialize_storages() 26 | await initialize_pipeline_status() 27 | 28 | return rag 29 | 30 | 31 | def main(): 32 | # Initialize RAG instance 33 | rag = asyncio.run(initialize_rag()) 34 | 35 | with open("./book.txt", "r", encoding="utf-8") as f: 36 | rag.insert(f.read()) 37 | 38 | # Perform naive search 39 | print( 40 | rag.query( 41 | "What are the top themes in this story?", param=QueryParam(mode="naive") 42 | ) 43 | ) 44 | 45 | # Perform local search 46 | print( 47 | rag.query( 48 | "What are the top themes in this story?", param=QueryParam(mode="local") 49 | ) 50 | ) 51 | 52 | # Perform global search 53 | print( 54 | rag.query( 55 | "What are the top themes in this story?", param=QueryParam(mode="global") 56 | ) 57 | ) 58 | 59 | # Perform hybrid search 60 | print( 61 | rag.query( 62 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 63 
| ) 64 | ) 65 | 66 | 67 | if __name__ == "__main__": 68 | main() 69 | -------------------------------------------------------------------------------- /examples/test_faiss.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import asyncio 4 | import numpy as np 5 | 6 | from dotenv import load_dotenv 7 | from sentence_transformers import SentenceTransformer 8 | 9 | from openai import AzureOpenAI 10 | from lightrag import LightRAG, QueryParam 11 | from lightrag.utils import EmbeddingFunc 12 | from lightrag.kg.shared_storage import initialize_pipeline_status 13 | 14 | WORKING_DIR = "./dickens" 15 | # Configure Logging 16 | logging.basicConfig(level=logging.INFO) 17 | 18 | # Load environment variables from .env file 19 | load_dotenv() 20 | AZURE_OPENAI_API_VERSION = os.getenv("AZURE_OPENAI_API_VERSION") 21 | AZURE_OPENAI_DEPLOYMENT = os.getenv("AZURE_OPENAI_DEPLOYMENT") 22 | AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY") 23 | AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT") 24 | 25 | 26 | async def llm_model_func( 27 | prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs 28 | ) -> str: 29 | # Create a client for AzureOpenAI 30 | client = AzureOpenAI( 31 | api_key=AZURE_OPENAI_API_KEY, 32 | api_version=AZURE_OPENAI_API_VERSION, 33 | azure_endpoint=AZURE_OPENAI_ENDPOINT, 34 | ) 35 | 36 | # Build the messages list for the conversation 37 | messages = [] 38 | if system_prompt: 39 | messages.append({"role": "system", "content": system_prompt}) 40 | if history_messages: 41 | messages.extend(history_messages) 42 | messages.append({"role": "user", "content": prompt}) 43 | 44 | # Call the LLM 45 | chat_completion = client.chat.completions.create( 46 | model=AZURE_OPENAI_DEPLOYMENT, 47 | messages=messages, 48 | temperature=kwargs.get("temperature", 0), 49 | top_p=kwargs.get("top_p", 1), 50 | n=kwargs.get("n", 1), 51 | ) 52 | 53 | return 
chat_completion.choices[0].message.content 54 | 55 | 56 | async def embedding_func(texts: list[str]) -> np.ndarray: 57 | model = SentenceTransformer("all-MiniLM-L6-v2") 58 | embeddings = model.encode(texts, convert_to_numpy=True) 59 | return embeddings 60 | 61 | 62 | async def initialize_rag(): 63 | rag = LightRAG( 64 | working_dir=WORKING_DIR, 65 | llm_model_func=llm_model_func, 66 | embedding_func=EmbeddingFunc( 67 | embedding_dim=384, 68 | max_token_size=8192, 69 | func=embedding_func, 70 | ), 71 | vector_storage="FaissVectorDBStorage", 72 | vector_db_storage_cls_kwargs={ 73 | "cosine_better_than_threshold": 0.2 # Your desired threshold 74 | }, 75 | ) 76 | 77 | await rag.initialize_storages() 78 | await initialize_pipeline_status() 79 | 80 | return rag 81 | 82 | 83 | def main(): 84 | # Initialize RAG instance 85 | rag = asyncio.run(initialize_rag()) 86 | # Insert the custom chunks into LightRAG 87 | book1 = open("./book_1.txt", encoding="utf-8") 88 | book2 = open("./book_2.txt", encoding="utf-8") 89 | 90 | rag.insert([book1.read(), book2.read()]) 91 | 92 | query_text = "What are the main themes?" 
93 | 94 | print("Result (Naive):") 95 | print(rag.query(query_text, param=QueryParam(mode="naive"))) 96 | 97 | print("\nResult (Local):") 98 | print(rag.query(query_text, param=QueryParam(mode="local"))) 99 | 100 | print("\nResult (Global):") 101 | print(rag.query(query_text, param=QueryParam(mode="global"))) 102 | 103 | print("\nResult (Hybrid):") 104 | print(rag.query(query_text, param=QueryParam(mode="hybrid"))) 105 | 106 | 107 | if __name__ == "__main__": 108 | main() 109 | -------------------------------------------------------------------------------- /examples/test_neo4j.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | from lightrag import LightRAG, QueryParam 4 | from lightrag.llm.openai import gpt_4o_mini_complete 5 | from lightrag.kg.shared_storage import initialize_pipeline_status 6 | 7 | ######### 8 | # Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert() 9 | # import nest_asyncio 10 | # nest_asyncio.apply() 11 | ######### 12 | 13 | WORKING_DIR = "./local_neo4jWorkDir" 14 | 15 | if not os.path.exists(WORKING_DIR): 16 | os.mkdir(WORKING_DIR) 17 | 18 | 19 | async def initialize_rag(): 20 | rag = LightRAG( 21 | working_dir=WORKING_DIR, 22 | llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model 23 | graph_storage="Neo4JStorage", 24 | log_level="INFO", 25 | # llm_model_func=gpt_4o_complete # Optionally, use a stronger model 26 | ) 27 | 28 | await rag.initialize_storages() 29 | await initialize_pipeline_status() 30 | 31 | return rag 32 | 33 | 34 | def main(): 35 | # Initialize RAG instance 36 | rag = asyncio.run(initialize_rag()) 37 | 38 | with open("./book.txt", "r", encoding="utf-8") as f: 39 | rag.insert(f.read()) 40 | 41 | # Perform naive search 42 | print( 43 | rag.query( 44 | "What are the top themes in this story?", param=QueryParam(mode="naive") 45 | ) 46 | ) 47 | 48 | # Perform local search 49 | print( 50 | 
rag.query( 51 | "What are the top themes in this story?", param=QueryParam(mode="local") 52 | ) 53 | ) 54 | 55 | # Perform global search 56 | print( 57 | rag.query( 58 | "What are the top themes in this story?", param=QueryParam(mode="global") 59 | ) 60 | ) 61 | 62 | # Perform hybrid search 63 | print( 64 | rag.query( 65 | "What are the top themes in this story?", param=QueryParam(mode="hybrid") 66 | ) 67 | ) 68 | 69 | 70 | if __name__ == "__main__": 71 | main() 72 | -------------------------------------------------------------------------------- /examples/test_postgres.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | from lightrag.kg.postgres_impl import PGGraphStorage 4 | from lightrag.llm.ollama import ollama_embedding 5 | from lightrag.utils import EmbeddingFunc 6 | 7 | ######### 8 | # Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert() 9 | # import nest_asyncio 10 | # nest_asyncio.apply() 11 | ######### 12 | 13 | WORKING_DIR = "./local_neo4jWorkDir" 14 | 15 | if not os.path.exists(WORKING_DIR): 16 | os.mkdir(WORKING_DIR) 17 | 18 | # AGE 19 | os.environ["AGE_GRAPH_NAME"] = "dickens" 20 | 21 | os.environ["POSTGRES_HOST"] = "localhost" 22 | os.environ["POSTGRES_PORT"] = "15432" 23 | os.environ["POSTGRES_USER"] = "rag" 24 | os.environ["POSTGRES_PASSWORD"] = "rag" 25 | os.environ["POSTGRES_DATABASE"] = "rag" 26 | 27 | 28 | async def main(): 29 | graph_db = PGGraphStorage( 30 | namespace="dickens", 31 | embedding_func=EmbeddingFunc( 32 | embedding_dim=1024, 33 | max_token_size=8192, 34 | func=lambda texts: ollama_embedding( 35 | texts, embed_model="bge-m3", host="http://localhost:11434" 36 | ), 37 | ), 38 | global_config={}, 39 | ) 40 | await graph_db.initialize() 41 | labels = await graph_db.get_all_labels() 42 | print("all labels", labels) 43 | 44 | res = await graph_db.get_knowledge_graph("FEZZIWIG") 45 | print("knowledge graphs", res) 46 
| 47 | await graph_db.finalize() 48 | 49 | 50 | if __name__ == "__main__": 51 | asyncio.run(main()) 52 | -------------------------------------------------------------------------------- /lightrag-api: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source /home/netman/lightrag-xyj/venv/bin/activate 4 | lightrag-server 5 | -------------------------------------------------------------------------------- /lightrag.service.example: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=LightRAG XYJ Ollama Service 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=netman 8 | # Memory settings 9 | MemoryHigh=8G 10 | MemoryMax=12G 11 | WorkingDirectory=/home/netman/lightrag-xyj 12 | ExecStart=/home/netman/lightrag-xyj/lightrag-api 13 | Restart=always 14 | RestartSec=10 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /lightrag/__init__.py: -------------------------------------------------------------------------------- 1 | from .lightrag import LightRAG as LightRAG, QueryParam as QueryParam 2 | 3 | __version__ = "1.3.4" 4 | __author__ = "Zirui Guo" 5 | __url__ = "https://github.com/HKUDS/LightRAG" 6 | -------------------------------------------------------------------------------- /lightrag/api/.env.aoi.example: -------------------------------------------------------------------------------- 1 | AZURE_OPENAI_API_VERSION=2024-08-01-preview 2 | AZURE_OPENAI_DEPLOYMENT=gpt-4o 3 | AZURE_OPENAI_API_KEY=myapikey 4 | AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com 5 | 6 | AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large 7 | AZURE_EMBEDDING_API_VERSION=2023-05-15 8 | -------------------------------------------------------------------------------- /lightrag/api/.gitignore: -------------------------------------------------------------------------------- 1 | 
inputs 2 | rag_storage 3 | -------------------------------------------------------------------------------- /lightrag/api/README.assets/image-20250323122538997.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/lightrag/api/README.assets/image-20250323122538997.png -------------------------------------------------------------------------------- /lightrag/api/README.assets/image-20250323122754387.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/lightrag/api/README.assets/image-20250323122754387.png -------------------------------------------------------------------------------- /lightrag/api/README.assets/image-20250323123011220.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/lightrag/api/README.assets/image-20250323123011220.png -------------------------------------------------------------------------------- /lightrag/api/README.assets/image-20250323194750379.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/lightrag/api/README.assets/image-20250323194750379.png -------------------------------------------------------------------------------- /lightrag/api/__init__.py: -------------------------------------------------------------------------------- 1 | __api_version__ = "0161" 2 | -------------------------------------------------------------------------------- /lightrag/api/requirements.txt: -------------------------------------------------------------------------------- 1 | aiofiles 2 | ascii_colors 3 | asyncpg 4 | distro 5 | fastapi 6 | graspologic>=3.4.1 7 | httpcore 
8 | httpx 9 | jiter 10 | numpy 11 | openai 12 | passlib[bcrypt] 13 | pipmaster 14 | PyJWT 15 | python-dotenv 16 | python-jose[cryptography] 17 | python-multipart 18 | pytz 19 | pyuca 20 | tenacity 21 | tiktoken 22 | uvicorn 23 | -------------------------------------------------------------------------------- /lightrag/api/routers/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains all the routers for the LightRAG API. 3 | """ 4 | 5 | from .document_routes import router as document_router 6 | from .query_routes import router as query_router 7 | from .graph_routes import router as graph_router 8 | from .ollama_api import OllamaAPI 9 | 10 | __all__ = ["document_router", "query_router", "graph_router", "OllamaAPI"] 11 | -------------------------------------------------------------------------------- /lightrag/api/webui/assets/_basePickBy-DknajloF.js: -------------------------------------------------------------------------------- 1 | import{e as v,c as b,g as m,k as O,h as P,j as p,l as w,m as c,n as x,t as A,o as N}from"./_baseUniq-CAP6r7gD.js";import{aU as g,aq as _,aV as $,aW as E,aX as F,aY as I,aZ as M,a_ as y,a$ as B,b0 as T}from"./mermaid-vendor-BUVhGKJz.js";var S=/\s/;function q(n){for(var r=n.length;r--&&S.test(n.charAt(r)););return r}var G=/^\s+/;function H(n){return n&&n.slice(0,q(n)+1).replace(G,"")}var o=NaN,L=/^[-+]0x[0-9a-f]+$/i,R=/^0b[01]+$/i,W=/^0o[0-7]+$/i,X=parseInt;function Y(n){if(typeof n=="number")return n;if(v(n))return o;if(g(n)){var r=typeof n.valueOf=="function"?n.valueOf():n;n=g(r)?r+"":r}if(typeof n!="string")return n===0?n:+n;n=H(n);var t=R.test(n);return t||W.test(n)?X(n.slice(2),t?2:8):L.test(n)?o:+n}var z=1/0,C=17976931348623157e292;function K(n){if(!n)return n===0?n:0;if(n=Y(n),n===z||n===-1/0){var r=n<0?-1:1;return r*C}return n===n?n:0}function U(n){var r=K(n),t=r%1;return r===r?t?r-t:r:0}function fn(n){var r=n==null?0:n.length;return r?b(n):[]}var 
l=Object.prototype,Z=l.hasOwnProperty,dn=_(function(n,r){n=Object(n);var t=-1,e=r.length,a=e>2?r[2]:void 0;for(a&&$(r[0],r[1],a)&&(e=1);++t-1?a[f?r[i]:i]:void 0}}var J=Math.max;function Q(n,r,t){var e=n==null?0:n.length;if(!e)return-1;var a=t==null?0:U(t);return a<0&&(a=J(e+a,0)),P(n,m(r),a)}var hn=D(Q);function V(n,r){var t=-1,e=I(n)?Array(n.length):[];return p(n,function(a,f,i){e[++t]=r(a,f,i)}),e}function gn(n,r){var t=M(n)?w:V;return t(n,m(r))}var j=Object.prototype,k=j.hasOwnProperty;function nn(n,r){return n!=null&&k.call(n,r)}function mn(n,r){return n!=null&&c(n,r,nn)}function rn(n,r){return n{const e=a.append("rect");if(e.attr("x",t.x),e.attr("y",t.y),e.attr("fill",t.fill),e.attr("stroke",t.stroke),e.attr("width",t.width),e.attr("height",t.height),t.name&&e.attr("name",t.name),t.rx&&e.attr("rx",t.rx),t.ry&&e.attr("ry",t.ry),t.attrs!==void 0)for(const r in t.attrs)e.attr(r,t.attrs[r]);return t.class&&e.attr("class",t.class),e},"drawRect"),d=n((a,t)=>{const e={x:t.startx,y:t.starty,width:t.stopx-t.startx,height:t.stopy-t.starty,fill:t.fill,stroke:t.stroke,class:"rect"};c(a,e).lower()},"drawBackgroundRect"),g=n((a,t)=>{const e=t.text.replace(x," "),r=a.append("text");r.attr("x",t.x),r.attr("y",t.y),r.attr("class","legend"),r.style("text-anchor",t.anchor),t.class&&r.attr("class",t.class);const s=r.append("tspan");return s.attr("x",t.x+t.textMargin*2),s.text(e),r},"drawText"),h=n((a,t,e,r)=>{const s=a.append("image");s.attr("x",t),s.attr("y",e);const i=l.sanitizeUrl(r);s.attr("xlink:href",i)},"drawImage"),m=n((a,t,e,r)=>{const s=a.append("use");s.attr("x",t),s.attr("y",e);const i=l.sanitizeUrl(r);s.attr("xlink:href",`#${i}`)},"drawEmbeddedImage"),y=n(()=>({x:0,y:0,width:100,height:100,fill:"#EDF2AE",stroke:"#666",anchor:"start",rx:0,ry:0}),"getNoteRect"),p=n(()=>({x:0,y:0,width:100,height:100,"text-anchor":"start",style:"#666",textMargin:0,rx:0,ry:0,tspan:!0}),"getTextObj");export{d as a,p as b,m as c,c as d,h as e,g as f,y as g}; 2 | 
-------------------------------------------------------------------------------- /lightrag/api/webui/assets/chunk-RZ5BOZE2-S9T1dBtP.js: -------------------------------------------------------------------------------- 1 | import{_ as n,d as r,e as d,l as g}from"./mermaid-vendor-BUVhGKJz.js";var u=n((e,t)=>{let o;return t==="sandbox"&&(o=r("#i"+e)),(t==="sandbox"?r(o.nodes()[0].contentDocument.body):r("body")).select(`[id="${e}"]`)},"getDiagramElement"),b=n((e,t,o,i)=>{e.attr("class",o);const{width:a,height:s,x:h,y:x}=l(e,t);d(e,s,a,i);const c=w(h,x,a,s,t);e.attr("viewBox",c),g.debug(`viewBox configured: ${c} with padding: ${t}`)},"setupViewPortForSVG"),l=n((e,t)=>{var i;const o=((i=e.node())==null?void 0:i.getBBox())||{width:0,height:0,x:0,y:0};return{width:o.width+t*2,height:o.height+t*2,x:o.x,y:o.y}},"calculateDimensionsWithPadding"),w=n((e,t,o,i,a)=>`${e-a} ${t-a} ${o} ${i}`,"createViewBox");export{u as g,b as s}; 2 | -------------------------------------------------------------------------------- /lightrag/api/webui/assets/chunk-XZIHB7SX-CsCFuiVk.js: -------------------------------------------------------------------------------- 1 | import{_ as s}from"./mermaid-vendor-BUVhGKJz.js";var t,e=(t=class{constructor(i){this.init=i,this.records=this.init()}reset(){this.records=this.init()}},s(t,"ImperativeState"),t);export{e as I}; 2 | -------------------------------------------------------------------------------- /lightrag/api/webui/assets/classDiagram-GIVACNV2-Cc9VoQHS.js: -------------------------------------------------------------------------------- 1 | import{s as a,c as s,a as e,C as t}from"./chunk-A2AXSNBT-CuE3TSRI.js";import{_ as i}from"./mermaid-vendor-BUVhGKJz.js";import"./chunk-RZ5BOZE2-S9T1dBtP.js";import"./feature-graph-B_AW4YHh.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var f={parser:e,get db(){return new 
t},renderer:s,styles:a,init:i(r=>{r.class||(r.class={}),r.class.arrowMarkerAbsolute=r.arrowMarkerAbsolute},"init")};export{f as diagram}; 2 | -------------------------------------------------------------------------------- /lightrag/api/webui/assets/classDiagram-v2-COTLJTTW-Cc9VoQHS.js: -------------------------------------------------------------------------------- 1 | import{s as a,c as s,a as e,C as t}from"./chunk-A2AXSNBT-CuE3TSRI.js";import{_ as i}from"./mermaid-vendor-BUVhGKJz.js";import"./chunk-RZ5BOZE2-S9T1dBtP.js";import"./feature-graph-B_AW4YHh.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var f={parser:e,get db(){return new t},renderer:s,styles:a,init:i(r=>{r.class||(r.class={}),r.class.arrowMarkerAbsolute=r.arrowMarkerAbsolute},"init")};export{f as diagram}; 2 | -------------------------------------------------------------------------------- /lightrag/api/webui/assets/clone-ChJdlG_Y.js: -------------------------------------------------------------------------------- 1 | import{b as r}from"./_baseUniq-CAP6r7gD.js";var e=4;function a(o){return r(o,e)}export{a as c}; 2 | -------------------------------------------------------------------------------- /lightrag/api/webui/assets/infoDiagram-PH2N3AL5-bBE-CQNo.js: -------------------------------------------------------------------------------- 1 | import{_ as e,l as o,K as i,e as n,L as p}from"./mermaid-vendor-BUVhGKJz.js";import{p as m}from"./radar-MK3ICKWK-G3-HZCUi.js";import"./feature-graph-B_AW4YHh.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-CAP6r7gD.js";import"./_basePickBy-DknajloF.js";import"./clone-ChJdlG_Y.js";var g={parse:e(async r=>{const a=await m("info",r);o.debug(a)},"parse")},v={version:p.version},d=e(()=>v.version,"getVersion"),c={getVersion:d},l=e((r,a,s)=>{o.debug(`rendering info 
diagram 2 | `+r);const t=i(a);n(t,100,400,!0),t.append("g").append("text").attr("x",100).attr("y",40).attr("class","version").attr("font-size",32).style("text-anchor","middle").text(`v${s}`)},"draw"),f={draw:l},L={parser:g,db:c,renderer:f};export{L as diagram}; 3 | -------------------------------------------------------------------------------- /lightrag/api/webui/assets/stateDiagram-v2-YXO3MK2T-2rq8ws-h.js: -------------------------------------------------------------------------------- 1 | import{s as r,b as e,a,S as s}from"./chunk-AEK57VVT-WVCu-Vg-.js";import{_ as i}from"./mermaid-vendor-BUVhGKJz.js";import"./chunk-RZ5BOZE2-S9T1dBtP.js";import"./feature-graph-B_AW4YHh.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var b={parser:a,get db(){return new s(2)},renderer:e,styles:r,init:i(t=>{t.state||(t.state={}),t.state.arrowMarkerAbsolute=t.arrowMarkerAbsolute},"init")};export{b as diagram}; 2 | -------------------------------------------------------------------------------- /lightrag/api/webui/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | Lightrag 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 |
class APIStatusError(Exception):
    """Raised when an API response has a status code of 4xx or 5xx.

    Attributes:
        response: The full httpx response that triggered the error.
        status_code: HTTP status code copied from the response.
        request_id: Value of the ``x-request-id`` response header, if any.
        body: The (already decoded) response body, if the caller had one.
    """

    response: httpx.Response
    status_code: int
    request_id: str | None

    def __init__(
        self, message: str, *, response: httpx.Response, body: object | None
    ) -> None:
        # BUGFIX: Exception.__init__ rejects keyword arguments, so the old
        # call ``super().__init__(message, response.request, body=body)``
        # raised ``TypeError: Exception() takes no keyword arguments`` the
        # moment anyone tried to raise one of these errors.  Pass only the
        # message to the base class and keep the rest as attributes.
        super().__init__(message)
        self.response = response
        self.status_code = response.status_code
        self.request_id = response.headers.get("x-request-id")
        self.body = body


class APIConnectionError(Exception):
    """Raised when the HTTP request could not be completed at all."""

    def __init__(
        self, *, message: str = "Connection error.", request: httpx.Request
    ) -> None:
        # BUGFIX: same keyword-argument crash as APIStatusError; keep the
        # request on the instance instead of forwarding it to Exception.
        super().__init__(message)
        self.request = request


class BadRequestError(APIStatusError):
    status_code: Literal[400] = 400  # pyright: ignore[reportIncompatibleVariableOverride]


class AuthenticationError(APIStatusError):
    status_code: Literal[401] = 401  # pyright: ignore[reportIncompatibleVariableOverride]


class PermissionDeniedError(APIStatusError):
    status_code: Literal[403] = 403  # pyright: ignore[reportIncompatibleVariableOverride]


class NotFoundError(APIStatusError):
    status_code: Literal[404] = 404  # pyright: ignore[reportIncompatibleVariableOverride]


class ConflictError(APIStatusError):
    status_code: Literal[409] = 409  # pyright: ignore[reportIncompatibleVariableOverride]


class UnprocessableEntityError(APIStatusError):
    status_code: Literal[422] = 422  # pyright: ignore[reportIncompatibleVariableOverride]


class RateLimitError(APIStatusError):
    status_code: Literal[429] = 429  # pyright: ignore[reportIncompatibleVariableOverride]


class APITimeoutError(APIConnectionError):
    """Connection error specialisation for request timeouts."""

    def __init__(self, request: httpx.Request) -> None:
        super().__init__(message="Request timed out.", request=request)
"Content-Type": "application/json", 35 | "Authorization": f"Bearer {os.environ['JINA_API_KEY']}", 36 | } 37 | data = { 38 | "model": "jina-embeddings-v3", 39 | "normalized": True, 40 | "embedding_type": "float", 41 | "dimensions": f"{dimensions}", 42 | "late_chunking": late_chunking, 43 | "input": texts, 44 | } 45 | data_list = await fetch_data(url, headers, data) 46 | return np.array([dp["embedding"] for dp in data_list]) 47 | -------------------------------------------------------------------------------- /lightrag/llm/nvidia_openai.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | if sys.version_info < (3, 9): 5 | pass 6 | else: 7 | pass 8 | 9 | import pipmaster as pm # Pipmaster for dynamic library install 10 | 11 | # install specific modules 12 | if not pm.is_installed("openai"): 13 | pm.install("openai") 14 | 15 | from openai import ( 16 | AsyncOpenAI, 17 | APIConnectionError, 18 | RateLimitError, 19 | APITimeoutError, 20 | ) 21 | from tenacity import ( 22 | retry, 23 | stop_after_attempt, 24 | wait_exponential, 25 | retry_if_exception_type, 26 | ) 27 | 28 | from lightrag.utils import ( 29 | wrap_embedding_func_with_attrs, 30 | ) 31 | 32 | 33 | import numpy as np 34 | 35 | 36 | @wrap_embedding_func_with_attrs(embedding_dim=2048, max_token_size=512) 37 | @retry( 38 | stop=stop_after_attempt(3), 39 | wait=wait_exponential(multiplier=1, min=4, max=60), 40 | retry=retry_if_exception_type( 41 | (RateLimitError, APIConnectionError, APITimeoutError) 42 | ), 43 | ) 44 | async def nvidia_openai_embed( 45 | texts: list[str], 46 | model: str = "nvidia/llama-3.2-nv-embedqa-1b-v1", 47 | # refer to https://build.nvidia.com/nim?filters=usecase%3Ausecase_text_to_embedding 48 | base_url: str = "https://integrate.api.nvidia.com/v1", 49 | api_key: str = None, 50 | input_type: str = "passage", # query for retrieval, passage for embedding 51 | trunc: str = "NONE", # NONE or START or END 52 | encode: str = 
"float", # float or base64 53 | ) -> np.ndarray: 54 | if api_key: 55 | os.environ["OPENAI_API_KEY"] = api_key 56 | 57 | openai_async_client = ( 58 | AsyncOpenAI() if base_url is None else AsyncOpenAI(base_url=base_url) 59 | ) 60 | response = await openai_async_client.embeddings.create( 61 | model=model, 62 | input=texts, 63 | encoding_format=encode, 64 | extra_body={"input_type": input_type, "truncate": trunc}, 65 | ) 66 | return np.array([dp.embedding for dp in response.data]) 67 | -------------------------------------------------------------------------------- /lightrag/llm/siliconcloud.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | if sys.version_info < (3, 9): 4 | pass 5 | else: 6 | pass 7 | import pipmaster as pm # Pipmaster for dynamic library install 8 | 9 | # install specific modules 10 | if not pm.is_installed("lmdeploy"): 11 | pm.install("lmdeploy") 12 | 13 | from openai import ( 14 | APIConnectionError, 15 | RateLimitError, 16 | APITimeoutError, 17 | ) 18 | from tenacity import ( 19 | retry, 20 | stop_after_attempt, 21 | wait_exponential, 22 | retry_if_exception_type, 23 | ) 24 | 25 | 26 | import numpy as np 27 | import aiohttp 28 | import base64 29 | import struct 30 | 31 | 32 | @retry( 33 | stop=stop_after_attempt(3), 34 | wait=wait_exponential(multiplier=1, min=4, max=60), 35 | retry=retry_if_exception_type( 36 | (RateLimitError, APIConnectionError, APITimeoutError) 37 | ), 38 | ) 39 | async def siliconcloud_embedding( 40 | texts: list[str], 41 | model: str = "netease-youdao/bce-embedding-base_v1", 42 | base_url: str = "https://api.siliconflow.cn/v1/embeddings", 43 | max_token_size: int = 512, 44 | api_key: str = None, 45 | ) -> np.ndarray: 46 | if api_key and not api_key.startswith("Bearer "): 47 | api_key = "Bearer " + api_key 48 | 49 | headers = {"Authorization": api_key, "Content-Type": "application/json"} 50 | 51 | truncate_texts = [text[0:max_token_size] for text in texts] 52 | 53 | 
payload = {"model": model, "input": truncate_texts, "encoding_format": "base64"} 54 | 55 | base64_strings = [] 56 | async with aiohttp.ClientSession() as session: 57 | async with session.post(base_url, headers=headers, json=payload) as response: 58 | content = await response.json() 59 | if "code" in content: 60 | raise ValueError(content) 61 | base64_strings = [item["embedding"] for item in content["data"]] 62 | 63 | embeddings = [] 64 | for string in base64_strings: 65 | decode_bytes = base64.b64decode(string) 66 | n = len(decode_bytes) // 4 67 | float_array = struct.unpack("<" + "f" * n, decode_bytes) 68 | embeddings.append(float_array) 69 | return np.array(embeddings) 70 | -------------------------------------------------------------------------------- /lightrag/namespace.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Iterable 4 | 5 | 6 | class NameSpace: 7 | KV_STORE_FULL_DOCS = "full_docs" 8 | KV_STORE_TEXT_CHUNKS = "text_chunks" 9 | KV_STORE_LLM_RESPONSE_CACHE = "llm_response_cache" 10 | 11 | VECTOR_STORE_ENTITIES = "entities" 12 | VECTOR_STORE_RELATIONSHIPS = "relationships" 13 | VECTOR_STORE_CHUNKS = "chunks" 14 | 15 | GRAPH_STORE_CHUNK_ENTITY_RELATION = "chunk_entity_relation" 16 | 17 | DOC_STATUS = "doc_status" 18 | 19 | 20 | def make_namespace(prefix: str, base_namespace: str): 21 | return prefix + base_namespace 22 | 23 | 24 | def is_namespace(namespace: str, base_namespace: str | Iterable[str]): 25 | if isinstance(base_namespace, str): 26 | return namespace.endswith(base_namespace) 27 | return any(is_namespace(namespace, ns) for ns in base_namespace) 28 | -------------------------------------------------------------------------------- /lightrag/tools/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/lightrag/tools/__init__.py -------------------------------------------------------------------------------- /lightrag/tools/lightrag_visualizer/README-zh.md: -------------------------------------------------------------------------------- 1 | # 3D GraphML Viewer 2 | 3 | 一个基于 Dear ImGui 和 ModernGL 的交互式 3D 图可视化工具。 4 | 5 | ## 功能特点 6 | 7 | - **3D 交互式可视化**: 使用 ModernGL 实现高性能的 3D 图形渲染 8 | - **多种布局算法**: 支持多种图布局方式 9 | - Spring 布局 10 | - Circular 布局 11 | - Shell 布局 12 | - Random 布局 13 | - **社区检测**: 支持图社区结构的自动检测和可视化 14 | - **交互控制**: 15 | - WASD + QE 键控制相机移动 16 | - 鼠标右键拖拽控制视角 17 | - 节点选择和高亮 18 | - 可调节节点大小和边宽度 19 | - 可控制标签显示 20 | - 可在节点的Connections间快速跳转 21 | - **社区检测**: 支持图社区结构的自动检测和可视化 22 | - **交互控制**: 23 | - WASD + QE 键控制相机移动 24 | - 鼠标右键拖拽控制视角 25 | - 节点选择和高亮 26 | - 可调节节点大小和边宽度 27 | - 可控制标签显示 28 | 29 | ## 技术栈 30 | 31 | - **imgui_bundle**: 用户界面 32 | - **ModernGL**: OpenGL 图形渲染 33 | - **NetworkX**: 图数据结构和算法 34 | - **NumPy**: 数值计算 35 | - **community**: 社区检测 36 | 37 | ## 使用方法 38 | 39 | 1. **启动程序**: 40 | ```bash 41 | pip install lightrag-hku[tools] 42 | lightrag-viewer 43 | ``` 44 | 45 | 2. **加载字体**: 46 | - 将中文字体文件 `font.ttf` 放置在 `assets` 目录下 47 | - 或者修改 `CUSTOM_FONT` 常量来使用其他字体文件 48 | 49 | 3. **加载图文件**: 50 | - 点击界面上的 "Load GraphML" 按钮 51 | - 选择 GraphML 格式的图文件 52 | 53 | 4. **交互控制**: 54 | - **相机移动**: 55 | - W: 前进 56 | - S: 后退 57 | - A: 左移 58 | - D: 右移 59 | - Q: 上升 60 | - E: 下降 61 | - **视角控制**: 62 | - 按住鼠标右键拖动来旋转视角 63 | - **节点交互**: 64 | - 鼠标悬停可高亮节点 65 | - 点击可选中节点 66 | 67 | 5. 
**可视化设置**: 68 | - 可通过 UI 控制面板调整: 69 | - 布局类型 70 | - 节点大小 71 | - 边的宽度 72 | - 标签显示 73 | - 标签大小 74 | - 背景颜色 75 | 76 | ## 自定义设置 77 | 78 | - **节点缩放**: 通过 `node_scale` 参数调整节点大小 79 | - **边宽度**: 通过 `edge_width` 参数调整边的宽度 80 | - **标签显示**: 可通过 `show_labels` 开关标签显示 81 | - **标签大小**: 使用 `label_size` 调整标签大小 82 | - **标签颜色**: 通过 `label_color` 设置标签颜色 83 | - **视距控制**: 使用 `label_culling_distance` 控制标签显示的最大距离 84 | 85 | ## 性能优化 86 | 87 | - 使用 ModernGL 进行高效的图形渲染 88 | - 视距裁剪优化标签显示 89 | - 社区检测算法优化大规模图的可视化效果 90 | 91 | ## 系统要求 92 | 93 | - Python 3.10+ 94 | - OpenGL 3.3+ 兼容的显卡 95 | - 支持的操作系统:Windows/Linux/MacOS 96 | -------------------------------------------------------------------------------- /lightrag/tools/lightrag_visualizer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/lightrag/tools/lightrag_visualizer/__init__.py -------------------------------------------------------------------------------- /lightrag/tools/lightrag_visualizer/assets/Geist-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/lightrag/tools/lightrag_visualizer/assets/Geist-Regular.ttf -------------------------------------------------------------------------------- /lightrag/tools/lightrag_visualizer/assets/SmileySans-Oblique.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/lightrag/tools/lightrag_visualizer/assets/SmileySans-Oblique.ttf -------------------------------------------------------------------------------- /lightrag/tools/lightrag_visualizer/assets/place_font_here: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/lightrag/tools/lightrag_visualizer/assets/place_font_here -------------------------------------------------------------------------------- /lightrag/tools/lightrag_visualizer/requirements.txt: -------------------------------------------------------------------------------- 1 | imgui_bundle 2 | moderngl 3 | networkx 4 | numpy 5 | pyglm 6 | python-louvain 7 | scipy 8 | tk 9 | -------------------------------------------------------------------------------- /lightrag/types.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from pydantic import BaseModel 4 | from typing import Any, Optional 5 | 6 | 7 | class GPTKeywordExtractionFormat(BaseModel): 8 | high_level_keywords: list[str] 9 | low_level_keywords: list[str] 10 | 11 | 12 | class KnowledgeGraphNode(BaseModel): 13 | id: str 14 | labels: list[str] 15 | properties: dict[str, Any] # anything else goes here 16 | 17 | 18 | class KnowledgeGraphEdge(BaseModel): 19 | id: str 20 | type: Optional[str] 21 | source: str # id of source node 22 | target: str # id of target node 23 | properties: dict[str, Any] # anything else goes here 24 | 25 | 26 | class KnowledgeGraph(BaseModel): 27 | nodes: list[KnowledgeGraphNode] = [] 28 | edges: list[KnowledgeGraphEdge] = [] 29 | is_truncated: bool = False 30 | -------------------------------------------------------------------------------- /lightrag_webui/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | pnpm-debug.log* 8 | lerna-debug.log* 9 | 10 | node_modules 11 | dist 12 | dist-ssr 13 | *.local 14 | 15 | # Editor directories and files 16 | .vscode/* 17 | !.vscode/extensions.json 18 | .idea 19 | .DS_Store 20 | *.suo 21 | *.ntvs* 22 | *.njsproj 23 | *.sln 24 | *.sw? 
25 | -------------------------------------------------------------------------------- /lightrag_webui/.prettierrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/prettierrc", 3 | "semi": false, 4 | "tabWidth": 2, 5 | "singleQuote": true, 6 | "printWidth": 100, 7 | "trailingComma": "none", 8 | "endOfLine": "crlf", 9 | "plugins": ["prettier-plugin-tailwindcss"] 10 | } 11 | -------------------------------------------------------------------------------- /lightrag_webui/README.md: -------------------------------------------------------------------------------- 1 | # LightRAG WebUI 2 | 3 | LightRAG WebUI is a React-based web interface for interacting with the LightRAG system. It provides a user-friendly interface for querying, managing, and exploring LightRAG's functionalities. 4 | 5 | ## Installation 6 | 7 | 1. **Install Bun:** 8 | 9 | If you haven't already installed Bun, follow the official documentation: [https://bun.sh/docs/installation](https://bun.sh/docs/installation) 10 | 11 | 2. **Install Dependencies:** 12 | 13 | In the `lightrag_webui` directory, run the following command to install project dependencies: 14 | 15 | ```bash 16 | bun install --frozen-lockfile 17 | ``` 18 | 19 | 3. **Build the Project:** 20 | 21 | Run the following command to build the project: 22 | 23 | ```bash 24 | bun run build --emptyOutDir 25 | ``` 26 | 27 | This command will bundle the project and output the built files to the `lightrag/api/webui` directory. 28 | 29 | ## Development 30 | 31 | - **Start the Development Server:** 32 | 33 | If you want to run the WebUI in development mode, use the following command: 34 | 35 | ```bash 36 | bun run dev 37 | ``` 38 | 39 | ## Script Commands 40 | 41 | The following are some commonly used script commands defined in `package.json`: 42 | 43 | - `bun install`: Installs project dependencies. 44 | - `bun run dev`: Starts the development server. 
45 | - `bun run build`: Builds the project. 46 | - `bun run lint`: Runs the linter. 47 | -------------------------------------------------------------------------------- /lightrag_webui/components.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://ui.shadcn.com/schema.json", 3 | "style": "new-york", 4 | "rsc": false, 5 | "tsx": true, 6 | "tailwind": { 7 | "config": "", 8 | "css": "src/index.css", 9 | "baseColor": "zinc", 10 | "cssVariables": true, 11 | "prefix": "" 12 | }, 13 | "aliases": { 14 | "components": "@/components", 15 | "utils": "@/lib/utils", 16 | "ui": "@/components/ui", 17 | "lib": "@/lib", 18 | "hooks": "@/hooks" 19 | }, 20 | "iconLibrary": "lucide" 21 | } 22 | -------------------------------------------------------------------------------- /lightrag_webui/env.development.smaple: -------------------------------------------------------------------------------- 1 | # Development environment configuration 2 | VITE_BACKEND_URL=/api 3 | -------------------------------------------------------------------------------- /lightrag_webui/env.local.sample: -------------------------------------------------------------------------------- 1 | VITE_BACKEND_URL=http://localhost:9621 2 | VITE_API_PROXY=true 3 | VITE_API_ENDPOINTS=/,/api,/documents,/graphs,/graph,/health,/query,/docs,/openapi.json,/login,/auth-status 4 | -------------------------------------------------------------------------------- /lightrag_webui/eslint.config.js: -------------------------------------------------------------------------------- 1 | import js from '@eslint/js' 2 | import globals from 'globals' 3 | import reactHooks from 'eslint-plugin-react-hooks' 4 | import reactRefresh from 'eslint-plugin-react-refresh' 5 | import stylisticJs from '@stylistic/eslint-plugin-js' 6 | import tseslint from 'typescript-eslint' 7 | import prettier from 'eslint-config-prettier' 8 | import react from 'eslint-plugin-react' 9 | 10 | export 
default tseslint.config( 11 | { ignores: ['dist'] }, 12 | { 13 | extends: [js.configs.recommended, ...tseslint.configs.recommended, prettier], 14 | files: ['**/*.{ts,tsx,js,jsx}'], 15 | languageOptions: { 16 | ecmaVersion: 2020, 17 | globals: globals.browser 18 | }, 19 | settings: { react: { version: '19.0' } }, 20 | plugins: { 21 | 'react-hooks': reactHooks, 22 | 'react-refresh': reactRefresh, 23 | '@stylistic/js': stylisticJs, 24 | react 25 | }, 26 | rules: { 27 | ...reactHooks.configs.recommended.rules, 28 | 'react-refresh/only-export-components': ['warn', { allowConstantExport: true }], 29 | ...react.configs.recommended.rules, 30 | ...react.configs['jsx-runtime'].rules, 31 | '@stylistic/js/indent': ['error', 2], 32 | '@stylistic/js/quotes': ['error', 'single'], 33 | '@typescript-eslint/no-explicit-any': ['off'] 34 | } 35 | } 36 | ) 37 | -------------------------------------------------------------------------------- /lightrag_webui/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | Lightrag 11 | 12 | 13 |
14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /lightrag_webui/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "lightrag-webui", 3 | "private": true, 4 | "version": "0.0.0", 5 | "type": "module", 6 | "scripts": { 7 | "dev": "bunx --bun vite", 8 | "build": "bunx --bun vite build", 9 | "lint": "eslint .", 10 | "preview": "bunx --bun vite preview", 11 | "dev-no-bun": "vite", 12 | "build-no-bun": "vite build --emptyOutDir", 13 | "preview-no-bun": "vite preview" 14 | }, 15 | "dependencies": { 16 | "@faker-js/faker": "^9.5.0", 17 | "@radix-ui/react-alert-dialog": "^1.1.6", 18 | "@radix-ui/react-checkbox": "^1.1.4", 19 | "@radix-ui/react-dialog": "^1.1.6", 20 | "@radix-ui/react-popover": "^1.1.6", 21 | "@radix-ui/react-progress": "^1.1.2", 22 | "@radix-ui/react-scroll-area": "^1.2.3", 23 | "@radix-ui/react-select": "^2.1.6", 24 | "@radix-ui/react-separator": "^1.1.2", 25 | "@radix-ui/react-slot": "^1.1.2", 26 | "@radix-ui/react-tabs": "^1.1.3", 27 | "@radix-ui/react-tooltip": "^1.1.8", 28 | "@radix-ui/react-use-controllable-state": "^1.1.0", 29 | "@react-sigma/core": "^5.0.2", 30 | "@react-sigma/graph-search": "^5.0.3", 31 | "@react-sigma/layout-circlepack": "^5.0.2", 32 | "@react-sigma/layout-circular": "^5.0.2", 33 | "@react-sigma/layout-force": "^5.0.2", 34 | "@react-sigma/layout-forceatlas2": "^5.0.2", 35 | "@react-sigma/layout-noverlap": "^5.0.2", 36 | "@react-sigma/layout-random": "^5.0.2", 37 | "@react-sigma/minimap": "^5.0.2", 38 | "@sigma/edge-curve": "^3.1.0", 39 | "@sigma/node-border": "^3.0.0", 40 | "axios": "^1.7.9", 41 | "class-variance-authority": "^0.7.1", 42 | "clsx": "^2.1.1", 43 | "cmdk": "^1.0.4", 44 | "graphology": "^0.26.0", 45 | "graphology-generators": "^0.11.2", 46 | "i18next": "^24.2.2", 47 | "lucide-react": "^0.475.0", 48 | "mermaid": "^11.6.0", 49 | "minisearch": "^7.1.2", 50 | "react": "^19.0.0", 51 | "react-dom": "^19.0.0", 
52 | "react-dropzone": "^14.3.6", 53 | "react-error-boundary": "^5.0.0", 54 | "react-i18next": "^15.4.1", 55 | "react-markdown": "^9.1.0", 56 | "react-number-format": "^5.4.3", 57 | "react-router-dom": "^7.3.0", 58 | "react-syntax-highlighter": "^15.6.1", 59 | "rehype-react": "^8.0.0", 60 | "remark-gfm": "^4.0.1", 61 | "remark-math": "^6.0.0", 62 | "seedrandom": "^3.0.5", 63 | "sigma": "^3.0.1", 64 | "sonner": "^1.7.4", 65 | "tailwind-merge": "^3.0.2", 66 | "tailwind-scrollbar": "^4.0.1", 67 | "typography": "^0.16.24", 68 | "zustand": "^5.0.3" 69 | }, 70 | "devDependencies": { 71 | "@eslint/js": "^9.21.0", 72 | "@stylistic/eslint-plugin-js": "^3.1.0", 73 | "@tailwindcss/vite": "^4.0.8", 74 | "@types/bun": "^1.2.3", 75 | "@types/node": "^22.13.5", 76 | "@types/react": "^19.0.10", 77 | "@types/react-dom": "^19.0.4", 78 | "@types/react-i18next": "^8.1.0", 79 | "@types/react-syntax-highlighter": "^15.5.13", 80 | "@types/seedrandom": "^3.0.8", 81 | "@vitejs/plugin-react-swc": "^3.8.0", 82 | "eslint": "^9.21.0", 83 | "eslint-config-prettier": "^10.0.1", 84 | "eslint-plugin-react": "^7.37.4", 85 | "eslint-plugin-react-hooks": "^5.1.0", 86 | "eslint-plugin-react-refresh": "^0.4.19", 87 | "globals": "^15.15.0", 88 | "graphology-types": "^0.24.8", 89 | "prettier": "^3.5.2", 90 | "prettier-plugin-tailwindcss": "^0.6.11", 91 | "tailwindcss": "^4.0.8", 92 | "tailwindcss-animate": "^1.0.7", 93 | "typescript": "~5.7.3", 94 | "typescript-eslint": "^8.24.1", 95 | "vite": "^6.1.1" 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /lightrag_webui/public/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HKUDS/LightRAG/2d5401d4752f78e32da9de2180cd1732d161f78f/lightrag_webui/public/logo.png -------------------------------------------------------------------------------- /lightrag_webui/src/AppRouter.tsx: 
-------------------------------------------------------------------------------- 1 | import { HashRouter as Router, Routes, Route, useNavigate } from 'react-router-dom' 2 | import { useEffect, useState } from 'react' 3 | import { useAuthStore } from '@/stores/state' 4 | import { navigationService } from '@/services/navigation' 5 | import { Toaster } from 'sonner' 6 | import App from './App' 7 | import LoginPage from '@/features/LoginPage' 8 | import ThemeProvider from '@/components/ThemeProvider' 9 | 10 | const AppContent = () => { 11 | const [initializing, setInitializing] = useState(true) 12 | const { isAuthenticated } = useAuthStore() 13 | const navigate = useNavigate() 14 | 15 | // Set navigate function for navigation service 16 | useEffect(() => { 17 | navigationService.setNavigate(navigate) 18 | }, [navigate]) 19 | 20 | // Token validity check 21 | useEffect(() => { 22 | 23 | const checkAuth = async () => { 24 | try { 25 | const token = localStorage.getItem('LIGHTRAG-API-TOKEN') 26 | 27 | if (token && isAuthenticated) { 28 | setInitializing(false); 29 | return; 30 | } 31 | 32 | if (!token) { 33 | useAuthStore.getState().logout() 34 | } 35 | } catch (error) { 36 | console.error('Auth initialization error:', error) 37 | if (!isAuthenticated) { 38 | useAuthStore.getState().logout() 39 | } 40 | } finally { 41 | setInitializing(false) 42 | } 43 | } 44 | 45 | checkAuth() 46 | 47 | return () => { 48 | } 49 | }, [isAuthenticated]) 50 | 51 | // Redirect effect for protected routes 52 | useEffect(() => { 53 | if (!initializing && !isAuthenticated) { 54 | const currentPath = window.location.hash.slice(1); 55 | if (currentPath !== '/login') { 56 | console.log('Not authenticated, redirecting to login'); 57 | navigate('/login'); 58 | } 59 | } 60 | }, [initializing, isAuthenticated, navigate]); 61 | 62 | // Show nothing while initializing 63 | if (initializing) { 64 | return null 65 | } 66 | 67 | return ( 68 | 69 | } /> 70 | : null} 73 | /> 74 | 75 | ) 76 | } 77 | 78 | 
const AppRouter = () => { 79 | return ( 80 | 81 | 82 | 83 | 89 | 90 | 91 | ) 92 | } 93 | 94 | export default AppRouter 95 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ApiKeyAlert.tsx: -------------------------------------------------------------------------------- 1 | import { useState, useCallback, useEffect } from 'react' 2 | import { useTranslation } from 'react-i18next' 3 | import { 4 | AlertDialog, 5 | AlertDialogContent, 6 | AlertDialogDescription, 7 | AlertDialogHeader, 8 | AlertDialogTitle 9 | } from '@/components/ui/AlertDialog' 10 | import Button from '@/components/ui/Button' 11 | import Input from '@/components/ui/Input' 12 | import { useSettingsStore } from '@/stores/settings' 13 | import { useBackendState } from '@/stores/state' 14 | import { InvalidApiKeyError, RequireApiKeError } from '@/api/lightrag' 15 | 16 | interface ApiKeyAlertProps { 17 | open: boolean; 18 | onOpenChange: (open: boolean) => void; 19 | } 20 | 21 | const ApiKeyAlert = ({ open: opened, onOpenChange: setOpened }: ApiKeyAlertProps) => { 22 | const { t } = useTranslation() 23 | const apiKey = useSettingsStore.use.apiKey() 24 | const [tempApiKey, setTempApiKey] = useState('') 25 | const message = useBackendState.use.message() 26 | 27 | useEffect(() => { 28 | setTempApiKey(apiKey || '') 29 | }, [apiKey, opened]) 30 | 31 | useEffect(() => { 32 | if (message) { 33 | if (message.includes(InvalidApiKeyError) || message.includes(RequireApiKeError)) { 34 | setOpened(true) 35 | } 36 | } 37 | }, [message, setOpened]) 38 | 39 | const setApiKey = useCallback(() => { 40 | useSettingsStore.setState({ apiKey: tempApiKey || null }) 41 | setOpened(false) 42 | }, [tempApiKey, setOpened]) 43 | 44 | const handleTempApiKeyChange = useCallback( 45 | (e: React.ChangeEvent) => { 46 | setTempApiKey(e.target.value) 47 | }, 48 | [setTempApiKey] 49 | ) 50 | 51 | return ( 52 | 53 | 54 | 55 | {t('apiKeyAlert.title')} 56 | 57 | 
{t('apiKeyAlert.description')} 58 | 59 | 60 |
61 |
e.preventDefault()}> 62 | 70 | 71 | 74 |
75 | {message && ( 76 |
77 | {message} 78 |
79 | )} 80 |
81 |
82 |
83 | ) 84 | } 85 | 86 | export default ApiKeyAlert 87 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/AppSettings.tsx: -------------------------------------------------------------------------------- 1 | import { useState, useCallback } from 'react' 2 | import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/Popover' 3 | import Button from '@/components/ui/Button' 4 | import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@/components/ui/Select' 5 | import { useSettingsStore } from '@/stores/settings' 6 | import { PaletteIcon } from 'lucide-react' 7 | import { useTranslation } from 'react-i18next' 8 | import { cn } from '@/lib/utils' 9 | 10 | interface AppSettingsProps { 11 | className?: string 12 | } 13 | 14 | export default function AppSettings({ className }: AppSettingsProps) { 15 | const [opened, setOpened] = useState(false) 16 | const { t } = useTranslation() 17 | 18 | const language = useSettingsStore.use.language() 19 | const setLanguage = useSettingsStore.use.setLanguage() 20 | 21 | const theme = useSettingsStore.use.theme() 22 | const setTheme = useSettingsStore.use.setTheme() 23 | 24 | const handleLanguageChange = useCallback((value: string) => { 25 | setLanguage(value as 'en' | 'zh' | 'fr' | 'ar' | 'zh_TW') 26 | }, [setLanguage]) 27 | 28 | const handleThemeChange = useCallback((value: string) => { 29 | setTheme(value as 'light' | 'dark' | 'system') 30 | }, [setTheme]) 31 | 32 | return ( 33 | 34 | 35 | 38 | 39 | 40 |
41 |
42 | 43 | 55 |
56 | 57 |
58 | 59 | 69 |
70 |
71 |
72 |
73 | ) 74 | } 75 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/LanguageToggle.tsx: -------------------------------------------------------------------------------- 1 | import Button from '@/components/ui/Button' 2 | import { useCallback } from 'react' 3 | import { controlButtonVariant } from '@/lib/constants' 4 | import { useTranslation } from 'react-i18next' 5 | import { useSettingsStore } from '@/stores/settings' 6 | 7 | /** 8 | * Component that toggles the language between English and Chinese. 9 | */ 10 | export default function LanguageToggle() { 11 | const { i18n } = useTranslation() 12 | const currentLanguage = i18n.language 13 | const setLanguage = useSettingsStore.use.setLanguage() 14 | 15 | const setEnglish = useCallback(() => { 16 | i18n.changeLanguage('en') 17 | setLanguage('en') 18 | }, [i18n, setLanguage]) 19 | 20 | const setChinese = useCallback(() => { 21 | i18n.changeLanguage('zh') 22 | setLanguage('zh') 23 | }, [i18n, setLanguage]) 24 | 25 | if (currentLanguage === 'zh') { 26 | return ( 27 | 36 | ) 37 | } 38 | return ( 39 | 48 | ) 49 | } 50 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/Root.tsx: -------------------------------------------------------------------------------- 1 | import { StrictMode, useEffect, useState } from 'react' 2 | import { initializeI18n } from '@/i18n' 3 | import App from '@/App' 4 | 5 | export const Root = () => { 6 | const [isI18nInitialized, setIsI18nInitialized] = useState(false) 7 | 8 | useEffect(() => { 9 | // Initialize i18n immediately with persisted language 10 | initializeI18n().then(() => { 11 | setIsI18nInitialized(true) 12 | }) 13 | }, []) 14 | 15 | if (!isI18nInitialized) { 16 | return null // or a loading spinner 17 | } 18 | 19 | return ( 20 | 21 | 22 | 23 | ) 24 | } 25 | -------------------------------------------------------------------------------- 
/lightrag_webui/src/components/ThemeProvider.tsx: -------------------------------------------------------------------------------- 1 | import { createContext, useEffect } from 'react' 2 | import { Theme, useSettingsStore } from '@/stores/settings' 3 | 4 | type ThemeProviderProps = { 5 | children: React.ReactNode 6 | } 7 | 8 | type ThemeProviderState = { 9 | theme: Theme 10 | setTheme: (theme: Theme) => void 11 | } 12 | 13 | const initialState: ThemeProviderState = { 14 | theme: 'system', 15 | setTheme: () => null 16 | } 17 | 18 | const ThemeProviderContext = createContext(initialState) 19 | 20 | /** 21 | * Component that provides the theme state and setter function to its children. 22 | */ 23 | export default function ThemeProvider({ children, ...props }: ThemeProviderProps) { 24 | const theme = useSettingsStore.use.theme() 25 | const setTheme = useSettingsStore.use.setTheme() 26 | 27 | useEffect(() => { 28 | const root = window.document.documentElement 29 | root.classList.remove('light', 'dark') 30 | 31 | if (theme === 'system') { 32 | const mediaQuery = window.matchMedia('(prefers-color-scheme: dark)') 33 | const handleChange = (e: MediaQueryListEvent) => { 34 | root.classList.remove('light', 'dark') 35 | root.classList.add(e.matches ? 'dark' : 'light') 36 | } 37 | 38 | root.classList.add(mediaQuery.matches ? 
'dark' : 'light') 39 | mediaQuery.addEventListener('change', handleChange) 40 | 41 | return () => mediaQuery.removeEventListener('change', handleChange) 42 | } else { 43 | root.classList.add(theme) 44 | } 45 | }, [theme]) 46 | 47 | const value = { 48 | theme, 49 | setTheme 50 | } 51 | 52 | return ( 53 | 54 | {children} 55 | 56 | ) 57 | } 58 | 59 | export { ThemeProviderContext } 60 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ThemeToggle.tsx: -------------------------------------------------------------------------------- 1 | import Button from '@/components/ui/Button' 2 | import useTheme from '@/hooks/useTheme' 3 | import { MoonIcon, SunIcon } from 'lucide-react' 4 | import { useCallback } from 'react' 5 | import { controlButtonVariant } from '@/lib/constants' 6 | import { useTranslation } from 'react-i18next' 7 | 8 | /** 9 | * Component that toggles the theme between light and dark. 10 | */ 11 | export default function ThemeToggle() { 12 | const { theme, setTheme } = useTheme() 13 | const setLight = useCallback(() => setTheme('light'), [setTheme]) 14 | const setDark = useCallback(() => setTheme('dark'), [setTheme]) 15 | const { t } = useTranslation() 16 | 17 | if (theme === 'dark') { 18 | return ( 19 | 28 | ) 29 | } 30 | return ( 31 | 40 | ) 41 | } 42 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/graph/FocusOnNode.tsx: -------------------------------------------------------------------------------- 1 | import { useCamera, useSigma } from '@react-sigma/core' 2 | import { useEffect } from 'react' 3 | import { useGraphStore } from '@/stores/graph' 4 | 5 | /** 6 | * Component that highlights a node and centers the camera on it. 
7 | */ 8 | const FocusOnNode = ({ node, move }: { node: string | null; move?: boolean }) => { 9 | const sigma = useSigma() 10 | const { gotoNode } = useCamera() 11 | 12 | /** 13 | * When the selected item changes, highlighted the node and center the camera on it. 14 | */ 15 | useEffect(() => { 16 | const graph = sigma.getGraph(); 17 | 18 | if (move) { 19 | if (node && graph.hasNode(node)) { 20 | try { 21 | graph.setNodeAttribute(node, 'highlighted', true); 22 | gotoNode(node); 23 | } catch (error) { 24 | console.error('Error focusing on node:', error); 25 | } 26 | } else { 27 | // If no node is selected but move is true, reset to default view 28 | sigma.setCustomBBox(null); 29 | sigma.getCamera().animate({ x: 0.5, y: 0.5, ratio: 1 }, { duration: 0 }); 30 | } 31 | useGraphStore.getState().setMoveToSelectedNode(false); 32 | } else if (node && graph.hasNode(node)) { 33 | try { 34 | graph.setNodeAttribute(node, 'highlighted', true); 35 | } catch (error) { 36 | console.error('Error highlighting node:', error); 37 | } 38 | } 39 | 40 | return () => { 41 | if (node && graph.hasNode(node)) { 42 | try { 43 | graph.setNodeAttribute(node, 'highlighted', false); 44 | } catch (error) { 45 | console.error('Error cleaning up node highlight:', error); 46 | } 47 | } 48 | } 49 | }, [node, move, sigma, gotoNode]) 50 | 51 | return null 52 | } 53 | 54 | export default FocusOnNode 55 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/graph/FullScreenControl.tsx: -------------------------------------------------------------------------------- 1 | import { useFullScreen } from '@react-sigma/core' 2 | import { MaximizeIcon, MinimizeIcon } from 'lucide-react' 3 | import { controlButtonVariant } from '@/lib/constants' 4 | import Button from '@/components/ui/Button' 5 | import { useTranslation } from 'react-i18next' 6 | 7 | /** 8 | * Component that toggles full screen mode. 
9 | */ 10 | const FullScreenControl = () => { 11 | const { isFullScreen, toggle } = useFullScreen() 12 | const { t } = useTranslation() 13 | 14 | return ( 15 | <> 16 | {isFullScreen ? ( 17 | 20 | ) : ( 21 | 24 | )} 25 | 26 | ) 27 | } 28 | 29 | export default FullScreenControl 30 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/graph/Legend.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | import { useTranslation } from 'react-i18next' 3 | import { useGraphStore } from '@/stores/graph' 4 | import { Card } from '@/components/ui/Card' 5 | import { ScrollArea } from '@/components/ui/ScrollArea' 6 | 7 | interface LegendProps { 8 | className?: string 9 | } 10 | 11 | const Legend: React.FC = ({ className }) => { 12 | const { t } = useTranslation() 13 | const typeColorMap = useGraphStore.use.typeColorMap() 14 | 15 | if (!typeColorMap || typeColorMap.size === 0) { 16 | return null 17 | } 18 | 19 | return ( 20 | 21 |

{t('graphPanel.legend')}

22 | 23 |
24 | {Array.from(typeColorMap.entries()).map(([type, color]) => ( 25 |
26 |
30 | 31 | {t(`graphPanel.nodeTypes.${type.toLowerCase()}`, type)} 32 | 33 |
34 | ))} 35 |
36 | 37 | 38 | ) 39 | } 40 | 41 | export default Legend 42 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/graph/LegendButton.tsx: -------------------------------------------------------------------------------- 1 | import { useCallback } from 'react' 2 | import { BookOpenIcon } from 'lucide-react' 3 | import Button from '@/components/ui/Button' 4 | import { controlButtonVariant } from '@/lib/constants' 5 | import { useSettingsStore } from '@/stores/settings' 6 | import { useTranslation } from 'react-i18next' 7 | 8 | /** 9 | * Component that toggles legend visibility. 10 | */ 11 | const LegendButton = () => { 12 | const { t } = useTranslation() 13 | const showLegend = useSettingsStore.use.showLegend() 14 | const setShowLegend = useSettingsStore.use.setShowLegend() 15 | 16 | const toggleLegend = useCallback(() => { 17 | setShowLegend(!showLegend) 18 | }, [showLegend, setShowLegend]) 19 | 20 | return ( 21 | 29 | ) 30 | } 31 | 32 | export default LegendButton 33 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/graph/PropertyRowComponents.tsx: -------------------------------------------------------------------------------- 1 | import { PencilIcon } from 'lucide-react' 2 | import Text from '@/components/ui/Text' 3 | import { useTranslation } from 'react-i18next' 4 | 5 | interface PropertyNameProps { 6 | name: string 7 | } 8 | 9 | export const PropertyName = ({ name }: PropertyNameProps) => { 10 | const { t } = useTranslation() 11 | 12 | const getPropertyNameTranslation = (propName: string) => { 13 | const translationKey = `graphPanel.propertiesView.node.propertyNames.${propName}` 14 | const translation = t(translationKey) 15 | return translation === translationKey ? 
propName : translation 16 | } 17 | 18 | return ( 19 | 20 | {getPropertyNameTranslation(name)} 21 | 22 | ) 23 | } 24 | 25 | interface EditIconProps { 26 | onClick: () => void 27 | } 28 | 29 | export const EditIcon = ({ onClick }: EditIconProps) => ( 30 |
31 | 35 |
36 | ) 37 | 38 | interface PropertyValueProps { 39 | value: any 40 | onClick?: () => void 41 | tooltip?: string 42 | } 43 | 44 | export const PropertyValue = ({ value, onClick, tooltip }: PropertyValueProps) => ( 45 |
46 | 54 |
55 | ) 56 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/graph/SettingsDisplay.tsx: -------------------------------------------------------------------------------- 1 | import { useSettingsStore } from '@/stores/settings' 2 | import { useTranslation } from 'react-i18next' 3 | 4 | /** 5 | * Component that displays current values of important graph settings 6 | * Positioned to the right of the toolbar at the bottom-left corner 7 | */ 8 | const SettingsDisplay = () => { 9 | const { t } = useTranslation() 10 | const graphQueryMaxDepth = useSettingsStore.use.graphQueryMaxDepth() 11 | const graphMaxNodes = useSettingsStore.use.graphMaxNodes() 12 | 13 | return ( 14 |
15 |
{t('graphPanel.sideBar.settings.depth')}: {graphQueryMaxDepth}
16 |
{t('graphPanel.sideBar.settings.max')}: {graphMaxNodes}
17 |
18 | ) 19 | } 20 | 21 | export default SettingsDisplay 22 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/graph/ZoomControl.tsx: -------------------------------------------------------------------------------- 1 | import { useCamera, useSigma } from '@react-sigma/core' 2 | import { useCallback } from 'react' 3 | import Button from '@/components/ui/Button' 4 | import { ZoomInIcon, ZoomOutIcon, FullscreenIcon, RotateCwIcon, RotateCcwIcon } from 'lucide-react' 5 | import { controlButtonVariant } from '@/lib/constants' 6 | import { useTranslation } from 'react-i18next'; 7 | 8 | /** 9 | * Component that provides zoom controls for the graph viewer. 10 | */ 11 | const ZoomControl = () => { 12 | const { zoomIn, zoomOut, reset } = useCamera({ duration: 200, factor: 1.5 }) 13 | const sigma = useSigma() 14 | const { t } = useTranslation(); 15 | 16 | const handleZoomIn = useCallback(() => zoomIn(), [zoomIn]) 17 | const handleZoomOut = useCallback(() => zoomOut(), [zoomOut]) 18 | const handleResetZoom = useCallback(() => { 19 | if (!sigma) return 20 | 21 | try { 22 | // First clear any custom bounding box and refresh 23 | sigma.setCustomBBox(null) 24 | sigma.refresh() 25 | 26 | // Get graph after refresh 27 | const graph = sigma.getGraph() 28 | 29 | // Check if graph has nodes before accessing them 30 | if (!graph?.order || graph.nodes().length === 0) { 31 | // Use reset() for empty graph case 32 | reset() 33 | return 34 | } 35 | 36 | sigma.getCamera().animate( 37 | { x: 0.5, y: 0.5, ratio: 1.1 }, 38 | { duration: 1000 } 39 | ) 40 | } catch (error) { 41 | console.error('Error resetting zoom:', error) 42 | // Use reset() as fallback on error 43 | reset() 44 | } 45 | }, [sigma, reset]) 46 | 47 | const handleRotate = useCallback(() => { 48 | if (!sigma) return 49 | 50 | const camera = sigma.getCamera() 51 | const currentAngle = camera.angle 52 | const newAngle = currentAngle + Math.PI / 8 53 | 54 | camera.animate( 
55 | { angle: newAngle }, 56 | { duration: 200 } 57 | ) 58 | }, [sigma]) 59 | 60 | const handleRotateCounterClockwise = useCallback(() => { 61 | if (!sigma) return 62 | 63 | const camera = sigma.getCamera() 64 | const currentAngle = camera.angle 65 | const newAngle = currentAngle - Math.PI / 8 66 | 67 | camera.animate( 68 | { angle: newAngle }, 69 | { duration: 200 } 70 | ) 71 | }, [sigma]) 72 | 73 | return ( 74 | <> 75 | 83 | 91 | 99 | 102 | 105 | 106 | ) 107 | } 108 | 109 | export default ZoomControl 110 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/status/StatusCard.tsx: -------------------------------------------------------------------------------- 1 | import { LightragStatus } from '@/api/lightrag' 2 | import { useTranslation } from 'react-i18next' 3 | 4 | const StatusCard = ({ status }: { status: LightragStatus | null }) => { 5 | const { t } = useTranslation() 6 | if (!status) { 7 | return
{t('graphPanel.statusCard.unavailable')}
8 | } 9 | 10 | return ( 11 |
12 |
13 |

{t('graphPanel.statusCard.storageInfo')}

14 |
15 | {t('graphPanel.statusCard.workingDirectory')}: 16 | {status.working_directory} 17 | {t('graphPanel.statusCard.inputDirectory')}: 18 | {status.input_directory} 19 |
20 |
21 | 22 |
23 |

{t('graphPanel.statusCard.llmConfig')}

24 |
25 | {t('graphPanel.statusCard.llmBinding')}: 26 | {status.configuration.llm_binding} 27 | {t('graphPanel.statusCard.llmBindingHost')}: 28 | {status.configuration.llm_binding_host} 29 | {t('graphPanel.statusCard.llmModel')}: 30 | {status.configuration.llm_model} 31 | {t('graphPanel.statusCard.maxTokens')}: 32 | {status.configuration.max_tokens} 33 |
34 |
35 | 36 |
37 |

{t('graphPanel.statusCard.embeddingConfig')}

38 |
39 | {t('graphPanel.statusCard.embeddingBinding')}: 40 | {status.configuration.embedding_binding} 41 | {t('graphPanel.statusCard.embeddingBindingHost')}: 42 | {status.configuration.embedding_binding_host} 43 | {t('graphPanel.statusCard.embeddingModel')}: 44 | {status.configuration.embedding_model} 45 |
46 |
47 | 48 |
49 |

{t('graphPanel.statusCard.storageConfig')}

50 |
51 | {t('graphPanel.statusCard.kvStorage')}: 52 | {status.configuration.kv_storage} 53 | {t('graphPanel.statusCard.docStatusStorage')}: 54 | {status.configuration.doc_status_storage} 55 | {t('graphPanel.statusCard.graphStorage')}: 56 | {status.configuration.graph_storage} 57 | {t('graphPanel.statusCard.vectorStorage')}: 58 | {status.configuration.vector_storage} 59 |
60 |
61 |
62 | ) 63 | } 64 | 65 | export default StatusCard 66 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/status/StatusDialog.tsx: -------------------------------------------------------------------------------- 1 | import { LightragStatus } from '@/api/lightrag' 2 | import { useTranslation } from 'react-i18next' 3 | import { 4 | Dialog, 5 | DialogContent, 6 | DialogHeader, 7 | DialogTitle, 8 | DialogDescription, 9 | } from '@/components/ui/Dialog' 10 | import StatusCard from './StatusCard' 11 | 12 | interface StatusDialogProps { 13 | open: boolean 14 | onOpenChange: (open: boolean) => void 15 | status: LightragStatus | null 16 | } 17 | 18 | const StatusDialog = ({ open, onOpenChange, status }: StatusDialogProps) => { 19 | const { t } = useTranslation() 20 | 21 | return ( 22 | 23 | 24 | 25 | {t('graphPanel.statusDialog.title')} 26 | 27 | {t('graphPanel.statusDialog.description')} 28 | 29 | 30 | 31 | 32 | 33 | ) 34 | } 35 | 36 | export default StatusDialog 37 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/status/StatusIndicator.tsx: -------------------------------------------------------------------------------- 1 | import { cn } from '@/lib/utils' 2 | import { useBackendState } from '@/stores/state' 3 | import { useEffect, useState } from 'react' 4 | import StatusDialog from './StatusDialog' 5 | import { useTranslation } from 'react-i18next' 6 | 7 | const StatusIndicator = () => { 8 | const { t } = useTranslation() 9 | const health = useBackendState.use.health() 10 | const lastCheckTime = useBackendState.use.lastCheckTime() 11 | const status = useBackendState.use.status() 12 | const [animate, setAnimate] = useState(false) 13 | const [dialogOpen, setDialogOpen] = useState(false) 14 | 15 | // listen to health change 16 | useEffect(() => { 17 | setAnimate(true) 18 | const timer = setTimeout(() => setAnimate(false), 300) 19 | return () => 
clearTimeout(timer) 20 | }, [lastCheckTime]) 21 | 22 | return ( 23 |
24 |
setDialogOpen(true)} 27 | > 28 |
38 | 39 | {health ? t('graphPanel.statusIndicator.connected') : t('graphPanel.statusIndicator.disconnected')} 40 | 41 |
42 | 43 | 48 |
49 | ) 50 | } 51 | 52 | export default StatusIndicator 53 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Alert.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | import { cva, type VariantProps } from 'class-variance-authority' 3 | 4 | import { cn } from '@/lib/utils' 5 | 6 | const alertVariants = cva( 7 | 'relative w-full rounded-lg border px-4 py-3 text-sm [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-foreground [&>svg~*]:pl-7', 8 | { 9 | variants: { 10 | variant: { 11 | default: 'bg-background text-foreground', 12 | destructive: 13 | 'border-destructive/50 text-destructive dark:border-destructive [&>svg]:text-destructive' 14 | } 15 | }, 16 | defaultVariants: { 17 | variant: 'default' 18 | } 19 | } 20 | ) 21 | 22 | const Alert = React.forwardRef< 23 | HTMLDivElement, 24 | React.HTMLAttributes & VariantProps 25 | >(({ className, variant, ...props }, ref) => ( 26 |
27 | )) 28 | Alert.displayName = 'Alert' 29 | 30 | const AlertTitle = React.forwardRef>( 31 | ({ className, ...props }, ref) => ( 32 |
37 | ) 38 | ) 39 | AlertTitle.displayName = 'AlertTitle' 40 | 41 | const AlertDescription = React.forwardRef< 42 | HTMLParagraphElement, 43 | React.HTMLAttributes 44 | >(({ className, ...props }, ref) => ( 45 |
46 | )) 47 | AlertDescription.displayName = 'AlertDescription' 48 | 49 | export { Alert, AlertTitle, AlertDescription } 50 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Badge.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | import { cva, type VariantProps } from 'class-variance-authority' 3 | 4 | import { cn } from '@/lib/utils' 5 | 6 | const badgeVariants = cva( 7 | 'inline-flex items-center rounded-md border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2', 8 | { 9 | variants: { 10 | variant: { 11 | default: 'border-transparent bg-primary text-primary-foreground shadow hover:bg-primary/80', 12 | secondary: 13 | 'border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80', 14 | destructive: 15 | 'border-transparent bg-destructive text-destructive-foreground shadow hover:bg-destructive/80', 16 | outline: 'text-foreground' 17 | } 18 | }, 19 | defaultVariants: { 20 | variant: 'default' 21 | } 22 | } 23 | ) 24 | 25 | export interface BadgeProps 26 | extends React.HTMLAttributes, 27 | VariantProps {} 28 | 29 | function Badge({ className, variant, ...props }: BadgeProps) { 30 | return
31 | } 32 | 33 | export default Badge 34 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Button.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | import { Slot } from '@radix-ui/react-slot' 3 | import { cva, type VariantProps } from 'class-variance-authority' 4 | import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/Tooltip' 5 | import { cn } from '@/lib/utils' 6 | 7 | // eslint-disable-next-line react-refresh/only-export-components 8 | export const buttonVariants = cva( 9 | 'inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium ring-offset-background transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg]:size-4 [&_svg]:shrink-0', 10 | { 11 | variants: { 12 | variant: { 13 | default: 'bg-primary text-primary-foreground hover:bg-primary/90', 14 | destructive: 'bg-destructive text-destructive-foreground hover:bg-destructive/90', 15 | outline: 'border border-input bg-background hover:bg-accent hover:text-accent-foreground', 16 | secondary: 'bg-secondary text-secondary-foreground hover:bg-secondary/80', 17 | ghost: 'hover:bg-accent hover:text-accent-foreground', 18 | link: 'text-primary underline-offset-4 hover:underline' 19 | }, 20 | size: { 21 | default: 'h-10 px-4 py-2', 22 | sm: 'h-9 rounded-md px-3', 23 | lg: 'h-11 rounded-md px-8', 24 | icon: 'size-8' 25 | } 26 | }, 27 | defaultVariants: { 28 | variant: 'default', 29 | size: 'default' 30 | } 31 | } 32 | ) 33 | 34 | interface ButtonProps 35 | extends React.ButtonHTMLAttributes, 36 | VariantProps { 37 | asChild?: boolean 38 | side?: 'top' | 'right' | 'bottom' | 'left' 39 | tooltip?: string 40 | } 41 | 42 | const Button = React.forwardRef( 43 | ({ 
className, variant, tooltip, size, side = 'right', asChild = false, ...props }, ref) => { 44 | const Comp = asChild ? Slot : 'button' 45 | if (!tooltip) { 46 | return ( 47 | 52 | ) 53 | } 54 | 55 | return ( 56 | 57 | 58 | 59 | 64 | 65 | {tooltip} 66 | 67 | 68 | ) 69 | } 70 | ) 71 | Button.displayName = 'Button' 72 | 73 | export type ButtonVariantType = Exclude< 74 | NonNullable[0]>['variant'], 75 | undefined 76 | > 77 | 78 | export default Button 79 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Card.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | 3 | import { cn } from '@/lib/utils' 4 | 5 | const Card = React.forwardRef>( 6 | ({ className, ...props }, ref) => ( 7 |
12 | ) 13 | ) 14 | Card.displayName = 'Card' 15 | 16 | const CardHeader = React.forwardRef>( 17 | ({ className, ...props }, ref) => ( 18 |
19 | ) 20 | ) 21 | CardHeader.displayName = 'CardHeader' 22 | 23 | const CardTitle = React.forwardRef>( 24 | ({ className, ...props }, ref) => ( 25 |
30 | ) 31 | ) 32 | CardTitle.displayName = 'CardTitle' 33 | 34 | const CardDescription = React.forwardRef>( 35 | ({ className, ...props }, ref) => ( 36 |
37 | ) 38 | ) 39 | CardDescription.displayName = 'CardDescription' 40 | 41 | const CardContent = React.forwardRef>( 42 | ({ className, ...props }, ref) => ( 43 |
44 | ) 45 | ) 46 | CardContent.displayName = 'CardContent' 47 | 48 | const CardFooter = React.forwardRef>( 49 | ({ className, ...props }, ref) => ( 50 |
51 | ) 52 | ) 53 | CardFooter.displayName = 'CardFooter' 54 | 55 | export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent } 56 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Checkbox.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | import * as CheckboxPrimitive from '@radix-ui/react-checkbox' 3 | import { Check } from 'lucide-react' 4 | 5 | import { cn } from '@/lib/utils' 6 | 7 | const Checkbox = React.forwardRef< 8 | React.ComponentRef, 9 | React.ComponentPropsWithoutRef 10 | >(({ className, ...props }, ref) => ( 11 | 19 | 20 | 21 | 22 | 23 | )) 24 | Checkbox.displayName = CheckboxPrimitive.Root.displayName 25 | 26 | export default Checkbox 27 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/DataTable.tsx: -------------------------------------------------------------------------------- 1 | import { ColumnDef, flexRender, getCoreRowModel, useReactTable } from '@tanstack/react-table' 2 | 3 | import { 4 | Table, 5 | TableBody, 6 | TableCell, 7 | TableHead, 8 | TableHeader, 9 | TableRow 10 | } from '@/components/ui/Table' 11 | 12 | interface DataTableProps { 13 | columns: ColumnDef[] 14 | data: TData[] 15 | } 16 | 17 | export default function DataTable({ columns, data }: DataTableProps) { 18 | const table = useReactTable({ 19 | data, 20 | columns, 21 | getCoreRowModel: getCoreRowModel() 22 | }) 23 | 24 | return ( 25 |
26 | 27 | 28 | {table.getHeaderGroups().map((headerGroup) => ( 29 | 30 | {headerGroup.headers.map((header) => { 31 | return ( 32 | 33 | {header.isPlaceholder 34 | ? null 35 | : flexRender(header.column.columnDef.header, header.getContext())} 36 | 37 | ) 38 | })} 39 | 40 | ))} 41 | 42 | 43 | {table.getRowModel().rows?.length ? ( 44 | table.getRowModel().rows.map((row) => ( 45 | 46 | {row.getVisibleCells().map((cell) => ( 47 | 48 | {flexRender(cell.column.columnDef.cell, cell.getContext())} 49 | 50 | ))} 51 | 52 | )) 53 | ) : ( 54 | 55 | 56 | No results. 57 | 58 | 59 | )} 60 | 61 |
62 |
63 | ) 64 | } 65 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/EmptyCard.tsx: -------------------------------------------------------------------------------- 1 | import { cn } from '@/lib/utils' 2 | import { Card, CardDescription, CardTitle } from '@/components/ui/Card' 3 | import { FilesIcon } from 'lucide-react' 4 | 5 | interface EmptyCardProps extends React.ComponentPropsWithoutRef { 6 | title: string 7 | description?: string 8 | action?: React.ReactNode 9 | icon?: React.ComponentType<{ className?: string }> 10 | } 11 | 12 | export default function EmptyCard({ 13 | title, 14 | description, 15 | icon: Icon = FilesIcon, 16 | action, 17 | className, 18 | ...props 19 | }: EmptyCardProps) { 20 | return ( 21 | 28 |
29 |
31 |
32 | {title} 33 | {description ? {description} : null} 34 |
35 | {action ? action : null} 36 |
37 | ) 38 | } 39 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Input.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | import { cn } from '@/lib/utils' 3 | 4 | const Input = React.forwardRef>( 5 | ({ className, type, ...props }, ref) => { 6 | return ( 7 | 16 | ) 17 | } 18 | ) 19 | Input.displayName = 'Input' 20 | 21 | export default Input 22 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Popover.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | import * as PopoverPrimitive from '@radix-ui/react-popover' 3 | 4 | import { cn } from '@/lib/utils' 5 | 6 | const Popover = PopoverPrimitive.Root 7 | 8 | const PopoverTrigger = PopoverPrimitive.Trigger 9 | 10 | // Define the props type to include positioning props 11 | type PopoverContentProps = React.ComponentPropsWithoutRef & { 12 | collisionPadding?: number | Partial>; 13 | sticky?: 'partial' | 'always'; 14 | avoidCollisions?: boolean; 15 | }; 16 | 17 | const PopoverContent = React.forwardRef< 18 | React.ComponentRef, 19 | PopoverContentProps 20 | >(({ className, align = 'center', sideOffset = 4, collisionPadding, sticky, avoidCollisions = false, ...props }, ref) => ( 21 | 22 | 35 | 36 | )) 37 | PopoverContent.displayName = PopoverPrimitive.Content.displayName 38 | 39 | export { Popover, PopoverTrigger, PopoverContent } 40 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Progress.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | import * as ProgressPrimitive from '@radix-ui/react-progress' 3 | 4 | import { cn } from '@/lib/utils' 5 | 6 | const Progress = React.forwardRef< 7 | React.ComponentRef, 
8 | React.ComponentPropsWithoutRef 9 | >(({ className, value, ...props }, ref) => ( 10 | 15 | 19 | 20 | )) 21 | Progress.displayName = ProgressPrimitive.Root.displayName 22 | 23 | export default Progress 24 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/ScrollArea.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | import * as ScrollAreaPrimitive from '@radix-ui/react-scroll-area' 3 | 4 | import { cn } from '@/lib/utils' 5 | 6 | const ScrollArea = React.forwardRef< 7 | React.ComponentRef, 8 | React.ComponentPropsWithoutRef 9 | >(({ className, children, ...props }, ref) => ( 10 | 15 | 16 | {children} 17 | 18 | 19 | 20 | 21 | )) 22 | ScrollArea.displayName = ScrollAreaPrimitive.Root.displayName 23 | 24 | const ScrollBar = React.forwardRef< 25 | React.ComponentRef, 26 | React.ComponentPropsWithoutRef 27 | >(({ className, orientation = 'vertical', ...props }, ref) => ( 28 | 39 | 40 | 41 | )) 42 | ScrollBar.displayName = ScrollAreaPrimitive.ScrollAreaScrollbar.displayName 43 | 44 | export { ScrollArea, ScrollBar } 45 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Separator.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | import * as SeparatorPrimitive from '@radix-ui/react-separator' 3 | 4 | import { cn } from '@/lib/utils' 5 | 6 | const Separator = React.forwardRef< 7 | React.ComponentRef, 8 | React.ComponentPropsWithoutRef 9 | >(({ className, orientation = 'horizontal', decorative = true, ...props }, ref) => ( 10 | 21 | )) 22 | Separator.displayName = SeparatorPrimitive.Root.displayName 23 | 24 | export default Separator 25 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/TabContent.tsx: 
-------------------------------------------------------------------------------- 1 | import React, { useEffect } from 'react'; 2 | import { useTabVisibility } from '@/contexts/useTabVisibility'; 3 | 4 | interface TabContentProps { 5 | tabId: string; 6 | children: React.ReactNode; 7 | className?: string; 8 | } 9 | 10 | /** 11 | * TabContent component that manages visibility based on tab selection 12 | * Works with the TabVisibilityContext to show/hide content based on active tab 13 | */ 14 | const TabContent: React.FC = ({ tabId, children, className = '' }) => { 15 | const { isTabVisible, setTabVisibility } = useTabVisibility(); 16 | const isVisible = isTabVisible(tabId); 17 | 18 | // Register this tab with the context when mounted 19 | useEffect(() => { 20 | setTabVisibility(tabId, true); 21 | 22 | // Cleanup when unmounted 23 | return () => { 24 | setTabVisibility(tabId, false); 25 | }; 26 | }, [tabId, setTabVisibility]); 27 | 28 | // Use CSS to hide content instead of not rendering it 29 | // This prevents components from unmounting when tabs are switched 30 | return ( 31 |
32 | {children} 33 |
34 | ); 35 | }; 36 | 37 | export default TabContent; 38 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Table.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | 3 | import { cn } from '@/lib/utils' 4 | 5 | const Table = React.forwardRef>( 6 | ({ className, ...props }, ref) => ( 7 |
8 | 9 | 10 | ) 11 | ) 12 | Table.displayName = 'Table' 13 | 14 | const TableHeader = React.forwardRef< 15 | HTMLTableSectionElement, 16 | React.HTMLAttributes 17 | >(({ className, ...props }, ref) => ( 18 | 19 | )) 20 | TableHeader.displayName = 'TableHeader' 21 | 22 | const TableBody = React.forwardRef< 23 | HTMLTableSectionElement, 24 | React.HTMLAttributes 25 | >(({ className, ...props }, ref) => ( 26 | 27 | )) 28 | TableBody.displayName = 'TableBody' 29 | 30 | const TableFooter = React.forwardRef< 31 | HTMLTableSectionElement, 32 | React.HTMLAttributes 33 | >(({ className, ...props }, ref) => ( 34 | tr]:last:border-b-0', className)} 37 | {...props} 38 | /> 39 | )) 40 | TableFooter.displayName = 'TableFooter' 41 | 42 | const TableRow = React.forwardRef>( 43 | ({ className, ...props }, ref) => ( 44 | 52 | ) 53 | ) 54 | TableRow.displayName = 'TableRow' 55 | 56 | const TableHead = React.forwardRef< 57 | HTMLTableCellElement, 58 | React.ThHTMLAttributes 59 | // eslint-disable-next-line react/prop-types 60 | >(({ className, ...props }, ref) => ( 61 |
[role=checkbox]]:translate-y-[2px]', 65 | className 66 | )} 67 | {...props} 68 | /> 69 | )) 70 | TableHead.displayName = 'TableHead' 71 | 72 | const TableCell = React.forwardRef< 73 | HTMLTableCellElement, 74 | React.TdHTMLAttributes 75 | // eslint-disable-next-line react/prop-types 76 | >(({ className, ...props }, ref) => ( 77 | [role=checkbox]]:translate-y-[2px]', 81 | className 82 | )} 83 | {...props} 84 | /> 85 | )) 86 | TableCell.displayName = 'TableCell' 87 | 88 | const TableCaption = React.forwardRef< 89 | HTMLTableCaptionElement, 90 | React.HTMLAttributes 91 | >(({ className, ...props }, ref) => ( 92 |
93 | )) 94 | TableCaption.displayName = 'TableCaption' 95 | 96 | export { Table, TableHeader, TableBody, TableFooter, TableHead, TableRow, TableCell, TableCaption } 97 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Tabs.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | import * as TabsPrimitive from '@radix-ui/react-tabs' 3 | 4 | import { cn } from '@/lib/utils' 5 | 6 | const Tabs = TabsPrimitive.Root 7 | 8 | const TabsList = React.forwardRef< 9 | React.ComponentRef, 10 | React.ComponentPropsWithoutRef 11 | >(({ className, ...props }, ref) => ( 12 | 20 | )) 21 | TabsList.displayName = TabsPrimitive.List.displayName 22 | 23 | const TabsTrigger = React.forwardRef< 24 | React.ComponentRef, 25 | React.ComponentPropsWithoutRef 26 | >(({ className, ...props }, ref) => ( 27 | 35 | )) 36 | TabsTrigger.displayName = TabsPrimitive.Trigger.displayName 37 | 38 | const TabsContent = React.forwardRef< 39 | React.ComponentRef, 40 | React.ComponentPropsWithoutRef 41 | >(({ className, ...props }, ref) => ( 42 | 54 | )) 55 | TabsContent.displayName = TabsPrimitive.Content.displayName 56 | 57 | export { Tabs, TabsList, TabsTrigger, TabsContent } 58 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Text.tsx: -------------------------------------------------------------------------------- 1 | import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from '@/components/ui/Tooltip' 2 | import { cn } from '@/lib/utils' 3 | 4 | const Text = ({ 5 | text, 6 | className, 7 | tooltipClassName, 8 | tooltip, 9 | side, 10 | onClick 11 | }: { 12 | text: string 13 | className?: string 14 | tooltipClassName?: string 15 | tooltip?: string 16 | side?: 'top' | 'right' | 'bottom' | 'left' 17 | onClick?: () => void 18 | }) => { 19 | if (!tooltip) { 20 | return ( 21 | 27 | ) 28 | } 29 | 
30 | return ( 31 | 32 | 33 | 34 | 40 | 41 | 42 | {tooltip} 43 | 44 | 45 | 46 | ) 47 | } 48 | 49 | export default Text 50 | -------------------------------------------------------------------------------- /lightrag_webui/src/components/ui/Tooltip.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react' 2 | import * as TooltipPrimitive from '@radix-ui/react-tooltip' 3 | import { cn } from '@/lib/utils' 4 | 5 | const TooltipProvider = TooltipPrimitive.Provider 6 | 7 | const Tooltip = TooltipPrimitive.Root 8 | 9 | const TooltipTrigger = TooltipPrimitive.Trigger 10 | 11 | const processTooltipContent = (content: string) => { 12 | if (typeof content !== 'string') return content 13 | return ( 14 |
15 | {content} 16 |
17 | ) 18 | } 19 | 20 | const TooltipContent = React.forwardRef< 21 | React.ComponentRef, 22 | React.ComponentPropsWithoutRef & { 23 | side?: 'top' | 'right' | 'bottom' | 'left' 24 | align?: 'start' | 'center' | 'end' 25 | } 26 | >(({ className, side = 'left', align = 'start', children, ...props }, ref) => { 27 | const contentRef = React.useRef(null); 28 | 29 | React.useEffect(() => { 30 | if (contentRef.current) { 31 | contentRef.current.scrollTop = 0; 32 | } 33 | }, [children]); 34 | 35 | return ( 36 | 46 | {typeof children === 'string' ? processTooltipContent(children) : children} 47 | 48 | ); 49 | }) 50 | TooltipContent.displayName = TooltipPrimitive.Content.displayName 51 | 52 | export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider } 53 | -------------------------------------------------------------------------------- /lightrag_webui/src/contexts/TabVisibilityProvider.tsx: -------------------------------------------------------------------------------- 1 | import React, { useState, useEffect, useMemo } from 'react'; 2 | import { TabVisibilityContext } from './context'; 3 | import { TabVisibilityContextType } from './types'; 4 | import { useSettingsStore } from '@/stores/settings'; 5 | 6 | interface TabVisibilityProviderProps { 7 | children: React.ReactNode; 8 | } 9 | 10 | /** 11 | * Provider component for the TabVisibility context 12 | * Manages the visibility state of tabs throughout the application 13 | */ 14 | export const TabVisibilityProvider: React.FC = ({ children }) => { 15 | // Get current tab from settings store 16 | const currentTab = useSettingsStore.use.currentTab(); 17 | 18 | // Initialize visibility state with all tabs visible 19 | const [visibleTabs, setVisibleTabs] = useState>(() => ({ 20 | 'documents': true, 21 | 'knowledge-graph': true, 22 | 'retrieval': true, 23 | 'api': true 24 | })); 25 | 26 | // Keep all tabs visible because we use CSS to control TAB visibility instead of React 27 | useEffect(() => { 28 | 
setVisibleTabs((prev) => ({ 29 | ...prev, 30 | 'documents': true, 31 | 'knowledge-graph': true, 32 | 'retrieval': true, 33 | 'api': true 34 | })); 35 | }, [currentTab]); 36 | 37 | // Create the context value with memoization to prevent unnecessary re-renders 38 | const contextValue = useMemo( 39 | () => ({ 40 | visibleTabs, 41 | setTabVisibility: (tabId: string, isVisible: boolean) => { 42 | setVisibleTabs((prev) => ({ 43 | ...prev, 44 | [tabId]: isVisible, 45 | })); 46 | }, 47 | isTabVisible: (tabId: string) => !!visibleTabs[tabId], 48 | }), 49 | [visibleTabs] 50 | ); 51 | 52 | return ( 53 | 54 | {children} 55 | 56 | ); 57 | }; 58 | 59 | export default TabVisibilityProvider; 60 | -------------------------------------------------------------------------------- /lightrag_webui/src/contexts/context.ts: -------------------------------------------------------------------------------- 1 | import { createContext } from 'react'; 2 | import { TabVisibilityContextType } from './types'; 3 | 4 | // Default context value 5 | const defaultContext: TabVisibilityContextType = { 6 | visibleTabs: {}, 7 | setTabVisibility: () => {}, 8 | isTabVisible: () => false, 9 | }; 10 | 11 | // Create the context 12 | export const TabVisibilityContext = createContext(defaultContext); 13 | -------------------------------------------------------------------------------- /lightrag_webui/src/contexts/types.ts: -------------------------------------------------------------------------------- 1 | export interface TabVisibilityContextType { 2 | visibleTabs: Record; 3 | setTabVisibility: (tabId: string, isVisible: boolean) => void; 4 | isTabVisible: (tabId: string) => boolean; 5 | } 6 | -------------------------------------------------------------------------------- /lightrag_webui/src/contexts/useTabVisibility.ts: -------------------------------------------------------------------------------- 1 | import { useContext } from 'react'; 2 | import { TabVisibilityContext } from './context'; 3 | import { 
TabVisibilityContextType } from './types'; 4 | 5 | /** 6 | * Custom hook to access the tab visibility context 7 | * @returns The tab visibility context 8 | */ 9 | export const useTabVisibility = (): TabVisibilityContextType => { 10 | const context = useContext(TabVisibilityContext); 11 | 12 | if (!context) { 13 | throw new Error('useTabVisibility must be used within a TabVisibilityProvider'); 14 | } 15 | 16 | return context; 17 | }; 18 | -------------------------------------------------------------------------------- /lightrag_webui/src/features/ApiSite.tsx: -------------------------------------------------------------------------------- 1 | import { useState, useEffect } from 'react' 2 | import { useTabVisibility } from '@/contexts/useTabVisibility' 3 | import { backendBaseUrl } from '@/lib/constants' 4 | import { useTranslation } from 'react-i18next' 5 | 6 | export default function ApiSite() { 7 | const { t } = useTranslation() 8 | const { isTabVisible } = useTabVisibility() 9 | const isApiTabVisible = isTabVisible('api') 10 | const [iframeLoaded, setIframeLoaded] = useState(false) 11 | 12 | // Load the iframe once on component mount 13 | useEffect(() => { 14 | if (!iframeLoaded) { 15 | setIframeLoaded(true) 16 | } 17 | }, [iframeLoaded]) 18 | 19 | // Use CSS to hide content when tab is not visible 20 | return ( 21 |
22 | {iframeLoaded ? ( 23 |