├── .dockerignore ├── .github └── workflows │ └── docker-build-push.yml ├── .gitignore ├── .python-version ├── .vscode └── launch.json ├── Dockerfile ├── Dockerfile-ollama-local ├── LICENSE ├── Ollama-instruction.md ├── README.es.md ├── README.ja.md ├── README.kr.md ├── README.md ├── README.vi.md ├── README.zh.md ├── api ├── README.md ├── __init__.py ├── api.py ├── bedrock_client.py ├── config.py ├── config │ ├── embedder.json │ ├── generator.json │ └── repo.json ├── data_pipeline.py ├── main.py ├── ollama_patch.py ├── openai_client.py ├── openrouter_client.py ├── rag.py ├── requirements.txt ├── simple_chat.py ├── test_api.py └── websocket_wiki.py ├── docker-compose.yml ├── eslint.config.mjs ├── next.config.ts ├── package-lock.json ├── package.json ├── postcss.config.mjs ├── public ├── file.svg ├── globe.svg ├── next.svg ├── vercel.svg └── window.svg ├── pyproject.toml ├── run.sh ├── screenshots ├── DeepResearch.png ├── Deepwiki.png ├── Interface.png ├── Ollama.png └── privaterepo.png ├── src ├── app │ ├── [owner] │ │ └── [repo] │ │ │ ├── page.tsx │ │ │ ├── slides │ │ │ └── page.tsx │ │ │ └── workshop │ │ │ └── page.tsx │ ├── api │ │ ├── chat │ │ │ └── stream │ │ │ │ └── route.ts │ │ ├── models │ │ │ └── config │ │ │ │ └── route.ts │ │ └── wiki │ │ │ └── projects │ │ │ └── route.ts │ ├── favicon.ico │ ├── globals.css │ ├── layout.tsx │ ├── page.tsx │ └── wiki │ │ └── projects │ │ └── page.tsx ├── components │ ├── Ask.tsx │ ├── ConfigurationModal.tsx │ ├── Markdown.tsx │ ├── Mermaid.tsx │ ├── ModelSelectionModal.tsx │ ├── ProcessedProjects.tsx │ ├── UserSelector.tsx │ ├── WikiTreeView.tsx │ ├── WikiTypeSelector.tsx │ └── theme-toggle.tsx ├── contexts │ └── LanguageContext.tsx ├── hooks │ └── useProcessedProjects.ts ├── i18n.ts ├── messages │ ├── en.json │ ├── es.json │ ├── ja.json │ ├── kr.json │ ├── vi.json │ └── zh.json ├── types │ ├── repoinfo.tsx │ └── wiki │ │ ├── wikipage.tsx │ │ └── wikistructure.tsx └── utils │ ├── getRepoUrl.tsx │ ├── urlDecoder.tsx │ └── 
websocketClient.ts ├── tailwind.config.js ├── tsconfig.json └── uv.lock /.dockerignore: -------------------------------------------------------------------------------- 1 | # Git 2 | .git 3 | .gitignore 4 | .github 5 | 6 | # Node.js 7 | node_modules 8 | npm-debug.log 9 | yarn-debug.log 10 | yarn-error.log 11 | 12 | # Next.js 13 | .next 14 | out 15 | 16 | # Python cache files (but keep api/ directory) 17 | __pycache__/ 18 | *.py[cod] 19 | *$py.class 20 | *.so 21 | .Python 22 | env/ 23 | build/ 24 | develop-eggs/ 25 | dist/ 26 | downloads/ 27 | eggs/ 28 | .eggs/ 29 | lib/ 30 | lib64/ 31 | parts/ 32 | sdist/ 33 | var/ 34 | *.egg-info/ 35 | .installed.cfg 36 | *.egg 37 | # Keep api/ directory but exclude cache 38 | api/__pycache__/ 39 | api/*.pyc 40 | 41 | # Environment variables 42 | # .env is now allowed to be included in the build 43 | .env.local 44 | .env.development.local 45 | .env.test.local 46 | .env.production.local 47 | 48 | # Docker 49 | Dockerfile 50 | docker-compose.yml 51 | .dockerignore 52 | 53 | # Misc 54 | .DS_Store 55 | *.pem 56 | README.md 57 | LICENSE 58 | screenshots/ 59 | *.md 60 | !api/README.md 61 | -------------------------------------------------------------------------------- /.github/workflows/docker-build-push.yml: -------------------------------------------------------------------------------- 1 | name: Build and Push Docker Image 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | # Allow manual trigger 9 | workflow_dispatch: 10 | 11 | env: 12 | REGISTRY: ghcr.io 13 | IMAGE_NAME: ${{ github.repository }} 14 | 15 | jobs: 16 | build-and-push: 17 | runs-on: ubuntu-latest 18 | permissions: 19 | contents: read 20 | packages: write 21 | 22 | steps: 23 | - name: Checkout repository 24 | uses: actions/checkout@v4 25 | 26 | - name: Set up Docker Buildx 27 | uses: docker/setup-buildx-action@v3 28 | 29 | - name: Log in to the Container registry 30 | uses: docker/login-action@v3 31 | with: 32 | registry: ${{ 
env.REGISTRY }} 33 | username: ${{ github.actor }} 34 | password: ${{ secrets.GITHUB_TOKEN }} 35 | 36 | - name: Extract metadata (tags, labels) for Docker 37 | id: meta 38 | uses: docker/metadata-action@v5 39 | with: 40 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 41 | tags: | 42 | type=semver,pattern={{version}} 43 | type=semver,pattern={{major}}.{{minor}} 44 | type=sha,format=short 45 | type=ref,event=branch 46 | type=ref,event=pr 47 | latest 48 | 49 | - name: Create empty .env file for build 50 | run: touch .env 51 | 52 | - name: Build and push Docker image 53 | uses: docker/build-push-action@v5 54 | with: 55 | context: . 56 | push: ${{ github.event_name != 'pull_request' }} 57 | tags: ${{ steps.meta.outputs.tags }} 58 | labels: ${{ steps.meta.outputs.labels }} 59 | cache-from: type=gha 60 | cache-to: type=gha,mode=max -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.* 7 | .yarn/* 8 | !.yarn/patches 9 | !.yarn/plugins 10 | !.yarn/releases 11 | !.yarn/versions 12 | 13 | # Python 14 | __pycache__/ 15 | *.py[cod] 16 | *$py.class 17 | *.so 18 | .Python 19 | build/ 20 | develop-eggs/ 21 | dist/ 22 | downloads/ 23 | eggs/ 24 | api/logs/ 25 | .eggs/ 26 | lib/ 27 | lib64/ 28 | parts/ 29 | sdist/ 30 | var/ 31 | wheels/ 32 | *.egg-info/ 33 | .installed.cfg 34 | *.egg 35 | *.venv 36 | # testing 37 | /coverage 38 | 39 | # next.js 40 | /.next/ 41 | /out/ 42 | 43 | # production 44 | /build 45 | 46 | # misc 47 | .DS_Store 48 | *.pem 49 | 50 | # debug 51 | npm-debug.log* 52 | yarn-debug.log* 53 | yarn-error.log* 54 | .pnpm-debug.log* 55 | 56 | # env files (can opt-in for committing if needed) 57 | .env* 58 | 59 | # vercel 60 | .vercel 61 | 62 | # typescript 63 | *.tsbuildinfo 64 | next-env.d.ts 65 | 66 | .idea/ 67 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "Deepwiki-Open", 6 | "type": "python", 7 | "request": "launch", 8 | "module": "uvicorn", 9 | "args": [ 10 | "api.api:app", 11 | "--reload", 12 | "--port", 13 | "8001" 14 | ], 15 | "jinja": true, 16 | "justMyCode": true 17 | } 18 | ] 19 | } 20 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1-labs 2 | 3 | FROM node:20-alpine AS node_base 4 | 5 | FROM node_base AS node_deps 6 | WORKDIR /app 7 | COPY package.json package-lock.json ./ 8 | RUN npm ci --legacy-peer-deps 9 | 10 | 
FROM node_base AS node_builder 11 | WORKDIR /app 12 | COPY --from=node_deps /app/node_modules ./node_modules 13 | # Copy only necessary files for Next.js build 14 | COPY package.json package-lock.json next.config.ts tsconfig.json tailwind.config.js postcss.config.mjs ./ 15 | COPY src/ ./src/ 16 | COPY public/ ./public/ 17 | # Increase Node.js memory limit for build and disable telemetry 18 | ENV NODE_OPTIONS="--max-old-space-size=4096" 19 | ENV NEXT_TELEMETRY_DISABLED=1 20 | RUN NODE_ENV=production npm run build 21 | 22 | FROM python:3.11-slim AS py_deps 23 | WORKDIR /app 24 | RUN python -m venv /opt/venv 25 | ENV PATH="/opt/venv/bin:$PATH" 26 | COPY api/requirements.txt ./api/ 27 | RUN pip install --no-cache -r api/requirements.txt 28 | 29 | # Use Python 3.11 as final image 30 | FROM python:3.11-slim 31 | 32 | # Set working directory 33 | WORKDIR /app 34 | 35 | # Install Node.js and npm 36 | RUN apt-get update && apt-get install -y \ 37 | curl \ 38 | gnupg \ 39 | git \ 40 | && mkdir -p /etc/apt/keyrings \ 41 | && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \ 42 | && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list \ 43 | && apt-get update \ 44 | && apt-get install -y nodejs \ 45 | && apt-get clean \ 46 | && rm -rf /var/lib/apt/lists/* 47 | 48 | ENV PATH="/opt/venv/bin:$PATH" 49 | 50 | # Copy Python dependencies 51 | COPY --from=py_deps /opt/venv /opt/venv 52 | COPY api/ ./api/ 53 | 54 | # Copy Node app 55 | COPY --from=node_builder /app/public ./public 56 | COPY --from=node_builder /app/.next/standalone ./ 57 | COPY --from=node_builder /app/.next/static ./.next/static 58 | 59 | # Expose the port the app runs on 60 | EXPOSE ${PORT:-8001} 3000 61 | 62 | # Create a script to run both backend and frontend 63 | RUN echo '#!/bin/bash\n\ 64 | # Load environment variables from .env file if it 
exists\n\ 65 | if [ -f .env ]; then\n\ 66 | export $(grep -v "^#" .env | xargs -r)\n\ 67 | fi\n\ 68 | \n\ 69 | # Check for required environment variables\n\ 70 | if [ -z "$OPENAI_API_KEY" ] || [ -z "$GOOGLE_API_KEY" ]; then\n\ 71 | echo "Warning: OPENAI_API_KEY and/or GOOGLE_API_KEY environment variables are not set."\n\ 72 | echo "These are required for DeepWiki to function properly."\n\ 73 | echo "You can provide them via a mounted .env file or as environment variables when running the container."\n\ 74 | fi\n\ 75 | \n\ 76 | # Start the API server in the background with the configured port\n\ 77 | python -m api.main --port ${PORT:-8001} &\n\ 78 | PORT=3000 HOSTNAME=0.0.0.0 node server.js &\n\ 79 | wait -n\n\ 80 | exit $?' > /app/start.sh && chmod +x /app/start.sh 81 | 82 | # Set environment variables 83 | ENV PORT=8001 84 | ENV NODE_ENV=production 85 | ENV SERVER_BASE_URL=http://localhost:${PORT:-8001} 86 | 87 | # Create empty .env file (will be overridden if one exists at runtime) 88 | RUN touch .env 89 | 90 | # Command to run the application 91 | CMD ["/app/start.sh"] 92 | -------------------------------------------------------------------------------- /Dockerfile-ollama-local: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1-labs 2 | 3 | FROM node:20-alpine AS node_base 4 | 5 | FROM node_base AS node_deps 6 | WORKDIR /app 7 | COPY package.json package-lock.json ./ 8 | RUN npm ci --legacy-peer-deps 9 | 10 | FROM node_base AS node_builder 11 | WORKDIR /app 12 | COPY --from=node_deps /app/node_modules ./node_modules 13 | COPY --exclude=./api . . 
14 | RUN NODE_ENV=production npm run build 15 | 16 | FROM python:3.11-slim AS py_deps 17 | WORKDIR /app 18 | RUN python -m venv /opt/venv 19 | ENV PATH="/opt/venv/bin:$PATH" 20 | COPY api/requirements.txt ./api/ 21 | RUN pip install --no-cache -r api/requirements.txt 22 | 23 | FROM python:3.11-slim AS ollama_base 24 | RUN apt-get update && apt-get install -y \ 25 | curl 26 | # Detect architecture and download appropriate Ollama version 27 | # ARG TARGETARCH can be set at build time with --build-arg TARGETARCH=arm64 or TARGETARCH=amd64 28 | ARG TARGETARCH=arm64 29 | RUN OLLAMA_ARCH="" && \ 30 | if [ "$TARGETARCH" = "arm64" ]; then \ 31 | echo "Building for ARM64 architecture." && \ 32 | OLLAMA_ARCH="arm64"; \ 33 | elif [ "$TARGETARCH" = "amd64" ]; then \ 34 | echo "Building for AMD64 architecture." && \ 35 | OLLAMA_ARCH="amd64"; \ 36 | else \ 37 | echo "Error: Unsupported architecture '$TARGETARCH'. Supported architectures are 'arm64' and 'amd64'." >&2 && \ 38 | exit 1; \ 39 | fi && \ 40 | curl -L "https://ollama.com/download/ollama-linux-${OLLAMA_ARCH}.tgz" -o ollama.tgz && \ 41 | tar -C /usr -xzf ollama.tgz && \ 42 | rm ollama.tgz 43 | 44 | RUN ollama serve > /dev/null 2>&1 & \ 45 | sleep 20 && \ 46 | ollama pull nomic-embed-text && \ 47 | ollama pull qwen3:1.7b 48 | 49 | # Use Python 3.11 as final image 50 | FROM python:3.11-slim 51 | 52 | # Set working directory 53 | WORKDIR /app 54 | 55 | # Install Node.js and npm 56 | RUN apt-get update && apt-get install -y \ 57 | curl \ 58 | gnupg \ 59 | git \ 60 | && mkdir -p /etc/apt/keyrings \ 61 | && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \ 62 | && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list \ 63 | && apt-get update \ 64 | && apt-get install -y nodejs \ 65 | && apt-get clean \ 66 | && rm -rf /var/lib/apt/lists/* 67 | 68 | ENV 
PATH="/opt/venv/bin:$PATH" 69 | 70 | # Copy Python dependencies 71 | COPY --from=py_deps /opt/venv /opt/venv 72 | COPY api/ ./api/ 73 | 74 | # Copy Node app 75 | COPY --from=node_builder /app/public ./public 76 | COPY --from=node_builder /app/.next/standalone ./ 77 | COPY --from=node_builder /app/.next/static ./.next/static 78 | COPY --from=ollama_base /usr/bin/ollama /usr/local/bin/ 79 | COPY --from=ollama_base /root/.ollama /root/.ollama 80 | 81 | # Expose the port the app runs on 82 | EXPOSE ${PORT:-8001} 3000 83 | 84 | # Create a script to run both backend and frontend 85 | RUN echo '#!/bin/bash\n\ 86 | # Start ollama serve in background\n\ 87 | ollama serve > /dev/null 2>&1 &\n\ 88 | \n\ 89 | # Load environment variables from .env file if it exists\n\ 90 | if [ -f .env ]; then\n\ 91 | export $(grep -v "^#" .env | xargs -r)\n\ 92 | fi\n\ 93 | \n\ 94 | # Check for required environment variables\n\ 95 | if [ -z "$OPENAI_API_KEY" ] || [ -z "$GOOGLE_API_KEY" ]; then\n\ 96 | echo "Warning: OPENAI_API_KEY and/or GOOGLE_API_KEY environment variables are not set."\n\ 97 | echo "These are required for DeepWiki to function properly."\n\ 98 | echo "You can provide them via a mounted .env file or as environment variables when running the container."\n\ 99 | fi\n\ 100 | \n\ 101 | # Start the API server in the background with the configured port\n\ 102 | python -m api.main --port ${PORT:-8001} &\n\ 103 | PORT=3000 HOSTNAME=0.0.0.0 node server.js &\n\ 104 | wait -n\n\ 105 | exit $?' 
> /app/start.sh && chmod +x /app/start.sh 106 | 107 | # Set environment variables 108 | ENV PORT=8001 109 | ENV NODE_ENV=production 110 | ENV SERVER_BASE_URL=http://localhost:${PORT:-8001} 111 | 112 | # Create empty .env file (will be overridden if one exists at runtime) 113 | RUN touch .env 114 | 115 | # Command to run the application 116 | CMD ["/app/start.sh"] 117 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Sheing Ng 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Ollama-instruction.md: -------------------------------------------------------------------------------- 1 | # Using DeepWiki with Ollama: Beginner's Guide 2 | 3 | DeepWiki supports local AI models through Ollama, which is perfect if you want to: 4 | 5 | - Run everything locally without relying on cloud APIs 6 | - Avoid API costs from OpenAI or Google 7 | - Have more privacy with your code analysis 8 | 9 | ## Step 1: Install Ollama 10 | 11 | ### For Windows 12 | - Download Ollama from the [official website](https://ollama.com/download) 13 | - Run the installer and follow the on-screen instructions 14 | - After installation, Ollama will run in the background (check your system tray) 15 | 16 | ### For macOS 17 | - Download Ollama from the [official website](https://ollama.com/download) 18 | - Open the downloaded file and drag Ollama to your Applications folder 19 | - Launch Ollama from your Applications folder 20 | 21 | ### For Linux 22 | - Run the following command: 23 | ```bash 24 | curl -fsSL https://ollama.com/install.sh | sh 25 | ``` 26 | 27 | ## Step 2: Download Required Models 28 | 29 | Open a terminal (Command Prompt or PowerShell on Windows) and run: 30 | 31 | ```bash 32 | ollama pull nomic-embed-text 33 | ollama pull qwen3:1.7b 34 | ``` 35 | 36 | The first command downloads the embedding model that DeepWiki uses to understand your code. The second downloads a small but capable language model for generating documentation. 
37 | 38 | ## Step 3: Set Up DeepWiki 39 | 40 | Clone the DeepWiki repository: 41 | ```bash 42 | git clone https://github.com/AsyncFuncAI/deepwiki-open.git 43 | cd deepwiki-open 44 | ``` 45 | 46 | Create a `.env` file in the project root: 47 | ``` 48 | # No need for API keys when using Ollama locally 49 | PORT=8001 50 | # Optionally, provide OLLAMA_HOST if Ollama is not local 51 | OLLAMA_HOST=your_ollama_host # (default: http://localhost:11434) 52 | ``` 53 | 54 | Start the backend: 55 | ```bash 56 | pip install -r api/requirements.txt 57 | python -m api.main 58 | ``` 59 | 60 | Start the frontend: 61 | ```bash 62 | npm install 63 | npm run dev 64 | ``` 65 | 66 | ## Step 4: Use DeepWiki with Ollama 67 | 68 | 1. Open http://localhost:3000 in your browser 69 | 2. Enter a GitHub, GitLab, or Bitbucket repository URL 70 | 3. Check the use "Local Ollama Model" option 71 | 4. Click "Generate Wiki" 72 | 73 | ![Ollama Option](screenshots/Ollama.png) 74 | 75 | ## Alternative using Dockerfile 76 | 77 | 1. Build the docker image `docker build -f Dockerfile-ollama-local -t deepwiki:ollama-local .` 78 | 2. Run the container: 79 | ```bash 80 | # For regular use 81 | docker run -p 3000:3000 -p 8001:8001 --name deepwiki \ 82 | -v ~/.adalflow:/root/.adalflow \ 83 | -e OLLAMA_HOST=your_ollama_host \ 84 | deepwiki:ollama-local 85 | 86 | # For local repository analysis 87 | docker run -p 3000:3000 -p 8001:8001 --name deepwiki \ 88 | -v ~/.adalflow:/root/.adalflow \ 89 | -e OLLAMA_HOST=your_ollama_host \ 90 | -v /path/to/your/repo:/app/local-repos/repo-name \ 91 | deepwiki:ollama-local 92 | ``` 93 | 94 | 3. When using local repositories in the interface: use `/app/local-repos/repo-name` as the local repository path. 95 | 96 | 4. Open http://localhost:3000 in your browser 97 | 98 | Note: For Apple Silicon Macs, the Dockerfile automatically uses ARM64 binaries for better performance. 99 | 100 | ## How It Works 101 | 102 | When you select "Use Local Ollama", DeepWiki will: 103 | 104 | 1. 
Use the `nomic-embed-text` model for creating embeddings of your code 105 | 2. Use the `qwen3:1.7b` model for generating documentation 106 | 3. Process everything locally on your machine 107 | 108 | ## Troubleshooting 109 | 110 | ### "Cannot connect to Ollama server" 111 | - Make sure Ollama is running in the background. You can check by running `ollama list` in your terminal. 112 | - Verify that Ollama is running on the default port (11434) 113 | - Try restarting Ollama 114 | 115 | ### Slow generation 116 | - Local models are typically slower than cloud APIs. Consider using a smaller repository or a more powerful computer. 117 | - The `qwen3:1.7b` model is optimized for speed and quality balance. Larger models will be slower but may produce better results. 118 | 119 | ### Out of memory errors 120 | - If you encounter memory issues, try using a smaller model like `phi3:mini` instead of larger models. 121 | - Close other memory-intensive applications while running Ollama 122 | 123 | ## Advanced: Using Different Models 124 | 125 | If you want to try different models, you can modify the `api/config/generator.json` file: 126 | 127 | ```python 128 | "generator_ollama": { 129 | "model_client": OllamaClient, 130 | "model_kwargs": { 131 | "model": "qwen3:1.7b", # Change this to another model 132 | "options": { 133 | "temperature": 0.7, 134 | "top_p": 0.8, 135 | } 136 | }, 137 | }, 138 | ``` 139 | 140 | You can replace `"model": "qwen3:1.7b"` with any model you've pulled with Ollama. For a list of available models, visit [Ollama's model library](https://ollama.com/library) or run `ollama list` in your terminal. 
141 | 142 | Similarly, you can change the embedding model: 143 | 144 | ```python 145 | "embedder_ollama": { 146 | "model_client": OllamaClient, 147 | "model_kwargs": { 148 | "model": "nomic-embed-text" # Change this to another embedding model 149 | }, 150 | }, 151 | ``` 152 | 153 | ## Performance Considerations 154 | 155 | ### Hardware Requirements 156 | 157 | For optimal performance with Ollama: 158 | - **CPU**: 4+ cores recommended 159 | - **RAM**: 8GB minimum, 16GB+ recommended 160 | - **Storage**: 10GB+ free space for models 161 | - **GPU**: Optional but highly recommended for faster processing 162 | 163 | ### Model Selection Guide 164 | 165 | | Model | Size | Speed | Quality | Use Case | 166 | |-------|------|-------|---------|----------| 167 | | phi3:mini | 1.3GB | Fast | Good | Small projects, quick testing | 168 | | qwen3:1.7b | 3.8GB | Medium | Better | Default, good balance | 169 | | llama3:8b | 8GB | Slow | Best | Complex projects, detailed analysis | 170 | 171 | ## Limitations 172 | 173 | When using Ollama with DeepWiki: 174 | 175 | 1. **No Internet Access**: The models run completely offline and cannot access external information 176 | 2. **Limited Context Window**: Local models typically have smaller context windows than cloud APIs 177 | 3. **Less Powerful**: Local models may not match the quality of the latest cloud models 178 | 179 | ## Conclusion 180 | 181 | Using DeepWiki with Ollama gives you a completely local, private solution for code documentation. While it may not match the speed or quality of cloud-based solutions, it provides a free and privacy-focused alternative that works well for most projects. 182 | 183 | Enjoy using DeepWiki with your local Ollama models! 
184 | -------------------------------------------------------------------------------- /README.ja.md: -------------------------------------------------------------------------------- 1 | # DeepWiki-Open 2 | 3 | ![DeepWiki バナー](screenshots/Deepwiki.png) 4 | 5 | **DeepWiki**は、GitHub、GitLab、または Bitbucket リポジトリのための美しくインタラクティブな Wiki を自動的に作成します!リポジトリ名を入力するだけで、DeepWiki は以下を行います: 6 | 7 | 1. コード構造を分析 8 | 2. 包括的なドキュメントを生成 9 | 3. すべての仕組みを説明する視覚的な図を作成 10 | 4. すべてを簡単に閲覧できる Wiki に整理 11 | 12 | [!["Buy Me A Coffee"](https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png)](https://buymeacoffee.com/sheing) 13 | 14 | [![Twitter/X](https://img.shields.io/badge/Twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/sashimikun_void) 15 | [![Discord](https://img.shields.io/badge/Discord-7289DA?style=for-the-badge&logo=discord&logoColor=white)](https://discord.com/invite/VQMBGR8u5v) 16 | 17 | [English](./README.md) | [简体中文](./README.zh.md) | [日本語](./README.ja.md) | [Español](./README.es.md) | [한국어](./README.kr.md) | [Tiếng Việt](./README.vi.md) 18 | 19 | ## ✨ 特徴 20 | 21 | - **即時ドキュメント生成**: あらゆる GitHub、GitLab、または Bitbucket リポジトリを数秒で Wiki に変換 22 | - **プライベートリポジトリ対応**: 個人アクセストークンを使用してプライベートリポジトリに安全にアクセス 23 | - **スマート分析**: AI を活用したコード構造と関係の理解 24 | - **美しい図表**: アーキテクチャとデータフローを視覚化する自動 Mermaid 図 25 | - **簡単なナビゲーション**: Wiki を探索するためのシンプルで直感的なインターフェース 26 | - **質問機能**: RAG 搭載 AI を使用してリポジトリとチャットし、正確な回答を得る 27 | - **詳細調査**: 複雑なトピックを徹底的に調査する多段階研究プロセス 28 | - **複数のモデルプロバイダー**: Google Gemini、OpenAI、OpenRouter、およびローカル Ollama モデルのサポート 29 | 30 | ## 🚀 クイックスタート(超簡単!) 
31 | 32 | ### オプション 1: Docker を使用 33 | 34 | ```bash 35 | # リポジトリをクローン 36 | git clone https://github.com/AsyncFuncAI/deepwiki-open.git 37 | cd deepwiki-open 38 | 39 | # APIキーを含む.envファイルを作成 40 | echo "GOOGLE_API_KEY=your_google_api_key" > .env 41 | echo "OPENAI_API_KEY=your_openai_api_key" >> .env 42 | # オプション: OpenRouterモデルを使用する場合はOpenRouter APIキーを追加 43 | echo "OPENROUTER_API_KEY=your_openrouter_api_key" >> .env 44 | 45 | # Docker Composeで実行 46 | docker-compose up 47 | ``` 48 | 49 | (上記の Docker コマンドおよび`docker-compose.yml`の設定では、ホスト上の`~/.adalflow`ディレクトリをコンテナ内の`/root/.adalflow`にマウントします。このパスは以下のものを保存するために使用されます: 50 | 51 | - クローンされたリポジトリ (`~/.adalflow/repos/`) 52 | - それらのエンベディングとインデックス (`~/.adalflow/databases/`) 53 | - 生成された Wiki のキャッシュ (`~/.adalflow/wikicache/`) 54 | 55 | これにより、コンテナが停止または削除されてもデータが永続化されます。) 56 | 57 | > 💡 **これらのキーの入手先:** 58 | > 59 | > - Google API キーは[Google AI Studio](https://makersuite.google.com/app/apikey)から取得 60 | > - OpenAI API キーは[OpenAI Platform](https://platform.openai.com/api-keys)から取得 61 | 62 | ### オプション 2: 手動セットアップ(推奨) 63 | 64 | #### ステップ 1: API キーの設定 65 | 66 | プロジェクトのルートに`.env`ファイルを作成し、以下のキーを追加します: 67 | 68 | ``` 69 | GOOGLE_API_KEY=your_google_api_key 70 | OPENAI_API_KEY=your_openai_api_key 71 | # オプション: OpenRouterモデルを使用する場合は追加 72 | OPENROUTER_API_KEY=your_openrouter_api_key 73 | ``` 74 | 75 | #### ステップ 2: バックエンドの起動 76 | 77 | ```bash 78 | # Pythonの依存関係をインストール 79 | pip install -r api/requirements.txt 80 | 81 | # APIサーバーを起動 82 | python -m api.main 83 | ``` 84 | 85 | #### ステップ 3: フロントエンドの起動 86 | 87 | ```bash 88 | # JavaScript依存関係をインストール 89 | npm install 90 | # または 91 | yarn install 92 | 93 | # Webアプリを起動 94 | npm run dev 95 | # または 96 | yarn dev 97 | ``` 98 | 99 | #### ステップ 4: DeepWiki を使用! 100 | 101 | 1. ブラウザで[http://localhost:3000](http://localhost:3000)を開く 102 | 2. 
GitHub、GitLab、または Bitbucket リポジトリを入力(例:`https://github.com/openai/codex`、`https://github.com/microsoft/autogen`、`https://gitlab.com/gitlab-org/gitlab`、または`https://bitbucket.org/redradish/atlassian_app_versions`) 103 | 3. プライベートリポジトリの場合は、「+ アクセストークンを追加」をクリックして GitHub または GitLab の個人アクセストークンを入力 104 | 4. 「Wiki を生成」をクリックして、魔法が起こるのを見守りましょう! 105 | 106 | ## 🔍 仕組み 107 | 108 | DeepWiki は AI を使用して: 109 | 110 | 1. GitHub、GitLab、または Bitbucket リポジトリをクローンして分析(トークン認証によるプライベートリポジトリを含む) 111 | 2. スマート検索のためのコードの埋め込みを作成 112 | 3. コンテキスト対応 AI でドキュメントを生成(Google Gemini、OpenAI、OpenRouter、またはローカル Ollama モデルを使用) 113 | 4. コードの関係を説明する視覚的な図を作成 114 | 5. すべてを構造化された Wiki に整理 115 | 6. 質問機能を通じてリポジトリとのインテリジェントな Q&A を可能に 116 | 7. 詳細調査機能で深い研究能力を提供 117 | 118 | ```mermaid 119 | graph TD 120 | A[ユーザーがGitHub/GitLab/Bitbucketリポジトリを入力] --> AA{プライベートリポジトリ?} 121 | AA -->|はい| AB[アクセストークンを追加] 122 | AA -->|いいえ| B[リポジトリをクローン] 123 | AB --> B 124 | B --> C[コード構造を分析] 125 | C --> D[コード埋め込みを作成] 126 | 127 | D --> M{モデルプロバイダーを選択} 128 | M -->|Google Gemini| E1[Geminiで生成] 129 | M -->|OpenAI| E2[OpenAIで生成] 130 | M -->|OpenRouter| E3[OpenRouterで生成] 131 | M -->|ローカルOllama| E4[Ollamaで生成] 132 | 133 | E1 --> E[ドキュメントを生成] 134 | E2 --> E 135 | E3 --> E 136 | E4 --> E 137 | 138 | D --> F[視覚的な図を作成] 139 | E --> G[Wikiとして整理] 140 | F --> G 141 | G --> H[インタラクティブなDeepWiki] 142 | 143 | classDef process stroke-width:2px; 144 | classDef data stroke-width:2px; 145 | classDef result stroke-width:2px; 146 | classDef decision stroke-width:2px; 147 | 148 | class A,D data; 149 | class AA,M decision; 150 | class B,C,E,F,G,AB,E1,E2,E3,E4 process; 151 | class H result; 152 | ``` 153 | 154 | ## 🛠️ プロジェクト構造 155 | 156 | ``` 157 | deepwiki/ 158 | ├── api/ # バックエンドAPIサーバー 159 | │ ├── main.py # APIエントリーポイント 160 | │ ├── api.py # FastAPI実装 161 | │ ├── rag.py # 検索拡張生成 162 | │ ├── data_pipeline.py # データ処理ユーティリティ 163 | │ └── requirements.txt # Python依存関係 164 | │ 165 | ├── src/ # フロントエンドNext.jsアプリ 166 | │ ├── app/ # Next.jsアプリディレクトリ 167 | │ │ └── page.tsx # 
メインアプリケーションページ 168 | │ └── components/ # Reactコンポーネント 169 | │ └── Mermaid.tsx # Mermaid図レンダラー 170 | │ 171 | ├── public/ # 静的アセット 172 | ├── package.json # JavaScript依存関係 173 | └── .env # 環境変数(作成する必要あり) 174 | ``` 175 | 176 | ## 🛠️ 高度な設定 177 | 178 | ### 環境変数 179 | 180 | | 変数 | 説明 | 必須 | 注意 | 181 | | ----------------------------- | --------------------------------------------------------------- | ---- | ------------------------------------------------------------------------------------------------------------- | 182 | | `GOOGLE_API_KEY` | AI 生成のための Google Gemini API キー | ◯ | | 183 | | `OPENAI_API_KEY` | 埋め込みのための OpenAI API キー | ◯ | | 184 | | `OPENROUTER_API_KEY` | 代替モデルのための OpenRouter API キー | ✗ | OpenRouter モデルを使用する場合にのみ必須です | 185 | | `PORT` | API サーバーのポート(デフォルト:8001) | ✗ | API とフロントエンドを同じマシンでホストする場合、`NEXT_PUBLIC_SERVER_BASE_URL`のポートを適宜変更してください | 186 | | `NEXT_PUBLIC_SERVER_BASE_URL` | API サーバーのベース URL(デフォルト:`http://localhost:8001`) | ✗ | | 187 | 188 | ### 設定ファイル 189 | 190 | DeepWikiはシステムの様々な側面を管理するためにJSON設定ファイルを使用しています: 191 | 192 | 1. **`generator.json`**: テキスト生成モデルの設定 193 | - 利用可能なモデルプロバイダー(Google、OpenAI、OpenRouter、Ollama)を定義 194 | - 各プロバイダーのデフォルトおよび利用可能なモデルを指定 195 | - temperatureやtop_pなどのモデル固有のパラメータを含む 196 | 197 | 2. **`embedder.json`**: 埋め込みモデルとテキスト処理の設定 198 | - ベクトルストレージ用の埋め込みモデルを定義 199 | - RAG用の検索設定を含む 200 | - ドキュメントチャンク分割のためのテキスト分割設定を指定 201 | 202 | 3. 
**`repo.json`**: リポジトリ処理の設定 203 | - 特定のファイルやディレクトリを除外するファイルフィルターを含む 204 | - リポジトリサイズ制限と処理ルールを定義 205 | 206 | デフォルトでは、これらのファイルは`api/config/`ディレクトリにあります。`DEEPWIKI_CONFIG_DIR`環境変数を使用して、その場所をカスタマイズできます。 207 | 208 | ### Docker セットアップ 209 | 210 | Docker を使用して DeepWiki を実行できます: 211 | 212 | ```bash 213 | # GitHub Container Registryからイメージをプル 214 | docker pull ghcr.io/asyncfuncai/deepwiki-open:latest 215 | 216 | # 環境変数を設定してコンテナを実行 217 | docker run -p 8001:8001 -p 3000:3000 \ 218 | -e GOOGLE_API_KEY=your_google_api_key \ 219 | -e OPENAI_API_KEY=your_openai_api_key \ 220 | -e OPENROUTER_API_KEY=your_openrouter_api_key \ 221 | -v ~/.adalflow:/root/.adalflow \ 222 | ghcr.io/asyncfuncai/deepwiki-open:latest 223 | ``` 224 | 225 | このコマンドは、ホスト上の ⁠~/.adalflow をコンテナ内の ⁠/root/.adalflow にマウントします。このパスは以下のものを保存するために使用されます: 226 | 227 | - クローンされたリポジトリ (⁠~/.adalflow/repos/) 228 | - それらのエンベディングとインデックス (⁠~/.adalflow/databases/) 229 | - 生成された Wiki のキャッシュ (⁠~/.adalflow/wikicache/) 230 | 231 | これにより、コンテナが停止または削除されてもデータが永続化されます。 232 | または、提供されている ⁠docker-compose.yml ファイルを使用します。 233 | 234 | ```bash 235 | # まず.envファイルをAPIキーで編集 236 | docker-compose up 237 | ``` 238 | 239 | (⁠docker-compose.yml ファイルは、上記の ⁠docker run コマンドと同様に、データ永続化のために ⁠~/.adalflow をマウントするように事前設定されています。) 240 | 241 | #### Docker で.env ファイルを使用する 242 | 243 | .env ファイルをコンテナにマウントすることもできます: 244 | 245 | ```bash 246 | # APIキーを含む.envファイルを作成 247 | echo "GOOGLE_API_KEY=your_google_api_key" > .env 248 | echo "OPENAI_API_KEY=your_openai_api_key" >> .env 249 | echo "OPENROUTER_API_KEY=your_openrouter_api_key" >> .env 250 | 251 | # .envファイルをマウントしてコンテナを実行 252 | docker run -p 8001:8001 -p 3000:3000 \ 253 | -v $(pwd)/.env:/app/.env \ 254 | -v ~/.adalflow:/root/.adalflow \ 255 | ghcr.io/asyncfuncai/deepwiki-open:latest 256 | ``` 257 | 258 | このコマンドは、ホスト上の ⁠~/.adalflow をコンテナ内の ⁠/root/.adalflow にマウントします。このパスは以下のものを保存するために使用されます: 259 | 260 | - クローンされたリポジトリ (⁠~/.adalflow/repos/) 261 | - それらのエンベディングとインデックス (⁠~/.adalflow/databases/) 262 | - 生成された Wiki のキャッシュ 
(⁠~/.adalflow/wikicache/) 263 | 264 | これにより、コンテナが停止または削除されてもデータが永続化されます。 265 | 266 | #### Docker イメージをローカルでビルドする 267 | 268 | Docker イメージをローカルでビルドしたい場合: 269 | 270 | ```bash 271 | # リポジトリをクローン 272 | git clone https://github.com/AsyncFuncAI/deepwiki-open.git 273 | cd deepwiki-open 274 | 275 | # Dockerイメージをビルド 276 | docker build -t deepwiki-open . 277 | 278 | # コンテナを実行 279 | docker run -p 8001:8001 -p 3000:3000 \ 280 | -e GOOGLE_API_KEY=your_google_api_key \ 281 | -e OPENAI_API_KEY=your_openai_api_key \ 282 | -e OPENROUTER_API_KEY=your_openrouter_api_key \ 283 | deepwiki-open 284 | ``` 285 | 286 | # API サーバー詳細 287 | 288 | API サーバーは以下を提供します: 289 | 290 | - リポジトリのクローンとインデックス作成 291 | - RAG(Retrieval Augmented Generation:検索拡張生成) 292 | - ストリーミングチャット補完 293 | 294 | 詳細については、API README を参照してください。 295 | 296 | ## 🤖 プロバイダーベースのモデル選択システム 297 | 298 | DeepWikiでは、複数のLLMプロバイダーをサポートする柔軟なプロバイダーベースのモデル選択システムを実装しています: 299 | 300 | ### サポートされているプロバイダーとモデル 301 | 302 | - **Google**: デフォルトは `gemini-2.0-flash`、また `gemini-1.5-flash`、`gemini-1.0-pro` などもサポート 303 | - **OpenAI**: デフォルトは `gpt-4o`、また `o4-mini` などもサポート 304 | - **OpenRouter**: Claude、Llama、Mistralなど、統一APIを通じて複数のモデルにアクセス 305 | - **Ollama**: `llama3` などのローカルで実行するオープンソースモデルをサポート 306 | 307 | ### 環境変数 308 | 309 | 各プロバイダーには、対応するAPI鍵の環境変数が必要です: 310 | 311 | ``` 312 | # API鍵 313 | GOOGLE_API_KEY=あなたのGoogle API鍵 # Google Geminiモデルに必要 314 | OPENAI_API_KEY=あなたのOpenAI鍵 # OpenAIモデルに必要 315 | OPENROUTER_API_KEY=あなたのOpenRouter鍵 # OpenRouterモデルに必要 316 | 317 | # OpenAI APIベースURL設定 318 | OPENAI_BASE_URL=https://カスタムAPIエンドポイント.com/v1 # オプション、カスタムOpenAI APIエンドポイント用 319 | ``` 320 | 321 | ### サービスプロバイダー向けのカスタムモデル選択 322 | 323 | カスタムモデル選択機能は、あなたの組織のユーザーに様々なAIモデルの選択肢を提供するために特別に設計されています: 324 | 325 | - あなたは組織内のユーザーに様々なAIモデルの選択肢を提供できます 326 | - あなたはコード変更なしで急速に進化するLLM環境に迅速に適応できます 327 | - あなたは事前定義リストにない専門的またはファインチューニングされたモデルをサポートできます 328 | 329 | サービスプロバイダーは、事前定義されたオプションから選択するか、フロントエンドインターフェースでカスタムモデル識別子を入力することで、モデル提供を実装できます。 330 | 331 | ### エンタープライズプライベートチャネル向けのベースURL設定 
332 | 333 | OpenAIクライアントのbase_url設定は、主にプライベートAPIチャネルを持つエンタープライズユーザー向けに設計されています。この機能は: 334 | 335 | - プライベートまたは企業固有のAPIエンドポイントへの接続を可能に 336 | - 組織が自己ホスト型または独自にデプロイされたLLMサービスを使用可能に 337 | - サードパーティのOpenAI API互換サービスとの統合をサポート 338 | 339 | **近日公開**: 将来のアップデートでは、ユーザーがリクエストで自分のAPI鍵を提供する必要があるモードをDeepWikiがサポートする予定です。これにより、プライベートチャネルを持つエンタープライズ顧客は、DeepWikiデプロイメントと認証情報を共有することなく、既存のAPI設定を使用できるようになります。 340 | 341 | ## 🔌 OpenRouter 連携 342 | 343 | DeepWiki は、モデルプロバイダーとして OpenRouter をサポートするようになり、単一の API を通じて数百の AI モデルにアクセスできるようになりました。 344 | 345 | - 複数のモデルオプション: OpenAI、Anthropic、Google、Meta、Mistralなど、統一APIを通じて複数のモデルにアクセス 346 | - 簡単な設定: OpenRouter API キーを追加し、使用したいモデルを選択するだけ 347 | - コスト効率: 予算とパフォーマンスのニーズに合ったモデルを選択 348 | - 簡単な切り替え: コードを変更することなく、異なるモデル間を切り替え可能 349 | 350 | ### DeepWiki で OpenRouter を使用する方法 351 | 352 | 1. API キーを取得: OpenRouter でサインアップし、API キーを取得します 353 | 2. 環境に追加: ⁠.env ファイルに ⁠OPENROUTER_API_KEY=your_key を追加します 354 | 3. UI で有効化: ホームページの「OpenRouter API を使用」オプションをチェックします 355 | 4. モデルを選択: GPT-4o、Claude 3.5 Sonnet、Gemini 2.0 などの人気モデルから選択します 356 | 357 | OpenRouter は特に以下のような場合に便利です: 358 | 359 | - 複数のサービスにサインアップせずに異なるモデルを試したい 360 | - お住まいの地域で制限されている可能性のあるモデルにアクセスしたい 361 | - 異なるモデルプロバイダー間でパフォーマンスを比較したい 362 | - ニーズに基づいてコストとパフォーマンスを最適化したい 363 | 364 | ## 🤖 質問と詳細調査機能 365 | 366 | ### 質問機能 367 | 368 | 質問機能を使用すると、検索拡張生成(RAG)を使用してリポジトリとチャットできます: 369 | 370 | - **コンテキスト対応の回答**: リポジトリの実際のコードに基づいた正確な回答を取得 371 | - **RAG 搭載**: システムは関連するコードスニペットを取得して根拠のある回答を提供 372 | - **リアルタイムストリーミング**: よりインタラクティブな体験のために、生成されるレスポンスをリアルタイムで確認 373 | - **会話履歴**: システムは質問間のコンテキストを維持し、より一貫性のあるインタラクションを実現 374 | 375 | ### 詳細調査機能 376 | 377 | 詳細調査は、複数ターンの研究プロセスでリポジトリ分析を次のレベルに引き上げます: 378 | 379 | - **詳細な調査**: 複数の研究反復を通じて複雑なトピックを徹底的に探索 380 | - **構造化されたプロセス**: 明確な研究計画、更新、包括的な結論を含む 381 | - **自動継続**: AI は結論に達するまで自動的に研究を継続(最大 5 回の反復) 382 | - **研究段階**: 383 | 1. **研究計画**: アプローチと初期調査結果の概要 384 | 2. **研究更新**: 新しい洞察を加えて前の反復を発展 385 | 3. 
**最終結論**: すべての反復に基づく包括的な回答を提供 386 | 387 | 詳細調査を使用するには、質問を送信する前に質問インターフェースの「詳細調査」スイッチをオンにするだけです。 388 | 389 | ## 📱 スクリーンショット 390 | 391 | ![DeepWikiメインインターフェース](screenshots/Interface.png) 392 | _DeepWiki のメインインターフェース_ 393 | 394 | ![プライベートリポジトリサポート](screenshots/privaterepo.png) 395 | _個人アクセストークンを使用したプライベートリポジトリへのアクセス_ 396 | 397 | ![詳細調査機能](screenshots/DeepResearch.png) 398 | _詳細調査は複雑なトピックに対して多段階の調査を実施_ 399 | 400 | ### デモビデオ 401 | 402 | [![DeepWikiデモビデオ](https://img.youtube.com/vi/zGANs8US8B4/0.jpg)](https://youtu.be/zGANs8US8B4) 403 | 404 | _DeepWiki の動作を見る!_ 405 | 406 | ## ❓ トラブルシューティング 407 | 408 | ### API キーの問題 409 | 410 | - **「環境変数が見つかりません」**: `.env`ファイルがプロジェクトのルートにあり、必要な API キーが含まれていることを確認 411 | - **「API キーが無効です」**: キー全体が余分なスペースなしで正しくコピーされていることを確認 412 | - **「OpenRouter API エラー」**: OpenRouter API キーが有効で、十分なクレジットがあることを確認 413 | 414 | ### 接続の問題 415 | 416 | - **「API サーバーに接続できません」**: API サーバーがポート 8001 で実行されていることを確認 417 | - **「CORS エラー」**: API はすべてのオリジンを許可するように設定されていますが、問題がある場合は、フロントエンドとバックエンドを同じマシンで実行してみてください 418 | 419 | ### 生成の問題 420 | 421 | - **「Wiki の生成中にエラーが発生しました」**: 非常に大きなリポジトリの場合は、まず小さいものから試してみてください 422 | - **「無効なリポジトリ形式」**: 有効な GitHub、GitLab、または Bitbucket URL の形式を使用していることを確認 423 | - **「リポジトリ構造を取得できませんでした」**: プライベートリポジトリの場合、適切な権限を持つ有効な個人アクセストークンを入力したことを確認 424 | - **「図のレンダリングエラー」**: アプリは自動的に壊れた図を修正しようとします 425 | 426 | ### 一般的な解決策 427 | 428 | 1. **両方のサーバーを再起動**: 単純な再起動でほとんどの問題が解決することがあります 429 | 2. **コンソールログを確認**: ブラウザの開発者ツールを開いて JavaScript エラーを確認 430 | 3. 
**API ログを確認**: API が実行されているターミナルで Python エラーを確認 431 | 432 | ## 🤝 貢献 433 | 434 | 貢献は歓迎します!以下のことを自由に行ってください: 435 | 436 | - バグや機能リクエストの問題を開く 437 | - コードを改善するためのプルリクエストを提出 438 | - フィードバックやアイデアを共有 439 | 440 | ## 📄 ライセンス 441 | 442 | このプロジェクトは MIT ライセンスの下でライセンスされています - 詳細は[LICENSE](LICENSE)ファイルを参照してください。 443 | 444 | ## ⭐ スター履歴 445 | 446 | [![スター履歴チャート](https://api.star-history.com/svg?repos=AsyncFuncAI/deepwiki-open&type=Date)](https://star-history.com/#AsyncFuncAI/deepwiki-open&Date) 447 | -------------------------------------------------------------------------------- /README.kr.md: -------------------------------------------------------------------------------- 1 | # DeepWiki-Open 2 | 3 | ![DeepWiki Banner](screenshots/Deepwiki.png) 4 | 5 | **DeepWiki**는 제가 직접 구현한 프로젝트로, GitHub, GitLab 또는 BitBucket 저장소에 대해 아름답고 대화형 위키를 자동 생성합니다! 저장소 이름만 입력하면 DeepWiki가 다음을 수행합니다: 6 | 7 | 1. 코드 구조 분석 8 | 2. 포괄적인 문서 생성 9 | 3. 모든 작동 방식을 설명하는 시각적 다이어그램 생성 10 | 4. 이를 쉽게 탐색할 수 있는 위키로 정리 11 | 12 | [!["Buy Me A Coffee"](https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png)](https://buymeacoffee.com/sheing) 13 | 14 | [![Twitter/X](https://img.shields.io/badge/Twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/sashimikun_void) 15 | [![Discord](https://img.shields.io/badge/Discord-7289DA?style=for-the-badge&logo=discord&logoColor=white)](https://discord.com/invite/VQMBGR8u5v) 16 | 17 | [English](./README.md) | [简体中文](./README.zh.md) | [日本語](./README.ja.md) | [Español](./README.es.md) | [한국어](./README.kr.md) | [Tiếng Việt](./README.vi.md) 18 | 19 | ## ✨ 주요 기능 20 | 21 | - **즉시 문서화**: 어떤 GitHub, GitLab 또는 BitBucket 저장소든 몇 초 만에 위키로 변환 22 | - **비공개 저장소 지원**: 개인 액세스 토큰으로 비공개 저장소 안전하게 접근 23 | - **스마트 분석**: AI 기반 코드 구조 및 관계 이해 24 | - **아름다운 다이어그램**: 아키텍처와 데이터 흐름을 시각화하는 자동 Mermaid 다이어그램 25 | - **쉬운 탐색**: 간단하고 직관적인 인터페이스로 위키 탐색 가능 26 | - **Ask 기능**: RAG 기반 AI와 저장소에 대해 대화하며 정확한 답변 얻기 27 | - **DeepResearch**: 복잡한 주제를 철저히 조사하는 다중 턴 연구 프로세스 28 | - **다양한 모델 제공자 지원**: 
Google Gemini, OpenAI, OpenRouter, 로컬 Ollama 모델 지원 29 | 30 | ## 🚀 빠른 시작 (초간단!) 31 | 32 | ### 옵션 1: Docker 사용 33 | 34 | ```bash 35 | # 저장소 클론 36 | git clone https://github.com/AsyncFuncAI/deepwiki-open.git 37 | cd deepwiki-open 38 | 39 | # API 키를 포함한 .env 파일 생성 40 | echo "GOOGLE_API_KEY=your_google_api_key" > .env 41 | echo "OPENAI_API_KEY=your_openai_api_key" >> .env 42 | # 선택 사항: OpenRouter 모델 사용 시 API 키 추가 43 | echo "OPENROUTER_API_KEY=your_openrouter_api_key" >> .env 44 | 45 | # Docker Compose로 실행 46 | docker-compose up 47 | ``` 48 | 49 | > 💡 **API 키는 어디서 얻나요:** 50 | > - [Google AI Studio](https://makersuite.google.com/app/apikey)에서 Google API 키 받기 51 | > - [OpenAI 플랫폼](https://platform.openai.com/api-keys)에서 OpenAI API 키 받기 52 | 53 | ### 옵션 2: 수동 설정 (권장) 54 | 55 | #### 1단계: API 키 설정 56 | 57 | 프로젝트 루트에 `.env` 파일을 만들고 다음 키들을 추가하세요: 58 | 59 | ``` 60 | GOOGLE_API_KEY=your_google_api_key 61 | OPENAI_API_KEY=your_openai_api_key 62 | # 선택 사항: OpenRouter 모델 사용 시 추가 63 | OPENROUTER_API_KEY=your_openrouter_api_key 64 | ``` 65 | 66 | #### 2단계: 백엔드 시작 67 | 68 | ```bash 69 | # Python 의존성 설치 70 | pip install -r api/requirements.txt 71 | 72 | # API 서버 실행 73 | python -m api.main 74 | ``` 75 | 76 | #### 3단계: 프론트엔드 시작 77 | 78 | ```bash 79 | # JavaScript 의존성 설치 80 | npm install 81 | # 또는 82 | yarn install 83 | 84 | # 웹 앱 실행 85 | npm run dev 86 | # 또는 87 | yarn dev 88 | ``` 89 | 90 | #### 4단계: DeepWiki 사용하기! 91 | 92 | 1. 브라우저에서 [http://localhost:3000](http://localhost:3000) 열기 93 | 2. GitHub, GitLab 또는 Bitbucket 저장소 입력 (예: `https://github.com/openai/codex`, `https://github.com/microsoft/autogen`, `https://gitlab.com/gitlab-org/gitlab`, `https://bitbucket.org/redradish/atlassian_app_versions`) 94 | 3. 비공개 저장소인 경우 "+ 액세스 토큰 추가" 클릭 후 GitHub 또는 GitLab 개인 액세스 토큰 입력 95 | 4. "Generate Wiki" 클릭 후 마법을 지켜보기! 96 | 97 | ## 🔍 작동 방식 98 | 99 | DeepWiki는 AI를 사용하여 다음을 수행합니다: 100 | 101 | 1. GitHub, GitLab 또는 Bitbucket 저장소 복제 및 분석 (토큰 인증이 필요한 비공개 저장소 포함) 102 | 2. 스마트 검색을 위한 코드 임베딩 생성 103 | 3. 
문맥 인지 AI로 문서 생성 (Google Gemini, OpenAI, OpenRouter 또는 로컬 Ollama 모델 사용) 104 | 4. 코드 관계를 설명하는 시각적 다이어그램 생성 105 | 5. 모든 것을 구조화된 위키로 정리 106 | 6. Ask 기능을 통한 저장소와의 지능형 Q&A 지원 107 | 7. DeepResearch로 심층 연구 기능 제공 108 | 109 | ```mermaid 110 | graph TD 111 | A[사용자가 GitHub/GitLab/Bitbucket 저장소 입력] --> AA{비공개 저장소인가?} 112 | AA -->|예| AB[액세스 토큰 추가] 113 | AA -->|아니오| B[저장소 복제] 114 | AB --> B 115 | B --> C[코드 구조 분석] 116 | C --> D[코드 임베딩 생성] 117 | 118 | D --> M{모델 제공자 선택} 119 | M -->|Google Gemini| E1[Gemini로 생성] 120 | M -->|OpenAI| E2[OpenAI로 생성] 121 | M -->|OpenRouter| E3[OpenRouter로 생성] 122 | M -->|로컬 Ollama| E4[Ollama로 생성] 123 | 124 | E1 --> E[문서 생성] 125 | E2 --> E 126 | E3 --> E 127 | E4 --> E 128 | 129 | D --> F[시각적 다이어그램 생성] 130 | E --> G[위키로 정리] 131 | F --> G 132 | G --> H[대화형 DeepWiki] 133 | 134 | classDef process stroke-width:2px; 135 | classDef data stroke-width:2px; 136 | classDef result stroke-width:2px; 137 | classDef decision stroke-width:2px; 138 | 139 | class A,D data; 140 | class AA,M decision; 141 | class B,C,E,F,G,AB,E1,E2,E3,E4 process; 142 | class H result; 143 | ``` 144 | 145 | ## 🛠️ 프로젝트 구조 146 | 147 | ``` 148 | deepwiki/ 149 | ├── api/ # 백엔드 API 서버 150 | │ ├── main.py # API 진입점 151 | │ ├── api.py # FastAPI 구현 152 | │ ├── rag.py # Retrieval Augmented Generation 153 | │ ├── data_pipeline.py # 데이터 처리 유틸리티 154 | │ └── requirements.txt # Python 의존성 155 | │ 156 | ├── src/ # 프론트엔드 Next.js 앱 157 | │ ├── app/ # Next.js 앱 디렉토리 158 | │ │ └── page.tsx # 메인 애플리케이션 페이지 159 | │ └── components/ # React 컴포넌트 160 | │ └── Mermaid.tsx # Mermaid 다이어그램 렌더러 161 | │ 162 | ├── public/ # 정적 자산 163 | ├── package.json # JavaScript 의존성 164 | └── .env # 환경 변수 (직접 생성) 165 | ``` 166 | 167 | ## 🛠️ 고급 설정 168 | 169 | ### 환경 변수 170 | 171 | | 변수명 | 설명 | 필수 | 비고 | 172 | |----------|-------------|----------|------| 173 | | `GOOGLE_API_KEY` | AI 생성용 Google Gemini API 키 | 예 | 174 | | `OPENAI_API_KEY` | 임베딩용 OpenAI API 키 | 예 | 175 | | `OPENROUTER_API_KEY` | 대체 모델용 OpenRouter API 키 | 아니오 | OpenRouter 
모델 사용 시 필요 | 176 | | `PORT` | API 서버 포트 (기본값: 8001) | 아니오 | API와 프론트엔드를 같은 머신에서 호스팅 시 `SERVER_BASE_URL`의 포트도 변경 필요 | 177 | | `SERVER_BASE_URL` | API 서버 기본 URL (기본값: http://localhost:8001) | 아니오 | 178 | 179 | ### 설정 파일 180 | 181 | DeepWiki는 시스템의 다양한 측면을 관리하기 위해 JSON 설정 파일을 사용합니다: 182 | 183 | 1. **`generator.json`**: 텍스트 생성 모델 설정 184 | - 사용 가능한 모델 제공자(Google, OpenAI, OpenRouter, Ollama) 정의 185 | - 각 제공자의 기본 및 사용 가능한 모델 지정 186 | - temperature와 top_p 같은 모델별 매개변수 포함 187 | 188 | 2. **`embedder.json`**: 임베딩 모델 및 텍스트 처리 설정 189 | - 벡터 저장소용 임베딩 모델 정의 190 | - RAG를 위한 검색기 설정 포함 191 | - 문서 청킹을 위한 텍스트 분할기 설정 지정 192 | 193 | 3. **`repo.json`**: 저장소 처리 설정 194 | - 특정 파일 및 디렉토리를 제외하는 파일 필터 포함 195 | - 저장소 크기 제한 및 처리 규칙 정의 196 | 197 | 기본적으로 이러한 파일은 `api/config/` 디렉토리에 위치합니다. `DEEPWIKI_CONFIG_DIR` 환경 변수를 사용하여 위치를 사용자 정의할 수 있습니다. 198 | 199 | ### Docker 설정 200 | 201 | Docker를 사용하여 DeepWiki를 실행할 수 있습니다: 202 | 203 | ```bash 204 | # GitHub 컨테이너 레지스트리에서 이미지 가져오기 205 | docker pull ghcr.io/asyncfuncai/deepwiki-open:latest 206 | 207 | # 환경 변수와 함께 컨테이너 실행 208 | docker run -p 8001:8001 -p 3000:3000 \ 209 | -e GOOGLE_API_KEY=your_google_api_key \ 210 | -e OPENAI_API_KEY=your_openai_api_key \ 211 | -e OPENROUTER_API_KEY=your_openrouter_api_key \ 212 | -v ~/.adalflow:/root/.adalflow \ 213 | ghcr.io/asyncfuncai/deepwiki-open:latest 214 | ``` 215 | 216 | 이 명령어는 또한 호스트의 `~/.adalflow`를 컨테이너의 `/root/.adalflow`에 마운트합니다. 이 경로는 다음을 저장하는 데 사용됩니다: 217 | - 복제된 저장소 (`~/.adalflow/repos/`) 218 | - 해당 저장소의 임베딩 및 인덱스 (`~/.adalflow/databases/`) 219 | - 생성된 위키의 캐시 (`~/.adalflow/wikicache/`) 220 | 221 | 이를 통해 컨테이너가 중지되거나 제거되어도 데이터가 유지됩니다. 222 | 223 | 또는 제공된 `docker-compose.yml` 파일을 사용하세요: 224 | 225 | ```bash 226 | # API 키가 포함된 .env 파일을 먼저 편집 227 | docker-compose up 228 | ``` 229 | 230 | (`docker-compose.yml` 파일은 위의 `docker run` 명령어와 유사하게 데이터 지속성을 위해 `~/.adalflow`를 마운트하도록 미리 구성되어 있습니다.) 
231 | 232 | #### Docker에서 .env 파일 사용하기 233 | 234 | .env 파일을 컨테이너에 마운트할 수도 있습니다: 235 | 236 | ```bash 237 | # API 키가 포함된 .env 파일 생성 238 | echo "GOOGLE_API_KEY=your_google_api_key" > .env 239 | echo "OPENAI_API_KEY=your_openai_api_key" >> .env 240 | echo "OPENROUTER_API_KEY=your_openrouter_api_key" >> .env 241 | 242 | # .env 파일을 마운트하여 컨테이너 실행 243 | docker run -p 8001:8001 -p 3000:3000 \ 244 | -v $(pwd)/.env:/app/.env \ 245 | -v ~/.adalflow:/root/.adalflow \ 246 | ghcr.io/asyncfuncai/deepwiki-open:latest 247 | ``` 248 | 249 | 이 명령어는 또한 호스트의 `~/.adalflow`를 컨테이너의 `/root/.adalflow`에 마운트합니다. 이 경로는 다음을 저장하는 데 사용됩니다: 250 | - 복제된 저장소 (`~/.adalflow/repos/`) 251 | - 해당 저장소의 임베딩 및 인덱스 (`~/.adalflow/databases/`) 252 | - 생성된 위키의 캐시 (`~/.adalflow/wikicache/`) 253 | 254 | 이를 통해 컨테이너가 중지되거나 제거되어도 데이터가 유지됩니다. 255 | 256 | #### 로컬에서 Docker 이미지 빌드하기 257 | 258 | 로컬에서 Docker 이미지를 빌드하려면: 259 | 260 | ```bash 261 | # 저장소 클론 262 | git clone https://github.com/AsyncFuncAI/deepwiki-open.git 263 | cd deepwiki-open 264 | 265 | # Docker 이미지 빌드 266 | docker build -t deepwiki-open . 267 | 268 | # 컨테이너 실행 269 | docker run -p 8001:8001 -p 3000:3000 \ 270 | -e GOOGLE_API_KEY=your_google_api_key \ 271 | -e OPENAI_API_KEY=your_openai_api_key \ 272 | -e OPENROUTER_API_KEY=your_openrouter_api_key \ 273 | deepwiki-open 274 | ``` 275 | 276 | ### API 서버 상세 정보 277 | 278 | API 서버는 다음을 제공합니다: 279 | - 저장소 복제 및 인덱싱 280 | - RAG (Retrieval Augmented Generation) 281 | - 스트리밍 채팅 완성 282 | 283 | 자세한 내용은 [API README](./api/README.md)를 참조하세요. 
284 | 285 | ## 🤖 제공자 기반 모델 선택 시스템 286 | 287 | DeepWiki는 이제 여러 LLM 제공자를 지원하는 유연한 제공자 기반 모델 선택 시스템을 구현했습니다: 288 | 289 | ### 지원되는 제공자 및 모델 290 | 291 | - **Google**: 기본값 `gemini-2.0-flash`, 또한 `gemini-1.5-flash`, `gemini-1.0-pro` 등도 지원 292 | - **OpenAI**: 기본값 `gpt-4o`, 또한 `o4-mini` 등도 지원 293 | - **OpenRouter**: Claude, Llama, Mistral 등 통합 API를 통해 다양한 모델 접근 가능 294 | - **Ollama**: `llama3`와 같은 로컬에서 실행되는 오픈소스 모델 지원 295 | 296 | ### 환경 변수 297 | 298 | 각 제공자는 해당 API 키 환경 변수가 필요합니다: 299 | 300 | ``` 301 | # API 키 302 | GOOGLE_API_KEY=귀하의_구글_API_키 # Google Gemini 모델에 필요 303 | OPENAI_API_KEY=귀하의_OpenAI_키 # OpenAI 모델에 필요 304 | OPENROUTER_API_KEY=귀하의_OpenRouter_키 # OpenRouter 모델에 필요 305 | 306 | # OpenAI API 기본 URL 구성 307 | OPENAI_BASE_URL=https://사용자정의_API_엔드포인트.com/v1 # 선택 사항, 사용자 정의 OpenAI API 엔드포인트용 308 | ``` 309 | 310 | ### 서비스 제공자를 위한 사용자 정의 모델 선택 311 | 312 | 사용자 정의 모델 선택 기능은 다음이 필요한 서비스 제공자를 위해 특별히 설계되었습니다: 313 | 314 | - 귀하는 조직 내 사용자에게 다양한 AI 모델 선택 옵션을 제공할 수 있습니다 315 | - 귀하는 코드 변경 없이 빠르게 진화하는 LLM 환경에 신속하게 적응할 수 있습니다 316 | - 귀하는 사전 정의된 목록에 없는 특수하거나 미세 조정된 모델을 지원할 수 있습니다 317 | 318 | 서비스 제공자는 사전 정의된 옵션에서 선택하거나 프론트엔드 인터페이스에서 사용자 정의 모델 식별자를 입력하여 모델 제공을 구현할 수 있습니다. 319 | 320 | ### 기업 전용 채널을 위한 기본 URL 구성 321 | 322 | OpenAI 클라이언트의 base_url 구성은 주로 비공개 API 채널이 있는 기업 사용자를 위해 설계되었습니다. 이 기능은: 323 | 324 | - 비공개 또는 기업 전용 API 엔드포인트 연결 가능 325 | - 조직이 자체 호스팅되거나 사용자 정의 배포된 LLM 서비스 사용 가능 326 | - 서드파티 OpenAI API 호환 서비스와의 통합 지원 327 | 328 | **출시 예정**: 향후 업데이트에서 DeepWiki는 사용자가 요청에서 자신의 API 키를 제공해야 하는 모드를 지원할 예정입니다. 이를 통해 비공개 채널이 있는 기업 고객은 DeepWiki 배포와 자격 증명을 공유하지 않고도 기존 API 구성을 사용할 수 있습니다. 
329 | 330 | ## 🔌 OpenRouter 통합 331 | 332 | DeepWiki는 이제 [OpenRouter](https://openrouter.ai/)를 모델 제공자로 지원하여, 단일 API를 통해 수백 개의 AI 모델에 접근할 수 있습니다: 333 | 334 | - **다양한 모델 옵션**: OpenAI, Anthropic, Google, Meta, Mistral 등 다양한 모델 이용 가능 335 | - **간편한 설정**: OpenRouter API 키만 추가하고 원하는 모델 선택 336 | - **비용 효율성**: 예산과 성능에 맞는 모델 선택 가능 337 | - **손쉬운 전환**: 코드 변경 없이 다양한 모델 간 전환 가능 338 | 339 | ### DeepWiki에서 OpenRouter 사용법 340 | 341 | 1. **API 키 받기**: [OpenRouter](https://openrouter.ai/) 가입 후 API 키 획득 342 | 2. **환경 변수 추가**: `.env` 파일에 `OPENROUTER_API_KEY=your_key` 추가 343 | 3. **UI에서 활성화**: 홈페이지에서 "Use OpenRouter API" 옵션 체크 344 | 4. **모델 선택**: GPT-4o, Claude 3.5 Sonnet, Gemini 2.0 등 인기 모델 선택 345 | 346 | OpenRouter는 특히 다음과 같은 경우 유용합니다: 347 | - 여러 서비스에 가입하지 않고 다양한 모델 시도 348 | - 지역 제한이 있는 모델 접근 349 | - 모델 제공자별 성능 비교 350 | - 비용과 성능 최적화 351 | 352 | ## 🤖 Ask 및 DeepResearch 기능 353 | 354 | ### Ask 기능 355 | 356 | Ask 기능은 Retrieval Augmented Generation (RAG)을 사용해 저장소와 대화할 수 있습니다: 357 | 358 | - **문맥 인지 답변**: 저장소 내 실제 코드 기반으로 정확한 답변 제공 359 | - **RAG 기반**: 관련 코드 조각을 검색해 근거 있는 답변 생성 360 | - **실시간 스트리밍**: 답변 생성 과정을 실시간으로 확인 가능 361 | - **대화 기록 유지**: 질문 간 문맥을 유지해 더 일관된 대화 가능 362 | 363 | ### DeepResearch 기능 364 | 365 | DeepResearch는 다중 턴 연구 프로세스를 통해 저장소 분석을 한층 심화합니다: 366 | 367 | - **심층 조사**: 여러 연구 반복을 통해 복잡한 주제 철저히 탐구 368 | - **구조화된 프로세스**: 연구 계획, 업데이트, 최종 결론 단계로 진행 369 | - **자동 연속 진행**: AI가 최대 5회 반복해 연구를 계속 진행 370 | - **연구 단계**: 371 | 1. **연구 계획**: 접근법과 초기 발견 사항 개요 작성 372 | 2. **연구 업데이트**: 이전 반복 내용을 바탕으로 새로운 통찰 추가 373 | 3. **최종 결론**: 모든 반복을 종합한 포괄적 답변 제공 374 | 375 | DeepResearch를 사용하려면 질문 제출 전 Ask 인터페이스에서 "Deep Research" 스위치를 켜세요. 
376 | 377 | ## 📱 스크린샷 378 | 379 | ![DeepWiki Main Interface](screenshots/Interface.png) 380 | *DeepWiki의 메인 인터페이스* 381 | 382 | ![Private Repository Support](screenshots/privaterepo.png) 383 | *개인 액세스 토큰으로 비공개 저장소 접근* 384 | 385 | ![DeepResearch Feature](screenshots/DeepResearch.png) 386 | *DeepResearch는 복잡한 주제에 대해 다중 턴 조사를 수행* 387 | 388 | ### 데모 영상 389 | 390 | [![DeepWiki Demo Video](https://img.youtube.com/vi/zGANs8US8B4/0.jpg)](https://youtu.be/zGANs8US8B4) 391 | 392 | *DeepWiki 작동 영상 보기!* 393 | 394 | ## ❓ 문제 해결 395 | 396 | ### API 키 문제 397 | - **"환경 변수 누락"**: `.env` 파일이 프로젝트 루트에 있고 필요한 API 키가 포함되어 있는지 확인 398 | - **"API 키가 유효하지 않음"**: 키를 정확히 복사했는지, 공백이 없는지 확인 399 | - **"OpenRouter API 오류"**: OpenRouter API 키가 유효하고 충분한 크레딧이 있는지 확인 400 | 401 | ### 연결 문제 402 | - **"API 서버에 연결할 수 없음"**: API 서버가 포트 8001에서 실행 중인지 확인 403 | - **"CORS 오류"**: API가 모든 출처를 허용하도록 설정되어 있지만 문제가 있으면 프론트엔드와 백엔드를 같은 머신에서 실행해 보세요 404 | 405 | ### 생성 문제 406 | - **"위키 생성 오류"**: 아주 큰 저장소는 먼저 작은 저장소로 시도해 보세요 407 | - **"잘못된 저장소 형식"**: 유효한 GitHub, GitLab 또는 Bitbucket URL 형식인지 확인 408 | - **"저장소 구조를 가져올 수 없음"**: 비공개 저장소라면 적절한 권한의 개인 액세스 토큰을 입력했는지 확인 409 | - **"다이어그램 렌더링 오류"**: 앱이 자동으로 다이어그램 오류를 수정하려 시도합니다 410 | 411 | ### 일반적인 해결법 412 | 1. **서버 둘 다 재시작**: 간단한 재시작으로 대부분 문제 해결 413 | 2. **콘솔 로그 확인**: 브라우저 개발자 도구에서 자바스크립트 오류 확인 414 | 3. **API 로그 확인**: API 실행 터미널에서 Python 오류 확인 415 | 416 | ## 🤝 기여 417 | 418 | 기여를 환영합니다! 다음을 자유롭게 해주세요: 419 | - 버그나 기능 요청을 위한 이슈 열기 420 | - 코드 개선을 위한 풀 리퀘스트 제출 421 | - 피드백과 아이디어 공유 422 | 423 | ## 📄 라이선스 424 | 425 | 이 프로젝트는 MIT 라이선스 하에 있습니다 - 자세한 내용은 [LICENSE](LICENSE) 파일 참고. 
426 | 427 | ## ⭐ 스타 히스토리 428 | 429 | [![Star History Chart](https://api.star-history.com/svg?repos=AsyncFuncAI/deepwiki-open&type=Date)](https://star-history.com/#AsyncFuncAI/deepwiki-open&Date) 430 | -------------------------------------------------------------------------------- /README.zh.md: -------------------------------------------------------------------------------- 1 | # DeepWiki-Open 2 | 3 | ![DeepWiki 横幅](screenshots/Deepwiki.png) 4 | 5 | **DeepWiki**可以为任何GitHub、GitLab或BitBucket代码仓库自动创建美观、交互式的Wiki!只需输入仓库名称,DeepWiki将: 6 | 7 | 1. 分析代码结构 8 | 2. 生成全面的文档 9 | 3. 创建可视化图表解释一切如何运作 10 | 4. 将所有内容整理成易于导航的Wiki 11 | 12 | [!["Buy Me A Coffee"](https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png)](https://buymeacoffee.com/sheing) 13 | 14 | [![Twitter/X](https://img.shields.io/badge/Twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white)](https://x.com/sashimikun_void) 15 | [![Discord](https://img.shields.io/badge/Discord-7289DA?style=for-the-badge&logo=discord&logoColor=white)](https://discord.com/invite/VQMBGR8u5v) 16 | 17 | [English](./README.md) | [简体中文](./README.zh.md) | [日本語](./README.ja.md) | [Español](./README.es.md) | [한국어](./README.kr.md) | [Tiếng Việt](./README.vi.md) 18 | 19 | ## ✨ 特点 20 | 21 | - **即时文档**:几秒钟内将任何GitHub、GitLab或BitBucket仓库转换为Wiki 22 | - **私有仓库支持**:使用个人访问令牌安全访问私有仓库 23 | - **智能分析**:AI驱动的代码结构和关系理解 24 | - **精美图表**:自动生成Mermaid图表可视化架构和数据流 25 | - **简易导航**:简单、直观的界面探索Wiki 26 | - **提问功能**:使用RAG驱动的AI与您的仓库聊天,获取准确答案 27 | - **深度研究**:多轮研究过程,彻底调查复杂主题 28 | - **多模型提供商**:支持Google Gemini、OpenAI、OpenRouter和本地Ollama模型 29 | 30 | ## 🚀 快速开始(超级简单!) 
31 | 32 | ### 选项1:使用Docker 33 | 34 | ```bash 35 | # 克隆仓库 36 | git clone https://github.com/AsyncFuncAI/deepwiki-open.git 37 | cd deepwiki-open 38 | 39 | # 创建包含API密钥的.env文件 40 | echo "GOOGLE_API_KEY=your_google_api_key" > .env 41 | echo "OPENAI_API_KEY=your_openai_api_key" >> .env 42 | # 可选:如果您想使用OpenRouter模型,添加OpenRouter API密钥 43 | echo "OPENROUTER_API_KEY=your_openrouter_api_key" >> .env 44 | 45 | # 使用Docker Compose运行 46 | docker-compose up 47 | ``` 48 | 49 | (上述 Docker 命令以及 `docker-compose.yml` 配置会挂载您主机上的 `~/.adalflow` 目录到容器内的 `/root/.adalflow`。此路径用于存储: 50 | - 克隆的仓库 (`~/.adalflow/repos/`) 51 | - 仓库的嵌入和索引 (`~/.adalflow/databases/`) 52 | - 缓存的已生成 Wiki 内容 (`~/.adalflow/wikicache/`) 53 | 54 | 这确保了即使容器停止或移除,您的数据也能持久保存。) 55 | 56 | > 💡 **获取这些密钥的地方:** 57 | > - 从[Google AI Studio](https://makersuite.google.com/app/apikey)获取Google API密钥 58 | > - 从[OpenAI Platform](https://platform.openai.com/api-keys)获取OpenAI API密钥 59 | 60 | ### 选项2:手动设置(推荐) 61 | 62 | #### 步骤1:设置API密钥 63 | 64 | 在项目根目录创建一个`.env`文件,包含以下密钥: 65 | 66 | ``` 67 | GOOGLE_API_KEY=your_google_api_key 68 | OPENAI_API_KEY=your_openai_api_key 69 | # 可选:如果您想使用OpenRouter模型,添加此项 70 | OPENROUTER_API_KEY=your_openrouter_api_key 71 | ``` 72 | 73 | #### 步骤2:启动后端 74 | 75 | ```bash 76 | # 安装Python依赖 77 | pip install -r api/requirements.txt 78 | 79 | # 启动API服务器 80 | python -m api.main 81 | ``` 82 | 83 | #### 步骤3:启动前端 84 | 85 | ```bash 86 | # 安装JavaScript依赖 87 | npm install 88 | # 或 89 | yarn install 90 | 91 | # 启动Web应用 92 | npm run dev 93 | # 或 94 | yarn dev 95 | ``` 96 | 97 | #### 步骤4:使用DeepWiki! 98 | 99 | 1. 在浏览器中打开[http://localhost:3000](http://localhost:3000) 100 | 2. 输入GitHub、GitLab或Bitbucket仓库(如`https://github.com/openai/codex`、`https://github.com/microsoft/autogen`、`https://gitlab.com/gitlab-org/gitlab`或`https://bitbucket.org/redradish/atlassian_app_versions`) 101 | 3. 对于私有仓库,点击"+ 添加访问令牌"并输入您的GitHub或GitLab个人访问令牌 102 | 4. 点击"生成Wiki",见证奇迹的发生! 103 | 104 | ## 🔍 工作原理 105 | 106 | DeepWiki使用AI来: 107 | 108 | 1. 
克隆并分析GitHub、GitLab或Bitbucket仓库(包括使用令牌认证的私有仓库) 109 | 2. 创建代码嵌入用于智能检索 110 | 3. 使用上下文感知AI生成文档(使用Google Gemini、OpenAI、OpenRouter或本地Ollama模型) 111 | 4. 创建可视化图表解释代码关系 112 | 5. 将所有内容组织成结构化Wiki 113 | 6. 通过提问功能实现与仓库的智能问答 114 | 7. 通过深度研究功能提供深入研究能力 115 | 116 | ```mermaid 117 | graph TD 118 | A[用户输入GitHub/GitLab/Bitbucket仓库] --> AA{私有仓库?} 119 | AA -->|是| AB[添加访问令牌] 120 | AA -->|否| B[克隆仓库] 121 | AB --> B 122 | B --> C[分析代码结构] 123 | C --> D[创建代码嵌入] 124 | 125 | D --> M{选择模型提供商} 126 | M -->|Google Gemini| E1[使用Gemini生成] 127 | M -->|OpenAI| E2[使用OpenAI生成] 128 | M -->|OpenRouter| E3[使用OpenRouter生成] 129 | M -->|本地Ollama| E4[使用Ollama生成] 130 | 131 | E1 --> E[生成文档] 132 | E2 --> E 133 | E3 --> E 134 | E4 --> E 135 | 136 | D --> F[创建可视化图表] 137 | E --> G[组织为Wiki] 138 | F --> G 139 | G --> H[交互式DeepWiki] 140 | 141 | classDef process stroke-width:2px; 142 | classDef data stroke-width:2px; 143 | classDef result stroke-width:2px; 144 | classDef decision stroke-width:2px; 145 | 146 | class A,D data; 147 | class AA,M decision; 148 | class B,C,E,F,G,AB,E1,E2,E3,E4 process; 149 | class H result; 150 | ``` 151 | 152 | ## 🛠️ 项目结构 153 | 154 | ``` 155 | deepwiki/ 156 | ├── api/ # 后端API服务器 157 | │ ├── main.py # API入口点 158 | │ ├── api.py # FastAPI实现 159 | │ ├── rag.py # 检索增强生成 160 | │ ├── data_pipeline.py # 数据处理工具 161 | │ └── requirements.txt # Python依赖 162 | │ 163 | ├── src/ # 前端Next.js应用 164 | │ ├── app/ # Next.js应用目录 165 | │ │ └── page.tsx # 主应用页面 166 | │ └── components/ # React组件 167 | │ └── Mermaid.tsx # Mermaid图表渲染器 168 | │ 169 | ├── public/ # 静态资源 170 | ├── package.json # JavaScript依赖 171 | └── .env # 环境变量(需要创建) 172 | ``` 173 | 174 | ## 🤖 提问和深度研究功能 175 | 176 | ### 提问功能 177 | 178 | 提问功能允许您使用检索增强生成(RAG)与您的仓库聊天: 179 | 180 | - **上下文感知响应**:基于仓库中实际代码获取准确答案 181 | - **RAG驱动**:系统检索相关代码片段,提供有根据的响应 182 | - **实时流式传输**:实时查看生成的响应,获得更交互式的体验 183 | - **对话历史**:系统在问题之间保持上下文,实现更连贯的交互 184 | 185 | ### 深度研究功能 186 | 187 | 深度研究通过多轮研究过程将仓库分析提升到新水平: 188 | 189 | - **深入调查**:通过多次研究迭代彻底探索复杂主题 190 | - 
**结构化过程**:遵循清晰的研究计划,包含更新和全面结论 191 | - **自动继续**:AI自动继续研究直到达成结论(最多5次迭代) 192 | - **研究阶段**: 193 | 1. **研究计划**:概述方法和初步发现 194 | 2. **研究更新**:在前一轮迭代基础上增加新见解 195 | 3. **最终结论**:基于所有迭代提供全面答案 196 | 197 | 要使用深度研究,只需在提交问题前在提问界面中切换"深度研究"开关。 198 | 199 | ## 📱 截图 200 | 201 | ![DeepWiki主界面](screenshots/Interface.png) 202 | *DeepWiki的主界面* 203 | 204 | ![私有仓库支持](screenshots/privaterepo.png) 205 | *使用个人访问令牌访问私有仓库* 206 | 207 | ![深度研究功能](screenshots/DeepResearch.png) 208 | *深度研究为复杂主题进行多轮调查* 209 | 210 | ### 演示视频 211 | 212 | [![DeepWiki演示视频](https://img.youtube.com/vi/zGANs8US8B4/0.jpg)](https://youtu.be/zGANs8US8B4) 213 | 214 | *观看DeepWiki实际操作!* 215 | 216 | ## ❓ 故障排除 217 | 218 | ### API密钥问题 219 | - **"缺少环境变量"**:确保您的`.env`文件位于项目根目录并包含所需的API密钥 220 | - **"API密钥无效"**:检查您是否正确复制了完整密钥,没有多余空格 221 | - **"OpenRouter API错误"**:验证您的OpenRouter API密钥有效且有足够的额度 222 | 223 | ### 连接问题 224 | - **"无法连接到API服务器"**:确保API服务器在端口8001上运行 225 | - **"CORS错误"**:API配置为允许所有来源,但如果您遇到问题,请尝试在同一台机器上运行前端和后端 226 | 227 | ### 生成问题 228 | - **"生成Wiki时出错"**:对于非常大的仓库,请先尝试较小的仓库 229 | - **"无效的仓库格式"**:确保您使用有效的GitHub、GitLab或Bitbucket URL格式 230 | - **"无法获取仓库结构"**:对于私有仓库,确保您输入了具有适当权限的有效个人访问令牌 231 | - **"图表渲染错误"**:应用程序将自动尝试修复损坏的图表 232 | 233 | ### 常见解决方案 234 | 1. **重启两个服务器**:有时简单的重启可以解决大多数问题 235 | 2. **检查控制台日志**:打开浏览器开发者工具查看任何JavaScript错误 236 | 3. 
**检查API日志**:查看运行API的终端中的Python错误 237 | 238 | ## 🤝 贡献 239 | 240 | 欢迎贡献!随时: 241 | - 为bug或功能请求开issue 242 | - 提交pull request改进代码 243 | - 分享您的反馈和想法 244 | 245 | ## 📄 许可证 246 | 247 | 本项目根据MIT许可证授权 - 详情请参阅[LICENSE](LICENSE)文件。 248 | 249 | ## ⭐ 星标历史 250 | 251 | [![星标历史图表](https://api.star-history.com/svg?repos=AsyncFuncAI/deepwiki-open&type=Date)](https://star-history.com/#AsyncFuncAI/deepwiki-open&Date) 252 | 253 | ## 🤖 基于提供者的模型选择系统 254 | 255 | DeepWiki 现在实现了灵活的基于提供者的模型选择系统,支持多种 LLM 提供商: 256 | 257 | ### 支持的提供商和模型 258 | 259 | - **Google**: 默认使用 `gemini-2.0-flash`,还支持 `gemini-1.5-flash`、`gemini-1.0-pro` 等 260 | - **OpenAI**: 默认使用 `gpt-4o`,还支持 `o4-mini` 等 261 | - **OpenRouter**: 通过统一 API 访问多种模型,包括 Claude、Llama、Mistral 等 262 | - **Ollama**: 支持本地运行的开源模型,如 `llama3` 263 | 264 | ### 环境变量 265 | 266 | 每个提供商需要相应的 API 密钥环境变量: 267 | 268 | ``` 269 | # API 密钥 270 | GOOGLE_API_KEY=你的谷歌API密钥 # 使用 Google Gemini 模型必需 271 | OPENAI_API_KEY=你的OpenAI密钥 # 使用 OpenAI 模型必需 272 | OPENROUTER_API_KEY=你的OpenRouter密钥 # 使用 OpenRouter 模型必需 273 | 274 | # OpenAI API 基础 URL 配置 275 | OPENAI_BASE_URL=https://自定义API端点.com/v1 # 可选,用于自定义 OpenAI API 端点 276 | ``` 277 | 278 | ### 为服务提供者设计的自定义模型选择 279 | 280 | 自定义模型选择功能专为需要以下功能的服务提供者设计: 281 | 282 | - 您可在您的组织内部为用户提供多种 AI 模型选择 283 | - 您无需代码更改即可快速适应快速发展的 LLM 领域 284 | - 您可支持预定义列表中没有的专业或微调模型 285 | 286 | 使用者可以通过从服务提供者预定义选项中选择或在前端界面中输入自定义模型标识符来实现其模型产品。 287 | 288 | ### 为企业私有渠道设计的基础 URL 配置 289 | 290 | OpenAI 客户端的 base_url 配置主要为拥有私有 API 渠道的企业用户设计。此功能: 291 | 292 | - 支持连接到私有或企业特定的 API 端点 293 | - 允许组织使用自己的自托管或自定义部署的 LLM 服务 294 | - 支持与第三方 OpenAI API 兼容服务的集成 295 | 296 | **即将推出**:在未来的更新中,DeepWiki 将支持一种模式,用户需要在请求中提供自己的 API 密钥。这将允许拥有私有渠道的企业客户使用其现有的 API 安排,而不是与 DeepWiki 部署共享凭据。 297 | 298 | ### 环境变量 299 | 300 | 每个提供商需要其相应的API密钥环境变量: 301 | 302 | ``` 303 | # API密钥 304 | GOOGLE_API_KEY=your_google_api_key # Google Gemini模型必需 305 | OPENAI_API_KEY=your_openai_api_key # OpenAI模型必需 306 | OPENROUTER_API_KEY=your_openrouter_api_key # OpenRouter模型必需 307 | 308 | # OpenAI API基础URL配置 309 | 
OPENAI_BASE_URL=https://custom-api-endpoint.com/v1 # 可选,用于自定义OpenAI API端点 310 | 311 | # 配置目录 312 | DEEPWIKI_CONFIG_DIR=/path/to/custom/config/dir # 可选,用于自定义配置文件位置 313 | ``` 314 | 如果不使用ollama模式,那么需要配置OpenAI API密钥用于embeddings。其他密钥只有配置并使用使用对应提供商的模型时才需要。 315 | 316 | ### 配置文件 317 | 318 | DeepWiki使用JSON配置文件管理系统的各个方面: 319 | 320 | 1. **`generator.json`**:文本生成模型配置 321 | - 定义可用的模型提供商(Google、OpenAI、OpenRouter、Ollama) 322 | - 指定每个提供商的默认和可用模型 323 | - 包含特定模型的参数,如temperature和top_p 324 | 325 | 2. **`embedder.json`**:嵌入模型和文本处理配置 326 | - 定义用于向量存储的嵌入模型 327 | - 包含用于RAG的检索器配置 328 | - 指定文档分块的文本分割器设置 329 | 330 | 3. **`repo.json`**:仓库处理配置 331 | - 包含排除特定文件和目录的文件过滤器 332 | - 定义仓库大小限制和处理规则 333 | 334 | 默认情况下,这些文件位于`api/config/`目录中。您可以使用`DEEPWIKI_CONFIG_DIR`环境变量自定义它们的位置。 335 | 336 | ### 面向服务提供商的自定义模型选择 337 | 338 | 自定义模型选择功能专为需要以下功能的服务提供者设计: 339 | 340 | - 您可在您的组织内部为用户提供多种 AI 模型选择 341 | - 您无需代码更改即可快速适应快速发展的 LLM 领域 342 | - 您可支持预定义列表中没有的专业或微调模型 343 | 344 | 使用者可以通过从服务提供者预定义选项中选择或在前端界面中输入自定义模型标识符来实现其模型产品。 345 | 346 | ### 为企业私有渠道设计的基础 URL 配置 347 | 348 | OpenAI 客户端的 base_url 配置主要为拥有私有 API 渠道的企业用户设计。此功能: 349 | 350 | - 支持连接到私有或企业特定的 API 端点 351 | - 允许组织使用自己的自托管或自定义部署的 LLM 服务 352 | - 支持与第三方 OpenAI API 兼容服务的集成 353 | 354 | **即将推出**:在未来的更新中,DeepWiki 将支持一种模式,用户需要在请求中提供自己的 API 密钥。这将允许拥有私有渠道的企业客户使用其现有的 API 安排,而不是与 DeepWiki 部署共享凭据。 355 | -------------------------------------------------------------------------------- /api/README.md: -------------------------------------------------------------------------------- 1 | # 🚀 DeepWiki API 2 | 3 | This is the backend API for DeepWiki, providing smart code analysis and AI-powered documentation generation. 
4 | 5 | ## ✨ Features 6 | 7 | - **Streaming AI Responses**: Real-time responses using Google's Generative AI (Gemini) 8 | - **Smart Code Analysis**: Automatically analyzes GitHub repositories 9 | - **RAG Implementation**: Retrieval Augmented Generation for context-aware responses 10 | - **Local Storage**: All data stored locally - no cloud dependencies 11 | - **Conversation History**: Maintains context across multiple questions 12 | 13 | ## 🔧 Quick Setup 14 | 15 | ### Step 1: Install Dependencies 16 | 17 | ```bash 18 | # From the project root 19 | pip install -r api/requirements.txt 20 | ``` 21 | 22 | ### Step 2: Set Up Environment Variables 23 | 24 | Create a `.env` file in the project root: 25 | 26 | ``` 27 | # Required API Keys 28 | GOOGLE_API_KEY=your_google_api_key # Required for Google Gemini models 29 | OPENAI_API_KEY=your_openai_api_key # Required for embeddings and OpenAI models 30 | 31 | # Optional API Keys 32 | OPENROUTER_API_KEY=your_openrouter_api_key # Required only if using OpenRouter models 33 | 34 | # AWS Bedrock Configuration 35 | AWS_ACCESS_KEY_ID=your_aws_access_key_id # Required for AWS Bedrock models 36 | AWS_SECRET_ACCESS_KEY=your_aws_secret_key # Required for AWS Bedrock models 37 | AWS_REGION=us-east-1 # Optional, defaults to us-east-1 38 | AWS_ROLE_ARN=your_aws_role_arn # Optional, for role-based authentication 39 | 40 | # OpenAI API Configuration 41 | OPENAI_BASE_URL=https://custom-api-endpoint.com/v1 # Optional, for custom OpenAI API endpoints 42 | 43 | # Ollama host 44 | OLLAMA_HOST=https://your_ollama_host" # Optional: Add Ollama host if not local. default: http://localhost:11434 45 | 46 | # Server Configuration 47 | PORT=8001 # Optional, defaults to 8001 48 | ``` 49 | 50 | If you're not using Ollama mode, you need to configure an OpenAI API key for embeddings. Other API keys are only required when configuring and using models from the corresponding providers. 
51 | 52 | > 💡 **Where to get these keys:** 53 | > - Get a Google API key from [Google AI Studio](https://makersuite.google.com/app/apikey) 54 | > - Get an OpenAI API key from [OpenAI Platform](https://platform.openai.com/api-keys) 55 | > - Get an OpenRouter API key from [OpenRouter](https://openrouter.ai/keys) 56 | > - Get AWS credentials from [AWS IAM Console](https://console.aws.amazon.com/iam/) 57 | 58 | #### Advanced Environment Configuration 59 | 60 | ##### Provider-Based Model Selection 61 | DeepWiki supports multiple LLM providers. The environment variables above are required depending on which providers you want to use: 62 | 63 | - **Google Gemini**: Requires `GOOGLE_API_KEY` 64 | - **OpenAI**: Requires `OPENAI_API_KEY` 65 | - **OpenRouter**: Requires `OPENROUTER_API_KEY` 66 | - **AWS Bedrock**: Requires `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` 67 | - **Ollama**: No API key required (runs locally) 68 | 69 | ##### Custom OpenAI API Endpoints 70 | The `OPENAI_BASE_URL` variable allows you to specify a custom endpoint for the OpenAI API. This is useful for: 71 | 72 | - Enterprise users with private API channels 73 | - Organizations using self-hosted or custom-deployed LLM services 74 | - Integration with third-party OpenAI API-compatible services 75 | 76 | **Example:** you can use the endpoint which support the OpenAI protocol provided by any organization 77 | ``` 78 | OPENAI_BASE_URL=https://custom-openai-endpoint.com/v1 79 | ``` 80 | 81 | ##### Configuration Files 82 | DeepWiki now uses JSON configuration files to manage various system components instead of hardcoded values: 83 | 84 | 1. **`generator.json`**: Configuration for text generation models 85 | - Located in `api/config/` by default 86 | - Defines available model providers (Google, OpenAI, OpenRouter, AWS Bedrock, Ollama) 87 | - Specifies default and available models for each provider 88 | - Contains model-specific parameters like temperature and top_p 89 | 90 | 2. 
**`embedder.json`**: Configuration for embedding models and text processing 91 | - Located in `api/config/` by default 92 | - Defines embedding models for vector storage 93 | - Contains retriever configuration for RAG 94 | - Specifies text splitter settings for document chunking 95 | 96 | 3. **`repo.json`**: Configuration for repository handling 97 | - Located in `api/config/` by default 98 | - Contains file filters to exclude certain files and directories 99 | - Defines repository size limits and processing rules 100 | 101 | You can customize the configuration directory location using the environment variable: 102 | 103 | ``` 104 | DEEPWIKI_CONFIG_DIR=/path/to/custom/config/dir # Optional, for custom config file location 105 | ``` 106 | 107 | This allows you to maintain different configurations for various environments or deployment scenarios without modifying the code. 108 | 109 | ### Step 3: Start the API Server 110 | 111 | ```bash 112 | # From the project root 113 | python -m api.main 114 | ``` 115 | 116 | The API will be available at `http://localhost:8001` 117 | 118 | ## 🧠 How It Works 119 | 120 | ### 1. Repository Indexing 121 | When you provide a GitHub repository URL, the API: 122 | - Clones the repository locally (if not already cloned) 123 | - Reads all files in the repository 124 | - Creates embeddings for the files using OpenAI 125 | - Stores the embeddings in a local database 126 | 127 | ### 2. Smart Retrieval (RAG) 128 | When you ask a question: 129 | - The API finds the most relevant code snippets 130 | - These snippets are used as context for the AI 131 | - The AI generates a response based on this context 132 | 133 | ### 3. Real-Time Streaming 134 | - Responses are streamed in real-time 135 | - You see the answer as it's being generated 136 | - This creates a more interactive experience 137 | 138 | ## 📡 API Endpoints 139 | 140 | ### GET / 141 | Returns basic API information and available endpoints. 
142 | 143 | ### POST /chat/completions/stream 144 | Streams an AI-generated response about a GitHub repository. 145 | 146 | **Request Body:** 147 | 148 | ```json 149 | { 150 | "repo_url": "https://github.com/username/repo", 151 | "messages": [ 152 | { 153 | "role": "user", 154 | "content": "What does this repository do?" 155 | } 156 | ], 157 | "filePath": "optional/path/to/file.py" // Optional 158 | } 159 | ``` 160 | 161 | **Response:** 162 | A streaming response with the generated text. 163 | 164 | ## 📝 Example Code 165 | 166 | ```python 167 | import requests 168 | 169 | # API endpoint 170 | url = "http://localhost:8001/chat/completions/stream" 171 | 172 | # Request data 173 | payload = { 174 | "repo_url": "https://github.com/AsyncFuncAI/deepwiki-open", 175 | "messages": [ 176 | { 177 | "role": "user", 178 | "content": "Explain how React components work" 179 | } 180 | ] 181 | } 182 | 183 | # Make streaming request 184 | response = requests.post(url, json=payload, stream=True) 185 | 186 | # Process the streaming response 187 | for chunk in response.iter_content(chunk_size=None): 188 | if chunk: 189 | print(chunk.decode('utf-8'), end='', flush=True) 190 | ``` 191 | 192 | ## 💾 Storage 193 | 194 | All data is stored locally on your machine: 195 | - Cloned repositories: `~/.adalflow/repos/` 196 | - Embeddings and indexes: `~/.adalflow/databases/` 197 | - Generated wiki cache: `~/.adalflow/wikicache/` 198 | 199 | No cloud storage is used - everything runs on your computer! 
"""AWS Bedrock ModelClient integration."""

import os
import json
import logging
import boto3
import botocore
import backoff
from typing import Dict, Any, Optional, List, Generator, Union, AsyncGenerator

from adalflow.core.model_client import ModelClient
from adalflow.core.types import ModelType, GeneratorOutput

log = logging.getLogger(__name__)

class BedrockClient(ModelClient):
    __doc__ = r"""A component wrapper for the AWS Bedrock API client.

    AWS Bedrock provides a unified API that gives access to various foundation models
    including Amazon's own models and third-party models like Anthropic Claude.

    Example:
    ```python
    from api.bedrock_client import BedrockClient

    client = BedrockClient()
    generator = adal.Generator(
        model_client=client,
        model_kwargs={"model": "anthropic.claude-3-sonnet-20240229-v1:0"}
    )
    ```
    """

    def __init__(
        self,
        aws_access_key_id: Optional[str] = None,
        aws_secret_access_key: Optional[str] = None,
        aws_region: Optional[str] = None,
        aws_role_arn: Optional[str] = None,
        *args,
        **kwargs
    ) -> None:
        """Initialize the AWS Bedrock client.

        Args:
            aws_access_key_id: AWS access key ID. If not provided, will use environment variable AWS_ACCESS_KEY_ID.
            aws_secret_access_key: AWS secret access key. If not provided, will use environment variable AWS_SECRET_ACCESS_KEY.
            aws_region: AWS region. If not provided, will use environment variable AWS_REGION.
            aws_role_arn: AWS IAM role ARN for role-based authentication. If not provided, will use environment variable AWS_ROLE_ARN.
        """
        super().__init__(*args, **kwargs)
        # Explicit arguments win; environment variables are the fallback.
        self.aws_access_key_id = aws_access_key_id or os.environ.get("AWS_ACCESS_KEY_ID")
        self.aws_secret_access_key = aws_secret_access_key or os.environ.get("AWS_SECRET_ACCESS_KEY")
        # us-east-1 is the hard-coded default region when neither source supplies one.
        self.aws_region = aws_region or os.environ.get("AWS_REGION", "us-east-1")
        self.aws_role_arn = aws_role_arn or os.environ.get("AWS_ROLE_ARN")

        self.sync_client = self.init_sync_client()
        self.async_client = None  # Initialize async client only when needed

    def init_sync_client(self):
        """Initialize the synchronous AWS Bedrock client.

        Returns:
            A boto3 ``bedrock-runtime`` client, or ``None`` if initialization failed
            (callers must check — see ``call``).
        """
        try:
            # Create a session with the provided credentials
            session = boto3.Session(
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_secret_access_key,
                region_name=self.aws_region
            )

            # If a role ARN is provided, assume that role
            if self.aws_role_arn:
                sts_client = session.client('sts')
                assumed_role = sts_client.assume_role(
                    RoleArn=self.aws_role_arn,
                    RoleSessionName="DeepWikiBedrockSession"
                )
                credentials = assumed_role['Credentials']

                # Create a new session with the assumed role credentials
                # NOTE(review): temporary credentials expire; this session is never
                # refreshed after expiry — confirm acceptable for long-running processes.
                session = boto3.Session(
                    aws_access_key_id=credentials['AccessKeyId'],
                    aws_secret_access_key=credentials['SecretAccessKey'],
                    aws_session_token=credentials['SessionToken'],
                    region_name=self.aws_region
                )

            # Create the Bedrock client
            bedrock_runtime = session.client(
                service_name='bedrock-runtime',
                region_name=self.aws_region
            )

            return bedrock_runtime

        except Exception as e:
            log.error(f"Error initializing AWS Bedrock client: {str(e)}")
            # Return None to indicate initialization failure
            return None

    def init_async_client(self):
        """Initialize the asynchronous AWS Bedrock client.

        Note: boto3 doesn't have native async support, so we'll use the sync client
        in async methods and handle async behavior at a higher level.
        """
        # For now, just return the sync client
        return self.sync_client

    def _get_model_provider(self, model_id: str) -> str:
        """Extract the provider from the model ID.

        Args:
            model_id: The model ID, e.g., "anthropic.claude-3-sonnet-20240229-v1:0"

        Returns:
            The provider name, e.g., "anthropic"
        """
        # Bedrock model IDs are "<provider>.<model>"; anything without a dot
        # is treated as an Amazon first-party model.
        if "." in model_id:
            return model_id.split(".")[0]
        return "amazon"  # Default provider

    def _format_prompt_for_provider(self, provider: str, prompt: str, messages=None) -> Dict[str, Any]:
        """Format the prompt according to the provider's requirements.

        Each Bedrock provider expects a different request-body schema; this maps
        a plain prompt (or chat messages) onto that schema.

        Args:
            provider: The provider name, e.g., "anthropic"
            prompt: The prompt text
            messages: Optional list of messages for chat models

        Returns:
            A dictionary with the formatted prompt
        """
        if provider == "anthropic":
            # Format for Claude models
            if messages:
                # Format as a conversation
                # NOTE(review): any role other than "user" is collapsed to
                # "assistant" (including "system") — confirm this is intended.
                formatted_messages = []
                for msg in messages:
                    role = "user" if msg.get("role") == "user" else "assistant"
                    formatted_messages.append({
                        "role": role,
                        "content": [{"type": "text", "text": msg.get("content", "")}]
                    })
                return {
                    "anthropic_version": "bedrock-2023-05-31",
                    "messages": formatted_messages,
                    "max_tokens": 4096
                }
            else:
                # Format as a single prompt
                return {
                    "anthropic_version": "bedrock-2023-05-31",
                    "messages": [
                        {"role": "user", "content": [{"type": "text", "text": prompt}]}
                    ],
                    "max_tokens": 4096
                }
        elif provider == "amazon":
            # Format for Amazon Titan models
            return {
                "inputText": prompt,
                "textGenerationConfig": {
                    "maxTokenCount": 4096,
                    "stopSequences": [],
                    "temperature": 0.7,
                    "topP": 0.8
                }
            }
        elif provider == "cohere":
            # Format for Cohere models
            return {
                "prompt": prompt,
                "max_tokens": 4096,
                "temperature": 0.7,
                "p": 0.8
            }
        elif provider == "ai21":
            # Format for AI21 models
            return {
                "prompt": prompt,
                "maxTokens": 4096,
                "temperature": 0.7,
                "topP": 0.8
            }
        else:
            # Default format
            return {"prompt": prompt}

    def _extract_response_text(self, provider: str, response: Dict[str, Any]) -> str:
        """Extract the generated text from the response.

        Args:
            provider: The provider name, e.g., "anthropic"
            response: The response from the Bedrock API

        Returns:
            The generated text
        """
        if provider == "anthropic":
            return response.get("content", [{}])[0].get("text", "")
        elif provider == "amazon":
            return response.get("results", [{}])[0].get("outputText", "")
        elif provider == "cohere":
            return response.get("generations", [{}])[0].get("text", "")
        elif provider == "ai21":
            return response.get("completions", [{}])[0].get("data", {}).get("text", "")
        else:
            # Try to extract text from the response
            # NOTE(review): first matching key wins, and its value may not be a
            # string; the final str() fallback stringifies the whole dict.
            if isinstance(response, dict):
                for key in ["text", "content", "output", "completion"]:
                    if key in response:
                        return response[key]
            return str(response)

    # Retries with exponential backoff on transient AWS errors, for at most 5 s total.
    @backoff.on_exception(
        backoff.expo,
        (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError),
        max_time=5,
    )
    def call(self, api_kwargs: Dict = None, model_type: ModelType = None) -> Any:
        """Make a synchronous call to the AWS Bedrock API.

        NOTE(review): failures are reported by RETURNING an error string rather
        than raising — callers cannot distinguish an error message from model
        output. Confirm downstream consumers rely on this before changing it.
        """
        api_kwargs = api_kwargs or {}

        # Check if client is initialized
        if not self.sync_client:
            error_msg = "AWS Bedrock client not initialized. Check your AWS credentials and region."
            log.error(error_msg)
            return error_msg

        if model_type == ModelType.LLM:
            model_id = api_kwargs.get("model", "anthropic.claude-3-sonnet-20240229-v1:0")
            provider = self._get_model_provider(model_id)

            # Get the prompt from api_kwargs
            prompt = api_kwargs.get("input", "")
            messages = api_kwargs.get("messages")

            # Format the prompt according to the provider
            request_body = self._format_prompt_for_provider(provider, prompt, messages)

            # Add model parameters if provided; each provider nests/names them differently.
            if "temperature" in api_kwargs:
                if provider == "anthropic":
                    request_body["temperature"] = api_kwargs["temperature"]
                elif provider == "amazon":
                    request_body["textGenerationConfig"]["temperature"] = api_kwargs["temperature"]
                elif provider == "cohere":
                    request_body["temperature"] = api_kwargs["temperature"]
                elif provider == "ai21":
                    request_body["temperature"] = api_kwargs["temperature"]

            if "top_p" in api_kwargs:
                if provider == "anthropic":
                    request_body["top_p"] = api_kwargs["top_p"]
                elif provider == "amazon":
                    request_body["textGenerationConfig"]["topP"] = api_kwargs["top_p"]
                elif provider == "cohere":
                    request_body["p"] = api_kwargs["top_p"]
                elif provider == "ai21":
                    request_body["topP"] = api_kwargs["top_p"]

            # Convert request body to JSON
            body = json.dumps(request_body)

            try:
                # Make the API call
                response = self.sync_client.invoke_model(
                    modelId=model_id,
                    body=body
                )

                # Parse the response (response["body"] is a streaming body; read it once)
                response_body = json.loads(response["body"].read())

                # Extract the generated text
                generated_text = self._extract_response_text(provider, response_body)

                return generated_text

            except Exception as e:
                log.error(f"Error calling AWS Bedrock API: {str(e)}")
                return f"Error: {str(e)}"
        else:
            raise ValueError(f"Model type {model_type} is not supported by AWS Bedrock client")

    async def acall(self, api_kwargs: Dict = None, model_type: ModelType = None) -> Any:
        """Make an asynchronous call to the AWS Bedrock API.

        NOTE(review): delegates to the blocking sync path, so this blocks the
        event loop for the duration of the request — consider a thread pool.
        """
        # For now, just call the sync method
        # In a real implementation, you would use an async library or run the sync method in a thread pool
        return self.call(api_kwargs, model_type)

    def convert_inputs_to_api_kwargs(
        self, input: Any = None, model_kwargs: Dict = None, model_type: ModelType = None
    ) -> Dict:
        """Convert inputs to API kwargs for AWS Bedrock.

        Maps adalflow's generic (input, model_kwargs) pair onto the flat kwargs
        dict consumed by ``call``/``acall``. Only LLM model types are supported.
        """
        model_kwargs = model_kwargs or {}
        api_kwargs = {}

        if model_type == ModelType.LLM:
            api_kwargs["model"] = model_kwargs.get("model", "anthropic.claude-3-sonnet-20240229-v1:0")
            api_kwargs["input"] = input

            # Add model parameters
            if "temperature" in model_kwargs:
                api_kwargs["temperature"] = model_kwargs["temperature"]
            if "top_p" in model_kwargs:
                api_kwargs["top_p"] = model_kwargs["top_p"]

            return api_kwargs
        else:
            raise ValueError(f"Model type {model_type} is not supported by AWS Bedrock client")
OPENROUTER_API_KEY = os.environ.get('OPENROUTER_API_KEY')
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_REGION = os.environ.get('AWS_REGION')
AWS_ROLE_ARN = os.environ.get('AWS_ROLE_ARN')

# Set keys in environment (in case they're needed elsewhere in the code)
if OPENAI_API_KEY:
    os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
if GOOGLE_API_KEY:
    os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
if OPENROUTER_API_KEY:
    os.environ["OPENROUTER_API_KEY"] = OPENROUTER_API_KEY
if AWS_ACCESS_KEY_ID:
    os.environ["AWS_ACCESS_KEY_ID"] = AWS_ACCESS_KEY_ID
if AWS_SECRET_ACCESS_KEY:
    os.environ["AWS_SECRET_ACCESS_KEY"] = AWS_SECRET_ACCESS_KEY
if AWS_REGION:
    os.environ["AWS_REGION"] = AWS_REGION
if AWS_ROLE_ARN:
    os.environ["AWS_ROLE_ARN"] = AWS_ROLE_ARN

# Get configuration directory from environment variable, or use default if not set
CONFIG_DIR = os.environ.get('DEEPWIKI_CONFIG_DIR', None)

# Client class mapping: resolves the "client_class" strings used in the JSON
# config files to the actual client classes.
CLIENT_CLASSES = {
    "GoogleGenAIClient": GoogleGenAIClient,
    "OpenAIClient": OpenAIClient,
    "OpenRouterClient": OpenRouterClient,
    "OllamaClient": OllamaClient,
    "BedrockClient": BedrockClient
}

def load_json_config(filename):
    """Load a JSON configuration file by name.

    Looks in DEEPWIKI_CONFIG_DIR when set, otherwise in the package-local
    ``config/`` directory. Returns {} (and logs) on any failure so callers
    never have to handle exceptions.

    Args:
        filename: Bare file name, e.g. "generator.json".

    Returns:
        dict: Parsed JSON content, or an empty dict on error.
    """
    try:
        # If environment variable is set, use the directory specified by it
        if CONFIG_DIR:
            config_path = Path(CONFIG_DIR) / filename
        else:
            # Otherwise use default directory
            config_path = Path(__file__).parent / "config" / filename

        logger.info(f"Loading configuration from {config_path}")

        if not config_path.exists():
            logger.warning(f"Configuration file {config_path} does not exist")
            return {}

        with open(config_path, 'r') as f:
            return json.load(f)
    except Exception as e:
        # Include the filename so the failing config file is identifiable
        # (previously the message said "(unknown)").
        logger.error(f"Error loading configuration file {filename}: {str(e)}")
        return {}

def load_generator_config():
    """Load generator.json and attach a resolved ``model_client`` class to each provider."""
    generator_config = load_json_config("generator.json")

    # Add client classes to each provider
    if "providers" in generator_config:
        for provider_id, provider_config in generator_config["providers"].items():
            # Try to set client class from client_class
            if provider_config.get("client_class") in CLIENT_CLASSES:
                provider_config["model_client"] = CLIENT_CLASSES[provider_config["client_class"]]
            # Fall back to default mapping based on provider_id
            elif provider_id in ["google", "openai", "openrouter", "ollama", "bedrock"]:
                default_map = {
                    "google": GoogleGenAIClient,
                    "openai": OpenAIClient,
                    "openrouter": OpenRouterClient,
                    "ollama": OllamaClient,
                    "bedrock": BedrockClient
                }
                provider_config["model_client"] = default_map[provider_id]
            else:
                logger.warning(f"Unknown provider or client class: {provider_id}")

    return generator_config

def load_embedder_config():
    """Load embedder.json and resolve ``client_class`` strings to client classes."""
    embedder_config = load_json_config("embedder.json")

    # Process client classes
    for key in ["embedder", "embedder_ollama"]:
        if key in embedder_config and "client_class" in embedder_config[key]:
            class_name = embedder_config[key]["client_class"]
            if class_name in CLIENT_CLASSES:
                embedder_config[key]["model_client"] = CLIENT_CLASSES[class_name]

    return embedder_config

def get_embedder_config():
    """
    Get the current embedder configuration.

    Returns:
        dict: The embedder configuration with model_client resolved
    """
    return configs.get("embedder", {})

def is_ollama_embedder():
    """
    Check if the current embedder configuration uses OllamaClient.

    Returns:
        bool: True if using OllamaClient, False otherwise
    """
    embedder_config = get_embedder_config()
    if not embedder_config:
        return False

    # Check if model_client is OllamaClient
    model_client = embedder_config.get("model_client")
    if model_client:
        return model_client.__name__ == "OllamaClient"

    # Fallback: check client_class string
    client_class = embedder_config.get("client_class", "")
    return client_class == "OllamaClient"

def load_repo_config():
    """Load repository/file-filter configuration from repo.json."""
    return load_json_config("repo.json")

# Default excluded directories and files
DEFAULT_EXCLUDED_DIRS: List[str] = [
    # Virtual environments and package managers
    "./.venv/", "./venv/", "./env/", "./virtualenv/",
    "./node_modules/", "./bower_components/", "./jspm_packages/",
    # Version control
    "./.git/", "./.svn/", "./.hg/", "./.bzr/",
    # Cache and compiled files
    "./__pycache__/", "./.pytest_cache/", "./.mypy_cache/", "./.ruff_cache/", "./.coverage/",
    # Build and distribution
    "./dist/", "./build/", "./out/", "./target/", "./bin/", "./obj/",
    # Documentation
    "./docs/", "./_docs/", "./site-docs/", "./_site/",
    # IDE specific
    "./.idea/", "./.vscode/", "./.vs/", "./.eclipse/", "./.settings/",
    # Logs and temporary files
    "./logs/", "./log/", "./tmp/", "./temp/",
]

DEFAULT_EXCLUDED_FILES: List[str] = [
    "yarn.lock", "pnpm-lock.yaml", "npm-shrinkwrap.json", "poetry.lock",
    "Pipfile.lock", "requirements.txt.lock", "Cargo.lock", "composer.lock",
    ".lock", ".DS_Store", "Thumbs.db", "desktop.ini", "*.lnk", ".env",
    ".env.*", "*.env", "*.cfg", "*.ini", ".flaskenv", ".gitignore",
    ".gitattributes", ".gitmodules", ".github", ".gitlab-ci.yml",
    ".prettierrc", ".eslintrc", ".eslintignore", ".stylelintrc",
    ".editorconfig", ".jshintrc", ".pylintrc", ".flake8", "mypy.ini",
    "pyproject.toml", "tsconfig.json", "webpack.config.js", "babel.config.js",
    "rollup.config.js", "jest.config.js", "karma.conf.js", "vite.config.js",
    "next.config.js", "*.min.js", "*.min.css", "*.bundle.js", "*.bundle.css",
    "*.map", "*.gz", "*.zip", "*.tar", "*.tgz", "*.rar", "*.7z", "*.iso",
    "*.dmg", "*.img", "*.msix", "*.appx", "*.appxbundle", "*.xap", "*.ipa",
    "*.deb", "*.rpm", "*.msi", "*.exe", "*.dll", "*.so", "*.dylib", "*.o",
    "*.obj", "*.jar", "*.war", "*.ear", "*.jsm", "*.class", "*.pyc", "*.pyd",
    "*.pyo", "__pycache__", "*.a", "*.lib", "*.lo", "*.la", "*.slo", "*.dSYM",
    "*.egg", "*.egg-info", "*.dist-info", "*.eggs", "node_modules",
    "bower_components", "jspm_packages", "lib-cov", "coverage", "htmlcov",
    ".nyc_output", ".tox", "dist", "build", "bld", "out", "bin", "target",
    "packages/*/dist", "packages/*/build", ".output"
]

# Initialize empty configuration
configs = {}

# Load all configuration files
generator_config = load_generator_config()
embedder_config = load_embedder_config()
repo_config = load_repo_config()

# Update configuration
if generator_config:
    configs["default_provider"] = generator_config.get("default_provider", "google")
    configs["providers"] = generator_config.get("providers", {})

# Update embedder configuration
if embedder_config:
    for key in ["embedder", "embedder_ollama", "retriever", "text_splitter"]:
        if key in embedder_config:
            configs[key] = embedder_config[key]

# Update repository configuration
if repo_config:
    for key in ["file_filters", "repository"]:
        if key in repo_config:
            configs[key] = repo_config[key]

def get_model_config(provider="google", model=None):
    """
    Get configuration for the specified provider and model

    Parameters:
        provider (str): Model provider ('google', 'openai', 'openrouter', 'ollama', 'bedrock')
        model (str): Model name, or None to use default model

    Returns:
        dict: Configuration containing model_client, model and other parameters

    Raises:
        ValueError: If providers are not loaded, the provider is unknown, or no
            model/default model can be determined.
    """
    # Get provider configuration
    if "providers" not in configs:
        raise ValueError("Provider configuration not loaded")

    provider_config = configs["providers"].get(provider)
    if not provider_config:
        raise ValueError(f"Configuration for provider '{provider}' not found")

    model_client = provider_config.get("model_client")
    if not model_client:
        raise ValueError(f"Model client not specified for provider '{provider}'")

    # If model not provided, use default model for the provider
    if not model:
        model = provider_config.get("default_model")
        if not model:
            raise ValueError(f"No default model specified for provider '{provider}'")

    # Get model parameters (if present). Unknown/custom models fall back to the
    # default model's parameters; previously a missing "models" section or an
    # absent default model raised an unguarded KeyError here.
    known_models = provider_config.get("models", {})
    if model in known_models:
        model_params = known_models[model]
    else:
        default_model = provider_config.get("default_model")
        model_params = known_models.get(default_model, {})

    # Prepare base configuration
    result = {
        "model_client": model_client,
    }

    # Provider-specific adjustments
    if provider == "ollama":
        # Ollama uses a slightly different parameter structure
        if "options" in model_params:
            result["model_kwargs"] = {"model": model, **model_params["options"]}
        else:
            result["model_kwargs"] = {"model": model}
    else:
        # Standard structure for other providers
        result["model_kwargs"] = {"model": model, **model_params}

    return result
-------------------------------------------------------------------------------- 1 | { 2 | "embedder": { 3 | "client_class": "OpenAIClient", 4 | "batch_size": 500, 5 | "model_kwargs": { 6 | "model": "text-embedding-3-small", 7 | "dimensions": 256, 8 | "encoding_format": "float" 9 | } 10 | }, 11 | "embedder_ollama": { 12 | "client_class": "OllamaClient", 13 | "model_kwargs": { 14 | "model": "nomic-embed-text" 15 | } 16 | }, 17 | "retriever": { 18 | "top_k": 20 19 | }, 20 | "text_splitter": { 21 | "split_by": "word", 22 | "chunk_size": 350, 23 | "chunk_overlap": 100 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /api/config/generator.json: -------------------------------------------------------------------------------- 1 | { 2 | "default_provider": "google", 3 | "providers": { 4 | "google": { 5 | "default_model": "gemini-2.0-flash", 6 | "supportsCustomModel": true, 7 | "models": { 8 | "gemini-2.0-flash": { 9 | "temperature": 0.7, 10 | "top_p": 0.8, 11 | "top_k": 20 12 | }, 13 | "gemini-2.5-flash-preview-05-20": { 14 | "temperature": 0.7, 15 | "top_p": 0.8, 16 | "top_k": 20 17 | }, 18 | "gemini-2.5-pro-preview-03-25": { 19 | "temperature": 0.7, 20 | "top_p": 0.8, 21 | "top_k": 20 22 | } 23 | } 24 | }, 25 | "openai": { 26 | "default_model": "gpt-4o", 27 | "supportsCustomModel": true, 28 | "models": { 29 | "gpt-4o": { 30 | "temperature": 0.7, 31 | "top_p": 0.8 32 | }, 33 | "gpt-4.1": { 34 | "temperature": 0.7, 35 | "top_p": 0.8 36 | }, 37 | "o1": { 38 | "temperature": 0.7, 39 | "top_p": 0.8 40 | }, 41 | "o3": { 42 | "temperature": 0.7, 43 | "top_p": 0.8 44 | }, 45 | "o4-mini": { 46 | "temperature": 0.7, 47 | "top_p": 0.8 48 | } 49 | } 50 | }, 51 | "openrouter": { 52 | "default_model": "openai/gpt-4o", 53 | "supportsCustomModel": true, 54 | "models": { 55 | "openai/gpt-4o": { 56 | "temperature": 0.7, 57 | "top_p": 0.8 58 | }, 59 | "deepseek/deepseek-r1": { 60 | "temperature": 0.7, 61 | "top_p": 0.8 62 | }, 63 | 
"openai/gpt-4.1": { 64 | "temperature": 0.7, 65 | "top_p": 0.8 66 | }, 67 | "openai/o1": { 68 | "temperature": 0.7, 69 | "top_p": 0.8 70 | }, 71 | "openai/o3": { 72 | "temperature": 0.7, 73 | "top_p": 0.8 74 | }, 75 | "openai/o4-mini": { 76 | "temperature": 0.7, 77 | "top_p": 0.8 78 | }, 79 | "anthropic/claude-3.7-sonnet": { 80 | "temperature": 0.7, 81 | "top_p": 0.8 82 | }, 83 | "anthropic/claude-3.5-sonnet": { 84 | "temperature": 0.7, 85 | "top_p": 0.8 86 | } 87 | } 88 | }, 89 | "ollama": { 90 | "default_model": "qwen3:1.7b", 91 | "supportsCustomModel": true, 92 | "models": { 93 | "qwen3:1.7b": { 94 | "options": { 95 | "temperature": 0.7, 96 | "top_p": 0.8, 97 | "num_ctx": 32000 98 | } 99 | }, 100 | "llama3:8b": { 101 | "options": { 102 | "temperature": 0.7, 103 | "top_p": 0.8, 104 | "num_ctx": 8000 105 | } 106 | }, 107 | "qwen3:8b": { 108 | "options": { 109 | "temperature": 0.7, 110 | "top_p": 0.8, 111 | "num_ctx": 32000 112 | } 113 | } 114 | } 115 | }, 116 | "bedrock": { 117 | "client_class": "BedrockClient", 118 | "default_model": "anthropic.claude-3-sonnet-20240229-v1:0", 119 | "supportsCustomModel": true, 120 | "models": { 121 | "anthropic.claude-3-sonnet-20240229-v1:0": { 122 | "temperature": 0.7, 123 | "top_p": 0.8 124 | }, 125 | "anthropic.claude-3-haiku-20240307-v1:0": { 126 | "temperature": 0.7, 127 | "top_p": 0.8 128 | }, 129 | "anthropic.claude-3-opus-20240229-v1:0": { 130 | "temperature": 0.7, 131 | "top_p": 0.8 132 | }, 133 | "amazon.titan-text-express-v1": { 134 | "temperature": 0.7, 135 | "top_p": 0.8 136 | }, 137 | "cohere.command-r-v1:0": { 138 | "temperature": 0.7, 139 | "top_p": 0.8 140 | }, 141 | "ai21.j2-ultra-v1": { 142 | "temperature": 0.7, 143 | "top_p": 0.8 144 | } 145 | } 146 | } 147 | } 148 | } -------------------------------------------------------------------------------- /api/config/repo.json: -------------------------------------------------------------------------------- 1 | { 2 | "file_filters": { 3 | "excluded_dirs": [ 4 | 
"./.venv/", 5 | "./venv/", 6 | "./env/", 7 | "./virtualenv/", 8 | "./node_modules/", 9 | "./bower_components/", 10 | "./jspm_packages/", 11 | "./.git/", 12 | "./.svn/", 13 | "./.hg/", 14 | "./.bzr/" 15 | ], 16 | "excluded_files": [ 17 | "yarn.lock", 18 | "pnpm-lock.yaml", 19 | "npm-shrinkwrap.json", 20 | "poetry.lock", 21 | "Pipfile.lock", 22 | "requirements.txt.lock", 23 | "Cargo.lock", 24 | "composer.lock", 25 | ".lock", 26 | ".DS_Store", 27 | "Thumbs.db", 28 | "desktop.ini", 29 | "*.lnk", 30 | ".env", 31 | ".env.*", 32 | "*.env", 33 | "*.cfg", 34 | "*.ini", 35 | ".flaskenv", 36 | ".gitignore", 37 | ".gitattributes", 38 | ".gitmodules", 39 | ".github", 40 | ".gitlab-ci.yml", 41 | ".prettierrc", 42 | ".eslintrc", 43 | ".eslintignore", 44 | ".stylelintrc", 45 | ".editorconfig", 46 | ".jshintrc", 47 | ".pylintrc", 48 | ".flake8", 49 | "mypy.ini", 50 | "pyproject.toml", 51 | "tsconfig.json", 52 | "webpack.config.js", 53 | "babel.config.js", 54 | "rollup.config.js", 55 | "jest.config.js", 56 | "karma.conf.js", 57 | "vite.config.js", 58 | "next.config.js", 59 | "*.min.js", 60 | "*.min.css", 61 | "*.bundle.js", 62 | "*.bundle.css", 63 | "*.map", 64 | "*.gz", 65 | "*.zip", 66 | "*.tar", 67 | "*.tgz", 68 | "*.rar", 69 | "*.7z", 70 | "*.iso", 71 | "*.dmg", 72 | "*.img", 73 | "*.msix", 74 | "*.appx", 75 | "*.appxbundle", 76 | "*.xap", 77 | "*.ipa", 78 | "*.deb", 79 | "*.rpm", 80 | "*.msi", 81 | "*.exe", 82 | "*.dll", 83 | "*.so", 84 | "*.dylib", 85 | "*.o", 86 | "*.obj", 87 | "*.jar", 88 | "*.war", 89 | "*.ear", 90 | "*.jsm", 91 | "*.class", 92 | "*.pyc", 93 | "*.pyd", 94 | "*.pyo", 95 | "__pycache__", 96 | "*.a", 97 | "*.lib", 98 | "*.lo", 99 | "*.la", 100 | "*.slo", 101 | "*.dSYM", 102 | "*.egg", 103 | "*.egg-info", 104 | "*.dist-info", 105 | "*.eggs", 106 | "node_modules", 107 | "bower_components", 108 | "jspm_packages", 109 | "lib-cov", 110 | "coverage", 111 | "htmlcov", 112 | ".nyc_output", 113 | ".tox", 114 | "dist", 115 | "build", 116 | "bld", 117 | "out", 118 | 
"bin", 119 | "target", 120 | "packages/*/dist", 121 | "packages/*/build", 122 | ".output" 123 | ] 124 | }, 125 | "repository": { 126 | "max_size_mb": 50000 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /api/main.py: -------------------------------------------------------------------------------- 1 | import uvicorn 2 | import os 3 | import sys 4 | import logging 5 | from dotenv import load_dotenv 6 | 7 | # Load environment variables from .env file 8 | load_dotenv() 9 | 10 | # --- Unified Logging Configuration --- 11 | # Determine the project's base directory (assuming main.py is in 'api' subdirectory) 12 | # Adjust if your structure is different, e.g., if main.py is at the root. 13 | # This assumes 'api/main.py', so logs will be in 'api/logs/application.log' 14 | LOG_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs") 15 | os.makedirs(LOG_DIR, exist_ok=True) 16 | LOG_FILE_PATH = os.path.join(LOG_DIR, "application.log") 17 | 18 | logging.basicConfig( 19 | level=logging.INFO, 20 | format="%(asctime)s - %(lineno)d %(filename)s:%(funcName)s - %(levelname)s - %(message)s", 21 | handlers=[ 22 | logging.FileHandler(LOG_FILE_PATH), 23 | logging.StreamHandler() # Also keep logging to console 24 | ], 25 | force=True # Ensure this configuration takes precedence and clears any existing handlers 26 | ) 27 | 28 | # Get a logger for this main module (optional, but good practice) 29 | logger = logging.getLogger(__name__) 30 | 31 | # Add the current directory to the path so we can import the api package 32 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 33 | 34 | # Check for required environment variables 35 | required_env_vars = ['GOOGLE_API_KEY', 'OPENAI_API_KEY'] 36 | missing_vars = [var for var in required_env_vars if not os.environ.get(var)] 37 | if missing_vars: 38 | logger.warning(f"Missing environment variables: {', '.join(missing_vars)}") 39 | logger.warning("Some 
functionality may not work correctly without these variables.") 40 | 41 | if __name__ == "__main__": 42 | # Get port from environment variable or use default 43 | port = int(os.environ.get("PORT", 8001)) 44 | 45 | # Import the app here to ensure environment variables are set first 46 | from api.api import app 47 | 48 | logger.info(f"Starting Streaming API on port {port}") 49 | 50 | # Run the FastAPI app with uvicorn 51 | # Disable reload in production/Docker environment 52 | is_development = os.environ.get("NODE_ENV") != "production" 53 | uvicorn.run( 54 | "api.api:app", 55 | host="0.0.0.0", 56 | port=port, 57 | reload=is_development 58 | ) 59 | -------------------------------------------------------------------------------- /api/ollama_patch.py: -------------------------------------------------------------------------------- 1 | from typing import Sequence, List 2 | from copy import deepcopy 3 | from tqdm import tqdm 4 | import logging 5 | import adalflow as adal 6 | from adalflow.core.types import Document 7 | from adalflow.core.component import DataComponent 8 | 9 | # Configure logging 10 | logging.basicConfig( 11 | level=logging.INFO, 12 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' 13 | ) 14 | logger = logging.getLogger(__name__) 15 | 16 | class OllamaDocumentProcessor(DataComponent): 17 | """ 18 | Process documents for Ollama embeddings by processing one document at a time. 19 | Adalflow Ollama Client does not support batch embedding, so we need to process each document individually. 
20 | """ 21 | def __init__(self, embedder: adal.Embedder) -> None: 22 | super().__init__() 23 | self.embedder = embedder 24 | 25 | def __call__(self, documents: Sequence[Document]) -> Sequence[Document]: 26 | output = deepcopy(documents) 27 | logger.info(f"Processing {len(output)} documents individually for Ollama embeddings") 28 | 29 | successful_docs = [] 30 | expected_embedding_size = None 31 | 32 | for i, doc in enumerate(tqdm(output, desc="Processing documents for Ollama embeddings")): 33 | try: 34 | # Get embedding for a single document 35 | result = self.embedder(input=doc.text) 36 | if result.data and len(result.data) > 0: 37 | embedding = result.data[0].embedding 38 | 39 | # Validate embedding size consistency 40 | if expected_embedding_size is None: 41 | expected_embedding_size = len(embedding) 42 | logger.info(f"Expected embedding size set to: {expected_embedding_size}") 43 | elif len(embedding) != expected_embedding_size: 44 | file_path = getattr(doc, 'meta_data', {}).get('file_path', f'document_{i}') 45 | logger.warning(f"Document '{file_path}' has inconsistent embedding size {len(embedding)} != {expected_embedding_size}, skipping") 46 | continue 47 | 48 | # Assign the embedding to the document 49 | output[i].vector = embedding 50 | successful_docs.append(output[i]) 51 | else: 52 | file_path = getattr(doc, 'meta_data', {}).get('file_path', f'document_{i}') 53 | logger.warning(f"Failed to get embedding for document '{file_path}', skipping") 54 | except Exception as e: 55 | file_path = getattr(doc, 'meta_data', {}).get('file_path', f'document_{i}') 56 | logger.error(f"Error processing document '{file_path}': {e}, skipping") 57 | 58 | logger.info(f"Successfully processed {len(successful_docs)}/{len(output)} documents with consistent embeddings") 59 | return successful_docs -------------------------------------------------------------------------------- /api/requirements.txt: -------------------------------------------------------------------------------- 
import requests
import json
import sys

def test_streaming_endpoint(repo_url, query, file_path=None):
    """
    Test the streaming endpoint with a given repository URL and query.

    Args:
        repo_url (str): The GitHub repository URL
        query (str): The query to send
        file_path (str, optional): Path to a file in the repository
    """
    # Define the API endpoint
    url = "http://localhost:8000/chat/completions/stream"

    # Define the request payload
    payload = {
        "repo_url": repo_url,
        "messages": [
            {
                "role": "user",
                "content": query
            }
        ],
        "filePath": file_path
    }

    print("Testing streaming endpoint with:")
    print(f" Repository: {repo_url}")
    print(f" Query: {query}")
    if file_path:
        print(f" File Path: {file_path}")
    print("\nResponse:")

    try:
        # Make the request with streaming enabled
        response = requests.post(url, json=payload, stream=True)

        # Check if the request was successful
        if response.status_code != 200:
            print(f"Error: {response.status_code}")
            try:
                error_data = json.loads(response.content)
                print(f"Error details: {error_data.get('detail', 'Unknown error')}")
            except ValueError:
                # Body was not JSON (json.JSONDecodeError subclasses ValueError).
                # A bare `except:` here would also swallow KeyboardInterrupt.
                print(f"Error content: {response.content}")
            return

        # Process the streaming response as chunks arrive
        for chunk in response.iter_content(chunk_size=None):
            if chunk:
                print(chunk.decode('utf-8'), end='', flush=True)

        print("\n\nStreaming completed successfully.")

    except Exception as e:
        print(f"Error: {str(e)}")

if __name__ == "__main__":
    # Get command line arguments; repo_url and query are required.
    if len(sys.argv) < 3:
        print("Usage: python test_api.py <repo_url> <query> [file_path]")
        sys.exit(1)

    repo_url = sys.argv[1]
    query = sys.argv[2]
    file_path = sys.argv[3] if len(sys.argv) > 3 else None

    test_streaming_endpoint(repo_url, query, file_path)
baseDirectory: __dirname, 10 | }); 11 | 12 | const eslintConfig = [ 13 | ...compat.extends("next/core-web-vitals", "next/typescript"), 14 | ]; 15 | 16 | export default eslintConfig; 17 | -------------------------------------------------------------------------------- /next.config.ts: -------------------------------------------------------------------------------- 1 | import type { NextConfig } from "next"; 2 | 3 | const TARGET_SERVER_BASE_URL = process.env.SERVER_BASE_URL || 'http://localhost:8001'; 4 | 5 | const nextConfig: NextConfig = { 6 | /* config options here */ 7 | output: 'standalone', 8 | // Optimize build for Docker 9 | experimental: { 10 | optimizePackageImports: ['@mermaid-js/mermaid', 'react-syntax-highlighter'], 11 | }, 12 | // Reduce memory usage during build 13 | webpack: (config, { isServer }) => { 14 | if (!isServer) { 15 | config.resolve.fallback = { 16 | ...config.resolve.fallback, 17 | fs: false, 18 | }; 19 | } 20 | // Optimize bundle size 21 | config.optimization = { 22 | ...config.optimization, 23 | splitChunks: { 24 | chunks: 'all', 25 | cacheGroups: { 26 | vendor: { 27 | test: /[\\/]node_modules[\\/]/, 28 | name: 'vendors', 29 | chunks: 'all', 30 | }, 31 | }, 32 | }, 33 | }; 34 | return config; 35 | }, 36 | async rewrites() { 37 | return [ 38 | { 39 | source: '/api/wiki_cache/:path*', 40 | destination: `${TARGET_SERVER_BASE_URL}/api/wiki_cache/:path*`, 41 | }, 42 | { 43 | source: '/export/wiki/:path*', 44 | destination: `${TARGET_SERVER_BASE_URL}/export/wiki/:path*`, 45 | }, 46 | { 47 | source: '/api/wiki_cache', 48 | destination: `${TARGET_SERVER_BASE_URL}/api/wiki_cache`, 49 | }, 50 | { 51 | source: '/local_repo/structure', 52 | destination: `${TARGET_SERVER_BASE_URL}/local_repo/structure`, 53 | }, 54 | ]; 55 | }, 56 | }; 57 | 58 | export default nextConfig; 59 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 
1 | { 2 | "name": "deepwiki-open", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "next dev --turbopack --port 3000", 7 | "build": "next build", 8 | "start": "next start", 9 | "lint": "next lint" 10 | }, 11 | "dependencies": { 12 | "mermaid": "^11.4.1", 13 | "next": "15.3.1", 14 | "next-intl": "^4.1.0", 15 | "next-themes": "^0.4.6", 16 | "react": "^19.0.0", 17 | "react-dom": "^19.0.0", 18 | "react-icons": "^5.5.0", 19 | "react-markdown": "^10.1.0", 20 | "react-syntax-highlighter": "^15.6.1", 21 | "rehype-raw": "^7.0.0", 22 | "remark-gfm": "^4.0.1", 23 | "svg-pan-zoom": "^3.6.2" 24 | }, 25 | "devDependencies": { 26 | "@eslint/eslintrc": "^3", 27 | "@tailwindcss/postcss": "^4", 28 | "@types/node": "^20", 29 | "@types/react": "^19", 30 | "@types/react-dom": "^19", 31 | "@types/react-syntax-highlighter": "^15.5.13", 32 | "eslint": "^9", 33 | "eslint-config-next": "15.3.1", 34 | "tailwindcss": "^4", 35 | "typescript": "^5" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /postcss.config.mjs: -------------------------------------------------------------------------------- 1 | const config = { 2 | plugins: ["@tailwindcss/postcss"], 3 | }; 4 | 5 | export default config; 6 | -------------------------------------------------------------------------------- /public/file.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /public/globe.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /public/next.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /public/vercel.svg: -------------------------------------------------------------------------------- 1 
| -------------------------------------------------------------------------------- /public/window.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "deepwiki-open" 3 | version = "0.1.0" 4 | description = "Add your description here" 5 | readme = "README.md" 6 | requires-python = ">=3.12" 7 | dependencies = [ 8 | "fastapi>=0.95.0", 9 | "uvicorn>=0.21.1", 10 | "pydantic>=2.0.0", 11 | "google-generativeai>=0.3.0", 12 | "tiktoken>=0.5.0", 13 | "adalflow>=0.1.0", 14 | "numpy>=1.24.0", 15 | "faiss-cpu>=1.7.4", 16 | "langid>=1.1.6", 17 | "requests>=2.28.0", 18 | "jinja2>=3.1.2", 19 | "python-dotenv>=1.0.0", 20 | "openai>=1.76.2", 21 | "ollama>=0.4.8", 22 | "aiohttp>=3.8.4", 23 | "boto3>=1.34.0" 24 | ] 25 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | uv run -m api.main -------------------------------------------------------------------------------- /screenshots/DeepResearch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AsyncFuncAI/deepwiki-open/f1029ff7bdcafb3bd445f56ac9333cf193c15c69/screenshots/DeepResearch.png -------------------------------------------------------------------------------- /screenshots/Deepwiki.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AsyncFuncAI/deepwiki-open/f1029ff7bdcafb3bd445f56ac9333cf193c15c69/screenshots/Deepwiki.png -------------------------------------------------------------------------------- /screenshots/Interface.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/AsyncFuncAI/deepwiki-open/f1029ff7bdcafb3bd445f56ac9333cf193c15c69/screenshots/Interface.png -------------------------------------------------------------------------------- /screenshots/Ollama.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AsyncFuncAI/deepwiki-open/f1029ff7bdcafb3bd445f56ac9333cf193c15c69/screenshots/Ollama.png -------------------------------------------------------------------------------- /screenshots/privaterepo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AsyncFuncAI/deepwiki-open/f1029ff7bdcafb3bd445f56ac9333cf193c15c69/screenshots/privaterepo.png -------------------------------------------------------------------------------- /src/app/api/chat/stream/route.ts: -------------------------------------------------------------------------------- 1 | import { NextRequest, NextResponse } from 'next/server'; 2 | 3 | // The target backend server base URL, derived from environment variable or defaulted. 4 | // This should match the logic in your frontend's page.tsx for consistency. 
5 | const TARGET_SERVER_BASE_URL = process.env.SERVER_BASE_URL || 'http://localhost:8001'; 6 | 7 | // This is a fallback HTTP implementation that will be used if WebSockets are not available 8 | // or if there's an error with the WebSocket connection 9 | export async function POST(req: NextRequest) { 10 | try { 11 | const requestBody = await req.json(); // Assuming the frontend sends JSON 12 | 13 | // Note: This endpoint now uses the HTTP fallback instead of WebSockets 14 | // The WebSocket implementation is in src/utils/websocketClient.ts 15 | // This HTTP endpoint is kept for backward compatibility 16 | console.log('Using HTTP fallback for chat completion instead of WebSockets'); 17 | 18 | const targetUrl = `${TARGET_SERVER_BASE_URL}/chat/completions/stream`; 19 | 20 | // Make the actual request to the backend service 21 | const backendResponse = await fetch(targetUrl, { 22 | method: 'POST', 23 | headers: { 24 | 'Content-Type': 'application/json', 25 | 'Accept': 'text/event-stream', // Indicate that we expect a stream 26 | }, 27 | body: JSON.stringify(requestBody), 28 | }); 29 | 30 | // If the backend service returned an error, forward that error to the client 31 | if (!backendResponse.ok) { 32 | const errorBody = await backendResponse.text(); 33 | const errorHeaders = new Headers(); 34 | backendResponse.headers.forEach((value, key) => { 35 | errorHeaders.set(key, value); 36 | }); 37 | return new NextResponse(errorBody, { 38 | status: backendResponse.status, 39 | statusText: backendResponse.statusText, 40 | headers: errorHeaders, 41 | }); 42 | } 43 | 44 | // Ensure the backend response has a body to stream 45 | if (!backendResponse.body) { 46 | return new NextResponse('Stream body from backend is null', { status: 500 }); 47 | } 48 | 49 | // Create a new ReadableStream to pipe the data from the backend to the client 50 | const stream = new ReadableStream({ 51 | async start(controller) { 52 | const reader = backendResponse.body!.getReader(); 53 | try { 54 | while 
(true) { 55 | const { done, value } = await reader.read(); 56 | if (done) { 57 | break; 58 | } 59 | controller.enqueue(value); 60 | } 61 | } catch (error) { 62 | console.error('Error reading from backend stream in proxy:', error); 63 | controller.error(error); 64 | } finally { 65 | controller.close(); 66 | reader.releaseLock(); // Important to release the lock on the reader 67 | } 68 | }, 69 | cancel(reason) { 70 | console.log('Client cancelled stream request:', reason); 71 | } 72 | }); 73 | 74 | // Set up headers for the response to the client 75 | const responseHeaders = new Headers(); 76 | // Copy the Content-Type from the backend response (e.g., 'text/event-stream') 77 | const contentType = backendResponse.headers.get('Content-Type'); 78 | if (contentType) { 79 | responseHeaders.set('Content-Type', contentType); 80 | } 81 | // It's good practice for streams not to be cached or transformed by intermediaries. 82 | responseHeaders.set('Cache-Control', 'no-cache, no-transform'); 83 | 84 | return new NextResponse(stream, { 85 | status: backendResponse.status, // Should be 200 for a successful stream start 86 | headers: responseHeaders, 87 | }); 88 | 89 | } catch (error) { 90 | console.error('Error in API proxy route (/api/chat/stream):', error); 91 | let errorMessage = 'Internal Server Error in proxy'; 92 | if (error instanceof Error) { 93 | errorMessage = error.message; 94 | } 95 | return new NextResponse(JSON.stringify({ error: errorMessage }), { 96 | status: 500, 97 | headers: { 'Content-Type': 'application/json' }, 98 | }); 99 | } 100 | } 101 | 102 | // Optional: Handle OPTIONS requests for CORS if you ever call this from a different origin 103 | // or use custom headers that trigger preflight requests. For same-origin, it's less critical. 
104 | export async function OPTIONS() { 105 | return new NextResponse(null, { 106 | status: 204, // No Content 107 | headers: { 108 | 'Access-Control-Allow-Origin': '*', // Be more specific in production if needed 109 | 'Access-Control-Allow-Methods': 'POST, OPTIONS', 110 | 'Access-Control-Allow-Headers': 'Content-Type, Authorization', // Adjust as per client's request headers 111 | }, 112 | }); 113 | } -------------------------------------------------------------------------------- /src/app/api/models/config/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | 3 | // The target backend server base URL, derived from environment variable or defaulted. 4 | const TARGET_SERVER_BASE_URL = process.env.SERVER_BASE_URL || 'http://localhost:8001'; 5 | 6 | export async function GET() { 7 | try { 8 | const targetUrl = `${TARGET_SERVER_BASE_URL}/models/config`; 9 | 10 | // Make the actual request to the backend service 11 | const backendResponse = await fetch(targetUrl, { 12 | method: 'GET', 13 | headers: { 14 | 'Accept': 'application/json', 15 | } 16 | }); 17 | 18 | // If the backend service responds with an error 19 | if (!backendResponse.ok) { 20 | return NextResponse.json( 21 | { error: `Backend service responded with status: ${backendResponse.status}` }, 22 | { status: backendResponse.status } 23 | ); 24 | } 25 | 26 | // Forward the response from the backend 27 | const modelConfig = await backendResponse.json(); 28 | return NextResponse.json(modelConfig); 29 | } catch (error) { 30 | console.error('Error fetching model configurations:', error); 31 | return new NextResponse(JSON.stringify({ error: error }), { 32 | status: 500, 33 | headers: { 'Content-Type': 'application/json' }, 34 | }); 35 | } 36 | } 37 | 38 | // Handle OPTIONS requests for CORS if needed 39 | export function OPTIONS() { 40 | return new NextResponse(null, { 41 | status: 204, 42 | headers: { 43 | 
'Access-Control-Allow-Origin': '*', 44 | 'Access-Control-Allow-Methods': 'GET', 45 | 'Access-Control-Allow-Headers': 'Content-Type, Authorization', 46 | }, 47 | }); 48 | } 49 | -------------------------------------------------------------------------------- /src/app/api/wiki/projects/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | 3 | // This should match the expected structure from your Python backend 4 | interface ApiProcessedProject { 5 | id: string; 6 | owner: string; 7 | repo: string; 8 | name: string; 9 | repo_type: string; 10 | submittedAt: number; 11 | language: string; 12 | } 13 | 14 | // Ensure this matches your Python backend configuration 15 | const PYTHON_BACKEND_URL = process.env.PYTHON_BACKEND_HOST || 'http://localhost:8001'; 16 | const PROJECTS_API_ENDPOINT = `${PYTHON_BACKEND_URL}/api/processed_projects`; 17 | 18 | export async function GET() { 19 | try { 20 | const response = await fetch(PROJECTS_API_ENDPOINT, { 21 | method: 'GET', 22 | headers: { 23 | 'Content-Type': 'application/json', 24 | // Add any other headers your Python backend might require, e.g., API keys 25 | }, 26 | cache: 'no-store', // Ensure fresh data is fetched every time 27 | }); 28 | 29 | if (!response.ok) { 30 | // Try to parse error from backend, otherwise use status text 31 | let errorBody = { error: `Failed to fetch from Python backend: ${response.statusText}` }; 32 | try { 33 | errorBody = await response.json(); 34 | } catch { 35 | // If parsing JSON fails, errorBody will retain its default value 36 | // The error from backend is logged in the next line anyway 37 | } 38 | console.error(`Error from Python backend (${PROJECTS_API_ENDPOINT}): ${response.status} - ${JSON.stringify(errorBody)}`); 39 | return NextResponse.json(errorBody, { status: response.status }); 40 | } 41 | 42 | const projects: ApiProcessedProject[] = await response.json(); 43 | return NextResponse.json(projects); 
44 | 45 | } catch (error: unknown) { 46 | console.error(`Network or other error when fetching from ${PROJECTS_API_ENDPOINT}:`, error); 47 | const message = error instanceof Error ? error.message : 'An unknown error occurred'; 48 | return NextResponse.json( 49 | { error: `Failed to connect to the Python backend. ${message}` }, 50 | { status: 503 } // Service Unavailable 51 | ); 52 | } 53 | } -------------------------------------------------------------------------------- /src/app/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AsyncFuncAI/deepwiki-open/f1029ff7bdcafb3bd445f56ac9333cf193c15c69/src/app/favicon.ico -------------------------------------------------------------------------------- /src/app/globals.css: -------------------------------------------------------------------------------- 1 | @import "tailwindcss"; 2 | 3 | /* Define dark mode variant */ 4 | @custom-variant dark (&:where([data-theme="dark"], [data-theme="dark"] *)); 5 | 6 | :root { 7 | /* Japanese aesthetic color palette - light mode */ 8 | --background: #f8f4e6; /* Warm off-white like washi paper */ 9 | --foreground: #333333; /* Soft black for text */ 10 | --shadow-color: rgba(0, 0, 0, 0.05); 11 | --accent-primary: #9b7cb9; /* Soft purple (Fuji) */ 12 | --accent-secondary: #d7c4bb; /* Soft beige (Kinari) */ 13 | --border-color: #e0d8c8; /* Soft beige border */ 14 | --card-bg: #fffaf0; /* Slightly warmer than background */ 15 | --highlight: #e8927c; /* Soft coral (Akane) */ 16 | --muted: #a59e8c; /* Soft gray-brown (Nezumi) */ 17 | --link-color: #7c5aa0; /* Slightly darker purple for links */ 18 | } 19 | 20 | html[data-theme='dark'] { 21 | /* Japanese aesthetic color palette - dark mode */ 22 | --background: #1a1a1a; /* Deep charcoal */ 23 | --foreground: #f0f0f0; /* Soft white */ 24 | --shadow-color: rgba(0, 0, 0, 0.2); 25 | --accent-primary: #9370db; /* Soft lavender */ 26 | --accent-secondary: #5d4037; /* Warm 
brown */ 27 | --border-color: #2c2c2c; /* Dark border */ 28 | --card-bg: #222222; /* Slightly lighter than background */ 29 | --highlight: #e57373; /* Soft red */ 30 | --muted: #8c8c8c; /* Muted gray */ 31 | --link-color: #b19cd9; /* Lighter purple for dark mode links */ 32 | } 33 | 34 | @theme inline { 35 | --color-background: var(--background); 36 | --color-foreground: var(--foreground); 37 | --font-sans: var(--font-geist-sans); 38 | --font-mono: var(--font-geist-mono); 39 | } 40 | 41 | body { 42 | background: var(--background); 43 | color: var(--foreground); 44 | font-family: var(--font-sans), sans-serif; 45 | } 46 | 47 | /* Custom shadow styles - more subtle for Japanese aesthetic */ 48 | .shadow-custom { 49 | box-shadow: 0 4px 8px -2px var(--shadow-color); 50 | } 51 | 52 | /* Paper texture background */ 53 | .paper-texture { 54 | background-color: var(--card-bg); 55 | background-image: url("data:image/svg+xml,%3Csvg width='100' height='100' viewBox='0 0 100 100' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M11 18c3.866 0 7-3.134 7-7s-3.134-7-7-7-7 3.134-7 7 3.134 7 7 7zm48 25c3.866 0 7-3.134 7-7s-3.134-7-7-7-7 3.134-7 7 3.134 7 7 7zm-43-7c1.657 0 3-1.343 3-3s-1.343-3-3-3-3 1.343-3 3 1.343 3 3 3zm63 31c1.657 0 3-1.343 3-3s-1.343-3-3-3-3 1.343-3 3 1.343 3 3 3zM34 90c1.657 0 3-1.343 3-3s-1.343-3-3-3-3 1.343-3 3 1.343 3 3 3zm56-76c1.657 0 3-1.343 3-3s-1.343-3-3-3-3 1.343-3 3 1.343 3 3 3zM12 86c2.21 0 4-1.79 4-4s-1.79-4-4-4-4 1.79-4 4 1.79 4 4 4zm28-65c2.21 0 4-1.79 4-4s-1.79-4-4-4-4 1.79-4 4 1.79 4 4 4zm23-11c2.76 0 5-2.24 5-5s-2.24-5-5-5-5 2.24-5 5 2.24 5 5 5zm-6 60c2.21 0 4-1.79 4-4s-1.79-4-4-4-4 1.79-4 4 1.79 4 4 4zm29 22c2.76 0 5-2.24 5-5s-2.24-5-5-5-5 2.24-5 5 2.24 5 5 5zM32 63c2.76 0 5-2.24 5-5s-2.24-5-5-5-5 2.24-5 5 2.24 5 5 5zm57-13c2.76 0 5-2.24 5-5s-2.24-5-5-5-5 2.24-5 5 2.24 5 5 5zm-9-21c1.105 0 2-.895 2-2s-.895-2-2-2-2 .895-2 2 .895 2 2 2zM60 91c1.105 0 2-.895 2-2s-.895-2-2-2-2 .895-2 2 .895 2 2 2zM35 41c1.105 0 2-.895 2-2s-.895-2-2-2-2 .895-2 2 
.895 2 2 2zM12 60c1.105 0 2-.895 2-2s-.895-2-2-2-2 .895-2 2 .895 2 2 2z' fill='%23e0d8c8' fill-opacity='0.1' fill-rule='evenodd'/%3E%3C/svg%3E"); 56 | } 57 | 58 | /* Dark mode paper texture */ 59 | html[data-theme='dark'] .paper-texture { 60 | background-image: url("data:image/svg+xml,%3Csvg width='100' height='100' viewBox='0 0 100 100' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M11 18c3.866 0 7-3.134 7-7s-3.134-7-7-7-7 3.134-7 7 3.134 7 7 7zm48 25c3.866 0 7-3.134 7-7s-3.134-7-7-7-7 3.134-7 7 3.134 7 7 7zm-43-7c1.657 0 3-1.343 3-3s-1.343-3-3-3-3 1.343-3 3 1.343 3 3 3zm63 31c1.657 0 3-1.343 3-3s-1.343-3-3-3-3 1.343-3 3 1.343 3 3 3zM34 90c1.657 0 3-1.343 3-3s-1.343-3-3-3-3 1.343-3 3 1.343 3 3 3zm56-76c1.657 0 3-1.343 3-3s-1.343-3-3-3-3 1.343-3 3 1.343 3 3 3zM12 86c2.21 0 4-1.79 4-4s-1.79-4-4-4-4 1.79-4 4 1.79 4 4 4zm28-65c2.21 0 4-1.79 4-4s-1.79-4-4-4-4 1.79-4 4 1.79 4 4 4zm23-11c2.76 0 5-2.24 5-5s-2.24-5-5-5-5 2.24-5 5 2.24 5 5 5zm-6 60c2.21 0 4-1.79 4-4s-1.79-4-4-4-4 1.79-4 4 1.79 4 4 4zm29 22c2.76 0 5-2.24 5-5s-2.24-5-5-5-5 2.24-5 5 2.24 5 5 5zM32 63c2.76 0 5-2.24 5-5s-2.24-5-5-5-5 2.24-5 5 2.24 5 5 5zm57-13c2.76 0 5-2.24 5-5s-2.24-5-5-5-5 2.24-5 5 2.24 5 5 5zm-9-21c1.105 0 2-.895 2-2s-.895-2-2-2-2 .895-2 2 .895 2 2 2zM60 91c1.105 0 2-.895 2-2s-.895-2-2-2-2 .895-2 2 .895 2 2 2zM35 41c1.105 0 2-.895 2-2s-.895-2-2-2-2 .895-2 2 .895 2 2 2zM12 60c1.105 0 2-.895 2-2s-.895-2-2-2-2 .895-2 2 .895 2 2 2z' fill='%23333333' fill-opacity='0.1' fill-rule='evenodd'/%3E%3C/svg%3E"); 61 | } 62 | 63 | /* Japanese-style buttons */ 64 | .btn-japanese { 65 | background-color: var(--accent-primary); 66 | color: white; 67 | border: none; 68 | border-radius: 0.25rem; 69 | padding: 0.5rem 1.5rem; 70 | font-weight: 500; 71 | transition: all 0.3s ease; 72 | position: relative; 73 | overflow: hidden; 74 | } 75 | 76 | .btn-japanese:hover { 77 | background-color: var(--highlight); 78 | } 79 | 80 | .btn-japanese:before { 81 | content: ''; 82 | position: absolute; 83 | top: 0; 84 | left: 
0; 85 | width: 0; 86 | height: 100%; 87 | background-color: rgba(255, 255, 255, 0.2); 88 | transition: width 0.3s ease; 89 | } 90 | 91 | .btn-japanese:hover:before { 92 | width: 100%; 93 | } 94 | 95 | /* Japanese-style inputs */ 96 | .input-japanese { 97 | background-color: transparent; 98 | border: 1px solid var(--border-color); 99 | border-radius: 0.25rem; 100 | padding: 0.5rem 1rem; 101 | transition: all 0.3s ease; 102 | } 103 | 104 | .input-japanese:focus { 105 | border-color: var(--accent-primary); 106 | box-shadow: 0 0 0 2px rgba(155, 124, 185, 0.2); 107 | outline: none; 108 | } 109 | 110 | /* Japanese-style cards */ 111 | .card-japanese { 112 | background-color: var(--card-bg); 113 | border: 1px solid var(--border-color); 114 | border-radius: 0.5rem; 115 | overflow: hidden; 116 | transition: all 0.3s ease; 117 | } 118 | 119 | .card-japanese:hover { 120 | box-shadow: 0 4px 12px var(--shadow-color); 121 | } 122 | 123 | /* Line clamp utilities */ 124 | .line-clamp-1 { 125 | overflow: hidden; 126 | display: -webkit-box; 127 | -webkit-box-orient: vertical; 128 | -webkit-line-clamp: 1; 129 | } 130 | 131 | .line-clamp-2 { 132 | overflow: hidden; 133 | display: -webkit-box; 134 | -webkit-box-orient: vertical; 135 | -webkit-line-clamp: 2; 136 | } 137 | 138 | .line-clamp-3 { 139 | overflow: hidden; 140 | display: -webkit-box; 141 | -webkit-box-orient: vertical; 142 | -webkit-line-clamp: 3; 143 | } 144 | -------------------------------------------------------------------------------- /src/app/layout.tsx: -------------------------------------------------------------------------------- 1 | import type { Metadata } from "next"; 2 | import { Noto_Sans_JP, Noto_Serif_JP, Geist_Mono } from "next/font/google"; 3 | import "./globals.css"; 4 | import { ThemeProvider } from "next-themes"; 5 | import { LanguageProvider } from "@/contexts/LanguageContext"; 6 | 7 | // Japanese-friendly fonts 8 | const notoSansJP = Noto_Sans_JP({ 9 | variable: "--font-geist-sans", 10 | subsets: 
["latin"], 11 | weight: ["400", "500", "700"], 12 | display: "swap", 13 | }); 14 | 15 | const notoSerifJP = Noto_Serif_JP({ 16 | variable: "--font-serif-jp", 17 | subsets: ["latin"], 18 | weight: ["400", "500", "700"], 19 | display: "swap", 20 | }); 21 | 22 | const geistMono = Geist_Mono({ 23 | variable: "--font-geist-mono", 24 | subsets: ["latin"], 25 | }); 26 | 27 | export const metadata: Metadata = { 28 | title: "Deepwiki Open Source | Sheing Ng", 29 | description: "Created by Sheing Ng", 30 | }; 31 | 32 | export default function RootLayout({ 33 | children 34 | }: Readonly<{ 35 | children: React.ReactNode; 36 | }>) { 37 | return ( 38 | 39 | 42 | 43 | 44 | {children} 45 | 46 | 47 | 48 | 49 | ); 50 | } 51 | -------------------------------------------------------------------------------- /src/app/wiki/projects/page.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import React from 'react'; 4 | import ProcessedProjects from '@/components/ProcessedProjects'; 5 | import { useLanguage } from '@/contexts/LanguageContext'; 6 | 7 | export default function WikiProjectsPage() { 8 | const { messages } = useLanguage(); 9 | 10 | return ( 11 |
12 | 17 |
18 | ); 19 | } -------------------------------------------------------------------------------- /src/components/Markdown.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactMarkdown from 'react-markdown'; 3 | import remarkGfm from 'remark-gfm'; 4 | import rehypeRaw from 'rehype-raw'; 5 | import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter'; 6 | import { tomorrow } from 'react-syntax-highlighter/dist/cjs/styles/prism'; 7 | import Mermaid from './Mermaid'; 8 | 9 | interface MarkdownProps { 10 | content: string; 11 | } 12 | 13 | const Markdown: React.FC = ({ content }) => { 14 | // Define markdown components 15 | const MarkdownComponents: React.ComponentProps['components'] = { 16 | p({ children, ...props }: { children?: React.ReactNode }) { 17 | return

{children}

; 18 | }, 19 | h1({ children, ...props }: { children?: React.ReactNode }) { 20 | return

{children}

; 21 | }, 22 | h2({ children, ...props }: { children?: React.ReactNode }) { 23 | // Special styling for ReAct headings 24 | if (children && typeof children === 'string') { 25 | const text = children.toString(); 26 | if (text.includes('Thought') || text.includes('Action') || text.includes('Observation') || text.includes('Answer')) { 27 | return ( 28 |

38 | {children} 39 |

40 | ); 41 | } 42 | } 43 | return

{children}

; 44 | }, 45 | h3({ children, ...props }: { children?: React.ReactNode }) { 46 | return

{children}

; 47 | }, 48 | h4({ children, ...props }: { children?: React.ReactNode }) { 49 | return

{children}

; 50 | }, 51 | ul({ children, ...props }: { children?: React.ReactNode }) { 52 | return
    {children}
; 53 | }, 54 | ol({ children, ...props }: { children?: React.ReactNode }) { 55 | return
    {children}
; 56 | }, 57 | li({ children, ...props }: { children?: React.ReactNode }) { 58 | return
  • {children}
  • ; 59 | }, 60 | a({ children, href, ...props }: { children?: React.ReactNode; href?: string }) { 61 | return ( 62 | 69 | {children} 70 | 71 | ); 72 | }, 73 | blockquote({ children, ...props }: { children?: React.ReactNode }) { 74 | return ( 75 |
    79 | {children} 80 |
    81 | ); 82 | }, 83 | table({ children, ...props }: { children?: React.ReactNode }) { 84 | return ( 85 |
    86 | 87 | {children} 88 |
    89 |
    90 | ); 91 | }, 92 | thead({ children, ...props }: { children?: React.ReactNode }) { 93 | return {children}; 94 | }, 95 | tbody({ children, ...props }: { children?: React.ReactNode }) { 96 | return {children}; 97 | }, 98 | tr({ children, ...props }: { children?: React.ReactNode }) { 99 | return {children}; 100 | }, 101 | th({ children, ...props }: { children?: React.ReactNode }) { 102 | return ( 103 | 107 | {children} 108 | 109 | ); 110 | }, 111 | td({ children, ...props }: { children?: React.ReactNode }) { 112 | return {children}; 113 | }, 114 | code(props: { 115 | inline?: boolean; 116 | className?: string; 117 | children?: React.ReactNode; 118 | // eslint-disable-next-line @typescript-eslint/no-explicit-any 119 | [key: string]: any; // Using any here as it's required for ReactMarkdown components 120 | }) { 121 | const { inline, className, children, ...otherProps } = props; 122 | const match = /language-(\w+)/.exec(className || ''); 123 | const codeContent = children ? String(children).replace(/\n$/, '') : ''; 124 | 125 | // Handle Mermaid diagrams 126 | if (!inline && match && match[1] === 'mermaid') { 127 | return ( 128 |
    129 | 134 |
    135 | ); 136 | } 137 | 138 | // Handle code blocks 139 | if (!inline && match) { 140 | return ( 141 |
    142 |
    143 | {match[1]} 144 | 166 |
    167 | 177 | {codeContent} 178 | 179 |
    180 | ); 181 | } 182 | 183 | // Handle inline code 184 | return ( 185 | 189 | {children} 190 | 191 | ); 192 | }, 193 | }; 194 | 195 | return ( 196 |
    197 | 202 | {content} 203 | 204 |
    205 | ); 206 | }; 207 | 208 | export default Markdown; -------------------------------------------------------------------------------- /src/components/ModelSelectionModal.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import React, { useEffect, useState } from 'react'; 4 | import { useLanguage } from '@/contexts/LanguageContext'; 5 | import UserSelector from './UserSelector'; 6 | import WikiTypeSelector from './WikiTypeSelector'; 7 | 8 | interface ModelSelectionModalProps { 9 | isOpen: boolean; 10 | onClose: () => void; 11 | provider: string; 12 | setProvider: (value: string) => void; 13 | model: string; 14 | setModel: (value: string) => void; 15 | isCustomModel: boolean; 16 | setIsCustomModel: (value: boolean) => void; 17 | customModel: string; 18 | setCustomModel: (value: string) => void; 19 | onApply: () => void; 20 | 21 | // Wiki type options 22 | isComprehensiveView: boolean; 23 | setIsComprehensiveView: (value: boolean) => void; 24 | 25 | // File filter options - optional 26 | excludedDirs?: string; 27 | setExcludedDirs?: (value: string) => void; 28 | excludedFiles?: string; 29 | setExcludedFiles?: (value: string) => void; 30 | includedDirs?: string; 31 | setIncludedDirs?: (value: string) => void; 32 | includedFiles?: string; 33 | setIncludedFiles?: (value: string) => void; 34 | showFileFilters?: boolean; 35 | showWikiType: boolean; 36 | } 37 | 38 | export default function ModelSelectionModal({ 39 | isOpen, 40 | onClose, 41 | provider, 42 | setProvider, 43 | model, 44 | setModel, 45 | isCustomModel, 46 | setIsCustomModel, 47 | customModel, 48 | setCustomModel, 49 | onApply, 50 | isComprehensiveView, 51 | setIsComprehensiveView, 52 | excludedDirs = '', 53 | setExcludedDirs, 54 | excludedFiles = '', 55 | setExcludedFiles, 56 | includedDirs = '', 57 | setIncludedDirs, 58 | includedFiles = '', 59 | setIncludedFiles, 60 | showFileFilters = false, 61 | showWikiType = true, 62 | }: 
ModelSelectionModalProps) { 63 | const { messages: t } = useLanguage(); 64 | 65 | // Local state for form values (to only apply changes when the user clicks "Submit") 66 | const [localProvider, setLocalProvider] = useState(provider); 67 | const [localModel, setLocalModel] = useState(model); 68 | const [localIsCustomModel, setLocalIsCustomModel] = useState(isCustomModel); 69 | const [localCustomModel, setLocalCustomModel] = useState(customModel); 70 | const [localIsComprehensiveView, setLocalIsComprehensiveView] = useState(isComprehensiveView); 71 | const [localExcludedDirs, setLocalExcludedDirs] = useState(excludedDirs); 72 | const [localExcludedFiles, setLocalExcludedFiles] = useState(excludedFiles); 73 | const [localIncludedDirs, setLocalIncludedDirs] = useState(includedDirs); 74 | const [localIncludedFiles, setLocalIncludedFiles] = useState(includedFiles); 75 | 76 | // Reset local state when modal is opened 77 | useEffect(() => { 78 | if (isOpen) { 79 | setLocalProvider(provider); 80 | setLocalModel(model); 81 | setLocalIsCustomModel(isCustomModel); 82 | setLocalCustomModel(customModel); 83 | setLocalIsComprehensiveView(isComprehensiveView); 84 | setLocalExcludedDirs(excludedDirs); 85 | setLocalExcludedFiles(excludedFiles); 86 | setLocalIncludedDirs(includedDirs); 87 | setLocalIncludedFiles(includedFiles); 88 | } 89 | }, [isOpen, provider, model, isCustomModel, customModel, isComprehensiveView, excludedDirs, excludedFiles, includedDirs, includedFiles]); 90 | 91 | // Handler for applying changes 92 | const handleApply = () => { 93 | setProvider(localProvider); 94 | setModel(localModel); 95 | setIsCustomModel(localIsCustomModel); 96 | setCustomModel(localCustomModel); 97 | setIsComprehensiveView(localIsComprehensiveView); 98 | if (setExcludedDirs) setExcludedDirs(localExcludedDirs); 99 | if (setExcludedFiles) setExcludedFiles(localExcludedFiles); 100 | if (setIncludedDirs) setIncludedDirs(localIncludedDirs); 101 | if (setIncludedFiles) 
setIncludedFiles(localIncludedFiles); 102 | onApply(); 103 | onClose(); 104 | }; 105 | 106 | if (!isOpen) return null; 107 | 108 | return ( 109 |
    110 |
    111 |
    112 | {/* Modal header with close button */} 113 |
    114 |

    115 | {t.form?.modelSelection || 'Model Selection'} 116 |

    117 | 126 |
    127 | 128 | {/* Modal body */} 129 |
    130 | {/* Wiki Type Selector */} 131 | { 132 | showWikiType && 136 | } 137 | 138 | {/* Divider */} 139 |
    140 | 141 | {/* Model Selector */} 142 | setLocalExcludedDirs(value) : undefined} 154 | excludedFiles={localExcludedFiles} 155 | setExcludedFiles={showFileFilters ? (value: string) => setLocalExcludedFiles(value) : undefined} 156 | includedDirs={localIncludedDirs} 157 | setIncludedDirs={showFileFilters ? (value: string) => setLocalIncludedDirs(value) : undefined} 158 | includedFiles={localIncludedFiles} 159 | setIncludedFiles={showFileFilters ? (value: string) => setLocalIncludedFiles(value) : undefined} 160 | /> 161 |
    162 | 163 | {/* Modal footer */} 164 |
    165 | 172 | 179 |
    180 |
    181 |
    182 |
    183 | ); 184 | } 185 | -------------------------------------------------------------------------------- /src/components/ProcessedProjects.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import React, { useState, useEffect, useMemo } from 'react'; 4 | import Link from 'next/link'; 5 | import { FaSearch, FaTimes, FaTh, FaList } from 'react-icons/fa'; 6 | 7 | // Interface should match the structure from the API 8 | interface ProcessedProject { 9 | id: string; 10 | owner: string; 11 | repo: string; 12 | name: string; 13 | repo_type: string; 14 | submittedAt: number; 15 | language: string; 16 | } 17 | 18 | interface ProcessedProjectsProps { 19 | showHeader?: boolean; 20 | maxItems?: number; 21 | className?: string; 22 | messages?: any; // Translation messages 23 | } 24 | 25 | export default function ProcessedProjects({ 26 | showHeader = true, 27 | maxItems, 28 | className = "", 29 | messages 30 | }: ProcessedProjectsProps) { 31 | const [projects, setProjects] = useState([]); 32 | const [isLoading, setIsLoading] = useState(true); 33 | const [error, setError] = useState(null); 34 | const [searchQuery, setSearchQuery] = useState(''); 35 | const [viewMode, setViewMode] = useState<'card' | 'list'>('card'); 36 | 37 | // Default messages fallback 38 | const defaultMessages = { 39 | title: 'Processed Wiki Projects', 40 | searchPlaceholder: 'Search projects by name, owner, or repository...', 41 | noProjects: 'No projects found in the server cache. 
The cache might be empty or the server encountered an issue.', 42 | noSearchResults: 'No projects match your search criteria.', 43 | processedOn: 'Processed on:', 44 | loadingProjects: 'Loading projects...', 45 | errorLoading: 'Error loading projects:', 46 | backToHome: 'Back to Home' 47 | }; 48 | 49 | const t = (key: string) => { 50 | if (messages?.projects?.[key]) { 51 | return messages.projects[key]; 52 | } 53 | return defaultMessages[key as keyof typeof defaultMessages] || key; 54 | }; 55 | 56 | useEffect(() => { 57 | const fetchProjects = async () => { 58 | setIsLoading(true); 59 | setError(null); 60 | try { 61 | const response = await fetch('/api/wiki/projects'); 62 | if (!response.ok) { 63 | throw new Error(`Failed to fetch projects: ${response.statusText}`); 64 | } 65 | const data = await response.json(); 66 | if (data.error) { 67 | throw new Error(data.error); 68 | } 69 | setProjects(data as ProcessedProject[]); 70 | } catch (e: unknown) { 71 | console.error("Failed to load projects from API:", e); 72 | const message = e instanceof Error ? e.message : "An unknown error occurred."; 73 | setError(message); 74 | setProjects([]); 75 | } finally { 76 | setIsLoading(false); 77 | } 78 | }; 79 | 80 | fetchProjects(); 81 | }, []); 82 | 83 | // Filter projects based on search query 84 | const filteredProjects = useMemo(() => { 85 | if (!searchQuery.trim()) { 86 | return maxItems ? projects.slice(0, maxItems) : projects; 87 | } 88 | 89 | const query = searchQuery.toLowerCase(); 90 | const filtered = projects.filter(project => 91 | project.name.toLowerCase().includes(query) || 92 | project.owner.toLowerCase().includes(query) || 93 | project.repo.toLowerCase().includes(query) || 94 | project.repo_type.toLowerCase().includes(query) 95 | ); 96 | 97 | return maxItems ? filtered.slice(0, maxItems) : filtered; 98 | }, [projects, searchQuery, maxItems]); 99 | 100 | const clearSearch = () => { 101 | setSearchQuery(''); 102 | }; 103 | 104 | return ( 105 |
    106 | {showHeader && ( 107 |
    108 |
    109 |

    {t('title')}

    110 | 111 | {t('backToHome')} 112 | 113 |
    114 |
    115 | )} 116 | 117 | {/* Search Bar and View Toggle */} 118 |
    119 | {/* Search Bar */} 120 |
    121 | setSearchQuery(e.target.value)} 125 | placeholder={t('searchPlaceholder')} 126 | className="input-japanese block w-full pl-4 pr-12 py-2.5 border border-[var(--border-color)] rounded-lg bg-[var(--background)] text-[var(--foreground)] placeholder:text-[var(--muted)] focus:outline-none focus:border-[var(--accent-primary)] focus:ring-1 focus:ring-[var(--accent-primary)]" 127 | /> 128 | {searchQuery && ( 129 | 135 | )} 136 |
    137 | 138 | {/* View Toggle */} 139 |
    140 | 151 | 162 |
    163 |
    164 | 165 | {isLoading &&

    {t('loadingProjects')}

    } 166 | {error &&

    {t('errorLoading')} {error}

    } 167 | 168 | {!isLoading && !error && filteredProjects.length > 0 && ( 169 |
    170 | {filteredProjects.map((project) => ( 171 | viewMode === 'card' ? ( 172 |
    173 | 177 |

    178 | {project.name} 179 |

    180 |
    181 | 182 | {project.repo_type} 183 | 184 | 185 | {project.language} 186 | 187 |
    188 |

    189 | {t('processedOn')} {new Date(project.submittedAt).toLocaleDateString()} 190 |

    191 | 192 |
    193 | ) : ( 194 |
    195 | 199 |
    200 |

    201 | {project.name} 202 |

    203 |

    204 | {t('processedOn')} {new Date(project.submittedAt).toLocaleDateString()} • {project.repo_type} • {project.language} 205 |

    206 |
    207 |
    208 | 209 | {project.repo_type} 210 | 211 |
    212 | 213 |
    214 | ) 215 | ))} 216 |
    217 | )} 218 | 219 | {!isLoading && !error && projects.length > 0 && filteredProjects.length === 0 && searchQuery && ( 220 |

    {t('noSearchResults')}

    221 | )} 222 | 223 | {!isLoading && !error && projects.length === 0 && ( 224 |

    {t('noProjects')}

    225 | )} 226 |
    227 | ); 228 | } 229 | -------------------------------------------------------------------------------- /src/components/WikiTreeView.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import React, { useState } from 'react'; 4 | import { FaChevronRight, FaChevronDown } from 'react-icons/fa'; 5 | 6 | // Import interfaces from the page component 7 | interface WikiPage { 8 | id: string; 9 | title: string; 10 | content: string; 11 | filePaths: string[]; 12 | importance: 'high' | 'medium' | 'low'; 13 | relatedPages: string[]; 14 | parentId?: string; 15 | isSection?: boolean; 16 | children?: string[]; 17 | } 18 | 19 | interface WikiSection { 20 | id: string; 21 | title: string; 22 | pages: string[]; 23 | subsections?: string[]; 24 | } 25 | 26 | interface WikiStructure { 27 | id: string; 28 | title: string; 29 | description: string; 30 | pages: WikiPage[]; 31 | sections: WikiSection[]; 32 | rootSections: string[]; 33 | } 34 | 35 | interface WikiTreeViewProps { 36 | wikiStructure: WikiStructure; 37 | currentPageId: string | undefined; 38 | onPageSelect: (pageId: string) => void; 39 | messages?: { 40 | pages?: string; 41 | [key: string]: string | undefined; 42 | }; 43 | } 44 | 45 | const WikiTreeView: React.FC = ({ 46 | wikiStructure, 47 | currentPageId, 48 | onPageSelect, 49 | }) => { 50 | const [expandedSections, setExpandedSections] = useState>( 51 | new Set(wikiStructure.rootSections) 52 | ); 53 | 54 | const toggleSection = (sectionId: string, event: React.MouseEvent) => { 55 | event.stopPropagation(); 56 | setExpandedSections(prev => { 57 | const newSet = new Set(prev); 58 | if (newSet.has(sectionId)) { 59 | newSet.delete(sectionId); 60 | } else { 61 | newSet.add(sectionId); 62 | } 63 | return newSet; 64 | }); 65 | }; 66 | 67 | const renderSection = (sectionId: string, level = 0) => { 68 | const section = wikiStructure.sections.find(s => s.id === sectionId); 69 | if (!section) return null; 70 | 71 | const 
isExpanded = expandedSections.has(sectionId); 72 | 73 | return ( 74 |
    75 | 88 | 89 | {isExpanded && ( 90 |
    0 ? 'pl-2 border-l border-[var(--border-color)]/30' : ''}`}> 91 | {/* Render pages in this section */} 92 | {section.pages.map(pageId => { 93 | const page = wikiStructure.pages.find(p => p.id === pageId); 94 | if (!page) return null; 95 | 96 | return ( 97 | 119 | ); 120 | })} 121 | 122 | {/* Render subsections recursively */} 123 | {section.subsections?.map(subsectionId => 124 | renderSection(subsectionId, level + 1) 125 | )} 126 |
    127 | )} 128 |
    129 | ); 130 | }; 131 | 132 | // If there are no sections defined yet, or if sections/rootSections are empty arrays, fall back to the flat list view 133 | if (!wikiStructure.sections || wikiStructure.sections.length === 0 || !wikiStructure.rootSections || wikiStructure.rootSections.length === 0) { 134 | console.log("WikiTreeView: Falling back to flat list view due to missing or empty sections/rootSections"); 135 | return ( 136 |
      137 | {wikiStructure.pages.map(page => ( 138 |
    • 139 | 160 |
    • 161 | ))} 162 |
    163 | ); 164 | } 165 | 166 | // Log information about the sections for debugging 167 | console.log("WikiTreeView: Rendering tree view with sections:", wikiStructure.sections); 168 | console.log("WikiTreeView: Root sections:", wikiStructure.rootSections); 169 | 170 | return ( 171 |
    172 | {wikiStructure.rootSections.map(sectionId => { 173 | const section = wikiStructure.sections.find(s => s.id === sectionId); 174 | if (!section) { 175 | console.warn(`WikiTreeView: Could not find section with id ${sectionId}`); 176 | return null; 177 | } 178 | return renderSection(sectionId); 179 | })} 180 |
    181 | ); 182 | }; 183 | 184 | export default WikiTreeView; -------------------------------------------------------------------------------- /src/components/WikiTypeSelector.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import React from 'react'; 4 | import { useLanguage } from '@/contexts/LanguageContext'; 5 | import { FaBookOpen, FaList } from 'react-icons/fa'; 6 | 7 | interface WikiTypeSelectorProps { 8 | isComprehensiveView: boolean; 9 | setIsComprehensiveView: (value: boolean) => void; 10 | } 11 | 12 | const WikiTypeSelector: React.FC = ({ 13 | isComprehensiveView, 14 | setIsComprehensiveView, 15 | }) => { 16 | const { messages: t } = useLanguage(); 17 | 18 | return ( 19 |
    20 | 23 |
    24 | 48 | 49 | 73 |
    74 |
    75 | ); 76 | }; 77 | 78 | export default WikiTypeSelector; 79 | -------------------------------------------------------------------------------- /src/components/theme-toggle.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import { useTheme } from "next-themes"; 4 | 5 | export default function ThemeToggle() { 6 | const { theme, setTheme } = useTheme(); 7 | 8 | return ( 9 | 48 | ); 49 | } 50 | -------------------------------------------------------------------------------- /src/contexts/LanguageContext.tsx: -------------------------------------------------------------------------------- 1 | /* eslint-disable @typescript-eslint/no-explicit-any */ 2 | 'use client'; 3 | 4 | import React, { createContext, useContext, useState, useEffect, ReactNode } from 'react'; 5 | import { locales } from '@/i18n'; 6 | 7 | type Messages = Record; 8 | type LanguageContextType = { 9 | language: string; 10 | setLanguage: (lang: string) => void; 11 | messages: Messages; 12 | }; 13 | 14 | const LanguageContext = createContext(undefined); 15 | 16 | export function LanguageProvider({ children }: { children: ReactNode }) { 17 | // Initialize with 'en' or get from localStorage if available 18 | const [language, setLanguageState] = useState('en'); 19 | const [messages, setMessages] = useState({}); 20 | const [isLoading, setIsLoading] = useState(true); 21 | 22 | // Helper function to detect browser language 23 | const detectBrowserLanguage = (): string => { 24 | try { 25 | if (typeof window === 'undefined' || typeof navigator === 'undefined') { 26 | return 'en'; // Default to English on server-side 27 | } 28 | 29 | // Get browser language (navigator.language returns full locale like 'en-US') 30 | const browserLang = navigator.language || (navigator as any).userLanguage || ''; 31 | console.log('Detected browser language:', browserLang); 32 | 33 | if (!browserLang) { 34 | return 'en'; // Default to English if browser language is 
not available 35 | } 36 | 37 | // Extract the language code (first 2 characters) 38 | const langCode = browserLang.split('-')[0].toLowerCase(); 39 | console.log('Extracted language code:', langCode); 40 | 41 | // Check if the detected language is supported 42 | if (locales.includes(langCode as any)) { 43 | console.log('Language supported, using:', langCode); 44 | return langCode; 45 | } 46 | 47 | // Special case for Chinese variants 48 | if (langCode === 'zh') { 49 | console.log('Chinese language detected'); 50 | // Check for traditional Chinese variants 51 | if (browserLang.includes('TW') || browserLang.includes('HK')) { 52 | console.log('Traditional Chinese variant detected'); 53 | return 'zh'; // Use Mandarin for traditional Chinese 54 | } 55 | return 'zh'; // Use Mandarin for simplified Chinese 56 | } 57 | 58 | console.log('Language not supported, defaulting to English'); 59 | return 'en'; // Default to English if not supported 60 | } catch (error) { 61 | console.error('Error detecting browser language:', error); 62 | return 'en'; // Default to English on error 63 | } 64 | }; 65 | 66 | // Load language preference from localStorage on mount 67 | useEffect(() => { 68 | const loadLanguage = async () => { 69 | try { 70 | // Only access localStorage in the browser 71 | let storedLanguage; 72 | if (typeof window !== 'undefined') { 73 | storedLanguage = localStorage.getItem('language'); 74 | 75 | // If no language is stored, detect browser language 76 | if (!storedLanguage) { 77 | console.log('No language in localStorage, detecting browser language'); 78 | storedLanguage = detectBrowserLanguage(); 79 | 80 | // Store the detected language 81 | localStorage.setItem('language', storedLanguage); 82 | } 83 | } else { 84 | console.log('Running on server-side, using default language'); 85 | storedLanguage = 'en'; 86 | } 87 | 88 | const validLanguage = locales.includes(storedLanguage as any) ? 
storedLanguage : 'en'; 89 | 90 | // Load messages for the language 91 | const langMessages = (await import(`../messages/${validLanguage}.json`)).default; 92 | 93 | setLanguageState(validLanguage); 94 | setMessages(langMessages); 95 | 96 | // Update HTML lang attribute (only in browser) 97 | if (typeof document !== 'undefined') { 98 | document.documentElement.lang = validLanguage; 99 | } 100 | } catch (error) { 101 | console.error('Failed to load language:', error); 102 | // Fallback to English 103 | console.log('Falling back to English due to error'); 104 | const enMessages = (await import('../messages/en.json')).default; 105 | setMessages(enMessages); 106 | } finally { 107 | setIsLoading(false); 108 | } 109 | }; 110 | 111 | loadLanguage(); 112 | }, []); 113 | 114 | // Update language and load new messages 115 | const setLanguage = async (lang: string) => { 116 | try { 117 | console.log('Setting language to:', lang); 118 | const validLanguage = locales.includes(lang as any) ? lang : 'en'; 119 | 120 | // Load messages for the new language 121 | const langMessages = (await import(`../messages/${validLanguage}.json`)).default; 122 | 123 | setLanguageState(validLanguage); 124 | setMessages(langMessages); 125 | 126 | // Store in localStorage (only in browser) 127 | if (typeof window !== 'undefined') { 128 | localStorage.setItem('language', validLanguage); 129 | } 130 | 131 | // Update HTML lang attribute (only in browser) 132 | if (typeof document !== 'undefined') { 133 | document.documentElement.lang = validLanguage; 134 | } 135 | } catch (error) { 136 | console.error('Failed to set language:', error); 137 | } 138 | }; 139 | 140 | if (isLoading) { 141 | return ( 142 |
    143 |
    144 |
    145 |

    Loading...

    146 |
    147 |
    148 | ); 149 | } 150 | 151 | return ( 152 | 153 | {children} 154 | 155 | ); 156 | } 157 | 158 | export function useLanguage() { 159 | const context = useContext(LanguageContext); 160 | if (context === undefined) { 161 | throw new Error('useLanguage must be used within a LanguageProvider'); 162 | } 163 | return context; 164 | } 165 | -------------------------------------------------------------------------------- /src/hooks/useProcessedProjects.ts: -------------------------------------------------------------------------------- 1 | import { useState, useEffect } from 'react'; 2 | 3 | interface ProcessedProject { 4 | id: string; 5 | owner: string; 6 | repo: string; 7 | name: string; 8 | repo_type: string; 9 | submittedAt: number; 10 | language: string; 11 | } 12 | 13 | export function useProcessedProjects() { 14 | const [projects, setProjects] = useState([]); 15 | const [isLoading, setIsLoading] = useState(true); 16 | const [error, setError] = useState(null); 17 | 18 | useEffect(() => { 19 | const fetchProjects = async () => { 20 | setIsLoading(true); 21 | setError(null); 22 | try { 23 | const response = await fetch('/api/wiki/projects'); 24 | if (!response.ok) { 25 | throw new Error(`Failed to fetch projects: ${response.statusText}`); 26 | } 27 | const data = await response.json(); 28 | if (data.error) { 29 | throw new Error(data.error); 30 | } 31 | setProjects(data as ProcessedProject[]); 32 | } catch (e: unknown) { 33 | console.error("Failed to load projects from API:", e); 34 | const message = e instanceof Error ? 
e.message : "An unknown error occurred."; 35 | setError(message); 36 | setProjects([]); 37 | } finally { 38 | setIsLoading(false); 39 | } 40 | }; 41 | 42 | fetchProjects(); 43 | }, []); 44 | 45 | return { projects, isLoading, error }; 46 | } 47 | -------------------------------------------------------------------------------- /src/i18n.ts: -------------------------------------------------------------------------------- 1 | import { getRequestConfig } from 'next-intl/server'; 2 | 3 | // Define the list of supported locales 4 | export const locales = ['en', 'ja', 'zh', 'es', 'kr', 'vi']; 5 | 6 | export default getRequestConfig(async ({ locale }) => { 7 | // Use a default locale if the requested one isn't supported 8 | const safeLocale = locales.includes(locale as string) ? locale : 'en'; 9 | 10 | return { 11 | locale: safeLocale as string, 12 | messages: (await import(`./messages/${safeLocale}.json`)).default 13 | }; 14 | }); 15 | -------------------------------------------------------------------------------- /src/messages/en.json: -------------------------------------------------------------------------------- 1 | { 2 | "common": { 3 | "appName": "DeepWiki-Open", 4 | "tagline": "AI-powered documentation", 5 | "generateWiki": "Generate Wiki", 6 | "processing": "Processing...", 7 | "error": "Error", 8 | "submit": "Submit", 9 | "cancel": "Cancel", 10 | "close": "Close", 11 | "loading": "Loading..." 12 | }, 13 | "loading": { 14 | "initializing": "Initializing wiki generation...", 15 | "fetchingStructure": "Fetching repository structure...", 16 | "determiningStructure": "Determining wiki structure...", 17 | "clearingCache": "Clearing server cache...", 18 | "preparingDownload": "Please wait while we prepare your download..." 
19 | }, 20 | "home": { 21 | "welcome": "Welcome to DeepWiki-Open", 22 | "welcomeTagline": "AI-powered documentation for your code repositories", 23 | "description": "Generate comprehensive documentation from GitHub, GitLab, or Bitbucket repositories with just a few clicks.", 24 | "quickStart": "Quick Start", 25 | "enterRepoUrl": "Enter a repository URL in one of these formats:", 26 | "advancedVisualization": "Advanced Visualization with Mermaid Diagrams", 27 | "diagramDescription": "DeepWiki automatically generates interactive diagrams to help you understand code structure and relationships:", 28 | "flowDiagram": "Flow Diagram", 29 | "sequenceDiagram": "Sequence Diagram" 30 | }, 31 | "form": { 32 | "repository": "Repository", 33 | "configureWiki": "Configure Wiki", 34 | "repoPlaceholder": "owner/repo or GitHub/GitLab/Bitbucket URL", 35 | "wikiLanguage": "Wiki Language", 36 | "modelOptions": "Model Options", 37 | "modelProvider": "Model Provider", 38 | "modelSelection": "Model Selection", 39 | "wikiType": "Wiki Type", 40 | "comprehensive": "Comprehensive", 41 | "concise": "Concise", 42 | "comprehensiveDescription": "Detailed wiki with structured sections and more pages", 43 | "conciseDescription": "Simplified wiki with fewer pages and essential information", 44 | "providerGoogle": "Google", 45 | "providerOpenAI": "OpenAI", 46 | "providerOpenRouter": "OpenRouter", 47 | "providerOllama": "Ollama (Local)", 48 | "localOllama": "Local Ollama Model", 49 | "experimental": "Experimental", 50 | "useOpenRouter": "Use OpenRouter API", 51 | "openRouterModel": "OpenRouter Model", 52 | "useOpenai": "Use Openai API", 53 | "openaiModel": "Openai Model", 54 | "useCustomModel": "Use custom model", 55 | "customModelPlaceholder": "Enter custom model name", 56 | "addTokens": "+ Add access tokens for private repositories", 57 | "hideTokens": "- Hide access tokens", 58 | "accessToken": "Access Token for Private Repositories", 59 | "selectPlatform": "Select Platform", 60 | 
"personalAccessToken": "{platform} Personal Access Token", 61 | "tokenPlaceholder": "Enter your {platform} token", 62 | "tokenSecurityNote": "Token is stored in memory only and never persisted.", 63 | "defaultFiltersInfo": "Default filters include common directories like node_modules, .git, and common build artifact files.", 64 | "fileFilterTitle": "File Filter Configuration", 65 | "advancedOptions": "Advanced Options", 66 | "viewDefaults": "View Default Filters", 67 | "showFilters": "Show Filters", 68 | "hideFilters": "Hide Filters", 69 | "excludedDirs": "Directories to Exclude", 70 | "excludedDirsHelp": "One directory path per line. Paths starting with ./ are relative to repository root.", 71 | "enterExcludedDirs": "Enter excluded directories, one per line...", 72 | "excludedFiles": "Files to Exclude", 73 | "excludedFilesHelp": "One filename per line. Wildcards (*) are supported.", 74 | "enterExcludedFiles": "Enter excluded files, one per line...", 75 | "defaultFilters": "Default Excluded Files & Directories", 76 | "directories": "Directories", 77 | "files": "Files", 78 | "scrollToViewMore": "Scroll to view more", 79 | "changeModel": "Change Model", 80 | "defaultNote": "These defaults are already applied. 
Add your custom exclusions above.", 81 | "hideDefault": "Hide Default", 82 | "viewDefault": "View Default", 83 | "includedDirs": "Included Directories", 84 | "includedFiles": "Included Files", 85 | "enterIncludedDirs": "Enter included directories, one per line...", 86 | "enterIncludedFiles": "Enter included files, one per line...", 87 | "filterMode": "Filter Mode", 88 | "excludeMode": "Exclude Paths", 89 | "includeMode": "Include Only Paths", 90 | "excludeModeDescription": "Specify paths to exclude from processing (default behavior)", 91 | "includeModeDescription": "Specify only the paths to include, ignoring all others" 92 | }, 93 | "footer": { 94 | "copyright": "DeepWiki - AI-powered documentation for code repositories" 95 | }, 96 | "ask": { 97 | "placeholder": "Ask a question about this repository...", 98 | "askButton": "Ask", 99 | "deepResearch": "Deep Research", 100 | "researchInProgress": "Research in progress...", 101 | "continueResearch": "Continue Research", 102 | "viewPlan": "View Plan", 103 | "viewUpdates": "View Updates", 104 | "viewConclusion": "View Conclusion" 105 | }, 106 | "repoPage": { 107 | "refreshWiki": "Refresh Wiki", 108 | "confirmRefresh": "Confirm Refresh", 109 | "cancel": "Cancel", 110 | "home": "Home", 111 | "errorTitle": "Error", 112 | "errorMessageDefault": "Please check that your repository exists and is public. 
Valid formats are \"owner/repo\", \"https://github.com/owner/repo\", \"https://gitlab.com/owner/repo\", \"https://bitbucket.org/owner/repo\", or local folder paths like \"C:\\\\path\\\\to\\\\folder\" or \"/path/to/folder\".", 113 | "backToHome": "Back to Home", 114 | "exportWiki": "Export Wiki", 115 | "exportAsMarkdown": "Export as Markdown", 116 | "exportAsJson": "Export as JSON", 117 | "pages": "Pages", 118 | "relatedFiles": "Related Files:", 119 | "relatedPages": "Related Pages:", 120 | "selectPagePrompt": "Select a page from the navigation to view its content", 121 | "askAboutRepo": "Ask questions about this repository" 122 | }, 123 | "nav": { 124 | "wikiProjects": "Wiki Projects" 125 | }, 126 | "projects": { 127 | "title": "Processed Wiki Projects", 128 | "searchPlaceholder": "Search projects by name, owner, or repository...", 129 | "noProjects": "No projects found in the server cache. The cache might be empty or the server encountered an issue.", 130 | "noSearchResults": "No projects match your search criteria.", 131 | "processedOn": "Processed on:", 132 | "loadingProjects": "Loading projects...", 133 | "errorLoading": "Error loading projects:", 134 | "backToHome": "Back to Home", 135 | "browseExisting": "Browse Existing Projects", 136 | "existingProjects": "Existing Projects", 137 | "recentProjects": "Recent Projects" 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /src/messages/es.json: -------------------------------------------------------------------------------- 1 | { 2 | "common": { 3 | "appName": "DeepWiki-Open", 4 | "tagline": "Documentación impulsada por IA", 5 | "generateWiki": "Generar Wiki", 6 | "processing": "Procesando...", 7 | "error": "Error", 8 | "submit": "Enviar", 9 | "cancel": "Cancelar", 10 | "close": "Cerrar", 11 | "loading": "Cargando..." 
12 | }, 13 | "loading": { 14 | "initializing": "Inicializando generación de wiki...", 15 | "fetchingStructure": "Obteniendo estructura del repositorio...", 16 | "determiningStructure": "Determinando estructura del wiki...", 17 | "clearingCache": "Limpiando caché del servidor...", 18 | "preparingDownload": "Por favor espere mientras preparamos su descarga..." 19 | }, 20 | "home": { 21 | "welcome": "Bienvenido a DeepWiki", 22 | "welcomeTagline": "Documentación impulsada por IA para repositorios de código", 23 | "description": "Genera documentación completa de repositorios GitHub, GitLab o Bitbucket con solo unos clics.", 24 | "quickStart": "Inicio Rápido", 25 | "enterRepoUrl": "Ingresa una URL de repositorio en uno de estos formatos:", 26 | "advancedVisualization": "Visualización Avanzada con Diagramas Mermaid", 27 | "diagramDescription": "DeepWiki genera automáticamente diagramas interactivos para ayudarte a entender la estructura y relaciones del código:", 28 | "flowDiagram": "Diagrama de Flujo", 29 | "sequenceDiagram": "Diagrama de Secuencia" 30 | }, 31 | "form": { 32 | "repository": "Repositorio", 33 | "configureWiki": "Configurar Wiki", 34 | "repoPlaceholder": "propietario/repositorio o URL de GitHub/GitLab/Bitbucket", 35 | "wikiLanguage": "Idioma del Wiki", 36 | "modelOptions": "Opciones de Modelo", 37 | "modelProvider": "Proveedor de Modelo", 38 | "modelSelection": "Selección de Modelo", 39 | "wikiType": "Tipo de Wiki", 40 | "comprehensive": "Completo", 41 | "concise": "Conciso", 42 | "comprehensiveDescription": "Wiki detallado con secciones estructuradas y más páginas", 43 | "conciseDescription": "Wiki simplificado con menos páginas e información esencial", 44 | "providerGoogle": "Google", 45 | "providerOpenAI": "OpenAI", 46 | "providerOpenRouter": "OpenRouter", 47 | "providerOllama": "Ollama (Local)", 48 | "localOllama": "Modelo Ollama Local", 49 | "experimental": "Experimental", 50 | "useOpenRouter": "Usar API de OpenRouter", 51 | "openRouterModel": "Modelo 
OpenRouter", 52 | "useOpenai": "Usar API de Openai", 53 | "openaiModel": "Modelo Openai", 54 | "useCustomModel": "Usar modelo personalizado", 55 | "customModelPlaceholder": "Ingrese nombre de modelo personalizado", 56 | "addTokens": "+ Agregar tokens de acceso para repositorios privados", 57 | "hideTokens": "- Ocultar tokens de acceso", 58 | "accessToken": "Token de Acceso para Repositorios Privados", 59 | "selectPlatform": "Seleccionar Plataforma", 60 | "personalAccessToken": "Token de Acceso Personal de {platform}", 61 | "tokenPlaceholder": "Ingresa tu token de {platform}", 62 | "tokenSecurityNote": "El token solo se almacena en memoria y nunca se persiste.", 63 | "defaultFiltersInfo": "Los filtros predeterminados incluyen directorios comunes como node_modules, .git y archivos de artefactos de construcción comunes.", 64 | "fileFilterTitle": "Configuración de Filtros de Archivos", 65 | "advancedOptions": "Opciones Avanzadas", 66 | "viewDefaults": "Ver Filtros Predeterminados", 67 | "showFilters": "Mostrar Filtros", 68 | "hideFilters": "Ocultar Filtros", 69 | "excludedDirs": "Directorios a Excluir", 70 | "excludedDirsHelp": "Una ruta de directorio por línea. Las rutas que comienzan con ./ son relativas a la raíz del repositorio.", 71 | "enterExcludedDirs": "Ingrese directorios a excluir, uno por línea...", 72 | "excludedFiles": "Archivos a Excluir", 73 | "excludedFilesHelp": "Un nombre de archivo por línea. Se admiten comodines (*).", 74 | "enterExcludedFiles": "Ingrese archivos a excluir, uno por línea...", 75 | "defaultFilters": "Archivos y Directorios Excluidos por Defecto", 76 | "directories": "Directorios", 77 | "files": "Archivos", 78 | "scrollToViewMore": "Desplazar para ver más", 79 | "changeModel": "Cambiar Modelo", 80 | "defaultNote": "Estos valores predeterminados ya están aplicados. 
Agregue sus exclusiones personalizadas arriba.", 81 | "hideDefault": "Ocultar Predeterminados", 82 | "viewDefault": "Ver Predeterminados" 83 | }, 84 | "footer": { 85 | "copyright": "DeepWiki - Documentación impulsada por IA para repositorios de código" 86 | }, 87 | "ask": { 88 | "placeholder": "Haz una pregunta sobre este repositorio...", 89 | "askButton": "Preguntar", 90 | "deepResearch": "Investigación Profunda", 91 | "researchInProgress": "Investigación en progreso...", 92 | "continueResearch": "Continuar Investigación", 93 | "viewPlan": "Ver Plan", 94 | "viewUpdates": "Ver Actualizaciones", 95 | "viewConclusion": "Ver Conclusión" 96 | }, 97 | "repoPage": { 98 | "refreshWiki": "Actualizar Wiki", 99 | "confirmRefresh": "Confirmar Actualización", 100 | "cancel": "Cancelar", 101 | "home": "Inicio", 102 | "errorTitle": "Error", 103 | "errorMessageDefault": "Por favor, compruebe que su repositorio existe y es público. Los formatos válidos son \"owner/repo\", \"https://github.com/owner/repo\", \"https://gitlab.com/owner/repo\", \"https://bitbucket.org/owner/repo\", o rutas de carpetas locales como \"C:\\\\path\\\\to\\\\folder\" o \"/path/to/folder\".", 104 | "backToHome": "Volver al Inicio", 105 | "exportWiki": "Exportar Wiki", 106 | "exportAsMarkdown": "Exportar como Markdown", 107 | "exportAsJson": "Exportar como JSON", 108 | "pages": "Páginas", 109 | "relatedFiles": "Archivos Relacionados:", 110 | "relatedPages": "Páginas Relacionadas:", 111 | "selectPagePrompt": "Seleccione una página de la navegación para ver su contenido", 112 | "askAboutRepo": "Hacer preguntas sobre este repositorio" 113 | }, 114 | "nav": { 115 | "wikiProjects": "Lista de Proyectos" 116 | }, 117 | "projects": { 118 | "title": "Proyectos Wiki Procesados", 119 | "searchPlaceholder": "Buscar proyectos por nombre, propietario o repositorio...", 120 | "noProjects": "No se encontraron proyectos en la caché del servidor. 
La caché podría estar vacía o el servidor encontró un problema.", 121 | "noSearchResults": "Ningún proyecto coincide con sus criterios de búsqueda.", 122 | "processedOn": "Procesado el:", 123 | "loadingProjects": "Cargando proyectos...", 124 | "errorLoading": "Error al cargar proyectos:", 125 | "backToHome": "Volver al Inicio", 126 | "browseExisting": "Explorar Proyectos Existentes", 127 | "existingProjects": "Proyectos Existentes", 128 | "recentProjects": "Proyectos Recientes" 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /src/messages/ja.json: -------------------------------------------------------------------------------- 1 | { 2 | "common": { 3 | "appName": "DeepWiki-Open", 4 | "tagline": "AI駆動のドキュメンテーション", 5 | "generateWiki": "Wikiを生成", 6 | "processing": "処理中...", 7 | "error": "エラー", 8 | "submit": "送信", 9 | "cancel": "キャンセル", 10 | "close": "閉じる", 11 | "loading": "読み込み中..." 12 | }, 13 | "loading": { 14 | "initializing": "Wiki生成を初期化中...", 15 | "fetchingStructure": "リポジトリ構造を取得中...", 16 | "determiningStructure": "Wiki構造を決定中...", 17 | "clearingCache": "サーバーキャッシュをクリア中...", 18 | "preparingDownload": "ダウンロードの準備中です..." 
19 | }, 20 | "home": { 21 | "welcome": "DeepWikiへようこそ", 22 | "welcomeTagline": "コードリポジトリのためのAI駆動ドキュメンテーション", 23 | "description": "GitHub、GitLab、またはBitbucketリポジトリから包括的なドキュメントを数クリックで生成します。", 24 | "quickStart": "クイックスタート", 25 | "enterRepoUrl": "以下のいずれかの形式でリポジトリURLを入力してください:", 26 | "advancedVisualization": "Mermaidダイアグラムによる高度な可視化", 27 | "diagramDescription": "DeepWikiは、コード構造と関係を理解するのに役立つインタラクティブな図を自動的に生成します:", 28 | "flowDiagram": "フロー図", 29 | "sequenceDiagram": "シーケンス図" 30 | }, 31 | "form": { 32 | "repository": "リポジトリ", 33 | "configureWiki": "Wiki設定", 34 | "repoPlaceholder": "所有者/リポジトリまたはGitHub/GitLab/BitbucketのURL", 35 | "wikiLanguage": "Wiki言語", 36 | "modelOptions": "モデルオプション", 37 | "modelProvider": "モデルプロバイダー", 38 | "modelSelection": "モデル選択", 39 | "wikiType": "Wikiタイプ", 40 | "comprehensive": "包括的", 41 | "concise": "簡潔", 42 | "comprehensiveDescription": "構造化されたセクションとより多くのページを持つ詳細なWiki", 43 | "conciseDescription": "ページ数が少なく、必要な情報のみを含む簡素化されたWiki", 44 | "providerGoogle": "Google", 45 | "providerOpenAI": "OpenAI", 46 | "providerOpenRouter": "OpenRouter", 47 | "providerOllama": "Ollama(ローカル)", 48 | "localOllama": "ローカルOllamaモデル", 49 | "experimental": "実験的", 50 | "useOpenRouter": "OpenRouter APIを使用", 51 | "openRouterModel": "OpenRouterモデル", 52 | "useOpenai": "Openai APIを使用", 53 | "openaiModel": "Openaiモデル", 54 | "useCustomModel": "カスタムモデルを使用", 55 | "customModelPlaceholder": "カスタムモデル名を入力", 56 | "addTokens": "+ プライベートリポジトリ用のアクセストークンを追加", 57 | "hideTokens": "- アクセストークンを隠す", 58 | "accessToken": "プライベートリポジトリ用のアクセストークン", 59 | "selectPlatform": "プラットフォームを選択", 60 | "personalAccessToken": "{platform}個人アクセストークン", 61 | "tokenPlaceholder": "{platform}トークンを入力してください", 62 | "tokenSecurityNote": "トークンはメモリ内にのみ保存され、永続化されることはありません。", 63 | "defaultFiltersInfo": "デフォルトのフィルターは、node_modules、.git、および一般的なビルドアーティファクトファイルのような一般的なディレクトリを含みます。", 64 | "fileFilterTitle": "ファイルフィルター設定", 65 | "advancedOptions": "詳細オプション", 66 | "viewDefaults": "デフォルトフィルターを表示", 67 | "showFilters": "フィルターを表示", 68 | 
"hideFilters": "フィルターを非表示", 69 | "excludedDirs": "除外するディレクトリ", 70 | "excludedDirsHelp": "一行につき一つのディレクトリパス。./で始まるパスはリポジトリルートからの相対パスです。", 71 | "enterExcludedDirs": "除外するディレクトリを一行ずつ入力...", 72 | "excludedFiles": "除外するファイル", 73 | "excludedFilesHelp": "一行につき一つのファイル名。ワイルドカード(*)が使用可能です。", 74 | "enterExcludedFiles": "除外するファイルを一行ずつ入力...", 75 | "defaultFilters": "デフォルトで除外されるファイルとディレクトリ", 76 | "directories": "ディレクトリ", 77 | "files": "ファイル", 78 | "scrollToViewMore": "スクロールしてさらに表示", 79 | "changeModel": "モデルを変更", 80 | "defaultNote": "これらのデフォルト設定は既に適用されています。上記に追加の除外項目を入力してください。", 81 | "hideDefault": "デフォルトを隠す", 82 | "viewDefault": "デフォルトを表示" 83 | }, 84 | "footer": { 85 | "copyright": "DeepWiki - コードリポジトリのためのAI駆動ドキュメンテーション" 86 | }, 87 | "ask": { 88 | "placeholder": "このリポジトリについて質問する...", 89 | "askButton": "質問する", 90 | "deepResearch": "詳細調査", 91 | "researchInProgress": "調査進行中...", 92 | "continueResearch": "調査を続ける", 93 | "viewPlan": "計画を見る", 94 | "viewUpdates": "更新を見る", 95 | "viewConclusion": "結論を見る" 96 | }, 97 | "repoPage": { 98 | "refreshWiki": "Wikiを更新", 99 | "confirmRefresh": "更新を確認", 100 | "cancel": "キャンセル", 101 | "home": "ホーム", 102 | "errorTitle": "エラー", 103 | "errorMessageDefault": "リポジトリが存在し、公開されていることを確認してください。有効な形式は「owner/repo」、「https://github.com/owner/repo」、「https://gitlab.com/owner/repo」、「https://bitbucket.org/owner/repo」、またはローカルフォルダパス(例: 「C:\\\\path\\\\to\\\\folder」、「/path/to/folder」)です。", 104 | "backToHome": "ホームに戻る", 105 | "exportWiki": "Wikiをエクスポート", 106 | "exportAsMarkdown": "Markdownとしてエクスポート", 107 | "exportAsJson": "JSONとしてエクスポート", 108 | "pages": "ページ", 109 | "relatedFiles": "関連ファイル:", 110 | "relatedPages": "関連ページ:", 111 | "selectPagePrompt": "ナビゲーションからページを選択してコンテンツを表示", 112 | "askAboutRepo": "このリポジトリについて質問する" 113 | }, 114 | "nav": { 115 | "wikiProjects": "プロジェクト一覧" 116 | }, 117 | "projects": { 118 | "title": "処理済みWikiプロジェクト", 119 | "searchPlaceholder": "プロジェクト名、所有者、リポジトリで検索...", 120 | "noProjects": "サーバーキャッシュにプロジェクトが見つかりません。キャッシュが空であるか、サーバーで問題が発生した可能性があります。", 121 | 
"noSearchResults": "検索条件に一致するプロジェクトがありません。", 122 | "processedOn": "処理日時:", 123 | "loadingProjects": "プロジェクトを読み込み中...", 124 | "errorLoading": "プロジェクトの読み込みエラー:", 125 | "backToHome": "ホームに戻る", 126 | "browseExisting": "既存プロジェクトを閲覧", 127 | "existingProjects": "既存プロジェクト", 128 | "recentProjects": "最近のプロジェクト" 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /src/messages/kr.json: -------------------------------------------------------------------------------- 1 | { 2 | "common": { 3 | "appName": "DeepWiki-Open", 4 | "tagline": "AI 기반 문서화", 5 | "generateWiki": "위키 생성", 6 | "processing": "처리 중...", 7 | "error": "오류", 8 | "submit": "제출", 9 | "cancel": "취소", 10 | "close": "닫기", 11 | "loading": "로딩 중..." 12 | }, 13 | "loading": { 14 | "initializing": "위키 생성을 초기화하는 중...", 15 | "fetchingStructure": "저장소 구조를 가져오는 중...", 16 | "determiningStructure": "위키 구조를 결정하는 중...", 17 | "clearingCache": "서버 캐시를 지우는 중...", 18 | "preparingDownload": "다운로드를 준비 중입니다. 잠시만 기다려 주세요..." 
19 | }, 20 | "home": { 21 | "welcome": "DeepWiki-Open에 오신 것을 환영합니다", 22 | "welcomeTagline": "코드 저장소를 위한 AI 기반 문서화", 23 | "description": "GitHub, GitLab 또는 Bitbucket 저장소에서 클릭 한 번으로 종합 문서를 생성하세요.", 24 | "quickStart": "빠른 시작", 25 | "enterRepoUrl": "다음 형식 중 하나로 저장소 URL을 입력하세요:", 26 | "advancedVisualization": "Mermaid 다이어그램을 활용한 고급 시각화", 27 | "diagramDescription": "DeepWiki는 코드 구조와 관계를 이해하는 데 도움이 되는 대화형 다이어그램을 자동 생성합니다:", 28 | "flowDiagram": "흐름도", 29 | "sequenceDiagram": "시퀀스 다이어그램" 30 | }, 31 | "form": { 32 | "repository": "저장소", 33 | "configureWiki": "위키 구성", 34 | "repoPlaceholder": "owner/repo 또는 GitHub/GitLab/Bitbucket URL", 35 | "wikiLanguage": "위키 언어", 36 | "modelOptions": "모델 옵션", 37 | "modelProvider": "모델 제공자", 38 | "modelSelection": "모델 선택", 39 | "wikiType": "위키 유형", 40 | "comprehensive": "종합적", 41 | "concise": "간결함", 42 | "comprehensiveDescription": "구조화된 섹션과 더 많은 페이지가 있는 상세한 위키", 43 | "conciseDescription": "페이지 수가 적고 필수 정보만 포함된 간소화된 위키", 44 | "providerGoogle": "구글", 45 | "providerOpenAI": "OpenAI", 46 | "providerOpenRouter": "OpenRouter", 47 | "providerOllama": "Ollama (로컬)", 48 | "localOllama": "로컬 Ollama 모델", 49 | "experimental": "실험적", 50 | "useOpenRouter": "OpenRouter API 사용", 51 | "openRouterModel": "OpenRouter 모델", 52 | "useOpenai": "Openai API 사용", 53 | "openaiModel": "Openai 모델", 54 | "useCustomModel": "사용자 정의 모델 사용", 55 | "customModelPlaceholder": "사용자 정의 모델 이름 입력", 56 | "addTokens": "+ 비공개 저장소 액세스 토큰 추가", 57 | "hideTokens": "- 액세스 토큰 숨기기", 58 | "accessToken": "비공개 저장소용 액세스 토큰", 59 | "selectPlatform": "플랫폼 선택", 60 | "personalAccessToken": "{platform} 개인 액세스 토큰", 61 | "tokenPlaceholder": "{platform} 토큰을 입력하세요", 62 | "tokenSecurityNote": "토큰은 메모리에만 저장되며, 영구적으로 보존되지 않습니다.", 63 | "defaultFiltersInfo": "기본 필터에는 node_modules,.git 및 일반적인 빌드 파일이 포함됩니다.", 64 | "fileFilterTitle": "파일 필터 구성", 65 | "advancedOptions": "고급 옵션", 66 | "viewDefaults": "기본 필터 보기", 67 | "showFilters": "필터 표시", 68 | "hideFilters": "필터 숨기기", 69 | "excludedDirs": "제외할 디렉토리", 70 | 
"excludedDirsHelp": "한 줄에 하나의 디렉토리 경로. ./로 시작하는 경로는 저장소 루트에서의 상대 경로입니다.", 71 | "enterExcludedDirs": "제외할 디렉토리를 한 줄에 하나씩 입력하세요...", 72 | "excludedFiles": "제외할 파일", 73 | "excludedFilesHelp": "한 줄에 하나의 파일 이름. 와일드카드(*)가 지원됩니다.", 74 | "enterExcludedFiles": "제외할 파일을 한 줄에 하나씩 입력하세요...", 75 | "defaultFilters": "기본적으로 제외되는 파일 및 디렉토리", 76 | "directories": "디렉토리", 77 | "files": "파일", 78 | "scrollToViewMore": "더 보려면 스크롤하세요", 79 | "changeModel": "모델 변경", 80 | "defaultNote": "이 기본 설정은 이미 적용되었습니다. 위에 사용자 지정 제외 항목을 추가하세요.", 81 | "hideDefault": "기본값 숨기기", 82 | "viewDefault": "기본값 보기" 83 | }, 84 | "footer": { 85 | "copyright": "DeepWiki - 코드 저장소를 위한 AI 기반 문서화" 86 | }, 87 | "ask": { 88 | "placeholder": "이 저장소에 대해 질문해 보세요...", 89 | "askButton": "질문하기", 90 | "deepResearch": "심층 분석", 91 | "researchInProgress": "심층 분석 진행 중...", 92 | "continueResearch": "분석 계속하기", 93 | "viewPlan": "계획 보기", 94 | "viewUpdates": "업데이트 보기", 95 | "viewConclusion": "결론 보기" 96 | }, 97 | "repoPage": { 98 | "refreshWiki": "위키 새로고침", 99 | "confirmRefresh": "새로고침 확인", 100 | "cancel": "취소", 101 | "home": "홈", 102 | "errorTitle": "오류", 103 | "errorMessageDefault": "저장소가 존재하며 공개 상태인지 확인해 주세요. 유효한 형식은 \"owner/repo\", \"https://github.com/owner/repo\", \"https://gitlab.com/owner/repo\", \"https://bitbucket.org/owner/repo\" 또는 로컬 폴더 경로 \"C:\\\\path\\\\to\\\\folder\" 혹은 \"/path/to/folder\" 입니다.", 104 | "backToHome": "홈으로 돌아가기", 105 | "exportWiki": "위키 내보내기", 106 | "exportAsMarkdown": "마크다운으로 내보내기", 107 | "exportAsJson": "JSON으로 내보내기", 108 | "pages": "페이지", 109 | "relatedFiles": "관련 파일:", 110 | "relatedPages": "관련 페이지:", 111 | "selectPagePrompt": "목록에서 페이지를 선택하여 내용을 확인하세요", 112 | "askAboutRepo": "이 저장소에 대해 질문하기" 113 | }, 114 | "nav": { 115 | "wikiProjects": "프로젝트 목록" 116 | }, 117 | "projects": { 118 | "title": "처리된 위키 프로젝트", 119 | "searchPlaceholder": "프로젝트 이름, 소유자 또는 저장소로 검색...", 120 | "noProjects": "서버 캐시에서 프로젝트를 찾을 수 없습니다. 
캐시가 비어있거나 서버에 문제가 발생했을 수 있습니다.", 121 | "noSearchResults": "검색 조건에 맞는 프로젝트가 없습니다.", 122 | "processedOn": "처리 날짜:", 123 | "loadingProjects": "프로젝트 로딩 중...", 124 | "errorLoading": "프로젝트 로딩 오류:", 125 | "backToHome": "홈으로 돌아가기", 126 | "browseExisting": "기존 프로젝트 탐색", 127 | "existingProjects": "기존 프로젝트", 128 | "recentProjects": "최근 프로젝트" 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /src/messages/vi.json: -------------------------------------------------------------------------------- 1 | { 2 | "common": { 3 | "appName": "DeepWiki-Open", 4 | "tagline": "Tài liệu hỗ trợ bởi AI", 5 | "generateWiki": "Tạo Wiki", 6 | "processing": "Đang xử lý...", 7 | "error": "Lỗi", 8 | "submit": "Gửi", 9 | "cancel": "Hủy", 10 | "close": "Đóng", 11 | "loading": "Đang tải..." 12 | }, 13 | "loading": { 14 | "initializing": "Đang khởi tạo wiki...", 15 | "fetchingStructure": "Đang lấy cấu trúc repository...", 16 | "determiningStructure": "Đang xác định cấu trúc wiki...", 17 | "clearingCache": "Đang xóa bộ nhớ đệm máy chủ...", 18 | "preparingDownload": "Đang tải! Vui lòng chờ..." 
19 | }, 20 | "home": { 21 | "welcome": "Chào mừng đến với DeepWiki-Open", 22 | "welcomeTagline": "Tài liệu hỗ trợ bởi AI cho các repository của bạn", 23 | "description": "Tạo tài liệu từ các repository GitHub, GitLab, hoặc Bitbucket chỉ với vài cú nhấp chuột.", 24 | "quickStart": "Bắt đầu nhanh", 25 | "enterRepoUrl": "Nhập URL repository", 26 | "advancedVisualization": "Tùy chỉnh sơ đồ trực quan với Mermaid", 27 | "diagramDescription": "DeepWiki tự động tạo các sơ đồ tương tác giúp bạn hiểu cấu trúc source codes và mối quan hệ giữa chúng:", 28 | "flowDiagram": "Sơ đồ luồng", 29 | "sequenceDiagram": "Sơ đồ tuần tự" 30 | }, 31 | "form": { 32 | "repository": "Repository", 33 | "configureWiki": "Cấu hình Wiki", 34 | "repoPlaceholder": "owner/repo hoặc URL GitHub/GitLab/Bitbucket", 35 | "wikiLanguage": "Ngôn ngữ Wiki", 36 | "modelOptions": "Tùy chọn mô hình", 37 | "modelProvider": "Nhà cung cấp mô hình", 38 | "modelSelection": "Lựa chọn mô hình", 39 | "wikiType": "Loại Wiki", 40 | "comprehensive": "Toàn diện", 41 | "concise": "Súc tích", 42 | "comprehensiveDescription": "Wiki chi tiết với các phần có cấu trúc và nhiều trang hơn", 43 | "conciseDescription": "Wiki đơn giản hóa với ít trang hơn và thông tin thiết yếu", 44 | "providerGoogle": "Google", 45 | "providerOpenAI": "OpenAI", 46 | "providerOpenRouter": "OpenRouter", 47 | "providerOllama": "Ollama (Cục bộ)", 48 | "localOllama": "Mô hình Ollama cục bộ", 49 | "experimental": "Thử nghiệm", 50 | "useOpenRouter": "Sử dụng API OpenRouter", 51 | "openRouterModel": "Mô hình OpenRouter", 52 | "useOpenai": "Sử dụng API Openai", 53 | "openaiModel": "Mô hình Openai", 54 | "useCustomModel": "Sử dụng mô hình tùy chỉnh", 55 | "customModelPlaceholder": "Nhập tên mô hình tùy chỉnh", 56 | "addTokens": "+ Thêm token truy cập cho private repositories", 57 | "hideTokens": "- Ẩn token truy cập", 58 | "accessToken": "Token truy cập cho private repositories", 59 | "selectPlatform": "Chọn nền tảng", 60 | "personalAccessToken": "Token truy 
cập cá nhân {platform}", 61 | "tokenPlaceholder": "Nhập token {platform} của bạn", 62 | "tokenSecurityNote": "Token chỉ được lưu trong bộ nhớ và không bao giờ được lưu trữ vĩnh viễn.", 63 | "defaultFiltersInfo": "Lọc mặc định bao gồm các thư mục thông thường như node_modules, .git và các tệp tài liệu xây dựng thông thường.", 64 | "fileFilterTitle": "Cấu hình Lọc Tệp", 65 | "advancedOptions": "Tùy chọn nâng cao", 66 | "viewDefaults": "Xem Lọc Mặc định", 67 | "showFilters": "Hiển thị Lọc", 68 | "hideFilters": "Ẩn Lọc", 69 | "excludedDirs": "Thư mục để Loại trừ", 70 | "excludedDirsHelp": "Một đường dẫn thư mục trên một dòng. Đường dẫn bắt đầu bằng ./ là tương đối so với gốc kho lưu trữ.", 71 | "enterExcludedDirs": "Nhập thư mục cần loại trừ, mỗi dòng một thư mục...", 72 | "excludedFiles": "Tệp để Loại trừ", 73 | "excludedFilesHelp": "Một tên tệp trên một dòng. Hỗ trợ ký tự đại diện (*).", 74 | "enterExcludedFiles": "Nhập tệp cần loại trừ, mỗi dòng một tệp...", 75 | "defaultFilters": "Tệp và Thư mục Loại trừ Mặc định", 76 | "directories": "Thư mục", 77 | "files": "Tệp", 78 | "scrollToViewMore": "Dịch chuyển để xem thêm", 79 | "changeModel": "Thay đổi mô hình", 80 | "defaultNote": "Các giá trị mặc định này đã được áp dụng. 
Thêm các loại trừ tùy chỉnh của bạn ở trên.", 81 | "hideDefault": "Ẩn mặc định", 82 | "viewDefault": "Xem mặc định" 83 | }, 84 | "footer": { 85 | "copyright": "DeepWiki - Tài liệu hỗ trợ bởi AI cho repository" 86 | }, 87 | "ask": { 88 | "placeholder": "Đặt một câu hỏi về repository này...", 89 | "askButton": "Hỏi", 90 | "deepResearch": "Nghiên cứu sâu", 91 | "researchInProgress": "Đang tiến hành nghiên cứu...", 92 | "continueResearch": "Tiếp tục nghiên cứu", 93 | "viewPlan": "Xem kế hoạch", 94 | "viewUpdates": "Xem cập nhật", 95 | "viewConclusion": "Xem kết luận" 96 | }, 97 | "repoPage": { 98 | "refreshWiki": "Làm mới Wiki", 99 | "confirmRefresh": "Xác nhận làm mới", 100 | "cancel": "Hủy bỏ", 101 | "home": "Trang chủ", 102 | "errorTitle": "Lỗi", 103 | "errorMessageDefault": "Vui lòng kiểm tra xem repository có tồn tại và công khai hay không. Các định dạng hợp lệ là \"owner/repo\", \"https://github.com/owner/repo\", \"https://gitlab.com/owner/repo\", \"https://bitbucket.org/owner/repo\", hoặc các đường dẫn thư mục cục bộ như \"C:\\\\path\\\\to\\\\folder\" hoặc \"/path/to/folder\".", 104 | "backToHome": "Quay lại trang chủ", 105 | "exportWiki": "Xuất Wiki", 106 | "exportAsMarkdown": "Xuất dưới dạng Markdown", 107 | "exportAsJson": "Xuất dưới dạng JSON", 108 | "pages": "Trang", 109 | "relatedFiles": "Tệp liên quan:", 110 | "relatedPages": "Trang liên quan:", 111 | "selectPagePrompt": "Chọn một trang từ thanh điều hướng để xem nội dung của nó", 112 | "askAboutRepo": "Hỏi về repository này" 113 | }, 114 | "nav": { 115 | "wikiProjects": "Danh sách dự án" 116 | }, 117 | "projects": { 118 | "title": "Dự án Wiki đã xử lý", 119 | "searchPlaceholder": "Tìm kiếm dự án theo tên, chủ sở hữu hoặc repository...", 120 | "noProjects": "Không tìm thấy dự án nào trong bộ nhớ đệm máy chủ. 
Bộ nhớ đệm có thể trống hoặc máy chủ gặp sự cố.", 121 | "noSearchResults": "Không có dự án nào phù hợp với tiêu chí tìm kiếm của bạn.", 122 | "processedOn": "Xử lý vào:", 123 | "loadingProjects": "Đang tải dự án...", 124 | "errorLoading": "Lỗi khi tải dự án:", 125 | "backToHome": "Về trang chủ", 126 | "browseExisting": "Duyệt dự án hiện có", 127 | "existingProjects": "Dự án hiện có", 128 | "recentProjects": "Dự án gần đây" 129 | } 130 | } -------------------------------------------------------------------------------- /src/messages/zh.json: -------------------------------------------------------------------------------- 1 | { 2 | "common": { 3 | "appName": "DeepWiki-Open", 4 | "tagline": "AI驱动的文档", 5 | "generateWiki": "生成Wiki", 6 | "processing": "处理中...", 7 | "error": "错误", 8 | "submit": "提交", 9 | "cancel": "取消", 10 | "close": "关闭", 11 | "loading": "加载中..." 12 | }, 13 | "loading": { 14 | "initializing": "初始化Wiki生成...", 15 | "fetchingStructure": "获取仓库结构...", 16 | "determiningStructure": "验证Wiki结构...", 17 | "clearingCache": "清除服务器缓存...", 18 | "preparingDownload": "请等待,我们正在准备您的下载..." 
19 | }, 20 | "home": { 21 | "welcome": "欢迎使用DeepWiki", 22 | "welcomeTagline": "为代码仓库提供AI驱动的文档", 23 | "description": "只需一次点击,即可从GitHub、GitLab或Bitbucket仓库生成全面的文档。", 24 | "quickStart": "快速开始", 25 | "enterRepoUrl": "请以下列格式之一输入仓库URL:", 26 | "advancedVisualization": "使用Mermaid图表进行高级可视化", 27 | "diagramDescription": "DeepWiki自动生成交互式图表,帮助您理解代码结构和关系:", 28 | "flowDiagram": "流程图", 29 | "sequenceDiagram": "序列图" 30 | }, 31 | "form": { 32 | "repository": "仓库", 33 | "configureWiki": "配置Wiki", 34 | "repoPlaceholder": "所有者/仓库或GitHub/GitLab/Bitbucket URL", 35 | "wikiLanguage": "Wiki语言", 36 | "modelOptions": "模型选项", 37 | "modelProvider": "模型提供商", 38 | "modelSelection": "模型选择", 39 | "wikiType": "Wiki类型", 40 | "comprehensive": "全面型", 41 | "concise": "简洁型", 42 | "comprehensiveDescription": "包含结构化章节和更多页面的详细Wiki", 43 | "conciseDescription": "页面更少,仅包含核心信息的简化Wiki", 44 | "providerGoogle": "Google", 45 | "providerOpenAI": "OpenAI", 46 | "providerOpenRouter": "OpenRouter", 47 | "providerOllama": "Ollama (本地)", 48 | "localOllama": "本地Ollama模型", 49 | "experimental": "实验性", 50 | "useOpenRouter": "使用OpenRouter API", 51 | "openRouterModel": "OpenRouter模型", 52 | "useOpenai": "使用Openai API", 53 | "openaiModel": "Openai 模型", 54 | "useCustomModel": "使用自定义模型", 55 | "customModelPlaceholder": "输入自定义模型名称", 56 | "addTokens": "+ 添加私有仓库访问令牌", 57 | "hideTokens": "- 隐藏访问令牌", 58 | "accessToken": "私有仓库访问令牌", 59 | "selectPlatform": "选择平台", 60 | "personalAccessToken": "{platform}个人访问令牌", 61 | "tokenPlaceholder": "输入您的{platform}令牌", 62 | "tokenSecurityNote": "令牌仅存储在内存中,从不持久化。", 63 | "defaultFiltersInfo": "默认过滤器包括node_modules、.git和常见的构建文件。", 64 | "fileFilterTitle": "文件过滤配置", 65 | "advancedOptions": "高级选项", 66 | "viewDefaults": "查看默认过滤", 67 | "showFilters": "显示过滤器", 68 | "hideFilters": "隐藏过滤器", 69 | "excludedDirs": "要排除的目录", 70 | "excludedDirsHelp": "每行一个目录路径。以./开头表示相对于仓库根目录的路径。", 71 | "enterExcludedDirs": "输入要排除的目录,每行一个...", 72 | "excludedFiles": "要排除的文件", 73 | "excludedFilesHelp": "每行一个文件名。支持通配符(*)。", 74 | 
"enterExcludedFiles": "输入要排除的文件,每行一个...", 75 | "defaultFilters": "默认排除的文件和目录", 76 | "directories": "目录", 77 | "files": "文件", 78 | "scrollToViewMore": "可滑动查看更多", 79 | "changeModel": "修改模型", 80 | "defaultNote": "这些默认配置已经被应用。请在上方添加您的自定义排除项。", 81 | "hideDefault": "隐藏默认配置", 82 | "viewDefault": "查看默认配置" 83 | }, 84 | "footer": { 85 | "copyright": "DeepWiki - 为代码仓库提供AI驱动的文档" 86 | }, 87 | "ask": { 88 | "placeholder": "询问关于此仓库的问题...", 89 | "askButton": "提问", 90 | "deepResearch": "深度研究", 91 | "researchInProgress": "研究进行中...", 92 | "continueResearch": "继续研究", 93 | "viewPlan": "查看计划", 94 | "viewUpdates": "查看更新", 95 | "viewConclusion": "查看结论" 96 | }, 97 | "repoPage": { 98 | "refreshWiki": "刷新Wiki", 99 | "confirmRefresh": "确认刷新", 100 | "cancel": "取消", 101 | "home": "首页", 102 | "errorTitle": "错误", 103 | "errorMessageDefault": "请检查您的仓库是否存在且为公开仓库。有效格式为\"owner/repo\", \"https://github.com/owner/repo\", \"https://gitlab.com/owner/repo\", \"https://bitbucket.org/owner/repo\", 或本地文件夹路径,如\"C:\\\\path\\\\to\\\\folder\"或\"/path/to/folder\"。", 104 | "backToHome": "返回首页", 105 | "exportWiki": "导出Wiki", 106 | "exportAsMarkdown": "导出为Markdown", 107 | "exportAsJson": "导出为JSON", 108 | "pages": "页面", 109 | "relatedFiles": "相关文件:", 110 | "relatedPages": "相关页面:", 111 | "selectPagePrompt": "从导航中选择一个页面以查看其内容", 112 | "askAboutRepo": "询问关于此仓库的问题" 113 | }, 114 | "nav": { 115 | "wikiProjects": "项目列表" 116 | }, 117 | "projects": { 118 | "title": "已处理的Wiki项目", 119 | "searchPlaceholder": "按项目名称、所有者或仓库搜索...", 120 | "noProjects": "服务器缓存中未找到项目。缓存可能为空或服务器遇到问题。", 121 | "noSearchResults": "没有项目符合您的搜索条件。", 122 | "processedOn": "处理时间:", 123 | "loadingProjects": "正在加载项目...", 124 | "errorLoading": "加载项目时出错:", 125 | "backToHome": "返回首页", 126 | "browseExisting": "浏览现有项目", 127 | "existingProjects": "现有项目", 128 | "recentProjects": "最近项目" 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /src/types/repoinfo.tsx: 
-------------------------------------------------------------------------------- 1 | export interface RepoInfo { 2 | owner: string; 3 | repo: string; 4 | type: string; 5 | token: string | null; 6 | localPath: string | null; 7 | repoUrl: string | null; 8 | } 9 | 10 | export default RepoInfo; -------------------------------------------------------------------------------- /src/types/wiki/wikipage.tsx: -------------------------------------------------------------------------------- 1 | // Wiki Interfaces 2 | export interface WikiPage { 3 | id: string; 4 | title: string; 5 | content: string; 6 | filePaths: string[]; 7 | importance: 'high' | 'medium' | 'low'; 8 | relatedPages: string[]; 9 | // New fields for hierarchy 10 | parentId?: string; 11 | isSection?: boolean; 12 | children?: string[]; // IDs of child pages 13 | } -------------------------------------------------------------------------------- /src/types/wiki/wikistructure.tsx: -------------------------------------------------------------------------------- 1 | import { WikiPage } from "./wikipage"; 2 | 3 | /** 4 | * @fileoverview This file defines the overall structure of a wiki and its collection of pages.
5 | */ 6 | export interface WikiStructure { 7 | id: string; 8 | title: string; 9 | description: string; 10 | pages: WikiPage[]; 11 | } -------------------------------------------------------------------------------- /src/utils/getRepoUrl.tsx: -------------------------------------------------------------------------------- 1 | import RepoInfo from "@/types/repoinfo"; 2 | 3 | export default function getRepoUrl(repoInfo: RepoInfo): string { 4 | console.log('getRepoUrl', repoInfo); 5 | if (repoInfo.type === 'local' && repoInfo.localPath) { 6 | return repoInfo.localPath; 7 | } else { 8 | if(repoInfo.repoUrl) { 9 | return repoInfo.repoUrl; 10 | } else { 11 | if(repoInfo.owner && repoInfo.repo) { 12 | return "http://example/" + repoInfo.owner + "/" + repoInfo.repo; 13 | } 14 | return ''; 15 | } 16 | } 17 | }; -------------------------------------------------------------------------------- /src/utils/urlDecoder.tsx: -------------------------------------------------------------------------------- 1 | export function extractUrlDomain(input: string): string | null { 2 | try { 3 | const normalizedInput = input.startsWith('http') ? input : `https://${input}`; 4 | const url = new URL(normalizedInput); 5 | return `${url.protocol}//${url.hostname}`; // Includes the protocol and the domain 6 | } catch { 7 | return null; // Not a valid URL 8 | } 9 | } 10 | 11 | export function extractUrlPath(input: string): string | null { 12 | try { 13 | const normalizedInput = input.startsWith('http') ?
input : `https://${input}`; 14 | const url = new URL(normalizedInput); 15 | return url.pathname.replace(/^\/|\/$/g, ''); // Remove leading and trailing slashes 16 | } catch { 17 | return null; // Not a valid URL 18 | } 19 | } -------------------------------------------------------------------------------- /src/utils/websocketClient.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * WebSocket client for chat completions 3 | * This replaces the HTTP streaming endpoint with a WebSocket connection 4 | */ 5 | 6 | // Get the server base URL from environment or use default 7 | const SERVER_BASE_URL = process.env.NEXT_PUBLIC_SERVER_BASE_URL || 'http://localhost:8001'; 8 | 9 | // Convert HTTP URL to WebSocket URL 10 | const getWebSocketUrl = () => { 11 | const baseUrl = SERVER_BASE_URL; 12 | // Replace http:// with ws:// or https:// with wss:// 13 | const wsBaseUrl = baseUrl.replace(/^http/, 'ws'); 14 | return `${wsBaseUrl}/ws/chat`; 15 | }; 16 | 17 | export interface ChatMessage { 18 | role: 'user' | 'assistant' | 'system'; 19 | content: string; 20 | } 21 | 22 | export interface ChatCompletionRequest { 23 | repo_url: string; 24 | messages: ChatMessage[]; 25 | filePath?: string; 26 | token?: string; 27 | type?: string; 28 | provider?: string; 29 | model?: string; 30 | language?: string; 31 | excluded_dirs?: string; 32 | excluded_files?: string; 33 | } 34 | 35 | /** 36 | * Creates a WebSocket connection for chat completions 37 | * @param request The chat completion request 38 | * @param onMessage Callback for received messages 39 | * @param onError Callback for errors 40 | * @param onClose Callback for when the connection closes 41 | * @returns The WebSocket connection 42 | */ 43 | export const createChatWebSocket = ( 44 | request: ChatCompletionRequest, 45 | onMessage: (message: string) => void, 46 | onError: (error: Event) => void, 47 | onClose: () => void 48 | ): WebSocket => { 49 | // Create WebSocket connection 50 | const 
ws = new WebSocket(getWebSocketUrl()); 51 | 52 | // Set up event handlers 53 | ws.onopen = () => { 54 | console.log('WebSocket connection established'); 55 | // Send the request as JSON 56 | ws.send(JSON.stringify(request)); 57 | }; 58 | 59 | ws.onmessage = (event) => { 60 | // Call the message handler with the received text 61 | onMessage(event.data); 62 | }; 63 | 64 | ws.onerror = (error) => { 65 | console.error('WebSocket error:', error); 66 | onError(error); 67 | }; 68 | 69 | ws.onclose = () => { 70 | console.log('WebSocket connection closed'); 71 | onClose(); 72 | }; 73 | 74 | return ws; 75 | }; 76 | 77 | /** 78 | * Closes a WebSocket connection 79 | * @param ws The WebSocket connection to close 80 | */ 81 | export const closeWebSocket = (ws: WebSocket | null): void => { 82 | if (ws && ws.readyState === WebSocket.OPEN) { 83 | ws.close(); 84 | } 85 | }; 86 | -------------------------------------------------------------------------------- /tailwind.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | darkMode: 'selector', 3 | content: [ 4 | './src/pages/**/*.{js,ts,jsx,tsx,mdx}', 5 | './src/components/**/*.{js,ts,jsx,tsx,mdx}', 6 | './src/app/**/*.{js,ts,jsx,tsx,mdx}', 7 | ], 8 | } -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2017", 4 | "lib": ["dom", "dom.iterable", "esnext"], 5 | "allowJs": true, 6 | "skipLibCheck": true, 7 | "strict": true, 8 | "noEmit": true, 9 | "esModuleInterop": true, 10 | "module": "esnext", 11 | "moduleResolution": "bundler", 12 | "resolveJsonModule": true, 13 | "isolatedModules": true, 14 | "jsx": "preserve", 15 | "incremental": true, 16 | "plugins": [ 17 | { 18 | "name": "next" 19 | } 20 | ], 21 | "paths": { 22 | "@/*": ["./src/*"] 23 | } 24 | }, 25 | "include": ["next-env.d.ts", "**/*.ts", 
"**/*.tsx", ".next/types/**/*.ts"], 26 | "exclude": ["node_modules"] 27 | } 28 | --------------------------------------------------------------------------------