├── helm-addons ├── templates │ ├── timesketch-llm-config.yaml │ ├── ts-configs-configmap.yaml │ ├── timesketch-llm-configmap.yaml │ └── ollama-deployment.yaml ├── values.yaml └── Chart.yaml ├── configs ├── timesketch-mcp-server │ ├── .python-version │ ├── docker │ │ ├── .env.sample │ │ ├── docker-compose.yml │ │ └── Dockerfile │ ├── README.md │ ├── pyproject.toml │ ├── src │ │ ├── timesketch_mcp_server │ │ │ ├── utils.py │ │ │ └── tools.py │ │ └── main.py │ ├── .gitignore │ └── LICENSE ├── osdfir-lab-values.yaml └── timesketch │ └── timesketch.conf ├── terraform ├── outputs.tf ├── main.tf ├── variables.tf ├── mcp.tf ├── .terraform.lock.hcl └── ollama.tf ├── docs ├── flowchart-osdfir-update.mmd ├── flowchart-deployment-overview.mmd ├── flowchart-deployment-terrform.mmd ├── flowchart-timesketch-mcp-builder.mmd ├── usage_examples.md └── updating_osdfir_lab.md ├── .github └── workflows │ ├── build-ts-configs-b64.yml │ └── build-mcp-server.yml ├── tools └── build-ts-configs.sh ├── .gitignore ├── CHANGELOG.md ├── README.md ├── LICENSE └── scripts └── manage-osdfir-lab.ps1 /helm-addons/templates/timesketch-llm-config.yaml: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /configs/timesketch-mcp-server/.python-version: -------------------------------------------------------------------------------- 1 | 3.11 2 | -------------------------------------------------------------------------------- /helm-addons/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for osdfir-addons 2 | -------------------------------------------------------------------------------- /helm-addons/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: osdfir-addons 3 | description: Additional components for OSDFIR Lab 4 | type: application 5 | version: 0.1.0 6 | appVersion: "1.0.0" 7 | -------------------------------------------------------------------------------- /configs/timesketch-mcp-server/docker/.env.sample: -------------------------------------------------------------------------------- 1 | # Copy to .env and edit as needed 2 | 3 | TIMESKETCH_HOST=localhost 4 | TIMESKETCH_PORT=5000 5 | TIMESKETCH_USER=dev 6 | TIMESKETCH_PASSWORD=dev 7 | -------------------------------------------------------------------------------- /configs/timesketch-mcp-server/README.md: -------------------------------------------------------------------------------- 1 | # timesketch-mcp-server 2 | 3 | ## How to run 4 | 5 | ``` 6 | docker compose up -d 7 | ``` 8 | 9 | Launch a bash shell in the container: 10 | 11 | ``` 12 | docker compose exec timesketch-mcp /bin/bash 13 | ``` 14 | 15 | Launch the web server: 16 | 17 | ``` 18 | uv run python src/main.py --mcp-host 0.0.0.0 --mcp-port 8081 19 | ``` 20 | -------------------------------------------------------------------------------- /helm-addons/templates/ts-configs-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Release.Name }}-ts-configs 5 | namespace: {{ .Values.namespaceOverride | default .Release.Namespace }} 6 | data: 7 | # The init script looks for /init/ts-configs.tgz.b64. 8 | # We mount this CM at /init in the core chart; the file name key must match. 
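9 |   # The consuming init container can unpack the payload with, for example:
10 |  #   base64 -d /init/ts-configs.tgz.b64 | tar -xzf - -C <timesketch config dir>
11 |  # (illustrative command only; the actual init script ships with the core chart)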
12 |   ts-configs.tgz.b64: |-
13 | {{ (.Files.Get "files/ts-configs.tgz.b64" | default "") | indent 4 }}
14 | 
--------------------------------------------------------------------------------
/terraform/outputs.tf:
--------------------------------------------------------------------------------
1 | output "namespace" {
2 |   description = "Name of the Kubernetes namespace created"
3 |   value       = kubernetes_namespace.osdfir.metadata[0].name
4 | }
5 | 
6 | output "pvc_name" {
7 |   description = "Name of the PersistentVolumeClaim created"
8 |   value       = kubernetes_persistent_volume_claim.osdfirvolume.metadata[0].name
9 | }
10 | 
11 | output "helm_release" {
12 |   description = "Name of the Helm release deployed"
13 |   value       = helm_release.osdfir.name
14 | }
--------------------------------------------------------------------------------
/configs/timesketch-mcp-server/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "timesketch-mcp-server"
3 | version = "0.1.0"
4 | description = "MCP server for Timesketch"
5 | readme = "README.md"
6 | requires-python = ">=3.11"
7 | dependencies = [
8 |     "fastmcp>=2.9.0",
9 |     "timesketch-api-client>=20250521",
10 | ]
11 | 
12 | [dependency-groups]
13 | dev = [
14 |     "ruff>=0.11.12",
15 | ]
16 | 
17 | [build-system]
18 | requires = ["setuptools>=61.0", "wheel"]
19 | build-backend = "setuptools.build_meta"
20 | 
--------------------------------------------------------------------------------
/configs/timesketch-mcp-server/docker/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 |   timesketch-mcp:
3 |     build:
4 |       context: ..
5 |       dockerfile: docker/Dockerfile
6 |     ports:
7 |       - "8081:8081"
8 |     environment:
9 |       - TIMESKETCH_HOST=${TIMESKETCH_HOST}
10 |       - TIMESKETCH_PORT=${TIMESKETCH_PORT}
11 |       - TIMESKETCH_USER=${TIMESKETCH_USER}
12 |       - TIMESKETCH_PASSWORD=${TIMESKETCH_PASSWORD}
13 |     volumes:
14 |       - ../:/app
15 |     networks:
16 |       - default
17 | 
18 | networks:
19 |   default:
20 |     driver: bridge
21 | 
--------------------------------------------------------------------------------
/docs/flowchart-osdfir-update.mmd:
--------------------------------------------------------------------------------
1 | graph TD
2 |     A[Start: Run update-osdfir-lab.ps1] --> B{Forced?};
3 |     B -- No --> C[Prompt for Confirmation];
4 |     B -- Yes --> D{Backup Enabled?};
5 |     C --> D;
6 | 
7 |     D -- Yes --> E[Create Project Backup];
8 |     D -- No --> F[Fetch Latest Release from GitHub API];
9 |     E --> F;
10 | 
11 |     F --> G[Download Release Package];
12 |     G --> H[Clean Helm Directory];
13 |     H --> I[Extract New Charts to Helm Directory];
14 |     I --> J[Update helm-addons/ if needed];
15 |     J --> K[Apply Custom Patches from 'configs/update'];
16 |     K --> L[End: Update Complete];
17 | 
18 |     style A fill:#2ecc71,stroke:#333,stroke-width:2px
19 |     style L fill:#3498db,stroke:#333,stroke-width:2px
--------------------------------------------------------------------------------
/configs/timesketch-mcp-server/src/timesketch_mcp_server/utils.py:
--------------------------------------------------------------------------------
1 | import functools
2 | import logging
3 | import os
4 | 
5 | from timesketch_api_client.client import TimesketchApi
6 | 
7 | 
8 | logger = logging.getLogger(__name__)
9 | 
10 | 
11 | @functools.cache
12 | def get_timesketch_client() -> TimesketchApi:
13 |     """
14 |     Get a cached instance of the TimesketchApi client.
15 |     This is used to avoid creating multiple instances of the client.
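16 | 
17 |     Reads TIMESKETCH_HOST, TIMESKETCH_USER, and TIMESKETCH_PASSWORD from the
18 |     environment; TIMESKETCH_PORT defaults to "5000" when unset.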
16 | """ 17 | host_uri = f"http://{os.environ.get('TIMESKETCH_HOST')}:{os.environ.get('TIMESKETCH_PORT', '5000')}/" 18 | ts_client = TimesketchApi( 19 | host_uri=host_uri, 20 | username=os.environ.get("TIMESKETCH_USER"), 21 | password=os.environ.get("TIMESKETCH_PASSWORD"), 22 | ) 23 | return ts_client 24 | -------------------------------------------------------------------------------- /configs/timesketch-mcp-server/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile for a development environment 2 | 3 | FROM python:3.11-slim 4 | 5 | WORKDIR /app 6 | 7 | RUN apt-get update && apt-get install -y \ 8 | git \ 9 | iputils-ping \ 10 | curl 11 | 12 | RUN pip install uv 13 | 14 | ADD . /app 15 | WORKDIR /app 16 | RUN uv sync --all-groups 17 | 18 | ENV PYTHONPATH /app 19 | 20 | # Command to run the application 21 | # We use 0.0.0.0 for the host to make it accessible from outside the container 22 | # CMD ["uv", "run", "python", "main.py", "--mcp-host", "0.0.0.0", "--mcp-port", "8081"] 23 | 24 | # Keep the container running 25 | CMD ["sleep", "infinity"] 26 | 27 | # Can be replaced with the following to run directly: 28 | # CMD ["uv", "run", "python", "main.py", "--mcp-host", "0.0.0.0", "--mcp-port", "8081"] 29 | -------------------------------------------------------------------------------- /docs/flowchart-deployment-overview.mmd: -------------------------------------------------------------------------------- 1 | graph TD 2 | A[Start] --> B{Run 'manage-osdfir-lab.ps1 deploy'}; 3 | B --> C[Check Prerequisites]; 4 | C --> D{Docker Desktop Running?}; 5 | D -- No --> E[Start Docker Desktop]; 6 | D -- Yes --> F[Start Minikube Cluster]; 7 | E --> F; 8 | F --> G{Deploy MCP Server?}; 9 | G -- No --> H[Deploy with Terraform]; 10 | G -- Yes --> H[Deploy with Terraform (uses GHCR image)]; 11 | H --> I[Wait for Pods to be Ready]; 12 | I --> J[Start Port Forwarding]; 13 | J --> K[End]; 14 | 15 | subgraph "Terraform Actions" 16 | H --> H1[Create Namespace]; 17 | H --> H2[Create PVC]; 18 | H --> H3[Deploy Helm Chart]; 19 | end 20 | 21 | style A fill:#2ecc71,stroke:#333,stroke-width:2px 22 | style K fill:#e74c3c,stroke:#333,stroke-width:2px -------------------------------------------------------------------------------- /helm-addons/templates/timesketch-llm-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.global.timesketch.enabled -}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ .Release.Name }}-timesketch-llm-config 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "timesketch.labels" . 
| nindent 4 }} 9 | data: 10 | # AI Model Environment Variables for Timesketch 11 | LLM_REQUEST_TIMEOUT: {{ .Values.ai.model.timeouts.llm_request | quote }} 12 | API_TIMEOUT: {{ .Values.ai.model.timeouts.api_timeout | quote }} 13 | HTTP_CONNECT_TIMEOUT: {{ .Values.ai.model.timeouts.http_connect | quote }} 14 | HTTP_READ_TIMEOUT: {{ .Values.ai.model.timeouts.http_read | quote }} 15 | WARM_UP_TIMEOUT: {{ .Values.ai.model.timeouts.warm_up | quote }} 16 | OLLAMA_MODEL_NAME: {{ .Values.ai.model.name | quote }} 17 | OLLAMA_SERVER_URL: {{ .Values.ai.model.server_url | quote }} 18 | {{- end }} -------------------------------------------------------------------------------- /docs/flowchart-deployment-terrform.mmd: -------------------------------------------------------------------------------- 1 | graph TD 2 | subgraph "Phase 1: Initial Setup" 3 | A[Terraform Apply] --> B(Create Namespace); 4 | A --> C(Package Timesketch Configs); 5 | end 6 | 7 | subgraph "Phase 2: Core Infrastructure" 8 | B --> D(Create PVC); 9 | B --> E(Create ConfigMap for Timesketch); 10 | C --> E; 11 | D --> F(Deploy OSDFIR Helm Chart); 12 | E --> F; 13 | end 14 | 15 | subgraph "Phase 3: MCP Server Deployment (toggle)" 16 | F -- Helm Release Creates Secret --> G(Read Timesketch Secret); 17 | G --> H{deploy_mcp_server?}; 18 | H -- true --> I(Deploy MCP Server from GHCR); 19 | I --> J(Create MCP Service); 20 | H -- false --> J; 21 | end 22 | 23 | subgraph "Result" 24 | J --> K[Deployment Complete]; 25 | end 26 | 27 | style A fill:#2ecc71,stroke:#333,stroke-width:2px 28 | style J fill:#3498db,stroke:#333,stroke-width:2px 29 | linkStyle 5 stroke:#ff9f43,stroke-width:2px,stroke-dasharray: 5 5; -------------------------------------------------------------------------------- /.github/workflows/build-ts-configs-b64.yml: -------------------------------------------------------------------------------- 1 | name: build-ts-configs-b64 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | paths: 7 | - 'configs/timesketch/**' # your overrides 8 | - 'tools/build-ts-configs.sh' 9 | workflow_dispatch: 10 | 11 | concurrency: 12 | group: ts-configs-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | build: 17 | runs-on: ubuntu-latest 18 | permissions: 19 | contents: write # to commit the generated file back 20 | env: 21 | TS_REF: master 22 | DFIQ_REF: main 23 | CUSTOM_DIR: configs/timesketch 24 | OUT_FILE: helm-addons/files/ts-configs.tgz.b64 25 | 26 | steps: 27 | - uses: actions/checkout@v4 28 | 29 | - name: Build merged base64 payload 30 | run: | 31 | chmod +x tools/build-ts-configs.sh 32 | ./tools/build-ts-configs.sh 33 | 34 | - name: Commit updated ts-configs.tgz.b64 35 | uses: stefanzweifel/git-auto-commit-action@v5 36 | with: 37 | commit_message: "chore: refresh ts-configs.tgz.b64 (TS=${{ env.TS_REF }}, DFIQ=${{ env.DFIQ_REF }})" 38 | file_pattern: helm-addons/files/ts-configs.tgz.b64 39 | -------------------------------------------------------------------------------- /docs/flowchart-timesketch-mcp-builder.mmd: -------------------------------------------------------------------------------- 1 | graph TD 2 | subgraph Initialization 3 | A[Start: GH Action build] --> B[Build & Push to GHCR]; 4 | end 5 | 6 | subgraph "Local (legacy manual build)" 7 | B -. 
optional .-> C[build-timesketch-mcp.ps1]; 8 | C --> D[Build Image locally]; 9 | D --> E[Push/Load Image]; 10 | end 11 | 12 | subgraph "Minikube Build (legacy)" 13 | C -- minikube flag --> F{Minikube Running?}; 14 | F -- No --> G[Start Minikube]; 15 | F -- Yes --> H[Switch to Minikube Docker Context]; 16 | G --> H; 17 | H --> I[Confirm Build]; 18 | I --> J[Build Image in Minikube]; 19 | J --> K{Deployment Exists?}; 20 | K -- Yes --> L[Update & Restart Deployment]; 21 | K -- No --> M[Restore Docker Context]; 22 | L --> M; 23 | M --> N{Minikube Started by Script?}; 24 | N -- Yes --> O[Stop Minikube]; 25 | N -- No --> P[End: Build Complete]; 26 | O --> P; 27 | end 28 | 29 | style A fill:#2ecc71,stroke:#333,stroke-width:2px 30 | style E fill:#3498db,stroke:#333,stroke-width:2px 31 | style P fill:#3498db,stroke:#333,stroke-width:2px 32 | -------------------------------------------------------------------------------- /configs/timesketch-mcp-server/src/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | 4 | from fastmcp import FastMCP 5 | 6 | from timesketch_mcp_server import tools 7 | 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | mcp = FastMCP("timesketch-mcp", dependencies=["timesketch-api-client"]) 13 | 14 | 15 | mcp.mount(tools.mcp, prefix=None) 16 | 17 | 18 | def main(): 19 | parser = argparse.ArgumentParser(description="MCP server for Timesketch") 20 | parser.add_argument( 21 | "--mcp-host", 22 | type=str, 23 | help="Host to run MCP server on (only used for sse), default: 127.0.0.1", 24 | default="127.0.0.1", 25 | ) 26 | parser.add_argument( 27 | "--mcp-port", 28 | type=int, 29 | help="Port to run MCP server on (only used for sse), default: 8081", 30 | default=8081, 31 | ) 32 | 33 | args = parser.parse_args() 34 | 35 | logger.info(f"Running MCP server on {args.mcp_host}:{args.mcp_port}") 36 | try: 37 | mcp.settings.port = args.mcp_port 38 | mcp.settings.host = args.mcp_host 39 | mcp.run(transport="sse") 40 | except KeyboardInterrupt: 41 | logger.info("Server stopped by user") 42 | return 43 | 44 | 45 | if __name__ == "__main__": 46 | main() 47 | -------------------------------------------------------------------------------- /tools/build-ts-configs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | TS_REF="${TS_REF:-master}" 5 | DFIQ_REF="${DFIQ_REF:-main}" 6 | CUSTOM_DIR="${CUSTOM_DIR:-configs/timesketch}" 7 | OUT_FILE="${OUT_FILE:-helm-addons/files/ts-configs.tgz.b64}" 8 | 9 | WORK="$(mktemp -d)" 10 | TIMESKETCH_DIR="$WORK/timesketch" 11 | mkdir -p "$TIMESKETCH_DIR" 12 | 13 | # 1) Upstream Timesketch data/ 14 | git clone --depth 1 --branch "$TS_REF" https://github.com/google/timesketch "$WORK/ts" 15 | cp -a "$WORK/ts/data/." "$TIMESKETCH_DIR/" 16 | rm -rf "$WORK/ts" 17 | 18 | # 2) Upstream DFIQ data/ 19 | git clone --depth 1 --branch "$DFIQ_REF" https://github.com/google/dfiq "$WORK/dfiq" 20 | mkdir -p "$TIMESKETCH_DIR/dfiq" 21 | cp -a "$WORK/dfiq/dfiq/data/." 
"$TIMESKETCH_DIR/dfiq/" 22 | rm -rf "$WORK/dfiq" 23 | 24 | # 3) Your overrides (wins) - Copy files directly to TIMESKETCH_DIR 25 | if [ -d "$CUSTOM_DIR" ]; then 26 | # Copy all files from custom directory directly to TIMESKETCH_DIR 27 | cp -a "$CUSTOM_DIR"/* "$TIMESKETCH_DIR/" 2>/dev/null || true 28 | fi 29 | 30 | # 4) Tar+base64 - Tar the contents of timesketch directory, not the directory itself 31 | pushd "$TIMESKETCH_DIR" >/dev/null 32 | tar -czf "$WORK/ts-configs.tgz" . 33 | popd >/dev/null 34 | 35 | # Create base64 version 36 | pushd "$WORK" >/dev/null 37 | base64 -w0 ts-configs.tgz > ts-configs.tgz.b64 38 | popd >/dev/null 39 | 40 | mkdir -p "$(dirname "$OUT_FILE")" 41 | mv "$WORK/ts-configs.tgz.b64" "$OUT_FILE" 42 | 43 | echo "Wrote $(du -h "$OUT_FILE" | awk '{print $1" "$2}'): $OUT_FILE" 44 | -------------------------------------------------------------------------------- /.github/workflows/build-mcp-server.yml: -------------------------------------------------------------------------------- 1 | name: Build Timesketch MCP Server 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | paths: 7 | - 'configs/timesketch-mcp-server/**' 8 | pull_request: 9 | branches: [ main ] 10 | paths: 11 | - 'configs/timesketch-mcp-server/**' 12 | workflow_dispatch: # Allow manual triggers 13 | 14 | env: 15 | REGISTRY: ghcr.io 16 | IMAGE_NAME: ${{ github.repository }}/timesketch-mcp-server 17 | 18 | jobs: 19 | build: 20 | runs-on: ubuntu-latest 21 | permissions: 22 | contents: read 23 | packages: write 24 | 25 | steps: 26 | - name: Checkout repository 27 | uses: actions/checkout@v4 28 | 29 | - name: Set up Docker Buildx 30 | uses: docker/setup-buildx-action@v3 31 | 32 | - name: Log in to Container Registry 33 | uses: docker/login-action@v3 34 | with: 35 | registry: ${{ env.REGISTRY }} 36 | username: ${{ github.actor }} 37 | password: ${{ secrets.GITHUB_TOKEN }} 38 | 39 | - name: Extract metadata 40 | id: meta 41 | uses: docker/metadata-action@v5 42 | with: 43 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 44 | tags: | 45 | type=ref,event=branch 46 | type=ref,event=pr 47 | type=sha,prefix={{branch}}- 48 | type=raw,value=latest,enable={{is_default_branch}} 49 | 50 | - name: Build and push Docker image 51 | uses: docker/build-push-action@v5 52 | with: 53 | context: ./configs/timesketch-mcp-server 54 | file: ./configs/timesketch-mcp-server/docker/Dockerfile 55 | push: true 56 | tags: ${{ steps.meta.outputs.tags }} 57 | labels: ${{ steps.meta.outputs.labels }} 58 | cache-from: type=gha 59 | cache-to: type=gha,mode=max 60 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | .terraform/ 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | crash.*.log 11 | 12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 13 | # password, private keys, and other secrets. These should not be part of version 14 | # control as they are data points which are potentially sensitive and subject 15 | # to change depending on the environment. 
16 | *.tfvars
17 | *.tfvars.json
18 | 
19 | # Ignore override files as they are usually used to override resources locally and so
20 | # are not checked in
21 | override.tf
22 | override.tf.json
23 | *_override.tf
24 | *_override.tf.json
25 | 
26 | # Ignore transient lock info files created by terraform apply
27 | .terraform.tfstate.lock.info
28 | 
29 | # Include override files you do wish to add to version control using negated pattern
30 | # !example_override.tf
31 | 
32 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
33 | # example: *tfplan*
34 | 
35 | # Ignore CLI configuration files
36 | .terraformrc
37 | terraform.rc
38 | 
39 | # OS artifacts
40 | .DS_Store
41 | Thumbs.db
42 | desktop.ini
43 | *.ps1.bak
44 | *.tmp
45 | *.temp
46 | ~*
47 | *.bak
48 | *.backup
49 | 
50 | # Project folders
51 | .cursor
52 | .taskmaster
53 | .taskmaster/
54 | backups
55 | scripts/dev
56 | scripts/dev/
57 | commands.md
58 | .cursorignore
59 | 
60 | # Terraform
61 | **/.terraform/
62 | *.tfstate
63 | *.tfstate.*
64 | *.tfvars
65 | *.tfplan
66 | .terraform.lock.hcl
67 | crash.log
68 | 
69 | # Kubernetes
70 | kubeconfig
71 | .kube/
72 | 
73 | # Helm
74 | charts/*.tgz
75 | .helm/
76 | 
77 | # Docker
78 | .docker/
79 | 
80 | # AI model cache (if downloaded locally)
81 | models/
82 | *.gguf
83 | 
84 | # Jupyter notebooks checkpoints
85 | .ipynb_checkpoints/
86 | 
87 | # Python cache (if any Python tools are added)
88 | __pycache__/
89 | *.pyc
90 | *.pyo
91 | *.pyd
92 | .Python
93 | pip-log.txt
94 | .venv/
95 | venv/
96 | 
97 | # Node modules (if any frontend tools are added)
98 | node_modules/
99 | npm-debug.log
100 | 
101 | # Generated Timesketch config tarball
102 | ts-configs.tar.gz
103 | helm-addons/files/*.tgz
104 | helm-addons/files/*.tar.gz
105 | 
--------------------------------------------------------------------------------
/docs/usage_examples.md:
--------------------------------------------------------------------------------
1 | ### Usage Examples: First Steps
2 | 
3 | Welcome to your OSDFIR Lab! Now that you have everything up and running, here are a couple of simple walkthroughs to get you started with the core tools.
4 | 
5 | ---
6 | 
7 | ### 1. Uploading Your First Timeline to Timesketch
8 | 
9 | Timesketch is a powerful tool for timeline analysis. Let's upload a sample timeline to see it in action.
10 | https://timesketch.org/guides/getting-started/
11 | 
12 | **Step 1: Access Timesketch**
13 | 1. Open your browser and navigate to `http://localhost:5000`.
14 | 2. Log in with the credentials provided by the `manage-osdfir-lab.ps1 creds` command. The default is `admin:admin`.
15 | 
16 | **Step 2: Create a New Sketch**
17 | https://timesketch.org/guides/user/sketch-overview/
18 | 1. In the Timesketch interface, click the **"New Sketch"** button in the top left.
19 | 2. Give your sketch a name (e.g., "My First Investigation") and a description.
20 | 3. Click **"Create Sketch"**.
21 | 
22 | **Step 3: Upload a Timeline**
23 | https://timesketch.org/guides/user/import-from-json-csv/
24 | For this example, you can create a simple `sample.csv` file on your computer with the following content:
25 | ```csv
26 | message,timestamp,datetime,timestamp_desc,extra_field_1,extra_field_2
27 | A message,1437764461000000,2015-07-24T19:01:01+00:00,Write time,foo,bar
28 | ```
29 | 1. Inside your new sketch, click the **"Timelines"** tab on the left.
30 | 2. Click the **"Upload timeline"** button and select the `sample.csv` file you just created.
31 | 3. 
Once it's processed, you can explore the events on the timeline. 32 | 33 | --- 34 | 35 | ### 2. Running Your First Analysis in OpenRelik 36 | 37 | OpenRelik processes forensic evidence using various analyzer workflows. 38 | https://openrelik.org/ 39 | 40 | **Step 1: Access OpenRelik** 41 | 1. Open your browser and navigate to `http://localhost:8711`. 42 | 43 | **Step 2: Upload Evidence** 44 | 1. In the OpenRelik UI, click **"Upload file"**. 45 | 2. Choose any file from your machine as test evidence (e.g., the `sample.csv` from the previous example). 46 | 47 | **Step 3: Run an Analyzer** 48 | 1. Find your uploaded file in the list and click the **"Analyze"** button (it looks like a play icon). 49 | 2. Select a simple workflow like **`strings`** from the dialog that appears. 50 | 3. Click **"Run"**. 51 | 52 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to a `YYYYMMDD` versioning scheme. 7 | 8 | ## [20251120] - 2025-11-21 9 | 10 | ### Added 11 | - First-deployment detection and extended Terraform/Helm timeout handling in `scripts/manage-osdfir-lab.ps1`, including periodic status reminders during long rollouts. 12 | 13 | ### Changed 14 | - Upgraded to `osdfir-infrastructure` Helm chart **2.5.6**. 15 | - Bumped Timesketch to image tag **20251114** with aligned dependency images (nginx `1.25.5-alpine-slim`, OpenSearch `3.1.0`, Redis `7.4.2-alpine`, Postgres `17.5-alpine`). 16 | - Bumped OpenRelik components to **0.6.0** and pinned worker images (analyzer-config `0.2.0`, plaso `0.4.0`, timesketch `0.3.0`, hayabusa `0.3.0`, extraction `0.5.0`). 17 | - Swapped the default Ollama model to `smollm:latest` and hardened the model pull init script for Windows-safe execution. 18 | - Documented the new component versions and deployment guidance in `README.md` and `docs/updating_osdfir_lab.md`; updated project version badge to **20251120**. 19 | 20 | ### Fixed 21 | - Resolved CRLF-related failures in the Ollama init container by normalising scripts before execution. 22 | 23 | ## [20250822] - 2025-08-22 24 | 25 | ### Added 26 | - GitHub workflow for building and publishing Timesketch MCP Server Docker image 27 | - Helm chart structure for custom add-ons (helm-addons directory) 28 | - Terraform variable to control Ollama deployment (deploy_ollama) 29 | - Terraform variable to control MCP Server deployment (deploy_mcp_server) 30 | - Enhanced Timesketch LLM integration with Ollama 31 | 32 | ### Changed 33 | - Refactored deployment to use upstream osdfir-infrastructure Helm chart 34 | - Improved Minikube management in deployment scripts 35 | - Optimized tarball creation for Timesketch configuration, with GitHub workflow creation 36 | - Updated Terraform configuration to use Kubernetes provider properly 37 | - Address some issues 38 | 39 | ### Removed 40 | - Local copies of Helm, Timesketch, and DFIQ data files (now pulled from upstream) 41 | 42 | ## [20250721] - 2025-07-21 43 | 44 | ### Added 45 | - Initial setup of the OSDFIR Lab environment. 46 | - Deployment scripts for Minikube using Terraform and Helm. 47 | - Integration of Timesketch and OpenRelik. 48 | - Experimental AI integration with an Ollama server. 49 | - Project `README.md` with setup instructions. 
50 | - Initial `CHANGELOG.md` to track project evolution. 51 | - `usage_examples.md` to guide new users. -------------------------------------------------------------------------------- /terraform/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.0" 3 | required_providers { 4 | kubernetes = { source = "hashicorp/kubernetes", version = ">= 2.0.0" } 5 | helm = { source = "hashicorp/helm", version = ">= 2.0.0" } 6 | local = { source = "hashicorp/local", version = ">= 2.0.0" } 7 | null = { source = "hashicorp/null", version = ">= 3.0.0" } 8 | } 9 | } 10 | 11 | # Use the current kubectl context (which should be "osdfir") 12 | provider "kubernetes" { 13 | config_path = pathexpand("~/.kube/config") 14 | config_context = "osdfir" 15 | } 16 | 17 | # Add this Helm provider configuration (from the article): 18 | provider "helm" { 19 | kubernetes = { 20 | config_path = pathexpand("~/.kube/config") 21 | config_context = "osdfir" 22 | } 23 | } 24 | 25 | # Namespace 26 | resource "kubernetes_namespace" "osdfir" { 27 | metadata { name = var.namespace } 28 | } 29 | 30 | # ConfigMap that carries the prebuilt base64'ed tarball from your repo 31 | # Make sure this path matches where your workflow writes the file. 32 | # (Currently: helm-addons/files/ts-configs.tgz.b64) 33 | resource "kubernetes_config_map" "timesketch_configs" { 34 | metadata { 35 | name = "${var.helm_release_name}-ts-configs" 36 | namespace = kubernetes_namespace.osdfir.metadata[0].name 37 | } 38 | 39 | data = { 40 | "ts-configs.tgz.b64" = file("${path.module}/../helm-addons/files/ts-configs.tgz.b64") 41 | } 42 | 43 | depends_on = [kubernetes_namespace.osdfir] 44 | } 45 | 46 | # PVC (unchanged) 47 | resource "kubernetes_persistent_volume_claim" "osdfirvolume" { 48 | metadata { 49 | name = var.pvc_name 50 | namespace = kubernetes_namespace.osdfir.metadata[0].name 51 | } 52 | spec { 53 | access_modes = ["ReadWriteOnce"] 54 | storage_class_name = var.storage_class_name 55 | resources { requests = { storage = var.pvc_storage } } 56 | } 57 | } 58 | 59 | resource "null_resource" "helm_repo" { 60 | provisioner "local-exec" { command = "helm repo add osdfir-charts https://google.github.io/osdfir-infrastructure/" } 61 | provisioner "local-exec" { command = "helm repo update" } 62 | } 63 | 64 | # First deploy the main OSDFIR infrastructure 65 | resource "helm_release" "osdfir" { 66 | name = var.helm_release_name 67 | chart = "osdfir-charts/osdfir-infrastructure" 68 | version = var.osdfir_chart_version 69 | namespace = kubernetes_namespace.osdfir.metadata[0].name 70 | timeout = var.helm_timeout 71 | 72 | # Add this line to use your values file 73 | values = [file("${path.module}/../configs/osdfir-lab-values.yaml")] 74 | 75 | depends_on = [ 76 | null_resource.helm_repo, 77 | kubernetes_namespace.osdfir, 78 | kubernetes_config_map.timesketch_configs 79 | ] 80 | } 81 | -------------------------------------------------------------------------------- /terraform/variables.tf: -------------------------------------------------------------------------------- 1 | # OSDFIR Chart Version 2 | # This is the version of the osdfir-infrastructure Helm chart to deploy 3 | # Release can be found here: https://github.com/google/osdfir-infrastructure/releases 4 | variable "osdfir_chart_version" { 5 | description = "Version of the osdfir-infrastructure Helm chart to deploy" 6 | type = string 7 | default = "2.5.6" # Only use the numerical part of the version 8 | } 9 | 10 | 11 | # 
Timesketch MCP Server 12 | # Control whether to deploy the Timesketch MCP Server 13 | variable "deploy_mcp_server" { 14 | description = "Whether to deploy the Timesketch MCP Server" 15 | type = bool 16 | default = false # false = disabled, true = enabled 17 | } 18 | 19 | 20 | # Ollama Configuration 21 | # Control whether to deploy the Ollama server 22 | variable "deploy_ollama" { 23 | description = "Whether to deploy the Ollama server" 24 | type = bool 25 | default = true # false = disabled, true = enabled 26 | } 27 | 28 | # Set the AI Model to use with Ollama 29 | variable "ai_model_name" { 30 | description = "Name of the AI model to use with Ollama" 31 | type = string 32 | default = "smollm:latest" 33 | } 34 | 35 | # Set the maximum input tokens for the AI model 36 | variable "ai_model_max_input_tokens" { 37 | description = "Maximum input tokens for the AI model" 38 | type = number 39 | default = 8192 40 | } 41 | 42 | # Set the URL for the Ollama server pod 43 | variable "ai_model_server_url" { 44 | description = "URL for the Ollama server" 45 | type = string 46 | default = "http://ollama.osdfir.svc.cluster.local:11434" 47 | } 48 | 49 | variable "helm_timeout" { 50 | description = "Seconds Helm waits for resources to become ready" 51 | type = number 52 | default = 600 53 | } 54 | 55 | 56 | # OSDFIR-Lab Configuration 57 | # Set the Kubernetes namespace for the OSDFIR deployment 58 | variable "namespace" { 59 | description = "Kubernetes namespace for OSDFIR deployment" 60 | type = string 61 | default = "osdfir" 62 | } 63 | 64 | # Set the name of the PersistentVolumeClaim 65 | variable "pvc_name" { 66 | description = "Name of the PersistentVolumeClaim" 67 | type = string 68 | default = "osdfirvolume" 69 | } 70 | 71 | # Set the name for the Helm release label 72 | variable "helm_release_name" { 73 | description = "Name for the Helm release label" 74 | type = string 75 | default = "osdfir-lab" 76 | } 77 | 78 | # Set the storage size for the PVC 79 | variable "pvc_storage" { 80 | description = "Storage size for the PVC" 81 | type = string 82 | default = "200Gi" 83 | } 84 | 85 | # Set the storage class to use for the PVC 86 | variable "storage_class_name" { 87 | description = "Storage class to use for the PVC" 88 | type = string 89 | default = "standard" 90 | } 91 | 92 | # Set the GitHub repository name for the OSDFIR-Lab 93 | variable "github_repository" { 94 | description = "GitHub repository name (e.g., 'kev365/osdfir-lab')" 95 | type = string 96 | default = "kev365/osdfir-lab" # Use lowercase 97 | } -------------------------------------------------------------------------------- /terraform/mcp.tf: -------------------------------------------------------------------------------- 1 | # Data source to read the Timesketch secret 2 | data "kubernetes_secret" "timesketch" { 3 | count = var.deploy_mcp_server ? 1 : 0 4 | metadata { 5 | name = "osdfir-lab-timesketch-secret" 6 | namespace = kubernetes_namespace.osdfir.metadata[0].name 7 | } 8 | } 9 | 10 | # Timesketch MCP Server deployment 11 | resource "kubernetes_deployment" "timesketch_mcp_server" { 12 | count = var.deploy_mcp_server ? 
1 : 0
13 |   metadata {
14 |     name      = "timesketch-mcp-server"
15 |     namespace = kubernetes_namespace.osdfir.metadata[0].name
16 |     labels = {
17 |       app = "timesketch-mcp-server"
18 |     }
19 |   }
20 | 
21 |   spec {
22 |     replicas = 1
23 | 
24 |     selector {
25 |       match_labels = {
26 |         app = "timesketch-mcp-server"
27 |       }
28 |     }
29 | 
30 |     template {
31 |       metadata {
32 |         labels = {
33 |           app = "timesketch-mcp-server"
34 |         }
35 |       }
36 | 
37 |       spec {
38 |         container {
39 | 
40 |           image             = "ghcr.io/${var.github_repository}/timesketch-mcp-server:latest"
41 |           name              = "timesketch-mcp-server"
42 |           image_pull_policy = "Always" # or "IfNotPresent" if you prefer
43 |           command           = ["uv", "run", "python", "src/main.py", "--mcp-host", "0.0.0.0", "--mcp-port", "8081"]
44 | 
45 |           port {
46 |             container_port = 8081
47 |           }
48 | 
49 |           env {
50 |             name  = "TIMESKETCH_HOST"
51 |             value = "osdfir-lab-timesketch"
52 |           }
53 | 
54 |           env {
55 |             name  = "TIMESKETCH_PORT"
56 |             value = "443"
57 |           }
58 | 
59 |           env {
60 |             name = "TIMESKETCH_USER" # must match the variable read in src/timesketch_mcp_server/utils.py
61 |             value_from {
62 |               secret_key_ref {
63 |                 name = "osdfir-lab-timesketch-secret"
64 |                 key  = "timesketch-user"
65 |               }
66 |             }
67 |           }
68 | 
69 |           env {
70 |             name = "TIMESKETCH_PASSWORD"
71 |             value_from {
72 |               secret_key_ref {
73 |                 name = "osdfir-lab-timesketch-secret"
74 |                 key  = "timesketch-secret"
75 |               }
76 |             }
77 |           }
78 | 
79 |           resources {
80 |             limits = {
81 |               cpu    = "500m"
82 |               memory = "512Mi"
83 |             }
84 |             requests = {
85 |               cpu    = "250m"
86 |               memory = "256Mi"
87 |             }
88 |           }
89 |         }
90 |       }
91 |     }
92 |   }
93 | 
94 |   depends_on = [
95 |     helm_release.osdfir
96 |   ]
97 | }
98 | 
99 | # Service for the MCP Server
100 | resource "kubernetes_service" "timesketch_mcp_server" {
101 |   count = var.deploy_mcp_server ? 1 : 0
102 |   metadata {
103 |     name      = "timesketch-mcp-server"
104 |     namespace = kubernetes_namespace.osdfir.metadata[0].name
105 |   }
106 | 
107 |   spec {
108 |     selector = {
109 |       app = "timesketch-mcp-server"
110 |     }
111 | 
112 |     port {
113 |       port        = 8081
114 |       target_port = 8081
115 |     }
116 | 
117 |     type = "ClusterIP"
118 |   }
119 | }
120 | 
--------------------------------------------------------------------------------
/docs/updating_osdfir_lab.md:
--------------------------------------------------------------------------------
1 | # Updating OSDFIR Lab
2 | 
3 | This document outlines how to use the `update-osdfir-lab.ps1` script to update your local OSDFIR Lab Helm charts to the latest version.
4 | 
5 | ## Overview
6 | 
7 | The update script automates the process of fetching the latest release of the `osdfir-infrastructure` charts from GitHub, backing up your current project, and applying the updates. It also reapplies any custom configurations you have stored.
8 | 
9 | ## Current version baseline (November 2025)
10 | 
11 | These are the versions currently pinned in the lab configuration. Review the upstream release notes before changing them.
12 | 
13 | - **OSDFIR infrastructure chart**: `2.5.6` ([release notes](https://github.com/google/osdfir-infrastructure/releases/tag/osdfir-infrastructure-2.5.6)).
14 | - **Timesketch**: `20251114` image with supporting services `nginx:1.25.5-alpine-slim`, `opensearchproject/opensearch:3.1.0`, `opensearchproject/opensearch-dashboards:3.1.0`, `redis:7.4.2-alpine`, and `postgres:17.5-alpine` ([release notes](https://github.com/google/timesketch/releases/tag/20251114)). 
15 | - **OpenRelik**: core components `0.6.0` ([server release](https://github.com/openrelik/openrelik-server/releases/tag/0.6.0)) with workers pinned to `openrelik-worker-analyzer-config:0.2.0`, `openrelik-worker-plaso:0.4.0`, `openrelik-worker-timesketch:0.3.0`, `openrelik-worker-hayabusa:0.3.0`, and `openrelik-worker-extraction:0.5.0`. 16 | - **Prometheus (OpenRelik)**: `prom/prometheus:v3.0.1`. 17 | - **LLM model**: `smollm:latest` served through Ollama. Confirm model availability with `ollama pull smollm:latest` if you rebuild the cache. 18 | 19 | If upstream releases introduce new dependency versions, update `configs/osdfir-lab-values.yaml`, `terraform/variables.tf`, and the Ollama deployment templates together to keep the stack consistent. 20 | 21 | ## Post-update verification checklist 22 | 23 | After bumping versions, validate the deployment before promoting the changes: 24 | 25 | - Run `helm template` or `helm lint` against the updated values to catch obvious YAML issues. 26 | - Execute `terraform plan` to confirm the chart upgrade (`osdfir_chart_version`) and value overrides apply cleanly. 27 | - Once deployed, run `.\scripts\manage-osdfir-lab.ps1 status` followed by `ollama-test` to confirm the new LLM model responds. 28 | - Verify Timesketch AI features by requesting an NL2Q query and an event summary; both should report `smollm:latest` as the active provider. 29 | 30 | ## Usage 31 | 32 | To run the update script, open a PowerShell terminal, navigate to the project root directory, and execute the following command: 33 | 34 | ```powershell 35 | .\scripts\update-osdfir-lab.ps1 36 | ``` 37 | 38 | ### Parameters 39 | 40 | You can modify the script's behavior using the following optional parameters: 41 | 42 | - `-Force`: Skips the confirmation prompt and runs the script non-interactively. 43 | - `-NoBackup`: Disables the automatic backup of the project directory. 44 | - `-DryRun`: Performs a "dry run" of the update process. It will show you what actions would be taken without actually making any changes to your files. 45 | - `-Help`: Displays the help message for the script. 46 | 47 | ### Example 48 | 49 | To run the update without any interactive prompts: 50 | 51 | ```powershell 52 | .\scripts\update-osdfir-lab.ps1 -Force 53 | ``` 54 | 55 | ## Update Process 56 | 57 | The script performs the following steps: 58 | 59 | 1. **Backup**: Creates a `.zip` backup of the entire project directory (except for the `backups` folder itself) and stores it in the `backups/` directory. This can be skipped with the `-NoBackup` flag. 60 | 2. **Fetch Latest Release**: Connects to the GitHub API to find the latest release of the `google/osdfir-infrastructure` repository. 61 | 3. **Download & Extract**: Downloads the latest release package (`.tgz`), clears the contents of the local `helm/` directory, and extracts the new charts into it. 62 | 4. **helm-addons**: Leave templates in `helm-addons/` untouched; use values in `configs/osdfir-lab-values.yaml` to customize behavior. 63 | 5. **Apply Custom Patches**: Copies any custom configuration files from `configs/update/` into the project, overwriting the newly updated files. This ensures your local modifications are preserved. 64 | -------------------------------------------------------------------------------- /terraform/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/helm" { 5 | version = "3.0.2" 6 | constraints = ">= 2.0.0" 7 | hashes = [ 8 | "h1:KO2WWUKRSnAXKM8b1lxZlAuzXpnOkIgptuD/Shdz1Oc=", 9 | "zh:2778de76c7dfb2e85c75fe6de3c11172a25551ed499bfb9e9f940a5be81167b0", 10 | "zh:3b4c436a41e4fbae5f152852a9bd5c97db4460af384e26977477a40adf036690", 11 | "zh:617a372f5bb2288f3faf5fd4c878a68bf08541cf418a3dbb8a19bc41ad4a0bf2", 12 | "zh:84de431479548c96cb61c495278e320f361e80ab4f8835a5425ece24a9b6d310", 13 | "zh:8b4cf5f81d10214e5e1857d96cff60a382a22b9caded7f5d7a92e5537fc166c1", 14 | "zh:baeb26a00ffbcf3d507cdd940b2a2887eee723af5d3319a53eec69048d5e341e", 15 | "zh:ca05a8814e9bf5fbffcd642df3a8d9fae9549776c7057ceae6d6f56471bae80f", 16 | "zh:ca4bf3f94dedb5c5b1a73568f2dad7daf0ef3f85e688bc8bc2d0e915ec148366", 17 | "zh:d331f2129fd3165c4bda875c84a65555b22eb007801522b9e017d065ac69b67e", 18 | "zh:e583b2b478dde67da28e605ab4ef6521c2e390299b471d7d8ef05a0b608dcdad", 19 | "zh:f238b86611647c108c073d265f8891a2738d3158c247468ae0ff5b1a3ac4122a", 20 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 21 | ] 22 | } 23 | 24 | provider "registry.terraform.io/hashicorp/kubernetes" { 25 | version = "2.37.1" 26 | constraints = ">= 2.0.0" 27 | hashes = [ 28 | "h1:okZwE5tiivdUNTXdJyN01Mmzt13WU+kV5/qjyj7RhKw=", 29 | "zh:0ed097413c7fc804479e325966886b405dc0b75ad2b4f54ce4df1d8e4802b397", 30 | "zh:17dcf4a685a00d2d048671124e8a1a8e836b58ecd2ef628a1c666fe0ced2e598", 31 | "zh:36891284e5bced57c438f12d0b27856b0d4b70b562bd200b01919a6a89545be9", 32 | "zh:3e49d86b508e641ba122d1b0af24cdc4d8ffa2ec1b30022436fb1d7c6ba696ea", 33 | "zh:40be623e116708bdcb0fac32989db43720f031c5fe9a4dc63395078185d24403", 34 | "zh:44fc0ac3bc39e289b67f9dde7ee9fef29eb8192197e5e68fee69098573021722", 35 | "zh:957aa451573bcde5d57f6f8338ea3139010c7f61fefe8f6a140a8c267f056511", 36 | "zh:c55fd85b7e8acaac17e30670ac3574b88b3530820dd004bcd2a5daa8624a46e9", 37 | "zh:c743f06843a1f5ecde2b8ef639f4d3db654a334ef248dee57261c719ea843f3a", 38 | "zh:c93cc71c64b838d89522ac5fb60f68e0e1e7f2fc39db6b0ead7afd78795e79ed", 39 | "zh:eda1163c2266905adc54bc78cc3e7b606a164fbc6b59be607db933b302015ccd", 40 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 41 | ] 42 | } 43 | 44 | provider "registry.terraform.io/hashicorp/local" { 45 | version = "2.5.3" 46 | constraints = ">= 2.0.0" 47 | hashes = [ 48 | "h1:xb77x0HwwHCexdX4nLf5SrknvPskapmi4i1Vk5Tni1M=", 49 | "zh:284d4b5b572eacd456e605e94372f740f6de27b71b4e1fd49b63745d8ecd4927", 50 | "zh:40d9dfc9c549e406b5aab73c023aa485633c1b6b730c933d7bcc2fa67fd1ae6e", 51 | "zh:6243509bb208656eb9dc17d3c525c89acdd27f08def427a0dce22d5db90a4c8b", 52 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 53 | "zh:885d85869f927853b6fe330e235cd03c337ac3b933b0d9ae827ec32fa1fdcdbf", 54 | "zh:bab66af51039bdfcccf85b25fe562cbba2f54f6b3812202f4873ade834ec201d", 55 | "zh:c505ff1bf9442a889ac7dca3ac05a8ee6f852e0118dd9a61796a2f6ff4837f09", 56 | "zh:d36c0b5770841ddb6eaf0499ba3de48e5d4fc99f4829b6ab66b0fab59b1aaf4f", 57 | "zh:ddb6a407c7f3ec63efb4dad5f948b54f7f4434ee1a2607a49680d494b1776fe1", 58 | "zh:e0dafdd4500bec23d3ff221e3a9b60621c5273e5df867bc59ef6b7e41f5c91f6", 59 | "zh:ece8742fd2882a8fc9d6efd20e2590010d43db386b920b2a9c220cfecc18de47", 60 | "zh:f4c6b3eb8f39105004cf720e202f04f57e3578441cfb76ca27611139bc116a82", 61 | ] 62 | } 63 | 64 | provider "registry.terraform.io/hashicorp/null" { 65 | version = "3.2.4" 66 | constraints = ">= 3.0.0" 67 | hashes = [ 68 | "h1:+Ag4hSb4qQjNtAS6gj2+gsGl7v0iB/Bif6zZZU8lXsw=", 69 | 
"zh:59f6b52ab4ff35739647f9509ee6d93d7c032985d9f8c6237d1f8a59471bbbe2", 70 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 71 | "zh:795c897119ff082133150121d39ff26cb5f89a730a2c8c26f3a9c1abf81a9c43", 72 | "zh:7b9c7b16f118fbc2b05a983817b8ce2f86df125857966ad356353baf4bff5c0a", 73 | "zh:85e33ab43e0e1726e5f97a874b8e24820b6565ff8076523cc2922ba671492991", 74 | "zh:9d32ac3619cfc93eb3c4f423492a8e0f79db05fec58e449dee9b2d5873d5f69f", 75 | "zh:9e15c3c9dd8e0d1e3731841d44c34571b6c97f5b95e8296a45318b94e5287a6e", 76 | "zh:b4c2ab35d1b7696c30b64bf2c0f3a62329107bd1a9121ce70683dec58af19615", 77 | "zh:c43723e8cc65bcdf5e0c92581dcbbdcbdcf18b8d2037406a5f2033b1e22de442", 78 | "zh:ceb5495d9c31bfb299d246ab333f08c7fb0d67a4f82681fbf47f2a21c3e11ab5", 79 | "zh:e171026b3659305c558d9804062762d168f50ba02b88b231d20ec99578a6233f", 80 | "zh:ed0fe2acdb61330b01841fa790be00ec6beaac91d41f311fb8254f74eb6a711f", 81 | ] 82 | } 83 | -------------------------------------------------------------------------------- /configs/timesketch-mcp-server/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 
105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # pdm 111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 112 | #pdm.lock 113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 114 | # in version control. 115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 116 | .pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 168 | #.idea/ 169 | 170 | # Abstra 171 | # Abstra is an AI-powered process automation framework. 172 | # Ignore directories containing user credentials, local state, and settings. 173 | # Learn more at https://abstra.io/docs 174 | .abstra/ 175 | 176 | # Visual Studio Code 177 | # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore 178 | # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore 179 | # and can be added to the global gitignore or merged into this file. However, if you prefer, 180 | # you could uncomment the following to ignore the enitre vscode folder 181 | # .vscode/ 182 | 183 | # Ruff stuff: 184 | .ruff_cache/ 185 | 186 | # PyPI configuration file 187 | .pypirc 188 | 189 | # Cursor 190 | # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to 191 | # exclude from AI features like autocomplete and code analysis. 
Recommended for sensitive data 192 | # refer to https://docs.cursor.com/context/ignore-files 193 | .cursorignore 194 | .cursorindexingignore -------------------------------------------------------------------------------- /helm-addons/templates/ollama-deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- $releaseName := .Release.Name -}} 2 | {{- $releaseNamespace := .Release.Namespace -}} 3 | 4 | apiVersion: v1 5 | kind: PersistentVolumeClaim 6 | metadata: 7 | name: ollama-cache 8 | namespace: {{ $releaseNamespace }} 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 15Gi 15 | storageClassName: standard 16 | --- 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: ollama 21 | namespace: {{ $releaseNamespace }} 22 | spec: 23 | replicas: 1 24 | selector: 25 | matchLabels: 26 | app: ollama 27 | template: 28 | metadata: 29 | labels: 30 | app: ollama 31 | spec: 32 | initContainers: 33 | - name: model-puller 34 | image: ollama/ollama:latest 35 | imagePullPolicy: IfNotPresent 36 | command: ["/bin/sh", "-c"] 37 | args: 38 | - | 39 | set -e 40 | cat <<'SCRIPT' >/tmp/model-puller.sh 41 | set -e 42 | MODEL="{{ .Values.ai.model.name }}" 43 | MODEL_DIR=$(printf '%s' "$MODEL" | tr ':' '/') 44 | MODEL_PATH="/root/.ollama/models/manifests/registry.ollama.ai/library/$MODEL_DIR" 45 | 46 | echo "Checking if $MODEL model already exists..." 47 | if [ -f "$MODEL_PATH" ]; then 48 | echo "Model $MODEL already exists, skipping download" 49 | exit 0 50 | fi 51 | 52 | echo "Starting Ollama service..." 53 | ollama serve & 54 | OLLAMA_PID=$! 55 | 56 | echo "Waiting for Ollama to be ready..." 57 | for i in $(seq 1 30); do 58 | if ollama list >/dev/null 2>&1; then 59 | echo "Ollama service is ready!" 60 | break 61 | fi 62 | echo "Waiting for Ollama... (attempt $i/30)" 63 | sleep 5 64 | if [ $i -eq 30 ]; then 65 | echo "ERROR: Ollama service did not become ready" 66 | kill $OLLAMA_PID 2>/dev/null || true 67 | exit 1 68 | fi 69 | done 70 | 71 | echo "Pulling model $MODEL..." 72 | ollama pull "$MODEL" 73 | 74 | echo "Model pull completed, stopping init service..." 
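75 |           # Stop the background "ollama serve" started above so the init container can exit cleanly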
76 |           kill $OLLAMA_PID
77 |           wait $OLLAMA_PID
78 |           SCRIPT
79 | 
80 |           tr -d '\r' < /tmp/model-puller.sh > /tmp/model-puller-unix.sh  # strip any CRLF line endings before execution
81 |           chmod +x /tmp/model-puller-unix.sh
82 |           exec /bin/bash /tmp/model-puller-unix.sh
83 |         volumeMounts:
84 |         - name: ollama-cache
85 |           mountPath: /root/.ollama
86 |         resources:
87 |           requests:
88 |             memory: "2Gi"
89 |             cpu: "1"
90 |           limits:
91 |             memory: "4Gi"
92 |             cpu: "2"
93 |       containers:
94 |       - name: ollama
95 |         image: ollama/ollama:latest
96 |         imagePullPolicy: IfNotPresent
97 |         ports:
98 |         - containerPort: 11434
99 |         env:
100 |         - name: OLLAMA_HOST
101 |           value: "0.0.0.0:11434"
102 |         - name: OLLAMA_NUM_PARALLEL
103 |           value: "1"
104 |         - name: OLLAMA_KEEP_ALIVE
105 |           value: "5m"
106 |         volumeMounts:
107 |         - name: ollama-cache
108 |           mountPath: /root/.ollama
109 |         readinessProbe:
110 |           httpGet:
111 |             path: /api/tags
112 |             port: 11434
113 |           initialDelaySeconds: 30
114 |           periodSeconds: 10
115 |         livenessProbe:
116 |           httpGet:
117 |             path: /api/tags
118 |             port: 11434
119 |           initialDelaySeconds: 60
120 |           periodSeconds: 30
121 |         resources:
122 |           requests:
123 |             memory: "4Gi"
124 |             cpu: "2"
125 |             ephemeral-storage: "2Gi"
126 |           limits:
127 |             memory: "8Gi"
128 |             cpu: "3"
129 |             ephemeral-storage: "3Gi"
130 |       volumes:
131 |       - name: ollama-cache
132 |         persistentVolumeClaim:
133 |           claimName: ollama-cache
134 | ---
135 | apiVersion: v1
136 | kind: Service
137 | metadata:
138 |   name: ollama
139 |   namespace: {{ $releaseNamespace }}
140 | spec:
141 |   selector:
142 |     app: ollama
143 |   ports:
144 |   - protocol: TCP
145 |     port: 11434
146 |     targetPort: 11434
147 | ---
148 | apiVersion: v1
149 | kind: ConfigMap
150 | metadata:
151 |   name: ollama-models-config
152 |   namespace: {{ $releaseNamespace }}
153 | data:
154 |   available-models.json: |
155 |     {
156 |       "models": [
157 |         {
158 |           "name": "{{ .Values.ai.model.name }}",
159 |           "display_name": "{{ .Values.ai.model.name | title }}",
160 |           "description": "{{ .Values.ai.model.name }} model for text generation and analysis",
161 |           "context_length": {{ .Values.ai.model.max_input_tokens }},
162 |           "capabilities": ["text-generation", "analysis", "forensics"]
163 |         }
164 |       ]
165 |     }
--------------------------------------------------------------------------------
/terraform/ollama.tf:
--------------------------------------------------------------------------------
1 | # Read AI configuration from values file
2 | locals {
3 |   values_yaml = yamldecode(file("${path.module}/../configs/osdfir-lab-values.yaml"))
4 |   ai_config   = local.values_yaml.ai
5 | }
6 | 
7 | # Ollama deployment
8 | resource "kubernetes_persistent_volume_claim" "ollama_cache" {
9 |   count = var.deploy_ollama ? 1 : 0
10 | 
11 |   metadata {
12 |     name      = "ollama-cache"
13 |     namespace = kubernetes_namespace.osdfir.metadata[0].name
14 |   }
15 | 
16 |   spec {
17 |     access_modes       = ["ReadWriteOnce"]
18 |     storage_class_name = var.storage_class_name
19 |     resources {
20 |       requests = {
21 |         storage = "15Gi"
22 |       }
23 |     }
24 |   }
25 | }
26 | 
27 | resource "kubernetes_deployment" "ollama" {
28 |   count = var.deploy_ollama ? 1 : 0
29 | 
30 |   metadata {
31 |     name      = "ollama"
32 |     namespace = kubernetes_namespace.osdfir.metadata[0].name
33 |   }
34 | 
35 |   spec {
36 |     replicas = 1
37 | 
38 |     selector {
39 |       match_labels = {
40 |         app = "ollama"
41 |       }
42 |     }
43 | 
44 |     template {
45 |       metadata {
46 |         labels = {
47 |           app = "ollama"
48 |         }
49 |       }
50 | 
51 |       spec {
52 |         init_container {
53 |           name              = "model-puller"
54 |           image             = "ollama/ollama:latest"
55 |           image_pull_policy = "IfNotPresent"
56 | 
57 |           command = ["/bin/bash", "-c"]
58 |           args = [<<-EOT
59 |             set -e
60 |             cat <<'SCRIPT' >/tmp/model-puller.sh
61 |             set -e
62 |             MODEL="${var.ai_model_name}"
63 |             MODEL_DIR=$(printf '%s' "$MODEL" | tr ':' '/')
64 |             MODEL_PATH="/root/.ollama/models/manifests/registry.ollama.ai/library/$MODEL_DIR"
65 | 
66 |             echo "Checking if $MODEL model already exists..."
67 |             if [ -f "$MODEL_PATH" ]; then
68 |               echo "Model $MODEL already exists, skipping download"
69 |               exit 0
70 |             fi
71 | 
72 |             echo "Starting Ollama service..."
73 |             ollama serve &
74 |             OLLAMA_PID=$!
75 | 
76 |             echo "Waiting for Ollama to be ready..."
77 |             for i in $(seq 1 30); do
78 |               if ollama list >/dev/null 2>&1; then
79 |                 echo "Ollama service is ready!"
80 |                 break
81 |               fi
82 |               echo "Waiting for Ollama... (attempt $i/30)"
83 |               sleep 5
84 |               if [ $i -eq 30 ]; then
85 |                 echo "ERROR: Ollama service did not become ready"
86 |                 kill $OLLAMA_PID 2>/dev/null || true
87 |                 exit 1
88 |               fi
89 |             done
90 | 
91 |             echo "Pulling model $MODEL..."
92 |             ollama pull "$MODEL"
93 | 
94 |             echo "Model pull completed, stopping init service..."
95 |             kill $OLLAMA_PID
96 |             wait $OLLAMA_PID
97 |             SCRIPT
98 | 
99 |             tr -d '\r' < /tmp/model-puller.sh > /tmp/model-puller-unix.sh  # strip any CRLF line endings before execution
100 |             chmod +x /tmp/model-puller-unix.sh
101 |             exec /bin/bash /tmp/model-puller-unix.sh
102 |           EOT
103 |           ]
104 | 
105 |           volume_mount {
106 |             name       = "ollama-cache"
107 |             mount_path = "/root/.ollama"
108 |           }
109 | 
110 |           resources {
111 |             requests = {
112 |               memory = "2Gi"
113 |               cpu    = "1"
114 |             }
115 |             limits = {
116 |               memory = "4Gi"
117 |               cpu    = "2"
118 |             }
119 |           }
120 |         }
121 | 
122 |         container {
123 |           name              = "ollama"
124 |           image             = "ollama/ollama:latest"
125 |           image_pull_policy = "IfNotPresent"
126 | 
127 |           port {
128 |             container_port = 11434
129 |           }
130 | 
131 |           env {
132 |             name  = "OLLAMA_HOST"
133 |             value = "0.0.0.0:11434"
134 |           }
135 | 
136 |           env {
137 |             name  = "OLLAMA_NUM_PARALLEL"
138 |             value = "1"
139 |           }
140 | 
141 |           env {
142 |             name  = "OLLAMA_KEEP_ALIVE"
143 |             value = "5m"
144 |           }
145 | 
146 |           volume_mount {
147 |             name       = "ollama-cache"
148 |             mount_path = "/root/.ollama"
149 |           }
150 | 
151 |           readiness_probe {
152 |             http_get {
153 |               path = "/api/tags"
154 |               port = 11434
155 |             }
156 |             initial_delay_seconds = 30
157 |             period_seconds        = 10
158 |           }
159 | 
160 |           liveness_probe {
161 |             http_get {
162 |               path = "/api/tags"
163 |               port = 11434
164 |             }
165 |             initial_delay_seconds = 60
166 |             period_seconds        = 30
167 |           }
168 | 
169 |           resources {
170 |             requests = {
171 |               memory = "4Gi"
172 |               cpu    = "2"
173 |             }
174 |             limits = {
175 |               memory = "8Gi"
176 |               cpu    = "3"
177 |             }
178 |           }
179 |         }
180 | 
181 |         volume {
182 |           name = "ollama-cache"
183 |           persistent_volume_claim {
184 |             claim_name = "ollama-cache"
185 |           }
186 |         }
187 |       }
188 |     }
189 |   }
190 | 
191 |   depends_on = [
192 |     kubernetes_persistent_volume_claim.ollama_cache
193 |   ]
194 | }
195 | 
196 | resource "kubernetes_service" "ollama" {
197 |   count = var.deploy_ollama ? 
1 : 0 198 | 199 | metadata { 200 | name = "ollama" 201 | namespace = kubernetes_namespace.osdfir.metadata[0].name 202 | } 203 | 204 | spec { 205 | selector = { 206 | app = "ollama" 207 | } 208 | 209 | port { 210 | port = 11434 211 | target_port = 11434 212 | } 213 | 214 | type = "ClusterIP" 215 | } 216 | } 217 | 218 | resource "kubernetes_config_map" "ollama_models_config" { 219 | count = var.deploy_ollama ? 1 : 0 220 | 221 | metadata { 222 | name = "ollama-models-config" 223 | namespace = kubernetes_namespace.osdfir.metadata[0].name 224 | } 225 | 226 | data = { 227 | "available-models.json" = jsonencode({ 228 | models = [ 229 | { 230 | name = var.ai_model_name 231 | display_name = title(var.ai_model_name) 232 | description = "${var.ai_model_name} model for text generation and analysis" 233 | context_length = var.ai_model_max_input_tokens 234 | capabilities = ["text-generation", "analysis", "forensics"] 235 | } 236 | ] 237 | }) 238 | } 239 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ![Version](https://img.shields.io/badge/version-20251120-orange) 3 | ![GitHub forks](https://img.shields.io/github/forks/kev365/OSDFIR-Lab?style=social) 4 | ![GitHub stars](https://img.shields.io/github/stars/kev365/OSDFIR-Lab?style=social) 5 | [![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](LICENSE) 6 | 7 | # OSDFIR Lab 8 | 9 | **Version:** 20251120 10 | 11 | A test lab environment for deploying Open Source Digital Forensics and Incident Response (OSDFIR) tools in a Minikube environment with integrated AI capabilities using Docker Desktop. 12 | 13 | - Source project: https://github.com/google/osdfir-infrastructure 14 | 15 | ## Overview 16 | 17 | This repository provides a complete lab setup for OSDFIR tools running on Kubernetes via Minikube. It includes automated deployment scripts, AI integration experiments, and a unified management interface for easy testing and development. 18 | 19 | ## Project Structure 20 | 21 | ``` 22 | osdfir-lab/ 23 | ├── backups/ # Project backups created by the update script 24 | ├── configs/ # Custom configuration files (Timesketch, values, etc.) 
25 | ├── helm-addons/ # Add-on Helm templates (Ollama, Timesketch LLM config) 26 | ├── scripts/ # Management and utility scripts 27 | └── terraform/ # IaC: namespace, PVCs, Helm release, toggles 28 | ``` 29 | 30 | ## Prerequisites 31 | 32 | - [Docker Desktop](https://www.docker.com/products/docker-desktop/) with Kubernetes & WSL2 backend enabled 33 | - [Minikube](https://minikube.sigs.k8s.io/docs/start/) 34 | - [kubectl](https://kubernetes.io/docs/tasks/tools/) 35 | - [Helm](https://helm.sh/docs/intro/install/) 36 | - [Terraform](https://www.terraform.io/downloads) 37 | - [Windows PowerShell](https://docs.microsoft.com/en-us/powershell/) with execution policy set: 38 | ```powershell 39 | Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope Process 40 | ``` 41 | 42 | ### Example Development Environment 43 | 44 | *This lab has been developed and tested on the following setup (your mileage may vary):* 45 | 46 | **Hardware:** 47 | - CPU: Modern multi-core processor (8+ logical cores recommended) 48 | - RAM: 16GB+ system memory 49 | - Storage: 100GB+ available SSD disk space 50 | 51 | **Software:** 52 | - Windows 11 Pro with WSL2 enabled with Ubuntu 53 | - Docker Desktop for Windows 54 | - Memory allocation: 8GB+ 55 | - WSL2 integration enabled 56 | - PowerShell 5.1+ 57 | 58 | **Minikube Configuration (auto-detected by script):** 59 | - Driver: `docker` 60 | - Memory: 75% of Docker Desktop's available memory (minimum 8GB) 61 | - CPUs: 50% of system logical processors (minimum 8, maximum 12) 62 | - Disk: 40GB 63 | - Kubernetes version: stable 64 | 65 | ## Quick Start 66 | 67 | ### One-Command Deployment 68 | 69 | Open PowerShell as **Administrator** and run: 70 | 71 | ```powershell 72 | ./scripts/manage-osdfir-lab.ps1 deploy 73 | ``` 74 | 75 | This automatically handles: 76 | - Docker Desktop startup 77 | - Minikube cluster creation with optimal resource allocation 78 | - Terraform infrastructure deployment 79 | - Service port forwarding 80 | 81 | ### Access Your Lab 82 | 83 | ```powershell 84 | # Check status 85 | ./scripts/manage-osdfir-lab.ps1 status 86 | 87 | # Get login credentials 88 | ./scripts/manage-osdfir-lab.ps1 creds 89 | 90 | # Access services at: 91 | # - Timesketch: http://localhost:5000 92 | # - OpenRelik: http://localhost:8711 93 | # - OpenRelik API: http://localhost:8710 94 | ``` 95 | 96 | ### Cleanup 97 | 98 | ```powershell 99 | ./scripts/manage-osdfir-lab.ps1 teardown-lab 100 | ``` 101 | 102 | ## Components 103 | 104 | ### Core OSDFIR Tools 105 | 106 | - **[Timesketch](https://timesketch.org/)** - Timeline analysis and collaborative investigation 107 | - **[OpenRelik](https://openrelik.org/)** - Evidence processing and workflow automation 108 | - **[HashR](https://osdfir.blogspot.com/2024/03/introducing-hashr.html)** - Hash verification and analysis 109 | - **[Yeti](https://yeti-platform.io/)** - Threat intelligence platform 110 | 111 | ### Infrastructure 112 | 113 | - **Minikube** - Local Kubernetes cluster 114 | - **Terraform** - Infrastructure as Code 115 | - **Helm** - Package management (pulls upstream `osdfir-infrastructure` chart) 116 | - **Docker Desktop** - Container runtime 117 | 118 | ### Component Versions (20251120 baseline) 119 | 120 | - `osdfir-infrastructure` Helm chart: **2.5.6** 121 | - Timesketch image: **20251114** (nginx `1.25.5-alpine-slim`, OpenSearch `3.1.0`, Redis `7.4.2-alpine`, Postgres `17.5-alpine`) 122 | - OpenRelik core services: **0.6.0** (workers pinned to analyzer-config `0.2.0`, plaso `0.4.0`, timesketch `0.3.0`, hayabusa `0.3.0`, 
extraction `0.5.0`) 123 | - Ollama model: **smollm:latest** 124 | 125 | ## 🚧 Work in Progress 126 | 127 | ### AI Integration (Experimental) 128 | 129 | - **Ollama Server** - Local AI model hosting (`smollm:latest`). **NOTE: This is intentionally small for this project, feel free to adjust.** 130 | - **Timesketch LLM Features** - Natural Language to Query (NL2Q) + Event Summarization (Working!) 131 | - **OpenRelik AI Workers** - AI-powered evidence analysis (In Progress) 132 | - **Timesketch MCP Server** - Prebuilt via GitHub Actions, deployable via Terraform toggle. 133 | - **Yeti MCP Server** - Under consideration. 134 | 135 | **Current Status:** 136 | - Basic integration working, expanding AI capabilities across tools. 137 | - The model will be slow and may time out. However, the purpose was to deploy something of reasonable size that is functional. 138 | - A larger model will be needed for better results and performance. 139 | 140 | ## Management 141 | 142 | The unified management script handles all operations: 143 | 144 | ```powershell 145 | ./scripts/manage-osdfir-lab.ps1 [action] 146 | 147 | # Key actions: 148 | deploy # Full deployment 149 | status # Check everything 150 | start/stop # Service access 151 | creds # Login credentials 152 | ollama # AI model status 153 | teardown-lab-all # Complete cleanup 154 | ``` 155 | 156 | For manual control or troubleshooting, see [commands.md](commands.md). 157 | 158 | ## Useful Resources 159 | 160 | - **[Updating the Lab](docs/updating_osdfir_lab.md)** - Instructions for updating the lab components. 161 | - **[Official OSDFIR Documentation](https://osdfir.org/)** 162 | 163 | ## Troubleshooting Tips 164 | - When re-deploying with DFIQ previously enabled, if you get the message "No question found with this ID", try closing and re-opening the browser. 165 | - Terraform may eventually time out waiting for all pods to start; use `kubectl get pods -n osdfir` to check status. A Terraform timeout does not mean the deployment failed, only that Terraform stopped waiting. 166 | - After initial deployment, if the Timesketch AI features warn that a provider is needed, you may need to wait and reload the browser before the settings take effect. 167 | - On a first deployment the management script automatically extends Helm's timeout and will periodically remind you that you can run `kubectl get deploy -n osdfir` in another terminal; expect a longer wait while images download and the Ollama model is pulled. 168 | - For more serious testing, connect to a stronger LLM. 169 | 170 | ## Known Issues 171 | - Some issues still come up with partial re-deployments/installs, mostly around secrets. 172 | - LLM features are not fully functional in this lab; with the default deployment several features work but may time out (see the timeout example below). 
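If the LLM features time out with the default `smollm:latest` model, one low-effort mitigation is to raise the AI timeouts in `configs/osdfir-lab-values.yaml` and re-deploy. A minimal sketch (the 300-second values below are illustrative, not tested defaults):

```yaml
# configs/osdfir-lab-values.yaml (excerpt)
ai:
  model:
    timeouts:
      llm_request: 300   # main LLM request timeout (seconds)
      api_timeout: 300   # Timesketch API timeout (seconds)
      http_connect: 20   # HTTP connection timeout (unchanged)
      http_read: 300     # HTTP read timeout (should match llm_request)
      warm_up: 300       # model warm-up timeout
```

After editing, re-run `./scripts/manage-osdfir-lab.ps1 deploy` so the new values are rendered into the deployed configuration.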
173 | 174 | ## To-Do List 175 | 176 | ### Project Improvements 177 | - **Organization**: Refine project structure and code organization 178 | - **Standardization**: Create consistent patterns across configuration files 179 | - **Documentation**: Update docs and create comprehensive how-to guides 180 | - **Deployment**: Improve deployment process and error handling 181 | - **Pod Management**: Enhance methods to add/remove/modify pods 182 | - **Integration**: Complete Yeti and HashR integration setup 183 | - **External LLMs**: Determine settings for using LLMs outside of the pods 184 | - **OpenSearch Management**: Establish process for backing up/upgrading/scaling OpenSearch 185 | 186 | ## Contributing 187 | 188 | This is a personal lab project, though suggestions and improvements are welcome! 189 | 190 | Otherwise, contribute to source projects! 191 | - https://github.com/google/osdfir-infrastructure 192 | - https://github.com/google/timesketch 193 | - https://github.com/openrelik 194 | - https://github.com/timesketch/timesketch-mcp-server 195 | 196 | ## Disclaimer 197 | 198 | > **⚠️ Personal Test Lab Environment** 199 | > This is a personal development and testing lab for experimenting with OSDFIR tools and AI integration features. It's designed for learning, development, and fun - not for production use. 200 | 201 | ## Author 202 | 203 | Kevin Stokes 204 | 205 | [Blog](https://dfir-kev.medium.com/) · [LinkedIn Profile](https://www.linkedin.com/in/dfir-kev/) 206 | 207 | [Mmm Coffee..](https://www.buymeacoffee.com/dfirkev) · [When Bored](https://www.teepublic.com/user/kstrike) 208 | 209 | -------------------------------------------------------------------------------- /configs/osdfir-lab-values.yaml: -------------------------------------------------------------------------------- 1 | # OSDFIR Infrastructure Configuration 2 | 3 | # Centralized AI Model Configuration - Configure models here only 4 | # The ollama deployment is managed via helm-addons/templates/ollama-deployment.yaml 5 | # and uses these values for model configuration 6 | ai: 7 | model: 8 | name: "smollm:latest" 9 | provider: "ollama" 10 | server_url: "http://ollama.osdfir.svc.cluster.local:11434" 11 | timeouts: 12 | llm_request: 120 # Main LLM request timeout (seconds) 13 | api_timeout: 120 # Timesketch API timeout (seconds) 14 | http_connect: 20 # HTTP connection timeout 15 | http_read: 120 # HTTP read timeout (should match llm_request) 16 | warm_up: 120 # Model warm-up timeout 17 | max_input_tokens: 8192 18 | # max_output_tokens: 1024 19 | # temperature: 0.1 20 | # Forensic-specific system prompt 21 | system_prompt: | 22 | You are a digital forensics AI assistant. Analyze the provided data with attention to: 23 | - Suspicious patterns or anomalies 24 | - Potential indicators of compromise 25 | - Timeline relationships 26 | - Data correlation opportunities 27 | Provide clear, actionable insights for investigators. 
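# Illustrative example (hypothetical model name): to try a larger model, only
# the values above need to change, e.g.:
#
# ai:
#   model:
#     name: "llama3:8b"
#
# The deployment templates read the model name from this block (see
# helm-addons/templates/ollama-deployment.yaml), so the model named here should
# be pulled by the Ollama init container on the next deploy.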
28 | 29 | # Global deployment configuration 30 | global: 31 | namespace: osdfir # Kubernetes Namespace 32 | timesketch: 33 | enabled: true 34 | openrelik: 35 | enabled: true 36 | yeti: 37 | enabled: false # Not currently set up in the lab 38 | hashr: 39 | enabled: false # Not currently set up in the lab 40 | grr: 41 | enabled: false # Not currently used in the lab 42 | 43 | # Timesketch MCP Server deployment, enable/disable here: terraform\variables.tf 44 | # Ollama deployment, enable/disable here: terraform\variables.tf 45 | 46 | # Global Security Settings 47 | securityContext: 48 | runAsNonRoot: true 49 | runAsUser: 1000 50 | fsGroup: 1000 51 | 52 | # Global Pod security standards 53 | podSecurityContext: 54 | seccompProfile: 55 | type: RuntimeDefault 56 | 57 | # Global Network Policies 58 | networkPolicy: 59 | enabled: true 60 | 61 | # Timesketch Configuration 62 | timesketch: 63 | image: 64 | repository: us-docker.pkg.dev/osdfir-registry/timesketch/timesketch 65 | pullPolicy: IfNotPresent 66 | tag: "20251114" 67 | config: 68 | override: true 69 | existingConfigMap: osdfir-lab-ts-configs 70 | createUser: true 71 | oidc: 72 | enabled: false 73 | frontend: 74 | resources: 75 | limits: {} 76 | requests: {} 77 | worker: 78 | resources: 79 | limits: {} 80 | requests: {} 81 | nginx: 82 | image: 83 | repository: nginx 84 | tag: "1.25.5-alpine-slim" 85 | securityContext: 86 | enabled: true 87 | opensearch: 88 | image: 89 | repository: opensearchproject/opensearch 90 | tag: "3.1.0" 91 | replicas: 1 92 | sysctlInit: 93 | enabled: true 94 | opensearchJavaOpts: "-Xmx512M -Xms512M" 95 | redis: 96 | image: 97 | repository: redis 98 | tag: "7.4.2-alpine" 99 | postgresql: 100 | image: 101 | repository: postgres 102 | tag: "17.5-alpine" 103 | 104 | # OpenRelik Configuration 105 | openrelik: 106 | frontend: 107 | image: 108 | repository: ghcr.io/openrelik/openrelik-ui 109 | pullPolicy: IfNotPresent 110 | tag: "0.6.0" 111 | api: 112 | image: 113 | repository: ghcr.io/openrelik/openrelik-server 114 | pullPolicy: IfNotPresent 115 | tag: "0.6.0" 116 | resources: 117 | limits: {} 118 | requests: {} 119 | command: ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8710"] 120 | env: 121 | REDIS_URL: "redis://openrelik-redis:6379" 122 | mediator: 123 | image: 124 | repository: ghcr.io/openrelik/openrelik-mediator 125 | pullPolicy: IfNotPresent 126 | tag: "0.6.0" 127 | metrics: 128 | image: 129 | repository: ghcr.io/openrelik/openrelik-metrics 130 | pullPolicy: IfNotPresent 131 | tag: "0.6.0" 132 | redis: 133 | image: 134 | repository: redis 135 | tag: "7.4.2-alpine" 136 | postgresql: 137 | image: 138 | repository: postgres 139 | tag: "17.5-alpine" 140 | prometheus: 141 | image: 142 | repository: prom/prometheus 143 | tag: "v3.0.1" 144 | 145 | # LLM analyzer configuration (using OpenRelik chart's built-in configmap) 146 | config: 147 | analyzers: 148 | llm: 149 | provider: "ollama" 150 | model: "smollm:latest" 151 | #max_input_tokens: !!int 4096 152 | # max_output_tokens: !!int 2048 153 | # temperature: !!float 0.1 154 | ollama_server_url: "http://ollama.osdfir.svc.cluster.local:11434" 155 | system_prompt: "You are a digital forensics expert assistant." 
156 | 157 | # OpenRelik Workers 158 | workers: 159 | - name: openrelik-worker-analyzer-config 160 | image: ghcr.io/openrelik/openrelik-worker-analyzer-config:0.2.0 161 | command: "celery --app=src.app worker --task-events --concurrency=4 --loglevel=INFO -Q openrelik-worker-analyzer-config" 162 | env: {} 163 | resources: {} 164 | 165 | - name: openrelik-worker-plaso 166 | image: ghcr.io/openrelik/openrelik-worker-plaso:0.4.0 167 | command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-plaso" 168 | env: {} 169 | resources: {} 170 | 171 | ## When deployed via the OSDFIR Infrastructure chart, the Timesketch Worker 172 | ## automatically receives its required environment variables through Helm. 173 | - name: openrelik-worker-timesketch 174 | image: ghcr.io/openrelik/openrelik-worker-timesketch:0.3.0 175 | command: "celery --app=src.app worker --task-events --concurrency=1 --loglevel=INFO -Q openrelik-worker-timesketch" 176 | env: {} 177 | resources: {} 178 | 179 | - name: openrelik-worker-hayabusa 180 | image: ghcr.io/openrelik/openrelik-worker-hayabusa:0.3.0 181 | command: "celery --app=src.app worker --task-events --concurrency=4 --loglevel=INFO -Q openrelik-worker-hayabusa" 182 | env: {} 183 | resources: {} 184 | 185 | # LLM worker - environment variables automatically set by OpenRelik chart from config.analyzers.llm 186 | # Additional env vars added to ensure chunker gets proper integer values 187 | - name: openrelik-worker-llm 188 | image: ghcr.io/openrelik/openrelik-worker-llm:latest 189 | command: "celery --app=src.app worker --task-events --concurrency=1 --loglevel=DEBUG -Q openrelik-worker-llm" 190 | env: 191 | #- name: LLM_MAX_OUTPUT_TOKENS 192 | # value: "2048" 193 | #- name: LLM_TEMPERATURE 194 | # value: "0.1" 195 | - name: LLM_PROVIDER 196 | value: "ollama" 197 | - name: LLM_MODEL_NAME 198 | value: "smollm:latest" 199 | resources: 200 | requests: 201 | memory: 2Gi 202 | cpu: 1 203 | limits: 204 | memory: 4Gi 205 | cpu: 2 206 | 207 | - name: openrelik-worker-extraction 208 | image: ghcr.io/openrelik/openrelik-worker-extraction:0.5.0 209 | command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-extraction" 210 | env: {} 211 | resources: {} 212 | 213 | # - name: openrelik-worker-strings 214 | # image: ghcr.io/openrelik/openrelik-worker-strings:latest 215 | # command: "celery --app=src.app worker --task-events --concurrency=4 --loglevel=INFO -Q openrelik-worker-strings" 216 | # env: {} 217 | # resources: {} 218 | 219 | # - name: openrelik-worker-analyzer-logs 220 | # image: ghcr.io/openrelik/openrelik-worker-analyzer-logs:latest 221 | # command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-analyzer-logs" 222 | # env: {} 223 | # resources: {} 224 | 225 | # - name: openrelik-worker-bulkextractor 226 | # image: ghcr.io/openrelik/openrelik-worker-bulkextractor:latest 227 | # command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-bulkextractor" 228 | # env: {} 229 | # resources: {} 230 | 231 | # - name: openrelik-worker-capa 232 | # image: ghcr.io/openrelik/openrelik-worker-capa:latest 233 | # command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-capa" 234 | # env: {} 235 | # resources: {} 236 | 237 | # - name: openrelik-worker-chromecreds 238 | # image: ghcr.io/openrelik/openrelik-worker-chromecreds:latest 239 | # command: "celery --app=src.app 
worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-chromecreds" 240 | # env: {} 241 | # resources: {} 242 | 243 | # - name: openrelik-worker-cloud-logs 244 | # image: ghcr.io/openrelik/openrelik-worker-cloud-logs:latest 245 | # command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-cloud-logs" 246 | # env: {} 247 | # resources: {} 248 | 249 | # - name: openrelik-worker-dfindexeddb 250 | # image: ghcr.io/openrelik/openrelik-worker-dfindexeddb:latest 251 | # command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-dfindexeddb" 252 | # env: {} 253 | # resources: {} 254 | 255 | # - name: openrelik-worker-entropy 256 | # image: ghcr.io/openrelik/openrelik-worker-entropy:latest 257 | # command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-entropy" 258 | # env: {} 259 | # resources: {} 260 | 261 | # - name: openrelik-worker-exif 262 | # image: ghcr.io/openrelik/openrelik-worker-exif:latest 263 | # command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-exif" 264 | # env: {} 265 | # resources: {} 266 | 267 | # - name: openrelik-worker-grep 268 | # image: ghcr.io/openrelik/openrelik-worker-grep:latest 269 | # command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-grep" 270 | # env: {} 271 | # resources: {} 272 | 273 | 274 | # - name: openrelik-worker-os-creds 275 | # image: ghcr.io/openrelik/openrelik-worker-os-creds:latest 276 | # command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-os-creds" 277 | # env: {} 278 | # resources: {} 279 | 280 | # - name: openrelik-worker-yara 281 | # image: ghcr.io/openrelik/openrelik-worker-yara:latest 282 | # command: "celery --app=src.app worker --task-events --concurrency=2 --loglevel=INFO -Q openrelik-worker-yara" 283 | # env: {} 284 | # resources: {} 285 | 286 | # HashR Configuration 287 | hashr: 288 | image: 289 | repository: us-docker.pkg.dev/osdfir-registry/hashr/release/hashr 290 | pullPolicy: IfNotPresent 291 | tag: v1.8.2 292 | postgresql: 293 | image: 294 | repository: postgres 295 | tag: "17.5" 296 | 297 | # Yeti Configuration 298 | yeti: 299 | persistence: 300 | enabled: true 301 | existingPVC: osdfirvolume 302 | frontend: 303 | image: 304 | repository: yetiplatform/yeti-frontend 305 | pullPolicy: IfNotPresent 306 | tag: 2.4.2 307 | api: 308 | image: 309 | repository: yetiplatform/yeti 310 | pullPolicy: IfNotPresent 311 | tag: 2.4.2 312 | resources: 313 | limits: {} 314 | requests: {} 315 | tasks: 316 | image: 317 | repository: yetiplatform/yeti 318 | pullPolicy: IfNotPresent 319 | tag: 2.4.2 320 | redis: 321 | image: 322 | repository: redis 323 | tag: "7.4.4-alpine" 324 | arangodb: 325 | image: 326 | repository: arangodb 327 | pullPolicy: IfNotPresent 328 | tag: "3.12.5" 329 | -------------------------------------------------------------------------------- /configs/timesketch-mcp-server/src/timesketch_mcp_server/tools.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from .utils import get_timesketch_client 4 | from timesketch_api_client import search 5 | from collections import defaultdict 6 | import pandas as pd 7 | 8 | from fastmcp import FastMCP 9 | 10 | mcp = FastMCP(name="timesketch-tools") 11 | 12 | RESERVED_CHARS = [ 13 | "+", 14 | "-", 15 | "=", 16 | "&&", 17 | 
"||", 18 | ">", 19 | "<", 20 | "!", 21 | "(", 22 | ")", 23 | "{", 24 | "}", 25 | "[", 26 | "]", 27 | "^", 28 | '"', 29 | "~", 30 | "*", 31 | "?", 32 | ":", 33 | "\\", 34 | "/", 35 | ] 36 | 37 | 38 | def _run_field_bucket_aggregation( 39 | sketch: Any, field: str, limit: int = 10000 40 | ) -> list[dict[str, int]]: 41 | """ 42 | Helper function to run a field bucket aggregation on a Timesketch sketch. 43 | 44 | Args: 45 | sketch: The Timesketch sketch object. 46 | field: The field to aggregate on. 47 | limit: The maximum number of buckets to return. Defaults to 10000. 48 | 49 | Returns: 50 | A list of dictionaries containing the field bucket aggregation results. 51 | """ 52 | aggregation_result = sketch.run_aggregator( 53 | aggregator_name="field_bucket", 54 | aggregator_parameters={ 55 | "field": field, 56 | "limit": limit, 57 | }, 58 | ) 59 | return aggregation_result.data.get("objects")[0]["field_bucket"]["buckets"] 60 | 61 | 62 | @mcp.tool() 63 | def discover_data_types(sketch_id: int) -> list[dict[str, int]]: 64 | """Discover data types in a Timesketch sketch. 65 | 66 | Args: 67 | sketch_id: The ID of the Timesketch sketch to discover data types from. 68 | 69 | Returns: 70 | A list of dictionaries containing data type information, including: 71 | - data_type: The name of the data type. 72 | - count: The number of events for that data type. 73 | """ 74 | 75 | sketch = get_timesketch_client().get_sketch(sketch_id) 76 | return _run_field_bucket_aggregation(sketch, "data_type") 77 | 78 | 79 | @mcp.tool() 80 | def count_distinct_field_values(sketch_id: int, field: str) -> list[dict[str, int]]: 81 | """Runs an aggregation to count distinct values for the specified field. 82 | 83 | Args: 84 | sketch_id: The ID of the Timesketch sketch to run the aggregation on. 85 | field: The field to count distinct values for, eg. "data_type", 86 | "source_ip", "yara_match". 87 | 88 | Returns: 89 | A list of dictionaries containing the aggregation results. 90 | """ 91 | 92 | sketch = get_timesketch_client().get_sketch(sketch_id) 93 | return _run_field_bucket_aggregation(sketch, field) 94 | 95 | 96 | @mcp.tool() 97 | def discover_fields_for_datatype(sketch_id: int, data_type: str) -> list[str]: 98 | """Discover fields for a specific data type in a Timesketch sketch. 99 | 100 | Args: 101 | sketch_id: The ID of the Timesketch sketch to discover fields from. 102 | data_type: The data type to discover fields for. 103 | 104 | Returns: 105 | A list of field names that are present in the events of the specified data type. 106 | """ 107 | 108 | events = do_timesketch_search( 109 | sketch_id=sketch_id, query=f'data_type:"{data_type}"', limit=1000, sort="desc" 110 | ).to_dict(orient="records") 111 | fields = defaultdict(dict) 112 | sketch = get_timesketch_client().get_sketch(sketch_id) 113 | for event in events: 114 | for field in event.keys(): 115 | if field in fields: 116 | continue 117 | 118 | top_values = _run_field_bucket_aggregation(sketch, field, limit=10) 119 | max_occurrences = max([value["count"] for value in top_values], default=0) 120 | 121 | # If the max occurrences for this field is less than 10, 122 | # it means it's probably unique. 
123 | if max_occurrences < 10: 124 | fields[field] = None 125 | continue 126 | 127 | examples = [value[field] for value in top_values] 128 | fields[field] = examples 129 | 130 | return [field for field in fields.keys() if fields[field] is not None] 131 | 132 | 133 | @mcp.tool() 134 | def search_timesketch_events_substrings( 135 | sketch_id: int, 136 | substrings: list[str], 137 | regex: bool = False, 138 | boolean_operator: str = "AND", 139 | sort: str = "desc", 140 | starred: bool = False, 141 | ) -> list[dict[str, Any]]: 142 | """Search a Timesketch sketch and return a list of event dictionaries. 143 | 144 | This is the preferred method to use when searching for specific substrings in 145 | event messages. 146 | 147 | Supports both simple substring matching and regular expression matching. 148 | Regex matching allows for more complex patterns but is more expensive, 149 | so use with caution. 150 | 151 | Args: 152 | sketch_id: The ID of the Timesketch sketch to search. 153 | substrings: A list of substrings to search for in the event messages. 154 | regex: If True, treat substrings as regex patterns. If False, treat them as 155 | simple substrings. Defaults to False. 156 | boolean_operator: The boolean operator to use for combining multiple 157 | substring queries. Must be one of "AND" or "OR". Defaults to "AND". 158 | sort: Sort order for datetime field, either "asc" or "desc". Default is "desc". 159 | Useful for getting the most recent or oldest events. 160 | starred: If True, only return starred events. If False, return all events. 161 | 162 | Returns: 163 | A list of dictionaries representing the events found in the sketch. 164 | Each dictionary contains fields like datetime, data_type, tag, message, 165 | and optionally yara_match and sha256_hash if they are present in the results. 166 | 167 | If the query errors, an error object is returned instead. 168 | """ 169 | 170 | if not substrings: 171 | raise ValueError("Substrings list cannot be empty.") 172 | 173 | if boolean_operator not in ["AND", "OR"]: 174 | raise ValueError( 175 | f"Invalid boolean operator: {boolean_operator}. " 176 | "Must be one of 'AND' or 'OR'." 177 | ) 178 | boolean_operator = f" {boolean_operator} " 179 | 180 | terms = [] 181 | 182 | for substring in substrings: 183 | if not substring: 184 | continue 185 | 186 | if regex: 187 | terms.append(f"/.*{substring}.*/") 188 | else: 189 | for char in RESERVED_CHARS: 190 | substring = substring.replace(char, f"\\{char}") 191 | terms.append(f"*{substring}*") 192 | 193 | query = boolean_operator.join(terms) 194 | try: 195 | results_df = do_timesketch_search( 196 | sketch_id=sketch_id, 197 | query=query, 198 | sort=sort, 199 | starred=starred, 200 | ) 201 | return results_df.to_dict(orient="records") 202 | except Exception as e: 203 | return [{"result": f"Error: {str(e)}"}] 204 | 205 | 206 | @mcp.tool() 207 | def search_timesketch_events_advanced( 208 | sketch_id: int, 209 | query: str, 210 | sort: str = "desc", 211 | starred: bool = False, 212 | ) -> list[dict[str, Any]]: 213 | """ 214 | Search a Timesketch sketch using Lucene queries and return a list of event dictionaries. 215 | 216 | Events always contain the following fields: 217 | • datetime (useful for sorting) 218 | • data_type (useful for filtering). 
• message 220 | 221 | Always put double quotes around field values in queries (so data_type:"syslog:cron:task_run" 222 | instead of data_type:syslog:cron:task_run) 223 | 224 | Examples: 225 | • Datatype `data_type:"apache:access_log:entry"` 226 | • Field match `filename:*.docx` 227 | • Exact phrase `"mimikatz.exe"` 228 | • Boolean `(ssh AND error) OR tag:bruteforce` 229 | • Date range `datetime:[2025-04-01 TO 2025-04-02]` 230 | • Wildcard `user:sam*` 231 | • Regex `host:/.*\\.google\\.com/` 232 | 233 | Args: 234 | sketch_id: The ID of the Timesketch sketch to search. 235 | query: The Lucene/OpenSearch query string to use for searching. 236 | sort: Sort order for datetime field, either "asc" or "desc". Default is "desc". 237 | starred: If True, only return starred events. If False, return all events. 238 | 239 | Returns: 240 | A list of dictionaries representing the events found in the sketch. 241 | Each dictionary contains fields like datetime, data_type, tag, message, 242 | and optionally yara_match and sha256_hash if they are present in the results. 243 | 244 | If the query errors, an error object is returned instead. 245 | """ 246 | 247 | try: 248 | results_df = do_timesketch_search( 249 | sketch_id=sketch_id, 250 | query=query, 251 | sort=sort, 252 | starred=starred, 253 | ) 254 | return results_df.to_dict(orient="records") 255 | except Exception as e: 256 | return [{"result": f"Error: {str(e)}"}] 257 | 258 | 259 | def do_timesketch_search( 260 | sketch_id: int, 261 | query: str, 262 | limit: int = 300, 263 | sort: str = "desc", 264 | starred: bool = False, 265 | ) -> pd.DataFrame: 266 | """Performs a search on a Timesketch sketch and returns a pandas DataFrame. 267 | 268 | Args: 269 | sketch_id: The ID of the Timesketch sketch to search. 270 | query: The Lucene/OpenSearch query string to use for searching. 271 | limit: Optional maximum number of events to return. 272 | sort: Sort order for datetime field, either "asc" or "desc". Default is 273 | "desc". 274 | starred: If True, only return starred events. If False, return all events. 275 | 276 | Returns: 277 | A pandas DataFrame containing the search results. 278 | 279 | Raises: 280 | ValueError: If the sketch with the given ID does not exist. 281 | RuntimeError: If the search fails for any reason. Usually due to an invalid query. 
282 | """ 283 | sketch = get_timesketch_client().get_sketch(sketch_id) 284 | if not sketch: 285 | raise ValueError(f"Sketch with ID {sketch_id} not found.") 286 | 287 | search_instance = search.Search(sketch=sketch) 288 | search_instance.query_string = query 289 | 290 | if limit: 291 | search_instance.max_entries = limit 292 | else: 293 | search_instance.max_entries = search_instance.expected_size + 1 294 | 295 | search_instance.return_fields = "*,_id" 296 | if sort == "desc": 297 | search_instance.order_descending() 298 | else: 299 | search_instance.order_ascending() 300 | 301 | if starred: 302 | star_chip = search.LabelChip() 303 | star_chip.use_star_label() 304 | search_instance.add_chip(star_chip) 305 | 306 | result_df = search_instance.table 307 | 308 | if result_df.empty: 309 | return result_df 310 | 311 | extra_cols = [] 312 | if "yara_match" in result_df.columns: 313 | result_df["yara_match"] = result_df["yara_match"].fillna("N/A") 314 | extra_cols.append("yara_match") 315 | 316 | if "sha256_hash" in result_df.columns: 317 | result_df["sha256_hash"] = result_df["sha256_hash"].fillna("N/A") 318 | extra_cols.append("sha256_hash") 319 | 320 | # We convert the datetime column to ISO format so it shows up as a 321 | # serializable string and not a datetime object. 322 | result_df["datetime"] = result_df["datetime"].apply(lambda x: x.isoformat()) 323 | result_df = result_df.fillna("N/A") 324 | 325 | return result_df 326 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /configs/timesketch-mcp-server/LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /configs/timesketch/timesketch.conf: -------------------------------------------------------------------------------- 1 | # Timesketch configuration 2 | 3 | # Show debug information. 4 | # Note: It is a security risk to have this enabled in production. 5 | DEBUG = False 6 | 7 | # Key for signing cookies and for CSRF protection. 8 | # 9 | # This should be a unique random string. Don't share this with anyone. 10 | # To generate a key, you can for example use openssl: 11 | # $ openssl rand -base64 32 12 | SECRET_KEY = '' 13 | 14 | # Setup the database. 15 | # 16 | # For more options, see the official documentation: 17 | # https://pythonhosted.org/Flask-SQLAlchemy/config.html 18 | # By default sqlite is used. 19 | # 20 | # NOTE: SQLite should only be used in development. Use PostgreSQL or MySQL in 21 | # production. 22 | SQLALCHEMY_DATABASE_URI = 'postgresql://:@localhost/timesketch' 23 | 24 | # Configure where your OpenSearch server is located. 25 | # 26 | # Make sure that the OpenSearch server is properly secured and not accessible 27 | # from the internet. See the following link for more information: 28 | # https://opensearch.org/docs/latest/getting-started/security/ 29 | OPENSEARCH_HOST = '127.0.0.1' 30 | OPENSEARCH_PORT = 9200 31 | OPENSEARCH_USER = None 32 | OPENSEARCH_PASSWORD = None 33 | OPENSEARCH_SSL = False 34 | OPENSEARCH_VERIFY_CERTS = True 35 | OPENSEARCH_TIMEOUT = 10 36 | OPENSEARCH_FLUSH_INTERVAL = 5000 37 | OPENSEARCH_INDEX_WAIT_TIMEOUT = 10 38 | OPENSEARCH_MINIMUM_HEALTH = 'yellow' 39 | # Be careful when increasing the upper limit since this will impact your 40 | # OpenSearch cluster's performance and storage requirements! 41 | OPENSEARCH_MAPPING_BUFFER = 0.1 42 | OPENSEARCH_MAPPING_UPPER_LIMIT = 1000 43 | 44 | # Define which labels should be set so that a sketch and its 45 | # timelines will not be deleted. This can be used to add a list of different 46 | # labels that ensure that a sketch and its associated timelines cannot be 47 | # deleted. 48 | LABELS_TO_PREVENT_DELETION = ['protected', 'preserved'] 49 | 50 | # Number of seconds before a timeout occurs in bulk operations in the 51 | # OpenSearch client. 52 | TIMEOUT_FOR_EVENT_IMPORT = 180 53 | 54 | # Location for the configuration file of the data finder. 55 | DATA_FINDER_PATH = '/etc/timesketch/data_finder.yaml' 56 | 57 | #------------------------------------------------------------------------------- 58 | # Single Sign On (SSO) configuration. 59 | 60 | # Your web server can handle authentication for you by setting an environment 61 | # variable when the user is successfully authenticated. The standard environment 62 | # variable is REMOTE_USER and this is the default, but if your SSO system uses 63 | # another name you can configure that here. 64 | 65 | SSO_ENABLED = False 66 | SSO_USER_ENV_VARIABLE = 'REMOTE_USER' 67 | 68 | # Some SSO systems provide group information as an environment variable. 69 | # Timesketch can automatically create groups and add users as members. 
83 | #-------------------------------------------------------------------------------
84 | # Google Cloud Identity-Aware Proxy (Cloud IAP) authentication configuration.
85 | 
86 | # Cloud IAP controls access to your Timesketch server running on Google Cloud
87 | # Platform. Cloud IAP works by verifying a user’s identity and determining if
88 | # that user should be allowed to access the server.
89 | #
90 | # For this feature you will need to configure your Cloud IAP and HTTPS load-
91 | # balancer. Follow the official documentation to get everything ready:
92 | # https://cloud.google.com/iap/docs/enabling-compute-howto
93 | 
94 | # Enable Cloud IAP authentication support.
95 | GOOGLE_IAP_ENABLED = False
96 | 
97 | # This information is available via the Google Cloud console:
98 | # https://cloud.google.com/iap/docs/signed-headers-howto
99 | GOOGLE_IAP_PROJECT_NUMBER = ''
100 | GOOGLE_IAP_BACKEND_ID = ''
101 | 
102 | # DON'T EDIT: The Google IAP expected audience is derived from the Cloud
103 | # project number and backend ID.
104 | GOOGLE_IAP_AUDIENCE = '/projects/{}/global/backendServices/{}'.format(
105 |     GOOGLE_IAP_PROJECT_NUMBER,
106 |     GOOGLE_IAP_BACKEND_ID
107 | )
108 | 
109 | GOOGLE_IAP_ALGORITHM = 'ES256'
110 | GOOGLE_IAP_ISSUER = 'https://cloud.google.com/iap'
111 | GOOGLE_IAP_PUBLIC_KEY_URL = 'https://www.gstatic.com/iap/verify/public_key'
112 | 
113 | #-------------------------------------------------------------------------------
114 | # Google Cloud OpenID Connect (OIDC) authentication configuration.
115 | 
116 | # Cloud OIDC controls access to your Timesketch server running on Google Cloud
117 | # Platform. Cloud OIDC works by verifying a user’s identity and determining if
118 | # that user should be allowed to access the server.
119 | 
120 | # Enable Cloud OIDC authentication support.
121 | # For Google's federated identity, leave AUTH_URL and DISCOVERY_URL as None.
122 | # For others, refer to your OIDC provider configuration. The configuration can
123 | # be obtained from the discovery URL, e.g. https://accounts.google.com/.well-known/openid-configuration
124 | 
125 | # Some OIDC providers expect a specific algorithm. If so, specify it in
126 | # GOOGLE_OIDC_ALGORITHM, e.g. HS256, HS384, HS512, RS256, RS384, RS512.
127 | # For Google, leave it as None.
128 | 
129 | GOOGLE_OIDC_ENABLED = False
130 | 
131 | GOOGLE_OIDC_AUTH_URL = None
132 | GOOGLE_OIDC_DISCOVERY_URL = None
133 | GOOGLE_OIDC_ALGORITHM = None
134 | 
135 | GOOGLE_OIDC_CLIENT_ID = None
136 | GOOGLE_OIDC_CLIENT_SECRET = None
137 | 
138 | # If you need to authenticate an API client using OIDC, you need to create
139 | # an OAuth client for "other", or for native applications.
140 | # https://developers.google.com/identity/protocols/OAuth2ForDevices
141 | GOOGLE_OIDC_API_CLIENT_ID = None
142 | 
143 | # List of additional allowed Google OIDC clients that can authenticate to the APIs.
144 | GOOGLE_OIDC_API_CLIENT_IDS = []
145 | 
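# Illustrative sketch (hypothetical client IDs): allowing two additional API
# clients would look like:
# GOOGLE_OIDC_API_CLIENT_IDS = [
#     '1234567890-abc.apps.googleusercontent.com',
#     '1234567890-def.apps.googleusercontent.com',
# ]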
146 | # Limit access to a specific Google GSuite domain.
147 | GOOGLE_OIDC_HOSTED_DOMAIN = None
148 | 
149 | # Additional Google GSuite domains allowed API access.
150 | GOOGLE_OIDC_API_ALLOWED_DOMAINS = []
151 | 
152 | # If populated, only these users (email addresses) will be able to log in to
153 | # this server. This can be used when access should be limited to a specific
154 | # set of users.
155 | GOOGLE_OIDC_ALLOWED_USERS = []
156 | 
157 | #-------------------------------------------------------------------------------
158 | # Upload and processing of Plaso storage files.
159 | 
160 | # To enable this feature you need to configure an upload directory and
161 | # how to reach the Redis database used by the distributed task queue.
162 | UPLOAD_ENABLED = True
163 | 
164 | # Folder for temporary storage of Plaso dump files before they are processed
165 | # and inserted into the datastore.
166 | UPLOAD_FOLDER = '/tmp'
167 | 
168 | # Celery broker configuration. Change the IP/port to where your Redis
169 | # server is running.
170 | CELERY_BROKER_URL = 'redis://127.0.0.1:6379'
171 | CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379'
172 | 
173 | # File location to store the mappings used when OpenSearch indices are created
174 | # for plaso files.
175 | PLASO_MAPPING_FILE = '/etc/timesketch/plaso.mappings'
176 | GENERIC_MAPPING_FILE = '/etc/timesketch/generic.mappings'
177 | 
178 | # Override/extend Plaso default message string formatters.
179 | PLASO_FORMATTERS = '/etc/timesketch/plaso_formatters.yaml'
180 | 
181 | # Upper limit for the process memory that psort.py is allocated when ingesting
182 | # plaso files. The size is in bytes, with a default value of
183 | # 4294967296 (4 GiB).
184 | PLASO_UPPER_MEMORY_LIMIT = None
185 | 
186 | #-------------------------------------------------------------------------------
187 | # Analyzers.
188 | 
189 | # Which analyzers to run automatically.
190 | AUTO_SKETCH_ANALYZERS = []
191 | 
192 | # Optionally specify any default arguments to pass to analyzers.
193 | # The format is:
194 | # {
195 | #     'analyzer1_name': {
196 | #         'param1': 'value'
197 | #     },
198 | #     'analyzer2_name': {
199 | #         'param1': 'value'
200 | #     },
201 | # }
202 | AUTO_SKETCH_ANALYZERS_KWARGS = {}
203 | ANALYZERS_DEFAULT_KWARGS = {}
204 | 
205 | # Add all domains that are relevant to your enterprise here.
206 | # All domains in this list are added to the list of watched
207 | # domains and compared to other domains in the timeline to
208 | # attempt to spot "phishy" domains.
209 | DOMAIN_ANALYZER_WATCHED_DOMAINS = []
210 | 
211 | # Defines how many of the most frequently visited top-level
212 | # domains the analyzer should include in its watch list.
213 | DOMAIN_ANALYZER_WATCHED_DOMAINS_THRESHOLD = 10
214 | 
215 | # The minimum Jaccard distance for a domain to be considered
216 | # similar to the domains in the watch list. The lower this number
217 | # is, the more domains will be included in the "phishy" domain
218 | # category.
219 | DOMAIN_ANALYZER_WATCHED_DOMAINS_SCORE_THRESHOLD = 0.75
220 | 
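# Illustrative sketch (hypothetical domains): watching your own brands makes
# look-alike registrations stand out, e.g. with
# DOMAIN_ANALYZER_WATCHED_DOMAINS = ['mycorp.com'], a timeline domain such
# as 'myc0rp.com' is similar enough to be flagged as "phishy".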
221 | # A list of domains that are a frequent source of false positives
222 | # in the "phishy" domain comparison, mostly CDNs and similar.
223 | DOMAIN_ANALYZER_EXCLUDE_DOMAINS = ['ytimg.com', 'gstatic.com', 'yimg.com', 'akamaized.net', 'akamaihd.net', 's-microsoft.com', 'images-amazon.com', 'ssl-images-amazon.com', 'wikimedia.org', 'redditmedia.com', 'googleusercontent.com', 'googleapis.com', 'wikipedia.org', 'github.io', 'github.com']
224 | 
225 | # The threshold, in minutes, that the difference between timestamps must
226 | # exceed in order to be detected as 'timestomping'.
227 | NTFS_TIMESTOMP_ANALYZER_THRESHOLD = 10
228 | 
229 | # Safe Browsing API key for the URL analyzer.
230 | SAFEBROWSING_API_KEY = ''
231 | 
232 | # For the other possible values of the two settings below, please refer to
233 | # the Safe Browsing API reference at:
234 | # https://developers.google.com/safe-browsing/v4/reference/rest
235 | 
236 | # Platforms to be looked at in Safe Browsing (PlatformType).
237 | SAFEBROWSING_PLATFORMS = ['ANY_PLATFORM']
238 | 
239 | # Types to be looked at in Safe Browsing (ThreatType).
240 | SAFEBROWSING_THREATTYPES = ['MALWARE']
241 | 
242 | #-- hashR integration --#
243 | # https://github.com/google/hashr
244 | # Fill in this section if you want to use the hashR lookup analyzer.
245 | # Provide the hashR PostgreSQL database connection information below:
246 | HASHR_DB_USER = 'hashRuser'
247 | HASHR_DB_PW = 'hashRpass'
248 | HASHR_DB_ADDR = '127.0.0.1'
249 | HASHR_DB_PORT = '5432'
250 | HASHR_DB_NAME = 'hashRdb'
251 | 
252 | # The total number of unique hashes that are checked against the database is
253 | # split into multiple batches. This number defines how many unique hashes are
254 | # checked per query. 50000 is the default value.
255 | HASHR_QUERY_BATCH_SIZE = '50000'
256 | 
257 | # Set to True if you want to add the source of the hash ([repo:imagename]) as
258 | # an attribute to the event. WARNING: This will increase the processing time
259 | # of the analyzer!
260 | # HASHR_ADD_SOURCE_ATTRIBUTE = True
261 | 
262 | # Threat intel Yeti analyzer-specific configuration.
263 | # URI root to Yeti's API, e.g. 'https://localhost:8000/api/v2'
264 | YETI_API_ROOT = ''
265 | 
266 | # API key to authenticate requests.
267 | YETI_API_KEY = ''
268 | 
269 | # Path to a TLS certificate that can be used to authenticate servers
270 | # using self-signed certificates. Provide the full path to the .crt file.
271 | YETI_TLS_CERTIFICATE = None
272 | 
273 | # Labels to narrow down indicator selection.
274 | YETI_INDICATOR_LABELS = ['domain']
275 | 
276 | 
277 | # URL of the MISP instance.
278 | MISP_URL = ''
279 | 
280 | # API key to authenticate requests.
281 | MISP_API_KEY = ''
282 | 
283 | # URL of the Hashlookup instance.
284 | HASHLOOKUP_URL = ''
285 | 
286 | # GeoIP Analyzer Settings
287 | #
288 | # Disclaimer: Please note that the geolocation results obtained from this analyzer
289 | # are indicative and based upon the accuracy of the configured datasource.
290 | # This analyzer uses GeoLite2 data created by MaxMind, available from
291 | # https://maxmind.com.
292 | 
293 | # The path to a MaxMind GeoIP database.
294 | MAXMIND_DB_PATH = ''
295 | 
296 | # The account ID to access a MaxMind GeoIP web service.
297 | MAXMIND_WEB_ACCOUNT_ID = ''
298 | 
299 | # The license key to access a MaxMind GeoIP web service.
300 | MAXMIND_WEB_LICENSE_KEY = ''
301 | 
302 | # The host URL of a MaxMind GeoIP web service.
303 | MAXMIND_WEB_HOST = ''
304 | 
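# Illustrative sketch (hypothetical path): for offline lookups, point the
# analyzer at a local GeoLite2 database instead of the web service, e.g.
# MAXMIND_DB_PATH = '/usr/share/GeoIP/GeoLite2-City.mmdb'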
305 | #-------------------------------------------------------------------------------
306 | # Enable experimental UI features.
307 | 
308 | ENABLE_EXPERIMENTAL_UI = False
309 | 
310 | #-------------------------------------------------------------------------------
311 | # Email notifications.
312 | 
313 | ENABLE_EMAIL_NOTIFICATIONS = False
314 | EMAIL_DOMAIN = 'localhost'
315 | EMAIL_FROM_USER = 'nobody'
316 | EMAIL_SMTP_SERVER = 'localhost'
317 | 
318 | # Only send emails to these users.
319 | EMAIL_RECIPIENTS = []
320 | 
321 | # Configuration to construct URLs for resources.
322 | EXTERNAL_HOST_URL = 'https://localhost'
323 | 
324 | # SSL/TLS support for emails.
325 | EMAIL_TLS = False
326 | EMAIL_SSL = False
327 | 
328 | # Credentials for SMTP authentication.
329 | EMAIL_AUTH_USERNAME = ""
330 | EMAIL_AUTH_PASSWORD = ""
331 | 
332 | #-------------------------------------------------------------------------------
333 | # Sigma Settings
334 | 
335 | SIGMA_CONFIG = '/etc/timesketch/sigma_config.yaml'
336 | SIGMA_TAG_DELAY = 5
337 | 
338 | #-------------------------------------------------------------------------------
339 | # Flask Settings
340 | # Everything mentioned in https://flask-wtf.readthedocs.io/en/latest/config/ can be used.
341 | # Max age in seconds for CSRF tokens. Default is 3600. If set to None, the CSRF token is valid for the life of the session.
342 | WTF_CSRF_TIME_LIMIT = None # Set to None to fix OpenRelik integration issues
343 | # WTF_CSRF_ENABLED = False # Set this to False for UI-development purposes
344 | 
345 | #-------------------------------------------------------------------------------
346 | # DFIQ - Digital Forensics Investigation Questions
347 | DFIQ_ENABLED = True
348 | DFIQ_PATH = '/etc/timesketch/dfiq/'
349 | 
350 | # Intelligence tag metadata configuration
351 | INTELLIGENCE_TAG_METADATA = '/etc/timesketch/intelligence_tag_metadata.yaml'
352 | 
353 | # Context links configuration
354 | CONTEXT_LINKS_CONFIG_PATH = '/etc/timesketch/context_links.yaml'
355 | 
356 | # LLM provider configs for the Ollama integration
357 | LLM_PROVIDER_CONFIGS = {
358 |     # Configure the LLM for the Natural Language to Query (NL2Q) feature
359 |     'nl2q': {
360 |         'ollama': {
361 |             'server_url': 'http://ollama.osdfir.svc.cluster.local:11434',
362 |             'model': 'smollm:latest',  # Should match ai.model.name in osdfir-lab-values.yaml
363 |         },
364 |     },
365 |     # Configure the LLM for the event summarization feature
366 |     'llm_summarize': {
367 |         'ollama': {
368 |             'server_url': 'http://ollama.osdfir.svc.cluster.local:11434',
369 |             'model': 'smollm:latest',  # Should match ai.model.name in osdfir-lab-values.yaml
370 |         },
371 |     },
372 |     # Default LLM configuration for any other features
373 |     'default': {
374 |         'ollama': {
375 |             'server_url': 'http://ollama.osdfir.svc.cluster.local:11434',
376 |             'model': 'smollm:latest',  # Should match ai.model.name in osdfir-lab-values.yaml
377 |         },
378 |     }
379 | }
380 | 
381 | # LLM nl2q configuration
382 | DATA_TYPES_PATH = '/etc/timesketch/nl2q/data_types.csv'
383 | PROMPT_NL2Q = '/etc/timesketch/nl2q/prompt_nl2q'
384 | EXAMPLES_NL2Q = '/etc/timesketch/nl2q/examples_nl2q'
385 | 
386 | # LLM event summarization configuration
387 | PROMPT_LLM_SUMMARIZATION = '/etc/timesketch/llm_summarize/prompt.txt'
388 | 
389 | #-------------------------------------------------------------------------------
390 | # Timesketch UI Options
391 | 
392 | # Search processing timelines setting.
393 | # If set to True, the search processing timelines option will be displayed in the UI.
394 | SEARCH_PROCESSING_TIMELINES = False 395 | -------------------------------------------------------------------------------- /scripts/manage-osdfir-lab.ps1: -------------------------------------------------------------------------------- 1 | # OSDFIR Lab Management Script 2 | # Unified tool for managing OSDFIR deployment, services, and credentials on Minikube 3 | 4 | param( 5 | [Parameter(Mandatory = $false)] 6 | [ValidateSet("help", "status", "start", "stop", "restart", "logs", "cleanup", "creds", "jobs", "helm", "uninstall", "reinstall", "storage", "minikube", "deploy", "teardown-lab", "teardown-lab-all", "ollama", "ollama-test", "docker")] 7 | [string]$Action = "help", 8 | 9 | [Parameter(Mandatory = $false)] 10 | [string]$ReleaseName = "osdfir-lab", 11 | 12 | [Parameter(Mandatory = $false)] 13 | [string]$Namespace = "osdfir", 14 | 15 | [Parameter(Mandatory = $false)] 16 | [ValidateSet("all", "timesketch", "openrelik")] 17 | [string]$Service = "all", 18 | 19 | # Help alias 20 | [switch]$h = $false, 21 | 22 | # Cleanup and deployment options 23 | [switch]$Force = $false, 24 | [switch]$DryRun = $false 25 | ) 26 | 27 | # Color constants 28 | $Colors = @{ 29 | Header = "Cyan" 30 | Success = "Green" 31 | Warning = "Yellow" 32 | Error = "Red" 33 | Info = "White" 34 | Gray = "Gray" 35 | Command = "Magenta" 36 | } 37 | 38 | $script:IsFirstDeployment = $false 39 | 40 | function Update-DeploymentContext { 41 | param( 42 | [string]$Namespace, 43 | [string]$ReleaseName 44 | ) 45 | 46 | $tfStatePath = Join-Path $PSScriptRoot "..\terraform\terraform.tfstate" 47 | $hasTerraformState = Test-Path $tfStatePath 48 | 49 | $hasHelmRelease = $false 50 | try { 51 | $helmOutput = helm list -n $Namespace -o json 2>$null 52 | if ($helmOutput) { 53 | $helmReleases = $helmOutput | ConvertFrom-Json 54 | if ($helmReleases) { 55 | if ($helmReleases -isnot [System.Array]) { 56 | $helmReleases = @($helmReleases) 57 | } 58 | $hasHelmRelease = ($helmReleases | Where-Object { $_.name -eq $ReleaseName } | Measure-Object).Count -gt 0 59 | } 60 | } 61 | } catch { 62 | $hasHelmRelease = $false 63 | } 64 | 65 | $script:IsFirstDeployment = -not ($hasTerraformState -or $hasHelmRelease) 66 | } 67 | 68 | function Get-HelmTimeoutSeconds { 69 | if ($script:IsFirstDeployment) { 70 | return 1500 71 | } 72 | return 600 73 | } 74 | 75 | function Show-Header { 76 | param([string]$Title) 77 | Write-Host "" 78 | Write-Host "== $Title ==" -ForegroundColor $Colors.Header 79 | Write-Host ("=" * ($Title.Length + 7)) -ForegroundColor $Colors.Header 80 | } 81 | 82 | function Show-Help { 83 | Show-Header "OSDFIR Lab Management Tool" 84 | Write-Host "" 85 | Write-Host "Usage: .\manage-osdfir-lab.ps1 [action] [options]" -ForegroundColor $Colors.Warning 86 | Write-Host "" 87 | Write-Host "DEPLOYMENT + TEARDOWN:" -ForegroundColor $Colors.Success 88 | Write-Host " deploy - Full deployment (Docker + Minikube + Terraform + Services)" 89 | Write-Host " teardown-lab - Smart cleanup (Services + Terraform, PRESERVES AI models/data)" -ForegroundColor $Colors.Header 90 | Write-Host " teardown-lab-all - Complete destruction (Everything including AI models/data)" -ForegroundColor $Colors.Error 91 | Write-Host " docker - Check and start Docker Desktop if needed" 92 | Write-Host "" 93 | Write-Host "STATUS + MONITORING:" -ForegroundColor $Colors.Success 94 | Write-Host " status - Show deployment and service status" 95 | Write-Host " minikube - Show Minikube cluster status" 96 | Write-Host " helm - List Helm releases and show release status" 97 | Write-Host " 
storage - Show PV storage utilization" 98 | Write-Host " jobs - Manage background jobs" 99 | Write-Host " logs - Show logs from services" 100 | Write-Host "" 101 | Write-Host "SERVICE ACCESS:" -ForegroundColor $Colors.Success 102 | Write-Host " start - Start port forwarding for services" 103 | Write-Host " stop - Stop port forwarding jobs" 104 | Write-Host " restart - Restart port forwarding jobs" 105 | Write-Host " creds - Get service credentials" 106 | Write-Host "" 107 | Write-Host "AI + SPECIALIZED:" -ForegroundColor $Colors.Success 108 | Write-Host " ollama - Show Ollama AI model status and connectivity" 109 | Write-Host " ollama-test - Run comprehensive AI prompt testing" 110 | Write-Host "" 111 | Write-Host "MAINTENANCE:" -ForegroundColor $Colors.Success 112 | Write-Host " cleanup - Clean up OSDFIR deployment" 113 | Write-Host " uninstall - Uninstall the Helm release" 114 | Write-Host " reinstall - Reinstall the Helm release (uninstall + deploy)" 115 | Write-Host " help - Show this help message" 116 | Write-Host "" 117 | Write-Host "Options:" -ForegroundColor $Colors.Header 118 | Write-Host " -h Show help (alias for help action)" 119 | Write-Host " -Service Specific service for creds (all, timesketch, openrelik)" 120 | Write-Host " -Force Force operations without confirmation" 121 | Write-Host " -DryRun Show what would be done without executing" 122 | Write-Host "" 123 | Write-Host "Examples:" -ForegroundColor $Colors.Header 124 | Write-Host " .\manage-osdfir-lab.ps1 -h" 125 | Write-Host " .\manage-osdfir-lab.ps1 docker" 126 | Write-Host " .\manage-osdfir-lab.ps1 deploy # Preserves passwords if they exist" 127 | Write-Host " .\manage-osdfir-lab.ps1 reinstall # Reinstall while preserving passwords" 128 | Write-Host " .\manage-osdfir-lab.ps1 teardown-lab # Smart cleanup - preserves AI models/data" -ForegroundColor $Colors.Header 129 | Write-Host " .\manage-osdfir-lab.ps1 teardown-lab-all # Nuclear option - destroys everything" -ForegroundColor $Colors.Error 130 | Write-Host " .\manage-osdfir-lab.ps1 status" 131 | Write-Host " .\manage-osdfir-lab.ps1 creds -Service timesketch" 132 | } 133 | 134 | function Test-Prerequisites { 135 | $missing = @() 136 | 137 | # Check required tools using Get-Command which is more reliable 138 | $tools = @("minikube", "kubectl", "terraform", "helm", "docker") 139 | foreach ($tool in $tools) { 140 | try { 141 | $command = Get-Command $tool -ErrorAction Stop 142 | Write-Verbose "Found $tool at: $($command.Source)" -Verbose:$false 143 | } catch { 144 | $missing += $tool 145 | } 146 | } 147 | 148 | if ($missing.Count -gt 0) { 149 | Write-Host "ERROR: Missing required tools: $($missing -join ', ')" -ForegroundColor $Colors.Error 150 | Write-Host "Please install all required tools before proceeding." 
-ForegroundColor $Colors.Warning 151 | Write-Host "" 152 | Write-Host "Installation tips:" -ForegroundColor $Colors.Info 153 | foreach ($tool in $missing) { 154 | switch ($tool) { 155 | "kubectl" { Write-Host " - kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/" -ForegroundColor $Colors.Gray } 156 | "docker" { Write-Host " - docker: https://docs.docker.com/desktop/install/windows-install/" -ForegroundColor $Colors.Gray } 157 | "minikube" { Write-Host " - minikube: https://minikube.sigs.k8s.io/docs/start/" -ForegroundColor $Colors.Gray } 158 | "terraform" { Write-Host " - terraform: https://developer.hashicorp.com/terraform/downloads" -ForegroundColor $Colors.Gray } 159 | "helm" { Write-Host " - helm: https://helm.sh/docs/intro/install/" -ForegroundColor $Colors.Gray } 160 | } 161 | } 162 | return $false 163 | } 164 | 165 | return $true 166 | } 167 | 168 | function Get-OptimalResources { 169 | # Get system memory in GB 170 | $totalMemoryGB = [math]::Round((Get-CimInstance Win32_ComputerSystem).TotalPhysicalMemory / 1GB, 1) 171 | 172 | # Get Docker Desktop's available memory 173 | $dockerMemoryMB = 0 174 | try { 175 | $dockerInfo = docker system info --format "{{.MemTotal}}" 2>$null 176 | if ($dockerInfo) { 177 | $dockerMemoryMB = [math]::Round($dockerInfo / 1MB, 0) 178 | } 179 | } catch { 180 | Write-Host "Warning: Could not determine Docker memory limit" -ForegroundColor $Colors.Warning 181 | } 182 | 183 | # Calculate memory allocation 184 | if ($dockerMemoryMB -gt 0) { 185 | # Use 80% of Docker's available memory, minimum 4GB for AI workloads 186 | $dockerMemoryGB = [math]::Round($dockerMemoryMB / 1024, 1) 187 | $memoryGB = [math]::Max([math]::Floor($dockerMemoryGB * 0.8), 4) 188 | 189 | # Ensure we don't exceed Docker's limits 190 | if ($memoryGB -gt ($dockerMemoryGB - 2)) { 191 | $memoryGB = [math]::Max($dockerMemoryGB - 2, 4) 192 | } 193 | } else { 194 | # Fallback to system memory calculation 195 | $memoryGB = [math]::Max([math]::Floor($totalMemoryGB * 0.5), 4) 196 | } 197 | 198 | # Get CPU count and use half, minimum 2, maximum 8 for balanced performance 199 | $totalCPUs = (Get-CimInstance Win32_ComputerSystem).NumberOfLogicalProcessors 200 | $cpus = [math]::Min([math]::Max([math]::Floor($totalCPUs / 2), 2), 8) 201 | 202 | Write-Host "System Resources:" -ForegroundColor $Colors.Success 203 | Write-Host " Total Memory: ${totalMemoryGB}GB" 204 | Write-Host " Total CPUs: $totalCPUs" 205 | if ($dockerMemoryMB -gt 0) { 206 | Write-Host " Docker Memory: ${dockerMemoryGB}GB" -ForegroundColor $Colors.Success 207 | } 208 | Write-Host "" 209 | Write-Host "Minikube Allocation:" -ForegroundColor $Colors.Warning 210 | Write-Host " Memory: ${memoryGB}GB" 211 | Write-Host " CPUs: $cpus" 212 | Write-Host "" 213 | 214 | return @{ 215 | Memory = "${memoryGB}GB" 216 | CPUs = $cpus 217 | } 218 | } 219 | 220 | function Test-Docker { 221 | param([switch]$Silent = $false) 222 | 223 | try { 224 | docker info > $null 2>&1 225 | if ($LASTEXITCODE -eq 0) { 226 | if (-not $Silent) { 227 | Write-Host "[OK] Docker is running" -ForegroundColor $Colors.Success 228 | } 229 | return $true 230 | } else { 231 | if (-not $Silent) { 232 | Write-Host "[ERROR] Docker is not running" -ForegroundColor $Colors.Error 233 | } 234 | return $false 235 | } 236 | } catch { 237 | if (-not $Silent) { 238 | Write-Host "[ERROR] Docker command not found" -ForegroundColor $Colors.Error 239 | Write-Host "Please install Docker Desktop and try again." 
-ForegroundColor $Colors.Warning 240 | } 241 | return $false 242 | } 243 | } 244 | 245 | function Set-ProxyEnvironment { 246 | # Check if proxy environment variables are set 247 | $proxyVars = @() 248 | 249 | if ($env:HTTP_PROXY) { 250 | $proxyVars += "--docker-env", "HTTP_PROXY=$env:HTTP_PROXY" 251 | Write-Host "Using HTTP_PROXY: $env:HTTP_PROXY" -ForegroundColor $Colors.Warning 252 | } 253 | 254 | if ($env:HTTPS_PROXY) { 255 | $proxyVars += "--docker-env", "HTTPS_PROXY=$env:HTTPS_PROXY" 256 | Write-Host "Using HTTPS_PROXY: $env:HTTPS_PROXY" -ForegroundColor $Colors.Warning 257 | } 258 | 259 | # Set NO_PROXY with Kubernetes and registry defaults 260 | $defaultNoProxy = "localhost,127.0.0.1,10.96.0.0/12,192.168.59.0/24,192.168.49.0/24,192.168.39.0/24,registry.k8s.io" 261 | 262 | if ($env:NO_PROXY) { 263 | $noProxy = "$env:NO_PROXY,$defaultNoProxy" 264 | } else { 265 | $noProxy = $defaultNoProxy 266 | } 267 | 268 | $proxyVars += "--docker-env", "NO_PROXY=$noProxy" 269 | Write-Host "Using NO_PROXY: $noProxy" -ForegroundColor $Colors.Warning 270 | 271 | return $proxyVars 272 | } 273 | 274 | function Start-OSDFIRMinikube { 275 | Show-Header "Starting OSDFIR Minikube Cluster" 276 | 277 | # Check if Minikube is already running with the osdfir profile 278 | $minikubeStatus = minikube status --profile=osdfir 2>&1 279 | 280 | if ($minikubeStatus -match "Running" -and $LASTEXITCODE -eq 0) { 281 | Write-Host "[INFO] Minikube 'osdfir' profile is already running" -ForegroundColor $Colors.Info 282 | Write-Host "Type: $(minikube profile)" -ForegroundColor $Colors.Info 283 | return $true 284 | } 285 | 286 | # Calculate system resources 287 | $totalMemory = (Get-CimInstance -ClassName Win32_ComputerSystem).TotalPhysicalMemory / 1GB 288 | $totalMemory = [math]::Round($totalMemory, 1) 289 | $totalCPUs = (Get-CimInstance -ClassName Win32_ComputerSystem).NumberOfLogicalProcessors 290 | 291 | # Get Docker memory allocation 292 | $dockerMemory = 0 293 | try { 294 | $dockerInfo = docker info --format "{{.MemTotal}}" 2>$null 295 | if ($dockerInfo) { 296 | $dockerMemory = [math]::Round(($dockerInfo / 1GB), 1) 297 | } 298 | } catch { 299 | $dockerMemory = "Unknown" 300 | } 301 | 302 | # Calculate Minikube resource allocation (adjust as needed) 303 | $minikubeMemory = [math]::Min(12, [math]::Floor($totalMemory * 0.6)) 304 | $minikubeCPUs = [math]::Min(8, [math]::Floor($totalCPUs * 0.6)) 305 | 306 | Write-Host "System Resources:" -ForegroundColor $Colors.Info 307 | Write-Host " Total Memory: ${totalMemory}GB" -ForegroundColor $Colors.Info 308 | Write-Host " Total CPUs: $totalCPUs" -ForegroundColor $Colors.Info 309 | Write-Host " Docker Memory: ${dockerMemory}GB" -ForegroundColor $Colors.Info 310 | Write-Host "" 311 | Write-Host "Minikube Allocation:" -ForegroundColor $Colors.Info 312 | Write-Host " Memory: ${minikubeMemory}GB" -ForegroundColor $Colors.Info 313 | Write-Host " CPUs: $minikubeCPUs" -ForegroundColor $Colors.Info 314 | Write-Host "" 315 | 316 | # Set NO_PROXY environment variable 317 | $noProxy = "localhost,127.0.0.1,10.96.0.0/12,192.168.59.0/24,192.168.49.0/24,192.168.39.0/24,registry.k8s.io" 318 | Write-Host "Using NO_PROXY: $noProxy" -ForegroundColor $Colors.Info 319 | 320 | # Start Minikube 321 | Write-Host "Starting Minikube with profile 'osdfir'..." 
-ForegroundColor $Colors.Info
322 |     $minikubeCommand = "minikube start --profile=osdfir --driver=docker --memory=${minikubeMemory}GB --cpus=$minikubeCPUs --disk-size=40GB --kubernetes-version=stable --docker-env NO_PROXY=$noProxy"
323 |     Write-Host "Running: $minikubeCommand" -ForegroundColor $Colors.Command
324 | 
325 |     Invoke-Expression $minikubeCommand
326 | 
327 |     if ($LASTEXITCODE -eq 0) {
328 |         Write-Host ""
329 |         Write-Host "[OK] Minikube started successfully" -ForegroundColor $Colors.Success
330 |         Write-Host ""
331 |         return $true
332 |     } else {
333 |         Write-Host ""
334 |         Write-Host "[ERROR] Failed to start Minikube" -ForegroundColor $Colors.Error
335 |         Write-Host ""
336 |         return $false
337 |     }
338 | }
339 | 
340 | function Start-MinikubeTunnel {
341 |     Write-Host ""
342 |     Write-Host "Checking Minikube tunnel..." -ForegroundColor $Colors.Info
343 | 
344 |     # Check if tunnel job already exists
345 |     $existingJob = Get-Job -Name "minikube-tunnel" -ErrorAction SilentlyContinue
346 |     if ($existingJob -and $existingJob.State -eq "Running") {
347 |         # Check if LoadBalancer services have external IPs
348 |         $lbServices = kubectl get services --all-namespaces --field-selector metadata.namespace=$Namespace --no-headers -o custom-columns=":metadata.name,:spec.type,:status.loadBalancer.ingress[0].ip" |
349 |             Where-Object { $_ -match "LoadBalancer" }
350 | 
351 |         if ($lbServices -and $lbServices -match "\S+\s+LoadBalancer\s+\d+\.\d+\.\d+\.\d+") {
352 |             Write-Host "[OK] Minikube tunnel is already running and working properly" -ForegroundColor $Colors.Success
353 |             Write-Host "LoadBalancer services are accessible on localhost" -ForegroundColor $Colors.Info
354 |             return
355 |         }
356 | 
357 |         Write-Host "Existing tunnel job found but may not be working properly" -ForegroundColor $Colors.Warning
358 |         Write-Host "Stopping existing tunnel job..." -ForegroundColor $Colors.Warning
359 |         $existingJob | Stop-Job
360 |         $existingJob | Remove-Job -Force
361 |     } elseif ($existingJob) {
362 |         Write-Host "Cleaning up non-running tunnel job..." -ForegroundColor $Colors.Warning
363 |         $existingJob | Remove-Job -Force
364 |     }
365 | 
366 |     # Start tunnel in background job
367 |     Write-Host "Starting Minikube tunnel..." -ForegroundColor $Colors.Info
368 |     $scriptBlock = {
369 |         minikube tunnel --profile=osdfir --cleanup
370 |     }
371 | 
372 |     Start-Job -Name "minikube-tunnel" -ScriptBlock $scriptBlock | Out-Null
373 |     Start-Sleep -Seconds 3
374 | 
375 |     $tunnelJob = Get-Job -Name "minikube-tunnel"
376 |     if ($tunnelJob -and $tunnelJob.State -eq "Running") {
377 |         Write-Host "[OK] Minikube tunnel started in background" -ForegroundColor $Colors.Success
378 |         Write-Host "LoadBalancer services will be accessible on localhost" -ForegroundColor $Colors.Warning
379 |     } else {
380 |         Write-Host "[WARNING] Tunnel may not have started properly" -ForegroundColor $Colors.Warning
381 |         Write-Host "You may need to run 'minikube tunnel --profile=osdfir' manually" -ForegroundColor $Colors.Warning
382 |     }
383 | }
384 | 
385 | function Stop-MinikubeTunnel {
386 |     $tunnelJob = Get-Job -Name "minikube-tunnel" -ErrorAction SilentlyContinue
387 |     if ($tunnelJob) {
388 |         Write-Host "Stopping Minikube tunnel..." 
-ForegroundColor $Colors.Warning 389 | $tunnelJob | Stop-Job 390 | $tunnelJob | Remove-Job -Force 391 | Write-Host "[OK] Tunnel stopped" -ForegroundColor $Colors.Success 392 | } else { 393 | Write-Host "No tunnel job found" -ForegroundColor $Colors.Gray 394 | } 395 | } 396 | 397 | function Remove-MinikubeCluster { 398 | param([switch]$SkipConfirmation = $false) 399 | 400 | Write-Host "" 401 | Write-Host "Deleting OSDFIR Minikube Cluster..." -ForegroundColor $Colors.Error 402 | Write-Host "==================================" -ForegroundColor $Colors.Error 403 | Write-Host "" 404 | 405 | # Stop tunnel first 406 | Stop-MinikubeTunnel 407 | 408 | if (-not $Force -and -not $SkipConfirmation) { 409 | $confirmation = Read-Host "Are you sure you want to delete the 'osdfir' cluster? (yes/no)" 410 | if ($confirmation -ne "yes") { 411 | Write-Host "Deletion cancelled." -ForegroundColor $Colors.Warning 412 | return 413 | } 414 | } 415 | 416 | Write-Host "Deleting Minikube cluster 'osdfir'..." -ForegroundColor $Colors.Error 417 | minikube delete --profile=osdfir 418 | 419 | if ($LASTEXITCODE -eq 0) { 420 | Write-Host "[OK] Cluster deleted successfully" -ForegroundColor $Colors.Success 421 | } else { 422 | Write-Host "[ERROR] Failed to delete cluster" -ForegroundColor $Colors.Error 423 | } 424 | } 425 | 426 | function Start-DockerDesktop { 427 | Write-Host "Checking Docker Desktop status..." -ForegroundColor $Colors.Info 428 | 429 | if (Test-Docker -Silent) { 430 | Write-Host "[OK] Docker Desktop is already running" -ForegroundColor $Colors.Success 431 | return $true 432 | } 433 | 434 | Write-Host "Docker Desktop is not running. Starting..." -ForegroundColor $Colors.Warning 435 | 436 | # Try to find Docker Desktop executable 437 | $dockerDesktopPaths = @( 438 | "${env:ProgramFiles}\Docker\Docker\Docker Desktop.exe", 439 | "${env:ProgramFiles(x86)}\Docker\Docker\Docker Desktop.exe", 440 | "${env:LOCALAPPDATA}\Programs\Docker\Docker\Docker Desktop.exe" 441 | ) 442 | 443 | $dockerExe = $null 444 | foreach ($path in $dockerDesktopPaths) { 445 | if (Test-Path $path) { 446 | $dockerExe = $path 447 | break 448 | } 449 | } 450 | 451 | if (-not $dockerExe) { 452 | Write-Host "ERROR: Could not find Docker Desktop executable" -ForegroundColor $Colors.Error 453 | Write-Host "Please start Docker Desktop manually or install it from:" -ForegroundColor $Colors.Warning 454 | Write-Host "https://docs.docker.com/desktop/install/windows-install/" -ForegroundColor $Colors.Gray 455 | return $false 456 | } 457 | 458 | try { 459 | Start-Process -FilePath $dockerExe -WindowStyle Hidden 460 | 461 | Write-Host "Waiting for Docker Desktop to start..." -ForegroundColor $Colors.Info 462 | $timeout = 180 # 3 minutes 463 | $elapsed = 0 464 | 465 | do { 466 | Start-Sleep -Seconds 10 467 | $elapsed += 10 468 | Write-Host " Checking Docker status... ($elapsed s elapsed)" -ForegroundColor $Colors.Gray 469 | 470 | if (Test-Docker -Silent) { 471 | Write-Host "[OK] Docker Desktop started successfully!" -ForegroundColor $Colors.Success 472 | return $true 473 | } 474 | } while ($elapsed -lt $timeout) 475 | 476 | Write-Host "WARNING: Docker Desktop may still be starting. Please wait and try again." -ForegroundColor $Colors.Warning 477 | return $false 478 | 479 | } catch { 480 | Write-Host "ERROR: Failed to start Docker Desktop: $($_.Exception.Message)" -ForegroundColor $Colors.Error 481 | Write-Host "Please start Docker Desktop manually." 
-ForegroundColor $Colors.Warning
482 |         return $false
483 |     }
484 | }
485 | 
486 | function Test-MinikubeRunning {
487 |     try {
488 |         $status = minikube status --profile=osdfir -f "{{.Host}}" 2>$null
489 |         return $status -eq "Running"
490 |     } catch {
491 |         return $false
492 |     }
493 | }
494 | 
495 | function Test-KubectlAccess {
496 |     kubectl get pods -n $Namespace --no-headers 2>$null | Out-Null
497 |     # kubectl is an external command: a failure sets $LASTEXITCODE rather than throwing.
498 |     if ($LASTEXITCODE -ne 0) {
499 |         Write-Host "ERROR: Cannot access Kubernetes cluster or namespace '$Namespace'" -ForegroundColor $Colors.Error
500 |         Write-Host "TIP: Ensure Minikube is running and kubectl context is set." -ForegroundColor $Colors.Warning
501 |         return $false
502 |     }
503 |     return $true
504 | }
505 | 
506 | function Show-MinikubeStatus {
507 |     Show-Header "Minikube Cluster Status"
508 | 
509 |     if (-not (Test-MinikubeRunning)) {
510 |         Write-Host "Minikube cluster 'osdfir' is not running" -ForegroundColor $Colors.Error
511 |         Write-Host "TIP: Run .\manage-osdfir-lab.ps1 deploy to start the full environment" -ForegroundColor $Colors.Info
512 |         return
513 |     }
514 | 
515 |     Write-Host "Cluster Status:" -ForegroundColor $Colors.Success
516 |     minikube status --profile=osdfir
517 | 
518 |     Write-Host ""
519 |     Write-Host "Cluster Resources:" -ForegroundColor $Colors.Success
520 |     kubectl top nodes 2>$null
521 | 
522 |     Write-Host ""
523 |     Write-Host "Minikube Tunnel Job:" -ForegroundColor $Colors.Success
524 |     $tunnelJob = Get-Job -Name "minikube-tunnel" -ErrorAction SilentlyContinue
525 |     if ($tunnelJob) {
526 |         $status = switch ($tunnelJob.State) {
527 |             "Running" { "[RUNNING]" }
528 |             "Completed" { "[STOPPED]" }
529 |             "Failed" { "[FAILED]" }
530 |             default { "[UNKNOWN]" }
531 |         }
532 |         $color = switch ($tunnelJob.State) {
533 |             "Running" { $Colors.Success }
534 |             "Failed" { $Colors.Error }
535 |             default { $Colors.Warning }
536 |         }
537 |         Write-Host " $status Minikube tunnel" -ForegroundColor $color
538 |     } else {
539 |         Write-Host " [NOT RUNNING] Minikube tunnel" -ForegroundColor $Colors.Warning
540 |     }
541 | }
542 | 
543 | function Show-OllamaStatus {
544 |     Show-Header "Ollama AI Model Status"
545 | 
546 |     if (-not (Test-KubectlAccess)) {
547 |         return
548 |     }
549 | 
550 |     # Check Ollama pod status (only the first matching pod is inspected)
551 |     Write-Host "Ollama Pod Status:" -ForegroundColor $Colors.Success
552 |     $ollamaPod = kubectl get pods -n $Namespace -l app=ollama --no-headers 2>$null | Select-Object -First 1
553 |     if ($ollamaPod) {
554 |         $parts = $ollamaPod -split '\s+'
555 |         $name = $parts[0]
556 |         $status = $parts[2]
557 |         if ($status -eq "Running") {
558 |             Write-Host " [OK] $name" -ForegroundColor $Colors.Success
559 |         } else {
560 |             Write-Host " [ERROR] $name ($status)" -ForegroundColor $Colors.Error
561 |         }
562 |     } else {
563 |         Write-Host " [ERROR] Ollama pod not found" -ForegroundColor $Colors.Error
564 |         return
565 |     }
566 | 
567 |     # Check available models
568 |     Write-Host ""
569 |     Write-Host "Available Models:" -ForegroundColor $Colors.Success
570 |     $availableModels = @()
571 |     try {
572 |         $modelOutput = kubectl exec -n $Namespace $name -- ollama list 2>$null
573 |         if ($modelOutput) {
574 |             $lines = $modelOutput -split "`n"
575 |             $modelLines = $lines | Where-Object { $_ -match "^\w+.*\d+\s+(GB|MB|KB)" }
576 | 
577 |             if ($modelLines.Count -gt 0) {
578 |                 foreach ($line in $modelLines) {
579 |                     $parts = $line -split '\s+'
580 |                     $modelName = $parts[0]
581 |                     $modelSize = "$($parts[2]) $($parts[3])"
582 |                     $availableModels += $modelName
583 |                     Write-Host " [OK] $modelName (Size: $modelSize)" -ForegroundColor $Colors.Success
584 |                 }
585 |             } else {
586 |                 Write-Host " 
[INFO] No models found" -ForegroundColor $Colors.Warning 587 | } 588 | } else { 589 | Write-Host " [ERROR] Unable to retrieve model list" -ForegroundColor $Colors.Error 590 | } 591 | } catch { 592 | Write-Host " [ERROR] Failed to check models: $($_.Exception.Message)" -ForegroundColor $Colors.Error 593 | } 594 | 595 | # Test AI functionality if models available 596 | if ($availableModels.Count -gt 0) { 597 | Write-Host "" 598 | Write-Host "AI Functionality Test:" -ForegroundColor $Colors.Success 599 | $testModel = $availableModels[0] 600 | try { 601 | Write-Host " Testing model '$testModel' with forensic prompt..." -ForegroundColor $Colors.Info 602 | $testPrompt = "List 3 common digital forensics file types. Answer with just the file types." 603 | $promptResult = kubectl exec -n $Namespace $name -- ollama run $testModel "$testPrompt" 2>$null 604 | 605 | if ($promptResult -and $promptResult.Length -gt 10) { 606 | Write-Host " [OK] AI model is responding to prompts" -ForegroundColor $Colors.Success 607 | Write-Host " Sample response: $($promptResult.Substring(0, [Math]::Min(80, $promptResult.Length)))..." -ForegroundColor $Colors.Gray 608 | } else { 609 | Write-Host " [ERROR] AI model not responding properly" -ForegroundColor $Colors.Error 610 | } 611 | } catch { 612 | Write-Host " [WARNING] Unable to test AI functionality: $($_.Exception.Message)" -ForegroundColor $Colors.Warning 613 | } 614 | } 615 | } 616 | 617 | function Show-Status { 618 | Show-Header "OSDFIR Deployment Status" 619 | 620 | # Check Minikube first 621 | if (-not (Test-MinikubeRunning)) { 622 | Write-Host "Minikube cluster 'osdfir' is not running" -ForegroundColor $Colors.Error 623 | Write-Host "TIP: Run .\manage-osdfir-lab.ps1 deploy to start the full environment" -ForegroundColor $Colors.Info 624 | return 625 | } 626 | 627 | # Test kubectl access 628 | if (-not (Test-KubectlAccess)) { 629 | return 630 | } 631 | 632 | # Check Helm release 633 | Write-Host "Helm Release Status:" -ForegroundColor $Colors.Success 634 | try { 635 | $release = helm list -n $Namespace -o json | ConvertFrom-Json | Where-Object { $_.name -eq $ReleaseName } 636 | if ($release) { 637 | Write-Host " [OK] Release '$ReleaseName' is $($release.status)" -ForegroundColor $Colors.Success 638 | } else { 639 | Write-Host " [ERROR] Release '$ReleaseName' not found" -ForegroundColor $Colors.Error 640 | } 641 | } catch { 642 | Write-Host " [ERROR] Unable to check Helm releases" -ForegroundColor $Colors.Error 643 | } 644 | 645 | # Check pods 646 | Write-Host "" 647 | Write-Host "Pod Status:" -ForegroundColor $Colors.Success 648 | $pods = kubectl get pods -n $Namespace --no-headers 2>$null 649 | if ($pods) { 650 | $runningPods = 0 651 | $totalPods = 0 652 | 653 | $pods | ForEach-Object { 654 | $totalPods++ 655 | $parts = $_ -split '\s+' 656 | $name = $parts[0] 657 | $ready = $parts[1] 658 | $status = $parts[2] 659 | 660 | if ($status -eq "Running" -and $ready -like "*/*") { 661 | $readyParts = $ready -split '/' 662 | if ($readyParts[0] -eq $readyParts[1]) { 663 | $runningPods++ 664 | Write-Host " [OK] $name" -ForegroundColor $Colors.Success 665 | } else { 666 | Write-Host " [WAIT] $name ($ready)" -ForegroundColor $Colors.Warning 667 | } 668 | } else { 669 | Write-Host " [ERROR] $name ($status)" -ForegroundColor $Colors.Error 670 | } 671 | } 672 | 673 | Write-Host "" 674 | Write-Host "Summary: $runningPods/$totalPods pods running" -ForegroundColor $Colors.Info 675 | } else { 676 | Write-Host " No pods found in namespace '$Namespace'" -ForegroundColor 
$Colors.Warning 677 | } 678 | 679 | # Check port forwarding jobs 680 | Write-Host "" 681 | Write-Host "Port Forwarding Jobs:" -ForegroundColor $Colors.Success 682 | $osdfirJobs = Get-Job | Where-Object { $_.Name -like "pf-*" } 683 | 684 | if ($osdfirJobs.Count -eq 0) { 685 | Write-Host " No port forwarding jobs running" -ForegroundColor $Colors.Warning 686 | Write-Host " TIP: Run .\manage-osdfir-lab.ps1 start" -ForegroundColor $Colors.Info 687 | } else { 688 | foreach ($job in $osdfirJobs) { 689 | $serviceName = $job.Name -replace "pf-", "" 690 | $status = switch ($job.State) { 691 | "Running" { "[RUNNING]" } 692 | "Completed" { "[STOPPED]" } 693 | "Failed" { "[FAILED]" } 694 | "Stopped" { "[STOPPED]" } 695 | default { "[UNKNOWN]" } 696 | } 697 | 698 | $color = switch ($job.State) { 699 | "Running" { $Colors.Success } 700 | "Completed" { $Colors.Warning } 701 | "Failed" { $Colors.Error } 702 | "Stopped" { $Colors.Warning } 703 | default { $Colors.Gray } 704 | } 705 | 706 | Write-Host " $status $serviceName" -ForegroundColor $color 707 | } 708 | } 709 | } 710 | 711 | function Start-Services { 712 | Show-Header "Starting OSDFIR Services" 713 | 714 | # Check prerequisites 715 | if (-not (Test-MinikubeRunning)) { 716 | Write-Host "ERROR: Minikube cluster is not running" -ForegroundColor $Colors.Error 717 | Write-Host "TIP: Run .\manage-osdfir-lab.ps1 deploy to start the full environment" -ForegroundColor $Colors.Info 718 | return 719 | } 720 | 721 | if (-not (Test-KubectlAccess)) { 722 | return 723 | } 724 | 725 | Write-Host "Checking service availability..." -ForegroundColor $Colors.Info 726 | 727 | $services = @( 728 | @{Name="Timesketch"; Service="$ReleaseName-timesketch"; Port="5000"}, 729 | @{Name="OpenRelik-UI"; Service="$ReleaseName-openrelik"; Port="8711"}, 730 | @{Name="OpenRelik-API"; Service="$ReleaseName-openrelik-api"; Port="8710"}, 731 | @{Name="Timesketch-MCP-Server"; Service="timesketch-mcp-server"; Port="8081"} 732 | ) 733 | 734 | $availableServices = @() 735 | foreach ($svc in $services) { 736 | $null = kubectl get service $svc.Service -n $Namespace --no-headers 2>$null 737 | if ($LASTEXITCODE -eq 0) { 738 | Write-Host " [OK] $($svc.Name) service is available" -ForegroundColor $Colors.Success 739 | $availableServices += $svc 740 | } else { 741 | Write-Host " [ERROR] $($svc.Name) service not found" -ForegroundColor $Colors.Error 742 | } 743 | } 744 | 745 | if ($availableServices.Count -eq 0) { 746 | Write-Host "ERROR: No OSDFIR services are available. Please check your deployment." -ForegroundColor $Colors.Error 747 | return 748 | } 749 | 750 | Write-Host "" 751 | Write-Host "Starting port forwarding as background jobs..." -ForegroundColor $Colors.Info 752 | 753 | # Stop existing port forwarding jobs 754 | $existingJobs = Get-Job | Where-Object { $_.Name -like "pf-*" } 755 | if ($existingJobs) { 756 | Write-Host "Stopping existing jobs..." -ForegroundColor $Colors.Warning 757 | $existingJobs | Stop-Job 758 | $existingJobs | Remove-Job -Force 759 | } 760 | 761 | foreach ($svc in $availableServices) { 762 | $jobName = "pf-$($svc.Name)" 763 | Write-Host " Starting $($svc.Name) on port $($svc.Port)..." 
-ForegroundColor $Colors.Success 764 | 765 | $scriptBlock = { 766 | param($service, $namespace, $port) 767 | kubectl port-forward -n $namespace "svc/$service" "${port}:${port}" 768 | } 769 | 770 | Start-Job -Name $jobName -ScriptBlock $scriptBlock -ArgumentList $svc.Service, $Namespace, $svc.Port | Out-Null 771 | Start-Sleep -Seconds 1 772 | } 773 | 774 | Write-Host "" 775 | Write-Host "Waiting for port forwarding to initialize..." -ForegroundColor $Colors.Info 776 | Start-Sleep -Seconds 5 777 | 778 | Write-Host "" 779 | Write-Host "OSDFIR Services Available:" -ForegroundColor $Colors.Success 780 | foreach ($svc in $availableServices) { 781 | Write-Host " $($svc.Name): http://localhost:$($svc.Port)" -ForegroundColor $Colors.Header 782 | } 783 | 784 | Write-Host "" 785 | Write-Host "Port forwarding is now active!" -ForegroundColor $Colors.Success 786 | Write-Host "TIP: Use .\manage-osdfir-lab.ps1 creds to get login credentials" -ForegroundColor $Colors.Info 787 | } 788 | 789 | function Get-ServiceCredential { 790 | param($ServiceName, $SecretName, $SecretKey, $Username, $ServiceUrl) 791 | 792 | Write-Host "$ServiceName Credentials:" -ForegroundColor $Colors.Header 793 | Write-Host " Service URL: $ServiceUrl" -ForegroundColor $Colors.Success 794 | Write-Host " Username: $Username" -ForegroundColor $Colors.Success 795 | 796 | try { 797 | $password = kubectl get secret --namespace $Namespace $SecretName -o jsonpath="{.data.$SecretKey}" 2>$null 798 | 799 | if ($password) { 800 | $decodedPassword = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($password)) 801 | Write-Host " Password: $decodedPassword" -ForegroundColor $Colors.Success 802 | } else { 803 | Write-Host " Password: [Secret not found or not accessible]" -ForegroundColor $Colors.Error 804 | } 805 | } catch { 806 | Write-Host " Password: [Error retrieving secret]" -ForegroundColor $Colors.Error 807 | } 808 | 809 | Write-Host "" 810 | } 811 | 812 | function Show-Credentials { 813 | Show-Header "OSDFIR Service Credentials" 814 | 815 | # Check kubectl access 816 | if (-not (Test-KubectlAccess)) { 817 | return 818 | } 819 | 820 | Write-Host "Retrieving credentials for release '$ReleaseName' in namespace '$Namespace'..." 
-ForegroundColor $Colors.Info 821 | Write-Host "" 822 | 823 | # Get credentials based on service parameter 824 | switch ($Service) { 825 | "timesketch" { 826 | Get-ServiceCredential -ServiceName "Timesketch" -SecretName "$ReleaseName-timesketch-secret" -SecretKey "timesketch-user" -Username "timesketch" -ServiceUrl "http://localhost:5000" 827 | } 828 | 829 | "openrelik" { 830 | Get-ServiceCredential -ServiceName "OpenRelik" -SecretName "$ReleaseName-openrelik-secret" -SecretKey "openrelik-user" -Username "openrelik" -ServiceUrl "http://localhost:8711" 831 | } 832 | 833 | "all" { 834 | # Check which services are actually deployed 835 | $timesketchSecret = kubectl get secret --namespace $Namespace "$ReleaseName-timesketch-secret" 2>$null 836 | if ($timesketchSecret) { 837 | Get-ServiceCredential -ServiceName "Timesketch" -SecretName "$ReleaseName-timesketch-secret" -SecretKey "timesketch-user" -Username "timesketch" -ServiceUrl "http://localhost:5000" 838 | } 839 | 840 | $openrelikSecret = kubectl get secret --namespace $Namespace "$ReleaseName-openrelik-secret" 2>$null 841 | if ($openrelikSecret) { 842 | Get-ServiceCredential -ServiceName "OpenRelik" -SecretName "$ReleaseName-openrelik-secret" -SecretKey "openrelik-user" -Username "openrelik" -ServiceUrl "http://localhost:8711" 843 | } 844 | 845 | if (-not ($timesketchSecret -or $openrelikSecret)) { 846 | Write-Host "ERROR: No credential secrets found for release '$ReleaseName' in namespace '$Namespace'" -ForegroundColor $Colors.Error 847 | } 848 | } 849 | } 850 | 851 | Write-Host "NOTE: Change default credentials in production environments!" -ForegroundColor $Colors.Warning 852 | } 853 | 854 | function Show-Logs { 855 | Show-Header "OSDFIR Service Logs" 856 | if (-not (Test-KubectlAccess)) { 857 | return 858 | } 859 | Write-Host "Recent logs from key services:" -ForegroundColor $Colors.Info 860 | Write-Host "" 861 | 862 | $keyServices = @("openrelik-api", "timesketch", "ollama") 863 | foreach ($serviceName in $keyServices) { 864 | $pods = kubectl get pods -n $Namespace --no-headers 2>$null | Where-Object { $_ -match $serviceName } 865 | if ($pods) { 866 | $podName = ($pods[0] -split '\s+')[0] 867 | Write-Host "Recent logs for $podName" -ForegroundColor $Colors.Info 868 | Write-Host "------------------------" -ForegroundColor $Colors.Gray 869 | kubectl logs $podName -n $Namespace --tail=10 2>$null 870 | } 871 | Write-Host "" 872 | } 873 | } 874 | 875 | function Show-Helm { 876 | Show-Header "Helm Releases and Status" 877 | if (-not (Test-KubectlAccess)) { 878 | return 879 | } 880 | helm list -n $Namespace 881 | Write-Host "" 882 | Write-Host "Release Status:" -ForegroundColor $Colors.Success 883 | helm status $ReleaseName -n $Namespace 884 | } 885 | 886 | function Show-Storage { 887 | Show-Header "PV Storage Utilization" 888 | if (-not (Test-KubectlAccess)) { return } 889 | 890 | # Get PVC information 891 | Write-Host "Persistent Volume Claims:" -ForegroundColor $Colors.Success 892 | kubectl get pvc -n $Namespace 893 | 894 | Write-Host "" 895 | Write-Host "Storage Usage by Pod:" -ForegroundColor $Colors.Success 896 | 897 | # Basic storage check for each pod 898 | $pods = kubectl get pods -n $Namespace --no-headers 2>$null 899 | foreach ($pod in $pods) { 900 | $podName = ($pod -split '\s+')[0] 901 | Write-Host "Pod: $podName" -ForegroundColor $Colors.Info 902 | $df = kubectl exec -n $Namespace $podName -- df -h / 2>$null | Select-Object -Last 1 903 | if ($df) { 904 | Write-Host " Root filesystem: $df" -ForegroundColor $Colors.Success 905 | } 
906 | Write-Host "" 907 | } 908 | } 909 | 910 | function Start-FullDeployment { 911 | Show-Header "Full OSDFIR Deployment" 912 | 913 | if (-not (Test-Prerequisites)) { 914 | return 915 | } 916 | 917 | Update-DeploymentContext -Namespace $Namespace -ReleaseName $ReleaseName 918 | if ($script:IsFirstDeployment) { 919 | Write-Host "" 920 | Write-Host "First-time deployment detected. Initial container pulls (especially the Ollama model download) can take longer than usual." -ForegroundColor $Colors.Warning 921 | } 922 | 923 | if ($DryRun) { 924 | Write-Host "DRY RUN: Would execute the following steps:" -ForegroundColor $Colors.Warning 925 | Write-Host "1. Start Docker Desktop (if not running)" -ForegroundColor $Colors.Info 926 | Write-Host "2. Start Minikube cluster with tunnel" -ForegroundColor $Colors.Info 927 | Write-Host "3. Initialize and apply Terraform configuration" -ForegroundColor $Colors.Info 928 | Write-Host "4. Start port forwarding for services" -ForegroundColor $Colors.Info 929 | return 930 | } 931 | 932 | # Step 1: Ensure Docker Desktop is running 933 | Write-Host "Step 1: Ensuring Docker Desktop is running..." -ForegroundColor $Colors.Info 934 | if (-not (Start-DockerDesktop)) { 935 | Write-Host "ERROR: Could not start Docker Desktop" -ForegroundColor $Colors.Error 936 | return 937 | } 938 | 939 | # Step 2: Start Minikube 940 | Write-Host "" 941 | Write-Host "Step 2: Starting Minikube cluster..." -ForegroundColor $Colors.Info 942 | if (-not (Start-OSDFIRMinikube)) { 943 | Write-Host "ERROR: Failed to start Minikube" -ForegroundColor $Colors.Error 944 | return 945 | } 946 | 947 | # Start tunnel after successful cluster start 948 | Start-MinikubeTunnel 949 | 950 | # Step 2.5: Build the MCP Server image 951 | #if (-not (New-MCPServerImage)) { 952 | # Write-Host "ERROR: MCP Server image build failed. Halting deployment." -ForegroundColor $Colors.Error 953 | # return 954 | #} 955 | 956 | # Step 3: Deploy with Terraform 957 | Write-Host "" 958 | Write-Host "Step 3: Deploying OSDFIR with Terraform..." -ForegroundColor $Colors.Info 959 | Push-Location "$PSScriptRoot\..\terraform" 960 | try { 961 | $helmTimeoutSec = Get-HelmTimeoutSeconds 962 | terraform init 963 | if ($LASTEXITCODE -ne 0) { 964 | Write-Host "ERROR: Terraform init failed" -ForegroundColor $Colors.Error 965 | return 966 | } 967 | 968 | # Add after line 922 (terraform init) 969 | # Check for existing resources and import them into state if needed 970 | $existingResources = kubectl get deployment,service,configmap -n $Namespace -o json | ConvertFrom-Json 971 | foreach ($resource in $existingResources.items) { 972 | # Logic to import resources into Terraform state 973 | } 974 | 975 | # Check if Helm release exists and import it if needed 976 | Write-Host "Checking for existing Helm release..." -ForegroundColor $Colors.Info 977 | $existingRelease = helm list -n $Namespace -o json | ConvertFrom-Json | Where-Object { $_.name -eq $ReleaseName } 978 | if ($existingRelease) { 979 | Write-Host "Found existing release '$ReleaseName', importing into Terraform state..." 
988 |         terraform apply -auto-approve -var "helm_timeout=$helmTimeoutSec"
989 |         if ($LASTEXITCODE -ne 0) {
990 |             Write-Host "ERROR: Terraform apply failed" -ForegroundColor $Colors.Error
991 |             return
992 |         }
993 |     } finally {
994 |         Pop-Location
995 |     }
996 | 
997 |     # Step 4: Wait for pods to be ready
998 |     Write-Host ""
999 |     Write-Host "Step 4: Waiting for pods to be ready..." -ForegroundColor $Colors.Info
1000 |     $timeout = Get-HelmTimeoutSeconds
1001 |     $elapsed = 0
1002 |     do {
1003 |         Start-Sleep -Seconds 20
1004 |         $elapsed += 20
1005 |         if ($elapsed -gt 0 -and ($elapsed % 120 -eq 0)) {
1006 |             Write-Host "Tip: Run 'kubectl get deploy -n $Namespace' in another terminal to monitor rollout progress." -ForegroundColor $Colors.Info
1007 |         }
1008 |         $pods = kubectl get pods -n $Namespace --no-headers 2>$null
1009 |         # A pod counts as ready when its READY column reads n/n for any n.
1010 |         $runningPods = ($pods | Where-Object {
1011 |                 $_ -match "Running" -and (($_ -split '\s+')[1] -match '^(\d+)/\1$') }).Count
1012 |         $totalPods = ($pods | Measure-Object).Count
1013 |         Write-Host " Pods ready: $runningPods/$totalPods ($elapsed seconds elapsed)" -ForegroundColor $Colors.Info
1014 | 
1015 |         # Check if Ollama is downloading model
1016 |         $ollamaPod = kubectl get pods -n $Namespace -l app=ollama --no-headers 2>$null
1017 |         if ($ollamaPod -and $ollamaPod -match "Init") {
1018 |             $podName = ($ollamaPod -split '\s+')[0]
1019 |             try {
1020 |                 $initLogs = kubectl logs $podName -c model-puller -n $Namespace --tail=3 2>$null
1021 |                 if ($initLogs -and $initLogs -match "Pulling model|pulling manifest|downloading") {
1022 |                     $lastLine = ($initLogs -split "`n")[-1].Trim()
1023 |                     if ($lastLine) {
1024 |                         # Clean up Unicode box-drawing characters and other display artifacts
1025 |                         $cleanedLine = $lastLine -replace '[^\x20-\x7E]', '' -replace '\s+', ' '
1026 |                         # Extract meaningful information from progress lines
1027 |                         if ($cleanedLine -match "pulling (\w+):\s+(\d+%)\s+(.+)") {
1028 |                             Write-Host " Ollama: Downloading model layer - $($matches[2]) complete" -ForegroundColor $Colors.Warning
1029 |                         } elseif ($cleanedLine -match "pulling manifest") {
1030 |                             Write-Host " Ollama: Downloading model manifest..." -ForegroundColor $Colors.Warning
1031 |                         } elseif ($cleanedLine -match "downloading") {
1032 |                             Write-Host " Ollama: Downloading AI model..." -ForegroundColor $Colors.Warning
1033 |                         } else {
1034 |                             Write-Host " Ollama: Downloading AI model... This may take several minutes." -ForegroundColor $Colors.Warning
1035 |                         }
1036 |                     } else {
1037 |                         Write-Host " Ollama is downloading AI model... This may take several minutes." -ForegroundColor $Colors.Warning
1038 |                     }
1039 |                 } elseif ($initLogs -and $initLogs -match "already exists, skipping download") {
1040 |                     Write-Host " Ollama: Model already cached, initializing..." -ForegroundColor $Colors.Success
1041 |                 } else {
1042 |                     Write-Host " Ollama is initializing AI model..." -ForegroundColor $Colors.Warning
1043 |                 }
1044 |             } catch {
1045 |                 Write-Host " Ollama is downloading AI model... 
This may take several minutes." -ForegroundColor $Colors.Warning 1046 | } 1047 | } 1048 | } while ($runningPods -lt $totalPods -and $elapsed -lt $timeout) 1049 | 1050 | if ($runningPods -lt $totalPods) { 1051 | Write-Host "WARNING: Not all pods are ready after $timeout seconds" -ForegroundColor $Colors.Warning 1052 | Write-Host "You can check status with: .\manage-osdfir-lab.ps1 status" -ForegroundColor $Colors.Info 1053 | } 1054 | 1055 | # Step 5: Start services 1056 | Write-Host "" 1057 | Write-Host "Step 5: Starting port forwarding..." -ForegroundColor $Colors.Info 1058 | Start-Services 1059 | 1060 | Write-Host "" 1061 | Write-Host "Deployment completed!" -ForegroundColor $Colors.Success 1062 | Write-Host "Use .\manage-osdfir-lab.ps1 creds to get login credentials" -ForegroundColor $Colors.Info 1063 | Write-Host "Use .\manage-osdfir-lab.ps1 ollama to check AI model status" -ForegroundColor $Colors.Info 1064 | } 1065 | 1066 | function Start-SmartCleanup { 1067 | Show-Header "Smart OSDFIR Cleanup (Preserves AI Models & Data)" 1068 | 1069 | if ($DryRun) { 1070 | Write-Host "DRY RUN: Would execute the following steps:" -ForegroundColor $Colors.Warning 1071 | Write-Host "1. Stop all port forwarding jobs" -ForegroundColor $Colors.Info 1072 | Write-Host "2. Destroy Terraform resources" -ForegroundColor $Colors.Info 1073 | Write-Host "3. Preserve Minikube cluster and persistent data" -ForegroundColor $Colors.Header 1074 | return 1075 | } 1076 | 1077 | if (-not $Force) { 1078 | Write-Host "This will clean up OSDFIR services but preserve:" -ForegroundColor $Colors.Info 1079 | Write-Host " - AI models (no re-download needed)" -ForegroundColor $Colors.Success 1080 | Write-Host " - Database data" -ForegroundColor $Colors.Success 1081 | Write-Host " - Minikube cluster" -ForegroundColor $Colors.Success 1082 | Write-Host "" 1083 | $confirmation = Read-Host "Continue with smart cleanup? (yes/no)" 1084 | if ($confirmation -ne "yes") { 1085 | Write-Host "Cleanup cancelled." -ForegroundColor $Colors.Warning 1086 | return 1087 | } 1088 | } 1089 | 1090 | # Step 1: Stop services 1091 | Write-Host "Step 1: Stopping port forwarding jobs..." -ForegroundColor $Colors.Info 1092 | $pfJobs = Get-Job | Where-Object { $_.Name -like "pf-*" } 1093 | if ($pfJobs) { 1094 | $pfJobs | Stop-Job 1095 | $pfJobs | Remove-Job -Force 1096 | Write-Host "Port forwarding jobs stopped." -ForegroundColor $Colors.Success 1097 | } 1098 | 1099 | # Step 2: Destroy Terraform 1100 | Write-Host "" 1101 | Write-Host "Step 2: Destroying Terraform resources..." -ForegroundColor $Colors.Info 1102 | Push-Location "$PSScriptRoot\..\terraform" 1103 | try { 1104 | terraform destroy -auto-approve 1105 | if ($LASTEXITCODE -ne 0) { 1106 | Write-Host "WARNING: Terraform destroy had issues" -ForegroundColor $Colors.Warning 1107 | } 1108 | } finally { 1109 | Pop-Location 1110 | } 1111 | 1112 | Write-Host "" 1113 | Write-Host "Smart cleanup completed!" 

function Start-FullCleanup {
    Show-Header "COMPLETE OSDFIR Destruction (Nuclear Option)"

    if ($DryRun) {
        Write-Host "DRY RUN: Would execute the following steps:" -ForegroundColor $Colors.Error
        Write-Host "1. Stop all port forwarding jobs" -ForegroundColor $Colors.Info
        Write-Host "2. Destroy Terraform resources" -ForegroundColor $Colors.Info
        Write-Host "3. Delete entire Minikube cluster (including AI models & data)" -ForegroundColor $Colors.Error
        return
    }

    Write-Host ""
    Write-Host "WARNING: COMPLETE DESTRUCTION MODE" -ForegroundColor $Colors.Error -BackgroundColor Black
    Write-Host ""
    Write-Host "This will permanently destroy:" -ForegroundColor $Colors.Error
    Write-Host " - All OSDFIR services" -ForegroundColor $Colors.Warning
    Write-Host " - All database data" -ForegroundColor $Colors.Warning
    Write-Host " - All AI models (1.6GB+ will need re-download)" -ForegroundColor $Colors.Warning
    Write-Host " - Entire Minikube cluster" -ForegroundColor $Colors.Warning
    Write-Host " - All persistent volumes and data" -ForegroundColor $Colors.Warning
    Write-Host ""
    Write-Host "TIP: Consider 'teardown-lab' instead to preserve AI models and data" -ForegroundColor $Colors.Info
    Write-Host ""

    if (-not $Force) {
        $confirmation = Read-Host "Type 'DESTROY' in all caps to confirm complete destruction"
        if ($confirmation -ne "DESTROY") {
            Write-Host "Complete destruction cancelled." -ForegroundColor $Colors.Success
            Write-Host "TIP: Use 'teardown-lab' for smart cleanup that preserves data" -ForegroundColor $Colors.Info
            return
        }

        # Double confirmation for nuclear option
        $finalConfirmation = Read-Host "Final confirmation - this will delete EVERYTHING. Continue? (yes/no)"
        if ($finalConfirmation -ne "yes") {
            Write-Host "Complete destruction cancelled." -ForegroundColor $Colors.Success
            return
        }
    }

    # Step 1: Stop services
    Write-Host "Step 1: Stopping port forwarding jobs..." -ForegroundColor $Colors.Info
    $pfJobs = Get-Job | Where-Object { $_.Name -like "pf-*" }
    if ($pfJobs) {
        $pfJobs | Stop-Job
        $pfJobs | Remove-Job -Force
        Write-Host "Port forwarding jobs stopped." -ForegroundColor $Colors.Success
    }

    # Step 2: Destroy Terraform
    Write-Host ""
    Write-Host "Step 2: Destroying Terraform resources..." -ForegroundColor $Colors.Info
    Push-Location "$PSScriptRoot\..\terraform"
    try {
        terraform destroy -auto-approve
        if ($LASTEXITCODE -ne 0) {
            Write-Host "WARNING: Terraform destroy had issues" -ForegroundColor $Colors.Warning
        }
    } finally {
        Pop-Location
    }

    # Step 3: Delete Minikube cluster completely
    Write-Host ""
    Write-Host "Step 3: Deleting entire Minikube cluster..." -ForegroundColor $Colors.Error
    # Skip confirmation since user already confirmed complete destruction.
    # Save the original value first: once $script:Force is overwritten, reading
    # $Force resolves to the override, so restoring from $Force was a no-op bug.
    $originalForce = $script:Force
    $script:Force = $true
    Remove-MinikubeCluster
    $script:Force = $originalForce # Restore original Force setting

    Write-Host ""
    Write-Host "Complete destruction finished!" -ForegroundColor $Colors.Error
    Write-Host "Everything has been permanently removed." -ForegroundColor $Colors.Warning
    Write-Host "Next deployment will start completely fresh (including AI model download)." -ForegroundColor $Colors.Info
}
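
# Ordering note: Terraform resources are destroyed *before* the Minikube cluster is
# deleted so the Terraform state stays consistent. If the cluster were deleted first,
# 'terraform destroy' would fail against a dead API server and leave stale entries in
# the state file, which would then need manual cleanup, e.g.:
#   terraform state list
#   terraform state rm <address>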

function Restart-Deployment {
    Show-Header "Reinstalling OSDFIR Deployment"

    if (-not (Test-Prerequisites)) {
        return
    }

    if (-not (Test-MinikubeRunning)) {
        Write-Host "ERROR: Minikube cluster is not running" -ForegroundColor $Colors.Error
        Write-Host "TIP: Run .\manage-osdfir-lab.ps1 deploy to start the full environment" -ForegroundColor $Colors.Info
        return
    }

    Update-DeploymentContext -Namespace $Namespace -ReleaseName $ReleaseName

    if ($DryRun) {
        Write-Host "DRY RUN: Would execute the following steps:" -ForegroundColor $Colors.Warning
        Write-Host "1. Stop all port forwarding jobs" -ForegroundColor $Colors.Info
        Write-Host "2. Preserve database passwords" -ForegroundColor $Colors.Info
        Write-Host "3. Uninstall existing Helm release" -ForegroundColor $Colors.Info
        Write-Host "4. Wait for cleanup to complete" -ForegroundColor $Colors.Info
        Write-Host "5. Apply Terraform configuration with preserved passwords" -ForegroundColor $Colors.Info
        Write-Host "6. Wait for pods to be ready" -ForegroundColor $Colors.Info
        Write-Host "7. Start port forwarding" -ForegroundColor $Colors.Info
        return
    }

    if (-not $Force) {
        $confirmation = Read-Host "This will uninstall and reinstall the '$ReleaseName' Helm release in namespace '$Namespace'. Continue? (yes/no)"
        if ($confirmation -ne "yes") {
            Write-Host "Reinstall cancelled." -ForegroundColor $Colors.Warning
            return
        }
    }

    # Step 1: Stop port forwarding jobs
    Write-Host "Step 1: Stopping port forwarding jobs..." -ForegroundColor $Colors.Info
    $pfJobs = Get-Job | Where-Object { $_.Name -like "pf-*" }
    if ($pfJobs) {
        $pfJobs | Stop-Job
        $pfJobs | Remove-Job -Force
        Write-Host "Port forwarding jobs stopped." -ForegroundColor $Colors.Success
    }

    # Step 2: Preserve database passwords
    Write-Host ""
    Write-Host "Step 2: Preserving database passwords..." -ForegroundColor $Colors.Info
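    # Kubernetes stores secret values base64-encoded, so preserving a password means
    # reading the secret and decoding it. A standalone equivalent of what the code
    # below does (secret and key names must match your release):
    #   $b64 = kubectl get secret osdfir-lab-timesketch-secret -n $Namespace -o jsonpath="{.data.postgres-user}"
    #   [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($b64))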
    $preservedPasswords = @{}

    # Try to get existing passwords from secrets.
    # NOTE: The secret names below assume the default release name "osdfir-lab";
    # adjust them if you deploy under a different -ReleaseName.
    try {
        $timesketchPwd = kubectl get secret osdfir-lab-timesketch-secret -n $Namespace -o jsonpath="{.data.postgres-user}" 2>$null
        if ($timesketchPwd -and $timesketchPwd.Length -gt 0) {
            $preservedPasswords['timesketch'] = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($timesketchPwd))
            Write-Host " [OK] Preserved Timesketch database password" -ForegroundColor $Colors.Success
        } else {
            Write-Host " [SKIP] Could not preserve Timesketch password (will generate new)" -ForegroundColor $Colors.Warning
        }
    } catch {
        Write-Host " [SKIP] Could not preserve Timesketch password (will generate new)" -ForegroundColor $Colors.Warning
    }

    try {
        $openrelikPwd = kubectl get secret osdfir-lab-openrelik-secret -n $Namespace -o jsonpath="{.data.postgres-user}" 2>$null
        if ($openrelikPwd -and $openrelikPwd.Length -gt 0) {
            $preservedPasswords['openrelik'] = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($openrelikPwd))
            Write-Host " [OK] Preserved OpenRelik database password" -ForegroundColor $Colors.Success
        } else {
            Write-Host " [SKIP] Could not preserve OpenRelik password (will generate new)" -ForegroundColor $Colors.Warning
        }
    } catch {
        Write-Host " [SKIP] Could not preserve OpenRelik password (will generate new)" -ForegroundColor $Colors.Warning
    }

    # Step 3: Uninstall existing Helm release
    Write-Host ""
    Write-Host "Step 3: Uninstalling existing Helm release..." -ForegroundColor $Colors.Info

    # Check if release exists
    $releaseExists = $false
    try {
        $release = helm list -n $Namespace -o json | ConvertFrom-Json | Where-Object { $_.name -eq $ReleaseName }
        if ($release) {
            $releaseExists = $true
            Write-Host "Found existing release '$ReleaseName' with status: $($release.status)" -ForegroundColor $Colors.Info
        }
    } catch {
        Write-Host "Unable to check existing releases, proceeding with reinstall..." -ForegroundColor $Colors.Warning
    }

    if ($releaseExists) {
        Write-Host "Uninstalling release '$ReleaseName'..." -ForegroundColor $Colors.Warning
        helm uninstall $ReleaseName -n $Namespace
        if ($LASTEXITCODE -eq 0) {
            Write-Host "Release uninstalled successfully." -ForegroundColor $Colors.Success
        } else {
            Write-Host "WARNING: Uninstall may have had issues, proceeding anyway..." -ForegroundColor $Colors.Warning
        }

        # Step 4: Wait for cleanup
        Write-Host ""
        Write-Host "Step 4: Waiting for resources to be cleaned up..." -ForegroundColor $Colors.Info
        $cleanupTimeout = 120
        $elapsed = 0
        do {
            Start-Sleep -Seconds 5
            $elapsed += 5
            $pods = kubectl get pods -n $Namespace --no-headers 2>$null
            $remainingPods = ($pods | Where-Object { $_ -match $ReleaseName }).Count
            Write-Host " Remaining pods: $remainingPods ($elapsed seconds elapsed)" -ForegroundColor $Colors.Info
        } while ($remainingPods -gt 0 -and $elapsed -lt $cleanupTimeout)

        if ($remainingPods -gt 0) {
            Write-Host "WARNING: Some pods may still be terminating, proceeding anyway..." -ForegroundColor $Colors.Warning
        } else {
            Write-Host "Cleanup completed." -ForegroundColor $Colors.Success
        }
    } else {
        Write-Host "No existing release found, proceeding with fresh install..." -ForegroundColor $Colors.Info
    }
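
    # With both passwords preserved, the temporary values file generated in Step 5
    # below ends up looking like this (placeholder values shown):
    #   # Temporary values file for preserved passwords during reinstall
    #   timesketch:
    #     postgres:
    #       password: "<preserved timesketch password>"
    #   openrelik:
    #     postgres:
    #       password: "<preserved openrelik password>"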
    # Step 5: Create temporary values file with preserved passwords
    Write-Host ""
    Write-Host "Step 5: Creating temporary values file with preserved passwords..." -ForegroundColor $Colors.Info
    $tempValuesFile = "$PSScriptRoot\..\terraform\temp-preserved-passwords.yaml"
    $tempValuesContent = @"
# Temporary values file for preserved passwords during reinstall
"@

    if ($preservedPasswords.ContainsKey('timesketch')) {
        $tempValuesContent += @"

timesketch:
  postgres:
    password: "$($preservedPasswords['timesketch'])"
"@
        Write-Host " Added preserved Timesketch password to values" -ForegroundColor $Colors.Success
    }

    if ($preservedPasswords.ContainsKey('openrelik')) {
        $tempValuesContent += @"

openrelik:
  postgres:
    password: "$($preservedPasswords['openrelik'])"
"@
        Write-Host " Added preserved OpenRelik password to values" -ForegroundColor $Colors.Success
    }

    # Write temporary values file
    $tempValuesContent | Out-File -FilePath $tempValuesFile -Encoding UTF8
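
    # Step 6 below injects that file by patching main.tf in place. Illustratively,
    # an entry such as (the actual list depends on your main.tf):
    #   values = [file("../configs/osdfir-lab-values.yaml")]
    # is rewritten to:
    #   values = [file("../configs/osdfir-lab-values.yaml"), file("temp-preserved-passwords.yaml")]
    # The replacement string is single-quoted on purpose: there, $1 is a regex group
    # reference for the -replace operator, not a PowerShell variable.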
    # Step 6: Reinstall with Terraform using preserved passwords
    Write-Host ""
    Write-Host "Step 6: Reinstalling OSDFIR with Terraform and preserved passwords..." -ForegroundColor $Colors.Info
    Push-Location "$PSScriptRoot\..\terraform"
    try {
        # Modify the Terraform main.tf to include the temp values file
        $originalMainContent = Get-Content "main.tf" -Raw
        $modifiedMainContent = $originalMainContent -replace 'values\s*=\s*\[\s*([^\]]+)\s*\]', 'values = [$1, file("temp-preserved-passwords.yaml")]'
        $modifiedMainContent | Out-File -FilePath "main.tf" -Encoding UTF8

        # Run terraform apply to reinstall
        $helmTimeoutSec = Get-HelmTimeoutSeconds
        terraform apply -auto-approve -var "helm_timeout=$helmTimeoutSec"
        $terraformResult = $LASTEXITCODE

        # Restore original main.tf
        $originalMainContent | Out-File -FilePath "main.tf" -Encoding UTF8

        if ($terraformResult -ne 0) {
            Write-Host "ERROR: Terraform apply failed during reinstall" -ForegroundColor $Colors.Error
            return
        }
    } finally {
        Pop-Location
        # Clean up temporary values file
        Remove-Item $tempValuesFile -ErrorAction SilentlyContinue
    }

    # Step 7: Wait for pods to be ready
    Write-Host ""
    Write-Host "Step 7: Waiting for pods to be ready..." -ForegroundColor $Colors.Info
    $timeout = Get-HelmTimeoutSeconds
    $elapsed = 0
    do {
        Start-Sleep -Seconds 20
        $elapsed += 20
        if ($elapsed -gt 0 -and ($elapsed % 120 -eq 0)) {
            Write-Host "Tip: Run 'kubectl get deploy -n $Namespace' in another terminal to monitor rollout progress." -ForegroundColor $Colors.Info
        }
        $pods = kubectl get pods -n $Namespace --no-headers 2>$null
        # Count pods whose READY column shows all containers up (1/1, 2/2, ..., N/N)
        $runningPods = ($pods | Where-Object { $_ -match "Running" -and $_ -match '\b(\d+)/\1\b' }).Count
        $totalPods = ($pods | Measure-Object).Count
        Write-Host " Pods ready: $runningPods/$totalPods ($elapsed seconds elapsed)" -ForegroundColor $Colors.Info

        # Check if Ollama is downloading model
        $ollamaPod = kubectl get pods -n $Namespace -l app=ollama --no-headers 2>$null
        if ($ollamaPod -and $ollamaPod -match "Init") {
            $podName = ($ollamaPod -split '\s+')[0]
            try {
                $initLogs = kubectl logs $podName -c model-puller -n $Namespace --tail=3 2>$null
                if ($initLogs -and $initLogs -match "Pulling model|pulling manifest|downloading") {
                    $lastLine = ($initLogs -split "`n")[-1].Trim()
                    if ($lastLine) {
                        # Clean up Unicode box-drawing characters and other display artifacts
                        $cleanedLine = $lastLine -replace '[^\x20-\x7E]', '' -replace '\s+', ' '
                        # Extract meaningful information from progress lines
                        if ($cleanedLine -match "pulling (\w+):\s+(\d+%)\s+(.+)") {
                            Write-Host " Ollama: Downloading model layer - $($matches[2]) complete" -ForegroundColor $Colors.Warning
                        } elseif ($cleanedLine -match "pulling manifest") {
                            Write-Host " Ollama: Downloading model manifest..." -ForegroundColor $Colors.Warning
                        } elseif ($cleanedLine -match "downloading") {
                            Write-Host " Ollama: Downloading AI model..." -ForegroundColor $Colors.Warning
                        } else {
                            Write-Host " Ollama: Downloading AI model... This may take several minutes." -ForegroundColor $Colors.Warning
                        }
                    } else {
                        Write-Host " Ollama is downloading AI model... This may take several minutes." -ForegroundColor $Colors.Warning
                    }
                } elseif ($initLogs -and $initLogs -match "already exists, skipping download") {
                    Write-Host " Ollama: Model already cached, initializing..." -ForegroundColor $Colors.Success
                } else {
                    Write-Host " Ollama is initializing AI model..." -ForegroundColor $Colors.Warning
                }
            } catch {
                Write-Host " Ollama is downloading AI model... This may take several minutes." -ForegroundColor $Colors.Warning
            }
        }
    } while ($runningPods -lt $totalPods -and $elapsed -lt $timeout)

    if ($runningPods -lt $totalPods) {
        Write-Host "WARNING: Not all pods are ready after $timeout seconds" -ForegroundColor $Colors.Warning
        Write-Host "You can check status with: .\manage-osdfir-lab.ps1 status" -ForegroundColor $Colors.Info
    } else {
        Write-Host "All pods are ready!" -ForegroundColor $Colors.Success
    }

    # Step 8: Start services
    Write-Host ""
    Write-Host "Step 8: Starting port forwarding..." -ForegroundColor $Colors.Info
    Start-Services

    Write-Host ""
    Write-Host "Reinstall completed!" -ForegroundColor $Colors.Success
    if ($preservedPasswords.Count -gt 0) {
        Write-Host "Database passwords were preserved - services should continue working with existing data." -ForegroundColor $Colors.Success
    } else {
        Write-Host "New database passwords were generated - existing data may be inaccessible." -ForegroundColor $Colors.Warning
    }
    Write-Host "Use .\manage-osdfir-lab.ps1 creds to get login credentials" -ForegroundColor $Colors.Info
    Write-Host "Use .\manage-osdfir-lab.ps1 ollama to check AI model status" -ForegroundColor $Colors.Info
}
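
# NOTE: The readiness/Ollama-progress loop above duplicates the one in
# Start-FullDeployment. A possible refactor (sketch only, not wired in) would be a
# shared helper along these lines:
#   function Wait-ForPodsReady {
#       param([string]$Ns, [int]$TimeoutSec)
#       $elapsed = 0
#       do {
#           Start-Sleep -Seconds 20; $elapsed += 20
#           $pods = kubectl get pods -n $Ns --no-headers 2>$null
#           $ready = ($pods | Where-Object { $_ -match "Running" -and $_ -match '\b(\d+)/\1\b' }).Count
#           $total = ($pods | Measure-Object).Count
#       } while ($ready -lt $total -and $elapsed -lt $TimeoutSec)
#       return ($ready -ge $total)
#   }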

#function New-MCPServerImage {
#    Show-Header "Building Timesketch MCP Server Image in Minikube"
#
#    $buildScriptPath = "$PSScriptRoot\build-timesketch-mcp.ps1"
#
#    if (-not (Test-Path $buildScriptPath)) {
#        Write-Host "ERROR: Build script not found at: $buildScriptPath" -ForegroundColor $Colors.Error
#        return $false
#    }
#
#    Write-Host "Executing build script with -Minikube and -Force flags..." -ForegroundColor $Colors.Info
#    Write-Host "This will build the image directly into Minikube's Docker daemon." -ForegroundColor $Colors.Warning
#
#    try {
#        # Execute the script and pass parameters, including the new switch.
#        & $buildScriptPath -Minikube -Force -CalledByManager
#
#        if ($LASTEXITCODE -eq 0) {
#            Write-Host "[OK] MCP Server image built successfully into Minikube" -ForegroundColor $Colors.Success
#            return $true
#        } else {
#            Write-Host "[ERROR] Failed to build MCP Server image. Check the output above for errors." -ForegroundColor $Colors.Error
#            return $false
#        }
#    } catch {
#        Write-Host "[ERROR] An error occurred while running the build script: $($_.Exception.Message)" -ForegroundColor $Colors.Error
#        return $false
#    }
#}

# Handle -h flag for help
if ($h) {
    $Action = "help"
}
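
# Typical invocations of this script (see Show-Help for the full list):
#   .\manage-osdfir-lab.ps1 deploy                  # full environment bring-up
#   .\manage-osdfir-lab.ps1 status
#   .\manage-osdfir-lab.ps1 teardown-lab            # keeps AI models and data
#   .\manage-osdfir-lab.ps1 teardown-lab-all -Force # nuclear option, no prompts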
# Main script logic
switch ($Action.ToLower()) {
    "help" { Show-Help }
    "status" { Show-Status }
    "start" { Start-Services }
    "stop" {
        $pfJobs = Get-Job | Where-Object { $_.Name -like "pf-*" }
        if ($pfJobs.Count -eq 0) {
            Write-Host "No port forwarding jobs found to stop." -ForegroundColor $Colors.Warning
        } else {
            Write-Host "Stopping and removing port forwarding jobs..." -ForegroundColor $Colors.Info
            $pfJobs | Stop-Job
            $pfJobs | Remove-Job -Force
            Write-Host "All port forwarding jobs stopped and removed." -ForegroundColor $Colors.Success
        }
    }
    "restart" {
        Write-Host "Restarting OSDFIR services..." -ForegroundColor $Colors.Info
        $pfJobs = Get-Job | Where-Object { $_.Name -like "pf-*" }
        if ($pfJobs) {
            $pfJobs | Stop-Job
            $pfJobs | Remove-Job -Force
        }
        Start-Sleep -Seconds 2
        Start-Services
    }
    "logs" { Show-Logs }
    "creds" { Show-Credentials }
    "jobs" {
        Show-Header "Background Jobs"
        $allJobs = Get-Job | Where-Object { $_.Name -like "pf-*" -or $_.Name -eq "minikube-tunnel" }
        if ($allJobs.Count -eq 0) {
            Write-Host "No OSDFIR-related jobs found." -ForegroundColor $Colors.Warning
        } else {
            foreach ($job in $allJobs) {
                $status = switch ($job.State) {
                    "Running" { "[RUNNING]" }
                    "Completed" { "[STOPPED]" }
                    "Failed" { "[FAILED]" }
                    "Stopped" { "[STOPPED]" }
                    default { "[UNKNOWN]" }
                }
                $color = switch ($job.State) {
                    "Running" { $Colors.Success }
                    "Failed" { $Colors.Error }
                    "Stopped" { $Colors.Warning }
                    default { $Colors.Gray }
                }
                Write-Host " $status $($job.Name)" -ForegroundColor $color
            }
        }
    }
    "cleanup" {
        Write-Host "OSDFIR Cleanup - Use with caution!" -ForegroundColor $Colors.Error
        if (-not $Force) {
            $confirmation = Read-Host "Are you sure you want to cleanup OSDFIR resources? (yes/no)"
            if ($confirmation -ne "yes") {
                Write-Host "Cleanup cancelled." -ForegroundColor $Colors.Warning
                return
            }
        }
        Write-Host "Cleaning up OSDFIR jobs..." -ForegroundColor $Colors.Warning
        $allJobs = Get-Job | Where-Object { $_.Name -like "pf-*" -or $_.Name -eq "minikube-tunnel" }
        if ($allJobs) {
            $allJobs | Stop-Job
            $allJobs | Remove-Job -Force
            Write-Host "OSDFIR jobs cleaned up." -ForegroundColor $Colors.Success
        } else {
            Write-Host "No OSDFIR jobs found to clean up." -ForegroundColor $Colors.Info
        }
    }
    "helm" { Show-Helm }
    "uninstall" {
        if (-not $Force) {
            $confirmation = Read-Host "Are you sure you want to uninstall the Helm release '$ReleaseName'? (yes/no)"
            if ($confirmation -ne "yes") {
                Write-Host "Uninstall cancelled." -ForegroundColor $Colors.Warning
                return
            }
        }
        Show-Header "Uninstalling OSDFIR Helm Release"
        helm uninstall $ReleaseName -n $Namespace
    }
    "reinstall" { Restart-Deployment }
    "storage" { Show-Storage }
    "minikube" { Show-MinikubeStatus }
    "docker" {
        Show-Header "Docker Desktop Management"
        if (Test-Docker) {
            # Show some Docker info
            Write-Host ""
            Write-Host "Docker Info:" -ForegroundColor $Colors.Info
            docker version --format "Client: {{.Client.Version}}"
            docker version --format "Server: {{.Server.Version}}"
        } else {
            Start-DockerDesktop
        }
    }
    "deploy" { Start-FullDeployment }
    "teardown-lab" { Start-SmartCleanup }
    "teardown-lab-all" { Start-FullCleanup }
    "ollama" { Show-OllamaStatus }
    "ollama-test" {
        Show-Header "Ollama AI Prompt Testing"

        if (-not (Test-KubectlAccess)) {
            return
        }

        $ollamaPod = kubectl get pods -n $Namespace -l app=ollama --no-headers 2>$null
        if (-not $ollamaPod) {
            Write-Host "ERROR: Ollama pod not found" -ForegroundColor $Colors.Error
            return
        }

        $name = ($ollamaPod -split '\s+')[0]

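        # 'ollama list' prints a human-readable table, roughly (example row only,
        # not real output):
        #   NAME            ID              SIZE      MODIFIED
        #   llama3:latest   365c0bd3c000    4.7 GB    2 days ago
        # The parsing below keys off the SIZE column, so it may need adjusting if
        # the CLI output format changes between Ollama versions.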
        # Get available models
        $modelOutput = kubectl exec -n $Namespace $name -- ollama list 2>$null
        $availableModels = @()
        if ($modelOutput) {
            $lines = $modelOutput -split "`n"
            $modelLines = $lines | Where-Object { $_ -match "^\w+.*\d+\s+(GB|MB|KB)" }
            foreach ($line in $modelLines) {
                $parts = $line -split '\s+'
                $availableModels += $parts[0]
            }
        }

        if ($availableModels.Count -eq 0) {
            Write-Host "ERROR: No models available for testing" -ForegroundColor $Colors.Error
            return
        }

        $testModel = $availableModels[0]
        Write-Host "Testing model: $testModel" -ForegroundColor $Colors.Info
        Write-Host "This may take a few moments for each prompt..." -ForegroundColor $Colors.Warning

        # Test prompts with numbering and humorous forensics questions
        $testPrompts = @(
            @{Number=1; Prompt="Tell me a pun about digital forensics. Be creative and funny."},
            @{Number=2; Prompt="Write a haiku about finding deleted files. Make it dramatic and slightly ridiculous."}
        )

        $totalPrompts = $testPrompts.Count
        foreach ($promptObj in $testPrompts) {
            Write-Host ""
            Write-Host "Test $($promptObj.Number) of ${totalPrompts}: $($promptObj.Prompt)" -ForegroundColor $Colors.Header
            Write-Host "Response:" -ForegroundColor $Colors.Success

            try {
                $promptResult = kubectl exec -n $Namespace $name -- ollama run $testModel "$($promptObj.Prompt)" 2>&1 | Out-String

                if ($promptResult -and $promptResult.Trim().Length -gt 10) {
                    # Extract the actual answer (last meaningful line)
                    $lines = $promptResult -split "`n" | Where-Object { $_.Trim() -ne "" }
                    $actualAnswer = $lines | Where-Object { $_ -notmatch "Thinking|\.\.\.done thinking" } | Select-Object -Last 1

                    if ($actualAnswer -and $actualAnswer.Trim().Length -gt 5) {
                        Write-Host " [OK] AI model is responding to prompts" -ForegroundColor $Colors.Success
                        Write-Host " Sample response: $($actualAnswer.Trim())" -ForegroundColor $Colors.Gray
                    } else {
                        Write-Host " [OK] AI model responding but verbose output detected" -ForegroundColor $Colors.Warning
                        Write-Host " Raw response length: $($promptResult.Length) characters" -ForegroundColor $Colors.Gray
                    }
                } else {
                    Write-Host " [ERROR] AI model not responding properly" -ForegroundColor $Colors.Error
                    Write-Host " Debug: Response length = $($promptResult.Length)" -ForegroundColor $Colors.Gray
                }
            } catch {
                Write-Host "Error: $($_.Exception.Message)" -ForegroundColor $Colors.Error
            }

            Write-Host ""
        }

        Write-Host "AI Prompt Testing Complete!" -ForegroundColor $Colors.Success
        Write-Host "TIP: Use these prompts as examples for integrating AI into your forensic workflows." -ForegroundColor $Colors.Info
    }
    default { Show-Help }
}
--------------------------------------------------------------------------------