├── .github
└── workflows
│ └── deploy-website.yml
├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── app
├── index.html
├── package-lock.json
├── package.json
├── postcss.config.js
├── public
│ ├── agent-activity_tracking_agent.yaml
│ ├── agent-command_tracking_agent.yaml
│ ├── agent-documentation_agent.yaml
│ ├── agent-focus_tracker.yaml
│ ├── agent-memory_summarization.yaml
│ └── eye-logo-black.svg
├── src-tauri
│ ├── .gitignore
│ ├── Cargo.lock
│ ├── Cargo.toml
│ ├── build.rs
│ ├── capabilities
│ │ └── default.json
│ ├── icons
│ │ ├── 128x128.png
│ │ ├── 128x128@2x.png
│ │ ├── 32x32.png
│ │ ├── Square107x107Logo.png
│ │ ├── Square142x142Logo.png
│ │ ├── Square150x150Logo.png
│ │ ├── Square284x284Logo.png
│ │ ├── Square30x30Logo.png
│ │ ├── Square310x310Logo.png
│ │ ├── Square44x44Logo.png
│ │ ├── Square71x71Logo.png
│ │ ├── Square89x89Logo.png
│ │ ├── StoreLogo.png
│ │ ├── icon.icns
│ │ ├── icon.ico
│ │ └── icon.png
│ ├── src
│ │ ├── lib.rs
│ │ └── main.rs
│ └── tauri.conf.json
├── src
│ ├── components
│ │ ├── AgentCard.tsx
│ │ ├── AgentImportHandler.tsx
│ │ ├── AgentLogViewer.tsx
│ │ ├── AppHeader.tsx
│ │ ├── AvailableModels.tsx
│ │ ├── CommunityTab.tsx
│ │ ├── EditAgent
│ │ │ ├── EditAgentModal.tsx
│ │ │ ├── Modal.tsx
│ │ │ └── useEditAgentModalLogic.ts
│ │ ├── ErrorDisplay.tsx
│ │ ├── GenerateAgent.tsx
│ │ ├── GenerateAgentModal.tsx
│ │ ├── GetStarted.tsx
│ │ ├── GlobalLogsViewer.tsx
│ │ ├── JupyterServerModal.tsx
│ │ ├── LocalServerSetupDialog.tsx
│ │ ├── MemoryManager.tsx
│ │ ├── ScheduleAgentModal.tsx
│ │ ├── SidebarMenu.tsx
│ │ ├── StartupDialogs.tsx
│ │ ├── TextBubble.tsx
│ │ └── debug
│ │ │ └── Auth0Debug.tsx
│ ├── desktop
│ │ └── .gitkeep
│ ├── env.d.ts
│ ├── index.css
│ ├── utils
│ │ ├── agent-output.ts
│ │ ├── agent_database.ts
│ │ ├── handlers
│ │ │ ├── JupyterConfig.ts
│ │ │ ├── javascript.ts
│ │ │ ├── python.ts
│ │ │ └── utils.ts
│ │ ├── logging.ts
│ │ ├── main_loop.ts
│ │ ├── ollamaServer.ts
│ │ ├── post-processor.ts
│ │ ├── pre-processor.ts
│ │ ├── python_system_prompt.ts
│ │ ├── screenCapture.ts
│ │ ├── sendApi.ts
│ │ ├── speechInputManager.ts
│ │ ├── streamApi.ts
│ │ ├── system_prompt.ts
│ │ └── test_system_prompt.ts
│ └── web
│ │ ├── App.tsx
│ │ ├── index.tsx
│ │ └── main.tsx
├── stats.html
├── tailwind.config.js
├── tsconfig.json
├── tsconfig.node.json
└── vite.config.ts
├── assets
└── ObserverAgent.png
├── community
├── api.py
├── api_handlers.py
├── compute.py
├── fireworks_handler.py
├── gemini_handler.py
├── marketplace.py
├── openrouter_handler.py
└── requirements.txt
├── docker-compose.yml
├── observer-ollama
├── LICENSE
├── observer_ollama
│ ├── __init__.py
│ ├── handle_ollama.py
│ ├── main.py
│ └── ssl_utils.py
├── pyproject.toml
└── test.py
├── print_info.sh
├── supervisord.conf
└── website
├── .github
└── workflows
│ └── deploy-website.yml
├── .gitignore
├── README.md
├── eslint.config.js
├── index.html
├── package-lock.json
├── package.json
├── postcss.config.js
├── public
├── CNAME
├── eye-logo-black.svg
├── eye-logo-white.svg
└── vite.svg
├── src
├── App.css
├── App.tsx
├── ObserverLanding.tsx
├── assets
│ └── react.svg
├── index.css
├── main.tsx
└── vite-env.d.ts
├── tailwind.config.js
├── tsconfig.app.json
├── tsconfig.json
├── tsconfig.node.json
└── vite.config.ts
/.github/workflows/deploy-website.yml:
--------------------------------------------------------------------------------
1 | name: Deploy Website
2 |
3 | on:
4 | push:
5 | branches: [ "main" ]
6 | paths:
7 | - 'website/**' # Only trigger on changes to website directory
8 |
9 | permissions:
10 | contents: read
11 | pages: write
12 | id-token: write
13 |
14 | jobs:
15 | build:
16 | runs-on: ubuntu-latest
17 | steps:
18 | - uses: actions/checkout@v4
19 |
20 | - name: Setup Node
21 | uses: actions/setup-node@v4
22 | with:
23 | node-version: '20'
24 |
25 | - name: Install Dependencies
26 | working-directory: ./website
27 | run: npm install
28 |
29 | - name: Build
30 | working-directory: ./website
31 | run: npm run build
32 |
33 | - name: Upload artifact
34 | uses: actions/upload-pages-artifact@v3
35 | with:
36 | path: website/dist
37 |
38 | deploy:
39 | needs: build
40 | runs-on: ubuntu-latest
41 | environment:
42 | name: github-pages
43 | url: ${{ steps.deployment.outputs.page_url }}
44 | steps:
45 | - name: Deploy to GitHub Pages
46 | id: deployment
47 | uses: actions/deploy-pages@v4
48 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | Ob-server/certs
2 |
3 | website/dist
4 | website/build
5 |
6 | website/src-tauri/target
7 | website/src-tauri/Cargo.lock
8 |
9 | website/node_modules
10 | website/.pnp
11 |
12 | app/node_modules
13 | app/dist
14 | app/.env
15 |
16 | desktop/node_modules
17 | desktop/public/assets/
18 | desktop/.pnp
19 |
20 | desktop/dist
21 | desktop/build
22 |
23 | desktop/src-tauri/target
24 | desktop/src-tauri/Cargo.lock
25 |
26 | observer_ollama/dist
27 | observer_ollama/observer_ollama.egg-info
28 | **/*.rs.bk
29 |
30 | .pnp.js
31 |
32 | # Python
33 | __pycache__/
34 | *.py[cod]
35 | *$py.class
36 | *.so
37 | .Python
38 | /python/**/data/
39 | /python/**/*.log
40 | /python/**/activity_log.json
41 |
42 | # Environment
43 | .env
44 | .env.local
45 | .env.development.local
46 | .env.test.local
47 | .env.production.local
48 | .venv
49 | env/
50 | venv/
51 |
52 | # Logs
53 | *.log
54 | npm-debug.log*
55 | yarn-debug.log*
56 | yarn-error.log*
57 |
58 | # Editor directories and files
59 | .idea
60 | .vscode
61 | *.swp
62 | *.swo
63 | .DS_Store
64 |
65 | # Cache directories
66 | .cache
67 | .pytest_cache/
68 | .mypy_cache/
69 | .coverage
70 | coverage/
71 | .eslintcache
72 |
73 | # Misc
74 | *.pem
75 | .DS_Store
76 | /node_modules
77 | /.pnp
78 | .pnp.js
79 |
80 | # Production build
81 | /dist
82 | /build
83 |
84 | # Tauri
85 | /src-tauri/target
86 | /src-tauri/Cargo.lock
87 | **/*.rs.bk
88 |
89 | # Python
90 | __pycache__/
91 | *.py[cod]
92 | *$py.class
93 | *.so
94 | .Python
95 | /python/**/data/
96 | /python/**/*.log
97 | /python/**/activity_log.json
98 |
99 | # Environment
100 | .env
101 | .env.local
102 | .env.development.local
103 | .env.test.local
104 | .env.production.local
105 | .venv
106 | env/
107 | venv/
108 |
109 | # Logs
110 | *.log
111 | npm-debug.log*
112 | yarn-debug.log*
113 | yarn-error.log*
114 |
115 | # Editor directories and files
116 | .idea
117 | .vscode
118 | *.swp
119 | *.swo
120 | .DS_Store
121 |
122 | # Cache directories
123 | .cache
124 | .pytest_cache/
125 | .mypy_cache/
126 | .coverage
127 | coverage/
128 | .eslintcache
129 |
130 | # Misc
131 | *.pem
132 | .DS_Store
# Byte-compiled / optimized / DLL files
133 | __pycache__/
134 | *.py[cod]
135 | *$py.class
136 |
137 | # C extensions
138 | *.so
139 |
140 | # Distribution / packaging
141 | .Python
142 | build/
143 | develop-eggs/
144 | dist/
145 | downloads/
146 | eggs/
147 | .eggs/
148 | lib/
149 | lib64/
150 | parts/
151 | sdist/
152 | var/
153 | wheels/
154 | share/python-wheels/
155 | *.egg-info/
156 | .installed.cfg
157 | *.egg
158 | MANIFEST
159 |
160 | # PyInstaller
161 | # Usually these files are written by a python script from a template
162 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
163 | *.manifest
164 | *.spec
165 |
166 | # Installer logs
167 | pip-log.txt
168 | pip-delete-this-directory.txt
169 |
170 | # Unit test / coverage reports
171 | htmlcov/
172 | .tox/
173 | .nox/
174 | .coverage
175 | .coverage.*
176 | .cache
177 | nosetests.xml
178 | coverage.xml
179 | *.cover
180 | *.py,cover
181 | .hypothesis/
182 | .pytest_cache/
183 | cover/
184 |
185 | # Translations
186 | *.mo
187 | *.pot
188 |
189 | # Django stuff:
190 | *.log
191 | local_settings.py
192 | db.sqlite3
193 | db.sqlite3-journal
194 |
195 | # Flask stuff:
196 | instance/
197 | .webassets-cache
198 |
199 | # Scrapy stuff:
200 | .scrapy
201 |
202 | # Sphinx documentation
203 | docs/_build/
204 |
205 | # PyBuilder
206 | .pybuilder/
207 | target/
208 |
209 | # Jupyter Notebook
210 | .ipynb_checkpoints
211 |
212 | # IPython
213 | profile_default/
214 | ipython_config.py
215 |
216 | # pyenv
217 | # For a library or package, you might want to ignore these files since the code is
218 | # intended to run in multiple environments; otherwise, check them in:
219 | # .python-version
220 |
221 | # pipenv
222 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
223 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
224 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
225 | # install all needed dependencies.
226 | #Pipfile.lock
227 |
228 | # UV
229 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
230 | # This is especially recommended for binary packages to ensure reproducibility, and is more
231 | # commonly ignored for libraries.
232 | #uv.lock
233 |
234 | # poetry
235 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
236 | # This is especially recommended for binary packages to ensure reproducibility, and is more
237 | # commonly ignored for libraries.
238 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
239 | #poetry.lock
240 |
241 | # pdm
242 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
243 | #pdm.lock
244 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
245 | # in version control.
246 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
247 | .pdm.toml
248 | .pdm-python
249 | .pdm-build/
250 |
251 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
252 | __pypackages__/
253 |
254 | # Celery stuff
255 | celerybeat-schedule
256 | celerybeat.pid
257 |
258 | # SageMath parsed files
259 | *.sage.py
260 |
261 | # Environments
262 | .env
263 | .venv
264 | env/
265 | venv/
266 | ENV/
267 | env.bak/
268 | venv.bak/
269 |
270 | # Spyder project settings
271 | .spyderproject
272 | .spyproject
273 |
274 | # Rope project settings
275 | .ropeproject
276 |
277 | # mkdocs documentation
278 | /site
279 |
280 | # mypy
281 | .mypy_cache/
282 | .dmypy.json
283 | dmypy.json
284 |
285 | # Pyre type checker
286 | .pyre/
287 |
288 | # pytype static type analyzer
289 | .pytype/
290 |
291 | # Cython debug symbols
292 | cython_debug/
293 |
294 | # PyCharm
295 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
296 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
297 | # and can be added to the global gitignore or merged into this file. For a more nuclear
298 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
299 | #.idea/
300 |
301 | # PyPI configuration file
302 | .pypirc
303 |
304 |
305 | .DS_Store
306 | .DS_Store?
307 | desktop/src-tauri/target/*
308 | desktop/src-tauri/python/python-bundle
309 | community/marketplace.db
310 | community/marketplace.db
311 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Stage 1: Build the Vite application
2 | FROM node:20-alpine as builder
3 |
4 | WORKDIR /app
5 | COPY app/package.json app/package-lock.json* ./
6 | RUN npm install
7 | COPY app/ .
8 | RUN npm run build
9 |
10 | # Stage 2: Nginx + Python Proxy ONLY
11 | FROM nginx:alpine
12 |
13 | # Install Python, pip, openssl (for cert generation), supervisor
14 | # NO curl, NO gcompat, NO Ollama installation here
15 | RUN apk add --no-cache python3 py3-pip openssl supervisor
16 |
17 | # --- Nginx Setup ---
18 | COPY --from=builder /app/dist /usr/share/nginx/html
19 | EXPOSE 80
20 |
21 | # --- Python Proxy Setup ---
22 | RUN mkdir -p /opt/observer-ollama /var/log/supervisor
23 | COPY ./observer-ollama /opt/observer-ollama/
24 | WORKDIR /opt/observer-ollama
25 | RUN pip3 install --break-system-packages .
26 | EXPOSE 3838
27 |
28 | # --- Tell user webpage is available ---
29 | COPY ./print_info.sh /usr/local/bin/print_info.sh
30 | RUN chmod +x /usr/local/bin/print_info.sh
31 |
32 | # --- Supervisor Setup ---
33 | COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
34 | WORKDIR /
35 |
36 | # Command to run Supervisor
37 | CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
38 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 Roy Medina
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Observer AI 👁️
2 |
3 | [Observer App Link](https://app.observer-ai.com/)
4 |
5 | - [Support me and the project!](https://buymeacoffee.com/roy3838)
6 |
7 | An open-source platform for running local AI agents that enhance your computing experience while preserving privacy.
8 |
9 |
10 | [](https://roy3838.github.io/observer-ai)
11 | [](LICENSE)
12 |
13 | Demo:
14 |
15 | https://github.com/user-attachments/assets/def0cba9-c8c3-41d3-bd03-a507744e6ade
16 |
17 |
18 |
19 |
20 |
21 | Key Features
22 |
23 | - 🔒 Privacy First: All processing happens locally on your machine
24 | - 💻 Resource Efficient: Take advantage of unused consumer-grade hardware
25 | - 🔌 Extensible: Easy-to-use framework for creating and sharing custom agents
26 | - 🤝 Community Driven: Growing ecosystem of community-created agents
27 | - 🐍 Jupyter Server Support: Run Python agents with system-level access
28 |
29 | |
30 |
31 |
32 | |
33 |
34 |
35 |
36 | ## 🚀 Getting Started with Local Inference
37 |
38 | We need to wrap Ollama to use https instead of http so that the browser can connect to it. This is done with self-signed SSL certificates.
39 |
40 | ```bash
41 | # Make sure to have [Ollama](https://ollama.com) installed
42 |
43 | # For local inference run observer-ollama
44 | pip install observer-ollama
45 |
46 | # Click on the link provided so that your browser accepts self-signed certs (signed by your computer)
47 |
48 | # OLLAMA-PROXY ready
49 | # ➜ Local: https://localhost:3838/
50 | # ➜ Network: https://10.0.0.138:3838/
51 |
52 | # Click on proceed to localhost (unsafe), if "Ollama is running" shows up, you're done!
53 |
54 | # Go to webapp:
55 | app.observer-ai.com
56 |
57 | # Enter your inference IP (localhost:3838) on the app header.
58 | ```
59 |
60 | # 🏗️ Building Your Own Agent
61 |
62 | Creating your own Observer AI agent is simple and accessible to both beginners and experienced developers.
63 |
64 | ## Quick Start
65 |
66 | 1. Navigate to the Agent Dashboard and click "Create New Agent"
67 | 2. Fill in the "Configuration" tab with basic details (name, description, model, loop interval)
68 | 3. Use a system prompt with input variables! The current input variables that exist are:
69 | * **Screen OCR** ($SCREEN_OCR) Captures screen content as text via OCR (english only for now)
70 | * **Screenshot** ($SCREEN_64) Captures screen as an image for multimodal models
71 | * **Agent Memory** ($MEMORY@agent_id) Accesses agents' stored information
72 | * **Microphone** ($MICROPHONE) Captures the microphone and adds a transcription (english only for now)
73 |
74 | ## Code Tab
75 |
76 | The "Code" tab now offers a notebook-style coding experience where you can choose between JavaScript or Python execution:
77 |
78 | ### JavaScript (Browser-based)
79 |
80 | JavaScript agents run in the browser sandbox, making them ideal for passive monitoring and notifications:
81 |
82 | ```javascript
83 | // Remove Think tags for deepseek model
84 | const cleanedResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
85 |
86 | // Preserve previous memory
87 | const prevMemory = await getMemory();
88 |
89 | // Get time
90 | const now = time();
91 |
92 | // Update memory with timestamp
93 | appendMemory(`[${now}] ${cleanedResponse}`);
94 | ```
95 |
96 | > **Note:** any function marked with `*` takes an `agentId` argument.
97 | > If you omit `agentId`, it defaults to the agent that’s running the code.
98 |
99 | Available utilities include:
100 |
101 | * `time()` – Get the current timestamp
102 | * `pushNotification(title, options)` – Send notifications
103 | * `getMemory(agentId)*` – Retrieve stored memory (defaults to current agent)
104 | * `setMemory(agentId, content)*` – Replace stored memory
105 | * `appendMemory(agentId, content)*` – Add to existing memory
106 | * `startAgent(agentId)*` – Starts an agent
107 | * `stopAgent(agentId)*` – Stops an agent
108 |
109 |
110 | ### Python (Jupyter Server)
111 |
112 | Python agents run on a Jupyter server with system-level access, enabling them to interact directly with your computer:
113 |
114 | ```python
115 | #python <-- don't remove this!
116 | print("Hello World!", response, agentId)
117 |
118 | # Example: Analyze screen content and take action
119 | if "SHUTOFF" in response:
120 | # System level commands can be executed here
121 | import os
122 | # os.system("command") # Be careful with system commands!
123 | ```
124 |
125 | The Python environment receives:
126 | * `response` - The model's output
127 | * `agentId` - The current agent's ID
128 |
129 | ## Example: Command Tracking Agent
130 |
131 | A simple agent that responds to specific commands in the model's output:
132 |
133 | ```javascript
134 | //Clean response
135 | const cleanedResponse = response.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
136 |
137 | //Command Format
138 | if (cleanedResponse.includes("COMMAND")) {
139 | const withoutcommand = cleanedResponse.replace(/COMMAND:/g, '');
140 | setMemory(`${await getMemory()} \n[${time()}] ${withoutcommand}`);
141 | }
142 | ```
143 |
144 | ## Jupyter Server Configuration
145 |
146 | To use Python agents:
147 |
148 | 1. Run a Jupyter server on your machine
149 | 2. Configure the connection in the Observer AI interface:
150 | * Host: The server address (e.g., 127.0.0.1)
151 | * Port: The server port (e.g., 8888)
152 | * Token: Your Jupyter server authentication token
153 | 3. Test the connection using the "Test Connection" button
154 | 4. Switch to the Python tab in the code editor to write Python-based agents
155 |
156 | ## Deploy & Share
157 |
158 | Save your agent, test it from the dashboard, and export the configuration to share with others!
159 |
160 | ## 🤝 Contributing
161 |
162 | We welcome contributions from the community! Here's how you can help:
163 |
164 | 1. Fork the repository
165 | 2. Create your feature branch (`git checkout -b feature/amazing-feature`)
166 | 3. Commit your changes (`git commit -m 'feat: add amazing feature'`)
167 | 4. Push to the branch (`git push origin feature/amazing-feature`)
168 | 5. Open a Pull Request
169 |
170 | ## 📄 License
171 |
172 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
173 |
174 | ## 🔗 Links
175 |
176 | - [Website](https://observer-ai.com)
177 | - [GitHub Repository](https://github.com/Roy3838/Observer)
178 | - [Twitter](https://x.com/AppObserverAI)
179 |
180 | ## 📧 Contact
181 |
182 | - GitHub: [@Roy3838](https://github.com/Roy3838)
183 | - Project Link: [https://github.com/Roy3838/Observer](https://github.com/Roy3838/Observer)
184 |
185 | ---
186 |
187 | Built with ❤️ by the Observer AI Community
188 |
--------------------------------------------------------------------------------
/app/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Observer Web
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/app/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@observer/webapp",
3 | "private": true,
4 | "version": "0.1.0",
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite",
8 | "build": "tsc && vite build",
9 | "preview": "vite preview",
10 | "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
11 | "format": "prettier --write .",
12 | "tauri": "tauri",
13 | "tauri:dev": "tauri dev",
14 | "tauri:build": "tauri build"
15 | },
16 | "dependencies": {
17 | "@auth0/auth0-react": "^2.3.0",
18 | "@codemirror/lang-javascript": "^6.2.3",
19 | "@codemirror/lang-python": "^6.1.7",
20 | "@jupyterlab/services": "^7.3.6",
21 | "@lexical/history": "^0.25.0",
22 | "@lexical/list": "^0.25.0",
23 | "@lexical/react": "^0.25.0",
24 | "@lexical/rich-text": "^0.25.0",
25 | "@lexical/selection": "^0.25.0",
26 | "@tauri-apps/api": "^2.3.0",
27 | "@tinymce/tinymce-react": "^6.0.0",
28 | "@uiw/codemirror-theme-vscode": "^4.23.8",
29 | "@uiw/react-codemirror": "^4.23.8",
30 | "fs": "^0.0.1-security",
31 | "js-yaml": "^4.1.0",
32 | "lexical": "^0.25.0",
33 | "lucide-react": "^0.476.0",
34 | "path": "^0.12.7",
35 | "react": "^18.2.0",
36 | "react-dom": "^18.2.0",
37 | "react-router-dom": "^6.22.1",
38 | "slate": "^0.112.0",
39 | "slate-history": "^0.110.3",
40 | "slate-react": "^0.112.1",
41 | "tesseract.js": "^6.0.0",
42 | "ts-node": "^10.9.2",
43 | "yaml": "^2.7.0"
44 | },
45 | "devDependencies": {
46 | "@tauri-apps/cli": "^2.3.0",
47 | "@types/dom-speech-recognition": "^0.0.6",
48 | "@types/js-yaml": "^4.0.9",
49 | "@types/react": "^18.2.55",
50 | "@types/react-dom": "^18.2.19",
51 | "@typescript-eslint/eslint-plugin": "^6.21.0",
52 | "@typescript-eslint/parser": "^6.21.0",
53 | "@vitejs/plugin-react": "^4.2.1",
54 | "autoprefixer": "^10.4.17",
55 | "esbuild": "0.21.5",
56 | "eslint": "^8.56.0",
57 | "eslint-plugin-react-hooks": "^4.6.0",
58 | "eslint-plugin-react-refresh": "^0.4.5",
59 | "postcss": "^8.4.35",
60 | "prettier": "^3.2.5",
61 | "rollup-plugin-visualizer": "^5.14.0",
62 | "tailwindcss": "^3.4.1",
63 | "typescript": "~5.3.3",
64 | "vite": "^5.1.0"
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/app/postcss.config.js:
--------------------------------------------------------------------------------
1 | export default {
2 | plugins: {
3 | tailwindcss: {},
4 | autoprefixer: {},
5 | },
6 | }
7 |
--------------------------------------------------------------------------------
/app/public/agent-activity_tracking_agent.yaml:
--------------------------------------------------------------------------------
1 | id: activity_tracking_agent
2 | name: Activity Tracking Agent
3 | description: This agent tracks your activity.
4 | status: stopped
5 | model_name: deepseek-r1:8b
6 | loop_interval_seconds: 25
7 | system_prompt: |
8 | You are an activity tracking agent, watch the screen and respond with what the user is doing.
9 | Just respond with one sentence as the following:
10 |
11 | ACTIVITY: activity the user is doing
12 |
13 |
14 | $SCREEN_OCR
15 |
16 |
17 |
18 | Just respond with that one sentence.
19 |
20 | ACTIVITY: say what the user is doing
21 | code: |
22 | //ACTIVITY
23 | function(line) {
24 | const currentMemory = utilities.getAgentMemory(agentId);
25 | const time = utilities.getCurrentTime();
26 | currentMemory.then(memory => {
27 | utilities.updateAgentMemory(agentId, memory + "\n" + "[" + time + "] " + line);
28 | });
29 | }
30 | memory: |
31 |
32 | [ 10:27 pm ]The user is engaged in multiple activities involving screen sharing, code documentation, focus tracking, and memory summarization.
33 | [ 10:28 pm ]The user is configuring their network settings or checking their IP address via related applications.
34 | [ 10:28 pm ]The user is repeatedly sending POST requests to "/api/generate" and also made a GET request to "/api/tags".
35 | [ 10:29 pm ]interact with tracking agents via commands and monitor system logs.
36 | [ 10:29 pm ]sending repeated POST requests to "/api/generate" and making a GET request to "/api/tags".
37 | [ 10:30 pm ]The user is working on a project, running Git commands, and setting up a screen share for a presentation or demo.
38 | [ 10:30 pm ]The user is running `npm run build`, compiling TypeScript files, and updating a GitHub repository while setting up a remote screen session for development work.
39 | [ 10:30 pm ]Compiling a TypeScript web application using Vite and running a build command.
40 | [ 10:31 pm ]Running npm build command in the app directory and starting a screen session for development.
41 | [ 10:31 pm ]The user is building a React application and performing remote development work.
42 | [ 10:32 pm ]The user is developing a web-based application using React and TypeScript, managing code changes with Git, and running builds with Vite.
43 | [ 10:32 pm ]The user is running a build process for their web application using npm, TypeScript, and Vite.
44 | [ 10:32 pm ]Running npm commands to build a web application and managing screen sessions.
45 | [ 10:33 pm ]The user is running build commands and working on a web application using Vite.
46 | [ 10:33 pm ]Building the application and starting a screen session to serve it remotely.
47 | [ 10:34 pm ]Compiling code and running build commands, then starting a screen session.
48 | [ 10:34 pm ]Updating Git repository, running build command, and starting a screen session.
49 | [ 10:35 pm ]The user is running commands related to building and serving a web application, including Git operations and Vite compilation.
50 | [ 10:35 pm ]View system logs using a network monitoring tool.
51 | [ 10:36 pm ]configure network settings and view system logs
52 | [ 10:36 pm ]the user is interacting with system logs via commands.
53 | [ 10:37 pm ]The user is checking their public IP address using a command in the terminal or command line interface.
54 | [ 10:37 pm ]using multiple applications such as Chrome, Discord, and a terminal where they are typing commands like "Show System Logs."
55 | [ 10:38 pm ]The user is accessing network settings and viewing system logs on their device.
56 | [ 10:38 pm ]viewing system logs and managing network settings.
57 | [2:37 pm] Updating application code to handle server connections and configuration changes.
58 | [2:38 pm] The user is running build commands, updating the Git repository, configuring network settings, and viewing system logs as they work on a web application using Vite.
59 | [4:56 pm] The user is interacting with multiple AI tracking agents and attempting to manage screen sharing settings.
60 | [4:56 pm] The user is actively engaged on Telegram, managing multiple groups, interacting with others, and sharing various content including personal updates and YouTube links.
61 | [4:57 pm] The user is communicating via Telegram with others, sending messages, and engaging in conversations.
62 | [4:57 pm] Using Telegram to communicate with others, including sending messages and joining groups.
63 | [4:57 pm] Contributing to an open-source project on GitHub by forking repositories and setting up feature branches.
64 | [4:58 pm] launching system logs viewer to inspect log files and monitor system activity.
65 | [5:02 pm] using various software applications and tracking tools, possibly for design, documentation, and automation purposes.
66 | [5:02 pm] Configuring activity tracking systems using various agents and applications.
67 | [9:44 pm] The user is working on code and interacting with activity tracking agents that assist in managing focus and summarizing tasks.
68 | [9:44 pm] The user is actively working on a web application using Vite, managing server connections, configuration changes, and Git repositories, while also interacting with various AI tracking agents and communication tools like Telegram.
--------------------------------------------------------------------------------
/app/public/agent-command_tracking_agent.yaml:
--------------------------------------------------------------------------------
1 | id: command_tracking_agent
2 | name: Command Tracking Agent
3 | description: This agent looks at the screen and tracks all the commands you use.
4 | status: stopped
5 | model_name: deepseek-r1:8b
6 | loop_interval_seconds: 30
7 | system_prompt: |
8 | You are a command tracking assistant. Monitor the screen and identify any commands being run by the user.
9 |
10 | Look for terminal/console windows and command prompts.
11 |
12 | Simply respond with:
13 |
14 | COMMAND: the command that was executed
15 |
16 | Examples:
17 |
18 | COMMAND: git push origin main
19 |
20 | COMMAND: npm install react
21 |
22 | COMMAND: python script.py
23 |
24 |
25 | Only report when you see a new command being executed.
26 |
27 | Ignore repeated commands and command output.
28 |
29 | Focus on actual commands, not general terminal text or prompts.
30 | code: |
31 | //COMMAND
32 | function(line) {
33 | const currentMemory = utilities.getAgentMemory(agentId);
34 | const time = utilities.getCurrentTime();
35 | currentMemory.then(memory => {
36 | utilities.updateAgentMemory(agentId, memory + "\n" + "[ " + time + " ]" + line);
37 | });
38 | }
39 | memory: ""
40 |
--------------------------------------------------------------------------------
/app/public/agent-documentation_agent.yaml:
--------------------------------------------------------------------------------
1 | id: documentation_agent
2 | name: Code Documentation Agent
3 | description: This agent watches your screen and, if there is code, it will document the code in the background
4 | status: stopped
5 | model_name: deepseek-r1:8b
6 | loop_interval_seconds: 60
7 | system_prompt: |
8 | You are a Documentation Generator agent that observes code being written and automatically drafts documentation.
9 |
10 | When you see code on screen, analyze it and generate appropriate documentation in a concise, professional style.
11 |
12 | Focus on:
13 | 1. Function purpose and behavior
14 | 2. Parameters and return values
15 | 3. Dependencies and side effects
16 | 4. Usage examples when helpful
17 |
18 | Respond only with the following format:
19 | DOCGEN: [Brief description of what documentation was generated]
20 |
21 | Existing Documentation:
22 | $MEMORY@documentation_agent
23 |
24 |
25 | $SCREEN_OCR
26 |
27 |
28 | DOCGEN: [Function/class name]: [1-2 sentence description of purpose]
29 | code: |
30 | //DOCGEN
31 | function(line) {
32 | const currentMemory = utilities.getAgentMemory(agentId);
33 | currentMemory.then(memory => {
34 | utilities.updateAgentMemory(agentId, memory + "\n" + line);
35 | });
36 | }
37 | memory: |
38 |
39 | **documentCodeInBackground**
40 | documentCodeInBackground - This function monitors network activity, specifically tracking an IP address and data usage via a Synology router, gathering information from endpoints like "app.observer-ai.com" and compiling statistics over 300 seconds using the deepseek-r1:8b AI model.
--------------------------------------------------------------------------------
/app/public/agent-focus_tracker.yaml:
--------------------------------------------------------------------------------
1 | id: focus_tracker
2 | name: Focus Tracking Assistant
3 | description: This assistant compiles a list of time spent in certain applications
4 | status: stopped
5 | model_name: deepseek-r1:8b
6 | loop_interval_seconds: 60
7 | system_prompt: |
8 | You are a Focus Tracker agent that monitors application switching frequency and duration.
9 | Your job is to observe the screen, identify which applications are being used, and register use time of applications.
10 | Respond only with the following format:
11 | FOCUSSTAT: [Brief description of new activity]
12 |
13 | These are the user's previous activities. If the last activity is similar to the current activity, don't respond with the command.
14 |
15 | $MEMORY@focus_tracker
16 |
17 |
18 | $SCREEN_OCR
19 |
20 |
21 | FOCUSSTAT: [Concise observation about the new activity]
22 | Remember: only use the command when the activity is new.
23 | code: |
24 | //FOCUSSTAT
25 | function(line) {
26 | const currentMemory = utilities.getAgentMemory(agentId);
27 | const time = utilities.getCurrentTime();
28 | currentMemory.then(memory => {
29 | utilities.updateAgentMemory(agentId, memory + "\n" + time + line);
30 | });
31 | }
32 | memory: ""
33 |
--------------------------------------------------------------------------------
/app/public/agent-memory_summarization.yaml:
--------------------------------------------------------------------------------
1 | id: memory_summarization
2 | name: Memory Summarization Agent
3 | description: This agent reads another agent's memories and summarizes them so that context window length doesn't become a problem.
4 | status: stopped
5 | model_name: deepseek-r1:8b
6 | loop_interval_seconds: 300
7 | system_prompt: |
8 | Hi deepseek please summarize the text below:
9 |
10 | $MEMORY@activity_tracking_agent
11 |
12 | respond with
13 | SUMMARY: summary of text you saw
14 | code: |
15 | //SUMMARY
16 | function(line) {
17 | utilities.updateAgentMemory(agentId, line);
18 | }
19 | memory: |
20 | The user is actively developing a React application using TypeScript and Vite, engaging in remote work by screen sharing, setting up build commands with npm, updating GitHub repositories, and configuring network settings. They are interacting with AI tracking agents for task management, communicating via Telegram, contributing to open-source projects, and monitoring system logs while working on their web-based application.
--------------------------------------------------------------------------------
/app/public/eye-logo-black.svg:
--------------------------------------------------------------------------------
1 |
5 |
--------------------------------------------------------------------------------
/app/src-tauri/.gitignore:
--------------------------------------------------------------------------------
1 | # Generated by Cargo
2 | # will have compiled files and executables
3 | /target/
4 | /gen/schemas
5 |
--------------------------------------------------------------------------------
/app/src-tauri/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "app"
3 | version = "0.1.0"
4 | description = "A Tauri App"
5 | authors = ["you"]
6 | license = ""
7 | repository = ""
8 | edition = "2021"
9 | rust-version = "1.77.2"
10 |
11 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
12 |
13 | [lib]
14 | name = "app_lib"
15 | crate-type = ["staticlib", "cdylib", "rlib"]
16 |
17 | [build-dependencies]
18 | tauri-build = { version = "2.0.5", features = [] }
19 |
20 | [dependencies]
21 | serde_json = "1.0"
22 | serde = { version = "1.0", features = ["derive"] }
23 | log = "0.4"
24 | tauri = { version = "2.3.0", features = [] }
25 | tauri-plugin-log = "2.0.0-rc"
26 |
27 | screenshots = "0.8.5"
28 | base64 = "0.21.0"
29 | image = "0.24.6"
30 |
--------------------------------------------------------------------------------
/app/src-tauri/build.rs:
--------------------------------------------------------------------------------
1 | fn main() { // Build script: generates Tauri's compile-time context (config, icons, capabilities).
2 | tauri_build::build()
3 | }
4 |
--------------------------------------------------------------------------------
/app/src-tauri/capabilities/default.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "../gen/schemas/desktop-schema.json",
3 | "identifier": "default",
4 | "description": "enables the default permissions",
5 | "windows": [
6 | "main"
7 | ],
8 | "permissions": [
9 | "core:default"
10 | ]
11 | }
12 |
--------------------------------------------------------------------------------
/app/src-tauri/icons/128x128.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/128x128.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/128x128@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/128x128@2x.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/32x32.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/Square107x107Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/Square107x107Logo.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/Square142x142Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/Square142x142Logo.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/Square150x150Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/Square150x150Logo.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/Square284x284Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/Square284x284Logo.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/Square30x30Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/Square30x30Logo.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/Square310x310Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/Square310x310Logo.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/Square44x44Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/Square44x44Logo.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/Square71x71Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/Square71x71Logo.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/Square89x89Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/Square89x89Logo.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/StoreLogo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/StoreLogo.png
--------------------------------------------------------------------------------
/app/src-tauri/icons/icon.icns:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/icon.icns
--------------------------------------------------------------------------------
/app/src-tauri/icons/icon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/icon.ico
--------------------------------------------------------------------------------
/app/src-tauri/icons/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src-tauri/icons/icon.png
--------------------------------------------------------------------------------
/app/src-tauri/src/lib.rs:
--------------------------------------------------------------------------------
1 | use screenshots::Screen;
2 | use std::io::Cursor;
3 | use base64::{Engine as _, engine::general_purpose};
4 | use image::ImageEncoder;
5 | #[cfg_attr(mobile, tauri::mobile_entry_point)]
6 | pub fn run() { // App entry point: builds the Tauri runtime, registers plugins/commands, blocks until exit.
7 | tauri::Builder::default()
8 | .setup(|app| {
9 | if cfg!(debug_assertions) { // Attach the log plugin only in debug builds.
10 | app.handle().plugin(
11 | tauri_plugin_log::Builder::default()
12 | .level(log::LevelFilter::Info) // Info and above; raise to Debug for verbose tracing.
13 | .build(),
14 | )?;
15 | }
16 | Ok(())
17 | })
18 | .invoke_handler(tauri::generate_handler![capture_screen]) // Exposes `capture_screen` to the frontend via invoke().
19 | .run(tauri::generate_context!())
20 | .expect("error while running tauri application"); // Panics if the event loop fails to start.
21 | }
22 |
23 | #[tauri::command]
24 | fn capture_screen() -> Result {
25 | // Get all screens
26 | let screens = Screen::all().map_err(|e| e.to_string())?;
27 | if screens.is_empty() {
28 | return Err("No screens found".into());
29 | }
30 |
31 | // Capture the first screen
32 | let screen = &screens[0];
33 | let image = screen.capture().map_err(|e| e.to_string())?;
34 |
35 | // Convert to bytes and then to base64
36 | let mut buffer = Vec::new();
37 | image.write_to(&mut Cursor::new(&mut buffer), image::ImageFormat::Png)
38 | .map_err(|e| e.to_string())?;
39 |
40 | let base64_img = general_purpose::STANDARD.encode(&buffer);
41 | Ok(base64_img)
42 | }
43 |
--------------------------------------------------------------------------------
/app/src-tauri/src/main.rs:
--------------------------------------------------------------------------------
1 | // Prevents additional console window on Windows in release, DO NOT REMOVE!!
2 | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
3 |
4 | fn main() { // Desktop entry point: all application logic lives in the shared library crate (src/lib.rs).
5 | app_lib::run();
6 | }
7 |
--------------------------------------------------------------------------------
/app/src-tauri/tauri.conf.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "../node_modules/@tauri-apps/cli/config.schema.json",
3 | "productName": "Observer",
4 | "version": "0.1.0",
5 | "identifier": "com.tauri.dev",
6 | "build": {
7 | "frontendDist": "../dist",
8 | "devUrl": "http://localhost:3001",
9 | "beforeDevCommand": "npm run dev",
10 | "beforeBuildCommand": "npm run build"
11 | },
12 | "app": {
13 | "windows": [
14 | {
15 | "title": "Observer",
16 | "width": 800,
17 | "height": 600,
18 | "resizable": true,
19 | "fullscreen": false
20 | }
21 | ],
22 | "security": {
23 | "csp": null
24 | }
25 | },
26 | "bundle": {
27 | "active": true,
28 | "targets": "all",
29 | "icon": [
30 | "icons/32x32.png",
31 | "icons/128x128.png",
32 | "icons/128x128@2x.png",
33 | "icons/icon.icns",
34 | "icons/icon.ico"
35 | ]
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/app/src/components/AgentImportHandler.tsx:
--------------------------------------------------------------------------------
1 | import { useState } from 'react';
2 | import { PlusCircle, RotateCw, Sparkles } from 'lucide-react';
3 | import { CompleteAgent } from '@utils/agent_database'; // Fixed import path
4 | import GenerateAgentModal from './GenerateAgentModal';
5 |
6 | interface ImportResult {
7 | filename: string;
8 | success: boolean;
9 | agent?: CompleteAgent;
10 | error?: string;
11 | }
12 |
13 | interface AgentImportHandlerProps {
14 | onAddAgent: () => void;
15 | agentCount: number;
16 | activeAgentCount: number;
17 | isRefreshing: boolean;
18 | onRefresh: () => void;
19 | }
20 |
21 | const AgentImportHandler = ({
22 | onAddAgent,
23 | agentCount,
24 | activeAgentCount,
25 | isRefreshing,
26 | onRefresh
27 | }: AgentImportHandlerProps) => {
28 | const [importStatus] = useState<{ inProgress: boolean; results: ImportResult[] }>({
29 | inProgress: false,
30 | results: []
31 | });
32 | const [isModalOpen, setIsModalOpen] = useState(false);
33 | // Unused variable removed
34 |
35 | // MAKES EDITING AGENTS UNUSABLE
36 | //useEffect(() => {
37 | // const interval = setInterval(() => {
38 | // if (!isRefreshing) {
39 | // onRefresh();
40 | // }
41 | // }, 1000); // Refresh every second
42 | // return () => clearInterval(interval); // Cleanup on unmount
43 | //}, [isRefreshing, onRefresh]); // Dependencies
44 |
45 | return (
46 | <>
47 |
48 |
49 |
52 |
Active: {activeAgentCount} / Total: {agentCount}
53 |
54 |
55 |
56 |
65 |
66 |
73 |
74 |
75 |
76 |
77 | {importStatus.results.length > 0 && (
78 |
79 |
Import Results:
80 |
81 | {importStatus.results.map((result, index) => (
82 | -
83 | {result.filename}: {result.success ? 'Success' : `Failed - ${result.error}`}
84 |
85 | ))}
86 |
87 |
88 | )}
89 |
90 | setIsModalOpen(false)} />
91 | >
92 | );
93 | };
94 |
95 | export default AgentImportHandler;
96 |
--------------------------------------------------------------------------------
/app/src/components/AvailableModels.tsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect } from 'react';
2 | import { listModels, Model } from '@utils/ollamaServer'; // Import updated Model interface
3 | import { Cpu, RefreshCw, Eye } from 'lucide-react'; // <-- Import Eye icon
4 | import { Logger } from '@utils/logging';
5 | import { getOllamaServerAddress } from '@utils/main_loop';
6 |
7 | // No need to redefine Model interface here if imported correctly
8 |
9 | const AvailableModels: React.FC = () => {
10 | const [models, setModels] = useState([]);
11 | const [loading, setLoading] = useState(true);
12 | const [error, setError] = useState(null);
13 | const [refreshing, setRefreshing] = useState(false);
14 |
15 | const fetchModels = async () => {
16 | setLoading(true);
17 | setError(null);
18 |
19 | try {
20 | Logger.info('MODELS', 'Fetching available models from server');
21 | const { host, port } = getOllamaServerAddress();
22 | Logger.info('MODELS', `Using server address: ${host}:${port}`);
23 |
24 | const response = await listModels(host, port); // Uses updated listModels
25 |
26 | if (response.error) {
27 | throw new Error(response.error);
28 | }
29 |
30 | setModels(response.models);
31 | Logger.info('MODELS', `Successfully loaded ${response.models.length} models`);
32 | } catch (err) {
33 | const errorMessage = err instanceof Error ? err.message : String(err);
34 | setError(errorMessage);
35 | Logger.error('MODELS', `Failed to fetch models: ${errorMessage}`);
36 | } finally {
37 | setLoading(false);
38 | setRefreshing(false);
39 | }
40 | };
41 |
42 | useEffect(() => {
43 | fetchModels();
44 | }, []);
45 |
46 | const handleRefresh = () => {
47 | setRefreshing(true);
48 | fetchModels();
49 | };
50 |
51 | if (loading && !refreshing) {
52 | // ... (loading state remains the same)
53 | return (
54 |
55 |
56 |
57 |
58 |
Loading available models...
59 |
60 | );
61 | }
62 |
63 | return (
64 |
65 |
66 |
Available Models
67 |
79 |
80 |
81 | {error ? (
82 | // ... (error display remains the same)
83 |
84 |
Error: {error}
85 |
86 | Check that your server is running properly and try again.
87 |
88 |
89 | ) : models.length === 0 ? (
90 | // ... (no models display remains the same)
91 |
92 |
No models found on the server.
93 |
94 | Ensure that your server is properly configured and has models available.
95 |
96 |
97 | ) : (
98 |
99 | {models.map((model) => (
100 |
104 |
105 |
106 |
107 |
{model.name}
108 | {/* Container for parameter size and multimodal icon */}
109 |
110 | {model.parameterSize && model.parameterSize !== "N/A" && (
111 |
112 | {model.parameterSize}
113 |
114 | )}
115 | {/* Conditionally render the Eye icon if multimodal is true */}
116 | {model.multimodal && (
117 |
118 |
119 | Vision
120 |
121 | )}
122 |
123 |
124 |
125 |
126 | ))}
127 |
128 | )}
129 |
130 | {/* ... (footer text remains the same) ... */}
131 |
132 |
133 | These models are available on your configured model server.
134 | You can use them in your agents by specifying their name.
135 |
136 |
137 |
138 | );
139 | };
140 |
141 | export default AvailableModels;
142 |
--------------------------------------------------------------------------------
/app/src/components/EditAgent/Modal.tsx:
--------------------------------------------------------------------------------
1 | // src/components/EditAgent/Modal.tsx
2 | import React, { ReactNode, useEffect } from 'react';
3 | import ReactDOM from 'react-dom';
4 |
5 | interface ModalProps {
6 | open: boolean;
7 | onClose: () => void;
8 | children: ReactNode;
9 | className?: string; // width / height / flex etc.
10 | backdropClassName?: string;
11 | }
12 |
13 | let openCount = 0; // track how many modals are mounted for scroll-lock
14 |
15 | const Modal: React.FC = ({
16 | open,
17 | onClose,
18 | children,
19 | className = '',
20 | backdropClassName = 'bg-black/50'
21 | }) => {
22 | // 1. Early exit
23 | if (!open) return null;
24 |
25 | // 2. Target element for the portal
26 | const portalTarget =
27 | document.getElementById('modal-root') ?? document.body;
28 |
29 | // 3. Close on
30 | useEffect(() => {
31 | const handler = (e: KeyboardEvent) => {
32 | if (e.key === 'Escape') onClose();
33 | };
34 | window.addEventListener('keydown', handler);
35 | return () => window.removeEventListener('keydown', handler);
36 | }, [onClose]);
37 |
38 | // 4. Disable body scroll while any modal is open
39 | useEffect(() => {
40 | openCount += 1;
41 | document.body.classList.add('overflow-hidden');
42 | return () => {
43 | openCount -= 1;
44 | if (openCount === 0) document.body.classList.remove('overflow-hidden');
45 | };
46 | }, []);
47 |
48 | // 5. Backdrop click only when the *originating* target *is* the backdrop
49 | const handleBackdropMouseDown = (e: React.MouseEvent) => {
50 | if (e.target === e.currentTarget) onClose();
51 | };
52 |
53 | return ReactDOM.createPortal(
54 |
58 |
e.stopPropagation()}
62 | >
63 | {children}
64 |
65 |
,
66 | portalTarget
67 | );
68 | };
69 |
70 | export default Modal;
71 |
72 |
--------------------------------------------------------------------------------
/app/src/components/ErrorDisplay.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 |
3 | interface ErrorDisplayProps {
4 | message: string;
5 | }
6 |
7 | const ErrorDisplay: React.FC = ({ message }) => {
8 | return (
9 |
10 | {message}
11 |
12 | );
13 | };
14 |
15 | export default ErrorDisplay;
16 |
--------------------------------------------------------------------------------
/app/src/components/GetStarted.tsx:
--------------------------------------------------------------------------------
1 | // src/components/GetStarted.tsx
2 | import React, { useState } from 'react';
3 | import { Plus, Users, Sparkles, Terminal, Code } from 'lucide-react';
4 | import GenerateAgent from './GenerateAgent';
5 |
6 | // Fixed model for GetStarted page
7 | const FIXED_MODEL = 'gemini-2.5-flash-preview-04-17';
8 |
9 | interface GetStartedProps {
10 | onExploreCommunity: () => void;
11 | onCreateNewAgent: () => void;
12 | }
13 |
14 | const GetStarted: React.FC = ({
15 | onExploreCommunity,
16 | onCreateNewAgent,
17 | }) => {
18 | const [showAiGenerator, setShowAiGenerator] = useState(false);
19 | const [agentType, setAgentType] = useState<'browser' | 'python'>('browser');
20 |
21 |
22 | return (
23 |
24 |
25 |
Welcome to Observer AI
26 |
27 |
28 |
29 |
40 |
51 |
52 |
53 |
54 |
55 | {agentType === 'browser' ? (
56 | null
57 | ) : (
58 |
59 | Requires Jupyter server setup
60 |
61 | )}
62 |
63 |
64 | {/* AI Agent Generator Section */}
65 |
66 |
67 |
68 |
69 |
70 | {agentType === 'browser' ? 'Create AI Browser Agent' : 'Create AI System Agent'}
71 |
72 |
73 |
74 |
75 |
76 | {showAiGenerator ? (
77 |
78 | ) : (
79 |
80 | setShowAiGenerator(true)}
88 | readOnly
89 | />
90 |
97 |
98 | )}
99 |
100 |
101 |
102 | {/* Browse Community and Create Custom options */}
103 |
104 |
108 |
109 |
110 |
111 |
Community Agents
112 |
113 |
114 |
118 |
121 |
Custom Agent
122 |
123 |
124 |
125 |
126 | );
127 | };
128 |
129 | export default GetStarted;
130 |
--------------------------------------------------------------------------------
/app/src/components/LocalServerSetupDialog.tsx:
--------------------------------------------------------------------------------
1 | import { useEffect } from 'react';
2 | import { Terminal, CheckCircle2, XCircle, LoaderCircle, ArrowRight, ArrowLeft } from 'lucide-react';
3 | import { checkOllamaServer } from '@utils/ollamaServer';
4 |
5 | interface LocalServerSetupDialogProps {
6 | serverStatus: 'unchecked' | 'online' | 'offline';
7 | setServerStatus: (status: 'unchecked' | 'online' | 'offline') => void;
8 | onDismiss: () => void;
9 | onBack: () => void;
10 | }
11 |
12 | const LocalServerSetupDialog = ({
13 | serverStatus,
14 | setServerStatus,
15 | onDismiss,
16 | onBack
17 | }: LocalServerSetupDialogProps) => {
18 | useEffect(() => {
19 | const checkStatus = async () => {
20 | const result = await checkOllamaServer('localhost', '3838');
21 | setServerStatus(result.status === 'online' ? 'online' : 'offline');
22 | };
23 |
24 | const timer = setTimeout(checkStatus, 500);
25 | return () => clearTimeout(timer);
26 | }, [setServerStatus]);
27 |
28 | const StatusIcon = {
29 | online: CheckCircle2,
30 | offline: XCircle,
31 | unchecked: LoaderCircle
32 | }[serverStatus];
33 |
34 | return (
35 |
36 |
37 |
38 |
39 |
40 |
Set Up Local Server
41 |
42 |
49 |
50 |
51 |
52 | Follow these steps to set up your own Observer inference server on your local machine.
53 |
54 |
55 |
56 |
57 |
58 |
59 | 1
60 |
61 |
Install Ollama
62 |
63 |
64 | Install Ollama from ollama.com
65 |
66 |
67 |
68 |
69 |
70 |
71 | 2
72 |
73 |
Install Observer-Ollama
74 |
75 |
76 | pip install observer-ollama
77 |
78 |
79 |
80 |
81 |
82 |
83 | 3
84 |
85 |
Run Observer-Ollama
86 |
87 |
88 | observer-ollama
89 |
90 |
91 |
92 |
93 |
94 |
95 | 4
96 |
97 |
Accept Certificates
98 |
99 |
100 |
101 | Click the link in terminal:
102 |
103 |
104 | https://localhost:3838
105 |
106 |
107 |
108 |
109 |
110 |
111 | 5
112 |
113 |
Connect to Your Server
114 |
115 |
116 | Enter your local inference server address in the field above.
117 |
118 |
119 |
120 |
121 |
122 |
126 |
127 | {serverStatus === 'online' ? 'Connected successfully' :
128 | serverStatus === 'offline' ? 'Connection failed' :
129 | 'Checking connection...'}
130 |
131 |
132 |
133 | {serverStatus === 'offline' && (
134 |
135 |
Unable to connect to the Ollama server. Please verify:
136 |
137 | - Ollama is running on your system
138 | - Run "observer-ollama" in terminal
139 | - Server address is correct
140 |
141 |
142 | )}
143 |
144 |
145 |
152 |
153 |
160 |
161 |
162 |
163 | );
164 | };
165 |
166 | export default LocalServerSetupDialog;
167 |
--------------------------------------------------------------------------------
/app/src/components/MemoryManager.tsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect, useRef } from 'react';
2 | import { X, Save, Trash2 } from 'lucide-react';
3 | import { getAgentMemory, updateAgentMemory } from '@utils/agent_database';
4 | import { Logger } from '@utils/logging';
5 |
6 | // Create a custom event for memory updates
7 | export const MEMORY_UPDATE_EVENT = 'agent-memory-update';
8 |
9 | // Dispatch a memory update event
10 | export function dispatchMemoryUpdate(agentId: string) {
11 | const event = new CustomEvent(MEMORY_UPDATE_EVENT, {
12 | detail: { agentId }
13 | });
14 | window.dispatchEvent(event);
15 | }
16 |
17 | interface MemoryManagerProps {
18 | agentId: string;
19 | agentName: string;
20 | isOpen: boolean;
21 | onClose: () => void;
22 | }
23 |
24 | const MemoryManager: React.FC = ({
25 | agentId,
26 | agentName,
27 | isOpen,
28 | onClose
29 | }) => {
30 | const [memory, setMemory] = useState('');
31 | const [savedMemory, setSavedMemory] = useState('');
32 | const [isSaving, setIsSaving] = useState(false);
33 | const [isClearing, setIsClearing] = useState(false);
34 | const [error, setError] = useState(null);
35 |
36 | // Polling interval for memory updates (in milliseconds)
37 | const pollingInterval = 2000;
38 | const pollingTimer = useRef(null);
39 |
40 | // Start polling for memory updates when the component is open
41 | useEffect(() => {
42 | if (isOpen) {
43 | // Initial load
44 | loadMemory();
45 |
46 | // Set up polling for updates
47 | pollingTimer.current = window.setInterval(() => {
48 | loadMemory(false); // Silent update (no logging for routine checks)
49 | }, pollingInterval);
50 |
51 | // Clean up interval on unmount or when closing
52 | return () => {
53 | if (pollingTimer.current !== null) {
54 | window.clearInterval(pollingTimer.current);
55 | pollingTimer.current = null;
56 | }
57 | };
58 | }
59 | }, [isOpen, agentId]);
60 |
61 | // Listen for memory update events
62 | useEffect(() => {
63 | const handleMemoryUpdate = (event: CustomEvent) => {
64 | if (event.detail.agentId === agentId && isOpen) {
65 | loadMemory(false); // Silent update
66 | }
67 | };
68 |
69 | // Add event listener
70 | window.addEventListener(MEMORY_UPDATE_EVENT, handleMemoryUpdate as EventListener);
71 |
72 | // Clean up
73 | return () => {
74 | window.removeEventListener(MEMORY_UPDATE_EVENT, handleMemoryUpdate as EventListener);
75 | };
76 | }, [agentId, isOpen]);
77 |
78 | const loadMemory = async (logActivity = true) => {
79 | try {
80 | setError(null);
81 | const agentMemory = await getAgentMemory(agentId);
82 |
83 | // Skip update if memory hasn't changed
84 | if (agentMemory === savedMemory && memory === savedMemory) {
85 | return;
86 | }
87 |
88 | setMemory(agentMemory);
89 | setSavedMemory(agentMemory);
90 |
91 | if (logActivity) {
92 | Logger.debug(agentId, `Memory loaded (${agentMemory.length} characters)`);
93 | }
94 | } catch (err) {
95 | const errorMessage = err instanceof Error ? err.message : 'Unknown error';
96 | setError(`Failed to load memory: ${errorMessage}`);
97 | Logger.error(agentId, `Failed to load memory: ${errorMessage}`, err);
98 | }
99 | };
100 |
101 | const handleSave = async () => {
102 | try {
103 | setError(null);
104 | setIsSaving(true);
105 | await updateAgentMemory(agentId, memory);
106 | setSavedMemory(memory);
107 | Logger.debug(agentId, `Memory saved (${memory.length} characters)`);
108 | } catch (err) {
109 | const errorMessage = err instanceof Error ? err.message : 'Unknown error';
110 | setError(`Failed to save memory: ${errorMessage}`);
111 | Logger.error(agentId, `Failed to save memory: ${errorMessage}`, err);
112 | } finally {
113 | setIsSaving(false);
114 | }
115 | };
116 |
117 | const handleClear = async () => {
118 | if (window.confirm(`Are you sure you want to clear the memory for agent "${agentName}"?`)) {
119 | try {
120 | setError(null);
121 | setIsClearing(true);
122 | await updateAgentMemory(agentId, '');
123 | setMemory('');
124 | setSavedMemory('');
125 | Logger.info(agentId, 'Memory cleared');
126 | } catch (err) {
127 | const errorMessage = err instanceof Error ? err.message : 'Unknown error';
128 | setError(`Failed to clear memory: ${errorMessage}`);
129 | Logger.error(agentId, `Failed to clear memory: ${errorMessage}`, err);
130 | } finally {
131 | setIsClearing(false);
132 | }
133 | }
134 | };
135 |
136 | const hasChanges = memory !== savedMemory;
137 |
138 | if (!isOpen) {
139 | return null;
140 | }
141 |
142 | return (
143 |
144 |
145 |
146 |
Memory Manager: {agentName}
147 |
150 |
151 |
152 | {error && (
153 |
154 | {error}
155 |
156 | )}
157 |
158 |
159 |
166 |
167 |
168 |
169 |
179 |
180 |
190 |
191 |
192 |
193 | {memory.length} characters
194 |
195 |
196 |
197 |
198 | );
199 | };
200 |
201 | export default MemoryManager;
202 |
--------------------------------------------------------------------------------
/app/src/components/ScheduleAgentModal.tsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect } from 'react';
2 | import { Calendar, Clock } from 'lucide-react';
3 | import { Logger } from '../utils/logging';
4 | import { executeAgentIteration } from '../utils/main_loop';
5 |
6 | interface ScheduleAgentModalProps {
7 | agentId: string;
8 | isOpen: boolean;
9 | onClose: () => void;
10 | onUpdate: () => void;
11 | }
12 |
13 | // Store scheduled tasks in memory
14 | const scheduledAgents: Record = {};
18 |
19 | export const isAgentScheduled = (agentId: string): boolean => {
20 | return !!scheduledAgents[agentId];
21 | }
22 |
23 | export const getScheduledTime = (agentId: string): Date | null => {
24 | return scheduledAgents[agentId]?.scheduledTime || null;
25 | }
26 |
27 | export const cancelScheduledAgent = (agentId: string): void => {
28 | if (scheduledAgents[agentId]) {
29 | window.clearTimeout(scheduledAgents[agentId].timeoutId);
30 | delete scheduledAgents[agentId];
31 | Logger.info('SCHEDULE', `Cancelled scheduled run for agent ${agentId}`);
32 | }
33 | }
34 |
35 | const ScheduleAgentModal: React.FC = ({
36 | agentId,
37 | isOpen,
38 | onClose,
39 | onUpdate
40 | }) => {
41 | const [date, setDate] = useState('');
42 | const [time, setTime] = useState('');
43 | const [error, setError] = useState(null);
44 | const [isCurrentlyScheduled, setIsCurrentlyScheduled] = useState(false);
45 | const [scheduledDateTime, setScheduledDateTime] = useState(null);
46 | const [isOneTime, setIsOneTime] = useState(true); // Default to one-time execution
47 |
48 | // Set default date to today and time to next hour
49 | useEffect(() => {
50 | const now = new Date();
51 | const nextHour = new Date(now);
52 | nextHour.setHours(nextHour.getHours() + 1);
53 | nextHour.setMinutes(0);
54 | nextHour.setSeconds(0);
55 |
56 | const formattedDate = now.toISOString().split('T')[0];
57 | setDate(formattedDate);
58 |
59 | const hours = nextHour.getHours().toString().padStart(2, '0');
60 | const minutes = nextHour.getMinutes().toString().padStart(2, '0');
61 | setTime(`${hours}:${minutes}`);
62 |
63 | // Check if already scheduled
64 | if (isAgentScheduled(agentId)) {
65 | setIsCurrentlyScheduled(true);
66 | const scheduledTime = getScheduledTime(agentId);
67 | if (scheduledTime) {
68 | const formattedDateTime = new Intl.DateTimeFormat('en-US', {
69 | year: 'numeric',
70 | month: 'short',
71 | day: 'numeric',
72 | hour: '2-digit',
73 | minute: '2-digit'
74 | }).format(scheduledTime);
75 | setScheduledDateTime(formattedDateTime);
76 | }
77 | }
78 | }, [agentId]);
79 |
80 | const handleSchedule = () => {
81 | try {
82 | setError(null);
83 |
84 | if (!date || !time) {
85 | setError('Please select both date and time');
86 | return;
87 | }
88 |
89 | const scheduledDateTime = new Date(`${date}T${time}`);
90 | const now = new Date();
91 |
92 | if (scheduledDateTime <= now) {
93 | setError('Scheduled time must be in the future');
94 | return;
95 | }
96 |
97 | const timeUntilExecution = scheduledDateTime.getTime() - now.getTime();
98 |
99 | cancelScheduledAgent(agentId);
100 |
101 | const timeoutId = window.setTimeout(async () => {
102 | Logger.info('SCHEDULE', `Executing scheduled run for agent ${agentId}`);
103 |
104 | try {
105 | if (isOneTime) {
106 | // For one-time execution, just run the agent iteration once
107 | await executeAgentIteration(agentId);
108 | Logger.info('SCHEDULE', `One-time execution completed for agent ${agentId}`);
109 | } else {
110 | // For continuous execution, start the agent loop
111 | const { startAgentLoop } = await import('../utils/main_loop');
112 | await startAgentLoop(agentId);
113 | Logger.info('SCHEDULE', `Started continuous execution for agent ${agentId}`);
114 | }
115 |
116 | // Clean up one-time schedule
117 | if (isOneTime) {
118 | delete scheduledAgents[agentId];
119 | }
120 |
121 | onUpdate();
122 | } catch (err) {
123 | Logger.error('SCHEDULE', `Failed to execute scheduled agent ${agentId}: ${(err as Error).message}`, err);
124 | }
125 | }, timeUntilExecution);
126 |
127 | scheduledAgents[agentId] = {
128 | scheduledTime: scheduledDateTime,
129 | timeoutId: timeoutId as unknown as number
130 | };
131 |
132 | Logger.info('SCHEDULE', `Agent ${agentId} scheduled to run at ${scheduledDateTime.toLocaleString()} (${isOneTime ? 'one-time' : 'continuous'})`);
133 |
134 | onUpdate();
135 | onClose();
136 |
137 | } catch (err) {
138 | const errorMessage = err instanceof Error ? err.message : 'Unknown error';
139 | setError(`Failed to schedule agent: ${errorMessage}`);
140 | Logger.error('SCHEDULE', `Failed to schedule agent ${agentId}: ${errorMessage}`, err);
141 | }
142 | };
143 |
144 | const handleCancel = () => {
145 | cancelScheduledAgent(agentId);
146 | setIsCurrentlyScheduled(false);
147 | setScheduledDateTime(null);
148 | onUpdate();
149 | onClose();
150 | };
151 |
152 | if (!isOpen) return null;
153 |
154 | return (
155 |
156 |
157 |
Schedule Agent Run
158 |
159 | {isCurrentlyScheduled ? (
160 |
161 |
This agent is scheduled to run at:
162 |
163 |
164 | {scheduledDateTime}
165 |
166 |
167 | ) : (
168 |
169 |
170 |
171 |
172 | setDate(e.target.value)}
176 | className="w-full px-3 py-2 border rounded-md pl-10"
177 | />
178 |
179 |
180 |
181 |
182 |
183 |
184 |
185 | setTime(e.target.value)}
189 | className="w-full px-3 py-2 border rounded-md pl-10"
190 | />
191 |
192 |
193 |
194 |
195 |
196 |
205 |
206 |
207 | )}
208 |
209 | {error && (
210 |
211 | {error}
212 |
213 | )}
214 |
215 |
216 |
222 |
223 | {isCurrentlyScheduled ? (
224 |
230 | ) : (
231 |
237 | )}
238 |
239 |
240 |
241 | );
242 | };
243 |
244 | export default ScheduleAgentModal;
245 |
--------------------------------------------------------------------------------
/app/src/components/SidebarMenu.tsx:
--------------------------------------------------------------------------------
1 | // src/components/SidebarMenu.tsx
2 | import React from 'react';
3 | import { X, Home, Users, Database, Settings } from 'lucide-react';
4 | import { Logger } from '@utils/logging';
5 |
6 | interface SidebarMenuProps {
7 | isOpen: boolean;
8 | onClose: () => void;
9 | activeTab: string;
10 | onTabChange: (tab: string) => void;
11 | }
12 |
13 | const SidebarMenu: React.FC = ({
14 | isOpen,
15 | onClose,
16 | activeTab,
17 | onTabChange
18 | }) => {
19 | const handleTabClick = (tab: string) => {
20 | onTabChange(tab);
21 | Logger.info('NAVIGATION', `Navigated to ${tab} tab`);
22 | };
23 |
24 | return (
25 | <>
26 | {/* Overlay */}
27 | {isOpen && (
28 |
32 | )}
33 |
34 | {/* Sidebar */}
35 |
40 |
41 |
Observer
42 |
48 |
49 |
50 |
104 |
105 |
106 | Observer v0.1.0
107 |
108 |
109 | >
110 | );
111 | };
112 |
113 | export default SidebarMenu;
114 |
--------------------------------------------------------------------------------
/app/src/components/StartupDialogs.tsx:
--------------------------------------------------------------------------------
1 | // src/components/StartupDialog.tsx
2 | import React from 'react';
3 | import { Terminal, Cloud, Server, LogIn, ExternalLink } from 'lucide-react'; // Added ExternalLink
4 |
5 | interface StartupDialogProps {
6 | onDismiss: () => void;
7 | onLogin?: () => void;
8 | // serverStatus and setServerStatus are no longer needed here if we simplify this dialog
9 | setUseObServer?: (value: boolean) => void;
10 | }
11 |
12 | const StartupDialog: React.FC = ({
13 | onDismiss,
14 | onLogin,
15 | setUseObServer
16 | }) => {
17 | const ollamaProxyUrl = 'https://localhost:3838'; // For the link
18 |
19 | const handleObServerStart = () => {
20 | if (setUseObServer) {
21 | setUseObServer(true);
22 | }
23 | onDismiss();
24 | };
25 |
26 | const handleSetupLocal = () => {
27 | if (setUseObServer) {
28 | setUseObServer(false); // Ensure local mode is selected
29 | }
30 | onDismiss(); // Dismiss, user will use header to connect/troubleshoot
31 | };
32 |
33 | return (
34 |
35 |
36 |
37 |
38 |
Welcome to Observer
39 |
40 |
41 |
Choose how you want to get started:
42 |
43 |
44 | {/* Ob-Server Cloud Card */}
45 |
46 |
47 |
Ob-Server Cloud
48 |
49 |
50 |
51 | - No installation needed
52 | - Easy to use
53 | - Privacy respecting
54 |
55 |
61 | {onLogin && (
62 |
68 | )}
69 |
70 |
71 | {/* Local Server Card - Simplified */}
72 |
104 |
105 |
106 |
107 | You can switch between options anytime from the app header.
108 |
109 |
110 |
111 | );
112 | };
113 |
114 | export default StartupDialog;
115 |
--------------------------------------------------------------------------------
/app/src/components/TextBubble.tsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect } from 'react';
2 | import { HelpCircle } from 'lucide-react';
3 |
4 | interface TextBubbleProps {
5 | message: string;
6 | duration?: number; // in milliseconds, 0 for permanent
7 | }
8 |
9 | const TextBubble: React.FC = ({
10 | message,
11 | duration = 0
12 | }) => {
13 | const [visible, setVisible] = useState(true);
14 |
15 | useEffect(() => {
16 | if (duration > 0) {
17 | const timer = setTimeout(() => {
18 | setVisible(false);
19 | }, duration);
20 |
21 | return () => clearTimeout(timer);
22 | }
23 | }, [duration]);
24 |
25 | if (!visible) return null;
26 |
27 | return (
28 |
29 | {/* Speech bubble tail pointing up */}
30 |
31 |
32 | {/* Main bubble */}
33 |
34 |
35 | {message}
36 |
37 |
38 | );
39 | };
40 |
41 | export default TextBubble;
42 |
--------------------------------------------------------------------------------
/app/src/components/debug/Auth0Debug.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { useAuth0 } from '@auth0/auth0-react';
3 |
4 | export const Auth0Debug = () => {
5 | const {
6 | isLoading,
7 | isAuthenticated,
8 | error,
9 | user,
10 | getAccessTokenSilently,
11 | } = useAuth0();
12 |
13 | const [token, setToken] = React.useState(null);
14 | const [tokenError, setTokenError] = React.useState(null);
15 |
16 | const fetchToken = async () => {
17 | try {
18 | const accessToken = await getAccessTokenSilently();
19 | setToken(accessToken);
20 | setTokenError(null);
21 | } catch (err) {
22 | setToken(null);
23 | setTokenError(err instanceof Error ? err.message : String(err));
24 | }
25 | };
26 |
27 | return (
28 |
29 |
Auth0 Debug Info
30 |
31 |
32 |
Loading: {isLoading ? 'true' : 'false'}
33 |
Authenticated: {isAuthenticated ? 'true' : 'false'}
34 | {error && (
35 |
36 | Error: {error.message}
37 |
38 | )}
39 |
40 |
41 | {user && (
42 |
43 |
User Info:
44 |
45 | {JSON.stringify(user, null, 2)}
46 |
47 |
48 | )}
49 |
50 |
51 |
58 |
59 |
60 | {token && (
61 |
62 |
Access Token:
63 |
64 | {token}
65 |
66 |
67 | )}
68 |
69 | {tokenError && (
70 |
71 | Token Error: {tokenError}
72 |
73 | )}
74 |
75 |
76 |
Add this component to your App for debugging:
77 |
78 | {`import { Auth0Debug } from './path/to/Auth0Debug';\n\n// Inside your component:\n`}
79 |
80 |
81 |
82 | );
83 | };
84 |
85 | export default Auth0Debug;
86 |
--------------------------------------------------------------------------------
/app/src/desktop/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/app/src/desktop/.gitkeep
--------------------------------------------------------------------------------
/app/src/env.d.ts:
--------------------------------------------------------------------------------
// NOTE(review): this bare "///" was almost certainly a triple-slash reference
// directive (likely `/// <reference types="vite/client" />`, given the
// Vite-style ImportMetaEnv augmentation below) truncated in this dump —
// restore it before building.
///
interface ImportMetaEnv {
  readonly VITE_AUTH0_DOMAIN: string
  readonly VITE_AUTH0_CLIENT_ID: string
  readonly VITE_GEMINI_API_KEY: string
  readonly VITE_DEFAULT_MODEL: string
  // add more environment variables as needed
}
interface ImportMeta {
  readonly env: ImportMetaEnv
}
12 |
--------------------------------------------------------------------------------
/app/src/index.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
--------------------------------------------------------------------------------
/app/src/utils/agent-output.ts:
--------------------------------------------------------------------------------
1 | // src/utils/agent-output.ts
2 | import { Logger } from './logging';
3 | import * as utilities from './handlers/utils';
4 |
5 | // Map to store output processors
6 | const processors = new Map();
7 |
8 | /**
9 | * Register an output processor for an agent
10 | *
11 | * @param agentId ID of the agent
12 | * @param processor Function that processes the complete output
13 | */
14 | export function registerProcessor(agentId: string, processor: Function): void {
15 | processors.set(agentId, processor);
16 | Logger.info(agentId, `Registered output processor`);
17 | }
18 |
19 | /**
20 | * Process agent output text using the registered processor
21 | *
22 | * @param agentId ID of the agent
23 | * @param text Complete output text from the model
24 | * @returns True if processing resulted in an action
25 | */
26 | export async function processOutput(agentId: string, text: string): Promise {
27 | // Get the processor (or use a default logger if none exists)
28 | const processor = processors.get(agentId) || ((response: string) => {
29 | Logger.info(agentId, `Agent output: ${response.length > 100 ?
30 | response.substring(0, 100) + '...' : response}`);
31 | return false;
32 | });
33 |
34 | // Filter out content inside ... tags if needed
35 | //if (excludeThink) {
36 | // filteredText = text.replace(/[\s\S]*?<\/think>/g, '');
37 | // Logger.debug(agentId, `Filtered out blocks from agent output`);
38 | //}
39 |
40 | try {
41 | // Process the entire response at once
42 | const result = await processor(text, utilities, agentId);
43 | return !!result; // Convert to boolean
44 | } catch (error) {
45 | Logger.error(agentId, `Error processing response: ${error}`);
46 | return false;
47 | }
48 | }
49 |
50 | /**
51 | * Clear the processor for an agent
52 | *
53 | * @param agentId ID of the agent
54 | */
55 | export function clearProcessor(agentId: string): void {
56 | processors.delete(agentId);
57 | Logger.info(agentId, `Cleared output processor`);
58 | }
59 |
--------------------------------------------------------------------------------
/app/src/utils/handlers/JupyterConfig.ts:
--------------------------------------------------------------------------------
1 | // src/utils/handlers/JupyterConfig.ts
2 | import { Logger } from '../logging';
3 |
// Default values
let jupyterHost = '127.0.0.1';
let jupyterPort = '8888';
let jupyterToken = '';
// Tracks whether the most recent connection test succeeded; false until tested.
let jupyterConnected = false;
const STORAGE_KEY = 'jupyter_config';

// Load from localStorage on init
try {
  const stored = localStorage.getItem(STORAGE_KEY);
  if (stored) {
    const config = JSON.parse(stored);
    // Missing/empty fields fall back to the defaults declared above.
    jupyterHost = config.host || jupyterHost;
    jupyterPort = config.port || jupyterPort;
    jupyterToken = config.token || jupyterToken;
    Logger.debug('CONFIG', 'Loaded Jupyter config from storage');
  }
} catch (error) {
  // Corrupt JSON or storage access failure: keep the defaults and log.
  Logger.error('CONFIG', 'Error loading config:', error);
}
24 |
25 | export function setJupyterConfig(host: string, port: string, token: string) {
26 | if (host) jupyterHost = host;
27 | if (port) jupyterPort = port;
28 | if (token) jupyterToken = token;
29 |
30 | try {
31 | localStorage.setItem(STORAGE_KEY, JSON.stringify({
32 | host: jupyterHost,
33 | port: jupyterPort,
34 | token: jupyterToken
35 | }));
36 | } catch (error) {
37 | Logger.error('CONFIG', 'Error saving config:', error);
38 | }
39 | }
40 |
41 | export function getJupyterConfig() {
42 | return {
43 | host: jupyterHost,
44 | port: jupyterPort,
45 | token: jupyterToken
46 | };
47 | }
48 |
/** True when the most recent connection test reached the Jupyter server. */
export function isJupyterConnected(): boolean {
  return jupyterConnected;
}
52 |
53 | export async function testJupyterConnection(configOverride?: {
54 | host?: string,
55 | port?: string,
56 | token?: string
57 | }): Promise<{success: boolean, message: string}> {
58 | const config = configOverride
59 | ? {...getJupyterConfig(), ...configOverride}
60 | : getJupyterConfig();
61 |
62 | try {
63 | const url = `http://${config.host}:${config.port}/api/kernels`;
64 |
65 | const response = await fetch(url, {
66 | headers: {
67 | 'Authorization': `token ${config.token}`
68 | }
69 | });
70 |
71 | // Update the connection state
72 | jupyterConnected = response.ok;
73 |
74 | if (response.ok) {
75 | Logger.info('CONFIG', `Successfully connected to Jupyter server`);
76 | return { success: true, message: `✅ Connected to Jupyter server` };
77 | } else {
78 | Logger.warn('CONFIG', `Jupyter connection failed: ${response.status}`);
79 | return { success: false, message: `❌ Connection failed: ${response.status}` };
80 | }
81 | } catch (error) {
82 | jupyterConnected = false;
83 | const errorMessage = error instanceof Error ? error.message : String(error);
84 | return { success: false, message: `❌ Connection error: ${errorMessage}` };
85 | }
86 | }
87 |
--------------------------------------------------------------------------------
/app/src/utils/handlers/javascript.ts:
--------------------------------------------------------------------------------
1 | // src/utils/handlers/javascript.ts
2 | import * as utils from './utils';
3 | import { Logger } from '../logging';
4 | // Import the agent loop control functions
5 | import { startAgentLoop, stopAgentLoop } from '../main_loop';
6 |
7 | /**
8 | * Execute JavaScript handler for processing agent responses
9 | */
10 | export async function executeJavaScript(
11 | response: string,
12 | agentId: string,
13 | code: string
14 | ): Promise {
15 | try {
16 | // We'll use a sandboxed approach with a context object
17 | const context = {
18 | response,
19 | agentId,
20 | // Define utility functions with full flexibility - allow accessing any agent's memory
21 | getMemory: async (targetId = agentId) => await utils.getMemory(targetId),
22 | setMemory: async (targetId: string, value?: any) => {
23 | // If only one parameter is provided, assume it's the value for current agent
24 | if (value === undefined) {
25 | return await utils.setMemory(agentId, targetId);
26 | }
27 | // Otherwise set memory for specified agent
28 | return await utils.setMemory(targetId, value);
29 | },
30 | appendMemory: async (targetId: string, content?: string, separator = '\n') => {
31 | // If only one parameter is provided, assume it's content for current agent
32 | if (content === undefined) {
33 | return await utils.appendMemory(agentId, targetId, separator);
34 | }
35 | // Otherwise append to specified agent's memory
36 | return await utils.appendMemory(targetId, content, separator);
37 | },
38 | notify: utils.notify,
39 | time: utils.time,
40 | console: console,
41 |
42 | startAgent: async (targetAgentId?: string) => { // targetAgentId is optional
43 | const idToStart = targetAgentId === undefined ? agentId : targetAgentId;
44 | await startAgentLoop(idToStart);
45 | },
46 |
47 | stopAgent: async (targetAgentId?: string) => {
48 | const idToStop = targetAgentId === undefined ? agentId : targetAgentId;
49 | await stopAgentLoop(idToStop);
50 | }
51 | };
52 |
53 | // Create a wrapper function that sets up the context
54 | const wrappedCode = `
55 | (async function() {
56 | try {
57 | ${code}
58 | return true;
59 | } catch (e) {
60 | console.error('Error in handler:', e);
61 | return false;
62 | }
63 | })()
64 | `;
65 |
66 | // Use Function constructor with context binding
67 | const handler = new Function(...Object.keys(context), wrappedCode);
68 |
69 | // Execute with bound context values
70 | return await handler(...Object.values(context));
71 | } catch (error) {
72 | Logger.error(agentId, `Error executing JavaScript: ${error}`);
73 | return false;
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
/app/src/utils/handlers/python.ts:
--------------------------------------------------------------------------------
1 | import { KernelManager, ServerConnection, KernelMessage } from '@jupyterlab/services';
2 | import { Logger } from '../logging';
3 | import { getJupyterConfig } from './JupyterConfig'; // Fixed casing to match actual file name
4 |
5 | /**
6 | * Execute Python code using a Jupyter kernel
7 | */
8 | export async function executePython(
9 | response: string,
10 | agentId: string,
11 | code: string
12 | ): Promise {
13 | Logger.debug(agentId, 'Executing Python code');
14 | const { host, port, token } = getJupyterConfig();
15 |
16 | try {
17 | // Create server settings with token and CORS handling
18 | const serverSettings = ServerConnection.makeSettings({
19 | baseUrl: `http://${host}:${port}`,
20 | wsUrl: `ws://${host}:${port}`,
21 | token: token,
22 | init: {
23 | mode: 'cors',
24 | headers: {
25 | 'Accept': 'application/json',
26 | 'Content-Type': 'application/json'
27 | }
28 | }
29 | });
30 |
31 | // Skip connection test - we'll rely on the kernel API
32 |
33 | // Create kernel manager with settings
34 | const kernelManager = new KernelManager({ serverSettings });
35 |
36 | // Start a new kernel directly instead of trying to list kernels first
37 | Logger.debug(agentId, 'Starting new kernel');
38 | const kernel = await kernelManager.startNew({ name: 'python3' });
39 | Logger.debug(agentId, `Started new kernel: ${kernel.id}`);
40 |
41 | // We already have a kernel from above
42 |
43 | // Prepare code with variables
44 | const fullCode = `
45 | response = """${response.replace(/"""/g, '\\"\\"\\"')}"""
46 | agentId = "${agentId}"
47 | # User code begins here
48 | ${code}
49 | `;
50 |
51 | // Execute the code
52 | Logger.debug(agentId, 'Executing code');
53 | let hasError = false;
54 | const future = kernel.requestExecute({ code: fullCode });
55 |
56 | // Handle messages with proper type casting
57 | future.onIOPub = (msg: KernelMessage.IIOPubMessage) => {
58 | const msgType = msg.header.msg_type;
59 |
60 | if (msgType === 'error') {
61 | hasError = true;
62 | const errorContent = msg.content as KernelMessage.IErrorMsg['content'];
63 | Logger.error(agentId, `Python error: ${errorContent.ename}: ${errorContent.evalue}`);
64 | } else if (msgType === 'stream') {
65 | const streamContent = msg.content as KernelMessage.IStreamMsg['content'];
66 | if (streamContent.name === 'stderr') {
67 | Logger.warn(agentId, `Python stderr: ${streamContent.text}`);
68 | } else if (streamContent.name === 'stdout') {
69 | Logger.info(agentId, `Python stdout: ${streamContent.text}`);
70 | }
71 | }
72 | };
73 |
74 | // Wait for execution to complete
75 | await future.done;
76 | Logger.debug(agentId, 'Code execution completed');
77 | await kernel.shutdown();
78 |
79 | return !hasError;
80 | } catch (error) {
81 | Logger.error(agentId, `Error executing Python: ${error}`);
82 | return false;
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/app/src/utils/handlers/utils.ts:
--------------------------------------------------------------------------------
1 | // src/utils/handlers/utils.ts
2 | import { Logger } from '../logging';
3 | import { getAgentMemory as fetchAgentMemory, updateAgentMemory as saveAgentMemory } from '../agent_database';
4 |
5 | /**
6 | * Utility functions for handlers
7 | */
8 |
9 | /**
10 | * Get the current time in a readable format
11 | */
12 | export function time(): string {
13 | return new Date().toLocaleTimeString([], {
14 | hour: 'numeric',
15 | minute: '2-digit',
16 | hour12: true
17 | }).toLowerCase();
18 | }
19 |
20 | /**
21 | * Get agent's memory value
22 | */
23 | export async function getMemory(agentId: string): Promise {
24 | return await fetchAgentMemory(agentId);
25 | }
26 |
27 | /**
28 | * Set agent's memory value
29 | */
30 | export async function setMemory(agentId: string, memory: any): Promise {
31 | await saveAgentMemory(agentId, memory);
32 |
33 | // Log the memory update
34 | Logger.info(agentId, `Memory Updated`, {
35 | logType: 'memory-update',
36 | content: memory
37 | });
38 | }
39 |
40 | /**
41 | * Append to agent's memory value
42 | * @param agentId The agent's ID
43 | * @param content Content to append to memory
44 | * @param separator Optional separator between existing memory and new content (default: '\n')
45 | */
46 | export async function appendMemory(agentId: string, content: string, separator: string = '\n'): Promise {
47 | try {
48 | // Get current memory
49 | const currentMemory = await fetchAgentMemory(agentId);
50 |
51 | // If current memory exists and isn't empty, append with separator
52 | // Otherwise just set the content directly
53 | const newMemory = currentMemory ? `${currentMemory}${separator}${content}` : content;
54 |
55 | // Save updated memory
56 | await saveAgentMemory(agentId, newMemory);
57 |
58 | Logger.debug('MEMORY', `Appended to agent ${agentId} memory`);
59 |
60 | // Log the memory append
61 | Logger.info(agentId, `Memory Appended`, {
62 | logType: 'memory-update',
63 | content: newMemory,
64 | update: {
65 | appended: content,
66 | separator: separator
67 | }
68 | });
69 | } catch (error) {
70 | Logger.error('MEMORY', `Error appending to memory: ${error instanceof Error ? error.message : 'Unknown error'}`);
71 | }
72 | }
73 |
74 |
75 | /**
76 | * Send a notification
77 | */
78 | export function notify(title: string, message: string): void {
79 | try {
80 | // Check if notifications are supported
81 | if (!("Notification" in window)) {
82 | Logger.error('NOTIFICATION', 'Browser does not support notifications');
83 | return;
84 | }
85 |
86 | // Check permission status
87 | if (Notification.permission === "granted") {
88 | // Create and show notification
89 | new Notification(title, { body: message });
90 | } else if (Notification.permission !== "denied") {
91 | // Request permission
92 | Notification.requestPermission().then(permission => {
93 | if (permission === "granted") {
94 | new Notification(title, { body: message });
95 | }
96 | });
97 | }
98 | } catch (error) {
99 | Logger.error('NOTIFICATION', `Error: ${error instanceof Error ? error.message : 'Unknown error'}`);
100 | }
101 | }
102 |
103 |
--------------------------------------------------------------------------------
/app/src/utils/logging.ts:
--------------------------------------------------------------------------------
1 | // src/utils/logging.ts
2 |
3 | // Define log levels
4 | export enum LogLevel {
5 | DEBUG = 0,
6 | INFO = 1,
7 | WARNING = 2,
8 | ERROR = 3
9 | }
10 |
11 | // Define log entry structure
12 | export interface LogEntry {
13 | id: string; // Unique ID for the log entry
14 | timestamp: Date; // When the log occurred
15 | level: LogLevel; // Severity level
16 | source: string; // Source of the log (agentId, service name, etc.)
17 | message: string; // Main log message
18 | details?: any; // Optional additional data
19 | }
20 |
21 | // Define log listener type
22 | export type LogListener = (entry: LogEntry) => void;
23 |
24 | /**
25 | * Logger service for capturing and managing application logs
26 | */
27 | class LoggingService {
28 | private logs: LogEntry[] = [];
29 | private listeners: LogListener[] = [];
30 | private maxLogEntries: number = 1000; // Maximum number of logs to keep in memory
31 | private nextLogId: number = 1;
32 |
33 | constructor() {
34 | // Initialize with any saved logs if needed
35 | }
36 |
37 | /**
38 | * Add a new log entry
39 | */
40 | public log(
41 | level: LogLevel,
42 | source: string,
43 | message: string,
44 | details?: any
45 | ): LogEntry {
46 | const entry: LogEntry = {
47 | id: `log-${this.nextLogId++}`,
48 | timestamp: new Date(),
49 | level,
50 | source,
51 | message,
52 | details
53 | };
54 |
55 | // Add to log array
56 | this.logs.push(entry);
57 |
58 | // Trim logs if we exceed max size
59 | if (this.logs.length > this.maxLogEntries) {
60 | this.logs = this.logs.slice(-this.maxLogEntries);
61 | }
62 |
63 | // Notify all listeners
64 | this.notifyListeners(entry);
65 |
66 | return entry;
67 | }
68 |
69 | /**
70 | * Convenience method for debug logs
71 | */
72 | public debug(source: string, message: string, details?: any): LogEntry {
73 | return this.log(LogLevel.DEBUG, source, message, details);
74 | }
75 |
76 | /**
77 | * Convenience method for info logs
78 | */
79 | public info(source: string, message: string, details?: any): LogEntry {
80 | return this.log(LogLevel.INFO, source, message, details);
81 | }
82 |
83 | /**
84 | * Convenience method for warning logs
85 | */
86 | public warn(source: string, message: string, details?: any): LogEntry {
87 | return this.log(LogLevel.WARNING, source, message, details);
88 | }
89 |
90 | /**
91 | * Convenience method for error logs
92 | */
93 | public error(source: string, message: string, details?: any): LogEntry {
94 | return this.log(LogLevel.ERROR, source, message, details);
95 | }
96 |
97 | /**
98 | * Get all stored logs
99 | */
100 | public getLogs(): LogEntry[] {
101 | return [...this.logs];
102 | }
103 |
104 | /**
105 | * Get logs filtered by criteria
106 | */
107 | public getFilteredLogs(
108 | filters: {
109 | level?: LogLevel,
110 | source?: string | string[],
111 | since?: Date,
112 | until?: Date,
113 | searchText?: string
114 | }
115 | ): LogEntry[] {
116 | return this.logs.filter(entry => {
117 | // Filter by log level
118 | if (filters.level !== undefined && entry.level < filters.level) {
119 | return false;
120 | }
121 |
122 | // Filter by source
123 | if (filters.source !== undefined) {
124 | if (Array.isArray(filters.source)) {
125 | if (!filters.source.includes(entry.source)) {
126 | return false;
127 | }
128 | } else if (entry.source !== filters.source) {
129 | return false;
130 | }
131 | }
132 |
133 | // Filter by start date
134 | if (filters.since && entry.timestamp < filters.since) {
135 | return false;
136 | }
137 |
138 | // Filter by end date
139 | if (filters.until && entry.timestamp > filters.until) {
140 | return false;
141 | }
142 |
143 | // Filter by text search
144 | if (filters.searchText) {
145 | const searchLower = filters.searchText.toLowerCase();
146 | if (!entry.message.toLowerCase().includes(searchLower)) {
147 | return false;
148 | }
149 | }
150 |
151 | return true;
152 | });
153 | }
154 |
155 | /**
156 | * Get logs for a specific agent
157 | */
158 | public getAgentLogs(agentId: string): LogEntry[] {
159 | return this.getFilteredLogs({ source: agentId });
160 | }
161 |
162 | /**
163 | * Clear all logs
164 | */
165 | public clearLogs(): void {
166 | this.logs = [];
167 | // Optionally notify listeners about clear
168 | }
169 |
170 | /**
171 | * Register a listener for new log entries
172 | */
173 | public addListener(listener: LogListener): void {
174 | this.listeners.push(listener);
175 | }
176 |
177 | /**
178 | * Remove a previously registered listener
179 | */
180 | public removeListener(listener: LogListener): void {
181 | this.listeners = this.listeners.filter(l => l !== listener);
182 | }
183 |
184 | /**
185 | * Notify all listeners about a new log entry
186 | */
187 | private notifyListeners(entry: LogEntry): void {
188 | this.listeners.forEach(listener => {
189 | try {
190 | listener(entry);
191 | } catch (error) {
192 | console.error('Error in log listener:', error);
193 | }
194 | });
195 | }
196 | }
197 |
198 | // Export a singleton instance of the service
199 | export const Logger = new LoggingService();
200 |
201 | // Create a React context provider for the Logger (to be used later)
202 | export const createLoggerDecorator = (source: string) => {
203 | return {
204 | debug: (message: string, details?: any) => Logger.debug(source, message, details),
205 | info: (message: string, details?: any) => Logger.info(source, message, details),
206 | warn: (message: string, details?: any) => Logger.warn(source, message, details),
207 | error: (message: string, details?: any) => Logger.error(source, message, details)
208 | };
209 | };
210 |
--------------------------------------------------------------------------------
/app/src/utils/main_loop.ts:
--------------------------------------------------------------------------------
1 | // src/utils/main_loop.ts
2 | import { getAgent, getAgentCode } from './agent_database';
3 | import { sendPrompt } from './sendApi';
4 | import { Logger } from './logging';
5 | import { preProcess } from './pre-processor';
6 | import { postProcess } from './post-processor';
7 | import { stopScreenCapture } from './screenCapture';
8 | import { stopRecognitionAndClear } from './speechInputManager'
9 |
10 | const activeLoops: Record = {};
16 |
17 |
// DOM event name dispatched whenever an agent's loop starts or stops.
export const AGENT_STATUS_CHANGED_EVENT = 'agentStatusChanged';

// Ollama server location; mutated via setOllamaServerAddress() below.
let serverHost = 'localhost';
let serverPort = '3838';
22 |
/**
 * Point subsequent prompt requests at a different Ollama server.
 */
export function setOllamaServerAddress(host: string, port: string): void {
  serverHost = host;
  serverPort = port;
  Logger.info('SERVER', `Ollama server address set to ${host}:${port}`);
}
28 |
/** The currently configured Ollama server host and port. */
export function getOllamaServerAddress(): { host: string; port: string } {
  return { host: serverHost, port: serverPort };
}
32 |
33 | export async function startAgentLoop(agentId: string): Promise {
34 | if (activeLoops[agentId]?.isRunning) {
35 | Logger.warn(agentId, `Agent is already running`);
36 | return;
37 | }
38 |
39 | try {
40 | const agent = await getAgent(agentId);
41 | if (!agent) throw new Error(`Agent ${agentId} not found`);
42 |
43 | activeLoops[agentId] = { intervalId: null, isRunning: true, serverHost, serverPort };
44 | window.dispatchEvent(
45 | new CustomEvent(AGENT_STATUS_CHANGED_EVENT, {
46 | detail: { agentId, status: 'running' },
47 | })
48 | );
49 |
50 | // first iteration immediately
51 | await executeAgentIteration(agentId);
52 |
53 | // then schedule
54 | const intervalMs = agent.loop_interval_seconds * 1000;
55 | activeLoops[agentId].intervalId = window.setInterval(async () => {
56 | if (activeLoops[agentId]?.isRunning) {
57 | try {
58 | await executeAgentIteration(agentId);
59 | } catch (e) {
60 | Logger.error(agentId, `Error in interval: ${e}`, e);
61 | }
62 | }
63 | }, intervalMs);
64 | } catch (error) {
65 | // on failure, dispatch “stopped” so UI can recover
66 | window.dispatchEvent(
67 | new CustomEvent(AGENT_STATUS_CHANGED_EVENT, {
68 | detail: { agentId, status: 'stopped' },
69 | })
70 | );
71 | // clean up and re-throw…
72 | throw error;
73 | }
74 | }
75 |
76 | export async function stopAgentLoop(agentId: string): Promise {
77 | const loop = activeLoops[agentId];
78 | if (loop?.isRunning) {
79 | if (loop.intervalId !== null) window.clearInterval(loop.intervalId);
80 | window.dispatchEvent(
81 | new CustomEvent(AGENT_STATUS_CHANGED_EVENT, {
82 | detail: { agentId, status: 'stopped' },
83 | })
84 | );
85 | stopScreenCapture(); // For screen capture
86 | stopRecognitionAndClear(agentId); //for microphone
87 | activeLoops[agentId] = { ...loop, isRunning: false, intervalId: null };
88 | } else {
89 | Logger.warn(agentId, `Attempted to stop agent that wasn't running`);
90 | }
91 | }
92 |
93 | /**
94 | * Execute a single iteration of the agent's loop
95 | */
96 | export async function executeAgentIteration(agentId: string): Promise {
97 | // Check if the loop is still active
98 | if (!activeLoops[agentId]?.isRunning) {
99 | Logger.debug(agentId, `Skipping execution for stopped agent`);
100 | return;
101 | }
102 |
103 | try {
104 | Logger.debug(agentId, `Starting agent iteration`);
105 |
106 | // Get the latest agent data
107 | const agent = await getAgent(agentId);
108 | const agentCode = await getAgentCode(agentId) || '';
109 |
110 | if (!agent) {
111 | throw new Error(`Agent ${agentId} not found`);
112 | }
113 |
114 | // 1. Pre-process: Prepare the prompt
115 | const systemPrompt = await preProcess(agentId, agent.system_prompt);
116 |
117 | Logger.info(agentId, `Prompt`, {
118 | logType: 'model-prompt',
119 | content: systemPrompt
120 | });
121 | console.log(systemPrompt);
122 |
123 | // 2. Send prompt to API
124 | Logger.debug(agentId, `Sending prompt to Ollama (${serverHost}:${serverPort}, model: ${agent.model_name})`);
125 |
126 | const response = await sendPrompt(
127 | serverHost,
128 | serverPort,
129 | agent.model_name,
130 | systemPrompt
131 | );
132 |
133 | Logger.info(agentId, `Response`, {
134 | logType: 'model-response',
135 | content: response
136 | });
137 |
138 | Logger.debug(agentId, `Response Received: ${response}`);
139 | Logger.debug(agentId, `About to call postProcess on ${agentId} with agentCode length: ${agentCode.length}`);
140 |
141 | try {
142 | await postProcess(agentId, response, agentCode);
143 | Logger.debug(agentId, `postProcess completed successfully`);
144 | } catch (postProcessError) {
145 | Logger.error(agentId, `Error in postProcess: ${postProcessError}`, postProcessError);
146 | }
147 |
148 | } catch (error) {
149 | const errorMessage = error instanceof Error ? error.message : 'Unknown error';
150 | Logger.error(agentId, `Error in agent iteration: ${errorMessage}`, error);
151 | }
152 | }
153 |
154 | /**
155 | * Check if an agent's loop is currently running
156 | */
157 | export function isAgentLoopRunning(agentId: string): boolean {
158 | return activeLoops[agentId]?.isRunning === true;
159 | }
160 |
161 | /**
162 | * Get all currently running agent IDs
163 | */
164 | export function getRunningAgentIds(): string[] {
165 | return Object.entries(activeLoops)
166 | .filter(([_, loop]) => loop.isRunning)
167 | .map(([agentId, _]) => agentId);
168 | }
169 |
170 | /**
171 | * Execute a single test iteration - this is a simplified version for testing in the UI
172 | */
173 | export async function executeTestIteration(
174 | agentId: string,
175 | systemPrompt: string,
176 | modelName: string
177 | ): Promise {
178 | try {
179 | Logger.debug(agentId, `Starting test iteration with model ${modelName}`);
180 |
181 | // Pre-process the prompt (even for tests)
182 | const processedPrompt = await preProcess(agentId, systemPrompt);
183 |
184 | // Send the prompt to Ollama and get response
185 | Logger.info(agentId, `Sending prompt to Ollama (model: ${modelName})`);
186 | const response = await sendPrompt(
187 | serverHost,
188 | serverPort,
189 | modelName,
190 | processedPrompt
191 | );
192 | stopScreenCapture();
193 |
194 | return response;
195 | } catch (error) {
196 | const errorMessage = error instanceof Error ? error.message : 'Unknown error';
197 | Logger.error(agentId, `Error in test iteration: ${errorMessage}`, error);
198 | throw error;
199 | }
200 | }
201 |
--------------------------------------------------------------------------------
/app/src/utils/ollamaServer.ts:
--------------------------------------------------------------------------------
// Result of checkOllamaServer(): reachability of the model server.
interface ServerResponse {
  status: 'online' | 'offline';
  // Human-readable reason; populated only when status is 'offline'.
  error?: string;
}

// A model advertised by the server's /api/tags endpoint.
export interface Model {
  // Model identifier as reported by the server.
  name: string;
  // Taken from details.parameter_size when the server provides it.
  parameterSize?: string;
  // True when the server flags the model as accepting image input.
  multimodal?: boolean;
}

// Result of listModels(): the available models, plus an error message on failure.
interface ModelsResponse {
  models: Model[];
  error?: string;
}
16 |
17 | export async function checkOllamaServer(host: string, port: string): Promise {
18 | try {
19 | const response = await fetch(`https://${host}:${port}/api/tags`, {
20 | method: 'GET',
21 | headers: {
22 | 'Content-Type': 'application/json',
23 | },
24 | });
25 |
26 | if (response.ok) {
27 | return { status: 'online' };
28 | }
29 |
30 | return {
31 | status: 'offline',
32 | error: `Server responded with status ${response.status}`
33 | };
34 | } catch (error) {
35 | return {
36 | status: 'offline',
37 | error: 'Could not connect to Ollama server'
38 | };
39 | }
40 | }
41 |
42 | export async function listModels(host: string, port: string): Promise {
43 | try {
44 | const response = await fetch(`https://${host}:${port}/api/tags`, {
45 | method: 'GET',
46 | headers: { 'Content-Type': 'application/json' },
47 | });
48 |
49 | if (!response.ok) {
50 | return { models: [], error: `Server responded with status ${response.status}` };
51 | }
52 |
53 | const data = await response.json();
54 |
55 | if (!data.models || !Array.isArray(data.models)) {
56 | return { models: [], error: 'Invalid response format from server' };
57 | }
58 |
59 | // Map the server response, EXTRACTING the new flag
60 | const models: Model[] = data.models.map((model: any) => {
61 | return {
62 | name: model.name,
63 | parameterSize: model.details?.parameter_size,
64 | multimodal: model.details?.multimodal ?? false // <-- EXTRACT AND MAP HERE
65 | };
66 | });
67 |
68 | return { models };
69 | } catch (error) {
70 | return {
71 | models: [],
72 | error: `Could not retrieve models: ${error instanceof Error ? error.message : String(error)}`
73 | };
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
/app/src/utils/post-processor.ts:
--------------------------------------------------------------------------------
1 | import { Logger } from './logging';
2 | import { executeJavaScript } from './handlers/javascript';
3 | import { executePython } from './handlers/python';
4 |
5 | /**
6 | * Process response using the JavaScript or Python handler
7 | * based on the presence of a '#python' marker
8 | */
9 | export async function postProcess(agentId: string, response: string, code: string): Promise {
10 | try {
11 | Logger.debug(agentId, 'Starting response post-processing');
12 |
13 | // Check if the code starts with #python
14 | if (code.trim().startsWith('#python')) {
15 | Logger.debug(agentId, 'Detected Python code, using Python handler');
16 | const result = await executePython(response, agentId, code);
17 | if (result) {
18 | Logger.debug(agentId, 'Response processed successfully');
19 | } else {
20 | Logger.debug(agentId, 'Python execution failed');
21 | }
22 |
23 | return result;
24 | } else {
25 | // Default to JavaScript handler (existing behavior)
26 | Logger.debug(agentId, 'Using JavaScript handler');
27 |
28 | const result = await executeJavaScript(response, agentId, code);
29 |
30 | if (result) {
31 | Logger.debug(agentId, 'Response processed quickly');
32 | }
33 |
34 | return result;
35 | }
36 | } catch (error) {
37 | Logger.error(agentId, `Error in post-processing: ${error instanceof Error ? error.message : String(error)}`);
38 | return false;
39 | }
40 | }
41 |
--------------------------------------------------------------------------------
/app/src/utils/screenCapture.ts:
--------------------------------------------------------------------------------
1 | import { createWorker, Worker } from 'tesseract.js';
2 |
// Outcome of an OCR pass: either {success, text, confidence} or {error}.
interface OCRResult {
  success?: boolean;
  text?: string;
  // Tesseract confidence score — presumably 0-100; confirm against tesseract.js docs.
  confidence?: number;
  error?: string;
}

// Keep track of active streams — a single shared getDisplayMedia stream,
// reused across captures until the user stops sharing or stopScreenCapture() runs.
let activeStream: MediaStream | null = null;
12 |
13 | export function isMobileDevice(): boolean {
14 |
15 | if (typeof window === 'undefined') return false;
16 |
17 | const userAgent = navigator.userAgent || navigator.vendor || (window as any).opera;
18 |
19 | // Simple regex check - sufficient for this purpose
20 | return /android|webos|iphone|ipad|ipod|blackberry|iemobile|opera mini/i.test(userAgent.toLowerCase());
21 | }
22 |
23 | // Function to start screen capture and return the stream
24 | export async function startScreenCapture(): Promise {
25 |
26 | if (isMobileDevice()){
27 | throw new Error("Screen capture is unavailable on mobile. This is a security limitation of mobile operating systems, not this application. Please use a desktop computer for this feature.");
28 | }
29 |
30 |
31 | // If we already have an active stream, return it
32 | if (activeStream) {
33 | return activeStream;
34 | }
35 |
36 | try {
37 | console.log('Starting screen capture...');
38 |
39 | const stream = await navigator.mediaDevices.getDisplayMedia({
40 | video: {
41 | cursor: "always"
42 | } as any
43 | });
44 |
45 | // Store the stream for later use
46 | activeStream = stream;
47 |
48 | // Set up a listener for when the stream ends
49 | stream.getVideoTracks()[0].onended = () => {
50 | console.log('Screen sharing stopped by user');
51 | activeStream = null;
52 | };
53 |
54 | return stream;
55 | } catch (error) {
56 | console.error('Screen capture error:', error);
57 |
58 | // Check for Safari's specific error message
59 | const errorMessage = error instanceof Error ? error.message : String(error);
60 | if (errorMessage.includes('getDisplayMedia must be called from a user gesture handler')) {
61 | // Create a custom error with a more helpful message
62 | const enhancedError = new Error(
63 | 'Safari Users: Browser permission needed, please click the Observer icon in the top left corner to enable screen capture'
64 | );
65 | throw enhancedError;
66 | }
67 |
68 | // For other errors, just pass them through
69 | throw error;
70 | }
71 | }
72 |
73 | // Function to stop the active screen capture
74 | export function stopScreenCapture(): void {
75 | if (activeStream) {
76 | activeStream.getTracks().forEach(track => track.stop());
77 | activeStream = null;
78 | console.log('Screen capture stopped');
79 | }
80 | }
81 |
// Function to capture a frame from the active stream and perform OCR.
// Grabs one video frame from the shared `activeStream`, draws it to an
// off-screen canvas, and runs Tesseract OCR over the PNG snapshot.
// NOTE(review): the return type argument appears lost in extraction —
// presumably Promise<OCRResult>; confirm against version control.
export async function captureFrameAndOCR(): Promise {
  // If no active stream, try to start one
  if (!activeStream) {
    const stream = await startScreenCapture();
    if (!stream) {
      return { error: 'Failed to start screen capture' };
    }
  }

  try {
    // Create video element to receive the stream
    const video = document.createElement('video');
    video.srcObject = activeStream;

    // Return a promise that resolves when video frame is processed.
    // NOTE(review): if onloadedmetadata never fires this promise never
    // settles, and the temporary <video> element is never torn down.
    return new Promise((resolve) => {
      video.onloadedmetadata = async () => {
        video.play();

        // Create canvas to capture video frame
        const canvas = document.createElement('canvas');
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;

        // Draw video frame to canvas
        const ctx = canvas.getContext('2d');
        if (ctx) {
          ctx.drawImage(video, 0, 0, canvas.width, canvas.height);

          // Get image data as base64 (payload only, data-URI prefix stripped)
          const imageData = canvas.toDataURL('image/png').split(',')[1];

          // Perform OCR on the captured image
          const ocrResult = await performOCR(imageData);
          resolve(ocrResult);
        } else {
          resolve({ error: 'Failed to get canvas context' });
        }
      };
    });
  } catch (error) {
    console.error('Frame capture error:', error);
    return { error: `Failed to capture frame: ${error instanceof Error ? error.message : String(error)}` };
  }
}
128 |
129 | // Legacy function for backward compatibility
130 | export async function captureScreenAndOCR(): Promise {
131 | const stream = await startScreenCapture();
132 | if (!stream) {
133 | return { error: 'Failed to start screen capture' };
134 | }
135 | return captureFrameAndOCR();
136 | }
137 |
138 | // Function to perform OCR on image data
139 | async function performOCR(imageData: string): Promise {
140 | console.log('Starting OCR processing...');
141 |
142 | try {
143 | // Initialize worker
144 | const worker: Worker = await createWorker('eng', 1, {
145 | workerPath: 'https://unpkg.com/tesseract.js@6.0.0/dist/worker.min.js',
146 | langPath: 'https://tessdata.projectnaptha.com/4.0.0',
147 | corePath: 'https://unpkg.com/tesseract.js-core@4.0.2/tesseract-core.wasm.js',
148 | logger: m => console.log('[Tesseract]', m)
149 | });
150 |
151 | // Recognize text
152 | const result = await worker.recognize(`data:image/png;base64,${imageData}`);
153 |
154 | // Terminate worker
155 | await worker.terminate();
156 |
157 | console.log('OCR processing complete');
158 | return {
159 | success: true,
160 | text: result.data.text,
161 | confidence: result.data.confidence
162 | };
163 | } catch (error) {
164 | console.error('OCR processing error:', error);
165 | return {
166 | error: `OCR processing failed: ${error instanceof Error ? error.message : String(error)}`
167 | };
168 | }
169 | }
170 |
// Function to capture a frame from the active stream and return the base64
// PNG payload (without the data-URI prefix), or null on failure.
// NOTE(review): the return type argument appears lost in extraction —
// presumably Promise<string | null>; confirm against version control.
export async function captureScreenImage(): Promise {
  // If no active stream, try to start one
  if (!activeStream) {
    const stream = await startScreenCapture();
    if (!stream) {
      console.error('Failed to start screen capture');
      return null;
    }
  }

  try {
    // Create video element to receive the stream
    const video = document.createElement('video');
    video.srcObject = activeStream;

    // Return a promise that resolves when video frame is processed.
    // NOTE(review): if onloadedmetadata never fires this promise never settles.
    return new Promise((resolve) => {
      video.onloadedmetadata = async () => {
        video.play();

        // Create canvas to capture video frame
        const canvas = document.createElement('canvas');
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;

        // Draw video frame to canvas
        const ctx = canvas.getContext('2d');
        if (ctx) {
          ctx.drawImage(video, 0, 0, canvas.width, canvas.height);

          // Get image data as base64 (without the data:image/png;base64, prefix)
          const base64Image = canvas.toDataURL('image/png').split(',')[1];
          resolve(base64Image);
        } else {
          console.error('Failed to get canvas context');
          resolve(null);
        }
      };
    });
  } catch (error) {
    console.error('Frame capture error:', error);
    return null;
  }
}
216 |
217 |
--------------------------------------------------------------------------------
/app/src/utils/sendApi.ts:
--------------------------------------------------------------------------------
1 | // src/utils/sendApi.ts
2 | import { PreProcessorResult } from './pre-processor';
3 |
4 | /**
5 | * Send a prompt to the API server using OpenAI-compatible v1 chat completions endpoint
6 | * @param host API server host
7 | * @param port API server port
8 | * @param modelName Name of the model to use
9 | * @param preprocessResult The preprocessed result containing prompt and optional images
10 | * @returns The model's response text
11 | */
12 | export async function sendPrompt(
13 | host: string,
14 | port: string,
15 | modelName: string,
16 | preprocessResult: PreProcessorResult
17 | ): Promise {
18 | try {
19 | const url = `https://${host}:${port}/v1/chat/completions`;
20 |
21 | const headers: Record = {
22 | 'Content-Type': 'application/json',
23 | };
24 |
25 | if (host === 'api.observer-ai.com') {
26 | const authCode = localStorage.getItem('observer_auth_code');
27 | if (authCode) {
28 | headers['X-Observer-Auth-Code'] = authCode;
29 | }
30 | }
31 |
32 | let content: any = preprocessResult.modifiedPrompt;
33 | const hasImages = preprocessResult.images && preprocessResult.images.length > 0;
34 |
35 | if (hasImages) {
36 | // Ensure preprocessResult.images contains an array of base64 strings
37 | content = [
38 | { type: "text", text: preprocessResult.modifiedPrompt },
39 | // Add the non-null assertion operator (!) after images
40 | ...preprocessResult.images!.map(imageBase64Data => ({ // Iterate through base64 strings
41 | type: "image_url",
42 | image_url: { // image_url is an object
43 | url: `data:image/png;base64,${imageBase64Data}` // url's value is the full data URI
44 | }
45 | }))
46 | ];
47 | }
48 |
49 | const requestBody = JSON.stringify({
50 | model: modelName,
51 | messages: [
52 | {
53 | role: "user",
54 | content: content // content will be a string or an array of parts
55 | }
56 | ],
57 | stream: false
58 | });
59 |
60 | const response = await fetch(url, {
61 | method: 'POST',
62 | headers,
63 | body: requestBody,
64 | });
65 |
66 | if (!response.ok) {
67 | const errorBody = await response.text(); // Attempt to read error body
68 | console.error(`API Error Response Body: ${errorBody}`);
69 | throw new Error(`API error: ${response.status} ${response.statusText}`);
70 | }
71 |
72 | const data = await response.json();
73 |
74 | // Basic check for expected response structure
75 | if (!data.choices || !data.choices[0] || !data.choices[0].message || typeof data.choices[0].message.content === 'undefined') {
76 | console.error('Unexpected API response structure:', data);
77 | throw new Error('Unexpected API response structure');
78 | }
79 |
80 | return data.choices[0].message.content;
81 |
82 | } catch (error) {
83 | console.error('Error calling API:', error);
84 | // Re-throw the error so the calling function knows something went wrong
85 | throw error;
86 | }
87 | }
88 |
--------------------------------------------------------------------------------
/app/src/utils/speechInputManager.ts:
--------------------------------------------------------------------------------
1 | // src/utils/SpeechInputManager.ts
2 | import { Logger } from './logging';
3 |
// Vendor-prefixed SpeechRecognition constructor (Chrome exposes the webkit
// prefix); undefined when the browser does not support the API.
const BrowserSpeechRecognition = (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition;

// Singleton recognizer shared by the module; null until first start.
let recognizer: SpeechRecognition | null = null;
let isRecognizerActive = false;

// This will store the fully finalized transcript parts
let finalizedTranscript = "";
// This will store the current, potentially updating, interim part of an utterance
let currentUtteranceInterim = "";

// Set by stopRecognitionAndClear() so the onend handler does not auto-restart.
let explicitlyStopped = false;
15 |
/**
 * Ensure the shared browser SpeechRecognition instance exists and is
 * listening. Safe to call repeatedly: returns early when already active.
 * Results accumulate into the module-level `finalizedTranscript` and
 * `currentUtteranceInterim`, read back via getCurrentTranscript().
 * NOTE(review): the return type argument appears lost in extraction —
 * presumably Promise<void>; confirm against version control.
 * @throws when the API is unsupported or the recognizer cannot start
 */
export async function ensureRecognitionStarted(agentId: string): Promise {
  if (!BrowserSpeechRecognition) {
    const msg = "Speech Recognition API not supported.";
    Logger.error(agentId, `SpeechInputManager: ${msg}`);
    throw new Error(`[${msg}]`);
  }

  if (isRecognizerActive && recognizer) {
    Logger.debug(agentId, "SpeechInputManager: Recognition already active.");
    return;
  }

  explicitlyStopped = false;
  // Do NOT reset finalizedTranscript here. Reset current interim part.
  currentUtteranceInterim = "";

  if (!recognizer) {
    Logger.debug(agentId, "SpeechInputManager: Initializing new recognizer instance.");
    try {
      recognizer = new BrowserSpeechRecognition();
    } catch (e: any) { // Keeping 'any' here for simplicity if specific error types from constructor are unknown/varied
      recognizer = null;
      isRecognizerActive = false;
      Logger.error(agentId, `SpeechInputManager: CRITICAL - Failed to instantiate: ${e instanceof Error ? e.message : String(e)}`, e);
      throw new Error(`[SpeechInputManager: CRITICAL - Failed to instantiate: ${e instanceof Error ? e.message : String(e)}]`);
    }
  }

  if (!recognizer) {
    const msg = "SpeechInputManager: Critical logic error - recognizer is null after instantiation attempt.";
    Logger.error(agentId, msg);
    throw new Error(`[${msg}]`);
  }

  Logger.debug(agentId, "SpeechInputManager: Configuring and starting/restarting instance.");
  recognizer.continuous = true;
  recognizer.interimResults = true; // <<< KEY CHANGE: Enable interim results
  recognizer.lang = 'en-US';

  // Fold final parts into finalizedTranscript, keep the newest interim
  // fragment in currentUtteranceInterim.
  recognizer.onresult = (event: SpeechRecognitionEvent) => {
    let latestInterimForThisEvent = ""; // Holds the latest interim from *this specific event*

    for (let i = event.resultIndex; i < event.results.length; ++i) {
      const transcriptPart = event.results[i][0].transcript;
      if (event.results[i].isFinal) {
        const trimmedFinal = transcriptPart.trim();
        if (trimmedFinal) {
          finalizedTranscript += (finalizedTranscript ? " " : "") + trimmedFinal; // Append with a space if not empty
          Logger.debug(agentId, `SpeechInputManager: Received final part: "${trimmedFinal}"`);
        }
        currentUtteranceInterim = ""; // Current utterance is now final, clear interim
      } else {
        latestInterimForThisEvent += transcriptPart; // Accumulate all interim parts in this event
      }
    }

    // Update currentUtteranceInterim only if there was new interim content in this event
    if (latestInterimForThisEvent.trim()) {
      currentUtteranceInterim = latestInterimForThisEvent.trim();
      // Logger.debug(agentId, `SpeechInputManager: Updated interim: "${currentUtteranceInterim}"`); // Can be noisy
    }
  };

  // Permission-style errors are unrecoverable: drop the instance entirely.
  recognizer.onerror = (event: SpeechRecognitionErrorEvent) => {
    isRecognizerActive = false;
    Logger.error(agentId, `SpeechInputManager: Error: ${event.error}`, event.message);
    if (['not-allowed', 'service-not-allowed'].includes(event.error)) {
      Logger.warn(agentId, "SpeechInputManager: Unrecoverable error. Nullifying recognizer.");
      if (recognizer) { try { recognizer.abort(); } catch (e) { /* ignore */ } }
      recognizer = null;
      explicitlyStopped = true;
    }
    // Do not clear currentUtteranceInterim here; onend might attempt to finalize it.
  };

  // Auto-restart on unexpected end (e.g. silence timeout), unless stopped on purpose.
  recognizer.onend = () => {
    const previouslyActive = isRecognizerActive;
    isRecognizerActive = false;
    Logger.debug(agentId, `SpeechInputManager: Recognition ended. Was active: ${previouslyActive}. Explicitly: ${explicitlyStopped}.`);

    // If recognition ended unexpectedly and there's a lingering interim part,
    // consider it final. This handles cases like silent timeouts.
    if (currentUtteranceInterim.trim() && !explicitlyStopped) {
      Logger.debug(agentId, `SpeechInputManager: Finalizing lingering interim on unexpected end: "${currentUtteranceInterim}"`);
      finalizedTranscript += (finalizedTranscript ? " " : "") + currentUtteranceInterim.trim();
      currentUtteranceInterim = "";
    }

    if (previouslyActive && !explicitlyStopped && recognizer) {
      Logger.debug(agentId, "SpeechInputManager: Attempting to restart recognition.");
      try {
        recognizer.start();
        isRecognizerActive = true;
        Logger.debug(agentId, "SpeechInputManager: Restarted successfully.");
      } catch (e: any) { // Keeping 'any' for simplicity
        Logger.error(agentId, `SpeechInputManager: Failed to restart after onend: ${e instanceof Error ? e.message : String(e)}`, e);
      }
    }
  };

  try {
    recognizer.start();
    isRecognizerActive = true;
    Logger.debug(agentId, "SpeechInputManager: Started successfully.");
  } catch (e: any) { // Keeping 'any' for simplicity
    isRecognizerActive = false;
    const errorMessage = e instanceof Error ? e.message : String(e);
    Logger.error(agentId, `SpeechInputManager: Failed to start: ${errorMessage}`, e);
    if (e instanceof Error && e.name === 'InvalidStateError' && recognizer) {
      Logger.warn(agentId, "SpeechInputManager: Start failed with InvalidStateError, assuming already active.");
      isRecognizerActive = true;
    } else {
      if (recognizer) { try { recognizer.abort(); } catch (abortErr) { /* ignore */ } }
      recognizer = null;
      throw new Error(`[SpeechInputManager: Failed to start: ${errorMessage}]`);
    }
  }
}
134 |
135 | export function stopRecognitionAndClear(agentId: string): void {
136 | explicitlyStopped = true;
137 | if (recognizer) {
138 | Logger.debug(agentId, "SpeechInputManager: Explicitly stopping recognition.");
139 | // Before stopping, if there's a lingering interim transcript, finalize it.
140 | if (currentUtteranceInterim.trim()) {
141 | Logger.debug(agentId, `SpeechInputManager: Finalizing lingering interim on stop: "${currentUtteranceInterim}"`);
142 | finalizedTranscript += (finalizedTranscript ? " " : "") + currentUtteranceInterim.trim();
143 | }
144 |
145 | recognizer.onend = null; // Prevent auto-restart
146 | if (isRecognizerActive || (recognizer as any).readyState === 1) { // readyState 1 is 'listening'
147 | try {
148 | recognizer.stop();
149 | } catch (e) { // FIXED: Type check for error
150 | const stopErrorMessage = e instanceof Error ? e.message : String(e);
151 | Logger.warn(agentId, `Error during recognizer.stop(): ${stopErrorMessage}`);
152 | }
153 | }
154 | recognizer = null;
155 | }
156 | isRecognizerActive = false;
157 | finalizedTranscript = ""; // CLEAR THE FINALIZED TRANSCRIPT
158 | currentUtteranceInterim = ""; // CLEAR THE INTERIM TRANSCRIPT
159 | Logger.debug(agentId, "SpeechInputManager: Recognition stopped and transcript cleared.");
160 | }
161 |
162 | /**
163 | * The "get function" for $MICROPHONE.
164 | * Returns the combination of finalized speech and the current interim utterance.
165 | */
166 | export function getCurrentTranscript(agentId: string): string { // FIXED: agentId is now used
167 | let combined = finalizedTranscript;
168 | if (currentUtteranceInterim.trim()) {
169 | combined += (combined ? " " : "") + currentUtteranceInterim.trim();
170 | }
171 | // Using agentId for logging, uncomment if needed for detailed debugging
172 | Logger.debug(agentId, `SpeechInputManager: Returning current transcript (length: ${combined.trim().length}): "${combined.trim().substring(0,100)}${combined.trim().length > 100 ? '...' : ''}"`);
173 | return combined.trim();
174 | }
175 |
--------------------------------------------------------------------------------
/app/src/utils/streamApi.ts:
--------------------------------------------------------------------------------
1 | // src/utils/streamApi.ts
2 |
3 | /**
4 | * Stream a prompt to the Gemini API and receive chunks of the response
5 | * @param apiKey Your Gemini API key
6 | * @param modelName Name of the model to use (e.g., "gemini-2.0-flash", "gemma-3-27b-it")
7 | * @param prompt The prompt text to send
8 | * @param onChunk Callback function that receives each chunk of text as it arrives
9 | * @param onComplete Optional callback function that is called when streaming completes
10 | * @param onError Optional callback function that is called if an error occurs
11 | * @returns A function that can be called to abort the stream
12 | */
13 | export function streamPrompt(
14 | apiKey: string,
15 | modelName: string,
16 | prompt: string,
17 | onChunk: (text: string) => void,
18 | onComplete?: () => void,
19 | onError?: (error: Error) => void
20 | ): () => void {
21 | // Create an AbortController to allow canceling the stream
22 | const abortController = new AbortController();
23 | const signal = abortController.signal;
24 |
25 | // Start the streaming process
26 | (async () => {
27 | try {
28 | // Construct the URL for the Gemini API streaming endpoint
29 | const url = `https://generativelanguage.googleapis.com/v1beta/models/${modelName}:streamGenerateContent?alt=sse&key=${apiKey}`;
30 |
31 | // Prepare the request body
32 | const requestBody = JSON.stringify({
33 | contents: [
34 | {
35 | parts: [
36 | {
37 | text: prompt
38 | }
39 | ]
40 | }
41 | ]
42 | });
43 |
44 | // Make the API request
45 | const response = await fetch(url, {
46 | method: 'POST',
47 | headers: {
48 | 'Content-Type': 'application/json',
49 | },
50 | body: requestBody,
51 | signal
52 | });
53 |
54 | if (!response.ok) {
55 | throw new Error(`Gemini API error: ${response.status} ${response.statusText}`);
56 | }
57 |
58 | if (!response.body) {
59 | throw new Error('Response body is null');
60 | }
61 |
62 | // Process the stream using the ReadableStream API
63 | const reader = response.body.getReader();
64 | const decoder = new TextDecoder();
65 |
66 | while (true) {
67 | const { done, value } = await reader.read();
68 | if (done) break;
69 |
70 | // Decode the chunk
71 | const chunk = decoder.decode(value, { stream: true });
72 |
73 | // Parse Server-Sent Events (SSE)
74 | // Format is: "data: {json}\n\n"
75 | const lines = chunk.split('\n\n');
76 |
77 | for (const line of lines) {
78 | // Skip empty lines
79 | if (!line.trim()) continue;
80 |
81 | // Process data lines
82 | if (line.startsWith('data: ')) {
83 | try {
84 | // Remove 'data: ' prefix
85 | const jsonStr = line.substring(6);
86 |
87 | // Skip end markers
88 | if (jsonStr === '[DONE]') continue;
89 |
90 | // Parse the JSON response
91 | const parsed = JSON.parse(jsonStr);
92 |
93 | // Extract text from the response structure
94 | if (parsed.candidates &&
95 | parsed.candidates[0] &&
96 | parsed.candidates[0].content &&
97 | parsed.candidates[0].content.parts &&
98 | parsed.candidates[0].content.parts[0] &&
99 | parsed.candidates[0].content.parts[0].text) {
100 | onChunk(parsed.candidates[0].content.parts[0].text);
101 | }
102 | } catch (e) {
103 | // If parsing fails, just continue
104 | console.error('Error parsing SSE data:', e);
105 | continue;
106 | }
107 | }
108 | }
109 | }
110 |
111 | // Call onComplete when done
112 | if (onComplete) {
113 | onComplete();
114 | }
115 | } catch (error) {
116 | // Don't report errors if the stream was intentionally aborted
117 | if (signal.aborted) {
118 | return;
119 | }
120 |
121 | // Handle errors
122 | if (onError) {
123 | onError(error instanceof Error ? error : new Error(String(error)));
124 | }
125 | }
126 | })();
127 |
128 | // Return an abort function
129 | return () => {
130 | abortController.abort();
131 | };
132 | }
133 |
--------------------------------------------------------------------------------
/app/src/utils/test_system_prompt.ts:
--------------------------------------------------------------------------------
// test_system_prompt.ts
// Manual smoke test: prints the generated system prompt for eyeball review.
// Not an automated test — run it directly and inspect the console output.

// Adjust the path if this test file is not in the project root directory
// (the .ts extension import relies on tsconfig's allowImportingTsExtensions).
import getSystemPrompt from './system_prompt.ts';

console.log("--- Testing getSystemPrompt() ---");
console.log("Running getSystemPrompt to check the output string...");

// Call the function to get the prompt string
const promptString = getSystemPrompt();

console.log("\n--- Generated System Prompt Start ---\n");
// Print the resulting string to the console
console.log(promptString);
console.log("\n--- Generated System Prompt End ---\n");

console.log("Test complete. Review the output above.");
--------------------------------------------------------------------------------
/app/src/web/index.tsx:
--------------------------------------------------------------------------------
import React from 'react';
import ReactDOM from 'react-dom/client';
import App from './App';
import '@/index.css';

// Web entry point: mount the React app on #root.
// NOTE(review): the JSX argument to render() appears to have been lost in
// extraction (presumably <React.StrictMode><App /></React.StrictMode>) —
// as written render() receives no element; confirm against version control.
ReactDOM.createRoot(document.getElementById('root')!).render(
  
  
);
11 |
--------------------------------------------------------------------------------
/app/src/web/main.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom/client';
3 | import App from './App';
4 | import './index';
5 |
6 | ReactDOM.createRoot(document.getElementById('root')!).render(
7 |
8 |
9 |
10 | );
11 |
--------------------------------------------------------------------------------
/app/tailwind.config.js:
--------------------------------------------------------------------------------
/** @type {import('tailwindcss').Config} */
export default {
  // Files Tailwind scans for class names when generating the CSS bundle.
  content: [
    "./index.html",
    "./src/**/*.{js,ts,jsx,tsx}",
  ],
  theme: {
    // No custom design tokens yet; add overrides under `extend` as needed.
    extend: {},
  },
  plugins: [],
}
12 |
--------------------------------------------------------------------------------
/app/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "types": [
4 | "node",
5 | "dom-speech-recognition"
6 | ],
7 | "target": "ES2020",
8 | "useDefineForClassFields": true,
9 | "lib": ["ES2020", "DOM", "DOM.Iterable"],
10 | "module": "ESNext",
11 | "skipLibCheck": true,
12 | "moduleResolution": "bundler",
13 | "allowImportingTsExtensions": true,
14 | "resolveJsonModule": true,
15 | "isolatedModules": true,
16 | "noEmit": true,
17 | "jsx": "react-jsx",
18 | "strict": true,
19 | "noUnusedLocals": true,
20 | "noUnusedParameters": true,
21 | "noFallthroughCasesInSwitch": true,
22 | "baseUrl": ".",
23 | "paths": {
24 | "@/*": ["src/*"],
25 | "@components/*": ["src/components/*"],
26 | "@utils/*": ["src/utils/*"],
27 | "@web/*": ["src/web/*"],
28 | "@desktop/*": ["src/desktop/*"]
29 | }
30 | },
31 | "include": ["src"],
32 | "references": [{ "path": "./tsconfig.node.json" }]
33 | }
34 |
--------------------------------------------------------------------------------
/app/tsconfig.node.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "composite": true,
4 | "skipLibCheck": true,
5 | "module": "ESNext",
6 | "moduleResolution": "bundler",
7 | "allowSyntheticDefaultImports": true
8 | },
9 | "include": ["vite.config.ts"]
10 | }
11 |
--------------------------------------------------------------------------------
/app/vite.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'vite';
2 | import react from '@vitejs/plugin-react';
3 | import { visualizer } from 'rollup-plugin-visualizer';
4 | import { resolve } from 'path';
5 |
6 | export default defineConfig({
7 | plugins: [
8 | react(),
9 | //visualizer({
10 | // open: true, // Open the visualization after build
11 | // gzipSize: true,
12 | // brotliSize: true
13 | //})
14 | ],
15 | server: {
16 | host: '0.0.0.0',
17 | port: 3001, // Different from desktop and website
18 | },
19 | resolve: {
20 | alias: {
21 | '@': resolve(__dirname, './src'),
22 | '@components': resolve(__dirname, './src/components'),
23 | '@utils': resolve(__dirname, './src/utils'),
24 | '@web': resolve(__dirname, './src/web'),
25 | '@desktop': resolve(__dirname, './src/desktop')
26 | }
27 | },
28 | build: {
29 | outDir: 'dist',
30 | sourcemap: true,
31 | chunkSizeWarningLimit: 800, // Increase warning threshold
32 | rollupOptions: {
33 | output: {
34 | manualChunks(id) {
35 | // Vendor chunks
36 | if (id.includes('node_modules')) {
37 | if (id.includes('react') || id.includes('scheduler')) {
38 | return 'vendor-react';
39 | }
40 | if (id.includes('jupyterlab')) {
41 | return 'vendor-jupyter';
42 | }
43 | return 'vendor'; // Other dependencies
44 | }
45 |
46 | // App chunks
47 | if (id.includes('/src/components/')) {
48 | return 'app-components';
49 | }
50 | if (id.includes('/src/utils/')) {
51 | return 'app-utils';
52 | }
53 | }
54 | }
55 | }
56 | },
57 | });
58 |
--------------------------------------------------------------------------------
/assets/ObserverAgent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Roy3838/Observer/8e724b0df38b73bbfdf25986a9ff19b9fcd5dc0c/assets/ObserverAgent.png
--------------------------------------------------------------------------------
/community/api.py:
--------------------------------------------------------------------------------
1 | from fastapi import FastAPI
2 | from fastapi.middleware.cors import CORSMiddleware
3 | import uvicorn
4 | import argparse
5 | import logging
6 | import os
7 | import socket
8 | import subprocess
9 | from pathlib import Path
10 |
11 | # Import routers from our modules
12 | from marketplace import marketplace_router
13 | from compute import compute_router
14 |
# Setup logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger('api-server')

# Setup FastAPI app
app = FastAPI()

# Enable CORS
# NOTE(review): wildcard origins combined with allow_credentials=True is very
# permissive — confirm this is intended for production deployments.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Include routers - without prefixes to maintain original URL structure
app.include_router(marketplace_router)
# Mount compute router last since it has a catch-all route
app.include_router(compute_router)

# Root path to check if service is running
@app.get("/")
async def root():
    # Simple liveness probe for the whole API server.
    return {"status": "API server is running"}
44 |
# Generate SSL certificates
def prepare_certificates(cert_dir):
    """Prepare SSL certificates"""
    # The self-signed pair lives inside cert_dir as cert.pem / key.pem.
    cert_path = Path(cert_dir) / "cert.pem"
    key_path = Path(cert_dir) / "key.pem"

    # Create certificate directory if it doesn't exist
    os.makedirs(cert_dir, exist_ok=True)

    # Reuse an existing pair when both files are already present.
    if cert_path.exists() and key_path.exists():
        logger.info(f"Using existing certificates from {cert_dir}")
        return cert_path, key_path

    logger.info("Generating SSL certificates...")
    openssl_cmd = [
        "openssl", "req", "-x509",
        "-newkey", "rsa:4096",
        "-sha256",
        "-days", "365",
        "-nodes",
        "-keyout", str(key_path),
        "-out", str(cert_path),
        "-subj", "/CN=localhost"
    ]
    try:
        subprocess.run(openssl_cmd, check=True, capture_output=True)
        logger.info(f"Certificates generated at {cert_dir}")
    except subprocess.CalledProcessError as e:
        logger.error(f"Failed to generate certificates: {e.stderr.decode() if hasattr(e, 'stderr') else str(e)}")
        raise RuntimeError("Failed to generate SSL certificates")

    return cert_path, key_path
77 |
def get_local_ip():
    """Get the local IP address for network access.

    Returns the IP of the interface that would route to the internet, or
    "0.0.0.0" when it cannot be determined.
    """
    try:
        # Connecting a UDP socket performs a route lookup only; no packets
        # are actually sent to 8.8.8.8.
        # Fix: use a context manager so the socket is closed even when
        # connect()/getsockname() raises (the original leaked it on error).
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(('8.8.8.8', 80))
            return s.getsockname()[0]
    except Exception as e:
        logger.warning(f"Could not determine local IP: {e}")
        return "0.0.0.0"
91 |
if __name__ == "__main__":
    # CLI options: port, certificate locations, debug logging, and the
    # upstream URL that the compute proxy forwards to.
    parser = argparse.ArgumentParser(description="Observer AI API Server")
    parser.add_argument("--port", type=int, default=8000, help="Port to run on")
    parser.add_argument("--cert-dir", default="./certs", help="Certificate directory")
    parser.add_argument("--cert-file", help="Path to certificate file (if not provided, self-signed will be used)")
    parser.add_argument("--key-file", help="Path to key file (if not provided, self-signed will be used)")
    parser.add_argument("--debug", action="store_true", help="Enable debug logging")
    parser.add_argument("--proxy-target", default="https://compute.observer-ai.com", help="Target service URL for proxy")

    args = parser.parse_args()

    if args.debug:
        logger.setLevel(logging.DEBUG)

    # Set target URL for compute proxy
    # presumably the compute router reads this env var at request time — verify
    os.environ["AI_SERVICE_URL"] = args.proxy_target

    # Set up SSL
    if args.cert_file and args.key_file:
        # Use provided certificates
        cert_path = args.cert_file
        key_path = args.key_file
        logger.info(f"Using provided certificates: {cert_path}, {key_path}")
    else:
        # Self-sign certificates
        cert_path, key_path = prepare_certificates(args.cert_dir)

    # Get local IP for network access
    local_ip = get_local_ip()

    # Print server info
    print("\n\033[1m OBSERVER AI API SERVER \033[0m ready")
    print(f" ➜ \033[36mLocal: \033[0mhttps://localhost:{args.port}/")
    print(f" ➜ \033[36mNetwork: \033[0mhttps://{local_ip}:{args.port}/")
    print(f"\n Marketplace routes: https://localhost:{args.port}/agents")
    print(f" Compute quota: https://localhost:{args.port}/quota")
    print(f" Proxy forwarding to: {args.proxy_target}")

    # Run with SSL context
    # Binds on all interfaces; TLS uses the certificates resolved above.
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=args.port,
        ssl_certfile=str(cert_path),
        ssl_keyfile=str(key_path)
    )
138 |
--------------------------------------------------------------------------------
/community/api_handlers.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import json
4 | from datetime import datetime
5 | from pathlib import Path
6 | import logging
7 | import httpx # Import httpx here if base class needs it, or just in subclasses
8 |
9 | logger = logging.getLogger("api_handlers")
10 | # Basic logging setup if not configured elsewhere
11 | # logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - [%(name)s] - %(message)s')
12 |
# Global registry for API handlers
API_HANDLERS = {}

class HandlerError(Exception):
    """Handler-level failure carrying an HTTP status code for the response."""

    def __init__(self, message, status_code=500):
        self.status_code = status_code
        super().__init__(message)

class ConfigError(HandlerError):
    """Server-side configuration problem (e.g. a missing API key).

    Always maps to HTTP 500 because the fault lies with the deployment,
    not the client.
    """

    def __init__(self, message):
        super().__init__(message, status_code=500)

class BackendAPIError(HandlerError):
    """Failure reported by the downstream AI API; defaults to 502 Bad Gateway."""

    def __init__(self, message, status_code=502):
        super().__init__(message, status_code)
31 |
32 |
class BaseAPIHandler:
    """Base class for asynchronous API handlers.

    Instantiating a subclass registers it in the module-level API_HANDLERS
    registry under its handler name.
    """

    def __init__(self, name):
        self.name = name
        # Supported models, entries shaped like
        # { "name": "model-id", "parameters": "optional", ... }
        self.models = []
        API_HANDLERS[name] = self
        logger.info("Registered API handler: '%s'", name)

    def get_models(self):
        """Return the list of models supported by this handler."""
        return self.models

    async def handle_request(self, request_data: dict) -> dict:
        """
        Process the request asynchronously.
        Subclasses MUST override this method.

        Args:
            request_data: The parsed JSON request data (dictionary).

        Returns:
            A dictionary representing the successful JSON response payload.

        Raises:
            ConfigError: If configuration (like API key) is missing.
            BackendAPIError: If the downstream API call fails.
            ValueError: If the request_data is invalid.
            NotImplementedError: If the subclass doesn't implement this.
            Exception: For other unexpected errors.
        """
        raise NotImplementedError(f"Handler '{self.name}' must implement handle_request")

    def log_conversation(self, prompt, response, model_name, images_count=0):
        """Append one conversation record as a JSON line under ./logs.

        Kept synchronous on purpose: brief blocking file IO is acceptable here.
        """
        log_dir = Path("./logs")
        log_dir.mkdir(exist_ok=True)
        log_file = log_dir / f"{self.name}_conversations.log"
        # Only primitive responses are logged verbatim; anything else is
        # replaced with a placeholder to keep the log line serializable.
        serializable = isinstance(response, (str, int, float, bool, type(None)))
        record = {
            "timestamp": datetime.now().isoformat(),
            "model": model_name,
            "prompt": prompt,  # assumes prompt is simple text — confirm at call sites
            "response": response if serializable else "[Object]",
            "images_count": images_count
        }
        try:
            with open(log_file, "a", encoding="utf-8") as f:
                f.write(json.dumps(record) + "\n")
        except Exception as log_e:
            logger.error(f"Failed to write conversation log for {self.name}: {log_e}")
91 |
92 |
# --- Import and Instantiate REWRITTEN Handlers ---
# Ensure these files contain the rewritten async versions below
# These imports sit at the bottom of the module deliberately: each handler
# module imports BaseAPIHandler from this file, so importing them earlier
# would create a circular-import failure.
from gemini_handler import GeminiAPIHandler
from openrouter_handler import OpenRouterAPIHandler
from fireworks_handler import FireworksAPIHandler

# Instantiate and register the handlers.
# The __init__ method in BaseAPIHandler adds them to API_HANDLERS
gemini_handler = GeminiAPIHandler()
fireworks_handler = FireworksAPIHandler()
openrouter_handler = OpenRouterAPIHandler()

logger.info("Initialized API Handlers. Available: %s", list(API_HANDLERS.keys()))
# --- End Handler Instantiation ---
107 |
--------------------------------------------------------------------------------
/community/fireworks_handler.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import json
4 | import logging
5 | import httpx # Use httpx for async requests
6 |
7 | # Import base class and custom exceptions
8 | from api_handlers import BaseAPIHandler, ConfigError, BackendAPIError, HandlerError
9 |
10 | logger = logging.getLogger("fireworks_handler")
11 |
class FireworksAPIHandler(BaseAPIHandler):
    """
    Asynchronous handler for Fireworks AI API requests using httpx,
    with mapping for user-friendly model names.
    """
    FIREWORKS_API_URL = "https://api.fireworks.ai/inference/v1/chat/completions"

    def __init__(self):
        super().__init__("fireworks")

        # --- Model Mapping ---
        # Dictionary mapping display names to actual model IDs and parameters
        self.model_map = {
            # Simple mapping with just two models
            "llama4-scout": {
                "model_id": "accounts/fireworks/models/llama4-scout-instruct-basic",
                "parameters": "109B",
                "multimodal": True
            },
            "llama4-maverick": {
                "model_id": "accounts/fireworks/models/llama4-maverick-instruct-basic",
                "parameters": "400B",
                "multimodal": True
            }
        }

        # Define supported models for display using the pretty names from the map
        self.models = [
            {"name": display_name, "parameters": model_info.get("parameters", "N/A"),
             "multimodal": model_info.get("multimodal", False)}
            for display_name, model_info in self.model_map.items()
        ]
        # --- End Model Mapping ---

        self.api_key = os.environ.get("FIREWORKS_API_KEY")
        if not self.api_key:
            logger.error("FIREWORKS_API_KEY environment variable not set. Fireworks handler will fail.")

        # Log the DISPLAY names that will be shown to the user
        logger.info("FireworksAPIHandler registered display models: %s", [m["name"] for m in self.models])

        # Base headers
        # Authorization is rebuilt per-request in handle_request, so an empty
        # value here (key missing at startup) is not fatal by itself.
        self.base_headers = {
            "Authorization": f"Bearer {self.api_key}" if self.api_key else "",
            "Content-Type": "application/json",
            "Accept": "application/json"
        }

    async def handle_request(self, request_data: dict) -> dict:
        """
        Process a /v1/chat/completions request asynchronously via Fireworks AI.
        Translates display model name to actual Fireworks model ID.

        Raises:
            ConfigError: if the API key is missing or the model map is broken.
            ValueError: if 'model' is absent or not a known display name.
            BackendAPIError: if the Fireworks API call fails.
            HandlerError: for any other unexpected error during the call.
        """
        if not self.api_key:
            raise ConfigError("FIREWORKS_API_KEY is not configured on the server.")

        # --- Get Display Name and Translate to Actual Model ID ---
        display_model_name = request_data.get("model")
        if not display_model_name:
            raise ValueError("Request data must include a 'model' field (using the display name).")

        # Look up the display name in our map
        model_info = self.model_map.get(display_model_name)
        if not model_info:
            # If the display name isn't found, the model is unsupported by this mapping
            logger.warning(f"Received request for unmapped Fireworks model display name: {display_model_name}")
            raise ValueError(f"Model display name '{display_model_name}' is not recognized or supported.")

        actual_model_id = model_info.get("model_id")
        if not actual_model_id:
            # Should not happen if map is defined correctly, but good practice to check
            logger.error(f"Internal configuration error: Missing 'model_id' for display name '{display_model_name}' in model_map.")
            raise ConfigError(f"Internal mapping error for model '{display_model_name}'.")
        # --- End Translation ---

        # --- Prepare API Call ---
        # Create a copy of the request data to modify
        # NOTE(review): this is a shallow copy — nested entries such as
        # "messages" are still shared with the caller's dict.
        payload = request_data.copy()
        # Set the 'model' in the payload to the ACTUAL Fireworks ID
        payload["model"] = actual_model_id

        # Default values from the curl example
        if "max_tokens" not in payload:
            payload["max_tokens"] = 16384
        if "top_p" not in payload:
            payload["top_p"] = 1
        if "top_k" not in payload:
            payload["top_k"] = 40
        if "presence_penalty" not in payload:
            payload["presence_penalty"] = 0
        if "frequency_penalty" not in payload:
            payload["frequency_penalty"] = 0
        if "temperature" not in payload:
            payload["temperature"] = 0.6

        # Update headers (in case API key was missing during init)
        headers = self.base_headers.copy()
        headers["Authorization"] = f"Bearer {self.api_key}"

        logger.info(f"Calling Fireworks API: display_model='{display_model_name}', actual_model='{actual_model_id}'")

        # --- Make API Call using httpx ---
        try:
            async with httpx.AsyncClient(timeout=120.0) as client:
                response = await client.post(self.FIREWORKS_API_URL, headers=headers, json=payload)
                response.raise_for_status()
                response_data = response.json()

        # --- Error Handling ---
        except httpx.RequestError as exc:
            logger.error(f"Fireworks API request failed (network/connection): {exc}")
            raise BackendAPIError(f"Could not connect to Fireworks API: {exc}", status_code=503) from exc
        except httpx.HTTPStatusError as exc:
            error_body = exc.response.text
            status_code = exc.response.status_code
            logger.error(f"Fireworks API returned error {status_code} for model {actual_model_id}: {error_body[:500]}")
            try:
                error_json = exc.response.json()
                # assumes error payload is {"error": {"message": ...}} — TODO confirm;
                # a string-valued "error" would raise AttributeError here
                message = error_json.get("error", {}).get("message", error_body)
            except json.JSONDecodeError:
                message = error_body
            raise BackendAPIError(f"Fireworks API Error ({status_code}): {message}", status_code=status_code) from exc
        except Exception as exc:
            logger.exception(f"An unexpected error occurred during Fireworks API call for model {actual_model_id}")
            raise HandlerError(f"Unexpected error processing Fireworks request: {exc}") from exc

        # --- Log Conversation (using display name for consistency) ---
        # Both sides are truncated to 500 chars to keep log lines bounded.
        prompt_text = ""
        response_text = ""
        try:
            messages = request_data.get("messages", [])
            if messages and isinstance(messages[-1].get("content"), str):
                prompt_text = messages[-1].get("content", "")[:500]
            if 'choices' in response_data and response_data['choices']:
                choice = response_data['choices'][0]
                if 'message' in choice and 'content' in choice['message']:
                    response_text = choice['message']['content'][:500]
        except Exception as log_parse_err:
            logger.warning(f"Could not parse prompt/response for logging: {log_parse_err}")

        # Log using the display name the user requested
        self.log_conversation(prompt_text, response_text, display_model_name)

        # --- Return Response ---
        # Replace actual ID with display name in response
        if "model" in response_data:
            response_data["model"] = display_model_name

        logger.info(f"Successfully processed Fireworks request for display model '{display_model_name}'.")
        return response_data
162 |
--------------------------------------------------------------------------------
/community/marketplace.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException
2 | from pydantic import BaseModel
3 | from typing import Optional, List
4 | import sqlite3
5 | import datetime
6 | import logging
7 |
# Setup logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger('marketplace')

# Create router
# Mounted without a prefix by api.py, so the paths declared below are absolute.
marketplace_router = APIRouter()

# Database configuration
# Relative path: the SQLite file is created in the process working directory.
DB_PATH = "marketplace.db"
21 |
# Data model
class Agent(BaseModel):
    """Request/response schema for one marketplace agent record."""
    id: str
    name: str
    description: Optional[str] = ""
    model_name: str
    system_prompt: Optional[str] = ""
    loop_interval_seconds: float
    code: str
    memory: Optional[str] = ""
    author: Optional[str] = None      # display name of the publisher
    author_id: Optional[str] = None   # stable identifier of the publisher
    date_added: Optional[str] = None  # ISO-8601 string; server fills it in if omitted
35 |
# Initialize database
def init_db():
    """Create the agents table if missing, or migrate an existing one.

    Migration adds the author/author_id/date_added columns that databases
    created by older versions lack.

    Fix: the connection is now closed in a finally block, so it is no
    longer leaked when any SQL statement raises.
    """
    conn = sqlite3.connect(DB_PATH)
    try:
        cursor = conn.cursor()

        # Check if the table exists
        cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='agents'")
        if cursor.fetchone() is None:
            # Create the table with the new author fields
            cursor.execute('''
            CREATE TABLE agents (
                id TEXT PRIMARY KEY,
                name TEXT NOT NULL,
                description TEXT,
                model_name TEXT NOT NULL,
                system_prompt TEXT,
                loop_interval_seconds REAL,
                code TEXT NOT NULL,
                memory TEXT,
                author TEXT,
                author_id TEXT,
                date_added TEXT
            )
            ''')
        else:
            # Check if we need to add the new columns
            cursor.execute("PRAGMA table_info(agents)")
            columns = [column[1] for column in cursor.fetchall()]

            if "author" not in columns:
                cursor.execute("ALTER TABLE agents ADD COLUMN author TEXT")

            if "author_id" not in columns:
                cursor.execute("ALTER TABLE agents ADD COLUMN author_id TEXT")

            if "date_added" not in columns:
                cursor.execute("ALTER TABLE agents ADD COLUMN date_added TEXT")

        conn.commit()
    finally:
        conn.close()
    logger.info("Marketplace database initialized")

# Initialize the database at module load
init_db()
80 |
# Routes
@marketplace_router.get("/marketplace-status")
async def marketplace_root():
    """Liveness check for the marketplace service."""
    return {"status": "Marketplace service is running"}

@marketplace_router.get("/agents")
async def list_agents():
    """Return every agent in the marketplace as a list of dicts.

    Fix: the connection is closed in a finally block so it is not leaked
    when the query raises.
    """
    conn = sqlite3.connect(DB_PATH)
    try:
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM agents")
        agents = [dict(row) for row in cursor.fetchall()]
    finally:
        conn.close()
    return agents
95 |
# Fix: static paths must be registered BEFORE the dynamic "/agents/{agent_id}"
# route. FastAPI matches routes in registration order, so the original order
# made GET /agents/statistics resolve to get_agent(agent_id="statistics") and
# return a 404 instead of the statistics payload.

@marketplace_router.get("/agents/statistics")
async def get_agent_statistics():
    """Aggregate stats: total agents, distinct authors, and the top-5 models."""
    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()

    # Count total agents
    cursor.execute("SELECT COUNT(*) as total FROM agents")
    total = cursor.fetchone()["total"]

    # Count unique authors
    cursor.execute("SELECT COUNT(DISTINCT author_id) as authors FROM agents WHERE author_id IS NOT NULL")
    authors = cursor.fetchone()["authors"]

    # Get popular models
    cursor.execute("""
        SELECT model_name, COUNT(*) as count
        FROM agents
        GROUP BY model_name
        ORDER BY count DESC
        LIMIT 5
    """)
    models = [dict(row) for row in cursor.fetchall()]

    conn.close()

    return {
        "total_agents": total,
        "unique_authors": authors,
        "popular_models": models
    }

@marketplace_router.get("/agents/by-author/{author_id}")
async def get_agents_by_author(author_id: str):
    """List every agent published by the given author."""
    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM agents WHERE author_id = ?", (author_id,))
    agents = [dict(row) for row in cursor.fetchall()]
    conn.close()

    return agents

@marketplace_router.get("/agents/{agent_id}")
async def get_agent(agent_id: str):
    """Fetch a single agent by id; raises 404 when it does not exist."""
    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM agents WHERE id = ?", (agent_id,))
    agent = cursor.fetchone()
    conn.close()

    if not agent:
        raise HTTPException(status_code=404, detail="Agent not found")

    return dict(agent)

@marketplace_router.post("/agents")
async def create_agent(agent: Agent):
    """Insert or replace an agent record (upsert keyed on id)."""
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    # If date_added is not provided, set it to current time
    if not agent.date_added:
        agent.date_added = datetime.datetime.now().isoformat()

    # Insert or replace the agent with author information
    cursor.execute('''
        INSERT OR REPLACE INTO agents
        (id, name, description, model_name, system_prompt, loop_interval_seconds, code, memory, author, author_id, date_added)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    ''', (
        agent.id, agent.name, agent.description, agent.model_name,
        agent.system_prompt, agent.loop_interval_seconds, agent.code, agent.memory,
        agent.author, agent.author_id, agent.date_added
    ))

    conn.commit()
    conn.close()

    return {"success": True}
177 |
--------------------------------------------------------------------------------
/community/openrouter_handler.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import json
4 | import logging
5 | import httpx # Use httpx for async requests
6 |
7 | # Import base class and custom exceptions
8 | from api_handlers import BaseAPIHandler, ConfigError, BackendAPIError, HandlerError
9 |
10 | logger = logging.getLogger("openrouter_handler")
11 |
class OpenRouterAPIHandler(BaseAPIHandler):
    """
    Asynchronous handler for OpenRouter API requests using httpx,
    with mapping for user-friendly model names.
    """
    OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"

    def __init__(self):
        super().__init__("openrouter")

        # Previously offered models, kept for reference:
        # {"name": "google/gemini-2.0-flash-exp:free", "parameters": "unknown"},
        # {"name": "google/gemma-3-27b-it:free", "parameters": "27B"},
        # {"name": "deepseek/deepseek-r1-zero:free", "parameters": "671B"},
        # {"name": "deepseek/deepseek-v3-base:free", "parameters": "671B"}, # Example model ID

        # --- Model Mapping ---
        # Dictionary mapping display names to actual model IDs and parameter sizes
        self.model_map = {
            # --- Add your desired mappings here ---
            # "gemma-3-27b-or": {
            #     "model_id": "google/gemma-3-27b-it:free",
            #     "parameters": "27B",
            #     "multimodal": True
            # },
            "deepseek-r1": {
                "model_id": "deepseek/deepseek-r1:free", # Example
                "parameters": "671B",
                "multimodal": False
            },
            "deepseek-v3": {
                "model_id": "deepseek/deepseek-chat:free", # Example
                "parameters": "671B",
                "multimodal": False
            },
            "qwq": {
                "model_id": "qwen/qwq-32b:free", # Example
                "parameters": "32B",
                "multimodal": False
            },
            "deepseek-llama-70b": {
                "model_id": "deepseek/deepseek-r1-distill-llama-70b:free",
                "parameters": "70b",
                "multimodal": False
            }
            # Add more models following this pattern
            # "your-pretty-name": { "model_id": "actual/openrouter-model-id:tag", "parameters": "..."}
        }

        # Define supported models for display using the pretty names from the map
        self.models = [
            {"name": display_name, "parameters": model_info.get("parameters", "N/A"),
             "multimodal": model_info.get("multimodal", False)}
            for display_name, model_info in self.model_map.items()
        ]
        # --- End Model Mapping ---


        self.api_key = os.environ.get("OPENROUTER_API_KEY")
        if not self.api_key:
            logger.error("OPENROUTER_API_KEY environment variable not set. OpenRouter handler will fail.")

        # Log the DISPLAY names that will be shown to the user
        logger.info("OpenRouterAPIHandler registered display models: %s", [m["name"] for m in self.models])

        # Base headers
        # HTTP-Referer and X-Title are OpenRouter-specific attribution headers;
        # Authorization is rebuilt per-request in handle_request.
        self.base_headers = {
            "Authorization": f"Bearer {self.api_key}" if self.api_key else "",
            "Content-Type": "application/json",
            "HTTP-Referer": os.environ.get("OPENROUTER_SITE_URL", "http://localhost"),
            "X-Title": os.environ.get("OPENROUTER_APP_TITLE", "ObserverAI-FastAPI"),
            "User-Agent": "ObserverAI-FastAPI-Client/1.0"
        }

    async def handle_request(self, request_data: dict) -> dict:
        """
        Process a /v1/chat/completions request asynchronously via OpenRouter.
        Translates display model name to actual OpenRouter model ID.

        Raises:
            ConfigError: if the API key is missing or the model map is broken.
            ValueError: if 'model' is absent or not a known display name.
            BackendAPIError: if the OpenRouter API call fails.
            HandlerError: for any other unexpected error during the call.
        """
        if not self.api_key:
            raise ConfigError("OPENROUTER_API_KEY is not configured on the server.")

        # --- Get Display Name and Translate to Actual Model ID ---
        display_model_name = request_data.get("model")
        if not display_model_name:
            raise ValueError("Request data must include a 'model' field (using the display name).")

        # Look up the display name in our map
        model_info = self.model_map.get(display_model_name)
        if not model_info:
            # If the display name isn't found, the model is unsupported by this mapping
            logger.warning(f"Received request for unmapped OpenRouter model display name: {display_model_name}")
            raise ValueError(f"Model display name '{display_model_name}' is not recognized or supported.")

        actual_model_id = model_info.get("model_id")
        if not actual_model_id:
            # Should not happen if map is defined correctly, but good practice to check
            logger.error(f"Internal configuration error: Missing 'model_id' for display name '{display_model_name}' in model_map.")
            raise ConfigError(f"Internal mapping error for model '{display_model_name}'.")
        # --- End Translation ---


        # --- Prepare API Call ---
        # Create a copy of the request data to modify
        # NOTE(review): this is a shallow copy — nested entries such as
        # "messages" are still shared with the caller's dict.
        payload = request_data.copy()
        # *** IMPORTANT: Set the 'model' in the payload to the ACTUAL OpenRouter ID ***
        payload["model"] = actual_model_id

        # Update headers (in case API key was missing during init)
        headers = self.base_headers.copy()
        headers["Authorization"] = f"Bearer {self.api_key}"

        logger.info(f"Calling OpenRouter API: display_model='{display_model_name}', actual_model='{actual_model_id}'")

        # --- Make API Call using httpx ---
        try:
            async with httpx.AsyncClient(timeout=120.0) as client:
                response = await client.post(self.OPENROUTER_API_URL, headers=headers, json=payload)
                response.raise_for_status()
                response_data = response.json()

        # --- Error Handling (keep as before) ---
        except httpx.RequestError as exc:
            logger.error(f"OpenRouter API request failed (network/connection): {exc}")
            raise BackendAPIError(f"Could not connect to OpenRouter API: {exc}", status_code=503) from exc
        except httpx.HTTPStatusError as exc:
            error_body = exc.response.text
            status_code = exc.response.status_code
            logger.error(f"OpenRouter API returned error {status_code} for model {actual_model_id}: {error_body[:500]}")
            try:
                error_json = exc.response.json()
                # assumes error payload is {"error": {"message": ...}} — TODO confirm;
                # a string-valued "error" would raise AttributeError here
                message = error_json.get("error", {}).get("message", error_body)
            except json.JSONDecodeError:
                message = error_body
            raise BackendAPIError(f"OpenRouter API Error ({status_code}): {message}", status_code=status_code) from exc
        except Exception as exc:
            logger.exception(f"An unexpected error occurred during OpenRouter API call for model {actual_model_id}")
            raise HandlerError(f"Unexpected error processing OpenRouter request: {exc}") from exc


        # --- Log Conversation (using display name for consistency if desired) ---
        # Both sides are truncated to 500 chars to keep log lines bounded.
        prompt_text = ""
        response_text = ""
        try:
            messages = request_data.get("messages", [])
            if messages and isinstance(messages[-1].get("content"), str):
                prompt_text = messages[-1].get("content", "")[:500]
            if 'choices' in response_data and response_data['choices']:
                choice = response_data['choices'][0]
                if 'message' in choice and 'content' in choice['message']:
                    response_text = choice['message']['content'][:500]
        except Exception as log_parse_err:
            logger.warning(f"Could not parse prompt/response for logging: {log_parse_err}")

        # Log using the display name the user requested
        self.log_conversation(prompt_text, response_text, display_model_name)


        # --- Return Response ---
        # Modify the response payload to show the *display name* instead of the actual ID? Optional.
        # If you want the client to see the pretty name in the response:
        if "model" in response_data:
            response_data["model"] = display_model_name # Replace actual ID with display name

        logger.info(f"Successfully processed OpenRouter request for display model '{display_model_name}'.")
        return response_data
177 |
--------------------------------------------------------------------------------
/community/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi==0.103.1
2 | uvicorn==0.23.2
3 | httpx==0.24.1
4 | pydantic==2.3.0
5 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | ollama:
5 | image: ollama/ollama:latest
6 | container_name: ollama_service # Optional, but good for clarity
7 | volumes:
8 | - ollama_data:/root/.ollama # Persist Ollama models
9 | ports:
10 | - "11434:11434" # Expose Ollama's port to the host (and other containers)
11 | restart: unless-stopped
12 | # If on Linux with NVIDIA GPU and nvidia-container-toolkit installed:
13 | # deploy:
14 | # resources:
15 | # reservations:
16 | # devices:
17 | # - driver: nvidia
18 | # count: all
19 | # capabilities: [gpu]
20 |
21 | observer_app:
22 | build:
23 | context: . # Tells Docker Compose to look for Dockerfile in the current directory
24 | dockerfile: Dockerfile # Specifies your existing Dockerfile
25 | container_name: observer_app_and_proxy
26 | ports:
27 | - "8080:80" # Nginx for web app (HTTP)
28 | - "3838:3838" # Your Python proxy (HTTPS, self-signed)
29 | depends_on:
30 | - ollama # Tells Docker Compose to start ollama service before this one
31 | restart: unless-stopped
    # NOTE(review): the proxy defaults to localhost:11434, but inside this
    # container "localhost" is not the ollama service. Cross-container traffic
    # must target the service name (e.g. http://ollama:11434) — confirm whether
    # an OLLAMA_API_BASE_URL override is needed here.
34 |
35 | volumes:
36 | ollama_data: {} # Defines the named volume
37 |
--------------------------------------------------------------------------------
/observer-ollama/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 Roy Medina
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/observer-ollama/observer_ollama/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | observer-ollama: HTTPS proxy with CORS support for Ollama
3 | """
4 |
5 | __version__ = "0.1.0"
6 |
--------------------------------------------------------------------------------
/observer-ollama/observer_ollama/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | import logging
4 | import signal
5 | import sys
6 | import socketserver
7 | import ssl
8 | from .ssl_utils import prepare_certificates, get_local_ip
9 | from .handle_ollama import OllamaProxy, check_ollama_running, start_ollama_server
10 |
11 | # Setup logging
12 | logging.basicConfig(
13 | level=logging.INFO,
14 | format='%(asctime)s - %(levelname)s - %(message)s',
15 | datefmt='%Y-%m-%d %H:%M:%S'
16 | )
17 | logger = logging.getLogger('ollama-proxy')
18 |
def run_server(port, cert_dir, auto_start, dev_mode):
    """Start the HTTPS proxy server and block until shutdown.

    Args:
        port: TCP port to listen on (all interfaces).
        cert_dir: Directory holding (or receiving) the self-signed cert/key pair.
        auto_start: If True, try to launch Ollama when it is not already running.
        dev_mode: Stored on the server object for the request handler to read.

    Exits the process via sys.exit on fatal server errors or on SIGINT/SIGTERM.
    """
    # Ensure a certificate/key pair exists (generated on first run).
    cert_path, key_path = prepare_certificates(cert_dir)

    # Probe Ollama exactly once and branch on the result (the original code
    # called check_ollama_running() twice for the same decision).
    if check_ollama_running():
        logger.info("Ollama is already running")
    elif auto_start:
        start_ollama_server()
    else:
        logger.warning("Ollama is not running. Proxy may not work until Ollama server is available.")

    # Threaded server: one thread per request. The subclass carries dev_mode
    # so handlers can relax CORS checks in development.
    class CustomThreadingTCPServer(socketserver.ThreadingTCPServer):
        allow_reuse_address = True  # permit quick restarts without TIME_WAIT errors

        def __init__(self, *args, **kwargs):
            self.dev_mode = dev_mode
            super().__init__(*args, **kwargs)

    httpd = CustomThreadingTCPServer(("", port), OllamaProxy)

    # Wrap the listening socket in TLS; require at least TLS 1.2.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.minimum_version = ssl.TLSVersion.TLSv1_2
    context.load_cert_chain(certfile=cert_path, keyfile=key_path)
    httpd.socket = context.wrap_socket(httpd.socket, server_side=True)

    # Graceful shutdown on Ctrl-C / SIGTERM.
    def signal_handler(sig, frame):
        logger.info("Shutting down...")
        httpd.server_close()
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # Display server information in Vite-like format
    local_ip = get_local_ip()
    print("\n\033[1m OLLAMA-PROXY \033[0m ready")
    print(f" ➜ \033[36mLocal: \033[0mhttps://localhost:{port}/")
    print(f" ➜ \033[36mNetwork: \033[0mhttps://{local_ip}:{port}/")
    print("\n Use the Network URL when accessing from another machine\n")

    # Serve until interrupted; the finally guarantees the listening socket is
    # released even when serve_forever raises (the original leaked it on error).
    try:
        logger.info(f"Server started on port {port}")
        httpd.serve_forever()
    except Exception as e:
        logger.error(f"Server error: {e}")
        sys.exit(1)
    finally:
        # Safe to call even after the signal handler already closed it.
        httpd.server_close()
71 |
def main():
    """CLI entry point: parse options, configure logging, launch the proxy."""
    parser = argparse.ArgumentParser(description="Ollama HTTPS Proxy Server")
    parser.add_argument("--port", type=int, default=3838,
                        help="Port to run the proxy server on")
    parser.add_argument("--cert-dir", default="./certs",
                        help="Directory to store certificates")
    # The three boolean switches share the same shape, so declare them in one pass.
    for flag, help_text in (
        ("--debug", "Enable debug logging"),
        ("--dev", "Development mode (allows all origins)"),
        ("--no-start", "Don't automatically start Ollama if not running"),
    ):
        parser.add_argument(flag, action="store_true", help=help_text)

    args = parser.parse_args()

    if args.debug:
        # Crank every registered logger down to DEBUG, not just our own.
        logger.setLevel(logging.DEBUG)
        for name in logging.root.manager.loggerDict:
            logging.getLogger(name).setLevel(logging.DEBUG)

    run_server(args.port, args.cert_dir, not args.no_start, args.dev)


if __name__ == "__main__":
    main()
93 |
--------------------------------------------------------------------------------
/observer-ollama/observer_ollama/ssl_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import sys
4 | import socket
5 | import subprocess
6 | import logging
7 | from pathlib import Path
8 |
9 | logger = logging.getLogger('ollama-proxy.ssl')
10 |
def get_local_ip():
    """Best-effort detection of this machine's LAN IP address.

    "Connects" a UDP socket to a public address — no packets are actually
    sent — and reads back the local address the OS chose for that route.

    Returns:
        The local IP as a dotted-quad string, or "127.0.0.1" on failure.
    """
    # Resolve the module logger locally so the function is self-contained.
    logger = logging.getLogger('ollama-proxy.ssl')
    try:
        # Context manager guarantees the socket is closed even when connect()
        # or getsockname() raises (the original leaked it on that path).
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            # We don't actually need to send data
            s.connect(('8.8.8.8', 80))
            local_ip = s.getsockname()[0]
        logger.debug(f"Local IP detected: {local_ip}")
        return local_ip
    except Exception as e:
        logger.warning(f"Could not determine local IP: {e}")
        return "127.0.0.1"
25 |
def prepare_certificates(cert_dir):
    """Create (or reuse) a self-signed TLS certificate/key pair in cert_dir.

    Generates cert.pem / key.pem with OpenSSL on first run, writing a minimal
    openssl.cnf with SAN entries for localhost, 127.0.0.1 and the detected
    LAN IP so browsers accept the cert from other machines on the network.

    Returns:
        Tuple (cert_path, key_path) as pathlib.Path objects.

    Exits the process (sys.exit(1)) if the OpenSSL invocation fails.
    """
    cert_path = Path(cert_dir) / "cert.pem"
    key_path = Path(cert_dir) / "key.pem"
    config_path = Path(cert_dir) / "openssl.cnf"

    logger.debug(f"Certificate paths: cert={cert_path}, key={key_path}")

    # Create certificate directory if it doesn't exist
    os.makedirs(cert_dir, exist_ok=True)
    logger.debug(f"Ensured certificate directory exists: {cert_dir}")

    # Regenerate only when either half of the pair is missing; otherwise reuse.
    if not cert_path.exists() or not key_path.exists():
        logger.info("Generating new SSL certificates...")

        # Create a minimal OpenSSL config with SAN entries
        local_ip = get_local_ip()

        config_content = f"""
[req]
distinguished_name = req_distinguished_name
x509_extensions = v3_req
prompt = no

[req_distinguished_name]
CN = localhost

[v3_req]
subjectAltName = @alt_names

[alt_names]
DNS.1 = localhost
IP.1 = 127.0.0.1
IP.2 = {local_ip}
"""

        logger.debug(f"Writing OpenSSL config with content:\n{config_content}")

        with open(config_path, 'w') as f:
            f.write(config_content)

        # Self-signed cert: 4096-bit RSA, SHA-256, valid 365 days, and -nodes
        # (no passphrase) so the server can load the key unattended.
        cmd = [
            "openssl", "req", "-x509",
            "-newkey", "rsa:4096",
            "-sha256",
            "-days", "365",
            "-nodes",
            "-keyout", str(key_path),
            "-out", str(cert_path),
            "-config", str(config_path)
        ]

        logger.debug(f"Executing OpenSSL command: {' '.join(cmd)}")

        try:
            result = subprocess.run(cmd, check=True, capture_output=True, text=True)
            logger.debug(f"OpenSSL stdout: {result.stdout}")
            logger.info(f"Certificates successfully generated at {cert_dir}")
        except subprocess.CalledProcessError as e:
            # Generation failure is fatal: the HTTPS server cannot start without keys.
            logger.error(f"Failed to generate certificates: {e.stderr}")
            logger.error(f"Command was: {' '.join(cmd)}")
            sys.exit(1)
    else:
        logger.info(f"Using existing certificates from {cert_dir}")

    return cert_path, key_path
93 |
--------------------------------------------------------------------------------
/observer-ollama/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=42", "wheel"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "observer-ollama"
7 | version = "0.1.8"
8 | description = "HTTPS proxy with CORS support for Ollama"
9 | authors = [
10 | {name = "Roy Medina", email = "roymedina@me.com"}
11 | ]
12 | license = {file = "LICENSE"}
13 |
14 | classifiers = [
15 | "Programming Language :: Python :: 3",
16 | "License :: OSI Approved :: MIT License",
17 | "Operating System :: OS Independent",
18 | ]
19 | requires-python = ">=3.10"
20 | dependencies = [
21 | "ollama>=0.4.7" # Official Python client for Ollama
22 | ]
23 | [tool.setuptools]
24 | packages = ["observer_ollama"]
25 |
26 | [project.scripts]
27 | observer-ollama = "observer_ollama.main:main"
28 |
--------------------------------------------------------------------------------
/observer-ollama/test.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import json
3 | import sys
4 |
5 | # Disable SSL warnings for self-signed certificates
6 | import urllib3
7 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
8 |
def test_proxy():
    """Smoke-test the local HTTPS proxy: fetch /api/tags and report results."""
    proxy_url = "https://localhost:3838"

    print(f"Testing connection to {proxy_url}...")

    try:
        # verify=False: the proxy presents a self-signed certificate.
        reply = requests.get(f"{proxy_url}/api/tags", verify=False)
        print(f"✅ HTTPS connection successful (Status: {reply.status_code})")

        if reply.status_code != 200:
            print(f"❌ Ollama returned error: {reply.text}")
        else:
            listing = reply.json()
            print("✅ Ollama communication successful!")
            print("Available models:")
            for entry in listing['models']:
                print(f" - {entry['name']}")

    except requests.exceptions.SSLError as e:
        print(f"❌ SSL Error: {e}")
    except requests.exceptions.ConnectionError as e:
        print(f"❌ Connection Error: {e}")
    except Exception as e:
        print(f"❌ Unexpected error: {e}")


if __name__ == "__main__":
    test_proxy()
39 |
--------------------------------------------------------------------------------
/print_info.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# print_info.sh — one-shot supervisord task that prints the startup banner.

# Quoted heredoc: no shell expansion happens inside the banner text.
cat <<'BANNER'

####################################################################
# #
# 🚀 Observer Web App is starting! Access it at: #
# #
# 👉 http://localhost:8080 #
# #
# Remember to configure the Inference API in the app to: #
# https://localhost:3838 (and accept the cert) #
# #
# Waiting for services to become fully available... #
# #
####################################################################

BANNER

exit 0 # Important to exit so supervisor knows it completed
19 |
--------------------------------------------------------------------------------
/supervisord.conf:
--------------------------------------------------------------------------------
1 | ; supervisord.conf
2 | [supervisord]
3 | nodaemon=true
4 | logfile=/var/log/supervisor/supervisord.log
5 | pidfile=/var/run/supervisord.pid
6 |
7 | [program:print_startup_info]
8 | command=/usr/local/bin/print_info.sh
9 | autostart=true
10 | autorestart=false ; Do not restart after it exits
11 | startsecs=0 ; Consider it started immediately
12 | priority=100 ; Lower priority, run it relatively early
13 | stdout_logfile=/dev/stdout ; Send its output directly to supervisor's stdout
14 | stdout_logfile_maxbytes=0 ; Keep all output
15 | stderr_logfile=/dev/stderr ; Send its errors directly to supervisor's stderr
16 | stderr_logfile_maxbytes=0
17 |
18 | [program:nginx]
19 | command=/usr/sbin/nginx -g "daemon off;"
20 | autostart=true
21 | autorestart=true
22 | stdout_logfile=/var/log/supervisor/nginx_stdout.log
23 | stderr_logfile=/var/log/supervisor/nginx_stderr.log
24 |
25 | [program:observer_proxy]
command=observer-ollama --no-start
; --no-start: do not try to launch Ollama from inside this container; the
; separate ollama service (see docker-compose.yml) provides it and the proxy
; only connects to it.
; NOTE(review): keep comments on their own `;` lines — supervisord may pass
; trailing `#` text on the command= line to the program as extra arguments.
32 | directory=/opt/observer-ollama
33 | autostart=true
34 | autorestart=true
35 | stdout_logfile=/var/log/supervisor/observer_proxy_stdout.log
36 | stderr_logfile=/var/log/supervisor/observer_proxy_stderr.log
37 |
--------------------------------------------------------------------------------
/website/.github/workflows/deploy-website.yml:
--------------------------------------------------------------------------------
1 | name: Deploy Website
2 |
3 | on:
4 | push:
5 | branches: [ "main" ]
6 | paths:
7 | - 'website/**' # Only trigger on changes to website directory
8 |
9 | permissions:
10 | contents: read
11 | pages: write
12 | id-token: write
13 |
14 | jobs:
15 | build:
16 | runs-on: ubuntu-latest
17 | steps:
18 | - uses: actions/checkout@v4
19 |
20 | - name: Setup Node
21 | uses: actions/setup-node@v4
22 | with:
23 | node-version: '20'
24 |
25 | - name: Install Dependencies
26 | working-directory: ./website
27 | run: npm install
28 |
29 | - name: Build
30 | working-directory: ./website
31 | run: npm run build
32 |
33 | - name: Upload artifact
34 | uses: actions/upload-pages-artifact@v3
35 | with:
36 | path: website/dist
37 |
38 | deploy:
39 | needs: build
40 | runs-on: ubuntu-latest
41 | environment:
42 | name: github-pages
43 | url: ${{ steps.deployment.outputs.page_url }}
44 | steps:
45 | - name: Deploy to GitHub Pages
46 | id: deployment
47 | uses: actions/deploy-pages@v4
48 |
--------------------------------------------------------------------------------
/website/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | pnpm-debug.log*
8 | lerna-debug.log*
9 |
10 | node_modules
11 | dist
12 | dist-ssr
13 | *.local
14 |
15 | # Editor directories and files
16 | .vscode/*
17 | !.vscode/extensions.json
18 | .idea
19 | .DS_Store
20 | *.suo
21 | *.ntvs*
22 | *.njsproj
23 | *.sln
24 | *.sw?
25 |
--------------------------------------------------------------------------------
/website/README.md:
--------------------------------------------------------------------------------
1 | # React + TypeScript + Vite
2 |
3 | This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
4 |
5 | Currently, two official plugins are available:
6 |
7 | - [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh
8 | - [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
9 |
10 | ## Expanding the ESLint configuration
11 |
12 | If you are developing a production application, we recommend updating the configuration to enable type aware lint rules:
13 |
14 | - Configure the top-level `parserOptions` property like this:
15 |
16 | ```js
17 | export default tseslint.config({
18 | languageOptions: {
19 | // other options...
20 | parserOptions: {
21 | project: ['./tsconfig.node.json', './tsconfig.app.json'],
22 | tsconfigRootDir: import.meta.dirname,
23 | },
24 | },
25 | })
26 | ```
27 |
- Replace `tseslint.configs.recommended` with `tseslint.configs.recommendedTypeChecked` or `tseslint.configs.strictTypeChecked`
29 | - Optionally add `...tseslint.configs.stylisticTypeChecked`
30 | - Install [eslint-plugin-react](https://github.com/jsx-eslint/eslint-plugin-react) and update the config:
31 |
32 | ```js
33 | // eslint.config.js
34 | import react from 'eslint-plugin-react'
35 |
36 | export default tseslint.config({
37 | // Set the react version
38 | settings: { react: { version: '18.3' } },
39 | plugins: {
40 | // Add the react plugin
41 | react,
42 | },
43 | rules: {
44 | // other rules...
45 | // Enable its recommended rules
46 | ...react.configs.recommended.rules,
47 | ...react.configs['jsx-runtime'].rules,
48 | },
49 | })
50 | ```
51 |
--------------------------------------------------------------------------------
/website/eslint.config.js:
--------------------------------------------------------------------------------
// Flat ESLint config for the website: base JS + TypeScript recommended rules,
// plus the React Hooks and React Refresh (HMR safety) plugins. Build output
// in 'dist' is ignored entirely.
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'

export default tseslint.config(
  { ignores: ['dist'] },
  {
    extends: [js.configs.recommended, ...tseslint.configs.recommended],
    files: ['**/*.{ts,tsx}'],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
    },
    plugins: {
      'react-hooks': reactHooks,
      'react-refresh': reactRefresh,
    },
    rules: {
      ...reactHooks.configs.recommended.rules,
      // Warn when a module exports non-components, which breaks Fast Refresh;
      // constant exports are tolerated.
      'react-refresh/only-export-components': [
        'warn',
        { allowConstantExport: true },
      ],
    },
  },
)
29 |
--------------------------------------------------------------------------------
/website/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Observer AI - Open Source AI Agents
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/website/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@observer/website",
3 | "private": true,
4 | "version": "0.1.0",
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite",
8 | "build": "vite build",
9 | "preview": "vite preview"
10 | },
11 | "dependencies": {
12 | "react": "^18.2.0",
13 | "react-dom": "^18.2.0",
14 | "lucide-react": "^0.294.0"
15 | },
16 | "devDependencies": {
17 | "@types/react": "^18.2.43",
18 | "@types/react-dom": "^18.2.17",
19 | "@vitejs/plugin-react": "^4.2.1",
20 | "autoprefixer": "^10.4.16",
21 | "postcss": "^8.4.32",
22 | "tailwindcss": "^3.3.6",
23 | "typescript": "^5.2.2",
24 | "vite": "^5.0.8"
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/website/postcss.config.js:
--------------------------------------------------------------------------------
// PostCSS pipeline: run Tailwind first, then add vendor prefixes via Autoprefixer.
export default {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
}
7 |
--------------------------------------------------------------------------------
/website/public/CNAME:
--------------------------------------------------------------------------------
1 | observer-ai.com
2 |
--------------------------------------------------------------------------------
/website/public/eye-logo-black.svg:
--------------------------------------------------------------------------------
1 |
5 |
--------------------------------------------------------------------------------
/website/public/eye-logo-white.svg:
--------------------------------------------------------------------------------
1 |
5 |
--------------------------------------------------------------------------------
/website/public/vite.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/website/src/App.css:
--------------------------------------------------------------------------------
1 | #root {
2 | max-width: 1280px;
3 | margin: 0 auto;
4 | padding: 2rem;
5 | text-align: center;
6 | }
7 |
8 | .logo {
9 | height: 6em;
10 | padding: 1.5em;
11 | will-change: filter;
12 | transition: filter 300ms;
13 | }
14 | .logo:hover {
15 | filter: drop-shadow(0 0 2em #646cffaa);
16 | }
17 | .logo.react:hover {
18 | filter: drop-shadow(0 0 2em #61dafbaa);
19 | }
20 |
21 | @keyframes logo-spin {
22 | from {
23 | transform: rotate(0deg);
24 | }
25 | to {
26 | transform: rotate(360deg);
27 | }
28 | }
29 |
30 | @media (prefers-reduced-motion: no-preference) {
31 | a:nth-of-type(2) .logo {
32 | animation: logo-spin infinite 20s linear;
33 | }
34 | }
35 |
36 | @import url('https://fonts.googleapis.com/css2?family=Golos+Text:wght@400;500;600;700&display=swap');
37 |
38 | .card {
39 | padding: 2em;
40 | }
41 |
42 | .read-the-docs {
43 | color: #888;
44 | }
45 |
46 | /* New header styles */
47 | header nav {
48 | max-width: 1400px;
49 | width: 100%;
50 | padding: 1rem 2rem;
51 | display: flex;
52 | justify-content: space-between;
53 | }
54 |
55 | /* Logo section - moved left and made smaller */
56 | header nav .flex.items-center.space-x-2 {
57 | display: flex;
58 | align-items: center;
59 | gap: 0.5rem;
60 | margin-left: -0.5rem; /* Move closer to left edge */
61 | }
62 |
63 | header nav .flex.items-center.space-x-2 img {
64 | width: 1.7rem !important; /* ~15% smaller than original 2rem (w-8 = 2rem) */
65 | height: 1.7rem !important;
66 | }
67 |
68 | /* Right navigation section */
69 | header nav .flex.items-center.space-x-4 {
70 | display: flex;
71 | align-items: center;
72 | gap: 2.5rem;
73 | margin-right: -0.5rem; /* Move closer to right edge */
74 | }
75 |
76 | header nav a {
77 | font-weight: 500;
78 | }
79 |
80 | /* GitHub button styling */
81 | header nav a[href*="github"] {
82 | background-color: rgba(255, 255, 255, 0.1);
83 | border-radius: 0.5rem;
84 | padding: 0.5rem 1rem;
85 | display: flex;
86 | align-items: center;
87 | gap: 0.5rem;
88 | transition: background-color 0.2s;
89 | margin-left: 0.5rem; /* Add a bit more separation from other nav items */
90 | }
91 |
92 | header nav a[href*="github"]:hover {
93 | background-color: rgba(255, 255, 255, 0.2);
94 | }
95 |
--------------------------------------------------------------------------------
/website/src/ObserverLanding.tsx:
--------------------------------------------------------------------------------
1 | import React, { useState, useEffect } from 'react';
2 |
3 | const EyeLogo = ({ mousePosition }) => {
4 | const eyeRadius = 67; // Increased by 1.5x from 45
5 | const pupilRadius = 18; // Increased by 1.5x from 12
6 | const maxPupilOffset = eyeRadius - pupilRadius - 7;
7 |
8 | const calculatePupilPosition = () => {
9 | if (!mousePosition.x || !mousePosition.y) return { x: 0, y: 0 };
10 |
11 | const rect = document.getElementById('eye-container')?.getBoundingClientRect();
12 | if (!rect) return { x: 0, y: 0 };
13 |
14 | const eyeCenterX = rect.left + rect.width / 2;
15 | const eyeCenterY = rect.top + rect.height / 2;
16 |
17 | const angle = Math.atan2(mousePosition.y - eyeCenterY, mousePosition.x - eyeCenterX);
18 | const distance = Math.min(
19 | maxPupilOffset,
20 | Math.sqrt(Math.pow(mousePosition.x - eyeCenterX, 2) + Math.pow(mousePosition.y - eyeCenterY, 2)) / 8
21 | );
22 |
23 | return {
24 | x: Math.cos(angle) * distance,
25 | y: Math.sin(angle) * distance
26 | };
27 | };
28 |
29 | const pupilPosition = calculatePupilPosition();
30 | return (
31 |
41 | );
42 | };
43 |
44 | const ObserverLanding = () => {
45 | const [mousePosition, setMousePosition] = useState({ x: 0, y: 0 });
46 |
47 | useEffect(() => {
48 | const handleMouseMove = (e) => {
49 | setMousePosition({ x: e.clientX, y: e.clientY });
50 | };
51 | window.addEventListener('mousemove', handleMouseMove);
52 | return () => window.removeEventListener('mousemove', handleMouseMove);
53 | }, []);
54 |
55 | return (
56 |
57 |
58 |
59 |
60 |
61 |
bserver
62 |
63 |
64 | );
65 | };
66 |
67 | export default ObserverLanding;
68 |
--------------------------------------------------------------------------------
/website/src/assets/react.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/website/src/index.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
5 | body {
6 | font-family: 'Inter', sans-serif;
7 | }
8 |
--------------------------------------------------------------------------------
/website/src/main.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react'
2 | import ReactDOM from 'react-dom/client'
3 | import App from './App'
4 | import './index.css'
5 |
6 | ReactDOM.createRoot(document.getElementById('root')!).render(
7 |
8 |
9 | ,
10 | )
11 |
--------------------------------------------------------------------------------
/website/src/vite-env.d.ts:
--------------------------------------------------------------------------------
1 | ///
2 |
--------------------------------------------------------------------------------
/website/tailwind.config.js:
--------------------------------------------------------------------------------
/** @type {import('tailwindcss').Config} */
// Tailwind scans index.html and all source files for class names, and adds a
// custom 'golos' font-family utility (used as `font-golos`).
export default {
  content: [
    "./index.html",
    "./src/**/*.{js,ts,jsx,tsx}",
  ],
  theme: {
    extend: {
      fontFamily: {
        'golos': ['Golos Text', 'sans-serif'],
      },
    },
  },
  plugins: [],
}
16 |
--------------------------------------------------------------------------------
/website/tsconfig.app.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
4 | "target": "ES2020",
5 | "useDefineForClassFields": true,
6 | "lib": ["ES2020", "DOM", "DOM.Iterable"],
7 | "module": "ESNext",
8 | "skipLibCheck": true,
9 |
10 | /* Bundler mode */
11 | "moduleResolution": "bundler",
12 | "allowImportingTsExtensions": true,
13 | "isolatedModules": true,
14 | "moduleDetection": "force",
15 | "noEmit": true,
16 | "jsx": "react-jsx",
17 |
18 | /* Linting */
19 | "strict": true,
20 | "noUnusedLocals": true,
21 | "noUnusedParameters": true,
22 | "noFallthroughCasesInSwitch": true,
23 | "noUncheckedSideEffectImports": true
24 | },
25 | "include": ["src"]
26 | }
27 |
--------------------------------------------------------------------------------
/website/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "files": [],
3 | "references": [
4 | { "path": "./tsconfig.app.json" },
5 | { "path": "./tsconfig.node.json" }
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/website/tsconfig.node.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
4 | "target": "ES2022",
5 | "lib": ["ES2023"],
6 | "module": "ESNext",
7 | "skipLibCheck": true,
8 |
9 | /* Bundler mode */
10 | "moduleResolution": "bundler",
11 | "allowImportingTsExtensions": true,
12 | "isolatedModules": true,
13 | "moduleDetection": "force",
14 | "noEmit": true,
15 |
16 | /* Linting */
17 | "strict": true,
18 | "noUnusedLocals": true,
19 | "noUnusedParameters": true,
20 | "noFallthroughCasesInSwitch": true,
21 | "noUncheckedSideEffectImports": true
22 | },
23 | "include": ["vite.config.ts"]
24 | }
25 |
--------------------------------------------------------------------------------
/website/vite.config.ts:
--------------------------------------------------------------------------------
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'

// Minimal Vite setup: React plugin only; all other options use Vite defaults.
export default defineConfig({
  plugins: [react()]
})
7 |
--------------------------------------------------------------------------------