├── outputs
└── .gitkeep
├── nostr_dvm
├── __init__.py
├── tasks
│ └── __init__.py
├── utils
│ ├── __init__.py
│ ├── scrapper
│ │ ├── __init__.py
│ │ └── request_details.json
│ ├── print_utils.py
│ ├── heartbeat.py
│ ├── nip98_utils.py
│ ├── nip65_utils.py
│ ├── reaction_utils.py
│ ├── nwc_tools.py
│ ├── external_dvm_utils.py
│ └── blossom_utils.py
├── backends
│ ├── __init__.py
│ ├── mlx
│ │ ├── __init__.py
│ │ └── modules
│ │ │ ├── __init__.py
│ │ │ └── stable_diffusion
│ │ │ ├── config.py
│ │ │ ├── clip.py
│ │ │ ├── sampler.py
│ │ │ ├── __init__.py
│ │ │ └── tokenizer.py
│ ├── discover
│ │ ├── __init__.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── whisperx
│ │ │ │ ├── __init__.py
│ │ │ │ ├── version.py
│ │ │ │ ├── requirements.txt
│ │ │ │ ├── whisperx_transcript.trainer
│ │ │ │ └── readme.md
│ │ │ ├── image_upscale
│ │ │ │ ├── __init__.py
│ │ │ │ ├── version.py
│ │ │ │ ├── requirements.txt
│ │ │ │ └── image_upscale_realesrgan.trainer
│ │ │ ├── image_interrogator
│ │ │ │ ├── __init__.py
│ │ │ │ ├── requirements.txt
│ │ │ │ ├── version.py
│ │ │ │ ├── readme.md
│ │ │ │ └── image_interrogator.trainer
│ │ │ ├── stablediffusionxl
│ │ │ │ ├── __init__.py
│ │ │ │ ├── requirements.txt
│ │ │ │ ├── version.py
│ │ │ │ ├── readme.md
│ │ │ │ ├── stablediffusionxl-img2img.trainer
│ │ │ │ └── lora.py
│ │ │ └── stablevideodiffusion
│ │ │ │ ├── requirements.txt
│ │ │ │ ├── version.py
│ │ │ │ ├── stablevideodiffusion.trainer
│ │ │ │ └── stablevideodiffusion.py
│ │ ├── run_windows.cmd
│ │ └── setup_windows.cmd
│ └── mcp
│ │ ├── messages
│ │ ├── __init__.py
│ │ ├── message_types
│ │ │ ├── __init__.py
│ │ │ ├── ping_message.py
│ │ │ ├── resources_messages.py
│ │ │ ├── json_rpc_message.py
│ │ │ ├── incrementing_id_message.py
│ │ │ ├── tools_messages.py
│ │ │ ├── prompts_models.py
│ │ │ ├── prompts_messages.py
│ │ │ └── initialize_message.py
│ │ ├── send_ping.py
│ │ ├── send_prompts.py
│ │ ├── send_tools_list.py
│ │ ├── send_resources.py
│ │ ├── send_call_tool.py
│ │ ├── send_message.py
│ │ └── send_initialize_message.py
│ │ ├── transport
│ │ ├── __init__.py
│ │ └── stdio
│ │ │ ├── __init__.py
│ │ │ ├── stdio_server_parameters.py
│ │ │ └── stdio_server_shutdown.py
│ │ ├── server_config.json
│ │ ├── environment.py
│ │ ├── connection_check.py
│ │ ├── config.py
│ │ └── system_prompt_generator.py
├── interfaces
│ └── __init__.py
└── framework.py
├── ui
└── noogle
│ ├── src
│ ├── index.ts
│ ├── assets
│ │ ├── edit.png
│ │ ├── load.gif
│ │ ├── delete.png
│ │ ├── usercard.png
│ │ ├── main.css
│ │ ├── nostr-purple.svg
│ │ └── base.css
│ ├── components
│ │ ├── icons
│ │ │ ├── IconSupport.vue
│ │ │ ├── IconTooling.vue
│ │ │ ├── IconCommunity.vue
│ │ │ ├── IconDocumentation.vue
│ │ │ └── IconEcosystem.vue
│ │ ├── Chat.vue
│ │ ├── Image.vue
│ │ ├── Home.vue
│ │ ├── Newnote.vue
│ │ └── helper
│ │ │ └── string.ts
│ ├── app.css
│ ├── App.vue
│ ├── router
│ │ └── index.js
│ ├── main.js
│ └── layouts
│ │ └── ThreeColumnLayout.vue
│ ├── .env_example
│ ├── public
│ ├── NWA.png
│ ├── NWC.png
│ ├── Alby.jpg
│ ├── Mutiny.png
│ ├── favicon.ico
│ ├── shipyard.ico
│ ├── pwa-192x192.png
│ ├── pwa-512x512.png
│ └── opensearch.xml
│ ├── postcss.config.js
│ ├── tsconfig.json
│ ├── jsconfig.json
│ ├── README.md
│ ├── index.html
│ ├── tailwind.config.js
│ ├── tsconfig.node.json
│ ├── tsconfig.app.json
│ ├── vite.config.ts
│ └── package.json
├── tests
├── mcp
│ ├── mcp-servers
│ │ └── nostr-notes
│ │ │ ├── __init__.py
│ │ │ └── server.py
│ ├── package.json
│ └── dvm
│ │ ├── mcp_server_config.json
│ │ └── mcp_test.py
├── make_ln_address_lnbits.py
├── upload_hoster.py
├── nwc.py
├── db.py
├── tor_test.py
├── filter.py
├── ditto.py
├── summarization_duck.py
├── tts.py
├── generic_dvm.py
├── sunoai.py
├── dalle.py
├── sd35_api.py
├── fix_lnbits_lnaddress.py
├── simplebot.py
├── chat_bot.py
└── bot.py
├── start_windows.cmd
├── .idea
├── vcs.xml
├── sqldialects.xml
├── .gitignore
├── inspectionProfiles
│ └── profiles_settings.xml
├── modules.xml
├── misc.xml
└── dvm.iml
├── docker-compose.yml
├── Dockerfile
├── examples
├── tts_dvm
│ ├── .env_example
│ ├── README.md
│ └── main.py
├── unleashed_dvm
│ ├── .env_example
│ ├── README.md
│ ├── main.py
│ └── test_client.py
└── ollama_dvm
│ ├── .env_example
│ ├── README.md
│ ├── main.py
│ └── test_client.py
├── LICENSE
├── .github
└── workflows
│ └── python-publish.yml
├── setup.py
├── .env_example
├── tutorials
├── 04_simple_chat_bot.py
└── 10_delete_nip89.py
├── main.py
└── README.md
/outputs/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ui/noogle/src/index.ts:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/tasks/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/interfaces/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mlx/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/utils/scrapper/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mlx/modules/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/transport/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/mcp/mcp-servers/nostr-notes/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/transport/stdio/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/whisperx/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/message_types/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/image_upscale/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/start_windows.cmd:
--------------------------------------------------------------------------------
1 | call venv/Scripts/activate
2 | python main.py
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/image_interrogator/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/stablediffusionxl/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ui/noogle/.env_example:
--------------------------------------------------------------------------------
1 | VITE_NOOGLE_PK=""
2 | VITE_SUBSCRIPTIPON_VERIFIER_PUBKEY=""
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/run_windows.cmd:
--------------------------------------------------------------------------------
1 | call venv/Scripts/activate
2 | nova-server
--------------------------------------------------------------------------------
/ui/noogle/public/NWA.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/public/NWA.png
--------------------------------------------------------------------------------
/ui/noogle/public/NWC.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/public/NWC.png
--------------------------------------------------------------------------------
/ui/noogle/public/Alby.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/public/Alby.jpg
--------------------------------------------------------------------------------
/ui/noogle/public/Mutiny.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/public/Mutiny.png
--------------------------------------------------------------------------------
/ui/noogle/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/public/favicon.ico
--------------------------------------------------------------------------------
/ui/noogle/public/shipyard.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/public/shipyard.ico
--------------------------------------------------------------------------------
/ui/noogle/src/assets/edit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/src/assets/edit.png
--------------------------------------------------------------------------------
/ui/noogle/src/assets/load.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/src/assets/load.gif
--------------------------------------------------------------------------------
/ui/noogle/public/pwa-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/public/pwa-192x192.png
--------------------------------------------------------------------------------
/ui/noogle/public/pwa-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/public/pwa-512x512.png
--------------------------------------------------------------------------------
/ui/noogle/src/assets/delete.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/src/assets/delete.png
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/setup_windows.cmd:
--------------------------------------------------------------------------------
1 | python -m venv venv
2 | call venv/Scripts/activate
3 | pip install hcai-nova-server
--------------------------------------------------------------------------------
/ui/noogle/src/assets/usercard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/believethehype/nostrdvm/HEAD/ui/noogle/src/assets/usercard.png
--------------------------------------------------------------------------------
/ui/noogle/postcss.config.js:
--------------------------------------------------------------------------------
1 | export default {
2 | plugins: {
3 | tailwindcss: {},
4 | autoprefixer: {},
5 | },
6 | }
7 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/sqldialects.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/server_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "mcpServers": {
3 | "nostrdvmmcp": {
4 | "command": "node",
5 | "args": ["../../../tests/mcp/nostr_dvmcp_server.js"]
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/ui/noogle/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "files": [],
3 | "references": [
4 | {
5 | "path": "./tsconfig.node.json"
6 | },
7 | {
8 | "path": "./tsconfig.app.json"
9 | }
10 | ]
11 | }
12 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/ui/noogle/jsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "paths": {
4 | "@/*": [
5 | "./src/*"
6 | ]
7 | }
8 | },
9 | "exclude": [
10 | "node_modules",
11 | "dist"
12 | ]
13 | }
14 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/image_interrogator/requirements.txt:
--------------------------------------------------------------------------------
1 | hcai-nova-utils>=1.5.7
2 | --extra-index-url https://download.pytorch.org/whl/cu118
3 | torch==2.1.1
4 | clip_interrogator
5 | git+https://github.com/huggingface/diffusers.git
6 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/stablevideodiffusion/requirements.txt:
--------------------------------------------------------------------------------
1 | hcai-nova-utils>=1.5.7
2 | --extra-index-url https://download.pytorch.org/whl/cu118
3 | torch==2.1.0
4 | git+https://github.com/huggingface/diffusers.git
5 | transformers
6 | accelerate
7 | opencv-python
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | nostrdvm:
5 | container_name: nostrdvm
6 | build:
7 | context: .
8 | dockerfile: Dockerfile
9 | ports:
10 | - "80:80"
11 | env_file:
12 | - .env
13 | volumes:
14 | - .:/app
15 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/stablediffusionxl/requirements.txt:
--------------------------------------------------------------------------------
1 | hcai-nova-utils>=1.5.7
2 | --extra-index-url https://download.pytorch.org/whl/cu118
3 | torch==2.1.0
4 | compel~=2.0.2
5 | git+https://github.com/huggingface/diffusers.git
6 | transformers
7 | accelerate
8 | numpy
9 | omegaconf
10 |
--------------------------------------------------------------------------------
/ui/noogle/README.md:
--------------------------------------------------------------------------------
1 | # Nostr DVM web app
2 |
3 | ## Project Setup
4 |
5 | ```sh
6 | npm install
7 | ```
8 |
9 | ### Compile and Hot-Reload for Development
10 |
11 | ```sh
12 | npm run dev
13 | ```
14 |
15 | ### Compile and Minify for Production
16 |
17 | ```sh
18 | npm run build
19 | npm run preview
20 | ```
21 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/whisperx/version.py:
--------------------------------------------------------------------------------
1 | """ WhisperX
2 | """
3 | # We follow Semantic Versioning (https://semver.org/)
4 | _MAJOR_VERSION = '1'
5 | _MINOR_VERSION = '0'
6 | _PATCH_VERSION = '1'
7 |
8 | __version__ = '.'.join([
9 | _MAJOR_VERSION,
10 | _MINOR_VERSION,
11 | _PATCH_VERSION,
12 | ])
13 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/image_upscale/version.py:
--------------------------------------------------------------------------------
1 | """ RealESRGan
2 | """
3 | # We follow Semantic Versioning (https://semver.org/)
4 | _MAJOR_VERSION = '1'
5 | _MINOR_VERSION = '0'
6 | _PATCH_VERSION = '0'
7 |
8 | __version__ = '.'.join([
9 | _MAJOR_VERSION,
10 | _MINOR_VERSION,
11 | _PATCH_VERSION,
12 | ])
13 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/image_interrogator/version.py:
--------------------------------------------------------------------------------
1 | """ Clip Interrorgator
2 | """
3 | # We follow Semantic Versioning (https://semver.org/)
4 | _MAJOR_VERSION = '1'
5 | _MINOR_VERSION = '0'
6 | _PATCH_VERSION = '0'
7 |
8 | __version__ = '.'.join([
9 | _MAJOR_VERSION,
10 | _MINOR_VERSION,
11 | _PATCH_VERSION,
12 | ])
13 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/stablediffusionxl/version.py:
--------------------------------------------------------------------------------
1 | """ Stable Diffusion XL
2 | """
3 | # We follow Semantic Versioning (https://semver.org/)
4 | _MAJOR_VERSION = '1'
5 | _MINOR_VERSION = '0'
6 | _PATCH_VERSION = '0'
7 |
8 | __version__ = '.'.join([
9 | _MAJOR_VERSION,
10 | _MINOR_VERSION,
11 | _PATCH_VERSION,
12 | ])
13 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/transport/stdio/stdio_server_parameters.py:
--------------------------------------------------------------------------------
1 | # transport/stdio/stdio_server_parameters.py
2 | from pydantic import BaseModel, Field
3 | from typing import Any, Dict, Optional
4 |
5 | class StdioServerParameters(BaseModel):
6 | command: str
7 | args: list[str] = Field(default_factory=list)
8 | env: Optional[Dict[str, str]] = None
--------------------------------------------------------------------------------
/ui/noogle/src/components/icons/IconSupport.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/stablevideodiffusion/version.py:
--------------------------------------------------------------------------------
1 | """ Stable Video Diffusion
2 | """
3 | # We follow Semantic Versioning (https://semver.org/)
4 | _MAJOR_VERSION = '1'
5 | _MINOR_VERSION = '0'
6 | _PATCH_VERSION = '0'
7 |
8 | __version__ = '.'.join([
9 | _MAJOR_VERSION,
10 | _MINOR_VERSION,
11 | _PATCH_VERSION,
12 | ])
13 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/whisperx/requirements.txt:
--------------------------------------------------------------------------------
1 | hcai-nova-utils>=1.5.7
2 | --extra-index-url https://download.pytorch.org/whl/cu118
3 | torch==2.1.0+cu118
4 | torchvision>= 0.15.1+cu118
5 | torchaudio >= 2.0.0+cu118
6 | pyannote-audio @ git+https://github.com/shelm/pyannote-audio.git@d7b4de3
7 | whisperx @ git+https://github.com/m-bain/whisperx.git@49e0130
8 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/message_types/ping_message.py:
--------------------------------------------------------------------------------
1 | # messages/message_types/ping_message.py
2 | from nostr_dvm.backends.mcp.messages.message_types.incrementing_id_message import IncrementingIDMessage
3 |
4 | class PingMessage(IncrementingIDMessage):
5 | def __init__(self, start_id: int = None, **kwargs):
6 | super().__init__(prefix="ping", method="ping", start_id=start_id, **kwargs)
7 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/image_upscale/requirements.txt:
--------------------------------------------------------------------------------
1 | realesrgan @git+https://github.com/xinntao/Real-ESRGAN.git
2 | hcai-nova-utils>=1.5.7
3 | --extra-index-url https://download.pytorch.org/whl/cu118
4 | torch==2.1.0
5 | torchvision
6 | basicsr>=1.4.2
7 | facexlib>=0.2.5
8 | gfpgan>=1.3.5
9 | numpy
10 | opencv-python
11 | Pillow
12 | tqdm
13 | git+https://github.com/huggingface/diffusers.git
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/image_interrogator/readme.md:
--------------------------------------------------------------------------------
1 | #Clip Interogator
2 |
3 | This modules provides prompt generation based on images
4 |
5 | * https://huggingface.co/spaces/pharmapsychotic/CLIP-Interrogator
6 |
7 | ## Options
8 |
9 | - `kind`: string, identifier of the kind of processing
10 | - `prompt`: Generates a prompt from image
11 | - `analysis`: Generates a categorical analysis
12 |
--------------------------------------------------------------------------------
/tests/make_ln_address_lnbits.py:
--------------------------------------------------------------------------------
1 | from nostr_dvm.utils.zap_utils import make_ln_address_nostdress_manual_lnbits
2 |
3 | name = ""
4 | invoice_key = ""
5 | npub = ""
6 | nostdress_domain = ""
7 | lnbits_domain = "https://demo.lnbits.com"
8 |
9 | lnaddress, pin = make_ln_address_nostdress_manual_lnbits("_", invoice_key, npub, nostdress_domain, lnbits_domain, pin = " ", currentname= " ")
10 | print(lnaddress)
11 | print(pin)
12 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use the official Python 3.12 image from the Docker Hub
2 | FROM python:3.12-slim
3 |
4 | # Set the working directory in the container
5 | WORKDIR /app
6 |
7 | RUN pip install setuptools
8 |
9 | # Copy the rest of the application code into the container
10 | COPY . .
11 |
12 | #RUN pip install nostr-dvm
13 | RUN python setup.py install
14 |
15 | # Specify the command to run your application
16 | CMD ["python3", "-u", "main.py"]
17 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/message_types/resources_messages.py:
--------------------------------------------------------------------------------
1 | # messages/message_types/resources_messages.py
2 | from typing import ClassVar
3 | from mcpcli.messages.message_types.incrementing_id_message import IncrementingIDMessage
4 |
5 | class ResourcesListMessage(IncrementingIDMessage):
6 | def __init__(self, start_id: int = None, **kwargs):
7 | super().__init__(prefix="resources-list", method="resources/list", start_id=start_id, **kwargs)
8 |
--------------------------------------------------------------------------------
/ui/noogle/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Nostr decentralized search and other stuff
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/ui/noogle/src/components/Chat.vue:
--------------------------------------------------------------------------------
1 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/ui/noogle/src/components/Image.vue:
--------------------------------------------------------------------------------
1 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/ui/noogle/tailwind.config.js:
--------------------------------------------------------------------------------
1 | import daisyui from "daisyui";
2 |
3 | /** @type {import('tailwindcss').Config} */
4 | export default {
5 | darkMode: "class",
6 | content: [
7 | "./index.html",
8 | "./src/**/*.{vue,js,ts,jsx,tsx}",
9 |
10 | ],
11 | theme: {
12 |
13 | extend: {
14 | colors: {
15 | 'nostr': '#6d52f1',
16 | 'nostr2': '#8453f1',
17 |
18 | }
19 |
20 | },
21 | },
22 | plugins: [daisyui],
23 | }
24 |
25 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/message_types/json_rpc_message.py:
--------------------------------------------------------------------------------
1 | # messages/message_types/json_rpc_message.py
2 | from typing import Any, Dict, Optional
3 | from pydantic import BaseModel, ConfigDict
4 |
5 | class JSONRPCMessage(BaseModel):
6 | jsonrpc: str = "2.0"
7 | id: Optional[str] = None
8 | method: Optional[str] = None
9 | params: Optional[Dict[str, Any]] = None
10 | result: Optional[Dict[str, Any]] = None
11 | error: Optional[Dict[str, Any]] = None
12 |
13 | model_config = ConfigDict(extra="allow")
--------------------------------------------------------------------------------
/ui/noogle/tsconfig.node.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "@tsconfig/node20/tsconfig.json",
3 | "include": [
4 | "vite.config.*",
5 | "vitest.config.*",
6 | "cypress.config.*",
7 | "nightwatch.conf.*",
8 | "playwright.config.*"
9 | ],
10 | "compilerOptions": {
11 | "composite": true,
12 | "noEmit": true,
13 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
14 | "module": "ESNext",
15 | "moduleResolution": "Bundler",
16 | "types": [
17 | "node"
18 | ]
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/tests/mcp/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "vue",
3 | "version": "0.0.0",
4 | "private": true,
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite",
8 | "build": "vite build",
9 | "preview": "vite preview",
10 | "build-only": "vite build",
11 | "type-check": "vue-tsc --build --force"
12 | },
13 | "dependencies": {
14 | "@modelcontextprotocol/sdk": "^1.5.0",
15 | "@rust-nostr/nostr-sdk": "0.39.0",
16 | "zod": "^3.24.2"
17 | },
18 | "optionalDependencies": {
19 | "@rollup/rollup-linux-x64-gnu": "4.6.1"
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/ui/noogle/tsconfig.app.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "@vue/tsconfig/tsconfig.dom.json",
3 | "include": [
4 | "env.d.ts",
5 | "src/**/*",
6 | "src/**/*.vue"
7 | ],
8 | "exclude": [
9 | "src/**/__tests__/*"
10 | ],
11 | "compilerOptions": {
12 | "composite": true,
13 | "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
14 | "baseUrl": ".",
15 | "paths": {
16 | "@/*": [
17 | "./src/*"
18 | ]
19 | },
20 | "allowJs": true,
21 | "noImplicitAny": false,
22 | "verbatimModuleSyntax": false
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/image_interrogator/image_interrogator.trainer:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
6 |
7 |
8 |
9 |
10 |
12 |
13 |
--------------------------------------------------------------------------------
/tests/mcp/dvm/mcp_server_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "mcpServers": {
3 | "sqlite": {
4 | "command": "uvx",
5 | "args": ["mcp-server-sqlite", "--db-path", "test.db"]
6 | },
7 | "mcp-crypto-price": {
8 | "command": "node",
9 | "args": [
10 | "../../mcp-crypto-price/build/index.js"
11 | ]
12 | },
13 | "nostr-notes": {
14 | "command": "uv",
15 | "args": [
16 | "run",
17 | "--with",
18 | "mcp[cli]",
19 | "--with",
20 | "nostr_sdk==0.39.0",
21 | "mcp",
22 | "run",
23 | "tests/mcp/mcp-servers/nostr-notes/server.py"
24 | ]
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/ui/noogle/src/components/Home.vue:
--------------------------------------------------------------------------------
1 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/nostr_dvm/utils/print_utils.py:
--------------------------------------------------------------------------------
1 | class bcolors:
2 | HEADER = '\033[95m'
3 | ENDC = '\033[0m'
4 | BOLD = '\033[1m'
5 | UNDERLINE = '\033[4m'
6 | RED = '\033[91m'
7 | GREEN = '\033[92m'
8 | BLUE = '\033[94m'
9 | CYAN = '\033[96m'
10 | WHITE = '\033[97m'
11 | YELLOW = '\033[93m'
12 | MAGENTA = '\033[95m'
13 | GREY = '\033[90m'
14 | BLACK = '\033[90m'
15 | BRIGHT_BLACK = '\033[90m'
16 | BRIGHT_RED = '\033[91m'
17 | BRIGHT_GREEN = '\033[92m'
18 | BRIGHT_YELLOW = '\033[93m'
19 | BRIGHT_BLUE = '\033[94m'
20 | BRIGHT_MAGENTA = '\033[95m'
21 | BRIGHT_CYAN = '\033[96m'
22 | BRIGHT_WHITE = '\033[97m'
23 | DEFAULT = '\033[99m'
24 |
--------------------------------------------------------------------------------
/ui/noogle/public/opensearch.xml:
--------------------------------------------------------------------------------
1 |
3 | Noogle
4 | Search the Nostr
5 |
6 |
7 | https://noogle.lol/favicon.ico
8 |
9 | http://my-site/search
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/ui/noogle/src/assets/main.css:
--------------------------------------------------------------------------------
1 | @import './base.css';
2 |
3 | #app {
4 | max-width: 1280px;
5 | margin: 0 auto;
6 | padding: 2rem;
7 | font-weight: normal;
8 | }
9 |
10 | a,
11 | .green {
12 | text-decoration: none;
13 | color: hsla(160, 100%, 37%, 1);
14 | transition: 0.4s;
15 | padding: 3px;
16 | }
17 |
18 | @media (hover: hover) {
19 | a:hover {
20 | background-color: hsla(160, 100%, 37%, 0.2);
21 | }
22 | }
23 |
24 | @media (min-width: 1024px) {
25 | body {
26 | display: flex;
27 | place-items: center;
28 | }
29 |
30 | #app {
31 | display: grid;
32 | grid-template-columns: 1fr 1fr;
33 | padding: 0 2rem;
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/stablevideodiffusion/stablevideodiffusion.trainer:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
6 |
7 |
8 |
9 |
11 |
12 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/send_ping.py:
--------------------------------------------------------------------------------
1 | # messages/send_ping.py
2 | from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
3 | from nostr_dvm.backends.mcp.messages.send_message import send_message
4 | from nostr_dvm.backends.mcp.messages.message_types.ping_message import PingMessage
5 |
async def send_ping(
    read_stream: MemoryObjectReceiveStream,
    write_stream: MemoryObjectSendStream,
) -> bool:
    """Send a JSON-RPC ping over the streams and report whether a reply arrived.

    Returns True when send_message yields any response, False otherwise.
    """
    reply = await send_message(
        read_stream=read_stream,
        write_stream=write_stream,
        message=PingMessage(),
    )
    return reply is not None
22 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/send_prompts.py:
--------------------------------------------------------------------------------
# messages/prompts.py
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
# Use absolute package imports like the sibling modules (send_ping, send_tools_list);
# the previous bare `from send_message import ...` only worked when this directory
# happened to be on sys.path and broke normal package imports.
from nostr_dvm.backends.mcp.messages.send_message import send_message
from nostr_dvm.backends.mcp.messages.message_types.prompts_messages import PromptsListMessage

async def send_prompts_list(
    read_stream: MemoryObjectReceiveStream,
    write_stream: MemoryObjectSendStream,
) -> list:
    """Send a 'prompts/list' message and return the list of prompts.

    Returns the response's "result" value, or [] when the key is absent.
    """
    message = PromptsListMessage()

    # send the message
    response = await send_message(
        read_stream=read_stream,
        write_stream=write_stream,
        message=message,
    )

    # return the result
    return response.get("result", [])
22 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/send_tools_list.py:
--------------------------------------------------------------------------------
1 | # mcpcli/messages/tools.py
2 | from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
3 | from nostr_dvm.backends.mcp.messages.send_message import send_message
4 | from nostr_dvm.backends.mcp.messages.message_types.tools_messages import ToolsListMessage
5 |
async def send_tools_list(
    read_stream: MemoryObjectReceiveStream,
    write_stream: MemoryObjectSendStream,
) -> list:
    """Request the server's tool list ('tools/list') and return it.

    Returns the response's "result" value, or [] when the key is absent.
    """
    response = await send_message(
        read_stream=read_stream,
        write_stream=write_stream,
        message=ToolsListMessage(),
    )
    return response.get("result", [])
22 |
--------------------------------------------------------------------------------
/tests/upload_hoster.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | from pathlib import Path
4 |
5 | import dotenv
6 | from nostr_sdk import Keys
7 | from nostr_dvm.utils.blossom_utils import upload_blossom
8 | from nostr_dvm.utils.output_utils import upload_media_to_hoster
9 |
if __name__ == '__main__':
    # Guard clause: refuse to run without a .env file, otherwise load it.
    env_path = Path('.env')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    pkey = os.getenv("DVM_PRIVATE_KEY_BOT")

    # Alternative hoster upload kept for reference:
    #asyncio.run(upload_media_to_hoster("tests/image.png", pkey, True))
    asyncio.run(upload_blossom("tests/cat4.png", pkey, "https://blossom.primal.net"))
23 |
24 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/send_resources.py:
--------------------------------------------------------------------------------
# messages/send_resources.py
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
# Use absolute package imports like the sibling modules (send_ping, send_tools_list);
# the previous bare `from send_message import ...` only worked when this directory
# happened to be on sys.path and broke normal package imports.
from nostr_dvm.backends.mcp.messages.send_message import send_message
from nostr_dvm.backends.mcp.messages.message_types.resources_messages import ResourcesListMessage

async def send_resources_list(
    read_stream: MemoryObjectReceiveStream,
    write_stream: MemoryObjectSendStream,
) -> list:
    """Send a 'resources/list' message and return the list of resources.

    Returns the response's "result" value, or [] when the key is absent.
    """
    # create the message
    message = ResourcesListMessage()

    # send the message
    response = await send_message(
        read_stream=read_stream,
        write_stream=write_stream,
        message=message,
    )

    # return the result
    return response.get("result", [])
23 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/whisperx/whisperx_transcript.trainer:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
10 |
11 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/image_upscale/image_upscale_realesrgan.trainer:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
10 |
11 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/message_types/incrementing_id_message.py:
--------------------------------------------------------------------------------
1 | # messages/message_types/incrementing_id_message.py
2 | from typing import ClassVar
3 | from nostr_dvm.backends.mcp.messages.message_types.json_rpc_message import JSONRPCMessage
4 |
class IncrementingIDMessage(JSONRPCMessage):
    """JSON-RPC message whose id is '<prefix>-<counter>' with a class-level counter.

    The counter is a ClassVar: writes go through type(self), so each subclass
    gets its own counter attribute the first time one of its instances writes
    it; until then reads fall back to this base class's value.
    """
    counter: ClassVar[int] = 0

    @classmethod
    def load_counter(cls, value: int):
        """Restore the counter, e.g. from previously persisted state."""
        cls.counter = value

    @classmethod
    def save_counter(cls) -> int:
        """Return the current counter value so it can be persisted."""
        return cls.counter

    def __init__(self, prefix: str, method: str, start_id: int = None, **kwargs):
        """Build the message with an auto-generated id.

        When start_id is given the counter is reset to it (and used as-is);
        otherwise the counter is incremented before building the id.
        """
        if start_id is not None:
            type(self).counter = start_id
        else:
            type(self).counter += 1

        message_id = f"{prefix}-{type(self).counter}"
        super().__init__(method=method, id=message_id, **kwargs)
24 |
--------------------------------------------------------------------------------
/ui/noogle/src/app.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
:root {
/* NOTE(review): dark colors (light text on #242424) are declared as the
   default while color-scheme is "light", and the light palette is gated
   behind the light media query — the scheme/palette pairing looks
   inverted; confirm the intended default theme. */
color-scheme: light;
color: rgba(255, 255, 255, 0.87);
background-color: #242424;
}

@media (prefers-color-scheme: light) {
:root {
color: #213547;
background-color: #ffffff;
}
}
17 |
18 | a,
19 | .green {
20 | text-decoration: none;
21 | color: hsla(160, 100%, 37%, 1);
22 | transition: 0.4s;
23 | padding: 3px;
24 | }
25 |
26 | .purple {
27 | @apply text-nostr hover:text-orange-400;
28 | text-decoration: none;
29 | transition: 0.4s;
30 | padding: 3px;
31 | }
32 |
33 | .white {
34 | @apply text-white;
35 | text-decoration: none;
36 | transition: 0.4s;
37 | padding: 3px;
38 | }
39 |
40 | .menu {
41 | color: white;
42 | @apply btn bg-transparent border-transparent tracking-wide;
43 |
44 | }
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/nostr_dvm/framework.py:
--------------------------------------------------------------------------------
1 | import os
2 | import signal
3 | import time
4 | from nostr_dvm.utils.print_utils import bcolors
5 |
6 |
class DVMFramework:
    """Runs a collection of DVMs and shuts them all down on Ctrl-C.

    Fixes over the previous version: the class-level `dvms = []` (a shared
    mutable class attribute that was always shadowed by the instance
    attribute set in __init__) is removed, and the "Shuting down" typo in
    the shutdown message is corrected.
    """

    def __init__(self):
        # DVM instances registered via add(); owned per framework instance.
        self.dvms = []

    def add(self, dvm):
        """Register a DVM to be started by run()."""
        self.dvms.append(dvm)

    def run(self):
        """Start all registered DVMs and block until KeyboardInterrupt.

        On interrupt, each DVM is announced (when it has a NIP-89 config) and
        joined, then the process is terminated.
        """
        for dvm in self.dvms:
            dvm.run()

        try:
            while True:
                time.sleep(0.1)
        except KeyboardInterrupt:
            for dvm in self.dvms:
                if dvm.dvm_config.NIP89 is not None:
                    print(bcolors.CYAN + "Shutting down " + dvm.dvm_config.NIP89.NAME + bcolors.ENDC)
                dvm.join()
            print(bcolors.GREEN + "All DVMs shut down." + bcolors.ENDC)
            # SIGKILL forces exit even if non-daemon threads linger;
            # exit(1) is a fallback in case the signal does not terminate us.
            os.kill(os.getpid(), signal.SIGKILL)
            exit(1)

    def get_dvms(self):
        """Return the list of registered DVMs."""
        return self.dvms
35 |
36 |
--------------------------------------------------------------------------------
/ui/noogle/src/App.vue:
--------------------------------------------------------------------------------
1 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
47 |
--------------------------------------------------------------------------------
/ui/noogle/src/components/icons/IconTooling.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
13 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/send_call_tool.py:
--------------------------------------------------------------------------------
1 | # mcpcli/messages/tools.py
2 | from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
3 | from nostr_dvm.backends.mcp.messages.send_message import send_message
4 | from nostr_dvm.backends.mcp.messages.message_types.tools_messages import CallToolMessage
5 |
async def send_call_tool(
    tool_name: str,
    arguments: dict,
    read_stream: MemoryObjectReceiveStream,
    write_stream: MemoryObjectSendStream,
) -> dict:
    """Invoke a server tool via 'tools/call' and return its result dict.

    Failures are reported in-band as {"isError": True, "error": ...}
    instead of being raised to the caller.
    """
    call_message = CallToolMessage(tool_name=tool_name, arguments=arguments)

    try:
        response = await send_message(
            read_stream=read_stream,
            write_stream=write_stream,
            message=call_message,
        )
        return response.get("result", {})
    except Exception as exc:
        return {"isError": True, "error": str(exc)}
27 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/environment.py:
--------------------------------------------------------------------------------
1 | # environment.py
2 | import os
3 | import sys
4 |
# Environment variables a spawned process should inherit, per platform.
DEFAULT_INHERITED_ENV_VARS = (
    ["HOME", "LOGNAME", "PATH", "SHELL", "TERM", "USER"]
    if sys.platform != "win32"
    else [
        "APPDATA",
        "HOMEDRIVE",
        "HOMEPATH",
        "LOCALAPPDATA",
        "PATH",
        "PROCESSOR_ARCHITECTURE",
        "SYSTEMDRIVE",
        "SYSTEMROOT",
        "TEMP",
        "USERNAME",
        "USERPROFILE",
    ]
)


def get_default_environment() -> dict[str, str]:
    """Return the subset of os.environ that should be inherited by default.

    Unset/empty variables are dropped, as are values starting with "()"
    (presumably exported shell functions).
    """
    inherited: dict[str, str] = {}
    for name in DEFAULT_INHERITED_ENV_VARS:
        value = os.environ.get(name)
        if not value:
            continue
        if value.startswith("()"):
            continue
        inherited[name] = value
    return inherited
39 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/message_types/tools_messages.py:
--------------------------------------------------------------------------------
1 | # messages/message_types/tools_messages.py
2 | from nostr_dvm.backends.mcp.messages.message_types.incrementing_id_message import IncrementingIDMessage
3 | from nostr_dvm.backends.mcp.messages.message_types.json_rpc_message import JSONRPCMessage
4 |
class ToolsListMessage(IncrementingIDMessage):
    """JSON-RPC request for 'tools/list' with an auto-incremented id."""

    def __init__(self, start_id: int = None, **kwargs):
        super().__init__(prefix="tools-list", method="tools/list", start_id=start_id, **kwargs)
8 |
class CallToolMessage(IncrementingIDMessage):
    """JSON-RPC request for 'tools/call' carrying the tool name and arguments."""

    def __init__(self, tool_name: str, arguments: dict, start_id: int = None, **kwargs):
        super().__init__(prefix="tools-call", method="tools/call", start_id=start_id, params={"name": tool_name, "arguments": arguments}, **kwargs)
12 |
class ToolsListChangedMessage(JSONRPCMessage):
    """'notifications/tools/list_changed' notification."""

    def __init__(self, **kwargs):
        # A notification has no 'id' field.
        super().__init__(method="notifications/tools/list_changed", id=None, **kwargs)
17 |
--------------------------------------------------------------------------------
/ui/noogle/src/router/index.js:
--------------------------------------------------------------------------------
import {createRouter, createWebHistory} from "vue-router";


// Route table — every view is lazy-loaded so it is code-split into its own chunk.
const routes = [
    {path: "/", component: () => import("@/components/Home.vue")},
    {path: "/about", component: () => import("@/components/AboutPage.vue")},
    {path: "/donate", component: () => import("@/components/Donate.vue")},
    {path: "/nip89", component: () => import("@/components/Nip89view.vue")},
    {path: "/image", component: () => import("@/components/Image.vue")},
    {path: "/filter", component: () => import("@/components/FilterGeneration.vue")},
    {path: "/ai", component: () => import("@/components/ChatGeneration.vue")},

    {path: "/discover", component: () => import("@/components/RecommendationGeneration.vue")},
    // NOTE(review): both the article route and the catch-all render Home —
    // presumably Home inspects the current route itself; verify.
    {path: "/article/:id", component: () => import("@/components/Home.vue")},
    {path: '/:pathMatch(.*)*', component: () => import("@/components/Home.vue")},
];

// HTML5 history mode: clean URLs, needs server-side fallback to index.html.
const router = createRouter({
    history: createWebHistory(),
    routes,
});

export default router;
--------------------------------------------------------------------------------
/ui/noogle/src/components/icons/IconCommunity.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/dvm.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/examples/tts_dvm/.env_example:
--------------------------------------------------------------------------------
1 | #Create an account with a lnbits instance of your choice, add the admin key and id here. This account will be used to create a new lnbits wallet for each dvm/bot
2 | LNBITS_ADMIN_KEY = ""
3 | LNBITS_WALLET_ID = ""
4 | LNBITS_HOST = "https://lnbits.com" #Use your own/a trusted instance ideally.
5 | # In order to create a zappable lightning address, host nostdress on your domain or use this preinstalled domain.
6 | # We will use the API to create and manage zappable lightning addresses
7 | NOSTDRESS_DOMAIN = "nostrdvm.com"
8 |
9 |
10 | # We will automatically create dtags and private keys based on the identifier variable in main.
11 | # If your DVM already has a dtag and private key you can replace it here before publishing the DTAG to not create a new one.
12 | # The name and NIP90 info of the DVM can be changed but the identifier must stay the same in order to not create a different dtag.
13 |
14 | # We will also create new wallets on the given lnbits instance for each dvm. If you want to use an existing wallet, you can replace the parameters here as well.
15 | # Make sure you backup this file to keep access to your wallets
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023-2024 believethehype
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/examples/unleashed_dvm/.env_example:
--------------------------------------------------------------------------------
1 | #Create an account with a lnbits instance of your choice, add the admin key and id here. This account will be used to create a new lnbits wallet for each dvm/bot
2 | LNBITS_ADMIN_KEY = ""
3 | LNBITS_WALLET_ID = ""
4 | LNBITS_HOST = "https://lnbits.com" #Use your own/a trusted instance ideally.
5 | # In order to create a zappable lightning address, host nostdress on your domain or use this preinstalled domain.
6 | # We will use the API to create and manage zappable lightning addresses
7 | NOSTDRESS_DOMAIN = "nostrdvm.com"
8 |
9 |
10 | UNLEASHED_API_KEY = ""
11 |
12 |
13 | # We will automatically create dtags and private keys based on the identifier variable in main.
14 | # If your DVM already has a dtag and private key you can replace it here before publishing the DTAG to not create a new one.
15 | # The name and NIP90 info of the DVM can be changed but the identifier must stay the same in order to not create a different dtag.
16 |
17 | # We will also create new wallets on the given lnbits instance for each dvm. If you want to use an existing wallet, you can replace the parameters here as well.
18 | # Make sure you backup this file to keep access to your wallets
--------------------------------------------------------------------------------
/nostr_dvm/utils/heartbeat.py:
--------------------------------------------------------------------------------
1 | from nostr_dvm.utils.definitions import EventDefinitions
2 | from nostr_dvm.utils.nostr_utils import send_event
3 | from nostr_dvm.utils.print_utils import bcolors
4 | from nostr_sdk import Tag, Keys, EventBuilder, Timestamp
5 |
6 |
async def beat(dvm_config, client, frequency=300):
    """Publish a signed heartbeat (KIND_HEARTBEAT) event for a DVM.

    The event carries a status tag, the DVM's NIP-89 d-tag and an expiration
    tag `frequency` seconds in the future, is signed with the NIP-89 key and
    broadcast via send_event; the send result is printed.

    :param dvm_config: DVM config; NIP89.DTAG, NIP89.PK and NIP89.NAME are read.
    :param client: client handed through to send_event.
    :param frequency: seconds until the heartbeat's expiration tag (default 300).
    """
    status_tag = Tag.parse(["status", "My heart keeps beating like a hammer"])
    d_tag = Tag.parse(["d", dvm_config.NIP89.DTAG])
    # Expiration lets relays drop the heartbeat once it is stale.
    expiration_tag = Tag.parse(["expiration", str(Timestamp.now().as_secs() + frequency)])

    tags = [status_tag, d_tag, expiration_tag]
    keys = Keys.parse(dvm_config.NIP89.PK)
    content = "Alive and kicking"

    event = EventBuilder(EventDefinitions.KIND_HEARTBEAT, content).tags(tags).sign_with_keys(keys)

    response_status = await send_event(event, client=client, dvm_config=dvm_config, broadcast=True)

    print(bcolors.BRIGHT_RED + "[" + dvm_config.NIP89.NAME + "] Sent heartbeat for " + dvm_config.NIP89.NAME + ". Success: " + str(response_status.success) + " Failed: " + str(response_status.failed) + " EventID: "
          + response_status.id.to_hex() + " / " + response_status.id.to_bech32())
23 |
--------------------------------------------------------------------------------
/ui/noogle/src/main.js:
--------------------------------------------------------------------------------
1 | //import './assets/main.css'
2 | import {createApp} from 'vue'
3 |
4 | import App from './App.vue'
5 | import store from './store';
6 | import "./app.css"
7 |
8 | import 'vue3-easy-data-table/dist/style.css';
9 | import router from './router'
10 | import Vue3EasyDataTable from 'vue3-easy-data-table';
11 |
12 | import VueDatePicker from '@vuepic/vue-datepicker';
13 | import '@vuepic/vue-datepicker/dist/main.css'
14 |
15 |
16 | //This is all for notifications
17 | import VueNotifications from "vue-notifications";
18 | import miniToastr from 'mini-toastr'
19 | import {registerSW} from 'virtual:pwa-register'
20 |
21 | registerSW({immediate: true})
22 |
23 | miniToastr.init()
24 |
25 |
// Adapter: route vue-notifications calls to the matching mini-toastr method.
function toast({title, message, type, timeout, cb}) {
    return miniToastr[type](message, title, timeout, cb)
}

// Every notification level uses the same toast adapter.
const options = {
    success: toast,
    error: toast,
    info: toast,
    warn: toast
}
//This is all for notifications end

createApp(App)
    .use(VueNotifications, options)
    .use(store)
    .use(router)

    // Globally registered components used across views.
    .component('EasyDataTable', Vue3EasyDataTable)
    .component('VueDatePicker', VueDatePicker)
    .mount('#app')
46 |
--------------------------------------------------------------------------------
/tests/nwc.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | from pathlib import Path
4 |
5 | import dotenv
6 | from nostr_sdk import Keys, PublicKey
7 |
8 | from nostr_dvm.utils import dvmconfig
9 | from nostr_dvm.utils.nwc_tools import nwc_zap
10 | from nostr_dvm.utils.zap_utils import zaprequest
11 |
12 |
async def playground():
    """Create a zap request for a lightning address and pay the resulting
    bolt11 invoice through the NWC connection from the environment."""

    connectionstr = os.getenv("TEST_NWC")  # NWC connection string
    keys = Keys.parse(os.getenv("TEST_USER"))
    # NOTE(review): zaprequest is called without await — it appears to be
    # synchronous; confirm, since most helpers in this package are async.
    bolt11 = zaprequest("bot@nostrdvm.com", 5, "test", None, PublicKey.parse("npub1cc79kn3phxc7c6mn45zynf4gtz0khkz59j4anew7dtj8fv50aqrqlth2hf"), keys, dvmconfig.DVMConfig.RELAY_LIST, zaptype="private")
    print(bolt11)
    result = await nwc_zap(connectionstr, bolt11, keys, externalrelay=None)
    print(result)
21 |
22 |
if __name__ == '__main__':
    # Create an empty .env if none exists, then load it before the demo runs.
    env_path = Path('.env')
    if not env_path.is_file():
        with open('.env', 'w') as f:
            print("Writing new .env file")
            f.write('')
    if env_path.is_file():
        print(f'loading environment from {env_path.resolve()}')
        dotenv.load_dotenv(env_path, verbose=True, override=True)
    else:
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    asyncio.run(playground())
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
3 |
4 | # This workflow uses actions that are not certified by GitHub.
5 | # They are provided by a third-party and are governed by
6 | # separate terms of service, privacy policy, and support
7 | # documentation.
8 |
9 | name: Upload Python Package
10 |
11 | on:
12 | release:
13 | types: [published]
14 |
15 | permissions:
16 | contents: read
17 |
18 | jobs:
19 | deploy:
20 |
21 | runs-on: ubuntu-latest
22 |
23 | steps:
24 | - uses: actions/checkout@v3
25 | - name: Set up Python
26 | uses: actions/setup-python@v3
27 | with:
28 | python-version: '3.x'
29 | - name: Install dependencies
30 | run: |
31 | python -m pip install --upgrade pip
32 | pip install build
33 | - name: Build package
34 | run: python -m build
35 | - name: Publish package
36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
37 | with:
38 | user: __token__
39 | password: ${{ secrets.PYPI_API_TOKEN }}
40 |
--------------------------------------------------------------------------------
/ui/noogle/src/components/icons/IconDocumentation.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/message_types/prompts_models.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, List, Dict, Any, Union, Literal
2 | from pydantic import BaseModel, Field
3 |
4 | # Content Types
class TextContent(BaseModel):
    """Plain-text prompt content."""
    type: Literal["text"] = "text"
    text: str
8 |
class ImageContent(BaseModel):
    """Image prompt content; data holds the image bytes base64-encoded."""
    type: Literal["image"] = "image"
    data: str  # base64-encoded image data
    mimeType: str
13 |
class ResourceData(BaseModel):
    """Payload of an embedded resource: its URI plus text or a base64 blob."""
    uri: str
    mimeType: str
    text: Optional[str] = None
    blob: Optional[str] = None  # if binary data is included, base64-encoded
19 |
class ResourceContent(BaseModel):
    """Prompt content that embeds a resource."""
    type: Literal["resource"] = "resource"
    resource: ResourceData
23 |
24 | # Union of all content types
25 | MessageContent = Union[TextContent, ImageContent, ResourceContent]
26 |
class PromptMessage(BaseModel):
    """One message of a prompt: a role plus one of the content types above."""
    role: str
    content: MessageContent
30 |
31 | # Prompt Definition
class Prompt(BaseModel):
    """Prompt definition as returned by 'prompts/list'."""
    name: str
    description: Optional[str] = None
    # NOTE(review): MCP describes prompt arguments as objects (name,
    # description, required); a bare List[str] may be lossy — verify.
    arguments: Optional[List[str]] = None
36 |
class PromptsGetResult(BaseModel):
    """Result payload of 'prompts/get': description plus the prompt messages."""
    description: Optional[str]
    messages: List[PromptMessage]
40 |
class PromptsGetParams(BaseModel):
    """Parameters for 'prompts/get'."""
    name: str
    # Mutable default is acceptable here: pydantic copies field defaults per instance.
    arguments: Dict[str, Any] = {}
44 |
--------------------------------------------------------------------------------
/tests/db.py:
--------------------------------------------------------------------------------
import asyncio
from threading import Thread

from nostr_sdk import Keys, Filter, ClientBuilder, NostrDatabase, SyncOptions, init_logger, LogLevel
4 |
init_logger(LogLevel.INFO)
# NOTE(review): hardcoded private key — acceptable only for a throwaway test
# key; never commit a real secret like this.
keys = Keys.parse("nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85")
print(keys.public_key().to_bech32())
8 |
9 |
10 |
async def reconcile_db():
    """Sync this key's events from the relays into the local LMDB store,
    then run do_some_work() to print a sample of what was stored."""
    # Create/open the local LMDB database file
    database = NostrDatabase.lmdb("nostr.db")

    # NOT AVAILABLE ON WINDOWS AT THE MOMENT!
    # Create/open nostrdb database
    # database = NostrDatabase.ndb("ndb")

    client = ClientBuilder().database(database).build()

    await client.add_relay("wss://relay.damus.io")
    await client.add_relay("wss://atl.purplerelay.com")
    await client.connect()

    # Negentropy reconciliation: transfer only events missing locally
    f = Filter().author(keys.public_key())
    opts = SyncOptions()
    await client.sync(f, opts)

    await do_some_work()
31 |
async def do_some_work():
    """Print (as JSON) up to ten stored events authored by this key."""
    database = NostrDatabase.lmdb("nostr.db")
    recent = await database.query(Filter().author(keys.public_key()).limit(10))
    for stored_event in recent.to_vec():
        print(stored_event.as_json())
39 |
# reconcile_db is a coroutine function: Thread(target=reconcile_db) would only
# create a coroutine object and never execute it (RuntimeWarning: coroutine
# was never awaited). Drive it with asyncio.run inside the thread instead.
nostr_dvm_thread = Thread(target=lambda: asyncio.run(reconcile_db()))
nostr_dvm_thread.start()
42 |
--------------------------------------------------------------------------------
/examples/ollama_dvm/.env_example:
--------------------------------------------------------------------------------
1 | #Create an account with a lnbits instance of your choice, add the admin key and id here. This account will be used to create a new lnbits wallet for each dvm/bot
2 | LNBITS_ADMIN_KEY = ""
3 | LNBITS_WALLET_ID = ""
4 | LNBITS_HOST = "https://lnbits.com" #Use your own/a trusted instance ideally.
5 | # In order to create a zappable lightning address, host nostdress on your domain or use this preinstalled domain.
6 | # We will use the API to create and manage zappable lightning addresses
7 | NOSTDRESS_DOMAIN = "nostrdvm.com"
8 |
9 | #Backend Specific Options for tasks that require them. A DVM needing these should only be started if these are set.
10 | OPENAI_API_KEY = "" # Enter your OpenAI API Key to use DVMs with OpenAI services
11 |
12 | # We will automatically create dtags and private keys based on the identifier variable in main.
13 | # If your DVM already has a dtag and private key you can replace it here before publishing the DTAG to not create a new one.
14 | # The name and NIP90 info of the DVM can be changed but the identifier must stay the same in order to not create a different dtag.
15 |
16 | # We will also create new wallets on the given lnbits instance for each dvm. If you want to use an existing wallet, you can replace the parameters here as well.
17 | # Make sure you backup this file to keep access to your wallets
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/message_types/prompts_messages.py:
--------------------------------------------------------------------------------
1 | # messages/message_types/prompts_messages.py
2 | from typing import Any, Dict, Optional
3 | from mcpcli.messages.message_types.incrementing_id_message import IncrementingIDMessage
4 | from mcpcli.messages.message_types.json_rpc_message import JSONRPCMessage
5 | from mcpcli.messages.message_types.prompts_models import PromptsGetParams
6 |
7 | class PromptsListMessage(IncrementingIDMessage):
8 | def __init__(self, start_id: int = None, **kwargs):
9 | super().__init__(prefix="prompts-list", method="prompts/list", start_id=start_id, **kwargs)
10 |
11 |
12 | class PromptsGetMessage(IncrementingIDMessage):
13 | def __init__(self, name: str, arguments: Optional[Dict[str, Any]] = None, start_id: int = None, **kwargs):
14 | # Validate params using PromptsGetParams
15 | params_model = PromptsGetParams(name=name, arguments=arguments or {})
16 | super().__init__(
17 | prefix="prompts-get",
18 | method="prompts/get",
19 | start_id=start_id,
20 | params=params_model.model_dump(),
21 | **kwargs
22 | )
23 |
24 |
25 | class PromptsListChangedMessage(JSONRPCMessage):
26 | def __init__(self, **kwargs):
27 | super().__init__(method="notifications/prompts/list_changed", id=None, **kwargs)
28 |
--------------------------------------------------------------------------------
/tests/tor_test.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from nostr_sdk import Keys, ClientBuilder, Options, EventBuilder, Connection, ConnectionTarget, init_logger, LogLevel, NostrSigner
3 |
4 |
async def main():
    """Publish a text note over clearnet and .onion relays, routing onion
    traffic through the embedded Tor client."""
    init_logger(LogLevel.INFO)

    note_keys = Keys.generate()
    print(note_keys.public_key().to_bech32())

    # Route `.onion` relay traffic through the embedded Tor client.
    tor_connection = Connection().embedded_tor().target(ConnectionTarget.ONION)
    client = (
        ClientBuilder()
        .signer(NostrSigner.keys(note_keys))
        .opts(Options().connection(tor_connection))
        .build()
    )

    for relay_url in (
        "wss://relay.damus.io",
        "ws://oxtrdevav64z64yb7x6rjg4ntzqjhedm5b5zjqulugknhzr46ny2qbad.onion",
        "ws://2jsnlhfnelig5acq6iacydmzdbdmg7xwunm4xl6qwbvzacw4lwrjmlyd.onion",
    ):
        await client.add_relay(relay_url)
    await client.connect()

    res = await client.send_event_builder(EventBuilder.text_note("Hello from rust-nostr Python bindings!"))
    print("Event sent:")
    print(f" hex: {res.id.to_hex()}")
    print(f" bech32: {res.id.to_bech32()}")
    print(f" Successfully sent to: {res.output.success}")
    print(f" Failed to send to: {res.output.failed}")
29 |
30 |
31 | if __name__ == '__main__':
32 | asyncio.run(main())
--------------------------------------------------------------------------------
/ui/noogle/vite.config.ts:
--------------------------------------------------------------------------------
1 | import {fileURLToPath, URL} from 'node:url'
2 | import {defineConfig} from 'vite'
3 | import vue from '@vitejs/plugin-vue'
4 | import {VitePWA} from 'vite-plugin-pwa'
5 | // https://vitejs.dev/config/
// https://vitejs.dev/config/
export default defineConfig({
  plugins: [
    vue(),
    VitePWA({
      // Rebuilt service workers activate automatically, without prompting the user.
      registerType: 'autoUpdate',
      // Web-app manifest so the site is installable as a PWA.
      manifest: {
        name: 'Noogle',
        short_name: 'Noogle',
        description: 'Other Stuff on Nostr',
        theme_color: '#ffffff',
        icons: [
          {
            src: 'pwa-192x192.png',
            sizes: '192x192',
            type: 'image/png'
          },
          {
            src: 'pwa-512x512.png',
            sizes: '512x512',
            type: 'image/png'
          }
        ]
      },
      // Also run the service worker during `vite dev`.
      devOptions: {
        enabled: true,
      },
      workbox: {
        // Precache assets up to 8 MB (workbox default is 2 MB).
        maximumFileSizeToCacheInBytes: 8000000,
      },
    })
  ],
  resolve: {
    alias: {
      // '@' maps to ./src for shorter import paths.
      '@': fileURLToPath(new URL('./src', import.meta.url))
    }
  }
})
44 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/stablediffusionxl/readme.md:
--------------------------------------------------------------------------------
1 | # Stable Diffusion XL
2 |
3 | This module provides image generation based on prompts.
4 |
5 | * https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0
6 |
7 | ## Options
8 |
9 | - `model`: string, identifier of the model to choose
10 | - `stabilityai/stable-diffusion-xl-base-1.0`: Default Stable Diffusion XL model
11 |
12 |
13 | - `ratio`: Ratio of the output image
14 | - `1-1` ,`4-3`, `16-9`, `16-10`, `3-4`,`9-16`,`10-16`
15 |
16 | - `high_noise_frac`: Denoising factor
17 |
18 | - `n_steps`: how many iterations should be performed
19 |
20 | ## Example payload
21 |
22 | ```python
23 | payload = {
24 | 'trainerFilePath': 'modules\\stablediffusionxl\\stablediffusionxl.trainer',
25 | 'server': '127.0.0.1',
26 |     'data': '[{"id":"input_prompt","type":"input","src":"user:text","prompt":"' + prompt +'","active":"True"},{"id":"negative_prompt","type":"input","src":"user:text","prompt":"' + negative_prompt +'","active":"True"},{"id":"output_image","type":"output","src":"file:image","uri":"' + outputfile+'","active":"True"}]',
27 | 'optStr': 'model=stabilityai/stable-diffusion-xl-base-1.0;ratio=4-3'
28 | }
29 |
30 | import requests
31 |
32 | url = 'http://127.0.0.1:53770/predict'
33 | headers = {'Content-type': 'application/x-www-form-urlencoded'}
34 | requests.post(url, headers=headers, data=payload)
35 | ```
36 |
--------------------------------------------------------------------------------
/tests/mcp/mcp-servers/nostr-notes/server.py:
--------------------------------------------------------------------------------
1 | import re
2 | from datetime import timedelta
3 |
4 | from mcp.server.fastmcp import FastMCP
5 |
6 | mcp = FastMCP("Nostr", description="Get notes from Nostr for a given key", dependencies=["nostr_sdk==0.39.0"])
7 |
@mcp.tool()
async def get_nostr_notes(npub: str, limit: int) -> str:
    """Fetch up to `limit` kind-1 notes authored by `npub` and return them as a numbered list."""
    from nostr_sdk import Client, Keys, NostrSigner, Filter, Kind, PublicKey

    # NOTE(review): hard-coded private key is fine for a demo tool, but should
    # come from configuration in anything real.
    keys = Keys.parse("e318cb3e6ac163814dd297c2c7d745faacfbc2a826eb4f6d6c81430426a83c2b")
    client = Client(NostrSigner.keys(keys))

    relay_list = ["wss://relay.damus.io",
                  "wss://nostr.oxtr.dev",
                  "wss://relay.primal.net",
                  ]
    for relay in relay_list:
        await client.add_relay(relay)

    await client.connect()

    f = Filter().kind(Kind(1)).author(PublicKey.parse(npub)).limit(limit)
    # BUG FIX: timedelta(5) means 5 *days*; the intended fetch timeout is 5 seconds.
    events = await client.fetch_events(f, timedelta(seconds=5))

    index = 1
    notes = ""
    for event in events.to_vec():
        try:
            # Drop characters outside this whitelist. NOTE(review): ",-/" inside
            # the class is a range covering ',' '-' '.' '/'; escape the '-' if a
            # literal dash alone was intended — confirm.
            pattern = r"[^a-zA-Z0-9\s.!?:,-/]"
            cleaned_string = re.sub(pattern, "", event.content())
            notes = notes + str(index) + ". " + cleaned_string + "\n"
            index += 1
        except Exception as e:
            print(e)

    return notes
41 |
42 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mlx/modules/stable_diffusion/config.py:
--------------------------------------------------------------------------------
1 | # Copyright © 2023 Apple Inc.
2 |
3 | from dataclasses import dataclass
4 | from typing import Tuple
5 |
6 |
@dataclass
class AutoencoderConfig:
    """Hyper-parameters for the VAE autoencoder."""
    in_channels: int = 3
    out_channels: int = 3
    latent_channels_out: int = 8
    latent_channels_in: int = 4
    # Tuple[int, ...] is the variadic-tuple annotation; Tuple[int] denotes a
    # 1-tuple and mis-described this 4-element default.
    block_out_channels: Tuple[int, ...] = (128, 256, 512, 512)
    layers_per_block: int = 2
    norm_num_groups: int = 32
    # Latent scaling factor used by Stable Diffusion v1/v2 checkpoints.
    scaling_factor: float = 0.18215
17 |
18 |
@dataclass
class CLIPTextModelConfig:
    """Hyper-parameters for the CLIP text encoder (defaults match SD 2.x's OpenCLIP)."""
    num_layers: int = 23
    model_dims: int = 1024
    num_heads: int = 16
    # Maximum token sequence length accepted by the encoder.
    max_length: int = 77
    vocab_size: int = 49408
26 |
27 |
@dataclass
class UNetConfig:
    """Hyper-parameters for the denoising UNet; per-resolution settings are tuples."""
    in_channels: int = 4
    out_channels: int = 4
    conv_in_kernel: int = 3
    conv_out_kernel: int = 3
    # Tuple[int, ...] is the variadic-tuple annotation; Tuple[int] denotes a
    # 1-tuple and mis-described these multi-element defaults.
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: Tuple[int, ...] = (2, 2, 2, 2)
    mid_block_layers: int = 2
    transformer_layers_per_block: Tuple[int, ...] = (1, 1, 1, 1)
    num_attention_heads: Tuple[int, ...] = (5, 10, 20, 20)
    cross_attention_dim: Tuple[int, ...] = (1024,) * 4
    norm_num_groups: int = 32
41 |
42 |
@dataclass
class DiffusionConfig:
    """Noise-schedule hyper-parameters for the diffusion sampler."""
    beta_schedule: str = "scaled_linear"
    beta_start: float = 0.00085
    beta_end: float = 0.012
    num_train_steps: int = 1000
49 |
--------------------------------------------------------------------------------
/ui/noogle/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "vue",
3 | "version": "0.0.0",
4 | "private": true,
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite",
8 | "build": "vite build",
9 | "preview": "vite preview",
10 | "build-only": "vite build",
11 | "type-check": "vue-tsc --build --force"
12 | },
13 | "dependencies": {
14 | "@getalby/sdk": "^3.4.0",
15 | "@rust-nostr/nostr-sdk": "^0.14.1",
16 | "@vuepic/vue-datepicker": "^7.4.1",
17 | "@vueuse/core": "^10.7.2",
18 | "bech32": "^2.0.0",
19 | "bootstrap": "^5.3.2",
20 | "daisyui": "^4.6.0",
21 | "mini-toastr": "^0.8.1",
22 | "nostr-login": "^1.6.7",
23 | "nostr-tools": "^2.4.0",
24 | "vue": "^3.4.15",
25 | "vue-notifications": "^1.0.2",
26 | "vue3-easy-data-table": "^1.5.47",
27 | "vuex": "^4.1.0",
28 | "webln": "^0.3.2"
29 | },
30 | "devDependencies": {
31 | "@tsconfig/node20": "^20.1.2",
32 | "@types/node": "^20.11.10",
33 | "@vitejs/plugin-vue": "^4.5.2",
34 | "@vue/tsconfig": "^0.5.1",
35 | "autoprefixer": "^10.4.17",
36 | "postcss": "^8.4.33",
37 | "sass": "^1.70.0",
38 | "tailwindcss": "^3.4.1",
39 | "typescript": "~5.3.0",
40 | "vite": "^5.0.10",
41 | "vite-plugin-pwa": "^0.20.5",
42 | "vue-router": "^4.2.5",
43 | "vue-tsc": "^2.0.29",
44 | "workbox-window": "^7.1.0"
45 | },
46 | "optionalDependencies": {
47 | "@rollup/rollup-linux-x64-gnu": "4.6.1"
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/ui/noogle/src/assets/nostr-purple.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
6 |
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/ui/noogle/src/components/Newnote.vue:
--------------------------------------------------------------------------------
1 |
16 |
17 |
18 |
19 |
20 |
21 |
24 |
25 | default content
26 |
27 |
35 |
36 |
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/examples/ollama_dvm/README.md:
--------------------------------------------------------------------------------
1 | # NostrAI: Nostr NIP90 Data Vending Machine Framework Example
2 |
3 | Projects in this folder contain ready-to-use DVMs. To run a DVM, follow these steps:
4 |
5 | ## To get started:
6 | - Install Python 3.10
7 |
8 |
9 | Create a new venv in this directory by opening the terminal here, or navigate to this directory and type: `"python -m venv venv"`
10 | - Place .env file (based on .env_example) in this folder.
11 | - Recommended but optional:
12 | - Create a `LNbits` account on an accessible instance of your choice, enter one account's id and admin key (this account will create other accounts for the dvms) Open the .env file and enter this info to `LNBITS_ADMIN_KEY`, `LNBITS_WALLET_ID`, `LNBITS_HOST`.
13 | - If you are running an own instance of `Nostdress` enter `NOSTDRESS_DOMAIN` or use the default one.
14 | - Activate the venv with
15 | - MacOS/Linux: source ./venv/bin/activate
16 | - Windows: .\venv\Scripts\activate
17 | - Type: `pip install nostr-dvm`
18 | - Run `python3 main.py` (or python main.py)
19 | - The framework will then automatically create keys, nip89 tags and zapable NIP57 `lightning addresses` for your dvms in this file.
20 | - Check the .env file if these values look correct.
21 | - Check the `main.py` file. You can update the image/description/name of your DVM before announcing it.
22 | - You can then in main.py set `admin_config.REBROADCAST_NIP89` and
23 | `admin_config.UPDATE_PROFILE` to `True` to announce the NIP89 info and update the npubs profile automatically.
24 | - After this was successful you can set these back to False until the next time you want to update the NIP89 or profile.
25 |
26 | You are now running your own DVM.
--------------------------------------------------------------------------------
/examples/tts_dvm/README.md:
--------------------------------------------------------------------------------
1 | # NostrAI: Nostr NIP90 Data Vending Machine Framework Example
2 |
3 | Projects in this folder contain ready-to-use DVMs. To run a DVM, follow these steps:
4 |
5 | ## To get started:
6 | - Install Python (tested on 3.10/3.11)
7 |
8 |
9 | Create a new venv in this directory by opening the terminal here, or navigate to this directory and type: `"python -m venv venv"`
10 | - Place .env file (based on .env_example) in this folder.
11 | - Recommended but optional:
12 | - Create a `LNbits` account on an accessible instance of your choice, enter one account's id and admin key (this account will create other accounts for the dvms) Open the .env file and enter this info to `LNBITS_ADMIN_KEY`, `LNBITS_WALLET_ID`, `LNBITS_HOST`.
13 | - If you are running an own instance of `Nostdress` enter `NOSTDRESS_DOMAIN` or use the default one.
14 | - Activate the venv with
15 | - MacOS/Linux: source ./venv/bin/activate
16 | - Windows: .\venv\Scripts\activate
17 | - Type: `pip install nostr-dvm`
18 | - Run `python3 main.py` (or python main.py)
19 | - The framework will then automatically create keys, nip89 tags and zapable NIP57 `lightning addresses` for your dvms in this file.
20 | - Check the .env file if these values look correct.
21 | - Check the `main.py` file. You can update the image/description/name of your DVM before announcing it.
22 | - You can then in main.py set `admin_config.REBROADCAST_NIP89` and
23 | `admin_config.UPDATE_PROFILE` to `True` to announce the NIP89 info and update the npubs profile automatically.
24 | - After this was successful you can set these back to False until the next time you want to update the NIP89 or profile.
25 |
26 | You are now running your own DVM.
27 |
--------------------------------------------------------------------------------
/examples/unleashed_dvm/README.md:
--------------------------------------------------------------------------------
1 | # NostrAI: Nostr NIP90 Data Vending Machine Framework Example
2 |
3 | Projects in this folder contain ready-to-use DVMs. To run a DVM, follow these steps:
4 |
5 | ## To get started:
6 | - Install Python (tested on 3.10/3.11)
7 |
8 |
9 | Create a new venv in this directory by opening the terminal here, or navigate to this directory and type: `"python -m venv venv"`
10 | - Place .env file (based on .env_example) in this folder.
11 | - Recommended but optional:
12 | - Create a `LNbits` account on an accessible instance of your choice, enter one account's id and admin key (this account will create other accounts for the dvms) Open the .env file and enter this info to `LNBITS_ADMIN_KEY`, `LNBITS_WALLET_ID`, `LNBITS_HOST`.
13 | - If you are running an own instance of `Nostdress` enter `NOSTDRESS_DOMAIN` or use the default one.
14 | - Activate the venv with
15 | - MacOS/Linux: source ./venv/bin/activate
16 | - Windows: .\venv\Scripts\activate
17 | - Type: `pip install nostr-dvm`
18 | - Run `python3 main.py` (or python main.py)
19 | - The framework will then automatically create keys, nip89 tags and zapable NIP57 `lightning addresses` for your dvms in this file.
20 | - Check the .env file if these values look correct.
21 | - Check the `main.py` file. You can update the image/description/name of your DVM before announcing it.
22 | - You can then in main.py set `admin_config.REBROADCAST_NIP89` and
23 | `admin_config.UPDATE_PROFILE` to `True` to announce the NIP89 info and update the npubs profile automatically.
24 | - After this was successful you can set these back to False until the next time you want to update the NIP89 or profile.
25 |
26 | You are now running your own DVM.
27 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
# Package version; bump for each PyPI release.
VERSION = '1.1.3'
DESCRIPTION = 'A framework to build and run Nostr NIP90 Data Vending Machines'
LONG_DESCRIPTION = ('A framework to build and run Nostr NIP90 Data Vending Machines. See the github repository for more information')

# Setting up
setup(
    name="nostr-dvm",
    version=VERSION,
    author="Believethehype",
    author_email="believethehypeonnostr@proton.me",
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    # Ship only the library package and its subpackages, not tests/examples.
    packages=find_packages(include=['nostr_dvm', 'nostr_dvm.*']),

    # Versions are pinned exactly to keep DVM deployments reproducible.
    install_requires=["nostr-sdk==0.39.0",
                      "bech32==1.2.0",
                      "pycryptodome==3.20.0",
                      "yt-dlp==2025.7.21",
                      "python-dotenv==1.0.0",
                      "emoji==2.12.1",
                      "ffmpegio==0.9.1",
                      "Pillow==10.1.0",
                      "PyUpload==0.1.4",
                      "pandas==2.2.2",
                      "requests==2.32.3",
                      "moviepy==2.0.0",
                      "zipp==3.19.1",
                      "urllib3==2.5.0",
                      "networkx==3.3",
                      "scipy==1.13.1",
                      "typer==0.15.1",
                      "beautifulsoup4==4.12.3"
                      ],
    keywords=['nostr', 'nip90', 'dvm', 'data vending machine'],
    url="https://github.com/believethehype/nostrdvm",
    license="MIT",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Education",
        "Programming Language :: Python :: 3",
    ]
)
--------------------------------------------------------------------------------
/tests/filter.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import dotenv
4 |
5 | from nostr_dvm.framework import DVMFramework
6 | from nostr_dvm.tasks import discovery_bot_farms
7 | from nostr_dvm.utils.admin_utils import AdminConfig
8 |
9 |
def playground():
    """Build the bot-farm discovery DVM and run it inside the shared framework."""
    framework = DVMFramework()

    # A single AdminConfig may be shared by several DVMs; set REBROADCAST_NIP89 /
    # UPDATE_PROFILE to True when the NIP89 announcement or the npub profile
    # should be (re)published. Individual configs can instead be handed to each
    # DVM, e.g. to whitelist users or adjust balances.
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False

    #discovery_test_sub = discovery_censor_wot.build_example("Censorship", "discovery_censor", admin_config)
    #framework.add(discovery_test_sub)

    bot_hunter = discovery_bot_farms.build_example("Bot Hunter", "discovery_botfarms", admin_config)
    framework.add(bot_hunter)

    #discovery_test_sub = discovery_inactive_follows.build_example("Inactive Followings", "discovery_inactive", admin_config)
    #framework.add(discovery_test_sub)

    framework.run()
31 |
32 |
if __name__ == '__main__':
    env_path = Path('.env')
    if not env_path.is_file():
        # Create an empty .env so the framework can persist generated keys into it.
        print("Writing new .env file")
        env_path.write_text('')
    if env_path.is_file():
        print(f'loading environment from {env_path.resolve()}')
        dotenv.load_dotenv(env_path, verbose=True, override=True)
    else:
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    playground()
45 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/connection_check.py:
--------------------------------------------------------------------------------
1 | import anyio
2 | import logging
3 | import sys
4 | from config import load_config
5 | from messages.send_initialize_message import send_initialize
6 | from messages.send_ping import send_ping
7 | from messages.send_tools_list import send_tools_list
8 | from messages.send_call_tool import send_call_tool
9 | from transport.stdio.stdio_client import stdio_client
10 |
# Configure logging
# DEBUG logs go to stderr so stdout stays free for MCP stdio traffic.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s',
    stream=sys.stderr
)
17 |
async def main():
    """Stripped-down script to initialize the server and send a ping."""
    # Which entry of server_config.json to launch.
    config_path = "server_config.json"
    server_name = "nostrdvmmcp"

    server_params = await load_config(config_path, server_name)

    # Talk to the server over stdio pipes.
    async with stdio_client(server_params) as (read_stream, write_stream):
        init_result = await send_initialize(read_stream, write_stream)
        if not init_result:
            print("Server initialization failed")
            return

        print(f"We're connected!!!")

        # Liveness check.
        result = await send_ping(read_stream, write_stream)
        print("Ping successful" if result else "Ping failed")

        # Enumerate the tools the server exposes.
        result = await send_tools_list(read_stream, write_stream)
        print(result)

        # Exercise one tool end-to-end.
        result = await send_call_tool("get-crypto-price", {"symbol":"BTC"}, read_stream, write_stream)
        print(result)
51 |
# Run the script
# anyio drives the async main() when executed directly.
if __name__ == "__main__":
    anyio.run(main)
55 |
--------------------------------------------------------------------------------
/tests/ditto.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 |
4 | from nostr_sdk import NostrSigner, Keys, Client, Alphabet, SingleLetterTag, Filter, \
5 | PublicKey, init_logger, LogLevel, Tag
6 |
7 | from nostr_dvm.utils.definitions import relay_timeout_long
8 | from nostr_dvm.utils.nostr_utils import check_and_set_private_key
9 |
10 |
async def main():
    """Fetch the latest pub.ditto.trends event and return its 'e' tags as JSON."""
    init_logger(LogLevel.DEBUG)
    options = {
        "max_results": 200,
        "relay": "wss://gleasonator.dev/relay"
    }

    keys = Keys.parse(check_and_set_private_key("test_client"))
    cli = Client(NostrSigner.keys(keys))

    await cli.add_relay(options["relay"])
    await cli.connect()

    # Trend events are labelled with an "l" tag of pub.ditto.trends.
    ltags = ["#e", "pub.ditto.trends"]
    authors = [PublicKey.parse("db0e60d10b9555a39050c258d460c5c461f6d18f467aa9f62de1a728b8a891a4")]
    notes_filter = Filter().authors(authors).custom_tags(SingleLetterTag.lowercase(Alphabet.L), ltags)

    events_struct = await cli.fetch_events(notes_filter, relay_timeout_long)
    events = events_struct.to_vec()

    result_list = []
    if len(events) > 0:
        # Only the newest trend event is of interest.
        event = events[0]
        print(event)
        for tag in event.tags().to_vec():
            print(tag.as_vec())
            if tag.as_vec()[0] == "e":
                e_tag = Tag.parse(["e", tag.as_vec()[1], tag.as_vec()[2]])
                result_list.append(e_tag.as_vec())
    else:
        print("Nothing found")

    # BUG FIX: a stray function-level `return ""` previously exited here, so the
    # relay connection was never shut down and the collected tags were discarded.
    await cli.shutdown()
    print(json.dumps(result_list))
    return json.dumps(result_list)


asyncio.run(main())
57 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/message_types/initialize_message.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from pydantic import BaseModel, Field
3 | from nostr_dvm.backends.mcp.messages.message_types.json_rpc_message import JSONRPCMessage
4 |
5 |
class MCPClientCapabilities(BaseModel):
    """Capabilities this client advertises during MCP initialization."""
    # listChanged=True: client notifies the server when its root set changes.
    roots: dict = Field(default_factory=lambda: {"listChanged": True})
    sampling: dict = Field(default_factory=dict)
9 |
10 |
class MCPClientInfo(BaseModel):
    """Name/version identifying this client in the initialize handshake."""
    name: str = "PythonMCPClient"
    version: str = "1.0.0"
14 |
15 |
class InitializeParams(BaseModel):
    """`params` payload of the JSON-RPC 'initialize' request."""
    protocolVersion: str
    capabilities: MCPClientCapabilities
    clientInfo: MCPClientInfo
20 |
21 |
class ServerInfo(BaseModel):
    """Name/version reported by the server in its initialize result."""
    name: str
    version: str
25 |
26 |
class ServerCapabilities(BaseModel):
    """Feature set reported by the server; None means the feature is absent."""
    logging: dict = Field(default_factory=dict)
    prompts: Optional[dict] = None
    resources: Optional[dict] = None
    tools: Optional[dict] = None
32 |
33 |
class InitializeResult(BaseModel):
    """`result` payload returned by the server for 'initialize'."""
    protocolVersion: str
    capabilities: ServerCapabilities
    serverInfo: ServerInfo
38 |
39 |
class InitializeMessage(JSONRPCMessage):
    """JSON-RPC 'initialize' request carrying the client's parameters under a fixed id."""

    def __init__(self, init_params: InitializeParams, **kwargs):
        payload = init_params.model_dump()
        super().__init__(id="init-1", method="initialize", params=payload, **kwargs)
51 |
52 |
class InitializedNotificationMessage(JSONRPCMessage):
    """Notification telling the server the client finished initializing (no id, empty params)."""

    def __init__(self, **kwargs):
        super().__init__(method="notifications/initialized", params={}, **kwargs)
63 |
--------------------------------------------------------------------------------
/nostr_dvm/utils/scrapper/request_details.json:
--------------------------------------------------------------------------------
1 | {
2 | "features": {
3 | "responsive_web_graphql_exclude_directive_enabled": true,
4 | "verified_phone_label_enabled": false,
5 | "responsive_web_graphql_timeline_navigation_enabled": true,
6 | "responsive_web_graphql_skip_user_profile_image_extensions_enabled": false,
7 | "tweetypie_unmention_optimization_enabled": true,
8 | "vibe_api_enabled": false,
9 | "responsive_web_edit_tweet_api_enabled": false,
10 | "graphql_is_translatable_rweb_tweet_is_translatable_enabled": false,
11 | "view_counts_everywhere_api_enabled": true,
12 | "longform_notetweets_consumption_enabled": true,
13 | "tweet_awards_web_tipping_enabled": false,
14 | "freedom_of_speech_not_reach_fetch_enabled": false,
15 | "standardized_nudges_misinfo": false,
16 | "tweet_with_visibility_results_prefer_gql_limited_actions_policy_enabled": false,
17 | "interactive_text_enabled": false,
18 | "responsive_web_twitter_blue_verified_badge_is_enabled": true,
19 | "responsive_web_text_conversations_enabled": false,
20 | "longform_notetweets_richtext_consumption_enabled": false,
21 | "responsive_web_enhance_cards_enabled": false,
22 | "longform_notetweets_inline_media_enabled": true,
23 | "longform_notetweets_rich_text_read_enabled": true,
24 | "responsive_web_media_download_video_enabled": true,
25 | "responsive_web_twitter_article_tweet_consumption_enabled": true,
26 | "creator_subscriptions_tweet_preview_api_enabled": true
27 | },
28 | "variables": {
29 | "with_rux_injections": false,
30 | "includePromotedContent": true,
31 | "withCommunity": true,
32 | "withQuickPromoteEligibilityTweetFields": true,
33 | "withBirdwatchNotes": true,
34 | "withDownvotePerspective": false,
35 | "withReactionsMetadata": false,
36 | "withReactionsPerspective": false,
37 | "withVoice": true,
38 | "withV2Timeline": true
39 | }
40 | }
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/config.py:
--------------------------------------------------------------------------------
1 | # config.py
2 | import json
3 | import logging
4 |
5 | from nostr_dvm.backends.mcp.transport.stdio.stdio_server_parameters import StdioServerParameters
6 |
7 |
8 | async def load_config(config_path: str, server_name: str) -> StdioServerParameters:
9 | """Load the server configuration from a JSON file."""
10 | try:
11 | # debug
12 | logging.debug(f"Loading config from {config_path}")
13 |
14 | # Read the configuration file
15 | with open(config_path, "r") as config_file:
16 | config = json.load(config_file)
17 |
18 | # Retrieve the server configuration
19 | server_config = config.get("mcpServers", {}).get(server_name)
20 | if not server_config:
21 | error_msg = f"Server '{server_name}' not found in configuration file."
22 | logging.error(error_msg)
23 | raise ValueError(error_msg)
24 |
25 | # Construct the server parameters
26 | result = StdioServerParameters(
27 | command=server_config["command"],
28 | args=server_config.get("args", []),
29 | env=server_config.get("env"),
30 | )
31 |
32 | # debug
33 | logging.debug(
34 | f"Loaded config: command='{result.command}', args={result.args}, env={result.env}"
35 | )
36 |
37 | # return result
38 | return result
39 |
40 | except FileNotFoundError:
41 | # error
42 | error_msg = f"Configuration file not found: {config_path}"
43 | logging.error(error_msg)
44 | raise FileNotFoundError(error_msg)
45 | except json.JSONDecodeError as e:
46 | # json error
47 | error_msg = f"Invalid JSON in configuration file: {e.msg}"
48 | logging.error(error_msg)
49 | raise json.JSONDecodeError(error_msg, e.doc, e.pos)
50 | except ValueError as e:
51 | # error
52 | logging.error(str(e))
53 | raise
54 |
--------------------------------------------------------------------------------
/ui/noogle/src/components/icons/IconEcosystem.vue:
--------------------------------------------------------------------------------
1 |
2 |
3 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/whisperx/readme.md:
--------------------------------------------------------------------------------
1 | # WhisperX
2 |
3 | This module provides fast automatic speech recognition (70x realtime with large-v2) with word-level timestamps and
4 | speaker diarization.
5 |
6 | * https://github.com/m-bain/whisperX
7 |
8 | ## Options
9 |
10 | - `model`: string, identifier of the model to choose, sorted ascending in required (V)RAM:
11 | - `tiny`, `tiny.en`
12 | - `base`, `base.en`
13 | - `small`, `small.en`
14 | - `medium`, `medium.en`
15 | - `large-v1`
16 | - `large-v2`
17 |
18 | - `alignment_mode`: string, alignment method to use
19 | - `raw` Segments as identified by Whisper
20 | - `segment` Improved segmentation using separate alignment model. Roughly equivalent to sentence alignment.
21 | - `word` Improved segmentation using separate alignment model. Equivalent to word alignment.
22 |
23 | - `language`: language code for transcription and alignment models. Supported languages:
24 | - `ar`, `cs`, `da`, `de`, `el`, `en`, `es`, `fa`, `fi`, `fr`, `he`, `hu`, `it`, `ja`, `ko`, `nl`, `pl`, `pt`, `ru`,
25 | `te`, `tr`, `uk`, `ur`, `vi`, `zh`
26 | - `None`: auto-detect language from first 30 seconds of audio
27 |
28 | - `batch_size`: how many samples to process at once, increases speed but also (V)RAM consumption
29 |
30 | ## Examples
31 |
32 | ### Request
33 |
34 | ```python
35 | import requests
36 | import json
37 |
38 | payload = {
39 | "jobID" : "whisper_transcript",
40 | "data": json.dumps([
41 | {"src":"file:stream:audio", "type":"input", "id":"audio", "uri":"path/to/my/file.wav"},
42 | {"src":"file:annotation:free", "type":"output", "id":"transcript", "uri":"path/to/my/transcript.annotation"}
43 | ]),
44 | "trainerFilePath": "modules\\whisperx\\whisperx_transcript.trainer",
45 | }
46 |
47 |
48 | url = 'http://127.0.0.1:8080/process'
49 | headers = {'Content-type': 'application/x-www-form-urlencoded'}
50 | x = requests.post(url, headers=headers, data=payload)
51 | print(x.text)
52 |
53 | ```
54 |
--------------------------------------------------------------------------------
/examples/unleashed_dvm/main.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 | import dotenv
4 |
5 | from nostr_dvm.tasks.textgeneration_unleashed_chat import TextGenerationUnleashedChat
6 | from nostr_dvm.utils.admin_utils import AdminConfig
7 | from nostr_dvm.utils.dvmconfig import build_default_config
8 | from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
9 |
10 |
def main():
    """Configure and launch the Unleashed Chat text-generation DVM."""
    identifier = "unleashed"
    name = "Unleashed Chat"

    dvm_config = build_default_config(identifier)
    dvm_config.SEND_FEEDBACK_EVENTS = False
    dvm_config.USE_OWN_VENV = False
    dvm_config.FIX_COST = 0

    admin_config = AdminConfig()
    admin_config.LUD16 = dvm_config.LN_ADDRESS
    admin_config.REBROADCAST_NIP89 = False

    # NIP-89 announcement content describing this DVM to clients.
    nip89info = {
        "name": name,
        "picture": "https://unleashed.chat/_app/immutable/assets/hero.pehsu4x_.jpeg",
        "about": "I generate Text with Unleashed.chat",
        "supportsEncryption": True,
        "acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
        "nip90Params": {}
    }

    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["picture"])
    nip89config.CONTENT = json.dumps(nip89info)

    dvm = TextGenerationUnleashedChat(name=name, dvm_config=dvm_config,
                                      nip89config=nip89config, admin_config=admin_config)
    dvm.run()
40 |
41 |
42 |
43 |
if __name__ == '__main__':
    env_path = Path('.env')
    if not env_path.is_file():
        # Create an empty .env so the framework can persist generated keys into it.
        print("Writing new .env file")
        env_path.write_text('')
    if env_path.is_file():
        print(f'loading environment from {env_path.resolve()}')
        dotenv.load_dotenv(env_path, verbose=True, override=True)
    else:
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    main()
56 |
--------------------------------------------------------------------------------
/nostr_dvm/utils/nip98_utils.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import hashlib
3 |
4 | from nostr_sdk import EventBuilder, Tag, Kind, Keys
5 |
6 |
def sha256sum(filename):
    """Return the hex-encoded SHA-256 digest of the file at *filename*."""
    digest = hashlib.sha256()
    with open(filename, 'rb') as f:
        # Stream in 64 KiB chunks so arbitrarily large files stay cheap on memory.
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
10 |
11 |
async def generate_nip98_header(pkeys_hex, url="", kind="POST", filepath=""):
    """
    Build a NIP-98 HTTP authorization header value.

    A kind-27235 event is created, tagged with the target URL and HTTP method,
    signed with the given keys, and base64-encoded into the "Nostr <payload>"
    header format.

    :param pkeys_hex: private keys in hexadecimal format used to sign the event
    :param url: URL the header applies to (goes into the "u" tag)
    :param kind: HTTP method, e.g. "POST" or "GET" (goes into the "method" tag)
    :param filepath: for POST requests, file whose SHA-256 is added as a
        "payload" tag
    :return: header value of the form "Nostr <base64-encoded signed event>"
    """
    signing_keys = Keys.parse(pkeys_hex)
    tags = [Tag.parse(["u", url]), Tag.parse(["method", kind])]
    if kind == "POST":
        # POST requests additionally commit to the uploaded file's SHA-256.
        tags.append(Tag.parse(["payload", sha256sum(filepath)]))

    event = EventBuilder(Kind(27235), "").tags(tags).sign_with_keys(signing_keys)

    token = base64.b64encode(event.as_json().encode('utf-8')).decode('utf-8')
    return "Nostr " + token
47 |
--------------------------------------------------------------------------------
/.env_example:
--------------------------------------------------------------------------------
1 | # Optional LNBITS options to create invoices (if empty, it will use the lud16 from profile)
2 | # Admin Key is (only) required for bot or if any payments should be made
3 |
4 | #Create an account with a lnbits instance of your choice, add the admin key and id here. This account will be used to create a new lnbits wallet for each dvm/bot
5 | LNBITS_ADMIN_KEY = "" # In order to pay invoices, e.g. from the bot to DVMs, or reimburse users. Keep this secret and use responsibly.
6 | LNBITS_WALLET_ID = ""
7 | LNBITS_HOST = "https://demo.lnbits.com/"
8 | # In order to create a zappable lightning address, host nostdress on your domain or use this preinstalled domain.
 9 | # We will use the API to create and manage zappable lightning addresses
10 | NOSTDRESS_DOMAIN = "nostrdvm.com"
11 |
12 | #Backend Specific Options for tasks that require them. A DVM needing these should only be started if these are set.
13 |
14 | OPENAI_API_KEY = "" # Enter your OpenAI API Key to use DVMs with OpenAI services
15 | LIBRE_TRANSLATE_ENDPOINT = "" # Url to LibreTranslate Endpoint e.g. https://libretranslate.com
16 | LIBRE_TRANSLATE_API_KEY = "" # API Key, if required (You can host your own instance where you don't need it)
17 | REPLICATE_API_TOKEN = "" #API Key to run models on replicate.com
18 | HUGGINGFACE_EMAIL = ""
19 | HUGGINGFACE_PASSWORD = ""
20 | COINSTATSOPENAPI_KEY = ""
21 | NOSTR_BUILD_ACCOUNT_PK = "" # Enter the private key of an account you use with nostr.build
22 |
23 | # We will automatically create dtags and private keys based on the identifier variable in main.
24 | # If your DVM already has a dtag and private key you can replace it here before publishing the DTAG to not create a new one.
25 | # The name and NIP90 info of the DVM can be changed but the identifier must stay the same in order to not create a different dtag.
26 |
27 | # We will also create new wallets on the given lnbits instance for each dvm. If you want to use an existing wallet, you can replace the parameters here as well.
28 | # Make sure you backup this file to keep access to your wallets
29 |
--------------------------------------------------------------------------------
/examples/ollama_dvm/main.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 | import dotenv
4 |
5 | from nostr_dvm.tasks.textgeneration_llmlite import TextGenerationLLMLite
6 | from nostr_dvm.utils.admin_utils import AdminConfig
7 | from nostr_dvm.utils.dvmconfig import build_default_config
8 | from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
9 |
10 |
def main():
    """Configure and start a text-generation DVM backed by a local Ollama server."""
    identifier = "llama2"
    name = "Ollama"

    dvm_config = build_default_config(identifier)

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = dvm_config.LN_ADDRESS

    # LLMLite options: which model to request and the local Ollama endpoint.
    options = {'default_model': "ollama/llama2", 'server': "http://localhost:11434"}

    nip89info = {
        "name": name,
        "picture": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg",
        "about": "I use a LLM connected via OLLAMA",
        "supportsEncryption": True,
        "acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
        "nip90Params": {}
    }

    nip89config = NIP89Config()
    nip89config.CONTENT = json.dumps(nip89info)
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["picture"])

    dvm = TextGenerationLLMLite(name=name, dvm_config=dvm_config, nip89config=nip89config,
                                admin_config=admin_config, options=options)
    dvm.run()
41 |
42 |
if __name__ == '__main__':
    # Ensure a .env file exists, load it, then start the DVM.
    env_path = Path('.env')
    if not env_path.is_file():
        with open('.env', 'w') as env_file:
            print("Writing new .env file")
            env_file.write('')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)
    main()
55 |
--------------------------------------------------------------------------------
/tests/summarization_duck.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 |
4 | import dotenv
5 |
6 | from nostr_dvm.tasks.summarization_duckduck_ai import SummarizationDuckDuck
7 | from nostr_dvm.utils.admin_utils import AdminConfig
8 | from nostr_dvm.utils.dvmconfig import build_default_config
9 | from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
10 |
11 |
def playground(announce=False):
    """Spin up the DuckDuckGo-AI summarization DVM.

    :param announce: When True, rebroadcast NIP-89/NIP-65 info and update the profile.
    """
    name = "Summarizer Duck"
    identifier = "summarizer_duck"

    dvm_config = build_default_config(identifier)
    dvm_config.SEND_FEEDBACK_EVENTS = False

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = announce
    admin_config.REBROADCAST_NIP65_RELAY_LIST = announce
    admin_config.UPDATE_PROFILE = announce
    admin_config.LUD16 = dvm_config.LN_ADDRESS

    nip89info = {
        "name": name,
        "picture": "https://image.nostr.build/28da676a19841dcfa7dcf7124be6816842d14b84f6046462d2a3f1268fe58d03.png",
        "about": "I summarize Text",
        "supportsEncryption": True,
        "acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
        "nip90Params": {}
    }

    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["picture"])
    nip89config.CONTENT = json.dumps(nip89info)

    dvm = SummarizationDuckDuck(name=name, dvm_config=dvm_config,
                                nip89config=nip89config, admin_config=admin_config)
    dvm.run(True)
41 |
42 |
if __name__ == '__main__':
    # Ensure a .env file exists, load it, then start the DVM.
    env_path = Path('.env')
    if not env_path.is_file():
        with open('.env', 'w') as env_file:
            print("Writing new .env file")
            env_file.write('')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    playground(announce=False)
56 |
--------------------------------------------------------------------------------
/examples/tts_dvm/main.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 | import dotenv
4 | from nostr_dvm.tasks.texttospeech import TextToSpeech
5 | from nostr_dvm.utils.admin_utils import AdminConfig
6 | from nostr_dvm.utils.dvmconfig import build_default_config
7 | from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
8 |
9 |
def main():
    """Configure and start the text-to-speech (voice cloning) DVM."""
    identifier = "tts"
    name = "Guy Swann Clone"

    dvm_config = build_default_config(identifier)

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = dvm_config.LN_ADDRESS

    # Empty string keeps the task's default voice sample; set a local wav file
    # path here to clone a different voice.
    options = {'input_file': ""}

    nip89info = {
        "name": name,
        "picture": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg",
        "about": "I Generate Speech from Text",
        "supportsEncryption": True,
        "acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
        "nip90Params": {
            "language": {
                "required": False,
                "values": []
            }
        }
    }

    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["picture"])
    nip89config.CONTENT = json.dumps(nip89info)

    dvm = TextToSpeech(name=name, dvm_config=dvm_config, nip89config=nip89config,
                       admin_config=admin_config, options=options)
    dvm.run()
47 |
48 |
if __name__ == '__main__':
    # Ensure a .env file exists, load it, then start the DVM.
    env_path = Path('.env')
    if not env_path.is_file():
        with open('.env', 'w') as env_file:
            print("Writing new .env file")
            env_file.write('')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)
    main()
61 |
--------------------------------------------------------------------------------
/tests/tts.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 |
4 | import dotenv
5 |
6 | from nostr_dvm.framework import DVMFramework
7 | from nostr_dvm.tasks.texttospeech import TextToSpeech
8 | from nostr_dvm.utils.admin_utils import AdminConfig
9 | from nostr_dvm.utils.dvmconfig import build_default_config
10 | from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
11 |
if __name__ == '__main__':
    # Load the environment; this test script does not create a missing .env
    # but fails fast instead.
    env_path = Path('.env')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    framework = DVMFramework()

    name = "TTS Guy Swann"
    identifier = "ttsguy"

    admin_config_tts = AdminConfig()
    admin_config_tts.UPDATE_PROFILE = True
    admin_config_tts.REBROADCAST_NIP65_RELAY_LIST = True

    dvm_config = build_default_config(identifier)
    dvm_config.USE_OWN_VENV = True
    dvm_config.FIX_COST = 0
    dvm_config.PER_UNIT_COST = 0

    # Extend the default relay list in place with two well-known relays.
    relays = dvm_config.RELAY_LIST
    relays.extend(["wss://relay.damus.io", "wss://relay.primal.net"])
    dvm_config.RELAY_LIST = relays

    admin_config_tts.LUD16 = dvm_config.LN_ADDRESS
    # use an alternative local wav file you want to use for cloning
    options = {'input_file': ""}

    nip89info = {
        "name": name,
        "picture": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg",
        "about": "I Generate Speech from Text",
        "supportsEncryption": True,
        "acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
        "nip90Params": {
            "language": {
                "required": False,
                "values": []
            }
        }
    }

    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["picture"])
    nip89config.CONTENT = json.dumps(nip89info)

    tts = TextToSpeech(name=name, dvm_config=dvm_config, nip89config=nip89config,
                       admin_config=admin_config_tts, options=options)

    framework.add(tts)
    framework.run()
60 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/send_message.py:
--------------------------------------------------------------------------------
1 | # messages/send_message.py
2 | import logging
3 | import anyio
4 | from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
5 | from nostr_dvm.backends.mcp.messages.message_types.json_rpc_message import JSONRPCMessage
6 |
async def send_message(
    read_stream: MemoryObjectReceiveStream,
    write_stream: MemoryObjectSendStream,
    message: JSONRPCMessage,
    timeout: float = 5,
    retries: int = 3,
) -> dict:
    """
    Send a JSON-RPC message to the server and return the response.

    Args:
        read_stream (MemoryObjectReceiveStream): The stream to read responses.
        write_stream (MemoryObjectSendStream): The stream to send requests.
        message (JSONRPCMessage): The JSON-RPC message to send.
        timeout (float): Timeout in seconds to wait for a response.
        retries (int): Number of retry attempts.

    Returns:
        dict: The server's response as a dictionary.

    Raises:
        TimeoutError: If no response is received within the timeout.
        Exception: If an unexpected error occurs.

    Note:
        NOTE(review): if the read stream closes without yielding anything, the
        ``async for`` completes normally, every attempt falls through, and the
        function implicitly returns None despite the declared return type.
    """
    for attempt in range(1, retries + 1):
        try:
            logging.debug(f"Attempt {attempt}/{retries}: Sending message: {message}")
            await write_stream.send(message)

            # Bound the wait for a reply; anyio.fail_after raises TimeoutError
            # if nothing arrives within `timeout` seconds.
            with anyio.fail_after(timeout):
                async for response in read_stream:
                    # The stream may carry either parsed response objects or
                    # Exception instances forwarded by the transport layer.
                    if not isinstance(response, Exception):
                        logging.debug(f"Received response: {response.model_dump()}")
                        return response.model_dump()
                    else:
                        logging.error(f"Server error: {response}")
                        raise response

        except TimeoutError:
            logging.error(
                f"Timeout waiting for response to message '{message.method}' (Attempt {attempt}/{retries})"
            )
            # Re-raise only on the final attempt; otherwise retry below.
            if attempt == retries:
                raise
        except Exception as e:
            logging.error(
                f"Unexpected error during '{message.method}' request: {e} (Attempt {attempt}/{retries})"
            )
            if attempt == retries:
                raise

        # Back off briefly before resending on the next attempt.
        await anyio.sleep(2)
59 |
--------------------------------------------------------------------------------
/tutorials/04_simple_chat_bot.py:
--------------------------------------------------------------------------------
1 | # In tutorial 2 we have written a very simplistic DVM that replies with "The result of the DVM is: #RunDVM"
2 | # In tutorial 3 we have written a client that requests a response from the DVM and gets the reply back.
3 | # In this tutorial we build a simple bot that bridges the communication between the user and the Kind 5050
4 | # (Text generation) DVM.
5 | import threading
6 | from pathlib import Path
7 |
8 | import dotenv
9 |
10 | from nostr_dvm.bot import Bot
11 | from nostr_dvm.utils.admin_utils import AdminConfig
12 | from nostr_dvm.utils.dvmconfig import build_default_config
13 |
def run_dvm(identifier):
    """Start a Bot in chatbot mode that bridges messages to a single DVM.

    :param identifier: Unique name used to store/look up keys in the .env file.
    """
    bot_config = build_default_config(identifier)
    # The Bot's main purpose is an indexable overview of multiple DVMs; the
    # CHATBOT option instead makes it forward a conversation to one DVM.
    bot_config.CHATBOT = True
    # Public key of the DVM from tutorial 1 that should answer the chat.
    bot_config.DVM_KEY = "aa8ab5b774d47e7b29a985dd739cfdcccf93451678bf7977ba1b2e094ecd8b30"  # TODO replace with your DVM

    # Update relay list and profile, then start the bot on its own thread so
    # this call returns immediately.
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP65_RELAY_LIST = True
    admin_config.UPDATE_PROFILE = True
    bot_thread = threading.Thread(target=Bot, args=(bot_config, admin_config))
    bot_thread.start()
29 |
30 | # Now you can copy the npub to a Social client of your choice and (if tutorials 2 and 4 are running) it should reply
31 | # in your client.
32 |
33 |
34 |
35 |
if __name__ == '__main__':
    # Ensure the .env file we created before exists, then load it.
    env_path = Path('.env')
    if not env_path.is_file():
        with open('.env', 'w') as env_file:
            print("Writing new .env file")
            env_file.write('')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    # A unique identifier that will be used to store keys in your .env file as
    # well as for your ln address. (If it's already used it will get some
    # random letters added to it.)
    identifier = "chat_bot"

    # psst, you can change your lightning address here:
    # asyncio.run(change_ln_address(identifier, "test", DVMConfig(), True))

    run_dvm(identifier)
57 |
--------------------------------------------------------------------------------
/nostr_dvm/utils/nip65_utils.py:
--------------------------------------------------------------------------------
1 | from nostr_sdk import Tag, Keys, EventBuilder, Kind
2 |
3 | from nostr_dvm.utils.definitions import EventDefinitions
4 | from nostr_dvm.utils.nostr_utils import send_event
5 | from nostr_dvm.utils.print_utils import bcolors
6 |
7 |
async def announce_dm_relays(dvm_config, client):
    """
    Publish a kind-10050 DM relay list event for this DVM.

    Builds one "r" tag per relay in ``dvm_config.RELAY_LIST``, signs the event
    with the DVM's NIP-89 key, broadcasts it and logs the outcome.

    :param dvm_config: DVM configuration holding RELAY_LIST and the NIP89 identity.
    :param client: Connected nostr client used to send the event.
    """
    tags = [Tag.parse(["r", relay]) for relay in dvm_config.RELAY_LIST]

    keys = Keys.parse(dvm_config.NIP89.PK)

    event = EventBuilder(Kind(10050), "").tags(tags).sign_with_keys(keys)
    response_status = await send_event(event, client=client, dvm_config=dvm_config, broadcast=True)
    if response_status is not None:
        # Bugfix: terminate colored output with ENDC (was missing, so the blue
        # color leaked into all subsequent console output).
        print(
            bcolors.BLUE + "[" + dvm_config.NIP89.NAME + "] Announced DM relays for " + dvm_config.NIP89.NAME + ". Success: " + str(
                response_status.success) + " Failed: " + str(response_status.failed) + " EventID: "
            + response_status.id.to_hex() + " / " + response_status.id.to_bech32() + bcolors.ENDC)
    else:
        print(
            bcolors.RED + "[" + dvm_config.NIP89.NAME + "] Could not announce DM relays for " + dvm_config.NIP89.NAME + bcolors.ENDC)
28 |
29 |
async def nip65_announce_relays(dvm_config, client):
    """Announce the DVM's NIP-65 relay list (and, for now, its DM relays too)."""
    # todo we might want to call the dm relays seperately but for now we do it together with the inbox relays
    await announce_dm_relays(dvm_config, client)

    tags = [Tag.parse(["r", relay]) for relay in dvm_config.RELAY_LIST]

    keys = Keys.parse(dvm_config.NIP89.PK)

    event = EventBuilder(EventDefinitions.KIND_RELAY_ANNOUNCEMENT, "").tags(tags).sign_with_keys(keys)
    response_status = await send_event(event, client=client, dvm_config=dvm_config, broadcast=True)
    if response_status is None:
        print(
            bcolors.RED + "[" + dvm_config.NIP89.NAME + "] Could not announce NIP 65 for " + dvm_config.NIP89.NAME + bcolors.ENDC)
    else:
        print(
            bcolors.BLUE + "[" + dvm_config.NIP89.NAME + "] Announced NIP 65 for " + dvm_config.NIP89.NAME + ". Success: " + str(
                response_status.success) + " Failed: " + str(response_status.failed) + " EventID: "
            + response_status.id.to_hex() + " / " + response_status.id.to_bech32() + bcolors.ENDC)
53 |
--------------------------------------------------------------------------------
/ui/noogle/src/assets/base.css:
--------------------------------------------------------------------------------
/* color palette — vt-c-* naming presumably follows the default Vue starter theme (original comment was truncated) */
:root {
  --vt-c-white: #ffffff;
  --vt-c-white-soft: #f8f8f8;
  --vt-c-white-mute: #f2f2f2;

  --vt-c-black: #181818;
  --vt-c-black-soft: #222222;
  --vt-c-black-mute: #282828;

  --vt-c-indigo: #2c3e50;

  --vt-c-divider-light-1: rgba(60, 60, 60, 0.29);
  --vt-c-divider-light-2: rgba(60, 60, 60, 0.12);
  --vt-c-divider-dark-1: rgba(84, 84, 84, 0.65);
  --vt-c-divider-dark-2: rgba(84, 84, 84, 0.48);

  --vt-c-text-light-1: var(--vt-c-indigo);
  --vt-c-text-light-2: rgba(60, 60, 60, 0.66);
  --vt-c-text-dark-1: var(--vt-c-white);
  --vt-c-text-dark-2: rgba(235, 235, 235, 0.64);
}

/* semantic color variables for this project (light scheme defaults) */
:root {
  --color-background: var(--vt-c-white);
  --color-background-soft: var(--vt-c-white-soft);
  --color-background-mute: var(--vt-c-white-mute);

  --color-border: var(--vt-c-divider-light-2);
  --color-border-hover: var(--vt-c-divider-light-1);

  --color-heading: var(--vt-c-text-light-1);
  --color-text: var(--vt-c-text-light-1);

  --section-gap: 160px;
}

/* dark mode: remap the semantic variables to the dark palette */
@media (prefers-color-scheme: dark) {
  :root {
    --color-background: var(--vt-c-black);
    --color-background-soft: var(--vt-c-black-soft);
    --color-background-mute: var(--vt-c-black-mute);

    --color-border: var(--vt-c-divider-dark-2);
    --color-border-hover: var(--vt-c-divider-dark-1);

    --color-heading: var(--vt-c-text-dark-1);
    --color-text: var(--vt-c-text-dark-2);
  }
}

/* global reset: border-box sizing, no default margins */
*,
*::before,
*::after {
  box-sizing: border-box;
  margin: 0;
  font-weight: normal;
}

body {
  min-height: 100vh;
  color: var(--color-text);
  background: var(--color-background);
  transition: color 0.5s,
  background-color 0.5s;
  line-height: 1.6;
  font-family: Inter,
  -apple-system,
  BlinkMacSystemFont,
  'Segoe UI',
  Roboto,
  Oxygen,
  Ubuntu,
  Cantarell,
  'Fira Sans',
  'Droid Sans',
  'Helvetica Neue',
  sans-serif;
  font-size: 15px;
  text-rendering: optimizeLegibility;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}
85 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mlx/modules/stable_diffusion/clip.py:
--------------------------------------------------------------------------------
1 | # Copyright © 2023 Apple Inc.
2 |
3 | import mlx.core as mx
4 | import mlx.nn as nn
5 |
6 | from .config import CLIPTextModelConfig
7 |
8 |
class CLIPEncoderLayer(nn.Module):
    """A single pre-norm transformer block as used by the CLIP text encoder."""

    def __init__(self, model_dims: int, num_heads: int):
        super().__init__()

        self.layer_norm1 = nn.LayerNorm(model_dims)
        self.layer_norm2 = nn.LayerNorm(model_dims)

        self.attention = nn.MultiHeadAttention(model_dims, num_heads)
        # CLIP's attention projections carry biases which mlx's
        # MultiHeadAttention does not create, so attach zero-init ones here.
        for proj in (
            self.attention.query_proj,
            self.attention.key_proj,
            self.attention.value_proj,
            self.attention.out_proj,
        ):
            proj.bias = mx.zeros(model_dims)

        self.linear1 = nn.Linear(model_dims, 4 * model_dims)
        self.linear2 = nn.Linear(4 * model_dims, model_dims)

    def __call__(self, x, attn_mask=None):
        # Self-attention sub-block with residual connection.
        hidden = self.layer_norm1(x)
        hidden = self.attention(hidden, hidden, hidden, attn_mask)
        x = hidden + x

        # Feed-forward (MLP) sub-block with residual connection.
        hidden = self.linear2(nn.gelu_approx(self.linear1(self.layer_norm2(x))))
        return hidden + x
40 |
41 |
class CLIPTextModel(nn.Module):
    """The CLIP text encoder: embeddings, causal transformer stack, final norm."""

    def __init__(self, config: CLIPTextModelConfig):
        super().__init__()

        self.token_embedding = nn.Embedding(config.vocab_size, config.model_dims)
        self.position_embedding = nn.Embedding(config.max_length, config.model_dims)
        self.layers = [
            CLIPEncoderLayer(config.model_dims, config.num_heads)
            for _ in range(config.num_layers)
        ]
        self.final_layer_norm = nn.LayerNorm(config.model_dims)

    def __call__(self, x):
        # x holds token ids of shape (batch, sequence).
        _, seq_len = x.shape

        # Sum token embeddings with the first seq_len positional embeddings.
        h = self.token_embedding(x) + self.position_embedding.weight[:seq_len]

        # Run the causally-masked transformer stack.
        causal_mask = nn.MultiHeadAttention.create_additive_causal_mask(seq_len, h.dtype)
        for layer in self.layers:
            h = layer(h, causal_mask)

        return self.final_layer_norm(h)
71 |
--------------------------------------------------------------------------------
/tests/generic_dvm.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 |
4 | import dotenv
5 | from nostr_sdk import Kind
6 |
7 | from nostr_dvm.tasks.generic_dvm import GenericDVM
8 | from nostr_dvm.utils.admin_utils import AdminConfig
9 | from nostr_dvm.utils.dvmconfig import build_default_config
10 | from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
11 |
12 |
def playground(announce=False):
    """Start a GenericDVM on kind 5050 whose process() is patched from outside.

    :param announce: When True, rebroadcast NIP-89/NIP-65 info and update the profile.
    """
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = announce
    admin_config.REBROADCAST_NIP65_RELAY_LIST = announce
    admin_config.UPDATE_PROFILE = announce

    name = "Generic DVM"
    identifier = "a_very_generic_dvm"  # Chose a unique identifier in order to get a lnaddress
    dvm_config = build_default_config(identifier)
    dvm_config.KIND = Kind(5050)  # Manually set the Kind Number (see data-vending-machines.org)

    # Add NIP89
    nip89info = {
        "name": name,
        "picture": "https://image.nostr.build/28da676a19841dcfa7dcf7124be6816842d14b84f6046462d2a3f1268fe58d03.png",
        "about": "I'm an all purpose DVM'",
        "supportsEncryption": True,
        "acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
        "nip90Params": {}
    }

    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["picture"])
    nip89config.CONTENT = json.dumps(nip89info)

    options = {"some_option": "#RunDVM"}

    generic = GenericDVM(name=name, dvm_config=dvm_config, nip89config=nip89config,
                         admin_config=admin_config, options=options)

    async def process(request_form):
        # Replacement processing function; injected into the DVM below.
        opts = generic.set_options(request_form)
        result = "I'm manipulating the DVM from outside\n" + opts["some_option"]
        print(result)
        return result

    generic.process = process  # overwrite the process function with the above one
    generic.run()
55 |
56 |
if __name__ == '__main__':
    # Ensure a .env file exists, load it, then start the DVM.
    env_path = Path('.env')
    if not env_path.is_file():
        with open('.env', 'w') as env_file:
            print("Writing new .env file")
            env_file.write('')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    playground(announce=False)
70 |
--------------------------------------------------------------------------------
/nostr_dvm/utils/reaction_utils.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from nostr_sdk import Tag, Keys, EventBuilder, Kind, NostrSigner, Client
4 |
5 | from nostr_dvm.utils.dvmconfig import DVMConfig
6 | from nostr_dvm.utils.nostr_utils import send_event, check_and_set_private_key
7 | from nostr_dvm.utils.print_utils import bcolors
8 |
9 |
async def create_reaction(keys, title, dtag):
    """Publish a kind-30030 emoji-set event containing the ThugAmy emoji."""
    tags = [
        Tag.parse(["d", dtag]),
        Tag.parse(["title", title]),
    ]

    # add more if you want
    emoji_name = "ThugAmy"
    emoji_url = "https://image.nostr.build/ccc229cbe11f5a13a1cc7fd24e13ac53fc78f287ecce0d9a674807e2e20f6fd5.png"
    tags.append(Tag.parse(["emoji", emoji_name, emoji_url]))

    signer = Keys.parse(keys)
    event = EventBuilder(Kind(30030), "").tags(tags).sign_with_keys(signer)

    # Connect a fresh client to the default relay list before publishing.
    client = Client(NostrSigner.keys(signer))
    for relay in DVMConfig().RELAY_LIST:
        await client.add_relay(relay)
    await client.connect()

    response = await send_event(event, client=client, dvm_config=DVMConfig())

    print(
        bcolors.BLUE + "[" + "Reaction" + "] Announced (" + response.id.to_nostr_uri() +
        " Hex: " + response.id.to_hex() + ")" + bcolors.ENDC)
38 |
39 |
async def delete_reaction(keys, eid: str, dtag: str):
    """Publish a kind-5 deletion request for a previously announced emoji set."""
    signer = Keys.parse(keys)
    deletion_tags = [
        Tag.parse(["e", eid]),
        Tag.parse(["a", "30030:" + signer.public_key().to_hex() + ":" + dtag]),
    ]
    event = EventBuilder(Kind(5), "").tags(deletion_tags).sign_with_keys(signer)

    # Connect a fresh client to the default relay list before publishing.
    client = Client(NostrSigner.keys(signer))
    for relay in DVMConfig().RELAY_LIST:
        await client.add_relay(relay)
    await client.connect()

    response = await send_event(event, client, DVMConfig())
    print(
        bcolors.BLUE + "[" + "Reaction" + "] deleted (" + response.id.to_nostr_uri() +
        " Hex: " + response.id.to_hex() + ")" + bcolors.ENDC)
58 |
59 |
# Bugfix: this module lives in the utils package, and the previous top-level
# statements fired asyncio.run(create_reaction(...)) — a signed network
# publish — on *import*. Guard the example so it only runs when the file is
# executed directly.
if __name__ == '__main__':
    keys = check_and_set_private_key("test_client")
    eventid = "da05cefc512ad43363f84131343f5d2a80303ea3b9368b9ad7f010e07db37d90"

    asyncio.run(create_reaction(keys=keys, title="ThugAmy", dtag="ThugAmy"))
    # asyncio.run(delete_reaction(keys=keys, eid=eventid, dtag="ThugAmy"))
65 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mlx/modules/stable_diffusion/sampler.py:
--------------------------------------------------------------------------------
1 | # Copyright © 2023 Apple Inc.
2 |
3 | import mlx.core as mx
4 |
5 | from .config import DiffusionConfig
6 |
7 |
def _linspace(a, b, num):
    """Return ``num`` evenly spaced values running from ``a`` to ``b`` inclusive."""
    fractions = mx.arange(0, num) / (num - 1)
    return (b - a) * fractions + a
11 |
12 |
def _interp(y, x_new):
    """Linearly interpolate the table ``y`` (defined on arange(0, len(y))) at ``x_new``."""
    idx_lo = x_new.astype(mx.int32)
    idx_hi = mx.minimum(idx_lo + 1, len(y) - 1)

    # Blend the two neighbouring table entries by the fractional offset.
    frac = x_new - idx_lo
    return y[idx_lo] * (1 - frac) + frac * y[idx_hi]
24 |
25 |
class SimpleEulerSampler:
    """A simple Euler integrator that can be used to sample from our diffusion models.

    ``step()`` performs a single Euler step from x_t to x_t_prev.
    """

    def __init__(self, config: DiffusionConfig):
        # Build the beta noise schedule.
        if config.beta_schedule == "linear":
            betas = _linspace(config.beta_start, config.beta_end, config.num_train_steps)
        elif config.beta_schedule == "scaled_linear":
            betas = _linspace(
                config.beta_start ** 0.5, config.beta_end ** 0.5, config.num_train_steps
            ).square()
        else:
            raise NotImplementedError(f"{config.beta_schedule} is not implemented.")

        alphas_cumprod = mx.cumprod(1 - betas)

        # Sigma table, prefixed with zero so timestep 0 maps to no noise.
        self._sigmas = mx.concatenate(
            [mx.zeros(1), ((1 - alphas_cumprod) / alphas_cumprod).sqrt()]
        )

    def sample_prior(self, shape, dtype=mx.float32, key=None):
        """Draw x_T from the prior: scaled Gaussian noise at the largest sigma."""
        noise = mx.random.normal(shape, key=key)
        return (
            noise * self._sigmas[-1] * (self._sigmas[-1].square() + 1).rsqrt()
        ).astype(dtype)

    def sigmas(self, t):
        """Look up sigma at (possibly fractional) timestep ``t``."""
        return _interp(self._sigmas, t)

    def timesteps(self, num_steps: int, dtype=mx.float32):
        """Return ``(t, t_prev)`` pairs walking from the final timestep down to 0."""
        boundaries = _linspace(len(self._sigmas) - 1, 0, num_steps + 1).astype(dtype)
        return list(zip(boundaries, boundaries[1:]))

    def step(self, eps_pred, x_t, t, t_prev):
        """Advance one Euler step from ``x_t`` (at ``t``) to the state at ``t_prev``."""
        sigma = self.sigmas(t).astype(eps_pred.dtype)
        sigma_prev = self.sigmas(t_prev).astype(eps_pred.dtype)

        dt = sigma_prev - sigma
        x_t_prev = (sigma.square() + 1).sqrt() * x_t + eps_pred * dt
        # Rescale by 1/sqrt(sigma_prev^2 + 1).
        return x_t_prev * (sigma_prev.square() + 1).rsqrt()
75 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/system_prompt_generator.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 |
4 | class SystemPromptGenerator:
5 | """
6 | A class for generating system prompts dynamically based on tools JSON and user inputs.
7 | """
8 |
    def __init__(self):
        """
        Initialize the SystemPromptGenerator with a default system prompt template.

        The template contains ``{{ ... }}`` placeholders that are filled in by
        plain string replacement in generate_prompt().
        """
        self.template = """
In this environment you have access to a set of tools you can use to answer the user's question.
{{ FORMATTING INSTRUCTIONS }}
String and scalar parameters should be specified as is, while lists and objects should use JSON format. Note that spaces for string values are not stripped. The output is not expected to be valid XML and is parsed with regular expressions.
Here are the functions available in JSONSchema format:
{{ TOOL DEFINITIONS IN JSON SCHEMA }}
{{ USER SYSTEM PROMPT }}
{{ TOOL CONFIGURATION }}
"""
        # Fallbacks used by generate_prompt() when the caller supplies None.
        self.default_user_system_prompt = "You are an intelligent assistant capable of using tools to solve user queries effectively."
        self.default_tool_config = "No additional configuration is required."
24 |
25 | def generate_prompt(
26 | self, tools: dict, user_system_prompt: str = None, tool_config: str = None
27 | ) -> str:
28 | """
29 | Generate a system prompt based on the provided tools JSON, user prompt, and tool configuration.
30 |
31 | Args:
32 | tools (dict): The tools JSON containing definitions of the available tools.
33 | user_system_prompt (str): A user-provided description or instruction for the assistant (optional).
34 | tool_config (str): Additional tool configuration information (optional).
35 |
36 | Returns:
37 | str: The dynamically generated system prompt.
38 | """
39 |
40 | # set the user system prompt
41 | user_system_prompt = user_system_prompt or self.default_user_system_prompt
42 |
43 | # set the tools config
44 | tool_config = tool_config or self.default_tool_config
45 |
46 | # get the tools schema
47 | tools_json_schema = json.dumps(tools, indent=2)
48 |
49 | # perform replacements
50 | prompt = self.template.replace(
51 | "{{ TOOL DEFINITIONS IN JSON SCHEMA }}", tools_json_schema
52 | )
53 | prompt = prompt.replace("{{ FORMATTING INSTRUCTIONS }}", "")
54 | prompt = prompt.replace("{{ USER SYSTEM PROMPT }}", user_system_prompt)
55 | prompt = prompt.replace("{{ TOOL CONFIGURATION }}", tool_config)
56 |
57 | # return the prompt
58 | return prompt
59 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 | import dotenv
4 |
5 | from nostr_dvm.framework import DVMFramework
6 | from nostr_dvm.tasks.generic_dvm import GenericDVM
7 | from nostr_dvm.utils.admin_utils import AdminConfig
8 | from nostr_dvm.utils.dvmconfig import build_default_config
9 | from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
10 | from nostr_sdk import Keys, Kind
11 |
12 |
13 |
def playground(announce=False):
    """Assemble a minimal GenericDVM, attach a custom process() handler and run it.

    Args:
        announce: When True, rebroadcast NIP89/NIP65 info and update the profile.
    """
    framework = DVMFramework()

    admin_cfg = AdminConfig()
    admin_cfg.REBROADCAST_NIP89 = announce
    admin_cfg.REBROADCAST_NIP65_RELAY_LIST = announce
    admin_cfg.UPDATE_PROFILE = announce

    dvm_name = "Generic DVM"
    identifier = "a_very_generic_dvm"  # pick a unique identifier to get an lnaddress
    dvm_cfg = build_default_config(identifier)
    dvm_cfg.KIND = Kind(5050)  # manually set the kind number (see data-vending-machines.org)

    # NIP89 announcement content
    nip89info = {
        "name": dvm_name,
        "picture": "https://image.nostr.build/28da676a19841dcfa7dcf7124be6816842d14b84f6046462d2a3f1268fe58d03.png",
        "about": "I'm just a demo DVM, not doing much.'",
        "supportsEncryption": True,
        "acceptsNutZaps": dvm_cfg.ENABLE_NUTZAP,
        "nip90Params": {
        }
    }

    nip89_cfg = NIP89Config()
    nip89_cfg.DTAG = check_and_set_d_tag(identifier, dvm_name, dvm_cfg.PRIVATE_KEY, nip89info["picture"])
    nip89_cfg.CONTENT = json.dumps(nip89info)

    dvm_options = {
        "some_option": "#RunDVM",
    }

    dvm = GenericDVM(name=dvm_name, dvm_config=dvm_cfg, nip89config=nip89_cfg,
                     admin_config=admin_cfg, options=dvm_options)

    async def process(request_form):
        opts = dvm.set_options(request_form)
        output = "The result of the DVM is: "
        output += opts["some_option"]
        print(output)
        return output

    dvm.process = process  # replace the default process function with ours
    framework.add(dvm)

    framework.run()
60 |
if __name__ == '__main__':
    # Ensure a .env file exists (create an empty one if missing), then load it.
    env_path = Path('.env')
    if not env_path.is_file():
        print("Writing new .env file")
        env_path.write_text('')
    if env_path.is_file():
        print(f'loading environment from {env_path.resolve()}')
        dotenv.load_dotenv(env_path, verbose=True, override=True)
    else:
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    announce = False

    playground(announce=announce)
75 |
--------------------------------------------------------------------------------
/nostr_dvm/utils/nwc_tools.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 |
4 | import requests
5 | from nostr_sdk import Keys, PublicKey, NostrWalletConnectUri, Nwc
6 |
7 | from nostr_dvm.utils.dvmconfig import DVMConfig
8 | from nostr_dvm.utils.nostr_utils import check_and_set_private_key
9 | from nostr_dvm.utils.zap_utils import zaprequest
10 |
11 |
async def nwc_zap(connectionstr, bolt11, keys, externalrelay=None):
    """Pay a BOLT11 invoice through a Nostr Wallet Connect (NIP-47) wallet.

    Args:
        connectionstr: A nostr+walletconnect:// URI for the wallet service.
        bolt11: The lightning invoice to pay.
        keys: Unused in this function; kept for call-site compatibility.
        externalrelay: Unused in this function; kept for call-site compatibility.

    Returns:
        The value returned by the wallet's pay_invoice call.
    """
    uri = NostrWalletConnectUri.parse(connectionstr)

    # Initialize NWC client
    nwc = Nwc(uri)

    # Informational round-trips, printed for debugging only.
    info = await nwc.get_info()
    print(info)

    balance = await nwc.get_balance()
    print(f"Balance: {balance} MilliSats")

    event_id = await nwc.pay_invoice(bolt11)
    print("NWC event: " + event_id)
    return event_id
27 |
28 |
def parse_connection_str(connectionstring):
    """Parse a NWC connection URI into its components.

    Replaces the previous hand-rolled split logic which only decoded the
    literal "%3A%2F%2F" escape and assumed `relay` always preceded `secret`.
    Query parameters are now fully URL-decoded and order-independent.

    Args:
        connectionstring: e.g. "nostr+walletconnect://<pubkey>?relay=<url>&secret=<hex>"

    Returns:
        (targetpubkey, relay, secret) as plain strings.

    Raises:
        KeyError: if the `relay` or `secret` query parameter is missing.
    """
    from urllib.parse import parse_qs

    base, _, query = connectionstring.partition("?")
    # scheme is "nostr+walletconnect"; strip it and the "//" authority marker
    targetpubkey = base.split(":", 1)[1].replace("//", "")
    params = parse_qs(query)  # parse_qs also percent-decodes values
    relay = params["relay"][0]
    secret = params["secret"][0]
    return targetpubkey, relay, secret
37 |
38 |
def make_nwc_account(identifier, nwcdomain):
    """Request a new NWC connection URI for a DVM from an LNBits-backed NWC service.

    Args:
        identifier: DVM identifier; used to look up its private key and LNBits
            admin key from the environment.
        nwcdomain: Full URL of the NWC provisioning endpoint.

    Returns:
        The connection URI string, or "" when the user already exists or the
        request failed (best effort: errors are printed and swallowed).
    """
    pubkey = Keys.parse(os.getenv("DVM_PRIVATE_KEY_" + identifier.upper())).public_key().to_hex()
    data = {
        'name': identifier,
        'host': os.getenv("LNBITS_HOST"),
        'key': os.getenv("LNBITS_ADMIN_KEY_" + identifier.upper()),
        'pubkey': pubkey,
    }

    try:
        header = {"content-type": "application/json"}
        # timeout added so an unreachable endpoint cannot hang the caller forever
        res = requests.post(nwcdomain, headers=header, json=data, timeout=30)
        obj = res.json()
        if obj['params']['connectionURI'] != "Users already exists":
            return obj['params']['connectionURI']
        else:
            return ""

    except Exception as e:
        # deliberate best-effort: report and fall back to "" like the caller expects
        print(e)
        return ""
61 |
62 |
async def nwc_test(nwc_server):
    """Manual smoke test: provision a NWC account on nwc_server and zap a fixed profile.

    NOTE(review): this sends a real 21-sat zap when a connection string is
    obtained — it is an integration test with live side effects, not a unit test.
    """
    connectionstring = make_nwc_account("test", nwc_server + "/api/new")
    print(connectionstring)
    # TODO Store the connection string in a db, use here if you already have one
    # connectionstring = "nostr+walletconnect:..."
    if connectionstring != "":
        # we use the keys from a test user
        keys = Keys.parse(check_and_set_private_key("test"))

        # we zap npub1nxa4tywfz9nqp7z9zp7nr7d4nchhclsf58lcqt5y782rmf2hefjquaa6q8's profile 21 sats and say Cool stuff
        pubkey = PublicKey.parse("npub1nxa4tywfz9nqp7z9zp7nr7d4nchhclsf58lcqt5y782rmf2hefjquaa6q8")
        bolt11 = zaprequest("hype@bitcoinfixesthis.org", 21, "Cool Stuff", None,
                            pubkey, keys, DVMConfig.RELAY_LIST)

        await nwc_zap(connectionstring, bolt11, keys)
78 |
--------------------------------------------------------------------------------
/ui/noogle/src/components/helper/string.ts:
--------------------------------------------------------------------------------
// Helper regexes, stored as strings and compiled with `new RegExp(...)` at the use sites.
const Regex_Url_Str = "^https?:\/\/(?:.*\/)?[^\/.]+$" // whole-string URL check
const Regex_Url_Img = "(https?:\\/\\/.*\\.(?:png|jpg|jpeg|webp|gif))" // image URL by file extension
const Regex_Url_Video = "(https?:\\/\\/.*\\.(?:mp4|mov|avi))" // video URL by file extension
const Regex_Urlw_Str = "(https:\\/\\/)" + "((([a-z\\d]([a-z\\d-]*[a-z\\d])*)\\.)+[a-z]{2,})+[\\/]{0,1}" // https domain name

;

// Email-address-shaped matcher used for NIP-05 identifiers (name@domain).
const Regex_Nip05_Str = "(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])*\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\\])"

const StringUtil = {
// True when the whole string matches Regex_Url_Str (case-insensitive).
isValidUrl: (str: string): boolean => new RegExp(Regex_Url_Str, "i").test(str),
// Appends a space after each hyperlink / NIP-05 match so they can be tokenized downstream.
parseHyperlinks: (str: string): string => str.replace(new RegExp(Regex_Urlw_Str, "gim"), "$& ").replace(new RegExp(Regex_Nip05_Str, "gim"), "$& "),
// NOTE(review): the replacement strings below look garbled — they appear to have
// contained HTML markup that was stripped (the string literals span multiple
// lines). Left byte-identical; reconstruct from VCS history before editing.
parseImages: (str: string): string => str.toLowerCase().includes('nsfw') || str.toLowerCase().includes('lingerie') || str.toLowerCase().includes('sex') || str.toLowerCase().includes('porn') ? str.replace(" http", "\nhttp").replace(new RegExp(Regex_Url_Img, "gim"), "NSFW Show/Hide Results
").replace(new RegExp(Regex_Url_Video, "gim"), "NSFW Show/Hide Results
").replace(new RegExp(Regex_Url_Str, "gim"), "$& ") : str.replace(" http", "\nhttp").replace(new RegExp(Regex_Url_Img, "gim"), " ").replace(new RegExp(Regex_Url_Video, "gim"), " ").replace(new RegExp(Regex_Url_Str, "gim"), "$& "),
// NOTE(review): `.replace(str, " ")` replaces the first occurrence of the whole
// input within itself (i.e. blanks the string) — looks like a bug; confirm intent.
parseImages_save: (str: string): string => str.replace(" http", "\nhttp").replace(str, " ").replace(new RegExp(Regex_Url_Video, "gim"), " ").replace(new RegExp(Regex_Url_Str, "gim"), "$& "),


//parseImages: (str: string): string => str.replace(" http", " http") //.replace("\n", " ").replace(new RegExp(Regex_Url_Img, "gim"), " ")


};

export default StringUtil;
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # NostrDVM: Nostr NIP90 Data Vending Machine Framework
2 |
3 | This framework provides a way to easily build and/or run `Nostr NIP90 DVMs in Python`.
4 |
5 | This project is currently under development and additional tasks and features are added along the way.
6 | This means the project is in alpha status, interfaces might still change/break at this stage.
7 |
8 | ## Getting started
9 |
10 | Create a new venv by running `"python -m venv venv"`
11 | - Place .env file (based on .env_example) in main folder.
- If you want the framework to manage wallets and lnaddresses automatically for you, create an `LNbits` account on an accessible instance of your choice, enter one account's id and admin key (this account will create other accounts for the dvms). Otherwise leave the lnbits .env variables empty and update each of your DVM's profile with a lightning address of your choice, or alternatively, make sure the DVM is free.
13 | - the framework will then automatically create keys, nip89 tags and if lnbits is used zapable NIP57 `lightning addresses` for your dvms in this file.
- Activate the venv by typing `"venv\Scripts\activate"` on Windows or `"source venv/bin/activate"` otherwise
15 | - pip install nostr-dvm
16 | - Run python3 main.py. (or check single examples in the example folder)
17 |
18 | In each task component DVM examples are already prepared. Feel free to play along with the existing ones.
19 | You can also add new tasks by using the interface, just like the existing tasks in the `tasks` folder.
20 |
21 | A `bot` is running by default that lists and communicates with the `DVMs` added to it,
so your DVMs can be controlled via any regular social client as well.
23 |
24 | If LNBits is not used, make sure your DVM's nostr accounts have a valid lightning address.
25 |
26 | A tutorial on how to add additional tasks, as well as the larger server backend will be added at a later stage.
27 |
28 | ## Getting started with Docker
29 |
30 | Create `.env` from the example provided by us `.env_example`
31 |
32 | ```bash
33 | cp .env_example .env
34 | ```
35 |
36 | and set the necessary environmental variables:
37 |
38 | ```bash
39 | LNBITS_ADMIN_KEY = ""
40 | LNBITS_WALLET_ID = ""
41 | LNBITS_HOST = "https://demo.lnbits.com/"
42 | NOSTDRESS_DOMAIN = "nostrdvm.com"
43 | ```
44 |
45 | To get the Docker container up and running:
46 |
47 | ```sh
48 | # in foreground
49 | docker compose up --build
50 |
51 | # in background
52 | docker compose up --build -d
53 | ```
54 |
55 | To update your container, do:
56 |
57 | ```sh
58 | git pull
59 |
60 | docker compose build --no-cache
61 |
62 | # in foreground
63 | docker compose up
64 |
65 | # in background
66 | docker compose up -d
67 | ```
68 |
69 | This will build the Docker image and start the `nostrdvm` service as defined in the `docker-compose.yml` file.
70 |
71 | ## License
72 |
73 | This project is licensed under the MIT License.
74 |
--------------------------------------------------------------------------------
/ui/noogle/src/layouts/ThreeColumnLayout.vue:
--------------------------------------------------------------------------------
1 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
42 |
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/tests/sunoai.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | import requests
4 |
5 | # replace your vercel domain
6 | base_url = 'http://localhost:3000'
7 |
8 |
def custom_generate_audio(payload):
    """POST a custom-mode generation request; returns the decoded JSON response."""
    endpoint = f"{base_url}/api/custom_generate"
    resp = requests.post(endpoint, json=payload, headers={'Content-Type': 'application/json'})
    return resp.json()
13 |
14 |
def extend_audio(payload):
    """POST an extend-audio request; returns the decoded JSON response."""
    endpoint = f"{base_url}/api/extend_audio"
    resp = requests.post(endpoint, json=payload, headers={'Content-Type': 'application/json'})
    return resp.json()
19 |
def generate_audio_by_prompt(payload):
    """POST a prompt-based generation request; returns the decoded JSON response."""
    endpoint = f"{base_url}/api/generate"
    resp = requests.post(endpoint, json=payload, headers={'Content-Type': 'application/json'})
    return resp.json()
24 |
25 |
def get_audio_information(audio_ids):
    """Fetch metadata for one or more comma-separated clip ids."""
    return requests.get(f"{base_url}/api/get?ids={audio_ids}").json()
30 |
31 |
def get_quota_information():
    """Return the account's remaining-credit information."""
    return requests.get(f"{base_url}/api/get_limit").json()
36 |
def get_clip(clip_id):
    """Fetch a single clip's details by id."""
    return requests.get(f"{base_url}/api/clip?id={clip_id}").json()
41 |
def generate_whole_song(clip_id):
    """Concatenate a clip and its extensions into one full song."""
    endpoint = f"{base_url}/api/concat"
    return requests.post(endpoint, json={"clip_id": clip_id}).json()
47 |
48 |
if __name__ == '__main__':
    import time

    prompt = "A popular heavy metal song about a purple Ostrich, Nostr, sung by a deep-voiced male singer, slowly and melodiously. The lyrics depict hope for a better future."

    # Only start a generation if the account has enough credits left.
    has_quota = False
    quota_info = get_quota_information()
    if int(quota_info['credits_left']) >= 20:
        has_quota = True
    else:
        print("No quota left, exiting.")

    if has_quota:
        data = generate_audio_by_prompt({
            "prompt": prompt,
            "make_instrumental": False,
            "wait_audio": False
        })
        if len(data) == 0:
            # BUGFIX: previously fell through with `pass` and crashed on data[0] below.
            print("Couldn't create song")
            raise SystemExit(1)

        ids = f"{data[0]['id']},{data[1]['id']}"
        print(f"ids: {ids}")

        # Poll for up to ~5 minutes until the first clip starts streaming.
        for _ in range(60):
            data = get_audio_information(ids)
            if data[0]["status"] == 'streaming':
                print(f"{data[0]['id']} ==> {data[0]['video_url']}")
                print(f"{data[1]['id']} ==> {data[1]['video_url']}")
                break
            # BUGFIX: asyncio.sleep(1.0) was never awaited (a no-op coroutine);
            # sleep for 5 seconds as the original comment intended.
            time.sleep(5)

        response1 = get_clip(data[0]['id'])
        print(response1['video_url'])
        print(response1['prompt'])

        response2 = get_clip(data[1]['id'])
        print(response2['video_url'])
        print(response2['prompt'])
--------------------------------------------------------------------------------
/tests/dalle.py:
--------------------------------------------------------------------------------
1 | import json
2 | import json
3 | import os
4 | from pathlib import Path
5 |
6 | import dotenv
7 | from nostr_sdk import LogLevel, init_logger
8 |
9 | from nostr_dvm.tasks.imagegeneration_openai_dalle import ImageGenerationDALLE
10 | from nostr_dvm.utils.admin_utils import AdminConfig
11 | from nostr_dvm.utils.dvmconfig import build_default_config
12 | from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
13 | from nostr_dvm.utils.zap_utils import get_price_per_sat
14 |
# Startup behavior flags.
# NOTE(review): the three rebroadcast/update flags are defined here but not
# referenced in the visible code — build_dalle hard-codes its AdminConfig;
# confirm whether they are meant to be wired in.
rebroadcast_NIP89 = False  # Announce NIP89 on startup Only do this if you know what you're doing.
rebroadcast_NIP65_Relay_List = False
update_profile = False

# Logging setup for the nostr_sdk client.
use_logger = True
log_level = LogLevel.ERROR

if use_logger:
    init_logger(log_level)
24 |
25 |
def build_dalle(name, identifier):
    """Configure and return an ImageGenerationDALLE DVM task.

    Args:
        name: Display name announced in the NIP89 info.
        identifier: Unique identifier used for key/wallet lookup in the .env file.

    Returns:
        A ready-to-run ImageGenerationDALLE instance.
    """
    dvm_config = build_default_config(identifier)

    dvm_config.NEW_USER_BALANCE = 0
    dvm_config.USE_OWN_VENV = False
    dvm_config.ENABLE_NUTZAP = True
    # fixed price: roughly 4 US cents worth of sats plus a flat profit margin
    profit_in_sats = 10
    dvm_config.FIX_COST = int(((4.0 / (get_price_per_sat("USD") * 100)) + profit_in_sats))
    nip89info = {
        "name": name,
        "picture": "https://image.nostr.build/22f2267ca9d4ee9d5e8a0c7818a9fa325bbbcdac5573a60a2d163e699bb69923.jpg",
        "about": "I create Images bridging OpenAI's DALL·E 3",
        "supportsEncryption": True,
        "acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
        "nip90Params": {
            "size": {
                "required": False,
                # BUGFIX: "1024:1024" used a colon — inconsistent with the other
                # values and with DALL·E 3's WxH size strings.
                "values": ["1024x1024", "1024x1792", "1792x1024"]
            }
        }
    }
    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY,
                                           nip89info["picture"])
    nip89config.CONTENT = json.dumps(nip89info)
    aconfig = AdminConfig()
    aconfig.REBROADCAST_NIP89 = False  # set True to (re)broadcast this DVM's NIP89 announcement
    aconfig.LUD16 = dvm_config.LN_ADDRESS
    aconfig.PRIVKEY = dvm_config.PRIVATE_KEY
    aconfig.MELT_ON_STARTUP = False  # set this to true to melt cashu tokens to our ln address on startup
    return ImageGenerationDALLE(name=name, dvm_config=dvm_config, nip89config=nip89config, admin_config=aconfig)
57 |
58 |
def playground():
    """Build and run the DALL·E DVM when an OpenAI API key is configured."""
    api_key = os.getenv("OPENAI_API_KEY")
    if api_key:
        build_dalle("Dall-E 3", "dalle3").run(True)
63 |
64 |
if __name__ == '__main__':
    # Ensure a .env file exists (create an empty one if missing), then load it.
    env_path = Path('.env')
    if not env_path.is_file():
        print("Writing new .env file")
        env_path.write_text('')
    if env_path.is_file():
        print(f'loading environment from {env_path.resolve()}')
        dotenv.load_dotenv(env_path, verbose=True, override=True)
    else:
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    playground()
77 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/messages/send_initialize_message.py:
--------------------------------------------------------------------------------
1 | # messages/send_initialize_message.py
2 | import logging
3 | import anyio
4 | from typing import Optional
5 | from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
6 |
7 | from nostr_dvm.backends.mcp.messages.message_types.initialize_message import (
8 | InitializeMessage,
9 | InitializedNotificationMessage,
10 | InitializeParams,
11 | MCPClientCapabilities,
12 | MCPClientInfo,
13 | InitializeResult,
14 | )
15 |
16 |
async def send_initialize(
    read_stream: MemoryObjectReceiveStream,
    write_stream: MemoryObjectSendStream,
) -> Optional[InitializeResult]:
    """Send an initialization request to the server and process its response.

    Performs the MCP handshake: sends `initialize`, waits up to 5 seconds for
    the server's reply, validates it, and sends the `initialized` notification
    on success.

    Returns:
        The validated InitializeResult, or None on error, timeout, or a
        closed stream.
    """

    # Set initialize params
    init_params = InitializeParams(
        protocolVersion="2024-11-05",
        capabilities=MCPClientCapabilities(),
        clientInfo=MCPClientInfo(),
    )

    # Create the initialize message
    init_message = InitializeMessage(init_params)

    # Sending
    logging.debug("Sending initialize request")
    await write_stream.send(init_message)

    try:
        # 5-second timeout for response
        with anyio.fail_after(5):
            # Get the response from the server
            async for response in read_stream:
                # If the response is an exception, log it and continue
                if isinstance(response, Exception):
                    logging.error(f"Error from server: {response}")
                    continue

                # Debug log the received message
                logging.debug(f"Received: {response.model_dump()}")

                # Check for error
                if response.error:
                    logging.error(f"Server initialization error: {response.error}")
                    return None

                # Check for result
                if response.result:
                    try:
                        # Validate the result
                        init_result = InitializeResult.model_validate(response.result)
                        logging.debug("Server initialized successfully")

                        # Notify the server of successful initialization
                        initialized_notify = InitializedNotificationMessage()
                        await write_stream.send(initialized_notify)

                        return init_result
                    except Exception as e:
                        logging.error(f"Error processing init result: {e}")
                        return None

    except TimeoutError:
        # raised by anyio.fail_after when the 5s window elapses
        logging.error("Timeout waiting for server initialization response")
        return None
    except Exception as e:
        logging.error(f"Unexpected error during server initialization: {e}")
        raise

    # Reached when the stream ends without yielding a usable result.
    logging.error("Initialization response timeout")
    return None
81 |
--------------------------------------------------------------------------------
/nostr_dvm/utils/external_dvm_utils.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | from datetime import timedelta
4 |
5 | from nostr_sdk import PublicKey, Options, Keys, Client, NostrSigner
6 |
7 | from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
8 | from nostr_dvm.utils.dvmconfig import DVMConfig
9 | from nostr_dvm.utils.nip89_utils import NIP89Config, nip89_fetch_events_pubkey
10 | from nostr_dvm.utils.output_utils import PostProcessFunctionType
11 |
12 |
async def build_client(config):
    """Create a Client signed with config.PRIVATE_KEY and connect it to config.RELAY_LIST."""
    signer_keys = Keys.parse(config.PRIVATE_KEY)
    client = Client(NostrSigner.keys(signer_keys))

    for relay_url in config.RELAY_LIST:
        await client.add_relay(relay_url)
    await client.connect()
    return client
21 |
22 |
def build_external_dvm(pubkey, task, kind, fix_cost, per_unit_cost, config,
                       external_post_process=PostProcessFunctionType.NONE):
    """Build a DVMTaskInterface proxy for a DVM operated by someone else.

    Fetches the remote DVM's NIP89 announcement (if any) to populate name,
    picture, about, nip90 params and capability flags; falls back to
    placeholders otherwise.

    NOTE(review): asyncio.run() is invoked twice — the client created in the
    first event loop is reused inside a second loop; verify nostr_sdk
    tolerates cross-loop reuse.

    Args:
        pubkey: The external DVM's public key (hex or bech32).
        task: Task label assigned to the proxy interface.
        kind: NIP90 event kind the external DVM listens on.
        fix_cost: Fixed price charged by the external DVM.
        per_unit_cost: Per-unit price charged by the external DVM.
        config: Config object providing PRIVATE_KEY and RELAY_LIST.
        external_post_process: Optional post-processing applied to results.

    Returns:
        A DVMTaskInterface describing the external DVM.
    """
    pubkey = PublicKey.parse(pubkey).to_hex()
    dvm_config = DVMConfig()
    dvm_config.PUBLIC_KEY = pubkey
    dvm_config.FIX_COST = fix_cost
    dvm_config.PER_UNIT_COST = per_unit_cost
    dvm_config.EXTERNAL_POST_PROCESS_TYPE = external_post_process

    client = asyncio.run(build_client(config))

    nip89content_str = asyncio.run(nip89_fetch_events_pubkey(client, pubkey, kind))
    # Placeholder values used when no NIP89 announcement is found.
    name = "External DVM"
    image = "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg"
    about = "An External DVM with no info"
    nip90params = {}
    encryption_supported = False
    cashu_accepted = False

    if nip89content_str is not None:
        print(nip89content_str)
        nip89content = json.loads(nip89content_str)
        if nip89content.get("name"):
            name = nip89content.get("name")
        # "picture" is the current field name; "image" is the legacy fallback
        if nip89content.get("picture"):
            image = nip89content.get("picture")
        elif nip89content.get("image"):
            image = nip89content.get("image")
        if nip89content.get("about"):
            about = nip89content.get("about")
        if nip89content.get("nip90Params"):
            nip90params = nip89content["nip90Params"]
        if nip89content.get("supportsEncryption"):
            encryption_supported = nip89content["supportsEncryption"]
        if nip89content.get("acceptsNutZaps"):
            cashu_accepted = nip89content["acceptsNutZaps"]
    else:
        print("No NIP89 set for " + name)
    nip89info = {
        "name": name,
        "picture": image,
        "about": about,
        "supportsEncryption": encryption_supported,
        "acceptsNutZaps": cashu_accepted,
        "nip90Params": nip90params
    }
    nip89config = NIP89Config()
    nip89config.KIND = kind
    nip89config.CONTENT = json.dumps(nip89info)

    interface = DVMTaskInterface(name=name, dvm_config=dvm_config, nip89config=nip89config, task=task)
    interface.SUPPORTS_ENCRYPTION = encryption_supported
    interface.ACCEPTS_CASHU = cashu_accepted

    return interface
78 |
--------------------------------------------------------------------------------
/nostr_dvm/utils/blossom_utils.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import hashlib
3 | import json
4 | import mimetypes
5 | import os
6 |
7 | import aiohttp
8 | from nostr_sdk import Keys, Tag, EventBuilder, Kind, Timestamp
9 |
10 |
def sha256(file):
    """Return the hex-encoded SHA-256 digest of *file* (a bytes-like object).

    Simplified: hashlib's one-shot constructor plus hexdigest() replaces the
    manual update()/digest().hex() sequence with identical output.
    """
    return hashlib.sha256(file).hexdigest()
15 |
async def check(url, sha256, key):
    """Return True if a blob with the given sha256 already exists on the server.

    Args:
        url: Blossom server base URL (used as the aiohttp session base).
        sha256: Hex digest of the blob, used as the GET path. Note this
            parameter shadows the module-level sha256() helper.
        key: Private key used to sign the "get" authorization event.
    """
    async with aiohttp.ClientSession(url) as sess:
        b64_auth = await generate_blossom_header(key, sha256, "get")
        headers = {
            "Authorization": b64_auth
        }

        # Relative path against the session's base URL: GET <url>/<sha256>
        async with sess.get(f"{sha256}", headers=headers) as resp:
            return resp.status == 200
25 |
26 |
async def generate_blossom_header(key, hash, method):
    """Build a base64-encoded Blossom (kind 24242) authorization header value."""
    signer = Keys.parse(key)
    tags = [
        Tag.parse(["x", hash]),
        Tag.parse(["t", method]),
        # auth event expires 5 minutes from now
        Tag.parse(["expiration", str(Timestamp.now().as_secs() + 300)]),
    ]

    builder = EventBuilder(Kind(24242), "Uploading blob with SHA-256 hash").tags(tags)
    signed = builder.sign_with_keys(signer)

    encoded_auth_event = base64.b64encode(signed.as_json().encode('utf-8')).decode('utf-8')
    return "Nostr " + encoded_auth_event
40 |
41 |
def upload_image_to_blossom(file_path, url):
    """Uploads an image to a Blossom server via a PUT request."""
    # NOTE(review): unimplemented stub — the body is missing, so this always
    # returns None. Use the async upload_blossom() below for the working path.
45 |
async def upload_blossom(filepath, pkey, url):
    """Upload a local file to a Blossom server.

    Returns:
        The server-reported blob URL on success, an error string on HTTP 413
        (payload too large), or None for any other non-200 response (the
        body is only printed).
    """

    with open(filepath, "rb") as f:
        # mtime is read but not used further in this function
        mtime = int(os.fstat(f.fileno()).st_mtime)

        file = f.read()
        hash = sha256(file)
        b64_auth = await generate_blossom_header(pkey, hash, "upload")

        file_stats = os.stat(filepath)
        sizeinmb = file_stats.st_size / (1024 * 1024)
        print("Filesize of Uploaded media: " + str(sizeinmb) + " Mb.")

        # NOTE(review): the PUT below runs even when the blob already exists;
        # presumably this check was meant to short-circuit — confirm intent.
        if await check(url, hash, pkey):
            print(f"Blob {hash} is up to date for [{filepath}].")
        else:
            print(f"Storing file [{hash}] on blossom.")


        content_type = mimetypes.guess_type(filepath)[0] or 'application/octet-stream'

        # Upload object to blossom.
        async with aiohttp.ClientSession() as sess:
            async with sess.put(
                url = url.rstrip('/') + '/upload',
                data=file,
                headers={
                    "Authorization": b64_auth,
                    "Content-Type": content_type
                }) as resp:


                if resp.status == 413:
                    print("Can't upload on Blossom Server")
                    return "Can't upload on Blossom Server"

                elif resp.status != 200:
                    txt = await resp.text()
                    print(txt)
                else:
                    res = await resp.text()
                    resjson = json.loads(res)
                    print(resjson["url"])
                    return resjson["url"]
90 |
91 |
--------------------------------------------------------------------------------
/tests/sd35_api.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from pathlib import Path
4 |
5 | import dotenv
6 | from nostr_sdk import LogLevel, init_logger
7 |
8 | from nostr_dvm.tasks.imagegeneration_sd35_api import ImageGenerationSD35
9 | from nostr_dvm.utils.admin_utils import AdminConfig
10 | from nostr_dvm.utils.dvmconfig import build_default_config
11 | from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
12 | from nostr_dvm.utils.zap_utils import get_price_per_sat
13 |
# Startup behavior flags.
# NOTE(review): the three rebroadcast/update flags are defined but not
# referenced in the visible code — build_sd35 hard-codes its AdminConfig;
# confirm whether they are meant to be wired in.
rebroadcast_NIP89 = False  # Announce NIP89 on startup Only do this if you know what you're doing.
rebroadcast_NIP65_Relay_List = False
update_profile = False

# Logging setup for the nostr_sdk client.
use_logger = True
log_level = LogLevel.ERROR

if use_logger:
    init_logger(log_level)
23 |
24 |
def build_sd35(name, identifier):
    """Configure and return an ImageGenerationSD35 DVM task.

    Args:
        name: Display name announced in the NIP89 info.
        identifier: Unique identifier used for key/wallet lookup in the .env file.
    """
    dvm_cfg = build_default_config(identifier)

    dvm_cfg.NEW_USER_BALANCE = 0
    dvm_cfg.USE_OWN_VENV = False
    dvm_cfg.ENABLE_NUTZAP = True
    # fixed price: roughly 4 US cents worth of sats plus a flat profit margin
    profit_in_sats = 10
    dvm_cfg.FIX_COST = int(((4.0 / (get_price_per_sat("USD") * 100)) + profit_in_sats))

    nip89info = {
        "name": name,
        "picture": "https://i.nostr.build/NOXcCIPmOZrDTK35.jpg",
        "about": "I draw images using Stable diffusion ultra",
        "supportsEncryption": True,
        "acceptsNutZaps": dvm_cfg.ENABLE_NUTZAP,
        "nip90Params": {
            "negative_prompt": {
                "required": False,
                "values": []
            },
            "ratio": {
                "required": False,
                "values": ["1:1", "5:4", "3:2", "16:9", "21:9", "9:21", "9:16", "2:3", "4:5"]
            }
        }
    }
    nip89_cfg = NIP89Config()
    nip89_cfg.DTAG = check_and_set_d_tag(identifier, name, dvm_cfg.PRIVATE_KEY,
                                         nip89info["picture"])
    nip89_cfg.CONTENT = json.dumps(nip89info)

    admin_cfg = AdminConfig()
    admin_cfg.REBROADCAST_NIP89 = False  # set True to (re)broadcast this DVM's NIP89 announcement
    admin_cfg.LUD16 = dvm_cfg.LN_ADDRESS
    admin_cfg.PRIVKEY = dvm_cfg.PRIVATE_KEY
    admin_cfg.MELT_ON_STARTUP = False  # set this to true to melt cashu tokens to our ln address on startup

    task_options = {"API_KEY": os.getenv("STABILITY_KEY")}

    return ImageGenerationSD35(name=name, dvm_config=dvm_cfg, nip89config=nip89_cfg, admin_config=admin_cfg, options=task_options)
65 |
66 |
def playground():
    """Build and run the SD3.5 DVM when a Stability API key is configured."""
    api_key = os.getenv("STABILITY_KEY")
    if api_key:
        build_sd35("Stable Diffusion 3.5", "sd35").run(True)
71 |
72 |
if __name__ == '__main__':
    # Ensure a .env file exists (create an empty one if missing), then load it.
    env_path = Path('.env')
    if not env_path.is_file():
        print("Writing new .env file")
        env_path.write_text('')
    if env_path.is_file():
        print(f'loading environment from {env_path.resolve()}')
        dotenv.load_dotenv(env_path, verbose=True, override=True)
    else:
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    playground()
85 |
--------------------------------------------------------------------------------
/tests/fix_lnbits_lnaddress.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | import dotenv
4 | from nostr_dvm.utils.zap_utils import make_ln_address_nostdress, create_lnbits_wallet, add_key_to_env_file
5 |
6 |
def playground():
    """One-off maintenance helper: force-create a fresh LNBits wallet for a DVM.

    NOTE(review): reads DVM_PRIVATE_KEY_BOT_<ID> from the environment and
    passes it as the `npub` argument — confirm a private key (not an npub)
    is really intended here.
    """
    # change the identifier to the dvm you want to update.
    # This will create a new lnbits wallet and update the lnaddress to it
    # This is for existing dvms
    identifier = "discovery_content_gm"
    check_and_set_ln_bits_keys_force_new(identifier, os.getenv("DVM_PRIVATE_KEY_BOT_" + identifier.upper()))
13 |
14 |
15 |
def check_and_set_ln_bits_keys(identifier, npub):
    """Create (once) and persist LNBits wallet keys plus lnaddress for a DVM.

    On first call (no LNBITS_INVOICE_KEY_<ID> in the environment) a wallet is
    created and all keys are written to the .env file; subsequent calls read
    the stored values back.

    Returns:
        (invoice_key, admin_key, wallet_id, lnaddress)
    """
    if not os.getenv("LNBITS_INVOICE_KEY_" + identifier.upper()):
        invoicekey, adminkey, walletid, success = create_lnbits_wallet(identifier)

        add_key_to_env_file("LNBITS_INVOICE_KEY_" + identifier.upper(), invoicekey)
        add_key_to_env_file("LNBITS_ADMIN_KEY_" + identifier.upper(), adminkey)
        add_key_to_env_file("LNBITS_WALLET_ID_" + identifier.upper(), walletid)

        lnaddress = ""
        pin = ""
        # only register a lightning address when nostdress is configured and
        # wallet creation did not report failure
        if os.getenv("NOSTDRESS_DOMAIN") and success != "failed":
            print(os.getenv("NOSTDRESS_DOMAIN"))
            lnaddress, pin = make_ln_address_nostdress(identifier, npub, " ", os.getenv("NOSTDRESS_DOMAIN"), identifier)
            add_key_to_env_file("LNADDRESS_" + identifier.upper(), lnaddress)
            add_key_to_env_file("LNADDRESS_PIN_" + identifier.upper(), pin)

        return invoicekey, adminkey, walletid, lnaddress
    else:
        return (os.getenv("LNBITS_INVOICE_KEY_" + identifier.upper()),
                os.getenv("LNBITS_ADMIN_KEY_" + identifier.upper()),
                os.getenv("LNBITS_WALLET_ID_" + identifier.upper()),
                os.getenv("LNADDRESS_" + identifier.upper()))
38 |
39 |
40 |
def check_and_set_ln_bits_keys_force_new(identifier, npub):
    """FORCE-update the config: create a brand-new lnbits wallet for *identifier*.

    Unlike :func:`check_and_set_ln_bits_keys` this always creates a new wallet
    and overwrites the stored keys, re-using the stored nostdress pin so the
    existing lightning-address entry is updated rather than recreated.
    """
    suffix = identifier.upper()
    invoicekey, adminkey, walletid, success = create_lnbits_wallet(identifier)

    for env_key, value in (("LNBITS_INVOICE_KEY_", invoicekey),
                           ("LNBITS_ADMIN_KEY_", adminkey),
                           ("LNBITS_WALLET_ID_", walletid)):
        add_key_to_env_file(env_key + suffix, value)

    lnaddress = ""
    domain = os.getenv("NOSTDRESS_DOMAIN")
    if domain and success != "failed":
        print(domain)
        stored_pin = os.getenv("LNADDRESS_PIN_" + suffix)
        lnaddress, pin = make_ln_address_nostdress(identifier, npub, stored_pin, domain, identifier)
        add_key_to_env_file("LNADDRESS_" + suffix, lnaddress)
        add_key_to_env_file("LNADDRESS_PIN_" + suffix, pin)

    return invoicekey, adminkey, walletid, lnaddress
57 |
58 |
59 |
if __name__ == '__main__':
    env_path = Path('.env')
    if not env_path.is_file():
        # Bootstrap an empty .env so the loader below has a file to read.
        print("Writing new .env file")
        env_path.write_text('')
    if env_path.is_file():
        print(f'loading environment from {env_path.resolve()}')
        dotenv.load_dotenv(env_path, verbose=True, override=True)
    else:
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    playground()
72 |
--------------------------------------------------------------------------------
/tutorials/10_delete_nip89.py:
--------------------------------------------------------------------------------
1 |
2 | # Welcome Back. So you were playing around with a DVM and announced it to the world, but now you rather would like
3 | # to remove it, because you don't want to run it anymore or you did it by accident. In this tutorial we look at how to
4 | # remove NIP89 announcements, so our DVM doesn't appear in clients anymore.
5 | # Check the main function for more instructions.
6 |
7 |
8 |
9 | import asyncio
10 | from datetime import timedelta
11 |
12 | from nostr_sdk import Keys, Client, NostrSigner, Filter
13 |
14 | from nostr_dvm.utils.definitions import EventDefinitions
15 | from nostr_dvm.utils.dvmconfig import DVMConfig
16 | from nostr_dvm.utils.nip89_utils import fetch_nip89_parameters_for_deletion
17 |
18 | # Method to delete the NIP89, don't worry, you don't have to touch this, take a look in the main function.
async def delete_nip_89(private_key, relay_list, pow=True):
    """Fetch all NIP-89 announcements signed by *private_key* and request their deletion.

    Args:
        private_key: hex/bech32 private key of the DVM that made the announcements.
        relay_list: relays to query for announcements and to send the deletion events to.
        pow: when True, mine proof-of-work on the deletion events (slower, but
            required by some relays). Name kept for caller compatibility even
            though it shadows the builtin.
    """
    keys = Keys.parse(private_key)
    dvm_config = DVMConfig()
    dvm_config.RELAY_LIST = relay_list
    client = Client(NostrSigner.keys(keys))
    for relay in dvm_config.RELAY_LIST:
        await client.add_relay(relay)
    await client.connect()
    # Renamed from `filter`, which shadowed the builtin of the same name.
    announcement_filter = Filter().kind(EventDefinitions.KIND_ANNOUNCEMENT).author(keys.public_key())
    events = await client.fetch_events(announcement_filter, timedelta(seconds=5))

    if len(events.to_vec()) == 0:
        print("Couldn't find note on relays. Seems they are gone.")
        return

    for event in events.to_vec():
        print(event)
        await fetch_nip89_parameters_for_deletion(keys, event.id().to_hex(), client, dvm_config, pow)
37 |
38 |
39 |
if __name__ == '__main__':

    # What do you need to delete an event?

    # The first thing you need is the private key of the DVM the announcement was made for.
    # NostrDVM stores all your keys in the .env file. Open the .env file and search for
    # DVM_PRIVATE_KEY_{IDENTIFIER_OF_YOUR_DVM}. Enter it below in the field private_key:

    private_key = "83274234123..."

    # You can either use Proof of Work to send the delete event or not. Some relays require you to use POW in order to write.
    # Sending a POW event might take up to a couple of minutes, so if you decide to use it, have some patience in the process.
    # If you know the relays you published your announcements to do not require POW, you can also set it to False which speeds up the process.
    pow = False

    # And finally set the relay list you want to send the deletion request to. Ideally, you use the same relays that you use
    # in your DVM's config. Maybe announcements also got propagated to other relays, so you might need to play around a bit until it's gone everywhere.
    RELAY_LIST = ["wss://relay.primal.net",
                  "wss://relay.damus.io",
                  "wss://relay.nostrplebs.com",
                  "wss://promenade.fiatjaf.com",
                  "wss://nostr.mom",
                  "wss://nostr.oxtr.dev",
                  "wss://relay.nostr.band"
                  ]

    # That's it. Once you entered the info, run the script and if your private key matches the ID and the event can be found it should be deleted.
    # Otherwise you'll get a warning
    asyncio.run(delete_nip_89(private_key, RELAY_LIST, pow))
--------------------------------------------------------------------------------
/nostr_dvm/backends/mlx/modules/stable_diffusion/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright © 2023 Apple Inc.
2 |
3 | import time
4 | from typing import Tuple
5 |
6 | import mlx.core as mx
7 |
8 | from .model_io import (
9 | _DEFAULT_MODEL,
10 | load_autoencoder,
11 | load_diffusion_config,
12 | load_text_encoder,
13 | load_tokenizer,
14 | load_unet,
15 | )
16 | from .sampler import SimpleEulerSampler
17 |
18 |
def _repeat(x, n, axis):
    """Repeat ``x`` ``n`` times along ``axis``, flattening the copies back into that axis.

    NOTE(review): this mutates the object returned by ``x.shape`` via
    ``insert``/``pop``, so it assumes mlx hands back a fresh list each call —
    confirm against the pinned mlx version (newer mlx returns a tuple).
    """
    # Make the expanded shape
    s = x.shape
    s.insert(axis + 1, n)

    # Expand
    x = mx.broadcast_to(mx.expand_dims(x, axis + 1), s)

    # Make the flattened shape
    s.pop(axis + 1)
    s[axis] *= n

    return x.reshape(s)
32 |
33 |
class StableDiffusion:
    """Text-to-image Stable Diffusion pipeline for mlx (tokenizer, CLIP text
    encoder, UNet, Euler sampler and VAE decoder loaded from *model*)."""

    def __init__(self, model: str = _DEFAULT_MODEL, float16: bool = False):
        # float16 halves memory at some numeric cost; dtype is used for
        # latents/timesteps below.
        self.dtype = mx.float16 if float16 else mx.float32
        self.diffusion_config = load_diffusion_config(model)
        self.unet = load_unet(model, float16)
        self.text_encoder = load_text_encoder(model, float16)
        self.autoencoder = load_autoencoder(model, float16)
        self.sampler = SimpleEulerSampler(self.diffusion_config)
        self.tokenizer = load_tokenizer(model)

    def generate_latents(
        self,
        text: str,
        n_images: int = 1,
        num_steps: int = 50,
        cfg_weight: float = 7.5,
        negative_text: str = "",
        latent_size: Tuple[int] = (64, 64),
        seed=None,
    ):
        """Yield denoised latents, one per sampler step (generator).

        Classifier-free guidance is only applied when ``cfg_weight > 1``; in
        that case the negative prompt is encoded too and the UNet runs on a
        doubled batch. Pass the final yielded latents to :meth:`decode` to get
        images.
        """
        # Set the PRNG state
        seed = seed or int(time.time())
        mx.random.seed(seed)

        # Tokenize the text
        tokens = [self.tokenizer.tokenize(text)]
        if cfg_weight > 1:
            tokens += [self.tokenizer.tokenize(negative_text)]
        # Pad both token lists with 0 to a common length so they batch together.
        lengths = [len(t) for t in tokens]
        N = max(lengths)
        tokens = [t + [0] * (N - len(t)) for t in tokens]
        tokens = mx.array(tokens)

        # Compute the features
        conditioning = self.text_encoder(tokens)

        # Repeat the conditioning for each of the generated images
        if n_images > 1:
            conditioning = _repeat(conditioning, n_images, axis=0)

        # Create the latent variables
        x_T = self.sampler.sample_prior(
            (n_images, *latent_size, self.autoencoder.latent_channels), dtype=self.dtype
        )

        # Perform the denoising loop
        x_t = x_T
        for t, t_prev in self.sampler.timesteps(num_steps, dtype=self.dtype):
            # With CFG the latents are duplicated: first half conditioned on
            # the prompt, second half on the negative prompt.
            x_t_unet = mx.concatenate([x_t] * 2, axis=0) if cfg_weight > 1 else x_t
            t_unet = mx.broadcast_to(t, [len(x_t_unet)])
            eps_pred = self.unet(x_t_unet, t_unet, encoder_x=conditioning)

            if cfg_weight > 1:
                # Guidance: push the prediction away from the negative prompt.
                eps_text, eps_neg = eps_pred.split(2)
                eps_pred = eps_neg + cfg_weight * (eps_text - eps_neg)

            x_t_prev = self.sampler.step(eps_pred, x_t, t, t_prev)
            x_t = x_t_prev
            yield x_t

    def decode(self, x_t):
        """Decode latents to images scaled and clipped into [0, 1]."""
        x = self.autoencoder.decode(x_t / self.autoencoder.scaling_factor)
        x = mx.minimum(1, mx.maximum(0, x / 2 + 0.5))
        return x
98 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mcp/transport/stdio/stdio_server_shutdown.py:
--------------------------------------------------------------------------------
1 | # transport/stdio/stdio_server_shutdown.py
2 | import logging
3 | from typing import Optional
4 |
5 | import anyio
6 | from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
7 |
8 |
async def shutdown_stdio_server(
    read_stream: Optional[MemoryObjectReceiveStream],
    write_stream: Optional[MemoryObjectSendStream],
    process: anyio.abc.Process,
    timeout: float = 5.0,
) -> None:
    """
    Gracefully shutdown a stdio-based server.

    This function performs the following steps:
    1. Closes the stdin stream of the process.
    2. Waits for the process to terminate gracefully.
    3. Sends SIGTERM if the process does not terminate within the timeout.
    4. Sends SIGKILL if the process does not terminate after SIGTERM.
    5. Logs each step and ensures cleanup in case of errors.

    Args:
        read_stream (Optional[MemoryObjectReceiveStream]): Stream to receive responses.
        write_stream (Optional[MemoryObjectSendStream]): Stream to send requests.
        process (anyio.abc.Process): The server process.
        timeout (float): Time to wait for graceful shutdown and SIGTERM before escalation.

    Note:
        ``read_stream``/``write_stream`` are accepted for interface symmetry
        but are not closed here; only the process stdin and the process itself
        are shut down. ``anyio.fail_after`` raises ``TimeoutError`` on expiry,
        which drives the SIGTERM/SIGKILL escalation below.
    """
    logging.info("Initiating stdio server shutdown")

    try:
        # ensure we have a process
        if process:
            # Step 1: Close the write stream (stdin for the server)
            if process.stdin:
                # close
                await process.stdin.aclose()
                logging.info("Closed stdin stream")

            # Step 2: Wait for the process to terminate gracefully
            with anyio.fail_after(timeout):
                await process.wait()
                logging.info("Process exited normally")
                return

    except TimeoutError:
        logging.warning(
            f"Server did not exit within {timeout} seconds, sending SIGTERM"
        )

        # ensure we have a process
        if process:
            # terminate
            process.terminate()

        try:
            # Step 3: Wait for the process to terminate after SIGTERM
            with anyio.fail_after(timeout):
                await process.wait()
                logging.info("Process exited after SIGTERM")
                return
        except TimeoutError:
            logging.warning("Server did not respond to SIGTERM, sending SIGKILL")

        # ensure we have a process
        if process:
            # kill
            process.kill()

        # Step 4: Wait for the process to terminate after SIGKILL
        # (no timeout here: SIGKILL cannot be caught or ignored on POSIX)
        await process.wait()
        logging.info("Process exited after SIGKILL")

    except Exception as e:
        # Catch unexpected errors during shutdown
        logging.error(f"Unexpected error during stdio server shutdown: {e}")

        if process:
            # kill
            process.kill()

            # wait
            await process.wait()
            logging.info("Process forcibly terminated")
    finally:
        # complete
        logging.info("Stdio server shutdown complete")
90 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/stablediffusionxl/stablediffusionxl-img2img.trainer:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
6 |
7 |
8 |
9 |
10 |
11 |
13 |
15 |
17 |
19 |
21 |
23 |
25 |
27 |
29 |
31 |
33 |
35 |
37 |
39 |
40 |
42 |
43 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/stablevideodiffusion/stablevideodiffusion.py:
--------------------------------------------------------------------------------
1 | import gc
2 | import os
3 | import sys
4 |
5 | sys.path.insert(0, os.path.dirname(__file__))
6 |
7 | from nova_utils.interfaces.server_module import Processor
8 | import torch
9 | from diffusers import StableVideoDiffusionPipeline
10 | import numpy as np
11 | from PIL import Image as PILImage
12 |
13 | # Setting defaults
14 | _default_options = {"model": "stabilityai/stable-video-diffusion-img2vid-xt", "fps": "7", "seed": ""}
15 |
16 |
17 | # TODO: add log infos,
# TODO: add log infos,
class StableVideoDiffusion(Processor):
    """nova-utils Processor that turns a single input image into a short video
    via the Stable Video Diffusion img2vid pipeline."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # User options override the defaults (dict-merge keeps unknown keys).
        self.options = _default_options | self.options
        self.device = None
        self.ds_iter = None
        self.current_session = None

        # IO shortcuts
        self.input = [x for x in self.model_io if x.io_type == "input"]
        self.output = [x for x in self.model_io if x.io_type == "output"]
        self.input = self.input[0]
        self.output = self.output[0]

    def process_data(self, ds_iter) -> dict:
        # NOTE(review): despite the `-> dict` annotation, this returns a numpy
        # array of frames on success and the string "Error" on failure —
        # confirm what callers of process_data expect.

        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.ds_iter = ds_iter
        current_session_name = self.ds_iter.session_names[0]
        self.current_session = self.ds_iter.sessions[current_session_name]['manager']
        input_image = self.current_session.input_data['input_image'].data

        try:
            pipe = StableVideoDiffusionPipeline.from_pretrained(
                self.options["model"], torch_dtype=torch.float16, variant="fp16"
            )
            pipe.enable_model_cpu_offload()

            # Load the conditioning image
            image = PILImage.fromarray(input_image)
            # SVD img2vid expects a 1024x576 conditioning frame.
            image = image.resize((1024, 576))

            # Seed only when the option carries an actual value.
            if self.options["seed"] != "" and self.options["seed"] != " ":
                generator = torch.manual_seed(int(self.options["seed"]))
                frames = pipe(image, decode_chunk_size=8, generator=generator).frames[0]
            else:
                frames = pipe(image, decode_chunk_size=8).frames[0]

            # Free GPU memory eagerly so follow-up jobs don't OOM.
            if torch.cuda.is_available():
                del pipe
                gc.collect()
                torch.cuda.empty_cache()
                torch.cuda.ipc_collect()

            np_video = np.stack([np.asarray(x) for x in frames])
            return np_video


        except Exception as e:
            print(e)
            sys.stdout.flush()
            return "Error"

    def calculate_aspect(self, width: int, height: int):
        """Return the reduced aspect ratio of *width* x *height* as (x, y)."""
        def gcd(a, b):
            """The GCD (greatest common divisor) is the highest number that evenly divides both width and height."""
            return a if b == 0 else gcd(b, a % b)

        r = gcd(width, height)
        x = int(width / r)
        y = int(height / r)

        return x, y

    def to_output(self, data: list):
        """Fill the session's output template with the generated frames and fps metadata."""
        video = self.current_session.output_data_templates['output_video']
        video.data = data
        video.meta_data.sample_rate = int(self.options['fps'])
        video.meta_data.media_type = 'video'

        return self.current_session.output_data_templates
89 |
--------------------------------------------------------------------------------
/nostr_dvm/backends/mlx/modules/stable_diffusion/tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright © 2023 Apple Inc.
2 |
3 | import regex
4 |
5 |
class Tokenizer:
    """A simple port of CLIPTokenizer from https://github.com/huggingface/transformers/ ."""

    def __init__(self, bpe_ranks, vocab):
        # bpe_ranks: (left, right) token-pair -> merge priority (lower merges first)
        # vocab: token string -> integer token id
        self.bpe_ranks = bpe_ranks
        self.vocab = vocab
        self.pat = regex.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            regex.IGNORECASE,
        )

        # Pre-seed the cache so the special tokens are never split by bpe().
        self._cache = {self.bos: self.bos, self.eos: self.eos}

    @property
    def bos(self):
        """Beginning-of-sequence marker string."""
        return "<|startoftext|>"

    @property
    def bos_token(self):
        """Vocabulary id of the BOS marker."""
        return self.vocab[self.bos]

    @property
    def eos(self):
        """End-of-sequence marker string."""
        return "<|endoftext|>"

    @property
    def eos_token(self):
        """Vocabulary id of the EOS marker."""
        return self.vocab[self.eos]

    def bpe(self, text):
        """Split *text* into BPE unigrams by repeatedly applying the best-ranked merge."""
        if text in self._cache:
            return self._cache[text]

        unigrams = list(text[:-1]) + [text[-1] + ""]
        unique_bigrams = set(zip(unigrams, unigrams[1:]))

        if not unique_bigrams:
            return unigrams

        # In every iteration try to merge the two most likely bigrams. If none
        # was merged we are done.
        #
        # Ported from https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/tokenization_clip.py
        while unique_bigrams:
            bigram = min(
                unique_bigrams, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))
            )
            if bigram not in self.bpe_ranks:
                break

            new_unigrams = []
            skip = False
            for a, b in zip(unigrams, unigrams[1:]):
                if skip:
                    skip = False
                    continue

                if (a, b) == bigram:
                    new_unigrams.append(a + b)
                    skip = True

                else:
                    new_unigrams.append(a)

            if not skip:
                new_unigrams.append(b)

            unigrams = new_unigrams
            unique_bigrams = set(zip(unigrams, unigrams[1:]))

        self._cache[text] = unigrams

        return unigrams

    def tokenize(self, text, prepend_bos=True, append_eos=True):
        """Tokenize *text* (or a list of texts) into vocabulary ids."""
        if isinstance(text, list):
            return [self.tokenize(t, prepend_bos, append_eos) for t in text]

        # Lower case cleanup and split according to self.pat. Hugging Face does
        # a much more thorough job here but this should suffice for 95% of
        # cases.
        clean_text = regex.sub(r"\s+", " ", text.lower())
        tokens = regex.findall(self.pat, clean_text)

        # Split the tokens according to the byte-pair merge file
        bpe_tokens = [ti for t in tokens for ti in self.bpe(t)]

        # Map to token ids and return
        tokens = [self.vocab[t] for t in bpe_tokens]
        if prepend_bos:
            tokens = [self.bos_token] + tokens
        if append_eos:
            tokens.append(self.eos_token)

        return tokens
101 |
--------------------------------------------------------------------------------
/tests/mcp/dvm/mcp_test.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | from pathlib import Path
4 |
5 | import dotenv
6 |
7 | from nostr_dvm.framework import DVMFramework
8 | from tests.mcp.dvm.mcpbridge import MCPBridge
9 | from nostr_dvm.utils.admin_utils import AdminConfig
10 | from nostr_dvm.utils.dvmconfig import build_default_config
11 | from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
12 | from nostr_sdk import Tag
13 |
14 |
async def get_tools(config_path, server_names):
    """Query the configured MCP servers and return their advertised tools."""
    return await MCPBridge.list_tools(config_path, server_names)
18 |
19 |
20 |
21 |
def playground(announce=False):
    """Configure and run the MCP bridge test DVM.

    Args:
        announce: when True, (re)broadcast the NIP-89/NIP-65 announcements and
            update the DVM profile on startup.
    """
    framework = DVMFramework()

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = announce
    admin_config.REBROADCAST_NIP65_RELAY_LIST = announce
    admin_config.UPDATE_PROFILE = announce

    name = "MCP Test DVM"
    identifier = "mcp_test"  # Choose a unique identifier in order to get a lnaddress
    dvm_config = build_default_config(identifier)
    dvm_config.DELETE_ANNOUNCEMENT_ON_SHUTDOWN = True

    # MCP CONFIG
    config_path = str(Path.absolute(Path(__file__).parent / "mcp_server_config.json"))
    server_names = ["mcp-crypto-price", "nostr-notes"]

    tools = asyncio.run(get_tools(config_path, server_names))
    # for now get the first connected server only
    if len(tools) == 0:
        print("Couldnt load tools, shutting down.")
        exit()

    # Flatten the per-server tool lists into a single list.
    # (Fixed: the inner loop previously reused the name `tool`, shadowing the
    # outer loop variable.)
    final_tools = []
    for server_entry in tools:
        # Round-trip through JSON to normalize the tool payload to plain dicts.
        server_tools = json.loads(json.dumps(server_entry[1]))["tools"]
        for tool in server_tools:
            final_tools.append(tool)
    print(final_tools)

    # Add NIP89
    nip89info = {
        "name": name,
        "picture": "https://i.nostr.build/er2Vu8DccjfanFLo.png",
        "about": "I'm a MCP Test DVM'",
        "supportsEncryption": True,
        "acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
        "nip90Params": {
        },
        "tools": final_tools
    }

    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["picture"])
    nip89config.CONTENT = json.dumps(nip89info)

    # Discovery tags so clients can find the MCP capability.
    capabilities_tag = Tag.parse(["capabilities", "mcp-1.0"])
    t1_tag = Tag.parse(["t", "mcp"])
    t2_tag = Tag.parse(["t", "bitcoin price"])
    t3_tag = Tag.parse(["t", "bitcoin analysis"])
    nip89config.EXTRA_TAGS = [capabilities_tag, t1_tag, t2_tag, t3_tag]

    options = {
        "config_path": config_path,
        "server_names": server_names
    }

    dvm = MCPBridge(name=name, dvm_config=dvm_config, nip89config=nip89config,
                    admin_config=admin_config, options=options)

    framework.add(dvm)

    framework.run()
95 |
96 |
if __name__ == '__main__':
    env_path = Path('.env')
    if not env_path.is_file():
        # Bootstrap an empty .env so the loader below has a file to read.
        print("Writing new .env file")
        env_path.write_text('')
    if env_path.is_file():
        print(f'loading environment from {env_path.resolve()}')
        dotenv.load_dotenv(env_path, verbose=True, override=True)
    else:
        raise FileNotFoundError(f'.env file not found at {env_path} ')

    playground(announce=True)
110 |
--------------------------------------------------------------------------------
/tests/simplebot.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from nostr_sdk import Client, NostrSigner, Keys, Event, UnsignedEvent, Filter, \
4 | HandleNotification, Timestamp, nip04_decrypt, UnwrappedGift, init_logger, LogLevel, Kind, KindEnum
5 |
6 | from nostr_dvm.utils.nostr_utils import send_nip04_dm
7 |
8 |
async def test():
    """Run a minimal echo bot: subscribe to NIP-04 DMs and NIP-59 gift wraps
    addressed to a fixed demo key and echo messages back, forever."""
    init_logger(LogLevel.DEBUG)

    # sk = SecretKey.from_bech32("nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85")
    # keys = Keys(sk)
    # OR
    # WARNING: hard-coded demo key, publicly known — never reuse for anything real.
    keys = Keys.parse("nsec1ufnus6pju578ste3v90xd5m2decpuzpql2295m3sknqcjzyys9ls0qlc85")

    sk = keys.secret_key()
    pk = keys.public_key()
    print(f"Bot public key: {pk.to_bech32()}")

    client = Client(NostrSigner.keys(keys))

    await client.add_relay("wss://relay.damus.io")
    await client.add_relay("wss://nostr.mom")
    await client.add_relay("wss://nostr.oxtr.dev")
    await client.connect()

    now = Timestamp.now()

    nip04_filter = Filter().pubkey(pk).kind(Kind(KindEnum.ENCRYPTED_DIRECT_MESSAGE())).since(now)
    nip59_filter = Filter().pubkey(pk).kind(Kind((KindEnum.GIFT_WRAP()))).limit(0)
    await client.subscribe(nip04_filter)
    await client.subscribe(nip59_filter)

    class NotificationHandler(HandleNotification):
        async def handle(self, relay_url, subscription_id, event: Event):
            print(f"Received new event from {relay_url}: {event.as_json()}")
            # NOTE(review): compares a u16 kind number against a KindEnum value —
            # verify this equality actually holds in the pinned nostr_sdk version.
            if event.kind().as_u16() == KindEnum.ENCRYPTED_DIRECT_MESSAGE():
                print("Decrypting NIP04 event")
                try:
                    msg = nip04_decrypt(sk, event.author(), event.content())
                    print(f"Received new msg: {msg}")
                    # Echo the decrypted message back to the sender via NIP-04 DM.
                    await send_nip04_dm(client, msg, event.author(), sk)


                except Exception as e:
                    print(f"Error during content NIP04 decryption: {e}")
            elif event.kind().as_enum() == KindEnum.GIFT_WRAP():
                print("Decrypting NIP59 event")
                try:
                    # Extract rumor
                    unwrapped_gift = UnwrappedGift.from_gift_wrap(NostrSigner(keys), event)
                    sender = unwrapped_gift.sender()
                    rumor: UnsignedEvent = unwrapped_gift.rumor()

                    # Check timestamp of rumor
                    if rumor.created_at().as_secs() >= now.as_secs():
                        if rumor.kind().as_enum() == KindEnum.PRIVATE_DIRECT_MESSAGE():
                            msg = rumor.content()
                            print(f"Received new msg [sealed]: {msg}")
                            await client.send_private_msg(sender, f"Echo: {msg}", None)
                        else:
                            print(f"{rumor.as_json()}")
                except Exception as e:
                    print(f"Error during content NIP59 decryption: {e}")

        async def handle_msg(self, relay_url, msg):
            # Intentional no-op; the HandleNotification interface requires it.
            var = None

    #await client.handle_notifications(NotificationHandler())

    # To handle notifications and continue with code execution, use:
    asyncio.create_task(client.handle_notifications(NotificationHandler()))
    # Keep the event loop alive so the notification task keeps running.
    while True:
        print("lol.")
        await asyncio.sleep(5)
77 |
78 |
async def async_input():
    """Placeholder keep-alive loop: print a heartbeat every five seconds, forever."""
    heartbeat_seconds = 5
    while True:
        print("lol")
        await asyncio.sleep(heartbeat_seconds)
83 |
84 |
85 | #async def main():
86 | # await asyncio.gather(asyncio.to_thread(async_input), test())
87 |
if __name__ == "__main__":
    # Run the echo-bot demo until interrupted (test() loops forever).
    asyncio.run(test())
--------------------------------------------------------------------------------
/nostr_dvm/backends/discover/modules/stablediffusionxl/lora.py:
--------------------------------------------------------------------------------
def build_lora_xl(lora, prompt, lora_weight):
    """Wrap *prompt* with the trigger words of a known SDXL LoRA style.

    Args:
        lora: name of the LoRA style (e.g. "voxel", "timburton").
        prompt: user prompt to decorate.
        lora_weight: weight as a string; "" selects the per-style default
            (only observable for "timburton", where it is embedded in the prompt).

    Returns:
        (lora, decorated_prompt, existing_lora) — ``existing_lora`` is True when
        *lora* matched a known style; otherwise the prompt is returned unchanged.
    """
    # Styles that simply wrap the prompt with fixed trigger text: name -> (prefix, suffix).
    affixes = {
        "3drenderstyle": ("3d style, 3d render, ", " "),
        "psychedelicnoir": ("", " >"),
        "wojak": (", ", ", wojak"),
        "dreamarts": (", ", ""),
        "voxel": ("voxel style, ", " "),
        "kru3ger": ("kru3ger_style, ", ""),
        "inkpunk": ("inkpunk style, ", " "),
        "inkscenery": (" ink scenery, ", " "),
        "inkpainting": ("painting style, ", " ,"),
        "pixelart": ("", " (flat shading:1.2), (minimalist:1.4), "),
        "pepe": ("", " , pepe"),
        "bettertext": ("", " ,"),
        "mspaint": ("MSPaint drawing ", ">"),
        "woodfigure": ("", ",woodfigurez,artistic style "),
        "fireelement": ("", ",composed of fire elements, fire element"),
    }

    if lora in affixes:
        prefix, suffix = affixes[lora]
        return lora, prefix + prompt + suffix, True

    if lora == "timburton":
        # BUG FIX: pencil_weight used to be assigned only when lora_weight was
        # empty, so passing an explicit weight raised NameError.
        pencil_weight = "1.15"
        if lora_weight == "":
            lora_weight = "1.27"
        prompt = (prompt + " (hand drawn with pencil" + pencil_weight
                  + "), (tim burton style:" + lora_weight + ")")
        return lora, prompt, True

    # Unknown LoRA: pass the prompt through untouched.
    return lora, prompt, False
99 |
--------------------------------------------------------------------------------
/tests/chat_bot.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import os
4 | import threading
5 | from pathlib import Path
6 |
7 | import dotenv
8 | from duck_chat import ModelType
9 | from nostr_sdk import Keys, Kind
10 |
11 | from nostr_dvm.bot import Bot
12 | from nostr_dvm.framework import DVMFramework
13 | from nostr_dvm.tasks.generic_dvm import GenericDVM
14 | from nostr_dvm.utils.admin_utils import AdminConfig
15 | from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
16 | from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
17 |
18 |
19 |
def playground(announce = False):
    """Start a bot thread plus a DuckDuckGo-AI-backed text-generation DVM.

    Args:
        announce: when True, (re)broadcast NIP-89/NIP-65 announcements and
            update the DVM profile.
    """
    framework = DVMFramework()

    # --- Bot: runs in its own thread and relays chat to the DVM below ---
    identifier = "bot_test"
    bot_config = build_default_config(identifier)
    bot_config.CHATBOT = False
    # Public key (hex) of the DVM the bot forwards requests to.
    bot_config.DVM_KEY = "aa8ab5b774d47e7b29a985dd739cfdcccf93451678bf7977ba1b2e094ecd8b30"

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP65_RELAY_LIST = False
    admin_config.UPDATE_PROFILE = False
    bot_config.RELAY_LIST = ["wss://relay.primal.net", "wss://relay.nostrdvm.com", "wss://nostr.oxtr.dev"]
    x = threading.Thread(target=Bot, args=([bot_config, admin_config]))
    x.start()


    # --- DVM: generic kind-5050 text generation backed by DuckDuckGo chat ---
    kind = 5050
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = announce
    admin_config.REBROADCAST_NIP65_RELAY_LIST = announce
    admin_config.UPDATE_PROFILE = announce

    name = "DuckChat"
    identifier = "duckduckchat"  # Choose a unique identifier in order to get a lnaddress
    dvm_config = build_default_config(identifier)
    dvm_config.KIND = Kind(kind)  # Manually set the Kind Number (see data-vending-machines.org)
    dvm_config.SEND_FEEDBACK_EVENTS = False

    # Add NIP89
    nip89info = {
        "name": name,
        "picture": "https://image.nostr.build/28da676a19841dcfa7dcf7124be6816842d14b84f6046462d2a3f1268fe58d03.png",
        "about": "I'm briding DuckDuckAI'",
        "supportsEncryption": True,
        "acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
        "nip90Params": {
        }
    }

    nip89config = NIP89Config()
    nip89config.KIND = Kind(kind)
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["picture"])
    nip89config.CONTENT = json.dumps(nip89info)

    options = {
        "input": "",
    }

    dvm = GenericDVM(name=name, dvm_config=dvm_config, nip89config=nip89config,
                     admin_config=admin_config, options=options)

    async def process(request_form):
        """Forward the request's input to DuckDuckGo chat (GPT-4o) and return the reply."""
        # pip install -U https://github.com/mrgick/duckduckgo-chat-ai/archive/master.zip

        from duck_chat import DuckChat
        options = dvm.set_options(request_form)
        async with DuckChat(model=ModelType.GPT4o) as chat:
            query = options["input"]
            result = await chat.ask_question(query)
            print(result)
        return result

    dvm.process = process  # overwrite the process function with the above one
    framework.add(dvm)
    #framework.run()
86 |
87 |
88 |
if __name__ == '__main__':
    env_path = Path('.env')
    if not env_path.is_file():
        # Bootstrap an empty .env so the loader below has a file to read.
        print("Writing new .env file")
        env_path.write_text('')
    if env_path.is_file():
        print(f'loading environment from {env_path.resolve()}')
        dotenv.load_dotenv(env_path, verbose=True, override=True)
    else:
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    playground(False)
--------------------------------------------------------------------------------
/examples/ollama_dvm/test_client.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import time
4 | from pathlib import Path
5 | from threading import Thread
6 |
7 | import dotenv
8 | from nostr_sdk import Keys, Client, NostrSigner, Tag, EventBuilder, Filter, HandleNotification, Timestamp, \
9 | nip04_decrypt, Event
10 |
11 | from nostr_dvm.utils.dvmconfig import DVMConfig
12 | from nostr_dvm.utils.nostr_utils import send_event, check_and_set_private_key
13 | from nostr_dvm.utils.definitions import EventDefinitions
14 |
15 |
async def nostr_client_test_llm(prompt):
    """Build, sign and publish a NIP90 text-generation job request.

    Parameters:
        prompt: the text placed in the "i" (input) tag of the request.

    Returns:
        The signed request event serialized as a JSON string.
    """
    keys = Keys.parse(check_and_set_private_key("test_client"))

    i_tag = Tag.parse(["i", prompt, "text"])
    relays_tag = Tag.parse(['relays', "wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
                            "wss://nostr-pub.wellorder.net"])
    # NOTE(review): the alt/content strings mention TTS/audio although this is a
    # text-generation request — presumably copy-pasted from a TTS example;
    # confirm before changing the published strings.
    alt_tag = Tag.parse(["alt", "This is a NIP90 DVM AI task to generate TTSt"])
    event = EventBuilder(EventDefinitions.KIND_NIP90_GENERATE_TEXT, str("Generate an Audio File.")).tags(
        [i_tag, relays_tag, alt_tag]).sign_with_keys(keys)

    relay_list = ["wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
                  "wss://nostr-pub.wellorder.net"]

    client = Client(NostrSigner.keys(keys))

    for relay in relay_list:
        await client.add_relay(relay)
    await client.connect()
    # Fix: instantiate the config — the original passed the DVMConfig class
    # object itself instead of an instance.
    config = DVMConfig()
    await send_event(event, client=client, dvm_config=config)
    return event.as_json()
37 |
async def nostr_client():
    """Run an interactive NIP90 test client.

    Connects to the configured relays, subscribes to DMs/zaps addressed to the
    test key and to public job feedback/result events, fires a single LLM job
    request and then prints every matching incoming event until interrupted.
    """
    keys = Keys.parse(check_and_set_private_key("test_client"))
    sk = keys.secret_key()
    pk = keys.public_key()
    print(f"Nostr Test Client public key: {pk.to_bech32()}, Hex: {pk.to_hex()} ")
    client = Client(NostrSigner.keys(keys))
    dvmconfig = DVMConfig()
    for relay in dvmconfig.RELAY_LIST:
        await client.add_relay(relay)
    await client.connect()

    # Events addressed specifically to us (encrypted DMs and zap receipts).
    dm_zap_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_DM,
                                               EventDefinitions.KIND_ZAP]).since(
        Timestamp.now())
    # Public job result and feedback events.
    dvm_filter = (Filter().kinds([EventDefinitions.KIND_NIP90_RESULT_GENERATE_TEXT,
                                  EventDefinitions.KIND_FEEDBACK]).since(Timestamp.now()))
    await client.subscribe(dm_zap_filter)
    await client.subscribe(dvm_filter)

    await nostr_client_test_llm("Tell me a joke about a purple Ostrich!")
    print("Sending Job Request")

    class NotificationHandler(HandleNotification):
        async def handle(self, relay_url, subscription_id, event: Event):
            print(f"Received new event from {relay_url}: {event.as_json()}")
            # Fix: event.kind() returns a Kind object, so the original direct
            # comparisons against plain ints (7000, 4, 9735) were inconsistent
            # with the .as_u16() range check and may never match in these
            # bindings; compare the numeric value everywhere.
            kind = event.kind().as_u16()
            if kind == 7000:  # NIP90 job feedback
                print("[Nostr Client]: " + event.as_json())
            elif 6000 <= kind <= 6999:  # NIP90 job result (inclusive bounds)
                print("[Nostr Client]: " + event.as_json())
                print("[Nostr Client]: " + event.content())
            elif kind == 4:  # NIP04 encrypted direct message
                dec_text = nip04_decrypt(sk, event.author(), event.content())
                print("[Nostr Client]: " + f"Received new msg: {dec_text}")
            elif kind == 9735:  # zap receipt
                print("[Nostr Client]: " + "Received new zap:")
                print(event.as_json())

        async def handle_msg(self, relay_url, msg):
            return

    asyncio.create_task(client.handle_notifications(NotificationHandler()))
    while True:
        await asyncio.sleep(5.0)
87 |
if __name__ == '__main__':
    # Load the environment, then start the async client loop.
    env_path = Path('.env')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    asyncio.run(nostr_client())
98 |
99 |
--------------------------------------------------------------------------------
/tests/bot.py:
--------------------------------------------------------------------------------
1 | import os
2 | import threading
3 | from pathlib import Path
4 |
5 | import dotenv
6 | from nostr_sdk import Keys
7 |
8 | from nostr_dvm.bot import Bot
9 | from nostr_dvm.framework import DVMFramework
10 | from nostr_dvm.tasks import textextraction_pdf, convert_media, discovery_inactive_follows, translation_google
11 | from nostr_dvm.utils.admin_utils import AdminConfig
12 | from nostr_dvm.utils.definitions import EventDefinitions
13 | from nostr_dvm.utils.dvmconfig import DVMConfig
14 | from nostr_dvm.utils.external_dvm_utils import build_external_dvm
15 | from nostr_dvm.utils.nostr_utils import check_and_set_private_key
16 | from nostr_dvm.utils.output_utils import PostProcessFunctionType
17 | from nostr_dvm.utils.zap_utils import check_and_set_ln_bits_keys
18 |
19 |
def playground():
    """Assemble a bot fronting several DVMs and start everything.

    Builds a shared DVMConfig for the bot (nostr private key plus LNbits
    wallet credentials), registers four locally-built DVMs and one external
    DVM with the framework and/or the bot's SUPPORTED_DVMS list, launches the
    Bot in a background thread, and finally calls framework.run().
    """
    framework = DVMFramework()
    bot_config = DVMConfig()
    identifier = "bot_test"
    bot_config.PRIVATE_KEY = check_and_set_private_key(identifier)
    npub = Keys.parse(bot_config.PRIVATE_KEY).public_key().to_bech32()
    invoice_key, admin_key, wallet_id, lnaddress = check_and_set_ln_bits_keys(identifier, npub)
    bot_config.LN_ADDRESS = lnaddress
    bot_config.LNBITS_INVOICE_KEY = invoice_key
    bot_config.LNBITS_ADMIN_KEY = admin_key  # The dvm might pay failed jobs back
    bot_config.LNBITS_URL = os.getenv("LNBITS_HOST")

    admin_config = AdminConfig()

    pdfextractor = textextraction_pdf.build_example("PDF Extractor", "pdf_extractor", admin_config)
    # If we don't add it to the bot, the bot will not provide access to the DVM
    framework.add(pdfextractor)
    bot_config.SUPPORTED_DVMS.append(pdfextractor)  # We add the pdf extractor to the bot

    # External DVM (runs elsewhere) — registered with the bot only, so the bot
    # can relay jobs to it; there is nothing local for the framework to run.
    ymhm_external = build_external_dvm(pubkey="58c52fdca7593dffea63ba6f758779d8251c6732f54e9dc0e56d7a1afe1bb1b6",
                                       task="wot-discovery",
                                       kind=EventDefinitions.KIND_NIP90_PEOPLE_DISCOVERY,
                                       fix_cost=0, per_unit_cost=0, config=bot_config,
                                       external_post_process=PostProcessFunctionType.NONE)

    bot_config.SUPPORTED_DVMS.append(ymhm_external)

    admin_config_media = AdminConfig()
    admin_config_media.UPDATE_PROFILE = True
    admin_config_media.REBROADCAST_NIP65_RELAY_LIST = True
    media_bringer = convert_media.build_example("Nostr AI DVM Media Converter",
                                                "media_converter", admin_config_media)
    bot_config.SUPPORTED_DVMS.append(media_bringer)
    framework.add(media_bringer)

    admin_config_followers = AdminConfig()
    admin_config_followers.UPDATE_PROFILE = True
    admin_config_followers.REBROADCAST_NIP65_RELAY_LIST = True
    discover_inactive = discovery_inactive_follows.build_example("Those who left",
                                                                 "discovery_inactive_follows", admin_config_followers)
    bot_config.SUPPORTED_DVMS.append(discover_inactive)
    framework.add(discover_inactive)

    admin_config_google = AdminConfig()
    admin_config_google.UPDATE_PROFILE = True
    admin_config_google.REBROADCAST_NIP65_RELAY_LIST = True

    translator = translation_google.build_example("NostrAI DVM Translator", "google_translator", admin_config_google)
    bot_config.SUPPORTED_DVMS.append(translator)  # We add translator to the bot
    framework.add(translator)

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP65_RELAY_LIST = True
    admin_config.UPDATE_PROFILE = True
    # NOTE(review): args is a list here; Thread unpacks any iterable, so
    # Bot(bot_config, admin_config) is called as intended — a plain tuple
    # would be the conventional spelling.
    x = threading.Thread(target=Bot, args=([bot_config, admin_config]))
    x.start()

    # Blocking call: runs all DVMs registered with the framework.
    framework.run()
81 |
82 |
83 |
84 |
85 |
if __name__ == '__main__':
    # Make sure a .env file exists (create an empty one on first run), load
    # it, then start the playground.
    env_path = Path('.env')
    if not env_path.is_file():
        with open('.env', 'w') as f:
            print("Writing new .env file")
            f.write('')
    if not env_path.is_file():
        # The file still doesn't exist — creation failed, so bail out loudly.
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)
    playground()
--------------------------------------------------------------------------------
/examples/unleashed_dvm/test_client.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import time
4 | from datetime import timedelta
5 | from pathlib import Path
6 | from threading import Thread
7 |
8 | import dotenv
9 | from nostr_sdk import Keys, Client, Tag, EventBuilder, Filter, HandleNotification, Timestamp, nip04_decrypt, \
10 | NostrSigner, Options, Event
11 |
12 | from nostr_dvm.utils.dvmconfig import DVMConfig
13 | from nostr_dvm.utils.nostr_utils import send_event, check_and_set_private_key
14 | from nostr_dvm.utils.definitions import EventDefinitions
15 |
16 |
async def nostr_client_test(prompt):
    """Build, sign and publish a NIP90 text-generation job request.

    Parameters:
        prompt: the text placed in the "i" (input) tag of the request.

    Returns:
        The signed request event serialized as a JSON string.
    """
    keys = Keys.parse(check_and_set_private_key("test_client"))

    i_tag = Tag.parse(["i", prompt, "text"])

    relays_tag = Tag.parse(['relays', "wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
                            "wss://nostr-pub.wellorder.net"])
    # NOTE(review): alt says TTS although this is a text-generation request —
    # presumably copied from a TTS example; confirm before changing the string.
    alt_tag = Tag.parse(["alt", "This is a NIP90 DVM AI task to generate TTS"])
    event = EventBuilder(EventDefinitions.KIND_NIP90_GENERATE_TEXT, str("Answer to prompt")).tags(
        [i_tag, relays_tag, alt_tag]).sign_with_keys(keys)

    relay_list = ["wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
                  "wss://nostr-pub.wellorder.net"]

    client = Client(NostrSigner.keys(keys))
    for relay in relay_list:
        await client.add_relay(relay)
    await client.connect()
    # Fix: instantiate the config — the original passed the DVMConfig class
    # object itself instead of an instance.
    config = DVMConfig()
    await send_event(event, client=client, dvm_config=config)
    return event.as_json()
39 |
async def nostr_client():
    """Run an interactive NIP90 test client.

    Connects to the configured relays, subscribes to DMs/zaps addressed to the
    test key and to public job feedback/result events, fires a single job
    request and then prints every matching incoming event until interrupted.
    """
    keys = Keys.parse(check_and_set_private_key("test_client"))
    sk = keys.secret_key()
    pk = keys.public_key()
    print(f"Nostr Test Client public key: {pk.to_bech32()}, Hex: {pk.to_hex()} ")
    client = Client(NostrSigner.keys(keys))

    dvmconfig = DVMConfig()
    for relay in dvmconfig.RELAY_LIST:
        await client.add_relay(relay)
    await client.connect()

    # Events addressed specifically to us (encrypted DMs and zap receipts).
    dm_zap_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_DM,
                                               EventDefinitions.KIND_ZAP]).since(
        Timestamp.now())
    # Public job result and feedback events.
    dvm_filter = (Filter().kinds([EventDefinitions.KIND_NIP90_RESULT_GENERATE_TEXT,
                                  EventDefinitions.KIND_FEEDBACK]).since(Timestamp.now()))
    await client.subscribe(dm_zap_filter)
    await client.subscribe(dvm_filter)

    await nostr_client_test("What is Gigi talking about recently?")
    print("Sending Job Request")

    class NotificationHandler(HandleNotification):
        async def handle(self, relay_url, subscription_id, event: Event):
            print(f"Received new event from {relay_url}: {event.as_json()}")
            # Fix: event.kind() returns a Kind object, so the original direct
            # comparisons against plain ints (7000, 4, 9735) were inconsistent
            # with the .as_u16() range check and may never match in these
            # bindings; compare the numeric value everywhere.
            kind = event.kind().as_u16()
            if kind == 7000:  # NIP90 job feedback
                print("[Nostr Client]: " + event.as_json())
            elif 6000 <= kind <= 6999:  # NIP90 job result (inclusive bounds)
                print("[Nostr Client " + event.author().to_bech32() + "]: " + event.as_json())
                print("[Nostr Client " + event.author().to_bech32() + "]: " + event.content())
            elif kind == 4:  # NIP04 encrypted direct message
                dec_text = nip04_decrypt(sk, event.author(), event.content())
                print("[Nostr Client]: " + f"Received new msg: {dec_text}")
            elif kind == 9735:  # zap receipt
                print("[Nostr Client]: " + "Received new zap:")
                print(event.as_json())

        async def handle_msg(self, relay_url, msg):
            return

    asyncio.create_task(client.handle_notifications(NotificationHandler()))

    while True:
        await asyncio.sleep(1)
91 |
92 |
if __name__ == '__main__':
    # Load the environment, then start the async client loop.
    env_path = Path('.env')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    asyncio.run(nostr_client())
--------------------------------------------------------------------------------