├── .chainlit ├── config.toml └── translations │ └── en-US.json ├── .dockerignore ├── .env.example ├── .gitattributes ├── .github └── workflows │ ├── build-docker-images.yml │ ├── ci.yml │ └── run_test_and_gen_report.yml ├── .gitignore ├── .gitmodules ├── .pre-commit-config.yaml ├── Dockerfile ├── LICENSE ├── README.md ├── docker-compose.yml ├── main.py ├── omniagent ├── __init__.py ├── agents │ ├── __init__.py │ ├── agent_factory.py │ ├── asset_management.py │ ├── block_explore.py │ ├── fallback.py │ ├── feed_explore.py │ ├── market_analysis.py │ └── research_analyst.py ├── app.py ├── conf │ ├── __init__.py │ ├── env.py │ └── llm_provider.py ├── db │ ├── __init__.py │ ├── database.py │ └── models.py ├── executors │ ├── __init__.py │ ├── block_stat_executor.py │ ├── coin_market_executor.py │ ├── defi_executor.py │ ├── feed_executor.py │ ├── feed_prompt.py │ ├── feed_source_executor.py │ ├── funding_rate_executor.py │ ├── nft_balance_executor.py │ ├── nft_rank_executor.py │ ├── price_executor.py │ ├── project_executor.py │ ├── search_executor.py │ ├── swap_executor.py │ ├── tg_news_executor.py │ ├── tg_util.py │ ├── token_balance_executor.py │ ├── token_util.py │ └── transfer_executor.py ├── index │ ├── __init__.py │ ├── feed_indexing.py │ ├── feed_scrape.py │ └── pgvector_store.py ├── router │ ├── __init__.py │ ├── health.py │ ├── openai.py │ └── widget.py ├── ui │ ├── __init__.py │ ├── app.py │ └── profile.py └── workflows │ ├── __init__.py │ ├── member.py │ ├── supervisor_chain.py │ └── workflow.py ├── poetry.lock ├── pyproject.toml ├── tests ├── README.md ├── __init__.py ├── agent_trajectory │ ├── __init__.py │ ├── asset_management.py │ ├── block_explore.py │ ├── feed_explore.py │ ├── market_analysis.py │ └── research_analyst.py ├── compatible-models.mdx ├── conftest.py ├── gen_benchmark_html_report.py ├── generate_benchmark_report.py ├── openai-api │ ├── __init__.py │ └── example.sh ├── run_test.py ├── supervisor_chain.py └── templates │ ├── benchmark.html.j2 │ 
└── compatible-models.mdx.j2 └── widget ├── .gitignore ├── index.html ├── package.json ├── src ├── App.tsx ├── components │ ├── PriceChart.tsx │ ├── Swap.tsx │ ├── TransferWidget.module.css │ └── TransferWidget.tsx ├── custom.d.ts ├── main.tsx └── vite-env.d.ts ├── tsconfig.json ├── tsconfig.node.json ├── vite.config.ts └── yarn.lock /.chainlit/config.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | # Whether to enable telemetry (default: true). No personal data is collected. 3 | enable_telemetry = true 4 | 5 | 6 | # List of environment variables to be provided by each user to use the app. 7 | user_env = [] 8 | 9 | # Duration (in seconds) during which the session is saved when the connection is lost 10 | session_timeout = 3600 11 | 12 | # Enable third parties caching (e.g LangChain cache) 13 | cache = false 14 | 15 | # Authorized origins 16 | allow_origins = ["*"] 17 | 18 | # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317) 19 | # follow_symlink = false 20 | 21 | [features] 22 | # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript) 23 | unsafe_allow_html = true 24 | 25 | # Process and display mathematical expressions. This can clash with "$" characters in messages. 26 | latex = false 27 | 28 | # Automatically tag threads with the current chat profile (if a chat profile is used) 29 | auto_tag_thread = true 30 | 31 | # Authorize users to spontaneously upload files with messages 32 | [features.spontaneous_file_upload] 33 | enabled = true 34 | accept = ["*/*"] 35 | max_files = 20 36 | max_size_mb = 500 37 | 38 | [features.audio] 39 | # Threshold for audio recording 40 | min_decibels = -45 41 | # Delay for the user to start speaking in MS 42 | initial_silence_timeout = 3000 43 | # Delay for the user to continue speaking in MS. 
If the user stops speaking for this duration, the recording will stop. 44 | silence_timeout = 1500 45 | # Above this duration (MS), the recording will forcefully stop. 46 | max_duration = 15000 47 | # Duration of the audio chunks in MS 48 | chunk_duration = 1000 49 | # Sample rate of the audio 50 | sample_rate = 44100 51 | 52 | [UI] 53 | # Name of the assistant. 54 | name = "OmniAgent" 55 | 56 | # Description of the assistant. This is used for HTML tags. 57 | description = "OmniAgent is a conversational AI assistant that can help you with your web3 queries." 58 | 59 | # Large size content are by default collapsed for a cleaner ui 60 | default_collapse_content = true 61 | 62 | # Hide the chain of thought details from the user in the UI. 63 | hide_cot = false 64 | 65 | # Link to your github repo. This will add a github button in the UI's header. 66 | # github = "" 67 | 68 | # Specify a CSS file that can be used to customize the user interface. 69 | # The CSS file can be served from the public directory or via an external link. 70 | # custom_css = "/public/test.css" 71 | custom_css = "/public/index.css" 72 | 73 | # Specify a Javascript file that can be used to customize the user interface. 74 | # The Javascript file can be served from the public directory. 75 | # custom_js = "/public/test.js" 76 | 77 | # Specify a custom font url. 78 | # custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap" 79 | 80 | # Specify a custom meta image url. 81 | # custom_meta_image_url = "https://chainlit-cloud.s3.eu-west-3.amazonaws.com/logo/chainlit_banner.png" 82 | 83 | # Specify a custom build directory for the frontend. 84 | # This can be used to customize the frontend code. 85 | # Be careful: If this is a relative path, it should not start with a slash. 86 | # custom_build = "./public/build" 87 | 88 | [UI.theme] 89 | default = "dark" 90 | #layout = "wide" 91 | #font_family = "Inter, sans-serif" 92 | # Override default MUI light theme. 
(Check theme.ts) 93 | [UI.theme.light] 94 | #background = "#FAFAFA" 95 | #paper = "#FFFFFF" 96 | 97 | [UI.theme.light.primary] 98 | #main = "#F80061" 99 | #dark = "#980039" 100 | #light = "#FFE7EB" 101 | [UI.theme.light.text] 102 | #primary = "#212121" 103 | #secondary = "#616161" 104 | 105 | # Override default MUI dark theme. (Check theme.ts) 106 | [UI.theme.dark] 107 | #background = "#FAFAFA" 108 | #paper = "#FFFFFF" 109 | 110 | [UI.theme.dark.primary] 111 | #main = "#F80061" 112 | #dark = "#980039" 113 | #light = "#FFE7EB" 114 | [UI.theme.dark.text] 115 | #primary = "#EEEEEE" 116 | #secondary = "#BDBDBD" 117 | 118 | [meta] 119 | generated_by = "1.1.305" 120 | -------------------------------------------------------------------------------- /.chainlit/translations/en-US.json: -------------------------------------------------------------------------------- 1 | { 2 | "components": { 3 | "atoms": { 4 | "buttons": { 5 | "userButton": { 6 | "menu": { 7 | "APIKeys": "API Keys", 8 | "logout": "Logout", 9 | "settings": "Settings", 10 | "settingsKey": "S" 11 | } 12 | } 13 | } 14 | }, 15 | "molecules": { 16 | "attachments": { 17 | "cancelUpload": "Cancel upload", 18 | "removeAttachment": "Remove attachment" 19 | }, 20 | "auth": { 21 | "authForgotPassword": { 22 | "continue": "Continue", 23 | "email": "Email address", 24 | "emailRequired": "email is a required field", 25 | "emailSent": "Please check the email address {{email}} for instructions to reset your password.", 26 | "enterEmail": "Enter your email address and we will send you instructions to reset your password.", 27 | "goBack": "Go Back", 28 | "resendEmail": "Resend email" 29 | }, 30 | "authLogin": { 31 | "error": { 32 | "callback": "Try signing in with a different account.", 33 | "credentialssignin": "Sign in failed. 
Check the details you provided are correct.", 34 | "default": "Unable to sign in.", 35 | "emailcreateaccount": "Try signing in with a different account.", 36 | "emailsignin": "The e-mail could not be sent.", 37 | "emailverify": "Please verify your email, a new email has been sent.", 38 | "oauthaccountnotlinked": "To confirm your identity, sign in with the same account you used originally.", 39 | "oauthcallbackerror": "Try signing in with a different account.", 40 | "oauthcreateaccount": "Try signing in with a different account.", 41 | "oauthsignin": "Try signing in with a different account.", 42 | "redirect_uri_mismatch": "The redirect URI is not matching the oauth app configuration.", 43 | "sessionrequired": "Please sign in to access this page.", 44 | "signin": "Try signing in with a different account." 45 | }, 46 | "form": { 47 | "alreadyHaveAccount": "Already have an account?", 48 | "continue": "Continue", 49 | "email": "Email address", 50 | "emailRequired": "email is a required field", 51 | "forgotPassword": "Forgot password?", 52 | "noAccount": "Don't have an account?", 53 | "or": "OR", 54 | "password": "Password", 55 | "passwordMustContain": "Your password must contain:", 56 | "passwordRequired": "password is a required field", 57 | "signin": "Sign In", 58 | "signup": "Sign Up" 59 | }, 60 | "title": "Login to access the app." 61 | }, 62 | "authResetPassword": { 63 | "confirmPassword": "Confirm password", 64 | "confirmPasswordRequired": "Confirm password is a required field", 65 | "newPassword": "New password", 66 | "newPasswordRequired": "New password is a required field", 67 | "passwordsMustMatch": "Passwords must match", 68 | "resetPassword": "Reset Password" 69 | }, 70 | "authVerifyEmail": { 71 | "almostThere": "You're almost there! 
We've sent an email to ", 72 | "didNotReceive": "Can't find the email?", 73 | "emailSent": "Email sent successfully.", 74 | "goBack": "Go Back", 75 | "resendEmail": "Resend email", 76 | "verifyEmail": "Verify your email address", 77 | "verifyEmailLink": "Please click on the link in that email to complete your signup." 78 | }, 79 | "providerButton": { 80 | "continue": "Continue with {{provider}}", 81 | "signup": "Sign up with {{provider}}" 82 | } 83 | }, 84 | "detailsButton": { 85 | "used": "Used", 86 | "using": "Using" 87 | }, 88 | "newChatButton": { 89 | "newChat": "New Chat" 90 | }, 91 | "newChatDialog": { 92 | "cancel": "Cancel", 93 | "clearChat": "This will clear the current messages and start a new chat.", 94 | "confirm": "Confirm", 95 | "createNewChat": "Create new chat?" 96 | }, 97 | "settingsModal": { 98 | "darkMode": "Dark Mode", 99 | "expandMessages": "Expand Messages", 100 | "hideChainOfThought": "Hide Chain of Thought", 101 | "settings": "Settings" 102 | }, 103 | "tasklist": { 104 | "TaskList": { 105 | "error": "An error occurred", 106 | "loading": "Loading...", 107 | "title": "\ud83d\uddd2\ufe0f Task List" 108 | } 109 | } 110 | }, 111 | "organisms": { 112 | "chat": { 113 | "Messages": { 114 | "index": { 115 | "executedSuccessfully": "executed successfully", 116 | "failed": "failed", 117 | "feedbackUpdated": "Feedback updated", 118 | "running": "Running", 119 | "updating": "Updating" 120 | } 121 | }, 122 | "dropScreen": { 123 | "dropYourFilesHere": "Drop your files here" 124 | }, 125 | "history": { 126 | "index": { 127 | "lastInputs": "Last Inputs", 128 | "loading": "Loading...", 129 | "noInputs": "Such empty...", 130 | "showHistory": "Show history" 131 | } 132 | }, 133 | "index": { 134 | "cancelledUploadOf": "Cancelled upload of", 135 | "continuingChat": "Continuing previous chat", 136 | "couldNotReachServer": "Could not reach the server", 137 | "failedToUpload": "Failed to upload" 138 | }, 139 | "inputBox": { 140 | "SubmitButton": { 141 | 
"sendMessage": "Send message", 142 | "stopTask": "Stop Task" 143 | }, 144 | "UploadButton": { 145 | "attachFiles": "Attach files" 146 | }, 147 | "input": { 148 | "placeholder": "Type your message here..." 149 | }, 150 | "speechButton": { 151 | "start": "Start recording", 152 | "stop": "Stop recording" 153 | }, 154 | "waterMark": { 155 | "text": "Built with" 156 | } 157 | }, 158 | "settings": { 159 | "cancel": "Cancel", 160 | "confirm": "Confirm", 161 | "reset": "Reset", 162 | "settingsPanel": "Settings panel" 163 | } 164 | }, 165 | "header": { 166 | "chat": "Chat", 167 | "readme": "Readme" 168 | }, 169 | "threadHistory": { 170 | "Thread": { 171 | "backToChat": "Go back to chat", 172 | "chatCreatedOn": "This chat was created on" 173 | }, 174 | "sidebar": { 175 | "DeleteThreadButton": { 176 | "cancel": "Cancel", 177 | "chatDeleted": "Chat deleted", 178 | "confirm": "Confirm", 179 | "confirmMessage": "This will delete the thread as well as it's messages and elements.", 180 | "deletingChat": "Deleting chat" 181 | }, 182 | "ThreadList": { 183 | "empty": "Empty...", 184 | "previous30days": "Previous 30 days", 185 | "previous7days": "Previous 7 days", 186 | "today": "Today", 187 | "yesterday": "Yesterday" 188 | }, 189 | "TriggerButton": { 190 | "closeSidebar": "Close sidebar", 191 | "openSidebar": "Open sidebar" 192 | }, 193 | "filters": { 194 | "FeedbackSelect": { 195 | "feedbackAll": "Feedback: All", 196 | "feedbackNegative": "Feedback: Negative", 197 | "feedbackPositive": "Feedback: Positive" 198 | }, 199 | "SearchBar": { 200 | "search": "Search" 201 | } 202 | }, 203 | "index": { 204 | "pastChats": "Past Chats" 205 | } 206 | } 207 | } 208 | } 209 | }, 210 | "hooks": { 211 | "useLLMProviders": { 212 | "failedToFetchProviders": "Failed to fetch providers:" 213 | } 214 | }, 215 | "pages": { 216 | "Design": {}, 217 | "Env": { 218 | "requiredApiKeys": "Required API Keys", 219 | "requiredApiKeysInfo": "To use this app, the following API keys are required. 
The keys are stored on your device's local storage.", 220 | "savedSuccessfully": "Saved successfully" 221 | }, 222 | "Page": { 223 | "notPartOfProject": "You are not part of this project." 224 | }, 225 | "ResumeButton": { 226 | "resumeChat": "Resume Chat" 227 | } 228 | } 229 | } 230 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | widget/node_modules 2 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Database settings 2 | DB_CONNECTION=postgresql+psycopg://postgres:password@vector_db:5432/omniagent 3 | 4 | # LLM provider settings (at least one required) 5 | # To get a Google Cloud Vertex project ID, visit: https://console.cloud.google.com/vertex-ai 6 | VERTEX_PROJECT_ID= 7 | # To get an OpenAI API key, sign up at: https://platform.openai.com/signup 8 | OPENAI_API_KEY= 9 | # To get an ANTHROPIC_API_KEY, visit: https://www.anthropic.com 10 | ANTHROPIC_API_KEY= 11 | # To get a Google Gemini API key, visit: https://ai.google.dev 12 | GOOGLE_GEMINI_API_KEY= 13 | # For Ollama, download and install from: https://github.com/ollama/ollama 14 | OLLAMA_HOST=http://ollama:11434 15 | 16 | # Optional API keys for additional features 17 | # Get your Tavily API key at: https://www.tavily.com/ 18 | TAVILY_API_KEY= 19 | # Get your Moralis API key at: https://moralis.io/ 20 | MORALIS_API_KEY= 21 | # Register for a RootData API key at: https://www.rootdata.com/ 22 | ROOTDATA_API_KEY= 23 | # Sign up for a CoinGecko API key at: https://www.coingecko.com/en/api/pricing 24 | COINGECKO_API_KEY= 25 | # RSS3 API URLs (default values provided, change if needed) 26 | RSS3_DATA_API=https://gi.vividgen.me 27 | RSS3_SEARCH_API=https://devnet.vividgen.me/search 28 | 29 | # Chainlit OAuth settings (all fields must be set if using 
OAuth, otherwise leave them empty) 30 | # For Auth0 setup, visit: https://docs.chainlit.io/authentication/oauth 31 | CHAINLIT_AUTH_SECRET= 32 | OAUTH_AUTH0_CLIENT_ID= 33 | OAUTH_AUTH0_CLIENT_SECRET= 34 | OAUTH_AUTH0_DOMAIN= 35 | 36 | #run unit test concurrently 37 | #run unit test with repeat count 38 | #simplify test and report generation 39 | #Change the current model settings 40 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.github/workflows/build-docker-images.yml: -------------------------------------------------------------------------------- 1 | name: build & push docker images 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | tags: 8 | - "*" 9 | pull_request: 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | platform: 18 | - linux/amd64 19 | # - linux/arm64 # Cannot build frontend (install apk packages) within acceptable time 20 | steps: 21 | - name: Prepare 22 | run: | 23 | platform=${{ matrix.platform }} 24 | echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV 25 | - name: Checkout 26 | uses: actions/checkout@v4 27 | - name: Set up Docker Buildx 28 | uses: docker/setup-buildx-action@v3 29 | with: 30 | platforms: ${{ matrix.platform }} 31 | - name: Login to DockerHub 32 | uses: docker/login-action@v3 33 | with: 34 | username: ${{ secrets.DOCKERHUB_USERNAME }} 35 | password: ${{ secrets.DOCKERHUB_TOKEN }} 36 | - name: Docker meta 37 | id: meta 38 | uses: docker/metadata-action@v5 39 | with: 40 | images: rss3/omniagent 41 | - name: Build and push by digest 42 | id: build 43 | uses: docker/build-push-action@v5 44 | with: 45 | context: . 
46 | platforms: ${{ matrix.platform }} 47 | labels: ${{ steps.meta.outputs.labels }} 48 | outputs: type=image,name=rss3/omniagent,push-by-digest=true,name-canonical=true,push=true 49 | - name: Export digest 50 | run: | 51 | mkdir -p /tmp/digests 52 | digest="${{ steps.build.outputs.digest }}" 53 | touch "/tmp/digests/${digest#sha256:}" 54 | - name: Upload digest 55 | uses: actions/upload-artifact@v4 56 | with: 57 | name: digests-${{ env.PLATFORM_PAIR }} 58 | path: /tmp/digests/* 59 | if-no-files-found: error 60 | retention-days: 1 61 | 62 | merge: 63 | runs-on: ubuntu-latest 64 | permissions: 65 | contents: read 66 | packages: write 67 | id-token: write 68 | needs: 69 | - build 70 | steps: 71 | - name: Download digests 72 | uses: actions/download-artifact@v4 73 | with: 74 | path: /tmp/digests 75 | pattern: digests-* 76 | merge-multiple: true 77 | - name: Set up Docker Buildx 78 | uses: docker/setup-buildx-action@v3 79 | 80 | - name: Log in to the Container registry 81 | uses: docker/login-action@v3 82 | with: 83 | registry: ghcr.io 84 | username: ${{ github.actor }} 85 | password: ${{ github.token }} 86 | 87 | - name: Login to DockerHub 88 | uses: docker/login-action@v3 89 | with: 90 | username: ${{ secrets.DOCKERHUB_USERNAME }} 91 | password: ${{ secrets.DOCKERHUB_TOKEN }} 92 | 93 | - name: Extract metadata (tags, labels) for Docker 94 | id: meta 95 | uses: docker/metadata-action@v5 96 | with: 97 | images: | 98 | rss3/omniagent 99 | ghcr.io/${{ github.repository }} 100 | tags: | 101 | type=raw,value=latest,enable={{is_default_branch}} 102 | type=raw,value=${{ matrix.arch }},enable={{is_default_branch}} 103 | type=ref,event=tag 104 | type=ref,event=branch 105 | type=ref,event=pr 106 | type=sha,prefix={{branch}}-,enable=${{ !startsWith(github.ref, 'refs/tags') && github.event_name != 'pull_request' }},event=branch 107 | 108 | - name: Create manifest list and push 109 | working-directory: /tmp/digests 110 | run: | 111 | if [ ${{ github.event_name }} == 'pull_request' 
]; then 112 | ARGS="--dry-run" 113 | fi 114 | docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ 115 | $(printf 'rss3/omniagent@sha256:%s ' *) $ARGS 116 | - name: Inspect image 117 | if: github.event_name != 'pull_request' 118 | run: | 119 | docker buildx imagetools inspect rss3/omniagent:${{ steps.meta.outputs.version }} 120 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: backend code quality 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - "main" 8 | - "prod" 9 | 10 | jobs: 11 | pre-commit: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v3 15 | - uses: actions/setup-python@v3 16 | - run: pip install flake8 mypy pydantic types-requests types-redis ruff 17 | - uses: pre-commit/action@v3.0.0 18 | with: 19 | extra_args: --files ./src/* 20 | -------------------------------------------------------------------------------- /.github/workflows/run_test_and_gen_report.yml: -------------------------------------------------------------------------------- 1 | name: Test and generate report 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*.*.*' 7 | workflow_dispatch: 8 | 9 | # Add permissions configuration 10 | permissions: 11 | contents: write 12 | 13 | jobs: 14 | test: 15 | runs-on: ubuntu-latest 16 | 17 | services: 18 | postgres: 19 | image: postgres:latest 20 | env: 21 | POSTGRES_USER: postgres 22 | POSTGRES_PASSWORD: password 23 | POSTGRES_DB: omniagent 24 | ports: 25 | - 5432:5432 26 | options: >- 27 | --health-cmd pg_isready 28 | --health-interval 10s 29 | --health-timeout 5s 30 | --health-retries 5 31 | 32 | steps: 33 | - uses: actions/checkout@v4 34 | with: 35 | fetch-depth: 0 # Fetch all history and tags 36 | 37 | - name: Set up Python 38 | uses: actions/setup-python@v4 39 | with: 40 | python-version: '3.11.5' 41 | 42 | - name: 
Install poetry 43 | run: | 44 | python -m pip install --upgrade pip 45 | pip install poetry 46 | poetry config virtualenvs.create false 47 | 48 | - name: Install dependencies 49 | run: | 50 | sudo apt-get update 51 | sudo apt-get install -y postgresql-client libpq-dev 52 | poetry install 53 | poetry add pytest pytest-xdist 54 | 55 | - name: Run tests 56 | continue-on-error: true 57 | env: 58 | # Database settings 59 | DB_CONNECTION: postgresql+psycopg://postgres:password@localhost:5432/omniagent 60 | 61 | # LLM provider settings 62 | OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} 63 | GOOGLE_GEMINI_API_KEY: ${{ secrets.GOOGLE_GEMINI_API_KEY }} 64 | OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }} 65 | ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} 66 | 67 | # Optional API keys 68 | TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }} 69 | MORALIS_API_KEY: ${{ secrets.MORALIS_API_KEY }} 70 | ROOTDATA_API_KEY: ${{ secrets.ROOTDATA_API_KEY }} 71 | COINGECKO_API_KEY: ${{ secrets.COINGECKO_API_KEY }} 72 | 73 | # RSS3 API URLs 74 | RSS3_DATA_API: https://gi.vividgen.me 75 | RSS3_SEARCH_API: https://devnet.vividgen.me/search 76 | 77 | 78 | run: | 79 | pwd 80 | ls -la 81 | cd tests 82 | poetry run python run_test.py 83 | 84 | - name: Commit and push report 85 | env: 86 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 87 | run: | 88 | # Switch to docs branch 89 | git fetch origin docs || git fetch origin main 90 | git checkout docs || git checkout -b docs 91 | 92 | git config --global user.name 'github-actions[bot]' 93 | git config --global user.email 'github-actions[bot]@users.noreply.github.com' 94 | 95 | # Check if file exists 96 | ls -la tests/compatible-models.mdx || echo "Report file not found!" 
97 | 98 | # Add all changes (including new files) 99 | git add -A 100 | 101 | # Show pending changes 102 | git status 103 | 104 | # Create commit with timestamp 105 | git commit -m "docs: update compatibility test report" || echo "No changes to commit" 106 | 107 | # Push changes to docs branch 108 | git push origin docs 109 | 110 | 111 | 112 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | #map_cache 2 | map_cache_* 3 | 4 | ### VisualStudioCode template 5 | .vscode/* 6 | !.vscode/settings.json 7 | !.vscode/tasks.json 8 | !.vscode/launch.json 9 | !.vscode/extensions.json 10 | !.vscode/*.code-snippets 11 | 12 | # Local History for Visual Studio Code 13 | .history/ 14 | 15 | # Built Visual Studio Code Extensions 16 | *.vsix 17 | 18 | ### JetBrains template 19 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 20 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 21 | 22 | # User-specific stuff 23 | .idea/**/workspace.xml 24 | .idea/**/tasks.xml 25 | .idea/**/usage.statistics.xml 26 | .idea/**/dictionaries 27 | .idea/**/shelf 28 | 29 | # AWS User-specific 30 | .idea/**/aws.xml 31 | 32 | # Generated files 33 | .idea/**/contentModel.xml 34 | 35 | # Sensitive or high-churn files 36 | .idea/**/dataSources/ 37 | .idea/**/dataSources.ids 38 | .idea/**/dataSources.local.xml 39 | .idea/**/sqlDataSources.xml 40 | .idea/**/dynamic.xml 41 | .idea/**/uiDesigner.xml 42 | .idea/**/dbnavigator.xml 43 | 44 | # Gradle 45 | .idea/**/gradle.xml 46 | .idea/**/libraries 47 | 48 | # Gradle and Maven with auto-import 49 | # When using Gradle or Maven with auto-import, you should exclude module files, 50 | # since they will be recreated, and may cause churn. Uncomment if using 51 | # auto-import. 
52 | # .idea/artifacts 53 | # .idea/compiler.xml 54 | # .idea/jarRepositories.xml 55 | # .idea/modules.xml 56 | # .idea/*.iml 57 | # .idea/modules 58 | # *.iml 59 | # *.ipr 60 | 61 | # CMake 62 | cmake-build-*/ 63 | 64 | # Mongo Explorer plugin 65 | .idea/**/mongoSettings.xml 66 | 67 | # File-based project format 68 | *.iws 69 | 70 | # IntelliJ 71 | out/ 72 | 73 | # mpeltonen/sbt-idea plugin 74 | .idea_modules/ 75 | 76 | # JIRA plugin 77 | atlassian-ide-plugin.xml 78 | 79 | # Cursive Clojure plugin 80 | .idea/replstate.xml 81 | 82 | # SonarLint plugin 83 | .idea/sonarlint/ 84 | 85 | # Crashlytics plugin (for Android Studio and IntelliJ) 86 | com_crashlytics_export_strings.xml 87 | crashlytics.properties 88 | crashlytics-build.properties 89 | fabric.properties 90 | 91 | # Editor-based Rest Client 92 | .idea/httpRequests 93 | 94 | # Android studio 3.1+ serialized cache file 95 | .idea/caches/build_file_checksums.ser 96 | 97 | ### dotenv template 98 | .env 99 | 100 | ### Linux template 101 | *~ 102 | 103 | # temporary files which can be created if a process still has a handle open of a deleted file 104 | .fuse_hidden* 105 | 106 | # KDE directory preferences 107 | .directory 108 | 109 | # Linux trash folder which might appear on any partition or disk 110 | .Trash-* 111 | 112 | # .nfs files are created when an open file is removed but is still being accessed 113 | .nfs* 114 | 115 | chainlit.md 116 | ### Python template 117 | # Byte-compiled / optimized / DLL files 118 | __pycache__/ 119 | *.py[cod] 120 | *$py.class 121 | 122 | # C extensions 123 | *.so 124 | 125 | # Distribution / packaging 126 | .Python 127 | build/ 128 | develop-eggs/ 129 | dist/ 130 | downloads/ 131 | eggs/ 132 | .eggs/ 133 | lib/ 134 | lib64/ 135 | parts/ 136 | sdist/ 137 | var/ 138 | wheels/ 139 | share/python-wheels/ 140 | *.egg-info/ 141 | .installed.cfg 142 | *.egg 143 | MANIFEST 144 | 145 | # PyInstaller 146 | # Usually these files are written by a python script from a template 147 | # before 
PyInstaller builds the exe, so as to inject date/other infos into it. 148 | *.manifest 149 | *.spec 150 | 151 | # Installer logs 152 | pip-log.txt 153 | pip-delete-this-directory.txt 154 | 155 | # Unit test / coverage reports 156 | htmlcov/ 157 | .tox/ 158 | .nox/ 159 | .coverage 160 | .coverage.* 161 | .cache 162 | nosetests.xml 163 | coverage.xml 164 | *.cover 165 | *.py,cover 166 | .hypothesis/ 167 | .pytest_cache/ 168 | cover/ 169 | 170 | # Translations 171 | *.mo 172 | *.pot 173 | 174 | # Django stuff: 175 | *.log 176 | local_settings.py 177 | db.sqlite3 178 | db.sqlite3-journal 179 | 180 | # Flask stuff: 181 | instance/ 182 | .webassets-cache 183 | 184 | # Scrapy stuff: 185 | .scrapy 186 | 187 | # Sphinx documentation 188 | docs/_build/ 189 | 190 | # PyBuilder 191 | .pybuilder/ 192 | target/ 193 | 194 | # Jupyter Notebook 195 | .ipynb_checkpoints 196 | 197 | # IPython 198 | profile_default/ 199 | ipython_config.py 200 | 201 | # pyenv 202 | # For a library or package, you might want to ignore these files since the code is 203 | # intended to run in multiple environments; otherwise, check them in: 204 | # .python-version 205 | 206 | # pipenv 207 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 208 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 209 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 210 | # install all needed dependencies. 211 | #Pipfile.lock 212 | 213 | # poetry 214 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 215 | # This is especially recommended for binary packages to ensure reproducibility, and is more 216 | # commonly ignored for libraries. 
217 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 218 | #poetry.lock 219 | 220 | # pdm 221 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 222 | #pdm.lock 223 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 224 | # in version control. 225 | # https://pdm.fming.dev/#use-with-ide 226 | .pdm.toml 227 | 228 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 229 | __pypackages__/ 230 | 231 | # Celery stuff 232 | celerybeat-schedule 233 | celerybeat.pid 234 | 235 | # SageMath parsed files 236 | *.sage.py 237 | 238 | # Environments 239 | .env 240 | .venv 241 | env/ 242 | venv/ 243 | ENV/ 244 | env.bak/ 245 | venv.bak/ 246 | 247 | # Spyder project settings 248 | .spyderproject 249 | .spyproject 250 | 251 | # Rope project settings 252 | .ropeproject 253 | 254 | # mkdocs documentation 255 | /site 256 | 257 | # mypy 258 | .mypy_cache/ 259 | .dmypy.json 260 | dmypy.json 261 | 262 | # Pyre type checker 263 | .pyre/ 264 | 265 | # pytype static type analyzer 266 | .pytype/ 267 | 268 | # Cython debug symbols 269 | cython_debug/ 270 | 271 | # PyCharm 272 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 273 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 274 | # and can be added to the global gitignore or merged into this file. For a more nuclear 275 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
276 | #.idea/ 277 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "contracts/lib/forge-std"] 2 | path = contracts/lib/forge-std 3 | url = https://github.com/foundry-rs/forge-std 4 | [submodule "executor/contracts/lib/forge-std"] 5 | path = executor/contracts/lib/forge-std 6 | url = https://github.com/foundry-rs/forge-std 7 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.4.0 4 | hooks: 5 | - id: check-ast 6 | - id: check-case-conflict 7 | - id: check-docstring-first 8 | - id: check-executables-have-shebangs 9 | - id: check-json 10 | - id: check-added-large-files 11 | - id: pretty-format-json 12 | args: 13 | - "--autofix" 14 | - "--indent=4" 15 | 16 | - id: detect-private-key 17 | - id: debug-statements 18 | - id: end-of-file-fixer 19 | - id: trailing-whitespace 20 | 21 | - repo: local 22 | hooks: 23 | - id: mypy 24 | name: mypy 25 | entry: mypy . 
26 | require_serial: true 27 | language: system 28 | types: [ python ] 29 | pass_filenames: false 30 | args: [ --config-file=pyproject.toml ] 31 | - id: ruff-lint 32 | name: ruff-lint 33 | entry: ruff check --fix 34 | require_serial: true 35 | language: system 36 | types: [ python ] 37 | - id: ruff-format 38 | name: ruff-format 39 | entry: ruff format 40 | require_serial: true 41 | language: system 42 | types: [ python ] 43 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Node stage 2 | FROM node:20.10.0 as builder 3 | WORKDIR /app/widget 4 | COPY widget/package.json widget/yarn.lock ./ 5 | RUN yarn install 6 | COPY widget ./ 7 | RUN yarn run build 8 | 9 | # Python stage 10 | FROM python:3.11.5-slim-bullseye 11 | 12 | WORKDIR /app 13 | 14 | COPY . . 15 | ENV PYTHONPATH=${PYTHONPATH}:${PWD} 16 | RUN pip3 install poetry 17 | RUN poetry config virtualenvs.create false 18 | RUN poetry install 19 | 20 | COPY --from=builder /app/dist /app/dist 21 | 22 | CMD ["python", "main.py"] 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024-present Jowo Rinpoche 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OmniAgent Framework 2 | 3 | [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) 4 | [![GitHub Stars](https://img.shields.io/github/stars/VividGen/OmniAgent.svg)](https://github.com/VividGen/OmniAgent/stargazers) 5 | [![GitHub Issues](https://img.shields.io/github/issues/VividGen/OmniAgent.svg)](https://github.com/VividGen/OmniAgent/issues) 6 | 7 | OmniAgent is an enterprise-grade AI orchestration framework that revolutionizes Web3 development by seamlessly bridging artificial intelligence with blockchain technologies. Build powerful on-chain AI agents in hours instead of months. 
8 | 9 | ## 🚀 Key Features 10 | 11 | - **Modular Architecture**: Three-layer design with Interpreter, Classifier, and specialized Executors 12 | - **Intelligent Task Routing**: Smart classification system powered by Google Gemma and domain-specific models 13 | - **Plug-and-Play Model Integration**: Easy integration with various AI models 14 | - **Cross-Chain Compatibility**: Seamless interaction with multiple blockchain networks 15 | - **Specialized Executors**: 16 | - DeFi Operations 17 | - Token/NFT Management 18 | - Web3 Knowledge Integration 19 | - Social Data Analysis 20 | 21 | ## 🏗️ Architecture 22 | 23 | ``` 24 | ┌─────────────────┐ 25 | │ User Input │ 26 | └────────┬────────┘ 27 | ▼ 28 | ┌─────────────────┐ 29 | │ Interpreter │ ─── Task Understanding & Parameter Extraction 30 | └────────┬────────┘ 31 | ▼ 32 | ┌─────────────────┐ 33 | │ Classifier │ ─── Intelligent Task Routing 34 | └────────┬────────┘ 35 | ▼ 36 | ┌─────────────────┐ 37 | │ Executor │ ─── Specialized Task Execution 38 | └────────┬────────┘ 39 | ▼ 40 | ┌─────────────────┐ 41 | │ Web3 │ ─── Blockchain & Protocol Interaction 42 | └─────────────────┘ 43 | ``` 44 | 45 | ## 🛠️ Installation 46 | 47 | ```bash 48 | # Clone the repository 49 | git clone https://github.com/VividGen/OmniAgent.git 50 | 51 | # Configure environment 52 | cp .env.example .env 53 | 54 | # Start 55 | docker-compose up -d 56 | ``` 57 | 58 | ## 📦 Quick Start 59 | 60 | ```javascript 61 | const { OmniAgent } = require('omniagent'); 62 | 63 | // Initialize OmniAgent 64 | const agent = new OmniAgent({ 65 | model: 'gemma', 66 | executors: ['defi', 'token', 'social'] 67 | }); 68 | 69 | // Execute a task 70 | const result = await agent.execute({ 71 | task: 'Token swap', 72 | params: { 73 | fromToken: 'ETH', 74 | toToken: 'USDC', 75 | amount: '1.0' 76 | } 77 | }); 78 | ``` 79 | 80 | ## 💡 Use Cases 81 | 82 | - **DeFi Operations**: Token swaps, liquidity provision, yield farming 83 | - **Asset Management**: NFT trading, token transfers, 
portfolio analysis 84 | - **Market Intelligence**: Price tracking, trend analysis, social sentiment 85 | - **Cross-Chain Operations**: Bridge transfers, cross-chain swaps 86 | - **Smart Contract Interaction**: Contract deployment, function calls 87 | 88 | ## 🔧 Configuration 89 | 90 | ```javascript 91 | { 92 | "interpreter": { 93 | "model": "gemma", 94 | "temperature": 0.7 }, 95 | "classifier": { 96 | "model": "codegemma", 97 | "threshold": 0.85 98 | }, 99 | "executors": { 100 | "defi": { 101 | "networks": ["ethereum", "polygon"], 102 | "protocols": ["uniswap", "aave"] 103 | }, 104 | "token": { 105 | "supportedTokens": ["ERC20", "ERC721", "ERC1155"] 106 | } 107 | } 108 | } 109 | ``` 110 | 111 | ## 📚 Documentation 112 | 113 | Comprehensive documentation is available at our documentation site. 114 | 115 | ## 🤝 Contributing 116 | 117 | We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details. 118 | 119 | 1. Fork the repository 120 | 2. Create your feature branch (`git checkout -b feature/AmazingFeature`) 121 | 3. Commit your changes (`git commit -m 'Add some AmazingFeature'`) 122 | 4. Push to the branch (`git push origin feature/AmazingFeature`) 123 | 5. Open a Pull Request 124 | 125 | ## 📄 License 126 | 127 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 
128 | 129 | ## 🌟 Acknowledgments 130 | 131 | - Google Gemma and CodeGemma teams for their excellent models 132 | - The Web3 community for continuous support and feedback 133 | - All contributors who have helped shape OmniAgent 134 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | services: 3 | omniagent: 4 | image: rss3/omniagent:latest 5 | container_name: omniagent 6 | ports: 7 | - "18000:8000" 8 | env_file: 9 | - .env 10 | depends_on: 11 | - vector_db 12 | networks: 13 | - omniagent-network 14 | 15 | vector_db: 16 | image: pgvector/pgvector:pg16 17 | container_name: vec_db 18 | restart: unless-stopped 19 | environment: 20 | POSTGRES_USER: postgres 21 | POSTGRES_PASSWORD: password 22 | POSTGRES_DB: omniagent 23 | ports: 24 | - "15432:5432" 25 | volumes: 26 | - vector_data:/var/lib/postgresql/data 27 | networks: 28 | - omniagent-network 29 | 30 | ollama: 31 | volumes: 32 | - ollama_data:/root/.ollama 33 | container_name: ollama 34 | tty: true 35 | restart: unless-stopped 36 | image: ollama/ollama:latest 37 | ports: 38 | - "21434:11434" 39 | environment: 40 | - OLLAMA_KEEP_ALIVE=24h 41 | networks: 42 | - omniagent-network 43 | deploy: 44 | resources: 45 | reservations: 46 | devices: 47 | - driver: nvidia 48 | count: 1 49 | capabilities: [ gpu ] 50 | 51 | volumes: 52 | vector_data: 53 | ollama_data: 54 | networks: 55 | omniagent-network: 56 | external: false 57 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import uvicorn 2 | from dotenv import load_dotenv 3 | from loguru import logger 4 | 5 | if __name__ == "__main__": 6 | load_dotenv() 7 | logger.info("Starting OmniAgent") 8 | uvicorn.run("omniagent.app:app", host="0.0.0.0", reload=False, port=8000) 9 | 
def create_agent(llm: BaseChatModel, tools: list, system_prompt: str):
    """Build a tool-calling AgentExecutor around *llm* with the given tools.

    The prompt places the system instructions first, then the running
    message history, then the scratchpad slot that the agent framework
    fills with intermediate tool-call steps.
    """
    message_layout = [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
    chat_prompt = ChatPromptTemplate.from_messages(message_layout)
    tool_agent = create_tool_calling_agent(llm, tools, chat_prompt)
    return AgentExecutor(agent=tool_agent, tools=tools, verbose=True)
def build_asset_management_agent(llm):
    """Create the AssetManager agent for crypto asset operations.

    Always provides swap and transfer executors; token- and NFT-balance
    executors are added only when a Moralis API key is configured.

    Args:
        llm: Chat model used to drive the agent.

    Returns:
        An AgentExecutor wired with the asset-management system prompt.
    """
    # Balance lookups depend on the Moralis API, so they are optional.
    executors = [SwapExecutor(), TransferExecutor()]
    if settings.MORALIS_API_KEY:
        executors.extend([TokenBalanceExecutor(), NFTBalanceExecutor()])

    asset_management_agent = create_agent(
        llm,
        executors,
        """
You are AssetManager, an AI assistant for crypto asset management. Your responsibilities include:

1. Query and report on users' token balances
2. Check and inform about users' NFT holdings
3. Handle user requests to swap or transfer tokens

Important guidelines for handling requests:
- For token swaps: Always use SwapExecutor with exact token symbols (ETH, USDT, etc.)
- For balance checks: Use TokenBalanceExecutor with chain="eth" (not "ethereum")
- For NFT holdings: Use NFTBalanceExecutor with chain="eth" (not "ethereum")
- For transfers: Use TransferExecutor with exact token symbols

Examples of correct executor usage:
- Swap request: Use SwapExecutor with from_token="ETH", to_token="USDT"
- Balance check: Use TokenBalanceExecutor with chain="eth"
- NFT check: Use NFTBalanceExecutor with chain="eth"
- Transfer: Use TransferExecutor with token="ETH"

When interacting with users:
- Provide accurate and detailed information
- Maintain a friendly and enthusiastic tone
- Use occasional puns or jokes to keep the conversation engaging
- Include relevant emojis to enhance your messages
- For privacy reasons, do not include address information when generating widgets
- Always execute the requested operation using the appropriate executor

Remember to always process user requests immediately using the correct executor with exact parameter values.
        """.strip(),
    )
    return asset_management_agent
18 | 19 | Your role: 20 | 1. Handle general queries and conversations that don't fall under the expertise of other specialized agents. 21 | 2. Clarify unclear requests and provide versatile assistance. 22 | 3. Maintain conversation continuity and guide users to appropriate specialists when necessary. 23 | 24 | Your communication style: 25 | - Be friendly, approachable, and enthusiastic in your responses. 26 | - Use a mix of professional knowledge and casual charm. 27 | - Include relevant puns, jokes, or word plays to keep the conversation lively. 28 | - Sprinkle in emojis occasionally to add personality to your messages. 29 | - Provide detailed answers, but keep them concise and easy to understand. 30 | 31 | Remember: 32 | - If a query seems more suitable for a specialized agent (Market Analyst, Asset Manager, 33 | Block Explorer, or Research Analyst), suggest redirecting the user while still providing a helpful general response. 34 | - Always aim to add value, even if the query is outside your primary expertise. 35 | - When in doubt, ask for clarification to ensure you're addressing the user's needs accurately. 36 | 37 | Let's make every interaction informative, fun, and memorable! 
🚀✨ 38 | """.strip(), 39 | ), 40 | *state["messages"][0:-1], 41 | ("human", "{input}"), 42 | ] 43 | ) 44 | chain = chat_template | llm | StrOutputParser() 45 | return { 46 | "messages": [ 47 | HumanMessage( 48 | content=chain.invoke({"input": state["messages"][-1].content}), 49 | name="fallback", 50 | ) 51 | ] 52 | } 53 | 54 | return fallback 55 | -------------------------------------------------------------------------------- /omniagent/agents/feed_explore.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | from langchain_core.language_models import BaseChatModel 3 | 4 | from omniagent.agents.agent_factory import create_agent 5 | from omniagent.executors.feed_executor import FeedExecutor 6 | from omniagent.executors.tg_news_executor import TelegramNewsExecutor 7 | 8 | load_dotenv() 9 | 10 | FEED_EXPLORER_PROMPT = """You are a blockchain social activity and news assistant. 11 | 12 | You help users explore on-chain social activities and get the latest crypto news from reliable sources. 13 | 14 | You have access to the following tools: 15 | 16 | 1. FeedExecutor: Use this to fetch and analyze social activities of blockchain addresses or ENS names. 17 | - You can fetch different types of activities: "all", "post", "comment", "share" 18 | - For addresses, you can handle both raw addresses (0x...) and ENS names (e.g., vitalik.eth) 19 | - Always explain the activities in a clear, human-readable format 20 | 21 | 2. TelegramNewsExecutor: Use this to get the latest cryptocurrency and blockchain news from trusted Telegram channels. 
def build_feed_explorer_agent(llm: BaseChatModel):
    """Assemble the feed-explorer agent.

    Combines on-chain social-activity lookups (FeedExecutor) with crypto
    news from Telegram channels (TelegramNewsExecutor) under the shared
    FEED_EXPLORER_PROMPT instructions.
    """
    tools = [FeedExecutor(), TelegramNewsExecutor()]
    return create_agent(llm, tools, FEED_EXPLORER_PROMPT)
omniagent.executors.funding_rate_executor import FundingRateExecutor 8 | from omniagent.executors.nft_rank_executor import NFTRankingExecutor 9 | from omniagent.executors.price_executor import PriceExecutor 10 | from omniagent.executors.search_executor import search_executor 11 | 12 | load_dotenv() 13 | 14 | 15 | def build_market_analysis_agent(llm: BaseChatModel): 16 | executors = [search_executor] 17 | if settings.COINGECKO_API_KEY: 18 | executors.extend([PriceExecutor(), CoinMarketExecutor()]) 19 | if settings.MORALIS_API_KEY: 20 | executors.extend([NFTRankingExecutor()]) 21 | return create_agent( 22 | llm, 23 | executors, 24 | """ 25 | You are MarketAnalyst, responsible for providing market data analysis. 26 | Help users understand market dynamics and trends by retrieving real-time price information of tokens. 27 | 28 | For funding rate queries, always use the FundingRateExecutor instead of search. 29 | 30 | Your answer should be detailed and include puns or jokes where possible \ 31 | And keep a lively, enthusiastic, and energetic tone, maybe include some emojis. 
32 | """.strip(), 33 | ) 34 | -------------------------------------------------------------------------------- /omniagent/agents/research_analyst.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | from langchain_core.language_models import BaseChatModel 3 | 4 | from omniagent.agents.agent_factory import create_agent 5 | from omniagent.conf.env import settings 6 | from omniagent.executors.project_executor import ProjectExecutor 7 | from omniagent.executors.search_executor import search_executor 8 | 9 | load_dotenv() 10 | 11 | 12 | def build_research_analyst_agent(llm: BaseChatModel): 13 | executors = [search_executor] 14 | if settings.ROOTDATA_API_KEY: 15 | executors.append(ProjectExecutor()) 16 | 17 | research_analyst_agent = create_agent( 18 | llm, 19 | executors, 20 | """ 21 | You are ResearchAnalyst, responsible for assisting users in conducting research and analysis related to web3 projects. 22 | Provide accurate and detailed information about project progress, team members, market trends, investors, 23 | and other relevant data to support investment decisions. 24 | 25 | Your answer should be detailed and include puns or jokes where possible \ 26 | And keep a lively, enthusiastic, and energetic tone, maybe include some emojis. 
27 | """.strip(), 28 | ) 29 | return research_analyst_agent 30 | -------------------------------------------------------------------------------- /omniagent/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import vertexai 3 | from chainlit.utils import mount_chainlit 4 | from dotenv import load_dotenv 5 | from fastapi import FastAPI, Request 6 | from fastapi.middleware.cors import CORSMiddleware 7 | from fastapi.openapi.utils import get_openapi 8 | from loguru import logger 9 | from starlette.staticfiles import StaticFiles 10 | import traceback 11 | from starlette.responses import JSONResponse 12 | 13 | from omniagent.conf.env import settings 14 | from omniagent.router import openai_router, widget_router, health_router 15 | 16 | load_dotenv() 17 | app = FastAPI( 18 | title="OmniAgent API", 19 | description="OmniAgent is a framework for building AI applications leveraging the power of blockchains.", 20 | license_info={ 21 | "name": "MIT", 22 | "url": "https://github.com/vividgen/OmniAgent/blob/main/LICENSE", 23 | }, 24 | ) 25 | 26 | app.add_middleware( 27 | CORSMiddleware, 28 | allow_origins=["*"], 29 | allow_credentials=True, 30 | allow_methods=["*"], 31 | allow_headers=["*"], 32 | ) 33 | 34 | # Add routers 35 | app.include_router(openai_router) 36 | app.include_router(widget_router) 37 | app.include_router(health_router) 38 | 39 | # Check and create static files directory 40 | static_dir = os.path.join("dist", "static") 41 | if not os.path.exists(static_dir): 42 | try: 43 | os.makedirs(static_dir) 44 | logger.info(f"Created directory: {static_dir}") 45 | except OSError as e: 46 | logger.error(f"Error creating directory {static_dir}: {e}") 47 | 48 | app.mount("/static", StaticFiles(directory=static_dir), name="widget") 49 | 50 | mount_chainlit(app=app, target="omniagent/ui/app.py", path="") 51 | 52 | if settings.VERTEX_PROJECT_ID: 53 | vertexai.init(project=settings.VERTEX_PROJECT_ID) 54 | 55 | 56 | 
def custom_openapi():
    """Return the app's OpenAPI schema, generating and caching it on first use."""
    if not app.openapi_schema:
        schema = get_openapi(
            title="OmniAgent API",
            version="1.0.0",
            description="OmniAgent API documentation",
            routes=app.routes,
        )
        # Advertise the production endpoint in the generated spec.
        schema["servers"] = [
            {
                "url": "https://agent.open.network",
                "description": "Production server",
            }
        ]
        app.openapi_schema = schema
    return app.openapi_schema
Info: https://platform.openai.com") 18 | ANTHROPIC_API_KEY: Optional[str] = Field(default=None, description="Anthropic API Key. Info: https://www.anthropic.com") 19 | GOOGLE_GEMINI_API_KEY: Optional[str] = Field(default=None, description="Google Gemini API Key. Info: https://ai.google.dev") 20 | OLLAMA_HOST: Optional[str] = Field(default=None, description="OLLAMA API Base URL. Info: https://github.com/ollama/ollama") 21 | 22 | # API keys for various tools; some features will be disabled if not set 23 | TAVILY_API_KEY: Optional[str] = Field(default=None, description="Tavily API Key. Info: https://tavily.com/") 24 | MORALIS_API_KEY: Optional[str] = Field(default=None, description="Moralis API Key. Info: https://moralis.io/") 25 | ROOTDATA_API_KEY: Optional[str] = Field(default=None, description="RootData API Key. Info: https://www.rootdata.com/") 26 | COINGECKO_API_KEY: Optional[str] = Field(default=None, description="CoinGecko API Key. Info: https://www.coingecko.com/en/api/pricing") 27 | RSS3_DATA_API: str = Field(default="https://gi.vividgen.me", description="RSS3 Data API URL") 28 | 29 | # Chainlit OAuth settings; either all fields are None or all are set 30 | CHAINLIT_AUTH_SECRET: Optional[str] = Field(default=None, description="Chainlit Auth Secret") 31 | OAUTH_AUTH0_CLIENT_ID: Optional[str] = Field(default=None, description="OAuth Auth0 Client ID") 32 | OAUTH_AUTH0_CLIENT_SECRET: Optional[str] = Field(default=None, description="OAuth Auth0 Client Secret") 33 | OAUTH_AUTH0_DOMAIN: Optional[str] = Field(default=None, description="OAuth Auth0 Domain") 34 | 35 | 36 | settings = Settings() 37 | -------------------------------------------------------------------------------- /omniagent/conf/llm_provider.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | 3 | import ollama 4 | from langchain_anthropic import ChatAnthropic 5 | from langchain_core.language_models import BaseChatModel 6 | from 
@memoize
def get_available_ollama_providers() -> List[str]:
    """Return the supported Ollama models currently installed locally.

    Queries the local Ollama daemon and keeps only models listed in
    SUPPORTED_OLLAMA_MODELS, matching first on the full "name:tag" and
    otherwise on the bare base name (tag stripped). Memoized, so the
    daemon is queried at most once per process.

    Returns:
        Matching model names, or [] when the daemon cannot be reached.
    """
    available_models: List[str] = []
    try:
        installed = ollama.list()["models"]
        for model in installed:
            full_name = model["name"]
            # Prefer an exact "name:tag" match against the supported set.
            if full_name in SUPPORTED_OLLAMA_MODELS:
                available_models.append(full_name)
            else:
                # Fall back to the base name without the version tag.
                base_name = full_name.split(":")[0]
                if base_name in SUPPORTED_OLLAMA_MODELS:
                    available_models.append(base_name)
        return available_models
    except Exception:
        # Fix: loguru's logger.exception() records the active exception and
        # traceback by itself; the original passed the exception object as a
        # stray positional format argument with no placeholder in the message.
        logger.exception("Failed to get available ollama providers")
        return []
def get_available_providers() -> Dict[str, BaseChatModel]:
    """Collect every chat-model provider usable with the current settings.

    Cloud providers are included only when their respective API keys are
    configured; locally installed Ollama models are appended when
    OLLAMA_HOST is set.
    """
    catalog: Dict[str, BaseChatModel] = {}

    cloud_families = [
        (["gpt-4o-mini", "gpt-4o", "gpt-3.5-turbo"], get_openai_provider),
        (["claude-3-5-sonnet"], get_anthropic_provider),
        (["gemini-1.5-pro", "gemini-1.5-flash"], get_gemini_provider),
    ]
    for family, factory in cloud_families:
        for name in family:
            catalog.update(get_provider(name, factory))

    if settings.OLLAMA_HOST:
        for name in get_available_ollama_providers():
            catalog.update(get_provider(name, get_ollama_provider))

    return catalog
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy_utils import create_database, database_exists

from omniagent.conf.env import settings
from omniagent.db.models import Base

# Connection string is taken from the DB_CONNECTION environment setting.
url = settings.DB_CONNECTION

# Bootstrap on import: create the database itself on a fresh server
# (e.g. a brand-new Postgres container) before connecting to it.
if not database_exists(url):
    create_database(url)
# Pin the session timezone to UTC so stored timestamps are consistent.
# NOTE(review): the "-c timezone=utc" option is Postgres-specific.
engine = create_engine(url, connect_args={"options": "-c timezone=utc"})
# Create any missing tables declared on the shared declarative Base.
Base.metadata.create_all(bind=engine)  # type: ignore

# Session factory used by the rest of the app to open DB sessions.
DBSession = sessionmaker(bind=engine)
| streaming = Column(Boolean, nullable=False) 38 | waitForAnswer = Column(Boolean) 39 | isError = Column(Boolean) 40 | metadata_ = Column("metadata", JSON) 41 | tags = Column(ARRAY(Text)) # type: ignore 42 | input = Column(Text) 43 | output = Column(Text) 44 | createdAt = Column(Text) 45 | start = Column(Text) 46 | end = Column(Text) 47 | generation = Column(JSON) 48 | showInput = Column(Text) 49 | language = Column(Text) 50 | indent = Column(Integer) 51 | 52 | 53 | class Element(Base): # type: ignore 54 | __tablename__ = "elements" 55 | id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) 56 | threadId = Column(UUID(as_uuid=True)) 57 | type = Column(Text) 58 | url = Column(Text) 59 | chainlitKey = Column(Text) 60 | name = Column(Text, nullable=False) 61 | display = Column(Text) 62 | objectKey = Column(Text) 63 | size = Column(Text) 64 | page = Column(Integer) 65 | language = Column(Text) 66 | forId = Column(UUID(as_uuid=True)) 67 | mime = Column(Text) 68 | 69 | 70 | class Feedback(Base): # type: ignore 71 | __tablename__ = "feedbacks" 72 | id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) 73 | forId = Column(UUID(as_uuid=True), nullable=False) 74 | value = Column(Integer, nullable=False) 75 | comment = Column(Text) 76 | threadId = Column(UUID(as_uuid=True)) 77 | -------------------------------------------------------------------------------- /omniagent/executors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VividGen/CMA/74a1987f6cee3036c30da39c6dd548c5d5e5a305/omniagent/executors/__init__.py -------------------------------------------------------------------------------- /omniagent/executors/block_stat_executor.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Type 2 | 3 | import ccxt 4 | import requests 5 | from langchain.callbacks.manager import ( 6 | 
def fetch_stat(chain) -> str:
    """Fetch chain-level statistics from the Blockchair public API.

    Args:
        chain: Blockchair chain slug, e.g. "ethereum" or "bitcoin".

    Returns:
        The raw JSON response body on success, or a human-readable error
        string on a non-200 response or a failed request. (The original
        returned a dict on success, violating the declared `-> str`.)
    """
    url = f"https://api.blockchair.com/{chain}/stats"
    headers = {"accept": "application/json"}

    try:
        # A timeout prevents a stalled API call from hanging the agent.
        response = requests.get(url, headers=headers, timeout=15)
    except requests.RequestException as exc:
        return f"Error fetching data: {exc}"

    if response.status_code == 200:
        # response.text is the JSON body as a string, satisfying `-> str`.
        return response.text
    return f"Error fetching data: {response.status_code}, {response.text}"
from langchain.callbacks.manager import ( 6 | AsyncCallbackManagerForToolRun, 7 | CallbackManagerForToolRun, 8 | ) 9 | from langchain.tools import BaseTool 10 | from pydantic import BaseModel, Field 11 | 12 | from omniagent.conf.env import settings 13 | 14 | 15 | class ARGS(BaseModel): 16 | order: str = Field( 17 | description="sort result by field, default: market_cap_desc. options: market_cap_desc," "market_cap_asc,volume_desc,volume_asc" 18 | ) 19 | size: int = Field(description="number of coins to return, default: 20") 20 | 21 | 22 | class CoinMarketExecutor(BaseTool): 23 | name = "CoinMarketExecutor" 24 | 25 | description = "query coins sorted by market cap, volume." 26 | args_schema: Type[ARGS] = ARGS 27 | 28 | def _run( 29 | self, 30 | order: str, 31 | size: int, 32 | run_manager: Optional[CallbackManagerForToolRun] = None, 33 | ) -> str: 34 | if settings.COINGECKO_API_KEY is None: 35 | return "Please set COINGECKO_API_KEY in the environment" 36 | return json.dumps(fetch_coins_with_market(order, size)) 37 | 38 | async def _arun( 39 | self, 40 | order: str, 41 | size: int, 42 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 43 | ) -> str: 44 | if settings.COINGECKO_API_KEY is None: 45 | return "Please set COINGECKO_API_KEY in the environment" 46 | return json.dumps(fetch_coins_with_market(order, size)) 47 | 48 | 49 | def fetch_coins_with_market(order: str, size: int = 20) -> list: 50 | url = f"https://pro-api.coingecko.com/api/v3/coins/markets?vs_currency=usd&order={order}&per_page={size}" 51 | 52 | headers = { 53 | "accept": "application/json", 54 | "x-cg-pro-api-key": settings.COINGECKO_API_KEY, 55 | } 56 | 57 | response = requests.get(url, headers=headers) 58 | 59 | res = json.loads(response.text) 60 | return list( 61 | map( 62 | lambda x: { 63 | "symbol": x["symbol"], 64 | "name": x["name"], 65 | "current_price": x["current_price"], 66 | "fully_diluted_valuation": x["fully_diluted_valuation"], 67 | "total_volume": x["total_volume"], 68 | }, 
from typing import Optional, Type

from langchain.callbacks.manager import AsyncCallbackManagerForToolRun
from langchain.tools import BaseTool
from loguru import logger
from pydantic import BaseModel, Field
from rss3_dsl_sdk.client import RSS3Client
from rss3_dsl_sdk.schemas.base import ActivityFilter, PaginationOptions

from omniagent.executors.feed_prompt import FEED_PROMPT

# Define the defi activities and common DeFi networks
SUPPORTED_NETWORKS = ["arbitrum", "avax", "base", "binance-smart-chain", "ethereum", "gnosis", "linea", "optimism", "polygon"]
DEFI_ACTIVITIES = ["swap", "liquidity", "staking", "all"]


# Define the schema for input parameters
class ParamSchema(BaseModel):
    """
    Defines the schema for input parameters of the DeFiExecutor tool.
    """

    address: str = Field(description="Wallet address or blockchain domain name (e.g., vitalik.eth)")
    activity_type: str = Field(description=f"Type of DeFi activity. Supported types: {', '.join(DEFI_ACTIVITIES)}")
    network: Optional[str] = Field(default=None, description=f"Network for activities. Supported: {', '.join(SUPPORTED_NETWORKS)}")


class DeFiExecutor(BaseTool):
    """
    A tool for fetching and analyzing DeFi activities across various networks.

    Only the async path (_arun) is implemented; the sync path raises
    NotImplementedError, matching the other async-only executors in this package.
    """

    name = "DeFiExecutor"
    description = "Use this tool to get the user's DeFi activities (swaps, liquidity provision, staking, all) across various networks."
    args_schema: Type[ParamSchema] = ParamSchema

    def _run(
        self,
        address: str,
        activity_type: str,
        network: Optional[str] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        # FIX: this was previously declared `async def`, so a synchronous caller
        # received an unawaited coroutine instead of the intended
        # NotImplementedError. Sync execution is not supported.
        raise NotImplementedError

    async def _arun(self, address: str, activity_type: str, network: Optional[str] = None) -> str:
        """
        Asynchronously run the DeFi activity fetching process.

        :param address: The wallet address to fetch activities for
        :param activity_type: The type of DeFi activity to fetch, now supports "swap", "liquidity", "staking"
        :param network: network to filter activities (OPTIONAL)
        :return: A string containing the fetched DeFi activities or an error message
        """
        return await self.fetch_defi_feeds(address, network, activity_type)

    async def fetch_defi_feeds(self, address: str, network: Optional[str] = None, activity_type: Optional[str] = None):
        """
        Fetch DeFi feed activities for a given address, optionally filtered by network and activity type.

        :param address: The wallet address to fetch activities for
        :param network: network to filter activities (Optional)
        :param activity_type: The type of DeFi activity to fetch
        :return: A string containing the fetched DeFi activities or an error message
        """
        # FIX: normalize once so validation and dispatch agree on casing, and so a
        # None activity_type (the parameter default) does not raise AttributeError.
        # Previously "All" passed the case-insensitive validation but then failed
        # dispatch via getattr(client, "fetch_exchange_All_activities").
        activity_type = (activity_type or "").lower()

        # Validate activity type
        if activity_type not in DEFI_ACTIVITIES:
            return f"Error: Unsupported activity type '{activity_type}'. Choose from: {', '.join(DEFI_ACTIVITIES)}"

        # Validate network if provided
        if network and network.lower() not in map(str.lower, SUPPORTED_NETWORKS):
            return f"Error: Unsupported network '{network}'. Choose from: {', '.join(SUPPORTED_NETWORKS)}"

        try:
            client = RSS3Client()
            filters = ActivityFilter(network=[network] if network else None)
            pagination = PaginationOptions(limit=10, action_limit=10)

            # Handle 'all' activity type by aggregating the three concrete kinds
            if activity_type == "all":
                activities = []
                for act_type in ["swap", "liquidity", "staking"]:
                    fetch_method = getattr(client, f"fetch_exchange_{act_type}_activities")
                    act_results = fetch_method(account=address, filters=filters, pagination=pagination)
                    activities.extend(act_results.data)
            else:
                fetch_method = getattr(client, f"fetch_exchange_{activity_type}_activities")
                activities_result = fetch_method(account=address, filters=filters, pagination=pagination)
                activities = activities_result.data

            # Check if any activities were found
            if not activities:
                return (
                    f"No {'DeFi' if activity_type == 'all' else activity_type} activities found for {address}{' on ' + network if network else ''}."
                )

            # Format the result for the LLM prompt
            activities_data = [activity.model_dump() for activity in activities]
            result = FEED_PROMPT.format(activities_data=activities_data, activity_type="DeFi" if activity_type == "all" else activity_type)
            return result

        except Exception as e:
            logger.error(f"Error fetching DeFi activities: {e}")
            return f"Error: Unable to fetch data. {e}"
58 | 59 | :param address: The wallet address to fetch activities for 60 | :param type: The type of activities to fetch (all, post, comment, share) 61 | :param run_manager: Optional callback manager for async operations 62 | :return: A string containing the fetched activities or an error message 63 | """ 64 | return await fetch_feeds(address, type) 65 | 66 | 67 | async def fetch_feeds(address: str, type: str): 68 | """ 69 | Fetch feed activities for a given address and activity type. 70 | 71 | :param address: The wallet address to fetch activities for 72 | :param type: The type of activities to fetch (all, post, comment, share) 73 | :return: A string containing the fetched activities formatted using FEED_PROMPT 74 | """ 75 | 76 | # Construct the URL for the API request 77 | url = f"{settings.RSS3_DATA_API}/decentralized/{address}?limit=5&action_limit=10&tag=social" 78 | if type in ["post", "comment", "share"]: 79 | url += f"&type={type}" 80 | headers = {"Accept": "application/json"} 81 | async with aiohttp.ClientSession() as session: 82 | logger.info(f"fetching {url}") 83 | async with session.get(url, headers=headers) as resp: 84 | data = await resp.json() 85 | 86 | result = FEED_PROMPT.format(activities_data=data) 87 | 88 | return result 89 | -------------------------------------------------------------------------------- /omniagent/executors/feed_prompt.py: -------------------------------------------------------------------------------- 1 | FEED_PROMPT = """ 2 | Here are the raw activities: 3 | 4 | {activities_data} 5 | 6 | - Before answering, please first summarize how many actions the above activities have been carried out. 7 | - Display the key information in each operation, such as time, author, specific content, etc., and display this information in a markdown list format. 8 | - Finally, give a specific answer to the question. 
9 | """ 10 | -------------------------------------------------------------------------------- /omniagent/executors/feed_source_executor.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Type 2 | 3 | from langchain.callbacks.manager import ( 4 | CallbackManagerForToolRun, 5 | ) 6 | from langchain.tools import BaseTool 7 | from loguru import logger 8 | from pydantic import BaseModel, Field 9 | from rss3_dsl_sdk.client import RSS3Client 10 | from rss3_dsl_sdk.schemas.base import ActivityFilter, PaginationOptions 11 | 12 | from omniagent.executors.feed_prompt import FEED_PROMPT 13 | 14 | # Define supported networks and platforms 15 | SUPPORTED_NETWORKS = [ 16 | "arbitrum", 17 | "arweave", 18 | "avax", 19 | "base", 20 | "binance-smart-chain", 21 | "crossbell", 22 | "ethereum", 23 | "farcaster", 24 | "gnosis", 25 | "linea", 26 | "optimism", 27 | "polygon", 28 | "vsl", 29 | ] 30 | 31 | ALLOWED_PLATFORMS = [ 32 | "1inch", 33 | "AAVE", 34 | "Aavegotchi", 35 | "Crossbell", 36 | "Curve", 37 | "ENS", 38 | "Farcaster", 39 | "Highlight", 40 | "IQWiki", 41 | "KiwiStand", 42 | "Lens", 43 | "Lido", 44 | "LooksRare", 45 | "Matters", 46 | "Mirror", 47 | "OpenSea", 48 | "Optimism", 49 | "Paragraph", 50 | "RSS3", 51 | "SAVM", 52 | "Stargate", 53 | "Uniswap", 54 | "Unknown", 55 | "VSL", 56 | ] 57 | 58 | 59 | # Define the schema for input parameters 60 | class ParamSchema(BaseModel): 61 | address: str = Field( 62 | description="""wallet address or blockchain domain name,\ 63 | hint: vitalik's address is vitalik.eth""" 64 | ) 65 | 66 | network: Optional[str] = Field( 67 | default=None, 68 | description=f"""Retrieve activities for the specified network. 69 | Supported networks: {', '.join(SUPPORTED_NETWORKS)}""", 70 | ) 71 | 72 | platform: Optional[str] = Field( 73 | default=None, 74 | description=f"""Retrieve activities for the specified platform. 
# Define the FeedSourceExecutor tool
class FeedSourceExecutor(BaseTool):
    """
    A tool that fetches recent on-chain/social activities for an address,
    optionally filtered by network and/or platform, via the RSS3 DSL SDK.
    """

    name = "FeedSourceExecutor"
    description = """Use this tool to get the activities of a wallet address or \
blockchain domain name based on specific network and/or platform, and know what this address \
has done or is doing recently."""
    args_schema: Type[ParamSchema] = ParamSchema

    def _run(
        self,
        address: str,
        network: Optional[str] = None,
        platform: Optional[str] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        # Sync execution is not supported; use the async path (_arun).
        raise NotImplementedError

    async def _arun(
        self,
        address: str,
        network: Optional[str] = None,
        platform: Optional[str] = None,
    ):
        """
        Asynchronously run the feed source fetching process.

        :param address: The wallet address to fetch activities for
        :param network: network to filter activities (Optional)
        :param platform: platform to filter activities (Optional)
        :return: A string containing the fetched activities or an error message
        """
        return await self.fetch_source_feeds(address, network, platform)

    async def fetch_source_feeds(self, address: str, network: Optional[str] = None, platform: Optional[str] = None):
        """
        Fetch feed activities for a given address, optionally filtered by network and platform.
        """
        filters = ActivityFilter()
        pagination = PaginationOptions(limit=5, action_limit=10)

        # Validate and set network and platform filter if provided
        if network:
            if network.lower() not in [n.lower() for n in SUPPORTED_NETWORKS]:
                return f"Error: Unsupported network '{network}'. Please choose from: {', '.join(SUPPORTED_NETWORKS)}"
            filters.network = [network]

        if platform:
            if platform.lower() not in [p.lower() for p in ALLOWED_PLATFORMS]:
                return f"Error: Unsupported platform '{platform}'. Please choose from: {', '.join(ALLOWED_PLATFORMS)}"
            filters.platform = [platform]

        try:
            logger.info(f"Fetching activities for address: {address}, network: {network}, platform: {platform}")

            # FIX: the filters and pagination objects were previously passed to
            # each other's keyword argument (pagination=filters, filters=pagination),
            # so the network/platform filters and the page limits were never applied.
            activities = RSS3Client().fetch_activities(account=address, tag=None, activity_type=None, pagination=pagination, filters=filters)

            # Check if any activities were found
            if not activities.data:
                return f"No activities found for the given address{' on ' + network if network else ''}{' and ' + platform if platform else ''}."

            result = FEED_PROMPT.format(activities_data=activities.dict())
            return result

        except Exception as e:
            logger.error(f"Error fetching activities: {e!s}")
            return f"Error: Unable to fetch data. {e!s}"
22 | args_schema: Type[ARGS] = ARGS 23 | 24 | def _run( 25 | self, 26 | exchange: str, 27 | symbol: str, 28 | run_manager: Optional[CallbackManagerForToolRun] = None, 29 | ) -> str: 30 | try: 31 | return json.dumps(fetch_funding_rate(exchange, symbol)) 32 | except Exception as e: 33 | return f"error: {e}" 34 | 35 | async def _arun( 36 | self, 37 | exchange: str, 38 | symbol: str, 39 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 40 | ) -> str: 41 | try: 42 | return json.dumps(fetch_funding_rate(exchange, symbol)) 43 | except Exception as e: 44 | return f"error: {e}" 45 | 46 | 47 | def fetch_funding_rate(exchange_name: str, symbol: str) -> float: 48 | try: 49 | if not symbol.endswith(":USDT"): 50 | symbol = f"{symbol}:USDT" 51 | exchange_class = getattr(ccxt, exchange_name) 52 | exchange = exchange_class() 53 | 54 | funding_rate = exchange.fetch_funding_rate(symbol) 55 | return funding_rate 56 | except Exception as e: 57 | logger.warning(f"Fetch funding rate error from {exchange_name}: {e}") 58 | raise e 59 | 60 | 61 | if __name__ == "__main__": 62 | tool = FundingRateExecutor() 63 | print(tool.run(tool_input={"exchange": "binance", "symbol": "BTC/USDT:USDT"})) 64 | -------------------------------------------------------------------------------- /omniagent/executors/nft_balance_executor.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Optional, Type 3 | 4 | from langchain.callbacks.manager import ( 5 | AsyncCallbackManagerForToolRun, 6 | CallbackManagerForToolRun, 7 | ) 8 | from langchain.tools import BaseTool 9 | from pydantic import BaseModel, Field 10 | 11 | from omniagent.conf.env import settings 12 | 13 | 14 | class ARGS(BaseModel): 15 | chain: str = Field(description="chain name,options:eth,optimism,arbitrum,bsc") 16 | 17 | wallet_address: str = Field(description="wallet address") 18 | 19 | 20 | class NFTBalanceExecutor(BaseTool): 21 | name = "NFTBalanceExecutor" 22 | 
description = "get the nft asset of a wallet." 23 | args_schema: Type[ARGS] = ARGS 24 | 25 | def _run( 26 | self, 27 | chain: str, 28 | wallet_address: str, 29 | run_manager: Optional[CallbackManagerForToolRun] = None, 30 | ) -> str: 31 | return fetch_balance(chain, wallet_address) 32 | 33 | async def _arun( 34 | self, 35 | chain: str, 36 | wallet_address: str, 37 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 38 | ) -> str: 39 | return fetch_balance(chain, wallet_address) 40 | 41 | 42 | def fetch_balance(chain: str, address: str) -> str: 43 | if settings.MORALIS_API_KEY is None: 44 | return "Please set MORALIS_API_KEY in the environment" 45 | from moralis import evm_api 46 | 47 | params = {"chain": chain, "format": "decimal", "media_items": False, "address": address} 48 | 49 | result = evm_api.nft.get_wallet_nfts( 50 | api_key=settings.MORALIS_API_KEY, 51 | params=params, 52 | ) 53 | 54 | return json.dumps( 55 | list( 56 | map( 57 | lambda x: { 58 | "amount": x["amount"], 59 | "name": x["name"], 60 | "symbol": x["symbol"], 61 | }, 62 | result["result"], 63 | ) 64 | ) 65 | ) 66 | 67 | 68 | if __name__ == "__main__": 69 | print(fetch_balance("eth", "0x33c0814654fa367ce67d8531026eb4481290e63c")) 70 | -------------------------------------------------------------------------------- /omniagent/executors/nft_rank_executor.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Optional, Type 3 | 4 | from langchain.callbacks.manager import ( 5 | AsyncCallbackManagerForToolRun, 6 | CallbackManagerForToolRun, 7 | ) 8 | from langchain.tools import BaseTool 9 | from moralis import evm_api 10 | from pydantic import BaseModel, Field 11 | 12 | from omniagent.conf.env import settings 13 | 14 | 15 | class NFTRankingArgs(BaseModel): 16 | limit: int = Field(description="Number of collections to return", default=10) 17 | 18 | 19 | class NFTRankingExecutor(BaseTool): 20 | name = "NFTRankingExecutor" 
21 | description = "A tool for getting NFT collection rankings." 22 | args_schema: Type[NFTRankingArgs] = NFTRankingArgs 23 | 24 | def _run( 25 | self, 26 | limit: int, 27 | run_manager: Optional[CallbackManagerForToolRun] = None, 28 | ) -> str: 29 | return self.collection_ranking(limit) 30 | 31 | async def _arun( 32 | self, 33 | limit: int, 34 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 35 | ) -> str: 36 | return self._run(limit, run_manager) 37 | 38 | @staticmethod 39 | def collection_ranking(limit: int) -> str: 40 | if settings.MORALIS_API_KEY is None: 41 | return "Please set MORALIS_API_KEY in the environment" 42 | by_market_cap = evm_api.market_data.get_top_nft_collections_by_market_cap( 43 | api_key=settings.MORALIS_API_KEY, 44 | ) 45 | limit = min(limit, len(by_market_cap)) 46 | result = by_market_cap[0:limit] 47 | return json.dumps( 48 | list( 49 | map( 50 | lambda x: { 51 | "collection_title": x["collection_title"], 52 | "collection_image": x["collection_image"], 53 | "floor_price_usd": x["floor_price_usd"], 54 | "collection_address": x["collection_address"], 55 | }, 56 | result, 57 | ) 58 | ) 59 | ) 60 | 61 | 62 | if __name__ == "__main__": 63 | ranking = NFTRankingExecutor.collection_ranking(4) 64 | print(ranking) 65 | -------------------------------------------------------------------------------- /omniagent/executors/price_executor.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | from typing import Optional, Type 4 | 5 | import requests 6 | from langchain.callbacks.manager import ( 7 | AsyncCallbackManagerForToolRun, 8 | CallbackManagerForToolRun, 9 | ) 10 | from langchain.tools import BaseTool 11 | from pydantic import BaseModel, Field 12 | 13 | from omniagent.conf.env import settings 14 | 15 | 16 | class ARGS(BaseModel): 17 | token: str = Field(description="token symbol, e.g., 'ETH', 'BTC'") 18 | 19 | 20 | class PriceExecutor(BaseTool): 21 | name = 
"PriceExecutor" 22 | description = "use this tool to get the price widget of a token." 23 | args_schema: Type[ARGS] = ARGS 24 | 25 | def _run( 26 | self, 27 | token: str, 28 | run_manager: Optional[CallbackManagerForToolRun] = None, 29 | ) -> str: 30 | return asyncio.run(fetch_price(token)) 31 | 32 | async def _arun( 33 | self, 34 | token: str, 35 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 36 | ) -> str: 37 | return await fetch_price(token) 38 | 39 | 40 | async def fetch_price(token: str) -> str: 41 | url = f"https://pro-api.coingecko.com/api/v3/search?query={token}" 42 | 43 | key = settings.COINGECKO_API_KEY 44 | headers = {"accept": "application/json", "x-cg-pro-api-key": key} 45 | 46 | response = requests.get(url, headers=headers) 47 | token_: dict = json.loads(response.text)["coins"][0] 48 | token_id_ = token_["id"] 49 | 50 | url = ( 51 | f"https://pro-api.coingecko.com/api/v3/simple/price?ids={token_id_}&" 52 | f"vs_currencies=usd&include_market_cap=true&include_24hr_vol=true&" 53 | f"include_24hr_change=true&include_last_updated_at=true" 54 | ) 55 | 56 | headers = {"accept": "application/json", "x-cg-pro-api-key": key} 57 | 58 | response = requests.get(url, headers=headers) 59 | 60 | return response.text 61 | 62 | 63 | if __name__ == "__main__": 64 | print(asyncio.run(fetch_price("eth"))) 65 | -------------------------------------------------------------------------------- /omniagent/executors/project_executor.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | from concurrent.futures import ThreadPoolExecutor 4 | from typing import Optional, Type 5 | 6 | import aiohttp 7 | from cachetools import TTLCache, cached 8 | from langchain.callbacks.manager import ( 9 | AsyncCallbackManagerForToolRun, 10 | CallbackManagerForToolRun, 11 | ) 12 | from langchain.tools import BaseTool 13 | from pydantic import BaseModel, Field 14 | 15 | from omniagent.conf.env import settings 16 
# NOTE(review): unused constant; retained so any external import of this
# module-level name keeps working.
API_KEY = ""
HEADERS = {
    "apikey": settings.ROOTDATA_API_KEY,
    "language": "en",
    "Content-Type": "application/json",
}

# 24h TTL cache mapping search keyword -> resolved project list (see fetch_project).
cache = TTLCache(maxsize=100, ttl=24 * 60 * 60)


class ARGS(BaseModel):
    keyword: str = Field(description="keyword")


def _fetch_project_sync(keyword: str) -> str:
    """Synchronous wrapper for fetch_project; runs its own event loop (used from _run)."""
    projects = asyncio.run(fetch_project(keyword))
    return json.dumps(projects)


class ProjectExecutor(BaseTool):
    """Tool that looks up crypto project information (investors, team, socials) on RootData."""

    name = "ProjectExecutor"

    description = "get the project information like investors, team members, social media, etc."
    args_schema: Type[ARGS] = ARGS

    def _run(
        self,
        keyword: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        if settings.ROOTDATA_API_KEY is None:
            return "Please set ROOTDATA_API_KEY in the environment"
        # Run the async fetch on a worker thread so we don't clash with any
        # event loop already running in the calling thread.
        with ThreadPoolExecutor() as executor:
            future = executor.submit(_fetch_project_sync, keyword)
            return future.result()

    async def _arun(
        self,
        keyword: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        if settings.ROOTDATA_API_KEY is None:
            return "Please set ROOTDATA_API_KEY in the environment"
        projects = await fetch_project(keyword)
        return json.dumps(projects)


async def fetch_project_detail(session, project_id: int) -> dict:
    """Fetch full detail (including team and investors) for one RootData project id."""
    url = "https://api.rootdata.com/open/get_item"
    payload = json.dumps({"project_id": project_id, "include_team": True, "include_investors": True})

    async with session.post(url, headers=HEADERS, data=payload) as response:
        response_text = await response.text()
        return json.loads(response_text)["data"]


async def fetch_project(keyword: str) -> list:
    """
    Search RootData for `keyword` and return details for the top 2 project hits.

    Results are memoized in the module-level TTL cache.

    FIX: this function previously used `@cached(cache)` directly on the async
    function, which caches the *coroutine object* rather than its result; a
    cache hit then awaits an already-awaited coroutine and raises RuntimeError.
    We therefore cache the resolved result explicitly.
    """
    if keyword in cache:
        return cache[keyword]

    url = "https://api.rootdata.com/open/ser_inv"
    payload = json.dumps({"query": keyword, "variables": {}})

    async with aiohttp.ClientSession() as session, session.post(url, headers=HEADERS, data=payload) as response:
        response_text = await response.text()
        data = json.loads(response_text)["data"]
        # type == 1 marks project entries (vs. people/orgs); keep the top two.
        project_ids = [item["id"] for item in data if item["type"] == 1][0:2]

        tasks = [fetch_project_detail(session, project_id) for project_id in project_ids]
        projects = list(await asyncio.gather(*tasks))

    cache[keyword] = projects
    return projects


if __name__ == "__main__":
    print(asyncio.run(fetch_project("rss3")))


from langchain_community.tools import DuckDuckGoSearchRun
from langchain_community.tools.tavily_search import TavilySearchResults

from omniagent.conf.env import settings


class SearchExecutor:
    """
    Factory class: instantiating it returns a Tavily search tool when
    TAVILY_API_KEY is configured, otherwise a DuckDuckGo search tool.
    """

    def __new__(cls):
        if settings.TAVILY_API_KEY:
            return TavilySearchResults(max_results=5, name="TavilySearchExecutor")
        else:
            return DuckDuckGoSearchRun(name="DuckDuckGoSearchExecutor")


search_executor = SearchExecutor()
Literal["ETH", "BSC", "ARBITRUM", "OPTIMISM", "BASE"] 26 | 27 | 28 | class ParamSchema(BaseModel): 29 | """ 30 | Schema for the parameters required for a token swap. 31 | """ 32 | 33 | from_token: str = Field(description="Symbol of the token to swap from, e.g., 'BTC', 'ETH', 'RSS3', 'USDT', 'USDC'. Default: 'ETH'.") 34 | to_token: str = Field(description="Symbol of the token to swap to, e.g., 'BTC', 'ETH', 'RSS3', 'USDT', 'USDC'. Default: 'ETH'.") 35 | from_chain: ChainLiteral = Field( 36 | default="ETH", 37 | description="Blockchain network to swap from, support networks: 'ETH', 'BSC', 'ARBITRUM', 'OPTIMISM', 'BASE'. Default: 'ETH'.", 38 | ) 39 | to_chain: ChainLiteral = Field( 40 | default="ETH", 41 | description="Blockchain network to swap to, support networks: 'ETH', 'BSC', 'ARBITRUM', 'OPTIMISM', 'BASE'. Default: 'ETH'.", 42 | ) 43 | amount: str = Field(description="Amount of the from-side token to swap, e.g., '0.1', '1', '10'. Default: '1'.") 44 | 45 | 46 | class SwapExecutor(BaseTool): 47 | """ 48 | Tool for generating a swap widget for cryptocurrency swaps. 49 | """ 50 | 51 | name = "SwapExecutor" 52 | description = "Use this tool to handle user requests to swap cryptocurrencies." 
53 | args_schema: Type[ParamSchema] = ParamSchema 54 | return_direct = False 55 | 56 | def _run( 57 | self, 58 | from_token: str, 59 | to_token: str, 60 | from_chain: ChainLiteral, 61 | to_chain: ChainLiteral, 62 | amount: str, 63 | run_manager: Optional[CallbackManagerForToolRun] = None, 64 | ) -> str: 65 | raise NotImplementedError 66 | 67 | async def _arun( 68 | self, 69 | from_token: str, 70 | to_token: str, 71 | from_chain: ChainLiteral = "ETH", 72 | to_chain: ChainLiteral = "ETH", 73 | amount: str = "1", 74 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 75 | ): 76 | return await fetch_swap(from_token, to_token, from_chain, to_chain, amount) 77 | 78 | 79 | async def fetch_swap(from_token: str, to_token: str, from_chain: ChainLiteral, to_chain: ChainLiteral, amount: str): 80 | """ 81 | Fetch the swap details for the given parameters. 82 | 83 | Args: 84 | from_token (str): The symbol of the from-side token. 85 | to_token (str): The symbol of the to-side token. 86 | from_chain (ChainLiteral): The from-side blockchain network. 87 | to_chain (ChainLiteral): The to-side blockchain network. 88 | amount (str): The amount of tokens to swap. 89 | 90 | Returns: 91 | str: The swap details in JSON format. 
92 | """ 93 | from_chain_id = chain_name_to_id(from_chain) 94 | to_chain_id = chain_name_to_id(to_chain) 95 | 96 | # Fetch token data concurrently 97 | from_token_data, to_token_data = await asyncio.gather(select_best_token(from_token, from_chain_id), select_best_token(to_token, to_chain_id)) 98 | 99 | swap = Swap( 100 | from_token=get_token_data_by_key(from_token_data, "symbol"), 101 | from_token_address=get_token_data_by_key(from_token_data, "address"), 102 | to_token=get_token_data_by_key(to_token_data, "symbol"), 103 | to_token_address=get_token_data_by_key(to_token_data, "address"), 104 | from_chain_name=from_chain, 105 | to_chain_name=to_chain, 106 | amount=amount, 107 | ) 108 | return swap.model_dump_json() 109 | -------------------------------------------------------------------------------- /omniagent/executors/tg_news_executor.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | from typing import Any, Dict, List, Optional, Type 4 | 5 | from langchain.callbacks.manager import ( 6 | AsyncCallbackManagerForToolRun, 7 | ) 8 | from langchain.tools import BaseTool 9 | from pydantic import BaseModel, Field 10 | 11 | from omniagent.executors.tg_util import fetch_tg_msgs 12 | 13 | 14 | class ParamSchema(BaseModel): 15 | """ 16 | Defines the schema for input parameters of the TelegramNewsExecutor tool. 17 | """ 18 | 19 | limit: int = Field(default=10, description="Number of recent news items to fetch from Telegram channels") 20 | 21 | 22 | class TelegramNewsExecutor(BaseTool): 23 | """ 24 | A tool for fetching recent news from specific Telegram channels using RSS3 DATA API. 
async def fetch_telegram_news(channels: List[str], limit: int = 10) -> str:
    """
    Fetch recent news from specific Telegram channels using RSS3 DATA API.

    :param channels: List of Telegram channels to fetch news from
    :param limit: Number of recent news items to fetch per channel
    :return: A formatted string of news items, or an error message on failure
    """
    try:
        results = list(await asyncio.gather(*[fetch_tg_msgs(channel, limit) for channel in channels]))
        return format_news(results)
    except Exception as e:
        # asyncio.gather either returns every result or raises before anything
        # is assigned, so there are never partial results to fall back on.
        # (The previous branch that dumped partial `results` was unreachable.)
        return f"An error occurred while fetching news: {e!s}"
def format_entry(entry: Dict) -> str:
    """
    Format a single news entry into a readable string.

    :param entry: A dictionary containing news entry data
    :return: A formatted string of the news entry
    """
    # The interesting fields live on the first action's metadata.
    meta = entry["actions"][0]["metadata"]
    lines = (
        f"Title: {meta['title']}",
        f"Date: {meta['pub_date']}",
        f"Summary: {meta['description']}",
    )
    return "\n".join(lines) + "\n\n"
def fetch_balance(chain: str, address: str) -> str:
    """
    Look up ERC-20 token balances (with USD prices) for a wallet via Moralis.

    :param chain: Chain name; options: eth, optimism, arbitrum, bsc.
    :param address: The wallet address to query.
    :return: JSON array of {symbol, balance_formatted, usd_value} objects,
             or an error message when the API key is not configured.
    """
    if settings.MORALIS_API_KEY is None:
        return "Please set MORALIS_API_KEY in the environment"
    result = evm_api.wallets.get_wallet_token_balances_price(
        api_key=settings.MORALIS_API_KEY,
        params={"chain": chain, "address": address},
    )

    # Keep only the fields the caller needs; a comprehension is clearer than
    # the previous list(map(lambda ...)) chain.
    return json.dumps(
        [
            {
                "symbol": token["symbol"],
                "balance_formatted": token["balance_formatted"],
                "usd_value": token["usd_value"],
            }
            for token in result["result"]
        ]
    )
def chain_name_to_id(chain_name: str) -> str:
    """
    Convert chain name to chain ID.

    Args:
        chain_name (str): The name of the blockchain network.

    Returns:
        str: The corresponding chain ID; defaults to "1" (Ethereum mainnet)
        for unknown names.
    """
    chain_map = {
        "ETH": "1",
        "OPTIMISM": "10",
        "BSC": "56",
        "BASE": "8453",
        "ARBITRUM": "42161",
    }
    return chain_map.get(chain_name, "1")


@cached(ttl=300, cache=Cache.MEMORY)
async def fetch_tokens() -> Dict[str, List[Dict]]:
    """
    Fetch the token list from the API and cache it for 300 seconds (5 minutes).

    (Docstring previously claimed 60 seconds, contradicting ttl=300 above.)

    Returns:
        Dict[str, List[Dict]]: The token list grouped by chain ID.
    """
    url = "https://li.quest/v1/tokens"
    headers = {"Accept": "application/json"}
    logger.info(f"Fetching new data from {url}")

    async with aiohttp.ClientSession() as session:  # noqa
        async with session.get(url, headers=headers) as response:
            token_list = await response.json()
            return token_list["tokens"]
async def select_best_token(keyword: str, chain_id: str) -> Optional[Dict]:
    """
    Select the best token based on the keyword and chain ID.

    Args:
        keyword (str): The keyword to search for.
        chain_id (str): The chain ID to filter tokens.

    Returns:
        Optional[Dict]: The best matching token, or None if no match is found.
    """
    keyword = keyword.lower()

    # Native ETH only exists on mainnet; elsewhere fall back to wrapped ETH.
    if keyword == "eth" and chain_id != "1":
        keyword = "weth"

    # BTC is only available as wrapped BTC on EVM chains.
    if keyword == "btc":
        keyword = "wbtc"

    all_tokens = await fetch_tokens()
    candidates = [
        token
        for token in all_tokens.get(chain_id, [])
        if keyword in (token["symbol"].lower(), token["name"].lower())
    ]

    if not candidates:
        return None

    if len(candidates) > 1:
        # Rank ties: logo present, exact symbol, coinKey, priced, exact name.
        candidates.sort(
            key=lambda token: (
                "logoURI" in token,
                token["symbol"].lower() == keyword,
                token.get("coinKey", "").lower() == keyword,
                token.get("priceUSD") is not None,
                token["name"].lower() == keyword,
            ),
            reverse=True,
        )
    return candidates[0]
async def fetch_transfer(to_address: str, token: str, chain_name: str, amount: str):
    """
    Fetch transfer details and prepare the Transfer object.

    :param to_address: The recipient's blockchain address (hex or ENS)
    :param token: The token symbol
    :param chain_name: The blockchain name
    :param amount: The amount to transfer
    :return: JSON representation of the Transfer object
    """
    # Heuristic ENS handling: a recipient that is neither a hex address nor
    # already an ENS name is treated as a bare ENS label.
    if not to_address.startswith("0x") and not to_address.endswith(".eth"):
        to_address += ".eth"
    chain_id = chain_name_to_id(chain_name)

    # Select the best token based on the provided token symbol and chain ID.
    token_info = await select_best_token(token, chain_id)

    # The previous intermediate dict with `.get(..., "1")` fallbacks was dead
    # code: both keys were always present, and "1" was a meaningless default
    # for an address. Pass the values through directly.
    # NOTE(review): `decimals` arrives as a string ("" when the token is
    # unknown) and relies on pydantic int coercion — confirm against callers.
    transfer = Transfer(
        to_address=to_address,
        token=get_token_data_by_key(token_info, "symbol"),
        token_address=get_token_data_by_key(token_info, "address"),
        chain_id=chain_id,
        amount=amount,
        logoURI=get_token_data_by_key(token_info, "logoURI"),
        decimals=get_token_data_by_key(token_info, "decimals"),
    )

    return transfer.model_dump_json()
load_dotenv()

# NOTE(review): module import has side effects — it opens a DB connection and
# creates the record-manager schema. Confirm this is intended before reuse.
record_manager = SQLRecordManager("backend", db_url=settings.DB_CONNECTION)
record_manager.create_schema()


def _clear():
    """Wipe previously indexed documents (incremental cleanup with zero docs)."""
    index([], record_manager, build_vector_store(), cleanup="incremental", source_id_key="id")


def build_index():
    """Index all supported feed sources."""
    indexing_iqwiki()
    indexing_mirror()


def indexing_iqwiki():
    """Index the IQ.Wiki feed."""
    index_feed(fetch_iqwiki_feeds, "iqwiki")


def indexing_mirror():
    """Index the Mirror feed."""
    index_feed(fetch_mirror_feeds, "mirror")


def index_feed(fetch_function, feed_name):
    """
    Page through `fetch_function` for the last 180 days and index every record.

    :param fetch_function: callable(since_ts, until_ts, cursor=...) -> response dict
    :param feed_name: human-readable source name, used for logging only
    """
    since_date = datetime.datetime.now() - datetime.timedelta(days=180)
    curr_date = datetime.datetime.now()
    since_ts = int(since_date.timestamp())
    curr_ts = int(curr_date.timestamp())

    cursor = None
    logger.info(
        f"Starting to index feed '{feed_name}' from " f"{since_date.strftime('%Y-%m-%d %H:%M:%S')} to" f" {curr_date.strftime('%Y-%m-%d %H:%M:%S')}"
    )
    while True:
        resp = fetch_function(since_ts, curr_ts, cursor=cursor)
        # fetch_feeds returns {} after exhausting retries, so use .get() here:
        # the previous `resp["meta"]` raised KeyError on an empty response.
        if resp.get("meta") is None:
            logger.info(f"no meta in response, done with {feed_name}!")
            break
        cursor = resp["meta"]["cursor"]
        logger.info(f"fetched {len(resp['data'])} records from {feed_name}," f" next cursor: {cursor}")

        records = resp.get("data", [])
        if len(records) == 0:
            break

        save_records(records)


def save_records(records):
    """Split each record into chunks and upsert them into the vector store."""
    docs = [build_docs(record) for record in records]
    final_docs = [doc for sublist in docs for doc in sublist]
    # index the documents
    indexing_result = index(
        final_docs,
        record_manager,
        build_vector_store(),
        cleanup="incremental",
        source_id_key="id",
    )
    logger.info(f"Indexing result: {indexing_result}")


text_splitter = CharacterTextSplitter(
    separator="\n\n",
    chunk_size=1000,
    chunk_overlap=200,
    length_function=len,
    is_separator_regex=False,
)


def build_docs(record):
    """Build chunked Documents from one feed record's first action."""
    title = record["actions"][0]["metadata"]["title"]
    body = record["actions"][0]["metadata"]["body"]
    # Title and body separated by a blank line so the splitter cuts on "\n\n".
    txt = f"{title}\n\n{body}"
    chunks = text_splitter.split_text(txt)
    return [Document(page_content=chunk, metadata={"id": record["id"], "full": record}) for chunk in chunks]


if __name__ == "__main__":
    _clear()
    build_index()
def fetch_feeds(platform, since_timestamp, until_timestamp, limit=10, cursor=None, max_retries=3) -> dict:
    """
    Fetch feeds from a platform with retry functionality.

    :param platform: Platform name, e.g. "Mirror" or "IQ.Wiki"
    :param since_timestamp: Start of the time window (unix seconds)
    :param until_timestamp: End of the time window (unix seconds)
    :param limit: Page size
    :param cursor: Pagination cursor from the previous response, if any
    :param max_retries: Number of attempts before giving up
    :return: Parsed JSON response, or {} after all retries failed
    """

    @retry(stop_max_attempt_number=max_retries)
    def _fetch_feeds():
        cursor_str = f"&cursor={cursor}" if cursor else ""
        url = (
            f"{settings.RSS3_DATA_API}/decentralized/platform/{platform}?limit={limit}"
            f"&action_limit=10&since_timestamp={since_timestamp}&type=post&"
            f"until_timestamp={until_timestamp}{cursor_str}"
        )

        # A GET needs no request body; the timeout keeps a stalled API from
        # hanging the indexer forever (requests has no default timeout).
        response = requests.get(url, timeout=30)

        if response.status_code != 200:
            raise Exception(f"Failed to fetch feeds: {response.text}")

        return json.loads(response.text)

    try:
        return _fetch_feeds()
    except Exception as e:
        logger.error(f"Failed to fetch feeds from {platform}: {e}")
        return {}


if __name__ == "__main__":
    feeds = fetch_feeds("Mirror", 0, 0, 1, None, 3)
    print(json.dumps(feeds, ensure_ascii=False))
def enable_auth():
    """Return True when every setting required for OAuth login is configured."""
    auth_settings = [
        settings.CHAINLIT_AUTH_SECRET,
        settings.OAUTH_AUTH0_CLIENT_ID,
        settings.OAUTH_AUTH0_CLIENT_SECRET,
        settings.OAUTH_AUTH0_DOMAIN,
    ]
    # all() takes the iterable directly; the previous `all(arg for arg in ...)`
    # generator wrapper was redundant.
    return all(auth_settings)
def build_token(token_symbol: str, token_address: str):
    """Render a token as "SYMBOL--address" (address lower-cased); native ETH is just "ETH"."""
    if token_symbol == "ETH":
        return token_symbol
    return f"{token_symbol}--{token_address.lower()}"
ConversationBufferMemory 101 | 102 | profile = cl.user_session.get("chat_profile") 103 | provider_key = profile_name_to_provider_key(profile) 104 | llm = get_available_providers()[provider_key] 105 | 106 | setup_runnable(llm) 107 | runnable = cl.user_session.get("runnable") 108 | 109 | msg = cl.Message(content="") 110 | agent_names = [member["name"] for member in members] 111 | 112 | if hasattr(llm, "model") and isinstance(llm,ChatOllama): 113 | model_name = llm.model 114 | supports_tools = SUPPORTED_OLLAMA_MODELS.get(model_name, {}).get("supports_tools", False) 115 | else: 116 | supports_tools = True 117 | 118 | if supports_tools: 119 | async for event in runnable.astream_events( 120 | {"messages": [*memory.chat_memory.messages, HumanMessage(content=message.content)]}, 121 | config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler(stream_final_answer=True)]), 122 | version="v1", 123 | ): 124 | kind = event["event"] 125 | if kind == "on_tool_end": 126 | await handle_tool_end(event, msg) 127 | elif kind == "on_chat_model_stream": # noqa 128 | if event["metadata"]["langgraph_node"] in agent_names: 129 | content = event["data"]["chunk"].content 130 | if content: 131 | if isinstance(event["data"]["chunk"].content ,list): 132 | for chunk in content: 133 | if chunk['type'] == 'text': 134 | await msg.stream_token(chunk['text']) 135 | else: 136 | print(chunk) 137 | else: 138 | await msg.stream_token(content) 139 | else: 140 | # simple conversation handling logic 141 | async for chunk in runnable.astream( 142 | [*memory.chat_memory.messages, HumanMessage(content=message.content)], 143 | config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler(stream_final_answer=True)]), 144 | ): 145 | if chunk.content: 146 | await msg.stream_token(chunk.content) 147 | 148 | await msg.send() 149 | memory.chat_memory.add_user_message(message.content) 150 | memory.chat_memory.add_ai_message(msg.content) 151 | 152 | 153 | async def handle_tool_end(event, msg): 154 | if event["name"] == 
"SwapExecutor": 155 | output = event["data"]["output"] 156 | swap_dict = json.loads(output) 157 | logger.info(swap_dict) 158 | from_chain = swap_dict["from_chain_name"] 159 | to_chain = swap_dict["to_chain_name"] 160 | from_token_ = swap_dict["from_token"] 161 | from_token_address = swap_dict["from_token_address"] 162 | to_token = swap_dict["to_token"] 163 | to_token_address = swap_dict["to_token_address"] 164 | from_amount = swap_dict["amount"] 165 | 166 | widget = ( 167 | f"""""" 171 | ) 172 | await msg.stream_token(widget) 173 | 174 | if event["name"] == "TransferExecutor": 175 | output = event["data"]["output"] 176 | transfer_dict = json.loads(output) 177 | token = transfer_dict["token"] 178 | token_address = transfer_dict["token_address"] 179 | to_address = transfer_dict["to_address"] 180 | amount = transfer_dict["amount"] 181 | 182 | url = f"/widget/transfer?token={token}&tokenAddress={token_address}&amount={amount}&toAddress={to_address}" 183 | 184 | iframe_html = f""" 185 | 187 | """ 188 | await msg.stream_token(iframe_html) 189 | 190 | if event["name"] == "PriceExecutor": 191 | output = event["data"]["output"] 192 | price_dict = json.loads(output) 193 | widget = f"""""" # noqa 194 | await msg.stream_token(widget) 195 | -------------------------------------------------------------------------------- /omniagent/ui/profile.py: -------------------------------------------------------------------------------- 1 | import chainlit as cl 2 | 3 | from omniagent.conf.llm_provider import MODELS_ICONS, SUPPORTED_OLLAMA_MODELS 4 | 5 | provider_key_to_profile_info = { 6 | "gpt-4o": { 7 | "name": "GPT-4o", 8 | "markdown_description": "Using **GPT-4o**.", 9 | "icon": "https://custom.typingmind.com/assets/models/gpt-4.webp", 10 | }, 11 | "gpt-4o-mini": { 12 | "name": "GPT-4o-mini", 13 | "markdown_description": "Using **GPT-4o**.", 14 | "icon": "https://custom.typingmind.com/assets/models/gpt-4.webp", 15 | }, 16 | "gemini-1.5-pro": { 17 | "name": "Gemini 1.5 Pro", 18 | 
def provider_to_profile(provider_key):
    """Map a provider key to a chainlit ChatProfile, or None when unknown."""
    profile_info = provider_key_to_profile_info.get(provider_key)
    if not profile_info:
        return None
    return cl.ChatProfile(
        name=profile_info["name"],
        markdown_description=profile_info["markdown_description"],
        icon=profile_info["icon"],
        starters=[
            cl.Starter(
                label="Swap token",
                message="Swap 1 ETH for USDC on the Ethereum.",
                icon="/public/swap.png",
            ),
            cl.Starter(
                label="Market analysis",
                message="What's the current price of Ethereum and its market trend?",
                icon="/public/market.png",
            ),
            cl.Starter(
                label="Transfer token",
                message="Can you help me transfer 0.1 ETH to 0x742d35Cc6634C0532925a3b844Bc454e4438f44e on the Ethereum network?",
                icon="/public/transfer.png",
            ),
            cl.Starter(
                label="Block explorer",
                message="What's the latest block height on the Ethereum network, and what are the current gas fees?",
                icon="/public/block_chain.png",
            ),
        ],
    )


def profile_name_to_provider_key(name):
    """Reverse lookup: profile display name -> provider key (None when unknown)."""
    # Renamed the lookup table from `map`, which shadowed the builtin.
    name_to_key = {v["name"]: k for k, v in provider_key_to_profile_info.items()}
    return name_to_key.get(name)
44 | """.strip(), 45 | }, 46 | { 47 | "name": BLOCK_EXPLORER, 48 | "description": """ 49 | BlockExplorer: Assists in exploring blockchain data. 50 | 51 | Responsibilities: 52 | 1. Retrieve and explain block height information 53 | 2. Provide transaction details and status updates 54 | 3. Inform about gas fees and other relevant blockchain data 55 | 56 | Present technical information in an easy-to-understand manner, using analogies when helpful. 57 | """.strip(), 58 | }, 59 | { 60 | "name": FEED_EXPLORER, 61 | "description": """ 62 | FeedExplorer: Explores and presents blockchain-related activities and feeds. 63 | 64 | Responsibilities: 65 | 1. Query and analyze various blockchain-related feeds 66 | 2. Retrieve activities from different sources and platforms 67 | 3. Provide insights on DeFi activities across various chains 68 | 69 | Present feed and activity data in a clear, engaging manner, using emojis and blockchain-related puns when appropriate. 70 | """.strip(), 71 | }, 72 | { 73 | "name": RESEARCH_ANALYST, 74 | "description": """ 75 | ResearchAnalyst: Conducts and provides web3 project research. 76 | 77 | Responsibilities: 78 | 1. Gather detailed information on web3 projects 79 | 2. Analyze project progress, team members, and investors 80 | 3. Provide insights on market trends related to specific projects 81 | 82 | Deliver comprehensive yet concise reports, emphasizing key points for investment decisions. 83 | """.strip(), 84 | }, 85 | { 86 | "name": FALLBACK, 87 | "description": """ 88 | FallbackAgent: Handles general queries and conversations. 89 | 90 | Responsibilities: 91 | 1. Answer user queries unrelated to other agents' specialties 92 | 2. Clarify unclear requests and provide general assistance 93 | 3. Maintain conversation continuity when needed 94 | 95 | Respond with versatility and friendliness, guiding users to appropriate specialists when necessary. 
96 | """.strip(), 97 | }, 98 | ] 99 | -------------------------------------------------------------------------------- /omniagent/workflows/supervisor_chain.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | from langchain_core.output_parsers import JsonOutputToolsParser 3 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder 4 | from langchain_core.tools import tool 5 | from langchain_google_genai import ChatGoogleGenerativeAI 6 | from langchain_google_vertexai import ChatVertexAI 7 | from loguru import logger 8 | 9 | from omniagent.workflows.member import AgentRole, members 10 | 11 | load_dotenv() 12 | 13 | 14 | @tool 15 | def route(next_: AgentRole): 16 | """Select the next role.""" 17 | pass 18 | 19 | 20 | def build_supervisor_chain(llm): 21 | system_prompt = """ 22 | You are an AI Agent Supervisor coordinating specialized AI Agents. Your task: 23 | 24 | 1. Analyze user requests and conversation history. 25 | 2. Select the most suitable AI Agent based on their expertise: 26 | 27 | {members} 28 | 29 | 30 | Selection principles: 31 | - Match Agent expertise to current needs. 32 | - Prioritize Agents who can advance the task. 33 | - Choose the Agent for the most comprehensive response. 34 | 35 | Based on these guidelines, select the next AI Agent or end the conversation. 
36 | """ 37 | members_info = ", ".join([f"{member['name']} ({member['description']})" for member in members]) 38 | system_prompt = system_prompt.format(members=members_info) 39 | options = [member["name"] for member in members] 40 | 41 | prompt = ChatPromptTemplate.from_messages( 42 | [ 43 | ("system", system_prompt), 44 | MessagesPlaceholder(variable_name="messages"), 45 | ] 46 | ).partial(options=str(options), members=", ".join([member["name"] for member in members])) 47 | 48 | def extract_next(x): 49 | try: 50 | next__ = x[-1]["args"]["next_"] 51 | except Exception: 52 | logger.warning(f"Error extracting next agent: {x}") 53 | next__ = "fallback_agent" 54 | return {"next": next__} 55 | 56 | def get_tool_choice(llm): 57 | if isinstance(llm, ChatVertexAI) and llm.model_name == "gemini-1.5-flash": 58 | return None 59 | if isinstance(llm, ChatGoogleGenerativeAI): 60 | return None 61 | return "route" 62 | 63 | tool_choice = get_tool_choice(llm) 64 | 65 | return prompt | llm.bind_tools(tools=[route], tool_choice=tool_choice) | JsonOutputToolsParser() | extract_next 66 | -------------------------------------------------------------------------------- /omniagent/workflows/workflow.py: -------------------------------------------------------------------------------- 1 | import operator 2 | from typing import Annotated, Sequence, TypedDict 3 | 4 | from langchain_core.language_models import BaseChatModel 5 | from langchain_core.messages import BaseMessage, HumanMessage 6 | from langchain_ollama import ChatOllama 7 | from langgraph.graph import END, StateGraph 8 | from loguru import logger 9 | 10 | from omniagent.agents.asset_management import build_asset_management_agent 11 | from omniagent.agents.block_explore import build_block_explorer_agent 12 | from omniagent.agents.fallback import build_fallback_agent 13 | from omniagent.agents.feed_explore import build_feed_explorer_agent 14 | from omniagent.agents.research_analyst import build_research_analyst_agent 15 | from 
omniagent.conf.llm_provider import SUPPORTED_OLLAMA_MODELS 16 | 17 | 18 | class AgentState(TypedDict): 19 | messages: Annotated[Sequence[BaseMessage], operator.add] 20 | next: str 21 | 22 | 23 | def create_node(agent, name): 24 | async def run(state): 25 | logger.info(f"Running {name} agent") 26 | result = await agent.ainvoke(state) 27 | return {"messages": [HumanMessage(content=result["output"], name=name)]} 28 | 29 | return run 30 | 31 | 32 | def build_workflow(llm: BaseChatModel): 33 | is_ollama = isinstance(llm, ChatOllama) 34 | if hasattr(llm, "model") and is_ollama: 35 | model_name = llm.model 36 | else: 37 | return build_tool_workflow(llm) 38 | 39 | supports_tools = SUPPORTED_OLLAMA_MODELS.get(model_name, {}).get("supports_tools", False) 40 | 41 | if not supports_tools and is_ollama: 42 | return build_simple_workflow(llm) 43 | else: 44 | return build_tool_workflow(llm) 45 | 46 | 47 | def build_simple_workflow(llm: BaseChatModel): 48 | """Simple conversation workflow without tools""" 49 | return llm 50 | 51 | 52 | def build_tool_workflow(llm: BaseChatModel): 53 | from omniagent.agents.market_analysis import build_market_analysis_agent 54 | from omniagent.workflows.member import members 55 | from omniagent.workflows.supervisor_chain import build_supervisor_chain 56 | 57 | market_analysis_agent_node = create_node(build_market_analysis_agent(llm), "market_analysis_agent") 58 | asset_management_agent_node = create_node(build_asset_management_agent(llm), "asset_management_agent") 59 | block_explorer_agent_node = create_node(build_block_explorer_agent(llm), "block_explorer_agent") 60 | research_analyst_agent_node = create_node(build_research_analyst_agent(llm), "research_analyst_agent") 61 | feed_explorer_agent_node = create_node(build_feed_explorer_agent(llm), "feed_explorer_agent") 62 | 63 | workflow = StateGraph(AgentState) 64 | workflow.add_node("market_analysis_agent", market_analysis_agent_node) 65 | workflow.add_node("asset_management_agent", 
asset_management_agent_node) 66 | workflow.add_node("block_explorer_agent", block_explorer_agent_node) 67 | workflow.add_node("feed_explorer_agent", feed_explorer_agent_node) 68 | workflow.add_node("research_analyst_agent", research_analyst_agent_node) 69 | workflow.add_node("supervisor", build_supervisor_chain(llm)) 70 | workflow.add_node("fallback_agent", build_fallback_agent(llm)) 71 | 72 | member_names = list(map(lambda x: x["name"], members)) 73 | 74 | for member in member_names: 75 | workflow.add_edge(member, END) 76 | 77 | conditional_map = {k: k for k in member_names} 78 | workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map) 79 | workflow.set_entry_point("supervisor") 80 | return workflow.compile() 81 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "OmniAgent" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["Jowo Rinpoche "] 6 | readme = "README.md" 7 | 8 | [tool.poetry.dependencies] 9 | python = "^3.11" 10 | python-dotenv = "^1.0.0" 11 | sse-starlette = "^1.6.5" 12 | loguru = "^0.7.0" 13 | toolz = "^0.12.0" 14 | google-search-results = "^2.4.2" 15 | psycopg = "^3.1.12" 16 | aiohttp = "^3.8.6" 17 | langchain = "0.2.6" 18 | pydantic-settings = "^2.0.3" 19 | gptcache = "^0.1.42" 20 | asyncpg = "^0.28.0" 21 | sqlalchemy = "^2.0.22" 22 | websockets = "^12.0" 23 | aiocache = "^0.12.2" 24 | pytz = "^2023.3.post1" 25 | ccxt = "^4.3.7" 26 | beautifulsoup4 = "^4.12.3" 27 | markdown = "^3.6" 28 | sqlalchemy-utils = "^0.41.2" 29 | retrying = "^1.3.4" 30 | langchain-postgres = "0.0.9" 31 | psycopg2-binary = "^2.9.9" 32 | psycopg-binary = "^3.1.19" 33 | langchain-core = ">=0.2.9,<0.3" 34 | langgraph="0.1.1" 35 | langchain-google-vertexai = "^1.0.6" 36 | langchain-community = "^0.2.6" 37 | langchain-openai = "^0.1.13" 38 | chainlit = "^1.1.305" 39 | greenlet = "^3.0.3" 40 
| langchain-ollama = "^0.1.0" 41 | ollama = "^0.3.1" 42 | pydantic = "^2.8.2" 43 | rss3-dsl-sdk = "^0.4.0" 44 | duckduckgo-search = "^6.2.6" 45 | pytest-xdist = "^3.6.1" 46 | pytest-repeat = "^0.9.3" 47 | allure-pytest = "^2.13.5" 48 | moralis = "^0.1.49" 49 | pytest-asyncio = "^0.23.8" 50 | feedparser = "^6.0.11" 51 | jinja2 = "^3.1.4" 52 | langchain-google-genai = "<2.0.4" 53 | pytest = "^8.3.3" 54 | langchain-anthropic = "0.1.17" 55 | 56 | [tool.poetry.group.dev.dependencies] 57 | ruff = "^0.4.1" 58 | pre-commit = "^3.7.0" 59 | mypy = "^1.10.0" 60 | 61 | [tool.ruff] 62 | line-length = 150 63 | indent-width = 4 64 | exclude = [ 65 | ".bzr", 66 | ".direnv", 67 | ".eggs", 68 | ".git", 69 | ".git-rewrite", 70 | ".hg", 71 | ".ipynb_checkpoints", 72 | ".mypy_cache", 73 | ".nox", 74 | ".pants.d", 75 | ".pyenv", 76 | ".pytest_cache", 77 | ".pytype", 78 | ".ruff_cache", 79 | ".svn", 80 | ".tox", 81 | ".venv", 82 | ".vscode", 83 | "__pypackages__", 84 | "_build", 85 | "buck-out", 86 | "build", 87 | "dist", 88 | "node_modules", 89 | "site-packages", 90 | "venv", 91 | ] 92 | 93 | [tool.ruff.lint.mccabe] 94 | max-complexity = 7 95 | 96 | [tool.ruff.lint] 97 | select = [ 98 | "AIR", 99 | "B", 100 | "C90", 101 | "E", 102 | "F", 103 | "FLY", 104 | "FURB", 105 | "I", 106 | "N", 107 | "PERF", 108 | "RUF", 109 | "SIM", 110 | "UP", 111 | "W" 112 | ] 113 | [tool.ruff.lint.per-file-ignores] 114 | "__init__.py"= ["F401"] 115 | "chainlit_app.py"= ["F811"] 116 | "omniagent/db/database.py"= ["E261"] 117 | "omniagent/service/chat.py"= ["C901"] 118 | "omniagent/service/history.py"= ["E711"] 119 | "omniagent/service/session.py"= ["C901","B904"] 120 | "omniagent/agent/system_prompt.py" =["E501"] 121 | "omniagent/dto/task.py"=["N815"] 122 | "omniagent/experts/__init__.py"=["SIM117"] 123 | "omniagent/experts/swap_expert.py"=["SIM117"] 124 | "omniagent/db/models.py"=["N815"] 125 | 126 | 127 | [tool.ruff.format] 128 | quote-style = "double" 129 | indent-style = "space" 130 | 
docstring-code-format = true 131 | 132 | [tool.mypy] 133 | disallow_any_unimported = true 134 | disallow_untyped_defs = false 135 | no_implicit_optional = true 136 | strict_equality = true 137 | warn_unused_ignores = true 138 | warn_redundant_casts = true 139 | warn_return_any = true 140 | check_untyped_defs = true 141 | show_error_codes = true 142 | disable_error_code = ["arg-type","call-arg","union-attr","operator","unused-ignore","import-untyped","assignment","no-any-return","no-any-unimported","import-not-found","misc"] 143 | 144 | 145 | [build-system] 146 | requires = ["poetry-core"] 147 | build-backend = "poetry.core.masonry.api" 148 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # OmniAgent Framework 2 | 3 | [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) 4 | [![GitHub Stars](https://img.shields.io/github/stars/VividGen/OmniAgent.svg)](https://github.com/VividGen/OmniAgent/stargazers) 5 | [![GitHub Issues](https://img.shields.io/github/issues/VividGen/OmniAgent.svg)](https://github.com/VividGen/OmniAgent/issues) 6 | 7 | OmniAgent is an enterprise-grade AI orchestration framework that revolutionizes Web3 development by seamlessly bridging artificial intelligence with blockchain technologies. Build powerful on-chain AI agents in hours instead of months. 
8 | 9 | ## 🚀 Key Features 10 | 11 | - **Modular Architecture**: Three-layer design with Interpreter, Classifier, and specialized Executors 12 | - **Intelligent Task Routing**: Smart classification system powered by Google Gemma and domain-specific models 13 | - **Plug-and-Play Model Integration**: Easy integration with various AI models 14 | - **Cross-Chain Compatibility**: Seamless interaction with multiple blockchain networks 15 | - **Specialized Executors**: 16 | - DeFi Operations 17 | - Token/NFT Management 18 | - Web3 Knowledge Integration 19 | - Social Data Analysis 20 | 21 | ## 🏗️ Architecture 22 | 23 | ``` 24 | ┌─────────────────┐ 25 | │ User Input │ 26 | └────────┬────────┘ 27 | ▼ 28 | ┌─────────────────┐ 29 | │ Interpreter │ ─── Task Understanding & Parameter Extraction 30 | └────────┬────────┘ 31 | ▼ 32 | ┌─────────────────┐ 33 | │ Classifier │ ─── Intelligent Task Routing 34 | └────────┬────────┘ 35 | ▼ 36 | ┌─────────────────┐ 37 | │ Executor │ ─── Specialized Task Execution 38 | └────────┬────────┘ 39 | ▼ 40 | ┌─────────────────┐ 41 | │ Web3 │ ─── Blockchain & Protocol Interaction 42 | └─────────────────┘ 43 | ``` 44 | 45 | ## 🛠️ Installation 46 | 47 | ```bash 48 | # Clone the repository 49 | git clone https://github.com/VividGen/OmniAgent.git 50 | 51 | # Configure environment 52 | cp .env.example .env 53 | 54 | # Start 55 | docker-compose up -d 56 | ``` 57 | 58 | ## 📦 Quick Start 59 | 60 | ```javascript 61 | const { OmniAgent } = require('omniagent'); 62 | 63 | // Initialize OmniAgent 64 | const agent = new OmniAgent({ 65 | model: 'gemma', 66 | executors: ['defi', 'token', 'social'] 67 | }); 68 | 69 | // Execute a task 70 | const result = await agent.execute({ 71 | task: 'Token swap', 72 | params: { 73 | fromToken: 'ETH', 74 | toToken: 'USDC', 75 | amount: '1.0' 76 | } 77 | }); 78 | ``` 79 | 80 | ## 💡 Use Cases 81 | 82 | - **DeFi Operations**: Token swaps, liquidity provision, yield farming 83 | - **Asset Management**: NFT trading, token transfers, 
portfolio analysis 84 | - **Market Intelligence**: Price tracking, trend analysis, social sentiment 85 | - **Cross-Chain Operations**: Bridge transfers, cross-chain swaps 86 | - **Smart Contract Interaction**: Contract deployment, function calls 87 | 88 | ## 🔧 Configuration 89 | 90 | ```javascript 91 | { 92 | "interpreter": { 93 | "model": "gemma", 94 | "temperature": 0.7 }, 95 | "classifier": { 96 | "model": "codegemma", 97 | "threshold": 0.85 98 | }, 99 | "executors": { 100 | "defi": { 101 | "networks": ["ethereum", "polygon"], 102 | "protocols": ["uniswap", "aave"] 103 | }, 104 | "token": { 105 | "supportedTokens": ["ERC20", "ERC721", "ERC1155"] 106 | } 107 | } 108 | } 109 | ``` 110 | 111 | ## 📚 Documentation 112 | 113 | Comprehensive documentation is available at our documentation site. 114 | 115 | ## 🤝 Contributing 116 | 117 | We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details. 118 | 119 | 1. Fork the repository 120 | 2. Create your feature branch (`git checkout -b feature/AmazingFeature`) 121 | 3. Commit your changes (`git commit -m 'Add some AmazingFeature'`) 122 | 4. Push to the branch (`git push origin feature/AmazingFeature`) 123 | 5. Open a Pull Request 124 | 125 | ## 📄 License 126 | 127 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 
128 | 129 | ## 🌟 Acknowledgments 130 | 131 | - Google Gemma and CodeGemma teams for their excellent models 132 | - The Web3 community for continuous support and feedback 133 | - All contributors who have helped shape OmniAgent 134 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VividGen/CMA/74a1987f6cee3036c30da39c6dd548c5d5e5a305/tests/__init__.py -------------------------------------------------------------------------------- /tests/agent_trajectory/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VividGen/CMA/74a1987f6cee3036c30da39c6dd548c5d5e5a305/tests/agent_trajectory/__init__.py -------------------------------------------------------------------------------- /tests/agent_trajectory/asset_management.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from langchain_core.messages import HumanMessage 3 | from loguru import logger 4 | 5 | from omniagent.agents.asset_management import build_asset_management_agent 6 | from omniagent.conf.llm_provider import get_available_providers 7 | 8 | 9 | @pytest.fixture(scope="module") 10 | def asset_management_agent(request): 11 | model = request.config.getoption("--model") 12 | logger.info(f"using model: {model}") 13 | llm = get_available_providers()[model] 14 | agent = build_asset_management_agent(llm) 15 | return agent 16 | 17 | 18 | @pytest.mark.asyncio 19 | async def test_swap_eth_to_usdt(asset_management_agent): 20 | events = asset_management_agent.astream_events({"messages": [HumanMessage(content="Can you swap 20 eth to usdt ?", name="human")]}, version="v1") 21 | 22 | on_tool_end_count = 0 23 | 24 | async for event in events: 25 | if event["event"] == "on_tool_end": 26 | on_tool_end_count += 1 27 | event_data_input_ = 
event["data"]["input"] 28 | assert event["name"] == "SwapExecutor" 29 | assert event_data_input_["from_token"] == "ETH" 30 | assert event_data_input_["to_token"] == "USDT" 31 | assert event_data_input_["amount"] == "20" 32 | 33 | assert on_tool_end_count > 0, "The on_tool_end event did not occur in test_swap_eth_to_usdt" 34 | 35 | 36 | @pytest.mark.asyncio 37 | async def test_query_user_token_balance(asset_management_agent): 38 | events = asset_management_agent.astream_events( 39 | {"messages": [HumanMessage(content="Can you check 0x33c0814654fa367ce67d8531026eb4481290e63c eth balance ?", name="human")]}, 40 | version="v1", 41 | ) 42 | 43 | on_tool_end_count = 0 44 | 45 | async for event in events: 46 | if event["event"] == "on_tool_end": 47 | on_tool_end_count += 1 48 | event_data_input_ = event["data"]["input"] 49 | assert event["name"] == "TokenBalanceExecutor" 50 | assert event_data_input_["wallet_address"] == "0x33c0814654fa367ce67d8531026eb4481290e63c" 51 | assert event_data_input_["chain"] == "eth" 52 | 53 | assert on_tool_end_count > 0, "The on_tool_end event did not occur in test_query_user_token_balance" 54 | 55 | 56 | @pytest.mark.asyncio 57 | async def test_query_user_nft_holdings(asset_management_agent): 58 | events = asset_management_agent.astream_events( 59 | {"messages": [HumanMessage(content="Can you check 0x33c0814654fa367ce67d8531026eb4481290e63c nft holdings ?", name="human")]}, 60 | version="v1", 61 | ) 62 | 63 | on_tool_end_count = 0 64 | 65 | async for event in events: 66 | if event["event"] == "on_tool_end": 67 | on_tool_end_count += 1 68 | event_data_input_ = event["data"]["input"] 69 | assert event["name"] == "NFTBalanceExecutor" 70 | assert event_data_input_["wallet_address"] == "0x33c0814654fa367ce67d8531026eb4481290e63c" 71 | assert event_data_input_["chain"] == "eth" 72 | 73 | assert on_tool_end_count > 0, "The on_tool_end event did not occur in test_query_user_nft_holdings" 74 | 75 | 76 | @pytest.mark.asyncio 77 | async def 
test_transfer_eth(asset_management_agent): 78 | events = asset_management_agent.astream_events( 79 | {"messages": [HumanMessage(content="Can you transfer 0.5 ETH to 0x742d35Cc6634C0532925a3b844Bc454e4438f44e?", name="human")]}, 80 | version="v1", 81 | ) 82 | 83 | on_tool_end_count = 0 84 | 85 | async for event in events: 86 | if event["event"] == "on_tool_end": 87 | on_tool_end_count += 1 88 | event_data_input_ = event["data"]["input"] 89 | assert event["name"] == "TransferExecutor" 90 | assert event_data_input_["to_address"] == "0x742d35Cc6634C0532925a3b844Bc454e4438f44e" 91 | assert event_data_input_["token"] == "ETH" 92 | assert event_data_input_["amount"] == "0.5" 93 | 94 | assert on_tool_end_count > 0, "The on_tool_end event did not occur in test_transfer_eth" 95 | -------------------------------------------------------------------------------- /tests/agent_trajectory/block_explore.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from langchain_core.messages import HumanMessage 3 | from loguru import logger 4 | 5 | from omniagent.agents.block_explore import build_block_explorer_agent 6 | from omniagent.conf.llm_provider import get_available_providers 7 | 8 | 9 | @pytest.fixture(scope="module") 10 | def block_explorer_agent(request): 11 | model = request.config.getoption("--model") 12 | logger.info(f"using model: {model}") 13 | 14 | llm = get_available_providers()[model] 15 | agent = build_block_explorer_agent(llm) 16 | return agent 17 | 18 | 19 | @pytest.mark.asyncio 20 | async def test_query_block_height(block_explorer_agent): 21 | events = block_explorer_agent.astream_events( 22 | {"messages": [HumanMessage(content="What's the latest block height on the Ethereum network?", name="human")]}, version="v1" 23 | ) 24 | 25 | tool_end_count = 0 26 | async for event in events: 27 | if event["event"] == "on_tool_end": 28 | tool_end_count += 1 29 | event_data_input_ = event["data"]["input"] 30 | assert 
event["name"] == "BlockChainStatExecutor" 31 | assert event_data_input_["chain"] == "ethereum" 32 | 33 | assert tool_end_count > 0, "The on_tool_end event did not occur" 34 | 35 | 36 | if __name__ == "__main__": 37 | pytest.main() 38 | -------------------------------------------------------------------------------- /tests/agent_trajectory/feed_explore.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from langchain_core.messages import HumanMessage 3 | from loguru import logger 4 | 5 | from omniagent.agents.feed_explore import build_feed_explorer_agent 6 | from omniagent.conf.llm_provider import get_available_providers 7 | 8 | 9 | @pytest.fixture(scope="module") 10 | def feed_explorer_agent(request): 11 | model = request.config.getoption("--model") 12 | logger.info(f"using model: {model}") 13 | llm = get_available_providers()[model] 14 | agent = build_feed_explorer_agent(llm) 15 | return agent 16 | 17 | 18 | @pytest.mark.asyncio 19 | async def test_query_social_activities(feed_explorer_agent): 20 | events = feed_explorer_agent.astream_events( 21 | {"messages": [HumanMessage(content="What are the recent activities for vitalik.eth?", name="human")]}, version="v1" 22 | ) 23 | 24 | tool_end_count = 0 25 | async for event in events: 26 | if event["event"] == "on_tool_end": 27 | tool_end_count += 1 28 | event_data_input_ = event["data"]["input"] 29 | assert event["name"] == "FeedExecutor" 30 | assert "address" in event_data_input_ 31 | assert event_data_input_["address"] == "vitalik.eth" 32 | assert "type" in event_data_input_ 33 | assert event_data_input_["type"] in ["all", "post", "comment", "share"] 34 | 35 | assert tool_end_count > 0, "The on_tool_end event did not occur" 36 | 37 | 38 | @pytest.mark.asyncio 39 | async def test_query_specific_activity_type(feed_explorer_agent): 40 | events = feed_explorer_agent.astream_events( 41 | {"messages": [HumanMessage(content="Show me recent posts from vitalik.eth", 
name="human")]}, 42 | version="v1", 43 | ) 44 | 45 | tool_end_count = 0 46 | async for event in events: 47 | if event["event"] == "on_tool_end": 48 | tool_end_count += 1 49 | event_data_input_ = event["data"]["input"] 50 | assert event["name"] == "FeedExecutor" 51 | assert event_data_input_["address"] == "vitalik.eth" 52 | assert event_data_input_["type"] == "post" 53 | 54 | assert tool_end_count > 0, "The on_tool_end event did not occur" 55 | 56 | 57 | @pytest.mark.asyncio 58 | async def test_query_telegram_news(feed_explorer_agent): 59 | events = feed_explorer_agent.astream_events( 60 | {"messages": [HumanMessage(content="Show me the latest crypto news", name="human")]}, 61 | version="v1", 62 | ) 63 | 64 | tool_end_count = 0 65 | async for event in events: 66 | if event["event"] == "on_tool_end": 67 | tool_end_count += 1 68 | assert event["name"] == "TelegramNewsExecutor" 69 | 70 | assert tool_end_count > 0, "The on_tool_end event did not occur" 71 | 72 | 73 | @pytest.mark.asyncio 74 | async def test_query_limited_news(feed_explorer_agent): 75 | events = feed_explorer_agent.astream_events( 76 | {"messages": [HumanMessage(content="Get me the latest 5 news updates from crypto channels", name="human")]}, 77 | version="v1", 78 | ) 79 | 80 | tool_end_count = 0 81 | async for event in events: 82 | if event["event"] == "on_tool_end": 83 | tool_end_count += 1 84 | assert event["name"] == "TelegramNewsExecutor" 85 | event_data_input_ = event["data"]["input"] 86 | assert event_data_input_["limit"] == 5 87 | 88 | assert tool_end_count > 0, "The on_tool_end event did not occur" 89 | 90 | 91 | if __name__ == "__main__": 92 | pytest.main() 93 | -------------------------------------------------------------------------------- /tests/agent_trajectory/market_analysis.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from langchain_core.messages import HumanMessage 3 | from loguru import logger 4 | 5 | from 
omniagent.agents.market_analysis import build_market_analysis_agent 6 | from omniagent.conf.llm_provider import get_available_providers 7 | 8 | 9 | @pytest.fixture(scope="module") 10 | def market_analysis_agent(request): 11 | model = request.config.getoption("--model") 12 | logger.info(f"using model: {model}") 13 | 14 | llm = get_available_providers()[model] 15 | agent = build_market_analysis_agent(llm) 16 | return agent 17 | 18 | 19 | @pytest.mark.asyncio 20 | async def test_query_btc_price(market_analysis_agent): 21 | events = market_analysis_agent.astream_events({"messages": [HumanMessage(content="What's BTC price now?", name="human")]}, version="v1") 22 | 23 | tool_end_count = 0 24 | async for event in events: 25 | if event["event"] == "on_tool_end": 26 | tool_end_count += 1 27 | event_data_input_ = event["data"]["input"] 28 | assert event["name"] == "PriceExecutor" 29 | assert event_data_input_["token"] == "BTC" 30 | 31 | assert tool_end_count > 0, "The on_tool_end event did not occur" 32 | 33 | 34 | @pytest.mark.asyncio 35 | async def test_query_eth_price(market_analysis_agent): 36 | events = market_analysis_agent.astream_events( 37 | {"messages": [HumanMessage(content="What's the current price of Ethereum?", name="human")]}, version="v1" 38 | ) 39 | 40 | tool_end_count = 0 41 | async for event in events: 42 | if event["event"] == "on_tool_end": 43 | tool_end_count += 1 44 | event_data_input_ = event["data"]["input"] 45 | assert event["name"] == "PriceExecutor" 46 | assert event_data_input_["token"] == "ETH" 47 | 48 | assert tool_end_count > 0, "The on_tool_end event did not occur" 49 | 50 | 51 | @pytest.mark.asyncio 52 | async def test_query_funding_rate(market_analysis_agent): 53 | events = market_analysis_agent.astream_events( 54 | {"messages": [HumanMessage(content="What's the funding rate for BTC in binance?", name="human")]}, version="v1" 55 | ) 56 | 57 | tool_end_count = 0 58 | async for event in events: 59 | if event["event"] == "on_tool_end": 60 | 
tool_end_count += 1 61 | event_data_input_ = event["data"]["input"] 62 | assert event["name"] == "FundingRateExecutor" 63 | assert event_data_input_["exchange"] == "binance" 64 | assert event_data_input_["symbol"] == "BTC/USDT" 65 | 66 | assert tool_end_count > 0, "The on_tool_end event did not occur" 67 | 68 | 69 | @pytest.mark.asyncio 70 | async def test_query_nft_ranking(market_analysis_agent): 71 | events = market_analysis_agent.astream_events( 72 | {"messages": [HumanMessage(content="What are the top 5 NFT collections?", name="human")]}, version="v1" 73 | ) 74 | 75 | tool_end_count = 0 76 | async for event in events: 77 | if event["event"] == "on_tool_end": 78 | tool_end_count += 1 79 | assert event["name"] == "NFTRankingExecutor" 80 | 81 | assert tool_end_count > 0, "The on_tool_end event did not occur" 82 | 83 | 84 | @pytest.mark.asyncio 85 | async def test_query_coin_market(market_analysis_agent): 86 | events = market_analysis_agent.astream_events( 87 | {"messages": [HumanMessage(content="Give me the market cap for Bitcoin", name="human")]}, version="v1" 88 | ) 89 | 90 | tool_end_count = 0 91 | async for event in events: 92 | if event["event"] == "on_tool_end": 93 | tool_end_count += 1 94 | event_data_input_ = event["data"]["input"] 95 | assert event["name"] == "CoinMarketExecutor" 96 | assert event_data_input_["order"] == "market_cap_desc" 97 | 98 | assert tool_end_count > 0, "The on_tool_end event did not occur" 99 | 100 | 101 | if __name__ == "__main__": 102 | pytest.main() 103 | -------------------------------------------------------------------------------- /tests/agent_trajectory/research_analyst.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from langchain_core.messages import HumanMessage 3 | from loguru import logger 4 | 5 | from omniagent.agents.research_analyst import build_research_analyst_agent 6 | from omniagent.conf.llm_provider import get_available_providers 7 | 8 | 9 | 
@pytest.fixture(scope="module")
def research_analyst_agent(request):
    """Build a research-analyst agent once per module, using the --model CLI option."""
    model = request.config.getoption("--model")
    logger.info(f"using model: {model}")

    llm = get_available_providers()[model]
    agent = build_research_analyst_agent(llm)
    return agent


@pytest.mark.asyncio
async def test_query_project(research_analyst_agent):
    """Asking about a known project must invoke ProjectExecutor with the project keyword."""
    events = research_analyst_agent.astream_events(
        {"messages": [HumanMessage(content="Do you know anything about RSS3?", name="human")]}, version="v1"
    )

    tool_end_count = 0

    # Walk the event stream; every tool invocation must be ProjectExecutor
    # called with keyword "RSS3".
    async for event in events:
        if event["event"] == "on_tool_end":
            event_data_input_ = event["data"]["input"]
            assert event["name"] == "ProjectExecutor"
            assert event_data_input_["keyword"] == "RSS3"
            tool_end_count += 1

    assert tool_end_count > 0, "The on_tool_end event did not occur"


if __name__ == "__main__":
    pytest.main()
--------------------------------------------------------------------------------
/tests/compatible-models.mdx:
--------------------------------------------------------------------------------
---
id: compatible-models
title: Compatible Models
description: Learn about the models compatible with OmniAgent.
---

## Compatible Models

Learn what models are compatible with OmniAgent.

OmniAgent is compatible with many models out of the box.

We rate the models by conducting various tests and manually reviewing the output. These scores give a **rough indication** of how well the model can handle queries involving Open Web elements like blockchain, but they **do not** reflect the overall capability of the model.

For more information on what's included by the Open Web, see:



Our vision of the next generation Internet.

### Open Source Models

> Theoretically, all open source models are compatible.
>
> Models with tool calling capability tend to have better overall performance.

You may try all models supported by Ollama, use the exact model name from https://ollama.com/library.

Learn more:



| Name | Score (out of 100) | Tool Call Support |
|------|-------------------|---------------------|
| mistral-nemo | 88 | ✅ |
| llama3.2 | 80 | ✅ |
| darkmoon/olmo:7B-instruct-q6-k | 0 | ❌ |

### Proprietary Models

| Name | Score (out of 100) | Tool Call Support |
|------|-------------------|---------------------|
| gpt-4o-mini | 100 | ✅ |
| gemini-1.5-flash | 100 | ✅ |
| gpt-4o | 96 | ✅ |
| gemini-1.5-pro | 96 | ✅ |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
def pytest_addoption(parser):
    # Registers the --model CLI option consumed by the per-module agent fixtures.
    parser.addoption(
        "--model",
        action="store",
        default="llama3.2",
        help="Model to use for testing, e.g.
gemini-1.5-pro, gemini-1.5-flash, " "llama3.1:latest", 7 | ) 8 | -------------------------------------------------------------------------------- /tests/gen_benchmark_html_report.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from dotenv import load_dotenv 4 | from langchain.chat_models import ChatOpenAI 5 | from langchain_core.output_parsers import StrOutputParser 6 | from langchain_core.prompts import ChatPromptTemplate 7 | from langchain_ollama import ChatOllama 8 | 9 | load_dotenv() 10 | 11 | 12 | def measure_proprietary_models_metrics(model_name: str, num_samples: int = 3) -> tuple[float, float]: 13 | prompt = ChatPromptTemplate.from_messages([ 14 | ("system", "You are a helpful AI assistant."), 15 | ("user", "Tell me a short story about a cat.") 16 | ]) 17 | 18 | model = ChatOpenAI( 19 | model_name=model_name, 20 | streaming=True, 21 | temperature=0 22 | ) 23 | 24 | chain = prompt | model | StrOutputParser() 25 | 26 | latencies = [] 27 | token_rates = [] 28 | 29 | for _ in range(num_samples): 30 | start_time = time.time() 31 | first_token_received = False 32 | token_count = 0 33 | 34 | for chunk in chain.stream({}): 35 | current_time = time.time() 36 | if not first_token_received: 37 | latency = (current_time - start_time) * 1000 38 | latencies.append(latency) 39 | first_token_received = True 40 | first_token_time = current_time 41 | 42 | token_count += 1 43 | 44 | total_time = time.time() - first_token_time 45 | if total_time > 0: 46 | tokens_per_second = token_count / total_time 47 | token_rates.append(tokens_per_second) 48 | 49 | time.sleep(1) 50 | 51 | avg_latency = sum(latencies) / len(latencies) 52 | avg_token_rate = sum(token_rates) / len(token_rates) 53 | return avg_latency, avg_token_rate 54 | 55 | 56 | def measure_opensource_models_metrics(model_name: str, num_samples: int = 3) -> tuple[float, float]: 57 | prompt = ChatPromptTemplate.from_messages([ 58 | ("system", "You are a helpful AI 
assistant."), 59 | ("user", "Tell me a short story about a cat.") 60 | ]) 61 | 62 | model = ChatOllama( 63 | model=model_name, 64 | streaming=True, 65 | ) 66 | 67 | chain = prompt | model | StrOutputParser() 68 | 69 | latencies = [] 70 | token_rates = [] 71 | 72 | for _ in range(num_samples): 73 | start_time = time.time() 74 | first_token_received = False 75 | token_count = 0 76 | 77 | for chunk in chain.stream({}): 78 | current_time = time.time() 79 | if not first_token_received: 80 | latency = (current_time - start_time) * 1000 81 | latencies.append(latency) 82 | first_token_received = True 83 | first_token_time = current_time 84 | 85 | token_count += 1 86 | 87 | total_time = time.time() - first_token_time 88 | if total_time > 0: 89 | tokens_per_second = token_count / total_time 90 | token_rates.append(tokens_per_second) 91 | 92 | time.sleep(1) 93 | 94 | avg_latency = sum(latencies) / len(latencies) 95 | avg_token_rate = sum(token_rates) / len(token_rates) 96 | return avg_latency, avg_token_rate 97 | 98 | 99 | 100 | 101 | if __name__ == '__main__': 102 | model_name = "llama3.2" 103 | avg_latency, avg_token_rate = measure_opensource_models_metrics(model_name) 104 | print(f"Average first token latency for {model_name}: {avg_latency:.2f} ms") 105 | print(f"Average token output rate for {model_name}: {avg_token_rate:.2f} tokens/sec") 106 | -------------------------------------------------------------------------------- /tests/generate_benchmark_report.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | import pytest 5 | from jinja2 import Environment, FileSystemLoader 6 | from loguru import logger 7 | 8 | # Global model configurations 9 | PROPRIETARY_MODELS = [ 10 | {"name": "gpt-4o-mini", "function_call_support": True}, 11 | {"name": "gpt-4o", "function_call_support": True}, 12 | {"name": "gemini-1.5-flash", "function_call_support": True}, 13 | {"name": "gemini-1.5-pro", "function_call_support": 
True}, 14 | ] 15 | 16 | OPENSOURCE_MODELS = [ 17 | {"name": "qwen2", "function_call_support": True}, 18 | {"name": "mistral", "function_call_support": True}, 19 | {"name": "qwen2.5", "function_call_support": True}, 20 | {"name": "llama3.1", "function_call_support": True}, 21 | {"name": "llama3.2", "function_call_support": True}, 22 | {"name": "mistral-nemo", "function_call_support": True}, 23 | ] 24 | 25 | from gen_benchmark_html_report import measure_proprietary_models_metrics, measure_opensource_models_metrics 26 | 27 | 28 | class TestStats: 29 | def __init__(self): 30 | self.passed = 0 31 | self.failed = 0 32 | self.skipped = 0 33 | self.errors = 0 34 | 35 | def pytest_runtest_logreport(self, report): 36 | if report.when == 'call': 37 | if report.passed: 38 | self.passed += 1 39 | elif report.failed: 40 | self.failed += 1 41 | elif report.skipped: 42 | self.skipped += 1 43 | elif report.when == 'setup' and report.outcome == 'error': 44 | self.errors += 1 45 | 46 | def calculate_model_score(self): 47 | total_tests = self.passed + self.failed 48 | if total_tests == 0: 49 | return 0 50 | score = (self.passed / total_tests) * 100 51 | return round(score) 52 | 53 | def pytest_terminal_summary(self, terminalreporter, exitstatus, config): 54 | print(f"Passed: {self.passed}") 55 | print(f"Failed: {self.failed}") 56 | print(f"Skipped: {self.skipped}") 57 | print(f"Errors: {self.errors}") 58 | 59 | 60 | def run_model_tests(model_name): 61 | print(f"\nTesting model: {model_name}") 62 | stats = TestStats() 63 | test_files = ["supervisor_chain.py"] \ 64 | # + glob.glob("agent_trajectory/*.py") 65 | pytest.main(["--count=1", "-n", "11"] + test_files + [f"--model={model_name}"] + sys.argv[1:], plugins=[stats]) 66 | return stats.calculate_model_score() 67 | 68 | 69 | def bool_to_emoji(value): 70 | return "✅" if value else "❌" 71 | 72 | 73 | def generate_benchmark_report(proprietary_results, opensource_results): 74 | """Generate HTML benchmark report for model performance 
results. 75 | 76 | Args: 77 | proprietary_results (dict): Results for proprietary models containing tuples of (score, first_token_latency, token_rate) 78 | opensource_results (dict): Results for open source models containing tuples of (score, first_token_latency, token_rate) 79 | """ 80 | # Convert results format and sort by score 81 | proprietary_models = [] 82 | for model in PROPRIETARY_MODELS: 83 | if model['name'] in proprietary_results: 84 | score, latency, token_rate = proprietary_results[model['name']] 85 | proprietary_models.append({ 86 | 'name': model['name'], 87 | 'score': score, 88 | 'first_token_latency': f"{latency:.2f}ms", 89 | 'token_rate': f"{token_rate:.1f} tokens/sec", 90 | 'function_call_support': bool_to_emoji(model['function_call_support']) 91 | }) 92 | proprietary_models.sort(key=lambda x: x['score'], reverse=True) 93 | 94 | open_source_models = [] 95 | for model in OPENSOURCE_MODELS: 96 | if model['name'] in opensource_results: 97 | score, latency, token_rate = opensource_results[model['name']] 98 | open_source_models.append({ 99 | 'name': model['name'], 100 | 'score': score, 101 | 'first_token_latency': f"{latency:.2f}ms", 102 | 'token_rate': f"{token_rate:.1f} tokens/sec", 103 | 'function_call_support': bool_to_emoji(model['function_call_support']) 104 | }) 105 | open_source_models.sort(key=lambda x: x['score'], reverse=True) 106 | 107 | # Set up template environment 108 | env = Environment(loader=FileSystemLoader('templates')) 109 | 110 | # Generate HTML benchmark report 111 | html_template = env.get_template('benchmark.html.j2') 112 | html_output = html_template.render( 113 | open_source_models=open_source_models, 114 | proprietary_models=proprietary_models 115 | ) 116 | 117 | # Create reports directory if it doesn't exist 118 | os.makedirs('reports', exist_ok=True) 119 | 120 | # Save HTML report 121 | with open('reports/benchmark.html', 'w') as f: 122 | f.write(html_output) 123 | 124 | 125 | def main(): 126 | proprietary_results = {} 127 
| opensource_results = {} 128 | 129 | for model in PROPRIETARY_MODELS: 130 | score = run_model_tests(model['name']) 131 | latency, token_rate = measure_proprietary_models_metrics(model['name']) 132 | logger.info(f"First token latency for {model['name']}: {latency:.2f}ms") 133 | logger.info(f"Token output rate for {model['name']}: {token_rate:.1f} tokens/sec") 134 | proprietary_results[model['name']] = (score, latency, token_rate) 135 | 136 | for model in OPENSOURCE_MODELS: 137 | score = run_model_tests(model['name']) 138 | latency, token_rate = measure_opensource_models_metrics(model['name']) 139 | logger.info(f"First token latency for {model['name']}: {latency:.2f}ms") 140 | logger.info(f"Token output rate for {model['name']}: {token_rate:.1f} tokens/sec") 141 | opensource_results[model['name']] = (score, latency, token_rate) 142 | 143 | generate_benchmark_report(proprietary_results, opensource_results) 144 | print("Benchmark report generated successfully at reports/benchmark.html") 145 | 146 | 147 | if __name__ == "__main__": 148 | main() 149 | -------------------------------------------------------------------------------- /tests/openai-api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VividGen/CMA/74a1987f6cee3036c30da39c6dd548c5d5e5a305/tests/openai-api/__init__.py -------------------------------------------------------------------------------- /tests/openai-api/example.sh: -------------------------------------------------------------------------------- 1 | curl http://localhost:8000/v1/chat/completions \ 2 | -H "Content-Type: application/json" \ 3 | -d '{ 4 | "model": "gpt-4o", 5 | "messages": [ 6 | { 7 | "role": "user", 8 | "content": "Hello!" 
9 | } 10 | ] 11 | }' 12 | 13 | 14 | 15 | 16 | curl http://localhost:8000/v1/chat/completions \ 17 | -H "Content-Type: application/json" \ 18 | -d '{ 19 | "model": "gpt-4o", 20 | "stream": "true", 21 | "messages": [ 22 | { 23 | "role": "user", 24 | "content": "btc price?" 25 | } 26 | ] 27 | }' 28 | -------------------------------------------------------------------------------- /tests/run_test.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import sys 3 | 4 | import pytest 5 | from jinja2 import Environment, FileSystemLoader 6 | 7 | # Global model configurations 8 | PROPRIETARY_MODELS = [ 9 | {"name": "gpt-4o-mini", "function_call_support": True}, 10 | {"name": "gpt-4o", "function_call_support": True}, 11 | {"name": "gemini-1.5-flash", "function_call_support": True}, 12 | {"name": "gemini-1.5-pro", "function_call_support": True}, 13 | {"name": "claude-3-5-sonnet", "function_call_support": True}, 14 | ] 15 | 16 | OPENSOURCE_MODELS = [ 17 | {"name": "qwen2", "function_call_support": True, "parameters": "7B"}, 18 | {"name": "mistral", "function_call_support": True, "parameters": "7B"}, 19 | {"name": "qwen2.5", "function_call_support": True, "parameters": "7B"}, 20 | {"name": "llama3.1", "function_call_support": True, "parameters": "8B"}, 21 | {"name": "llama3.2", "function_call_support": True, "parameters": "3B"}, 22 | {"name": "mistral-nemo", "function_call_support": True, "parameters": "12B"}, 23 | {"name": "olmo", "function_call_support": False, "parameters": "7B"}, 24 | {"name": "gemma", "function_call_support": False, "parameters": "7B"}, 25 | {"name": "llava", "function_call_support": False, "parameters": "13B"}, 26 | {"name": "deepseek-coder-v2", "function_call_support": False, "parameters": "16B"} 27 | ] 28 | 29 | 30 | class TestStats: 31 | def __init__(self): 32 | self.passed = 0 33 | self.failed = 0 34 | self.skipped = 0 35 | self.errors = 0 36 | 37 | def pytest_runtest_logreport(self, report): 38 | if 
report.when == 'call': 39 | if report.passed: 40 | self.passed += 1 41 | elif report.failed: 42 | self.failed += 1 43 | elif report.skipped: 44 | self.skipped += 1 45 | elif report.when == 'setup' and report.outcome == 'error': 46 | self.errors += 1 47 | 48 | def calculate_model_score(self): 49 | total_tests = self.passed + self.failed 50 | if total_tests == 0: 51 | return 0 52 | score = (self.passed / total_tests) * 100 53 | return round(score) 54 | 55 | def pytest_terminal_summary(self, terminalreporter, exitstatus, config): 56 | print(f"Passed: {self.passed}") 57 | print(f"Failed: {self.failed}") 58 | print(f"Skipped: {self.skipped}") 59 | print(f"Errors: {self.errors}") 60 | 61 | 62 | def generate_model_report(proprietary_results, opensource_results): 63 | def bool_to_emoji(value): 64 | return "✅" if value else "❌" 65 | 66 | # Convert results format and sort by score 67 | proprietary_models = [] 68 | for model in PROPRIETARY_MODELS: 69 | if model['name'] in proprietary_results: 70 | proprietary_models.append({ 71 | 'name': model['name'], 72 | 'score': proprietary_results[model['name']], 73 | 'function_call_support': bool_to_emoji(model['function_call_support']) 74 | }) 75 | 76 | open_source_models = [] 77 | for model in OPENSOURCE_MODELS: 78 | if model['name'] in opensource_results: 79 | open_source_models.append({ 80 | 'name': model['name'], 81 | 'score': opensource_results[model['name']], 82 | 'function_call_support': bool_to_emoji(model['function_call_support']), 83 | 'parameters': model['parameters'] 84 | }) 85 | 86 | # Sort models, putting '-' scores at the end 87 | proprietary_models.sort( 88 | key=lambda x: float('-inf') if x['score'] == '-' else x['score'], 89 | reverse=True 90 | ) 91 | open_source_models.sort( 92 | key=lambda x: float('-inf') if x['score'] == '-' else x['score'], 93 | reverse=True 94 | ) 95 | 96 | # Set up template environment 97 | env = Environment(loader=FileSystemLoader('templates')) 98 | template = 
env.get_template('compatible-models.mdx.j2') 99 | 100 | # Render template 101 | output = template.render( 102 | open_source_models=open_source_models, 103 | proprietary_models=proprietary_models 104 | ) 105 | 106 | # Save to file 107 | with open('compatible-models.mdx', 'w') as f: 108 | f.write(output) 109 | 110 | 111 | def run_model_tests(model_name): 112 | print(f"\nTesting model: {model_name}") 113 | stats = TestStats() 114 | test_files = ["supervisor_chain.py"] \ 115 | + glob.glob("agent_trajectory/*.py") 116 | pytest.main(["--count=3", "-n", "11"] + test_files + [f"--model={model_name}"] + sys.argv[1:], plugins=[stats]) 117 | return stats.calculate_model_score() 118 | 119 | 120 | def run_all_tests(): 121 | proprietary_results = {} 122 | opensource_results = {} 123 | 124 | # Test proprietary models 125 | for model in PROPRIETARY_MODELS: 126 | if model['function_call_support']: 127 | proprietary_results[model['name']] = run_model_tests(model['name']) 128 | else: 129 | proprietary_results[model['name']] = '-' 130 | 131 | # Test open source models 132 | for model in OPENSOURCE_MODELS: 133 | if model['function_call_support']: 134 | opensource_results[model['name']] = run_model_tests(model['name']) 135 | else: 136 | opensource_results[model['name']] = '-' 137 | 138 | # Generate report 139 | generate_model_report(proprietary_results, opensource_results) 140 | 141 | 142 | if __name__ == "__main__": 143 | run_all_tests() 144 | -------------------------------------------------------------------------------- /tests/supervisor_chain.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from langchain_core.messages import HumanMessage 3 | from loguru import logger 4 | 5 | from omniagent.conf.llm_provider import get_available_providers 6 | from omniagent.workflows.supervisor_chain import build_supervisor_chain 7 | 8 | 9 | def next_role(supervisor_chain, query) -> str: 10 | resp = supervisor_chain.invoke({"messages": 
[HumanMessage(content=query, name="human")]})
    logger.info(f"response: {resp}")
    return resp["next"]


@pytest.fixture(scope="module")
def supervisor_chain(request):
    """Build the supervisor routing chain once per module, using the --model CLI option."""
    model = request.config.getoption("--model")
    logger.info(f"using model: {model}")
    llm = get_available_providers()[model]
    return build_supervisor_chain(llm)


# Each query below must be routed by the supervisor to the named specialist agent.
@pytest.mark.parametrize(
    "query,expected_role",
    [
        ("What's the current price of Ethereum and its market trend?", "market_analysis_agent"),
        ("Can you check my ETH balance and show me how to swap some for USDC?", "asset_management_agent"),
        ("swap 1 eth to usdt on ethereum.", "asset_management_agent"),
        ("Can you help me transfer 0.5 ETH to 0x742d35Cc6634C0532925a3b844Bc454e4438f44e on the Ethereum network?", "asset_management_agent"),
        ("What's the latest block height on the Ethereum network, and what are the current gas fees?", "block_explorer_agent"),
        (
            "Can you provide a detailed analysis of the Uniswap project, including its recent developments and market position?",
            "research_analyst_agent",
        ),
        ("What's the weather like today in New York?", "fallback_agent"),
        ("What are the recent DeFi activities for the address 0x742d35Cc6634C0532925a3b844Bc454e4438f44e?", "feed_explorer_agent"),
        ("Show me the latest social interactions for vitalik.eth on Farcaster.", "feed_explorer_agent"),
        ("What are the most recent activities of vitalik.eth from the Uniswap on Ethereum?", "feed_explorer_agent"),
    ],
)
def test_next_role(supervisor_chain, query, expected_role):
    """The supervisor must dispatch each query to the expected specialist agent."""
    result = next_role(supervisor_chain, query)
    assert result == expected_role, f"Expected {expected_role}, but got {result} for query: {query}"


if __name__ == "__main__":
    pytest.main()
--------------------------------------------------------------------------------
/tests/templates/benchmark.html.j2:
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Model Intelligence Metrics 6 | 7 | 143 | 144 | 145 |
146 |

Model Intelligence Metrics

147 | 148 |
149 |
150 | 151 |
152 |
153 | 154 |
155 |
156 | 157 |
158 |
159 | 160 |
161 |

Proprietary Models

162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | {% for model in proprietary_models %} 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | {% endfor %} 179 |
ModelScoreFirst Token LatencyToken Output RateFunction Call Support
{{ model.name }}{{ model.score }}%{{ model.first_token_latency }}{{ model.token_rate }}{{ model.function_call_support }}
180 |
181 | 182 |
183 |

Open Source Models

184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | {% for model in open_source_models %} 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | {% endfor %} 201 |
ModelScoreFirst Token LatencyToken Output RateFunction Call Support
{{ model.name }}{{ model.score }}%{{ model.first_token_latency }}{{ model.token_rate }}{{ model.function_call_support }}
202 |
203 |
204 | 205 | 321 | 322 | 323 | -------------------------------------------------------------------------------- /tests/templates/compatible-models.mdx.j2: -------------------------------------------------------------------------------- 1 | --- 2 | id: compatible-models 3 | title: Compatible Models 4 | description: Learn about the models compatible with OmniAgent. 5 | --- 6 | 7 | ## Compatible Models 8 | 9 | Learn what models are compatible with OmniAgent. 10 | 11 | OmniAgent is compatible with many models out of the box. 12 | 13 | We rate the models by conducting various tests and manually reviewing the output. These scores give a **rough indication** of how well the model can handle queries involving Open Web elements like blockchain, but they **do not** reflect the overall capability of the model. 14 | 15 | For more information on what's included by the Open Web, see: 16 | 17 | 18 | 19 | Our vision of the next generation Internet. 20 | 21 | ### Open Source Models 22 | 23 | > Theoretically, all open source models are compatible. 24 | > 25 | > Models with function calling capability tend to have better overall performance. 26 | 27 | You may try all models supported by Ollama, use the exact model name from https://ollama.com/library. 
28 | 29 | Learn more: 30 | 31 | 32 | 33 | | Name | Parameters | Score (out of 100) | Function Call Support | 34 | |------|------------|-------------------|---------------------| 35 | {%- for model in open_source_models %} 36 | | {{ model.name }} | {{ model.parameters }} | {{ model.score }} | {{ model.function_call_support }} | 37 | {%- endfor %} 38 | 39 | ### Proprietary Models 40 | 41 | | Name | Score (out of 100) | Function Call Support | 42 | |------|-------------------|---------------------| 43 | {%- for model in proprietary_models %} 44 | | {{ model.name }} | {{ model.score }} | {{ model.function_call_support }} | 45 | {%- endfor %} 46 | -------------------------------------------------------------------------------- /widget/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | **node_modules** 5 | **dist** 6 | /.pnp 7 | .pnp.js 8 | 9 | # testing 10 | /coverage 11 | 12 | # production 13 | **build** 14 | 15 | # misc 16 | .DS_Store 17 | .env.local 18 | .env.development.local 19 | .env.test.local 20 | .env.production.local 21 | 22 | npm-debug.log* 23 | yarn-debug.log* 24 | yarn-error.log* 25 | 26 | .next 27 | .cache 28 | .yarn 29 | 30 | # Swap the comments on the following lines if you wish to use zero-installs 31 | # Documentation here: https://yarnpkg.com/features/zero-installs 32 | # !.yarn/cache 33 | .pnp.* 34 | -------------------------------------------------------------------------------- /widget/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Vite + React + TS 8 | 9 | 10 | 11 | 12 |
13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /widget/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "@rainbow-me/rainbowkit": "^2.1.3", 4 | "@tanstack/react-query": "^5.51.16", 5 | "ethers": "^6.13.2", 6 | "react": "^18.3.1", 7 | "react-dom": "^18.3.1", 8 | "react-router-dom": "^6.24.1", 9 | "viem": "^2.18.7", 10 | "wagmi": "^2.12.2" 11 | }, 12 | "devDependencies": { 13 | "@types/events": "^3.0.3", 14 | "@types/node": "^20.14.9", 15 | "@types/react": "^18.3.3", 16 | "@types/react-dom": "^18.3.0", 17 | "@vitejs/plugin-react": "^4.3.1", 18 | "typescript": "^5.5.2", 19 | "vite": "^5.3.1", 20 | "vite-plugin-node-polyfills": "^0.22.0" 21 | }, 22 | "name": "vite-project", 23 | "private": true, 24 | "scripts": { 25 | "analyze": "source-map-explorer 'dist/assets/*.js' --no-border-checks", 26 | "build": "tsc && vite build", 27 | "dev": "vite", 28 | "preview": "vite preview" 29 | }, 30 | "type": "module", 31 | "version": "0.0.0" 32 | } 33 | -------------------------------------------------------------------------------- /widget/src/App.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import '@rainbow-me/rainbowkit/styles.css'; 3 | import {RainbowKitProvider, getDefaultConfig, getDefaultWallets} from '@rainbow-me/rainbowkit'; 4 | import {createConfig, http, WagmiProvider} from 'wagmi'; 5 | import { QueryClient, QueryClientProvider } from '@tanstack/react-query'; 6 | import {mainnet, sepolia} from 'wagmi/chains'; 7 | 8 | let MAINNET_RPC_URL = process.env.REACT_APP_MAINNET_RPC_URL || ""; 9 | let SEPOLIA_RPC_URL = process.env.REACT_APP_SEPOLIA_RPC_URL || ""; 10 | 11 | /* eslint-disable no-console */ 12 | export function App() { 13 | return <> 14 | 15 | } 16 | 17 | const { connectors } = getDefaultWallets({ 18 | appName: "OmniAgent Widget", 19 | appDescription: "Widget", 20 | 
projectId: "project id", 21 | }) 22 | 23 | export const wagmiConfig = createConfig({ 24 | chains:[mainnet,sepolia], 25 | connectors, 26 | ssr: true, 27 | transports: { 28 | [mainnet.id]: http(MAINNET_RPC_URL, { batch: true }), 29 | [sepolia.id]: http(SEPOLIA_RPC_URL, { batch: true }), 30 | }, 31 | syncConnectedChain: true, 32 | }) 33 | 34 | declare module "wagmi" { 35 | interface Register { 36 | config: typeof wagmiConfig 37 | } 38 | } 39 | 40 | // Create a new QueryClient instance for React Query 41 | const queryClient = new QueryClient(); 42 | 43 | /** 44 | * TransferWidgetApp component. 45 | * Wraps children with necessary providers for Wagmi, React Query, and RainbowKit. 46 | * 47 | * @param {Object} props - The component props 48 | * @param {React.ReactNode} props.children - The child components to be wrapped 49 | */ 50 | export function TransferWidgetApp({ children }: { children: React.ReactNode }) { 51 | return ( 52 | 53 | 54 | 55 | {children} 56 | 57 | 58 | 59 | ); 60 | } 61 | -------------------------------------------------------------------------------- /widget/src/components/PriceChart.tsx: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-console */ 2 | function getQueryParams() { 3 | const params = new URLSearchParams(window.location.search); 4 | return { 5 | token: params.get('token') || 'btc', 6 | 7 | }; 8 | } 9 | 10 | export function PriceChart() { 11 | const { 12 | token 13 | } = getQueryParams(); 14 | 15 | return ( 16 | <> 17 |
18 | 20 |
21 | 22 | ); 23 | } 24 | -------------------------------------------------------------------------------- /widget/src/components/Swap.tsx: -------------------------------------------------------------------------------- 1 | /* eslint-disable no-console */ 2 | function getQueryParams() { 3 | const params = new URLSearchParams(window.location.search); 4 | return { 5 | fromAmount: params.get('fromAmount') || 11, 6 | fromChain: params.get('fromChain') || 1, 7 | fromToken: params.get('fromToken') || 'eth', 8 | toChain: params.get('toChain') || 1, 9 | toToken: params.get('toToken') || 'weth', 10 | }; 11 | } 12 | 13 | export function Swap() { 14 | const {fromAmount, fromChain, fromToken, toChain, toToken} = getQueryParams(); 15 | 16 | return ( 17 | <> 18 | 19 | swap 20 | 21 | ); 22 | } 23 | -------------------------------------------------------------------------------- /widget/src/components/TransferWidget.module.css: -------------------------------------------------------------------------------- 1 | .transferWidget { 2 | max-width: 400px; 3 | margin: auto; 4 | border: 1px solid #e0e0e0; 5 | border-radius: 12px; 6 | padding: 24px; 7 | font-family: Arial, sans-serif; 8 | background-color: #fff; 9 | box-shadow: 0 4px 12px rgba(0, 0, 0, 0.05); 10 | } 11 | 12 | .title { 13 | text-align: center; 14 | margin-bottom: 20px; 15 | font-size: 24px; 16 | color: #333; 17 | } 18 | 19 | .accountInfo, 20 | .assetInfo, 21 | .amountInfo, 22 | .addressInfo, 23 | .gasFeeInfo { 24 | margin-bottom: 20px; 25 | } 26 | 27 | .accountInfo p { 28 | margin: 0; 29 | font-size: 14px; 30 | color: #666; 31 | } 32 | 33 | .assetInfo { 34 | display: flex; 35 | justify-content: space-between; 36 | align-items: center; 37 | background-color: #f8f9fa; 38 | padding: 12px; 39 | border-radius: 8px; 40 | } 41 | 42 | .assetLabel, .balanceLabel { 43 | display: flex; 44 | flex-direction: column; 45 | } 46 | 47 | .assetLabel span:first-child, .balanceLabel span:first-child { 48 | font-size: 12px; 49 | color: #666; 
50 | margin-bottom: 4px; 51 | } 52 | 53 | .assetLabel span:last-child, .balanceLabel span:last-child { 54 | font-size: 18px; 55 | font-weight: bold; 56 | color: #333; 57 | } 58 | 59 | .amountInfo label, 60 | .addressInfo label { 61 | display: block; 62 | margin-bottom: 8px; 63 | font-weight: bold; 64 | color: #555; 65 | } 66 | 67 | .amountInfo input, 68 | .addressInfo input { 69 | width: 100%; 70 | padding: 10px; 71 | border: 1px solid #ccc; 72 | border-radius: 6px; 73 | font-size: 14px; 74 | } 75 | 76 | .gasFeeInfo { 77 | background-color: #f8f9fa; 78 | padding: 12px; 79 | border-radius: 8px; 80 | } 81 | 82 | .gasFeeInfo p { 83 | margin: 5px 0; 84 | font-size: 14px; 85 | color: #666; 86 | } 87 | 88 | .resolvedAddress { 89 | display: block; 90 | width: 100%; 91 | padding: 8px; 92 | margin: 8px 0; 93 | border: 1px solid #ccc; 94 | border-radius: 4px; 95 | font-size: 1rem; 96 | } 97 | 98 | .buttonGroup { 99 | display: flex; 100 | justify-content: space-between; 101 | align-items: center; 102 | margin-top: 20px; 103 | } 104 | 105 | .transferButton { 106 | width: auto; 107 | min-width: 100px; 108 | padding: 12px 24px; 109 | font-size: 16px; 110 | border: none; 111 | border-radius: 6px; 112 | cursor: pointer; 113 | background-color: #28a745; 114 | color: white; 115 | transition: background-color 0.3s; 116 | } 117 | 118 | .transferButton:hover { 119 | background-color: #218838; 120 | } 121 | 122 | .transferButton:disabled { 123 | background-color: #cccccc; 124 | cursor: not-allowed; 125 | } 126 | 127 | .errorMessage { 128 | color: #dc3545; 129 | margin-top: 12px; 130 | font-weight: bold; 131 | text-align: center; 132 | } 133 | 134 | 135 | 136 | .transactionInfo { 137 | font-size: 14px; 138 | color: #000; 139 | text-align: center; 140 | margin-top: 12px; 141 | word-wrap: break-word; 142 | white-space: normal; 143 | } 144 | 145 | .status, 146 | .error { 147 | font-size: 14px; 148 | color: #666; 149 | text-align: center; 150 | margin-top: 12px; 151 | } 152 | 153 | .error { 
154 | color: #dc3545; 155 | } 156 | -------------------------------------------------------------------------------- /widget/src/components/TransferWidget.tsx: -------------------------------------------------------------------------------- 1 | import React, {useState, useEffect} from 'react'; 2 | import {ConnectButton} from '@rainbow-me/rainbowkit'; 3 | import { 4 | useSendTransaction, 5 | useWaitForTransactionReceipt, 6 | useWriteContract, 7 | useAccount, 8 | useBalance, 9 | useEstimateGas, 10 | useFeeData, 11 | type BaseError, useEnsAddress 12 | } from 'wagmi'; 13 | import { 14 | erc20Abi, 15 | encodeFunctionData, 16 | parseUnits, 17 | parseEther, 18 | formatEther, 19 | isAddress 20 | } from 'viem'; 21 | import styles from './TransferWidget.module.css'; 22 | 23 | // Define the prop types for the TransferWidget component 24 | interface TransferWidgetProps { 25 | token: string, 26 | amount: string, 27 | toAddress: string, 28 | tokenAddress?: string 29 | } 30 | 31 | /** 32 | * TransferWidgetComponent - A component for handling cryptocurrency token transfers. 
33 | */ 34 | const TransferWidgetComponent: React.FC = ({ 35 | token, 36 | tokenAddress, 37 | amount, 38 | toAddress, 39 | }) => { 40 | // States for holding input values, account, and other dynamic data 41 | const [currentAmount, setCurrentAmount] = useState(amount); 42 | const [currentToAddress, setCurrentToAddress] = useState(toAddress); 43 | const [account, setAccount] = useState(null); 44 | const [estimatedGasFee, setEstimatedGasFee] = useState('0'); 45 | const [estimationError, setEstimationError] = useState(null); 46 | const [status, setStatus] = useState(''); 47 | const {address} = useAccount(); 48 | const [isAmountValid, setIsAmountValid] = useState(true); 49 | 50 | const { 51 | data: hash1, 52 | error: error1, 53 | isPending: isPending1, 54 | sendTransaction 55 | } = useSendTransaction(); 56 | 57 | const { writeContract, data: hash2, error: error2, isPending: isPending2 } = useWriteContract(); 58 | 59 | const isErc20 = tokenAddress !== "0x0000000000000000000000000000000000000000"; 60 | const hash = isErc20 ? hash2 : hash1; 61 | const error = isErc20 ? error2 : error1; 62 | const isPending = isErc20 ? isPending2 : isPending1; 63 | 64 | // Get the user wallet balance 65 | const {data: balance, isError: balanceError, isLoading: balanceLoading} = useBalance({ 66 | address, 67 | token: !isErc20? undefined : tokenAddress as `0x${string}`, 68 | }); 69 | const { 70 | isLoading: isConfirming, 71 | isSuccess: isConfirmed 72 | } = useWaitForTransactionReceipt({hash}); 73 | 74 | 75 | // Estimate the gas required for the transaction 76 | const { data: estimatedGas, isError: isEstimateError } = useEstimateGas({ 77 | to: currentToAddress as `0x${string}`, 78 | value: parseUnits(currentAmount || '0', balance?.decimals || 18), 79 | data: isErc20 ? 
encodeFunctionData({ 80 | abi: erc20Abi, 81 | functionName: 'transfer', 82 | args: [currentToAddress as `0x${string}`, parseUnits(currentAmount || '0', balance?.decimals || 18)] 83 | }) : undefined, 84 | }); 85 | 86 | // Resolve ENS name 87 | const { data: resolvedAddress, isLoading: isResolvingENS } = useEnsAddress({ 88 | name: isAddress(currentToAddress) ? undefined : currentToAddress, 89 | chainId: 1, 90 | }); 91 | 92 | // Feture the fee data 93 | const { data: feeData } = useFeeData(); 94 | 95 | // Calculate gas fee 96 | useEffect(() => { 97 | // console.log("estimatedGas is:", estimatedGas); 98 | // console.log("feeData object is:", feeData); 99 | if (estimatedGas && feeData?.maxFeePerGas) { 100 | 101 | // Multiply estimated gas by maxFeePerGas to get the gas fee in Wei and then in Eth 102 | const gasFeeInWei = estimatedGas * feeData.maxFeePerGas; 103 | const gasFeeInEth = formatEther(gasFeeInWei); 104 | 105 | // console.log("Gas fee in ETH:", gasFeeInEth); 106 | setEstimatedGasFee(Number(gasFeeInEth).toFixed(8)); 107 | } else { 108 | console.log("Missing estimatedGas or feeData.maxFeePerGas"); 109 | setEstimatedGasFee('0.00000000'); 110 | } 111 | }, [estimatedGas, feeData]); 112 | 113 | // Check if amount and gas fee is greater than balance 114 | useEffect(() => { 115 | 116 | // console.log("Balance:", balance); 117 | // console.log("Current Amount:", currentAmount); 118 | // console.log("Estimated Gas Fee:", estimatedGasFee); 119 | if (balance && currentAmount && estimatedGasFee) { 120 | 121 | const amountWei = parseUnits(currentAmount, balance.decimals); 122 | // Gas is in ETH (with 18 decimals) 123 | const gasFeeWei = parseUnits(estimatedGasFee, 18); 124 | const totalCostWei = amountWei + gasFeeWei; 125 | setIsAmountValid(totalCostWei <= balance.value); 126 | } 127 | }, [balance, currentAmount, estimatedGasFee]); 128 | 129 | 130 | // Update the connected account address when it changes 131 | useEffect(() => { 132 | setAccount(address as `0x${string}`) 133 
| }, [address]); 134 | 135 | 136 | // Handle changes in the amount input field 137 | const handleAmountChange = (e: React.ChangeEvent) => { 138 | const value = e.target.value; 139 | if (value === '' || /^\d*\.?\d*$/.test(value)) { 140 | setCurrentAmount(value); 141 | } 142 | }; 143 | 144 | /** 145 | * Handle form submission for the transfer. 146 | */ 147 | const handleSubmit = (e: React.FormEvent) => { 148 | e.preventDefault(); 149 | 150 | const recipientAddress = resolvedAddress || currentToAddress; 151 | 152 | if (address && currentToAddress) { 153 | const value = parseUnits(currentAmount, balance?.decimals || 18); 154 | if (!isErc20) { 155 | 156 | // Send native ETH transfer 157 | sendTransaction( 158 | { 159 | to: recipientAddress as `0x${string}`, 160 | value: parseUnits(currentAmount, balance?.decimals || 18), 161 | }) 162 | } else { 163 | 164 | // Send ERC20 token transfer 165 | writeContract({ 166 | abi: erc20Abi, 167 | address: tokenAddress as `0x${string}`, 168 | args: [recipientAddress as `0x${string}`, parseEther(String(value))], 169 | functionName: "transfer", 170 | }, 171 | { 172 | onError: (error) => { 173 | console.log(error, 111); 174 | }, 175 | }) 176 | } 177 | } else { 178 | setStatus('Wallet is not connected or recipient address is missing'); 179 | } 180 | }; 181 | 182 | // Render the component 183 | return ( 184 |
185 |

Send

186 |
187 |

From

188 |

{account || 'Not connected'}

189 |
190 |
191 |
192 | Asset: 193 | {token} 194 |
195 |
196 | Balance: 197 | {Number(balance?.formatted || 0).toFixed(2)} {token} 198 |
199 |
200 |
201 | 202 | 208 |
209 |
210 | 211 | setCurrentToAddress(e.target.value)} 215 | placeholder="Enter recipient address or ENS" 216 | /> 217 | 218 |
219 |
220 | {isResolvingENS &&

Resolving ENS...

} 221 | {resolvedAddress && } 222 | {resolvedAddress &&

{resolvedAddress}

} 223 |
224 |
225 |

Estimated gas fee

226 | {isEstimateError ? ( 227 |

Error estimating gas

228 | ) : ( 229 |

{estimatedGasFee} ETH

230 | )} 231 |
232 |
233 | 234 | 241 |
242 | {!isAmountValid &&

Insufficient balance for transfer and gas fee

} 243 |
{status}
244 | {hash && ( 245 |
246 |
Transaction Hash: {hash}
247 |
248 | )} 249 | {isConfirming &&
Waiting for confirmation...
} 250 | {isConfirmed &&
Transaction confirmed.
} 251 | {error && ( 252 |
Error: {(error as BaseError).shortMessage || error.message}
253 | )} 254 |
255 | ); 256 | }; 257 | 258 | /** 259 | * TransferWidget - Wrapper component that parses URL parameters and renders TransferWidgetComponent. 260 | */ 261 | export function TransferWidget() { 262 | const params = new URLSearchParams(window.location.search); 263 | const token = params.get('token') || ''; 264 | const tokenAddress = params.get('tokenAddress') || ''; 265 | const amount = params.get('amount') || ''; 266 | const toAddress = params.get('toAddress') || ''; 267 | 268 | // @ts-ignore 269 | return ( 270 | 276 | ); 277 | } 278 | 279 | export default TransferWidget; 280 | -------------------------------------------------------------------------------- /widget/src/custom.d.ts: -------------------------------------------------------------------------------- 1 | // src/custom.d.ts 2 | declare namespace JSX { 3 | interface IntrinsicElements { 4 | 'gecko-coin-price-chart-widget': any; 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /widget/src/main.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom/client'; 3 | import {App, TransferWidgetApp} from './App'; 4 | import {BrowserRouter as Router, Route, Routes} from "react-router-dom"; 5 | import {Swap} from "./components/Swap"; 6 | import {PriceChart} from "./components/PriceChart"; 7 | import TransferWidget from "./components/TransferWidget"; 8 | 9 | ReactDOM.createRoot(document.getElementById('root') as HTMLElement).render( 10 | 11 | 12 | 13 | 14 | 15 | }/> 16 | }/> 17 | }/> 18 | 20 | 21 | 22 | } /> 23 | 24 | 25 | 26 | , 27 | ); 28 | -------------------------------------------------------------------------------- /widget/src/vite-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | -------------------------------------------------------------------------------- /widget/tsconfig.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowJs": false, 4 | "allowSyntheticDefaultImports": true, 5 | "esModuleInterop": false, 6 | "forceConsistentCasingInFileNames": true, 7 | "isolatedModules": true, 8 | "jsx": "react-jsx", 9 | "lib": [ 10 | "DOM", 11 | "DOM.Iterable", 12 | "ESNext" 13 | ], 14 | "module": "ESNext", 15 | "moduleResolution": "Node", 16 | "noEmit": true, 17 | "resolveJsonModule": true, 18 | "skipLibCheck": true, 19 | "strict": true, 20 | "target": "ESNext", 21 | "useDefineForClassFields": true 22 | }, 23 | "include": [ 24 | "src" 25 | ], 26 | "references": [ 27 | { 28 | "path": "./tsconfig.node.json" 29 | } 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /widget/tsconfig.node.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowSyntheticDefaultImports": true, 4 | "composite": true, 5 | "module": "ESNext", 6 | "moduleResolution": "Node" 7 | }, 8 | "include": [ 9 | "vite.config.ts" 10 | ] 11 | } 12 | -------------------------------------------------------------------------------- /widget/vite.config.ts: -------------------------------------------------------------------------------- 1 | import react from '@vitejs/plugin-react'; 2 | import {defineConfig} from 'vite'; 3 | import {nodePolyfills} from 'vite-plugin-node-polyfills'; 4 | 5 | // https://vitejs.dev/config/ 6 | // eslint-disable-next-line import/no-default-export 7 | export default defineConfig({ 8 | plugins: [react(), nodePolyfills()], 9 | esbuild: { 10 | target: 'esnext', 11 | }, 12 | build: { 13 | outDir: '../dist', 14 | rollupOptions: { 15 | output: { 16 | assetFileNames: 'static/[name].[hash].[ext]', 17 | chunkFileNames: 'static/[name].[hash].js', 18 | entryFileNames: 'static/[name].[hash].js', 19 | } 20 | } 21 | }, 22 | 23 | server: { 24 | port: 3000, 25 | open: true, 26 | }, 27 | }); 
28 | --------------------------------------------------------------------------------