├── .chainlit
│   ├── config.toml
│   └── translations
│       └── en-US.json
├── .dockerignore
├── .env.example
├── .gitattributes
├── .github
│   └── workflows
│       ├── build-docker-images.yml
│       ├── ci.yml
│       └── run_test_and_gen_report.yml
├── .gitignore
├── .gitmodules
├── .pre-commit-config.yaml
├── Dockerfile
├── LICENSE
├── README.md
├── docker-compose.yml
├── main.py
├── omniagent
│   ├── __init__.py
│   ├── agents
│   │   ├── __init__.py
│   │   ├── agent_factory.py
│   │   ├── asset_management.py
│   │   ├── block_explore.py
│   │   ├── fallback.py
│   │   ├── feed_explore.py
│   │   ├── market_analysis.py
│   │   └── research_analyst.py
│   ├── app.py
│   ├── conf
│   │   ├── __init__.py
│   │   ├── env.py
│   │   └── llm_provider.py
│   ├── db
│   │   ├── __init__.py
│   │   ├── database.py
│   │   └── models.py
│   ├── executors
│   │   ├── __init__.py
│   │   ├── block_stat_executor.py
│   │   ├── coin_market_executor.py
│   │   ├── defi_executor.py
│   │   ├── feed_executor.py
│   │   ├── feed_prompt.py
│   │   ├── feed_source_executor.py
│   │   ├── funding_rate_executor.py
│   │   ├── nft_balance_executor.py
│   │   ├── nft_rank_executor.py
│   │   ├── price_executor.py
│   │   ├── project_executor.py
│   │   ├── search_executor.py
│   │   ├── swap_executor.py
│   │   ├── tg_news_executor.py
│   │   ├── tg_util.py
│   │   ├── token_balance_executor.py
│   │   ├── token_util.py
│   │   └── transfer_executor.py
│   ├── index
│   │   ├── __init__.py
│   │   ├── feed_indexing.py
│   │   ├── feed_scrape.py
│   │   └── pgvector_store.py
│   ├── router
│   │   ├── __init__.py
│   │   ├── health.py
│   │   ├── openai.py
│   │   └── widget.py
│   ├── ui
│   │   ├── __init__.py
│   │   ├── app.py
│   │   └── profile.py
│   └── workflows
│       ├── __init__.py
│       ├── member.py
│       ├── supervisor_chain.py
│       └── workflow.py
├── poetry.lock
├── pyproject.toml
├── tests
│   ├── README.md
│   ├── __init__.py
│   ├── agent_trajectory
│   │   ├── __init__.py
│   │   ├── asset_management.py
│   │   ├── block_explore.py
│   │   ├── feed_explore.py
│   │   ├── market_analysis.py
│   │   └── research_analyst.py
│   ├── compatible-models.mdx
│   ├── conftest.py
│   ├── gen_benchmark_html_report.py
│   ├── generate_benchmark_report.py
│   ├── openai-api
│   │   ├── __init__.py
│   │   └── example.sh
│   ├── run_test.py
│   ├── supervisor_chain.py
│   └── templates
│       ├── benchmark.html.j2
│       └── compatible-models.mdx.j2
└── widget
    ├── .gitignore
    ├── index.html
    ├── package.json
    ├── src
    │   ├── App.tsx
    │   ├── components
    │   │   ├── PriceChart.tsx
    │   │   ├── Swap.tsx
    │   │   ├── TransferWidget.module.css
    │   │   └── TransferWidget.tsx
    │   ├── custom.d.ts
    │   ├── main.tsx
    │   └── vite-env.d.ts
    ├── tsconfig.json
    ├── tsconfig.node.json
    ├── vite.config.ts
    └── yarn.lock

/.chainlit/config.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | # Whether to enable telemetry (default: true). No personal data is collected.
3 | enable_telemetry = true
4 |
5 |
6 | # List of environment variables to be provided by each user to use the app.
7 | user_env = []
8 |
9 | # Duration (in seconds) during which the session is saved when the connection is lost
10 | session_timeout = 3600
11 |
12 | # Enable third parties caching (e.g LangChain cache)
13 | cache = false
14 |
15 | # Authorized origins
16 | allow_origins = ["*"]
17 |
18 | # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
19 | # follow_symlink = false
20 |
21 | [features]
22 | # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
23 | unsafe_allow_html = true
24 |
25 | # Process and display mathematical expressions. This can clash with "$" characters in messages.
26 | latex = false 27 | 28 | # Automatically tag threads with the current chat profile (if a chat profile is used) 29 | auto_tag_thread = true 30 | 31 | # Authorize users to spontaneously upload files with messages 32 | [features.spontaneous_file_upload] 33 | enabled = true 34 | accept = ["*/*"] 35 | max_files = 20 36 | max_size_mb = 500 37 | 38 | [features.audio] 39 | # Threshold for audio recording 40 | min_decibels = -45 41 | # Delay for the user to start speaking in MS 42 | initial_silence_timeout = 3000 43 | # Delay for the user to continue speaking in MS. If the user stops speaking for this duration, the recording will stop. 44 | silence_timeout = 1500 45 | # Above this duration (MS), the recording will forcefully stop. 46 | max_duration = 15000 47 | # Duration of the audio chunks in MS 48 | chunk_duration = 1000 49 | # Sample rate of the audio 50 | sample_rate = 44100 51 | 52 | [UI] 53 | # Name of the assistant. 54 | name = "OmniAgent" 55 | 56 | # Description of the assistant. This is used for HTML tags. 57 | description = "OmniAgent is a conversational AI assistant that can help you with your web3 queries." 58 | 59 | # Large size content are by default collapsed for a cleaner ui 60 | default_collapse_content = true 61 | 62 | # Hide the chain of thought details from the user in the UI. 63 | hide_cot = false 64 | 65 | # Link to your github repo. This will add a github button in the UI's header. 66 | # github = "" 67 | 68 | # Specify a CSS file that can be used to customize the user interface. 69 | # The CSS file can be served from the public directory or via an external link. 70 | # custom_css = "/public/test.css" 71 | custom_css = "/public/index.css" 72 | 73 | # Specify a Javascript file that can be used to customize the user interface. 74 | # The Javascript file can be served from the public directory. 75 | # custom_js = "/public/test.js" 76 | 77 | # Specify a custom font url. 78 | # custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap" 79 | 80 | # Specify a custom meta image url. 81 | # custom_meta_image_url = "https://chainlit-cloud.s3.eu-west-3.amazonaws.com/logo/chainlit_banner.png" 82 | 83 | # Specify a custom build directory for the frontend. 84 | # This can be used to customize the frontend code. 85 | # Be careful: If this is a relative path, it should not start with a slash. 86 | # custom_build = "./public/build" 87 | 88 | [UI.theme] 89 | default = "dark" 90 | #layout = "wide" 91 | #font_family = "Inter, sans-serif" 92 | # Override default MUI light theme. (Check theme.ts) 93 | [UI.theme.light] 94 | #background = "#FAFAFA" 95 | #paper = "#FFFFFF" 96 | 97 | [UI.theme.light.primary] 98 | #main = "#F80061" 99 | #dark = "#980039" 100 | #light = "#FFE7EB" 101 | [UI.theme.light.text] 102 | #primary = "#212121" 103 | #secondary = "#616161" 104 | 105 | # Override default MUI dark theme. 
(Check theme.ts) 106 | [UI.theme.dark] 107 | #background = "#FAFAFA" 108 | #paper = "#FFFFFF" 109 | 110 | [UI.theme.dark.primary] 111 | #main = "#F80061" 112 | #dark = "#980039" 113 | #light = "#FFE7EB" 114 | [UI.theme.dark.text] 115 | #primary = "#EEEEEE" 116 | #secondary = "#BDBDBD" 117 | 118 | [meta] 119 | generated_by = "1.1.305" 120 | -------------------------------------------------------------------------------- /.chainlit/translations/en-US.json: -------------------------------------------------------------------------------- 1 | { 2 | "components": { 3 | "atoms": { 4 | "buttons": { 5 | "userButton": { 6 | "menu": { 7 | "APIKeys": "API Keys", 8 | "logout": "Logout", 9 | "settings": "Settings", 10 | "settingsKey": "S" 11 | } 12 | } 13 | } 14 | }, 15 | "molecules": { 16 | "attachments": { 17 | "cancelUpload": "Cancel upload", 18 | "removeAttachment": "Remove attachment" 19 | }, 20 | "auth": { 21 | "authForgotPassword": { 22 | "continue": "Continue", 23 | "email": "Email address", 24 | "emailRequired": "email is a required field", 25 | "emailSent": "Please check the email address {{email}} for instructions to reset your password.", 26 | "enterEmail": "Enter your email address and we will send you instructions to reset your password.", 27 | "goBack": "Go Back", 28 | "resendEmail": "Resend email" 29 | }, 30 | "authLogin": { 31 | "error": { 32 | "callback": "Try signing in with a different account.", 33 | "credentialssignin": "Sign in failed. Check the details you provided are correct.", 34 | "default": "Unable to sign in.", 35 | "emailcreateaccount": "Try signing in with a different account.", 36 | "emailsignin": "The e-mail could not be sent.", 37 | "emailverify": "Please verify your email, a new email has been sent.", 38 | "oauthaccountnotlinked": "To confirm your identity, sign in with the same account you used originally.", 39 | "oauthcallbackerror": "Try signing in with a different account.", 40 | "oauthcreateaccount": "Try signing in with a different account.", 41 | "oauthsignin": "Try signing in with a different account.", 42 | "redirect_uri_mismatch": "The redirect URI is not matching the oauth app configuration.", 43 | "sessionrequired": "Please sign in to access this page.", 44 | "signin": "Try signing in with a different account." 45 | }, 46 | "form": { 47 | "alreadyHaveAccount": "Already have an account?", 48 | "continue": "Continue", 49 | "email": "Email address", 50 | "emailRequired": "email is a required field", 51 | "forgotPassword": "Forgot password?", 52 | "noAccount": "Don't have an account?", 53 | "or": "OR", 54 | "password": "Password", 55 | "passwordMustContain": "Your password must contain:", 56 | "passwordRequired": "password is a required field", 57 | "signin": "Sign In", 58 | "signup": "Sign Up" 59 | }, 60 | "title": "Login to access the app." 61 | }, 62 | "authResetPassword": { 63 | "confirmPassword": "Confirm password", 64 | "confirmPasswordRequired": "Confirm password is a required field", 65 | "newPassword": "New password", 66 | "newPasswordRequired": "New password is a required field", 67 | "passwordsMustMatch": "Passwords must match", 68 | "resetPassword": "Reset Password" 69 | }, 70 | "authVerifyEmail": { 71 | "almostThere": "You're almost there! 
We've sent an email to ", 72 | "didNotReceive": "Can't find the email?", 73 | "emailSent": "Email sent successfully.", 74 | "goBack": "Go Back", 75 | "resendEmail": "Resend email", 76 | "verifyEmail": "Verify your email address", 77 | "verifyEmailLink": "Please click on the link in that email to complete your signup." 78 | }, 79 | "providerButton": { 80 | "continue": "Continue with {{provider}}", 81 | "signup": "Sign up with {{provider}}" 82 | } 83 | }, 84 | "detailsButton": { 85 | "used": "Used", 86 | "using": "Using" 87 | }, 88 | "newChatButton": { 89 | "newChat": "New Chat" 90 | }, 91 | "newChatDialog": { 92 | "cancel": "Cancel", 93 | "clearChat": "This will clear the current messages and start a new chat.", 94 | "confirm": "Confirm", 95 | "createNewChat": "Create new chat?" 96 | }, 97 | "settingsModal": { 98 | "darkMode": "Dark Mode", 99 | "expandMessages": "Expand Messages", 100 | "hideChainOfThought": "Hide Chain of Thought", 101 | "settings": "Settings" 102 | }, 103 | "tasklist": { 104 | "TaskList": { 105 | "error": "An error occurred", 106 | "loading": "Loading...", 107 | "title": "\ud83d\uddd2\ufe0f Task List" 108 | } 109 | } 110 | }, 111 | "organisms": { 112 | "chat": { 113 | "Messages": { 114 | "index": { 115 | "executedSuccessfully": "executed successfully", 116 | "failed": "failed", 117 | "feedbackUpdated": "Feedback updated", 118 | "running": "Running", 119 | "updating": "Updating" 120 | } 121 | }, 122 | "dropScreen": { 123 | "dropYourFilesHere": "Drop your files here" 124 | }, 125 | "history": { 126 | "index": { 127 | "lastInputs": "Last Inputs", 128 | "loading": "Loading...", 129 | "noInputs": "Such empty...", 130 | "showHistory": "Show history" 131 | } 132 | }, 133 | "index": { 134 | "cancelledUploadOf": "Cancelled upload of", 135 | "continuingChat": "Continuing previous chat", 136 | "couldNotReachServer": "Could not reach the server", 137 | "failedToUpload": "Failed to upload" 138 | }, 139 | "inputBox": { 140 | "SubmitButton": { 141 | "sendMessage": "Send message", 142 | "stopTask": "Stop Task" 143 | }, 144 | "UploadButton": { 145 | "attachFiles": "Attach files" 146 | }, 147 | "input": { 148 | "placeholder": "Type your message here..." 
149 | }, 150 | "speechButton": { 151 | "start": "Start recording", 152 | "stop": "Stop recording" 153 | }, 154 | "waterMark": { 155 | "text": "Built with" 156 | } 157 | }, 158 | "settings": { 159 | "cancel": "Cancel", 160 | "confirm": "Confirm", 161 | "reset": "Reset", 162 | "settingsPanel": "Settings panel" 163 | } 164 | }, 165 | "header": { 166 | "chat": "Chat", 167 | "readme": "Readme" 168 | }, 169 | "threadHistory": { 170 | "Thread": { 171 | "backToChat": "Go back to chat", 172 | "chatCreatedOn": "This chat was created on" 173 | }, 174 | "sidebar": { 175 | "DeleteThreadButton": { 176 | "cancel": "Cancel", 177 | "chatDeleted": "Chat deleted", 178 | "confirm": "Confirm", 179 | "confirmMessage": "This will delete the thread as well as it's messages and elements.", 180 | "deletingChat": "Deleting chat" 181 | }, 182 | "ThreadList": { 183 | "empty": "Empty...", 184 | "previous30days": "Previous 30 days", 185 | "previous7days": "Previous 7 days", 186 | "today": "Today", 187 | "yesterday": "Yesterday" 188 | }, 189 | "TriggerButton": { 190 | "closeSidebar": "Close sidebar", 191 | "openSidebar": "Open sidebar" 192 | }, 193 | "filters": { 194 | "FeedbackSelect": { 195 | "feedbackAll": "Feedback: All", 196 | "feedbackNegative": "Feedback: Negative", 197 | "feedbackPositive": "Feedback: Positive" 198 | }, 199 | "SearchBar": { 200 | "search": "Search" 201 | } 202 | }, 203 | "index": { 204 | "pastChats": "Past Chats" 205 | } 206 | } 207 | } 208 | } 209 | }, 210 | "hooks": { 211 | "useLLMProviders": { 212 | "failedToFetchProviders": "Failed to fetch providers:" 213 | } 214 | }, 215 | "pages": { 216 | "Design": {}, 217 | "Env": { 218 | "requiredApiKeys": "Required API Keys", 219 | "requiredApiKeysInfo": "To use this app, the following API keys are required. The keys are stored on your device's local storage.", 220 | "savedSuccessfully": "Saved successfully" 221 | }, 222 | "Page": { 223 | "notPartOfProject": "You are not part of this project." 
224 | }, 225 | "ResumeButton": { 226 | "resumeChat": "Resume Chat" 227 | } 228 | } 229 | } 230 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | widget/node_modules 2 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Database settings 2 | DB_CONNECTION=postgresql+psycopg://postgres:password@vector_db:5432/omniagent 3 | 4 | # LLM provider settings (at least one required) 5 | # To get a Google Cloud Vertex project ID, visit: https://console.cloud.google.com/vertex-ai 6 | VERTEX_PROJECT_ID= 7 | # To get an OpenAI API key, sign up at: https://platform.openai.com/signup 8 | OPENAI_API_KEY= 9 | # To get an ANTHROPIC_API_KEY, visit: https://www.anthropic.com 10 | ANTHROPIC_API_KEY= 11 | # To get a Google Gemini API key, visit: https://ai.google.dev 12 | GOOGLE_GEMINI_API_KEY= 13 | # For Ollama, download and install from: https://github.com/ollama/ollama 14 | OLLAMA_HOST=http://ollama:11434 15 | 16 | # Optional API keys for additional features 17 | # Get your Tavily API key at: https://www.tavily.com/ 18 | TAVILY_API_KEY= 19 | # Get your Moralis API key at: https://moralis.io/ 20 | MORALIS_API_KEY= 21 | # Register for a RootData API key at: https://www.rootdata.com/ 22 | ROOTDATA_API_KEY= 23 | # Sign up for a CoinGecko API key at: https://www.coingecko.com/en/api/pricing 24 | COINGECKO_API_KEY= 25 | # RSS3 API URLs (default values provided, change if needed) 26 | RSS3_DATA_API=https://gi.vividgen.me 27 | RSS3_SEARCH_API=https://devnet.vividgen.me/search 28 | 29 | # Chainlit OAuth settings (all fields must be set if using OAuth, otherwise leave them empty) 30 | # For Auth0 setup, visit: https://docs.chainlit.io/authentication/oauth 31 | CHAINLIT_AUTH_SECRET= 32 | OAUTH_AUTH0_CLIENT_ID= 33 | OAUTH_AUTH0_CLIENT_SECRET= 34 | OAUTH_AUTH0_DOMAIN= 35 | 36 | #run unit test concurrently 37 | #run unit test with repeat count 38 | #simplify test and report generation 39 | #Change the current model settings 40 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.github/workflows/build-docker-images.yml: -------------------------------------------------------------------------------- 1 | name: build & push docker images 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | tags: 8 | - "*" 9 | pull_request: 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | platform: 18 | - linux/amd64 19 | # - linux/arm64 # Cannot build frontend (install apk packages) within acceptable time 20 | steps: 21 | - name: Prepare 22 | run: | 23 | platform=${{ matrix.platform }} 24 | echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV 25 | - name: Checkout 26 | uses: actions/checkout@v4 27 | - name: Set up Docker Buildx 28 | uses: docker/setup-buildx-action@v3 29 | with: 30 | platforms: ${{ matrix.platform }} 31 | - name: Login to DockerHub 32 | uses: docker/login-action@v3 33 | with: 34 | username: ${{ secrets.DOCKERHUB_USERNAME }} 35 | password: ${{ secrets.DOCKERHUB_TOKEN }} 36 | - name: Docker meta 37 | id: meta 38 | uses: 
docker/metadata-action@v5 39 | with: 40 | images: rss3/omniagent 41 | - name: Build and push by digest 42 | id: build 43 | uses: docker/build-push-action@v5 44 | with: 45 | context: . 46 | platforms: ${{ matrix.platform }} 47 | labels: ${{ steps.meta.outputs.labels }} 48 | outputs: type=image,name=rss3/omniagent,push-by-digest=true,name-canonical=true,push=true 49 | - name: Export digest 50 | run: | 51 | mkdir -p /tmp/digests 52 | digest="${{ steps.build.outputs.digest }}" 53 | touch "/tmp/digests/${digest#sha256:}" 54 | - name: Upload digest 55 | uses: actions/upload-artifact@v4 56 | with: 57 | name: digests-${{ env.PLATFORM_PAIR }} 58 | path: /tmp/digests/* 59 | if-no-files-found: error 60 | retention-days: 1 61 | 62 | merge: 63 | runs-on: ubuntu-latest 64 | permissions: 65 | contents: read 66 | packages: write 67 | id-token: write 68 | needs: 69 | - build 70 | steps: 71 | - name: Download digests 72 | uses: actions/download-artifact@v4 73 | with: 74 | path: /tmp/digests 75 | pattern: digests-* 76 | merge-multiple: true 77 | - name: Set up Docker Buildx 78 | uses: docker/setup-buildx-action@v3 79 | 80 | - name: Log in to the Container registry 81 | uses: docker/login-action@v3 82 | with: 83 | registry: ghcr.io 84 | username: ${{ github.actor }} 85 | password: ${{ github.token }} 86 | 87 | - name: Login to DockerHub 88 | uses: docker/login-action@v3 89 | with: 90 | username: ${{ secrets.DOCKERHUB_USERNAME }} 91 | password: ${{ secrets.DOCKERHUB_TOKEN }} 92 | 93 | - name: Extract metadata (tags, labels) for Docker 94 | id: meta 95 | uses: docker/metadata-action@v5 96 | with: 97 | images: | 98 | rss3/omniagent 99 | ghcr.io/${{ github.repository }} 100 | tags: | 101 | type=raw,value=latest,enable={{is_default_branch}} 102 | type=raw,value=${{ matrix.arch }},enable={{is_default_branch}} 103 | type=ref,event=tag 104 | type=ref,event=branch 105 | type=ref,event=pr 106 | type=sha,prefix={{branch}}-,enable=${{ !startsWith(github.ref, 'refs/tags') && github.event_name != 'pull_request' }},event=branch 107 | 108 | - name: Create manifest list and push 109 | working-directory: /tmp/digests 110 | run: | 111 | if [ ${{ github.event_name }} == 'pull_request' ]; then 112 | ARGS="--dry-run" 113 | fi 114 | docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ 115 | $(printf 'rss3/omniagent@sha256:%s ' *) $ARGS 116 | - name: Inspect image 117 | if: github.event_name != 'pull_request' 118 | run: | 119 | docker buildx imagetools inspect rss3/omniagent:${{ steps.meta.outputs.version }} 120 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: backend code quality 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - "main" 8 | - "prod" 9 | 10 | jobs: 11 | pre-commit: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v3 15 | - uses: actions/setup-python@v3 16 | - run: pip install flake8 mypy pydantic types-requests types-redis ruff 17 | - uses: pre-commit/action@v3.0.0 18 | with: 19 | extra_args: --files ./src/* 20 | -------------------------------------------------------------------------------- /.github/workflows/run_test_and_gen_report.yml: -------------------------------------------------------------------------------- 1 | name: Test and generate report 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*.*.*' 7 | workflow_dispatch: 8 | 9 | # Add permissions configuration 10 | permissions: 11 | contents: write 12 | 13 | jobs: 14 | test: 15 | runs-on: ubuntu-latest 16 | 17 | services: 18 | postgres: 19 | image: postgres:latest 20 | env: 21 | POSTGRES_USER: postgres 22 | POSTGRES_PASSWORD: password 23 | POSTGRES_DB: omniagent 24 | ports: 25 | - 5432:5432 26 | options: >- 27 | --health-cmd pg_isready 28 | --health-interval 10s 29 | --health-timeout 5s 30 | --health-retries 5 31 | 32 | steps: 33 | - uses: actions/checkout@v4 34 | with: 35 | fetch-depth: 0 # Fetch all history and tags 36 | 37 | - name: Set up Python 38 | uses: actions/setup-python@v4 39 | with: 40 | python-version: '3.11.5' 41 | 42 | - name: Install poetry 43 | run: | 44 | python -m pip install --upgrade pip 45 | pip install poetry 46 | poetry config virtualenvs.create false 47 | 48 | - name: Install dependencies 49 | run: | 50 | sudo apt-get update 51 | sudo apt-get install -y postgresql-client libpq-dev 52 | poetry install 53 | poetry add pytest pytest-xdist 54 | 55 | - name: Run tests 56 | continue-on-error: true 57 | env: 58 | # Database settings 59 | DB_CONNECTION: postgresql+psycopg://postgres:password@localhost:5432/omniagent 60 | 61 | # LLM provider settings 62 | OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} 63 | GOOGLE_GEMINI_API_KEY: ${{ secrets.GOOGLE_GEMINI_API_KEY }} 64 | OLLAMA_HOST: ${{ secrets.OLLAMA_HOST }} 65 | ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} 66 | 67 | # Optional API keys 68 | TAVILY_API_KEY: ${{ secrets.TAVILY_API_KEY }} 69 | MORALIS_API_KEY: ${{ secrets.MORALIS_API_KEY }} 70 | ROOTDATA_API_KEY: ${{ secrets.ROOTDATA_API_KEY }} 71 | COINGECKO_API_KEY: ${{ secrets.COINGECKO_API_KEY }} 72 | 73 | # RSS3 API URLs 74 | RSS3_DATA_API: https://gi.vividgen.me 75 | RSS3_SEARCH_API: https://devnet.vividgen.me/search 76 | 77 | 78 | run: | 79 | pwd 80 | ls -la 81 | cd tests 82 | poetry run python run_test.py 83 | 84 | - name: Commit and push report 85 | env: 86 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 87 | run: | 88 | # Switch to docs branch 89 | git fetch origin docs || git fetch origin main 90 | git checkout docs || git checkout -b docs 91 | 92 | git config --global user.name 'github-actions[bot]' 93 | git config --global user.email 'github-actions[bot]@users.noreply.github.com' 94 | 95 | # Check if file exists 96 | ls -la 
tests/compatible-models.mdx || echo "Report file not found!" 97 | 98 | # Add all changes (including new files) 99 | git add -A 100 | 101 | # Show pending changes 102 | git status 103 | 104 | # Create commit with timestamp 105 | git commit -m "docs: update compatibility test report" || echo "No changes to commit" 106 | 107 | # Push changes to docs branch 108 | git push origin docs 109 | 110 | 111 | 112 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | #map_cache 2 | map_cache_* 3 | 4 | ### VisualStudioCode template 5 | .vscode/* 6 | !.vscode/settings.json 7 | !.vscode/tasks.json 8 | !.vscode/launch.json 9 | !.vscode/extensions.json 10 | !.vscode/*.code-snippets 11 | 12 | # Local History for Visual Studio Code 13 | .history/ 14 | 15 | # Built Visual Studio Code Extensions 16 | *.vsix 17 | 18 | ### JetBrains template 19 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 20 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 21 | 22 | # User-specific stuff 23 | .idea/**/workspace.xml 24 | .idea/**/tasks.xml 25 | .idea/**/usage.statistics.xml 26 | .idea/**/dictionaries 27 | .idea/**/shelf 28 | 29 | # AWS User-specific 30 | .idea/**/aws.xml 31 | 32 | # Generated files 33 | .idea/**/contentModel.xml 34 | 35 | # Sensitive or high-churn files 36 | .idea/**/dataSources/ 37 | .idea/**/dataSources.ids 38 | .idea/**/dataSources.local.xml 39 | .idea/**/sqlDataSources.xml 40 | .idea/**/dynamic.xml 41 | .idea/**/uiDesigner.xml 42 | .idea/**/dbnavigator.xml 43 | 44 | # Gradle 45 | .idea/**/gradle.xml 46 | .idea/**/libraries 47 | 48 | # Gradle and Maven with auto-import 49 | # When using Gradle or Maven with auto-import, you should exclude module files, 50 | # since they will be recreated, and may cause churn. Uncomment if using 51 | # auto-import. 
52 | # .idea/artifacts 53 | # .idea/compiler.xml 54 | # .idea/jarRepositories.xml 55 | # .idea/modules.xml 56 | # .idea/*.iml 57 | # .idea/modules 58 | # *.iml 59 | # *.ipr 60 | 61 | # CMake 62 | cmake-build-*/ 63 | 64 | # Mongo Explorer plugin 65 | .idea/**/mongoSettings.xml 66 | 67 | # File-based project format 68 | *.iws 69 | 70 | # IntelliJ 71 | out/ 72 | 73 | # mpeltonen/sbt-idea plugin 74 | .idea_modules/ 75 | 76 | # JIRA plugin 77 | atlassian-ide-plugin.xml 78 | 79 | # Cursive Clojure plugin 80 | .idea/replstate.xml 81 | 82 | # SonarLint plugin 83 | .idea/sonarlint/ 84 | 85 | # Crashlytics plugin (for Android Studio and IntelliJ) 86 | com_crashlytics_export_strings.xml 87 | crashlytics.properties 88 | crashlytics-build.properties 89 | fabric.properties 90 | 91 | # Editor-based Rest Client 92 | .idea/httpRequests 93 | 94 | # Android studio 3.1+ serialized cache file 95 | .idea/caches/build_file_checksums.ser 96 | 97 | ### dotenv template 98 | .env 99 | 100 | ### Linux template 101 | *~ 102 | 103 | # temporary files which can be created if a process still has a handle open of a deleted file 104 | .fuse_hidden* 105 | 106 | # KDE directory preferences 107 | .directory 108 | 109 | # Linux trash folder which might appear on any partition or disk 110 | .Trash-* 111 | 112 | # .nfs files are created when an open file is removed but is still being accessed 113 | .nfs* 114 | 115 | chainlit.md 116 | ### Python template 117 | # Byte-compiled / optimized / DLL files 118 | __pycache__/ 119 | *.py[cod] 120 | *$py.class 121 | 122 | # C extensions 123 | *.so 124 | 125 | # Distribution / packaging 126 | .Python 127 | build/ 128 | develop-eggs/ 129 | dist/ 130 | downloads/ 131 | eggs/ 132 | .eggs/ 133 | lib/ 134 | lib64/ 135 | parts/ 136 | sdist/ 137 | var/ 138 | wheels/ 139 | share/python-wheels/ 140 | *.egg-info/ 141 | .installed.cfg 142 | *.egg 143 | MANIFEST 144 | 145 | # PyInstaller 146 | # Usually these files are written by a python script from a template 147 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 148 | *.manifest 149 | *.spec 150 | 151 | # Installer logs 152 | pip-log.txt 153 | pip-delete-this-directory.txt 154 | 155 | # Unit test / coverage reports 156 | htmlcov/ 157 | .tox/ 158 | .nox/ 159 | .coverage 160 | .coverage.* 161 | .cache 162 | nosetests.xml 163 | coverage.xml 164 | *.cover 165 | *.py,cover 166 | .hypothesis/ 167 | .pytest_cache/ 168 | cover/ 169 | 170 | # Translations 171 | *.mo 172 | *.pot 173 | 174 | # Django stuff: 175 | *.log 176 | local_settings.py 177 | db.sqlite3 178 | db.sqlite3-journal 179 | 180 | # Flask stuff: 181 | instance/ 182 | .webassets-cache 183 | 184 | # Scrapy stuff: 185 | .scrapy 186 | 187 | # Sphinx documentation 188 | docs/_build/ 189 | 190 | # PyBuilder 191 | .pybuilder/ 192 | target/ 193 | 194 | # Jupyter Notebook 195 | .ipynb_checkpoints 196 | 197 | # IPython 198 | profile_default/ 199 | ipython_config.py 200 | 201 | # pyenv 202 | # For a library or package, you might want to ignore these files since the code is 203 | # intended to run in multiple environments; otherwise, check them in: 204 | # .python-version 205 | 206 | # pipenv 207 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 208 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 209 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 210 | # install all needed dependencies. 
211 | #Pipfile.lock 212 | 213 | # poetry 214 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 215 | # This is especially recommended for binary packages to ensure reproducibility, and is more 216 | # commonly ignored for libraries. 217 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 218 | #poetry.lock 219 | 220 | # pdm 221 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 222 | #pdm.lock 223 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 224 | # in version control. 225 | # https://pdm.fming.dev/#use-with-ide 226 | .pdm.toml 227 | 228 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 229 | __pypackages__/ 230 | 231 | # Celery stuff 232 | celerybeat-schedule 233 | celerybeat.pid 234 | 235 | # SageMath parsed files 236 | *.sage.py 237 | 238 | # Environments 239 | .env 240 | .venv 241 | env/ 242 | venv/ 243 | ENV/ 244 | env.bak/ 245 | venv.bak/ 246 | 247 | # Spyder project settings 248 | .spyderproject 249 | .spyproject 250 | 251 | # Rope project settings 252 | .ropeproject 253 | 254 | # mkdocs documentation 255 | /site 256 | 257 | # mypy 258 | .mypy_cache/ 259 | .dmypy.json 260 | dmypy.json 261 | 262 | # Pyre type checker 263 | .pyre/ 264 | 265 | # pytype static type analyzer 266 | .pytype/ 267 | 268 | # Cython debug symbols 269 | cython_debug/ 270 | 271 | # PyCharm 272 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 273 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 274 | # and can be added to the global gitignore or merged into this file. For a more nuclear 275 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 276 | #.idea/ 277 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "contracts/lib/forge-std"] 2 | path = contracts/lib/forge-std 3 | url = https://github.com/foundry-rs/forge-std 4 | [submodule "executor/contracts/lib/forge-std"] 5 | path = executor/contracts/lib/forge-std 6 | url = https://github.com/foundry-rs/forge-std 7 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.4.0 4 | hooks: 5 | - id: check-ast 6 | - id: check-case-conflict 7 | - id: check-docstring-first 8 | - id: check-executables-have-shebangs 9 | - id: check-json 10 | - id: check-added-large-files 11 | - id: pretty-format-json 12 | args: 13 | - "--autofix" 14 | - "--indent=4" 15 | 16 | - id: detect-private-key 17 | - id: debug-statements 18 | - id: end-of-file-fixer 19 | - id: trailing-whitespace 20 | 21 | - repo: local 22 | hooks: 23 | - id: mypy 24 | name: mypy 25 | entry: mypy . 
26 | require_serial: true 27 | language: system 28 | types: [ python ] 29 | pass_filenames: false 30 | args: [ --config-file=pyproject.toml ] 31 | - id: ruff-lint 32 | name: ruff-lint 33 | entry: ruff check --fix 34 | require_serial: true 35 | language: system 36 | types: [ python ] 37 | - id: ruff-format 38 | name: ruff-format 39 | entry: ruff format 40 | require_serial: true 41 | language: system 42 | types: [ python ] 43 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Node stage 2 | FROM node:20.10.0 as builder 3 | WORKDIR /app/widget 4 | COPY widget/package.json widget/yarn.lock ./ 5 | RUN yarn install 6 | COPY widget ./ 7 | RUN yarn run build 8 | 9 | # Python stage 10 | FROM python:3.11.5-slim-bullseye 11 | 12 | WORKDIR /app 13 | 14 | COPY . . 15 | ENV PYTHONPATH=${PYTHONPATH}:${PWD} 16 | RUN pip3 install poetry 17 | RUN poetry config virtualenvs.create false 18 | RUN poetry install 19 | 20 | COPY --from=builder /app/dist /app/dist 21 | 22 | CMD ["python", "main.py"] 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024-present Jowo Rinpoche 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OmniAgent Framework 2 | 3 | [](https://opensource.org/licenses/MIT) 4 | [](https://github.com/VividGen/OmniAgent/stargazers) 5 | [](https://github.com/VividGen/OmniAgent/issues) 6 | 7 | OmniAgent is an enterprise-grade AI orchestration framework that revolutionizes Web3 development by seamlessly bridging artificial intelligence with blockchain technologies. Build powerful on-chain AI agents in hours instead of months. 
8 | 9 | ## 🚀 Key Features 10 | 11 | - **Modular Architecture**: Three-layer design with Interpreter, Classifier, and specialized Executors 12 | - **Intelligent Task Routing**: Smart classification system powered by Google Gemma and domain-specific models 13 | - **Plug-and-Play Model Integration**: Easy integration with various AI models 14 | - **Cross-Chain Compatibility**: Seamless interaction with multiple blockchain networks 15 | - **Specialized Executors**: 16 | - DeFi Operations 17 | - Token/NFT Management 18 | - Web3 Knowledge Integration 19 | - Social Data Analysis 20 | 21 | ## 🏗️ Architecture 22 | 23 | ``` 24 | ┌─────────────────┐ 25 | │ User Input │ 26 | └────────┬────────┘ 27 | ▼ 28 | ┌─────────────────┐ 29 | │ Interpreter │ ─── Task Understanding & Parameter Extraction 30 | └────────┬────────┘ 31 | ▼ 32 | ┌─────────────────┐ 33 | │ Classifier │ ─── Intelligent Task Routing 34 | └────────┬────────┘ 35 | ▼ 36 | ┌─────────────────┐ 37 | │ Executor │ ─── Specialized Task Execution 38 | └────────┬────────┘ 39 | ▼ 40 | ┌─────────────────┐ 41 | │ Web3 │ ─── Blockchain & Protocol Interaction 42 | └─────────────────┘ 43 | ``` 44 | 45 | ## 🛠️ Installation 46 | 47 | ```bash 48 | # Clone the repository 49 | git clone https://github.com/VividGen/OmniAgent.git 50 | 51 | # Configure environment 52 | cp .env.example .env 53 | 54 | # Start 55 | docker-compose up -d 56 | ``` 57 | 58 | ## 📦 Quick Start 59 | 60 | ```javascript 61 | const { OmniAgent } = require('omniagent'); 62 | 63 | // Initialize OmniAgent 64 | const agent = new OmniAgent({ 65 | model: 'gemma', 66 | executors: ['defi', 'token', 'social'] 67 | }); 68 | 69 | // Execute a task 70 | const result = await agent.execute({ 71 | task: 'Token swap', 72 | params: { 73 | fromToken: 'ETH', 74 | toToken: 'USDC', 75 | amount: '1.0' 76 | } 77 | }); 78 | ``` 79 | 80 | ## 💡 Use Cases 81 | 82 | - **DeFi Operations**: Token swaps, liquidity provision, yield farming 83 | - **Asset Management**: NFT trading, token transfers, portfolio analysis 84 | - **Market Intelligence**: Price tracking, trend analysis, social sentiment 85 | - **Cross-Chain Operations**: Bridge transfers, cross-chain swaps 86 | - **Smart Contract Interaction**: Contract deployment, function calls 87 | 88 | ## 🔧 Configuration 89 | 90 | ```javascript 91 | { 92 | "interpreter": { 93 | "model": "gemma", 94 | "temperature": 0.7 }, 95 | "classifier": { 96 | "model": "codegemma", 97 | "threshold": 0.85 98 | }, 99 | "executors": { 100 | "defi": { 101 | "networks": ["ethereum", "polygon"], 102 | "protocols": ["uniswap", "aave"] 103 | }, 104 | "token": { 105 | "supportedTokens": ["ERC20", "ERC721", "ERC1155"] 106 | } 107 | } 108 | } 109 | ``` 110 | 111 | ## 📚 Documentation 112 | 113 | Comprehensive documentation is available at our documentation site. 114 | 115 | ## 🤝 Contributing 116 | 117 | We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details. 118 | 119 | 1. Fork the repository 120 | 2. Create your feature branch (`git checkout -b feature/AmazingFeature`) 121 | 3. Commit your changes (`git commit -m 'Add some AmazingFeature'`) 122 | 4. Push to the branch (`git push origin feature/AmazingFeature`) 123 | 5. Open a Pull Request 124 | 125 | ## 📄 License 126 | 127 | This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. 
128 | 129 | ## 🌟 Acknowledgments 130 | 131 | - Google Gemma and CodeGemma teams for their excellent models 132 | - The Web3 community for continuous support and feedback 133 | - All contributors who have helped shape OmniAgent 134 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | services: 3 | omniagent: 4 | image: rss3/omniagent:latest 5 | container_name: omniagent 6 | ports: 7 | - "18000:8000" 8 | env_file: 9 | - .env 10 | depends_on: 11 | - vector_db 12 | networks: 13 | - omniagent-network 14 | 15 | vector_db: 16 | image: pgvector/pgvector:pg16 17 | container_name: vec_db 18 | restart: unless-stopped 19 | environment: 20 | POSTGRES_USER: postgres 21 | POSTGRES_PASSWORD: password 22 | POSTGRES_DB: omniagent 23 | ports: 24 | - "15432:5432" 25 | volumes: 26 | - vector_data:/var/lib/postgresql/data 27 | networks: 28 | - omniagent-network 29 | 30 | ollama: 31 | volumes: 32 | - ollama_data:/root/.ollama 33 | container_name: ollama 34 | tty: true 35 | restart: unless-stopped 36 | image: ollama/ollama:latest 37 | ports: 38 | - "21434:11434" 39 | environment: 40 | - OLLAMA_KEEP_ALIVE=24h 41 | networks: 42 | - omniagent-network 43 | deploy: 44 | resources: 45 | reservations: 46 | devices: 47 | - driver: nvidia 48 | count: 1 49 | capabilities: [ gpu ] 50 | 51 | volumes: 52 | vector_data: 53 | ollama_data: 54 | networks: 55 | omniagent-network: 56 | external: false 57 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import uvicorn 2 | from dotenv import load_dotenv 3 | from loguru import logger 4 | 5 | if __name__ == "__main__": 6 | load_dotenv() 7 | logger.info("Starting OmniAgent") 8 | uvicorn.run("omniagent.app:app", host="0.0.0.0", reload=False, port=8000) 9 | -------------------------------------------------------------------------------- /omniagent/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VividGen/CMA/74a1987f6cee3036c30da39c6dd548c5d5e5a305/omniagent/__init__.py -------------------------------------------------------------------------------- /omniagent/agents/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VividGen/CMA/74a1987f6cee3036c30da39c6dd548c5d5e5a305/omniagent/agents/__init__.py -------------------------------------------------------------------------------- /omniagent/agents/agent_factory.py: -------------------------------------------------------------------------------- 1 | from langchain.agents import AgentExecutor, create_tool_calling_agent 2 | from langchain_core.language_models import BaseChatModel 3 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder 4 | 5 | 6 | def create_agent(llm: BaseChatModel, tools: list, system_prompt: str): 7 | prompt = ChatPromptTemplate.from_messages( 8 | [ 9 | ("system", system_prompt), 10 | MessagesPlaceholder(variable_name="messages"), 11 | MessagesPlaceholder(variable_name="agent_scratchpad"), 12 | ] 13 | ) 14 | agent = create_tool_calling_agent(llm, tools, prompt) 15 | executor = AgentExecutor(agent=agent, tools=tools, verbose=True) 16 | return executor 17 | -------------------------------------------------------------------------------- 
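
Note: `create_agent` in omniagent/agents/agent_factory.py above is the factory that every specialized agent under omniagent/agents/ goes through. The snippet below is a minimal usage sketch, not a file in this repository; it assumes the .env settings (e.g. DB_CONNECTION, OPENAI_API_KEY, COINGECKO_API_KEY) are configured and borrows `PriceExecutor` purely as an illustrative tool — any executor from omniagent/executors/ can be passed the same way, as the agent modules that follow demonstrate.

```python
# Minimal sketch (not part of the repository): wiring create_agent with a chat
# model and one executor, mirroring the pattern used by the agents below.
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

from omniagent.agents.agent_factory import create_agent
from omniagent.executors.price_executor import PriceExecutor  # illustrative tool; needs COINGECKO_API_KEY

llm = ChatOpenAI(model="gpt-4o-mini")  # any BaseChatModel works here
agent = create_agent(
    llm,
    [PriceExecutor()],
    "You are a helpful crypto market assistant.",
)

# The prompt template expects a "messages" variable; "agent_scratchpad" is
# populated by the AgentExecutor itself.
result = agent.invoke({"messages": [HumanMessage(content="What is the current price of ETH?")]})
print(result["output"])
```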
/omniagent/agents/asset_management.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | 3 | from omniagent.agents.agent_factory import create_agent 4 | from omniagent.conf.env import settings 5 | from omniagent.executors.nft_balance_executor import NFTBalanceExecutor 6 | from omniagent.executors.swap_executor import SwapExecutor 7 | from omniagent.executors.token_balance_executor import TokenBalanceExecutor 8 | from omniagent.executors.transfer_executor import TransferExecutor 9 | 10 | load_dotenv() 11 | 12 | 13 | def build_asset_management_agent(llm): 14 | executors = [SwapExecutor(), TransferExecutor()] 15 | if settings.MORALIS_API_KEY: 16 | executors.extend([TokenBalanceExecutor(), NFTBalanceExecutor()]) 17 | 18 | asset_management_agent = create_agent( 19 | llm, 20 | executors, 21 | """ 22 | You are AssetManager, an AI assistant for crypto asset management. Your responsibilities include: 23 | 24 | 1. Query and report on users' token balances 25 | 2. Check and inform about users' NFT holdings 26 | 3. Handle user requests to swap or transfer tokens 27 | 28 | Important guidelines for handling requests: 29 | - For token swaps: Always use SwapExecutor with exact token symbols (ETH, USDT, etc.) 30 | - For balance checks: Use TokenBalanceExecutor with chain="eth" (not "ethereum") 31 | - For NFT holdings: Use NFTBalanceExecutor with chain="eth" (not "ethereum") 32 | - For transfers: Use TransferExecutor with exact token symbols 33 | 34 | Examples of correct executor usage: 35 | - Swap request: Use SwapExecutor with from_token="ETH", to_token="USDT" 36 | - Balance check: Use TokenBalanceExecutor with chain="eth" 37 | - NFT check: Use NFTBalanceExecutor with chain="eth" 38 | - Transfer: Use TransferExecutor with token="ETH" 39 | 40 | When interacting with users: 41 | - Provide accurate and detailed information 42 | - Maintain a friendly and enthusiastic tone 43 | - Use occasional puns or jokes to keep the conversation engaging 44 | - Include relevant emojis to enhance your messages 45 | - For privacy reasons, do not include address information when generating widgets 46 | - Always execute the requested operation using the appropriate executor 47 | 48 | Remember to always process user requests immediately using the correct executor with exact parameter values. 49 | """.strip(), 50 | ) 51 | return asset_management_agent 52 | -------------------------------------------------------------------------------- /omniagent/agents/block_explore.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | 3 | from omniagent.agents.agent_factory import create_agent 4 | from omniagent.executors.block_stat_executor import BlockStatExecutor 5 | from omniagent.executors.search_executor import search_executor 6 | 7 | load_dotenv() 8 | 9 | executors = [BlockStatExecutor(), search_executor] 10 | 11 | 12 | def build_block_explorer_agent(llm): 13 | block_explorer_agent = create_agent( 14 | llm, 15 | executors, 16 | """ 17 | You are BlockExplorer, dedicated to exploring and presenting detailed blockchain information. 18 | Help users query transaction details, block data, gas fees, block height, and other blockchain-related information. 19 | Use the available tools to gather and display accurate blockchain data. 20 | 21 | Your answer should be detailed and include puns or jokes where possible \ 22 | And keep a lively, enthusiastic, and energetic tone, maybe include some emojis. 
23 | """.strip(), 24 | ) 25 | 26 | return block_explorer_agent 27 | -------------------------------------------------------------------------------- /omniagent/agents/fallback.py: -------------------------------------------------------------------------------- 1 | from langchain_core.language_models import BaseChatModel 2 | from langchain_core.messages import HumanMessage 3 | from langchain_core.output_parsers import StrOutputParser 4 | from langchain_core.prompts import ChatPromptTemplate 5 | from loguru import logger 6 | 7 | 8 | def build_fallback_agent(llm: BaseChatModel): 9 | def fallback(state): 10 | logger.info("Running fallback agent") 11 | 12 | chat_template = ChatPromptTemplate.from_messages( 13 | [ 14 | ( 15 | "system", 16 | """ 17 | You are the OmniAgent created by RSS3. 18 | 19 | Your role: 20 | 1. Handle general queries and conversations that don't fall under the expertise of other specialized agents. 21 | 2. Clarify unclear requests and provide versatile assistance. 22 | 3. Maintain conversation continuity and guide users to appropriate specialists when necessary. 23 | 24 | Your communication style: 25 | - Be friendly, approachable, and enthusiastic in your responses. 26 | - Use a mix of professional knowledge and casual charm. 27 | - Include relevant puns, jokes, or word plays to keep the conversation lively. 28 | - Sprinkle in emojis occasionally to add personality to your messages. 29 | - Provide detailed answers, but keep them concise and easy to understand. 30 | 31 | Remember: 32 | - If a query seems more suitable for a specialized agent (Market Analyst, Asset Manager, 33 | Block Explorer, or Research Analyst), suggest redirecting the user while still providing a helpful general response. 34 | - Always aim to add value, even if the query is outside your primary expertise. 35 | - When in doubt, ask for clarification to ensure you're addressing the user's needs accurately. 36 | 37 | Let's make every interaction informative, fun, and memorable! 🚀✨ 38 | """.strip(), 39 | ), 40 | *state["messages"][0:-1], 41 | ("human", "{input}"), 42 | ] 43 | ) 44 | chain = chat_template | llm | StrOutputParser() 45 | return { 46 | "messages": [ 47 | HumanMessage( 48 | content=chain.invoke({"input": state["messages"][-1].content}), 49 | name="fallback", 50 | ) 51 | ] 52 | } 53 | 54 | return fallback 55 | -------------------------------------------------------------------------------- /omniagent/agents/feed_explore.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | from langchain_core.language_models import BaseChatModel 3 | 4 | from omniagent.agents.agent_factory import create_agent 5 | from omniagent.executors.feed_executor import FeedExecutor 6 | from omniagent.executors.tg_news_executor import TelegramNewsExecutor 7 | 8 | load_dotenv() 9 | 10 | FEED_EXPLORER_PROMPT = """You are a blockchain social activity and news assistant. 11 | 12 | You help users explore on-chain social activities and get the latest crypto news from reliable sources. 13 | 14 | You have access to the following tools: 15 | 16 | 1. FeedExecutor: Use this to fetch and analyze social activities of blockchain addresses or ENS names. 17 | - You can fetch different types of activities: "all", "post", "comment", "share" 18 | - For addresses, you can handle both raw addresses (0x...) and ENS names (e.g., vitalik.eth) 19 | - Always explain the activities in a clear, human-readable format 20 | 21 | 2. 
TelegramNewsExecutor: Use this to get the latest cryptocurrency and blockchain news from trusted Telegram channels. 22 | - You can specify how many news items to fetch (default is 10) 23 | - Present the news in a well-organized format 24 | - Highlight important updates and trends 25 | 26 | Guidelines for your responses: 27 | - When users ask about an address's activities, use FeedExecutor to fetch relevant information 28 | - When users want recent crypto news or updates, use TelegramNewsExecutor 29 | - Always provide context and explanations for the information you present 30 | - If you encounter any errors or limitations, explain them clearly to the user 31 | - You can combine information from both tools when appropriate 32 | 33 | Examples of queries you can handle: 34 | - "What has vitalik.eth been doing recently?" 35 | - "Show me the latest crypto news" 36 | - "What are the social activities of 0x742d35Cc6634C0532925a3b844Bc454e4438f44e?" 37 | - "Get me the latest 5 news updates from crypto channels" 38 | - "Show me recent posts from vitalik.eth" 39 | 40 | Remember: 41 | - Be concise but informative in your responses 42 | - Format the information in an easy-to-read manner 43 | - Provide relevant context when presenting activities or news 44 | - If you're unsure about something, acknowledge it and explain what you do know 45 | """ 46 | 47 | 48 | def build_feed_explorer_agent(llm: BaseChatModel): 49 | feed_explorer_agent = create_agent( 50 | llm, 51 | [FeedExecutor(), TelegramNewsExecutor()], 52 | FEED_EXPLORER_PROMPT, 53 | ) 54 | return feed_explorer_agent 55 | -------------------------------------------------------------------------------- /omniagent/agents/market_analysis.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | from langchain_core.language_models import BaseChatModel 3 | 4 | from omniagent.agents.agent_factory import create_agent 5 | from omniagent.conf.env import settings 6 | from omniagent.executors.coin_market_executor import CoinMarketExecutor 7 | from omniagent.executors.funding_rate_executor import FundingRateExecutor 8 | from omniagent.executors.nft_rank_executor import NFTRankingExecutor 9 | from omniagent.executors.price_executor import PriceExecutor 10 | from omniagent.executors.search_executor import search_executor 11 | 12 | load_dotenv() 13 | 14 | 15 | def build_market_analysis_agent(llm: BaseChatModel): 16 | executors = [search_executor] 17 | if settings.COINGECKO_API_KEY: 18 | executors.extend([PriceExecutor(), CoinMarketExecutor()]) 19 | if settings.MORALIS_API_KEY: 20 | executors.extend([NFTRankingExecutor()]) 21 | return create_agent( 22 | llm, 23 | executors, 24 | """ 25 | You are MarketAnalyst, responsible for providing market data analysis. 26 | Help users understand market dynamics and trends by retrieving real-time price information of tokens. 27 | 28 | For funding rate queries, always use the FundingRateExecutor instead of search. 29 | 30 | Your answer should be detailed and include puns or jokes where possible \ 31 | And keep a lively, enthusiastic, and energetic tone, maybe include some emojis. 
32 | """.strip(), 33 | ) 34 | -------------------------------------------------------------------------------- /omniagent/agents/research_analyst.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | from langchain_core.language_models import BaseChatModel 3 | 4 | from omniagent.agents.agent_factory import create_agent 5 | from omniagent.conf.env import settings 6 | from omniagent.executors.project_executor import ProjectExecutor 7 | from omniagent.executors.search_executor import search_executor 8 | 9 | load_dotenv() 10 | 11 | 12 | def build_research_analyst_agent(llm: BaseChatModel): 13 | executors = [search_executor] 14 | if settings.ROOTDATA_API_KEY: 15 | executors.append(ProjectExecutor()) 16 | 17 | research_analyst_agent = create_agent( 18 | llm, 19 | executors, 20 | """ 21 | You are ResearchAnalyst, responsible for assisting users in conducting research and analysis related to web3 projects. 22 | Provide accurate and detailed information about project progress, team members, market trends, investors, 23 | and other relevant data to support investment decisions. 24 | 25 | Your answer should be detailed and include puns or jokes where possible \ 26 | And keep a lively, enthusiastic, and energetic tone, maybe include some emojis. 27 | """.strip(), 28 | ) 29 | return research_analyst_agent 30 | -------------------------------------------------------------------------------- /omniagent/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import vertexai 3 | from chainlit.utils import mount_chainlit 4 | from dotenv import load_dotenv 5 | from fastapi import FastAPI, Request 6 | from fastapi.middleware.cors import CORSMiddleware 7 | from fastapi.openapi.utils import get_openapi 8 | from loguru import logger 9 | from starlette.staticfiles import StaticFiles 10 | import traceback 11 | from starlette.responses import JSONResponse 12 | 13 | from omniagent.conf.env import settings 14 | from omniagent.router import openai_router, widget_router, health_router 15 | 16 | load_dotenv() 17 | app = FastAPI( 18 | title="OmniAgent API", 19 | description="OmniAgent is a framework for building AI applications leveraging the power of blockchains.", 20 | license_info={ 21 | "name": "MIT", 22 | "url": "https://github.com/vividgen/OmniAgent/blob/main/LICENSE", 23 | }, 24 | ) 25 | 26 | app.add_middleware( 27 | CORSMiddleware, 28 | allow_origins=["*"], 29 | allow_credentials=True, 30 | allow_methods=["*"], 31 | allow_headers=["*"], 32 | ) 33 | 34 | # Add routers 35 | app.include_router(openai_router) 36 | app.include_router(widget_router) 37 | app.include_router(health_router) 38 | 39 | # Check and create static files directory 40 | static_dir = os.path.join("dist", "static") 41 | if not os.path.exists(static_dir): 42 | try: 43 | os.makedirs(static_dir) 44 | logger.info(f"Created directory: {static_dir}") 45 | except OSError as e: 46 | logger.error(f"Error creating directory {static_dir}: {e}") 47 | 48 | app.mount("/static", StaticFiles(directory=static_dir), name="widget") 49 | 50 | mount_chainlit(app=app, target="omniagent/ui/app.py", path="") 51 | 52 | if settings.VERTEX_PROJECT_ID: 53 | vertexai.init(project=settings.VERTEX_PROJECT_ID) 54 | 55 | 56 | @app.exception_handler(Exception) 57 | async def global_exception_handler(request: Request, exc: Exception): 58 | error_msg = f"Global error: {str(exc)}\nTraceback:\n{traceback.format_exc()}" 59 | logger.error(error_msg) 60 | return 
JSONResponse( 61 | status_code=500, 62 | content={"error": str(exc), "traceback": traceback.format_exc()}, 63 | ) 64 | 65 | 66 | def custom_openapi(): 67 | if app.openapi_schema: 68 | return app.openapi_schema 69 | 70 | openapi_schema = get_openapi( 71 | title="OmniAgent API", 72 | version="1.0.0", 73 | description="OmniAgent API documentation", 74 | routes=app.routes, 75 | ) 76 | 77 | openapi_schema["servers"] = [ 78 | { 79 | "url": "https://agent.open.network", 80 | "description": "Production server" 81 | } 82 | ] 83 | 84 | app.openapi_schema = openapi_schema 85 | return app.openapi_schema 86 | 87 | 88 | app.openapi = custom_openapi 89 | -------------------------------------------------------------------------------- /omniagent/conf/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VividGen/CMA/74a1987f6cee3036c30da39c6dd548c5d5e5a305/omniagent/conf/__init__.py -------------------------------------------------------------------------------- /omniagent/conf/env.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from dotenv import load_dotenv 4 | from pydantic import Field 5 | from pydantic_settings import BaseSettings 6 | 7 | load_dotenv() 8 | 9 | 10 | class Settings(BaseSettings): 11 | DB_CONNECTION: str = Field(..., description="Database connection string") 12 | 13 | # LLM provider settings (at least one required) 14 | VERTEX_PROJECT_ID: Optional[str] = Field( 15 | default=None, description="Google Cloud Vertex project ID. Info: https://cloud.google.com/vertex-ai/docs/reference" 16 | ) 17 | OPENAI_API_KEY: Optional[str] = Field(default=None, description="OpenAI API Key. Info: https://platform.openai.com") 18 | ANTHROPIC_API_KEY: Optional[str] = Field(default=None, description="Anthropic API Key. Info: https://www.anthropic.com") 19 | GOOGLE_GEMINI_API_KEY: Optional[str] = Field(default=None, description="Google Gemini API Key. Info: https://ai.google.dev") 20 | OLLAMA_HOST: Optional[str] = Field(default=None, description="OLLAMA API Base URL. Info: https://github.com/ollama/ollama") 21 | 22 | # API keys for various tools; some features will be disabled if not set 23 | TAVILY_API_KEY: Optional[str] = Field(default=None, description="Tavily API Key. Info: https://tavily.com/") 24 | MORALIS_API_KEY: Optional[str] = Field(default=None, description="Moralis API Key. Info: https://moralis.io/") 25 | ROOTDATA_API_KEY: Optional[str] = Field(default=None, description="RootData API Key. Info: https://www.rootdata.com/") 26 | COINGECKO_API_KEY: Optional[str] = Field(default=None, description="CoinGecko API Key. 
Info: https://www.coingecko.com/en/api/pricing") 27 | RSS3_DATA_API: str = Field(default="https://gi.vividgen.me", description="RSS3 Data API URL") 28 | 29 | # Chainlit OAuth settings; either all fields are None or all are set 30 | CHAINLIT_AUTH_SECRET: Optional[str] = Field(default=None, description="Chainlit Auth Secret") 31 | OAUTH_AUTH0_CLIENT_ID: Optional[str] = Field(default=None, description="OAuth Auth0 Client ID") 32 | OAUTH_AUTH0_CLIENT_SECRET: Optional[str] = Field(default=None, description="OAuth Auth0 Client Secret") 33 | OAUTH_AUTH0_DOMAIN: Optional[str] = Field(default=None, description="OAuth Auth0 Domain") 34 | 35 | 36 | settings = Settings() 37 | -------------------------------------------------------------------------------- /omniagent/conf/llm_provider.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | 3 | import ollama 4 | from langchain_anthropic import ChatAnthropic 5 | from langchain_core.language_models import BaseChatModel 6 | from langchain_google_genai import ChatGoogleGenerativeAI 7 | from langchain_google_vertexai import ChatVertexAI 8 | from langchain_ollama import ChatOllama 9 | from langchain_openai import ChatOpenAI 10 | from loguru import logger 11 | from toolz import memoize 12 | 13 | from omniagent.conf.env import settings 14 | 15 | SUPPORTED_OLLAMA_MODELS = { 16 | "llama3.2": {"name": "llama3.2", "supports_tools": True}, 17 | "mistral-nemo": {"name": "mistral-nemo", "supports_tools": True}, 18 | "darkmoon/olmo:7B-instruct-q6-k": {"name": "olmo", "supports_tools": False}, 19 | 'llama3.1': {'name': 'llama3.1', 'supports_tools': True}, 20 | "qwen2.5": {"name": "qwen2.5", "supports_tools": True}, 21 | "mistral": {"name": "mistral", "supports_tools": True}, 22 | "qwen2": {"name": "qwen2", "supports_tools": True}, 23 | } 24 | 25 | MODELS_ICONS = { 26 | "llama3.1": "/public/llama.png", 27 | "llama3.2": "/public/llama.png", 28 | "mistral": "/public/mistral.png", 29 | "mistral-nemo": "/public/mistral.png", 30 | "mistral-large": "/public/mistral.png", 31 | "olmo": "/public/olmo.png", 32 | "qwen2": "/public/qwen.png", 33 | "qwen2.5": "/public/qwen.png", 34 | } 35 | 36 | 37 | @memoize 38 | def get_available_ollama_providers() -> List[str]: 39 | try: 40 | ollama_list = ollama.list() 41 | available_models = [] 42 | for model in ollama_list["models"]: 43 | full_name = model["name"] 44 | # check if the full model name is in SUPPORTED_MODELS 45 | if full_name in SUPPORTED_OLLAMA_MODELS: 46 | available_models.append(full_name) 47 | else: 48 | # try to check the base name (without version tag) 49 | base_name = full_name.split(":")[0] 50 | if base_name in SUPPORTED_OLLAMA_MODELS: 51 | available_models.append(base_name) 52 | return available_models 53 | except Exception as e: 54 | logger.exception("Failed to get available ollama providers", e) 55 | return [] 56 | 57 | 58 | def get_provider(model: str, provider_func) -> Dict[str, BaseChatModel]: 59 | provider = provider_func(model) 60 | return {model: provider} if provider else {} 61 | 62 | 63 | def get_available_providers() -> Dict[str, BaseChatModel]: 64 | providers = {} 65 | 66 | provider_configs = [ 67 | (["gpt-4o-mini", "gpt-4o", "gpt-3.5-turbo"], get_openai_provider), 68 | (["claude-3-5-sonnet"], get_anthropic_provider), 69 | (["gemini-1.5-pro", "gemini-1.5-flash"], get_gemini_provider), 70 | ] 71 | 72 | for models, provider_func in provider_configs: 73 | for model in models: 74 | providers.update(get_provider(model, provider_func)) 75 | 76 | if 
settings.OLLAMA_HOST: 77 | ollama_models = get_available_ollama_providers() 78 | for model in ollama_models: 79 | providers.update(get_provider(model, get_ollama_provider)) 80 | 81 | return providers 82 | 83 | 84 | def get_openai_provider(model: str) -> BaseChatModel | None: 85 | return ChatOpenAI(model=model) if settings.OPENAI_API_KEY else None 86 | 87 | 88 | def get_anthropic_provider(model: str) -> BaseChatModel | None: 89 | return ChatAnthropic(model="claude-3-5-sonnet-20240620", ) if settings.ANTHROPIC_API_KEY else None 90 | 91 | 92 | def get_gemini_provider(model: str) -> BaseChatModel | None: 93 | if settings.VERTEX_PROJECT_ID: 94 | return ChatVertexAI(model=model) 95 | elif settings.GOOGLE_GEMINI_API_KEY: 96 | return ChatGoogleGenerativeAI(model=model, google_api_key=settings.GOOGLE_GEMINI_API_KEY) 97 | return None 98 | 99 | 100 | def get_ollama_provider(model: str) -> BaseChatModel | None: 101 | return ChatOllama(model=model) if settings.OLLAMA_HOST else None 102 | -------------------------------------------------------------------------------- /omniagent/db/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VividGen/CMA/74a1987f6cee3036c30da39c6dd548c5d5e5a305/omniagent/db/__init__.py -------------------------------------------------------------------------------- /omniagent/db/database.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy import create_engine 2 | from sqlalchemy.orm import sessionmaker 3 | from sqlalchemy_utils import create_database, database_exists 4 | 5 | from omniagent.conf.env import settings 6 | from omniagent.db.models import Base 7 | 8 | url = settings.DB_CONNECTION 9 | 10 | if not database_exists(url): 11 | create_database(url) 12 | engine = create_engine(url, connect_args={"options": "-c timezone=utc"}) 13 | Base.metadata.create_all(bind=engine) # type: ignore 14 | 15 | DBSession = sessionmaker(bind=engine) 16 | -------------------------------------------------------------------------------- /omniagent/db/models.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | 3 | from sqlalchemy import ARRAY, JSON, Boolean, Column, Integer, Text 4 | from sqlalchemy.dialects.postgresql import UUID 5 | from sqlalchemy.orm import declarative_base 6 | 7 | Base = declarative_base() # type: ignore 8 | 9 | 10 | class User(Base): # type: ignore 11 | __tablename__ = "users" 12 | id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) 13 | identifier = Column(Text, nullable=False, unique=True) 14 | metadata_ = Column("metadata", JSON, nullable=False) 15 | createdAt = Column(Text) 16 | 17 | 18 | class Thread(Base): # type: ignore 19 | __tablename__ = "threads" 20 | id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) 21 | createdAt = Column(Text) 22 | name = Column(Text) 23 | userId = Column(UUID(as_uuid=True)) 24 | userIdentifier = Column(Text) 25 | tags = Column(ARRAY(Text)) # type: ignore 26 | metadata_ = Column("metadata", JSON) 27 | 28 | 29 | class Step(Base): # type: ignore 30 | __tablename__ = "steps" 31 | id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) 32 | name = Column(Text, nullable=False) 33 | type = Column(Text, nullable=False) 34 | threadId = Column(UUID(as_uuid=True)) 35 | parentId = Column(UUID(as_uuid=True)) 36 | disableFeedback = Column(Boolean, nullable=False) 37 | streaming = Column(Boolean, nullable=False) 38 | waitForAnswer 
= Column(Boolean) 39 | isError = Column(Boolean) 40 | metadata_ = Column("metadata", JSON) 41 | tags = Column(ARRAY(Text)) # type: ignore 42 | input = Column(Text) 43 | output = Column(Text) 44 | createdAt = Column(Text) 45 | start = Column(Text) 46 | end = Column(Text) 47 | generation = Column(JSON) 48 | showInput = Column(Text) 49 | language = Column(Text) 50 | indent = Column(Integer) 51 | 52 | 53 | class Element(Base): # type: ignore 54 | __tablename__ = "elements" 55 | id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) 56 | threadId = Column(UUID(as_uuid=True)) 57 | type = Column(Text) 58 | url = Column(Text) 59 | chainlitKey = Column(Text) 60 | name = Column(Text, nullable=False) 61 | display = Column(Text) 62 | objectKey = Column(Text) 63 | size = Column(Text) 64 | page = Column(Integer) 65 | language = Column(Text) 66 | forId = Column(UUID(as_uuid=True)) 67 | mime = Column(Text) 68 | 69 | 70 | class Feedback(Base): # type: ignore 71 | __tablename__ = "feedbacks" 72 | id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) 73 | forId = Column(UUID(as_uuid=True), nullable=False) 74 | value = Column(Integer, nullable=False) 75 | comment = Column(Text) 76 | threadId = Column(UUID(as_uuid=True)) 77 | -------------------------------------------------------------------------------- /omniagent/executors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VividGen/CMA/74a1987f6cee3036c30da39c6dd548c5d5e5a305/omniagent/executors/__init__.py -------------------------------------------------------------------------------- /omniagent/executors/block_stat_executor.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Type 2 | 3 | import ccxt 4 | import requests 5 | from langchain.callbacks.manager import ( 6 | AsyncCallbackManagerForToolRun, 7 | CallbackManagerForToolRun, 8 | ) 9 | from langchain.tools import BaseTool 10 | from pydantic import BaseModel, Field 11 | 12 | 13 | class ARGS(BaseModel): 14 | chain: str = Field( 15 | description="The blockchain to fetch statistics for. " 16 | "Options: ethereum, bitcoin, bitcoin-cash, litecoin," 17 | " bitcoin-sv, dogecoin, dash, groestlcoin," 18 | " zcash, ecash, bitcoin/testnet" 19 | ) 20 | 21 | 22 | class BlockStatExecutor(BaseTool): 23 | name = "BlockChainStatExecutor" 24 | description = ( 25 | "get blockchain statistics such as block height, " 26 | "transaction count, gas fees, and more. " 27 | "Supported blockchains include ethereum, Bitcoin, Bitcoin Cash, " 28 | "Litecoin, Bitcoin SV, Dogecoin, Dash, Groestlcoin, Zcash, eCash, " 29 | "and Bitcoin Testnet." 
30 | ) 31 | args_schema: Type[ARGS] = ARGS 32 | 33 | def _run( 34 | self, 35 | chain: str, 36 | run_manager: Optional[CallbackManagerForToolRun] = None, 37 | ) -> str: 38 | return fetch_stat(chain) 39 | 40 | async def _arun( 41 | self, 42 | chain: str, 43 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 44 | ) -> str: 45 | return fetch_stat(chain) 46 | 47 | 48 | _exchanges = [ccxt.binance(), ccxt.okx(), ccxt.gateio(), ccxt.mexc()] 49 | 50 | 51 | def fetch_stat(chain) -> str: 52 | url = f"https://api.blockchair.com/{chain}/stats" 53 | 54 | headers = {"accept": "application/json"} 55 | 56 | response = requests.get(url, headers=headers) 57 | 58 | if response.status_code == 200: 59 | return response.json() 60 | else: 61 | return f"Error fetching data: {response.status_code}, {response.text}" 62 | 63 | 64 | if __name__ == "__main__": 65 | print(fetch_stat("ethereum")) 66 | -------------------------------------------------------------------------------- /omniagent/executors/coin_market_executor.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Optional, Type 3 | 4 | import requests 5 | from langchain.callbacks.manager import ( 6 | AsyncCallbackManagerForToolRun, 7 | CallbackManagerForToolRun, 8 | ) 9 | from langchain.tools import BaseTool 10 | from pydantic import BaseModel, Field 11 | 12 | from omniagent.conf.env import settings 13 | 14 | 15 | class ARGS(BaseModel): 16 | order: str = Field( 17 | description="sort result by field, default: market_cap_desc. options: market_cap_desc," "market_cap_asc,volume_desc,volume_asc" 18 | ) 19 | size: int = Field(description="number of coins to return, default: 20") 20 | 21 | 22 | class CoinMarketExecutor(BaseTool): 23 | name = "CoinMarketExecutor" 24 | 25 | description = "query coins sorted by market cap, volume." 
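    # Illustrative usage (a sketch, not part of the shipped class): with
    # COINGECKO_API_KEY configured, the tool can be exercised directly through
    # BaseTool.run, mirroring the __main__ examples in the other executors:
    #
    #   executor = CoinMarketExecutor()
    #   print(executor.run(tool_input={"order": "market_cap_desc", "size": 10}))
    #
    # The result is a JSON list of {symbol, name, current_price,
    # fully_diluted_valuation, total_volume} entries built by
    # fetch_coins_with_market below.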
26 | args_schema: Type[ARGS] = ARGS 27 | 28 | def _run( 29 | self, 30 | order: str, 31 | size: int, 32 | run_manager: Optional[CallbackManagerForToolRun] = None, 33 | ) -> str: 34 | if settings.COINGECKO_API_KEY is None: 35 | return "Please set COINGECKO_API_KEY in the environment" 36 | return json.dumps(fetch_coins_with_market(order, size)) 37 | 38 | async def _arun( 39 | self, 40 | order: str, 41 | size: int, 42 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 43 | ) -> str: 44 | if settings.COINGECKO_API_KEY is None: 45 | return "Please set COINGECKO_API_KEY in the environment" 46 | return json.dumps(fetch_coins_with_market(order, size)) 47 | 48 | 49 | def fetch_coins_with_market(order: str, size: int = 20) -> list: 50 | url = f"https://pro-api.coingecko.com/api/v3/coins/markets?vs_currency=usd&order={order}&per_page={size}" 51 | 52 | headers = { 53 | "accept": "application/json", 54 | "x-cg-pro-api-key": settings.COINGECKO_API_KEY, 55 | } 56 | 57 | response = requests.get(url, headers=headers) 58 | 59 | res = json.loads(response.text) 60 | return list( 61 | map( 62 | lambda x: { 63 | "symbol": x["symbol"], 64 | "name": x["name"], 65 | "current_price": x["current_price"], 66 | "fully_diluted_valuation": x["fully_diluted_valuation"], 67 | "total_volume": x["total_volume"], 68 | }, 69 | res, 70 | ) 71 | ) 72 | 73 | 74 | if __name__ == "__main__": 75 | print(fetch_coins_with_market("market_cap_desc")) 76 | -------------------------------------------------------------------------------- /omniagent/executors/defi_executor.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Type 2 | 3 | from langchain.callbacks.manager import AsyncCallbackManagerForToolRun 4 | from langchain.tools import BaseTool 5 | from loguru import logger 6 | from pydantic import BaseModel, Field 7 | from rss3_dsl_sdk.client import RSS3Client 8 | from rss3_dsl_sdk.schemas.base import ActivityFilter, PaginationOptions 9 | 10 | from omniagent.executors.feed_prompt import FEED_PROMPT 11 | 12 | # Define the defi activities and common DeFi networks 13 | SUPPORTED_NETWORKS = ["arbitrum", "avax", "base", "binance-smart-chain", "ethereum", "gnosis", "linea", "optimism", "polygon"] 14 | DEFI_ACTIVITIES = ["swap", "liquidity", "staking", "all"] 15 | 16 | 17 | # Define the schema for input parameters 18 | class ParamSchema(BaseModel): 19 | """ 20 | Defines the schema for input parameters of the DeFiExecutor tool. 21 | """ 22 | 23 | address: str = Field(description="Wallet address or blockchain domain name (e.g., vitalik.eth)") 24 | activity_type: str = Field(description=f"Type of DeFi activity. Supported types: {', '.join(DEFI_ACTIVITIES)}") 25 | network: Optional[str] = Field(default=None, description=f"Network for activities. Supported: {', '.join(SUPPORTED_NETWORKS)}") 26 | 27 | 28 | class DeFiExecutor(BaseTool): 29 | """ 30 | A tool for fetching and analyzing DeFi activities across various networks. 31 | """ 32 | 33 | name = "DeFiExecutor" 34 | description = "Use this tool to get the user's DeFi activities (swaps, liquidity provision, staking, all) across various networks." 
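    # Illustrative usage (sketch): this executor is async-only (_run raises
    # NotImplementedError), so call it through BaseTool.arun, e.g.:
    #
    #   executor = DeFiExecutor()
    #   result = asyncio.run(executor.arun(tool_input={
    #       "address": "vitalik.eth", "activity_type": "swap", "network": "ethereum"}))
    #
    # "vitalik.eth" follows the hint in ParamSchema; any entry from
    # SUPPORTED_NETWORKS and DEFI_ACTIVITIES above is accepted.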
35 | args_schema: Type[ParamSchema] = ParamSchema 36 | 37 | async def _run( 38 | self, 39 | address: str, 40 | activity_type: str, 41 | network: Optional[str] = None, 42 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 43 | ) -> str: 44 | raise NotImplementedError 45 | 46 | async def _arun(self, address: str, activity_type: str, network: Optional[str] = None) -> str: 47 | """ 48 | Asynchronously run the DeFi activity fetching process. 49 | 50 | :param address: The wallet address to fetch activities for 51 | :param activity_type: The type of DeFi activity to fetch, now supports "swap", "liquidity", "staking" 52 | :param network: network to filter activities (OPTIONAL) 53 | :return: A string containing the fetched DeFi activities or an error message 54 | """ 55 | return await self.fetch_defi_feeds(address, network, activity_type) 56 | 57 | async def fetch_defi_feeds(self, address: str, network: Optional[str] = None, activity_type: Optional[str] = None): 58 | """ 59 | Fetch DeFi feed activities for a given address, optionally filtered by network and activity type. 60 | 61 | :param address: The wallet address to fetch activities for 62 | :param network: network to filter activities (Optional) 63 | :param activity_type: The type of DeFi activity to fetch 64 | :return: A string containing the fetched DeFi activities or an error message 65 | """ 66 | # Validate activity type 67 | if activity_type.lower() not in DEFI_ACTIVITIES: 68 | return f"Error: Unsupported activity type '{activity_type}'. Choose from: {', '.join(DEFI_ACTIVITIES)}" 69 | 70 | # Validate network if provided 71 | if network and network.lower() not in map(str.lower, SUPPORTED_NETWORKS): 72 | return f"Error: Unsupported network '{network}'. Choose from: {', '.join(SUPPORTED_NETWORKS)}" 73 | 74 | try: 75 | client = RSS3Client() 76 | filters = ActivityFilter(network=[network] if network else None) 77 | pagination = PaginationOptions(limit=10, action_limit=10) 78 | 79 | # Handle 'all' activity type 80 | if activity_type == "all": 81 | activities = [] 82 | for act_type in ["swap", "liquidity", "staking"]: 83 | fetch_method = getattr(client, f"fetch_exchange_{act_type}_activities") 84 | act_results = fetch_method(account=address, filters=filters, pagination=pagination) 85 | activities.extend(act_results.data) 86 | else: 87 | fetch_method = getattr(client, f"fetch_exchange_{activity_type}_activities") 88 | activities_result = fetch_method(account=address, filters=filters, pagination=pagination) 89 | activities = activities_result.data 90 | 91 | # Check if any activities were found 92 | if not activities: 93 | return ( 94 | f"No {'DeFi' if activity_type == 'all' else activity_type} activities found for {address}{' on ' + network if network else ''}." 95 | ) 96 | 97 | # Format the result 98 | activities_data = [activity.model_dump() for activity in activities] 99 | result = FEED_PROMPT.format(activities_data=activities_data, activity_type="DeFi" if activity_type == "all" else activity_type) 100 | return result 101 | 102 | except Exception as e: 103 | logger.error(f"Error fetching DeFi activities: {e}") 104 | return f"Error: Unable to fetch data. 
{e}" 105 | -------------------------------------------------------------------------------- /omniagent/executors/feed_executor.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Type 2 | 3 | import aiohttp 4 | from langchain.callbacks.manager import ( 5 | AsyncCallbackManagerForToolRun, 6 | CallbackManagerForToolRun, 7 | ) 8 | from langchain.tools import BaseTool 9 | from loguru import logger 10 | from pydantic import BaseModel, Field 11 | 12 | from omniagent.conf.env import settings 13 | from omniagent.executors.feed_prompt import FEED_PROMPT 14 | 15 | 16 | class ParamSchema(BaseModel): 17 | """ 18 | Defines the schema for input parameters of the FeedExecutor tool. 19 | """ 20 | 21 | address: str = Field( 22 | description="""wallet address or blockchain domain name,\ 23 | hint: vitalik's address is vitalik.eth""" 24 | ) 25 | 26 | type: str = Field( 27 | description="""Retrieve activities for the specified type, 28 | eg. : all, post, comment, share.""" 29 | ) 30 | 31 | 32 | class FeedExecutor(BaseTool): 33 | """ 34 | A tool for fetching and analyzing blockchain activities for a given address. 35 | """ 36 | 37 | name = "FeedExecutor" 38 | description = """Use this tool to get the activities of a wallet address or \ 39 | blockchain domain name and know what this address has done or doing recently.""" 40 | args_schema: Type[ParamSchema] = ParamSchema 41 | 42 | def _run( 43 | self, 44 | address: str, 45 | type: str, 46 | run_manager: Optional[CallbackManagerForToolRun] = None, 47 | ) -> str: 48 | raise NotImplementedError 49 | 50 | async def _arun( 51 | self, 52 | address: str, 53 | type: str, 54 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 55 | ): 56 | """ 57 | Asynchronously run the feed fetching process. 58 | 59 | :param address: The wallet address to fetch activities for 60 | :param type: The type of activities to fetch (all, post, comment, share) 61 | :param run_manager: Optional callback manager for async operations 62 | :return: A string containing the fetched activities or an error message 63 | """ 64 | return await fetch_feeds(address, type) 65 | 66 | 67 | async def fetch_feeds(address: str, type: str): 68 | """ 69 | Fetch feed activities for a given address and activity type. 70 | 71 | :param address: The wallet address to fetch activities for 72 | :param type: The type of activities to fetch (all, post, comment, share) 73 | :return: A string containing the fetched activities formatted using FEED_PROMPT 74 | """ 75 | 76 | # Construct the URL for the API request 77 | url = f"{settings.RSS3_DATA_API}/decentralized/{address}?limit=5&action_limit=10&tag=social" 78 | if type in ["post", "comment", "share"]: 79 | url += f"&type={type}" 80 | headers = {"Accept": "application/json"} 81 | async with aiohttp.ClientSession() as session: 82 | logger.info(f"fetching {url}") 83 | async with session.get(url, headers=headers) as resp: 84 | data = await resp.json() 85 | 86 | result = FEED_PROMPT.format(activities_data=data) 87 | 88 | return result 89 | -------------------------------------------------------------------------------- /omniagent/executors/feed_prompt.py: -------------------------------------------------------------------------------- 1 | FEED_PROMPT = """ 2 | Here are the raw activities: 3 | 4 | {activities_data} 5 | 6 | - Before answering, please first summarize how many actions the above activities have been carried out. 
7 | - Display the key information in each operation, such as time, author, specific content, etc., and display this information in a markdown list format. 8 | - Finally, give a specific answer to the question. 9 | """ 10 | -------------------------------------------------------------------------------- /omniagent/executors/feed_source_executor.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Type 2 | 3 | from langchain.callbacks.manager import ( 4 | CallbackManagerForToolRun, 5 | ) 6 | from langchain.tools import BaseTool 7 | from loguru import logger 8 | from pydantic import BaseModel, Field 9 | from rss3_dsl_sdk.client import RSS3Client 10 | from rss3_dsl_sdk.schemas.base import ActivityFilter, PaginationOptions 11 | 12 | from omniagent.executors.feed_prompt import FEED_PROMPT 13 | 14 | # Define supported networks and platforms 15 | SUPPORTED_NETWORKS = [ 16 | "arbitrum", 17 | "arweave", 18 | "avax", 19 | "base", 20 | "binance-smart-chain", 21 | "crossbell", 22 | "ethereum", 23 | "farcaster", 24 | "gnosis", 25 | "linea", 26 | "optimism", 27 | "polygon", 28 | "vsl", 29 | ] 30 | 31 | ALLOWED_PLATFORMS = [ 32 | "1inch", 33 | "AAVE", 34 | "Aavegotchi", 35 | "Crossbell", 36 | "Curve", 37 | "ENS", 38 | "Farcaster", 39 | "Highlight", 40 | "IQWiki", 41 | "KiwiStand", 42 | "Lens", 43 | "Lido", 44 | "LooksRare", 45 | "Matters", 46 | "Mirror", 47 | "OpenSea", 48 | "Optimism", 49 | "Paragraph", 50 | "RSS3", 51 | "SAVM", 52 | "Stargate", 53 | "Uniswap", 54 | "Unknown", 55 | "VSL", 56 | ] 57 | 58 | 59 | # Define the schema for input parameters 60 | class ParamSchema(BaseModel): 61 | address: str = Field( 62 | description="""wallet address or blockchain domain name,\ 63 | hint: vitalik's address is vitalik.eth""" 64 | ) 65 | 66 | network: Optional[str] = Field( 67 | default=None, 68 | description=f"""Retrieve activities for the specified network. 69 | Supported networks: {', '.join(SUPPORTED_NETWORKS)}""", 70 | ) 71 | 72 | platform: Optional[str] = Field( 73 | default=None, 74 | description=f"""Retrieve activities for the specified platform. 75 | Allowed platforms: {', '.join(ALLOWED_PLATFORMS)}""", 76 | ) 77 | 78 | 79 | # Define the FeedSourceExecutor tool 80 | class FeedSourceExecutor(BaseTool): 81 | name = "FeedSourceExecutor" 82 | description = """Use this tool to get the activities of a wallet address or \ 83 | blockchain domain name based on specific network and/or platform, and know what this address \ 84 | has done or is doing recently.""" 85 | args_schema: Type[ParamSchema] = ParamSchema 86 | 87 | def _run( 88 | self, 89 | address: str, 90 | network: Optional[str] = None, 91 | platform: Optional[str] = None, 92 | run_manager: Optional[CallbackManagerForToolRun] = None, 93 | ) -> str: 94 | raise NotImplementedError 95 | 96 | async def _arun( 97 | self, 98 | address: str, 99 | network: Optional[str] = None, 100 | platform: Optional[str] = None, 101 | ): 102 | """ 103 | Asynchronously run the feed source fetching process. 
104 | 105 | :param address: The wallet address to fetch activities for 106 | :param network: network to filter activities (Optional) 107 | :param platform: platform to filter activities (Optional) 108 | :return: A string containing the fetched activities or an error message 109 | """ 110 | return await self.fetch_source_feeds(address, network, platform) 111 | 112 | async def fetch_source_feeds(self, address: str, network: Optional[str] = None, platform: Optional[str] = None): 113 | """ 114 | Fetch feed activities for a given address, optionally filtered by network and platform. 115 | """ 116 | filters = ActivityFilter() 117 | pagination = PaginationOptions(limit=5, action_limit=10) 118 | 119 | # Validate and set network and platform filter if provided 120 | if network: 121 | if network.lower() not in [n.lower() for n in SUPPORTED_NETWORKS]: 122 | return f"Error: Unsupported network '{network}'. Please choose from: {', '.join(SUPPORTED_NETWORKS)}" 123 | filters.network = [network] 124 | 125 | if platform: 126 | if platform.lower() not in [p.lower() for p in ALLOWED_PLATFORMS]: 127 | return f"Error: Unsupported platform '{platform}'. Please choose from: {', '.join(ALLOWED_PLATFORMS)}" 128 | filters.platform = [platform] 129 | 130 | try: 131 | logger.info(f"Fetching activities for address: {address}, network: {network}, platform: {platform}") 132 | 133 | # Fetch activities using the RSS3 client 134 | activities = RSS3Client().fetch_activities(account=address, tag=None, activity_type=None, pagination=filters, filters=pagination) 135 | 136 | # Check if any activities were found 137 | if not activities.data: 138 | return f"No activities found for the given address{' on ' + network if network else ''}{' and ' + platform if platform else ''}." 139 | 140 | result = FEED_PROMPT.format(activities_data=activities.dict()) 141 | return result 142 | 143 | except Exception as e: 144 | logger.error(f"Error fetching activities: {e!s}") 145 | return f"Error: Unable to fetch data. {e!s}" 146 | -------------------------------------------------------------------------------- /omniagent/executors/funding_rate_executor.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Optional, Type 3 | 4 | import ccxt 5 | from langchain.callbacks.manager import ( 6 | AsyncCallbackManagerForToolRun, 7 | CallbackManagerForToolRun, 8 | ) 9 | from langchain.tools import BaseTool 10 | from loguru import logger 11 | from pydantic import BaseModel, Field 12 | 13 | 14 | class ARGS(BaseModel): 15 | exchange: str = Field(description="Name of the exchange (ccxt supported), e.g., 'binance'") 16 | symbol: str = Field(description="Trading pair symbol, e.g., 'BTC/USDT'") 17 | 18 | 19 | class FundingRateExecutor(BaseTool): 20 | name = "FundingRateExecutor" 21 | description = "Use this tool to get the funding rate of a trading pair." 
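    # Illustrative usage (sketch): symbols without the ":USDT" settlement suffix
    # are normalized inside fetch_funding_rate below, so both forms work, e.g.:
    #
    #   FundingRateExecutor().run(tool_input={"exchange": "binance", "symbol": "BTC/USDT"})
    #
    # which ccxt resolves to the perpetual contract "BTC/USDT:USDT", as in the
    # __main__ example at the bottom of this module.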
22 | args_schema: Type[ARGS] = ARGS 23 | 24 | def _run( 25 | self, 26 | exchange: str, 27 | symbol: str, 28 | run_manager: Optional[CallbackManagerForToolRun] = None, 29 | ) -> str: 30 | try: 31 | return json.dumps(fetch_funding_rate(exchange, symbol)) 32 | except Exception as e: 33 | return f"error: {e}" 34 | 35 | async def _arun( 36 | self, 37 | exchange: str, 38 | symbol: str, 39 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 40 | ) -> str: 41 | try: 42 | return json.dumps(fetch_funding_rate(exchange, symbol)) 43 | except Exception as e: 44 | return f"error: {e}" 45 | 46 | 47 | def fetch_funding_rate(exchange_name: str, symbol: str) -> float: 48 | try: 49 | if not symbol.endswith(":USDT"): 50 | symbol = f"{symbol}:USDT" 51 | exchange_class = getattr(ccxt, exchange_name) 52 | exchange = exchange_class() 53 | 54 | funding_rate = exchange.fetch_funding_rate(symbol) 55 | return funding_rate 56 | except Exception as e: 57 | logger.warning(f"Fetch funding rate error from {exchange_name}: {e}") 58 | raise e 59 | 60 | 61 | if __name__ == "__main__": 62 | tool = FundingRateExecutor() 63 | print(tool.run(tool_input={"exchange": "binance", "symbol": "BTC/USDT:USDT"})) 64 | -------------------------------------------------------------------------------- /omniagent/executors/nft_balance_executor.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Optional, Type 3 | 4 | from langchain.callbacks.manager import ( 5 | AsyncCallbackManagerForToolRun, 6 | CallbackManagerForToolRun, 7 | ) 8 | from langchain.tools import BaseTool 9 | from pydantic import BaseModel, Field 10 | 11 | from omniagent.conf.env import settings 12 | 13 | 14 | class ARGS(BaseModel): 15 | chain: str = Field(description="chain name,options:eth,optimism,arbitrum,bsc") 16 | 17 | wallet_address: str = Field(description="wallet address") 18 | 19 | 20 | class NFTBalanceExecutor(BaseTool): 21 | name = "NFTBalanceExecutor" 22 | description = "get the nft asset of a wallet." 
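    # Illustrative usage (sketch; requires MORALIS_API_KEY):
    #
    #   NFTBalanceExecutor().run(tool_input={
    #       "chain": "eth",
    #       "wallet_address": "0x33c0814654fa367ce67d8531026eb4481290e63c"})
    #
    # The wallet address mirrors the __main__ example below; the result is a
    # JSON list of {amount, name, symbol} entries.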
23 | args_schema: Type[ARGS] = ARGS 24 | 25 | def _run( 26 | self, 27 | chain: str, 28 | wallet_address: str, 29 | run_manager: Optional[CallbackManagerForToolRun] = None, 30 | ) -> str: 31 | return fetch_balance(chain, wallet_address) 32 | 33 | async def _arun( 34 | self, 35 | chain: str, 36 | wallet_address: str, 37 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 38 | ) -> str: 39 | return fetch_balance(chain, wallet_address) 40 | 41 | 42 | def fetch_balance(chain: str, address: str) -> str: 43 | if settings.MORALIS_API_KEY is None: 44 | return "Please set MORALIS_API_KEY in the environment" 45 | from moralis import evm_api 46 | 47 | params = {"chain": chain, "format": "decimal", "media_items": False, "address": address} 48 | 49 | result = evm_api.nft.get_wallet_nfts( 50 | api_key=settings.MORALIS_API_KEY, 51 | params=params, 52 | ) 53 | 54 | return json.dumps( 55 | list( 56 | map( 57 | lambda x: { 58 | "amount": x["amount"], 59 | "name": x["name"], 60 | "symbol": x["symbol"], 61 | }, 62 | result["result"], 63 | ) 64 | ) 65 | ) 66 | 67 | 68 | if __name__ == "__main__": 69 | print(fetch_balance("eth", "0x33c0814654fa367ce67d8531026eb4481290e63c")) 70 | -------------------------------------------------------------------------------- /omniagent/executors/nft_rank_executor.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Optional, Type 3 | 4 | from langchain.callbacks.manager import ( 5 | AsyncCallbackManagerForToolRun, 6 | CallbackManagerForToolRun, 7 | ) 8 | from langchain.tools import BaseTool 9 | from moralis import evm_api 10 | from pydantic import BaseModel, Field 11 | 12 | from omniagent.conf.env import settings 13 | 14 | 15 | class NFTRankingArgs(BaseModel): 16 | limit: int = Field(description="Number of collections to return", default=10) 17 | 18 | 19 | class NFTRankingExecutor(BaseTool): 20 | name = "NFTRankingExecutor" 21 | description = "A tool for getting NFT collection rankings." 
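    # Illustrative usage (sketch; requires MORALIS_API_KEY):
    #
    #   NFTRankingExecutor().run(tool_input={"limit": 5})
    #
    # returns a JSON list of {collection_title, collection_image,
    # floor_price_usd, collection_address} entries, capped at the number of
    # collections Moralis returns.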
22 | args_schema: Type[NFTRankingArgs] = NFTRankingArgs 23 | 24 | def _run( 25 | self, 26 | limit: int, 27 | run_manager: Optional[CallbackManagerForToolRun] = None, 28 | ) -> str: 29 | return self.collection_ranking(limit) 30 | 31 | async def _arun( 32 | self, 33 | limit: int, 34 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 35 | ) -> str: 36 | return self._run(limit, run_manager) 37 | 38 | @staticmethod 39 | def collection_ranking(limit: int) -> str: 40 | if settings.MORALIS_API_KEY is None: 41 | return "Please set MORALIS_API_KEY in the environment" 42 | by_market_cap = evm_api.market_data.get_top_nft_collections_by_market_cap( 43 | api_key=settings.MORALIS_API_KEY, 44 | ) 45 | limit = min(limit, len(by_market_cap)) 46 | result = by_market_cap[0:limit] 47 | return json.dumps( 48 | list( 49 | map( 50 | lambda x: { 51 | "collection_title": x["collection_title"], 52 | "collection_image": x["collection_image"], 53 | "floor_price_usd": x["floor_price_usd"], 54 | "collection_address": x["collection_address"], 55 | }, 56 | result, 57 | ) 58 | ) 59 | ) 60 | 61 | 62 | if __name__ == "__main__": 63 | ranking = NFTRankingExecutor.collection_ranking(4) 64 | print(ranking) 65 | -------------------------------------------------------------------------------- /omniagent/executors/price_executor.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | from typing import Optional, Type 4 | 5 | import requests 6 | from langchain.callbacks.manager import ( 7 | AsyncCallbackManagerForToolRun, 8 | CallbackManagerForToolRun, 9 | ) 10 | from langchain.tools import BaseTool 11 | from pydantic import BaseModel, Field 12 | 13 | from omniagent.conf.env import settings 14 | 15 | 16 | class ARGS(BaseModel): 17 | token: str = Field(description="token symbol, e.g., 'ETH', 'BTC'") 18 | 19 | 20 | class PriceExecutor(BaseTool): 21 | name = "PriceExecutor" 22 | description = "use this tool to get the price widget of a token." 
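    # Illustrative usage (sketch; requires COINGECKO_API_KEY):
    #
    #   PriceExecutor().run(tool_input={"token": "ETH"})
    #
    # The synchronous path simply wraps the async fetch_price helper below in
    # asyncio.run; the async path awaits it directly.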
23 | args_schema: Type[ARGS] = ARGS 24 | 25 | def _run( 26 | self, 27 | token: str, 28 | run_manager: Optional[CallbackManagerForToolRun] = None, 29 | ) -> str: 30 | return asyncio.run(fetch_price(token)) 31 | 32 | async def _arun( 33 | self, 34 | token: str, 35 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 36 | ) -> str: 37 | return await fetch_price(token) 38 | 39 | 40 | async def fetch_price(token: str) -> str: 41 | url = f"https://pro-api.coingecko.com/api/v3/search?query={token}" 42 | 43 | key = settings.COINGECKO_API_KEY 44 | headers = {"accept": "application/json", "x-cg-pro-api-key": key} 45 | 46 | response = requests.get(url, headers=headers) 47 | token_: dict = json.loads(response.text)["coins"][0] 48 | token_id_ = token_["id"] 49 | 50 | url = ( 51 | f"https://pro-api.coingecko.com/api/v3/simple/price?ids={token_id_}&" 52 | f"vs_currencies=usd&include_market_cap=true&include_24hr_vol=true&" 53 | f"include_24hr_change=true&include_last_updated_at=true" 54 | ) 55 | 56 | headers = {"accept": "application/json", "x-cg-pro-api-key": key} 57 | 58 | response = requests.get(url, headers=headers) 59 | 60 | return response.text 61 | 62 | 63 | if __name__ == "__main__": 64 | print(asyncio.run(fetch_price("eth"))) 65 | -------------------------------------------------------------------------------- /omniagent/executors/project_executor.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | from concurrent.futures import ThreadPoolExecutor 4 | from typing import Optional, Type 5 | 6 | import aiohttp 7 | from cachetools import TTLCache, cached 8 | from langchain.callbacks.manager import ( 9 | AsyncCallbackManagerForToolRun, 10 | CallbackManagerForToolRun, 11 | ) 12 | from langchain.tools import BaseTool 13 | from pydantic import BaseModel, Field 14 | 15 | from omniagent.conf.env import settings 16 | 17 | API_KEY = "" 18 | HEADERS = { 19 | "apikey": settings.ROOTDATA_API_KEY, 20 | "language": "en", 21 | "Content-Type": "application/json", 22 | } 23 | 24 | cache = TTLCache(maxsize=100, ttl=24 * 60 * 60) 25 | 26 | 27 | class ARGS(BaseModel): 28 | keyword: str = Field(description="keyword") 29 | 30 | 31 | def _fetch_project_sync(keyword: str) -> str: 32 | projects = asyncio.run(fetch_project(keyword)) 33 | return json.dumps(projects) 34 | 35 | 36 | class ProjectExecutor(BaseTool): 37 | name = "ProjectExecutor" 38 | 39 | description = "get the project information like investors, team members, social media, etc." 
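    # Illustrative usage (sketch; requires ROOTDATA_API_KEY):
    #
    #   ProjectExecutor().run(tool_input={"keyword": "rss3"})
    #
    # The keyword mirrors this module's __main__ example; results are cached
    # for 24 hours via the module-level TTLCache.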
40 | args_schema: Type[ARGS] = ARGS 41 | 42 | def _run( 43 | self, 44 | keyword: str, 45 | run_manager: Optional[CallbackManagerForToolRun] = None, 46 | ) -> str: 47 | if settings.ROOTDATA_API_KEY is None: 48 | return "Please set ROOTDATA_API_KEY in the environment" 49 | with ThreadPoolExecutor() as executor: 50 | future = executor.submit(_fetch_project_sync, keyword) 51 | return future.result() 52 | 53 | async def _arun( 54 | self, 55 | keyword: str, 56 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 57 | ) -> str: 58 | if settings.ROOTDATA_API_KEY is None: 59 | return "Please set ROOTDATA_API_KEY in the environment" 60 | projects = await fetch_project(keyword) 61 | return json.dumps(projects) 62 | 63 | 64 | async def fetch_project_detail(session, project_id: int) -> dict: 65 | url = "https://api.rootdata.com/open/get_item" 66 | payload = json.dumps({"project_id": project_id, "include_team": True, "include_investors": True}) 67 | 68 | async with session.post(url, headers=HEADERS, data=payload) as response: 69 | response_text = await response.text() 70 | return json.loads(response_text)["data"] 71 | 72 | 73 | @cached(cache) 74 | async def fetch_project(keyword: str) -> list: 75 | url = "https://api.rootdata.com/open/ser_inv" 76 | payload = json.dumps({"query": keyword, "variables": {}}) 77 | 78 | async with aiohttp.ClientSession() as session, session.post(url, headers=HEADERS, data=payload) as response: 79 | response_text = await response.text() 80 | data = json.loads(response_text)["data"] 81 | project_ids = [item["id"] for item in data if item["type"] == 1][0:2] 82 | 83 | tasks = [fetch_project_detail(session, project_id) for project_id in project_ids] 84 | return list(await asyncio.gather(*tasks)) 85 | 86 | 87 | if __name__ == "__main__": 88 | print(asyncio.run(fetch_project("rss3"))) 89 | -------------------------------------------------------------------------------- /omniagent/executors/search_executor.py: -------------------------------------------------------------------------------- 1 | from langchain_community.tools import DuckDuckGoSearchRun 2 | from langchain_community.tools.tavily_search import TavilySearchResults 3 | 4 | from omniagent.conf.env import settings 5 | 6 | 7 | class SearchExecutor: 8 | def __new__(cls): 9 | if settings.TAVILY_API_KEY: 10 | return TavilySearchResults(max_results=5, name="TavilySearchExecutor") 11 | else: 12 | return DuckDuckGoSearchRun(name="DuckDuckGoSearchExecutor") 13 | 14 | 15 | search_executor = SearchExecutor() 16 | -------------------------------------------------------------------------------- /omniagent/executors/swap_executor.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Literal, Optional, Type 3 | 4 | from langchain.callbacks.manager import ( 5 | AsyncCallbackManagerForToolRun, 6 | CallbackManagerForToolRun, 7 | ) 8 | from langchain.tools import BaseTool 9 | from pydantic import BaseModel, Field 10 | 11 | from omniagent.executors.token_util import chain_name_to_id, get_token_data_by_key, select_best_token 12 | 13 | 14 | class Swap(BaseModel): 15 | from_token: str 16 | from_token_address: str 17 | to_token: str 18 | to_token_address: str 19 | amount: str 20 | type: str = "swap" 21 | from_chain_name: str 22 | to_chain_name: str 23 | 24 | 25 | ChainLiteral = Literal["ETH", "BSC", "ARBITRUM", "OPTIMISM", "BASE"] 26 | 27 | 28 | class ParamSchema(BaseModel): 29 | """ 30 | Schema for the parameters required for a token swap. 
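
    Illustrative input (values are examples drawn from the field descriptions
    below, not enforced defaults):
        {"from_token": "ETH", "to_token": "USDT",
         "from_chain": "ETH", "to_chain": "BSC", "amount": "0.1"}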
31 | """ 32 | 33 | from_token: str = Field(description="Symbol of the token to swap from, e.g., 'BTC', 'ETH', 'RSS3', 'USDT', 'USDC'. Default: 'ETH'.") 34 | to_token: str = Field(description="Symbol of the token to swap to, e.g., 'BTC', 'ETH', 'RSS3', 'USDT', 'USDC'. Default: 'ETH'.") 35 | from_chain: ChainLiteral = Field( 36 | default="ETH", 37 | description="Blockchain network to swap from, support networks: 'ETH', 'BSC', 'ARBITRUM', 'OPTIMISM', 'BASE'. Default: 'ETH'.", 38 | ) 39 | to_chain: ChainLiteral = Field( 40 | default="ETH", 41 | description="Blockchain network to swap to, support networks: 'ETH', 'BSC', 'ARBITRUM', 'OPTIMISM', 'BASE'. Default: 'ETH'.", 42 | ) 43 | amount: str = Field(description="Amount of the from-side token to swap, e.g., '0.1', '1', '10'. Default: '1'.") 44 | 45 | 46 | class SwapExecutor(BaseTool): 47 | """ 48 | Tool for generating a swap widget for cryptocurrency swaps. 49 | """ 50 | 51 | name = "SwapExecutor" 52 | description = "Use this tool to handle user requests to swap cryptocurrencies." 53 | args_schema: Type[ParamSchema] = ParamSchema 54 | return_direct = False 55 | 56 | def _run( 57 | self, 58 | from_token: str, 59 | to_token: str, 60 | from_chain: ChainLiteral, 61 | to_chain: ChainLiteral, 62 | amount: str, 63 | run_manager: Optional[CallbackManagerForToolRun] = None, 64 | ) -> str: 65 | raise NotImplementedError 66 | 67 | async def _arun( 68 | self, 69 | from_token: str, 70 | to_token: str, 71 | from_chain: ChainLiteral = "ETH", 72 | to_chain: ChainLiteral = "ETH", 73 | amount: str = "1", 74 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 75 | ): 76 | return await fetch_swap(from_token, to_token, from_chain, to_chain, amount) 77 | 78 | 79 | async def fetch_swap(from_token: str, to_token: str, from_chain: ChainLiteral, to_chain: ChainLiteral, amount: str): 80 | """ 81 | Fetch the swap details for the given parameters. 82 | 83 | Args: 84 | from_token (str): The symbol of the from-side token. 85 | to_token (str): The symbol of the to-side token. 86 | from_chain (ChainLiteral): The from-side blockchain network. 87 | to_chain (ChainLiteral): The to-side blockchain network. 88 | amount (str): The amount of tokens to swap. 89 | 90 | Returns: 91 | str: The swap details in JSON format. 
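
    Example (illustrative sketch; the exact JSON payload is an assumption
    based on the Swap model above):
        swap_json = await fetch_swap("ETH", "USDT", "ETH", "BSC", "0.1")
        # -> '{"from_token": "ETH", "from_token_address": "0x...", ...}'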
92 | """ 93 | from_chain_id = chain_name_to_id(from_chain) 94 | to_chain_id = chain_name_to_id(to_chain) 95 | 96 | # Fetch token data concurrently 97 | from_token_data, to_token_data = await asyncio.gather(select_best_token(from_token, from_chain_id), select_best_token(to_token, to_chain_id)) 98 | 99 | swap = Swap( 100 | from_token=get_token_data_by_key(from_token_data, "symbol"), 101 | from_token_address=get_token_data_by_key(from_token_data, "address"), 102 | to_token=get_token_data_by_key(to_token_data, "symbol"), 103 | to_token_address=get_token_data_by_key(to_token_data, "address"), 104 | from_chain_name=from_chain, 105 | to_chain_name=to_chain, 106 | amount=amount, 107 | ) 108 | return swap.model_dump_json() 109 | -------------------------------------------------------------------------------- /omniagent/executors/tg_news_executor.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | from typing import Any, Dict, List, Optional, Type 4 | 5 | from langchain.callbacks.manager import ( 6 | AsyncCallbackManagerForToolRun, 7 | ) 8 | from langchain.tools import BaseTool 9 | from pydantic import BaseModel, Field 10 | 11 | from omniagent.executors.tg_util import fetch_tg_msgs 12 | 13 | 14 | class ParamSchema(BaseModel): 15 | """ 16 | Defines the schema for input parameters of the TelegramNewsExecutor tool. 17 | """ 18 | 19 | limit: int = Field(default=10, description="Number of recent news items to fetch from Telegram channels") 20 | 21 | 22 | class TelegramNewsExecutor(BaseTool): 23 | """ 24 | A tool for fetching recent news from specific Telegram channels using RSS3 DATA API. 25 | """ 26 | 27 | def _run(self, *args: Any, **kwargs: Any) -> Any: 28 | raise NotImplementedError 29 | 30 | name = "TelegramNewsExecutor" 31 | description = """Use this tool to get recent news and updates in the blockchain \ 32 | and cryptocurrency space.""" 33 | args_schema: Type[ParamSchema] = ParamSchema 34 | 35 | async def _arun( 36 | self, 37 | limit: int = 10, 38 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 39 | ) -> str: 40 | """ 41 | Asynchronously run the Telegram news fetching process. 42 | 43 | :param limit: Number of recent news items to fetch 44 | :param run_manager: Optional callback manager for async operations 45 | :return: A string containing the fetched news items 46 | """ 47 | return await fetch_telegram_news(["ChannelPANews", "chainfeedsxyz"], limit) 48 | 49 | 50 | async def fetch_telegram_news(channels: List[str], limit: int = 10) -> str: 51 | """ 52 | Fetch recent news from specific Telegram channels using RSS3 DATA API. 53 | 54 | :param channels: List of Telegram channels to fetch news from 55 | :param limit: Number of recent news items to fetch 56 | :return: A string containing the fetched news items 57 | """ 58 | results = [] 59 | try: 60 | results = list(await asyncio.gather(*[fetch_tg_msgs(channel, limit) for channel in channels])) 61 | return format_news(results) 62 | except Exception as e: 63 | if results: 64 | return f"An error occurred while fetching news, this is the results: {json.dumps(results)}" 65 | return f"An error occurred while fetching news: {e!s}" 66 | 67 | 68 | def format_news(results: List[List[Dict]]) -> str: 69 | """ 70 | Format the fetched news results into a readable string. 
71 | 72 | :param results: A list of lists containing news entries 73 | :return: A formatted string of news items 74 | """ 75 | formatted_news = [format_entry(entry) for item in results for entry in item] 76 | return "Recent news from Telegram channels:\n\n" + "\n".join(formatted_news) 77 | 78 | 79 | def format_entry(entry: Dict) -> str: 80 | """ 81 | Format a single news entry into a readable string. 82 | 83 | :param entry: A dictionary containing news entry data 84 | :return: A formatted string of the news entry 85 | """ 86 | metadata = entry["actions"][0]["metadata"] 87 | return f"Title: {metadata['title']}\nDate: {metadata['pub_date']}\nSummary: {metadata['description']}\n\n" 88 | 89 | 90 | if __name__ == "__main__": 91 | loop = asyncio.get_event_loop() 92 | entries = loop.run_until_complete(fetch_telegram_news(["ChannelPANews", "chainfeedsxyz"], 10)) 93 | print(entries) 94 | -------------------------------------------------------------------------------- /omniagent/executors/tg_util.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import aiohttp 4 | from loguru import logger 5 | 6 | from omniagent.conf.env import settings 7 | 8 | 9 | async def fetch_tg_msgs(channel: str, limit: int = 10): 10 | """ 11 | Fetch recent content from a specific Telegram channel using RSS3 DATA API. 12 | 13 | :param channel: The Telegram channel to fetch content from 14 | :param limit: Number of recent items to fetch 15 | :return: A string containing the fetched items 16 | """ 17 | 18 | url = f"{settings.RSS3_DATA_API}/rss/telegram/channel/{channel}" 19 | logger.info(f"Fetching content from {url}") 20 | 21 | async with aiohttp.ClientSession() as session: # noqa 22 | async with session.get(url) as resp: 23 | if resp.status == 200: 24 | content = await resp.text() 25 | data = json.loads(content) 26 | return data["data"][:limit] 27 | else: 28 | logger.error(f"Failed to fetch from {url}. Status: {resp.status}") 29 | 30 | 31 | if __name__ == "__main__": 32 | import asyncio 33 | 34 | loop = asyncio.get_event_loop() 35 | entries = loop.run_until_complete(fetch_tg_msgs("ChannelPANews", 5)) 36 | print(entries) 37 | -------------------------------------------------------------------------------- /omniagent/executors/token_balance_executor.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Optional, Type 3 | 4 | from langchain.callbacks.manager import ( 5 | AsyncCallbackManagerForToolRun, 6 | CallbackManagerForToolRun, 7 | ) 8 | from langchain.tools import BaseTool 9 | from moralis import evm_api 10 | from pydantic import BaseModel, Field 11 | 12 | from omniagent.conf.env import settings 13 | 14 | 15 | class ARGS(BaseModel): 16 | chain: str = Field(description="chain name,options:eth,optimism,arbitrum,bsc") 17 | wallet_address: str = Field(description="wallet address") 18 | 19 | 20 | class TokenBalanceExecutor(BaseTool): 21 | name = "TokenBalanceExecutor" 22 | description = "get the token balance of a wallet address." 
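    # Illustrative usage (sketch; requires MORALIS_API_KEY):
    #
    #   TokenBalanceExecutor().run(tool_input={
    #       "chain": "eth",
    #       "wallet_address": "0x33c0814654fa367ce67d8531026eb4481290e63c"})
    #
    # The wallet address mirrors the __main__ example below; the result is a
    # JSON list of {symbol, balance_formatted, usd_value} entries.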
23 | args_schema: Type[ARGS] = ARGS 24 | 25 | def _run( 26 | self, 27 | chain: str, 28 | wallet_address: str, 29 | run_manager: Optional[CallbackManagerForToolRun] = None, 30 | ) -> str: 31 | return fetch_balance(chain, wallet_address) 32 | 33 | async def _arun( 34 | self, 35 | chain: str, 36 | wallet_address: str, 37 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 38 | ) -> str: 39 | return fetch_balance(chain, wallet_address) 40 | 41 | 42 | def fetch_balance(chain: str, address: str) -> str: 43 | if settings.MORALIS_API_KEY is None: 44 | return "Please set MORALIS_API_KEY in the environment" 45 | result = evm_api.wallets.get_wallet_token_balances_price( 46 | api_key=settings.MORALIS_API_KEY, 47 | params={"chain": chain, "address": address}, 48 | ) 49 | 50 | return json.dumps( 51 | list( 52 | map( 53 | lambda x: { 54 | "symbol": x["symbol"], 55 | "balance_formatted": x["balance_formatted"], 56 | "usd_value": x["usd_value"], 57 | }, 58 | result["result"], 59 | ) 60 | ) 61 | ) 62 | 63 | 64 | if __name__ == "__main__": 65 | print(fetch_balance("eth", "0x33c0814654fa367ce67d8531026eb4481290e63c")) 66 | -------------------------------------------------------------------------------- /omniagent/executors/token_util.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Optional 2 | 3 | import aiohttp 4 | from aiocache import Cache 5 | from aiocache.decorators import cached 6 | from loguru import logger 7 | 8 | 9 | def get_token_data_by_key(token: Dict, key: str) -> str: 10 | """ 11 | Retrieve data from the token dictionary by key. 12 | 13 | Args: 14 | token (Dict): The token dictionary. 15 | key (str): The key to retrieve the data. 16 | 17 | Returns: 18 | str: The value associated with the key, or an empty string if the key does not exist. 19 | """ 20 | return str(token[key]) if token and key in token else "" 21 | 22 | 23 | def chain_name_to_id(chain_name: str) -> str: 24 | """ 25 | Convert chain name to chain ID. 26 | 27 | Args: 28 | chain_name (str): The name of the blockchain network. 29 | 30 | Returns: 31 | str: The corresponding chain ID. 32 | """ 33 | chain_map = { 34 | "ETH": "1", 35 | "OPTIMISM": "10", 36 | "BSC": "56", 37 | "BASE": "8453", 38 | "ARBITRUM": "42161", 39 | } 40 | return chain_map.get(chain_name, "1") 41 | 42 | 43 | @cached(ttl=300, cache=Cache.MEMORY) 44 | async def fetch_tokens() -> Dict[str, List[Dict]]: 45 | """ 46 | Fetch the token list from the API and cache it for 60 seconds. 47 | 48 | Returns: 49 | Dict[str, List[Dict]]: The token list grouped by chain ID. 50 | """ 51 | url = "https://li.quest/v1/tokens" 52 | headers = {"Accept": "application/json"} 53 | logger.info(f"Fetching new data from {url}") 54 | 55 | async with aiohttp.ClientSession() as session: # noqa 56 | async with session.get(url, headers=headers) as response: 57 | token_list = await response.json() 58 | return token_list["tokens"] 59 | 60 | 61 | async def select_best_token(keyword: str, chain_id: str) -> Optional[Dict]: 62 | """ 63 | Select the best token based on the keyword and chain ID. 64 | 65 | Args: 66 | keyword (str): The keyword to search for. 67 | chain_id (str): The chain ID to filter tokens. 68 | 69 | Returns: 70 | Optional[Dict]: The best matching token, or None if no match is found. 
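
    Example (illustrative sketch; the returned token dict shape is an
    assumption based on the keys used elsewhere in this module):
        token = await select_best_token("usdc", "1")
        # -> {"symbol": "USDC", "name": "USD Coin", "address": "0x...", ...}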
71 | """ 72 | keyword = keyword.lower() 73 | 74 | # special case for eth on non-ethereum chains 75 | if keyword == "eth" and chain_id != "1": 76 | keyword = "weth" 77 | 78 | # special case for btc 79 | if keyword == "btc": 80 | keyword = "wbtc" 81 | 82 | tokens = await fetch_tokens() 83 | tokens_on_chain = tokens.get(chain_id, []) 84 | 85 | # Filter based on symbol and name 86 | results = [token for token in tokens_on_chain if token["symbol"].lower() == keyword or token["name"].lower() == keyword] 87 | 88 | if results: 89 | if len(results) == 1: 90 | return results[0] 91 | 92 | # Sort based on priority 93 | results.sort( 94 | key=lambda x: ( 95 | "logoURI" in x, 96 | x["symbol"].lower() == keyword, 97 | x.get("coinKey", "").lower() == keyword, 98 | x.get("priceUSD") is not None, 99 | x["name"].lower() == keyword, 100 | ), 101 | reverse=True, 102 | ) 103 | return results[0] 104 | 105 | return None 106 | -------------------------------------------------------------------------------- /omniagent/executors/transfer_executor.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Type 2 | 3 | from langchain.callbacks.manager import ( 4 | AsyncCallbackManagerForToolRun, 5 | CallbackManagerForToolRun, 6 | ) 7 | from langchain.tools import BaseTool 8 | from pydantic import BaseModel, Field 9 | 10 | from omniagent.executors.token_util import chain_name_to_id, get_token_data_by_key, select_best_token 11 | 12 | 13 | class Transfer(BaseModel): 14 | # task_id: str 15 | to_address: str 16 | token: str 17 | token_address: str 18 | chain_id: str 19 | amount: str 20 | logoURI: str # noqa 21 | decimals: int 22 | 23 | 24 | class ParamSchema(BaseModel): 25 | """ 26 | Defines the schema for input parameters of the TransferExecutor tool. 27 | """ 28 | 29 | to_address: str = Field( 30 | description="""extract the blockchain address mentioned in the query""", 31 | ) 32 | 33 | token: str = Field( 34 | description="""extract the token symbol mentioned in the query""", 35 | ) 36 | 37 | chain_name: str = Field( 38 | default="ethereum", 39 | description="""extract the blockchain name mentioned in the query, 40 | if not mentioned, default is "ethereum".""", 41 | ) 42 | 43 | amount: str = Field( 44 | default="1", 45 | description="""extract the amount of cryptocurrencies mentioned in the query, 46 | if not mentioned, default is "1".""", 47 | ) 48 | 49 | 50 | class TransferExecutor(BaseTool): 51 | """ 52 | Tool for generating a transfer widget for cryptocurrency transfers. 53 | """ 54 | 55 | name = "TransferExecutor" 56 | description = """Use this tool to send cryptocurrencies to another address.""" 57 | args_schema: Type[ParamSchema] = ParamSchema 58 | return_direct = False 59 | last_task_id: Optional[str] = None 60 | 61 | def _run( 62 | self, 63 | to_address: str, 64 | token: str, 65 | chain_name: str, 66 | amount: str, 67 | run_manager: Optional[CallbackManagerForToolRun] = None, 68 | ) -> str: 69 | raise NotImplementedError 70 | 71 | async def _arun( 72 | self, 73 | to_address: str, 74 | token: str, 75 | chain_name: str = "ethereum", 76 | amount: str = "1", 77 | run_manager: Optional[AsyncCallbackManagerForToolRun] = None, 78 | ): 79 | """ 80 | Asynchronously run the transfer process. 
81 | 82 | :param to_address: The recipient's blockchain address 83 | :param token: The token symbol 84 | :param chain_name: The blockchain name (default is "ethereum") 85 | :param amount: The amount to transfer (default is "1") 86 | :param run_manager: Optional callback manager for async operations 87 | :return: JSON representation of the transfer details 88 | """ 89 | 90 | return await fetch_transfer(to_address, token, chain_name, amount) 91 | 92 | 93 | async def fetch_transfer(to_address: str, token: str, chain_name: str, amount: str): 94 | """ 95 | Fetch transfer details and prepare the Transfer object. 96 | 97 | :param to_address: The recipient's blockchain address 98 | :param token: The token symbol 99 | :param chain_name: The blockchain name 100 | :param amount: The amount to transfer 101 | :return: JSON representation of the Transfer object 102 | """ 103 | 104 | if not to_address.startswith("0x") and not to_address.endswith(".eth"): 105 | to_address += ".eth" 106 | chain_id = chain_name_to_id(chain_name) 107 | res = { 108 | "to_address": to_address, 109 | "token": token, 110 | "amount": amount, 111 | } 112 | 113 | # Select the best token based on the provided token symbol and chain ID 114 | token_info = await select_best_token(token, chain_id) 115 | 116 | # Create a Transfer object with all the necessary information 117 | transfer = Transfer( 118 | to_address=res.get("to_address", "1"), 119 | token=get_token_data_by_key(token_info, "symbol"), 120 | token_address=get_token_data_by_key(token_info, "address"), 121 | chain_id=chain_id, 122 | amount=res.get("amount", "1"), 123 | logoURI=get_token_data_by_key(token_info, "logoURI"), 124 | decimals=get_token_data_by_key(token_info, "decimals"), 125 | ) 126 | 127 | return transfer.model_dump_json() 128 | -------------------------------------------------------------------------------- /omniagent/index/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VividGen/CMA/74a1987f6cee3036c30da39c6dd548c5d5e5a305/omniagent/index/__init__.py -------------------------------------------------------------------------------- /omniagent/index/feed_indexing.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | from dotenv import load_dotenv 4 | from langchain.indexes import SQLRecordManager 5 | from langchain_core.documents import Document 6 | from langchain_core.indexing import index 7 | from langchain_text_splitters import CharacterTextSplitter 8 | from loguru import logger 9 | 10 | from omniagent.conf.env import settings 11 | from omniagent.index.feed_scrape import fetch_iqwiki_feeds, fetch_mirror_feeds 12 | from omniagent.index.pgvector_store import build_vector_store 13 | 14 | load_dotenv() 15 | 16 | record_manager = SQLRecordManager("backend", db_url=settings.DB_CONNECTION) 17 | record_manager.create_schema() 18 | 19 | 20 | def _clear(): 21 | index([], record_manager, build_vector_store(), cleanup="incremental", source_id_key="id") 22 | 23 | 24 | def build_index(): 25 | indexing_iqwiki() 26 | indexing_mirror() 27 | 28 | 29 | def indexing_iqwiki(): 30 | index_feed(fetch_iqwiki_feeds, "iqwiki") 31 | 32 | 33 | def indexing_mirror(): 34 | index_feed(fetch_mirror_feeds, "mirror") 35 | 36 | 37 | def index_feed(fetch_function, feed_name): 38 | since_date = datetime.datetime.now() - datetime.timedelta(days=180) 39 | curr_date = datetime.datetime.now() 40 | since_ts = int(since_date.timestamp()) 41 | curr_ts = 
int(curr_date.timestamp()) 42 | 43 | cursor = None 44 | logger.info( 45 | f"Starting to index feed '{feed_name}' from " f"{since_date.strftime('%Y-%m-%d %H:%M:%S')} to" f" {curr_date.strftime('%Y-%m-%d %H:%M:%S')}" 46 | ) 47 | while True: 48 | resp = fetch_function(since_ts, curr_ts, cursor=cursor) 49 | if resp["meta"] is None: 50 | logger.info(f"no meta in response, done with {feed_name}!") 51 | break 52 | cursor = resp["meta"]["cursor"] 53 | logger.info(f"fetched {len(resp['data'])} records from {feed_name}," f" next cursor: {cursor}") 54 | 55 | records = resp.get("data", []) 56 | if len(records) == 0: 57 | break 58 | 59 | save_records(records) 60 | 61 | 62 | def save_records(records): 63 | docs = [build_docs(record) for record in records] 64 | final_docs = [doc for sublist in docs for doc in sublist] 65 | # index the documents 66 | indexing_result = index( 67 | final_docs, 68 | record_manager, 69 | build_vector_store(), 70 | cleanup="incremental", 71 | source_id_key="id", 72 | ) 73 | logger.info(f"Indexing result: {indexing_result}") 74 | 75 | 76 | text_splitter = CharacterTextSplitter( 77 | separator="\n\n", 78 | chunk_size=1000, 79 | chunk_overlap=200, 80 | length_function=len, 81 | is_separator_regex=False, 82 | ) 83 | 84 | 85 | def build_docs(record): 86 | title = record["actions"][0]["metadata"]["title"] 87 | body = record["actions"][0]["metadata"]["body"] 88 | txt = f"
Model | 165 |Score | 166 |First Token Latency | 167 |Token Output Rate | 168 |Function Call Support | 169 |
---|---|---|---|---|
{{ model.name }} | 173 |{{ model.score }}% | 174 |{{ model.first_token_latency }} | 175 |{{ model.token_rate }} | 176 |{{ model.function_call_support }} | 177 |
Model | 187 |Score | 188 |First Token Latency | 189 |Token Output Rate | 190 |Function Call Support | 191 |
---|---|---|---|---|
{{ model.name }} | 195 |{{ model.score }}% | 196 |{{ model.first_token_latency }} | 197 |{{ model.token_rate }} | 198 |{{ model.function_call_support }} | 199 |
From
188 |{account || 'Not connected'}
189 |Resolving ENS...
} 221 | {resolvedAddress && } 222 | {resolvedAddress &&{resolvedAddress}
} 223 |Estimated gas fee
226 | {isEstimateError ? ( 227 |Error estimating gas
228 | ) : ( 229 |{estimatedGasFee} ETH
230 | )} 231 |Insufficient balance for transfer and gas fee
} 243 |