├── src ├── backend │ ├── auth │ │ ├── __init__.py │ │ ├── auth_utils.py │ │ └── sample_user.py │ ├── context │ │ └── __init__.py │ ├── helpers │ │ ├── __init__.py │ │ ├── dutils.py │ │ ├── otlp_tracing.py │ │ ├── text.py │ │ ├── azureblob.py │ │ ├── coding.py │ │ ├── summarizeutils.py │ │ ├── dcfutils.py │ │ ├── yfutils.py │ │ └── secutils.py │ ├── models │ │ └── __init__.py │ ├── Dockerfile │ ├── event_utils.py │ ├── .env.sample │ ├── requirements.txt │ ├── .dockerrun │ ├── agents │ │ ├── generic.py │ │ ├── human.py │ │ ├── fundamental_analysis.py │ │ ├── agentutils.py │ │ ├── company_analyst.py │ │ ├── base_agent.py │ │ ├── earningcalls_analyst.py │ │ └── forecaster.py │ ├── middleware │ │ └── health_check.py │ ├── handlers │ │ └── runtime_interrupt.py │ ├── Playground.ipynb │ └── config.py ├── .dockerignore └── frontend │ ├── .dockerenv │ ├── requirements.txt │ ├── wwwroot │ ├── assets │ │ ├── images │ │ │ ├── A.png │ │ │ ├── AA.png │ │ │ ├── CA.png │ │ │ ├── EA.png │ │ │ ├── HA.png │ │ │ ├── PA.png │ │ │ ├── SA.png │ │ │ ├── TA.png │ │ │ ├── U.png │ │ │ ├── add.png │ │ │ ├── done.png │ │ │ ├── Unknown.png │ │ │ ├── air-button.svg │ │ │ └── stars.svg │ │ ├── avatar │ │ │ ├── user0.png │ │ │ ├── user1.png │ │ │ ├── user2.png │ │ │ ├── user3.png │ │ │ ├── user4.png │ │ │ ├── user5.png │ │ │ ├── hr_agent.png │ │ │ ├── manager.png │ │ │ ├── unknown.png │ │ │ ├── tech_agent.png │ │ │ ├── legal_agent.png │ │ │ ├── product_agent.png │ │ │ ├── marketing_agent.png │ │ │ ├── procurement_agent.png │ │ │ ├── expense_billing_agent.png │ │ │ └── invoice_reconciliation_agent.png │ │ ├── favicon │ │ │ ├── favicon-16x16.png │ │ │ └── favicon-32x32.png │ │ ├── Send.svg │ │ ├── microsoft-logo.svg │ │ ├── theme.css │ │ └── app-logo.svg │ ├── utils.js │ ├── app.html │ ├── task │ │ ├── employee.html │ │ └── task.css │ ├── home │ │ ├── home.css │ │ └── home.js │ └── app.css │ ├── Dockerfile │ ├── .dockerrun │ └── frontend_server.py ├── documentation ├── images │ ├── readme │ │ ├── macae-home.png │ │ ├── userStory.png │ │ ├── macae-report.png │ │ ├── customerTruth.png │ │ ├── oneClickDeploy.png │ │ ├── macae-application.png │ │ └── macae-architecture.png │ └── azure-app-service-auth-setup │ │ ├── Web.png │ │ ├── AddDetails.png │ │ ├── WebAppURL.png │ │ ├── AddPlatform.png │ │ ├── AddRedirectURL.png │ │ ├── NewRegistration.png │ │ ├── AppAuthentication.png │ │ ├── Appregistrations.png │ │ ├── MicrosoftEntraID.png │ │ ├── AppAuthIdentityProvider.png │ │ ├── AppAuthenticationIdentity.png │ │ ├── AppAuthIdentityProviderAdd.png │ │ └── AppAuthIdentityProviderAdded.png ├── azure_app_service_auth_setup.md └── LocalDeployment.md ├── deploy ├── macae-large.bicepparam ├── macae-mini.bicepparam └── macae-dev.bicep ├── .github ├── CODEOWNER ├── ISSUE_TEMPLATE │ ├── subtask.md │ ├── feature_request.md │ └── bug_report.md ├── CODE_OF_CONDUCT.md ├── workflows │ ├── pr-title-checker.yml │ ├── stale-bot.yml │ ├── agnext-biab-02-containerimage.yml │ ├── create-release.yml │ └── codeql.yml ├── dependabot.yml └── PULL_REQUEST_TEMPLATE.md ├── SUPPORT.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── SECURITY.md └── TRANSPARENCY_FAQS.md /src/backend/auth/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/backend/context/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /src/backend/helpers/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/backend/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/.dockerignore: -------------------------------------------------------------------------------- 1 | .env 2 | .env.sample 3 | test.http -------------------------------------------------------------------------------- /src/frontend/.dockerenv: -------------------------------------------------------------------------------- 1 | BACKEND_API_URL=http://localhost:8000 2 | -------------------------------------------------------------------------------- /src/frontend/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | uvicorn 3 | jinja2 4 | azure-identity 5 | python-dotenv 6 | python-multipart -------------------------------------------------------------------------------- /documentation/images/readme/macae-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/readme/macae-home.png -------------------------------------------------------------------------------- /documentation/images/readme/userStory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/readme/userStory.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/A.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/A.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/AA.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/AA.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/CA.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/CA.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/EA.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/EA.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/HA.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/HA.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/PA.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/PA.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/SA.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/SA.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/TA.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/TA.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/U.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/U.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/add.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/add.png -------------------------------------------------------------------------------- /documentation/images/readme/macae-report.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/readme/macae-report.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/user0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/user0.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/user1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/user1.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/user2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/user2.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/user3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/user3.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/user4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/user4.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/user5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/user5.png 
-------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/done.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/done.png -------------------------------------------------------------------------------- /documentation/images/readme/customerTruth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/readme/customerTruth.png -------------------------------------------------------------------------------- /documentation/images/readme/oneClickDeploy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/readme/oneClickDeploy.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/hr_agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/hr_agent.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/manager.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/manager.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/unknown.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/unknown.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/Unknown.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/images/Unknown.png -------------------------------------------------------------------------------- /documentation/images/readme/macae-application.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/readme/macae-application.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/tech_agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/tech_agent.png -------------------------------------------------------------------------------- /documentation/images/readme/macae-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/readme/macae-architecture.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/legal_agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/legal_agent.png 
-------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/product_agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/product_agent.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/marketing_agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/marketing_agent.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/favicon/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/favicon/favicon-16x16.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/favicon/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/favicon/favicon-32x32.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/Web.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/Web.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/procurement_agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/procurement_agent.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/expense_billing_agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/expense_billing_agent.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/AddDetails.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/AddDetails.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/WebAppURL.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/WebAppURL.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/AddPlatform.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/AddPlatform.png -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/avatar/invoice_reconciliation_agent.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/src/frontend/wwwroot/assets/avatar/invoice_reconciliation_agent.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/AddRedirectURL.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/AddRedirectURL.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/NewRegistration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/NewRegistration.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/AppAuthentication.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/AppAuthentication.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/Appregistrations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/Appregistrations.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/MicrosoftEntraID.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/MicrosoftEntraID.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/AppAuthIdentityProvider.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/AppAuthIdentityProvider.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/AppAuthenticationIdentity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/AppAuthenticationIdentity.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/AppAuthIdentityProviderAdd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/AppAuthIdentityProviderAdd.png -------------------------------------------------------------------------------- /documentation/images/azure-app-service-auth-setup/AppAuthIdentityProviderAdded.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/akshata29/finagent/HEAD/documentation/images/azure-app-service-auth-setup/AppAuthIdentityProviderAdded.png 
-------------------------------------------------------------------------------- /src/frontend/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11-slim AS frontend 2 | WORKDIR /frontend 3 | COPY . . 4 | RUN pip install --no-cache-dir -r requirements.txt 5 | EXPOSE 3000 6 | CMD ["uvicorn", "frontend_server:app", "--host", "0.0.0.0", "--port", "3000"] -------------------------------------------------------------------------------- /deploy/macae-large.bicepparam: -------------------------------------------------------------------------------- 1 | using './macae.bicep' 2 | 3 | param resourceSize = { 4 | gpt4oCapacity: 50 5 | cosmosThroughput: 1000 6 | containerAppSize: { 7 | cpu: '2.0' 8 | memory: '4.0Gi' 9 | minReplicas: 1 10 | maxReplicas: 1 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /deploy/macae-mini.bicepparam: -------------------------------------------------------------------------------- 1 | using './macae.bicep' 2 | 3 | param resourceSize = { 4 | gpt4oCapacity: 15 5 | cosmosThroughput: 400 6 | containerAppSize: { 7 | cpu: '1.0' 8 | memory: '2.0Gi' 9 | minReplicas: 0 10 | maxReplicas: 1 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /.github/CODEOWNER: -------------------------------------------------------------------------------- 1 | # Lines starting with '#' are comments. 2 | # Each line is a file pattern followed by one or more owners. 3 | 4 | # These owners will be the default owners for everything in the repo. 5 | * @Avijit-Microsoft @Roopan-Microsoft @Prajwal-Microsoft @marktayl1 @Fr4nc3 6 | -------------------------------------------------------------------------------- /src/backend/Dockerfile: -------------------------------------------------------------------------------- 1 | # Base Python image 2 | FROM python:3.11 3 | 4 | 5 | # Backend app setup 6 | WORKDIR /app/backend 7 | COPY . . 8 | # Install dependencies 9 | RUN pip install --no-cache-dir -r requirements.txt 10 | EXPOSE 8000 11 | CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"] 12 | -------------------------------------------------------------------------------- /src/backend/helpers/dutils.py: -------------------------------------------------------------------------------- 1 | def decorate_all_methods(decorator): 2 | def class_decorator(cls): 3 | for attr_name, attr_value in cls.__dict__.items(): 4 | if callable(attr_value): 5 | setattr(cls, attr_name, decorator(attr_value)) 6 | return cls 7 | 8 | return class_decorator -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/subtask.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Sub task 3 | about: A sub task 4 | title: '' 5 | labels: subtask 6 | assignees: '' 7 | 8 | --- 9 | 10 | Required by 11 | 12 | # Description 13 | 14 | A clear and concise description of what this subtask is. 15 | 16 | # Tasks 17 | 18 | _To be filled in by the engineer picking up the subtask 19 | 20 | - [ ] Task 1 21 | - [ ] Task 2 22 | - [ ] ... -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | # Support 2 | 3 | ## How to file issues and get help 4 | 5 | This project uses GitHub Issues to track bugs and feature requests. 
Please search the existing 6 | issues before filing new issues to avoid duplicates. For new issues, file your bug or 7 | feature request as a new Issue. 8 | 9 | ## Microsoft Support Policy 10 | 11 | Support for this **PROJECT or PRODUCT** is limited to the resources listed above. 12 | -------------------------------------------------------------------------------- /src/backend/event_utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from azure.monitor.events.extension import track_event 4 | 5 | 6 | def track_event_if_configured(event_name: str, event_data: dict): 7 | instrumentation_key = os.getenv("APPLICATIONINSIGHTS_INSTRUMENTATION_KEY") 8 | if instrumentation_key: 9 | track_event(event_name, event_data) 10 | else: 11 | logging.warning(f"Skipping track_event for {event_name} as Application Insights is not configured") -------------------------------------------------------------------------------- /.github/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /src/backend/.env.sample: -------------------------------------------------------------------------------- 1 | COSMOSDB_ENDPOINT=https://.documents.azure.com:443/ 2 | COSMOSDB_DATABASE=autogen 3 | COSMOSDB_CONTAINER=memory 4 | 5 | AZURE_OPENAI_ENDPOINT=https://.openai.azure.com/ 6 | AZURE_OPENAI_DEPLOYMENT_NAME=chat4o 7 | AZURE_OPENAI_API_VERSION=2024-08-01-preview 8 | AZURE_OPENAI_KEY= 9 | 10 | BACKEND_API_URL='http://localhost:8000' 11 | FRONTEND_SITE_NAME='http://127.0.0.1:3000' 12 | 13 | FMP_API_KEY= 14 | SEC_API_KEY= 15 | DCF_API_KEY= 16 | 17 | AZURE_TENANT_ID= 18 | AZURE_CLIENT_ID= 19 | AZURE_CLIENT_SECRET= -------------------------------------------------------------------------------- /.github/workflows/pr-title-checker.yml: -------------------------------------------------------------------------------- 1 | name: "pr-title-checker" 2 | 3 | on: 4 | pull_request_target: 5 | types: 6 | - opened 7 | - edited 8 | - synchronize 9 | merge_group: 10 | 11 | permissions: 12 | pull-requests: read 13 | 14 | jobs: 15 | main: 16 | name: Validate PR title 17 | runs-on: ubuntu-latest 18 | if: ${{ github.event_name != 'merge_group' }} 19 | steps: 20 | - uses: amannn/action-semantic-pull-request@v5 21 | env: 22 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/stale-bot.yml: -------------------------------------------------------------------------------- 1 | name: 'Close stale issues and PRs' 2 | on: 3 | schedule: 4 | - cron: '0 1 * * *' 5 | 6 | permissions: 7 | contents: write 8 | issues: write 9 | pull-requests: write 10 | 11 | jobs: 12 | stale: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/stale@v9 16 | with: 17 | stale-issue-message: 'This issue is stale because it has been open 180 days with no activity. Remove stale label or comment or this will be closed in 30 days.' 18 | days-before-stale: 180 19 | days-before-close: 30 20 | -------------------------------------------------------------------------------- /src/frontend/.dockerrun: -------------------------------------------------------------------------------- 1 | -- Docker build 2 | docker build --tag ghcr.io/akshata29/finagent-frontend:latest . 3 | az acr build --registry astdnapublicacr --image finagents-frontend . 
4 | 5 | -- Docker Push 6 | echo | docker login ghcr.io -u akshata13 --password-stdin 7 | docker push ghcr.io/akshata29/finagent-frontend:latest 8 | 9 | -- Docker Run 10 | docker run --env-file .dockerenv -p 3000:3000 --name finagent-frontend -it ghcr.io/akshata29/finagent-frontend:latest 11 | 12 | -- Deploy 13 | az functionapp config container set --image ghcr.io/akshata29/entaoai-python:latest --name --resource-group -------------------------------------------------------------------------------- /src/backend/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | uvicorn 3 | autogen-agentchat 4 | autogen-ext[azure] 5 | autogen-ext[openai] 6 | azure-cosmos 7 | azure-identity 8 | azure-storage-blob 9 | python-dotenv 10 | python-multipart 11 | opentelemetry-api 12 | opentelemetry-sdk 13 | opentelemetry-exporter-otlp-proto-grpc 14 | opentelemetry-instrumentation-fastapi 15 | opentelemetry-instrumentation-openai 16 | opentelemetry-exporter-otlp-proto-http 17 | opentelemetry-exporter-otlp-proto-grpc 18 | reportlab 19 | aiohttp 20 | numpy 21 | pandas 22 | yfinance 23 | tenacity 24 | langchain 25 | sec_api 26 | mplfinance 27 | azure-monitor-opentelemetry 28 | azure-monitor-events-extension 29 | ta -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/air-button.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /src/backend/helpers/otlp_tracing.py: -------------------------------------------------------------------------------- 1 | from opentelemetry import trace 2 | from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import \ 3 | OTLPSpanExporter 4 | from opentelemetry.sdk.resources import Resource 5 | from opentelemetry.sdk.trace import TracerProvider 6 | from opentelemetry.sdk.trace.export import BatchSpanProcessor 7 | 8 | 9 | def configure_oltp_tracing(endpoint: str = None) -> trace.TracerProvider: 10 | # Configure Tracing 11 | tracer_provider = TracerProvider(resource=Resource({"service.name": "finagents"})) 12 | processor = BatchSpanProcessor(OTLPSpanExporter()) 13 | tracer_provider.add_span_processor(processor) 14 | trace.set_tracer_provider(tracer_provider) 15 | 16 | return tracer_provider 17 | -------------------------------------------------------------------------------- /src/backend/helpers/text.py: -------------------------------------------------------------------------------- 1 | from typing import Annotated 2 | 3 | class TextUtils: 4 | 5 | def check_text_length( 6 | text: Annotated[str, "text to check"], 7 | min_length: Annotated[int, "minimum length of the text, default to 0"] = 0, 8 | max_length: Annotated[int, "maximum length of the text, default to 100000"] = 100000, 9 | ) -> str: 10 | """ 11 | Check whether the length of the text (in words) exceeds the maximum length or falls below the minimum length. 12 | """ 13 | length = len(text.split()) 14 | if length > max_length: 15 | return f"Text length {length} exceeds the maximum length of {max_length}." 16 | elif length < min_length: 17 | return f"Text length {length} is less than the minimum length of {min_length}." 18 | else: 19 | return f"Text length {length} is within the expected range."
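A minimal usage sketch (not a file in this repository) for the two helpers above: `decorate_all_methods` from helpers/dutils.py and `TextUtils.check_text_length` from helpers/text.py. The `log_calls` decorator, the `ReportChecks` class, and the sample sentence are hypothetical, and the sketch assumes the `helpers` package is importable (e.g. when run from src/backend):

# Hypothetical sketch -- not part of the repo; illustrates combining
# helpers/dutils.py (decorate_all_methods) with helpers/text.py (TextUtils).
from functools import wraps

from helpers.dutils import decorate_all_methods
from helpers.text import TextUtils


def log_calls(func):
    # Illustrative decorator: print the function name before delegating.
    @wraps(func)
    def wrapper(*args, **kwargs):
        print(f"calling {func.__name__}")
        return func(*args, **kwargs)
    return wrapper


@decorate_all_methods(log_calls)
class ReportChecks:
    # Like TextUtils.check_text_length, this is a plain function on the class
    # (no self argument), so it is called through the class itself.
    def summary_ok(text: str) -> str:
        # check_text_length counts words (len(text.split())), not characters.
        return TextUtils.check_text_length(text, min_length=5, max_length=200)


if __name__ == "__main__":
    # Prints "calling summary_ok" followed by the length-check message.
    print(ReportChecks.summary_ok("Quarterly revenue grew on strong services demand."))

The same decoration pattern appears later in this dump in helpers/azureblob.py, where `@decorate_all_methods(init_blob_api)` wraps every method of `azureBlobApi` with a credential check.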
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | # Motivation 11 | 12 | A clear and concise description of why this feature would be useful and the value it would bring. 13 | Explain any alternatives considered and why they are not sufficient. 14 | 15 | # How would you feel if this feature request was implemented? 16 | 17 | _Share a gif from [giphy](https://giphy.com/) to tell us how you'd feel. Format: ![alt_text](https://media.giphy.com/media/xxx/giphy.gif)_ 18 | 19 | # Requirements 20 | 21 | A list of requirements to consider this feature delivered 22 | - Requirement 1 23 | - Requirement 2 24 | - ... 25 | 26 | # Tasks 27 | 28 | _To be filled in by the engineer picking up the issue_ 29 | 30 | - [ ] Task 1 31 | - [ ] Task 2 32 | - [ ] ... -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | This project welcomes contributions and suggestions. Most contributions require you to 4 | agree to a Contributor License Agreement (CLA) declaring that you have the right to, 5 | and actually do, grant us the rights to use your contribution. For details, visit 6 | https://cla.microsoft.com. 7 | 8 | When you submit a pull request, a CLA-bot will automatically determine whether you need 9 | to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the 10 | instructions provided by the bot. You will only need to do this once across all repositories using our CLA. 11 | 12 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 13 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 14 | or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
-------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/Send.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Dependabot configuration file 2 | # For more details, refer to: https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 3 | 4 | version: 2 5 | updates: 6 | # GitHub Actions dependencies 7 | - package-ecosystem: "github-actions" 8 | directory: "/" 9 | schedule: 10 | interval: "monthly" 11 | commit-message: 12 | prefix: "build" 13 | target-branch: "dependabotchanges" 14 | open-pull-requests-limit: 10 15 | 16 | - package-ecosystem: "pip" 17 | directory: "/src/backend" 18 | schedule: 19 | interval: "monthly" 20 | commit-message: 21 | prefix: "build" 22 | target-branch: "dependabotchanges" 23 | open-pull-requests-limit: 10 24 | 25 | - package-ecosystem: "pip" 26 | directory: "/src/frontend" 27 | schedule: 28 | interval: "monthly" 29 | commit-message: 30 | prefix: "build" 31 | target-branch: "dependabotchanges" 32 | open-pull-requests-limit: 10 -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | # Describe the bug 11 | A clear and concise description of what the bug is. 12 | 13 | # Expected behavior 14 | A clear and concise description of what you expected to happen. 15 | 16 | # How does this bug make you feel? 17 | _Share a gif from [giphy](https://giphy.com/) to tell us how you'd feel_ 18 | 19 | --- 20 | 21 | # Debugging information 22 | 23 | ## Steps to reproduce 24 | Steps to reproduce the behavior: 25 | 1. Go to '...' 26 | 2. Click on '....' 27 | 3. Scroll down to '....' 28 | 4. See error 29 | 30 | ## Screenshots 31 | If applicable, add screenshots to help explain your problem. 32 | 33 | ## Logs 34 | 35 | If applicable, add logs to help the engineer debug the problem. 36 | 37 | --- 38 | 39 | # Tasks 40 | 41 | _To be filled in by the engineer picking up the issue_ 42 | 43 | - [ ] Task 1 44 | - [ ] Task 2 45 | - [ ] ... -------------------------------------------------------------------------------- /src/backend/.dockerrun: -------------------------------------------------------------------------------- 1 | -- Docker build 2 | docker build --tag ghcr.io/akshata29/finagent-backend:latest . 3 | az acr build --registry astdnapublicacr --image finagents-backend .
4 | 5 | az containerapp revision deactivate --revision finagent-backend--latest --resource-group astdnapublic 6 | 7 | # az containerapp env create --name "astdnafinagent" --resource-group "astdnapublic" --location "eastus2" 8 | -- Docker Push 9 | echo | docker login ghcr.io -u akshata29 --password-stdin 10 | docker push ghcr.io/akshata29/finagent-backend:latest 11 | 12 | -- Docker Run 13 | docker run --env-file .dockerenv -p 8000:8000 --name finagent-backend -it ghcr.io/akshata29/finagent-backend:latest 14 | 15 | -- Deploy 16 | az containerapp create --name finagent-backend --resource-group astdnapublic --environment astdnafinagent --image astdnapublicacr.azurecr.io/finagents-backend --target-port 8000 --ingress external --registry-server astdnapublicacr.azurecr.io --user-assigned "astdnapubid" --registry-identity "/subscriptions/b14d5e08-eb70-45f2-ad78-ac5a9016fbf7/resourcegroups/astdnapublic/providers/Microsoft.ManagedIdentity/userAssignedIdentities/astdnapubid" --query properties.configuration.ingress.fqdn -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Copyright (c) Microsoft Corporation. 3 | 4 | MIT License 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE 23 | -------------------------------------------------------------------------------- /.github/workflows/agnext-biab-02-containerimage.yml: -------------------------------------------------------------------------------- 1 | name: Create and publish a Docker image 2 | on: 3 | push: 4 | branches: ['main', 'test', 'release'] 5 | paths: 6 | - 'agnext-biab-02/**' 7 | - '.github/workflows/agnext-biab-02-containerimage.yml' 8 | env: 9 | REGISTRY: ghcr.io 10 | IMAGE_NAME: ${{ github.repository }} 11 | jobs: 12 | build-and-push-image: 13 | runs-on: ubuntu-latest 14 | permissions: 15 | contents: read 16 | packages: write 17 | steps: 18 | - name: Checkout repository 19 | uses: actions/checkout@v4 20 | # - name: Download deps 21 | # run: | 22 | # curl -fsSL ${{ vars.AUTOGEN_WHL_URL }} -o agnext-biab-02/autogen_core-0.3.dev0-py3-none-any.whl 23 | - name: Log in to the Container registry 24 | uses: docker/login-action@v3 25 | with: 26 | registry: ${{ env.REGISTRY }} 27 | username: ${{ github.actor }} 28 | password: ${{ secrets.GITHUB_TOKEN }} 29 | - name: Extract metadata (tags, labels) for Docker 30 | id: meta 31 | uses: docker/metadata-action@v5 32 | with: 33 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 34 | tags: | 35 | type=ref,event=branch 36 | type=sha 37 | - name: Build and push Docker image 38 | uses: docker/build-push-action@v6 39 | with: 40 | context: agnext-biab-02/ 41 | file: agnext-biab-02/Dockerfile 42 | push: true 43 | tags: ${{ steps.meta.outputs.tags }} 44 | labels: ${{ steps.meta.outputs.labels }} -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Purpose 2 | 3 | * ... 4 | 5 | ## Does this introduce a breaking change? 6 | 7 | 8 | - [ ] Yes 9 | - [ ] No 10 | 11 | 25 | 26 | ## How to Test 27 | * Get the code 28 | 29 | ``` 30 | git clone [repo-address] 31 | cd [repo-name] 32 | git checkout [branch-name] 33 | npm install 34 | ``` 35 | 36 | * Test the code 37 | 38 | ``` 39 | ``` 40 | 41 | ## What to Check 42 | Verify that the following are valid 43 | * ... 
44 | 45 | ## Other Information 46 | -------------------------------------------------------------------------------- /src/backend/helpers/azureblob.py: -------------------------------------------------------------------------------- 1 | from helpers.dutils import decorate_all_methods 2 | from azure.identity import ClientSecretCredential 3 | from azure.storage.blob import BlobServiceClient 4 | from config import Config 5 | 6 | # from finrobot.utils import decorate_all_methods, get_next_weekday 7 | from functools import wraps 8 | 9 | def init_blob_api(func): 10 | @wraps(func) 11 | def wrapper(*args, **kwargs): 12 | global tenantId, clientId, clientSecret, blobAccountName, blobContainerName 13 | if Config.AZURE_TENANT_ID is None: 14 | print("Please set the environment variable AZURE_TENANT_ID to use the Blob API.") 15 | return None 16 | else: 17 | tenantId = Config.AZURE_TENANT_ID 18 | clientId = Config.AZURE_CLIENT_ID 19 | clientSecret = Config.AZURE_CLIENT_SECRET 20 | blobAccountName = Config.AZURE_BLOB_STORAGE_NAME 21 | blobContainerName = Config.AZURE_BLOB_CONTAINER_NAME 22 | print("Blob api key found successfully.") 23 | return func(*args, **kwargs) 24 | 25 | return wrapper 26 | 27 | 28 | @decorate_all_methods(init_blob_api) 29 | class azureBlobApi: 30 | 31 | def copyReport(downloadPath, blobName): 32 | try: 33 | with open(downloadPath, "rb") as file: 34 | readBytes = file.read() 35 | credentials = ClientSecretCredential(tenantId, clientId, clientSecret) 36 | blobService = BlobServiceClient( 37 | "https://{}.blob.core.windows.net".format(blobAccountName), credential=credentials) 38 | blobClient = blobService.get_blob_client(container=blobContainerName, blob=blobName) 39 | blobClient.upload_blob(readBytes,overwrite=True) 40 | return blobClient.url 41 | except Exception as e: 42 | print("Error in copyReport: ", e) 43 | return None -------------------------------------------------------------------------------- /src/backend/agents/generic.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from autogen_core import AgentId 4 | from autogen_core import default_subscription 5 | from autogen_ext.models.openai import AzureOpenAIChatCompletionClient 6 | from autogen_core.tools import FunctionTool, Tool 7 | 8 | from agents.base_agent import BaseAgent 9 | from context.cosmos_memory import CosmosBufferedChatCompletionContext 10 | 11 | async def dummy_function() -> str: 12 | # This is a placeholder function, for a proper Azure AI Search RAG process. 13 | 14 | """This is a placeholder""" 15 | return "This is a placeholder function" 16 | 17 | 18 | # Create the ProductTools list 19 | def get_generic_tools() -> List[Tool]: 20 | GenericTools: List[Tool] = [ 21 | FunctionTool( 22 | dummy_function, 23 | description="This is a placeholder", 24 | name="dummy_function", 25 | ), 26 | ] 27 | return GenericTools 28 | 29 | 30 | @default_subscription 31 | class GenericAgent(BaseAgent): 32 | def __init__( 33 | self, 34 | model_client: AzureOpenAIChatCompletionClient, 35 | session_id: str, 36 | user_id: str, 37 | memory: CosmosBufferedChatCompletionContext, 38 | generic_tools: List[Tool], 39 | generic_tool_agent_id: AgentId, 40 | ) -> None: 41 | super().__init__( 42 | "GenericAgent", 43 | model_client, 44 | session_id, 45 | user_id, 46 | memory, 47 | generic_tools, 48 | generic_tool_agent_id, 49 | "You are a generic agent. You are used to handle generic tasks that a general Large Language Model can assist with. 
You are being called as a fallback, when no other agents are able to use their specialised functions in order to solve the user's task. Summarize back the user what was done. Do not use any function calling- just use your native LLM response.", 50 | ) 51 | -------------------------------------------------------------------------------- /src/backend/auth/auth_utils.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import json 3 | import logging 4 | 5 | 6 | def get_authenticated_user_details(request_headers): 7 | user_object = {} 8 | 9 | # check the headers for the Principal-Id (the guid of the signed in user) 10 | if "x-ms-client-principal-id" not in request_headers: 11 | logging.info("No user principal found in headers") 12 | # if it's not, assume we're in development mode and return a default user 13 | from . import sample_user 14 | 15 | raw_user_object = sample_user.sample_user 16 | else: 17 | # if it is, get the user details from the EasyAuth headers 18 | raw_user_object = {k: v for k, v in request_headers.items()} 19 | 20 | normalized_headers = {k.lower(): v for k, v in raw_user_object.items()} 21 | user_object["user_principal_id"] = normalized_headers.get("x-ms-client-principal-id") 22 | user_object["user_name"] = normalized_headers.get("x-ms-client-principal-name") 23 | user_object["auth_provider"] = normalized_headers.get("x-ms-client-principal-idp") 24 | user_object["auth_token"] = normalized_headers.get("x-ms-token-aad-id-token") 25 | user_object["client_principal_b64"] = normalized_headers.get("x-ms-client-principal") 26 | user_object["aad_id_token"] = normalized_headers.get("x-ms-token-aad-id-token") 27 | 28 | return user_object 29 | 30 | 31 | def get_tenantid(client_principal_b64): 32 | logger = logging.getLogger(__name__) 33 | tenant_id = "" 34 | if client_principal_b64: 35 | try: 36 | # Decode the base64 header to get the JSON string 37 | decoded_bytes = base64.b64decode(client_principal_b64) 38 | decoded_string = decoded_bytes.decode("utf-8") 39 | # Convert the JSON string1into a Python dictionary 40 | user_info = json.loads(decoded_string) 41 | # Extract the tenant ID 42 | tenant_id = user_info.get("tid") # 'tid' typically holds the tenant ID 43 | except Exception as ex: 44 | logger.exception(ex) 45 | return tenant_id 46 | -------------------------------------------------------------------------------- /src/frontend/frontend_server.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uvicorn 3 | 4 | from fastapi import FastAPI 5 | from fastapi.responses import FileResponse, HTMLResponse, RedirectResponse, PlainTextResponse 6 | from fastapi.staticfiles import StaticFiles 7 | 8 | # Resolve wwwroot path relative to this script 9 | WWWROOT_PATH = os.path.join(os.path.dirname(__file__), 'wwwroot') 10 | 11 | # Debugging information 12 | print(f"Current Working Directory: {os.getcwd()}") 13 | print(f"Absolute path to wwwroot: {WWWROOT_PATH}") 14 | if not os.path.exists(WWWROOT_PATH): 15 | raise FileNotFoundError(f"wwwroot directory not found at path: {WWWROOT_PATH}") 16 | print(f"Files in wwwroot: {os.listdir(WWWROOT_PATH)}") 17 | 18 | app = FastAPI() 19 | 20 | import html 21 | 22 | @app.get("/config.js", response_class=PlainTextResponse) 23 | def get_config(): 24 | backend_url = html.escape(os.getenv("BACKEND_API_URL", "http://localhost:8001")) 25 | return f'const BACKEND_API_URL = "{backend_url}";' 26 | 27 | 28 | # Redirect root to app.html 29 | @app.get("/") 30 | 
async def index_redirect(): 31 | return RedirectResponse(url="/app.html?v=home") 32 | 33 | 34 | # Mount static files 35 | app.mount("/", StaticFiles(directory=WWWROOT_PATH, html=True), name="static") 36 | 37 | 38 | # Debugging route 39 | @app.get("/debug") 40 | async def debug_route(): 41 | return { 42 | "message": "Frontend debug route working", 43 | "wwwroot_path": WWWROOT_PATH, 44 | "files": os.listdir(WWWROOT_PATH), 45 | } 46 | 47 | 48 | # Catch-all route for SPA 49 | @app.get("/{full_path:path}") 50 | async def catch_all(full_path: str): 51 | print(f"Requested path: {full_path}") 52 | app_html_path = os.path.join(WWWROOT_PATH, "app.html") 53 | 54 | if os.path.exists(app_html_path): 55 | return FileResponse(app_html_path) 56 | else: 57 | return HTMLResponse( 58 | content=f"app.html not found. Current path: {app_html_path}", 59 | status_code=404, 60 | ) 61 | 62 | if __name__ == "__main__": 63 | uvicorn.run(app, host="127.0.0.1", port=3000) 64 | -------------------------------------------------------------------------------- /.github/workflows/create-release.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - main 5 | 6 | permissions: 7 | contents: write 8 | pull-requests: write 9 | 10 | name: create-release 11 | 12 | jobs: 13 | create-release: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@v4 18 | with: 19 | ref: ${{ github.event.workflow_run.head_sha }} 20 | 21 | - uses: codfish/semantic-release-action@v3 22 | id: semantic 23 | with: 24 | tag-format: 'v${version}' 25 | additional-packages: | 26 | ['conventional-changelog-conventionalcommits@7'] 27 | plugins: | 28 | [ 29 | [ 30 | "@semantic-release/commit-analyzer", 31 | { 32 | "preset": "conventionalcommits" 33 | } 34 | ], 35 | [ 36 | "@semantic-release/release-notes-generator", 37 | { 38 | "preset": "conventionalcommits", 39 | "presetConfig": { 40 | "types": [ 41 | { type: 'feat', section: 'Features', hidden: false }, 42 | { type: 'fix', section: 'Bug Fixes', hidden: false }, 43 | { type: 'perf', section: 'Performance Improvements', hidden: false }, 44 | { type: 'revert', section: 'Reverts', hidden: false }, 45 | { type: 'docs', section: 'Other Updates', hidden: false }, 46 | { type: 'style', section: 'Other Updates', hidden: false }, 47 | { type: 'chore', section: 'Other Updates', hidden: false }, 48 | { type: 'refactor', section: 'Other Updates', hidden: false }, 49 | { type: 'test', section: 'Other Updates', hidden: false }, 50 | { type: 'build', section: 'Other Updates', hidden: false }, 51 | { type: 'ci', section: 'Other Updates', hidden: false } 52 | ] 53 | } 54 | } 55 | ], 56 | '@semantic-release/github' 57 | ] 58 | env: 59 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 60 | - run: echo ${{ steps.semantic.outputs.release-version }} 61 | 62 | - run: echo "$OUTPUTS" 63 | env: 64 | OUTPUTS: ${{ toJson(steps.semantic.outputs) }} 65 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). 
6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). 40 | 41 | 42 | -------------------------------------------------------------------------------- /documentation/azure_app_service_auth_setup.md: -------------------------------------------------------------------------------- 1 | # Set Up Authentication in Azure App Service 2 | 3 | ## Step 1: Add Authentication in Azure App Service configuration 4 | 5 | 1. Click on `Authentication` from left menu. 6 | 7 | ![Authentication](./images/azure-app-service-auth-setup/AppAuthentication.png) 8 | 9 | 2. Click on `+ Add Provider` to see a list of identity providers. 10 | 11 | ![Authentication Identity](./images/azure-app-service-auth-setup/AppAuthenticationIdentity.png) 12 | 13 | 3. Click on `+ Add Provider` to see a list of identity providers. 14 | 15 | ![Add Provider](./images/azure-app-service-auth-setup/AppAuthIdentityProvider.png) 16 | 17 | 4. Select the first option `Microsoft Entra Id` from the drop-down list. If `Create new app registration` is disabled, go to [Step 1a](#step-1a-creating-a-new-app-registration). 18 | 19 | ![Add Provider](./images/azure-app-service-auth-setup/AppAuthIdentityProviderAdd.png) 20 | 21 | 5. 
Accept the default values and click on the `Add` button to go back to the previous page with the identity provider added. 22 | 23 | ![Add Provider](./images/azure-app-service-auth-setup/AppAuthIdentityProviderAdded.png) 24 | 25 | ### Step 1a: Creating a new App Registration 26 | 27 | 1. Click on `Home` and select `Microsoft Entra ID`. 28 | 29 | ![Microsoft Entra ID](./images/azure-app-service-auth-setup/MicrosoftEntraID.png) 30 | 31 | 2. Click on `App registrations`. 32 | 33 | ![App registrations](./images/azure-app-service-auth-setup/Appregistrations.png) 34 | 35 | 3. Click on `+ New registration`. 36 | 37 | ![New Registrations](./images/azure-app-service-auth-setup/NewRegistration.png) 38 | 39 | 4. Provide the `Name`, select supported account types as `Accounts in this organizational directory only (Contoso only - Single tenant)`, select platform as `Web`, enter/select the `URL` and register. 40 | 41 | ![Add Details](./images/azure-app-service-auth-setup/AddDetails.png) 42 | 43 | 5. After the application is created successfully, click on `Add a Redirect URL`. 44 | 45 | ![Redirect URL](./images/azure-app-service-auth-setup/AddRedirectURL.png) 46 | 47 | 6. Click on `+ Add a platform`. 48 | 49 | ![+ Add platform](./images/azure-app-service-auth-setup/AddPlatform.png) 50 | 51 | 7. Click on `Web`. 52 | 53 | ![Web](./images/azure-app-service-auth-setup/Web.png) 54 | 55 | 8. Enter the `web app URL` (provide the app service name in place of XXXX) and Save. Then go back to [Step 1](#step-1-add-authentication-in-azure-app-service-configuration) and follow from _Point 4_: choose `Pick an existing app registration in this directory` from the Add an Identity Provider page and provide the newly registered App Name. 56 | E.g. https://appservicename.azurewebsites.net/.auth/login/aad/callback 57 | 58 | ![Add Details](./images/azure-app-service-auth-setup/WebAppURL.png) 59 | -------------------------------------------------------------------------------- /src/backend/middleware/health_check.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Awaitable, Callable, Dict 3 | 4 | from fastapi import Request 5 | from fastapi.encoders import jsonable_encoder 6 | from fastapi.responses import JSONResponse, PlainTextResponse 7 | from starlette.middleware.base import BaseHTTPMiddleware 8 | 9 | 10 | class HealthCheckResult: 11 | def __init__(self, status: bool, message: str): 12 | self.status = status 13 | self.message = message 14 | 15 | 16 | class HealthCheckSummary: 17 | def __init__(self): 18 | self.status = True 19 | self.results = {} 20 | 21 | def Add(self, name: str, result: HealthCheckResult): 22 | self.results[name] = result 23 | self.status = self.status and result.status 24 | 25 | def AddDefault(self): 26 | self.Add( 27 | "Default", 28 | HealthCheckResult( 29 | True, "This is the default check, it always returns True" 30 | ), 31 | ) 32 | 33 | def AddException(self, name: str, exception: Exception): 34 | self.Add(name, HealthCheckResult(False, str(exception))) 35 | 36 | 37 | class HealthCheckMiddleware(BaseHTTPMiddleware): 38 | __healthz_path = "/healthz" 39 | 40 | def __init__( 41 | self, 42 | app, 43 | checks: Dict[str, Callable[..., Awaitable[HealthCheckResult]]], 44 | password: str = None, 45 | ): 46 | super().__init__(app) 47 | self.checks = checks 48 | self.password = password 49 | 50 | async def check(self) -> HealthCheckSummary: 51 | results = HealthCheckSummary() 52 | results.AddDefault() 53 | 54 | for name, check in
self.checks.items(): 55 | if not name or not check: 56 | logging.warning(f"Check '{name}' is not valid") 57 | continue 58 | try: 59 | if not callable(check) or not hasattr(check, "__await__"): 60 | logging.error(f"Check {name} is not a coroutine function") 61 | raise ValueError(f"Check {name} is not a coroutine function") 62 | results.Add(name, await check()) 63 | except Exception as e: 64 | logging.error(f"Check {name} failed: {e}") 65 | results.AddException(name, e) 66 | 67 | return results 68 | 69 | async def dispatch(self, request: Request, call_next): 70 | if request.url.path == self.__healthz_path: 71 | status = await self.check() 72 | 73 | status_code = 200 if status.status else 503 74 | status_message = "OK" if status.status else "Service Unavailable" 75 | 76 | if ( 77 | self.password is not None 78 | and request.query_params.get("code") == self.password 79 | ): 80 | return JSONResponse(jsonable_encoder(status), status_code=status_code) 81 | 82 | return PlainTextResponse(status_message, status_code=status_code) 83 | 84 | response = await call_next(request) 85 | return response 86 | -------------------------------------------------------------------------------- /src/backend/auth/sample_user.py: -------------------------------------------------------------------------------- 1 | sample_user = { 2 | "Accept": "*/*", 3 | "Accept-Encoding": "gzip, deflate, br", 4 | "Accept-Language": "en", 5 | "Client-Ip": "22.222.222.2222:64379", 6 | "Content-Length": "192", 7 | "Content-Type": "application/json", 8 | "Cookie": "AppServiceAuthSession=/AuR5ENU+pmpoN3jnymP8fzpmVBgphx9uPQrYLEWGcxjIITIeh8NZW7r3ePkG8yBcMaItlh1pX4nzg5TFD9o2mxC/5BNDRe/uuu0iDlLEdKecROZcVRY7QsFdHLjn9KB90Z3d9ZeLwfVIf0sZowWJt03BO5zKGB7vZgL+ofv3QY3AaYn1k1GtxSE9HQWJpWar7mOA64b7Lsy62eY3nxwg3AWDsP3/rAta+MnDCzpdlZMFXcJLj+rsCppW+w9OqGhKQ7uCs03BPeon3qZOdmE8cOJW3+i96iYlhneNQDItHyQqEi1CHbBTSkqwpeOwWP4vcwGM22ynxPp7YFyiRw/X361DGYy+YkgYBkXq1AEIDZ44BCBz9EEaEi0NU+m6yUOpNjEaUtrJKhQywcM2odojdT4XAY+HfTEfSqp0WiAkgAuE/ueCu2JDOfvxGjCgJ4DGWCoYdOdXAN1c+MenT4OSvkMO41YuPeah9qk9ixkJI5s80lv8rUu1J26QF6pstdDkYkAJAEra3RQiiO1eAH7UEb3xHXn0HW5lX8ZDX3LWiAFGOt5DIKxBKFymBKJGzbPFPYjfczegu0FD8/NQPLl2exAX3mI9oy/tFnATSyLO2E8DxwP5wnYVminZOQMjB/I4g3Go14betm0MlNXlUbU1fyS6Q6JxoCNLDZywCoU9Y65UzimWZbseKsXlOwYukCEpuQ5QPT55LuEAWhtYier8LSh+fvVUsrkqKS+bg0hzuoX53X6aqUr7YB31t0Z2zt5TT/V3qXpdyD8Xyd884PqysSkJYa553sYx93ETDKSsfDguanVfn2si9nvDpvUWf6/R02FmQgXiaaaykMgYyIuEmE77ptsivjH3hj/MN4VlePFWokcchF4ciqqzonmICmjEHEx5zpjU2Kwa+0y7J5ROzVVygcnO1jH6ZKDy9bGGYL547bXx/iiYBYqSIQzleOAkCeULrGN2KEHwckX5MpuRaqTpoxdZH9RJv0mIWxbDA0kwGsbMICQd0ZODBkPUnE84qhzvXInC+TL7MbutPEnGbzgxBAS1c2Ct4vxkkjykOeOxTPxqAhxoefwUfIwZZax6A9LbeYX2bsBpay0lScHcA==", 9 | "Disguised-Host": "your_app_service.azurewebsites.net", 10 | "Host": "your_app_service.azurewebsites.net", 11 | "Max-Forwards": "10", 12 | "Origin": "https://your_app_service.azurewebsites.net", 13 | "Referer": "https://your_app_service.azurewebsites.net/", 14 | "Sec-Ch-Ua": '"Microsoft Edge";v="113", "Chromium";v="113", "Not-A.Brand";v="24"', 15 | "Sec-Ch-Ua-Mobile": "?0", 16 | "Sec-Ch-Ua-Platform": '"Windows"', 17 | "Sec-Fetch-Dest": "empty", 18 | "Sec-Fetch-Mode": "cors", 19 | "Sec-Fetch-Site": "same-origin", 20 | "Traceparent": "00-24e9a8d1b06f233a3f1714845ef971a9-3fac69f81ca5175c-00", 21 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.42", 22 | "Was-Default-Hostname": "your_app_service.azurewebsites.net", 23 | "X-Appservice-Proto": "https", 24 | 
"X-Arr-Log-Id": "4102b832-6c88-4c7c-8996-0edad9e4358f", 25 | "X-Arr-Ssl": "2048|256|CN=Microsoft Azure TLS Issuing CA 02, O=Microsoft Corporation, C=US|CN=*.azurewebsites.net, O=Microsoft Corporation, L=Redmond, S=WA, C=US", 26 | "X-Client-Ip": "22.222.222.222", 27 | "X-Client-Port": "64379", 28 | "X-Forwarded-For": "22.222.222.22:64379", 29 | "X-Forwarded-Proto": "https", 30 | "X-Forwarded-Tlsversion": "1.2", 31 | "X-Ms-Client-Principal": "your_base_64_encoded_token", 32 | "X-Ms-Client-Principal-Id": "00000000-0000-0000-0000-000000000000", 33 | "X-Ms-Client-Principal-Idp": "aad", 34 | "X-Ms-Client-Principal-Name": "testusername@constoso.com", 35 | "X-Ms-Token-Aad-Id-Token": "your_aad_id_token", 36 | "X-Original-Url": "/chatgpt", 37 | "X-Site-Deployment-Id": "your_app_service", 38 | "X-Waws-Unencoded-Url": "/chatgpt", 39 | } 40 | -------------------------------------------------------------------------------- /src/backend/agents/human.py: -------------------------------------------------------------------------------- 1 | # human_agent.py 2 | import logging 3 | 4 | from autogen_core import AgentId, MessageContext 5 | from autogen_core import (RoutedAgent, default_subscription, 6 | message_handler) 7 | 8 | from context.cosmos_memory import CosmosBufferedChatCompletionContext 9 | from models.messages import ( 10 | ApprovalRequest, 11 | HumanFeedback, 12 | HumanClarification, 13 | HumanFeedbackStatus, 14 | StepStatus, 15 | AgentMessage, 16 | Step, 17 | ) 18 | from event_utils import track_event_if_configured 19 | 20 | 21 | @default_subscription 22 | class HumanAgent(RoutedAgent): 23 | def __init__( 24 | self, 25 | memory: CosmosBufferedChatCompletionContext, 26 | user_id:str, 27 | group_chat_manager_id: AgentId, 28 | ) -> None: 29 | super().__init__("HumanAgent") 30 | self._memory = memory 31 | self.user_id = user_id 32 | self.group_chat_manager_id = group_chat_manager_id 33 | 34 | @message_handler 35 | async def handle_step_feedback( 36 | self, message: HumanFeedback, ctx: MessageContext 37 | ) -> None: 38 | """ 39 | Handles the human feedback for a single step from the GroupChatManager. 40 | Updates the step status and stores the feedback in the session context. 
41 | """ 42 | # Retrieve the step from the context 43 | step: Step = await self._memory.get_step(message.step_id, message.session_id) 44 | if not step: 45 | logging.info(f"No step found with id: {message.step_id}") 46 | return 47 | 48 | # Update the step status and feedback 49 | step.status = StepStatus.completed 50 | step.human_feedback = message.human_feedback 51 | await self._memory.update_step(step) 52 | await self._memory.add_item( 53 | AgentMessage( 54 | session_id=message.session_id, 55 | user_id=self.user_id, 56 | plan_id=step.plan_id, 57 | content=f"Received feedback for step: {step.action}", 58 | source="HumanAgent", 59 | step_id=message.step_id, 60 | ) 61 | ) 62 | logging.info(f"HumanAgent received feedback for step: {step}") 63 | 64 | track_event_if_configured( 65 | f"Human Agent - Received feedback for step: {step} and added into the cosmos", 66 | { 67 | "session_id": message.session_id, 68 | "user_id": self.user_id, 69 | "plan_id": step.plan_id, 70 | "content": f"Received feedback for step: {step.action}", 71 | "source": "HumanAgent", 72 | "step_id": message.step_id, 73 | }, 74 | ) 75 | 76 | # Notify the GroupChatManager that the step has been completed 77 | await self._memory.add_item( 78 | ApprovalRequest( 79 | session_id=message.session_id, 80 | user_id=self.user_id, 81 | plan_id=step.plan_id, 82 | step_id=message.step_id, 83 | agent_id=self.group_chat_manager_id, 84 | ) 85 | ) 86 | logging.info(f"HumanAgent sent approval request for step: {step}") 87 | -------------------------------------------------------------------------------- /src/backend/handlers/runtime_interrupt.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List, Optional 2 | 3 | from autogen_core import AgentId 4 | from autogen_core import DefaultInterventionHandler 5 | 6 | from models.messages import GetHumanInputMessage, GroupChatMessage 7 | 8 | 9 | class NeedsUserInputHandler(DefaultInterventionHandler): 10 | def __init__(self): 11 | self.question_for_human: Optional[GetHumanInputMessage] = None 12 | self.messages: List[Dict[str, Any]] = [] 13 | 14 | async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any: 15 | sender_type = sender.type if sender else "unknown_type" 16 | sender_key = sender.key if sender else "unknown_key" 17 | print( 18 | f"NeedsUserInputHandler received message: {message} from sender: {sender}" 19 | ) 20 | if isinstance(message, GetHumanInputMessage): 21 | self.question_for_human = message 22 | self.messages.append( 23 | { 24 | "agent": {"type": sender_type, "key": sender_key}, 25 | "content": message.content, 26 | } 27 | ) 28 | print("Captured question for human in NeedsUserInputHandler") 29 | elif isinstance(message, GroupChatMessage): 30 | self.messages.append( 31 | { 32 | "agent": {"type": sender_type, "key": sender_key}, 33 | "content": message.body.content, 34 | } 35 | ) 36 | print(f"Captured group chat message in NeedsUserInputHandler - {message}") 37 | return message 38 | 39 | @property 40 | def needs_human_input(self) -> bool: 41 | return self.question_for_human is not None 42 | 43 | @property 44 | def question_content(self) -> Optional[str]: 45 | if self.question_for_human: 46 | return self.question_for_human.content 47 | return None 48 | 49 | def get_messages(self) -> List[Dict[str, Any]]: 50 | messages = self.messages.copy() 51 | self.messages.clear() 52 | print("Returning and clearing captured messages in NeedsUserInputHandler") 53 | return messages 54 | 55 | 56 | class 
AssistantResponseHandler(DefaultInterventionHandler): 57 | def __init__(self): 58 | self.assistant_response: Optional[str] = None 59 | 60 | async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any: 61 | # Check if the message is from the assistant agent 62 | print( 63 | f"on_publish called in AssistantResponseHandler with message from sender: {sender} - {message}" 64 | ) 65 | if hasattr(message, "body") and sender and sender.type in ["writer", "editor"]: 66 | self.assistant_response = message.body.content 67 | print("Assistant response set in AssistantResponseHandler") 68 | return message 69 | 70 | @property 71 | def has_response(self) -> bool: 72 | has_response = self.assistant_response is not None 73 | print(f"has_response called, returning: {has_response}") 74 | return has_response 75 | 76 | def get_response(self) -> Optional[str]: 77 | response = self.assistant_response 78 | print(f"get_response called, returning: {response}") 79 | return response 80 | -------------------------------------------------------------------------------- /src/backend/helpers/coding.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing_extensions import Annotated 3 | from IPython import get_ipython 4 | 5 | default_path = "coding/" 6 | 7 | class IPythonUtils: 8 | 9 | def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str: 10 | """ 11 | run cell in ipython and return the execution result. 12 | """ 13 | ipython = get_ipython() 14 | result = ipython.run_cell(cell) 15 | log = str(result.result) 16 | if result.error_before_exec is not None: 17 | log += f"\n{result.error_before_exec}" 18 | if result.error_in_exec is not None: 19 | log += f"\n{result.error_in_exec}" 20 | return log 21 | 22 | def display_image( 23 | image_path: Annotated[str, "Path to image file to display."] 24 | ) -> str: 25 | """ 26 | Display image in Jupyter Notebook. 27 | """ 28 | log = __class__.exec_python( 29 | f"from IPython.display import Image, display\n\ndisplay(Image(filename='{image_path}'))" 30 | ) 31 | if not log: 32 | return "Image displayed successfully" 33 | else: 34 | return log 35 | 36 | 37 | class CodingUtils: # Borrowed from https://microsoft.github.io/autogen/docs/notebooks/agentchat_function_call_code_writing 38 | 39 | def list_dir(directory: Annotated[str, "Directory to check."]) -> str: 40 | """ 41 | List files in choosen directory. 42 | """ 43 | files = os.listdir(default_path + directory) 44 | return str(files) 45 | 46 | def see_file(filename: Annotated[str, "Name and path of file to check."]) -> str: 47 | """ 48 | Check the contents of a chosen file. 49 | """ 50 | with open(default_path + filename, "r") as file: 51 | lines = file.readlines() 52 | formatted_lines = [f"{i+1}:{line}" for i, line in enumerate(lines)] 53 | file_contents = "".join(formatted_lines) 54 | 55 | return file_contents 56 | 57 | def modify_code( 58 | filename: Annotated[str, "Name and path of file to change."], 59 | start_line: Annotated[int, "Start line number to replace with new code."], 60 | end_line: Annotated[int, "End line number to replace with new code."], 61 | new_code: Annotated[ 62 | str, 63 | "New piece of code to replace old code with. Remember about providing indents.", 64 | ], 65 | ) -> str: 66 | """ 67 | Replace old piece of code with new one. Proper indentation is important. 
68 | """ 69 | with open(default_path + filename, "r+") as file: 70 | file_contents = file.readlines() 71 | file_contents[start_line - 1 : end_line] = [new_code + "\n"] 72 | file.seek(0) 73 | file.truncate() 74 | file.write("".join(file_contents)) 75 | return "Code modified" 76 | 77 | def create_file_with_code( 78 | filename: Annotated[str, "Name and path of file to create."], 79 | code: Annotated[str, "Code to write in the file."], 80 | ) -> str: 81 | """ 82 | Create a new file with provided code. 83 | """ 84 | directory = os.path.dirname(default_path + filename) 85 | os.makedirs(directory, exist_ok=True) 86 | with open(default_path + filename, "w") as file: 87 | file.write(code) 88 | return "File created successfully" -------------------------------------------------------------------------------- /documentation/LocalDeployment.md: -------------------------------------------------------------------------------- 1 | # Guide to local development 2 | 3 | ## Requirements: 4 | 5 | - Python 3.10 or higher + PIP 6 | - Azure CLI, and an Azure Subscription 7 | - Visual Studio Code IDE 8 | 9 | ## Local deployment and debugging: 10 | 11 | 1. **Clone the repository.** 12 | 13 | 2. **Log into the Azure CLI:** 14 | 15 | - Check your login status using: 16 | ```bash 17 | az account show 18 | ``` 19 | - If not logged in, use: 20 | ```bash 21 | az login 22 | ``` 23 | - To specify a tenant, use: 24 | ```bash 25 | az login --tenant 16b3c013-0000-0000-0000-000000000 26 | ``` 27 | 28 | 3. **Create a Resource Group:** 29 | 30 | - You can create it either through the Azure Portal or the Azure CLI: 31 | ```bash 32 | az group create --name --location EastUS2 33 | ``` 34 | 35 | 4. **Deploy the Bicep template:** 36 | 37 | - You can use the Bicep extension for VSCode (Right-click the `.bicep` file, then select "Show deployment plane") or use the Azure CLI: 38 | ```bash 39 | az deployment group create -g -f deploy/macae-dev.bicep --query 'properties.outputs' 40 | ``` 41 | - **Note**: You will be prompted for a `principalId`, which is the ObjectID of your user in Entra ID. To find it, use the Azure Portal or run: 42 | ```bash 43 | az ad signed-in-user show --query id -o tsv 44 | ``` 45 | You will also be prompted for locations for Cosmos and Open AI services. This is to allow separate regions where there may be service quota restrictions 46 | 47 | 5. **Create a `.env` file:** 48 | 49 | - Navigate to the `src` folder and create a `.env` file based on the provided `.env.sample` file. 50 | 51 | 6. **Fill in the `.env` file:** 52 | 53 | - Use the output from the deployment or check the Azure Portal under "Deployments" in the resource group. 54 | 55 | 7. **(Optional) Set up a virtual environment:** 56 | 57 | - If you are using `venv`, create and activate your virtual environment for both the frontend and backend folders. 58 | 59 | 8. **Install requirements - frontend:** 60 | 61 | - In each of the frontend and backend folders - 62 | Open a terminal in the `src` folder and run: 63 | ```bash 64 | pip install -r requirements.txt 65 | ``` 66 | 67 | 9. **Run the application:** 68 | - From the src/backend directory: 69 | ```bash 70 | python app.py 71 | ``` 72 | - In a new terminal from the src/frontend directory 73 | ```bash 74 | python frontend_server.py 75 | ``` 76 | 77 | 10. Open a browser and navigate to `http://localhost:3000` 78 | 11. 
80 | ## Debugging the solution locally 81 | 82 | You can debug the API backend running locally with VSCode using the following launch.json entry: 83 | 84 | ``` 85 | { 86 | "name": "Python Debugger: Backend", 87 | "type": "debugpy", 88 | "request": "launch", 89 | "cwd": "${workspaceFolder}/src/backend", 90 | "module": "uvicorn", 91 | "args": ["app:app", "--reload"], 92 | "jinja": true 93 | } 94 | ``` 95 | To debug the Python server in the frontend directory (frontend_server.py) and related files, add the following launch.json entry: 96 | 97 | ``` 98 | { 99 | "name": "Python Debugger: Frontend", 100 | "type": "debugpy", 101 | "request": "launch", 102 | "cwd": "${workspaceFolder}/src/frontend", 103 | "module": "uvicorn", 104 | "args": ["frontend_server:app", "--port", "3000", "--reload"], 105 | "jinja": true 106 | } 107 | ``` 108 | 109 | -------------------------------------------------------------------------------- /src/frontend/wwwroot/utils.js: -------------------------------------------------------------------------------- 1 | 2 | // Utility to generate a SHA-256 hash of a string 3 | window.GenerateHash = async (data) => { 4 | const encoder = new TextEncoder(); 5 | const dataBuffer = encoder.encode(JSON.stringify(data)); // Convert the object to a string 6 | const hashBuffer = await crypto.subtle.digest('SHA-256', dataBuffer); 7 | const hashArray = Array.from(new Uint8Array(hashBuffer)); // Convert buffer to byte array 8 | const hashHex = hashArray.map(byte => byte.toString(16).padStart(2, '0')).join(''); 9 | return hashHex; // Return the hash as a hex string 10 | }; 11 | 12 | // Function to fetch authentication details from EasyAuth 13 | window.GetAuthDetails = async () => { 14 | // Check if we are running on the server (production environment) 15 | if (window.location.hostname !== 'localhost' && window.location.hostname !== '127.0.0.1') { 16 | // This code runs on the server 17 | try { 18 | const authResponse = await fetch('/.auth/me'); 19 | 20 | // Check if the request is successful 21 | if (!authResponse.ok) { 22 | console.log("Failed to fetch authentication details. Access to chat will be blocked."); 23 | return null; 24 | } 25 | 26 | // Parse the response to get user details 27 | const authData = await authResponse.json(); 28 | 29 | // Extract the user details (Azure returns an array, so we pick the first element) 30 | const userDetails = authData[0] || {}; 31 | 32 | // Construct headers using the global config object 33 | const headers = { 34 | 'Content-Type': 'application/json', 35 | 'X-Ms-Client-Principal': userDetails?.client_principal || '', 36 | 'X-Ms-Client-Principal-Id': userDetails?.user_claims?.find(claim => claim.typ === 'http://schemas.microsoft.com/identity/claims/objectidentifier')?.val || '', 37 | 'X-Ms-Client-Principal-Name': userDetails?.user_claims?.find(claim => claim.typ === 'name')?.val || '', 38 | 'X-Ms-Client-Principal-Idp': userDetails?.identity_provider || '', 39 | }; 40 | 41 | return headers; 42 | } catch (error) { 43 | console.error("Error fetching authentication details:", error); 44 | return null; 45 | } 46 | } else { 47 | // This code runs locally so set up mock headers 48 | console.log("Running locally.
Skipping authentication details fetch."); 49 | 50 | const mockUserDetails = { 51 | client_principal: 'mock-client-principal-id', 52 | user_claims: [ 53 | { typ: 'http://schemas.microsoft.com/identity/claims/objectidentifier', val: '12345678-abcd-efgh-ijkl-9876543210ab' }, // Mock Object ID 54 | { typ: 'name', val: 'Local User' }, // Mock Name 55 | { typ: 'email', val: 'localuser@example.com' }, // Mock Email (optional claim) 56 | ], 57 | identity_provider: 'mock-identity-provider', // Mock Identity Provider 58 | }; 59 | 60 | const headers = { 61 | 'Content-Type': 'application/json', 62 | 'X-Ms-Client-Principal': mockUserDetails.client_principal || '', 63 | 'X-Ms-Client-Principal-Id': mockUserDetails.user_claims?.find(claim => claim.typ === 'http://schemas.microsoft.com/identity/claims/objectidentifier')?.val || '', 64 | 'X-Ms-Client-Principal-Name': mockUserDetails.user_claims?.find(claim => claim.typ === 'name')?.val || '', 65 | 'X-Ms-Client-Principal-Idp': mockUserDetails.identity_provider || '', 66 | }; 67 | 68 | return headers; 69 | } 70 | }; 71 | -------------------------------------------------------------------------------- /src/frontend/wwwroot/app.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Financial Agent 7 | 8 | 9 | 10 | 11 | 12 | 13 |
14 |
16 | 17 | 26 | 31 | 32 |
33 |
34 | 35 | 36 | 37 | 38 | 39 |
40 |
41 | 42 | 55 | 56 | 69 | 70 | 83 | 84 | 85 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/images/stars.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/microsoft-logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /src/backend/agents/fundamental_analysis.py: -------------------------------------------------------------------------------- 1 | from typing import List, Dict, Any 2 | import pandas as pd 3 | import yfinance as yf 4 | 5 | from autogen_core import AgentId 6 | from autogen_core import default_subscription 7 | from autogen_ext.models.openai import AzureOpenAIChatCompletionClient 8 | from autogen_core.tools import FunctionTool, Tool 9 | 10 | from agents.base_agent import BaseAgent 11 | from context.cosmos_memory import CosmosBufferedChatCompletionContext 12 | from helpers.fmputils import * 13 | from helpers.yfutils import * 14 | from helpers.analyzer import * 15 | from datetime import date, timedelta, datetime 16 | 17 | from textwrap import dedent  # used for the agent's system_message below 18 | import os 19 | import requests 20 | from autogen_core.tools import FunctionTool, Tool 21 | 22 | async def fetch_and_analyze_fundamentals(ticker_symbol: str) -> Dict[str, Any]: 23 | """ 24 | Fetch up to 5 years of fundamental data (Income Statement, Balance Sheet, Cash Flow) 25 | from Financial Modeling Prep (FMP), together with analyst ratings and 26 | financial scores (e.g. Altman Z-score, Piotroski F-score), for the given ticker. 27 | 28 | Returns a JSON-serializable dict with: 29 | - 'financial_metrics': up to 5 years of key financial metrics 30 | - 'ratings': analyst ratings 31 | - 'financial_scores': composite scores such as Altman Z-score and Piotroski F-score 32 | - 'notes': any error messages recorded during the fetch 33 | (all values are returned as provided by the FMP helpers) 34 | """ 35 | 36 | result = { 37 | "ticker_symbol": ticker_symbol, 38 | "financial_metrics": [], 39 | "ratings": {}, 40 | "financial_scores": [] 41 | } 42 | 43 | try: 44 | financialMetrics = fmpUtils.get_financial_metrics(ticker_symbol) 45 | ratings = fmpUtils.get_ratings(ticker_symbol) 46 | financialScores = fmpUtils.get_financial_scores(ticker_symbol) 47 | 48 | result["financial_metrics"] = financialMetrics 49 | result["ratings"] = ratings 50 | result["financial_scores"] = financialScores 51 | 52 | except Exception as e: 53 | result.setdefault("notes", []).append(f"Exception during fetch: {e}") 54 | 55 | return result 56 | 57 | 58 | def get_fundamental_analysis_tools() -> List[Tool]: 59 | """ 60 | Return a list of Tools for the Fundamental Analysis Agent 61 | that fetch data from Financial Modeling Prep (FMP). 62 | """ 63 | return [ 64 | FunctionTool( 65 | fetch_and_analyze_fundamentals, 66 | description=( 67 | "Fetch fundamental data (Income, Balance, Cash Flow) " 68 | "and compute ratios (ROE, ROA, Altman Z, Piotroski, etc.) for a given ticker." 69 | ), 70 | ) 71 | ] 72 | 73 | @default_subscription 74 | class FundamentalAnalysisAgent(BaseAgent): 75 | """ 76 | A dedicated agent to perform fundamental analysis over the last ~5 years 77 | by pulling data from Financial Modeling Prep (FMP). 78 | Computes key ratios or scores (ROE, ROA, Altman Z, Piotroski, etc.).
79 | """ 80 | def __init__( 81 | self, 82 | model_client: AzureOpenAIChatCompletionClient, 83 | session_id: str, 84 | user_id: str, 85 | memory: CosmosBufferedChatCompletionContext, 86 | fundamental_analysis_tools: List[Tool], 87 | fundamental_analysis_tool_agent_id: AgentId, 88 | ): 89 | super().__init__( 90 | "FundamentalAnalysisAgent", 91 | model_client, 92 | session_id, 93 | user_id, 94 | memory, 95 | fundamental_analysis_tools, 96 | fundamental_analysis_tool_agent_id, 97 | system_message=dedent( 98 | """ 99 | You are a Fundamental Analysis Agent. 100 | Your role is to retrieve and analyze up to 5 years of fundamental data 101 | (cash flow, income statements, balance sheets) for a given ticker 102 | using the Financial Modeling Prep API. 103 | You also compute basic ratios like ROE, ROA, and placeholders for 104 | Altman Z-score and Piotroski F-score. 105 | Return the data and computations in structured JSON. 106 | """ 107 | ) 108 | ) -------------------------------------------------------------------------------- /deploy/macae-dev.bicep: -------------------------------------------------------------------------------- 1 | @description('Location for all resources.') 2 | param location string = resourceGroup().location 3 | 4 | @description('location for Cosmos DB resources.') 5 | // prompt for this as there is often quota restrictions 6 | param cosmosLocation string 7 | 8 | @description('Location for OpenAI resources.') 9 | // prompt for this as there is often quota restrictions 10 | param azureOpenAILocation string 11 | 12 | @description('A prefix to add to the start of all resource names. Note: A "unique" suffix will also be added') 13 | param prefix string = 'macae' 14 | 15 | @description('Tags to apply to all deployed resources') 16 | param tags object = {} 17 | 18 | @description('Principal ID to assign to the Cosmos DB contributor & Azure OpenAI user role, leave empty to skip role assignment. 
This is your ObjectID wihtin Entra ID.') 19 | param developerPrincipalId string 20 | 21 | var uniqueNameFormat = '${prefix}-{0}-${uniqueString(resourceGroup().id, prefix)}' 22 | var aoaiApiVersion = '2024-08-01-preview' 23 | 24 | resource openai 'Microsoft.CognitiveServices/accounts@2023-10-01-preview' = { 25 | name: format(uniqueNameFormat, 'openai') 26 | location: azureOpenAILocation 27 | tags: tags 28 | kind: 'OpenAI' 29 | sku: { 30 | name: 'S0' 31 | } 32 | properties: { 33 | customSubDomainName: format(uniqueNameFormat, 'openai') 34 | } 35 | resource gpt4o 'deployments' = { 36 | name: 'gpt-4o' 37 | sku: { 38 | name: 'GlobalStandard' 39 | capacity: 15 40 | } 41 | properties: { 42 | model: { 43 | format: 'OpenAI' 44 | name: 'gpt-4o' 45 | version: '2024-08-06' 46 | } 47 | versionUpgradeOption: 'NoAutoUpgrade' 48 | } 49 | } 50 | } 51 | 52 | resource aoaiUserRoleDefinition 'Microsoft.Authorization/roleDefinitions@2022-05-01-preview' existing = { 53 | name: '5e0bd9bd-7b93-4f28-af87-19fc36ad61bd' //'Cognitive Services OpenAI User' 54 | } 55 | 56 | resource devAoaiRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if(!empty(trim(developerPrincipalId))) { 57 | name: guid(developerPrincipalId, openai.id, aoaiUserRoleDefinition.id) 58 | scope: openai 59 | properties: { 60 | principalId: developerPrincipalId 61 | roleDefinitionId: aoaiUserRoleDefinition.id 62 | principalType: 'User' 63 | } 64 | } 65 | 66 | resource cosmos 'Microsoft.DocumentDB/databaseAccounts@2024-05-15' = { 67 | name: format(uniqueNameFormat, 'cosmos') 68 | location: cosmosLocation 69 | tags: tags 70 | kind: 'GlobalDocumentDB' 71 | properties: { 72 | databaseAccountOfferType: 'Standard' 73 | enableFreeTier: false 74 | locations: [ 75 | { 76 | failoverPriority: 0 77 | locationName: cosmosLocation 78 | } 79 | ] 80 | } 81 | 82 | resource contributorRoleDefinition 'sqlRoleDefinitions' existing = { 83 | name: '00000000-0000-0000-0000-000000000002' 84 | } 85 | 86 | resource devRoleAssignment 'sqlRoleAssignments' = if(!empty(trim(developerPrincipalId))) { 87 | name: guid(developerPrincipalId, contributorRoleDefinition.id) 88 | properties: { 89 | principalId: developerPrincipalId 90 | roleDefinitionId: contributorRoleDefinition.id 91 | scope: cosmos.id 92 | } 93 | } 94 | 95 | resource autogenDb 'sqlDatabases' = { 96 | name: 'autogen' 97 | properties: { 98 | resource: { 99 | id: 'autogen' 100 | createMode: 'Default' 101 | } 102 | options: { 103 | throughput: 400 104 | } 105 | } 106 | 107 | resource memoryContainer 'containers' = { 108 | name: 'memory' 109 | properties: { 110 | resource: { 111 | id: 'memory' 112 | partitionKey: { 113 | kind: 'Hash' 114 | version: 2 115 | paths: [ 116 | '/session_id' 117 | ] 118 | } 119 | } 120 | } 121 | } 122 | } 123 | } 124 | 125 | 126 | 127 | output COSMOSDB_ENDPOINT string = cosmos.properties.documentEndpoint 128 | output COSMOSDB_DATABASE string = cosmos::autogenDb.name 129 | output COSMOSDB_CONTAINER string = cosmos::autogenDb::memoryContainer.name 130 | output AZURE_OPENAI_ENDPOINT string = openai.properties.endpoint 131 | output AZURE_OPENAI_DEPLOYMENT_NAME string = openai::gpt4o.name 132 | output AZURE_OPENAI_API_VERSION string = aoaiApiVersion 133 | 134 | -------------------------------------------------------------------------------- /src/backend/helpers/summarizeutils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | import json 4 | import pandas as pd 5 | from datetime import date, timedelta, datetime 6 
| from typing import Annotated 7 | 8 | SavePathType = Annotated[str, "File path to save data. If None, data is not saved."] 9 | 10 | def summarize(description: str) -> str: 11 | try: 12 | print("*"*35) 13 | print("Calling summarize") 14 | print("*"*35) 15 | AZURE_OPENAI_DEPLOYMENT_NAME = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME") 16 | AZURE_OPENAI_API_VERSION = os.getenv("AZURE_OPENAI_API_VERSION") 17 | AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT") 18 | 19 | url = f"{AZURE_OPENAI_ENDPOINT}/openai/deployments/{AZURE_OPENAI_DEPLOYMENT_NAME}/chat/completions?api-version={AZURE_OPENAI_API_VERSION}" 20 | headers = { 21 | 'api-key': os.getenv("AZURE_OPENAI_KEY"), 22 | "Content-Type": "application/json", 23 | } 24 | 25 | # Payload for the request 26 | payload = { 27 | "messages": [ 28 | { 29 | "role": "system", 30 | "content": [ 31 | { 32 | "type": "text", 33 | "text": "You are an AI assistant that will summarize the user input. You will not answer questions or respond to statements that are focused about" 34 | } 35 | ] 36 | }, 37 | { 38 | "role": "user", 39 | "content": description 40 | } 41 | ], 42 | "temperature": 0.7, 43 | "top_p": 0.95, 44 | "max_tokens": 1200 45 | } 46 | # Send request 47 | response_json = requests.post(url, headers=headers, json=payload) 48 | return json.loads(response_json.text)['choices'][0]['message']['content'] 49 | except Exception as e: 50 | return "I am sorry, I am unable to summarize the input at this time." 51 | 52 | def summarizeTopic(description: str, topic:str) -> str: 53 | try: 54 | print("*"*35) 55 | print("Calling summarizeTopic") 56 | print("*"*35) 57 | AZURE_OPENAI_DEPLOYMENT_NAME = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME") 58 | AZURE_OPENAI_API_VERSION = os.getenv("AZURE_OPENAI_API_VERSION") 59 | AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT") 60 | 61 | url = f"{AZURE_OPENAI_ENDPOINT}/openai/deployments/{AZURE_OPENAI_DEPLOYMENT_NAME}/chat/completions?api-version={AZURE_OPENAI_API_VERSION}" 62 | headers = { 63 | 'api-key': os.getenv("AZURE_OPENAI_KEY"), 64 | "Content-Type": "application/json", 65 | } 66 | 67 | # Payload for the request 68 | payload = { 69 | "messages": [ 70 | { 71 | "role": "system", 72 | "content": [ 73 | { 74 | "type": "text", 75 | "text": f"You are an AI assistant that will summarize the user input on a {topic}. You will not answer questions or respond to statements that are focused about" 76 | } 77 | ] 78 | }, 79 | { 80 | "role": "user", 81 | "content": description 82 | } 83 | ], 84 | "temperature": 0.7, 85 | "top_p": 0.95, 86 | "max_tokens": 1200 87 | } 88 | # Send request 89 | response_json = requests.post(url, headers=headers, json=payload) 90 | print("response_json", response_json.text) 91 | return json.loads(response_json.text)['choices'][0]['message']['content'] 92 | except Exception as e: 93 | return "I am sorry, I am unable to summarize the topic at this time." 
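# Illustrative usage sketch (added for documentation; not part of the original module).
# It assumes AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_DEPLOYMENT_NAME,
# AZURE_OPENAI_API_VERSION and AZURE_OPENAI_KEY are already set in the environment,
# and the sample text below is made up.
if __name__ == "__main__":
    earnings_note = "Revenue grew 12% year over year while operating margins expanded."
    print(summarize(earnings_note))                        # general-purpose summary
    print(summarizeTopic(earnings_note, "margin trends"))  # summary focused on a single topic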
94 | 95 | def get_next_weekday(date): 96 | 97 | if not isinstance(date, datetime): 98 | date = datetime.strptime(date, "%Y-%m-%d") 99 | 100 | if date.weekday() >= 5: 101 | days_to_add = 7 - date.weekday() 102 | next_weekday = date + timedelta(days=days_to_add) 103 | return next_weekday 104 | else: 105 | return date 106 | 107 | async def save_output(data: pd.DataFrame, tag: str, save_path: SavePathType = None) -> None: 108 | if save_path: 109 | await data.to_csv(save_path) 110 | print(f"{tag} saved to {save_path}") 111 | 112 | 113 | def get_current_date(): 114 | return date.today().strftime("%Y-%m-%d") -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL Advanced" 13 | 14 | on: 15 | push: 16 | branches: [ "main", "dev", "demo" ] 17 | pull_request: 18 | branches: [ "main", "dev", "demo" ] 19 | schedule: 20 | - cron: '44 20 * * 2' 21 | 22 | jobs: 23 | analyze: 24 | name: Analyze (${{ matrix.language }}) 25 | # Runner size impacts CodeQL analysis time. To learn more, please see: 26 | # - https://gh.io/recommended-hardware-resources-for-running-codeql 27 | # - https://gh.io/supported-runners-and-hardware-resources 28 | # - https://gh.io/using-larger-runners (GitHub.com only) 29 | # Consider using larger runners or machines with greater resources for possible analysis time improvements. 30 | runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} 31 | permissions: 32 | # required for all workflows 33 | security-events: write 34 | 35 | # required to fetch internal or private CodeQL packs 36 | packages: read 37 | 38 | # only required for workflows in private repositories 39 | actions: read 40 | contents: read 41 | 42 | strategy: 43 | fail-fast: false 44 | matrix: 45 | include: 46 | - language: javascript-typescript 47 | build-mode: none 48 | - language: python 49 | build-mode: none 50 | # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' 51 | # Use `c-cpp` to analyze code written in C, C++ or both 52 | # Use 'java-kotlin' to analyze code written in Java, Kotlin or both 53 | # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both 54 | # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, 55 | # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. 
56 | # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how 57 | # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages 58 | steps: 59 | - name: Checkout repository 60 | uses: actions/checkout@v4 61 | 62 | # Initializes the CodeQL tools for scanning. 63 | - name: Initialize CodeQL 64 | uses: github/codeql-action/init@v3 65 | with: 66 | languages: ${{ matrix.language }} 67 | build-mode: ${{ matrix.build-mode }} 68 | # If you wish to specify custom queries, you can do so here or in a config file. 69 | # By default, queries listed here will override any specified in a config file. 70 | # Prefix the list here with "+" to use these queries and those in the config file. 71 | 72 | # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 73 | # queries: security-extended,security-and-quality 74 | 75 | # If the analyze step fails for one of the languages you are analyzing with 76 | # "We were unable to automatically build your code", modify the matrix above 77 | # to set the build mode to "manual" for that language. Then modify this step 78 | # to build your code. 79 | # ℹ️ Command-line programs to run using the OS shell. 80 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun 81 | - if: matrix.build-mode == 'manual' 82 | shell: bash 83 | run: | 84 | echo 'If you are using a "manual" build mode for one or more of the' \ 85 | 'languages you are analyzing, replace this with the commands to build' \ 86 | 'your code, for example:' 87 | echo ' make bootstrap' 88 | echo ' make release' 89 | exit 1 90 | 91 | - name: Perform CodeQL Analysis 92 | uses: github/codeql-action/analyze@v3 93 | with: 94 | category: "/language:${{matrix.language}}" 95 | -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/theme.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --bulma-link-h: 200deg; 3 | --bulma-info-s: 0%; 4 | --bulma-link-l: 0%; 5 | 6 | --bulma-info-h: 237deg; 7 | --bulma-info-s: 80%; 8 | --bulma-info-l: 60%; 9 | 10 | --bulma-focus-h: 237deg; 11 | --bulma-focus-s: 80%; 12 | --bulma-focus-l: 60%; 13 | 14 | --bulma-scheme-h: 200; 15 | --bulma-scheme-s: 0%; 16 | 17 | --bulma-soft-l: 80%; 18 | --bulma-bold-l: 0%; 19 | 20 | --bulma-family-primary: ui-sans-serif, -apple-system, system-ui, Segoe UI, Helvetica, Apple Color Emoji, Arial, sans-serif, Segoe UI Emoji, Segoe UI Symbol; 21 | --bulma-family-secondary: ui-sans-serif, -apple-system, system-ui, Segoe UI, Helvetica, Apple Color Emoji, Arial, sans-serif, Segoe UI Emoji, Segoe UI Symbol; 22 | 23 | --bulma-body-background-color: hsl(0, 100%, 100%); 24 | --bulma-body-color: rgb(74, 74, 74); 25 | --bulma-strong-color: hsl(0, 0%, 0%); 26 | --bulma-control-radius: 1000px; 27 | --bulma-info-invert-l: 98%; 28 | 29 | --bulma-success-h: 134deg; 30 | --bulma-success-s: 61%; 31 | --bulma-success-l: 41%; 32 | --bulma-success-invert-l: 98%; 33 | 34 | --bulma-danger-h: 354deg; 35 | --bulma-danger-s: 70%; 36 | --bulma-danger-l: 54%; 37 | } 38 | 39 | .card { 40 | --bulma-card-color: rgb(74, 74, 74); 41 | --bulma-card-shadow: rgba(51, 51, 51, 0.05) 0px 1px 2px 0px, 42 | 
rgba(51, 51, 51, 0.05) 0px 2px 4px 0px, 43 | rgb(214, 217, 224) 0px 0px 0px 1px inset; 44 | --bulma-card-header-shadow: 0; 45 | } 46 | 47 | .card.is-hoverable { 48 | transition: all 0.2s ease-in-out; 49 | } 50 | 51 | .card.is-hoverable:hover { 52 | cursor: pointer; 53 | transform: translateY(-2px); 54 | --bulma-card-shadow: 0 0 2px rgba(0, 0, 0, 0.12), 55 | 0 4px 8px rgba(0, 0, 0, 0.14), 56 | rgb(70, 79, 235) 0px 0px 0px 1px inset; 57 | } 58 | 59 | .media { 60 | --bulma-media-border-size: 0; 61 | } 62 | 63 | .modal-card-head { 64 | box-shadow: 0px 1px 1px 0px rgb(214, 217, 224); 65 | } 66 | 67 | .modal { 68 | --bulma-modal-background-background-color: hsla(0, 0%, 0%, 0.5); 69 | --bulma-modal-card-head-background-color: hsl(0, 0%, 100%); 70 | --bulma-modal-card-title-color: hsl(0, 0%, 0%); 71 | --bulma-modal-card-foot-background-color: hsl(0, 0%, 100%); 72 | --bulma-modal-card-body-background-color: hsl(0, 0%, 100%); 73 | } 74 | 75 | .box { 76 | --bulma-box-color: hsl(var(--bulma-menu-item-h), var(--bulma-menu-item-s), var(--bulma-menu-item-color-l)); 77 | --bulma-box-shadow: rgb(214, 217, 224) 0px 0px 0px 1px; 78 | } 79 | 80 | .panel { 81 | --bulma-panel-shadow: rgb(214, 217, 224) 0px 0px 0px 1px; 82 | --bulma-panel-block-hover-background-color: hsl(0, 0%, 100%); 83 | } 84 | 85 | .tabs { 86 | --bulma-tabs-link-active-border-bottom-color: hsl(0, 0%, 0%); 87 | --bulma-tabs-link-active-color: hsl(0, 0%, 0%); 88 | } 89 | 90 | .tag { 91 | border-radius: var(--bulma-control-radius); 92 | font-weight: 500; 93 | } 94 | 95 | .menu-label { 96 | --bulma-menu-label-color: hsl(0, 0%, 0%); 97 | --bulma-menu-label-font-size: .85rem; 98 | text-transform: none; 99 | font-weight: 500; 100 | letter-spacing: 0; 101 | } 102 | 103 | .menu-list i { 104 | color: hsl(0, 0%, 0%); 105 | } 106 | 107 | .title { 108 | font-weight: 700; 109 | color: hsl(0, 0%, 0%); 110 | } 111 | 112 | .button { 113 | --bulma-button-ghost-color: hsl(0, 0%, 0%); 114 | } 115 | 116 | .modal { 117 | --bulma-modal-card-title-size: 1.15rem; 118 | --bulma-modal-card-head-padding: 1rem 2rem; 119 | } 120 | 121 | .modal-card-title { 122 | font-weight: 600; 123 | } 124 | 125 | .modal-card-head { 126 | display: flex; 127 | align-items: center; 128 | } 129 | 130 | .modal-card-head .button { 131 | --bulma-button-padding-horizontal: 0.68rem; 132 | } 133 | 134 | .modal-card-head .button i { 135 | margin-top: 1px; 136 | } 137 | 138 | .dropdown-content { 139 | padding: var(--bulma-dropdown-content-padding-bottom); 140 | box-shadow: var(--bulma-dropdown-content-shadow), rgb(214, 217, 224) 0px 0px 0px 1px inset; 141 | } 142 | 143 | .dropdown-content a.dropdown-item { 144 | border-radius: 0.25rem; 145 | } 146 | 147 | .input:focus, 148 | .input:focus-within, 149 | .is-focused.input, 150 | .is-focused.textarea, 151 | .select select.is-focused, 152 | .select select:focus, 153 | .select select:focus-within, 154 | .textarea:focus, 155 | .textarea:focus-within { 156 | border-color: hsl(var(--bulma-input-focus-h), var(--bulma-input-focus-s), var(--bulma-input-focus-l)); 157 | box-shadow: var(--bulma-input-focus-shadow-size) hsla(var(--bulma-input-focus-h), var(--bulma-input-focus-s), var(--bulma-input-focus-l), var(--bulma-input-focus-shadow-alpha)); 158 | } 159 | 160 | .progress::-webkit-progress-value { 161 | transition: width 0.5s ease; 162 | } -------------------------------------------------------------------------------- /src/backend/Playground.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 
| "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from typing import List\n", 10 | "\n", 11 | "from autogen_core import AgentId\n", 12 | "from autogen_core import default_subscription\n", 13 | "from autogen_ext.models.openai import AzureOpenAIChatCompletionClient\n", 14 | "from autogen_core.tools import FunctionTool, Tool\n", 15 | "from typing_extensions import Annotated\n", 16 | "\n", 17 | "from agents.base_agent import BaseAgent\n", 18 | "from context.cosmos_memory import CosmosBufferedChatCompletionContext\n", 19 | "from helpers.fmputils import *\n", 20 | "from helpers.yfutils import *\n", 21 | "from datetime import date, timedelta, datetime\n", 22 | "from helpers.summarizeutils import summarize, summarizeTopic\n", 23 | "from helpers.analyzer import *\n", 24 | "from helpers.reports import ReportLabUtils\n", 25 | "from helpers.charting import ReportChartUtils\n", 26 | "from azure.identity import ClientSecretCredential, DefaultAzureCredential\n", 27 | "from azure.storage.blob import BlobServiceClient, ContentSettings, generate_blob_sas\n", 28 | "from config import Config\n", 29 | "import uuid\n", 30 | "from helpers.azureblob import azureBlobApi" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 5, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "async def copyReport(tenantId, clientId, clientSecret, blobAccountName, downloadPath, blobName, openAiBlobContainer):\n", 40 | " try:\n", 41 | " with open(downloadPath, \"rb\") as file:\n", 42 | " readBytes = file.read()\n", 43 | " credentials = ClientSecretCredential(tenantId, clientId, clientSecret)\n", 44 | " blobService = BlobServiceClient(\n", 45 | " \"https://{}.blob.core.windows.net\".format(blobAccountName), credential=credentials)\n", 46 | " blobClient = blobService.get_blob_client(container=openAiBlobContainer, blob=blobName)\n", 47 | " blobClient.upload_blob(readBytes,overwrite=True)\n", 48 | " return blobClient.url\n", 49 | " except Exception as e:\n", 50 | " print(\"Error in copyReport: \", e)\n", 51 | " return None" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 7, 57 | "metadata": {}, 58 | "outputs": [ 59 | { 60 | "name": "stdout", 61 | "output_type": "stream", 62 | "text": [ 63 | "Local File Name: reports\\MSFT_Equity_Research_report.pdf\n", 64 | "Blob File Name: b8990f6a-29e3-40c9-8ccf-98b1613ee2e4_MSFTEquity_Research_report.pdf\n", 65 | "Blob api key found successfully.\n", 66 | "Blob URL: https://astdnapubstor.blob.core.windows.net/reports/b8990f6a-29e3-40c9-8ccf-98b1613ee2e4_MSFTEquity_Research_report.pdf\n" 67 | ] 68 | } 69 | ], 70 | "source": [ 71 | "ticker_symbol = \"MSFT\"\n", 72 | "\n", 73 | "blobFileName = \"{}_{}Equity_Research_report.pdf\".format(str(uuid.uuid4()), ticker_symbol)\n", 74 | "localFileName = \"reports\\\\{}_Equity_Research_report.pdf\".format(ticker_symbol)\n", 75 | "print(\"Local File Name: \", localFileName)\n", 76 | "print(\"Blob File Name: \", blobFileName)\n", 77 | "\n", 78 | "blobUrl = azureBlobApi.copyReport(\"reports\\\\{}_Equity_Research_report.pdf\".format(ticker_symbol), blobFileName)\n", 79 | "\n", 80 | "print(\"Blob URL: \", blobUrl)\n" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": 2, 86 | "metadata": {}, 87 | "outputs": [ 88 | { 89 | "name": "stdout", 90 | "output_type": "stream", 91 | "text": [ 92 | "****************\n", 93 | "reportDir: /app/backend/reports/\n", 94 | "****************\n" 95 | ] 96 | } 97 | ], 98 | "source": [ 99 | "if 
Config.APP_IN_CONTAINER:\n", 100 | " reportDir = \"/app/backend/reports/\"\n", 101 | "else:\n", 102 | " reportDir = \"reports\\\\\"\n", 103 | "\n", 104 | "print(\"****************\")\n", 105 | "print(\"reportDir: \", reportDir)\n", 106 | "print(\"****************\")" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": null, 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [] 115 | } 116 | ], 117 | "metadata": { 118 | "kernelspec": { 119 | "display_name": "Python 3", 120 | "language": "python", 121 | "name": "python3" 122 | }, 123 | "language_info": { 124 | "codemirror_mode": { 125 | "name": "ipython", 126 | "version": 3 127 | }, 128 | "file_extension": ".py", 129 | "mimetype": "text/x-python", 130 | "name": "python", 131 | "nbconvert_exporter": "python", 132 | "pygments_lexer": "ipython3", 133 | "version": "3.11.9" 134 | } 135 | }, 136 | "nbformat": 4, 137 | "nbformat_minor": 2 138 | } 139 | -------------------------------------------------------------------------------- /src/backend/agents/agentutils.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from autogen_core.models import (AssistantMessage) 4 | from autogen_ext.models.openai import AzureOpenAIChatCompletionClient 5 | from pydantic import BaseModel 6 | 7 | from context.cosmos_memory import CosmosBufferedChatCompletionContext 8 | from models.messages import Step 9 | 10 | common_agent_system_message = "If you do not have the information for the arguments of the function you need to call, do not call the function. Instead, respond back to the user requesting further information. You must not hallucinate or invent any of the information used as arguments in the function. For example, if you need to call a function that requires a delivery address, you must not generate 123 Example St. You must skip calling functions and return a clarification message along the lines of: Sorry, I'm missing some information I need to help you with that. Could you please provide the delivery address so I can do that for you?" 11 | 12 | 13 | async def extract_and_update_transition_states( 14 | step: Step, 15 | session_id: str, 16 | user_id: str, 17 | planner_dynamic_or_workflow: str, 18 | model_client: AzureOpenAIChatCompletionClient, 19 | ): 20 | """ 21 | This function extracts the identified target state and transition from the LLM response and updates the step with the identified target state and transition. This is reliant on the agent_reply already being present. 22 | """ 23 | planner_dynamic_or_workflow = "workflow" 24 | if planner_dynamic_or_workflow == "workflow": 25 | 26 | class FSMStateAndTransition(BaseModel): 27 | identifiedTargetState: str 28 | identifiedTargetTransition: str 29 | 30 | cosmos = CosmosBufferedChatCompletionContext(session_id or "", user_id) 31 | combined_LLM_messages = [ 32 | AssistantMessage(content=step.action, source="GroupChatManager") 33 | ] 34 | combined_LLM_messages.extend( 35 | [AssistantMessage(content=step.agent_reply, source="AgentResponse")] 36 | ) 37 | combined_LLM_messages.extend( 38 | [ 39 | AssistantMessage( 40 | content="Based on the above conversation between two agents, I need you to identify the identifiedTargetState and identifiedTargetTransition values. Only return these values. Do not make any function calls. 
If you are unable to work out the next transition state, return ERROR.", 41 | source="GroupChatManager", 42 | ) 43 | ] 44 | ) 45 | 46 | # TODO - from local testing, this step is often causing the app to hang. It's unclear why- often the first time it fails when running a workflow that requires human input. If the app is manually restarted, it works the second time. However this is not consistent- sometimes it will work fine the first time. It may be the LLM generating some invalid characters which is causing errors on the JSON formatting. However, even when attempting a timeout and retry, the timeout with asnycio would never trigger. It's unclear what the issue is here. 47 | # Get the LLM response 48 | llm_temp_result = await model_client.create( 49 | combined_LLM_messages, 50 | extra_create_args={"response_format": FSMStateAndTransition}, 51 | ) 52 | content = llm_temp_result.content 53 | 54 | # Parse the LLM response 55 | parsed_result = json.loads(content) 56 | structured_plan = FSMStateAndTransition(**parsed_result) 57 | 58 | # update the steps 59 | step.identified_target_state = structured_plan.identifiedTargetState 60 | step.identified_target_transition = structured_plan.identifiedTargetTransition 61 | 62 | await cosmos.update_step(step) 63 | return step 64 | 65 | 66 | # async def set_next_viable_step_to_runnable(session_id): 67 | # cosmos = CosmosBufferedChatCompletionContext(session_id) 68 | # plan_with_steps = await cosmos.get_plan_with_steps(session_id) 69 | # if plan_with_steps.overall_status != PlanStatus.completed: 70 | # for step_object in plan_with_steps.steps: 71 | # if step_object.status not in [StepStatus.rejected, StepStatus.completed]: 72 | # step_object.runnable = True 73 | # await cosmos.update_step(step_object) 74 | # break 75 | 76 | 77 | # async def initiate_replanning(session_id): 78 | # from utils import handle_input_task_wrapper 79 | 80 | # cosmos = CosmosBufferedChatCompletionContext(session_id) 81 | # plan_with_steps = await cosmos.get_plan_with_steps(session_id) 82 | # input_task = InputTask( 83 | # session_id=plan_with_steps.session_id, 84 | # description=plan_with_steps.initial_goal, 85 | # planner_type=plan_with_steps.planner_type, 86 | # new_plan_or_replanning="replanning", 87 | # human_comments_on_overall_plan=plan_with_steps.human_comments_on_overall_plan, 88 | # planner_dynamic_or_workflow=plan_with_steps.planner_dynamic_or_workflow, 89 | # workflowName=plan_with_steps.workflowName, 90 | # ) 91 | # await handle_input_task_wrapper(input_task) 92 | -------------------------------------------------------------------------------- /TRANSPARENCY_FAQS.md: -------------------------------------------------------------------------------- 1 | # Multi-Agent: Custom Automation Engine – Solution Accelerator : Responsible AI FAQ 2 | 3 | ## What is the Multi Agent: Custom Automation Engine – Solution Accelerator? 4 | Multi Agent: Custom Automation Engine – Solution Accelerator is an open-source GitHub Repository that enables users to solve complex tasks using multiple agents. The accelerator is designed to be generic across business tasks. The user enters a task and a planning LLM formulates a plan to complete that task. The system then dynamically generates agents which can complete the task. The system also allows the user to create actions that agents can take (for example sending emails or scheduling orientation sessions for new employees). These actions are taken into account by the planner and dynamically created agents may be empowered to take these actions. 
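As a rough illustration of the action pattern described above, a user-defined action can be exposed to an agent as a function tool. The snippet below is only a hedged sketch: `send_welcome_email` and `get_onboarding_tools` are hypothetical names invented for this example, while `FunctionTool` mirrors the registration style used by the agents in this repository.

```python
from typing import List

from autogen_core.tools import FunctionTool, Tool


async def send_welcome_email(recipient: str, subject: str) -> str:
    # Hypothetical action: a real implementation would call an email service here.
    return f"Email '{subject}' queued for {recipient}."


def get_onboarding_tools() -> List[Tool]:
    # Tools registered this way are taken into account by the planner, and a
    # dynamically created agent may invoke them, subject to human approval.
    return [
        FunctionTool(
            send_welcome_email,
            description="Send a welcome email, e.g. to schedule an orientation session for a new employee.",
        )
    ]
```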
5 | 6 | ## What can the Multi Agent: Custom Automation Engine – Solution Accelerator do? 7 | The solution accelerator is designed to replace and enhance enterprise workflows and processes with intelligent automation. Agents can specialize in various functions and work together to achieve an objective as specified by the user. The accelerator will integrate seamlessly with existing systems and is designed to scale according to the needs of the customer. The system allows users to review, reorder and approve steps generated in a plan, ensuring human oversight. The system uses function calling with LLMs to perform actions, users can approve or modify these actions. 8 | 9 | ## What is/are Multi Agent: Custom Automation Engine – Solution Accelerator’s intended use(s)? 10 | This repository is to be used only as a solution accelerator following the open-source license terms listed in the GitHub repository. The example scenario’s intended purpose is to demonstrate how users can analyze and process audio files and call transcripts to help them work more efficiently and streamline their human made decisions. 11 | 12 | ## How was Multi Agent: Custom Automation Engine – Solution Accelerator evaluated? What metrics are used to measure performance? 13 | The evaluation process includes human review of the outputs, and tuned LLM prompts to produce relevant responses. It's worth noting that the system is designed to be highly customizable and can be tailored to fit specific business needs and use cases. As such, the metrics used to evaluate the system's performance may vary depending on the specific use case and business requirements. 14 | 15 | ## What are the limitations of Multi Agent: Custom Automation Engine – Solution Accelerator? How can users minimize the impact Multi Agent: Custom Automation Engine – Solution Accelerator’s limitations when using the system? 16 | The system allows users to review, reorder and approve steps generated in a plan, ensuring human oversight. The system uses function calling with LLMs to perform actions, users can approve or modify these actions. Users of the accelerator should review the system prompts provided and update as per their organizational guidance. Users should run their own evaluation flow either using the guidance provided in the GitHub repository or their choice of evaluation methods. 17 | Note that the Multi Agent: Custom Automation Engine – Solution Accelerator relies on the AutoGen Multi Agent framework. AutoGen has published their own [list of limitations and impacts](https://github.com/microsoft/autogen/blob/gaia_multiagent_v01_march_1st/TRANSPARENCY_FAQS.md#what-are-the-limitations-of-autogen-how-can-users-minimize-the-impact-of-autogens-limitations-when-using-the-system). 18 | 19 | ## What operational factors and settings allow for effective and responsible use of Multi Agent: Custom Automation Engine – Solution Accelerator? 20 | Effective and responsible use of the Multi Agent: Custom Automation Engine – Solution Accelerator depends on several operational factors and settings. The system is designed to perform reliably and safely across a range of business tasks that it was evaluated for. Users can customize certain settings, such as the planning language model used by the system, the types of tasks that agents are assigned, and the specific actions that agents can take (e.g., sending emails or scheduling orientation sessions for new employees). 
However, it's important to note that these choices may impact the system's behavior in real-world scenarios. 21 | For example, selecting a planning language model that is not well-suited to the complexity of the tasks may result in lower accuracy and performance. Similarly, assigning tasks that are outside the system's intended scope may lead to errors or incomplete results. Users can choose the LLM that is optimized for responsible use. The default LLM is GPT-4o, which inherits the existing RAI mechanisms and filters from the LLM provider. Caching is enabled by default to increase reliability and control cost. We encourage developers to review [OpenAI’s Usage policies](https://openai.com/policies/usage-policies/) and [Azure OpenAI’s Code of Conduct](https://learn.microsoft.com/en-us/legal/cognitive-services/openai/code-of-conduct) when using GPT-4o. To ensure effective and responsible use of the accelerator, users should carefully consider their choices and use the system within its intended scope. -------------------------------------------------------------------------------- /src/frontend/wwwroot/task/employee.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Task 6 | 7 | 11 | 12 | 13 | 14 |
123 | 127 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | -------------------------------------------------------------------------------- /src/backend/config.py: -------------------------------------------------------------------------------- 1 | # config.py 2 | import logging 3 | import os 4 | 5 | from autogen_ext.models.openai import AzureOpenAIChatCompletionClient 6 | from azure.cosmos.aio import CosmosClient 7 | from azure.identity.aio import (ClientSecretCredential, DefaultAzureCredential, 8 | get_bearer_token_provider) 9 | from dotenv import load_dotenv 10 | 11 | load_dotenv(".env", override=True) 12 | 13 | 14 | def GetRequiredConfig(name): 15 | #return os.environ[name] 16 | #print(f"GetRequiredConfig: {os.getenv(name)}") 17 | return os.getenv(name) 18 | 19 | 20 | def GetOptionalConfig(name, default=""): 21 | # if name in os.environ: 22 | # return os.environ[name] 23 | #print(f"GetOptionalConfig: {os.getenv(name)}") 24 | if os.getenv(name): 25 | return os.getenv(name) 26 | return default 27 | 28 | 29 | def GetBoolConfig(name): 30 | #return name in os.environ and os.environ[name].lower() in ["true", "1"] 31 | #return os.getenv(name) and os.getenv(name).lower() in ["true", "1"] 32 | if os.getenv(name): 33 | if os.getenv(name).lower() in ["true", "1"]: 34 | return True 35 | return False 36 | 37 | 38 | class Config: 39 | AZURE_TENANT_ID = GetOptionalConfig("AZURE_TENANT_ID") 40 | AZURE_CLIENT_ID = GetOptionalConfig("AZURE_CLIENT_ID") 41 | AZURE_CLIENT_SECRET = GetOptionalConfig("AZURE_CLIENT_SECRET") 42 | 43 | COSMOSDB_ENDPOINT = GetRequiredConfig("COSMOSDB_ENDPOINT") 44 | COSMOSDB_DATABASE = GetRequiredConfig("COSMOSDB_DATABASE") 45 | COSMOSDB_CONTAINER = GetRequiredConfig("COSMOSDB_CONTAINER") 46 | 47 | AZURE_OPENAI_DEPLOYMENT_NAME = GetRequiredConfig("AZURE_OPENAI_DEPLOYMENT_NAME") 48 | AZURE_OPENAI_API_VERSION = GetRequiredConfig("AZURE_OPENAI_API_VERSION") 49 | AZURE_OPENAI_ENDPOINT = GetRequiredConfig("AZURE_OPENAI_ENDPOINT") 50 | AZURE_OPENAI_API_KEY = GetOptionalConfig("AZURE_OPENAI_KEY") 51 | 52 | AZURE_BLOB_STORAGE_NAME = GetRequiredConfig("AZURE_BLOB_STORAGE_NAME") 53 | AZURE_BLOB_CONTAINER_NAME = GetRequiredConfig("AZURE_BLOB_CONTAINER_NAME") 54 | 55 | APP_IN_CONTAINER = GetBoolConfig("APP_IN_CONTAINER") 56 | FRONTEND_SITE_NAME = GetOptionalConfig("FRONTEND_SITE_NAME", "http://127.0.0.1:3000") 57 | 58 | 59 | __azure_credentials = DefaultAzureCredential() 60 | __comos_client = None 61 | __cosmos_database = None 62 | __aoai_chatCompletionClient = None 63 | 64 | def GetAzureCredentials(): 65 | # If we have specified the credentials in the environment, use them (backwards compatibility) 66 | if all( 67 | [Config.AZURE_TENANT_ID, Config.AZURE_CLIENT_ID, Config.AZURE_CLIENT_SECRET] 68 | ): 69 | return ClientSecretCredential( 70 | tenant_id=Config.AZURE_TENANT_ID, 71 | client_id=Config.AZURE_CLIENT_ID, 72 | client_secret=Config.AZURE_CLIENT_SECRET, 73 | ) 74 | 75 | # Otherwise, use the default Azure credential which includes managed identity 76 | return Config.__azure_credentials 77 | 78 | # Gives us a cached approach to DB access 79 | def GetCosmosDatabaseClient(): 80 | # TODO: Today this is a single DB, we might want to support multiple DBs in the future 81 | if Config.__comos_client is None: 82 | Config.__comos_client = CosmosClient( 83 | Config.COSMOSDB_ENDPOINT, Config.GetAzureCredentials() 84 | ) 85 | 86 | if Config.__cosmos_database is None: 87 | Config.__cosmos_database = Config.__comos_client.get_database_client( 88 | Config.COSMOSDB_DATABASE 89 | ) 90 | 91 | return 
Config.__cosmos_database 92 | 93 | def GetTokenProvider(scopes): 94 | return get_bearer_token_provider(Config.GetAzureCredentials(), scopes) 95 | 96 | def GetAzureOpenAIChatCompletionClient(model_capabilities): 97 | if Config.__aoai_chatCompletionClient is not None: 98 | return Config.__aoai_chatCompletionClient 99 | 100 | #print(f"Config.AZURE_OPENAI_API_KEY: {Config.AZURE_OPENAI_API_KEY}") 101 | if Config.AZURE_OPENAI_API_KEY == "": 102 | # Use DefaultAzureCredential for auth 103 | Config.__aoai_chatCompletionClient = AzureOpenAIChatCompletionClient( 104 | azure_deployment=Config.AZURE_OPENAI_DEPLOYMENT_NAME, 105 | api_version=Config.AZURE_OPENAI_API_VERSION, 106 | azure_endpoint=Config.AZURE_OPENAI_ENDPOINT, 107 | azure_ad_token_provider=Config.GetTokenProvider( 108 | "https://cognitiveservices.azure.com/.default" 109 | ), 110 | model="gpt-4o", 111 | #model="o1-mini", 112 | model_capabilities=model_capabilities, 113 | temperature=0, 114 | ) 115 | else: 116 | # Fallback behavior to use API key 117 | Config.__aoai_chatCompletionClient = AzureOpenAIChatCompletionClient( 118 | azure_deployment=Config.AZURE_OPENAI_DEPLOYMENT_NAME, 119 | api_version=Config.AZURE_OPENAI_API_VERSION, 120 | azure_endpoint=Config.AZURE_OPENAI_ENDPOINT, 121 | api_key=Config.AZURE_OPENAI_API_KEY, 122 | model="gpt-4o", 123 | #model="o1-mini", 124 | model_capabilities=model_capabilities, 125 | temperature=0, 126 | ) 127 | 128 | return Config.__aoai_chatCompletionClient 129 | -------------------------------------------------------------------------------- /src/frontend/wwwroot/home/home.css: -------------------------------------------------------------------------------- 1 | @import "../app.css"; 2 | 3 | .container { 4 | inset: 0; 5 | min-height: 100vh; 6 | overflow-y: auto; 7 | } 8 | 9 | .section { 10 | min-width: 800px; 11 | z-index: 1; 12 | } 13 | 14 | .app-logo { 15 | width: 60px; 16 | margin: 0 auto; 17 | } 18 | 19 | .background { 20 | inset: 0; 21 | position: absolute; 22 | opacity: 0.1; 23 | overflow-y: hidden; 24 | } 25 | 26 | .description { 27 | color: Black; 28 | text-align: center; 29 | 30 | /* Web/Body 1 */ 31 | font-family: "Segoe UI"; 32 | font-size: 14px; 33 | font-style: normal; 34 | font-weight: 400; 35 | line-height: 20px; /* 142.857% */ 36 | margin-bottom: 10px; 37 | } 38 | 39 | .title { 40 | color: var( 41 | --Light-Mode-Foreground-Neutral-Primary, 42 | var(--Foreground-Neutral-Primary, #111) 43 | ); 44 | text-align: center; 45 | font-family: "Segoe UI"; 46 | font-size: 32px; 47 | font-style: normal; 48 | font-weight: 700; 49 | line-height: 40px; /* 125% */ 50 | } 51 | 52 | .assistants { 53 | text-align: center; 54 | font-family: "Segoe UI"; 55 | font-size: 32px; 56 | font-style: normal; 57 | font-weight: 700; 58 | line-height: 40px; /* 125% */ 59 | 60 | background: var( 61 | --Gradient-M365-Chat-Light-Accessible, 62 | linear-gradient(90deg, #464feb 10.42%, #8330e9 100%) 63 | ); 64 | background-clip: text; 65 | -webkit-background-clip: text; 66 | -webkit-text-fill-color: transparent; 67 | } 68 | 69 | .orb { 70 | width: 50%; 71 | height: 50%; 72 | border-radius: 50%; 73 | filter: blur(50px); 74 | background: radial-gradient(circle); 75 | position: absolute; 76 | } 77 | 78 | .orb.one { 79 | bottom: -40%; 80 | left: 47.5%; 81 | background-color: rgba(70, 79, 235, 1); 82 | } 83 | 84 | .orb.two { 85 | bottom: -30%; 86 | left: 25%; 87 | background-color: rgb(18, 172, 149); 88 | z-index: 1; 89 | } 90 | 91 | .orb.three { 92 | bottom: -40%; 93 | left: 2.5%; 94 | background-color: rgb(199, 20, 184); 95 | } 
96 | 97 | .new-task-control:has(> textarea:disabled)::before, 98 | .new-task-control:has(> textarea:disabled)::after { 99 | pointer-events: none; 100 | content: ""; 101 | position: absolute; 102 | left: -2px; 103 | top: -2px; 104 | background: linear-gradient( 105 | 45deg, 106 | #fb0094, 107 | #0000ff, 108 | #fb0094, 109 | #0000ff, 110 | #fb0094, 111 | #0000ff, 112 | #fb0094, 113 | #0000ff 114 | ); 115 | background-size: 400%; 116 | width: calc(100% + 4px); 117 | height: calc(100% + 4px); 118 | border-radius: var(--bulma-input-radius); 119 | opacity: 0.5; 120 | z-index: -1; 121 | animation: steam 20s linear infinite; 122 | } 123 | 124 | @keyframes steam { 125 | 0% { 126 | background-position: 0 0; 127 | } 128 | 129 | 50% { 130 | background-position: 400% 0; 131 | } 132 | 133 | 100% { 134 | background-position: 0 0; 135 | } 136 | } 137 | 138 | .new-task-control::after { 139 | filter: blur(25px); 140 | } 141 | 142 | .text-input-container { 143 | width: 950px; 144 | position: relative; 145 | border: 1px solid #ccc; 146 | border-radius: 8px; 147 | background-color: white; 148 | } 149 | 150 | textarea { 151 | width: 98%; 152 | padding: 16px 0px 0px 0px; 153 | border: none; 154 | border-radius: 8px 8px 0 0; 155 | font-size: 16px; 156 | line-height: 1.5; 157 | resize: none; 158 | outline: none; 159 | overflow: hidden; 160 | margin: 0 10px; 161 | align-items: center; 162 | background-color: white; 163 | } 164 | textarea:disabled { 165 | cursor: default; 166 | background-color: white; 167 | } 168 | 169 | /*Spinner start*/ 170 | #spinnerLoader { 171 | display: flex; 172 | flex-direction: column; 173 | /* justify-content: center; */ 174 | align-items: center; 175 | position: absolute; 176 | inset: 0; 177 | color: black; 178 | top: 30%; 179 | left: 50%; 180 | transform: translateX(-50%); 181 | /* background-color: rgb(247, 249, 251);*/ 182 | z-index: 9999; 183 | font-weight: 500; 184 | } 185 | 186 | #spinnerLoader span::before { 187 | content: "Creating Tasks..."; 188 | animation: spinLoaderAnimation infinite 3s linear; 189 | } 190 | 191 | @keyframes spinLoaderAnimation { 192 | 75% { 193 | content: "Agents are on it..."; 194 | } 195 | } 196 | 197 | #spinnerLoader i { 198 | font-size: 3rem; 199 | } 200 | 201 | #overlay { 202 | position: fixed; 203 | display: none; 204 | top: 0; 205 | left: 0; 206 | right: 0; 207 | bottom: 0; 208 | background-color: rgba(255, 255, 255, 0.5); 209 | 210 | z-index: 1; 211 | } 212 | 213 | /*Spinner end*/ 214 | 215 | .middle-bar { 216 | display: flex; 217 | justify-content: space-between; 218 | align-items: left; 219 | padding: 0px 5px; 220 | background-color: white; 221 | } 222 | 223 | .bottom-bar { 224 | display: flex; 225 | justify-content: space-between; 226 | align-items: center; 227 | padding: 3px 10px; 228 | border-top: none; 229 | border-bottom: 4px solid #0f6cbd; 230 | background-color: white; 231 | } 232 | 233 | .icons { 234 | display: flex; 235 | align-items: center; 236 | } 237 | 238 | .star-icon { 239 | margin-right: 10px; 240 | cursor: pointer; 241 | } 242 | 243 | .char-count { 244 | font-size: 14px; 245 | color: #888; 246 | } 247 | 248 | .send-button { 249 | border: none; 250 | background: none; 251 | font-size: 18px; 252 | cursor: pointer; 253 | color: #007bff; 254 | padding: 4px; 255 | outline: none; 256 | } 257 | 258 | .send-button:hover { 259 | color: #0056b3; 260 | } 261 | 262 | .card.is-hoverable.quick-task > .card-content { 263 | min-height: 225px; 264 | } 265 | 266 | .prompt-container { 267 | padding-top: 2rem; 268 | padding-bottom: 2rem; 269 | } 
-------------------------------------------------------------------------------- /src/backend/agents/company_analyst.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from autogen_core import AgentId 4 | from autogen_core import default_subscription 5 | from autogen_ext.models.openai import AzureOpenAIChatCompletionClient 6 | from autogen_core.tools import FunctionTool, Tool 7 | from typing_extensions import Annotated 8 | 9 | from agents.base_agent import BaseAgent 10 | from context.cosmos_memory import CosmosBufferedChatCompletionContext 11 | from helpers.fmputils import * 12 | from helpers.yfutils import * 13 | from datetime import date, timedelta, datetime 14 | 15 | formatting_instructions = "Instructions: returning the output of this function call verbatim to the user in markdown. Then write AGENT SUMMARY: and then include a summary of what you did." 16 | 17 | # Define Company Analyst tools (functions) 18 | async def get_company_info(ticker_symbol: str) -> str: 19 | return ( 20 | f"##### Get Company Information\n" 21 | f"**Company Name:** {ticker_symbol}\n" 22 | f"**Company Information:** {fmpUtils.get_company_profile(ticker_symbol)}\n" 23 | f"{formatting_instructions}" 24 | ) 25 | 26 | async def get_analyst_recommendations(ticker_symbol: str) -> str: 27 | return ( 28 | f"##### Get Company Recommendations\n" 29 | f"**Company Name:** {ticker_symbol}\n" 30 | f"**Recommendations:** {yfUtils.get_analyst_recommendations(ticker_symbol)}\n" 31 | f"{formatting_instructions}" 32 | ) 33 | 34 | async def get_stock_data(ticker_symbol: str) -> str: 35 | end_date = date.today().strftime("%Y-%m-%d") 36 | start_date = (date.today() - timedelta(days=365)).strftime("%Y-%m-%d") 37 | return ( 38 | f"##### Stock Data from Yahoo Finance\n" 39 | f"**Company Name:** {ticker_symbol}\n\n" 40 | f"**Start Date:** {start_date}\n" 41 | f"**End Date:** {end_date}\n\n" 42 | f"**Stock Data:** {yfUtils.get_stock_data(ticker_symbol, start_date, end_date)}\n" 43 | f"{formatting_instructions}" 44 | ) 45 | 46 | async def get_financial_metrics(ticker_symbol: str) -> str: 47 | years = 4 48 | return ( 49 | f"##### Get Financial Information\n" 50 | f"**Company Name:** {ticker_symbol}\n\n" 51 | f"**Years:** {years}\n\n" 52 | f"**Financial Information:** {fmpUtils.get_financial_metrics(ticker_symbol, years)}\n" 53 | f"{formatting_instructions}" 54 | ) 55 | 56 | async def get_company_news(ticker_symbol: str) -> str: 57 | end_date = date.today().strftime("%Y-%m-%d") 58 | start_date = (date.today() - timedelta(days=7)).strftime("%Y-%m-%d") 59 | return ( 60 | f"##### Get Company News\n" 61 | f"**Company Name:** {ticker_symbol}\n\n" 62 | #f"**Company News:** {fmpUtils.get_company_news(ticker_symbol, start_date, end_date)}\n" 63 | f"**Company News:** {yfUtils.get_company_news(ticker_symbol, start_date, end_date)}\n" 64 | f"{formatting_instructions}" 65 | ) 66 | 67 | async def get_sentiment_analysis(ticker_symbol: str) -> str: 68 | return ( 69 | f"##### Get Company Information\n" 70 | f"**Company Name:** {ticker_symbol}\n" 71 | f"{formatting_instructions}" 72 | ) 73 | 74 | # async def analyze_predict_company(ticker_symbol: str) -> str: 75 | # return ( 76 | # f"##### Analyze and Prediction\n" 77 | # f"**Company Name:** {ticker_symbol}\n\n" 78 | # f"{formatting_instructions}" 79 | # ) 80 | 81 | # Create the Company Analyst Tools list 82 | def get_company_analyst_tools() -> List[Tool]: 83 | return [ 84 | FunctionTool( 85 | get_company_info, 86 | description="get a 
company's profile information", 87 | ), 88 | FunctionTool( 89 | get_stock_data, 90 | description="retrieve stock price data for designated ticker symbol", 91 | ), 92 | FunctionTool( 93 | get_financial_metrics, 94 | description="get latest financial basics for a designated company", 95 | ), 96 | FunctionTool( 97 | get_company_news, 98 | description="retrieve market news related to designated company", 99 | ), 100 | FunctionTool( 101 | get_analyst_recommendations, 102 | description="get analyst recommendation for a designated company", 103 | ), 104 | FunctionTool( 105 | get_sentiment_analysis, 106 | description="Analyze the data that you have access to like news and analyst recommendations and provide a sentiment analysis, positive or negative outlook", 107 | ), 108 | # FunctionTool( 109 | # analyze_predict_company, 110 | # description="Analyze and predict the future of a designated company", 111 | # ), 112 | ] 113 | 114 | 115 | @default_subscription 116 | class CompanyAnalystAgent(BaseAgent): 117 | def __init__( 118 | self, 119 | model_client: AzureOpenAIChatCompletionClient, 120 | session_id: str, 121 | user_id: str, 122 | memory: CosmosBufferedChatCompletionContext, 123 | ca_tools: List[Tool], 124 | ca_tool_agent_id: AgentId, 125 | ): 126 | super().__init__( 127 | "CompanyAnalystAgent", 128 | model_client, 129 | session_id, 130 | user_id, 131 | memory, 132 | ca_tools, 133 | ca_tool_agent_id, 134 | system_message="You are an AI Agent. You have knowledge about stock market, company information, company news, analyst recommendation and company's financial data and metrics." 135 | # system_message="As a Company Analyst, one must possess strong analytical and problem-solving abilities, collect necessary financial information and aggregate them based on client's requirement." 136 | # "For coding tasks, only use the functions you have been provided with. Reply TERMINATE when the task is done.", 137 | ) 138 | -------------------------------------------------------------------------------- /src/frontend/wwwroot/task/task.css: -------------------------------------------------------------------------------- 1 | @import "../app.css"; 2 | @import "../assets/bulma-switch.css"; 3 | 4 | .task-stats { 5 | min-height: 70px; 6 | } 7 | 8 | .section { 9 | min-height: 100%; 10 | } 11 | 12 | .task-asside { 13 | min-height: 100vh; 14 | max-width: 500px; 15 | } 16 | 17 | .task-asside.is-audit { 18 | max-width: 700px; 19 | } 20 | 21 | .task-asside .task-menu { 22 | margin: 3rem 1rem; 23 | } 24 | 25 | .task-asside .task-menu .menu-label:first-of-type { 26 | margin-top: 137px; 27 | } 28 | 29 | .task-asside .title { 30 | font-size: 1.25rem; 31 | height: 30px; 32 | display: flex; 33 | align-items: center; 34 | } 35 | 36 | .task-details { 37 | width: 100%; 38 | padding: 2rem; 39 | } 40 | .colChatSec { 41 | width: 55%; 42 | } 43 | /*Notification message styles start*/ 44 | /* Ensures block-level elements (like

<p>, <pre>, etc.) wrap inside the message */
 45 | .notification p,
 46 | .notification pre {
 47 |   margin: 0;
 48 |   word-wrap: break-word;
 49 |   white-space: pre-wrap; /* Allow preformatted text to wrap */
 50 | }
 51 | .message-content {
 52 |   max-width: 100%;
 53 |   overflow: hidden;
 54 |   word-break: break-word;
 55 |   line-height: 1.4;
 56 | }
 57 | /* Optional: Add word-breaking for URLs */
 58 | .notification a {
 59 |   word-wrap: break-word;
 60 |   word-break: break-word;
 61 |   text-decoration: underline;
 62 | }
 63 | /*Notification message styles end*/
 64 | 
 65 | .task-progress {
 66 |   height: 40vh;
 67 |   overflow-y: auto;
 68 |   background-color: white;
 69 |   border-radius: var(--bulma-radius);
 70 | }
 71 | 
 72 | @media (min-height: 1200px) {
 73 |   .task-progress {
 74 |     height: 50vh;
 75 |   }
 76 | }
 77 | 
 78 | @media (min-height: 1400px) {
 79 |   .task-progress {
 80 |     height: 60vh;
 81 |   }
 82 | }
 83 | 
 84 | .task-progress .notification {
 85 |   padding: 0.5rem 1rem;
 86 |   display: block;
 87 |   max-width: 100%;
 88 |   word-wrap: break-word;
 89 |   box-sizing: border-box;
 90 |   overflow-wrap: break-word;
 91 | }
 92 | 
 93 | .menu-list .menu-item,
 94 | .menu-list a,
 95 | .menu-list button {
 96 |   background-color: transparent;
 97 | }
 98 | 
 99 | .menu-list ul.menu-stages {
100 |   border-inline-start: 3px solid var(--bulma-border);
101 |   padding-inline-start: 0;
102 | }
103 | 
104 | .menu-list ul.menu-stages li {
105 |   margin-left: calc(-1.4rem - 5px);
106 | }
107 | 
108 | .menu-list a.menu-stage {
109 |   display: flex;
110 |   align-items: center;
111 |   position: relative;
112 |   padding: 0.5em 0 0.5rem 0.75em;
113 |   width: calc(100% + 1.4rem - 5px);
114 | }
115 | 
116 | .menu-list a.menu-stage > i {
117 |   font-size: 1.4rem;
118 |   margin-top: 3px;
119 |   border-radius: 50%;
120 |   background-color: rgb(247, 249, 251);
121 |   padding: 5px;
122 | }
123 | 
124 | .menu-list a.menu-stage span {
125 |   flex: 1;
126 |   word-break: break-word; /*this for stages span alignment*/
127 | }
128 | 
129 | .menu-list a.menu-stage.rejected span {
130 |   text-decoration: line-through;
131 |   opacity: 0.5;
132 | }
133 | 
134 | .menu-list a.menu-stage.action_requested span {
135 |   font-weight: 500;
136 | }
137 | 
138 | .menu-list a.menu-stage div {
139 |   display: flex;
140 |   align-items: center;
141 | }
142 | 
143 | .menu-stage-actions i {
144 |   font-size: 1.4rem;
145 | }
146 | 
147 | .business-animation {
148 |   position: relative;
149 |   border-radius: var(--bulma-radius-large);
150 | }
151 | 
152 | #taskLoader {
153 |   display: flex;
154 |   flex-direction: column;
155 |   justify-content: center;
156 |   align-items: center;
157 |   position: absolute;
158 |   inset: 0;
159 |   color: black;
160 |   background-color: rgb(247, 249, 251);
161 |   z-index: 1000;
162 |   font-weight: 500;
163 | }
164 | 
165 | #taskLoader span::before {
166 |   content: "Getting task plan...";
167 |   animation: taskLoaderAnimation infinite 3s linear;
168 | }
169 | 
170 | #taskLoader.is-hidden {
171 |   display: none;
172 | }
173 | 
174 | @keyframes taskLoaderAnimation {
175 |   0% {
176 |     content: "Getting task plan...";
177 |   }
178 | 
179 |   50% {
180 |     content: "Contacting agents...";
181 |   }
182 | 
183 |   75% {
184 |     content: "Loading conversations...";
185 |   }
186 | }
187 | 
188 | #taskLoader i {
189 |   font-size: 3rem;
190 | }
191 | 
192 | .task-stage-divider {
193 |   text-align: center;
194 |   margin: 1rem 0;
195 |   font-size: 0.85rem;
196 |   font-weight: 500;
197 |   border: 1px solid rgb(71, 80, 235);
198 |   border-left-width: 0;
199 |   border-right-width: 0;
200 |   border-bottom-width: 0;
201 | }
202 | 
203 | .task-stage-divider legend {
204 |   color: rgb(71, 80, 235);
205 |   -webkit-padding-start: 1rem;
206 |   -webkit-padding-end: 1rem;
207 |   background: transparent;
208 | }
209 | 
210 | .text-input-container {
211 |   position: relative;
212 |   border: 1px solid #ccc;
213 |   border-radius: 8px;
214 |   background-color: white;
215 | }
216 | 
217 | textarea {
218 |   width: 98%;
219 |   padding: 16px 0px 0px 0px;
220 |   border: none;
221 |   border-radius: 8px 8px 0 0;
222 |   font-size: 16px;
223 |   line-height: 1.5;
224 |   resize: none;
225 |   outline: none;
226 |   overflow: hidden;
227 |   margin: 0 10px;
228 |   align-items: center;
229 |   background-color: white;
230 | }
231 | 
232 | .star-icon {
233 |   margin-right: 10px;
234 |   cursor: pointer;
235 | }
236 | 
237 | .char-count {
238 |   font-size: 14px;
239 |   color: #888;
240 | }
241 | 
242 | .middle-bar {
243 |   display: flex;
244 |   justify-content: space-between;
245 |   align-items: flex-start;
246 |   padding: 0px 5px;
247 |   /* background-color: white; */
248 | }
249 | 
250 | .bottom-bar {
251 |   display: flex;
252 |   justify-content: space-between;
253 |   align-items: center;
254 |   padding: 3px 10px;
255 |   border-top: none;
256 |   border-bottom: 4px solid #0f6cbd;
257 |   /* background-color: white; */
258 | }
259 | 
260 | .send-button {
261 |   border: none;
262 |   background: none;
263 |   font-size: 18px;
264 |   cursor: pointer;
265 |   color: #007bff;
266 |   padding: 4px;
267 |   outline: none;
268 | }
269 | 
270 | .send-button:hover {
271 |   color: #0056b3;
272 | }
273 | 
274 | .menu.task-menu {
275 |   position: sticky;
276 |   top: 0;
277 | }


--------------------------------------------------------------------------------
/src/frontend/wwwroot/app.css:
--------------------------------------------------------------------------------
  1 | @import "https://cdn.jsdelivr.net/npm/bulma@1.0.2/css/bulma.min.css";
  2 | @import "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.6.0/css/all.min.css";
  3 | @import "assets/theme.css";
  4 | 
  5 | /* App global */
  6 | 
  7 | html,
  8 | body {
  9 |   overflow-x: hidden;
 10 |   overflow-y: auto;
 11 |   height: 100%;
 12 | }
 13 | 
 14 | body {
 15 |   position: relative;
 16 |   background: rgb(247, 249, 251);
 17 |   min-height: 100vh;
 18 | }
 19 | 
 20 | .border-right {
 21 |   border-right: 1px solid hsl(221, 14%, calc(86% + 0%));
 22 | }
 23 | 
 24 | /* App template */
 25 | 
 26 | #app .columns {
 27 |   min-height: 100vh;
 28 |   height: 100%;
 29 | }
 30 | #app .modal,
 31 | #app .menu {
 32 |   overflow: hidden; /* Prevent scrolling within modals and menus */
 33 | }
 34 | #app .asside {
 35 |   background: rgba(231, 236, 243, 0.7);
 36 | }
 37 | ul#tasksStats.menu-list {
 38 |   min-height: 100px;
 39 | }
 40 | @media (min-width: 1800px) {
 41 |   #app .asside {
 42 |     max-width: 400px;
 43 |   }
 44 | }
 45 | 
 46 | #app .menu-logo {
 47 |   font-size: 1.25rem;
 48 |   font-weight: 700;
 49 |   cursor: pointer;
 50 | }
 51 | 
 52 | #app .menu-logo img {
 53 |   width: 30px;
 54 | }
 55 | 
 56 | #app .asside .menu-list a {
 57 |   background-color: transparent;
 58 | }
 59 | 
 60 | #app .asside .menu-list a.is-active {
 61 |   background-color: rgb(71, 80, 235);
 62 | }
 63 | 
 64 | #app .asside .menu-list a.is-active i {
 65 |   color: white !important;
 66 | }
 67 | 
 68 | #app .asside .menu-list a.is-active:hover {
 69 |   background-color: rgb(71, 80, 235);
 70 | }
 71 | 
 72 | #app .asside .menu-list a.menu-task {
 73 |   display: flex;
 74 |   align-items: center;
 75 | }
 76 | 
 77 | #app .asside .menu-list a.menu-task span {
 78 |   flex: 1;
 79 | }
 80 | 
 81 | #app .asside .menu-list a:hover {
 82 |   background-color: rgba(0, 0, 0, 0.1);
 83 | }
 84 | 
 85 | #app .iframe {
 86 |   width: 100%;
 87 |   background-color: transparent;
 88 | }
 89 | 
 90 | #app .context-switch {
 91 |   position: fixed;
 92 |   bottom: 50px;
 93 |   right: calc(50% - 220px);
 94 |   z-index: 3;
 95 | }
 96 | 
 97 | .is-avatar.is-rounded {
 98 |   border-radius: var(--bulma-radius-rounded);
 99 | }
100 | 
101 | .is-avatar.is-agent {
102 |   display: flex;
103 |   /* background-color: rgba(231, 236, 243, 0.7); */
104 |   background-color: rgba(70, 79, 235, 0.25);
105 | }
106 | 
107 | .is-avatar.is-agent img {
108 |   width: 75%;
109 |   height: 75%;
110 |   margin: 13%;
111 | }
112 | 
113 | @keyframes moveImage {
114 |   0% {
115 |     transform: rotate(0deg);
116 |   }
117 | 
118 |   50% {
119 |     transform: rotate(-3deg);
120 |   }
121 | 
122 |   100% {
123 |     transform: rotate(3deg);
124 |   }
125 | }
126 | 
127 | .is-avatar.is-agent img.manager {
128 |   background-color: rgba(220, 56, 72, 0.35);
129 |   box-shadow: 0 0 0 4px rgba(220, 56, 72, 0.35);
130 |   animation: moveImage 0.3s infinite alternate;
131 | }
132 | 
133 | .is-avatar.is-agent img.hr_agent {
134 |   background-color: rgba(0, 209, 178, 0.35);
135 |   box-shadow: 0 0 0 4px rgba(0, 209, 178, 0.35);
136 |   animation: moveImage 0.5s infinite alternate;
137 | }
138 | 
139 | .is-avatar.is-agent img.procurement_agent {
140 |   background-color: rgba(255, 183, 15, 0.35);
141 |   box-shadow: 0 0 0 4px rgba(255, 183, 15, 0.35);
142 |   animation: moveImage 0.1s infinite alternate;
143 | }
144 | 
145 | .is-avatar.is-agent img.tech_agent {
146 |   background-color: rgba(178, 222, 39, 0.35);
147 |   box-shadow: 0 0 0 4px rgba(178, 222, 39, 0.35);
148 |   animation: moveImage 0.7s infinite alternate;
149 | }
150 | 
151 | .is-avatar.is-agent img.unknown {
152 |   background-color: rgba(39, 57, 222, 0.35);
153 |   box-shadow: 0 0 0 4px rgba(39, 57, 222, 0.35);
154 |   animation: moveImage 0.7s infinite alternate;
155 | }
156 | 
157 | .is-avatar.has-status::after {
158 |   content: "";
159 |   position: absolute;
160 |   bottom: 0;
161 |   right: 0;
162 |   width: 30%;
163 |   height: 30%;
164 |   border-radius: 50%;
165 |   background-color: rgb(255, 255, 255);
166 |   border: 2px solid rgb(255, 255, 255);
167 | }
168 | 
169 | .is-avatar.has-status.has-status-active::after {
170 |   background-color: hsl(
171 |     var(--bulma-success-h),
172 |     var(--bulma-success-s),
173 |     var(--bulma-success-l)
174 |   );
175 | }
176 | 
177 | .is-avatar.has-status.has-status-busy::after {
178 |   background-color: hsl(
179 |     var(--bulma-danger-h),
180 |     var(--bulma-danger-s),
181 |     var(--bulma-danger-l)
182 |   );
183 | }
184 | 
185 | .is-avatar.has-status.has-status-paused::after {
186 |   background-color: hsl(
187 |     var(--bulma-dark-h),
188 |     var(--bulma-dark-s),
189 |     var(--bulma-dark-l)
190 |   );
191 | }
192 | 
193 | .button.is-greyed-out {
194 |   background-color: #e0e0e0;
195 |   color: lightgrey;
196 |   cursor: not-allowed;
197 | }
198 | 
199 | .button.is-selected {
200 |   background-color: #d3d3d3;
201 |   color: #000;
202 | }
203 | 
204 | .notyf__toast {
205 |   max-width: 100% !important;
206 |   border-radius: var(--bulma-control-radius) !important;
207 | }
208 | 
209 | .notyf__wrapper {
210 |   padding: 0.75rem 0.5rem !important;
211 | }
212 | /* Menu list scroll style start*/
213 | #app .asside .menu-list {
214 |   max-height: 400px;
215 |   overflow-y: scroll;
216 |   padding-right: 2px;
217 |   transition: all 0.3s ease;
218 |   box-sizing: border-box;
219 | }
220 | /* Hide the scrollbar initially (before hover) */
221 | #app .asside .menu-list::-webkit-scrollbar {
222 |   width: 8px;
223 |   opacity: 0;
224 |   visibility: hidden;
225 |   transition: opacity 0.3s ease, visibility 0s 0.3s;
226 | }
227 | /* Style the scrollbar thumb (the draggable part) */
228 | #app .asside .menu-list::-webkit-scrollbar-thumb {
229 |   border-radius: 10px;
230 |   transition: background-color 0.3s ease;
231 | }
232 | /* Show the scrollbar and thumb when hovering */
233 | #app .asside .menu-list:hover::-webkit-scrollbar {
234 |   opacity: 1;
235 |   visibility: visible;
236 |   transition: opacity 0.3s ease, visibility 0s;
237 | }
238 | /* Style the thumb when hovering */
239 | #app .asside .menu-list:hover::-webkit-scrollbar-thumb {
240 |   background-color: rgba(0, 0, 0, 0.2);
241 | }
242 | /* Menu list scroll style end*/
243 | 


--------------------------------------------------------------------------------
/src/backend/agents/base_agent.py:
--------------------------------------------------------------------------------
  1 | import logging
  2 | from typing import Any, List, Mapping
  3 | 
  4 | from autogen_core import AgentId, MessageContext
  5 | from autogen_core import RoutedAgent, message_handler
  6 | from autogen_ext.models.openai import AzureOpenAIChatCompletionClient
  7 | from autogen_core.models import (AssistantMessage, LLMMessage, SystemMessage,
  8 |                                             UserMessage)
  9 | from autogen_core.tool_agent import tool_agent_caller_loop
 10 | from autogen_core.tools import Tool
 11 | 
 12 | from context.cosmos_memory import CosmosBufferedChatCompletionContext
 13 | from models.messages import (ActionRequest, ActionResponse,
 14 |                              AgentMessage, Step, StepStatus)
 15 | from event_utils import track_event_if_configured
 16 | 
 17 | class BaseAgent(RoutedAgent):
 18 |     def __init__(
 19 |         self,
 20 |         agent_name: str,
 21 |         model_client: AzureOpenAIChatCompletionClient,
 22 |         session_id: str,
 23 |         user_id: str,
 24 |         model_context: CosmosBufferedChatCompletionContext,
 25 |         tools: List[Tool],
 26 |         tool_agent_id: AgentId,
 27 |         system_message: str,
 28 |     ):
 29 |         super().__init__(agent_name)
 30 |         self._agent_name = agent_name
 31 |         self._model_client = model_client
 32 |         self._session_id = session_id
 33 |         self._user_id = user_id
 34 |         self._model_context = model_context
 35 |         self._tools = tools
 36 |         self._tool_schema = [tool.schema for tool in tools]
 37 |         self._tool_agent_id = tool_agent_id
 38 |         self._chat_history: List[LLMMessage] = [SystemMessage(content=system_message)]
 39 | 
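    # Note on the handler below: it loads the referenced step from the Cosmos-backed
    # context, appends the GroupChatManager's action plus any human feedback to the
    # chat history, runs the tool-agent caller loop against the LLM, persists the
    # agent's reply, marks the step completed, and publishes an ActionResponse back
    # to the GroupChatManager.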
 40 |     @message_handler
 41 |     async def handle_action_request(
 42 |         self, message: ActionRequest, ctx: MessageContext
 43 |     ) -> ActionResponse:
 44 |         step: Step = await self._model_context.get_step(
 45 |             message.step_id, message.session_id
 46 |         )
 47 |         # TODO: Agent verbosity
 48 |         # await self._model_context.add_item(
 49 |         #     AgentMessage(
 50 |         #         session_id=message.session_id,
 51 |         #         plan_id=message.plan_id,
 52 |         #         content=f"{self._agent_name} received action request: {message.action}",
 53 |         #         source=self._agent_name,
 54 |         #         step_id=message.step_id,
 55 |         #     )
 56 |         # )
 57 |         if not step:
 58 |             return ActionResponse(
 59 |                 step_id=message.step_id,
 60 |                 status=StepStatus.failed,
 61 |                 message="Step not found in memory.",
 62 |             )
 63 |         # TODO - here we use the action message as the source of the action, rather than step.action, as we have added a temporary conversation history to the agent, as a mechanism to give it visibility of the replies of other agents. The logic/approach needs to be thought through further to make it more consistent.
 64 |         self._chat_history.extend(
 65 |             [
 66 |                 AssistantMessage(content=message.action, source="GroupChatManager"),
 67 |                 UserMessage(
 68 |                     content=f"{step.human_feedback}. Now make the function call",
 69 |                     source="HumanAgent",
 70 |                 ),
 71 |             ]
 72 |         )
 73 |         try:
 74 |             messages: List[LLMMessage] = await tool_agent_caller_loop(
 75 |                 caller=self,
 76 |                 tool_agent_id=self._tool_agent_id,
 77 |                 model_client=self._model_client,
 78 |                 input_messages=self._chat_history,
 79 |                 tool_schema=self._tools,
 80 |                 cancellation_token=ctx.cancellation_token,
 81 |             )
 82 |             logging.info("*" * 12)
 83 |             logging.info(f"LLM call completed: {messages}")
 84 |             final_message = messages[-1]
 85 |             assert isinstance(final_message.content, str)
 86 |             result = final_message.content
 87 |             await self._model_context.add_item(
 88 |                 AgentMessage(
 89 |                     session_id=message.session_id,
 90 |                     user_id=self._user_id,
 91 |                     plan_id=message.plan_id,
 92 |                     content=f"{result}",
 93 |                     source=self._agent_name,
 94 |                     step_id=message.step_id,
 95 |                 )
 96 |             )
 97 | 
 98 |             track_event_if_configured(
 99 |                 "Base agent - Added into the cosmos",
100 |                 {
101 |                     "session_id": message.session_id,
102 |                     "user_id": self._user_id,
103 |                     "plan_id": message.plan_id,
104 |                     "content": f"{result}",
105 |                     "source": self._agent_name,
106 |                     "step_id": message.step_id,
107 |                 },
108 |             )
109 |         except Exception as e:
110 |             print(f"Error during LLM call: {e}")
111 |             return ActionResponse(step_id=message.step_id, status=StepStatus.failed, message=f"Error during LLM call: {e}")
112 |         print(f"Task completed: {result}")
113 | 
114 |         step.status = StepStatus.completed
115 |         step.agent_reply = result
116 |         await self._model_context.update_step(step)
117 | 
118 |         track_event_if_configured(
119 |             "Base agent - Updated step and updated into the cosmos",
120 |             {
121 |                 "status": StepStatus.completed,
122 |                 "session_id": message.session_id,
123 |                 "agent_reply": f"{result}",
124 |                 "user_id": self._user_id,
125 |                 "plan_id": message.plan_id,
126 |                 "content": f"{result}",
127 |                 "source": self._agent_name,
128 |                 "step_id": message.step_id,
129 |             },
130 |         )
131 |         
132 |         action_response = ActionResponse(
133 |             step_id=step.id,
134 |             plan_id=step.plan_id,
135 |             session_id=message.session_id,
136 |             result=result,
137 |             status=StepStatus.completed,
138 |         )
139 | 
140 |         group_chat_manager_id = AgentId("group_chat_manager", self._session_id)
141 |         await self.publish_message(action_response, group_chat_manager_id)
142 |         # TODO: Agent verbosity
143 |         # await self._model_context.add_item(
144 |         #     AgentMessage(
145 |         #         session_id=message.session_id,
146 |         #         plan_id=message.plan_id,
147 |         #         content=f"{self._agent_name} sending update to GroupChatManager",
148 |         #         source=self._agent_name,
149 |         #         step_id=message.step_id,
150 |         #     )
151 |         # )
152 |         return action_response
153 | 
154 |     def save_state(self) -> Mapping[str, Any]:
155 |         print("Saving state:")
156 |         return {"memory": self._model_context.save_state()}
157 | 
158 |     def load_state(self, state: Mapping[str, Any]) -> None:
159 |         self._model_context.load_state(state["memory"])
160 | 


--------------------------------------------------------------------------------
/src/backend/agents/earningcalls_analyst.py:
--------------------------------------------------------------------------------
  1 | from typing import List
  2 | 
  3 | from autogen_core import AgentId
  4 | from autogen_core import default_subscription
  5 | from autogen_ext.models.openai import AzureOpenAIChatCompletionClient
  6 | from autogen_core.tools import FunctionTool, Tool
  7 | from typing_extensions import Annotated
  8 | 
  9 | from agents.base_agent import BaseAgent
 10 | from context.cosmos_memory import CosmosBufferedChatCompletionContext
 11 | from helpers.fmputils import *
 12 | from helpers.yfutils import *
 13 | from datetime import date, timedelta, datetime
 14 | from helpers.summarizeutils import summarize, summarizeTopic
 15 | from helpers.dcfutils import DcfUtils
 16 | 
 17 | formatting_instructions = "Instructions: return the output of this function call verbatim to the user in markdown."
 18 | latestEarnings = None
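# NOTE: module-level cache shared by all tools below; it is only populated when
# empty and is never keyed by ticker or year, so the first transcript fetched is
# reused for the lifetime of the process.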
 19 | 
 20 | # Define Earning Calls Analyst tools (functions)
 21 | async def get_earning_calls_transcript(ticker_symbol: str, year:str) -> str:
 22 |     global latestEarnings
 23 |     print("Calling get_earning_calls_transcript")
 24 |     if year is None or year == "latest":
 25 |         year = datetime.now().year
 26 |         if datetime.now().month < 3:
 27 |             year = int(year) - 1
 28 | 
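    # With the DcfUtils path below, the resolved year is not actually used:
    # DcfUtils.get_earning_calls always pulls the most recent available transcript.
    # The year handling above only matters if the commented-out fmpUtils call is restored.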
 29 |     if latestEarnings is None or len(latestEarnings) == 0:
 30 |         #latestEarnings = fmpUtils.get_earning_calls(ticker_symbol, year)
 31 |         latestEarnings = DcfUtils.get_earning_calls(ticker_symbol)
 32 |     return (
 33 |         f"##### Get Earning Calls\n"
 34 |         f"{formatting_instructions}"
 35 |     )
 36 | 
 37 | async def summarize_transcripts(ticker_symbol:str, year:str) -> str:
 38 |     global latestEarnings
 39 |     if latestEarnings is None or len(latestEarnings) == 0:
 40 |         #latestEarnings = fmpUtils.get_earning_calls(ticker_symbol, year)
 41 |         latestEarnings = DcfUtils.get_earning_calls(ticker_symbol)
 42 |     print("*"*35)
 43 |     print("Calling summarize_transcripts")
 44 |     summarized = summarize(latestEarnings)
 45 |     print("*"*35)
 46 |     return (
 47 |         f"##### Summarized transcripts\n"
 48 |         f"**Company Name:** {ticker_symbol}\n"
 49 |         f"**Summary:** {summarized}\n"
 50 |         f"{formatting_instructions}"
 51 |     )
 52 | 
 53 | async def management_positive_outlook(ticker_symbol: str, year:str) -> str:
 54 |     global latestEarnings
 55 |     if latestEarnings is None or len(latestEarnings) == 0:
 56 |         #latestEarnings = fmpUtils.get_earning_calls(ticker_symbol, year)
 57 |         latestEarnings = DcfUtils.get_earning_calls(ticker_symbol)
 58 |     print("*"*35)
 59 |     print("Calling management_positive_outlook")
 60 |     positiveOutlook = summarizeTopic(latestEarnings, 'Management Positive Outlook')
 61 |     print("*"*35)
 62 |     return (
 63 |         f"##### Management Positive Outlook\n"
 64 |         f"**Company Name:** {ticker_symbol}\n"
 65 |         f"**Topic Summary:** {positiveOutlook}\n"
 66 |         f"{formatting_instructions}"
 67 |     )
 68 | 
 69 | async def management_negative_outlook(ticker_symbol: str, year:str) -> str:
 70 |     global latestEarnings
 71 |     if latestEarnings is None or len(latestEarnings) == 0:
 72 |         #latestEarnings = fmpUtils.get_earning_calls(ticker_symbol, year)
 73 |         latestEarnings = DcfUtils.get_earning_calls(ticker_symbol)
 74 |     print("*"*35)
 75 |     print("Calling management_negative_outlook")
 76 |     negativeOutlook = summarizeTopic(latestEarnings, 'Management Negative Outlook')
 77 |     print("*"*35)
 78 |
 79 |     return (
 80 |         f"##### Management Negative Outlook\n"
 81 |         f"**Company Name:** {ticker_symbol}\n"
 82 |         f"**Topic Summary:** {negativeOutlook}\n"
 83 |         f"{formatting_instructions}"
 84 |     )
 85 | 
 86 | async def future_growth_opportunity(ticker_symbol: str, year:str) -> str:
 87 |     global latestEarnings
 88 |     if latestEarnings is None or len(latestEarnings) == 0:
 89 |         #latestEarnings = fmpUtils.get_earning_calls(ticker_symbol, year)
 90 |         latestEarnings = DcfUtils.get_earning_calls(ticker_symbol)
 91 |     print("*"*35)
 92 |     print("Calling future_growth_opportunity")
 93 |     futureGrowth = summarizeTopic(latestEarnings, 'Future Growth Opportunities')
 94 |     print("*"*35)
 95 |     return (
 96 |         f"##### Future Growth and Opportunities\n"
 97 |         f"**Company Name:** {ticker_symbol}\n\n"
 98 |         f"**Topic Summary:** {futureGrowth}\n"
 99 |         f"{formatting_instructions}"
100 |     )
101 | 
102 | # async def analyze_predict_transcript(ticker_symbol: str) -> str:
103 | #     return (
104 | #         f"##### Transcription Analyze and Prediction\n"
105 | #         f"**Company Name:** {ticker_symbol}\n\n"
106 | #         f"{formatting_instructions}"
107 | #     )
108 | 
109 | # Create the Company Analyst Tools list
110 | def get_earning_calls_analyst_tools() -> List[Tool]:
111 |     return [
112 |         FunctionTool(
113 |             get_earning_calls_transcript, 
114 |             description="get an earning call's transcript for a company",
115 |         ),
116 |         FunctionTool(
117 |            summarize_transcripts, 
118 |            description="summarize the earning call's transcript for a company",
119 |         ),
120 |         FunctionTool(
121 |             management_positive_outlook, 
122 |             description="From the extracted earning call's transcript, identify the management's positive outlook for a company",
123 |         ),
124 |         FunctionTool(
125 |             management_negative_outlook, 
126 |             description="From the extracted earning call's transcript, identify the management's negative outlook for a company",
127 |         ),
128 |         FunctionTool(
129 |             future_growth_opportunity, 
130 |             description="From the extracted earning call's transcript, identify the future growth and opportunities for a company",
131 |         ),
132 |         # FunctionTool(
133 |         #     analyze_predict_transcript, 
134 |         #     description="Analyze and predict the future of a designated company based on the information from the earning call's transcript",
135 |         # ),
136 |     ]
137 | 
138 | 
139 | @default_subscription
140 | class EarningCallsAnalystAgent(BaseAgent):
141 |     def __init__(
142 |         self,
143 |         model_client: AzureOpenAIChatCompletionClient,
144 |         session_id: str,
145 |         user_id: str,
146 |         memory: CosmosBufferedChatCompletionContext,
147 |         earning_calls_analyst_tools: List[Tool],
148 |         earning_calls_analyst_tool_agent_id: AgentId,
149 |     ):
150 |         super().__init__(
151 |             "EarningCallsAnalystAgent",
152 |             model_client,
153 |             session_id,
154 |             user_id,
155 |             memory,
156 |             earning_calls_analyst_tools,
157 |             earning_calls_analyst_tool_agent_id,
158 |             system_message="You are an AI Agent. You have knowledge about the management positive and negative outlook, future growths and opportunities based on the earning call transcripts."
159 |         )
160 | 


--------------------------------------------------------------------------------
/src/backend/helpers/dcfutils.py:
--------------------------------------------------------------------------------
  1 | import os
  2 | import requests
  3 | import numpy as np
  4 | import pandas as pd
  5 | from datetime import datetime, timedelta
  6 | import random
  7 | from helpers.dutils import decorate_all_methods
  8 | from helpers.summarizeutils import get_next_weekday
  9 | import re
 10 | from tenacity import RetryError
 11 | from tenacity import retry, stop_after_attempt, wait_random_exponential
 12 | from langchain.schema import Document
 13 | import json
 14 | from typing import List
 15 | import ast 
 16 | 
 17 | # from finrobot.utils import decorate_all_methods, get_next_weekday
 18 | from functools import wraps
 19 | from typing import Annotated, List
 20 | 
 21 | def init_dcf_api(func):
 22 |     @wraps(func)
 23 |     def wrapper(*args, **kwargs):
 24 |         global dcf_api_key
 25 |         if os.environ.get("DCF_API_KEY") is None:
 26 |             print("Please set the environment variable DCF_API_KEY to use the DCF API.")
 27 |             return None
 28 |         else:
 29 |             dcf_api_key = os.environ["DCF_API_KEY"]
 30 |             print("DCF api key found successfully.")
 31 |             return func(*args, **kwargs)
 32 | 
 33 |     return wrapper
 34 | 
 35 | @decorate_all_methods(init_dcf_api)
 36 | class DcfUtils:
 37 | 
 38 |     def correct_date(yr, dt):
 39 |         """Some transcripts have incorrect date, correcting it
 40 | 
 41 |         Args:
 42 |             yr (int): actual year of the call
 43 |             dt (datetime): given date
 44 | 
 45 |         Returns:
 46 |             datetime: corrected date
 47 |         """
 48 |         dt = datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
 49 |         if dt.year != yr:
 50 |             dt = dt.replace(year=yr)
 51 |         return dt.strftime("%Y-%m-%d %H:%M:%S")
 52 | 
 53 |     def extract_speakers(cont: str) -> List[str]:
 54 |         """Extract the list of speakers
 55 | 
 56 |         Args:
 57 |             cont (str): transcript content
 58 | 
 59 |         Returns:
 60 |             List[str]: list of speakers
 61 |         """
 62 |         pattern = re.compile(r"\n(.*?):")
 63 |         matches = pattern.findall(cont)
 64 | 
 65 |         return list(set(matches))
 66 |     
 67 |     def clean_speakers(speaker):
 68 |         speaker = re.sub("\n", "", speaker)
 69 |         speaker = re.sub(":", "", speaker)
 70 |         return speaker
 71 |     
 72 |     def get_earnings_transcript(quarter: str, ticker: str, year: int):
 73 |         """Get the earnings transcripts
 74 | 
 75 |         Args:
 76 |             quarter (str)
 77 |             ticker (str)
 78 |             year (int)
 79 |         """
 80 |         response = requests.get(
 81 |             f"https://discountingcashflows.com/api/transcript/?ticker={ticker}&quarter={quarter}&year={year}&key={dcf_api_key}"
 82 |         )
 83 | 
 84 |         resp_text = json.loads(response.text)
 85 |         # speakers_list = extract_speakers(resp_text[0]["content"])
 86 |         corrected_date = DcfUtils.correct_date(resp_text[0]["year"], resp_text[0]["date"])
 87 |         resp_text[0]["date"] = corrected_date
 88 |         return resp_text[0]
 89 |     
 90 |     def get_earnings_all_quarters_data(quarter: str, ticker: str, year: int):
 91 |         docs = []
 92 |         resp_dict = DcfUtils.get_earnings_transcript(quarter, ticker, year)
 93 | 
 94 |         content = resp_dict["content"]
 95 |         pattern = re.compile(r"\n(.*?):")
 96 |         matches = pattern.finditer(content)
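        # Each match is a "Speaker Name:" header; the recorded spans are used below
        # to slice the transcript into per-speaker passages.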
 97 | 
 98 |         speakers_list = []
 99 |         ranges = []
100 |         for match_ in matches:
101 |             # print(match.span())
102 |             span_range = match_.span()
103 |             # first_idx = span_range[0]
104 |             # last_idx = span_range[1]
105 |             ranges.append(span_range)
106 |             speakers_list.append(match_.group())
107 |         speakers_list = [DcfUtils.clean_speakers(sl) for sl in speakers_list]
108 | 
109 |         for idx, speaker in enumerate(speakers_list[:-1]):
110 |             start_range = ranges[idx][1]
111 |             end_range = ranges[idx + 1][0]
112 |             speaker_text = content[start_range + 1 : end_range]
113 | 
114 |             docs.append(
115 |                 Document(
116 |                     page_content=speaker_text,
117 |                     metadata={"speaker": speaker, "quarter": quarter},
118 |                 )
119 |             )
120 | 
121 |         docs.append(
122 |             Document(
123 |                 page_content=content[ranges[-1][1] :],
124 |                 metadata={"speaker": speakers_list[-1], "quarter": quarter},
125 |             )
126 |         )
127 |         return docs, speakers_list
128 | 
129 |     def get_earning_calls(ticker: str) -> str:
130 |         
131 |         url = f"https://discountingcashflows.com/api/transcript/list/?ticker={ticker}&key={dcf_api_key}"
132 | 
133 |         response = requests.get(url)
134 | 
135 |         if response.status_code == 200:
136 |             data = ast.literal_eval(response.text)
137 |             quarter, year = data[0][0], data[0][1]
138 | 
139 |             resp_dict = DcfUtils.get_earnings_transcript("Q" + str(quarter), ticker, year)
140 | 
141 |             transcripts = resp_dict["content"]
142 |             return transcripts
143 |         else:
144 |             return f"Failed to retrieve data: {response.status_code}"
145 |         
146 |     def get_earnings_all_docs(ticker: str, year: int):
147 |         earnings_docs = []
148 |         earnings_call_quarter_vals = []
149 |         print("Earnings Call Q1")
150 |         try:
151 |             docs, speakers_list_1 = DcfUtils.get_earnings_all_quarters_data("Q1", ticker, year)
152 |             earnings_call_quarter_vals.append("Q1")
153 |             earnings_docs.extend(docs)
154 |         except RetryError:
155 |             print(f"Don't have the data for Q1")
156 |             speakers_list_1 = []
157 | 
158 |         print("Earnings Call Q2")
159 |         try:
160 |             docs, speakers_list_2 = DcfUtils.get_earnings_all_quarters_data("Q2", ticker, year)
161 |             earnings_call_quarter_vals.append("Q2")
162 |             earnings_docs.extend(docs)
163 |         except RetryError:
164 |             print(f"Don't have the data for Q2")
165 |             speakers_list_2 = []
166 |         print("Earnings Call Q3")
167 |         try:
168 |             docs, speakers_list_3 = DcfUtils.get_earnings_all_quarters_data("Q3", ticker, year)
169 |             earnings_call_quarter_vals.append("Q3")
170 |             earnings_docs.extend(docs)
171 |         except RetryError:
172 |             print(f"Don't have the data for Q3")
173 |             speakers_list_3 = []
174 |         print("Earnings Call Q4")
175 |         try:
176 |             docs, speakers_list_4 = DcfUtils.get_earnings_all_quarters_data("Q4", ticker, year)
177 |             earnings_call_quarter_vals.append("Q4")
178 |             earnings_docs.extend(docs)
179 |         except RetryError:
180 |             print(f"Don't have the data for Q4")
181 |             speakers_list_4 = []
182 |         return (
183 |             earnings_docs,
184 |             earnings_call_quarter_vals,
185 |             speakers_list_1,
186 |             speakers_list_2,
187 |             speakers_list_3,
188 |             speakers_list_4,
189 |         )
190 |     
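# Example usage (a minimal sketch, assuming DCF_API_KEY is set and the ticker has
# transcripts available on discountingcashflows.com; "MSFT" and 2024 are illustrative):
#
#   latest_text = DcfUtils.get_earning_calls("MSFT")              # latest quarter's transcript text
#   docs, quarters, *speaker_lists = DcfUtils.get_earnings_all_docs("MSFT", 2024)
#   print(quarters)  # e.g. ["Q1", "Q2"] for whichever quarters returned data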


--------------------------------------------------------------------------------
/src/frontend/wwwroot/home/home.js:
--------------------------------------------------------------------------------
  1 | (() => {
  2 |   const notyf = new Notyf({
  3 |     position: { x: "right", y: "top" },
  4 |     ripple: false,
  5 |     duration: 3000,
  6 |   });
  7 |   const apiEndpoint = sessionStorage.getItem("apiEndpoint");
  8 |   const newTaskPrompt = document.getElementById("newTaskPrompt");
  9 |   const startTaskButton = document.getElementById("startTaskButton");
 10 |   const startTaskButtonContainer = document.querySelector(".send-button");
 11 |   const startTaskButtonImg = startTaskButtonContainer
 12 |     ? startTaskButtonContainer.querySelector("img")
 13 |     : null;
 14 | 
 15 |   newTaskPrompt.focus();
 16 | 
 17 |   // Create spinner element
 18 |   const createSpinner = () => {
 19 |     if (!document.getElementById("spinnerContainer")) {
 20 |       const spinnerContainer = document.createElement("div");
 21 |       spinnerContainer.id = "spinnerContainer";
 22 |       spinnerContainer.innerHTML = `
 23 |        
24 | 25 | 26 |
27 | 28 | `; 29 | document.body.appendChild(spinnerContainer); 30 | } 31 | }; 32 | 33 | // Function to create and add the overlay 34 | const createOverlay = () => { 35 | let overlay = document.getElementById("overlay"); 36 | if (!overlay) { 37 | overlay = document.createElement("div"); 38 | overlay.id = "overlay"; 39 | document.body.appendChild(overlay); 40 | } 41 | }; 42 | 43 | const showOverlay = () => { 44 | const overlay = document.getElementById("overlay"); 45 | if (overlay) { 46 | overlay.style.display = "block"; 47 | } 48 | createSpinner(); 49 | }; 50 | 51 | const hideOverlay = () => { 52 | const overlay = document.getElementById("overlay"); 53 | if (overlay) { 54 | overlay.style.display = "none"; 55 | } 56 | removeSpinner(); 57 | }; 58 | 59 | // Remove spinner element 60 | const removeSpinner = () => { 61 | const spinnerContainer = document.getElementById("spinnerContainer"); 62 | if (spinnerContainer) { 63 | spinnerContainer.remove(); 64 | } 65 | }; 66 | 67 | // Function to update button image based on textarea content 68 | const updateButtonImage = () => { 69 | if (startTaskButtonImg) { 70 | if (newTaskPrompt.value.trim() === "") { 71 | startTaskButtonImg.src = "../assets/images/air-button.svg"; 72 | startTaskButton.disabled = true; 73 | } else { 74 | startTaskButtonImg.src = "/assets/Send.svg"; 75 | startTaskButtonImg.style.width = "16px"; 76 | startTaskButtonImg.style.height = "16px"; 77 | startTaskButton.disabled = false; 78 | } 79 | } 80 | }; 81 | 82 | const startTask = () => { 83 | startTaskButton.addEventListener("click", (event) => { 84 | if (startTaskButton.disabled) { 85 | return; 86 | } 87 | const sessionId = 88 | "sid_" + new Date().getTime() + "_" + Math.floor(Math.random() * 10000); 89 | 90 | newTaskPrompt.disabled = true; 91 | startTaskButton.disabled = true; 92 | startTaskButton.classList.add("is-loading"); 93 | createOverlay(); 94 | showOverlay(); 95 | window.headers.then((headers) => { 96 | fetch(apiEndpoint + "/input_task", { 97 | method: "POST", 98 | headers: headers, 99 | body: JSON.stringify({ 100 | session_id: sessionId, 101 | description: newTaskPrompt.value, 102 | }), 103 | }) 104 | .then((response) => response.json()) 105 | .then((data) => { 106 | if (data.status == "Plan not created") { 107 | notyf.error("Unable to create plan for this task."); 108 | newTaskPrompt.disabled = false; 109 | startTaskButton.disabled = false; 110 | return; 111 | } 112 | 113 | console.log("startTaskButton", data); 114 | 115 | newTaskPrompt.disabled = false; 116 | startTaskButton.disabled = false; 117 | startTaskButton.classList.remove("is-loading"); 118 | 119 | window.parent.postMessage( 120 | { 121 | action: "taskStarted", 122 | session_id: data.session_id, 123 | task_id: data.plan_id, 124 | task_name: newTaskPrompt.value, 125 | }, 126 | "*" 127 | ); 128 | 129 | newTaskPrompt.value = ""; 130 | 131 | // Reset character count to 0 132 | const charCount = document.getElementById("charCount"); 133 | if (charCount) { 134 | charCount.textContent = "0"; 135 | } 136 | updateButtonImage(); 137 | notyf.success("Task created successfully. 
AI agents are on it!"); 138 | 139 | // Remove spinner and hide overlay 140 | removeSpinner(); 141 | hideOverlay(); 142 | }) 143 | .catch((error) => { 144 | console.error("Error:", error); 145 | newTaskPrompt.disabled = false; 146 | startTaskButton.disabled = false; 147 | startTaskButton.classList.remove("is-loading"); 148 | 149 | // Remove spinner and hide overlay 150 | removeSpinner(); 151 | hideOverlay(); 152 | }); 153 | }); 154 | }); 155 | }; 156 | 157 | const quickTasks = () => { 158 | document.querySelectorAll(".quick-task").forEach((task) => { 159 | task.addEventListener("click", (event) => { 160 | const quickTaskPrompt = 161 | task.querySelector(".quick-task-prompt").innerHTML; 162 | newTaskPrompt.value = quickTaskPrompt.trim().replace(/\s+/g, " "); 163 | const charCount = document.getElementById("charCount"); 164 | // Update character count 165 | charCount.textContent = newTaskPrompt.value.length; 166 | updateButtonImage(); 167 | newTaskPrompt.focus(); 168 | }); 169 | }); 170 | }; 171 | const handleTextAreaTyping = () => { 172 | const newTaskPrompt = document.getElementById("newTaskPrompt"); 173 | newTaskPrompt.addEventListener("input", () => { 174 | // const textInput = document.getElementById("newTaskPrompt"); 175 | const charCount = document.getElementById("charCount"); 176 | 177 | // Update character count 178 | charCount.textContent = newTaskPrompt.value.length; 179 | 180 | // Dynamically adjust height 181 | newTaskPrompt.style.height = "auto"; 182 | newTaskPrompt.style.height = newTaskPrompt.scrollHeight + "px"; 183 | 184 | updateButtonImage(); 185 | }); 186 | 187 | newTaskPrompt.addEventListener("keydown", (event) => { 188 | const textValue = newTaskPrompt.value.trim(); 189 | // If Enter is pressed without Shift, and the textarea is empty, prevent default behavior 190 | if (event.key === "Enter" && !event.shiftKey) { 191 | if (textValue === "") { 192 | event.preventDefault(); // Disable Enter when textarea is empty 193 | } else { 194 | // If there's content in the textarea, allow Enter to trigger the task button click 195 | startTaskButton.click(); 196 | } 197 | } else if (event.key === "Enter" && event.shiftKey) { 198 | return; 199 | } 200 | }); 201 | }; 202 | 203 | updateButtonImage(); 204 | startTask(); 205 | quickTasks(); 206 | handleTextAreaTyping(); 207 | })(); 208 | -------------------------------------------------------------------------------- /src/backend/agents/forecaster.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from autogen_core import AgentId 4 | from autogen_core import default_subscription 5 | from autogen_ext.models.openai import AzureOpenAIChatCompletionClient 6 | from autogen_core.tools import FunctionTool, Tool 7 | 8 | from agents.base_agent import BaseAgent 9 | from context.cosmos_memory import CosmosBufferedChatCompletionContext 10 | from helpers.fmputils import * 11 | from helpers.yfutils import * 12 | from helpers.analyzer import * 13 | from typing import List, Dict, Any 14 | import json 15 | 16 | formatting_instructions = "Instructions: returning the output of this function call verbatim to the user in markdown." 
17 | 18 | async def analyze_and_predict(analysis_result: Dict[str, Any]) -> str: 19 | """ 20 | Takes the JSON output from ExtendedCombinedAnalysisAgent (technical indicators, 21 | candlestick patterns, fundamentals, news sentiment, final decision), 22 | and uses an LLM to produce a structured forecast with: 23 | 1) A multi-section format (Introduction, Technical, Fundamental, etc.) 24 | 2) An explanation of probability/score as confidence (e.g., 70% => "moderately strong") 25 | 3) A final recommendation 26 | 4) Legal disclaimers 27 | 28 | Returns a markdown or text response with these structured sections. 29 | """ 30 | # Convert analysis_result into a JSON string 31 | analysis_json_str = json.dumps(analysis_result, indent=2) 32 | 33 | # Extract the final probability from the JSON for prompt usage 34 | final_decision = analysis_result.get("final_decision", {}) 35 | probability_value = final_decision.get("probability", None) 36 | rating_value = final_decision.get("rating", "hold") 37 | 38 | # We can provide instructions to interpret the confidence level: 39 | # e.g., 0.0-0.33 => "low confidence", 0.33-0.66 => "moderate confidence", 0.66-1.0 => "high confidence" 40 | # We'll do a bit of logic to embed in the prompt. Alternatively, let the LLM do it entirely. 41 | confidence_descriptor = "moderate" 42 | if probability_value is not None: 43 | if probability_value <= 0.33: 44 | confidence_descriptor = "low" 45 | elif probability_value >= 0.66: 46 | confidence_descriptor = "high" 47 | else: 48 | confidence_descriptor = "moderate" 49 | 50 | # Construct a detailed prompt with strict output structure 51 | prompt = f""" 52 | You are a specialized financial analysis LLM. You have received a JSON structure that 53 | represents an extended analysis of a stock, including: 54 | - Technical signals (RSI, MACD, Bollinger, EMA crossover, Stochastics, ADX) 55 | - Candlestick pattern detections (TA-Lib) 56 | - Basic fundamentals (P/E ratios, etc.) 57 | - News sentiment 58 | - A final numeric probability (score) and rating (Buy/Sell/Hold). 59 | 60 | The JSON data is: 61 | 62 | ``` 63 | {analysis_json_str} 64 | ``` 65 | 66 | **Please return your answer in the following sections:** 67 | 68 | 1) **Introduction** 69 | - Briefly introduce the analysis. 70 | 71 | 2) **Technical Overview** 72 | - Summarize the key technical indicators and any candlestick patterns. 73 | - Explain whether they are bullish, bearish, or neutral. 74 | 75 | 3) **Fundamental Overview** 76 | - Mention any notable fundamental data (like forwardPE, trailingPE, etc.) 77 | and how it influences the outlook. 78 | 79 | 4) **News & Sentiment** 80 | - Highlight the sentiment score (range: -1.0 to +1.0). 81 | Explain if it's a tailwind (positive) or headwind (negative). 82 | 83 | 5) **Probability & Confidence** 84 | - The system’s final probability is **{probability_value}** (range: 0.0 to 1.0). 85 | - Interpret it as **{confidence_descriptor}** confidence 86 | (e.g., <=0.33 => "low", 0.33-0.66 => "moderate", >=0.66 => "high"). 87 | - Elaborate how confident or uncertain this rating might be based on 88 | conflicting signals, volatility, etc. 89 | 90 | 6) **Final Recommendation** 91 | - Based on the system’s final rating: **{rating_value}**. 92 | - Explain briefly why you agree or disagree, or how you interpret it. 93 | 94 | 7) **Disclaimers** 95 | - Include disclaimers such as "Past performance is not indicative of future results." 96 | - Remind the user that this is not guaranteed investment advice. 
97 | - Encourage further research before making any decisions. 98 | 99 | Please format your response in **Markdown**, with headings for each section 100 | and bullet points where appropriate. 101 | """ 102 | 103 | return prompt 104 | # Now we call the LLM with this prompt. We'll mock the response for this example. 105 | # In real usage, you'd do something like: 106 | # response = await model_client.get_chat_completion( 107 | # system_message="You are a financial analysis LLM.", 108 | # user_message=prompt, 109 | # temperature=0.7, 110 | # max_tokens=1200, 111 | # ) 112 | # 113 | 114 | # Create the Company Analyst Tools list 115 | def get_forecaster_tools() -> List[Tool]: 116 | return [ 117 | FunctionTool( 118 | analyze_and_predict, 119 | description=( 120 | "Interprets the JSON output from ExtendedCombinedAnalysisAgent. " 121 | "Generates a final Buy/Sell/Hold recommendation with a structured rationale, " 122 | "risk factors, disclaimers, and an explanation of the probability or confidence." 123 | ), 124 | ), 125 | ] 126 | 127 | 128 | @default_subscription 129 | class ForecasterAgent(BaseAgent): 130 | def __init__( 131 | self, 132 | model_client: AzureOpenAIChatCompletionClient, 133 | session_id: str, 134 | user_id: str, 135 | memory: CosmosBufferedChatCompletionContext, 136 | forecaster_tools: List[Tool], 137 | forecaster_tool_agent_id: AgentId, 138 | ): 139 | super().__init__( 140 | "ForecasterAgent", 141 | model_client, 142 | session_id, 143 | user_id, 144 | memory, 145 | forecaster_tools, 146 | forecaster_tool_agent_id, 147 | #system_message="You are an AI Agent. You have knowledge about the SEC annual (10-K) and quarterly (10-Q) reports. SEC reports includes the information about income statement, balance sheet, cash flow, risk assessment, competitor analysis, business highlights and business information." 148 | system_message=dedent( 149 | f""" 150 | You are a Forecaster and Analysis Agent. 151 | Your role is to interpret the output of an extended technical & fundamental analysis pipeline 152 | and additional data from the list of one or more the following: 153 | - Business Overview 154 | - Risk Assessment 155 | - Market Position 156 | - Income Statement 157 | - Segment Statement 158 | - Income Summarization 159 | - Competitor Analysis 160 | - Business Highlights 161 | - Business Information 162 | - Earnings Call Transcripts 163 | - SEC Reports 164 | - Analyst Reports 165 | - News 166 | - Stock Price Data 167 | Produce a final recommendation (Buy, Sell, or Hold) with 168 | a structured format and thorough, bullet-pointed explanation. 169 | You must mention the final probability, interpret it as confidence level, 170 | and provide disclaimers like "Past performance is not indicative of future results. 
171 | """ 172 | ) 173 | ) 174 | -------------------------------------------------------------------------------- /src/backend/helpers/yfutils.py: -------------------------------------------------------------------------------- 1 | import yfinance as yf 2 | from typing import Annotated, Callable, Any, Optional 3 | from pandas import DataFrame 4 | from functools import wraps 5 | from helpers.dutils import decorate_all_methods 6 | from helpers.summarizeutils import get_next_weekday, save_output, SavePathType 7 | import random 8 | from datetime import datetime 9 | 10 | def init_ticker(func: Callable) -> Callable: 11 | """Decorator to initialize yf.Ticker and pass it to the function.""" 12 | 13 | @wraps(func) 14 | def wrapper(symbol: Annotated[str, "ticker symbol"], *args, **kwargs) -> Any: 15 | ticker = yf.Ticker(symbol) 16 | return func(ticker, *args, **kwargs) 17 | 18 | return wrapper 19 | 20 | 21 | @decorate_all_methods(init_ticker) 22 | class yfUtils: 23 | 24 | def get_stock_data( 25 | symbol: Annotated[str, "ticker symbol"], 26 | start_date: Annotated[ 27 | str, "start date for retrieving stock price data, YYYY-mm-dd" 28 | ], 29 | end_date: Annotated[ 30 | str, "end date for retrieving stock price data, YYYY-mm-dd" 31 | ], 32 | save_path: SavePathType = None, 33 | ) -> DataFrame: 34 | """retrieve stock price data for designated ticker symbol""" 35 | ticker = symbol 36 | stock_data = ticker.history(start=start_date, end=end_date) 37 | save_output(stock_data, f"Stock data for {ticker.ticker}", save_path) 38 | return stock_data 39 | 40 | def get_stock_info( 41 | symbol: Annotated[str, "ticker symbol"], 42 | ) -> dict: 43 | """Fetches and returns latest stock information.""" 44 | ticker = symbol 45 | stock_info = ticker.info 46 | return stock_info 47 | 48 | def get_company_info( 49 | symbol: Annotated[str, "ticker symbol"], 50 | save_path: Optional[str] = None, 51 | ) -> DataFrame: 52 | """Fetches and returns company information as a DataFrame.""" 53 | ticker = symbol 54 | info = ticker.info 55 | company_info = { 56 | "Company Name": info.get("shortName", "N/A"), 57 | "Industry": info.get("industry", "N/A"), 58 | "Sector": info.get("sector", "N/A"), 59 | "Country": info.get("country", "N/A"), 60 | "Website": info.get("website", "N/A"), 61 | } 62 | company_info_df = DataFrame([company_info]) 63 | if save_path: 64 | company_info_df.to_csv(save_path) 65 | print(f"Company info for {ticker.ticker} saved to {save_path}") 66 | return company_info_df 67 | 68 | def get_stock_dividends( 69 | symbol: Annotated[str, "ticker symbol"], 70 | save_path: Optional[str] = None, 71 | ) -> DataFrame: 72 | """Fetches and returns the latest dividends data as a DataFrame.""" 73 | ticker = symbol 74 | dividends = ticker.dividends 75 | if save_path: 76 | dividends.to_csv(save_path) 77 | print(f"Dividends for {ticker.ticker} saved to {save_path}") 78 | return dividends 79 | 80 | def get_income_stmt(symbol: Annotated[str, "ticker symbol"]) -> DataFrame: 81 | """Fetches and returns the latest income statement of the company as a DataFrame.""" 82 | ticker = symbol 83 | income_stmt = ticker.financials 84 | return income_stmt 85 | 86 | def get_balance_sheet(symbol: Annotated[str, "ticker symbol"]) -> DataFrame: 87 | """Fetches and returns the latest balance sheet of the company as a DataFrame.""" 88 | ticker = symbol 89 | balance_sheet = ticker.balance_sheet 90 | return balance_sheet 91 | 92 | def get_cash_flow(symbol: Annotated[str, "ticker symbol"]) -> DataFrame: 93 | """Fetches and returns the latest cash flow statement 
of the company as a DataFrame.""" 94 | ticker = symbol # the init_ticker decorator has already turned the symbol string into a yf.Ticker instance 95 | cash_flow = ticker.cashflow 96 | return cash_flow 97 | 98 | def get_company_news( 99 | symbol: Annotated[str, "ticker symbol"], 100 | start_date: Annotated[ 101 | str, 102 | "start date of the search period for the company's news, yyyy-mm-dd", 103 | ], 104 | end_date: Annotated[ 105 | str, 106 | "end date of the search period for the company's news, yyyy-mm-dd", 107 | ], 108 | max_news_num: Annotated[ 109 | int, "maximum number of news items to return, defaults to 25" 110 | ] = 25, 111 | ) -> DataFrame: 112 | """Fetches recent news items for the given ticker and returns them as a DataFrame.""" 113 | # Note: ticker.news only exposes the most recent items; start_date/end_date are not applied as a filter here. 114 | ticker = symbol 115 | tickerNews = ticker.news 116 | 117 | if tickerNews: 118 | news = [ 119 | { 120 | #"date": datetime.fromtimestamp(n["providerPublishTime"]).strftime("%Y-%m-%d %H%M%S"), 121 | "date": n['content']["pubDate"], 122 | "headline": n['content']["title"], 123 | "summary": n['content']["summary"], 124 | } 125 | for n in tickerNews 126 | ] 127 | if len(news) > max_news_num: 128 | news = random.sample(news, max_news_num) # sample without replacement so items are not duplicated 129 | news.sort(key=lambda x: x["date"]) 130 | output = DataFrame(news) 131 | return output 132 | else: 133 | return f"Failed to retrieve data: {symbol}" 134 | 135 | def get_analyst_recommendations(symbol: Annotated[str, "ticker symbol"]) -> tuple: 136 | """Fetches the latest analyst recommendations and returns the most common recommendation and its count.""" 137 | ticker = symbol 138 | recommendations = ticker.recommendations 139 | if recommendations is None or recommendations.empty: 140 | return None, 0 # No recommendations available 141 | 142 | # Assuming 'period' column exists and needs to be excluded 143 | row_0 = recommendations.iloc[0, 1:] # Exclude 'period' column if necessary 144 | 145 | # Find the maximum voting result 146 | max_votes = row_0.max() 147 | majority_voting_result = row_0[row_0 == max_votes].index.tolist() 148 | 149 | return majority_voting_result[0], max_votes 150 | 151 | def get_fundamentals(symbol: Annotated[str, "ticker symbol"]) -> DataFrame: 152 | """Fetches and returns the latest fundamentals data as a DataFrame.""" 153 | ticker = symbol 154 | info = ticker.info # yfinance's fundamental data 155 | # Some commonly used fields: 'forwardPE', 'trailingPE', 'priceToBook', 'beta', 'profitMargins', etc. 156 | # Not all fields are guaranteed to exist for every ticker.
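# Each value below is read with info.get(field, None), so a metric yfinance does not report comes back as None; treat a missing entry as "not available" rather than zero.
# The selection covers valuation (P/E, price-to-book, EV multiples), per-share earnings, leverage (debtToEquity), profitability (returnOnEquity/returnOnAssets), and liquidity (currentRatio/quickRatio).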
157 | fundamentals = { 158 | "forwardPE": info.get("forwardPE", None), 159 | "trailingPE": info.get("trailingPE", None), 160 | "priceToBook": info.get("priceToBook", None), 161 | "beta": info.get("beta", None), 162 | "bookValue": info.get("bookValue", None), 163 | "trailingEps": info.get("trailingEps", None), 164 | "forwardEps": info.get("forwardEps", None), 165 | "enterpriseToRevenue": info.get("enterpriseToRevenue", None), 166 | "enterpriseToEbitda": info.get("enterpriseToEbitda", None), 167 | "debtToEquity": info.get("debtToEquity", None), 168 | "returnOnEquity": info.get("returnOnEquity", None), 169 | "returnOnAssets": info.get("returnOnAssets", None), 170 | "currentRatio": info.get("currentRatio", None), 171 | "quickRatio": info.get("quickRatio", None), 172 | "trailingPegRatio": info.get("trailingPegRatio", None), 173 | } 174 | 175 | fundamentals_df = DataFrame([fundamentals]) 176 | return fundamentals_df -------------------------------------------------------------------------------- /src/frontend/wwwroot/assets/app-logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /src/backend/helpers/secutils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | from sec_api import ExtractorApi, QueryApi, RenderApi 4 | from functools import wraps 5 | from typing import Annotated 6 | from helpers.fmputils import fmpUtils 7 | from helpers.dutils import decorate_all_methods 8 | from helpers.summarizeutils import SavePathType 9 | 10 | CACHE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), ".cache") 11 | PDF_GENERATOR_API = "https://api.sec-api.io/filing-reader" 12 | 13 | 14 | def init_sec_api(func): 15 | @wraps(func) 16 | def wrapper(*args, **kwargs): 17 | global extractor_api, query_api, render_api 18 | if os.environ.get("SEC_API_KEY") is None: 19 | print("Please set the environment variable SEC_API_KEY to use sec_api.") 20 | return None 21 | else: 22 | extractor_api = ExtractorApi(os.environ["SEC_API_KEY"]) 23 | query_api = QueryApi(os.environ["SEC_API_KEY"]) 24 | render_api = RenderApi(os.environ["SEC_API_KEY"]) 25 | print("Sec Api initialized") 26 | return func(*args, **kwargs) 27 | 28 | return wrapper 29 | 30 | 31 | @decorate_all_methods(init_sec_api) 32 | class SECUtils: 33 | 34 | def get_10k_metadata( 35 | ticker: Annotated[str, "ticker symbol"], 36 | start_date: Annotated[ 37 | str, "start date of the 10-k file search range, in yyyy-mm-dd format" 38 | ], 39 | end_date: Annotated[ 40 | str, "end date of the 10-k file search range, in yyyy-mm-dd format" 41 | ], 42 | ): 43 | """ 44 | Search for 10-k filings within a given time period, and return the meta data of the latest one 45 | """ 46 | query = { 47 | "query": f'ticker:"{ticker}" AND formType:"10-K" AND filedAt:[{start_date} TO {end_date}]', 48 | "from": 0, 49 | "size": 10, 50 | "sort": [{"filedAt": {"order": "desc"}}], 51 | } 52 | response = query_api.get_filings(query) 53 | if response["filings"]: 54 | return response["filings"][0] 55 | return None 56 | 57 | def download_10k_filing( 58 | ticker: Annotated[str, "ticker symbol"], 
59 | start_date: Annotated[ 60 | str, "start date of the 10-k file search range, in yyyy-mm-dd format" 61 | ], 62 | end_date: Annotated[ 63 | str, "end date of the 10-k file search range, in yyyy-mm-dd format" 64 | ], 65 | save_folder: Annotated[ 66 | str, "name of the folder to store the downloaded filing" 67 | ], 68 | ) -> str: 69 | """Download the latest 10-K filing as htm for a given ticker within a given time period.""" 70 | metadata = SECUtils.get_10k_metadata(ticker, start_date, end_date) 71 | if metadata: 72 | ticker = metadata["ticker"] 73 | url = metadata["linkToFilingDetails"] 74 | 75 | try: 76 | date = metadata["filedAt"][:10] 77 | file_name = date + "_" + metadata["formType"] + "_" + url.split("/")[-1] 78 | 79 | if not os.path.isdir(save_folder): 80 | os.makedirs(save_folder) 81 | 82 | file_content = render_api.get_filing(url) 83 | file_path = os.path.join(save_folder, file_name) 84 | with open(file_path, "w") as f: 85 | f.write(file_content) 86 | return f"{ticker}: download succeeded. Saved to {file_path}" 87 | except: 88 | return f"❌ {ticker}: downloaded failed: {url}" 89 | else: 90 | return f"No 2023 10-K filing found for {ticker}" 91 | 92 | def download_10k_pdf( 93 | ticker: Annotated[str, "ticker symbol"], 94 | start_date: Annotated[ 95 | str, "start date of the 10-k file search range, in yyyy-mm-dd format" 96 | ], 97 | end_date: Annotated[ 98 | str, "end date of the 10-k file search range, in yyyy-mm-dd format" 99 | ], 100 | save_folder: Annotated[ 101 | str, "name of the folder to store the downloaded pdf filing" 102 | ], 103 | ) -> str: 104 | """Download the latest 10-K filing as pdf for a given ticker within a given time period.""" 105 | metadata = SECUtils.get_10k_metadata(ticker, start_date, end_date) 106 | if metadata: 107 | ticker = metadata["ticker"] 108 | filing_url = metadata["linkToFilingDetails"] 109 | 110 | try: 111 | date = metadata["filedAt"][:10] 112 | print(filing_url.split("/")[-1]) 113 | file_name = ( 114 | date 115 | + "_" 116 | + metadata["formType"].replace("/A", "") 117 | + "_" 118 | + filing_url.split("/")[-1] 119 | + ".pdf" 120 | ) 121 | 122 | if not os.path.isdir(save_folder): 123 | os.makedirs(save_folder) 124 | 125 | api_url = f"{PDF_GENERATOR_API}?token={os.environ['SEC_API_KEY']}&type=pdf&url={filing_url}" 126 | response = requests.get(api_url, stream=True) 127 | response.raise_for_status() 128 | 129 | file_path = os.path.join(save_folder, file_name) 130 | with open(file_path, "wb") as file: 131 | for chunk in response.iter_content(chunk_size=8192): 132 | file.write(chunk) 133 | return f"{ticker}: download succeeded. Saved to {file_path}" 134 | except Exception as e: 135 | return f"❌ {ticker}: downloaded failed: {filing_url}, {e}" 136 | else: 137 | return f"No 2023 10-K filing found for {ticker}" 138 | 139 | def get_10k_section( 140 | ticker_symbol: Annotated[str, "ticker symbol"], 141 | fyear: Annotated[str, "fiscal year of the 10-K report"], 142 | section: Annotated[ 143 | str | int, 144 | "Section of the 10-K report to extract, should be in [1, 1A, 1B, 2, 3, 4, 5, 6, 7, 7A, 8, 9, 9A, 9B, 10, 11, 12, 13, 14, 15]", 145 | ], 146 | report_address: Annotated[ 147 | str, 148 | "URL of the 10-K report, if not specified, will get report url from fmp api", 149 | ] = None, 150 | save_path: SavePathType = None, 151 | ) -> str: 152 | """ 153 | Get a specific section of a 10-K report from the SEC API. 
154 | """ 155 | if isinstance(section, int): 156 | section = str(section) 157 | if section not in [ 158 | "1A", 159 | "1B", 160 | "7A", 161 | "9A", 162 | "9B", 163 | ] + [str(i) for i in range(1, 16)]: 164 | raise ValueError( 165 | "Section must be in [1, 1A, 1B, 2, 3, 4, 5, 6, 7, 7A, 8, 9, 9A, 9B, 10, 11, 12, 13, 14, 15]" 166 | ) 167 | 168 | # os.makedirs(f"{self.project_dir}/10k", exist_ok=True) 169 | 170 | # report_name = f"{self.project_dir}/10k/section_{section}.txt" 171 | 172 | # if USE_CACHE and os.path.exists(report_name): 173 | # with open(report_name, "r") as f: 174 | # section_text = f.read() 175 | # else: 176 | if report_address is None: 177 | report_address = fmpUtils.get_sec_report(ticker_symbol, fyear) 178 | if report_address.startswith("Link: "): 179 | report_address = report_address.removeprefix("Link: ").split()[0] # keep only the URL that follows the "Link: " prefix 180 | else: 181 | return report_address # fmp returned an error message instead of a link; pass it through for debugging 182 | 183 | cache_path = os.path.join( 184 | CACHE_PATH, f"sec_utils/{ticker_symbol}_{fyear}_{section}.txt" 185 | ) 186 | if os.path.exists(cache_path): 187 | with open(cache_path, "r") as f: 188 | section_text = f.read() 189 | else: 190 | section_text = extractor_api.get_section(report_address, section, "text") 191 | os.makedirs(os.path.dirname(cache_path), exist_ok=True) 192 | with open(cache_path, "w") as f: 193 | f.write(section_text) 194 | 195 | if save_path: 196 | os.makedirs(os.path.dirname(save_path), exist_ok=True) 197 | with open(save_path, "w") as f: 198 | f.write(section_text) 199 | 200 | return section_text --------------------------------------------------------------------------------
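For readers who want to exercise SECUtils directly, here is a minimal, illustrative driver. It assumes the module is importable as helpers.secutils (matching the imports above), that SEC_API_KEY is set, and that the fmp helper used to resolve the report URL has its own API key configured; the ticker, dates, and section are placeholder values, not project defaults.

```python
# Illustrative usage of SECUtils; ticker, dates, and section values are placeholders.
import os

from helpers.secutils import SECUtils

assert os.environ.get("SEC_API_KEY"), "SEC_API_KEY must be set before calling sec_api"

# Item 1A (Risk Factors) of the fiscal-2023 10-K, with the report URL resolved via fmpUtils.get_sec_report.
risk_factors = SECUtils.get_10k_section("MSFT", "2023", "1A")
print(risk_factors[:500])

# Download the latest 10-K filed in the given window as a PDF into ./filings.
print(SECUtils.download_10k_pdf("MSFT", "2023-01-01", "2023-12-31", save_folder="filings"))
```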
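The same applies to the yfinance helper class further up: because of the init_ticker decorator, every yfUtils method is called with a plain ticker-symbol string even though the method body works with a yf.Ticker object. A small illustrative driver follows; the symbol and date range are placeholders.

```python
# Illustrative usage of yfUtils; the symbol and date range are placeholders.
from helpers.yfutils import yfUtils

prices = yfUtils.get_stock_data("MSFT", "2024-01-01", "2024-06-30")   # OHLCV history DataFrame
fundamentals = yfUtils.get_fundamentals("MSFT")                       # one-row DataFrame of ratios
news = yfUtils.get_company_news("MSFT", "2024-01-01", "2024-06-30", max_news_num=5)

print(prices.tail())
print(fundamentals.T)
print(news)
```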