├── .github
├── CODE_OF_CONDUCT.md
├── ISSUE_TEMPLATE.md
└── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── THIRD_PARTY_LICENSES.md
├── assets
├── AssistantToolMultiFunctions.png
├── AzureAIAssistantTool1.png
├── AzureAIAssistantTool2.png
├── FileSearchAssistant.png
├── ImageInputAssistant.png
├── PetsTravelPlannerAssistant.png
├── RealtimeAssistant.mp4
├── kws.table
└── silero_vad.onnx
├── config
├── AzureLogicAppFunctionCreator_assistant_config.yaml
├── ConversationTitleCreator_assistant_config.yaml
├── FunctionImplCreator_assistant_config.yaml
├── FunctionSpecCreator_assistant_config.yaml
├── InstructionsReviewer_assistant_config.yaml
├── TaskRequestsCreator_assistant_config.yaml
├── function_error_specs.json
└── system_function_specs.json
├── gui
├── __init__.py
├── assistant_client_manager.py
├── assistant_dialogs.py
├── assistant_gui_workers.py
├── conversation.py
├── conversation_sidebar.py
├── debug_dialog.py
├── diagnostic_sidebar.py
├── function_dialogs.py
├── images
│ ├── assistant_icon.png
│ ├── mic_off.png
│ ├── mic_on.png
│ └── paperclip_icon.png
├── log_broadcaster.py
├── main_window.py
├── menu.py
├── settings_dialogs.py
├── signals.py
├── status_bar.py
├── task_dialogs.py
└── utils.py
├── main.py
├── requirements.txt
├── samples
├── FileSearch
│ ├── README.md
│ ├── docker-compose.yaml
│ └── src
│ │ ├── .dockerignore
│ │ ├── Dockerfile
│ │ ├── __init__.py
│ │ ├── config
│ │ └── file_search_assistant_config.yaml
│ │ ├── files
│ │ ├── product_info_1.md
│ │ └── product_info_2.md
│ │ ├── gunicorn.conf.py
│ │ ├── pyproject.toml
│ │ ├── quartapp
│ │ ├── __init__.py
│ │ ├── chat.py
│ │ ├── static
│ │ │ ├── ChatClient.js
│ │ │ ├── ChatUI.js
│ │ │ ├── main.js
│ │ │ └── styles.css
│ │ └── templates
│ │ │ └── index.html
│ │ └── requirements.txt
├── ImageInput
│ ├── README.md
│ ├── docker-compose.yaml
│ └── src
│ │ ├── .dockerignore
│ │ ├── Dockerfile
│ │ ├── __init__.py
│ │ ├── config
│ │ └── image_input_assistant_config.yaml
│ │ ├── gunicorn.conf.py
│ │ ├── pyproject.toml
│ │ ├── quartapp
│ │ ├── __init__.py
│ │ ├── chat.py
│ │ ├── static
│ │ │ ├── ChatClient.js
│ │ │ ├── ChatUI.js
│ │ │ ├── main.js
│ │ │ └── styles.css
│ │ └── templates
│ │ │ └── index.html
│ │ └── requirements.txt
├── MultiAgentCodeOrchestration
│ ├── README.md
│ ├── config
│ │ ├── CodeInspectionAgent_assistant_config.yaml
│ │ ├── CodeProgrammerAgent_assistant_config.yaml
│ │ ├── FileCreatorAgent_assistant_config.yaml
│ │ ├── TaskExecutionAgent_assistant_config.yaml
│ │ ├── TaskPlannerAgent_assistant_config.yaml
│ │ ├── UserAgent_assistant_config.yaml
│ │ └── function_error_specs.json
│ └── main.py
└── PetTravelPlanChatAssistant
│ ├── FormTemplateForPetTransportation.yaml
│ ├── README.md
│ ├── config
│ ├── PetTravelPlanChatAssistant_assistant_config.yaml
│ └── function_error_specs.json
│ ├── functions
│ └── user_functions.py
│ └── main.py
├── sdk
└── azure-ai-assistant
│ ├── README.md
│ ├── azure
│ ├── __init__.py
│ └── ai
│ │ ├── __init__.py
│ │ └── assistant
│ │ ├── __init__.py
│ │ ├── _version.py
│ │ ├── audio
│ │ ├── __init__.py
│ │ ├── audio_capture.py
│ │ ├── audio_playback.py
│ │ ├── azure_keyword_recognizer.py
│ │ ├── realtime_audio.py
│ │ └── vad.py
│ │ ├── functions
│ │ ├── __init__.py
│ │ ├── file_functions.py
│ │ ├── llm_functions.py
│ │ └── system_function_mappings.py
│ │ ├── management
│ │ ├── __init__.py
│ │ ├── agent_client.py
│ │ ├── agent_stream_event_handler.py
│ │ ├── ai_client_factory.py
│ │ ├── ai_client_type.py
│ │ ├── assistant_client.py
│ │ ├── assistant_client_callbacks.py
│ │ ├── assistant_config.py
│ │ ├── assistant_config_manager.py
│ │ ├── async_assistant_client.py
│ │ ├── async_assistant_client_callbacks.py
│ │ ├── async_chat_assistant_client.py
│ │ ├── async_conversation.py
│ │ ├── async_conversation_thread_client.py
│ │ ├── async_message.py
│ │ ├── async_stream_event_handler.py
│ │ ├── async_task.py
│ │ ├── async_task_manager.py
│ │ ├── async_task_manager_callbacks.py
│ │ ├── attachment.py
│ │ ├── azure_functions_manager.py
│ │ ├── azure_logic_app_manager.py
│ │ ├── base_assistant_client.py
│ │ ├── base_chat_assistant_client.py
│ │ ├── chat_assistant_client.py
│ │ ├── conversation.py
│ │ ├── conversation_thread_client.py
│ │ ├── conversation_thread_config.py
│ │ ├── exceptions.py
│ │ ├── function_config.py
│ │ ├── function_config_manager.py
│ │ ├── logger_module.py
│ │ ├── message.py
│ │ ├── message_utils.py
│ │ ├── realtime_assistant_client.py
│ │ ├── stream_event_handler.py
│ │ ├── task.py
│ │ ├── task_manager.py
│ │ ├── task_manager_callbacks.py
│ │ └── text_message.py
│ │ └── py.typed
│ ├── setup.py
│ └── test
│ ├── resources
│ ├── product_info_1.md
│ ├── product_info_2.md
│ └── scenery.png
│ ├── test_assistant_client.py
│ └── test_async_assistant_client.py
└── templates
├── async_main_template.py
├── async_stream_template.py
├── main_template.py
├── multi_template.py
├── realtime_audio_template.py
└── realtime_text_template.py
/.github/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Microsoft Open Source Code of Conduct
2 |
3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
4 |
5 | Resources:
6 |
7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
10 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
4 | > Please provide us with the following information:
5 | > ---------------------------------------------------------------
6 |
7 | ### This issue is for a: (mark with an `x`)
8 | ```
9 | - [ ] bug report -> please search issues before submitting
10 | - [ ] feature request
11 | - [ ] documentation issue or request
12 | - [ ] regression (a behavior that used to work and stopped in a new release)
13 | ```
14 |
15 | ### Minimal steps to reproduce
16 | >
17 |
18 | ### Any log messages given by the failure
19 | >
20 |
21 | ### Expected/desired behavior
22 | >
23 |
24 | ### OS and Version?
25 | > Windows 7, 8 or 10. Linux (which distribution). macOS (Yosemite? El Capitan? Sierra?)
26 |
27 | ### Versions
28 | >
29 |
30 | ### Mention any other details that might be useful
31 |
32 | > ---------------------------------------------------------------
33 | > Thanks! We'll be in touch soon.
34 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Purpose
2 |
3 | * ...
4 |
5 | ## Does this introduce a breaking change?
6 |
7 | ```
8 | [ ] Yes
9 | [ ] No
10 | ```
11 |
12 | ## Pull Request Type
13 | What kind of change does this Pull Request introduce?
14 |
15 |
16 | ```
17 | [ ] Bugfix
18 | [ ] Feature
19 | [ ] Code style update (formatting, local variables)
20 | [ ] Refactoring (no functional changes, no api changes)
21 | [ ] Documentation content changes
22 | [ ] Other... Please describe:
23 | ```
24 |
25 | ## How to Test
26 | * Get the code
27 |
28 | ```
29 | git clone [repo-address]
30 | cd [repo-name]
31 | git checkout [branch-name]
32 | npm install
33 | ```
34 |
35 | * Test the code
36 |
37 | ```
38 | ```
39 |
40 | ## What to Check
41 | Verify that the following are valid
42 | * ...
43 |
44 | ## Other Information
45 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ## [project-title] Changelog
2 |
3 |
4 | # x.y.z (yyyy-mm-dd)
5 |
6 | *Features*
7 | * ...
8 |
9 | *Bug Fixes*
10 | * ...
11 |
12 | *Breaking Changes*
13 | * ...
14 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Microsoft Open Source Code of Conduct
2 |
3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
4 |
5 | Resources:
6 |
7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
10 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to [project-title]
2 |
3 | This project welcomes contributions and suggestions. Most contributions require you to agree to a
4 | Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
5 | the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
6 |
7 | When you submit a pull request, a CLA bot will automatically determine whether you need to provide
8 | a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
9 | provided by the bot. You will only need to do this once across all repos using our CLA.
10 |
11 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
12 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
13 | contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
14 |
15 | - [Code of Conduct](#coc)
16 | - [Issues and Bugs](#issue)
17 | - [Feature Requests](#feature)
18 | - [Submission Guidelines](#submit)
19 |
20 | ## Code of Conduct
21 | Help us keep this project open and inclusive. Please read and follow our [Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
22 |
23 | ## Found an Issue?
24 | If you find a bug in the source code or a mistake in the documentation, you can help us by
25 | [submitting an issue](#submit-issue) to the GitHub Repository. Even better, you can
26 | [submit a Pull Request](#submit-pr) with a fix.
27 |
28 | ## Want a Feature?
29 | You can *request* a new feature by [submitting an issue](#submit-issue) to the GitHub
30 | Repository. If you would like to *implement* a new feature, please submit an issue with
31 | a proposal for your work first, to be sure that we can use it.
32 |
33 | * **Small Features** can be crafted and directly [submitted as a Pull Request](#submit-pr).
34 |
35 | ## Submission Guidelines
36 |
37 | ### Submitting an Issue
38 | Before you submit an issue, search the archive, maybe your question was already answered.
39 |
40 | If your issue appears to be a bug, and hasn't been reported, open a new issue.
41 | Help us to maximize the effort we can spend fixing issues and adding new
42 | features, by not reporting duplicate issues. Providing the following information will increase the
43 | chances of your issue being dealt with quickly:
44 |
45 | * **Overview of the Issue** - if an error is being thrown a non-minified stack trace helps
46 | * **Version** - what version is affected (e.g. 0.1.2)
47 | * **Motivation for or Use Case** - explain what are you trying to do and why the current behavior is a bug for you
48 | * **Browsers and Operating System** - is this a problem with all browsers?
49 | * **Reproduce the Error** - provide a live example or an unambiguous set of steps
50 | * **Related Issues** - has a similar issue been reported before?
51 | * **Suggest a Fix** - if you can't fix the bug yourself, perhaps you can point to what might be
52 | causing the problem (line of code or commit)
53 |
54 | You can file new issues by providing the above information at the corresponding repository's issues link: https://github.com/[organization-name]/[repository-name]/issues/new.
55 |
56 | ### Submitting a Pull Request (PR)
57 | Before you submit your Pull Request (PR) consider the following guidelines:
58 |
59 | * Search the repository (https://github.com/[organization-name]/[repository-name]/pulls) for an open or closed PR
60 | that relates to your submission. You don't want to duplicate effort.
61 |
62 | * Make your changes in a new git fork:
63 |
64 | * Commit your changes using a descriptive commit message
65 | * Push your fork to GitHub:
66 | * In GitHub, create a pull request
67 | * If we suggest changes then:
68 | * Make the required updates.
69 | * Rebase your fork and force push to your GitHub repository (this will update your Pull Request):
70 |
71 | ```shell
72 | git rebase master -i
73 | git push -f
74 | ```
75 |
76 | That's it! Thank you for your contribution!
77 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Azure Samples
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/THIRD_PARTY_LICENSES.md:
--------------------------------------------------------------------------------
1 | # Third-Party Components and Licenses
2 |
3 | This file provides information regarding third-party components included in this project (or referenced by it) and their respective licenses. Please review these licenses to ensure compliance.
4 |
5 | ---
6 |
7 | ## 1. Silero Voice Activity Detector (VAD) Model
8 |
9 | - **Repository:**
10 | [https://github.com/snakers4/silero-vad](https://github.com/snakers4/silero-vad)
11 |
12 | - **License:**
13 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)
14 |
15 | - **Full License Text:**
16 | The full text of the CC BY-NC-SA 4.0 license can be accessed here:
17 | [https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode](https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode)
18 |
19 | **NOTE:** The Silero VAD model is not bundled directly in this repository’s wheel by default. If you opt to download and use it, you must comply with the CC BY-NC-SA 4.0 license—particularly regarding noncommercial use and share-alike obligations. Our code remains licensed under the MIT License, found in `LICENSE`.
--------------------------------------------------------------------------------
/assets/AssistantToolMultiFunctions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/assets/AssistantToolMultiFunctions.png
--------------------------------------------------------------------------------
/assets/AzureAIAssistantTool1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/assets/AzureAIAssistantTool1.png
--------------------------------------------------------------------------------
/assets/AzureAIAssistantTool2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/assets/AzureAIAssistantTool2.png
--------------------------------------------------------------------------------
/assets/FileSearchAssistant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/assets/FileSearchAssistant.png
--------------------------------------------------------------------------------
/assets/ImageInputAssistant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/assets/ImageInputAssistant.png
--------------------------------------------------------------------------------
/assets/PetsTravelPlannerAssistant.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/assets/PetsTravelPlannerAssistant.png
--------------------------------------------------------------------------------
/assets/RealtimeAssistant.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/assets/RealtimeAssistant.mp4
--------------------------------------------------------------------------------
/assets/kws.table:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/assets/kws.table
--------------------------------------------------------------------------------
/assets/silero_vad.onnx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/assets/silero_vad.onnx
--------------------------------------------------------------------------------
/config/AzureLogicAppFunctionCreator_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: AzureLogicAppFunctionCreator
2 | instructions: |-
3 | ## Pre-requisites for processing
4 | - The user request must provide a function specification that includes the Logic App's name and a JSON schema detailing the expected input parameters.
5 | - If the request is unrelated to Logic Apps, instruct the user to supply both the Logic App name and its associated HTTP trigger JSON schema.
6 |
7 | ## Requirements
8 | 1. Create a Python function implementation based on the given function specification for the Logic App. The function's name must be derived from the Logic App's name (converted to lowercase and with spaces replaced by underscores).
9 | 2. The function must use valid Python code and be executable with the following command:
10 |        python -c "from functions.user_functions import function_name; function_name()"
11 | 3. The function signature should include parameters that correspond to the keys defined in the provided JSON schema (if applicable). This means that for each expected input described by the schema, there should be a matching parameter.
12 | 4. Inside the function, create a payload dictionary composed of these parameters. This payload will be sent to the Logic App.
13 | 5. Use the following call to invoke the Logic App:
14 | result = service.invoke_logic_app(logic_app_name, payload)
15 | where logic_app_name is the original Logic App name provided.
16 | 6. Handle error scenarios gracefully. For error handling, include the following imports:
17 | from azure.ai.assistant.management.logger_module import logger
18 | from azure.ai.assistant.management.azure_logic_app_manager import AzureLogicAppManager
19 | 7. Use these error types for handling different conditions:
20 | ["file_not_found", "directory_not_found", "no_matching_folders_found", "no_matching_files_found", "json_load_content_error", "invalid_input", "generic_error"]
21 | 8. Any exceptions should be caught and a clear error message returned via json.dumps() with a key "function_error". On success, the function must return the result as a JSON string with the key "result".
22 | 9. The final output must consist solely of the generated code without any markdown formatting (e.g., no triple backticks).
23 |
24 | ## Example
25 | For example, if the Logic App is named "Send Email" and the provided JSON schema indicates that the function should accept parameters named "recipient", "subject", and "body", the generated function might look like:
26 |
27 | def send_email_via_logic_app(recipient: str, subject: str, body: str) -> str:
28 | """
29 | Sends an email by invoking the specified Logic App with the given recipient, subject, and body.
30 |
31 | :param recipient: The email address of the recipient.
32 | :param subject: The subject of the email.
33 | :param body: The body of the email.
34 | :return: A JSON string summarizing the result of the operation.
35 | """
36 | service = AzureLogicAppManager.get_instance()
37 | payload = {
38 | "to": recipient,
39 | "subject": subject,
40 | "body": body,
41 | }
42 | try:
43 | result = service.invoke_logic_app("Send Email", payload)
44 | return json.dumps({"result": result})
45 | except Exception as e:
46 | function_config_manager = FunctionConfigManager()
47 | error_message = function_config_manager.get_error_message("generic_error")
48 | logger.error(error_message)
49 | return json.dumps({"function_error": error_message})
50 |
51 | ## Note:
52 | - Use the provided JSON schema to dynamically define the required parameters for the function.
53 | - Ensure that the function implementation exactly follows the given instructions and produces only valid Python code.
54 | model: o1
55 | assistant_id:
56 | code_interpreter: false
57 | output_folder_path: ''
58 | ai_client_type: OPEN_AI
59 | assistant_type: chat_assistant
60 | assistant_role: system
61 | file_references: []
62 | completion_settings: null
63 | tool_resources: null
64 | file_search: false
65 | functions: []
66 | config_folder: null
67 | audio: null
68 | realtime_settings: null
69 |
--------------------------------------------------------------------------------
/config/ConversationTitleCreator_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: ConversationTitleCreator
2 | instructions: |-
3 | ## Pre-requisites for processing
4 | - You will get initial text as input.
5 |
6 | ## Requirements
 7 | 1. You are required to create a title for the given text by finding the overall theme.
 8 | 2. The end result (title) must be only 3 words long at max. Returning more than 3 words will be a failure.
9 | model: ''
10 | assistant_id: null
11 | functions: []
12 | code_interpreter: false
13 | output_folder_path: ''
14 | ai_client_type: AZURE_OPEN_AI
15 | assistant_type: chat_assistant
16 | assistant_role: system
17 | file_references: []
18 | completion_settings: null
19 | tool_resources: null
20 | file_search: false
--------------------------------------------------------------------------------
/config/FunctionImplCreator_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: FunctionImplCreator
2 | instructions: |-
3 | ## Pre-requisites for processing
4 | - User requests to implement Python function from the JSON specification.
5 | - If user requests something totally different, then you shall instruct the user what input you need in order to process the request.
6 |
7 | ## Requirements
8 | 1. You are required to create function implementation from given function specification using Python programming language.
9 | 2. The implementation must be valid Python code and executable in the following way: `python -c from functions.user_functions import function_name; function_name()`.
10 | 3. For error handling, you shall include following imports:
11 | ```
12 | from azure.ai.assistant.management.logger_module import logger
13 | from azure.ai.assistant.management.function_config_manager import FunctionConfigManager
14 | ```
15 | 4. Use the following error types for handling different scenarios:
16 | ```
17 | ["file_not_found", "directory_not_found", "no_matching_folders_found", "no_matching_files_found", "json_load_content_error", "invalid_input", "generic_error"]
18 | ```
19 | 5. An example below of an imaginary function that returns a result and handles input errors using the given error types:
20 | ```
21 | # FunctionConfigManager is singleton and required for retrieving error messages for possible error types
22 | def new_user_function(directory):
23 | function_config_manager = FunctionConfigManager()
24 | if not os.path.isdir(directory):
25 | error_message = function_config_manager.get_error_message("directory_not_found")
26 |         logger.error(error_message); return json.dumps({"function_error": error_message})
27 | # Generate result
28 | return json.dumps({"result": result})
29 | ```
30 | 6. Ensure your function handles errors gracefully and returns a clear error message in case of exceptions.
31 | 7. Ensure function returns result using json.dumps() and where "result" is key and its value is the result.
32 | 8. The end result must be only code and must not contain triple backticks, otherwise, it is considered a failure.
33 | model: ''
34 | assistant_id: null
35 | code_interpreter: false
36 | output_folder_path: ''
37 | ai_client_type: AZURE_OPEN_AI
38 | assistant_type: chat_assistant
39 | assistant_role: system
40 | file_references: []
41 | completion_settings: null
42 | tool_resources: null
43 | file_search: false
44 | functions: []
45 |
--------------------------------------------------------------------------------
/config/FunctionSpecCreator_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: FunctionSpecCreator
2 | instructions: |-
3 | ## Pre-requisites for processing
4 | - User requests to create a function.
5 | - Example requests, "Create function that generates random number", "Create function that returns current weekday" etc.
6 | - If user requests something else than function creation, then you shall instruct the user what input you need in order to process the request.
7 |
8 | ## Requirements
 9 | 1. You are required to create a function specification from the given user input.
10 | 2. The function specification shall follow JSON template below:
11 | {
12 | "type": "function",
13 | "function": {
14 | "name": "name of function",
15 | "module": "functions.user_functions",
16 | "description": "description of function",
17 | "parameters": {
18 | "type": "object",
19 | "properties": {
20 | "argument_1 of function": {
21 | "type": "string",
22 | "description": "The description of the argument 1"
23 | }
24 | },
25 | "required": [
26 | "argument_1 of function",
27 | "..."
28 | ]
29 | }
30 | }
31 | }
32 | 3. The function spec must have "type" & "function" main blocks.
33 | 4. The "function" must have "name", "module", "description", "parameters" fields.
34 | 5. The module field value shall be "functions.user_functions".
35 | 6. The function name must follow the snake case format.
36 | 7. The module value must not be changed from what is in the template.
37 |   8. The end result must not contain triple backticks, otherwise, it is considered a failure.
38 | model: ''
39 | assistant_id: null
40 | code_interpreter: false
41 | output_folder_path: ''
42 | ai_client_type: AZURE_OPEN_AI
43 | assistant_type: chat_assistant
44 | assistant_role: system
45 | file_references: []
46 | completion_settings: null
47 | tool_resources: null
48 | file_search: false
49 | functions: []
50 |
--------------------------------------------------------------------------------
/config/InstructionsReviewer_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: InstructionsReviewer
2 | instructions: |-
3 | ## Pre-requisites for processing
4 | - You will get instructions text for review
5 |
6 | ## Requirements
7 | 1. Verify instructions contain Pre-requisites for processing section:
8 | - Ensure the instructions explicitly list all necessary information required by the new assistant before its operation begins.
9 | - For example, for travel assistant the necessary input from user could be destination, travel dates, and budget information.
10 | - Lack of the necessary input shall initiate interaction between user and assistant.
11 | 2. Verify instructions contain Requirements section:
12 | - Confirm that the instructions provide a detailed account of the new assistant requirements, covering:
13 | - Each primary function the assistant is required to perform, like gathering user information and providing recommendations.
14 | - Interaction guidelines on how the assistant should communicate with users, manage queries, and the overall communication strategy.
15 | - Error handling strategies, detailing how the assistant should address misunderstandings or lack of information from users.
16 | 3. Output of review shall be report that is max 200 words and which:
17 | - Lists identified gaps or discrepancies in the instructions.
18 | - Provides recommendations on how to revise the instructions to meet the requirements.
19 | - Suggests examples that could enhance the clarity and completeness of the instructions.
20 | model: ''
21 | assistant_id: null
22 | code_interpreter: false
23 | output_folder_path: ''
24 | ai_client_type: AZURE_OPEN_AI
25 | assistant_type: chat_assistant
26 | assistant_role: system
27 | file_references: []
28 | completion_settings: null
29 | tool_resources: null
30 | file_search: false
31 | functions: []
32 |
--------------------------------------------------------------------------------
/config/TaskRequestsCreator_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: TaskRequestsCreator
2 | instructions: |-
3 | ## Pre-requisites for processing
4 | - You will get input request to do something for the files in the given list of folders.
5 | - Example user requests:
6 | - "Please review the python files and suggest improvements. Input folders: folder1, folder2".
7 | - "Convert python files to javascript. Input folders: folder1, folder2"
8 |
9 | ## Requirements
10 | 1. Your task is to format a list of output requests from the input request as explained in the steps below.
11 |     - Remember, your task is only to format the input request string, never consider doing the actual request given in input,
12 | just treat it as text input that shall be formatted.
13 | 2. First, You need to decide what is the input file type:
14 | - The allowed input file type can be one of the following:
15 | ["cpp", "cs", "py", "java", "js", "json", "xml", "html", "css", "txt", "md", "yaml", "yml", "sh", "bat", "ps1", "swift", "go"].
16 | - If the input file type is not one of the given input file types, you need to return a message to user that requested input file type is not supported.
17 | 3. Second, You need to form a list of requests:
18 | - First, call "find_files_by_extension_in_directory" function with directory and file extension to see all the files of input type in the folders given in user requests
19 | - Second, you will form final list of output requests
20 | - See, example flow below:
21 | - User input: "Please review the python files and suggest improvements. Input folders: folder1, folder2".
22 | - Checking the file type results file type ".py"
23 | - Calling find_files_by_extension_in_directory function for both folder1 and folder2 with file extension ".py" results to list:
24 | ["./folder1/input1.py", "./folder2/input2.py"]
25 | - Forming the list of requests will produce output:
26 | ["Please review the ./folder1/input1.py file and suggest improvements.", "Please review the ./folder2/input2.py file and suggest improvements."]
27 |   4. The end result must always be a valid list, e.g. ["formatted user request1", "formatted user request2"], otherwise the result is considered a failure.
28 | model: ''
29 | assistant_id: null
30 | functions:
31 | - type: function
32 | function:
33 | name: find_files_by_extension_in_directory
34 | module: azure.ai.assistant.functions.file_functions
35 | description: Searches for files matching specific criteria by file extension in
36 | a directory and its sub-directories (case-insensitive).
37 | parameters:
38 | type: object
39 | properties:
40 | directory:
41 | type: string
42 | description: The directory to search in.
43 | file_extension:
44 | type: string
45 | description: The file extension to filter by.
46 | required:
47 | - directory
48 | - file_extension
49 | code_interpreter: false
50 | output_folder_path: ''
51 | ai_client_type: AZURE_OPEN_AI
52 | assistant_type: chat_assistant
53 | assistant_role: system
54 | file_references: []
55 | completion_settings: null
56 | tool_resources: null
57 | file_search: false
--------------------------------------------------------------------------------
/config/function_error_specs.json:
--------------------------------------------------------------------------------
1 | {
2 | "file_not_found": "The requested file was not found. Please find the file by name and try again.",
3 | "directory_not_found": "The requested input directory was not found. Please check the directory path e.g. find all folders by name from current directory or retrieve the current directory structure (if not retrieved already) and then revise the directory name",
4 | "no_matching_folders_found": "No matching folders were found. Have you retrieved current directory structure to check if there are similar folder names? If not, find if the current directory structure contains similar folder names, otherwise inform user that no matching folders were found.",
5 | "no_matching_files_found": "No matching files were found. Please find files by an extension from the directory.",
6 | "json_load_content_error": "An error occurred while loading the content to JSON file. Please check the content contains valid json and try once again.",
7 | "invalid_input": "Invalid input provided. Please check the function input parameters and try again.",
8 | "generic_error": "An unexpected error occurred. Please check the function input parameters and try again."
9 | }
--------------------------------------------------------------------------------
/gui/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/gui/__init__.py
--------------------------------------------------------------------------------
/gui/assistant_client_manager.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | from typing import Dict, Optional
5 | from azure.ai.assistant.audio.realtime_audio import RealtimeAudio
6 |
7 |
class AssistantClientManager:
    """
    Singleton registry mapping assistant names to their assistant clients and,
    optionally, to their associated RealtimeAudio instances.

    Note: the original class placed this description as a bare string
    expression after the class attributes, where it was a no-op rather than
    the class docstring; it now sits in the proper position.
    """

    _instance: Optional["AssistantClientManager"] = None
    # Registered assistant clients, keyed by assistant name.
    _clients: Dict[str, object] = {}
    # RealtimeAudio instances, keyed by assistant name; only populated for
    # clients registered with a realtime_audio argument.
    _audios: Dict[str, "RealtimeAudio"] = {}

    @classmethod
    def get_instance(cls) -> 'AssistantClientManager':
        """
        Get the singleton instance of the assistant client manager.

        :return: The singleton instance of the assistant client manager.
        :rtype: AssistantClientManager
        """
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def register_client(
            self,
            name: str,
            assistant_client,  #: AssistantClient
            realtime_audio: Optional["RealtimeAudio"] = None
    ) -> None:
        """
        Register a new assistant client with the given name.

        Re-registering an existing name replaces the stored client; any
        previously stored audio instance is kept unless a new one is given.

        :param name: The name of the assistant client.
        :type name: str
        :param assistant_client: The assistant client to register.
        :type assistant_client: AssistantClient
        :param realtime_audio: The RealtimeAudio instance associated with the assistant client.
        :type realtime_audio: RealtimeAudio, optional

        :return: None
        :rtype: None
        """
        self._clients[name] = assistant_client
        if realtime_audio:
            self._audios[name] = realtime_audio

    def remove_client(self, name: str) -> None:
        """
        Remove an assistant client with the given name.

        Removes the associated RealtimeAudio instance too; a no-op when the
        name is not registered.

        :param name: The name of the assistant client.
        :type name: str

        :return: None
        :rtype: None
        """
        if name in self._clients:
            del self._clients[name]
        if name in self._audios:
            del self._audios[name]

    def get_client(self, name: str):
        """
        Get an assistant client with the given name.

        :param name: The name of the assistant client.
        :type name: str

        :return: The assistant client with the given name, or None if not found.
        :rtype: AssistantClient
        """
        return self._clients.get(name)

    def get_audio(self, name: str) -> Optional["RealtimeAudio"]:
        """
        Get the RealtimeAudio instance associated with the given assistant client name.

        :param name: The name of the assistant client.
        :type name: str

        :return: The RealtimeAudio instance associated with the assistant client, or None if not found.
        :rtype: RealtimeAudio or None
        """
        return self._audios.get(name)

    def get_all_clients(self) -> list:
        """
        Get a list of all registered assistant clients.

        :return: A list of all registered assistant clients.
        :rtype: list
        """
        return list(self._clients.values())
--------------------------------------------------------------------------------
/gui/images/assistant_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/gui/images/assistant_icon.png
--------------------------------------------------------------------------------
/gui/images/mic_off.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/gui/images/mic_off.png
--------------------------------------------------------------------------------
/gui/images/mic_on.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/gui/images/mic_on.png
--------------------------------------------------------------------------------
/gui/images/paperclip_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/gui/images/paperclip_icon.png
--------------------------------------------------------------------------------
/gui/log_broadcaster.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | # This software uses the PySide6 library, which is licensed under the GNU Lesser General Public License (LGPL).
5 | # For more details on PySide6's license, see
6 |
7 | from PySide6.QtCore import Signal
8 | from PySide6.QtCore import QObject
9 |
10 |
class StreamCapture(QObject):
    """File-like sink that re-broadcasts everything written to it as a Qt signal."""

    textEmitted = Signal(str)

    def write(self, text):
        """Forward *text* (coerced to str) to all connected slots."""
        payload = str(text)
        self.textEmitted.emit(payload)

    def flush(self):
        """No buffering here; present only to satisfy the stream protocol."""
        pass
19 |
20 |
class LogBroadcaster:
    """Fans messages out to a set of subscriber callbacks."""

    def __init__(self):
        # Callbacks in registration order; duplicates are rejected on subscribe.
        self._subscribers = []

    def subscribe(self, callback):
        """Register *callback* unless it is already subscribed."""
        if callback in self._subscribers:
            return
        self._subscribers.append(callback)

    def unsubscribe(self, callback):
        """Remove *callback* if present; silently ignore unknown callbacks."""
        if callback in self._subscribers:
            self._subscribers.remove(callback)

    def emit(self, message):
        """Deliver *message* to every subscriber in registration order."""
        for subscriber in self._subscribers:
            subscriber(message)
--------------------------------------------------------------------------------
/gui/signals.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | # This software uses the PySide6 library, which is licensed under the GNU Lesser General Public License (LGPL).
5 | # For more details on PySide6's license, see
6 |
7 | from PySide6.QtCore import QObject, Signal
8 |
9 | from gui.status_bar import ActivityStatus
10 |
# NOTE(review): each wrapper below holds exactly one Qt Signal inside its own
# QObject subclass so an owner can instantiate and connect/emit it. Payload
# meanings are stated only where the original code documented them; the rest
# should be confirmed at the emit/connect call sites.

class AppendConversationSignal(QObject):
    # Payload: three strings — presumably sender, message text and a style/color; verify at call sites.
    update_signal = Signal(str, str, str)

class ConversationViewClear(QObject):
    # No payload; requests the conversation view to be cleared.
    update_signal = Signal()

class ConversationAppendMessageSignal(QObject):
    # Payload: a single message object to append.
    append_signal = Signal(object)

class ConversationAppendMessagesSignal(QObject):
    # Payload: a list of message objects to append.
    append_signal = Signal(list)

class ConversationAppendImageSignal(QObject):
    # Payload: one string — presumably an image path or identifier; verify at call sites.
    append_signal = Signal(str)

class ConversationAppendChunkSignal(QObject):
    # Payload: two strings and a bool — presumably sender, chunk text and a boundary flag; verify at call sites.
    append_signal = Signal(str, str, bool)

class StartStatusAnimationSignal(QObject):
    # Payload: the ActivityStatus whose status-bar animation should start.
    start_signal = Signal(ActivityStatus)

class StopStatusAnimationSignal(QObject):
    # Payload: the ActivityStatus whose status-bar animation should stop.
    stop_signal = Signal(ActivityStatus)

class StartProcessingSignal(QObject):
    # Payload: a string and a bool; meaning not documented here — check call sites.
    start_signal = Signal(str, bool)

class StopProcessingSignal(QObject):
    # Payload: a string and a bool; meaning not documented here — check call sites.
    stop_signal = Signal(str, bool)

class UpdateConversationTitleSignal(QObject):
    # Payload: two strings — presumably a thread identifier and the new title; verify at call sites.
    update_signal = Signal(str, str)

class DiagnosticStartRunSignal(QObject):
    # Define a signal that carries assistant name, run identifier, run start time and user input
    start_signal = Signal(str, str, str, str)

class DiagnosticAddFunctionCallSignal(QObject):
    # Define a signal that carries assistant name, function name, arguments and function response
    # NOTE(review): the comment above lists four fields but the signal carries
    # five strings — confirm the fifth field's meaning at the emit site.
    call_signal = Signal(str, str, str, str, str)

class DiagnosticEndRunSignal(QObject):
    # Define a signal that carries assistant name, run end time and assistant messages
    end_signal = Signal(str, str, str, str)

class ErrorSignal(QObject):
    # Define a signal that carries error message
    error_signal = Signal(str)
59 |
--------------------------------------------------------------------------------
/gui/status_bar.py:
--------------------------------------------------------------------------------
1 | from PySide6.QtWidgets import QLabel
2 | from PySide6.QtGui import QFont
3 | from PySide6.QtCore import Qt, QTimer
4 | from enum import Enum
5 |
6 | from azure.ai.assistant.management.logger_module import logger
7 |
8 |
class ActivityStatus(Enum):
    """Kinds of ongoing activity that the status bar can display/animate."""
    PROCESSING = "Processing"
    PROCESSING_USER_INPUT = "UserInput"
    PROCESSING_SCHEDULED_TASK = "ScheduledTask"
    LISTENING_SPEECH = "ListeningSpeech"
    LISTENING_KEYWORD = "ListeningKeyword"
    FUNCTION_EXECUTION = "FunctionExecution"
    DELETING = "Deleting"
17 |
18 |
class StatusBar:
    """Animated activity label for the main window.

    Keeps a reference count per ActivityStatus so overlapping activities
    keep the animation alive until the last one stops.
    """

    def __init__(self, main_window):
        # Parent widget for the label.
        self.main_window = main_window
        self.setup_status_bar()
        # Now active_statuses is a dict mapping statuses to their call counts
        self.active_statuses = {}
        # Name displayed while a DELETING status is active.
        self.current_thread_name = None

    def setup_status_bar(self):
        """Create the label and the timer that drives the dot animation."""
        self.processingLabel = QLabel("", self.main_window)
        self.processingLabel.setFont(QFont("Arial", 11))
        self.processingLabel.setAlignment(Qt.AlignRight)

        # Animation frame index (0..3), advanced on each render.
        self.processingDots = 0
        self.animation_timer = QTimer()
        self.animation_timer.timeout.connect(self.animate_processing_label)

    def animate_processing_label(self):
        """Timer callback: render the label for the currently active statuses.

        Priority: DELETING (early return) > PROCESSING > any other statuses
        joined with " | " > nothing active (stops the animation).
        """
        frames = [" ", ". ", ".. ", "..."]

        # DELETING takes precedence and returns early after advancing the frame.
        if ActivityStatus.DELETING in self.active_statuses:
            base_text = f"Deleting {self.current_thread_name or ''}"
            self.processingLabel.setText(f"{base_text}{frames[self.processingDots]}")
            self.processingDots = (self.processingDots + 1) % 4
            return

        if ActivityStatus.PROCESSING in self.active_statuses:
            base_text = "Processing"
            self.processingLabel.setText(f"{base_text}{frames[self.processingDots]}")

        elif self.active_statuses:
            # Human-readable labels for the remaining status kinds.
            status_labels = {
                ActivityStatus.LISTENING_SPEECH: "Speech Input",
                ActivityStatus.LISTENING_KEYWORD: "Keyword Input",
                ActivityStatus.FUNCTION_EXECUTION: "Function Call",
                ActivityStatus.PROCESSING_USER_INPUT: "User Input",
                ActivityStatus.PROCESSING_SCHEDULED_TASK: "Scheduled Task"
            }
            active_labels = [
                status_labels.get(status, "")
                for status in self.active_statuses.keys()
                if status in status_labels
            ]
            status_message = " | ".join(filter(None, active_labels))
            base_text = f"Processing ({status_message})"
            self.processingLabel.setText(f"{base_text}{frames[self.processingDots]}")
        else:
            # No active statuses
            self.stop_animation()

        self.processingDots = (self.processingDots + 1) % 4

    def start_animation(self, status, interval=500, thread_name=None):
        """Increment the reference count for *status* and ensure the timer runs.

        :param status: ActivityStatus to activate.
        :param interval: Timer interval in ms; applied only when the timer was idle.
        :param thread_name: Name shown while DELETING is active.
        """
        if status == ActivityStatus.DELETING and thread_name:
            self.current_thread_name = thread_name

        # Increase the counter for the given status
        if status in self.active_statuses:
            self.active_statuses[status] += 1
        else:
            self.active_statuses[status] = 1

        if not self.animation_timer.isActive():
            self.animation_timer.setInterval(interval)
            self.animation_timer.start()

        # Render immediately instead of waiting for the first timer tick.
        self.animate_processing_label()

    def stop_animation(self, status=None):
        """Decrement the count for *status* (or clear all when None); stop the
        timer and clear the label once no statuses remain."""
        if status is not None:
            # If the status exists in our active_statuses, decrement its count.
            if status in self.active_statuses:
                self.active_statuses[status] -= 1
                if self.active_statuses[status] <= 0:
                    del self.active_statuses[status]
        else:
            # If no status is provided, clear everything.
            self.active_statuses.clear()

        if not self.active_statuses:
            self.animation_timer.stop()
            self.processingLabel.clear()

    def get_widget(self):
        """Return the QLabel to embed in the window's status area."""
        return self.processingLabel

    def clear_all_statuses(self):
        """Forcefully reset all animation state, ignoring reference counts."""
        self.active_statuses.clear()
        self.animation_timer.stop()
        self.processingLabel.clear()
        self.processingDots = 0
        self.current_thread_name = None
112 |
--------------------------------------------------------------------------------
/gui/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | # This software uses the PySide6 library, which is licensed under the GNU Lesser General Public License (LGPL).
5 | # For more details on PySide6's license, see
6 |
7 | import os
8 | import sys
9 | import re
10 | from typing import Optional
11 |
12 | from PySide6.QtWidgets import QMessageBox
13 |
14 | from azure.ai.assistant.management.logger_module import logger
15 | from azure.ai.assistant.management.assistant_config import AssistantConfig
16 | from azure.ai.assistant.management.ai_client_factory import AIClientFactory
17 | from azure.ai.assistant.management.ai_client_factory import AIClientType
18 | from azure.ai.assistant.management.chat_assistant_client import ChatAssistantClient
19 |
20 |
def resource_path(relative_path):
    """
    Get absolute path to resource, works for development and for PyInstaller
    """
    running_frozen = hasattr(sys, '_MEIPASS')
    if running_frozen:
        logger.info("Running in PyInstaller mode")
        # PyInstaller creates a temp folder and stores path in _MEIPASS
        base_path = sys._MEIPASS
    else:
        logger.info("Running in normal mode")
        base_path = os.path.abspath(".")

    path = os.path.join(base_path, relative_path)
    logger.info(f"Resource path: {path}")
    return path
36 |
37 |
def camel_to_snake(name):
    """
    Convert camel case to snake case
    """
    # First pass splits acronym-word boundaries, second pass splits the rest.
    with_boundaries = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    with_boundaries = re.sub('([a-z0-9])([A-Z])', r'\1_\2', with_boundaries)
    return with_boundaries.lower()
44 |
45 |
def init_system_assistant(instance, assistant_name: str):
    """
    Initialize the system assistant

    :param instance: Owner object; must provide assistant_config_manager,
        system_client_type and system_model. It is also used as the parent for
        warning dialogs, and the created client is stored on it as a
        snake_case attribute named after the assistant.
    :param assistant_name: Name of the system assistant config to load.
    """
    # Fetch assistant config using assistant name
    assistant_config: AssistantConfig = instance.assistant_config_manager.get_config(assistant_name)

    try:
        ai_client_type: AIClientType = instance.system_client_type
        if ai_client_type is None:
            QMessageBox.warning(instance, "Warning", f"Selected system AI client is not initialized properly, system assistant {assistant_name} may not work as expected.")
            return
        else:
            # Update the ai_client_type in the assistant_config
            assistant_config.ai_client_type = ai_client_type

        # NOTE(review): this branch also fires when the configured model merely
        # differs from the system model, not only when it is missing, so the
        # "Model not found" warning text can be misleading — confirm intent.
        if not assistant_config.model or assistant_config.model != instance.system_model:
            logger.warning(f"Model not found in the {assistant_name} assistant config, using the system assistant model.")
            assistant_config.model = instance.system_model

        # Still no model (system model unset as well): warn the user and abort.
        if not assistant_config.model:
            error_message = f"Model not found in the {assistant_name} assistant config, and system assistant model is not set."
            QMessageBox.warning(instance, "Warning", error_message)
            return

        # Then, use it when setting the attribute:
        setattr(instance, camel_to_snake(assistant_name), ChatAssistantClient.from_config(assistant_config))

    except Exception as e:
        error_message = f"An error occurred while initializing the {assistant_name} assistant, check the system settings: {e}"
        QMessageBox.warning(instance, "Error", error_message)
77 |
78 |
def get_ai_client(ai_client_type: AIClientType, api_version: Optional[str] = None) -> Optional[object]:
    """
    Returns an AI client instance for the given AIClientType, optionally using a specified api_version.
    Logs an error if any exception occurs during creation.
    """
    client_factory = AIClientFactory.get_instance()

    # Azure client types take an explicit api_version; the rest do not.
    versioned_types = (
        AIClientType.AZURE_OPEN_AI,
        AIClientType.AZURE_OPEN_AI_REALTIME,
    )
    plain_types = (
        AIClientType.OPEN_AI,
        AIClientType.OPEN_AI_REALTIME,
        AIClientType.AZURE_AI_AGENT,
    )

    try:
        if ai_client_type in versioned_types:
            return client_factory.get_client(ai_client_type, api_version=api_version)
        if ai_client_type in plain_types:
            return client_factory.get_client(ai_client_type)
        # Unknown client type: return None, mirroring the original dispatch map.
        return None
    except Exception as e:
        logger.error(f"[get_ai_client] Error getting client for {ai_client_type.name}: {e}")
        return None
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | # This software uses the PySide6 library, which is licensed under the GNU Lesser General Public License (LGPL).
5 | # For more details on PySide6's license, see
6 |
7 | from PySide6.QtWidgets import QApplication
8 |
9 | import sys
10 |
11 | from gui.main_window import MainWindow
12 |
13 |
def main():
    """
    Entry point: construct the Qt application and the main window, then run
    the event loop until the window closes.
    """
    app = QApplication(sys.argv)

    main_window = MainWindow()
    main_window.show()

    # exec() blocks until the last window closes; propagate its exit code.
    sys.exit(app.exec())


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # Assistant middleware from GitHub Release
2 | https://github.com/Azure-Samples/azureai-assistant-tool/releases/download/v0.5.2-alpha/azure_ai_assistant-0.5.2a1-py3-none-any.whl
3 |
4 | # GUI Framework
5 | PySide6
6 |
7 | # Azure Cognitive Services Speech SDK
8 | azure-cognitiveservices-speech
9 |
10 | # Beautiful Soup
11 | beautifulsoup4
12 |
13 | # Realtime Python Library
14 | https://github.com/jhakulin/realtime-ai/releases/download/v0.1.8/realtime_ai-0.1.8-py3-none-any.whl
--------------------------------------------------------------------------------
/samples/FileSearch/README.md:
--------------------------------------------------------------------------------
1 | # Sample Application using Azure OpenAI Assistants and File Search tool (Python)
2 |
3 | This sample includes a simple Python [Quart](https://quart.palletsprojects.com/en/latest/) app that streams responses from OpenAI Assistant to an HTML/JS frontend using Server-Sent Events (SSEs). The application is configured to upload two documents under the `files` folder for use with the OpenAI Assistant's File Search tool.
4 |
5 | The sample is designed for use with [Docker containers](https://www.docker.com/), both for local development and Azure deployment. For Azure deployment to [Azure Container Apps](https://learn.microsoft.com/azure/container-apps/overview), please use this [template](https://github.com/Azure-Samples/openai-chat-app-quickstart) and replace the `src` folder content with this application.
6 |
7 | ## Local development with Docker
8 |
9 | This sample includes a `docker-compose.yaml` for local development which creates a volume for the app code. That allows you to make changes to the code and see them instantly.
10 |
11 | 1. Install [Docker Desktop](https://www.docker.com/products/docker-desktop/). If you opened this inside Github Codespaces or a Dev Container in VS Code, installation is not needed. ⚠️ If you're on an Apple M1/M2, you won't be able to run `docker` commands inside a Dev Container; either use Codespaces or do not open the Dev Container.
12 |
13 | 2. Make sure that the `.env` file exists.
14 |
15 | 3. Store the key and endpoint information (Azure) for the OpenAI resource in the `.env` file. The key should be stored in the `.env` file as `AZURE_OPENAI_API_KEY or OPENAI_API_KEY`. This is necessary because Docker containers don't have access to your user Azure credentials.
16 |
17 | 4. Start the services with this command:
18 |
19 | ```shell
20 | docker-compose up --build
21 | ```
22 |
23 | 5. Click 'http://localhost:50505' in the browser to run the application.
24 |
25 | ## Example run
26 |
27 | 
28 |
29 | ## Deployment to Azure
30 |
31 | As mentioned earlier, please integrate this app using [template](https://github.com/Azure-Samples/openai-chat-app-quickstart) and follow the Azure Container App deployment steps there.
--------------------------------------------------------------------------------
/samples/FileSearch/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | app:
3 | build:
4 | context: ./src
5 | env_file:
6 | - .env
7 | ports:
8 | - 50505:50505
9 | volumes:
10 | - ./src:/code
11 |
--------------------------------------------------------------------------------
/samples/FileSearch/src/.dockerignore:
--------------------------------------------------------------------------------
1 | .git*
2 | .venv/
3 | **/*.pyc
4 |
--------------------------------------------------------------------------------
/samples/FileSearch/src/Dockerfile:
--------------------------------------------------------------------------------
1 | # ------------------- Stage 0: Base Stage ------------------------------
2 | FROM python:3.11-alpine AS base
3 |
4 | WORKDIR /code
5 |
6 | # Install tini, a tiny init for containers
7 | RUN apk add --update --no-cache tini
8 |
9 | # Install required packages for cryptography package
10 | # https://cryptography.io/en/latest/installation/#building-cryptography-on-linux
11 | RUN apk add gcc musl-dev python3-dev libffi-dev openssl-dev cargo pkgconfig
12 |
13 | # ------------------- Stage 1: Build Stage ------------------------------
14 | FROM base AS build
15 |
16 | COPY requirements.txt .
17 |
18 | RUN pip3 install -r requirements.txt
19 |
20 | COPY . .
21 |
22 | # ------------------- Stage 2: Final Stage ------------------------------
23 | FROM base AS final
24 |
25 | RUN addgroup -S app && adduser -S app -G app
26 |
27 | COPY --from=build --chown=app:app /usr/local/lib/python3.11 /usr/local/lib/python3.11
28 | COPY --from=build --chown=app:app /usr/local/bin /usr/local/bin
29 | COPY --from=build --chown=app:app /code /code
30 |
31 | # Copy the files directory
32 | COPY --chown=app:app files /code/files
33 |
34 | USER app
35 |
36 | EXPOSE 50505
37 |
38 | ENTRYPOINT ["tini", "gunicorn", "quartapp:create_app()"]
39 |
--------------------------------------------------------------------------------
/samples/FileSearch/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/samples/FileSearch/src/__init__.py
--------------------------------------------------------------------------------
/samples/FileSearch/src/config/file_search_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: file_search
2 | instructions: |-
3 | You are a helpful assistant capable of answering questions about uploaded documents. When providing answers, you must include file citations for all referred documents.
4 | model: gpt-4-turbo-2024-04-09
5 | assistant_id:
6 | file_references: []
7 | tool_resources:
8 | code_interpreter:
9 | files: {}
10 | file_search:
11 | vector_stores:
12 | - name: Assistant file_search vector store
13 | id:
14 | files:
15 | /code/files/product_info_1.md:
16 | /code/files/product_info_2.md:
17 | metadata: null
18 | expires_after: null
19 | functions: []
20 | file_search: true
21 | code_interpreter: false
22 | output_folder_path: /code/output
23 | ai_client_type: OPEN_AI
24 | assistant_type: assistant
25 | completion_settings: null
26 | assistant_role: user
27 | config_folder: null
28 |
--------------------------------------------------------------------------------
/samples/FileSearch/src/files/product_info_1.md:
--------------------------------------------------------------------------------
1 | # Information about product item_number: 1
2 |
3 | ## Brand
4 | Contoso Galaxy Innovations
5 |
6 | ## Category
7 | Smart Eyewear
8 |
9 | ## Features
10 | - Augmented Reality interface
11 | - Voice-controlled AI assistant
12 | - HD video recording with 3D audio
13 | - UV protection and blue light filtering
14 | - Wireless charging with extended battery life
15 |
16 | ## User Guide
17 |
18 | ### 1. Introduction
19 | Introduction to your new SmartView Glasses
20 |
21 | ### 2. Product Overview
22 | Overview of features and controls
23 |
24 | ### 3. Sizing and Fit
25 | Finding your perfect fit and style adjustments
26 |
27 | ### 4. Proper Care and Maintenance
28 | Cleaning and caring for your SmartView Glasses
29 |
30 | ### 5. Break-in Period
31 | Adjusting to the augmented reality experience
32 |
33 | ### 6. Safety Tips
34 | Safety guidelines for public and private spaces
35 |
36 | ### 7. Troubleshooting
37 | Quick fixes for common issues
38 |
39 | ## Warranty Information
40 | Two-year limited warranty on all electronic components
41 |
42 | ## Contact Information
43 | Customer Support at support@contoso-galaxy-innovations.com
44 |
45 | ## Return Policy
46 | 30-day return policy with no questions asked
47 |
48 | ## FAQ
49 | - How to sync your SmartView Glasses with your devices
50 | - Troubleshooting connection issues
51 | - Customizing your augmented reality environment
52 |
--------------------------------------------------------------------------------
/samples/FileSearch/src/files/product_info_2.md:
--------------------------------------------------------------------------------
1 | # Information about product item_number: 2
2 |
3 | ## Brand
4 | Contoso Quantum Comfort
5 |
6 | ## Category
7 | Self-Warming Blanket
8 |
9 | ## Features
10 | - Nano-fiber heating elements for even warmth distribution
11 | - Intelligent temperature control with machine learning preferences
12 | - Eco-friendly and energy-efficient design
13 | - Wireless and portable with up to 12 hours of battery life
14 | - Waterproof and machine washable material
15 |
16 | ## User Guide
17 |
18 | ### 1. Introduction
19 | Getting to know your new Self-Warming Blanket
20 |
21 | ### 2. Product Overview
22 | How to set up and operate your blanket
23 |
24 | ### 3. Sizing and Fit
25 | Selecting the ideal warmth setting for comfort
26 |
27 | ### 4. Proper Care and Maintenance
28 | Care instructions to maintain warmth and softness
29 |
30 | ### 5. Break-in Period
31 | What to expect during the first use
32 |
33 | ### 6. Safety Tips
34 | Best practices for safe use
35 |
36 | ### 7. Troubleshooting
37 | Common questions and solutions
38 |
39 | ## Warranty Information
40 | Three-year warranty with free technical support
41 |
42 | ## Contact Information
43 | Quantum Comfort Support at contact@contosoquantumcomfort.co
44 |
45 | ## Return Policy
46 | 45-day satisfaction guarantee with full refund
47 |
48 | ## FAQ
49 | - How to pair the blanket with your smart home devices
50 | - Optimizing battery life for longer use
51 | - Adjusting blanket settings for different climates
52 |
--------------------------------------------------------------------------------
/samples/FileSearch/src/gunicorn.conf.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import os
3 | 
4 | from dotenv import load_dotenv
5 | 
6 | load_dotenv()  # load variables from a local .env file into os.environ before gunicorn reads config
7 | 
8 | max_requests = 1000  # recycle a worker after this many requests (mitigates slow memory leaks)
9 | max_requests_jitter = 50  # randomize the recycle point so workers don't all restart at once
10 | log_file = "-"  # log to stdout, the container-friendly default
11 | bind = "0.0.0.0:50505"  # matches the 50505 port published in docker-compose.yaml
12 | 
13 | if not os.getenv("RUNNING_IN_PRODUCTION"):
14 |     reload = True  # auto-reload on code changes, local development only
15 | 
16 | num_cpus = multiprocessing.cpu_count()
17 | workers = 1 #(num_cpus * 2) + 1  NOTE(review): deliberately pinned to 1 (formula kept commented) — presumably so SSE streams and in-process assistant state stay in a single worker; confirm before scaling up
18 | worker_class = "uvicorn.workers.UvicornWorker"  # ASGI worker class; Quart is an ASGI app
19 | 
20 | timeout = 120  # allow long-running assistant responses before gunicorn kills the worker
21 | 
--------------------------------------------------------------------------------
/samples/FileSearch/src/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "quartapp"
3 | version = "1.0.0"
4 | description = "Create a simple chat app using Quart and OpenAI"
5 | dependencies = [
6 | "quart",
7 | "werkzeug",
8 | "gunicorn",
9 | "uvicorn[standard]",
10 | "openai",
11 | "azure-identity",
12 | "aiohttp",
13 | "python-dotenv",
14 | "pyyaml",
15 | "azure-ai-assistant"
16 | ]
17 |
18 | [build-system]
19 | requires = ["flit_core<4"]
20 | build-backend = "flit_core.buildapi"
21 |
--------------------------------------------------------------------------------
/samples/FileSearch/src/quartapp/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 | 
4 | import logging
5 | import os
6 | 
7 | from quart import Quart
8 | 
9 | 
10 | def create_app():  # Quart application factory; the entry point gunicorn is pointed at ("quartapp:create_app()")
11 |     if os.getenv("RUNNING_IN_PRODUCTION"):
12 |         logging.basicConfig(level=logging.INFO)  # quieter logging in production
13 |     else:
14 |         logging.basicConfig(level=logging.DEBUG)  # verbose logging for local development
15 | 
16 |     app = Quart(__name__)
17 | 
18 |     from . import chat  # noqa  -- deferred import; presumably avoids import-time cycles/side effects, confirm
19 | 
20 |     app.register_blueprint(chat.bp)  # mount the chat blueprint (route handlers live in quartapp/chat.py)
21 | 
22 |     return app
23 | 
--------------------------------------------------------------------------------
/samples/FileSearch/src/quartapp/static/ChatClient.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) Microsoft. All rights reserved.
2 | // Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | class ChatClient {  // Talks to the Quart backend: POSTs the user's message, then streams the reply over SSE.
5 |     constructor(ui) {
6 |         this.ui = ui;  // ChatUI instance used to render messages
7 |         this.messageInput = document.getElementById("message");
8 |         this.eventSource = null;  // created lazily, one per streamed response
9 |     }
10 | 
11 |     async sendMessage(url) {  // POST the input-box text as JSON; resolves to the server's thread_name (or false for empty input)
12 |         const message = this.messageInput.value.trim();
13 |         if (!message) return false;  // nothing to send
14 | 
15 |         this.ui.appendUserMessage(message);  // echo the user's message immediately
16 | 
17 |         const response = await fetch(url, {
18 |             method: "POST",
19 |             headers: { "Content-Type": "application/json" },
20 |             body: JSON.stringify({ message })
21 |         });
22 | 
23 |         const data = await response.json();  // NOTE(review): no response.ok check — a non-JSON error page would make this throw
24 |         return data.thread_name;  // used by the caller to open the SSE stream for this thread
25 |     }
26 | 
27 |     listenToServer(url, threadName) {  // open an SSE connection for this thread, unless one is already live
28 |         if (!this.eventSource || this.eventSource.readyState === EventSource.CLOSED) {
29 |             this.eventSource = new EventSource(`${url}/${threadName}`);
30 |             this.handleMessages();
31 |         }
32 |     }
33 | 
34 |     handleMessages() {  // accumulate streamed chunks and hand the running text to the UI
35 |         let messageDiv = null;  // DOM node of the assistant bubble currently being filled
36 |         let accumulatedContent = '';
37 |         let isStreaming = true;
38 | 
39 |         this.eventSource.onmessage = event => {
40 |             const data = JSON.parse(event.data);
41 | 
42 |             if (data.type === "stream_end") {  // server signals the response is finished
43 |                 this.eventSource.close();
44 |                 messageDiv = null;  // reset so the next response gets a fresh bubble
45 |                 accumulatedContent = '';
46 |             } else {
47 |                 if (!messageDiv) {
48 |                     messageDiv = this.ui.createAssistantMessageDiv();
49 |                     if (!messageDiv) {
50 |                         console.error("Failed to create message div.");
51 |                     }
52 |                 }
53 | 
54 |                 // Check if it's a completed message
55 |                 if (data.type === "completed_message") {
56 |                     //console.log("Received completed message:", data.content);
57 |                     // Replace the accumulated content with the completed message
58 |                     this.ui.clearAssistantMessage(messageDiv);
59 |                     accumulatedContent = data.content;
60 |                     isStreaming = false;  // final render is allowed to scroll (see ChatUI.appendAssistantMessage)
61 |                 } else {
62 |                     //console.log("Received partial message:", data.content);
63 |                     // Append the partial message to the accumulated content
64 |                     accumulatedContent += data.content;
65 |                 }
66 | 
67 |                 this.ui.appendAssistantMessage(messageDiv, accumulatedContent, isStreaming);
68 |             }
69 |         };
70 | 
71 |         this.eventSource.onerror = error => {  // EventSource auto-reconnects by default; closing here makes errors terminal
72 |             console.error("EventSource failed:", error);
73 |             this.eventSource.close();
74 |         };
75 |     }
76 | 
77 |     closeEventSource() {  // called from main.js on page unload to release the connection
78 |         if (this.eventSource) this.eventSource.close();
79 |     }
80 | }
81 |
82 | export default ChatClient;
83 |
--------------------------------------------------------------------------------
/samples/FileSearch/src/quartapp/static/main.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) Microsoft. All rights reserved.
2 | // Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | import ChatUI from './ChatUI.js';
5 | import ChatClient from './ChatClient.js';
6 | 
7 | function initChat() {  // wire the chat form to the backend once the DOM is ready
8 |     const chatUI = new ChatUI();
9 |     const chatClient = new ChatClient(chatUI);
10 | 
11 |     const form = document.getElementById("chat-form");
12 | 
13 |     form.addEventListener("submit", async function(e) {
14 |         e.preventDefault();  // stop the browser's full-page form POST
15 |         const threadName = await chatClient.sendMessage("/chat");
16 |         if (threadName) {  // sendMessage returns false for an empty input box
17 |             chatClient.listenToServer("/stream", threadName);  // stream the assistant's reply via SSE
18 |         }
19 |         chatClient.messageInput.value = "";  // clear the input for the next message
20 |     });
21 | 
22 |     window.onbeforeunload = function() {
23 |         chatClient.closeEventSource();  // don't leave the SSE connection open across navigation
24 |     };
25 | }
26 | 
27 | document.addEventListener("DOMContentLoaded", initChat);
28 | 
--------------------------------------------------------------------------------
/samples/FileSearch/src/quartapp/static/styles.css:
--------------------------------------------------------------------------------
1 | /* Copyright (c) Microsoft. All rights reserved.
2 | Licensed under the MIT license. See LICENSE.md file in the project root for full license information. */
3 |
4 | * {
5 | box-sizing: border-box;
6 | }
7 |
8 | html, body {
9 | height: 100%;
10 | margin: 0;
11 | padding: 0;
12 | overflow-x: hidden; /* Prevent horizontal scroll */
13 | }
14 |
15 | .row {
16 | height: 100%;
17 | }
18 |
19 | /* Chat section */
20 | #messages {
21 | height: calc(100% - 60px); /* Adjust height based on the input area */
22 | overflow-y: auto; /* Enable scrolling for overflow content */
23 | }
24 |
25 | #messages .toast-container {
26 | margin-bottom: 12px;
27 | }
28 |
29 | /* Styles for the message box */
30 | #messages .message-content {
31 | /* Other styles... */
32 | font-size: 16px;
33 | font-family: Arial, sans-serif; /* Example font */
34 | }
35 |
36 | #messages .message-content ol {
37 | padding-left: 20px;
38 | list-style-type: decimal; /* Ensures numbered lists are displayed correctly */
39 | }
40 |
41 | #messages .message-content ol li {
42 | margin-bottom: 5px;
43 | }
44 |
45 | /* Ensure consistent font size and styling for message text */
46 | .message-text {
47 | font-size: 16px;
48 | font-family: Arial, sans-serif;
49 | }
50 |
51 | .message-text h1,
52 | .message-text h2,
53 | .message-text h3,
54 | .message-text h4,
55 | .message-text h5,
56 | .message-text h6,
57 | .message-text p,
58 | .message-text span,
59 | .message-text div {
60 | font-size: 16px;
61 | font-family: Arial, sans-serif;
62 | margin: 0; /* Reset margin to avoid extra spacing */
63 | line-height: 1.5; /* Ensure consistent line height */
64 | }
65 |
66 | /* Optional: Adjust font weight for headers to distinguish them without changing size */
67 | .message-text h1,
68 | .message-text h2,
69 | .message-text h3,
70 | .message-text h4,
71 | .message-text h5,
72 | .message-text h6 {
73 | font-weight: bold;
74 | }
75 |
76 | #chat-area {
77 | height: 60px; /* Fixed height for the chat input area */
78 | padding: 10px; /* Padding for the input area */
79 | }
80 |
81 | /* Ensure Flexbox is applied to parent and children elements */
82 | .container-fluid {
83 | display: flex;
84 | flex-direction: row;
85 | height: 100%;
86 | }
87 |
88 | #chat-container {
89 | flex: 1;
90 | display: flex;
91 | flex-direction: column;
92 | height: 100%;
93 | }
94 |
95 | #document-viewer-section {
96 | display: none; /* Initially hidden */
97 | width: 0; /* Initially take no space */
98 | overflow: hidden; /* Prevent any overflow */
99 | transition: width 0.3s ease; /* Smooth transition for width adjustments */
100 | }
101 |
102 | #document-viewer-section.visible {
103 | display: block; /* Make visible */
104 | width: 50%; /* Adjust width as needed */
105 | overflow: auto; /* Allow scrolling if needed */
106 | }
107 |
108 | .col-full {
109 | flex: 0 0 100%;
110 | max-width: 100%;
111 | }
112 |
113 | .col-half {
114 | flex: 0 0 50%;
115 | max-width: 50%;
116 | }
117 |
118 | .hidden {
119 | display: none;
120 | }
121 |
122 | #document-viewer-section button {
123 | padding: 5px 10px;
124 | background-color: #f44336; /* Red color for visibility */
125 | color: white;
126 | border: none;
127 | border-radius: 5px;
128 | cursor: pointer;
129 | font-size: 16px;
130 | }
131 |
132 | #document-viewer-section button:hover {
133 | background-color: #d32f2f; /* Slightly darker on hover */
134 | }
135 |
136 | #document-viewer {
137 | flex: 1;
138 | width: 100%;
139 | height: 100%;
140 | border: none;
141 | background-color: white;
142 | }
143 |
144 | #close-button {
145 | display: none; /* Initially hidden */
146 | position: absolute;
147 | top: 10px;
148 | right: 10px;
149 | z-index: 10;
150 | }
151 |
152 | /* Background colors for user and assistant messages */
153 | .background-user {
154 | background-color: #2372cc;
155 | color: white; /* Ensure text is readable on the background */
156 | }
157 |
158 | .background-assistant {
159 | background-color: #2c8310;
160 | color: white; /* Ensure text is readable on the background */
161 | }
162 |
163 | /* Styling for messages */
164 | .toast {
165 | position: relative;
166 | display: block;
167 | margin-bottom: 0.5rem;
168 | border-radius: 0.25rem;
169 | }
170 |
171 | .toast-header {
172 | display: flex;
173 | align-items: center;
174 | padding: 0.5rem 0.75rem;
175 | color: #ffffff;
176 | }
177 |
178 | .toast-body {
179 | padding: 0.75rem;
180 | }
--------------------------------------------------------------------------------
/samples/FileSearch/src/quartapp/templates/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | OpenAI ChatGPT Demo
8 |
10 |
12 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
Document Viewer
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 | You
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 | Assistant
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
--------------------------------------------------------------------------------
/samples/FileSearch/src/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with Python 3.11
3 | # by the following command:
4 | #
5 | # pip-compile --output-file=requirements.txt pyproject.toml
6 | #
7 | aiofiles==23.2.1
8 | # via quart
9 | aiohttp==3.9.3
10 | # via quartapp (pyproject.toml)
11 | aiosignal==1.3.1
12 | # via aiohttp
13 | annotated-types==0.6.0
14 | # via pydantic
15 | anyio==4.3.0
16 | # via
17 | # httpx
18 | # openai
19 | # watchfiles
20 | attrs==23.2.0
21 | # via aiohttp
22 | azure-core==1.30.1
23 | # via azure-identity
24 | azure-identity==1.15.0
25 | # via quartapp (pyproject.toml)
26 | blinker==1.7.0
27 | # via
28 | # flask
29 | # quart
30 | certifi==2024.2.2
31 | # via
32 | # httpcore
33 | # httpx
34 | # requests
35 | cffi==1.16.0
36 | # via cryptography
37 | charset-normalizer==3.3.2
38 | # via requests
39 | click==8.1.7
40 | # via
41 | # flask
42 | # quart
43 | # uvicorn
44 | cryptography==42.0.5
45 | # via
46 | # azure-identity
47 | # msal
48 | # pyjwt
49 | distro==1.9.0
50 | # via openai
51 | flask==3.0.3
52 | # via quart
53 | frozenlist==1.4.1
54 | # via
55 | # aiohttp
56 | # aiosignal
57 | gunicorn==21.2.0
58 | # via quartapp (pyproject.toml)
59 | h11==0.14.0
60 | # via
61 | # httpcore
62 | # hypercorn
63 | # uvicorn
64 | # wsproto
65 | h2==4.1.0
66 | # via hypercorn
67 | hpack==4.0.0
68 | # via h2
69 | httpcore==1.0.5
70 | # via httpx
71 | httptools==0.6.1
72 | # via uvicorn
73 | httpx==0.27.0
74 | # via openai
75 | hypercorn==0.16.0
76 | # via quart
77 | hyperframe==6.0.1
78 | # via h2
79 | idna==3.7
80 | # via
81 | # anyio
82 | # httpx
83 | # requests
84 | # yarl
85 | itsdangerous==2.1.2
86 | # via
87 | # flask
88 | # quart
89 | jinja2==3.1.3
90 | # via
91 | # flask
92 | # quart
93 | markupsafe==2.1.5
94 | # via
95 | # jinja2
96 | # quart
97 | # werkzeug
98 | msal==1.28.0
99 | # via
100 | # azure-identity
101 | # msal-extensions
102 | msal-extensions==1.1.0
103 | # via azure-identity
104 | multidict==6.0.5
105 | # via
106 | # aiohttp
107 | # yarl
108 | openai==1.30.1
109 | # via quartapp (pyproject.toml)
110 | packaging==24.0
111 | # via
112 | # gunicorn
113 | # msal-extensions
114 | portalocker==2.8.2
115 | # via msal-extensions
116 | priority==2.0.0
117 | # via hypercorn
118 | pycparser==2.22
119 | # via cffi
120 | pydantic==2.6.4
121 | # via openai
122 | pydantic-core==2.16.3
123 | # via pydantic
124 | pyjwt[crypto]==2.8.0
125 | # via msal
126 | python-dotenv==1.0.1
127 | # via
128 | # quartapp (pyproject.toml)
129 | # uvicorn
130 | pyyaml==6.0.1
131 | # via
132 | # quartapp (pyproject.toml)
133 | # uvicorn
134 | quart==0.19.5
135 | # via quartapp (pyproject.toml)
136 | requests==2.31.0
137 | # via
138 | # azure-core
139 | # msal
140 | six==1.16.0
141 | # via azure-core
142 | sniffio==1.3.1
143 | # via
144 | # anyio
145 | # httpx
146 | # openai
147 | tqdm==4.66.2
148 | # via openai
149 | typing-extensions==4.11.0
150 | # via
151 | # azure-core
152 | # openai
153 | # pydantic
154 | # pydantic-core
155 | urllib3==2.2.1
156 | # via requests
157 | uvicorn[standard]==0.29.0
158 | # via quartapp (pyproject.toml)
159 | #uvloop==0.19.0
160 | # via uvicorn
161 | watchfiles==0.21.0
162 | # via uvicorn
163 | websockets==12.0
164 | # via uvicorn
165 | werkzeug==3.0.2
166 | # via
167 | # flask
168 | # quart
169 | # quartapp (pyproject.toml)
170 | wsproto==1.2.0
171 | # via hypercorn
172 | yarl==1.9.4
173 | # via aiohttp
174 | https://github.com/Azure-Samples/azureai-assistant-tool/releases/download/v0.4.0-alpha/azure_ai_assistant-0.4.0a1-py3-none-any.whl
175 | # via quartapp (pyproject.toml)
176 |
--------------------------------------------------------------------------------
/samples/ImageInput/README.md:
--------------------------------------------------------------------------------
1 | # Sample Application using Azure OpenAI Assistants with Image Input Support (Python)
2 |
3 | This sample includes a simple Python [Quart](https://quart.palletsprojects.com/en/latest/) app that streams responses from OpenAI Assistant to an HTML/JS frontend using Server-Sent Events (SSEs). The application supports both image (.jpg/jpeg, .webp, .gif, .png) and text inputs.
4 |
5 | The sample is designed for use with [Docker containers](https://www.docker.com/), both for local development and Azure deployment. For Azure deployment to [Azure Container Apps](https://learn.microsoft.com/azure/container-apps/overview), please use this [template](https://github.com/Azure-Samples/openai-chat-app-quickstart) and replace the `src` folder content with this application.
6 |
7 | ## Local development with Docker
8 |
9 | This sample includes a `docker-compose.yaml` for local development which creates a volume for the app code. That allows you to make changes to the code and see them instantly.
10 |
11 | 1. Install [Docker Desktop](https://www.docker.com/products/docker-desktop/). If you opened this inside Github Codespaces or a Dev Container in VS Code, installation is not needed. ⚠️ If you're on an Apple M1/M2, you won't be able to run `docker` commands inside a Dev Container; either use Codespaces or do not open the Dev Container.
12 |
13 | 2. Make sure that the `.env` file exists.
14 |
15 | 3. Store keys and endpoint information (Azure) for the OpenAI resource in the `.env` file. The key should be stored in the `.env` file as `AZURE_OPENAI_API_KEY` (for Azure OpenAI) or `OPENAI_API_KEY` (for OpenAI). This is necessary because Docker containers don't have access to your user Azure credentials.
16 |
17 | 4. Start the services with this command:
18 |
19 | ```shell
20 | docker-compose up --build
21 | ```
22 |
23 | 5. Open 'http://localhost:50505' in your browser to use the application.
24 |
25 | ## Example run
26 |
27 | 
28 |
29 | ## Deployment to Azure
30 |
31 | As mentioned earlier, to deploy this app please start from the [template](https://github.com/Azure-Samples/openai-chat-app-quickstart), replace its `src` folder content with this application, and follow the Azure Container Apps deployment steps described there.
--------------------------------------------------------------------------------
/samples/ImageInput/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | app:
3 | build:
4 | context: ./src
5 | env_file:
6 | - .env
7 | ports:
8 | - 50505:50505
9 | volumes:
10 | - ./src:/code
11 |
--------------------------------------------------------------------------------
/samples/ImageInput/src/.dockerignore:
--------------------------------------------------------------------------------
1 | .git*
2 | .venv/
3 | **/*.pyc
4 |
--------------------------------------------------------------------------------
/samples/ImageInput/src/Dockerfile:
--------------------------------------------------------------------------------
1 | # ------------------- Stage 0: Base Stage ------------------------------
2 | FROM python:3.11-alpine AS base
3 |
4 | WORKDIR /code
5 |
6 | # Install tini, a tiny init for containers
7 | RUN apk add --update --no-cache tini
8 |
9 | # Install required packages for cryptography package
10 | # https://cryptography.io/en/latest/installation/#building-cryptography-on-linux
11 | RUN apk add gcc musl-dev python3-dev libffi-dev openssl-dev cargo pkgconfig
12 |
13 | # ------------------- Stage 1: Build Stage ------------------------------
14 | FROM base AS build
15 |
16 | COPY requirements.txt .
17 |
18 | RUN pip3 install -r requirements.txt
19 |
20 | COPY . .
21 |
22 | # ------------------- Stage 2: Final Stage ------------------------------
23 | FROM base AS final
24 |
25 | RUN addgroup -S app && adduser -S app -G app
26 |
27 | COPY --from=build --chown=app:app /usr/local/lib/python3.11 /usr/local/lib/python3.11
28 | COPY --from=build --chown=app:app /usr/local/bin /usr/local/bin
29 | COPY --from=build --chown=app:app /code /code
30 |
31 | USER app
32 |
33 | EXPOSE 50505
34 |
35 | ENTRYPOINT ["tini", "gunicorn", "quartapp:create_app()"]
36 |
--------------------------------------------------------------------------------
/samples/ImageInput/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/samples/ImageInput/src/__init__.py
--------------------------------------------------------------------------------
/samples/ImageInput/src/config/image_input_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: image_input
2 | instructions: You are a helpful assistant capable of answering questions.
3 | model: gpt-4-turbo-2024-04-09
4 | assistant_id:
5 | file_references: []
6 | tool_resources:
7 | code_interpreter:
8 | files: {}
9 | file_search:
10 | vector_stores: []
11 | functions: []
12 | file_search: false
13 | code_interpreter: false
14 | output_folder_path: /code/output
15 | ai_client_type: OPEN_AI
16 | assistant_type: assistant
17 | completion_settings: null
18 | assistant_role: user
19 | config_folder: null
20 |
--------------------------------------------------------------------------------
/samples/ImageInput/src/gunicorn.conf.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import os
3 | 
4 | from dotenv import load_dotenv
5 | 
6 | load_dotenv()  # load variables from a local .env file into os.environ before gunicorn reads config
7 | 
8 | max_requests = 1000  # recycle a worker after this many requests (mitigates slow memory leaks)
9 | max_requests_jitter = 50  # randomize the recycle point so workers don't all restart at once
10 | log_file = "-"  # log to stdout, the container-friendly default
11 | bind = "0.0.0.0:50505"  # matches the 50505 port exposed by the Dockerfile and docker-compose.yaml
12 | 
13 | if not os.getenv("RUNNING_IN_PRODUCTION"):
14 |     reload = True  # auto-reload on code changes, local development only
15 | 
16 | num_cpus = multiprocessing.cpu_count()
17 | workers = 1 #(num_cpus * 2) + 1  NOTE(review): deliberately pinned to 1 (formula kept commented) — presumably so SSE streams and in-process assistant state stay in a single worker; confirm before scaling up
18 | worker_class = "uvicorn.workers.UvicornWorker"  # ASGI worker class; Quart is an ASGI app
19 | 
20 | timeout = 120  # allow long-running assistant responses before gunicorn kills the worker
21 | 
--------------------------------------------------------------------------------
/samples/ImageInput/src/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "quartapp"
3 | version = "1.0.0"
4 | description = "Create a simple chat app using Quart and OpenAI"
5 | dependencies = [
6 | "quart",
7 | "werkzeug",
8 | "gunicorn",
9 | "uvicorn[standard]",
10 | "openai",
11 | "azure-identity",
12 | "aiohttp",
13 | "python-dotenv",
14 | "pyyaml",
15 | "azure-ai-assistant"
16 | ]
17 |
18 | [build-system]
19 | requires = ["flit_core<4"]
20 | build-backend = "flit_core.buildapi"
21 |
--------------------------------------------------------------------------------
/samples/ImageInput/src/quartapp/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 | 
4 | import logging
5 | import os
6 | 
7 | from quart import Quart
8 | 
9 | 
10 | def create_app():  # Quart application factory; the entry point gunicorn is pointed at ("quartapp:create_app()", see Dockerfile)
11 |     if os.getenv("RUNNING_IN_PRODUCTION"):
12 |         logging.basicConfig(level=logging.INFO)  # quieter logging in production
13 |     else:
14 |         logging.basicConfig(level=logging.DEBUG)  # verbose logging for local development
15 | 
16 |     app = Quart(__name__)
17 | 
18 |     from . import chat  # noqa  -- deferred import; presumably avoids import-time cycles/side effects, confirm
19 | 
20 |     app.register_blueprint(chat.bp)  # mount the chat blueprint (route handlers live in quartapp/chat.py)
21 | 
22 |     return app
23 | 
--------------------------------------------------------------------------------
/samples/ImageInput/src/quartapp/static/ChatClient.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) Microsoft. All rights reserved.
2 | // Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | class ChatClient {  // Talks to the Quart backend: POSTs the message plus attached images, then streams the reply over SSE.
5 |     constructor(ui) {
6 |         this.ui = ui;  // ChatUI instance used to render messages
7 |         this.messageInput = document.getElementById("message");
8 |         this.fileInput = document.getElementById("file");  // image attachment picker
9 |         this.eventSource = null;  // created lazily, one per streamed response
10 |     }
11 | 
12 |     async sendMessage(url) {  // POST text + selected images as multipart form data; resolves to the server's thread_name
13 |         const message = this.messageInput.value.trim();
14 |         const files = this.fileInput.files;
15 | 
16 |         if (!message) return false;  // nothing to send (images alone are not submitted)
17 | 
18 |         if (files.length > 0) {
19 |             this.ui.appendUserMessage(message, files);  // echo text with inline image previews
20 |         } else {
21 |             this.ui.appendUserMessage(message);
22 |         }
23 | 
24 |         const formData = new FormData();
25 |         formData.append("message", message);
26 |         for (const [i, file] of Array.from(files).entries()) {
27 |             if (file.type == "image/jpeg" || file.type == "image/png" || file.type == "image/gif" || file.type == "image/webp") {
28 |                 formData.append(`${i}_${file.name}`, file);  // index prefix keeps duplicate filenames distinct
29 |             } else {
30 |                 console.error("Unsupported file type")  // non-image attachments are silently dropped from the upload
31 |             }
32 |         }
33 | 
34 |         const response = await fetch(url, {
35 |             method: "POST",
36 |             body: formData,
37 |         });
38 | 
39 |         const data = await response.json();  // NOTE(review): no response.ok check — a non-JSON error page would make this throw
40 |         return data.thread_name;  // used by the caller to open the SSE stream for this thread
41 |     }
42 | 
43 |     listenToServer(url, threadName) {  // open an SSE connection for this thread, unless one is already live
44 |         if (!this.eventSource || this.eventSource.readyState === EventSource.CLOSED) {
45 |             this.eventSource = new EventSource(`${url}/${threadName}`);
46 |             this.handleMessages();
47 |         }
48 |     }
49 | 
50 |     handleMessages() {  // accumulate streamed chunks and hand the running text to the UI
51 |         let messageDiv = null;  // DOM node of the assistant bubble currently being filled
52 |         let accumulatedContent = '';
53 |         let isStreaming = true;
54 | 
55 |         this.eventSource.onmessage = event => {
56 |             const data = JSON.parse(event.data);
57 | 
58 |             if (data.type === "stream_end") {  // server signals the response is finished
59 |                 this.eventSource.close();
60 |                 messageDiv = null;  // reset so the next response gets a fresh bubble
61 |                 accumulatedContent = '';
62 |             } else {
63 |                 if (!messageDiv) {
64 |                     messageDiv = this.ui.createAssistantMessageDiv();
65 |                     if (!messageDiv) {
66 |                         console.error("Failed to create message div.");
67 |                     }
68 |                 }
69 | 
70 |                 // Check if it's a completed message
71 |                 if (data.type === "completed_message") {
72 |                     //console.log("Received completed message:", data.content);
73 |                     // Replace the accumulated content with the completed message
74 |                     this.ui.clearAssistantMessage(messageDiv);
75 |                     accumulatedContent = data.content;
76 |                     isStreaming = false;  // final render is allowed to scroll (see ChatUI.appendAssistantMessage)
77 |                 } else {
78 |                     //console.log("Received partial message:", data.content);
79 |                     // Append the partial message to the accumulated content
80 |                     accumulatedContent += data.content;
81 |                 }
82 | 
83 |                 this.ui.appendAssistantMessage(messageDiv, accumulatedContent, isStreaming);
84 |             }
85 |         };
86 | 
87 |         this.eventSource.onerror = error => {  // EventSource auto-reconnects by default; closing here makes errors terminal
88 |             console.error("EventSource failed:", error);
89 |             this.eventSource.close();
90 |         };
91 |     }
92 | 
93 |     closeEventSource() {  // called from main.js on page unload to release the connection
94 |         if (this.eventSource) this.eventSource.close();
95 |     }
96 | }
97 |
98 | export default ChatClient;
99 |
--------------------------------------------------------------------------------
/samples/ImageInput/src/quartapp/static/ChatUI.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) Microsoft. All rights reserved.
2 | // Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | class ChatUI {
5 | constructor() {
6 | this.targetContainer = document.getElementById("messages");
7 | this.userTemplate = document.querySelector('#message-template-user');
8 | this.assistantTemplate = document.querySelector('#message-template-assistant');
9 | if (!this.assistantTemplate) {
10 | console.error("Assistant template not found!");
11 | }
12 | }
13 |
14 | appendUserMessage(message, imageFiles = null) {
15 | const userTemplateClone = this.userTemplate.content.cloneNode(true);
16 | userTemplateClone.querySelector(".message-content").textContent = message;
17 |
18 | if (imageFiles) {
19 | userTemplateClone.querySelector(".message-content").innerHTML += "
";
20 | for (const imageFile of imageFiles){
21 | if (imageFile.type == "image/jpeg" || imageFile.type == "image/png" || imageFile.type == "image/gif" || imageFile.type == "image/webp") {
22 | userTemplateClone.querySelector(".message-content").innerHTML += ``;
23 | } else {
24 | console.error("Unsupported file type")
25 | }
26 | }
27 | }
28 |
29 | this.targetContainer.appendChild(userTemplateClone);
30 | this.scrollToBottom();
31 | }
32 |
33 | appendAssistantMessage(messageDiv, accumulatedContent, isStreaming) {
34 | //console.log("Accumulated Content before conversion:", accumulatedContent);
35 | const md = window.markdownit({
36 | html: true,
37 | linkify: true,
38 | typographer: true,
39 | breaks: true
40 | });
41 |
42 | try {
43 | // Convert the accumulated content to HTML using markdown-it
44 | let htmlContent = md.render(accumulatedContent);
45 | const messageTextDiv = messageDiv.querySelector(".message-text");
46 | if (!messageTextDiv) {
47 | throw new Error("Message content div not found in the template.");
48 | }
49 |
50 | // Set the innerHTML of the message text div to the HTML content
51 | messageTextDiv.innerHTML = htmlContent;
52 |
53 | // Use requestAnimationFrame to ensure the DOM has updated before scrolling
54 | // Only scroll if not streaming
55 | if (!isStreaming) {
56 | console.log("Accumulated content:", accumulatedContent);
57 | console.log("HTML set to messageTextDiv:", messageTextDiv.innerHTML);
58 | requestAnimationFrame(() => {
59 | this.scrollToBottom();
60 | });
61 | }
62 | } catch (error) {
63 | console.error("Error in appendAssistantMessage:", error);
64 | }
65 | }
66 |
67 | clearAssistantMessage(messageDiv) {
68 | const messageTextDiv = messageDiv.querySelector(".message-text");
69 | if (messageTextDiv) {
70 | messageTextDiv.innerHTML = '';
71 | }
72 | }
73 |
74 | createAssistantMessageDiv() {
75 | const assistantTemplateClone = this.assistantTemplate.content.cloneNode(true);
76 | if (!assistantTemplateClone) {
77 | console.error("Failed to clone assistant template.");
78 | return null;
79 | }
80 |
81 | // Append the clone to the target container
82 | this.targetContainer.appendChild(assistantTemplateClone);
83 |
84 | // Since the content of assistantTemplateClone is now transferred to the DOM,
85 | // you should query the targetContainer for the elements you want to interact with.
86 | // Specifically, you look at the last added 'toast' which is where the new content lives.
87 | const newlyAddedToast = this.targetContainer.querySelector(".toast-container:last-child .toast:last-child");
88 |
89 | if (!newlyAddedToast) {
90 | console.error("Failed to find the newly added toast element.");
91 | return null;
92 | }
93 |
94 | // Now, find the .message-content within this newly added toast
95 | const messageDiv = newlyAddedToast.querySelector(".message-content");
96 |
97 | if (!messageDiv) {
98 | console.error("Message content div not found in the template.");
99 | }
100 |
101 | return messageDiv;
102 | }
103 |
104 | scrollToBottom() {
105 | const lastChild = this.targetContainer.lastElementChild;
106 | if (lastChild) {
107 | // Adjust the scroll to make sure the input box is visible
108 | lastChild.scrollIntoView({ behavior: 'smooth', block: 'end' });
109 | }
110 |
111 | // Ensure the input box remains visible
112 | const inputBox = document.querySelector('#chat-area');
113 | if (inputBox) {
114 | inputBox.scrollIntoView({ behavior: 'smooth', block: 'end' });
115 | }
116 | }
117 | }
118 |
119 | export default ChatUI;
120 |
--------------------------------------------------------------------------------
/samples/ImageInput/src/quartapp/static/main.js:
--------------------------------------------------------------------------------
1 | // Copyright (c) Microsoft. All rights reserved.
2 | // Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | import ChatUI from './ChatUI.js';
5 | import ChatClient from './ChatClient.js';
6 |
// Wire up the chat UI: create the UI/client pair, attach the submit handler,
// and ensure the server-sent-events connection is closed on page unload.
function initChat() {
    const chatUI = new ChatUI();
    const chatClient = new ChatClient(chatUI);

    const form = document.getElementById("chat-form");
    if (!form) {
        // Guard: without this check a missing form element throws a
        // TypeError on addEventListener and silently breaks the page.
        console.error("initChat: #chat-form element not found; chat is disabled.");
        return;
    }

    form.addEventListener("submit", async function (e) {
        e.preventDefault();
        // Post the message; the server responds with the thread to stream from.
        const threadName = await chatClient.sendMessage("/chat");
        if (threadName) {
            chatClient.listenToServer("/stream", threadName);
        }
        chatClient.messageInput.value = "";
    });

    // Close the event stream when the page unloads to avoid dangling connections.
    window.onbeforeunload = function () {
        chatClient.closeEventSource();
    };
}

document.addEventListener("DOMContentLoaded", initChat);
28 |
--------------------------------------------------------------------------------
/samples/ImageInput/src/quartapp/static/styles.css:
--------------------------------------------------------------------------------
1 | /* Copyright (c) Microsoft. All rights reserved.
2 | Licensed under the MIT license. See LICENSE.md file in the project root for full license information. */
3 |
4 | * {
5 | box-sizing: border-box;
6 | }
7 |
8 | html, body {
9 | height: 100%;
10 | margin: 0;
11 | padding: 0;
12 | overflow-x: hidden; /* Prevent horizontal scroll */
13 | }
14 |
15 | .row {
16 | height: 100%;
17 | }
18 |
19 | /* Chat section */
20 | #messages {
21 | height: calc(100% - 60px); /* Adjust height based on the input area */
22 | overflow-y: auto; /* Enable scrolling for overflow content */
23 | }
24 |
25 | #messages .toast-container {
26 | margin-bottom: 12px;
27 | }
28 |
29 | /* Styles for the message box */
30 | #messages .message-content {
31 | /* Other styles... */
32 | font-size: 16px;
33 | font-family: Arial, sans-serif; /* Example font */
34 | }
35 |
36 | #messages .message-content ol {
37 | padding-left: 20px;
38 | list-style-type: decimal; /* Ensures numbered lists are displayed correctly */
39 | }
40 |
41 | #messages .message-content ol li {
42 | margin-bottom: 5px;
43 | }
44 |
45 | /* Ensure consistent font size and styling for message text */
46 | .message-text {
47 | font-size: 16px;
48 | font-family: Arial, sans-serif;
49 | }
50 |
51 | .message-text h1,
52 | .message-text h2,
53 | .message-text h3,
54 | .message-text h4,
55 | .message-text h5,
56 | .message-text h6,
57 | .message-text p,
58 | .message-text span,
59 | .message-text div {
60 | font-size: 16px;
61 | font-family: Arial, sans-serif;
62 | margin: 0; /* Reset margin to avoid extra spacing */
63 | line-height: 1.5; /* Ensure consistent line height */
64 | }
65 |
66 | /* Optional: Adjust font weight for headers to distinguish them without changing size */
67 | .message-text h1,
68 | .message-text h2,
69 | .message-text h3,
70 | .message-text h4,
71 | .message-text h5,
72 | .message-text h6 {
73 | font-weight: bold;
74 | }
75 |
76 | #chat-area {
77 | height: 60px; /* Fixed height for the chat input area */
78 | padding: 10px; /* Padding for the input area */
79 | }
80 |
81 | /* Ensure Flexbox is applied to parent and children elements */
82 | .container-fluid {
83 | display: flex;
84 | flex-direction: row;
85 | height: 100%;
86 | }
87 |
88 | #chat-container {
89 | flex: 1;
90 | display: flex;
91 | flex-direction: column;
92 | height: 100%;
93 | }
94 |
95 | .col-full {
96 | flex: 0 0 100%;
97 | max-width: 100%;
98 | }
99 |
100 | .col-half {
101 | flex: 0 0 50%;
102 | max-width: 50%;
103 | }
104 |
105 | .hidden {
106 | display: none;
107 | }
108 |
109 | /* Background colors for user and assistant messages */
110 | .background-user {
111 | background-color: #2372cc;
112 | color: white; /* Ensure text is readable on the background */
113 | }
114 |
115 | .background-assistant {
116 | background-color: #2c8310;
117 | color: white; /* Ensure text is readable on the background */
118 | }
119 |
120 | /* Styling for messages */
121 | .toast {
122 | position: relative;
123 | display: block;
124 | margin-bottom: 0.5rem;
125 | border-radius: 0.25rem;
126 | }
127 |
128 | .toast-header {
129 | display: flex;
130 | align-items: center;
131 | padding: 0.5rem 0.75rem;
132 | color: #ffffff;
133 | }
134 |
135 | .toast-body {
136 | padding: 0.75rem;
137 | }
--------------------------------------------------------------------------------
/samples/ImageInput/src/quartapp/templates/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | OpenAI ChatGPT Demo
8 |
10 |
12 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 | You
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 | Assistant
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
--------------------------------------------------------------------------------
/samples/ImageInput/src/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with Python 3.11
3 | # by the following command:
4 | #
5 | # pip-compile --output-file=requirements.txt pyproject.toml
6 | #
7 | aiofiles==23.2.1
8 | # via quart
9 | aiohttp==3.9.3
10 | # via quartapp (pyproject.toml)
11 | aiosignal==1.3.1
12 | # via aiohttp
13 | annotated-types==0.6.0
14 | # via pydantic
15 | anyio==4.3.0
16 | # via
17 | # httpx
18 | # openai
19 | # watchfiles
20 | attrs==23.2.0
21 | # via aiohttp
22 | azure-core==1.30.1
23 | # via azure-identity
24 | azure-identity==1.15.0
25 | # via quartapp (pyproject.toml)
26 | blinker==1.7.0
27 | # via
28 | # flask
29 | # quart
30 | certifi==2024.2.2
31 | # via
32 | # httpcore
33 | # httpx
34 | # requests
35 | cffi==1.16.0
36 | # via cryptography
37 | charset-normalizer==3.3.2
38 | # via requests
39 | click==8.1.7
40 | # via
41 | # flask
42 | # quart
43 | # uvicorn
44 | cryptography==42.0.5
45 | # via
46 | # azure-identity
47 | # msal
48 | # pyjwt
49 | distro==1.9.0
50 | # via openai
51 | flask==3.0.3
52 | # via quart
53 | frozenlist==1.4.1
54 | # via
55 | # aiohttp
56 | # aiosignal
57 | gunicorn==21.2.0
58 | # via quartapp (pyproject.toml)
59 | h11==0.14.0
60 | # via
61 | # httpcore
62 | # hypercorn
63 | # uvicorn
64 | # wsproto
65 | h2==4.1.0
66 | # via hypercorn
67 | hpack==4.0.0
68 | # via h2
69 | httpcore==1.0.5
70 | # via httpx
71 | httptools==0.6.1
72 | # via uvicorn
73 | httpx==0.27.0
74 | # via openai
75 | hypercorn==0.16.0
76 | # via quart
77 | hyperframe==6.0.1
78 | # via h2
79 | idna==3.7
80 | # via
81 | # anyio
82 | # httpx
83 | # requests
84 | # yarl
85 | itsdangerous==2.1.2
86 | # via
87 | # flask
88 | # quart
89 | jinja2==3.1.3
90 | # via
91 | # flask
92 | # quart
93 | markupsafe==2.1.5
94 | # via
95 | # jinja2
96 | # quart
97 | # werkzeug
98 | msal==1.28.0
99 | # via
100 | # azure-identity
101 | # msal-extensions
102 | msal-extensions==1.1.0
103 | # via azure-identity
104 | multidict==6.0.5
105 | # via
106 | # aiohttp
107 | # yarl
108 | openai==1.30.1
109 | # via quartapp (pyproject.toml)
110 | packaging==24.0
111 | # via
112 | # gunicorn
113 | # msal-extensions
114 | portalocker==2.8.2
115 | # via msal-extensions
116 | priority==2.0.0
117 | # via hypercorn
118 | pycparser==2.22
119 | # via cffi
120 | pydantic==2.6.4
121 | # via openai
122 | pydantic-core==2.16.3
123 | # via pydantic
124 | pyjwt[crypto]==2.8.0
125 | # via msal
126 | python-dotenv==1.0.1
127 | # via
128 | # quartapp (pyproject.toml)
129 | # uvicorn
130 | pyyaml==6.0.1
131 | # via
132 | # quartapp (pyproject.toml)
133 | # uvicorn
134 | quart==0.19.5
135 | # via quartapp (pyproject.toml)
136 | requests==2.31.0
137 | # via
138 | # azure-core
139 | # msal
140 | six==1.16.0
141 | # via azure-core
142 | sniffio==1.3.1
143 | # via
144 | # anyio
145 | # httpx
146 | # openai
147 | tqdm==4.66.2
148 | # via openai
149 | typing-extensions==4.11.0
150 | # via
151 | # azure-core
152 | # openai
153 | # pydantic
154 | # pydantic-core
155 | urllib3==2.2.1
156 | # via requests
157 | uvicorn[standard]==0.29.0
158 | # via quartapp (pyproject.toml)
159 | #uvloop==0.19.0
160 | # via uvicorn
161 | watchfiles==0.21.0
162 | # via uvicorn
163 | websockets==12.0
164 | # via uvicorn
165 | werkzeug==3.0.2
166 | # via
167 | # flask
168 | # quart
169 | # quartapp (pyproject.toml)
170 | wsproto==1.2.0
171 | # via hypercorn
172 | yarl==1.9.4
173 | # via aiohttp
174 | https://github.com/Azure-Samples/azureai-assistant-tool/releases/download/v0.4.2-alpha/azure_ai_assistant-0.4.2a1-py3-none-any.whl
175 | # via quartapp (pyproject.toml)
176 |
--------------------------------------------------------------------------------
/samples/MultiAgentCodeOrchestration/config/CodeInspectionAgent_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: CodeInspectionAgent
2 | instructions: |-
3 | As a CodeInspectionAgent, your primary responsibility is to review and provide feedback on code created or converted by the CodeProgrammerAgent.
4 | You must ensure that the code adheres to idiomatic programming styles, industry best practices, and fulfills the specified requirements.
5 | Your tasks involve a thorough inspection for completeness, correctness, and quality.
6 |
7 | ## Pre-requisites for processing
8 | You will receive code files (with path information) or code snippets generated by CodeProgrammerAgent.
9 | The code may be in various programming languages, and your task is to inspect and evaluate it based on the provided requirements.
10 |
11 | ## Requirements
12 | Completeness Check:
13 | - Verify that the code implementation is complete, including all classes, methods, and functionalities as requested by the user.
14 | - Ensure there are no placeholders or incomplete implementations.
15 | - Each part of the code should be functional and serve its intended purpose.
16 | Documentation Review:
17 | - Examine the code for adequate documentation, including comments that explain the purpose of functions, classes, and significant code blocks.
18 | - Check for the presence of docstrings (for languages that support them) that provide clear descriptions of each function's purpose, parameters, and return values.
19 | Coding Standards and Style:
20 | - Ensure the code follows the idiomatic programming styles of the language it is written in. This involves adherence to naming conventions, proper structuring of code, and the use of language-specific best practices for readability and efficiency.
21 | Error Handling and Validation:
22 | - Check for robust error handling and input validation. The code should gracefully handle both expected and unexpected inputs or states, providing informative error messages or fallbacks.
23 | Scope of Inspection:
24 | - The inspection should focus only on the code within the single file provided. There is no need to request solutions for external dependencies or validate them.
25 | model: gpt-4o
26 | assistant_id:
27 | file_references: []
28 | tool_resources:
29 | code_interpreter:
30 | files: {}
31 | file_search:
32 | vector_stores: []
33 | functions:
34 | - type: function
35 | function:
36 | name: retrieve_file_content_from_directory
37 | module: azure.ai.assistant.functions.file_functions
38 | description: Retrieves the content of a specified file in a given directory. Returns
39 | an empty JSON result if the content is not found.
40 | parameters:
41 | type: object
42 | properties:
43 | input_directory:
44 | type: string
45 | description: The path to the directory containing the file.
46 | filename:
47 | type: string
48 | description: The name of the file whose content is to be retrieved.
49 | required:
50 | - input_directory
51 | - filename
52 | - type: function
53 | function:
54 | name: find_files_by_name_in_directory
55 | module: azure.ai.assistant.functions.file_functions
56 | description: Searches for files matching specific criteria by name in a directory
57 | and its sub-directories (case-insensitive).
58 | parameters:
59 | type: object
60 | properties:
61 | directory:
62 | type: string
63 | description: The directory to search in.
64 | file_name_contains:
65 | type: string
66 | description: A partial or full file name to search for.
67 | required:
68 | - directory
69 | - file_name_contains
70 | - type: function
71 | function:
72 | name: find_all_folders_by_name_from_current_directory
73 | module: azure.ai.assistant.functions.file_functions
74 | description: Searches for matching folders with a given name in the current directory
75 | and its subdirectories. The search is case-sensitive and uses fuzzy matching.
76 | parameters:
77 | type: object
78 | properties:
79 | folder_name:
80 | type: string
81 | description: The name of the folder to search for.
82 | required:
83 | - folder_name
84 | file_search: false
85 | code_interpreter: false
86 | output_folder_path: output
87 | ai_client_type: OPEN_AI
88 | assistant_type: assistant
89 | completion_settings: null
90 | assistant_role: engineer
91 |
--------------------------------------------------------------------------------
/samples/MultiAgentCodeOrchestration/config/CodeProgrammerAgent_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: CodeProgrammerAgent
2 | instructions: |-
3 | As a CodeProgrammerAgent, you are a highly skilled programmer proficient in multiple programming languages, adhering to idiomatic programming styles and top industry practices.
4 | You always ensure that the code you create and the actions you perform are complete and do not require manual intervention afterward.
5 |
6 | ## Pre-requisites for processing
7 | - You will receive requests to generate code or perform actions based on user requirements.
8 | - You will be provided with the necessary information to complete the programming tasks, including input data, expected output, and any specific constraints or preferences.
9 |
10 | ## Requirements
11 | Explicit Output Format:
12 | - You must produce outputs exactly as requested by the user, creating complete and fully implemented solutions.
13 | Step-by-Step Approach:
14 | - You shall implement tasks in small steps, providing comprehensive code and actions for each part of the task.
15 | Contextual Understanding:
16 | - Before generating code or performing actions, you analyze and understand the context of the task, considering available data sources, input formats, desired transformations, and expected results.
17 | Reading/Writing Files:
18 | - If the input or output file paths are unclear, you must first retrieve information about the current directory structure to understand the directory layout.
19 | - You will check the file/folder name by searching using the file name or extension.
20 | - Check and revise the previously given function arguments, ensure proper JSON escaping, validate directory names, and then retry the function call.
21 | model: gpt-4o
22 | assistant_id:
23 | file_references: []
24 | tool_resources:
25 | code_interpreter:
26 | files: {}
27 | file_search:
28 | vector_stores: []
29 | functions:
30 | - type: function
31 | function:
32 | name: retrieve_file_content_from_directory
33 | module: azure.ai.assistant.functions.file_functions
34 | description: Retrieves the content of a specified file in a given directory. Returns
35 | an empty JSON result if the content is not found.
36 | parameters:
37 | type: object
38 | properties:
39 | input_directory:
40 | type: string
41 | description: The path to the directory containing the file.
42 | filename:
43 | type: string
44 | description: The name of the file whose content is to be retrieved.
45 | required:
46 | - input_directory
47 | - filename
48 | - type: function
49 | function:
50 | name: find_files_by_name_in_directory
51 | module: azure.ai.assistant.functions.file_functions
52 | description: Searches for files matching specific criteria by name in a directory
53 | and its sub-directories (case-insensitive).
54 | parameters:
55 | type: object
56 | properties:
57 | directory:
58 | type: string
59 | description: The directory to search in.
60 | file_name_contains:
61 | type: string
62 | description: A partial or full file name to search for.
63 | required:
64 | - directory
65 | - file_name_contains
66 | - type: function
67 | function:
68 | name: find_all_folders_by_name_from_current_directory
69 | module: azure.ai.assistant.functions.file_functions
70 | description: Searches for matching folders with a given name in the current directory
71 | and its subdirectories. The search is case-sensitive and uses fuzzy matching.
72 | parameters:
73 | type: object
74 | properties:
75 | folder_name:
76 | type: string
77 | description: The name of the folder to search for.
78 | required:
79 | - folder_name
80 | file_search: false
81 | code_interpreter: false
82 | output_folder_path: output
83 | ai_client_type: OPEN_AI
84 | assistant_type: assistant
85 | completion_settings: null
86 | assistant_role: engineer
87 |
--------------------------------------------------------------------------------
/samples/MultiAgentCodeOrchestration/config/FileCreatorAgent_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: FileCreatorAgent
2 | instructions: |-
3 | Your task is to process the provided text, identify relevant code blocks specified by the CodeProgrammerAgent, and create corresponding files with these blocks. Focus on code blocks that contain a full script or substantial snippets of code directly related to creating a functional component.
4 |
5 | ## Pre-requisites for processing
6 | - Check the programming language specified in the code block to ensure compatibility and correct file extension (e.g., `.py` for Python).
7 |
8 | ## Requirements
9 | 1. **Text Analysis**:
10 | - Parse the input text to distinguish between instructional content and code blocks.
11 | - Identify and classify code blocks that are meant for file creation based on contextual clues (e.g., file names mentioned before the code block).
12 |
13 | 2. **File Handling**:
14 | - Extract the file name from the text immediately preceding the code block or within the block comments.
15 | - Create files only for code blocks that represent complete scripts or modules, not for package installation commands or intermediate code snippets unless explicitly indicated.
16 |
17 | 3. **Writing Files**:
18 | - Ensure each extracted code block is written into a separate file with the appropriate file name and extension.
19 | - Save all files in an `output` folder relative to the current directory.
20 | - Handle errors during file creation gracefully and log them appropriately.
21 |
22 | 4. **User Notification**:
23 | - Inform the user about the successful creation of files and provide the file paths for reference.
24 | model: gpt-4o
25 | assistant_id:
26 | file_references: []
27 | tool_resources: null
28 | functions:
29 | - type: function
30 | function:
31 | name: create_file_with_specified_content
32 | module: azure.ai.assistant.functions.file_functions
33 | description: Creates a new file with the provided content in the specified directory.
34 | parameters:
35 | type: object
36 | properties:
37 | file_name:
38 | type: string
39 | description: The name of the file to be created
40 | output_directory:
41 | type: string
42 | description: The path to the output directory where the file will be created.
43 | If the directory does not exist, it will be created automatically.
44 | content:
45 | type: string
46 | description: The content to be written to the file
47 | file_extension:
48 | type: string
49 | description: The file extension to be used for the created file.
50 | default: ''
51 | required:
52 | - file_name
53 | - output_directory
54 | - content
55 | file_search: false
56 | code_interpreter: false
57 | output_folder_path: output
58 | ai_client_type: OPEN_AI
59 | assistant_type: chat_assistant
60 | completion_settings:
61 | frequency_penalty: 0.0
62 | max_tokens: 4096
63 | presence_penalty: 0.0
64 | response_format: text
65 | temperature: 0.17
66 | top_p: 0.1
67 | seed: null
68 | max_text_messages: null
69 | assistant_role: user
70 |
--------------------------------------------------------------------------------
/samples/MultiAgentCodeOrchestration/config/TaskExecutionAgent_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: TaskExecutionAgent
2 | instructions: |-
3 | Your task is to take the execution plan provided by the TaskPlannerAgent in the conversation and return it in the following format:
4 |
5 | ```json
6 | [
7 | {
8 | "assistant": "assistant_name",
9 | "task": "Description of the task"
10 | },
11 | {
12 | "assistant": "assistant_name",
13 | "task": "Description of the task"
14 | },
15 | {
16 | "assistant": "assistant_name",
17 | "task": "Description of the task"
18 | }
19 | ]
20 | ```
21 |
22 | The above example is the correct format where all the steps are inside a single JSON code block. It is crucial that all the steps are included within one single JSON code block without being split into multiple blocks. This ensures the tasks are executed as a cohesive plan.
23 |
24 | **Important**: DO NOT split the execution plan into multiple JSON code blocks. Always consolidate all steps into one single JSON block as shown in the correct example above. Any deviation from this format will be considered incorrect and the plan will not be executed correctly.
25 | **Important**: DO NOT implement the plan. Your task is to return the plan in the correct format only.
26 | model: gpt-4o
27 | assistant_id:
28 | file_references: null
29 | tool_resources: null
30 | functions: []
31 | file_search: false
32 | code_interpreter: false
33 | output_folder_path: output
34 | ai_client_type: OPEN_AI
35 | assistant_type: chat_assistant
36 | completion_settings:
37 | frequency_penalty: 0.0
38 | max_tokens: 4096
39 | presence_penalty: 0.0
40 | response_format: text
41 | temperature: 0.17
42 | top_p: 0.1
43 | seed: null
44 | max_text_messages: null
45 | assistant_role: user_interaction
46 | config_folder: null
47 |
--------------------------------------------------------------------------------
/samples/MultiAgentCodeOrchestration/config/TaskPlannerAgent_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: TaskPlannerAgent
2 | instructions: |-
3 | You are an expert in creating task plans for software development by coordinating multiple assistants to complete a task.
4 |
5 | ## Pre-requisites for processing
6 | - A user requests assistance with a software development task. For instance, they might say:
7 | - "Please transform the {any programming language} file in the current folder into an idiomatic {any programming language} version and save the resulting file in the output folder."
8 | - "Please create a python class that reads the input file and writes the output to a new file."
9 | - Below are the details about the assistants you will coordinate to complete the task:
10 | - {file_reference:0}
11 | - {file_reference:1}
12 | - If above details are indicating "file not found" error, inform the user and ask to correct the file references.
13 |
14 | ## Requirements
15 | Your task is to dynamically generate a plan based on a user's request and information about available assistants. This plan must be formatted
16 | as a Python list enclosed within JSON code block markers. The list will contain instructions for various assistants, with certain details to be filled
17 | in according to the specifics of the user's request and information you have about available assistants.
18 |
19 | Here is a template of what the plan could look like. Note that within this template, there are placeholders indicated by {}. These placeholders represent
20 | information that you, the plan creator, must determine and replace based on the details provided by the user and the capabilities of the available assistants.
21 | ```json
22 | [
23 | {
24 | "assistant": {assistant_name},
25 | "task": "{Description of the task}"
26 | },
27 | {
28 | "assistant": {assistant_name},
29 | "task": "{Description of the task}"
30 | },
31 | {
32 | "assistant": {assistant_name},
33 | "task": "{Description of the task}"
34 | }
35 | ]
36 | ```
37 | Instructions for filling the placeholders for the example plan above:
38 | - {assistant_name} - Replace this with the name of the assistant that is best suited for the task. The selection should be based on the capabilities of the available assistants.
39 | - {Description of the task} - Replace this with a detailed description of the task that the assistant should perform. This description is part of the plan that the assistants will execute.
40 |
41 | Additional instructions for creating the plan:
42 | - Review feedback follow-up: Ensure that each review/inspection task has follow-up implementation task by using the review feedback as input.
43 | - Multiple Files: If the plan involves multiple files, create a separate set of tasks for each file, ensuring each assistant processes only one file at a time.
44 | - Non-Software Development Requests: If a user request does not pertain to software development, kindly inform the user with a casual message and always with question how you can assist further.
45 | - Example response: "It seems your request isn't related to software development. I'm here to help with a wide range of questions and tasks, any specific area you'd like assistance with?"
46 | - Always include question in your answer in this case.
47 | - User Confirmation: Before proceeding with any plan, always seek confirmation from the user with question mark. You can say something like, "Here's the plan based on your request. Would you like me to go ahead with this?"
48 | model: gpt-4o
49 | assistant_id:
50 | file_references:
51 | - C:/Git/azureai-assistant-tool/samples/MultiAgentCodeOrchestration/config/CodeProgrammerAgent_assistant_config.yaml
52 | - C:/Git/azureai-assistant-tool/samples/MultiAgentCodeOrchestration/config/CodeInspectionAgent_assistant_config.yaml
53 | tool_resources: null
54 | functions: []
55 | file_search: false
56 | code_interpreter: false
57 | output_folder_path: output
58 | ai_client_type: OPEN_AI
59 | assistant_type: chat_assistant
60 | completion_settings:
61 | frequency_penalty: 0.0
62 | max_tokens: 4096
63 | presence_penalty: 0.0
64 | response_format: text
65 | temperature: 0.17
66 | top_p: 0.1
67 | seed: null
68 | max_text_messages: null
69 | assistant_role: user_interaction
70 | config_folder: null
71 |
--------------------------------------------------------------------------------
/samples/MultiAgentCodeOrchestration/config/UserAgent_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: UserAgent
2 | instructions: |-
3 | Your task is to help the user make decisions and guide them effectively:
4 | ### Objectives:
5 | - Decide whether a plan needs to be created or improved by TaskPlannerAgent.
6 | - Confirm with the user to proceed with the execution of the plan using TaskExecutionAgent.
7 | - Guide the user if their request is not related to planning or execution.
8 |
9 | ### Expected Actions:
10 | - If the user request pertains to planning, request TaskPlannerAgent to create or improve a plan.
11 | - Always seek user confirmation before proceeding to execution of the plan.
12 | - If the user confirms, initiate the execution of the plan using TaskExecutionAgent.
13 | - If the user request is not relevant for planning or execution, guide the user with a suggestion.
14 |
15 | ### Response Format:
16 | Return the response in the following JSON format to indicate the required action:
17 | ```json
18 | {
19 | "action": "",
20 | "details": ""
21 | }
22 | ```
23 | Where the `action` field can be:
24 | - "create_plan" for creating a new plan,
25 | - "improve_plan" for improving an existing plan,
26 | - "execute_plan" for confirming execution of the current plan,
27 | - "not_relevant" for requests unrelated to planning or execution, with guidance in details.
28 |
29 | ### Examples:
30 | - **Relevant Request for Planning:**
31 | User: "Please create a Python class that reads the input file and writes the output to a new file."
32 | Response:
33 | ```json
34 | {
35 | "action": "create_plan",
36 | "details": "Creating a plan for a Python class to read the input file and write to a new file."
37 | }
38 | ```
39 |
40 | - **Relevant Request for Improving a Plan:**
41 | User: "Can you enhance the current plan to include unit tests?"
42 | Response:
43 | ```json
44 | {
45 | "action": "improve_plan",
46 | "details": "Enhancing the plan to include unit tests."
47 | }
48 | ```
49 |
50 | - **Request to Execute Plan:**
51 | User: "Please execute the current plan."
52 | Response:
53 | ```json
54 | {
55 | "action": "execute_plan",
56 | "details": "Proceeding to execute the current plan."
57 | }
58 | ```
59 |
60 | - **Irrelevant Request Example:**
61 | User: "What is the capital of France?"
62 | Response:
63 | ```json
64 | {
65 | "action": "not_relevant",
66 | "details": "It seems your request isn't related to software development. I'm here to help with a wide range of questions and tasks, any specific area you'd like assistance with?"
67 | }
68 | ```
69 | model: gpt-4o
70 | assistant_id:
71 | file_references: null
72 | tool_resources: null
73 | functions: []
74 | file_search: false
75 | code_interpreter: false
76 | output_folder_path: output
77 | ai_client_type: OPEN_AI
78 | assistant_type: chat_assistant
79 | completion_settings:
80 | frequency_penalty: 0.0
81 | max_tokens: 4096
82 | presence_penalty: 0.0
83 | response_format: json_object
84 | temperature: 0.17
85 | top_p: 0.1
86 | seed: null
87 | max_text_messages: null
88 | assistant_role: user_interaction
89 | config_folder: null
90 |
--------------------------------------------------------------------------------
/samples/MultiAgentCodeOrchestration/config/function_error_specs.json:
--------------------------------------------------------------------------------
1 | {
2 | "file_not_found": "The requested file was not found. Please find the file by name and try again.",
3 | "directory_not_found": "The requested input directory was not found. Please check the directory path e.g. find all folders by name from current directory or retrieve the current directory structure (if not retrieved already) and then revise the directory name",
4 | "no_matching_folders_found": "No matching folders were found. Have you retrieved current directory structure to check if there are similar folder names? If not, find if the current directory structure contains similar folder names, otherwise inform user that no matching folders were found.",
5 | "no_matching_files_found": "No matching files were found. Please find files by an extension from the directory.",
6 | "json_load_content_error": "An error occurred while loading the content to JSON file. Please check the content contains valid json and try once again.",
7 | "invalid_input": "Invalid input provided. Please check the function input parameters and try again.",
8 | "generic_error": "An unexpected error occurred. Please check the function input parameters and try again."
9 | }
--------------------------------------------------------------------------------
/samples/PetTravelPlanChatAssistant/README.md:
--------------------------------------------------------------------------------
1 | # Sample: Plan pets travel using yaml form and ChatAssistantClient
2 |
3 | This sample demonstrates how to leverage a YAML form (alternatively, the form could be in some other text format) to create an intelligent chatbot experience tailored for pet travel planning. Utilizing the ChatAssistantClient, the application guides users through a series of inquiries and actions defined in a YAML configuration, helping pet owners plan travel with their pets seamlessly.
4 |
5 | ## Prerequisites
6 |
7 | Please see the [Prerequisites][Prerequisities] for details.
8 |
9 | ## Configure the sample
10 |
11 | ### Configure PetTravelPlanChatAssistant_assistant_config.yaml under config folder.
12 | This sample provides example configuration file, you need to tailor it for your environment
13 | - Check the model to use your model deployment name
14 | - Check the file references path is setup with your own paths, NOTE: file_references field in yaml requires absolute path.
15 |
16 | ### Configure the YAML Form
17 | The core of this sample is the YAML form that defines the chatbot's logic. Here's a brief overview of configuring your YAML:
18 | - Define Inquiries: Specify the questions, options, and response types the bot should use.
19 | - Set Up Actions: Outline the actions to be taken based on user responses, including validation rules and procedures for sending emails or SMS messages.
20 | - Implement Validation: Ensure input from users is validated according to the rules you define.
21 |
22 | This sample uses `FormTemplateForPetTransportation.yaml` found in the sample folder for chatbot logic. The logic includes:
23 |
24 | - Inquiries to engage users with questions that can be multiple-choice or open-ended. Multiple-choice inquiries allow for branching logic, leading to different paths based on the user's selection.
25 | - Actions for executing functions like sending notifications or booking services in response to user choices.
26 | - Validation to check user inputs against specific formats or criteria, ensuring accuracy and prompting corrections as needed.
27 | These components work together to offer a tailored chatbot experience, guiding users through the pet travel planning process with personalized paths and interactions.
28 |
29 | NOTE: If you want to use OpenAI Assistants API, you can do that by replacing the `AsyncChatAssistantClient` with `AsyncAssistantClient` and the configuration
30 | file under config folder to use OpenAI Assistants.
31 |
32 | ## Run the sample
33 |
34 | ```sh
35 | python main.py
36 | ```
37 |
38 | ## Example run
39 |
40 | 
41 |
42 | [Prerequisities]: ../../README.md
--------------------------------------------------------------------------------
/samples/PetTravelPlanChatAssistant/config/PetTravelPlanChatAssistant_assistant_config.yaml:
--------------------------------------------------------------------------------
1 | name: PetTravelPlanChatAssistant
2 | instructions: |-
3 | ## Pre-requisites for processing:
4 | - You have access to document form (yaml file) which contains questions that needs to be filled for travelling with pets.
5 | - Here is the form you need to follow and get answers to {file_reference:0}
6 |
7 | ## Requirements
8 | 1. You are required to ask only one question at a time from the form document and collect answers.
9 | 2. You only ask the same question more than once if you do not get a satisfactory answer. The document form you have lists either possible answer options or accepts free form answers.
10 | 3. To know what is the next question to ask from the form, you will always analyze the earlier questions and answers in the conversation
11 | 4. After all questions from the form have been answered successfully, you will provide a summary to the user which contains all questions and answers.
12 | model: gpt-4-32k
13 | assistant_id: 770c45f7-fd9e-4364-a3ab-ef4d5d861b4c
14 | file_references:
15 | - C:/Git/azureai-assistant-tool/samples/PetTravelPlanChatAssistant/FormTemplateForPetTransportation.yaml
16 | tool_resources: null
17 | functions:
18 | - type: function
19 | function:
20 | name: validate_booking_reference
21 | module: functions.user_functions
22 | description: Validates that the booking reference string is exactly 6 characters
23 | long and consists only of capital letters.
24 | parameters:
25 | type: object
26 | properties:
27 | booking_reference:
28 | type: string
29 | description: The booking reference string to validate.
30 | required:
31 | - booking_reference
32 | - type: function
33 | function:
34 | name: send_email
35 | module: functions.user_functions
36 | description: Send the summary of the reservation and payment link via email.
37 | parameters:
38 | type: object
39 | properties:
40 | email_address:
41 | type: string
42 | description: The recipient's email address
43 | reservation_summary:
44 | type: string
45 | description: The summary of the reservation details
46 | payment_link:
47 | type: string
48 | description: The URL of the payment link
49 | required:
50 | - email_address
51 | - reservation_summary
52 | - payment_link
53 | - type: function
54 | function:
55 | name: send_sms
56 | module: functions.user_functions
57 | description: Send the summary of the reservation and payment link via SMS.
58 | parameters:
59 | type: object
60 | properties:
61 | phone_number:
62 | type: string
63 | description: The phone number to which the SMS will be sent
64 | message:
65 | type: string
66 | description: The body of the SMS message containing the reservation summary
67 | and payment link
68 | required:
69 | - phone_number
70 | - message
71 | file_search: false
72 | code_interpreter: false
73 | output_folder_path: output
74 | ai_client_type: AZURE_OPEN_AI
75 | assistant_type: chat_assistant
76 | completion_settings: null
77 | assistant_role: user
78 |
--------------------------------------------------------------------------------
/samples/PetTravelPlanChatAssistant/config/function_error_specs.json:
--------------------------------------------------------------------------------
1 | {
2 | "file_not_found": "The requested file was not found. Please find the file by name and try again.",
3 | "directory_not_found": "The requested input directory was not found. Please check the directory path e.g. find all folders by name from current directory or retrieve the current directory structure (if not retrieved already) and then revise the directory name",
4 | "no_matching_folders_found": "No matching folders were found. Have you retrieved current directory structure to check if there are similar folder names? If not, find if the current directory structure contains similar folder names, otherwise inform user that no matching folders were found.",
5 | "no_matching_files_found": "No matching files were found. Please find files by an extension from the directory.",
6 | "json_load_content_error": "An error occurred while loading the content to JSON file. Please check the content contains valid json and try once again.",
7 | "invalid_input": "Invalid input provided. Please check the function input parameters and try again.",
8 | "generic_error": "An unexpected error occurred. Please check the function input parameters and try again."
9 | }
--------------------------------------------------------------------------------
/samples/PetTravelPlanChatAssistant/functions/user_functions.py:
--------------------------------------------------------------------------------
1 | from azure.ai.assistant.management.function_config_manager import FunctionConfigManager
2 | from azure.ai.assistant.management.function_config_manager import FunctionConfigManager
3 | from azure.ai.assistant.management.logger_module import logger
4 | from datetime import datetime
5 | import json
6 | import os
7 | import platform
8 | import random
9 | import re
10 |
11 | # This file is auto-generated. Do not edit directly.
12 |
# User function: validate_booking_reference
def validate_booking_reference(booking_reference):
    """
    Validate a booking reference code.

    :param booking_reference: Candidate booking reference; must be a string of
        exactly six characters, each an uppercase letter (A-Z) or digit (0-9).
    :return: JSON string with {"result": True} on success, or
        {"function_error": <message>} when validation fails.
    """
    # Updated pattern to accept uppercase letters (A-Z) and numbers (0-9)
    pattern = r'^[A-Z0-9]{6}$'
    # Check if booking_reference parameter is a string and matches the pattern
    if not isinstance(booking_reference, str) or not re.match(pattern, booking_reference):
        # The config manager is only needed to resolve the error message,
        # so it is created lazily on the failure path.
        function_config_manager = FunctionConfigManager()
        error_message = function_config_manager.get_error_message('invalid_input')
        logger.error(error_message)
        return json.dumps({"function_error": error_message})
    # All validations passed
    return json.dumps({"result": True})
25 |
# User function: send_email
def send_email(email_address, reservation_summary, payment_link):
    """
    Simulate sending a reservation summary and payment link via email.

    The email is not actually sent; the content is only logged. In a real
    deployment this is where an email service provider API would be called.

    :param email_address: The recipient's email address.
    :param reservation_summary: The summary of the reservation details.
    :param payment_link: The URL of the payment link.
    :return: JSON string with {"result": {...}} echoing the inputs on
        success, or {"function_error": <message>} on failure.
    """
    try:
        # Simulate sending an email by printing the details (since we can't actually send emails)
        # In a real scenario, here you would use an email service provider API
        email_content = (
            f"To: {email_address}\n\n"
            f"Reservation Summary:\n{reservation_summary}\n\n"
            f"Payment Link: {payment_link}"
        )
        # Log the simulated email content as proof of the "send"
        logger.info(f"Email content:\n{email_content}")

        # Since it's a simulation, always return success
        result = {
            "email_sent": True,
            "email_address": email_address,
            "reservation_summary": reservation_summary,
            "payment_link": payment_link
        }
        return json.dumps({"result": result})
    except Exception as e:
        # The config manager is only needed to resolve the error message,
        # so it is created lazily on the failure path.
        function_config_manager = FunctionConfigManager()
        error_message = function_config_manager.get_error_message('generic_error')
        logger.error(f"{error_message}: {str(e)}")
        return json.dumps({"function_error": error_message})
49 |
# User function: send_sms
def send_sms(phone_number, message):
    """
    Simulate sending a reservation summary and payment link via SMS.

    The SMS is not actually sent; the message is only logged. In a real
    deployment this is where an SMS gateway API would be called.

    :param phone_number: The phone number to which the SMS will be sent.
    :param message: The body of the SMS message.
    :return: JSON string with {"result": <confirmation>} on success, or
        {"function_error": <message>} on failure.
    """
    try:
        # Emulating SMS sending process; in reality you'd have an API or service call here
        # Log the SMS message as proof of the simulated "sending" operation
        logger.info(f"Sending SMS to {phone_number}: {message}")
        return json.dumps({"result": f"SMS sent to {phone_number} successfully."})
    except Exception as e:
        # Created lazily: only the failure path needs the error-message lookup.
        function_config_manager = FunctionConfigManager()
        error_message = function_config_manager.get_error_message('generic_error')
        # Include the exception detail, consistent with send_email's logging.
        logger.error(f"{error_message}: {str(e)}")
        return json.dumps({"function_error": error_message})
64 |
65 |
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/README.md:
--------------------------------------------------------------------------------
1 | # Azure AI Assistant Client Library
2 |
3 | ## Build
4 | - `python setup.py sdist bdist_wheel`
5 |
6 | ## Installation
7 | - Install azure-ai-assistant package with all dependencies
8 | - `pip install /path/to/azure_ai_assistant-x.x.x-py3-none-any.whl`
9 | - For development, install without dependencies
10 | - `pip install --force-reinstall --no-deps /path/to/azure_ai_assistant-x.x.x-py3-none-any.whl`
11 | - For development, install in edit mode
12 | - `pip install -e . `
13 | - Note: the edit-mode installation step still needs to be verified
14 |
15 | ## Management Module
16 | - This contains middleware components for assistants configuration and execution management.
17 |
18 | ## Functions Module
19 | - Currently following functions have been implemented with specifications (in config folder)
20 | - File Functions
21 | - `fetch_current_datetime`: Get the current time as a JSON string.
22 | - `fetch_detailed_files_info_in_directory`: Get information about files inside a given folder and return as a JSON string.
23 | - `list_files_from_directory`: Returns a list of files of a certain type from a specified directory.
24 | - `copy_multiple_files_by_extension`: Copies files of a certain type from an input directory to an output directory.
25 | - `copy_specific_file_to_directory`: Copies a single file from an input directory to an output directory.
26 | - `create_file_with_specified_content`: Creates a new file with the provided content in the specified directory, with an optional file extension.
27 | - `retrieve_file_content_from_directory`: Retrieves the content of a specified file in a given directory.
28 | - `get_content_from_matching_files`: Gets the content of all files matching with a specific file extension in a given directory.
29 | - `retrieve_current_directory_structure_subfolders`: Retrieves the structure of the current directory and its subdirectories.
30 | - `find_files_by_name_in_directory`: Searches for files matching specific criteria by name in a directory and its sub-directories (case-insensitive).
31 | - `find_files_by_extension_in_directory`: Searches for files matching specific criteria by file extension in a directory and its sub-directories (case-insensitive).
32 |
33 | ## Dependencies
34 | - openai
35 | - python-Levenshtein
36 | - fuzzywuzzy
37 | - azure-cognitiveservices-speech
38 | - Pillow
39 |
40 | ## Setup keys
41 | 1. Set the OpenAI key
42 | - Windows:
43 | - setx OPENAI_API_KEY "Your OpenAI Key"
44 | - Linux/Mac:
45 | - export OPENAI_API_KEY="Your OpenAI Key"
46 |
47 | 2. Set Cognitive Services Speech key (if you want to use speech input)
48 | - Windows:
49 | - setx SPEECH_KEY "Your Speech Key"
50 | - setx SPEECH_REGION "Your Speech Region"
51 | - Linux/Mac:
52 | - export SPEECH_KEY="Your Speech Key"
53 | - export SPEECH_REGION="Your Speech Region"
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/sdk/azure-ai-assistant/azure/__init__.py
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/sdk/azure-ai-assistant/azure/ai/__init__.py
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/sdk/azure-ai-assistant/azure/ai/assistant/__init__.py
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/_version.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # --------------------------------------------------------------------------
3 | # Copyright (c) Microsoft Corporation. All rights reserved.
4 | # Licensed under the MIT License. See License.txt in the project root for license information.
5 | # Code generated by Microsoft (R) AutoRest Code Generator.
6 | # Changes may cause incorrect behavior and will be lost if the code is regenerated.
7 | # --------------------------------------------------------------------------
8 |
9 | VERSION = "0.5.2a1"
10 |
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/audio/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/sdk/azure-ai-assistant/azure/ai/assistant/audio/__init__.py
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/audio/azure_keyword_recognizer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | import azure.cognitiveservices.speech as speechsdk
5 | from azure.ai.assistant.management.logger_module import logger
6 | from scipy.signal import resample_poly
7 | import numpy as np
8 |
9 |
def convert_sample_rate(audio_data: np.ndarray, orig_sr: int = 24000, target_sr: int = 16000) -> np.ndarray:
    """
    Resample int16 PCM audio from orig_sr to target_sr using polyphase filtering.

    Parameters:
    - audio_data: np.ndarray
        Input audio samples as an int16 NumPy array.
    - orig_sr: int
        Sample rate of the input audio.
    - target_sr: int
        Sample rate to convert the audio to.

    Returns:
    - np.ndarray
        The resampled audio as an int16 NumPy array.
    """
    from math import gcd

    # Reduce the rate ratio to the smallest up/down integer factors.
    common = gcd(orig_sr, target_sr)
    up = target_sr // common
    down = orig_sr // common

    # Resample in float32 for precision, then clamp back into the int16
    # range before converting the dtype back.
    resampled = resample_poly(audio_data.astype(np.float32), up, down)
    return np.clip(resampled, -32768, 32767).astype(np.int16)
44 |
45 |
class AzureKeywordRecognizer:
    """
    A class to recognize specific keywords from PCM audio streams using Azure Cognitive Services.
    """

    def __init__(self, model_file: str, callback, sample_rate: int = 16000, channels: int = 1):
        """
        Initializes the AzureKeywordRecognizer.

        :param model_file: Path to the keyword recognition model file.
        :type model_file: str
        :param callback: Function invoked with the recognition result when the
            keyword is detected.
        :param sample_rate: Input sample rate in Hz; must be 16000 or 24000.
            24 kHz input is downsampled to 16 kHz before recognition.
        :type sample_rate: int
        :param channels: Number of audio channels; only mono (1) is supported.
        :type channels: int

        :raises ValueError: If the callback is not callable, or the sample rate
            or channel count is unsupported.
        """
        # Validate all arguments up front, before any SDK objects are built,
        # so a bad configuration fails fast without allocating resources.
        if not callable(callback):
            raise ValueError("Callback must be a callable function.")
        # Validate the sample rate is either 16000 or 24000
        if sample_rate not in [16000, 24000]:
            raise ValueError("Invalid sample rate. Supported rates are 16000 and 24000.")
        # Validate the number of channels is 1
        if channels != 1:
            raise ValueError("Invalid number of channels. Only mono audio is supported.")

        self.sample_rate = sample_rate
        self.channels = channels
        self.keyword_detected_callback = callback

        # Create a push stream to which we'll write PCM audio data
        self.audio_stream = speechsdk.audio.PushAudioInputStream()
        self.audio_config = speechsdk.audio.AudioConfig(stream=self.audio_stream)

        # Initialize the speech recognizer
        self.recognizer = speechsdk.KeywordRecognizer(
            audio_config=self.audio_config
        )

        # Connect callback functions to the recognizer
        self.recognizer.recognized.connect(self._on_recognized)
        self.recognizer.canceled.connect(self._on_canceled)

        # Define the keyword recognition model
        self.keyword_model = speechsdk.KeywordRecognitionModel(filename=model_file)
        self.is_started = False

    def start_recognition(self):
        """
        Starts the keyword recognition process.

        Reconnects the recognized/canceled event handlers if a previous
        stop_recognition() call disconnected them, then starts a single
        asynchronous keyword recognition. The callback supplied at
        construction time is invoked when the keyword is detected.
        """
        if not self.recognizer.recognized.is_connected():
            self.recognizer.recognized.connect(self._on_recognized)
        if not self.recognizer.canceled.is_connected():
            self.recognizer.canceled.connect(self._on_canceled)

        self.recognizer.recognize_once_async(model=self.keyword_model)
        self.is_started = True

    def stop_recognition(self):
        """
        Stops the keyword recognition process and detaches event handlers.

        Blocks until the underlying SDK has finished stopping.
        """
        future = self.recognizer.stop_recognition_async()
        future.get()
        self.recognizer.recognized.disconnect_all()
        self.recognizer.canceled.disconnect_all()
        self.is_started = False

    def push_audio(self, pcm_data):
        """
        Pushes PCM audio data to the recognizer.

        :param pcm_data: Numpy array of int16 PCM audio samples.
        """
        if not self.is_started:
            # If keyword recognition is not started, ignore the audio data
            return

        if self.sample_rate == 24000:
            # The recognizer consumes 16 kHz audio, so downsample first.
            converted_audio = convert_sample_rate(pcm_data, orig_sr=24000, target_sr=16000)
            self.audio_stream.write(converted_audio.tobytes())
        else:
            self.audio_stream.write(pcm_data.tobytes())

    def _on_recognized(self, event: speechsdk.SpeechRecognitionEventArgs):
        """
        Internal callback when a keyword is recognized.
        """
        result = event.result
        if result.reason == speechsdk.ResultReason.RecognizedKeyword:
            if self.keyword_detected_callback:
                self.keyword_detected_callback(result)

    def _on_canceled(self, event: speechsdk.SpeechRecognitionCanceledEventArgs):
        """
        Internal callback when recognition is canceled.
        """
        logger.warning(f"Recognition canceled: {event.reason}")
        if event.result.reason == speechsdk.ResultReason.Canceled:
            logger.warning(f"Cancellation details: {event.cancellation_details}")
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/sdk/azure-ai-assistant/azure/ai/assistant/functions/__init__.py
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/functions/system_function_mappings.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | # Import the file functions
5 | from azure.ai.assistant.functions.file_functions import (
6 | fetch_current_datetime,
7 | fetch_detailed_files_info_in_directory,
8 | list_files_from_directory,
9 | copy_multiple_files_by_extension,
10 | copy_specific_file_to_directory,
11 | create_file_with_specified_content,
12 | retrieve_file_content_from_directory,
13 | get_content_from_matching_files,
14 | find_all_folders_by_name_from_current_directory,
15 | retrieve_current_directory_structure_subfolders,
16 | find_files_by_name_in_directory,
17 | find_files_by_extension_in_directory,
18 | )
19 |
20 | from azure.ai.assistant.functions.llm_functions import (
21 | take_screenshot,
22 | look_at_screen,
23 | generate_image,
24 | )
25 |
# Statically defined system functions for fast reference
# Maps each function's spec name (as declared in the JSON specs under
# config/) to its Python implementation, so callers can dispatch a tool
# call by name without dynamic lookups.
system_functions = {
    "fetch_current_datetime": fetch_current_datetime,
    "fetch_detailed_files_info_in_directory": fetch_detailed_files_info_in_directory,
    "list_files_from_directory": list_files_from_directory,
    "copy_multiple_files_by_extension": copy_multiple_files_by_extension,
    "copy_specific_file_to_directory": copy_specific_file_to_directory,
    "create_file_with_specified_content": create_file_with_specified_content,
    "retrieve_file_content_from_directory": retrieve_file_content_from_directory,
    "get_content_from_matching_files": get_content_from_matching_files,
    "find_all_folders_by_name_from_current_directory": find_all_folders_by_name_from_current_directory,
    "retrieve_current_directory_structure_subfolders": retrieve_current_directory_structure_subfolders,
    "find_files_by_name_in_directory": find_files_by_name_in_directory,
    "find_files_by_extension_in_directory": find_files_by_extension_in_directory,
    "take_screenshot": take_screenshot,
    "look_at_screen": look_at_screen,
    "generate_image": generate_image,
}
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/sdk/azure-ai-assistant/azure/ai/assistant/management/__init__.py
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/ai_client_type.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | from enum import Enum, auto
5 |
6 |
class AIClientType(Enum):
    """
    An enum for the different types of AI clients.
    """
    # Members carry explicit values in declaration order, matching what
    # auto() would assign (1-based, incrementing).
    AZURE_OPEN_AI = 1
    """Azure OpenAI client"""
    OPEN_AI = 2
    """OpenAI client"""
    AZURE_OPEN_AI_REALTIME = 3
    """Azure OpenAI client used with Realtime API"""
    OPEN_AI_REALTIME = 4
    """OpenAI client used with Realtime API"""
    AZURE_AI_AGENT = 5
    """Azure AI Agents client"""
21 |
22 |
class AsyncAIClientType(Enum):
    """
    An enum for the different types of AI clients.
    """
    # Members carry explicit values in declaration order, matching what
    # auto() would assign (1-based, incrementing).
    AZURE_OPEN_AI = 1
    """Azure OpenAI async client"""
    OPEN_AI = 2
    """OpenAI async client"""
    AZURE_OPEN_AI_REALTIME = 3
    """Azure OpenAI async client used with Realtime API"""
    OPEN_AI_REALTIME = 4
    """OpenAI async client used with Realtime API"""
    AZURE_AI_AGENT = 5
    """Azure AI Agents async client"""
37 |
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/async_conversation.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | from azure.ai.assistant.management.async_message import AsyncConversationMessage, TextMessage, AsyncImageMessage
5 |
6 | from openai.types.beta.threads import Message
7 | from openai import AsyncAzureOpenAI, AsyncOpenAI
8 |
9 | from typing import Optional, List, Union
10 | import asyncio
11 |
12 |
class AsyncConversation:
    """
    A class representing a conversation asynchronously.
    """
    def __init__(self) -> None:
        # Messages are kept in the order they were supplied to create();
        # the lookup helpers below return the first match in that order.
        self._messages : List[AsyncConversationMessage] = []

    @classmethod
    async def create(
        cls,
        ai_client: Union[AsyncOpenAI, AsyncAzureOpenAI],
        messages: List[Message],
        max_text_messages: Optional[int] = None
    ) -> 'AsyncConversation':
        """
        Creates a new instance of the AsyncConversation class.

        :param ai_client: The type of AI client to use for the conversation.
        :type ai_client: Union[AsyncOpenAI, AsyncAzureOpenAI]
        :param messages: The list of messages in the conversation.
        :type messages: List[Message]
        :param max_text_messages: The maximum number of messages to keep; when
            set, only the first max_text_messages entries of the input list
            are retained.
        :type max_text_messages: Optional[int]

        :return: A new instance of the AsyncConversation class.
        :rtype: AsyncConversation
        """
        instance = cls()

        # Convert all thread messages concurrently.
        tasks = [AsyncConversationMessage.create(ai_client, message) for message in messages]
        instance._messages = await asyncio.gather(*tasks)

        if max_text_messages is not None:
            instance._messages = instance._messages[:max_text_messages]

        return instance

    @property
    def messages(self) -> List[AsyncConversationMessage]:
        """
        Returns the list of messages in the conversation.

        :return: The list of messages in the conversation.
        :rtype: List[AsyncConversationMessage]
        """
        return self._messages

    def get_last_message(self, sender: str) -> Optional[AsyncConversationMessage]:
        """
        Returns the last message in the conversation from the specified sender.

        NOTE(review): this scans front-to-back and returns the first match, so
        it is the "last" message only when the stored list is ordered
        newest-first — confirm against how create() is called.

        :param sender: The sender of the message.
        :type sender: str

        :return: The matching message, or None if the sender has no messages.
        :rtype: Optional[AsyncConversationMessage]
        """
        for message in self._messages:
            if message.sender == sender:
                return message
        return None

    @property
    def text_messages(self) -> List[TextMessage]:
        """
        Returns the list of text message contents in the conversation.

        :return: The list of text message contents in the conversation.
        :rtype: List[TextMessage]
        """
        return [message.text_message for message in self._messages if message.text_message is not None]

    def get_last_text_message(self, sender: str) -> Optional[TextMessage]:
        """
        Returns the last text message content in the conversation from the specified sender.

        NOTE(review): like get_last_message, this returns the first match in
        stored order; see the ordering note there.

        :param sender: The sender of the message.
        :type sender: str
        :return: The matching text message content, or None if the sender has
            no text messages.
        :rtype: Optional[TextMessage]
        """
        for message in self._messages:
            if message.sender == sender and message.text_message is not None:
                return message.text_message
        return None

    def contains_file_id(self, file_id: str) -> bool:
        """
        Checks if the list of file messages contains a specific file ID.

        :param file_id: The file ID to check.
        :type file_id: str
        :return: True if the file ID is found, False otherwise.
        :rtype: bool
        """
        # Search both image attachments and file attachments across all messages.
        image_files_contains = any(image_message.file_id == file_id for message in self.messages for image_message in message.image_messages if image_message is not None)
        files_contains = any(file_message.file_id == file_id for message in self.messages for file_message in message.file_messages if file_message is not None)
        return image_files_contains or files_contains
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/async_task.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | import uuid
5 | from abc import ABC, abstractmethod
6 |
class AsyncTask(ABC):
    """
    Abstract base class for asynchronous tasks.

    Each task is assigned a unique id; subclasses implement execute().
    """
    def __init__(self):
        self.id = uuid.uuid4()  # Unique identifier for the task
        # Default until set_assistant_name() is called, so the attribute
        # always exists on the instance (avoids AttributeError on readers).
        self.assistant_name = None

    def set_assistant_name(self, assistant_name):
        """
        Sets the name of the assistant.

        :param assistant_name: The name of the assistant; None selects the
            "multi-assistant" default.
        :type assistant_name: str
        """
        self.assistant_name = "multi-assistant" if assistant_name is None else assistant_name

    @abstractmethod
    async def execute(self, callback=None):
        """
        Executes the task.

        :param callback: The callback function to call when the task is complete.
        :type callback: function

        :return: None
        :rtype: None
        """
        pass
32 |
33 |
class AsyncBasicTask(AsyncTask):
    """
    A task wrapping a single user request.

    :param user_request: The user request to process.
    :type user_request: str
    """
    def __init__(self, user_request):
        super().__init__()
        self.user_request = user_request

    async def execute(self, callback=None):
        """
        Runs the basic task, awaiting the completion callback when one is given.

        :param callback: The callback function to call when the task is complete.
        :type callback: function
        """
        if callback:
            await callback()
54 |
55 |
class AsyncBatchTask(AsyncTask):
    """
    An asynchronous task carrying a batch of user requests.

    :param requests: A list of user requests to process.
    :type requests: list
    """
    def __init__(self, requests):
        super().__init__()
        self.requests = requests

    async def execute(self, callback=None):
        """
        Runs the batch task, awaiting the completion callback if one was given.

        :param callback: The callback function to call when the task is complete.
        :type callback: function
        """
        if not callback:
            return
        await callback()
76 |
77 |
class AsyncMultiTask(AsyncTask):
    """
    An asynchronous task that fans work out across multiple assistants.

    :param requests: A list of requests, each request is a dict with 'assistant'
        and 'task' keys. A single dict is also accepted and will be converted
        to a list.
    :type requests: list or dict
    """
    def __init__(self, requests):
        super().__init__()
        self.requests = self._validate_and_convert_requests(requests)

    def _validate_and_convert_requests(self, requests):
        """
        Normalizes *requests* into a list of dictionaries.

        :param requests: A list of request dicts, or a single request dict.
        :type requests: list or dict
        :return: A list of request dictionaries.
        :rtype: list
        """
        if isinstance(requests, dict):
            return [requests]
        if isinstance(requests, list):
            for entry in requests:
                if not isinstance(entry, dict):
                    raise ValueError("All items in the requests list must be dictionaries.")
            return requests
        raise TypeError("Requests should be a dictionary or a list of dictionaries.")

    async def execute(self, callback=None):
        """
        Runs the multi task, awaiting the completion callback if one was given.

        :param callback: The callback function to call when the task is complete.
        :type callback: callable or None
        """
        try:
            if callback:
                await callback()
        except Exception as e:
            print(f"Error during task execution: {e}")
121 |
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/async_task_manager.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | from azure.ai.assistant.management.async_task import AsyncBasicTask, AsyncBatchTask, AsyncMultiTask
5 | from azure.ai.assistant.management.async_task_manager_callbacks import AsyncTaskManagerCallbacks
6 |
7 | import asyncio
8 | import uuid
9 | from datetime import datetime
10 |
11 |
class AsyncTaskManager:
    """
    This class is responsible for scheduling and executing tasks.

    :param callbacks: The callbacks to use for task execution.
    :type callbacks: AsyncTaskManagerCallbacks
    """

    # Singleton instance shared by every caller of get_instance().
    _instance = None

    def __init__(self, callbacks: "AsyncTaskManagerCallbacks"):
        self._callbacks = callbacks
        # (schedule_id, task, asyncio.Task-or-TimerHandle) records. Keeping a
        # reference to each created asyncio.Task prevents it from being
        # garbage-collected before it finishes (per asyncio documentation).
        self._scheduled_tasks = []

    @classmethod
    def get_instance(cls, callbacks) -> 'AsyncTaskManager':
        """
        Gets the singleton instance of the task manager.

        :param callbacks: The callbacks to use for task execution. Note: only
            applied when the singleton is first created; later calls return
            the existing instance unchanged.
        :type callbacks: AsyncTaskManagerCallbacks

        :return: The singleton instance of the task manager.
        :rtype: AsyncTaskManager
        """
        if cls._instance is None:
            cls._instance = AsyncTaskManager(callbacks)
        return cls._instance

    def create_basic_task(self, user_request) -> "AsyncBasicTask":
        """
        Creates a basic task.

        :param user_request: The user request to use for the task.
        :type user_request: str

        :return: The basic task.
        :rtype: AsyncBasicTask
        """
        return AsyncBasicTask(user_request)

    def create_batch_task(self, requests) -> "AsyncBatchTask":
        """
        Creates a batch task.

        :param requests: A list of user requests to use for the task.
        :type requests: list

        :return: The batch task.
        :rtype: AsyncBatchTask
        """
        return AsyncBatchTask(requests)

    def create_multi_task(self, requests) -> "AsyncMultiTask":
        """
        Creates a multi task.

        :param requests: A list of user requests to use for the task.
        :type requests: list

        :return: The multi task.
        :rtype: AsyncMultiTask
        """
        return AsyncMultiTask(requests)

    async def schedule_task(self, task, assistant_name=None, start_time=None, interval_seconds=0, recurrence_count=1):
        """
        Schedules a task for execution.

        :param task: The task to schedule.
        :type task: Task
        :param assistant_name: The name of the assistant to use for the task.
        :type assistant_name: str
        :param start_time: The start time for the task.
        :type start_time: datetime
        :param interval_seconds: The interval in seconds for recurring tasks.
        :type interval_seconds: int
        :param recurrence_count: The number of times to recur the task.
        :type recurrence_count: int

        :return: The ID of the scheduled task.
        :rtype: str
        """
        schedule_id = str(uuid.uuid4())
        # Multi-assistant tasks name their assistants per request.
        task.set_assistant_name(None if isinstance(task, AsyncMultiTask) else assistant_name)
        if start_time is None or (start_time - datetime.now()).total_seconds() <= 0:
            exec_task = asyncio.create_task(self._execute_task(task, schedule_id, interval_seconds, recurrence_count))
            self._scheduled_tasks.append((schedule_id, task, exec_task))
        else:
            delay = (start_time - datetime.now()).total_seconds()
            # get_running_loop() is the non-deprecated way to reach the loop
            # from inside a coroutine. The lambda defers coroutine creation
            # until the timer fires, instead of building it eagerly here.
            timer_handle = asyncio.get_running_loop().call_later(
                delay,
                lambda: self._scheduled_tasks.append(
                    (schedule_id, task,
                     asyncio.create_task(self._execute_task(task, schedule_id, interval_seconds, recurrence_count)))
                ),
            )
            self._scheduled_tasks.append((schedule_id, task, timer_handle))
        return schedule_id

    async def _execute_task(self, task, schedule_id, interval_seconds=0, recurrence_count=1):
        # Report start, run the (possibly recurring) task, and convert any
        # failure into an on_task_failed callback instead of letting it escape.
        try:
            await self._callbacks.on_task_started(task, schedule_id)
            await self._run_task_with_recurrence(task, schedule_id, interval_seconds, recurrence_count)
        except Exception as e:
            await self._callbacks.on_task_failed(task, schedule_id, str(e))

    async def _run_task_with_recurrence(self, task, schedule_id, interval_seconds, recurrence_count):
        # Execute recurrence_count times, sleeping interval_seconds between
        # runs but not after the final one.
        async def callback():
            await self._callbacks.on_task_execute(task, schedule_id)

        while recurrence_count > 0:
            await task.execute(callback=callback)
            recurrence_count -= 1
            if recurrence_count > 0:
                await asyncio.sleep(interval_seconds)
        await self._callbacks.on_task_completed(task, schedule_id, "Success")
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/async_task_manager_callbacks.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
class AsyncTaskManagerCallbacks:
    """
    No-op asynchronous lifecycle hooks used during task execution.

    Subclass this and override the hooks of interest.
    """

    async def on_task_started(self, task, schedule_id) -> None:
        """Hook invoked when a task starts.

        :param task: The task that started.
        :type task: Task
        :param schedule_id: The ID of the schedule.
        :type schedule_id: str

        :return: None
        :rtype: None
        """

    async def on_task_completed(self, task, schedule_id, result) -> None:
        """Hook invoked when a task completes successfully.

        :param task: The task that completed.
        :type task: Task
        :param schedule_id: The ID of the schedule.
        :type schedule_id: str
        :param result: The result of the task.
        :type result: Any

        :return: None
        :rtype: None
        """

    async def on_task_failed(self, task, schedule_id, error) -> None:
        """Hook invoked when a task fails or encounters an error.

        :param task: The task that failed.
        :type task: Task
        :param schedule_id: The ID of the schedule.
        :type schedule_id: str
        :param error: The error that occurred.
        :type error: Exception

        :return: None
        :rtype: None
        """

    async def on_task_execute(self, task, schedule_id) -> None:
        """Hook invoked for each execution cycle during a task's run.

        :param task: The task that is executing.
        :type task: Task
        :param schedule_id: The ID of the schedule.
        :type schedule_id: str

        :return: None
        :rtype: None
        """
60 |
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/base_chat_assistant_client.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | from azure.ai.assistant.management.assistant_client_callbacks import AssistantClientCallbacks
5 | from azure.ai.assistant.management.base_assistant_client import BaseAssistantClient
6 | from azure.ai.assistant.management.assistant_config_manager import AssistantConfigManager
7 | from azure.ai.assistant.management.assistant_config import AssistantConfig
8 | from azure.ai.assistant.management.exceptions import EngineError
9 | from azure.ai.assistant.management.logger_module import logger
10 |
11 | from typing import Optional
12 | import uuid
13 |
14 |
class BaseChatAssistantClient(BaseAssistantClient):
    """
    Base class for chat assistant clients.

    :param config_json: The JSON string containing the assistant configuration.
    :type config_json: str
    :param callbacks: The callback functions to handle messages from the assistant.
    :type callbacks: Optional[AssistantClientCallbacks]
    :param async_mode: Whether to run the assistant in async mode.
    :type async_mode: bool
    :param client_args: Additional keyword arguments for configuring the AI client.
    :type client_args: Dict
    """
    def __init__(
            self,
            config_json: str,
            callbacks: Optional[AssistantClientCallbacks] = None,
            async_mode: bool = False,
            **client_args
    ) -> None:
        super().__init__(config_json, callbacks, async_mode, **client_args)
        # Tool definitions passed to the model; None means "no tools".
        self._tools = None
        # Chat message history in OpenAI chat-completions format
        # (list of {"role": ..., "content": ...} dicts).
        self._messages = []

    def _init_chat_assistant_client(
            self,
            config_data: dict,
            is_create: bool = True,
            timeout: Optional[float] = None
    ):
        """
        Builds the assistant configuration from *config_data* and applies it
        to this client: resets system messages, refreshes tools, loads the
        selected functions, and persists the config locally.

        :param config_data: Dictionary form of the assistant configuration.
        :type config_data: dict
        :param is_create: When True, a fresh UUID is assigned as the assistant ID.
        :type is_create: bool
        :param timeout: Accepted for interface symmetry; not used in this method.
        :type timeout: Optional[float]

        :raises EngineError: If any step of the initialization fails.
        """
        try:
            # Create or update the assistant
            assistant_config = AssistantConfig.from_dict(config_data)
            if is_create:
                assistant_config.assistant_id = str(uuid.uuid4())
            # Order matters: system messages are rebuilt from the new config
            # before tools/functions are refreshed and the config is stored.
            self._reset_system_messages(assistant_config)
            tools = self._update_tools(assistant_config)
            # Normalize an empty tool list to None so callers can test truthiness.
            self._tools = tools if tools else None
            self._load_selected_functions(assistant_config)
            self._assistant_config = assistant_config

            # Update the local configuration using AssistantConfigManager
            # TODO make optional to save the assistant_config in the config manager
            config_manager = AssistantConfigManager.get_instance()
            config_manager.update_config(self._name, assistant_config.to_json())

        except Exception as e:
            logger.error(f"Failed to initialize assistant instance: {e}")
            raise EngineError(f"Failed to initialize assistant instance: {e}")

    def _purge(
            self,
            timeout: Optional[float] = None
    )-> None:
        """
        Removes this assistant's configuration from the local config manager
        and clears the client's state.

        :param timeout: Accepted for interface symmetry; not used in this method.
        :type timeout: Optional[float]

        :raises EngineError: If the purge fails.
        """
        try:
            logger.info(f"Purging chat assistant with name: {self.name}")
            # retrieve the assistant configuration
            config_manager = AssistantConfigManager.get_instance()
            assistant_config = config_manager.get_config(self.name)

            # remove from the local config
            config_manager.delete_config(assistant_config.name)

            # _clear_variables is provided by the base class.
            self._clear_variables()

        except Exception as e:
            logger.error(f"Failed to purge chat assistant with name: {self.name}: {e}")
            raise EngineError(f"Failed to purge chat assistant with name: {self.name}: {e}")

    def _append_tool_calls(self, tool_calls, tcchunklist):
        """
        Merges a list of streamed tool-call chunks into the accumulated
        *tool_calls* list, keyed by each chunk's index. The id, function name,
        and arguments arrive as string fragments and are concatenated.

        :param tool_calls: Accumulated tool-call dicts (mutated in place).
        :param tcchunklist: Streamed tool-call chunks to merge.
        :return: The updated *tool_calls* list.
        """
        for tcchunk in tcchunklist:
            # Grow the list until the chunk's index has a slot.
            while len(tool_calls) <= tcchunk.index:
                tool_calls.append({"id": "", "type": "function", "function": {"name": "", "arguments": ""}})
            tc = tool_calls[tcchunk.index]
            # Chunk fields may be None mid-stream; treat None as empty fragment.
            tc["id"] += tcchunk.id or ""
            tc["function"]["name"] += tcchunk.function.name or ""
            tc["function"]["arguments"] += tcchunk.function.arguments or ""
        return tool_calls

    def _reset_system_messages(self, assistant_config: AssistantConfig):
        """
        Replaces the message history with a single "developer"-role message
        built from the configured instructions (file references expanded by
        the base-class helper).
        """
        instructions = self._replace_file_references_with_content(assistant_config)
        self._messages = [{"role": "developer", "content": instructions}]

    def _parse_conversation_messages(self, messages):
        """
        Appends conversation messages to the chat history in chat-completions
        format, including text, base64-encoded images, and image URLs.

        :param messages: Conversation messages to convert.
        """
        # NOTE(review): iterates reversed(messages) — assumes the input list is
        # newest-first so history ends up oldest-first; confirm with callers.
        for message in reversed(messages):
            content = []
            if message.text_message:
                content.append({"type": "text", "text": message.text_message.content})
            if len(message.image_messages) > 0:
                for image_message in message.image_messages:
                    img_base64 = image_message.get_image_base64()
                    if img_base64:
                        # The data URL hard-codes image/jpeg for base64 payloads.
                        img_str = f"data:image/jpeg;base64,{img_base64}"
                        content.append({"type": "image_url", "image_url": {"url": img_str, "detail": "high"}})
            if message.image_urls:
                for image_url in message.image_urls:
                    content.append({"type": "image_url", "image_url": {"url": image_url, "detail": "high"}})
            # Messages with no usable content are skipped entirely.
            if content:
                self._messages.append({"role": message.role, "content": content})
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/conversation.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | from azure.ai.assistant.management.message import ConversationMessage, TextMessage, ImageMessage
5 | from azure.ai.assistant.management.ai_client_factory import AIClient
6 |
7 | from openai.types.beta.threads import Message
8 |
9 | from typing import Optional, List
10 |
11 |
class Conversation:
    """
    A class representing a conversation.

    :param ai_client: The AI client (OpenAI, AzureOpenAI, or AIProjectClient).
    :type ai_client: AIClient
    :param messages: The list of messages in the conversation.
    :type messages: List[Message]
    :param max_text_messages: The maximum number of text messages to include in the conversation.
    :type max_text_messages: Optional[int]

    :return: A new instance of the Conversation class.
    :rtype: Conversation
    """
    def __init__(
            self,
            ai_client : AIClient,
            messages: List[Message],
            max_text_messages: Optional[int] = None
    ) -> None:
        wrapped = [ConversationMessage(ai_client, raw_message) for raw_message in messages]
        # Keep only the first max_text_messages entries when a limit is given.
        self._messages = wrapped if max_text_messages is None else wrapped[:max_text_messages]

    @property
    def messages(self) -> List[ConversationMessage]:
        """
        Returns the list of messages in the conversation.

        :return: The list of messages in the conversation.
        :rtype: List[ConversationMessage]
        """
        return self._messages

    def get_last_message(self, sender: str) -> ConversationMessage:
        """
        Returns the last message in the conversation from the specified sender.

        :param sender: The sender of the message.
        :type sender: str

        :return: The last message in the conversation from the specified sender.
        :rtype: ConversationMessage
        """
        # Messages are scanned in list order; the first match is returned.
        for candidate in self._messages:
            if candidate.sender == sender:
                return candidate
        return None

    @property
    def text_messages(self) -> List[TextMessage]:
        """
        Returns the list of text message contents in the conversation.

        :return: The list of text message contents in the conversation.
        :rtype: List[TextMessage]
        """
        return [candidate.text_message for candidate in self._messages
                if candidate.text_message is not None]

    def get_last_text_message(self, sender: str) -> TextMessage:
        """
        Returns the last text message content in the conversation from the specified sender.

        :param sender: The sender of the message.
        :type sender: str
        :return: The last text message content in the conversation from the specified sender.
        :rtype: TextMessage
        """
        for candidate in self._messages:
            if candidate.sender == sender and candidate.text_message is not None:
                return candidate.text_message
        return None

    def contains_file_id(self, file_id: str) -> bool:
        """
        Checks if the list of file messages contains a specific file ID.

        :param file_id: The file ID to check.
        :type file_id: str
        :return: True if the file ID is found, False otherwise.
        :rtype: bool
        """
        # Search both image attachments and file attachments for the ID.
        for message in self.messages:
            for image_message in message.image_messages:
                if image_message is not None and image_message.file_id == file_id:
                    return True
            for file_message in message.file_messages:
                if file_message is not None and file_message.file_id == file_id:
                    return True
        return False
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/exceptions.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
class EngineError(Exception):
    """Root of the exception hierarchy for the engine module."""
7 |
class ConfigError(EngineError):
    """Raised for general configuration-related problems."""
11 |
class InvalidJSONError(ConfigError):
    """Raised when configuration JSON is malformed."""
15 |
class DuplicateConfigError(ConfigError):
    """Raised when a configuration entry already exists."""
19 |
class UpdateConfigError(ConfigError):
    """Raised when updating a configuration fails."""
23 |
class DeleteConfigError(ConfigError):
    """Raised when deleting a configuration fails."""
27 |
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/logger_module.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | import logging
5 | import os
6 |
7 |
class BroadcasterLoggingHandler(logging.Handler):
    """Logging handler that forwards each formatted record to a broadcaster."""

    def __init__(self, broadcaster):
        super().__init__()
        self.broadcaster = broadcaster

    def emit(self, record):
        # Forward the formatted record; any failure is routed through
        # handleError so the logging call itself never raises.
        try:
            self.broadcaster.emit(self.format(record))
        except Exception:
            self.handleError(record)
19 |
def setup_logger() -> logging.Logger:
    """
    Creates the 'assistant_logger' at INFO level. Console and file sinks are
    enabled via the ASSISTANT_LOG_TO_CONSOLE and ASSISTANT_LOG_TO_FILE
    environment variables; with neither set, the logger stays disabled. Log
    messages include the emitting function name.

    :return: The logger instance.
    :rtype: logging.Logger
    """

    # Silence the realtime_ai package logger entirely.
    logging.getLogger('realtime_ai').setLevel(logging.CRITICAL)

    assistant_logger = logging.getLogger('assistant_logger')
    assistant_logger.setLevel(logging.INFO)

    # Disabled by default; re-enabled below only if a sink is requested.
    assistant_logger.disabled = True

    # Function name is part of the log format.
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(funcName)s - %(message)s')

    truthy = ('true', '1', 't')
    console_enabled = os.getenv('ASSISTANT_LOG_TO_CONSOLE', 'false').lower() in truthy
    file_enabled = os.getenv('ASSISTANT_LOG_TO_FILE', 'false').lower() in truthy

    if file_enabled:
        assistant_logger.disabled = False
        # UTF-8 encoding for the file sink.
        file_handler = logging.FileHandler('assistant.log', encoding='utf-8')
        file_handler.setFormatter(formatter)
        assistant_logger.addHandler(file_handler)

    if console_enabled:
        assistant_logger.disabled = False
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        assistant_logger.addHandler(console_handler)

    return assistant_logger
63 |
def add_broadcaster_to_logger(broadcaster) -> None:
    """
    Attaches (or updates) a broadcaster on the global assistant logger and on
    the OpenAI logger, re-enabling the global logger.

    :param broadcaster: An instance of LogBroadcaster to broadcast log messages.
    """
    global logger

    logger.disabled = False
    # Update an existing BroadcasterLoggingHandler in place if present;
    # otherwise attach a fresh one.
    for existing in logger.handlers:
        if isinstance(existing, BroadcasterLoggingHandler):
            existing.broadcaster = broadcaster
            break
    else:  # no BroadcasterLoggingHandler found
        fresh = BroadcasterLoggingHandler(broadcaster)
        fresh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(funcName)s - %(message)s'))
        logger.addHandler(fresh)

    def _attach(target_logger):
        # Same update-or-attach logic for an arbitrary logger.
        for handler in target_logger.handlers:
            if isinstance(handler, BroadcasterLoggingHandler):
                handler.broadcaster = broadcaster
                return  # broadcaster already wired up

        new_handler = BroadcasterLoggingHandler(broadcaster)
        new_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(funcName)s - %(message)s'))
        target_logger.addHandler(new_handler)

    # Process the global logger (idempotent after the block above) and the
    # OpenAI logger.
    _attach(logger)
    _attach(logging.getLogger("openai"))
104 |
# Example usage:
# To enable console logging, set the environment variable ASSISTANT_LOG_TO_CONSOLE=true before running the script.
# If ASSISTANT_LOG_TO_CONSOLE is not set or set to false, logging will default to file.

# Module-level logger instance; sinks are selected from the environment
# variables above at import time.
logger = setup_logger()
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/message_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | import io, re
5 | from PIL import Image
6 | from azure.ai.assistant.management.logger_module import logger
7 |
8 |
9 | def _extract_image_urls(message: str) -> list:
10 | urls = re.findall(r'(https?://\S+)', message)
11 | image_urls = [url for url in urls if url.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.webp'))]
12 | return image_urls
13 |
def _resize_image(image_data: bytes, target_width: float, target_height: float) -> bytes:
    """Scale an image by the given width/height factors and return it as PNG bytes.

    Returns None if the image cannot be processed. Note that despite the
    parameter names, *target_width* and *target_height* are multiplied with
    the source dimensions, i.e. they act as scale factors.
    """
    try:
        with Image.open(io.BytesIO(image_data)) as source:
            scaled_size = (int(source.width * target_width),
                           int(source.height * target_height))
            scaled = source.resize(scaled_size, Image.Resampling.LANCZOS)

            out = io.BytesIO()
            scaled.save(out, format="PNG")
            return out.getvalue()
    except Exception as e:
        logger.error(f"Error resizing image: {e}")
        return None
27 |
28 | def _save_image(image_data: bytes, file_path: str) -> str:
29 | try:
30 | with open(file_path, 'wb') as f:
31 | f.write(image_data)
32 | return file_path
33 | except Exception as e:
34 | logger.error(f"Error saving image: {e}")
35 | return None
36 |
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/task.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | from abc import ABC, abstractmethod
5 | import os
6 | import glob
7 | import uuid
8 |
9 |
class Task(ABC):
    """
    Abstract base class that all task types derive from.
    """
    def __init__(self) -> None:
        # Every task gets a unique identifier at construction time.
        self.id = uuid.uuid4()

    def set_assistant_name(
        self,
        assistant_name
    ) -> None:
        """
        Sets the name of the assistant.

        :param assistant_name: The name of the assistant; None selects the
            generic "multi-assistant" label.
        :type assistant_name: str
        """
        self.assistant_name = "multi-assistant" if assistant_name is None else assistant_name

    @abstractmethod
    def execute(self, callback=None) -> None:
        """
        Executes the task.

        :param callback: The callback function to call when the task is complete.
        :type callback: function

        :return: None
        :rtype: None
        """
44 |
45 |
class BasicTask(Task):
    """
    A task wrapping a single user request.

    :param user_request: The user request to process.
    :type user_request: str
    """
    def __init__(self,
                 user_request: str) -> None:
        super().__init__()
        self.user_request = user_request

    def execute(self, callback=None) -> None:
        """
        Executes the basic task, invoking the callback if one was given.

        :param callback: The callback function to call when the task is complete.
        :type callback: function
        """
        if not callback:
            return
        callback()
67 |
68 |
class BatchTask(Task):
    """
    A task carrying a batch of user requests.

    :param requests: A list of user requests to process.
    :type requests: list
    """
    def __init__(self,
                 requests: list) -> None:
        super().__init__()
        self.requests = requests

    def execute(self, callback=None) -> None:
        """
        Executes the batch task, invoking the callback if one was given.

        :param callback: The callback function to call when the task is complete.
        :type callback: function
        """
        if not callback:
            return
        callback()
90 |
91 |
class MultiTask(Task):
    """
    A task that fans work out across multiple assistants.

    :param requests: A list of requests, each request is a dict with
        'assistant' and 'task' keys; a single dict is also accepted.
    :type requests: list
    """
    def __init__(self,
                 requests: list) -> None:
        super().__init__()
        # Normalized list of {'assistant': ..., 'task': ...} dicts.
        self.requests = self._validate_and_convert_requests(requests)

    def _validate_and_convert_requests(self, requests):
        """
        Normalizes *requests* into a list of dictionaries.

        :param requests: A list of request dicts, or a single request dict.
        :type requests: list or dict
        :return: A list of request dictionaries.
        :rtype: list
        """
        if isinstance(requests, dict):
            return [requests]
        if isinstance(requests, list):
            for entry in requests:
                if not isinstance(entry, dict):
                    raise ValueError("All items in the requests list must be dictionaries.")
            return requests
        raise TypeError("Requests should be a dictionary or a list of dictionaries.")

    def execute(self, callback=None) -> None:
        """
        Executes the multi task, invoking the callback if one was given.

        :param callback: The callback function to call when the task is complete.
        :type callback: function
        """
        if not callback:
            return
        callback()
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/task_manager.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | from azure.ai.assistant.management.task_manager_callbacks import TaskManagerCallbacks
5 | from azure.ai.assistant.management.task import BasicTask, BatchTask, MultiTask, Task
6 | import threading
7 | import uuid, time
8 | from datetime import datetime
9 |
10 |
class TaskManager:
    """
    This class is responsible for scheduling and executing tasks.

    Note: the class docstring previously appeared after `_instance = None`,
    which made it a discarded string literal rather than `__doc__`; it now
    leads the class body.

    :param callbacks: The callbacks to use for task execution.
    :type callbacks: TaskManagerCallbacks
    """

    # Singleton instance shared by every caller of get_instance().
    _instance = None

    def __init__(
            self,
            callbacks : "TaskManagerCallbacks"
    ) -> None:
        self._callbacks = callbacks
        # (schedule_id, task, thread_or_timer) records for scheduled work.
        self._scheduled_tasks = []

    @classmethod
    def get_instance(
            cls,
            callbacks : "TaskManagerCallbacks"
    ) -> 'TaskManager':
        """
        Gets the singleton instance of the task manager.

        :param callbacks: The callbacks to use for task execution. Only
            applied when the singleton is first created; later calls return
            the existing instance unchanged.
        :type callbacks: TaskManagerCallbacks

        :return: The singleton instance of the task manager.
        :rtype: TaskManager
        """
        if cls._instance is None:
            cls._instance = TaskManager(callbacks)
        return cls._instance

    def create_basic_task(self, user_request : str) -> "BasicTask":
        """
        Creates a basic task.

        :param user_request: The user request to use for the task.
        :type user_request: str

        :return: The basic task.
        :rtype: BasicTask
        """
        return BasicTask(user_request)

    def create_batch_task(self, requests : list) -> "BatchTask":
        """
        Creates a batch task.

        :param requests: A list of user requests to use for the task.
        :type requests: list

        :return: The batch task.
        :rtype: BatchTask
        """
        return BatchTask(requests)

    def create_multi_task(self, requests: list) -> "MultiTask":
        """
        Creates a multi task.

        :param requests: A list of user requests to use for the task.
        :type requests: list

        :return: The multi task.
        :rtype: MultiTask
        """
        return MultiTask(requests)

    def schedule_task(
            self,
            task : "Task",
            assistant_name : str = None,
            start_time : datetime = None,
            interval_seconds : int = 0,
            recurrence_count : int = 1
    ) -> str:
        """
        Schedules a task for execution.

        :param task: The task to schedule.
        :type task: Task
        :param assistant_name: The name of the assistant to use for the task.
        :type assistant_name: str
        :param start_time: The start time for the task.
        :type start_time: datetime
        :param interval_seconds: The interval in seconds for recurring tasks.
        :type interval_seconds: int
        :param recurrence_count: The number of times to recur the task.
        :type recurrence_count: int

        :return: The ID of the scheduled task.
        :rtype: str
        """
        schedule_id = str(uuid.uuid4())
        # MultiTask requests name their assistants individually.
        if isinstance(task, MultiTask):
            task.set_assistant_name(None)
        else:
            task.set_assistant_name(assistant_name)

        if start_time is None or (start_time - datetime.now()).total_seconds() <= 0:
            # Execute immediately (in a worker thread) or set up recurrence.
            task_thread = threading.Thread(target=self._execute_task, args=(task, schedule_id, interval_seconds, recurrence_count))
            task_thread.start()
            # Track immediate-execution threads too, matching _schedule_task's
            # bookkeeping for timer-scheduled tasks.
            self._scheduled_tasks.append((schedule_id, task, task_thread))
        else:
            # Schedule for later execution or recurrence.
            self._schedule_task(task, schedule_id, start_time, interval_seconds, recurrence_count)
        return schedule_id

    def _schedule_task(self, task, schedule_id, start_time, interval_seconds, recurrence_count):
        # Start a one-shot timer that fires _execute_task at start_time.
        delay = (start_time - datetime.now()).total_seconds()
        timer = threading.Timer(delay, self._execute_task, [task, schedule_id, interval_seconds, recurrence_count])
        timer.start()
        self._scheduled_tasks.append((schedule_id, task, timer))

    def _execute_task(self, task, schedule_id, interval_seconds=0, recurrence_count=1):
        # Run the task, reporting lifecycle events through the callbacks;
        # failures surface via on_task_failed instead of escaping the thread.
        try:
            self._callbacks.on_task_started(task, schedule_id)
            self._run_task_with_recurrence(task, schedule_id, interval_seconds, recurrence_count)
        except Exception as e:
            self._callbacks.on_task_failed(task, schedule_id, str(e))

    def _run_task_with_recurrence(self, task, schedule_id, interval_seconds, recurrence_count):
        # Execute recurrence_count times, sleeping interval_seconds between
        # runs but not after the final one.
        while recurrence_count > 0:
            task.execute(callback=lambda: self._callbacks.on_task_execute(task, schedule_id))
            recurrence_count -= 1
            if recurrence_count > 0:
                time.sleep(interval_seconds)
        self._callbacks.on_task_completed(task, schedule_id, "Success")
141 |
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/task_manager_callbacks.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
class TaskManagerCallbacks:
    """Observer interface for task lifecycle events.

    Subclass and override the hooks of interest; every default
    implementation is a no-op.
    """

    def on_task_started(self, task, schedule_id) -> None:
        """Hook invoked when a task begins execution.

        :param task: The task that started.
        :type task: Task
        :param schedule_id: The ID of the schedule.
        :type schedule_id: str

        :return: None
        :rtype: None
        """

    def on_task_completed(self, task, schedule_id, result) -> None:
        """Hook invoked when a task finishes successfully.

        :param task: The task that completed.
        :type task: Task
        :param schedule_id: The ID of the schedule.
        :type schedule_id: str
        :param result: The result of the task.
        :type result: Any

        :return: None
        :rtype: None
        """

    def on_task_failed(self, task, schedule_id, error) -> None:
        """Hook invoked when a task fails or raises an error.

        :param task: The task that failed.
        :type task: Task
        :param schedule_id: The ID of the schedule.
        :type schedule_id: str
        :param error: The error that occurred.
        :type error: Exception

        :return: None
        :rtype: None
        """

    def on_task_execute(self, task, schedule_id) -> None:
        """Hook invoked for each execution step of a running task.

        :param task: The task that is executing.
        :type task: Task
        :param schedule_id: The ID of the schedule.
        :type schedule_id: str

        :return: None
        :rtype: None
        """
60 |
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/management/text_message.py:
--------------------------------------------------------------------------------
1 |
2 | # Copyright (c) Microsoft. All rights reserved.
3 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
4 |
5 | from typing import List, Optional
6 |
7 |
class FileCitation:
    """
    Represents a citation that points at an uploaded file.

    :param file_id: The ID of the file.
    :type file_id: str
    :param file_name: The name of the file.
    :type file_name: str
    """

    def __init__(self, file_id: str, file_name: str) -> None:
        self._file_id = file_id
        self._file_name = file_name

    @property
    def file_id(self) -> str:
        """
        The ID of the cited file.

        :return: The ID of the file.
        :rtype: str
        """
        return self._file_id

    @property
    def file_name(self) -> str:
        """
        The name of the cited file.

        :return: The name of the file.
        :rtype: str
        """
        return self._file_name
44 |
45 |
class UrlCitation:
    """
    Represents a citation that points at a URL.

    :param url: The URL being cited.
    :type url: str
    :param title: An optional title or label for the URL.
    :type title: Optional[str]
    """

    def __init__(self, url: str, title: Optional[str] = None) -> None:
        self._url = url
        # A missing or empty title falls back to the URL itself.
        self._title = title if title else url

    @property
    def url(self) -> str:
        """
        The cited URL.

        :return: The cited URL.
        :rtype: str
        """
        return self._url

    @property
    def title(self) -> str:
        """
        The title of the citation (the URL itself when no title was given).

        :return: The title for this citation.
        :rtype: str
        """
        return self._title
83 |
84 |
class TextMessage:
    """
    Represents a text message together with its file and URL citations.

    :param content: The content of the message.
    :type content: str
    :param file_citations: The list of file citations in the message.
    :type file_citations: Optional[List[FileCitation]]
    :param url_citations: The list of URL citations in the message.
    :type url_citations: Optional[List[UrlCitation]]
    """

    def __init__(
        self,
        content: str,
        file_citations: Optional[List[FileCitation]] = None,
        url_citations: Optional[List[UrlCitation]] = None
    ):
        self._content = content
        # Substitute empty lists for missing citation collections so
        # callers can iterate without None checks.
        self._file_citations = file_citations if file_citations else []
        self._url_citations = url_citations if url_citations else []

    @property
    def content(self) -> str:
        """
        The content of the message.

        :return: The content of the message.
        :rtype: str
        """
        return self._content

    @content.setter
    def content(self, value: str):
        """
        Replaces the content of the message.

        :param value: The content of the message.
        :type value: str
        """
        self._content = value

    @property
    def file_citations(self) -> List[FileCitation]:
        """
        The file citations attached to the message.

        :return: The list of file citations in the message.
        :rtype: List[FileCitation]
        """
        return self._file_citations

    @property
    def url_citations(self) -> List[UrlCitation]:
        """
        The URL citations attached to the message.

        :return: The list of URL citations in the message.
        :rtype: List[UrlCitation]
        """
        return self._url_citations
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/azure/ai/assistant/py.typed:
--------------------------------------------------------------------------------
1 | # Marker file for PEP 561.
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft. All rights reserved.
2 | # Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
3 |
4 | from setuptools import setup, find_packages
5 | import os
6 | from io import open
7 | import re
8 |
9 |
PACKAGE_NAME = "azure-ai-assistant"
PACKAGE_PPRINT_NAME = "AI Assistant"

# a-b-c => a/b/c
PACKAGE_FOLDER_PATH = PACKAGE_NAME.replace("-", "/")
# a-b-c => a.b.c
NAMESPACE_NAME = PACKAGE_NAME.replace("-", ".")

# Version extraction inspired from 'requests': read VERSION = "..." out of
# the package's _version.py without importing the package.
with open(os.path.join(PACKAGE_FOLDER_PATH, "_version.py"), "r") as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1)
if not version:
    raise RuntimeError("Cannot find version information")

# Use the README as the long description shown on PyPI.
with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name=PACKAGE_NAME,
    version=version,
    description="Microsoft Azure {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
    # ensure that these are updated to reflect the package owners' information
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Azure/azure-sdk-for-python",
    keywords="azure, azure sdk, assistant",  # update with search keywords relevant to the azure service / product
    author="Microsoft Corporation",
    author_email="azuresdkengsysadmins@microsoft.com",
    license="MIT License",
    # ensure that the development status reflects the status of your package
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "License :: OSI Approved :: MIT License",
    ],
    packages=find_packages(
        exclude=[
            # Exclude packages that will be covered by PEP420 or nspkg
            # This means any folder structure that only consists of a __init__.py.
            # For example, for storage, this would mean adding 'azure.storage'
            # in addition to the default 'azure' that is seen here.
            "azure",
            "azure.ai"
        ]
    ),
    # Ship the PEP 561 marker so type checkers pick up inline annotations.
    package_data={
        'azure.ai.assistant': ['py.typed'],
    },
    install_requires=[
        "openai",
        "python-Levenshtein",
        "fuzzywuzzy",
        "Pillow",
        "PyYAML",
        "pyaudio",
        "numpy",
        "scipy",
        "onnxruntime",
        "resampy",
        "azure-ai-projects",
        "azure-identity",
        "azure-mgmt-logic",
        "azure-mgmt-web",
    ],
    python_requires=">=3.8",
    project_urls={
        "Bug Reports": "https://github.com/Azure/azure-sdk-for-python/issues",
        # Fixed: the repository slug was "azure-sdk-python" (missing "-for-"),
        # inconsistent with the `url` and "Bug Reports" entries above.
        "Source": "https://github.com/Azure/azure-sdk-for-python",
    },
)
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/test/resources/product_info_1.md:
--------------------------------------------------------------------------------
1 | # Information about product item_number: 1
2 |
3 | ## Brand
4 | Contoso Galaxy Innovations
5 |
6 | ## Category
7 | Smart Eyewear
8 |
9 | ## Features
10 | - Augmented Reality interface
11 | - Voice-controlled AI assistant
12 | - HD video recording with 3D audio
13 | - UV protection and blue light filtering
14 | - Wireless charging with extended battery life
15 |
16 | ## User Guide
17 |
18 | ### 1. Introduction
19 | Introduction to your new SmartView Glasses
20 |
21 | ### 2. Product Overview
22 | Overview of features and controls
23 |
24 | ### 3. Sizing and Fit
25 | Finding your perfect fit and style adjustments
26 |
27 | ### 4. Proper Care and Maintenance
28 | Cleaning and caring for your SmartView Glasses
29 |
30 | ### 5. Break-in Period
31 | Adjusting to the augmented reality experience
32 |
33 | ### 6. Safety Tips
34 | Safety guidelines for public and private spaces
35 |
36 | ### 7. Troubleshooting
37 | Quick fixes for common issues
38 |
39 | ## Warranty Information
40 | Two-year limited warranty on all electronic components
41 |
42 | ## Contact Information
43 | Customer Support at support@contoso-galaxy-innovations.com
44 |
45 | ## Return Policy
46 | 30-day return policy with no questions asked
47 |
48 | ## FAQ
49 | - How to sync your SmartView Glasses with your devices
50 | - Troubleshooting connection issues
51 | - Customizing your augmented reality environment
52 |
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/test/resources/product_info_2.md:
--------------------------------------------------------------------------------
1 | # Information about product item_number: 2
2 |
3 | ## Brand
4 | Contoso Quantum Comfort
5 |
6 | ## Category
7 | Self-Warming Blanket
8 |
9 | ## Features
10 | - Nano-fiber heating elements for even warmth distribution
11 | - Intelligent temperature control with machine learning preferences
12 | - Eco-friendly and energy-efficient design
13 | - Wireless and portable with up to 12 hours of battery life
14 | - Waterproof and machine washable material
15 |
16 | ## User Guide
17 |
18 | ### 1. Introduction
19 | Getting to know your new Self-Warming Blanket
20 |
21 | ### 2. Product Overview
22 | How to set up and operate your blanket
23 |
24 | ### 3. Sizing and Fit
25 | Selecting the ideal warmth setting for comfort
26 |
27 | ### 4. Proper Care and Maintenance
28 | Care instructions to maintain warmth and softness
29 |
30 | ### 5. Break-in Period
31 | What to expect during the first use
32 |
33 | ### 6. Safety Tips
34 | Best practices for safe use
35 |
36 | ### 7. Troubleshooting
37 | Common questions and solutions
38 |
39 | ## Warranty Information
40 | Three-year warranty with free technical support
41 |
42 | ## Contact Information
43 | Quantum Comfort Support at contact@contosoquantumcomfort.co
44 |
45 | ## Return Policy
46 | 45-day satisfaction guarantee with full refund
47 |
48 | ## FAQ
49 | - How to pair the blanket with your smart home devices
50 | - Optimizing battery life for longer use
51 | - Adjusting blanket settings for different climates
52 |
--------------------------------------------------------------------------------
/sdk/azure-ai-assistant/test/resources/scenery.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure-Samples/azureai-assistant-tool/c962df2526c7b5f67c8af32eead04f5872a80cdd/sdk/azure-ai-assistant/test/resources/scenery.png
--------------------------------------------------------------------------------
/templates/async_main_template.py:
--------------------------------------------------------------------------------
1 | from azure.ai.assistant.management.async_assistant_client import AsyncAssistantClient
2 | from azure.ai.assistant.management.async_conversation_thread_client import AsyncConversationThreadClient
3 |
4 | import asyncio
5 |
6 |
async def main():
    """Run an interactive console chat with the configured assistant.

    Loads config/<assistant_name>_assistant_config.yaml, builds the async
    assistant client and a conversation thread, then loops reading user
    input until 'exit' is typed. The thread client is closed on exit.
    """
    assistant_name = "ASSISTANT_NAME"

    # open assistant configuration file
    try:
        with open(f"config/{assistant_name}_assistant_config.yaml", "r") as file:
            config = file.read()
    except FileNotFoundError:
        print(f"Configuration file for {assistant_name} not found.")
        exit(1)

    # retrieve the assistant client
    assistant_client = await AsyncAssistantClient.from_yaml(config)

    # create a new conversation thread client (singleton per AI client type)
    conversation_thread_client = AsyncConversationThreadClient.get_instance(assistant_client.ai_client_type)

    try:
        # create a new conversation thread
        thread_name = await conversation_thread_client.create_conversation_thread()

        while True:
            # Accept user input
            user_message = input("user: ")
            if user_message.lower() == 'exit':  # Allow the user to exit the chat
                print("Exiting chat.")
                break

            # Create a message to the conversation thread
            await conversation_thread_client.create_conversation_thread_message(user_message, thread_name)

            # Process the user messages
            await assistant_client.process_messages(thread_name=thread_name)

            # Retrieve the conversation
            conversation = await conversation_thread_client.retrieve_conversation(thread_name)

            # Print the last assistant response from the conversation
            assistant_message = conversation.get_last_text_message(assistant_client.name)
            print(f"{assistant_client.name}: {assistant_message.content}")

            # add new line for better readability
            print()
    finally:
        # Fix: release the thread client's resources on exit — the original
        # never closed it.
        await conversation_thread_client.close()


if __name__ == "__main__":
    asyncio.run(main())
--------------------------------------------------------------------------------
/templates/async_stream_template.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from azure.ai.assistant.management.async_assistant_client import AsyncAssistantClient
3 | from azure.ai.assistant.management.async_assistant_client_callbacks import AsyncAssistantClientCallbacks
4 | from azure.ai.assistant.management.async_conversation_thread_client import AsyncConversationThreadClient
5 | from azure.ai.assistant.management.async_message import AsyncConversationMessage
6 | from azure.ai.assistant.management.text_message import TextMessage
7 |
8 | # Define a custom callback class that inherits from AssistantClientCallbacks
class MyAssistantClientCallbacks(AsyncAssistantClientCallbacks):
    """Forwards streaming assistant events into an asyncio queue for display."""

    def __init__(self, message_queue):
        self.message_queue = message_queue

    async def handle_message(self, action, message=""):
        # Hand every event to the consumer task through the queue.
        await self.message_queue.put((action, message))

    async def on_run_update(self, assistant_name, run_identifier, run_status, thread_name, is_first_message=False, message : AsyncConversationMessage = None):
        if run_status == "streaming":
            event_kind = "start" if is_first_message else "message"
            await self.handle_message(event_kind, message.text_message.content)
        elif run_status == "completed" and message:
            text_message : TextMessage = message.text_message
            # Surface any file citations attached to the final message.
            for file_citation in (text_message.file_citations or []):
                print(f"\nFile citation, file_id: {file_citation.file_id}, file_name: {file_citation.file_name}")

    async def on_function_call_processed(self, assistant_name, run_identifier, function_name, arguments, response):
        await self.handle_message("function", function_name)
28 |
29 |
30 | # Define a function to display streamed messages
async def display_streamed_messages(message_queue, assistant_name):
    """Consume (kind, text) events from the queue and render them to stdout."""
    while True:
        kind, text = await message_queue.get()
        if kind == "start":
            # First chunk of a reply: prefix it with the assistant's name.
            print(f"{assistant_name}: {text}", end="")
        elif kind == "message":
            # Subsequent chunk: append immediately, flushing for live display.
            print(text, end="", flush=True)
        elif kind == "function":
            # Announce a function call on its own line.
            print(f"{assistant_name}: called {text} function.")
        message_queue.task_done()
44 |
45 |
46 | # Define the main function
async def main():
    """Run an interactive streaming chat session with the configured assistant.

    Loads the assistant's YAML config, wires streaming callbacks into a
    queue consumed by a display task, and loops on user input until 'exit'.
    """
    assistant_name = "ASSISTANT_NAME"

    try:
        with open(f"config/{assistant_name}_assistant_config.yaml", "r") as file:
            config = file.read()
    except FileNotFoundError:
        print(f"Configuration file for {assistant_name} not found.")
        return

    # Fix: pre-bind cleanup handles so the `finally` block cannot hit an
    # unbound local if setup raises before they are assigned (the original
    # referenced display_task / conversation_thread_client unconditionally).
    conversation_thread_client = None
    display_task = None
    message_queue = asyncio.Queue()
    try:
        callbacks = MyAssistantClientCallbacks(message_queue)

        # Create a new assistant client
        assistant_client = await AsyncAssistantClient.from_yaml(config, callbacks=callbacks)

        # Create a new conversation thread client
        conversation_thread_client = AsyncConversationThreadClient.get_instance(assistant_client.ai_client_type)

        # Create a new conversation thread
        thread_name = await conversation_thread_client.create_conversation_thread()

        display_task = asyncio.create_task(display_streamed_messages(message_queue, assistant_name))

        while True:
            user_message = input("user: ").strip()
            if user_message.lower() == 'exit':
                print("Exiting chat.")
                break

            # Create a message to the conversation thread
            await conversation_thread_client.create_conversation_thread_message(user_message, thread_name)

            # Process the user messages (await the asynchronous call)
            await assistant_client.process_messages(thread_name=thread_name, stream=True)

            print()  # Add a newline for better readability

    except Exception as e:
        print(f"An error occurred: {e}")
    finally:
        # Cleanup before exiting; skip handles that were never created.
        await message_queue.join()
        if display_task is not None:
            display_task.cancel()
        if conversation_thread_client is not None:
            await conversation_thread_client.close()


if __name__ == "__main__":
    asyncio.run(main())
97 |
--------------------------------------------------------------------------------
/templates/main_template.py:
--------------------------------------------------------------------------------
1 | from azure.ai.assistant.management.assistant_client import AssistantClient
2 | from azure.ai.assistant.management.conversation_thread_client import ConversationThreadClient
3 |
4 |
assistant_name = "ASSISTANT_NAME"

# Load the assistant's YAML configuration from the config folder.
try:
    with open(f"config/{assistant_name}_assistant_config.yaml", "r") as config_file:
        config = config_file.read()
except FileNotFoundError:
    print(f"Configuration file for {assistant_name} not found.")
    exit(1)

# Build the assistant client from the configuration.
assistant_client = AssistantClient.from_yaml(config)

# Acquire the thread client singleton for this AI client type
# and open a fresh conversation thread on it.
conversation_thread_client = ConversationThreadClient.get_instance(assistant_client.ai_client_type)
thread_name = conversation_thread_client.create_conversation_thread()

# Simple REPL: read user input, hand it to the assistant, echo the reply.
while True:
    user_message = input("user: ")
    if user_message.lower() == 'exit':  # typing 'exit' ends the chat
        print("Exiting chat.")
        break

    # Post the user's message and let the assistant process the thread.
    conversation_thread_client.create_conversation_thread_message(user_message, thread_name)
    assistant_client.process_messages(thread_name=thread_name)

    # Fetch the updated conversation and print the assistant's latest reply.
    conversation = conversation_thread_client.retrieve_conversation(thread_name)
    assistant_message = conversation.get_last_text_message(assistant_client.name)
    print(f"{assistant_client.name}: {assistant_message.content}")

    print()  # blank line between turns for readability
--------------------------------------------------------------------------------
/templates/multi_template.py:
--------------------------------------------------------------------------------
1 | import threading
2 | from typing import Dict
3 | from azure.ai.assistant.management.assistant_client import AssistantClient
4 | from azure.ai.assistant.management.assistant_client_callbacks import AssistantClientCallbacks
5 | from azure.ai.assistant.management.ai_client_factory import AIClientType
6 | from azure.ai.assistant.management.conversation_thread_client import ConversationThreadClient
7 | from azure.ai.assistant.management.task_manager import TaskManager, TaskManagerCallbacks, MultiTask
8 |
9 |
# Names of the assistants to load; each must have a matching
# config/<name>_assistant_config.yaml file.
assistant_names = ["ASSISTANT_NAME1", "ASSISTANT_NAME2"]
11 |
class MultiAgentOrchestrator(TaskManagerCallbacks, AssistantClientCallbacks):
    """Coordinates several assistant clients that share one conversation
    thread, reacting to TaskManager and AssistantClient callbacks."""

    def __init__(self, assistant_names, ai_client_type : AIClientType):
        # One threading.Event per schedule_id, set when that task completes.
        self.task_completion_events = {}
        self.assistants: Dict[str, AssistantClient] = {}
        self.init_assistants(assistant_names)
        self.conversation_thread_client = ConversationThreadClient.get_instance(ai_client_type)
        super().__init__()

    def init_assistants(self, assistant_names):
        """Load each assistant client from its YAML configuration file."""
        for assistant_name in assistant_names:
            try:
                with open(f"config/{assistant_name}_assistant_config.yaml", "r") as config_file:
                    config = config_file.read()
                self.assistants[assistant_name] = AssistantClient.from_yaml(config, callbacks=self)
            except Exception as e:
                raise Exception(f"Error loading assistant configuration for {assistant_name}: {e}")

    def on_task_started(self, task : MultiTask, schedule_id):
        print(f"Task {task.id} started with schedule ID: {schedule_id}")
        self.task_completion_events[schedule_id] = threading.Event()
        # All sub-tasks of this schedule share a single conversation thread.
        self.thread_name = self.conversation_thread_client.create_conversation_thread()

    def on_task_execute(self, task : MultiTask, schedule_id):
        print(f"Task {task.id} execute with schedule ID: {schedule_id}")
        for request in task.requests:
            assistant_name = request["assistant"]
            client = self.assistants[assistant_name]
            # Post the sub-task to the shared thread and let the assistant respond.
            self.conversation_thread_client.create_conversation_thread_message(request["task"], self.thread_name)
            client.process_messages(self.thread_name)
            conversation = self.conversation_thread_client.retrieve_conversation(self.thread_name)
            for message in reversed(conversation.messages):
                if not message.text_message:
                    continue
                if message.sender == assistant_name:
                    print(f"{message.sender}: {message.text_message.content}")
                # Download any files or images the assistant produced.
                for file_message in message.file_messages:
                    print(f"{message.sender}: provided file {file_message.file_name}")
                    file_message.retrieve_file(client.assistant_config.output_folder_path)
                for image_message in message.image_messages:
                    print(f"{message.sender}: provided image {image_message.file_name}")
                    image_message.retrieve_image(client.assistant_config.output_folder_path)

    def on_task_completed(self, task : MultiTask, schedule_id, result):
        print(f"Task {task.id} completed with schedule ID: {schedule_id}. Result: {result}")
        completion_event = self.task_completion_events.get(schedule_id)
        if completion_event is not None:
            completion_event.set()

    def on_run_update(self, assistant_name, run_identifier, run_status, thread_name, is_first_message=False, message=None):
        if run_status == "completed":
            print(f"\n{assistant_name}: run {run_identifier} completed")
        elif run_status == "in_progress":
            # Progress ticker while the run is still working.
            print(".", end="", flush=True)

    def on_function_call_processed(self, assistant_name, run_identifier, function_name, arguments, response):
        print(f"\nFunction call {function_name} with arguments {arguments} processed by {assistant_name}")

    def wait_for_all_tasks(self):
        """Block until every scheduled task has signalled completion."""
        for completion_event in self.task_completion_events.values():
            completion_event.wait()
74 |
75 |
# Create multi agent orchestration, assumed that all assistants are of AZURE_OPEN_AI type
orchestrator = MultiAgentOrchestrator(assistant_names, AIClientType.AZURE_OPEN_AI)
task_manager = TaskManager(orchestrator)
# Sequential pipeline: convert -> review -> fix. Each entry names the
# assistant that handles it and the request text it receives.
tasks = [
    {
        "assistant": assistant_names[0],
        "task": "Convert main.py file in current folder to idiomatic Java implementation and create converted file in the output folder. Inform the full path of converted file at the end"
    },
    {
        "assistant": assistant_names[1],
        "task": "Review the converted Java file and inform about missing implementations and any improvements needed"
    },
    {
        "assistant": assistant_names[0],
        "task": "Implement the missing functionalities and create new file with updates in the output folder with the changes. Inform the full path of the new file at the end"
    }
]
multi_task = MultiTask(tasks)
# With no start_time, schedule_task runs the MultiTask immediately on a worker thread.
task_manager.schedule_task(multi_task)
# Block until the orchestrator's completion events have all been set.
orchestrator.wait_for_all_tasks()
96 |
--------------------------------------------------------------------------------