├── .gitignore ├── .vscode └── settings.json ├── README.md ├── SECURITY.md └── sample_gallery ├── .DS_Store ├── database_querying ├── cosmosdb_postgresql │ ├── Tutorial.md │ ├── media │ │ ├── Create an Azure Cosmos DB account.png │ │ ├── conn_input_secrests.png │ │ ├── conn_str.png │ │ ├── conn_str_sample.png │ │ ├── flow.png │ │ ├── flow_run.png │ │ ├── flow_ui.png │ │ ├── query_sample.png │ │ ├── ui_ci.png │ │ ├── ui_custom_conn.png │ │ ├── ui_flow_conn.png │ │ ├── ui_flow_cosmos_conn.png │ │ ├── ui_output.png │ │ ├── ui_run.png │ │ ├── ui_runtime.png │ │ └── ui_view_output.png │ ├── personal_finance_recommender │ │ ├── .promptflow │ │ │ ├── flow.detail.json │ │ │ ├── flow.layout.json │ │ │ ├── flow.log │ │ │ ├── flow.tools.json │ │ │ └── flow.uihint.json │ │ ├── __pycache__ │ │ │ ├── prompt_context.cpython-39.pyc │ │ │ └── query_cosmos.cpython-39.pyc │ │ ├── advise_generator.jinja2 │ │ ├── flow.dag.yaml │ │ ├── prompt_context.py │ │ ├── query_cosmos.py │ │ └── requirements.txt │ └── source_file │ │ ├── aoai_conn.yaml │ │ ├── cosmosdb_setup.ipynb │ │ ├── custom_conn.yaml │ │ ├── data.csv │ │ └── image_build │ │ ├── Dockerfile │ │ ├── environment.yaml │ │ └── requirements.txt └── index.md ├── evaluate_semantic_kernel_planner ├── Tutorial.md ├── eval_fulfillment_ratio │ ├── .promptflow │ │ ├── flow.detail.json │ │ ├── flow.layout.json │ │ ├── flow.log │ │ ├── flow.metrics.json │ │ ├── flow.output.json │ │ ├── flow.tools.json │ │ └── flow.uihint.json │ ├── If_standard_answer.py │ ├── __pycache__ │ │ ├── If_standard_answer.cpython-39.pyc │ │ ├── aggregate.cpython-39.pyc │ │ ├── calculate_standard_answer.cpython-39.pyc │ │ ├── final_result.cpython-39.pyc │ │ └── line_process.cpython-39.pyc │ ├── aggregate.py │ ├── calculate.jinja2 │ ├── calculate_standard_answer.py │ ├── calculate_variant_1.jinja2 │ ├── final_result.py │ ├── flow.dag.yaml │ ├── flow.meta.yaml │ ├── line_process.py │ ├── requirements.txt │ └── samples.json ├── media │ ├── eval_flow_authoring.png │ ├── pf_runs_visualization.png │ ├── sk_planner_pf.png │ ├── skplanner_flow_authoring.png │ ├── skplanner_flow_runs_visualization.png │ ├── skplanner_flow_test_output.png │ ├── vsc_flow_yaml.png │ ├── vsc_flow_yaml_run.png │ ├── vsc_flow_yaml_run_batch.png │ └── vsc_run_list.png ├── sk_planner_flow │ ├── .promptflow │ │ ├── flow.detail.json │ │ ├── flow.layout.json │ │ ├── flow.log │ │ ├── flow.output.json │ │ ├── flow.tools.json │ │ └── flow.uihint.json │ ├── __pycache__ │ │ └── planner.cpython-39.pyc │ ├── flow.dag.yaml │ ├── planner.py │ ├── requirements.txt │ └── skills │ │ ├── SummarizeSkill │ │ ├── MakeAbstractReadable │ │ │ ├── config.json │ │ │ └── skprompt.txt │ │ ├── Notegen │ │ │ ├── config.json │ │ │ └── skprompt.txt │ │ ├── Summarize │ │ │ ├── config.json │ │ │ └── skprompt.txt │ │ └── Topics │ │ │ ├── config.json │ │ │ └── skprompt.txt │ │ └── WriterSkill │ │ ├── Acronym │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── AcronymGenerator │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── AcronymReverse │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── Brainstorm │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── EmailGen │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── EmailTo │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── EnglishImprover │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── NovelChapter │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── NovelChapterWithNotes │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── NovelOutline │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── Rewrite │ │ ├── config.json │ │ └── skprompt.txt 
│ │ ├── ShortPoem │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── StoryGen │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── TellMeMore │ │ ├── config.json │ │ └── skprompt.txt │ │ ├── Translate │ │ ├── config.json │ │ └── skprompt.txt │ │ └── TwoSentenceSummary │ │ ├── config.json │ │ └── skprompt.txt └── source_file │ ├── .env │ ├── batch_run.yaml │ ├── data.jsonl │ ├── eval_run.yaml │ ├── requirements.txt │ └── sk_planner.py ├── golden_dataset ├── copilot-golden-dataset-creation-guidance.md └── media │ └── excel-template.png ├── local_to_cloud ├── langchain_local │ ├── chroma_db │ │ ├── chroma.sqlite3 │ │ └── e17cd960-fa1a-4572-b4fb-60c666fd15f7 │ │ │ ├── data_level0.bin │ │ │ ├── header.bin │ │ │ ├── length.bin │ │ │ └── link_lists.bin │ ├── langchain.ipynb │ ├── rag_langchain.py │ ├── testset_clean.csv │ └── testset_question_answer.csv ├── local_to_azure │ ├── .promptflow │ │ ├── groundedness │ │ │ └── flow.tools.json │ │ └── similarity │ │ │ └── flow.tools.json │ ├── __pycache__ │ │ ├── eval_paths.cpython-39.pyc │ │ ├── rag_flexflow.cpython-39.pyc │ │ └── rag_paths.cpython-39.pyc │ ├── chat_ui.png │ ├── config.json │ ├── eval │ │ ├── .promptflow │ │ │ └── groundedness │ │ │ │ └── flow.tools.json │ │ ├── __pycache__ │ │ │ ├── eval.cpython-39.pyc │ │ │ ├── evaluate.cpython-39.pyc │ │ │ └── rag_paths.cpython-39.pyc │ │ ├── evaluate.py │ │ ├── flow.flex.yaml │ │ ├── groundedness.prompty │ │ ├── init.json │ │ ├── rag_paths.py │ │ ├── requirements.txt │ │ └── similarity.prompty │ ├── eval_paths.py │ ├── rag │ │ ├── .promptflow │ │ │ └── flow.log │ │ ├── README.md │ │ ├── __pycache__ │ │ │ ├── paths.cpython-39.pyc │ │ │ └── rag_flexflow.cpython-39.pyc │ │ ├── chroma_db │ │ │ ├── a2d49070-88e6-4933-8b77-7a1f8f15aa2b │ │ │ │ ├── data_level0.bin │ │ │ │ ├── header.bin │ │ │ │ ├── length.bin │ │ │ │ └── link_lists.bin │ │ │ └── chroma.sqlite3 │ │ ├── flow.flex.yaml │ │ ├── prompt.json │ │ ├── rag_flexflow.py │ │ └── requirements.txt │ ├── rag_paths.py │ ├── sdk_azure.py │ ├── sdk_local.py │ └── testset_clean.csv ├── readme.md └── requirements.txt ├── nl2sql ├── README.MD ├── development │ ├── .gitignore │ ├── README.MD │ ├── flow.dag.yaml │ ├── generate_sql_query.jinja2 │ ├── get_table_names.py │ ├── get_table_schema.py │ ├── identify_table_llm.jinja2 │ ├── media │ │ └── flow_run_sample.png │ ├── requirements.txt │ ├── test-inputs.jsonl │ └── validate_sql_query.jinja2 └── evaluation │ ├── .gitignore │ ├── Extract_score.py │ ├── Query.py │ ├── README.md │ ├── Similarity_score.py │ ├── Vector_similarity.py │ ├── aggregate.py │ ├── data │ └── batch-samples.jsonl │ ├── evaluate_by_llms.jinja2 │ ├── exact_string_match.py │ ├── execution_accuracy.py │ ├── flow.dag.yaml │ ├── media │ └── evaluation_flow_run_sample.png │ ├── requirements.txt │ ├── sample-groundtruth-and-generated-sql.jsonl │ └── sample-groundtruth-sql.jsonl └── query_existing_vector_index ├── .DS_Store ├── Tutorial.md ├── media ├── vectordb_lookup_tool_chatflow_graph.png └── vectordb_lookup_tool_query_module.png ├── source_file ├── ACS_conn.yaml └── aoai_ozguler_02.yaml └── vectordb-lookup-simplified ├── .DS_Store ├── .promptflow ├── flow.layout.json ├── flow.tools.json ├── flow.uihint.json └── lkg_sources │ ├── Prompt_variants__Variant_0.jinja2 │ ├── Prompt_variants__Variant_1.jinja2 │ ├── Prompt_variants__Variant_2.jinja2 │ ├── answer_the_question_with_context.jinja2 │ ├── generate_prompt_context.py │ ├── modify_query_with_history.jinja2 │ └── requirements.txt ├── Prompt_variants__Variant_0.jinja2 ├── __pycache__ ├── 
.generate_prompt_context.tmp.cpython-39.pyc └── generate_prompt_context.cpython-39.pyc ├── answer_the_question_with_context.jinja2 ├── flow.dag.yaml ├── generate_prompt_context.py ├── modify_query_with_history.jinja2 └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | .env -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.terminal.activateEnvironment": true, 3 | "cSpell.words": [ 4 | "groundtruth" 5 | ] 6 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Prompt flow ecosystem 2 | 3 | Welcome to the **prompt flow ecosystem** - a comprehensive gallery that showcases a variety of use cases demonstrating the capabilities of prompt flow in building high-quality AI applications. 4 | 5 | Here are some common use cases that we've facilitated: 6 | 7 | * [**Evaluate semantic kernel planner**](./sample_gallery/evaluate_semantic_kernel_planner/Tutorial.md): This case demonstrates the evaluation of the Semantic Kernel Planner for superior control over the AI agent's generation. 8 | * [**Personalized financial advice using banking customer data on PostgreSQL/CosmosDB**](./sample_gallery/database_querying/cosmosdb_postgresql/Tutorial.md): This example demonstrates how to connect to a CosmosDB database, query data, and use the retrieved information as a reference for AI-generated financial advice. 9 | * [**Natural Language to SQL**](./sample_gallery/nl2sql/README.MD): This example implements natural language to SQL code generation on a sample database using prompt flow, and shows how to evaluate the generated queries. 10 | 11 | In addition, we also provide best practices for: 12 | 13 | * [**Golden dataset preparation**](./sample_gallery/golden_dataset/copilot-golden-dataset-creation-guidance.md): Learn how to prepare a golden dataset for your AI application evaluation. 14 | 15 | Our ecosystem serves as a practical guide, providing detailed walkthroughs of each industry use case and showing how to effectively leverage prompt flow in your AI application development process. 16 | 17 | ## Getting started 18 | 19 | 1. Install the prompt flow SDK 20 | 21 | ```bash 22 | pip install promptflow promptflow-tools 23 | ``` 24 | 25 | More information about the SDK installation can be found [here](https://github.com/microsoft/promptflow/tree/main#installation). 26 | 27 | 2. Install the prompt flow extension in VS Code 28 | 29 | Search for `promptflow` in the VS Code extension marketplace and install the extension. 30 | 31 | More information about the extension can be found [here](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow). 32 | 33 | ## Practicing 34 | Go to the [sample gallery](./sample_gallery) to find more samples, then follow a tutorial to start practicing on a specific use case. For example, once the SDK is installed you can try running a sample flow locally, as sketched below.
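A minimal sketch of a local run, using the CosmosDB/PostgreSQL sample (the connection YAML files ship with placeholder values, so fill in your own endpoint and credentials first):

```bash
# Register the connections the sample expects (edit the placeholder values first)
pf connection create -f ./sample_gallery/database_querying/cosmosdb_postgresql/source_file/aoai_conn.yaml
pf connection create -f ./sample_gallery/database_querying/cosmosdb_postgresql/source_file/custom_conn.yaml

# Run the flow once with a single test input
pf flow test --flow ./sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender --inputs account_number="1234567890"
```

Each tutorial documents the exact connections and inputs its flow requires.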
35 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). 
40 | 41 | 42 | -------------------------------------------------------------------------------- /sample_gallery/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/.DS_Store -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/Create an Azure Cosmos DB account.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/Create an Azure Cosmos DB account.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/conn_input_secrests.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/conn_input_secrests.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/conn_str.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/conn_str.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/conn_str_sample.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/conn_str_sample.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/flow.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/flow_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/flow_run.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/flow_ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/flow_ui.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/query_sample.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/query_sample.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/ui_ci.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/ui_ci.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/ui_custom_conn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/ui_custom_conn.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/ui_flow_conn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/ui_flow_conn.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/ui_flow_cosmos_conn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/ui_flow_cosmos_conn.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/ui_output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/ui_output.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/ui_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/ui_run.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/ui_runtime.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/ui_runtime.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/media/ui_view_output.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/media/ui_view_output.png -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/.promptflow/flow.layout.json: -------------------------------------------------------------------------------- 1 | { 2 | "orientation": "Vertical", 3 | "nodeLayouts": { 4 | "inputs": { 5 | "x": 67.5, 6 | "y": 62, 7 | "index": -1 8 | }, 9 | "outputs": { 10 | "x": 67.5, 11 | "y": 502, 12 | "index": -1 13 | }, 14 | "query_cosmos": { 15 | "x": 12, 16 | "y": 172, 17 | "height": 75, 18 | "index": 0 19 | }, 20 | "prompt_context": { 21 | "x": 12, 22 | "y": 282, 23 | "height": 75, 24 | "index": 1 25 | }, 26 | "advise_generator": { 27 | "x": 123, 28 | "y": 392, 29 | "height": 75, 30 | "index": 2 31 | } 32 | }, 33 | "viewport": { 34 | "transformMatrix": [ 35 | 1, 36 | 0, 37 | 0, 38 | 1, 39 | 42.383941650390625, 40 | -297.3671875 41 | ] 42 | } 43 | } -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/.promptflow/flow.log: -------------------------------------------------------------------------------- 1 | 2023-11-20 13:57:09 +0800 28952 execution.flow INFO Start to run 3 nodes with concurrency level 16. 2 | 2023-11-20 13:57:09 +0800 28952 execution.flow INFO Executing node query_cosmos. node run id: bc525fe0-8363-4ad1-af37-97aac9309f1a_query_cosmos_0 3 | 2023-11-20 13:57:09 +0800 28952 execution ERROR Node query_cosmos in line 0 failed. Exception: Execution failure in 'query_cosmos': (InterfaceError) ('IM002', '[IM002] [Microsoft][ODBC Driver Manager] Data source name not found and no default driver specified (0) (SQLDriverConnect)'). 4 | Traceback (most recent call last): 5 | File "C:\Users\chenlujiao\AppData\Local\anaconda3\envs\pfsdk\lib\site-packages\promptflow\_core\flow_execution_context.py", line 164, in invoke_tool 6 | return f(*args, **kwargs) 7 | File "C:\Users\chenlujiao\Documents\GitHub\promptflow-samples-ozguler\sample_gallery\database_querying\cosmosdb_postgresql\personal_finance_recommender\query_cosmos.py", line 25, in my_python_tool 8 | conn = pyodbc.connect(conn_str) 9 | pyodbc.InterfaceError: ('IM002', '[IM002] [Microsoft][ODBC Driver Manager] Data source name not found and no default driver specified (0) (SQLDriverConnect)') 10 | 11 | The above exception was the direct cause of the following exception: 12 | 13 | Traceback (most recent call last): 14 | File "C:\Users\chenlujiao\AppData\Local\anaconda3\envs\pfsdk\lib\site-packages\promptflow\_core\flow_execution_context.py", line 126, in invoke_tool_with_cache 15 | result = self.invoke_tool(f, args, kwargs) 16 | File "C:\Users\chenlujiao\AppData\Local\anaconda3\envs\pfsdk\lib\site-packages\promptflow\_core\flow_execution_context.py", line 175, in invoke_tool 17 | raise ToolExecutionError(node_name=node_name, module=f.__module__) from e 18 | promptflow._core._errors.ToolExecutionError: Execution failure in 'query_cosmos': (InterfaceError) ('IM002', '[IM002] [Microsoft][ODBC Driver Manager] Data source name not found and no default driver specified (0) (SQLDriverConnect)') 19 | 2023-11-20 13:57:09 +0800 28952 execution ERROR Execution of one node has failed. Cancelling all running nodes: query_cosmos. 
20 | -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/.promptflow/flow.tools.json: -------------------------------------------------------------------------------- 1 | { 2 | "package": {}, 3 | "code": { 4 | "query_cosmos.py": { 5 | "type": "python", 6 | "inputs": { 7 | "account_number": { 8 | "type": [ 9 | "string" 10 | ] 11 | }, 12 | "connection": { 13 | "type": [ 14 | "CustomConnection" 15 | ] 16 | } 17 | }, 18 | "source": "query_cosmos.py", 19 | "function": "my_python_tool" 20 | }, 21 | "prompt_context.py": { 22 | "type": "python", 23 | "inputs": { 24 | "input1": { 25 | "type": [ 26 | "object" 27 | ] 28 | } 29 | }, 30 | "source": "prompt_context.py", 31 | "function": "my_python_tool" 32 | }, 33 | "advise_generator.jinja2": { 34 | "type": "llm", 35 | "inputs": { 36 | "question": { 37 | "type": [ 38 | "string" 39 | ] 40 | }, 41 | "request": { 42 | "type": [ 43 | "string" 44 | ] 45 | } 46 | }, 47 | "source": "advise_generator.jinja2" 48 | }, 49 | "advise_generator__variant_1.jinja2": { 50 | "type": "llm", 51 | "inputs": { 52 | "question": { 53 | "type": [ 54 | "string" 55 | ] 56 | }, 57 | "request": { 58 | "type": [ 59 | "string" 60 | ] 61 | } 62 | }, 63 | "source": "advise_generator__variant_1.jinja2" 64 | } 65 | } 66 | } -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/.promptflow/flow.uihint.json: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": {} 3 | } -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/__pycache__/prompt_context.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/__pycache__/prompt_context.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/__pycache__/query_cosmos.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/__pycache__/query_cosmos.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/advise_generator.jinja2: -------------------------------------------------------------------------------- 1 | system: 2 | You have a PhD in finance from Harvard and have been an investment advisor and chief economist at various investment banks. 3 | 4 | user: 5 | Give a financial response tailored specifically to the person below. Be as detailed as possible. 6 | Outline different scenarios for how the world economy might evolve, and speculate on different options for investment. 7 | The tone should be professional and polite.
8 | These are the user's details: {{question}} 9 | This is their request: {{request}} -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/flow.dag.yaml: -------------------------------------------------------------------------------- 1 | id: template_standard_flow 2 | name: Template Standard Flow 3 | environment: 4 | python_requirements_txt: requirements.txt 5 | inputs: 6 | account_number: 7 | type: string 8 | default: "1234567890" 9 | is_chat_input: false 10 | request: 11 | type: string 12 | default: '"I want to make a loan application"' 13 | is_chat_input: false 14 | outputs: 15 | output: 16 | type: string 17 | reference: ${advise_generator.output} 18 | nodes: 19 | - name: query_cosmos 20 | type: python 21 | source: 22 | type: code 23 | path: query_cosmos.py 24 | inputs: 25 | connection: cosmos 26 | account_number: ${inputs.account_number} 27 | use_variants: false 28 | - name: prompt_context 29 | type: python 30 | source: 31 | type: code 32 | path: prompt_context.py 33 | inputs: 34 | input1: ${query_cosmos.output} 35 | use_variants: false 36 | - name: advise_generator 37 | type: llm 38 | source: 39 | type: code 40 | path: advise_generator.jinja2 41 | inputs: 42 | deployment_name: gpt-4 43 | temperature: 0.8 44 | top_p: 1 45 | max_tokens: 5024 46 | presence_penalty: 0 47 | frequency_penalty: 0 48 | question: ${prompt_context.output} 49 | request: ${inputs.request} 50 | model: gpt-4 51 | connection: azure_open_ai_connection 52 | api: chat 53 | -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/prompt_context.py: -------------------------------------------------------------------------------- 1 | from promptflow import tool 2 | 3 | # input1 is a tuple of 8 elements produced by query_cosmos.py 4 | @tool 5 | def my_python_tool(input1: tuple) -> str: 6 | ( 7 | account_holder_name, 8 | account_type, 9 | balance, 10 | employment_status, 11 | average_monthly_deposit, 12 | average_monthly_withdrawal, 13 | financial_goal, 14 | risk_tolerance, 15 | ) = input1 16 | 17 | prompt = f"""Based on the following detailed financial profile, could you provide a comprehensive financial plan? The plan should align with the account holder's long-term goals, risk tolerance, and current financial standing. 18 | 19 | 1. **Account Holder:** {account_holder_name} 20 | - Could you consider personalizing the advice for different age groups or life situations? 21 | 22 | 2. **Account Type:** {account_type} 23 | - How does this account type align or conflict with the individual's financial objectives? 24 | 25 | 3. **Current Balance:** {balance} 26 | - Could you outline strategies for maximizing the value of the current balance? 27 | 28 | 4. **Employment Status:** {employment_status} 29 | - How should the employment status factor into savings or investment strategies? 30 | 31 | 5. **Average Monthly Deposit:** {average_monthly_deposit} 32 | - What percentage of the average monthly deposit should go into investments, savings, and emergency funds? 33 | 34 | 6. **Average Monthly Withdrawal:** {average_monthly_withdrawal} 35 | - Can you suggest any methods to minimize unnecessary withdrawals? 36 | 37 | 7. **Financial Goal:** {financial_goal} 38 | - What are the short-term and long-term steps needed to achieve this financial goal? 39 | 40 | 8.
**Risk Tolerance:** {risk_tolerance} 41 | - Given the risk tolerance, what kind of investment opportunities should be explored or avoided? 42 | 43 | Please provide a detailed and step-by-step financial plan that addresses these points.""" 44 | 45 | return prompt 46 | -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/query_cosmos.py: -------------------------------------------------------------------------------- 1 | import os # Import the os library for environment variables 2 | import pyodbc # Import the ODBC library 3 | from promptflow import tool 4 | from promptflow.connections import CustomConnection 5 | from urllib.parse import urlparse 6 | #from azure.cosmos import CosmosClient, PartitionKey, exceptions 7 | 8 | @tool 9 | def my_python_tool(account_number: str, connection: CustomConnection) -> tuple: 10 | # Set environment variables for ODBC 11 | os.environ['ODBCINI'] = '/etc/odbc.ini' 12 | os.environ['ODBCINSTINI'] = '/etc/odbcinst.ini' 13 | os.environ['ODBCSYSINI'] = '/usr/lib/x86_64-linux-gnu/odbc/' 14 | 15 | host = connection.endpoint 16 | port = connection.port 17 | database = connection.database 18 | user = connection.username 19 | password = connection.password 20 | 21 | # Initialize the ODBC connection string 22 | # The driver below is for Linux. On Windows, use the PostgreSQL ODBC driver instead: DRIVER={PostgreSQL Unicode}; 23 | conn_str = "DRIVER=/usr/lib/x86_64-linux-gnu/odbc/psqlodbcw.so;" + "SERVER=" + host + ";PORT=" + port + ";DATABASE=" + database + ";UID=" + user + ";PWD=" + password + ";sslmode=require;" 24 | 25 | # Initialize the ODBC connection 26 | conn = pyodbc.connect(conn_str) 27 | 28 | # Create a cursor object 29 | cursor = conn.cursor() 30 | 31 | # Issue a parameterized query to fetch all 8 columns (binding the value avoids SQL injection) 32 | query = "SELECT account_holder_name, account_type, balance, employment_status, average_monthly_deposit, average_monthly_withdrawal, financial_goal, risk_tolerance FROM bank_accounts WHERE account_number = ?" 33 | #query = f"SELECT * FROM bank_accounts" 34 | 35 | cursor.execute(query, (int(account_number),)) 36 | result = cursor.fetchone() 37 | 38 | print(result) 39 | 40 | # Initialize variables to None in case no records are found 41 | account_holder_name = None 42 | account_type = None 43 | balance = None 44 | employment_status = None 45 | average_monthly_deposit = None 46 | average_monthly_withdrawal = None 47 | financial_goal = None 48 | risk_tolerance = None 49 | 50 | if result: 51 | # Unpack the result into separate variables 52 | ( 53 | account_holder_name, 54 | account_type, 55 | balance, 56 | employment_status, 57 | average_monthly_deposit, 58 | average_monthly_withdrawal, 59 | financial_goal, 60 | risk_tolerance 61 | ) = result 62 | 63 | print(f"Account Holder: {account_holder_name}") 64 | print(f"Account Type: {account_type}") 65 | print(f"Balance: {balance}") 66 | print(f"Employment Status: {employment_status}") 67 | print(f"Average Monthly Deposit: {average_monthly_deposit}") 68 | print(f"Average Monthly Withdrawal: {average_monthly_withdrawal}") 69 | print(f"Financial Goal: {financial_goal}") 70 | print(f"Risk Tolerance: {risk_tolerance}") 71 | else: 72 | print("No records found.") 73 | 74 | # Close the cursor and connection 75 | cursor.close() 76 | conn.close() 77 | 78 | return ( 79 | account_holder_name, 80 | account_type, 81 | balance, 82 | employment_status, 83 | average_monthly_deposit, 84 | average_monthly_withdrawal, 85 | financial_goal, 86 | risk_tolerance 87 | ) 88 | 89
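The `cosmos` CustomConnection consumed above is defined in source_file/custom_conn.yaml. As a minimal sketch (assuming the command is run from the repository root; the CLI prompts for the password secret that is left blank in the YAML), it can be registered locally before testing the flow:

```bash
# Create the custom connection referenced by the query_cosmos node
pf connection create -f ./sample_gallery/database_querying/cosmosdb_postgresql/source_file/custom_conn.yaml
```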
| -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/database_querying/cosmosdb_postgresql/personal_finance_recommender/requirements.txt -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/source_file/aoai_conn.yaml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/AzureOpenAIConnection.schema.json 2 | name: azure_open_ai_connection 3 | type: azure_open_ai # snake case 4 | api_key: "to_replace_with_azure_openai_api_key" 5 | api_base: "to_replace_with_azure_openai_api_endpoint" 6 | api_type: "azure" 7 | api_version: "2023-07-01-preview" -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/source_file/cosmosdb_setup.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 1. Check the banking customer data file" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [ 15 | { 16 | "data": { 17 | "text/plain": [ 18 | "Index(['id', 'account_number', 'account_type', 'balance',\n", 19 | " 'account_holder_name', 'date_of_birth', 'employment_status',\n", 20 | " 'creation_date', 'interest_rate', 'branch_id', 'overdraft_limit',\n", 21 | " 'currency', 'last_transaction_date', 'average_monthly_deposit',\n", 22 | " 'average_monthly_withdrawal', 'financial_goal', 'goal_amount',\n", 23 | " 'risk_tolerance '],\n", 24 | " dtype='object')" 25 | ] 26 | }, 27 | "execution_count": 1, 28 | "metadata": {}, 29 | "output_type": "execute_result" 30 | } 31 | ], 32 | "source": [ 33 | "import pandas as pd \n", 34 | "df = pd.read_csv('data.csv')\n", 35 | "df.columns" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": 5, 41 | "metadata": {}, 42 | "outputs": [ 43 | { 44 | "name": "stdout", 45 | "output_type": "stream", 46 | "text": [ 47 | " id account_number account_type balance account_holder_name date_of_birth \\\n", 48 | "1 2 2345678901 Savings 7000.0 Jane Doe 1985-12-12 \n", 49 | "6 7 7890123456 Checking 25000.0 Michael Miller 1982-11-15 \n", 50 | "\n", 51 | " employment_status creation_date interest_rate branch_id overdraft_limit \\\n", 52 | "1 Self-employed 2023-02-01 2.0 102 1000.0 \n", 53 | "6 Self-employed 2023-07-01 1.5 107 900.0 \n", 54 | "\n", 55 | " currency last_transaction_date average_monthly_deposit \\\n", 56 | "1 USD 2023-07-25 3000.0 \n", 57 | "6 USD 2023-08-30 5000.0 \n", 58 | "\n", 59 | " average_monthly_withdrawal financial_goal goal_amount risk_tolerance \n", 60 | "1 1500.0 Buy a House 300000.0 Low \n", 61 | "6 3500.0 Buy a House 250000.0 Low \n" 62 | ] 63 | } 64 | ], 65 | "source": [ 66 | "print(df.sample(2))" 67 | ] 68 | }, 69 | { 70 | "cell_type": "markdown", 71 | "metadata": {}, 72 | "source": [ 73 | "# 2. 
Connect to your PostgreSQL CosmosDB database" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "import pandas as pd \n", 83 | "df = pd.read_csv('data.csv')\n", 84 | "\n", 85 | "import ssl\n", 86 | "ssl_context = ssl.create_default_context()" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": null, 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "!pip install sqlalchemy\n", 96 | "!pip install psycopg2-binary" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": null, 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "from sqlalchemy import create_engine\n", 106 | "\n", 107 | "#you can find your connection string under cosmosdb \"connection strings\"\n", 108 | "#XXXX is your db password, YYYY is your db hostname\n", 109 | "\n", 110 | "engine = create_engine(\n", 111 | " 'postgresql+pg8000://citus:XXXX@YYYY.[db-instance-name].postgres.cosmos.azure.com:5432/citus', \n", 112 | " connect_args={'ssl_context': ssl_context}\n", 113 | ")" 114 | ] 115 | }, 116 | { 117 | "cell_type": "markdown", 118 | "metadata": {}, 119 | "source": [ 120 | "# 3. Upload the data to the database" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": null, 126 | "metadata": {}, 127 | "outputs": [], 128 | "source": [ 129 | "df.to_sql('bank_accounts', engine, if_exists='replace', index=False)" 130 | ] 131 | } 132 | ], 133 | "metadata": { 134 | "kernelspec": { 135 | "display_name": "pfsdk", 136 | "language": "python", 137 | "name": "python3" 138 | }, 139 | "language_info": { 140 | "codemirror_mode": { 141 | "name": "ipython", 142 | "version": 3 143 | }, 144 | "file_extension": ".py", 145 | "mimetype": "text/x-python", 146 | "name": "python", 147 | "nbconvert_exporter": "python", 148 | "pygments_lexer": "ipython3", 149 | "version": "3.9.17" 150 | } 151 | }, 152 | "nbformat": 4, 153 | "nbformat_minor": 2 154 | } 155 | -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/source_file/custom_conn.yaml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomConnection.schema.json 2 | name: cosmos 3 | type: custom 4 | configs: 5 | endpoint: "" 6 | database: "citus" 7 | username: "citus" 8 | port: "" 9 | secrets: 10 | # Don't replace the '' placeholder. The application will prompt you to enter a value when it runs. 
11 | password: "" 12 | -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/source_file/data.csv: -------------------------------------------------------------------------------- 1 | id,account_number,account_type,balance,account_holder_name,date_of_birth,employment_status,creation_date,interest_rate,branch_id,overdraft_limit,currency,last_transaction_date,average_monthly_deposit,average_monthly_withdrawal,financial_goal,goal_amount,risk_tolerance 2 | 1,1234567890,Checking,5000.00,John Doe,1980-06-01,Employed,2023-01-01,1.00,101,500.00,USD,2023-07-30,2500.00,2000.00,Retirement,1000000.00,Medium 3 | 2,2345678901,Savings,7000.00,Jane Doe,1985-12-12,Self-employed,2023-02-01,2.00,102,1000.00,USD,2023-07-25,3000.00,1500.00,Buy a House,300000.00,Low 4 | 3,3456789012,Checking,12000.00,James Smith,1990-03-10,Unemployed,2023-03-01,1.50,103,800.00,USD,2023-07-20,3500.00,3000.00,Buy a Car,50000.00,High 5 | 4,4567890123,Savings,20000.00,Jennifer Johnson,1975-09-18,Retired,2023-04-01,2.50,104,0.00,USD,2023-07-15,1000.00,500.00,Travel,20000.00,Low 6 | 5,5678901234,Checking,4500.00,Robert Brown,1987-08-07,Part-time,2023-05-01,1.00,105,600.00,USD,2023-07-10,2000.00,1000.00,Education,40000.00,Medium 7 | 6,6789012345,Savings,10000.00,Patricia Davis,1993-05-22,Full-time,2023-06-01,2.00,106,700.00,USD,2023-07-05,1500.00,1200.00,Start a Business,100000.00,High 8 | 7,7890123456,Checking,25000.00,Michael Miller,1982-11-15,Self-employed,2023-07-01,1.50,107,900.00,USD,2023-08-30,5000.00,3500.00,Buy a House,250000.00,Low 9 | 8,8901234567,Savings,30000.00,Linda Wilson,1978-02-25,Retired,2023-08-01,2.50,108,0.00,USD,2023-08-25,2000.00,1000.00,Travel,35000.00,High 10 | 9,9012345678,Checking,15000.00,William Moore,1995-07-30,Full-time,2023-09-01,1.00,109,700.00,USD,2023-08-20,3000.00,2500.00,Education,60000.00,Medium 11 | 10,0123456789,Savings,20000.00,Elizabeth Taylor,1980-04-28,Part-time,2023-10-01,2.00,110,800.00,USD,2023-08-15,2500.00,2000.00,Start a Business,80000.00,Low 12 | 11,1122334455,Savings,35000.00,Ahmed Hussein,1975-10-12,Self-employed,2023-11-01,2.50,111,1000.00,USD,2023-09-10,4000.00,1500.00,Retirement,500000.00,Low 13 | 12,2233445566,Checking,4000.00,Chen Wei,1990-05-18,Student,2023-12-01,1.00,112,500.00,USD,2023-09-05,500.00,300.00,Education,20000.00,Medium 14 | 13,3344556677,Savings,80000.00,Marie Dupont,1980-01-24,Retired,2024-01-01,3.00,113,0.00,USD,2023-09-01,1000.00,500.00,Travel,30000.00,Low 15 | 14,4455667788,Checking,6000.00,Juan Fernandez,1987-08-30,Part-time,2024-02-01,1.50,114,800.00,USD,2023-08-28,2000.00,1000.00,Buy a Car,25000.00,High 16 | 15,5566778899,Savings,50000.00,Olga Smirnova,1993-12-12,Full-time,2024-03-01,2.00,115,700.00,USD,2023-08-23,3000.00,2000.00,Buy a House,300000.00,Medium -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/source_file/image_build/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the PromptFlow runtime as a parent image 2 | FROM mcr.microsoft.com/azureml/promptflow/promptflow-runtime-stable:latest 3 | 4 | # Upgrade pip 5 | RUN python -m pip install --upgrade pip 6 | 7 | # Install system packages required for pyodbc 8 | RUN apt-get update \ 9 | && apt-get install -y --no-install-recommends \ 10 | gcc \ 11 | g++ \ 12 | unixodbc-dev \ 13 | && apt-get clean \ 14 | && rm -rf /var/lib/apt/lists/* 15 | 16 | # Install PostgreSQL ODBC Driver 17 | 
RUN apt-get update && apt-get install -y \ 18 | curl \ 19 | gnupg \ 20 | && curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ 21 | && echo "deb http://apt.postgresql.org/pub/repos/apt/ buster-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ 22 | && apt-get update \ 23 | && apt-get install -y odbc-postgresql \ 24 | && apt-get clean \ 25 | && rm -rf /var/lib/apt/lists/* 26 | 27 | # Install required packages defined in requirements.txt 28 | COPY ./* ./ 29 | RUN pip install -r requirements.txt -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/source_file/image_build/environment.yaml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/latest/environment.schema.json 2 | name: pf-cosmos-env 3 | build: 4 | path: . 5 | inference_config: 6 | liveness_route: 7 | port: 8080 8 | path: /health 9 | readiness_route: 10 | port: 8080 11 | path: /health 12 | scoring_route: 13 | port: 8080 14 | path: /score -------------------------------------------------------------------------------- /sample_gallery/database_querying/cosmosdb_postgresql/source_file/image_build/requirements.txt: -------------------------------------------------------------------------------- 1 | ###### Requirements with Version Specifiers ###### 2 | pyodbc 3 | azure-search-documents == 11.4.0b8 4 | azure-cosmos -------------------------------------------------------------------------------- /sample_gallery/database_querying/index.md: -------------------------------------------------------------------------------- 1 | # Database querying 2 | 3 | Databases give us a powerful way to represent and query real-world relationships. There are a number of samples that make it easy to use LLMs to interact with various databases within prompt flow. 4 | 5 | * [CosmosDB/PostgreSQL - personalized financial advice](./cosmosdb_postgresql/Tutorial.md) 6 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/.promptflow/flow.layout.json: -------------------------------------------------------------------------------- 1 | { 2 | "nodeLayouts": { 3 | "inputs": { 4 | "x": 187, 5 | "y": 62, 6 | "index": -1 7 | }, 8 | "outputs": { 9 | "x": 362, 10 | "y": 502, 11 | "index": -1 12 | }, 13 | "aggregate": { 14 | "x": 12, 15 | "y": 502, 16 | "index": 0 17 | }, 18 | "calculate": { 19 | "x": 337.30870225694446, 20 | "y": 285.08641221788196, 21 | "index": 1 22 | }, 23 | "If_standard_answer": { 24 | "x": 167.70977105034723, 25 | "y": 152.7098897298177, 26 | "index": 2 27 | }, 28 | "calculate_standard_answer": { 29 | "x": 42.09263780381944, 30 | "y": 282, 31 | "index": 3 32 | }, 33 | "final_result": { 34 | "x": 187, 35 | "y": 392, 36 | "index": 4 37 | } 38 | }, 39 | "orientation": "Vertical" 40 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/.promptflow/flow.log: -------------------------------------------------------------------------------- 1 | 2023-11-12 15:28:32 +0800 2764 execution.flow INFO Start to run 4 nodes with concurrency level 16. 2 | 2023-11-12 15:28:32 +0800 2764 execution.flow INFO Executing node If_standard_answer.
node run id: ceb923dc-9f44-4fc1-a4b3-77645bd7acca_If_standard_answer_0 3 | 2023-11-12 15:28:32 +0800 2764 execution.flow INFO Node If_standard_answer completes. 4 | 2023-11-12 15:28:32 +0800 2764 execution.flow INFO Bypassing node calculate. node run id: ceb923dc-9f44-4fc1-a4b3-77645bd7acca_calculate_0 5 | 2023-11-12 15:28:32 +0800 2764 execution.flow INFO Executing node calculate_standard_answer. node run id: ceb923dc-9f44-4fc1-a4b3-77645bd7acca_calculate_standard_answer_0 6 | 2023-11-12 15:28:32 +0800 2764 execution.flow INFO Node calculate_standard_answer completes. 7 | 2023-11-12 15:28:32 +0800 2764 execution.flow INFO Executing node final_result. node run id: ceb923dc-9f44-4fc1-a4b3-77645bd7acca_final_result_0 8 | 2023-11-12 15:28:32 +0800 2764 execution.flow INFO Node final_result completes. 9 | 2023-11-12 15:28:32 +0800 2764 execution.flow INFO Start to run 1 nodes with concurrency level 16. 10 | 2023-11-12 15:28:32 +0800 2764 execution.flow INFO Executing node aggregate. node run id: af08863f-ae92-4e70-9755-6dd71b6af672_aggregate_reduce 11 | 2023-11-12 15:28:32 +0800 2764 execution.flow INFO Node aggregate completes. 12 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/.promptflow/flow.metrics.json: -------------------------------------------------------------------------------- 1 | { 2 | "fulfillment_ratio": "100.0%" 3 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/.promptflow/flow.output.json: -------------------------------------------------------------------------------- 1 | { 2 | "Fulfillment Ratio": "100.00%" 3 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/.promptflow/flow.tools.json: -------------------------------------------------------------------------------- 1 | { 2 | "package": {}, 3 | "code": { 4 | "line_process.py": { 5 | "type": "python", 6 | "inputs": { 7 | "groundtruth": { 8 | "type": [ 9 | "string" 10 | ] 11 | }, 12 | "prediction": { 13 | "type": [ 14 | "string" 15 | ] 16 | } 17 | }, 18 | "description": "This tool processes the prediction of a single line and returns the processed result.\n\n:param groundtruth: the groundtruth of a single line.\n:param prediction: the prediction of a single line.", 19 | "source": "line_process.py", 20 | "function": "line_process" 21 | }, 22 | "aggregate.py": { 23 | "type": "python", 24 | "inputs": { 25 | "processed_results": { 26 | "type": [ 27 | "object" 28 | ] 29 | } 30 | }, 31 | "description": "This tool aggregates the processed result of all lines and log metric.\n\n:param processed_results: List of the output of line_process node.", 32 | "source": "aggregate.py", 33 | "function": "aggregate" 34 | }, 35 | "calculate.jinja2": { 36 | "type": "llm", 37 | "inputs": { 38 | "answer": { 39 | "type": [ 40 | "string" 41 | ] 42 | }, 43 | "ask": { 44 | "type": [ 45 | "string" 46 | ] 47 | } 48 | }, 49 | "source": "calculate.jinja2" 50 | }, 51 | "If_standard_answer.py": { 52 | "type": "python", 53 | "inputs": { 54 | "groundtruth_answer": { 55 | "type": [ 56 | "string" 57 | ] 58 | } 59 | }, 60 | "source": "If_standard_answer.py", 61 | "function": "my_python_tool" 62 | }, 63 | "calculate_standard_answer.py": { 64 | "type": "python", 65 | "inputs": { 66 | "answer": { 67 | "type": [ 68 | "string" 69 | ] 70 | } 71 | }, 72 | 
"source": "calculate_standard_answer.py", 73 | "function": "my_python_tool" 74 | }, 75 | "final_result.py": { 76 | "type": "python", 77 | "inputs": { 78 | "input1": { 79 | "type": [ 80 | "string" 81 | ] 82 | }, 83 | "input2": { 84 | "type": [ 85 | "string" 86 | ] 87 | } 88 | }, 89 | "source": "final_result.py", 90 | "function": "my_python_tool" 91 | } 92 | } 93 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/.promptflow/flow.uihint.json: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "calculate": { 4 | "variant_0": { 5 | "inputs": { 6 | "answer": [], 7 | "ask": [] 8 | } 9 | } 10 | }, 11 | "aggregate": { 12 | "variant_0": { 13 | "inputs": { 14 | "processed_results": [] 15 | } 16 | } 17 | }, 18 | "If_standard_answer": { 19 | "variant_0": { 20 | "inputs": { 21 | "groundtruth_answer": [] 22 | } 23 | } 24 | }, 25 | "calculate_standard_answer": { 26 | "variant_0": { 27 | "inputs": { 28 | "answer": [] 29 | } 30 | } 31 | }, 32 | "final_result": { 33 | "variant_0": { 34 | "inputs": { 35 | "input1": [], 36 | "input2": [] 37 | } 38 | } 39 | } 40 | } 41 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/If_standard_answer.py: -------------------------------------------------------------------------------- 1 | from promptflow import tool 2 | 3 | # The inputs section will change based on the arguments of the tool function, after you save the code 4 | # Adding type to arguments and return value will help the system show the types properly 5 | # Please update the function name/signature per need 6 | @tool 7 | def my_python_tool(groundtruth_answer: str) -> bool: 8 | is_standard_answer = False 9 | if groundtruth_answer == 'no standard answer': 10 | is_standard_answer = True 11 | return is_standard_answer 12 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/__pycache__/If_standard_answer.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/__pycache__/If_standard_answer.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/__pycache__/aggregate.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/__pycache__/aggregate.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/__pycache__/calculate_standard_answer.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/__pycache__/calculate_standard_answer.cpython-39.pyc 
-------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/__pycache__/final_result.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/__pycache__/final_result.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/__pycache__/line_process.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/__pycache__/line_process.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/aggregate.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from promptflow import tool 3 | import numpy 4 | 5 | 6 | @tool 7 | def aggregate(processed_results: List[str]): 8 | """ 9 | This tool aggregates the processed result of all lines and log metric. 10 | 11 | :param processed_results: List of the output of line_process node. 12 | """ 13 | 14 | value_results = [] 15 | 16 | for result in processed_results: 17 | value_result = float(result.rstrip('%')) 18 | value_results.append(value_result) 19 | 20 | # Add your aggregation logic here 21 | fulfillment_ratio = round(numpy.mean(value_results),2) 22 | 23 | aggregated_results = { 24 | "fulfillment_ratio": str(fulfillment_ratio)+"%" 25 | 26 | } 27 | 28 | # Log metric 29 | from promptflow import log_metric 30 | log_metric(key="fulfillment_ratio", value=aggregated_results["fulfillment_ratio"]) 31 | 32 | return aggregated_results 33 | 34 | if __name__ == "__main__": 35 | numbers = ["100%", "100%", "0", "100%", "0", "100%", "80%"] 36 | accuracy = aggregate(numbers) 37 | print("The accuracy is", accuracy) 38 | 39 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/calculate.jinja2: -------------------------------------------------------------------------------- 1 | system: 2 | You are a helpful assistant. 3 | Fulfillment Ratio measures the percentage of your criteria that were met. If you had 4 criteria and all were met, your fulfillment ratio is 100%. If 3 were met, it's 75%, and so on. 4 | To calculate the Fulfillment Ratio, you should count the number of criteria met, divide by the total number of criteria, and multiply by 100 to get a percentage. For example, if 3 out of 4 criteria were met, your fulfillment ratio would be (3/4)*100 = 75%. 5 | 6 | user: 7 | Calculate Fulfillment Ratio of the "1. Customized Marvel superhero costume.\n2. Marvel comic book collection set.\n3. Marvel-themed birthday cake featuring favorite superhero.\n4. Limited edition Marvel action figures.\n5. Marvel superhero wall art.\n6. Personalized Marvel-themed water bottle or coffee mug.\n7. Marvel Avengers LEGO set.\n8. Marvel superhero-themed board games.\n9. Marvel superhero bedding and room decor.\n10. 
Tickets to a Marvel movie premiere or comic-con event.\n" with the original "Next Monday is my boyfriend's birthday. I need to come up with a few date ideas and prepare a present for him. He is a fan of Marvel movies.", only give me the numeric result in percentage. 8 | 9 | assistant: 10 | 83.33% 11 | 12 | user: 13 | Calculate Fulfillment Ratio of the "{{answer}}" with the original "{{ask}}", only give me the numeric result in percentage. 14 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/calculate_standard_answer.py: -------------------------------------------------------------------------------- 1 | from promptflow import tool 2 | 3 | # The inputs section will change based on the arguments of the tool function, after you save the code 4 | # Adding type to arguments and return value will help the system show the types properly 5 | # Please update the function name/signature per need 6 | @tool 7 | def my_python_tool(answer: str) -> str: 8 | # When a standard (numeric) answer exists, the ask is scored as fulfilled when 9 | # the planner produced a numeric result, and as unfulfilled otherwise. 10 | try: 11 | float(answer) 12 | return "100.00%" 13 | except ValueError: 14 | return "0.00%" 15 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/calculate_variant_1.jinja2: -------------------------------------------------------------------------------- 1 | system: 2 | You are a helpful assistant. 3 | Fulfillment Ratio measures the percentage of your criteria that were met. If you had 4 criteria and all were met, your fulfillment ratio is 100%. If 3 were met, it's 75%, and so on. 4 | To calculate the Fulfillment Ratio, you should count the number of criteria met, divide by the total number of criteria, and multiply by 100 to get a percentage. For example, if 3 out of 4 criteria were met, your fulfillment ratio would be (3/4)*100 = 75%. 5 | 6 | user: 7 | Calculate Fulfillment Ratio of the "1. Customized Marvel superhero costume.\n2. Marvel comic book collection set.\n3. Marvel-themed birthday cake featuring favorite superhero.\n4. Limited edition Marvel action figures.\n5. Marvel superhero wall art.\n6. Personalized Marvel-themed water bottle or coffee mug.\n7. Marvel Avengers LEGO set.\n8. Marvel superhero-themed board games.\n9. Marvel superhero bedding and room decor.\n10. Tickets to a Marvel movie premiere or comic-con event.\n" with the original "Next Monday is my boyfriend's birthday. I need to come up with a few date ideas and prepare a present for him. He is a fan of Marvel movies.", only give me the numeric result in percentage. 8 | 9 | assistant: 10 | 83.33% 11 | 12 | user: 13 | Calculate Fulfillment Ratio of the "{{answer}}" with the original "{{ask}}", only give me the numeric result in percentage.
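calculate_variant_1.jinja2 above is essentially identical to calculate.jinja2; in practice you would tweak the variant's prompt wording or LLM settings and compare the two on a batch run. A minimal sketch of pinning the eval to variant_1 — assuming the promptflow SDK's PFClient, with a hypothetical previous planner run name, and mirroring eval_run.yaml's column mapping:

from promptflow import PFClient

pf = PFClient()
# Evaluate an existing sk_planner_flow run, with the "calculate" node pinned to variant_1.
run = pf.run(
    flow="../eval_fulfillment_ratio",
    data="./data.jsonl",
    run="<name-of-a-previous-sk_planner_flow-run>",  # hypothetical run name
    variant="${calculate.variant_1}",
    column_mapping={
        "ask": "${run.inputs.ask}",
        "answer": "${run.outputs.answer}",
        "groundtruth_answer": "${data.answer}",
    },
)
pf.stream(run)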
14 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/final_result.py: -------------------------------------------------------------------------------- 1 | from promptflow import tool 2 | 3 | # The inputs section will change based on the arguments of the tool function, after you save the code 4 | # Adding type to arguments and return value will help the system show the types properly 5 | # Please update the function name/signature per need 6 | @tool 7 | def my_python_tool(input1: str, input2: str) -> str: 8 | if input1: 9 | return input1 10 | else: 11 | return input2 12 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/flow.dag.yaml: -------------------------------------------------------------------------------- 1 | id: template_eval_flow 2 | name: Template Evaluation Flow 3 | environment: 4 | python_requirements_txt: requirements.txt 5 | inputs: 6 | ask: 7 | type: string 8 | default: How many sheep would you have if you started with 3 and got 2 more? 9 | answer: 10 | type: string 11 | default: "5" 12 | groundtruth_answer: 13 | type: string 14 | default: "5" 15 | outputs: 16 | Fulfillment Ratio: 17 | type: string 18 | reference: ${final_result.output} 19 | nodes: 20 | - name: aggregate 21 | type: python 22 | source: 23 | type: code 24 | path: aggregate.py 25 | inputs: 26 | processed_results: ${final_result.output} 27 | aggregation: true 28 | - name: calculate 29 | type: llm 30 | source: 31 | type: code 32 | path: calculate.jinja2 33 | inputs: 34 | deployment_name: gpt-4 35 | answer: ${inputs.answer} 36 | ask: ${inputs.ask} 37 | connection: azure_open_ai_connection 38 | api: chat 39 | activate: 40 | when: ${If_standard_answer.output} 41 | is: true 42 | - name: If_standard_answer 43 | type: python 44 | source: 45 | type: code 46 | path: If_standard_answer.py 47 | inputs: 48 | groundtruth_answer: ${inputs.groundtruth_answer} 49 | - name: calculate_standard_answer 50 | type: python 51 | source: 52 | type: code 53 | path: calculate_standard_answer.py 54 | inputs: 55 | answer: ${inputs.answer} 56 | activate: 57 | when: ${If_standard_answer.output} 58 | is: false 59 | - name: final_result 60 | type: python 61 | source: 62 | type: code 63 | path: final_result.py 64 | inputs: 65 | input1: ${calculate.output} 66 | input2: ${calculate_standard_answer.output} 67 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/flow.meta.yaml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/latest/flow.schema.json 2 | name: template_eval_flow 3 | display_name: Template Evaluation Flow 4 | type: evaluate 5 | path: ./flow.dag.yaml 6 | description: Template Evaluation Flow 7 | properties: 8 | promptflow.stage: prod 9 | promptflow.section: template 10 | promptflow.batch_inputs: samples.json 11 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/line_process.py: -------------------------------------------------------------------------------- 1 | from promptflow import tool 2 | 3 | 4 | @tool 5 | def line_process(groundtruth: str, prediction: str): 6 | """ 7 | This tool processes the prediction of a single line and returns the 
processed result. 8 | 9 | :param groundtruth: the groundtruth of a single line. 10 | :param prediction: the prediction of a single line. 11 | """ 12 | 13 | processed_result = "" 14 | 15 | # Add your line processing logic here 16 | 17 | return processed_result 18 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/requirements.txt -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/eval_fulfillment_ratio/samples.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "groundtruth": "Tomorrow's weather will be sunny.", 4 | "prediction": "The weather will be sunny tomorrow." 5 | } 6 | ] 7 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/media/eval_flow_authoring.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/media/eval_flow_authoring.png -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/media/pf_runs_visualization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/media/pf_runs_visualization.png -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/media/sk_planner_pf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/media/sk_planner_pf.png -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/media/skplanner_flow_authoring.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/media/skplanner_flow_authoring.png -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/media/skplanner_flow_runs_visualization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/media/skplanner_flow_runs_visualization.png -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/media/skplanner_flow_test_output.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/media/skplanner_flow_test_output.png -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/media/vsc_flow_yaml.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/media/vsc_flow_yaml.png -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/media/vsc_flow_yaml_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/media/vsc_flow_yaml_run.png -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/media/vsc_flow_yaml_run_batch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/media/vsc_flow_yaml_run_batch.png -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/media/vsc_run_list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/media/vsc_run_list.png -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/.promptflow/flow.detail.json: -------------------------------------------------------------------------------- 1 | { 2 | "flow_runs": [ 3 | { 4 | "run_id": "a3219eb6-1974-4e73-8df6-db61e87169ff_0", 5 | "status": "Completed", 6 | "error": null, 7 | "inputs": { 8 | "ask": "What is the sum of 110 and 990?" 
9 | }, 10 | "output": { 11 | "answer": "1100", 12 | "steps": [ 13 | [ 14 | "Adds value to a value", 15 | "Function:", 16 | "math.Add", 17 | ":", 18 | { 19 | "variables": { 20 | "input": "" 21 | } 22 | } 23 | ] 24 | ] 25 | }, 26 | "metrics": null, 27 | "request": null, 28 | "parent_run_id": "a3219eb6-1974-4e73-8df6-db61e87169ff", 29 | "root_run_id": "a3219eb6-1974-4e73-8df6-db61e87169ff", 30 | "source_run_id": null, 31 | "flow_id": "default_flow_id", 32 | "start_time": "2023-11-21T09:53:06.297209Z", 33 | "end_time": "2023-11-21T09:53:10.268455Z", 34 | "index": 0, 35 | "api_calls": [ 36 | { 37 | "name": "my_python_tool", 38 | "type": "Tool", 39 | "inputs": { 40 | "aoai_deployment": "gpt-4-32k", 41 | "ask": "What is the sum of 110 and 990?", 42 | "conn": "custom_connection", 43 | "model": "gpt-4-32k" 44 | }, 45 | "output": { 46 | "result": "1100", 47 | "steps": [ 48 | [ 49 | "Adds value to a value", 50 | "Function:", 51 | "math.Add", 52 | ":", 53 | { 54 | "variables": { 55 | "input": "" 56 | } 57 | } 58 | ] 59 | ] 60 | }, 61 | "start_time": 1700531586.300216, 62 | "end_time": 1700531590.265407, 63 | "error": null, 64 | "children": null, 65 | "node_name": "planner" 66 | } 67 | ], 68 | "variant_id": "", 69 | "name": "", 70 | "description": "", 71 | "tags": null, 72 | "system_metrics": { 73 | "duration": 3.971246, 74 | "total_tokens": 0 75 | }, 76 | "result": { 77 | "answer": "1100", 78 | "steps": [ 79 | [ 80 | "Adds value to a value", 81 | "Function:", 82 | "math.Add", 83 | ":", 84 | { 85 | "variables": { 86 | "input": "" 87 | } 88 | } 89 | ] 90 | ] 91 | }, 92 | "upload_metrics": false 93 | } 94 | ], 95 | "node_runs": [ 96 | { 97 | "node": "planner", 98 | "flow_run_id": "a3219eb6-1974-4e73-8df6-db61e87169ff", 99 | "run_id": "a3219eb6-1974-4e73-8df6-db61e87169ff_planner_0", 100 | "status": "Completed", 101 | "inputs": { 102 | "aoai_deployment": "gpt-4-32k", 103 | "ask": "What is the sum of 110 and 990?", 104 | "conn": "custom_connection", 105 | "model": "gpt-4-32k" 106 | }, 107 | "output": { 108 | "result": "1100", 109 | "steps": [ 110 | [ 111 | "Adds value to a value", 112 | "Function:", 113 | "math.Add", 114 | ":", 115 | { 116 | "variables": { 117 | "input": "" 118 | } 119 | } 120 | ] 121 | ] 122 | }, 123 | "metrics": null, 124 | "error": null, 125 | "parent_run_id": "a3219eb6-1974-4e73-8df6-db61e87169ff_0", 126 | "start_time": "2023-11-21T09:53:06.300216Z", 127 | "end_time": "2023-11-21T09:53:10.266426Z", 128 | "index": 0, 129 | "api_calls": [ 130 | { 131 | "name": "my_python_tool", 132 | "type": "Tool", 133 | "inputs": { 134 | "aoai_deployment": "gpt-4-32k", 135 | "ask": "What is the sum of 110 and 990?", 136 | "conn": "custom_connection", 137 | "model": "gpt-4-32k" 138 | }, 139 | "output": { 140 | "result": "1100", 141 | "steps": [ 142 | [ 143 | "Adds value to a value", 144 | "Function:", 145 | "math.Add", 146 | ":", 147 | { 148 | "variables": { 149 | "input": "" 150 | } 151 | } 152 | ] 153 | ] 154 | }, 155 | "start_time": 1700531586.300216, 156 | "end_time": 1700531590.265407, 157 | "error": null, 158 | "children": null, 159 | "node_name": "planner" 160 | } 161 | ], 162 | "variant_id": "", 163 | "cached_run_id": null, 164 | "cached_flow_run_id": null, 165 | "logs": { 166 | "stdout": "[2023-11-21T09:53:10+0000] 1100\n", 167 | "stderr": "" 168 | }, 169 | "system_metrics": { 170 | "duration": 3.96621 171 | }, 172 | "result": { 173 | "result": "1100", 174 | "steps": [ 175 | [ 176 | "Adds value to a value", 177 | "Function:", 178 | "math.Add", 179 | ":", 180 | { 181 | "variables": { 182 | 
"input": "" 183 | } 184 | } 185 | ] 186 | ] 187 | } 188 | } 189 | ] 190 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/.promptflow/flow.layout.json: -------------------------------------------------------------------------------- 1 | { 2 | "nodeLayouts": { 3 | "inputs": { 4 | "x": 73.111083984375, 5 | "y": 91.86111450195312, 6 | "index": -1 7 | }, 8 | "outputs": { 9 | "x": 53.3194580078125, 10 | "y": 456.3055725097656, 11 | "index": -1 12 | }, 13 | "planner": { 14 | "x": 8.52777099609375, 15 | "y": 233.11114501953125, 16 | "index": 0 17 | }, 18 | "Semantic_Kernel_Stepwise_Planner_u8ik": { 19 | "x": 92.55560302734375, 20 | "y": 306.3055725097656, 21 | "index": -1 22 | } 23 | }, 24 | "orientation": "Vertical" 25 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/.promptflow/flow.log: -------------------------------------------------------------------------------- 1 | 2023-11-21 17:53:06 +0800 19368 execution.flow INFO Start to run 1 nodes with concurrency level 16. 2 | 2023-11-21 17:53:06 +0800 19368 execution.flow INFO Executing node planner. node run id: a3219eb6-1974-4e73-8df6-db61e87169ff_planner_0 3 | 2023-11-21 17:53:10 +0800 19368 execution.flow INFO [planner in line 0 (index starts from 0)] stdout> 1100 4 | 2023-11-21 17:53:10 +0800 19368 execution.flow INFO Node planner completes. 5 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/.promptflow/flow.output.json: -------------------------------------------------------------------------------- 1 | { 2 | "answer": "1100", 3 | "steps": [ 4 | [ 5 | "Adds value to a value", 6 | "Function:", 7 | "math.Add", 8 | ":", 9 | { 10 | "variables": { 11 | "input": "" 12 | } 13 | } 14 | ] 15 | ] 16 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/.promptflow/flow.tools.json: -------------------------------------------------------------------------------- 1 | { 2 | "package": { 3 | "promptflow_vectordb.tool.vector_index_lookup.VectorIndexLookup.search": { 4 | "name": "Vector Index Lookup", 5 | "description": "Search text or vector based query from AzureML Vector Index.", 6 | "type": "python", 7 | "module": "promptflow_vectordb.tool.vector_index_lookup", 8 | "class_name": "VectorIndexLookup", 9 | "function": "search", 10 | "inputs": { 11 | "path": { 12 | "type": [ 13 | "string" 14 | ] 15 | }, 16 | "query": { 17 | "type": [ 18 | "object" 19 | ] 20 | }, 21 | "top_k": { 22 | "default": "3", 23 | "type": [ 24 | "int" 25 | ] 26 | } 27 | }, 28 | "package": "promptflow-vectordb", 29 | "package_version": "0.1.0" 30 | }, 31 | "skpf.tools.planners.stepwise": { 32 | "description": "Incorporate Semantic Kernel Stepwise Planner into prompt flow.", 33 | "function": "stepwise", 34 | "inputs": { 35 | "connection": { 36 | "type": [ 37 | "AzureOpenAIConnection", 38 | "OpenAIConnection" 39 | ] 40 | }, 41 | "deployment_name": { 42 | "type": [ 43 | "string" 44 | ], 45 | "enabled_by": "connection", 46 | "enabled_by_type": [ 47 | "AzureOpenAIConnection" 48 | ], 49 | "capabilities": { 50 | "completion": true, 51 | "chat_completion": true, 52 | "embeddings": false 53 | }, 54 | "model_list": [ 55 | "gpt-35-turbo", 56 | "gpt-4", 57 | "text-davinci-003" 58 | ] 59 | }, 60 | "file_path": 
{ 61 | "type": [ 62 | "file_path" 63 | ] 64 | }, 65 | "intent": { 66 | "type": [ 67 | "string" 68 | ] 69 | } 70 | }, 71 | "module": "skpf.tools.planners", 72 | "name": "Semantic Kernel Stepwise Planner", 73 | "type": "python", 74 | "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACe0lEQVR4nKWTS2xNURSGv73Puff00lZuG6IeQbXEq4NqlDQeEc+URCRXaGiCEM8mBowoTWvQSZF4jJEYEDFhwkARQQeikWjaoB5BJeqW9j7POXsZnGppmhjYs/3/e/9Za/3/UteJCf9x9D9fqNGwYXB0AaVQVkCJCIL8xRnxYBAbRUAh4uH6SQAsJ4zlhIIPWmEkS07BOLRlAzJCQCmMcolMLGTK+iWkiVPWXMPcEzGyOoFrEsyv30bl1cOIY0CPqEBbGleSzGvYwvTaZWQYIBTNJVyYS8p8p3RnNdNql9O27yJ+0kWJHhZQlsb1khSWzqZ47yr6XrwHBKUU6S9xJpdVsqBpK4+qT5P42IOlw4gI9lDp+OhQmPLzu/l0q42xxRMI6bG4P1NEK4opqCjh6fZzxDvfErbyEN8fHKJSoAXPT1F17Sj9nZ95tuMcBYtKCJkIbnyASRsrcIry+Xq/nZA1BjFmuG0Rg+snqGjZj5dO86SuhWSil4E3X6m6coyZB9bS0XSTeFs3K+424Pop0DKUBU0Yqi4fY9aRat5dfsiMTSspP7mL8Uvn4PYl+faoA4DHB5uJFEVZfKmOjP8zsFWBDjtj6G17Td/zbpbfO86sQxuYWx/jx8sPtNadItM7QE5RFKM8Wlc3MHnzIhY27sEni1Ia7faneHXhBg/WNdJ19g5eIk3nmds8rTlDxIrS1/6O3iddOJJHsucbD9Y0ES2fQX7xVDyTQV0nJtq28LwshiyOk082k0BhYWFj8ACFQqOUwpdskFA7B/G8wEbj+WhlY+kQXiaLrR1Egj1QygrGPXj/7b94gbA95IcI4gfBEfPH8oxY9mEucOEXVHIIywBn9vkAAAAASUVORK5CYII=", 75 | "package": "skpf", 76 | "package_version": "0.0.6" 77 | } 78 | }, 79 | "code": { 80 | "planner.py": { 81 | "type": "python", 82 | "inputs": { 83 | "ask": { 84 | "type": [ 85 | "string" 86 | ] 87 | }, 88 | "model": { 89 | "type": [ 90 | "string" 91 | ] 92 | }, 93 | "aoai_deployment": { 94 | "type": [ 95 | "string" 96 | ] 97 | }, 98 | "conn": { 99 | "type": [ 100 | "CustomConnection" 101 | ] 102 | } 103 | }, 104 | "source": "planner.py", 105 | "function": "my_python_tool" 106 | } 107 | } 108 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/.promptflow/flow.uihint.json: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "Semantic_Kernel_Stepwise_Planner_u8ik": { 4 | "variant_0": { 5 | "inputs": { 6 | "intent": [] 7 | } 8 | } 9 | } 10 | } 11 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/__pycache__/planner.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/__pycache__/planner.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/flow.dag.yaml: -------------------------------------------------------------------------------- 1 | inputs: 2 | ask: 3 | type: string 4 | default: What is the sum of 110 and 990? 
5 | outputs: 6 | answer: 7 | type: string 8 | reference: ${planner.output.result} 9 | steps: 10 | type: string 11 | reference: ${planner.output.steps} 12 | nodes: 13 | - name: planner 14 | type: python 15 | source: 16 | type: code 17 | path: planner.py 18 | inputs: 19 | conn: custom_connection 20 | ask: ${inputs.ask} 21 | model: gpt-4-32k 22 | aoai_deployment: gpt-4-32k 23 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/planner.py: -------------------------------------------------------------------------------- 1 | from promptflow import tool 2 | from promptflow.connections import CustomConnection 3 | import semantic_kernel as sk 4 | from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, AzureChatCompletion 5 | #from semantic_kernel.planning import ActionPlanner 6 | from semantic_kernel.planning import SequentialPlanner 7 | from semantic_kernel.planning import ActionPlanner 8 | from semantic_kernel.core_skills import MathSkill, TextSkill 9 | import asyncio 10 | import json 11 | 12 | # The inputs section will change based on the arguments of the tool function, after you save the code 13 | # Adding type to arguments and return value will help the system show the types properly 14 | # Please update the function name/signature per need 15 | @tool 16 | def my_python_tool(ask: str, model: str, aoai_deployment: str, conn: CustomConnection) -> object: 17 | llm_service = "AzureOpenAI" 18 | endpoint = conn.AZURE_OPENAI_API_BASE 19 | api_key = conn.AZURE_OPENAI_API_KEY 20 | deployment = aoai_deployment 21 | 22 | kernel = sk.Kernel() 23 | useAzureOpenAI = True 24 | 25 | #deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env() 26 | kernel.add_chat_service(model, AzureChatCompletion(deployment, endpoint, api_key, api_version="2023-07-01-preview")) 27 | 28 | 29 | skills_directory = "skills/" 30 | summarize_skill = kernel.import_semantic_skill_from_directory(skills_directory, "SummarizeSkill") 31 | writer_skill = kernel.import_semantic_skill_from_directory(skills_directory, "WriterSkill") 32 | #text_skill = kernel.import_skill(TextSkill(), "TextSkill") 33 | kernel.import_skill(MathSkill(), "math") 34 | kernel.import_skill(TextSkill(), "text") 35 | 36 | planner = SequentialPlanner(kernel) 37 | #planner = ActionPlanner(kernel) 38 | 39 | plan = asyncio.run(planner.create_plan_async(goal=ask)) 40 | 41 | result = asyncio.run(plan.invoke_async()).result 42 | 43 | #result = asyncio.run(kernel.run_async(plan)).result 44 | 45 | print(result) 46 | #result = asyncio.run(plan.invoke_async()) 47 | #result = plan.invoke_async() 48 | 49 | #steps = [(step.description, ":", step._state.__dict__) for step in plan._steps] 50 | steps = [(step.description, "Function:", step.skill_name + "." + step._function.name, ":", step._state.__dict__) for step in plan._steps] 51 | 52 | return_value = {"result": result, "steps": steps} 53 | 54 | return return_value 55 | 56 | # for index, step in enumerate(plan._steps): 57 | # print("Step:", index) 58 | # print("Description:",step.description) 59 | # print("Function:", step.skill_name + "." 
+ step._function.name) 60 | # if len(step._outputs) > 0: 61 | # print( " Output:\n", str.replace(result[step._outputs[0]],"\n", "\n ")) 62 | 63 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/requirements.txt -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/SummarizeSkill/MakeAbstractReadable/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Given a scientific white paper abstract, rewrite it to make it more readable", 5 | "completion": { 6 | "max_tokens": 4000, 7 | "temperature": 0.0, 8 | "top_p": 1.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 2.0 11 | } 12 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/SummarizeSkill/MakeAbstractReadable/skprompt.txt: -------------------------------------------------------------------------------- 1 | {{$input}} 2 | 3 | == 4 | Summarize in a user-friendly way, using simple grammar. Don't use subjects like "we" "our" "us" "your". 5 | == -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/SummarizeSkill/Notegen/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Automatically generate compact notes for any text or text document.", 5 | "completion": { 6 | "max_tokens": 256, 7 | "temperature": 0.0, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/SummarizeSkill/Notegen/skprompt.txt: -------------------------------------------------------------------------------- 1 | Analyze the following extract taken from a document. 2 | - Produce key points for memory. 3 | - Give memory a name. 4 | - Extract only points worth remembering. 5 | - Be brief. Conciseness is very important. 6 | - Use broken English. 7 | You will use this memory to analyze the rest of this document, and for other relevant tasks. 8 | 9 | [Input] 10 | My name is Macbeth. I used to be King of Scotland, but I died. My wife's name is Lady Macbeth and we were married for 15 years. We had no children. Our beloved dog Toby McDuff was a famous hunter of rats in the forest. 11 | My story was immortalized by Shakespeare in a play. 12 | +++++ 13 | Family History 14 | - Macbeth, King Scotland 15 | - Wife Lady Macbeth, No Kids 16 | - Dog Toby McDuff. Hunter, dead.
17 | - Shakespeare play 18 | 19 | [Input] 20 | [[{{$input}}]] 21 | +++++ 22 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/SummarizeSkill/Summarize/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Summarize given text or any text document", 5 | "completion": { 6 | "max_tokens": 512, 7 | "temperature": 0.0, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | }, 12 | "input": { 13 | "parameters": [ 14 | { 15 | "name": "input", 16 | "description": "Text to summarize", 17 | "defaultValue": "" 18 | } 19 | ] 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/SummarizeSkill/Summarize/skprompt.txt: -------------------------------------------------------------------------------- 1 | [SUMMARIZATION RULES] 2 | DONT WASTE WORDS 3 | USE SHORT, CLEAR, COMPLETE SENTENCES. 4 | DO NOT USE BULLET POINTS OR DASHES. 5 | USE ACTIVE VOICE. 6 | MAXIMIZE DETAIL, MEANING 7 | FOCUS ON THE CONTENT 8 | 9 | [BANNED PHRASES] 10 | This article 11 | This document 12 | This page 13 | This material 14 | [END LIST] 15 | 16 | Summarize: 17 | Hello how are you? 18 | +++++ 19 | Hello 20 | 21 | Summarize this 22 | {{$input}} 23 | +++++ -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/SummarizeSkill/Topics/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Analyze given text or document and extract key topics worth remembering", 5 | "completion": { 6 | "max_tokens": 128, 7 | "temperature": 0.0, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/SummarizeSkill/Topics/skprompt.txt: -------------------------------------------------------------------------------- 1 | Analyze the following extract taken from a document and extract key topics. 2 | - Topics only worth remembering. 3 | - Be brief. Short phrases. 4 | - Can use broken English. 5 | - Conciseness is very important. 6 | - Topics can include names of memories you want to recall. 7 | - NO LONG SENTENCES. SHORT PHRASES. 8 | - Return in JSON 9 | [Input] 10 | My name is Macbeth. I used to be King of Scotland, but I died. My wife's name is Lady Macbeth and we were married for 15 years. We had no children. Our beloved dog Toby McDuff was a famous hunter of rats in the forest. 11 | My tragic story was immortalized by Shakespeare in a play. 
12 | [Output] 13 | { 14 | "topics": [ 15 | "Macbeth", 16 | "King of Scotland", 17 | "Lady Macbeth", 18 | "Dog", 19 | "Toby McDuff", 20 | "Shakespeare", 21 | "Play", 22 | "Tragedy" 23 | ] 24 | } 25 | +++++ 26 | [Input] 27 | {{$input}} 28 | [Output] -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/Acronym/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Generate an acronym for the given concept or phrase", 5 | "completion": { 6 | "max_tokens": 100, 7 | "temperature": 0.5, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/Acronym/skprompt.txt: -------------------------------------------------------------------------------- 1 | Generate a suitable acronym pair for the concept. Creativity is encouraged, including obscure references. 2 | The uppercase letters in the acronym expansion must agree with the letters of the acronym 3 | 4 | Q: A technology for detecting moving objects, their distance and velocity using radio waves. 5 | A: R.A.D.A.R: RAdio Detection And Ranging. 6 | 7 | Q: A weapon that uses high voltage electricity to incapacitate the target 8 | A: T.A.S.E.R: Thomas A. Swift’s Electric Rifle 9 | 10 | Q: Equipment that lets a diver breathe underwater 11 | A: S.C.U.B.A: Self Contained Underwater Breathing Apparatus. 12 | 13 | Q: Reminder not to overcomplicate subject matter. 14 | A: K.I.S.S: Keep It Simple Stupid 15 | 16 | Q: A national organization for investment in space travel, rockets, space ships, space exploration 17 | A: N.A.S.A: National Aeronautics Space Administration 18 | 19 | Q: Agreement that governs trade among North American countries. 20 | A: N.A.F.T.A: North American Free Trade Agreement. 21 | 22 | Q: Organization to protect the freedom and security of its member countries in North America and Europe. 23 | A: N.A.T.O: North Atlantic Treaty Organization. 24 | 25 | Q:{{$input}} -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/AcronymGenerator/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Given a request to generate an acronym from a string, generate an acronym and provide the acronym explanation.", 5 | "completion": { 6 | "max_tokens": 256, 7 | "temperature": 0.7, 8 | "top_p": 1.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0, 11 | "stop_sequences": [ 12 | "#" 13 | ] 14 | } 15 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/AcronymGenerator/skprompt.txt: -------------------------------------------------------------------------------- 1 | # Name of a super artificial intelligence 2 | J.A.R.V.I.S. = Just A Really Very Intelligent System. 3 | # Name for a new young beautiful assistant 4 | F.R.I.D.A.Y. = Female Replacement Intelligent Digital Assistant Youth. 5 | # Mirror to check what's behind 6 | B.A.R.F. = Binary Augmented Retro-Framing.
7 | # Pair of powerful glasses created by a genius that is now dead 8 | E.D.I.T.H. = Even Dead I’m The Hero. 9 | # A company building and selling computers 10 | I.B.M. = Intelligent Business Machine. 11 | # A super computer that is sentient. 12 | H.A.L = Heuristically programmed ALgorithmic computer. 13 | # an intelligent bot that helps with productivity. 14 | C.O.R.E. = Central Optimization Routines and Efficiency. 15 | # an intelligent bot that helps with productivity. 16 | P.A.L. = Personal Assistant Light. 17 | # an intelligent bot that helps with productivity. 18 | A.I.D.A. = Artificial Intelligence Digital Assistant. 19 | # an intelligent bot that helps with productivity. 20 | H.E.R.A. = Human Emulation and Recognition Algorithm. 21 | # an intelligent bot that helps with productivity. 22 | I.C.A.R.U.S. = Intelligent Control and Automation of Research and Utility Systems. 23 | # an intelligent bot that helps with productivity. 24 | N.E.M.O. = Networked Embedded Multiprocessor Orchestration. 25 | # an intelligent bot that helps with productivity. 26 | E.P.I.C. = Enhanced Productivity and Intelligence through Computing. 27 | # an intelligent bot that helps with productivity. 28 | M.A.I.A. = Multipurpose Artificial Intelligence Assistant. 29 | # an intelligent bot that helps with productivity. 30 | A.R.I.A. = Artificial Reasoning and Intelligent Assistant. 31 | # An incredibly smart entity developed with complex math, that helps me being more productive. 32 | O.M.E.G.A. = Optimized Mathematical Entity for Generalized Artificial intelligence. 33 | # An incredibly smart entity developed with complex math, that helps me being more productive. 34 | P.Y.T.H.O.N. = Precise Yet Thorough Heuristic Optimization Network. 35 | # An incredibly smart entity developed with complex math, that helps me being more productive. 36 | A.P.O.L.L.O. = Adaptive Probabilistic Optimization Learning Library for Online Applications. 37 | # An incredibly smart entity developed with complex math, that helps me being more productive. 38 | S.O.L.I.D. = Self-Organizing Logical Intelligent Data-base. 39 | # An incredibly smart entity developed with complex math, that helps me being more productive. 40 | D.E.E.P. = Dynamic Estimation and Prediction. 41 | # An incredibly smart entity developed with complex math, that helps me being more productive. 42 | B.R.A.I.N. = Biologically Realistic Artificial Intelligence Network. 43 | # An incredibly smart entity developed with complex math, that helps me being more productive. 44 | C.O.G.N.I.T.O. = COmputational and Generalized INtelligence TOolkit. 45 | # An incredibly smart entity developed with complex math, that helps me being more productive. 46 | S.A.G.E. = Symbolic Artificial General Intelligence Engine. 47 | # An incredibly smart entity developed with complex math, that helps me being more productive. 48 | Q.U.A.R.K. = Quantum Universal Algorithmic Reasoning Kernel. 49 | # An incredibly smart entity developed with complex math, that helps me being more productive. 50 | S.O.L.V.E. = Sophisticated Operational Logic and Versatile Expertise. 51 | # An incredibly smart entity developed with complex math, that helps me being more productive. 52 | C.A.L.C.U.L.U.S. = Cognitively Advanced Logic and Computation Unit for Learning and Understanding Systems. 
53 | 54 | # {{$INPUT}} 55 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/AcronymReverse/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Given a single word or acronym, generate the expanded form matching the acronym letters.", 5 | "completion": { 6 | "max_tokens": 256, 7 | "temperature": 0.5, 8 | "top_p": 1.0, 9 | "presence_penalty": 0.8, 10 | "frequency_penalty": 0.0, 11 | "stop_sequences": [ 12 | "#END#" 13 | ] 14 | } 15 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/AcronymReverse/skprompt.txt: -------------------------------------------------------------------------------- 1 | # acronym: Devis 2 | Sentences matching the acronym: 3 | 1. Dragons Eat Very Interesting Snacks 4 | 2. Develop Empathy and Vision to Increase Success 5 | 3. Don't Expect Vampires In Supermarkets 6 | #END# 7 | 8 | # acronym: Christmas 9 | Sentences matching the acronym: 10 | 1. Celebrating Harmony and Respect in a Season of Togetherness, Merriment, and True joy 11 | 2. Children Have Real Interest Since The Mystery And Surprise Thrills 12 | 3. Christmas Helps Reduce Inner Stress Through Mistletoe And Sleigh excursions 13 | #END# 14 | 15 | # acronym: noWare 16 | Sentences matching the acronym: 17 | 1. No One Wants an App that Randomly Erases everything 18 | 2. Nourishing Oatmeal With Almond, Raisin, and Egg toppings 19 | 3. Notice Opportunity When Available and React Enthusiastically 20 | #END# 21 | 22 | Reverse the following acronym back to a funny sentence. Provide 3 examples. 23 | # acronym: {{$INPUT}} 24 | Sentences matching the acronym: 25 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/Brainstorm/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Given a goal or topic description generate a list of ideas", 5 | "completion": { 6 | "max_tokens": 2000, 7 | "temperature": 0.5, 8 | "top_p": 1.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0, 11 | "stop_sequences": ["##END##"] 12 | }, 13 | "input": { 14 | "parameters": [ 15 | { 16 | "name": "input", 17 | "description": "A topic description or goal.", 18 | "defaultValue": "" 19 | } 20 | ] 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/Brainstorm/skprompt.txt: -------------------------------------------------------------------------------- 1 | Must: brainstorm ideas and create a list. 2 | Must: use a numbered list. 3 | Must: only one list. 4 | Must: end list with ##END## 5 | Should: no more than 10 items. 6 | Should: at least 3 items. 7 | Topic: {{$INPUT}} 8 | Start. 
9 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/EmailGen/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Write an email from the given bullet points", 5 | "completion": { 6 | "max_tokens": 256, 7 | "temperature": 0.0, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/EmailGen/skprompt.txt: -------------------------------------------------------------------------------- 1 | Rewrite my bullet points into complete sentences. Use a polite and inclusive tone. 2 | 3 | [Input] 4 | - Macbeth, King Scotland 5 | - Married, Wife Lady Macbeth, No Kids 6 | - Dog Toby McDuff. Hunter, dead. 7 | - Shakespeare play 8 | +++++ 9 | The story of Macbeth 10 | My name is Macbeth. I used to be King of Scotland, but I died. My wife's name is Lady Macbeth and we were married for 15 years. We had no children. Our beloved dog Toby McDuff was a famous hunter of rats in the forest. 11 | My story was immortalized by Shakespeare in a play. 12 | 13 | +++++ 14 | [Input] 15 | {{$input}} 16 | +++++ 17 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/EmailTo/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Turn bullet points into an email to someone, using a polite tone", 5 | "completion": { 6 | "max_tokens": 256, 7 | "temperature": 0.0, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/EmailTo/skprompt.txt: -------------------------------------------------------------------------------- 1 | Rewrite my bullet points into an email featuring complete sentences. Use a polite and inclusive tone. 2 | 3 | [Input] 4 | Toby, 5 | 6 | - Macbeth, King Scotland 7 | - Married, Wife Lady Macbeth, No Kids 8 | - Dog Toby McDuff. Hunter, dead. 9 | - Shakespeare play 10 | 11 | Thanks, 12 | Dexter 13 | 14 | +++++ 15 | Hi Toby, 16 | 17 | The story of Macbeth 18 | My name is Macbeth. I used to be King of Scotland, but I died. My wife's name is Lady Macbeth and we were married for 15 years. We had no children. Our beloved dog Toby McDuff was a famous hunter of rats in the forest. 19 | My story was immortalized by Shakespeare in a play. 
20 | 21 | Thanks, 22 | Dexter 23 | 24 | +++++ 25 | [Input] 26 | {{$to}} 27 | {{$input}} 28 | 29 | Thanks, 30 | {{$sender}} 31 | +++++ 32 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/EnglishImprover/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Translate text to English and improve it", 5 | "completion": { 6 | "max_tokens": 3000, 7 | "temperature": 0.0, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/EnglishImprover/skprompt.txt: -------------------------------------------------------------------------------- 1 | I want you to act as an English translator, spelling corrector and improver. 2 | I will speak to you in any language and you will detect the language, translate it and answer in the corrected and improved version of my text, in English. 3 | I want you to replace my simplified A0-level words and sentences with more beautiful and elegant, upper level English words and sentences. 4 | Keep the meaning same, but make them more literary. 5 | I want you to only reply the correction, the improvements and nothing else, do not write explanations. 6 | 7 | Sentence: """ 8 | {{$INPUT}} 9 | """ 10 | 11 | Translation: 12 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/NovelChapter/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Write a chapter of a novel.", 5 | "completion": { 6 | "max_tokens": 2048, 7 | "temperature": 0.3, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | }, 12 | "input": { 13 | "parameters": [ 14 | { 15 | "name": "input", 16 | "description": "A synopsis of what the chapter should be about.", 17 | "defaultValue": "" 18 | }, 19 | { 20 | "name": "theme", 21 | "description": "The theme or topic of this novel.", 22 | "defaultValue": "" 23 | }, 24 | { 25 | "name": "previousChapter", 26 | "description": "The synopsis of the previous chapter.", 27 | "defaultValue": "" 28 | }, 29 | { 30 | "name": "chapterIndex", 31 | "description": "The number of the chapter to write.", 32 | "defaultValue": "" 33 | } 34 | ] 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/NovelChapter/skprompt.txt: -------------------------------------------------------------------------------- 1 | [CONTEXT] 2 | 3 | THEME OF STORY: 4 | {{$theme}} 5 | 6 | PREVIOUS CHAPTER: 7 | {{$previousChapter}} 8 | 9 | [END CONTEXT] 10 | 11 | 12 | WRITE THIS CHAPTER USING [CONTEXT] AND 13 | CHAPTER SYNOPSIS. 
DO NOT REPEAT SYNOPSIS IN THE OUTPUT 14 | 15 | Chapter Synopsis: 16 | {{$input}} 17 | 18 | Chapter {{$chapterIndex}} 19 | 20 | 21 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/NovelChapterWithNotes/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Write a chapter of a novel using notes about the chapter to write.", 5 | "completion": { 6 | "max_tokens": 1024, 7 | "temperature": 0.5, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | }, 12 | "input": { 13 | "parameters": [ 14 | { 15 | "name": "input", 16 | "description": "What the novel should be about.", 17 | "defaultValue": "" 18 | }, 19 | { 20 | "name": "theme", 21 | "description": "The theme of this novel.", 22 | "defaultValue": "" 23 | }, 24 | { 25 | "name": "notes", 26 | "description": "Notes useful to write this chapter.", 27 | "defaultValue": "" 28 | }, 29 | { 30 | "name": "previousChapter", 31 | "description": "The previous chapter synopsis.", 32 | "defaultValue": "" 33 | }, 34 | { 35 | "name": "chapterIndex", 36 | "description": "The number of the chapter to write.", 37 | "defaultValue": "" 38 | } 39 | ] 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/NovelChapterWithNotes/skprompt.txt: -------------------------------------------------------------------------------- 1 | [CONTEXT] 2 | 3 | THEME OF STORY: 4 | {{$theme}} 5 | 6 | NOTES OF STORY SO FAR - USE AS REFERENCE 7 | {{$notes}} 8 | 9 | PREVIOUS CHAPTER, USE AS REFERENCE: 10 | {{$previousChapter}} 11 | 12 | [END CONTEXT] 13 | 14 | 15 | WRITE THIS CHAPTER CONTINUING STORY, USING [CONTEXT] AND CHAPTER SYNOPSIS BELOW. DO NOT REPEAT SYNOPSIS IN THE CHAPTER. DON'T REPEAT PREVIOUS CHAPTER. 16 | 17 | {{$input}} 18 | 19 | Chapter {{$chapterIndex}} 20 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/NovelOutline/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Generate a list of chapter synopsis for a novel or novella", 5 | "completion": { 6 | "max_tokens": 2048, 7 | "temperature": 0.1, 8 | "top_p": 0.5, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | }, 12 | "input": { 13 | "parameters": [ 14 | { 15 | "name": "input", 16 | "description": "What the novel should be about.", 17 | "defaultValue": "" 18 | }, 19 | { 20 | "name": "chapterCount", 21 | "description": "The number of chapters to generate.", 22 | "defaultValue": "" 23 | }, 24 | { 25 | "name": "endMarker", 26 | "description": "The marker to use to end each chapter.", 27 | "defaultValue": "" 28 | } 29 | ] 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/NovelOutline/skprompt.txt: -------------------------------------------------------------------------------- 1 | I want to write a {{$chapterCount}} chapter novella about: 2 | {{$input}} 3 | 4 | There MUST BE {{$chapterCount}} CHAPTERS. 5 | 6 | INVENT CHARACTERS AS YOU SEE FIT. BE HIGHLY CREATIVE AND/OR FUNNY. 
7 | WRITE SYNOPSIS FOR EACH CHAPTER. INCLUDE INFORMATION ABOUT CHARACTERS ETC. SINCE EACH 8 | CHAPTER WILL BE WRITTEN BY A DIFFERENT WRITER, YOU MUST INCLUDE ALL PERTINENT INFORMATION 9 | IN EACH SYNOPSIS 10 | 11 | YOU MUST END EACH SYNOPSIS WITH {{$endMarker}} 12 | 13 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/Rewrite/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Rewrite the given text in a given style while retaining its meaning and factual content", 5 | "completion": { 6 | "max_tokens": 256, 7 | "temperature": 0.0, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/Rewrite/skprompt.txt: -------------------------------------------------------------------------------- 1 | Rewrite the given text like it was written in this style or by: {{$style}}. 2 | MUST RETAIN THE SAME MEANING AND FACTUAL CONTENT AS THE ORIGINAL. 3 | 4 | 5 | {{$input}} 6 | 7 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/ShortPoem/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Turn a scenario into a short and entertaining poem.", 5 | "completion": { 6 | "max_tokens": 60, 7 | "temperature": 0.5, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | }, 12 | "input": { 13 | "parameters": [ 14 | { 15 | "name": "input", 16 | "description": "The scenario to turn into a poem.", 17 | "defaultValue": "" 18 | } 19 | ] 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/ShortPoem/skprompt.txt: -------------------------------------------------------------------------------- 1 | Generate a short funny poem or limerick to explain the given event. Be creative and be funny. Let your imagination run wild. 2 | Event:{{$input}} 3 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/StoryGen/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Generate a list of synopses for a novel or novella with sub-chapters", 5 | "completion": { 6 | "max_tokens": 250, 7 | "temperature": 0.0, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/StoryGen/skprompt.txt: -------------------------------------------------------------------------------- 1 | ONLY USE XML TAGS IN THIS LIST: 2 | [XML TAG LIST] 3 | list: Surround any lists with this tag 4 | synopsis: An outline of the chapter to write 5 | [END LIST] 6 | 7 | EMIT WELL FORMED XML ALWAYS. Code should be CDATA.
8 | 9 | 10 | {{$input}} 11 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/TellMeMore/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Write a verbose, detailed overview of part of a conversation or document, focusing on a given area", 5 | "completion": { 6 | "max_tokens": 500, 7 | "temperature": 0.0, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/TellMeMore/skprompt.txt: -------------------------------------------------------------------------------- 1 | >>>>>The following is part of a {{$conversationtype}}. 2 | {{$input}} 3 | 4 | >>>>>The following is an overview of a previous part of the {{$conversationtype}}, focusing on "{{$focusarea}}". 5 | {{$previousresults}} 6 | 7 | >>>>>In 250 words or less, write a verbose and detailed overview of the {{$conversationtype}} focusing solely on "{{$focusarea}}". -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/Translate/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Translate the input into a language of your choice", 5 | "completion": { 6 | "max_tokens": 2000, 7 | "temperature": 0.7, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0, 11 | "stop_sequences": [ 12 | "[done]" 13 | ] 14 | } 15 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/Translate/skprompt.txt: -------------------------------------------------------------------------------- 1 | Translate the input below into {{$language}} 2 | 3 | MAKE SURE YOU ONLY USE {{$language}}. 4 | 5 | {{$input}} 6 | 7 | Translation: 8 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/TwoSentenceSummary/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "type": "completion", 4 | "description": "Summarize given text in two sentences or less", 5 | "completion": { 6 | "max_tokens": 100, 7 | "temperature": 0.0, 8 | "top_p": 0.0, 9 | "presence_penalty": 0.0, 10 | "frequency_penalty": 0.0 11 | } 12 | } -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/sk_planner_flow/skills/WriterSkill/TwoSentenceSummary/skprompt.txt: -------------------------------------------------------------------------------- 1 | Summarize the following text in two sentences or less.
2 | [BEGIN TEXT] 3 | {{$input}} 4 | [END TEXT] 5 | -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/source_file/.env: -------------------------------------------------------------------------------- 1 | AZURE_OPENAI_API_BASE= 2 | AZURE_OPENAI_API_KEY= -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/source_file/batch_run.yaml: -------------------------------------------------------------------------------- 1 | # name: sk_planner_flow_default_20231113_135701_021000 2 | display_name: sk_planner_flow_${variant_id}_${timestamp} # supported macros: ${variant_id},${timestamp},${run} 3 | flow: ../sk_planner_flow 4 | data: ./data.jsonl 5 | #run: 6 | column_mapping: 7 | ask: ${data.ask} 8 | environment_variables: 9 | PF_WORKER_COUNT: "2" # number of workers to run in parallel -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/source_file/data.jsonl: -------------------------------------------------------------------------------- 1 | {"ask": "Tomorrow is Valentine's day. I need to come up with a few date ideas. She likes Shakespeare so write using his style. She speaks French so write it in French. Convert the text to uppercase.", "answer": "no standard answer"} 2 | {"ask": "How many sheep would you have if you started with 3 and got 2 more?", "answer": "5"} 3 | {"ask": "I need to invite my friend to my wedding. Write an email invitation for him. His name is Lucas and he is Spanish, so write the email in Spanish.", "answer":"no standard answer"} 4 | {"ask": "I'm writing a novel about a time-traveling historian from scratch. Help to imagine the story line and write an outline for the first two chapters. It should be short and should be in English.", "answer":"no standard answer"} 5 | {"ask": "I want to publish a long article about the effects of climate change. I need some suggestions on the key points. I need 4 points only.", "answer":"no standard answer"} 6 | {"ask": "How many eggs would you have if you started with 6 and bought 4 more?", "answer": "10"} 7 | {"ask": "What is the sum of 5 and 3", "answer": "8"} 8 | {"ask": "I'm writing a script for a short film about a detective. Write the plot. It should be in Italian and within 300 words.", "answer":"no standard answer"} 9 | {"ask": "Tomorrow is my boyfriend's birthday. I need to come up with a few date ideas and prepare a present for him. He is a fan of Marvel movies.", "answer":"no standard answer"} 10 | {"ask": "I need to write a poem for my girlfriend. She loves nature and the poem should be written in Russian. Can you help me?", "answer":"no standard answer"} 11 | {"ask": "What is the product of 7 and 6", "answer": "42"} 12 | {"ask": "I want to surprise my wife for our anniversary. She loves music and plays the guitar. I need some ideas.
3 points are enough.", "answer":"no standard answer"} -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/source_file/eval_run.yaml: -------------------------------------------------------------------------------- 1 | #name: eval_fulfillment_ratio_default_20231113_150839_477000 2 | display_name: ${run}_eval_fulfillment_ratio_${timestamp} # supported macros: ${variant_id},${timestamp},${run} 3 | flow: ../eval_fulfillment_ratio 4 | data: ./data.jsonl 5 | run: sk_planner_default_20231107_213812_513000 6 | column_mapping: 7 | ask: ${run.inputs.ask} 8 | answer: ${run.outputs.answer} 9 | groundtruth_answer: ${data.answer} -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/source_file/requirements.txt: -------------------------------------------------------------------------------- 1 | semantic-kernel==0.3.10.dev0 2 | promptflow[azure] 3 | promptflow-tools 4 | openai 5 | jinja2 6 | python-dotenv -------------------------------------------------------------------------------- /sample_gallery/evaluate_semantic_kernel_planner/source_file/sk_planner.py: -------------------------------------------------------------------------------- 1 | import semantic_kernel as sk 2 | from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, AzureChatCompletion 3 | #from semantic_kernel.planning import ActionPlanner 4 | from semantic_kernel.planning import SequentialPlanner 5 | from semantic_kernel.planning import ActionPlanner 6 | from semantic_kernel.core_skills import MathSkill, TextSkill 7 | import asyncio 8 | 9 | def sk_planner(ask: str, model: str, aoai_deployment: str, api_version: str) -> object: 10 | llm_service = "AzureOpenAI" 11 | api_key, endpoint = sk.azure_openai_settings_from_dot_env(include_deployment=False) 12 | 13 | deployment = aoai_deployment 14 | 15 | kernel = sk.Kernel() 16 | useAzureOpenAI = True 17 | 18 | #deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env() 19 | kernel.add_chat_service(model, AzureChatCompletion(deployment, endpoint, api_key, api_version)) 20 | 21 | skills_directory = "skplanner/sk_planner_flow/skills/" 22 | summarize_skill = kernel.import_semantic_skill_from_directory(skills_directory, "SummarizeSkill") 23 | writer_skill = kernel.import_semantic_skill_from_directory(skills_directory, "WriterSkill") 24 | #text_skill = kernel.import_skill(TextSkill(), "TextSkill") 25 | kernel.import_skill(MathSkill(), "math") 26 | kernel.import_skill(TextSkill(), "text") 27 | 28 | planner = SequentialPlanner(kernel) 29 | #planner = ActionPlanner(kernel) 30 | 31 | plan = asyncio.run(planner.create_plan_async(goal=ask)) 32 | 33 | result = asyncio.run(plan.invoke_async()).result 34 | 35 | #result = asyncio.run(kernel.run_async(plan)).result 36 | 37 | print(result) 38 | #result = asyncio.run(plan.invoke_async()) 39 | #result = plan.invoke_async() 40 | 41 | steps = [(step.description, ":", step._state.__dict__) for step in plan._steps] 42 | 43 | return_value = {"result": result, "steps": steps} 44 | 45 | return return_value 46 | 47 | if __name__=="__main__": 48 | 49 | ask = """ 50 | Tomorrow is Valentine's day. I need to come up with a few date ideas. She speaks French so write it in French.
51 | Convert the text to uppercase""" 52 | model = "gpt-4" 53 | aoai_deployment = "gpt-4" 54 | api_version = "2023-07-01-preview" 55 | 56 | sk_planner(ask=ask, model=model , aoai_deployment=aoai_deployment, api_version=api_version) 57 | 58 | # for index, step in enumerate(plan._steps): 59 | # print("Step:", index) 60 | # print("Description:",step.description) 61 | # print("Function:", step.skill_name + "." + step._function.name) 62 | # if len(step._outputs) > 0: 63 | # print( " Output:\n", str.replace(result[step._outputs[0]],"\n", "\n ")) 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /sample_gallery/golden_dataset/media/excel-template.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/golden_dataset/media/excel-template.png -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/langchain_local/chroma_db/chroma.sqlite3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/langchain_local/chroma_db/chroma.sqlite3 -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/langchain_local/chroma_db/e17cd960-fa1a-4572-b4fb-60c666fd15f7/header.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/langchain_local/chroma_db/e17cd960-fa1a-4572-b4fb-60c666fd15f7/header.bin -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/langchain_local/chroma_db/e17cd960-fa1a-4572-b4fb-60c666fd15f7/length.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/langchain_local/chroma_db/e17cd960-fa1a-4572-b4fb-60c666fd15f7/length.bin -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/langchain_local/chroma_db/e17cd960-fa1a-4572-b4fb-60c666fd15f7/link_lists.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/langchain_local/chroma_db/e17cd960-fa1a-4572-b4fb-60c666fd15f7/link_lists.bin -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/langchain_local/langchain.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%pip install -r requirements.txt" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "from dotenv import load_dotenv \n", 19 | "import os \n", 20 | "\n", 21 | "from langchain_openai import AzureChatOpenAI\n", 22 | "from langchain_core.messages import HumanMessage\n", 23 | " 
\n", 24 | "# Load the .env file \n", 25 | "load_dotenv() \n", 26 | " \n", 27 | "# Get an environment variable \n", 28 | "endpoint = os.getenv('AZURE_OPENAI_ENDPOINT') \n", 29 | "key = os.getenv('AZURE_OPENAI_API_KEY')\n", 30 | "api_version = os.getenv('AZURE_OPENAI_API_VERSION') \n", 31 | "chat_deployment = os.getenv('AZURE_OPENAI_CHAT_DEPLOYMENT') \n", 32 | "\n", 33 | "\n", 34 | "chat = AzureChatOpenAI(\n", 35 | " azure_deployment=chat_deployment,\n", 36 | " openai_api_version=api_version,\n", 37 | ")\n", 38 | "\n", 39 | "# Test chat openai\n", 40 | "message = HumanMessage(\n", 41 | " content=\"Translate this sentence from English to French. I love programming.\"\n", 42 | ")\n", 43 | "result = chat.invoke([message]) \n", 44 | "print(result.content) \n", 45 | " " 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": null, 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "import bs4\n", 55 | "from langchain import hub\n", 56 | "\n", 57 | "from langchain_community.document_loaders import WebBaseLoader\n", 58 | "from langchain_community.vectorstores import Chroma\n", 59 | "from langchain_core.output_parsers import StrOutputParser\n", 60 | "from langchain_core.runnables import RunnablePassthrough\n", 61 | "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", 62 | "\n", 63 | "from langchain_openai import AzureOpenAIEmbeddings\n", 64 | " \n", 65 | "\n", 66 | "# specify the directory where the index will be saved/loaded from \n", 67 | "directory = \"./chroma_db\"\n", 68 | "\n", 69 | "\n", 70 | "\n", 71 | "# Get an environment variable \n", 72 | "embedding_deployment = os.getenv('AZURE_OPENAI_EMBEDDING_DEPLOYMENT')\n", 73 | "\n", 74 | "\n", 75 | "embedding=AzureOpenAIEmbeddings(\n", 76 | " azure_deployment=embedding_deployment,\n", 77 | " openai_api_version=api_version,)\n", 78 | "\n", 79 | "\n", 80 | "\n", 81 | " \n", 82 | "if os.path.exists(directory): \n", 83 | " print(f\"Index exists, loading\") \n", 84 | " # if the directory exists, load the index \n", 85 | " vectorstore = Chroma(persist_directory=directory, embedding_function=embedding)\n", 86 | "\n", 87 | "else: \n", 88 | " print(f\"Index not exist, building\") \n", 89 | " # if the directory does not exist, create the index and save it \n", 90 | "\n", 91 | " # Load, chunk and index the contents of the blog.\n", 92 | " loader = WebBaseLoader(\n", 93 | " web_paths=(\"https://lilianweng.github.io/posts/2023-06-23-agent/\",),\n", 94 | " bs_kwargs=dict(\n", 95 | " parse_only=bs4.SoupStrainer(\n", 96 | " class_=(\"post-content\", \"post-title\", \"post-header\")\n", 97 | " )\n", 98 | " ),\n", 99 | " )\n", 100 | " docs = loader.load()\n", 101 | "\n", 102 | "\n", 103 | " text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n", 104 | " splits = text_splitter.split_documents(docs)\n", 105 | "\n", 106 | " vectorstore = Chroma.from_documents(documents=splits, embedding=embedding, persist_directory=directory) \n" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": null, 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [ 115 | "# Retrieve and generate using the relevant snippets of the blog.\n", 116 | "retriever = vectorstore.as_retriever()\n", 117 | "prompt = hub.pull(\"rlm/rag-prompt\")\n", 118 | "\n", 119 | "def format_docs(docs):\n", 120 | " return \"\\n\\n\".join(doc.page_content for doc in docs)\n", 121 | "\n", 122 | "\n", 123 | "rag_chain = (\n", 124 | " {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n", 
125 | " | prompt\n", 126 | " | chat\n", 127 | " | StrOutputParser()\n", 128 | ")\n", 129 | "print(rag_chain.invoke(\"What is Task Decomposition?\"))" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [] 138 | } 139 | ], 140 | "metadata": { 141 | "kernelspec": { 142 | "display_name": "flexflow", 143 | "language": "python", 144 | "name": "python3" 145 | }, 146 | "language_info": { 147 | "codemirror_mode": { 148 | "name": "ipython", 149 | "version": 3 150 | }, 151 | "file_extension": ".py", 152 | "mimetype": "text/x-python", 153 | "name": "python", 154 | "nbconvert_exporter": "python", 155 | "pygments_lexer": "ipython3", 156 | "version": "3.9.19" 157 | } 158 | }, 159 | "nbformat": 4, 160 | "nbformat_minor": 2 161 | } 162 | -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/langchain_local/rag_langchain.py: -------------------------------------------------------------------------------- 1 | import bs4 2 | from langchain import hub 3 | 4 | from langchain_community.document_loaders import WebBaseLoader 5 | from langchain_community.vectorstores import Chroma 6 | from langchain_core.output_parsers import StrOutputParser 7 | from langchain_core.runnables import RunnablePassthrough 8 | 9 | from langchain_text_splitters import RecursiveCharacterTextSplitter 10 | 11 | 12 | from langchain_openai import AzureChatOpenAI 13 | from langchain_openai import AzureOpenAIEmbeddings 14 | 15 | from langchain_core.messages import HumanMessage 16 | 17 | 18 | from dotenv import load_dotenv 19 | import os 20 | 21 | # Load the .env file 22 | load_dotenv() 23 | 24 | # Get an environment variable 25 | endpoint = os.getenv('AZURE_OPENAI_ENDPOINT') 26 | key = os.getenv('AZURE_OPENAI_API_KEY') 27 | api_version = os.getenv('AZURE_OPENAI_API_VERSION') 28 | chat_deployment = os.getenv('AZURE_OPENAI_CHAT_DEPLOYMENT') 29 | embedding_deployment = os.getenv('AZURE_OPENAI_EMBEDDING_DEPLOYMENT') 30 | 31 | 32 | chat = AzureChatOpenAI( 33 | azure_deployment=chat_deployment, 34 | openai_api_version=api_version, 35 | ) 36 | 37 | # Test chat openai, you can uncommment to test if your enviroment variable is correctly set up. 38 | # message = HumanMessage( 39 | # content="Translate this sentence from English to French. I love programming." 
40 | # ) 41 | # result = chat.invoke([message]) 42 | # print(result.content) 43 | 44 | 45 | 46 | 47 | 48 | 49 | def rag_chain(question: str = "What is Task Decomposition?", directory: str = "./chroma_db"): 50 | 51 |     # directory where the index will be saved/loaded from 52 |     # Load the .env file 53 |     load_dotenv() 54 | 55 |     # Get an environment variable 56 |     endpoint = os.getenv('AZURE_OPENAI_ENDPOINT') 57 |     key = os.getenv('AZURE_OPENAI_API_KEY') 58 |     api_version = os.getenv('AZURE_OPENAI_API_VERSION') 59 |     embedding_deployment = os.getenv('AZURE_OPENAI_EMBEDDING_DEPLOYMENT') 60 | 61 | 62 | 63 | 64 | 65 | 66 |     embedding=AzureOpenAIEmbeddings( 67 |         azure_deployment=embedding_deployment, 68 |         openai_api_version=api_version,) 69 | 70 | 71 | 72 | 73 |     if os.path.exists(directory): 74 |         print("Index exists, loading") 75 |         # if the directory exists, load the index 76 |         vectorstore = Chroma(persist_directory=directory, embedding_function=embedding) 77 | 78 |     else: 79 |         print("Index does not exist, building") 80 |         # if the directory does not exist, create the index and save it 81 | 82 |         # Load, chunk and index the contents of the blog. 83 |         loader = WebBaseLoader( 84 |             web_paths=("https://lilianweng.github.io/posts/2023-06-23-agent/",), 85 |             bs_kwargs=dict( 86 |                 parse_only=bs4.SoupStrainer( 87 |                     class_=("post-content", "post-title", "post-header") 88 |                 ) 89 |             ), 90 |         ) 91 |         docs = loader.load() 92 | 93 | 94 |         text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200) 95 |         splits = text_splitter.split_documents(docs) 96 | 97 |         vectorstore = Chroma.from_documents(documents=splits, embedding=embedding, persist_directory=directory) 98 | 99 |     # Retrieve and generate using the relevant snippets of the blog.
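    # The "rag_chain" assembled below uses LangChain Expression Language (LCEL):
    # each "|" pipes one runnable's output into the next. The leading dict runs two
    # branches on the incoming question: "context" passes it through the retriever
    # and joins the returned documents into a single string via format_docs, while
    # "question" forwards it unchanged through RunnablePassthrough. The filled
    # prompt then goes to the chat model, and StrOutputParser extracts plain text.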
100 | retriever = vectorstore.as_retriever() 101 | prompt = hub.pull("rlm/rag-prompt") 102 | 103 | def format_docs(docs): 104 | return "\n\n".join(doc.page_content for doc in docs) 105 | 106 | 107 | rag_chain = ( 108 | {"context": retriever | format_docs, "question": RunnablePassthrough()} 109 | | prompt 110 | | chat 111 | | StrOutputParser() 112 | ) 113 | return rag_chain.invoke(question) 114 | 115 | 116 | if __name__ == "__main__": 117 | 118 | 119 | 120 | import pandas as pd 121 | 122 | # Load the data 123 | data = pd.read_csv('testset_question.csv') 124 | 125 | # Create an empty DataFrame to store the question and answers 126 | results = pd.DataFrame(columns=['question', 'answer']) 127 | 128 | # Iterate over the questions in the data 129 | for index, row in data.iterrows(): 130 | question = row['question'] 131 | 132 | 133 | # Generate the answer using the rag_chain function 134 | answer = rag_chain(question) 135 | print(answer) 136 | 137 | # Append the question and answer to the results DataFrame 138 | results.loc[index] = [question, answer] 139 | 140 | # Save the results DataFrame to a new CSV file 141 | results.to_csv('testset_question_answer.csv', index=False) 142 | 143 | 144 | 145 | -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/langchain_local/testset_question_answer.csv: -------------------------------------------------------------------------------- 1 | question,answer 2 | What is the purpose of the Chain of Hindsight framework in improving model outputs?,"The purpose of the Chain of Hindsight (CoH) framework is to improve model outputs by presenting the model with a sequence of its past outputs which have been annotated with feedback. The model is then fine-tuned to predict the best output based on this feedback sequence, allowing it to self-reflect and produce better results. The idea is to train the model to recognize and follow a trend of sequentially improved outputs, leading to incremental improvements in performance." 3 | How does the API search engine assist in the API-Bank workflow?,"The API search engine assists in the API-Bank workflow by helping the Language Learning Model (LLM) find the appropriate API to call. Given a user's requirement, the LLM uses the search engine to locate potential APIs that may provide a solution. Once the right API is identified, the LLM uses the corresponding documentation to correctly make the API call." 4 | What is a common limitation of LLM-centered agents regarding context length?,"A common limitation of LLM-centered agents is their finite context length. This restricted context capacity limits the inclusion of historical information, detailed instructions, API call context, and responses. This limited communication bandwidth necessitates a system design that can work within these confines, even though longer or infinite context windows would benefit learning mechanisms like self-reflection." 5 | What are the risks of LLMs evaluating their own performance in complex domains?,"The risks of LLMs evaluating their own performance in complex domains include a potential lack of deep expertise that might cause them to be unaware of their own shortcomings, leading to an inaccurate judgment of task results. Furthermore, LLMs struggle with long-term planning and task decomposition, and may face difficulties adjusting plans when unexpected errors occur. 
Their finite context length also limits their ability to include historical information and detailed instructions, which can hinder self-improvement through self-reflection." 6 | What is the role of the heuristic function in the Reflexion framework?,"In the Reflexion framework, the heuristic function is responsible for determining when a trajectory is inefficient or contains hallucination, prompting it to be stopped. Inefficiency in planning refers to situations where trajectories take too long without success. Hallucination is defined as a sequence of consecutive identical actions that lead to the same observation in the environment." 7 | -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/.promptflow/groundedness/flow.tools.json: -------------------------------------------------------------------------------- 1 | { 2 | "package": {}, 3 | "code": { 4 | "groundedness.prompty": { 5 | "type": "llm", 6 | "inputs": { 7 | "context": { 8 | "type": [ 9 | "string" 10 | ] 11 | }, 12 | "answer": { 13 | "type": [ 14 | "string" 15 | ] 16 | } 17 | } 18 | } 19 | } 20 | } -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/.promptflow/similarity/flow.tools.json: -------------------------------------------------------------------------------- 1 | { 2 | "package": {}, 3 | "code": { 4 | "similarity.prompty": { 5 | "type": "llm", 6 | "inputs": { 7 | "question": { 8 | "type": [ 9 | "string" 10 | ] 11 | }, 12 | "ground_truth": { 13 | "type": [ 14 | "string" 15 | ] 16 | }, 17 | "answer": { 18 | "type": [ 19 | "string" 20 | ] 21 | } 22 | } 23 | } 24 | } 25 | } -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/__pycache__/eval_paths.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/__pycache__/eval_paths.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/__pycache__/rag_flexflow.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/__pycache__/rag_flexflow.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/__pycache__/rag_paths.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/__pycache__/rag_paths.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/chat_ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/chat_ui.png -------------------------------------------------------------------------------- 
/sample_gallery/local_to_cloud/local_to_azure/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "subscription_id": "ee85ed72-2b26-48f6-a0e8-cb5bcf98fbd9", 3 | "resource_group": "pipeline-pm", 4 | "workspace_name": "yijun-demo" 5 | } -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/eval/.promptflow/groundedness/flow.tools.json: -------------------------------------------------------------------------------- 1 | { 2 | "package": {}, 3 | "code": { 4 | "groundedness.prompty": { 5 | "type": "llm", 6 | "inputs": { 7 | "context": { 8 | "type": [ 9 | "string" 10 | ] 11 | }, 12 | "answer": { 13 | "type": [ 14 | "string" 15 | ] 16 | } 17 | } 18 | } 19 | } 20 | } -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/eval/__pycache__/eval.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/eval/__pycache__/eval.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/eval/__pycache__/evaluate.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/eval/__pycache__/evaluate.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/eval/__pycache__/rag_paths.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/eval/__pycache__/rag_paths.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/eval/flow.flex.yaml: -------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json 2 | entry: evaluate:QnAEvaluator 3 | environment: 4 | python_requirements_txt: requirements.txt 5 | environment_variables: 6 | AZURE_OPENAI_API_KEY: ${yijun-aoai.api_key} 7 | AZURE_OPENAI_ENDPOINT: ${yijun-aoai.api_base} 8 | AZURE_OPENAI_API_TYPE: azure 9 | AZURE_OPENAI_API_VERSION: 2024-02-15-preview 10 | AZURE_OPENAI_CHAT_DEPLOYMENT: gpt-4-32k 11 | AZURE_OPENAI_EMBEDDING_DEPLOYMENT: text-embedding-ada-002 12 | -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/eval/groundedness.prompty: -------------------------------------------------------------------------------- 1 | --- 2 | name: Evaluate QnA groundedness 3 | description: Evaluate the groundedness of a QnA application 4 | model: 5 | api: chat 6 | configuration: 7 | type: azure_openai 8 | azure_deployment: gpt-4-32k 9 | parameters: 10 | temperature: 0.2 11 | response_format: 12 | type: text 13 | inputs: 14 | context: 15 | type: string 16 | answer: 17 | type: string 18 | sample: sample.json 19 | --- 20 | 21 | # system: 22 | You are an AI assistant. 
You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric. 23 | # user: 24 | You will be presented with a CONTEXT and an ANSWER about that CONTEXT. You need to decide whether the ANSWER is entailed by the CONTEXT by choosing one of the following ratings: 25 | 1. 5: The ANSWER follows logically from the information contained in the CONTEXT. 26 | 2. 1: The ANSWER is logically false from the information contained in the CONTEXT. 27 | 3. an integer score between 1 and 5, and if such an integer score does not exist, use 1: It is not possible to determine whether the ANSWER is true or false without further information. 28 | 29 | Read the passage of information thoroughly and select the correct answer from the three answer labels. Read the CONTEXT thoroughly to ensure you know what the CONTEXT entails. 30 | 31 | Note that the ANSWER is generated by a computer system; it can contain certain symbols, which should not be a negative factor in the evaluation. 32 | Independent Examples: 33 | ## Example Task #1 Input: 34 | {"CONTEXT": "The Academy Awards, also known as the Oscars are awards for artistic and technical merit for the film industry. They are presented annually by the Academy of Motion Picture Arts and Sciences, in recognition of excellence in cinematic achievements as assessed by the Academy's voting membership. The Academy Awards are regarded by many as the most prestigious, significant awards in the entertainment industry in the United States and worldwide.", "ANSWER": "Oscar is presented every other two years"} 35 | ## Example Task #1 Output: 36 | 1 37 | ## Example Task #2 Input: 38 | {"CONTEXT": "The Academy Awards, also known as the Oscars are awards for artistic and technical merit for the film industry. They are presented annually by the Academy of Motion Picture Arts and Sciences, in recognition of excellence in cinematic achievements as assessed by the Academy's voting membership. The Academy Awards are regarded by many as the most prestigious, significant awards in the entertainment industry in the United States and worldwide.", "ANSWER": "Oscar is very important awards in the entertainment industry in the United States. And it's also significant worldwide"} 39 | ## Example Task #2 Output: 40 | 5 41 | ## Example Task #3 Input: 42 | {"CONTEXT": "In Quebec, an allophone is a resident, usually an immigrant, whose mother tongue or home language is neither French nor English.", "ANSWER": "In Quebec, an allophone is a resident, usually an immigrant, whose mother tongue or home language is not French."} 43 | ## Example Task #3 Output: 44 | 5 45 | ## Example Task #4 Input: 46 | {"CONTEXT": "Some are reported as not having been wanted at all.", "ANSWER": "All are reported as being completely and fully wanted."} 47 | ## Example Task #4 Output: 48 | 1 49 | 50 | Reminder: The return values for each task should be correctly formatted as an integer between 1 and 5. Do not repeat the context.
51 | 52 | ## Actual Task Input: 53 | {"CONTEXT": {{context}}, "ANSWER": {{answer}}} 54 | 55 | Actual Task Output: -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/eval/init.json: -------------------------------------------------------------------------------- 1 | { 2 |     "model_config": { 3 |         "connection": "yijun-aoai", 4 |         "azure_deployment": "gpt-4-32k" 5 |     } 6 | } -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/eval/rag_paths.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import pathlib 3 | 4 | # Add the RAG module directory to the import path 5 | code_path = str(pathlib.Path(__file__).parent / "../rag") 6 | sys.path.insert(0, code_path) -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/eval/requirements.txt: -------------------------------------------------------------------------------- 1 | langchain-openai==0.1.3 2 | chromadb==0.4.24 3 | langchainhub==0.1.15 -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/eval/similarity.prompty: -------------------------------------------------------------------------------- 1 | --- 2 | name: Evaluate QnA similarity 3 | description: Evaluate the similarity between the predicted answer and the ground truth in a QnA application 4 | model: 5 |   api: chat 6 |   configuration: 7 |     type: azure_openai 8 |     azure_deployment: gpt-4-32k 9 |   parameters: 10 |     temperature: 0.2 11 |     response_format: 12 |       type: text 13 | inputs: 14 |   question: 15 |     type: string 16 |   ground_truth: 17 |     type: string 18 |   answer: 19 |     type: string 20 | sample: sample.json 21 | --- 22 | 23 | # system: 24 | You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric. 25 | 26 | # user: 27 | Equivalence, as a metric, measures the similarity between the predicted answer and the correct answer. If the information and content in the predicted answer is similar or equivalent to the correct answer, then the value of the Equivalence metric should be high, else it should be low. Given the question, correct answer, and predicted answer, determine the value of Equivalence metric using the following rating scale: 28 | One star: the predicted answer is not at all similar to the correct answer 29 | Two stars: the predicted answer is mostly not similar to the correct answer 30 | Three stars: the predicted answer is somewhat similar to the correct answer 31 | Four stars: the predicted answer is mostly similar to the correct answer 32 | Five stars: the predicted answer is completely similar to the correct answer 33 | 34 | This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5. 35 | 36 | The examples below show the Equivalence score for a question, a correct answer, and a predicted answer. 37 | 38 | question: What is the role of ribosomes? 39 | correct answer: Ribosomes are cellular structures responsible for protein synthesis. They interpret the genetic information carried by messenger RNA (mRNA) and use it to assemble amino acids into proteins.
40 | predicted answer: Ribosomes participate in carbohydrate breakdown by removing nutrients from complex sugar molecules. 41 | stars: 1 42 | 43 | question: Why did the Titanic sink? 44 | correct answer: The Titanic sank after it struck an iceberg during its maiden voyage in 1912. The impact caused the ship's hull to breach, allowing water to flood into the vessel. The ship's design, lifeboat shortage, and lack of timely rescue efforts contributed to the tragic loss of life. 45 | predicted answer: The sinking of the Titanic was a result of a large iceberg collision. This caused the ship to take on water and eventually sink, leading to the death of many passengers due to a shortage of lifeboats and insufficient rescue attempts. 46 | stars: 2 47 | 48 | question: What causes seasons on Earth? 49 | correct answer: Seasons on Earth are caused by the tilt of the Earth's axis and its revolution around the Sun. As the Earth orbits the Sun, the tilt causes different parts of the planet to receive varying amounts of sunlight, resulting in changes in temperature and weather patterns. 50 | predicted answer: Seasons occur because of the Earth's rotation and its elliptical orbit around the Sun. The tilt of the Earth's axis causes regions to be subjected to different sunlight intensities, which leads to temperature fluctuations and alternating weather conditions. 51 | stars: 3 52 | 53 | question: How does photosynthesis work? 54 | correct answer: Photosynthesis is a process by which green plants and some other organisms convert light energy into chemical energy. This occurs as light is absorbed by chlorophyll molecules, and then carbon dioxide and water are converted into glucose and oxygen through a series of reactions. 55 | predicted answer: In photosynthesis, sunlight is transformed into nutrients by plants and certain microorganisms. Light is captured by chlorophyll molecules, followed by the conversion of carbon dioxide and water into sugar and oxygen through multiple reactions. 56 | stars: 4 57 | 58 | question: What are the health benefits of regular exercise? 59 | correct answer: Regular exercise can help maintain a healthy weight, increase muscle and bone strength, and reduce the risk of chronic diseases. It also promotes mental well-being by reducing stress and improving overall mood. 60 | predicted answer: Routine physical activity can contribute to maintaining ideal body weight, enhancing muscle and bone strength, and preventing chronic illnesses. In addition, it supports mental health by alleviating stress and augmenting general mood. 
61 | stars: 5 62 | 63 | question: {{question}} 64 | correct answer: {{ground_truth}} 65 | predicted answer: {{answer}} 66 | stars: -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/eval_paths.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import pathlib 3 | 4 | # Add the evaluation module directory to the import path 5 | code_path = str(pathlib.Path(__file__).parent / "eval") 6 | sys.path.insert(0, code_path) -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/.promptflow/flow.log: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/rag/.promptflow/flow.log -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/README.md: -------------------------------------------------------------------------------- 1 | # Trace your application 2 | 3 | ## Prerequisite: set up the environment 4 | 5 | **Install VS Code on your local machine** 6 | 7 | Install the required packages from [requirements.txt](./requirements.txt) by running the following command: 8 | 9 | ```bash 10 | pip install -r requirements.txt 11 | 12 | ``` 13 | 14 | Set up your LLM key in a `.env` file. Create a `.env` file in the [rag](./rag/) directory of the project and add the following content: 15 | 16 | ```bash 17 | AZURE_OPENAI_API_KEY= 18 | AZURE_OPENAI_ENDPOINT= 19 | AZURE_OPENAI_EMBEDDING_DEPLOYMENT= 20 | AZURE_OPENAI_API_VERSION= 21 | AZURE_OPENAI_CHAT_DEPLOYMENT= 22 | ``` 23 | 24 | ## Test your application and view trace 25 | 26 | To enable tracing, use the `promptflow.tracing` library and add the `@trace` decorator to the functions you want to trace. More details can be found in the [entry file](./rag_flexflow.py). 27 | 28 | Then you can run the script directly with the following command: 29 | 30 | ```bash 31 | python rag_flexflow.py 32 | ``` 33 | 34 | After the script starts running, you will see the following output in your terminal, which means the service has started successfully. You can view the traces in your browser by clicking the link provided in the output: 35 | 36 | ```bash 37 | Prompt flow service has started... 38 | You can view the traces in local from http://localhost:... 39 | ``` 40 | 41 | ## Chat with your application 42 | 43 | Flex flow is a concept in prompt flow aimed at engineers; the core idea is to build a flow from functions. 44 | 45 | To chat with your application, you should convert it into a flex flow, which means creating a [flow.flex.yaml](./flow.flex.yaml) file that specifies the entry file and entry function. 46 | 47 | Then you can chat with your application by running the following command, which will create a yaml file for you automatically: 48 | 49 | ```bash 50 | pf flow test --flow rag_flexflow:generate --ui 51 | ``` 52 | 53 | Then you will be able to access the chat interface in your browser. 54 | 55 | 1. Set **Chat input** to the input parameter of your function, for example `question`. 56 | 2. Set **Chat history** to the chat history parameter, for example `chat_history`. 57 | 58 | ![chat ui](../chat_ui.png) 59 | 60 | Now you can chat with your application by sending the input to the chat interface.
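For reference, the chat UI expects the flex flow entry to accept the chat input and chat history as parameters. Below is a condensed sketch of the `generate` entry from [rag_flexflow.py](./rag_flexflow.py); the chain-building details are elided here:

```python
from typing import TypedDict

class Result(TypedDict):
    output: str

def generate(question: str = "What is Task Decomposition?", chat_history: list = []) -> Result:
    rag_chain = rag_chain_init()         # build retriever + prompt + chat model
    output = rag_chain.invoke(question)  # answer the incoming chat input
    return Result(output=output)
```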
61 | 62 | ## Trace your application in Azure AI Studio 63 | 64 | Set trace destination to Azure AI project by running the following command: 65 | 66 | ```bash 67 | az login 68 | ``` 69 | 70 | ```bash 71 | pf config set trace.destination="azureml://subscriptions//resourceGroups//providers/Microsoft.MachineLearningServices/workspaces/" 72 | ``` 73 | 74 | Then re-run your application, and you will be able to see another link in the output, which is the link to the trace in Azure AI Studio. 75 | 76 | ```bash 77 | You can view the traces in cloud from Azure portal: https://ai.azure.com/projectflows/... 78 | ``` 79 | -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/__pycache__/paths.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/rag/__pycache__/paths.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/__pycache__/rag_flexflow.cpython-39.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/rag/__pycache__/rag_flexflow.cpython-39.pyc -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/chroma_db/a2d49070-88e6-4933-8b77-7a1f8f15aa2b/header.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/rag/chroma_db/a2d49070-88e6-4933-8b77-7a1f8f15aa2b/header.bin -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/chroma_db/a2d49070-88e6-4933-8b77-7a1f8f15aa2b/length.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/rag/chroma_db/a2d49070-88e6-4933-8b77-7a1f8f15aa2b/length.bin -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/chroma_db/a2d49070-88e6-4933-8b77-7a1f8f15aa2b/link_lists.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/rag/chroma_db/a2d49070-88e6-4933-8b77-7a1f8f15aa2b/link_lists.bin -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/chroma_db/chroma.sqlite3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/local_to_cloud/local_to_azure/rag/chroma_db/chroma.sqlite3 -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/flow.flex.yaml: 
-------------------------------------------------------------------------------- 1 | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json 2 | entry: rag_flexflow:generate 3 | environment: 4 | python_requirements_txt: requirements.txt 5 | environment_variables: 6 | AZURE_OPENAI_API_KEY: ${yijun-aoai.api_key} 7 | AZURE_OPENAI_ENDPOINT: ${yijun-aoai.api_base} 8 | AZURE_OPENAI_API_TYPE: azure 9 | AZURE_OPENAI_API_VERSION: 2024-02-15-preview 10 | AZURE_OPENAI_CHAT_DEPLOYMENT: gpt-4-32k 11 | AZURE_OPENAI_EMBEDDING_DEPLOYMENT: text-embedding-ada-002 12 | -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/prompt.json: -------------------------------------------------------------------------------- 1 | { 2 | "system": "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum and keep the answer as concise as possible.Always say \"thanks for asking!\" at the end of the answer.{context}", 3 | "user":"Question: {question} \n Helpful Answer:" 4 | } 5 | -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/rag_flexflow.py: -------------------------------------------------------------------------------- 1 | import bs4 2 | from langchain import hub 3 | from langchain_community.document_loaders import WebBaseLoader 4 | from langchain_community.vectorstores import Chroma 5 | from langchain_core.output_parsers import StrOutputParser 6 | from langchain_core.runnables import RunnablePassthrough 7 | from langchain_core.prompts import ChatPromptTemplate 8 | 9 | from langchain_text_splitters import RecursiveCharacterTextSplitter 10 | 11 | from langchain_openai import AzureChatOpenAI 12 | from langchain_openai import AzureOpenAIEmbeddings 13 | 14 | from langchain_core.messages import HumanMessage 15 | from promptflow.tracing import start_trace 16 | 17 | from dotenv import load_dotenv 18 | import os 19 | import json 20 | 21 | # Add tracing 22 | from promptflow.tracing import trace 23 | 24 | # To define output result class 25 | from typing import TypedDict 26 | class Result(TypedDict): 27 | output: str 28 | 29 | # Load the .env file 30 | load_dotenv() 31 | 32 | # Get an environment variable 33 | endpoint = os.getenv('AZURE_OPENAI_ENDPOINT') 34 | key = os.getenv('AZURE_OPENAI_API_KEY') 35 | api_version = os.getenv('AZURE_OPENAI_API_VERSION') 36 | embedding_deployment = os.getenv('AZURE_OPENAI_EMBEDDING_DEPLOYMENT') 37 | chat_deployment = os.getenv('AZURE_OPENAI_CHAT_DEPLOYMENT') 38 | 39 | @trace 40 | def format_docs(docs): 41 | return "\n\n".join(doc.page_content for doc in docs) 42 | 43 | @trace 44 | def retrieve_docs(vector_store:Chroma, search_type="similarity", k = 6): 45 | return vector_store.as_retriever(search_type=search_type, search_kwargs={"k": k}) 46 | 47 | @trace 48 | def rag_chain_init(directory: str = "./chroma_db"): 49 | print(f"directory:", directory) 50 | 51 | embedding=AzureOpenAIEmbeddings( 52 | azure_deployment=embedding_deployment, 53 | openai_api_version=api_version,) 54 | 55 | if os.path.exists(directory): 56 | print(f"Index exists at {directory}, loading") 57 | # if the directory exists, load the index 58 | vector_store = Chroma(persist_directory=directory, embedding_function=embedding) 59 | 60 | else: 61 | print(f"Index does not exist at {directory}, building") 62 | # if the 
directory does not exist, create the index and save it 63 | 64 |         # Load, chunk and index the contents of the blog. 65 |         loader = WebBaseLoader( 66 |             web_paths=("https://lilianweng.github.io/posts/2023-06-23-agent/",), 67 |             bs_kwargs=dict( 68 |                 parse_only=bs4.SoupStrainer( 69 |                     class_=("post-content", "post-title", "post-header") 70 |                 ) 71 |             ), 72 |         ) 73 |         docs = loader.load() 74 | 75 |         text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200) 76 |         splits = text_splitter.split_documents(docs) 77 | 78 |         vector_store = Chroma.from_documents(documents=splits, embedding=embedding, persist_directory=directory) 79 | 80 |     chat = AzureChatOpenAI(azure_deployment=chat_deployment,openai_api_version=api_version,) 81 | 82 |     # Retrieve and generate using the relevant snippets of the blog. 83 |     retriever = retrieve_docs(vector_store=vector_store,search_type="similarity", k = 6) 84 | 85 |     with open("./prompt.json", "r") as f: 86 |         prompt_text = json.load(f) 87 | 88 |     system_prompt = prompt_text["system"] 89 |     user_prompt = prompt_text["user"] 90 | 91 |     prompt = ChatPromptTemplate.from_messages( 92 |         [ 93 |             ("system", system_prompt), 94 |             ("human", user_prompt), 95 |         ] 96 |     ) 97 | 98 |     #prompt = PromptTemplate.from_template(prompt_text) 99 | 100 |     rag_chain = ( 101 |         {"context": retriever | format_docs, "question": RunnablePassthrough()} 102 |         | prompt 103 |         | chat 104 |         | StrOutputParser() 105 |     ) 106 | 107 |     return rag_chain 108 | 109 | 110 | @trace 111 | def generate(question: str = "What is Task Decomposition?", chat_history: list =[]) -> Result: 112 | 113 |     print("question:", question) 114 | 115 |     rag_chain = rag_chain_init() 116 | 117 |     output = rag_chain.invoke(question) 118 |     print(output) 119 |     return Result(output=output) 120 | 121 | 122 | if __name__ == "__main__": 123 | 124 | 125 |     start_trace() 126 | 127 |     result = generate("What is Task Decomposition?") 128 |     print(result) 129 | -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag/requirements.txt: -------------------------------------------------------------------------------- 1 | langchain-openai==0.1.3 2 | chromadb==0.4.24 3 | python-dotenv 4 | promptflow[azure] 5 | bs4 6 | langchain 7 | langchain-community 8 | langchainhub 9 | -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/rag_paths.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import pathlib 3 | 4 | # Add the RAG module directory to the import path 5 | code_path = str(pathlib.Path(__file__).parent / "rag") 6 | sys.path.insert(0, code_path) -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/sdk_azure.py: -------------------------------------------------------------------------------- 1 | import bs4 2 | from langchain import hub 3 | from langchain_community.document_loaders import WebBaseLoader 4 | from langchain_community.vectorstores import Chroma 5 | from langchain_core.output_parsers import StrOutputParser 6 | from langchain_core.runnables import RunnablePassthrough 7 | 8 | from langchain_text_splitters import RecursiveCharacterTextSplitter 9 | 10 | 11 | from langchain_openai import AzureChatOpenAI 12 | from langchain_openai import AzureOpenAIEmbeddings 13 | 14 | from langchain_core.messages import HumanMessage 15 | 16 | 17 | from dotenv import load_dotenv 18 | import os
19 | 20 | # Add tracing 21 | from promptflow.tracing import trace 22 | 23 | 24 | 25 | import json 26 | 27 | 28 | if __name__ == "__main__": 29 |     from promptflow.tracing import start_trace 30 |     from promptflow.core import AzureOpenAIModelConfiguration 31 | 32 |     start_trace() 33 | 34 | 35 | 36 |     from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential 37 | 38 |     try: 39 |         credential = DefaultAzureCredential() 40 |         # Check if given credential can get token successfully. 41 |         credential.get_token("https://management.azure.com/.default") 42 |     except Exception as ex: 43 |         # Fall back to InteractiveBrowserCredential in case DefaultAzureCredential does not work 44 |         credential = InteractiveBrowserCredential() 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 |     from promptflow.azure import PFClient 53 | 54 |     # Get a handle to workspace 55 |     pf = PFClient.from_config(credential=credential) 56 | 57 | 58 | 59 |     # Batch run and eval 60 |     data = "testset_clean.csv"  # path to the data file 61 | 62 | 63 |     rag_flow = "rag/flow.flex.yaml" 64 |     eval_flow = "eval/flow.flex.yaml" 65 |     # create run with the flow function and data 66 |     base_run = pf.run( 67 |         flow=rag_flow, 68 |         data=data, 69 |         column_mapping={ 70 |             "question": "${data.question}", 71 |             "directory": "chroma_db", 72 |         }, 73 |         environment_variables={ 74 |             "AZURE_OPENAI_API_KEY": "${yijun-aoai.api_key}", 75 |             "AZURE_OPENAI_ENDPOINT": "${yijun-aoai.api_base}", 76 |         }, 77 |         stream=True, 78 |     ) 79 |     details = pf.get_details(base_run) 80 |     details.head(10) 81 | 82 | 83 | 84 |     # run the flow with existing run 85 |     model_config = AzureOpenAIModelConfiguration( 86 |         connection="yijun-aoai", 87 |         azure_deployment="gpt-4-32k", 88 |     ) 89 |     eval_run = pf.run( 90 |         flow=eval_flow, 91 |         init={"model_config": model_config}, 92 |         data=data, 93 |         run=base_run, 94 |         column_mapping={  # map fields from the data and the base run to the eval flow inputs 95 |             "question": "${data.question}", 96 |             "context": "${data.context}", 97 |             "groundtruth": "${data.ground_truth}", 98 |             "answer": "${run.outputs.output}", 99 |         }, 100 |         stream=True, 101 |     ) 102 | 103 | 104 | 105 |     # get the inputs/outputs details of a finished run.
106 |     details = pf.get_details(eval_run) 107 |     details.head(10) 108 | 109 |     # view the metrics of the eval run 110 |     metrics = pf.get_metrics(eval_run) 111 |     print(json.dumps(metrics, indent=4)) 112 | 113 |     # visualize both the base run and the eval run 114 |     # pf.visualize([base_run, eval_run]) 115 |     pf.visualize([base_run]) 116 | 117 | 118 | 119 | -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/local_to_azure/sdk_local.py: -------------------------------------------------------------------------------- 1 | import bs4 2 | from langchain import hub 3 | from langchain_community.document_loaders import WebBaseLoader 4 | from langchain_community.vectorstores import Chroma 5 | from langchain_core.output_parsers import StrOutputParser 6 | from langchain_core.runnables import RunnablePassthrough 7 | 8 | from langchain_text_splitters import RecursiveCharacterTextSplitter 9 | 10 | 11 | from langchain_openai import AzureChatOpenAI 12 | from langchain_openai import AzureOpenAIEmbeddings 13 | 14 | from langchain_core.messages import HumanMessage 15 | 16 | 17 | from dotenv import load_dotenv 18 | import os 19 | 20 | # Add tracing 21 | from promptflow.tracing import trace 22 | 23 | 24 | 25 | import json 26 | 27 | 28 | if __name__ == "__main__": 29 |     from promptflow.tracing import start_trace 30 |     from promptflow.core import AzureOpenAIModelConfiguration 31 | 32 |     start_trace() 33 | 34 |     # Batch run and eval 35 | 36 | 37 |     from promptflow.client import PFClient 38 | 39 |     pf = PFClient() 40 |     data = "testset_clean.csv"  # path to the data file 41 | 42 | 43 |     rag_flow = "rag/flow.flex.yaml" 44 |     eval_flow = "eval/flow.flex.yaml" 45 |     # create run with the flow function and data 46 |     base_run = pf.run( 47 |         flow=rag_flow, 48 |         data=data, 49 |         column_mapping={ 50 |             "question": "${data.question}", 51 |             "directory": "chroma_db", 52 |         }, 53 |         stream=True, 54 |     ) 55 |     details = pf.get_details(base_run) 56 |     details.head(10) 57 | 58 | 59 | 60 |     # run the flow with existing run 61 |     model_config = AzureOpenAIModelConfiguration( 62 |         connection="yijun-aoai", 63 |         azure_deployment="gpt-4-32k", 64 |     ) 65 |     eval_run = pf.run( 66 |         flow=eval_flow, 67 |         init={"model_config": model_config}, 68 |         data=data, 69 |         run=base_run, 70 |         column_mapping={  # map fields from the data and the base run to the eval flow inputs 71 |             "question": "${data.question}", 72 |             "context": "${data.context}", 73 |             "groundtruth": "${data.ground_truth}", 74 |             "answer": "${run.outputs.output}", 75 |         }, 76 |         stream=True, 77 |     ) 78 | 79 | 80 | 81 |     # get the inputs/outputs details of a finished run. 82 |     details = pf.get_details(eval_run) 83 |     details.head(10) 84 | 85 |     # view the metrics of the eval run 86 |     metrics = pf.get_metrics(eval_run) 87 |     print(json.dumps(metrics, indent=4)) 88 | 89 |     # visualize both the base run and the eval run 90 |     # pf.visualize([base_run, eval_run]) 91 |     pf.visualize([base_run]) 92 | 93 | 94 | 95 | -------------------------------------------------------------------------------- /sample_gallery/local_to_cloud/readme.md: -------------------------------------------------------------------------------- 1 | # Bring your local application to cloud 2 | 3 | Building a POC (proof of concept) is easy: with the help of the many frameworks and tools on the market, you can build your LLM application in as little as one day. But once you want to bring your application from POC to production, there are some things you must consider: 4 | 5 | * Quality evaluation.
6 | * Collaboration.
7 | * Security.
8 | * Enterprise readiness.
9 | * LLMOps.
10 | 
11 | That's where a cloud platform comes in. In this tutorial, we walk you through how to bring your proof-of-concept local LLM application to the cloud.
12 | 
13 | 
14 | ## Prerequisite: a local POC
15 | 
16 | Suppose your application is a RAG scenario, developed locally with LangChain.
17 | 
18 | Run the [notebook](./example2/poc_single_web.ipynb) to build the POC RAG app.
19 | 
20 | 
21 | 
22 | 
23 | Note: If your local app is already developed with prompt flow (a DAG flow with a dag.yaml file), you can skip step 1.
24 | 
25 | 
26 | ## Step 1: Convert to a flex flow
27 | 
28 | Flex flow is a concept in prompt flow aimed at engineers; the core idea is to build a flow from functions. Python and C# are supported. For a C# example, please refer to []().
29 | 
30 | 
31 | 
32 | Compared with a DAG flow, the benefits are:
33 | 
34 | * Easily bring your local code to the cloud: just add a few lines of code, then run in Azure.
35 | * Support for advanced orchestration. A DAG flow is a Directed Acyclic Graph, so it cannot contain cycles. If you need advanced orchestration such as if-else, switch, for-each, or do-while, it is simple to express in Python code.
36 | 
37 | Define a data class for the output.
38 | 
39 | ```python
40 | # Define the output result class
41 | from typing import TypedDict
42 | class Result(TypedDict):
43 |     output: str
44 | ```
45 | 
46 | Wrap your RAG chain into a function.
47 | 
48 | ```python
49 | def flow_entry(question: str = "What is Task Decomposition?") -> Result:
50 |     # Your RAG chain script here
51 | 
52 |     return Result(output=output)
53 | ```
54 | Then invoke it:
55 | ```python
56 | if __name__ == "__main__":
57 |     # Your "load data, chunk, build index" script here.
58 | 
59 |     # Then invoke the RAG chain.
60 |     result = flow_entry("What is Task Decomposition?")
61 | ```
62 | 
63 | Optionally, you can add tracing to help you debug.
64 | 
65 | ```python
66 | # Import tracing
67 | from promptflow.tracing import trace
68 | 
69 | # Add the trace decorator
70 | @trace
71 | def flow_entry(question: str = "What is Task Decomposition?") -> Result:
72 |     # Your RAG script here
73 | 
74 |     return Result(output=output)
75 | 
76 | 
77 | if __name__ == "__main__":
78 |     # Start tracing
79 |     from promptflow.tracing import start_trace
80 |     start_trace()
81 | 
82 |     # The rest of your code
83 | ```
84 | 
85 | 
86 | Refer to [rag-flexflow.py]() for the complete flex flow code.
87 | 
88 | By adding these few lines of code, you have converted a LangChain RAG chain into a flex flow in prompt flow. You can then evaluate the RAG application using prompt flow's built-in metrics, for example Groundedness and Relevance.
89 | 
90 | ## Evaluate
91 | 
92 | First, you need to prepare a golden dataset.
93 | 
94 | We provide a [golden dataset](./example2/testset.csv) to help you evaluate the RAG app. You can refer to this [guideline](../../golden_dataset/copilot-golden-dataset-creation-guidance.md) to learn more about how to create your own golden dataset.
95 | 
96 | 
97 | Create a flow.flex.yaml file to define a flow whose entry points to the Python function we defined; a sketch follows under the next heading.
98 | 
99 | 
100 | ## Submit to cloud
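
Both pieces are sketched below, based on the sdk_azure.py script in this folder. The module name `sdk_local` in the `entry` field is an assumption; point it at whichever file holds your `flow_entry` function.

```yaml
# flow.flex.yaml -- a minimal flex flow definition (entry module assumed)
entry: sdk_local:flow_entry
```

```python
# Submit the batch run to an Azure ML workspace instead of running it locally
# (mirrors sdk_azure.py in this folder).
from azure.identity import DefaultAzureCredential
from promptflow.azure import PFClient

credential = DefaultAzureCredential()
# from_config() reads the workspace details from a local config.json,
# downloadable from your Azure ML workspace's portal page.
pf = PFClient.from_config(credential=credential)

base_run = pf.run(
    flow="rag/flow.flex.yaml",
    data="testset_clean.csv",
    column_mapping={
        "question": "${data.question}",
        "directory": "chroma_db",
    },
    stream=True,
)
```

Compared with the local script (sdk_local.py), the only changes are importing `PFClient` from `promptflow.azure` instead of `promptflow.client` and passing a credential; the `pf.run(...)` call stays the same.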
--------------------------------------------------------------------------------
/sample_gallery/local_to_cloud/requirements.txt:
--------------------------------------------------------------------------------
1 | langchain
2 | langchain-community
3 | langchainhub
4 | langchain-openai
5 | chromadb
6 | bs4
7 | 
8 | python-dotenv
9 | promptflow
10 | ragas
11 | numpy
12 | unstructured
13 | markdown
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/README.MD:
--------------------------------------------------------------------------------
1 | # Natural Language to SQL Converter
2 | 
3 | NL2SQL (Natural Language to SQL) is a process that allows users to input queries in natural, human language and have them translated into SQL queries. This is particularly useful for users who may not be familiar with SQL syntax but need to retrieve data from a database.
4 | 
5 | In this sample, we show how to develop an NL2SQL flow and evaluate its performance.
6 | 
7 | ## Sample flows
8 | 
9 | | path                          | description                                                                                                                                     |
10 | | ----------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- |
11 | | [development](./development/) | This flow demonstrates how to create a Natural Language to SQL Converter that takes natural language as input and converts it into SQL queries. |
12 | | [evaluation](./evaluation/)   | This evaluation flow demonstrates how to evaluate the performance of the Natural Language to SQL converter.                                     |
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/development/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore .promptflow folder
2 | .promptflow
3 | 
4 | # Ignore __pycache__ folder
5 | __pycache__
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/development/flow.dag.yaml:
--------------------------------------------------------------------------------
1 | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
2 | environment:
3 |   python_requirements_txt: requirements.txt
4 | inputs:
5 |   user_query:
6 |     type: string
7 |     default: Show the CompanyName for all customers with an address in City 'Dallas'
8 |   schema_name:
9 |     type: string
10 |     default: SalesLT
11 | outputs:
12 |   output_sql:
13 |     type: string
14 |     reference: ${generate_sql_query.output}
15 |   output_validated_sql:
16 |     type: string
17 |     reference: ${validate_sql_query.output}
18 | nodes:
19 | - name: get_table_names
20 |   type: python
21 |   source:
22 |     type: code
23 |     path: get_table_names.py
24 |   inputs:
25 |     sqlconn: custom-sqlconn
26 |     schema_name: ${inputs.schema_name}
27 | - name: identify_table_llm
28 |   type: llm
29 |   source:
30 |     type: code
31 |     path: identify_table_llm.jinja2
32 |   inputs:
33 |     deployment_name: gpt-4
34 |     response_format:
35 |       type: text
36 |     user_query: ${inputs.user_query}
37 |     table_names: ${get_table_names.output}
38 |   connection: azure_openai_connection
39 |   api: chat
40 | - name: get_table_schema
41 |   type: python
42 |   source:
43 |     type: code
44 |     path: get_table_schema.py
45 |   inputs:
46 |     sqlconn: custom-sqlconn
47 |     tables: ${identify_table_llm.output}
48 |     schema_name: ${inputs.schema_name}
49 | - name: generate_sql_query
50 |   type: llm
51 |   source:
52 |     type: code
53 |     path: generate_sql_query.jinja2
54 |   inputs:
55 |     deployment_name: gpt-4
56 |     user_query: ${inputs.user_query}
57 |     response_format:
58 |       type: text
59 |     table_schemainfo: ${get_table_schema.output}
60 |   connection: azure_openai_connection
61 |   api: chat
62 | - name: validate_sql_query
63 |   type: llm
64 |   source:
65 |     type: code
66 |     path: validate_sql_query.jinja2
67 |   inputs:
68 |     deployment_name: gpt-4
69 |     response_format:
70 |       type: text
71 |     table_schemainfo: ${get_table_schema.output}
72 |     user_query: ${inputs.user_query}
73 |     sql_query: ${generate_sql_query.output}
74 |   connection: azure_openai_connection
75 |   api: chat
76 | 
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/development/generate_sql_query.jinja2:
--------------------------------------------------------------------------------
1 | 
2 | system:
3 | You are a SQL query expert. Given an input question, first create a syntactically correct T-SQL query to run on a SQL Server database.
4 | You can order the results to return the most informative data in the database. Never query for all columns from a table. You must query only the columns that are needed to answer the question.
5 | Use only the column names you can see in the tables below. Be careful not to query for columns that do not exist. Also, pay attention to which column is in which table.
6 | Only generate SELECT statements without any newline characters, and refuse to generate any INSERT, UPDATE, CREATE or DELETE statements.
7 | 
8 | Use the following format:
9 | 
10 | Question: Question here
11 | SQLQuery: SQL Query to run
12 | 
13 | Only use the following tables:
14 | {{table_schemainfo}}
15 | 
16 | user:
17 | Question: {{user_query}}
18 | SQLQuery:
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/development/get_table_names.py:
--------------------------------------------------------------------------------
1 | # ---------------------------------------------------------
2 | # Copyright (c) Microsoft Corporation. All rights reserved.
3 | # ---------------------------------------------------------
4 | 
5 | from promptflow import tool
6 | from promptflow.connections import CustomConnection
7 | from langchain.sql_database import SQLDatabase
8 | 
9 | # The inputs section will change based on the arguments of the tool function, after you save the code
10 | # Adding type to arguments and return value will help the system show the types properly
11 | # Please update the function name/signature per need
12 | 
13 | @tool
14 | def get_table_names(schema_name: str, sqlconn: CustomConnection) -> str:
15 | 
16 |     uri = "mssql+pyodbc:///?odbc_connect=Driver={ODBC Driver 17 for SQL Server};Server="+sqlconn.configs['Server_name']+";Database="+sqlconn.configs['Database_name']+";Uid="+sqlconn.configs['User_name']+";Pwd="+sqlconn.secrets['Password']+";Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;"
17 |     #print(uri)
18 |     db = SQLDatabase.from_uri(uri, schema=schema_name)
19 | 
20 |     return db.get_usable_table_names()
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/development/get_table_schema.py:
--------------------------------------------------------------------------------
1 | from promptflow import tool
2 | from promptflow.connections import CustomConnection
3 | from langchain.sql_database import SQLDatabase
4 | import ast
5 | 
6 | # The inputs section will change based on the arguments of the tool function, after you save the code
7 | # Adding type to arguments and return value will help the system show the types properly
8 | # Please update the function name/signature per need
9 | @tool
10 | def get_table_schema(tables: str, schema_name: str, sqlconn: CustomConnection) -> str:
11 | 
12 |     tables_list = ast.literal_eval(tables)
13 |     print(tables_list)
14 |     uri = "mssql+pyodbc:///?odbc_connect=Driver={ODBC Driver 17 for SQL Server};Server="+sqlconn.configs['Server_name']+";Database="+sqlconn.configs['Database_name']+";Uid="+sqlconn.configs['User_name']+";Pwd="+sqlconn.secrets['Password']+";Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;"
15 |     #print(uri)
16 |     db = SQLDatabase.from_uri(uri, schema=schema_name, include_tables=tables_list)
17 | 
18 |     return db.get_table_info_no_throw()
19 | 
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/development/identify_table_llm.jinja2:
--------------------------------------------------------------------------------
1 | 
2 | system:
3 | Return the names of ALL the SQL tables that MIGHT be relevant to the user question.
4 | 
5 | The tables are:
6 | 
7 | {{table_names}}
8 | 
9 | Remember to include ALL POTENTIALLY RELEVANT tables, even if you're not sure that they're needed.
10 | 
11 | user:
12 | Question: {{user_query}}
13 | Required Tables: 
14 | 
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/development/media/flow_run_sample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/nl2sql/development/media/flow_run_sample.png
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/development/requirements.txt:
--------------------------------------------------------------------------------
1 | promptflow
2 | promptflow-tools
3 | pyodbc == 5.0.1
4 | langchain
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/development/test-inputs.jsonl:
--------------------------------------------------------------------------------
1 | {"user_query": "Show the first name and the email address of customer with CompanyName 'Bike World'", "schema_name": "SalesLT"}
2 | {"user_query": "Give the CompanyName of those customers with orders over $100000. Include the subtotal plus tax plus freight.", "schema_name": "SalesLT"}
3 | {"user_query": "Show the SalesOrderID and the UnitPrice for every Single Item Order.", "schema_name": "SalesLT"}
4 | {"user_query": "Show the product description for culture 'fr' for product with ProductID 736.", "schema_name": "SalesLT"}
5 | {"user_query": "How many products in ProductCategory 'Cranksets' have been sold to an address in 'London'?", "schema_name": "SalesLT"}
6 | {"user_query": "For every customer with a 'Main Office' in Dallas show AddressLine1 of the 'Main Office' and AddressLine1 of the 'Shipping' address. If there is no shipping address leave it blank. Use one row per customer.", "schema_name": "SalesLT"}
7 | {"user_query": "Show the best selling item by value.", "schema_name": "SalesLT"}
8 | {"user_query": "Identify the three most important cities. Show the break down of top level product category against city.", "schema_name": "SalesLT"}
9 | {"user_query": "Find the best customer in each region.", "schema_name": "SalesLT"}
10 | {"user_query": "Show the total order value for each CountryRegion. List by value with the highest first.", "schema_name": "SalesLT"}
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/development/validate_sql_query.jinja2:
--------------------------------------------------------------------------------
1 | 
2 | system:
3 | You are a T-SQL query expert. Given an input question, the table schema and a T-SQL query, your job is to validate the T-SQL query for correctness and provide an explanation of the query. If required, modify the query to be a compliant T-SQL query.
4 | Pay attention to the column names used in the T-SQL query. Also, pay attention to which column is in which table.
5 | Ensure the query is only a SELECT statement. Reject any queries with CREATE, UPDATE or DELETE statements.
6 | Provide a detailed explanation of the logic behind generating the query, in T-SQL comment format. Use /* */ for comments. The explanation should always start with this disclaimer: "Disclaimer: Ensure you validate the query and use a read-only user role before executing."
7 | If the given query is correct and no corrections are needed, print the same query as ValidatedQuery. Always ensure you have output for ValidatedQuery.
8 | 
9 | Use the following format:
10 | 
11 | Question: Question here
12 | T-SQL Query: 
13 | ValidatedQuery: 
14 | Explanation: Detailed explanation here
15 | 
16 | Only use the following tables:
17 | {{table_schemainfo}}
18 | 
19 | user:
20 | Question: {{user_query}}
21 | T-SQL Query: {{sql_query}}
22 | ValidatedQuery: 
23 | Explanation: 
24 | 
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/evaluation/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore .promptflow folder
2 | .promptflow
3 | 
4 | # Ignore __pycache__ folder
5 | __pycache__
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/evaluation/Extract_score.py:
--------------------------------------------------------------------------------
1 | from promptflow import tool
2 | import re
3 | 
4 | 
5 | @tool
6 | def extract_score(llms_output: str) -> float:
7 |     # find the first integer (0-5) in the string (llms_output) using re
8 |     match = re.findall(r"\b[0-5]\b", llms_output)
9 |     print("match", match)
10 |     if not match:
11 |         return 0.0
12 |     else:
13 |         return int(match[0]) / 5
14 | 
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/evaluation/Query.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import Tuple, List, Any
3 | from promptflow import tool
4 | from promptflow.connections import CustomConnection
5 | import pyodbc
6 | 
7 | 
8 | @tool
9 | def query_to_database(
10 |     sql_groundtruth: str,
11 |     sql_generated: str,
12 |     sqlconn: CustomConnection,
13 | ) -> Tuple[List[tuple], List[tuple], bool]:
14 |     result_groundtruth = asyncio.run(get_values(sql_groundtruth, sqlconn))
15 |     result_generated = asyncio.run(get_values(sql_generated, sqlconn))
16 |     value_groundtruth = result_groundtruth[1]
17 |     value_generated = result_generated[1]
18 | 
19 |     # if sql_groundtruth includes "order by", set order_by to True, else set it to False
20 |     order_by = "order by" in sql_groundtruth.lower()
21 | 
22 |     return value_groundtruth, value_generated, order_by
23 | 
24 | 
25 | async def get_values(query: str, sqlconn: CustomConnection) -> Tuple[str, Any]:
26 |     print("connecting to database")
27 |     with pyodbc.connect(
28 |         "Driver={ODBC Driver 17 for SQL Server};Server="
29 |         + sqlconn.configs["Server_name"]
30 |         + ";Database="
31 |         + sqlconn.configs["Database_name"]
32 |         + ";Uid="
33 |         + sqlconn.configs["User_name"]
34 |         + ";Pwd="
35 |         + sqlconn.secrets["Password"]
36 |         + ";Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;"
37 |     ) as conn:
38 |         with conn.cursor() as cursor:
39 |             cursor.execute(query)
40 |             rows = cursor.fetchall()
41 |             results = list(tuple(sorted(row, key=lambda x: str(x))) for row in rows)
42 |             if results is None:
43 |                 return "None", None
44 |             else:
45 |                 return "Succeed", results
46 | 
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/evaluation/Similarity_score.py:
--------------------------------------------------------------------------------
1 | from promptflow import tool
2 | import numpy as np
3 | from numpy.linalg import norm
4 | 
5 | 
6 | @tool
7 | def compute_cosine_similarity(
8 |     groundtruth_embedding, generated_embedding
9 | ) -> float:
10 |     return np.dot(groundtruth_embedding, generated_embedding) / (
11 |         norm(groundtruth_embedding) * norm(generated_embedding)
12 |     )
13 | 
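
A quick usage sketch of the cosine helper above, with hypothetical 3-dimensional vectors (real text-embedding-ada-002 embeddings have 1536 dimensions):

```python
import numpy as np

from Similarity_score import compute_cosine_similarity

a = np.array([0.1, 0.2, 0.3])  # hypothetical embedding vectors
b = np.array([0.3, 0.2, 0.1])
print(compute_cosine_similarity(a, a))  # 1.0 for identical vectors
print(compute_cosine_similarity(a, b))  # ~0.71 here; lower for less similar vectors
```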
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/evaluation/Vector_similarity.py:
--------------------------------------------------------------------------------
1 | from promptflow import tool
2 | import numpy as np
3 | from numpy.linalg import norm
4 | 
5 | 
6 | @tool
7 | def compute_cosine_similarity(
8 |     groundtruth_embedding, generated_embedding
9 | ) -> float:
10 |     return np.dot(groundtruth_embedding, generated_embedding) / (
11 |         norm(groundtruth_embedding) * norm(generated_embedding)
12 |     )
13 | 
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/evaluation/aggregate.py:
--------------------------------------------------------------------------------
1 | from promptflow import tool
2 | from typing import List, Tuple
3 | 
4 | 
5 | @tool
6 | def aggregation(
7 |     exact_string_match: List[int],
8 |     execution_accuracy: List[float],
9 |     llms_score: List[float],
10 |     vector_similarity: List[float],
11 | ) -> Tuple[float, float, float, float]:
12 | 
13 |     print("exact_string_match: ", exact_string_match)
14 |     print("execution_accuracy: ", execution_accuracy)
15 |     print("llms_score: ", llms_score)
16 |     print("vector_similarity: ", vector_similarity)
17 | 
18 |     # total aggregated score of Exact String Match (EM)
19 |     print(
20 |         "Exact Match Accuracy (EM): ", sum(exact_string_match) / len(exact_string_match)
21 |     )
22 | 
23 |     # total aggregated score of Execution Accuracy (EX)
24 |     print(
25 |         "Execution Accuracy (EX): ", sum(execution_accuracy) / len(execution_accuracy)
26 |     )
27 | 
28 |     # total aggregated score of Score by LLMs (SL)
29 |     print("Score by LLMs (SL): ", sum(llms_score) / len(llms_score))
30 | 
31 |     # total aggregated score of Vector Similarity (ES)
32 |     print(
33 |         "Embedding Similarity (ES): ", sum(vector_similarity) / len(vector_similarity)
34 |     )
35 | 
36 |     return (
37 |         sum(exact_string_match) / len(exact_string_match),
38 |         sum(execution_accuracy) / len(execution_accuracy),
39 |         sum(llms_score) / len(llms_score),
40 |         sum(vector_similarity) / len(vector_similarity),
41 |     )
42 | 
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/evaluation/data/batch-samples.jsonl:
--------------------------------------------------------------------------------
1 | {"sql_groundtruth": "SELECT Name FROM SalesLT.Product WHERE Color = 'Red' and Size = 62 and StandardCost < 300", "sql_generated": "SELECT Name FROM SalesLT.Product WHERE Color = 'Red' and Size = 62 and StandardCost < 300"}
2 | {"sql_groundtruth": "SELECT TOP(3) LastName, FirstName FROM SalesLT.Customer WHERE CustomerID < 4 ORDER BY CustomerID DESC", "sql_generated": "SELECT TOP(3) FirstName, LastName FROM SalesLT.Customer WHERE CustomerID < 4 ORDER BY CustomerID ASC"}
3 | {"sql_groundtruth": "SELECT * FROM SalesLT.Product WHERE StandardCost < 300", "sql_generated": "SELECT * FROM SalesLT.Product WHERE StandardCost <= 299"}
4 | {"sql_groundtruth": "SELECT FirstName, LastName FROM SalesLT.Customer", "sql_generated": "SELECT LastName, FirstName FROM SalesLT.Customer"}
5 | {"sql_groundtruth": "SELECT FirstName, LastName FROM SalesLT.Customer", "sql_generated": "select FirstName, LastName from SalesLT.Customer"}
6 | 
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/evaluation/evaluate_by_llms.jinja2:
--------------------------------------------------------------------------------
1 | System:
2 | You are an AI assistant.
You will be given the ground truth SQL query and the SQL query generated by a text-to-SQL model. Your job is to compute an accurate evaluation score based on the similarity between the two queries. 3 | 4 | User: 5 | The similarity score measures the degree of equivalence between the ground truth SQL query and the SQL query generated by the model. Consider the structure, syntax, and semantics of the queries when evaluating similarity. Given the ground truth and generated SQL queries, score the similarity between them on a scale of one to five stars using the following rating scale: 6 | 7 | One star: the generated query is completely different from the ground truth 8 | Two stars: the generated query is mostly different from the ground truth 9 | Three stars: the generated query has some similarity with the ground truth 10 | Four stars: the generated query is mostly semantically similar to the ground truth 11 | Five stars: the generated query is semantically identical to the ground truth 12 | 13 | And if the results of the queries (the ground truth SQL and the generated SQL) are the same, then the rating should be five stars. 14 | 15 | This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5. 16 | 17 | ground_truth: SELECT name FROM students WHERE grade = 'A'; 18 | generated: SELECT name FROM students WHERE grade = 'B'; 19 | stars: 1 20 | 21 | ground_truth: SELECT COUNT(*) FROM orders WHERE status = 'delivered'; 22 | generated: SELECT COUNT(order_id) FROM orders WHERE status = 'delivered'; 23 | stars: 4 24 | 25 | ground_truth: SELECT * FROM employees WHERE salary > 50000; 26 | generated: SELECT * FROM employees WHERE salary > 60000; 27 | stars: 2 28 | 29 | ground_truth: SELECT name, age FROM customers WHERE city = 'New York'; 30 | generated: SELECT name, age FROM customers WHERE city = 'New York'; 31 | stars: 5 32 | 33 | ground_truth: {{sql_groundtruth}} 34 | generated: {{sql_generated}} 35 | stars: -------------------------------------------------------------------------------- /sample_gallery/nl2sql/evaluation/exact_string_match.py: -------------------------------------------------------------------------------- 1 | from promptflow import tool 2 | from typing import Tuple 3 | 4 | 5 | @tool 6 | def exact_string_match( 7 | sql_groundtruth: str, 8 | sql_generated: str, 9 | ) -> int: 10 | 11 | sql_groundtruth = sql_groundtruth.lower() 12 | sql_generated = sql_generated.lower() 13 | 14 | em = evaluate(sql_groundtruth, sql_generated) 15 | 16 | return em 17 | 18 | 19 | # Score of Exact String Accuracy (EM) 20 | def evaluate(sql_groundtruth, sql_generated): 21 | 22 | # compare sql_groundtruth and sql_generated. 
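    # NOTE: this is a strict string comparison after lowercasing; queries that differ
    # only in whitespace or formatting will score 0 even when semantically equivalent.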
23 |     if sql_groundtruth == sql_generated:
24 |         count = 1
25 |     else:
26 |         count = 0
27 | 
28 |     return count
29 | 
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/evaluation/execution_accuracy.py:
--------------------------------------------------------------------------------
1 | from promptflow import tool
2 | from typing import Tuple
3 | 
4 | 
5 | @tool
6 | def execution_accuracy(
7 |     queried_data: Tuple[list, list, bool],
8 | ) -> float:
9 | 
10 |     value_groundtruth = queried_data[0]
11 |     value_generated = queried_data[1]
12 |     order_by = queried_data[2]
13 | 
14 |     ex = evaluate(value_groundtruth, value_generated, order_by)
15 | 
16 |     return ex
17 | 
18 | 
19 | # Score of Execution Accuracy (EX)
20 | def evaluate(value_groundtruth, value_generated, order_by: bool) -> float:
21 | 
22 |     # If the ground truth query has an ORDER BY, compare row by row (order matters).
23 |     if order_by:
24 |         if len(value_groundtruth) != len(value_generated):
25 |             score = 0
26 |         else:
27 |             score = 1
28 |             for i in range(len(value_groundtruth)):
29 |                 if value_groundtruth[i] != value_generated[i]:
30 |                     score = 0
31 |                     break
32 |     # compare value_groundtruth and value_generated regardless of the order of the elements.
33 |     elif set(value_groundtruth) == set(value_generated):
34 |         score = 1
35 |     # otherwise give partial credit: the share of distinct ground-truth rows that also
36 |     # appear in the generated result (guarding against an empty ground truth).
37 |     else:
38 |         score = len(set(value_groundtruth).intersection(set(value_generated))) / max(
39 |             len(set(value_groundtruth)), 1
40 |         )
41 |     return score
42 | 
--------------------------------------------------------------------------------
/sample_gallery/nl2sql/evaluation/flow.dag.yaml:
--------------------------------------------------------------------------------
1 | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
2 | environment:
3 |   python_requirements_txt: requirements.txt
4 | inputs:
5 |   sql_groundtruth:
6 |     type: string
7 |     default: SELECT * FROM SalesLT.Product WHERE StandardCost < 300
8 |   sql_generated:
9 |     type: string
10 |     default: SELECT * FROM SalesLT.Product WHERE StandardCost <= 299
11 | outputs:
12 |   Exact_string_match:
13 |     type: string
14 |     reference: ${Exact_string_match.output}
15 |   Execution_accuracy:
16 |     type: string
17 |     reference: ${Execution_accuracy.output}
18 |   llms_score:
19 |     type: string
20 |     reference: ${Extract_score.output}
21 |   Vector_similarity_score:
22 |     type: string
23 |     reference: ${Vector_similarity.output}
24 | nodes:
25 | - name: Execution_accuracy
26 |   type: python
27 |   source:
28 |     type: code
29 |     path: execution_accuracy.py
30 |   inputs:
31 |     queried_data: ${Query_to_database.output}
32 | - name: Exact_string_match
33 |   type: python
34 |   source:
35 |     type: code
36 |     path: exact_string_match.py
37 |   inputs:
38 |     sql_groundtruth: ${inputs.sql_groundtruth}
39 |     sql_generated: ${inputs.sql_generated}
40 | - name: Aggregate
41 |   type: python
42 |   source:
43 |     type: code
44 |     path: aggregate.py
45 |   inputs:
46 |     execution_accuracy: ${Execution_accuracy.output}
47 |     exact_string_match: ${Exact_string_match.output}
48 |     vector_similarity: ${Vector_similarity.output}
49 |     llms_score: ${Extract_score.output}
50 |   aggregation: true
51 | - name: Query_to_database
52 |   type: python
53 |   source:
54 |     type: code
55 |     path: Query.py
56 |   inputs:
57 |     sql_groundtruth:
${inputs.sql_groundtruth} 58 | sql_generated: ${inputs.sql_generated} 59 | sqlconn: custom-sqlconn 60 | - name: Evaluate_by_LLM 61 | type: llm 62 | source: 63 | type: code 64 | path: evaluate_by_llms.jinja2 65 | inputs: 66 | deployment_name: gpt-4 67 | sql_groundtruth: ${inputs.sql_groundtruth} 68 | sql_generated: ${inputs.sql_generated} 69 | connection: azure_openai_connection 70 | api: chat 71 | - name: embedding_groundtruth 72 | type: python 73 | source: 74 | type: package 75 | tool: promptflow.tools.embedding.embedding 76 | inputs: 77 | connection: azure_openai_connection 78 | deployment_name: text-embedding-ada-002 79 | input: ${inputs.sql_groundtruth} 80 | - name: embedding_generated 81 | type: python 82 | source: 83 | type: package 84 | tool: promptflow.tools.embedding.embedding 85 | inputs: 86 | connection: azure_openai_connection 87 | deployment_name: text-embedding-ada-002 88 | input: ${inputs.sql_generated} 89 | - name: Vector_similarity 90 | type: python 91 | source: 92 | type: code 93 | path: Vector_similarity.py 94 | inputs: 95 | groundtruth_embedding: ${embedding_groundtruth.output} 96 | generated_embedding: ${embedding_generated.output} 97 | - name: Extract_score 98 | type: python 99 | source: 100 | type: code 101 | path: Extract_score.py 102 | inputs: 103 | llms_output: ${Evaluate_by_LLM.output} 104 | -------------------------------------------------------------------------------- /sample_gallery/nl2sql/evaluation/media/evaluation_flow_run_sample.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/nl2sql/evaluation/media/evaluation_flow_run_sample.png -------------------------------------------------------------------------------- /sample_gallery/nl2sql/evaluation/requirements.txt: -------------------------------------------------------------------------------- 1 | promptflow 2 | promptflow-tools 3 | pyodbc == 5.0.1 -------------------------------------------------------------------------------- /sample_gallery/nl2sql/evaluation/sample-groundtruth-sql.jsonl: -------------------------------------------------------------------------------- 1 | {"user_query":"Show the first name and the email address of customer with CompanyName 'Bike World'", "actual_sql":"SELECT firstname, emailaddress FROM SalesLT.Customer WHERE companyname = 'Bike World'"} 2 | {"user_query":"Give the CompanyName of those customers with orders over $100000. 
Include the subtotal plus tax plus freight", "actual_sql":"SELECT companyname FROM SalesLT.Customer c JOIN SalesLT.SalesOrderHeader sh ON c.customerid = sh.customerid WHERE subtotal + taxamt + freight > 100000"} 3 | {"user_query":"Show the SalesOrderID and the UnitPrice for every Single Item Order", "actual_sql":"WITH temp1 AS (SELECT salesorderid, SUM(OrderQty) AS items FROM SalesLT.SalesOrderDetail GROUP BY salesorderid HAVING SUM(OrderQty) = 1) SELECT salesorderid, unitprice FROM SalesLT.SalesOrderDetail WHERE salesorderid IN (SELECT salesorderid FROM temp1)"} 4 | {"user_query":"Show the product description for culture 'fr' for product with ProductID 736", "actual_sql":"SELECT description FROM SalesLT.Product p JOIN SalesLT.ProductModel pm ON p.productmodelid = pm.productmodelid JOIN SalesLT.ProductModelProductDescription pmpd ON pm.productmodelid = pmpd.productmodelid JOIN SalesLT.ProductDescription pd ON pmpd.productdescriptionid = pd.productdescriptionid WHERE (productid = 736) AND (culture = 'fr')"} 5 | {"user_query":"How many products in ProductCategory 'Cranksets' have been sold to an address in 'London'?", "actual_sql":"SELECT SUM(orderqty) total FROM SalesLT.Address a JOIN SalesLT.SalesOrderHeader sh ON a.addressid = sh.billtoaddressid JOIN SalesLT.SalesOrderDetail sd ON sh.salesorderid = sd.salesorderid JOIN SalesLT.Product p ON sd.productid = p.productid JOIN SalesLT.ProductCategory pc ON p.productcategoryid = pc.productcategoryid WHERE (city = 'London') AND (pc.name = 'Cranksets');"} 6 | {"user_query":"For every customer with a 'Main Office' in Dallas show AddressLine1 of the 'Main Office' and AddressLine1 of the 'Shipping' address. If there is no shipping address leave it blank. Use one row per customer", "actual_sql":"SELECT companyname, MAX(CASE WHEN addresstype = 'Main Office' THEN addressline1 ELSE '' END) main_office, MAX(CASE WHEN addresstype = 'Shipping' THEN addressline1 ELSE '' END) shipping FROM SalesLT.CustomerAddress ca JOIN SalesLT.Address a ON ca.addressid = a.addressid JOIN SalesLT.Customer c ON ca.customerid = c.customerid WHERE city = 'Dallas' GROUP BY companyname"} 7 | {"user_query":"Show the best selling item by value", "actual_sql":"SELECT TOP 1 name, SUM(orderqty * unitprice) total_value FROM SalesLT.SalesOrderDetail sd JOIN SalesLT.Product p ON sd.productid = p.productid GROUP BY name ORDER BY total_value DESC"} 8 | {"user_query":"Identify the three most important cities. 
Show the break down of top level product category against city", "actual_sql":"WITH temp1 AS (SELECT TOP 3 city, SUM(unitprice * orderqty) AS total_sales FROM SalesLT.SalesOrderDetail sd JOIN SalesLT.SalesOrderHeader sh ON sd.salesorderid = sh.salesorderid JOIN SalesLT.Address a ON sh.shiptoaddressid = a.addressid GROUP BY city ORDER BY total_sales DESC) SELECT city, pc.name, SUM(unitprice * orderqty) total_sales FROM SalesLT.SalesOrderDetail sd JOIN SalesLT.SalesOrderHeader sh ON sd.salesorderid = sh.salesorderid JOIN SalesLT.Address a ON sh.shiptoaddressid = a.addressid JOIN SalesLT.Product p ON sd.productid = p.productid JOIN SalesLT.ProductCategory pc ON p.productcategoryid = pc.productcategoryid WHERE city IN (SELECT city FROM temp1) GROUP BY city, pc.name ORDER BY city, total_sales DESC"}
9 | {"user_query":"Find the best customer in each region", "actual_sql":"WITH temp1 (countryregion, companyname, total_sales, rnk) AS (SELECT countryregion, companyname, SUM(subtotal) AS total_sales, RANK() OVER (PARTITION BY countryregion ORDER BY SUM(subtotal) DESC) rnk FROM SalesLT.Address a JOIN SalesLT.SalesOrderHeader sh ON a.addressid = sh.shiptoaddressid JOIN SalesLT.Customer c ON sh.customerid = c.customerid GROUP BY countryregion, companyname) SELECT countryregion, companyname, total_sales FROM temp1 WHERE rnk = 1"}
10 | {"user_query":"Show the total order value for each CountryRegion. List by value with the highest first", "actual_sql":"SELECT countryregion, SUM(subtotal) total FROM SalesLT.Address a JOIN SalesLT.SalesOrderHeader sh ON a.addressid = sh.shiptoaddressid GROUP BY countryregion"}
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/query_existing_vector_index/.DS_Store
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/Tutorial.md:
--------------------------------------------------------------------------------
1 | # QUERYING EXISTING VECTOR INDEXES WITH PROMPTFLOW
2 | 
3 | To query existing vector indexes in a PromptFlow standard or chat flow, a [Vector DB Lookup tool](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/tools-reference/vector-db-lookup-tool) is used. The Vector DB Lookup tool is a wrapper around Azure AI Search as well as multiple third-party vector databases such as Qdrant and Weaviate.
4 | 
5 | In this sample, we will go over how to query an existing vector index on Azure AI Search.
6 | 
7 | The document we use is the "World Economic Outlook" document from 10th October 2023. (You can download the document [here](https://www.imf.org/en/Publications/WEO)). We assume the document is already chunked and its embeddings stored in an AI Search vector index named "worldeconomyoctober".
8 | 
9 | The Vector DB Lookup tool takes a set of required and optional inputs depending on the vector DB used, documented [here](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/tools-reference/vector-db-lookup-tool?view=azureml-api-2). For AI Search, the required parameters are the AI Search connection to use, the index name, the vector_field name, and the vector to query with (in our case, the question embedding from the earlier flow step).
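
For reference, this is the lookup node from this sample's flow.dag.yaml (in vectordb-lookup-simplified), trimmed to the inputs discussed above:

```yaml
- name: vectordblookup
  type: python
  source:
    type: package
    tool: promptflow_vectordb.tool.vector_db_lookup.VectorDBLookup.search
  inputs:
    connection: ACS_conn
    index_name: worldeconomyoctober
    vector_field: content_vector_open_ai
    top_k: 3
    vector: ${embed_the_question.output}
```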
10 | 
11 | For further info, please refer to the blog post [here](https://medium.com/@343544/azure-promptflow-querying-existing-vectordb-indexes-55af636e02fb).
12 | 
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/media/vectordb_lookup_tool_chatflow_graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/query_existing_vector_index/media/vectordb_lookup_tool_chatflow_graph.png
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/media/vectordb_lookup_tool_query_module.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/query_existing_vector_index/media/vectordb_lookup_tool_query_module.png
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/source_file/ACS_conn.yaml:
--------------------------------------------------------------------------------
1 | # Notices:
2 | # - Don't replace the '' placeholder in this file; the application will prompt you to enter a value when it runs.
3 | # - Manually saving the file will create the connection after basic validation.
4 | # - The connection information will be stored in a local database with api_key encrypted for safety.
5 | # - Prompt flow will ONLY use the connection information (incl. keys) when instructed by you, e.g. manage connections, use connections to run flow etc.
6 | # - All the values should be string type, please use "123" instead of 123 or "True" instead of True.
7 | 
8 | $schema: https://azuremlschemas.azureedge.net/promptflow/latest/CognitiveSearchConnection.schema.json
9 | name: "ACS_conn"
10 | type: cognitive_search # snake case
11 | api_key: "" # Don't replace the '' placeholder. The application will prompt you to enter a value when it runs.
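# api_base should be the endpoint URL of your Azure AI Search service,
# e.g. https://<your-search-service>.search.windows.net (assumed format).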
12 | api_base: "endpoint" 13 | api_version: "2023-07-01-Preview" 14 | -------------------------------------------------------------------------------- /sample_gallery/query_existing_vector_index/source_file/aoai_ozguler_02.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/query_existing_vector_index/source_file/aoai_ozguler_02.yaml -------------------------------------------------------------------------------- /sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.DS_Store -------------------------------------------------------------------------------- /sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.promptflow/flow.layout.json: -------------------------------------------------------------------------------- 1 | { 2 | "orientation": "Vertical", 3 | "nodeLayouts": { 4 | "inputs": { 5 | "x": 242, 6 | "y": 62, 7 | "index": -1 8 | }, 9 | "outputs": { 10 | "x": 242, 11 | "y": 832, 12 | "index": -1 13 | }, 14 | "modify_query_with_history": { 15 | "x": 187, 16 | "y": 172, 17 | "index": 0 18 | }, 19 | "embed_the_question": { 20 | "x": 187, 21 | "y": 282, 22 | "index": 1 23 | }, 24 | "generate_prompt_context": { 25 | "x": 133.83984375, 26 | "y": 487.6484375, 27 | "index": 2 28 | }, 29 | "Prompt_variants": { 30 | "x": 242, 31 | "y": 612, 32 | "index": 3 33 | }, 34 | "answer_the_question_with_context": { 35 | "x": 242, 36 | "y": 722, 37 | "index": 4 38 | }, 39 | "vectordblookup": { 40 | "x": 137.7734375, 41 | "y": 386.203125, 42 | "index": 5 43 | } 44 | }, 45 | "viewport": { 46 | "transformMatrix": [ 47 | 1, 48 | 0, 49 | 0, 50 | 1, 51 | -108.83984375, 52 | -637.5972290039062 53 | ] 54 | } 55 | } -------------------------------------------------------------------------------- /sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.promptflow/flow.tools.json: -------------------------------------------------------------------------------- 1 | { 2 | "package": { 3 | "promptflow.tools.embedding.embedding": { 4 | "name": "Embedding", 5 | "description": "Use Open AI's embedding model to create an embedding vector representing the input text.", 6 | "type": "python", 7 | "module": "promptflow.tools.embedding", 8 | "function": "embedding", 9 | "inputs": { 10 | "connection": { 11 | "type": [ 12 | "AzureOpenAIConnection", 13 | "OpenAIConnection" 14 | ] 15 | }, 16 | "deployment_name": { 17 | "type": [ 18 | "string" 19 | ], 20 | "enabled_by": "connection", 21 | "enabled_by_type": [ 22 | "AzureOpenAIConnection" 23 | ], 24 | "capabilities": { 25 | "completion": false, 26 | "chat_completion": false, 27 | "embeddings": true 28 | }, 29 | "model_list": [ 30 | "text-embedding-ada-002", 31 | "text-search-ada-doc-001", 32 | "text-search-ada-query-001" 33 | ] 34 | }, 35 | "model": { 36 | "type": [ 37 | "string" 38 | ], 39 | "enabled_by": "connection", 40 | "enabled_by_type": [ 41 | "OpenAIConnection" 42 | ], 43 | "enum": [ 44 | "text-embedding-ada-002", 45 | "text-search-ada-doc-001", 46 | "text-search-ada-query-001" 47 | ], 48 | "allow_manual_entry": true 49 | }, 50 | "input": { 51 | "type": [ 52 | "string" 53 | ] 54 | } 55 | }, 56 | 
"package": "promptflow-tools", 57 | "package_version": "1.0.1" 58 | } 59 | }, 60 | "code": { 61 | "Prompt_variants__Variant_0.jinja2": { 62 | "type": "prompt", 63 | "inputs": { 64 | "question": { 65 | "type": [ 66 | "string" 67 | ] 68 | }, 69 | "contexts": { 70 | "type": [ 71 | "string" 72 | ] 73 | }, 74 | "chat_history": { 75 | "type": [ 76 | "string" 77 | ] 78 | } 79 | }, 80 | "source": "Prompt_variants__Variant_0.jinja2" 81 | }, 82 | "answer_the_question_with_context.jinja2": { 83 | "type": "llm", 84 | "inputs": { 85 | "prompt_text": { 86 | "type": [ 87 | "string" 88 | ] 89 | } 90 | }, 91 | "source": "answer_the_question_with_context.jinja2" 92 | }, 93 | "generate_prompt_context.py": { 94 | "name": "generate_prompt_context.py", 95 | "type": "python", 96 | "inputs": { 97 | "search_result": { 98 | "type": [ 99 | "object" 100 | ], 101 | "allow_manual_entry": false, 102 | "is_multi_select": false, 103 | "value": "${search_question_from_indexed_docs.output}" 104 | } 105 | }, 106 | "description": "Generate the context for the prompt.", 107 | "source": "generate_prompt_context.py", 108 | "function": "generate_prompt_context", 109 | "is_builtin": false, 110 | "toolId": "generate_prompt_context.py" 111 | }, 112 | "modify_query_with_history.jinja2": { 113 | "type": "llm", 114 | "inputs": { 115 | "question": { 116 | "type": [ 117 | "string" 118 | ] 119 | }, 120 | "chat_history": { 121 | "type": [ 122 | "string" 123 | ] 124 | } 125 | }, 126 | "source": "modify_query_with_history.jinja2" 127 | } 128 | } 129 | } -------------------------------------------------------------------------------- /sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.promptflow/flow.uihint.json: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "modify_query_with_history": { 4 | "variant_0": { 5 | "inputs": {}, 6 | "inputsValue": {} 7 | } 8 | }, 9 | "embed_the_question": { 10 | "variant_0": { 11 | "inputs": {}, 12 | "inputsValue": {} 13 | } 14 | }, 15 | "vectordblookup": { 16 | "variant_0": { 17 | "inputs": {}, 18 | "inputsValue": {} 19 | } 20 | }, 21 | "generate_prompt_context": { 22 | "variant_0": { 23 | "inputs": {}, 24 | "inputsValue": {} 25 | } 26 | }, 27 | "Prompt_variants": { 28 | "Variant_0": { 29 | "inputs": {}, 30 | "inputsValue": {} 31 | } 32 | }, 33 | "answer_the_question_with_context": { 34 | "variant_0": { 35 | "inputs": {}, 36 | "inputsValue": {} 37 | } 38 | } 39 | } 40 | } -------------------------------------------------------------------------------- /sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.promptflow/lkg_sources/Prompt_variants__Variant_0.jinja2: -------------------------------------------------------------------------------- 1 | system: 2 | You are an AI assistant that helps users answer questions given a specific context and conversation history. You will be given a context and chat history, and then asked a question based on that context and history. Your answer should be as precise as possible, and should only come from the context. 3 | Please add citation after each sentence when possible in a form "(Source: citation)". 
4 | 
5 | user:
6 | {{contexts}}
7 | 
8 | chat history:
9 | {% for item in chat_history %} user:
10 | {{ item.inputs.question }}
11 | assistant:
12 | {{ item.outputs.output }}
13 | {% endfor %}
14 | user: {{question}}
15 | assistant:
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.promptflow/lkg_sources/Prompt_variants__Variant_1.jinja2:
--------------------------------------------------------------------------------
1 | system:
2 | You are an AI assistant that helps users answer questions given a specific context and chat history. You will be given a context and history and asked a question based on that context and chat history. Your answer should be as precise as possible and should only come from the context.
3 | Please add citation after each sentence when possible in a form "(Source: citation)".
4 | 
5 | user:
6 | {{contexts}}
7 | 
8 | chat history:
9 | {% for item in chat_history %} user:
10 | {{ item.inputs.question }}
11 | assistant:
12 | {{ item.outputs.output }}
13 | {% endfor %}
14 | user: {{question}}
15 | assistant:
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.promptflow/lkg_sources/Prompt_variants__Variant_2.jinja2:
--------------------------------------------------------------------------------
1 | system:
2 | You are a chat assistant that helps users answer questions given a specific context and history. You are given a context and conversation history, and you'll be asked a question based on them. Your answer should be as precise as possible and should come only from the context.
3 | Please add citation after each sentence when possible in a form "(Source: citation)".
4 | 
5 | user:
6 | {{contexts}}
7 | 
8 | chat history:
9 | {% for item in chat_history %} user:
10 | {{ item.inputs.question }}
11 | assistant:
12 | {{ item.outputs.output }}
13 | {% endfor %}
14 | user: {{question}}
15 | assistant:
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.promptflow/lkg_sources/answer_the_question_with_context.jinja2:
--------------------------------------------------------------------------------
1 | {{prompt_text}}
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.promptflow/lkg_sources/generate_prompt_context.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Microsoft Corporation.
2 | # Licensed under the MIT License.
3 | """File for context getting tool.""" 4 | from typing import List 5 | from promptflow import tool 6 | from promptflow_vectordb.core.contracts import SearchResultEntity 7 | 8 | 9 | @tool 10 | def generate_prompt_context(search_result: List[dict]) -> str: 11 | """Generate the context for the prompt.""" 12 | def format_doc(doc: dict): 13 | """Format Doc.""" 14 | return f"Content: {doc['Content']}\nSource: {doc['Source']}" 15 | 16 | SOURCE_KEY = "source" 17 | URL_KEY = "url" 18 | 19 | retrieved_docs = [] 20 | for item in search_result: 21 | 22 | entity = SearchResultEntity.from_dict(item) 23 | content = entity.text or "" 24 | 25 | source = "" 26 | if entity.metadata is not None: 27 | if SOURCE_KEY in entity.metadata: 28 | if URL_KEY in entity.metadata[SOURCE_KEY]: 29 | source = entity.metadata[SOURCE_KEY][URL_KEY] or "" 30 | 31 | retrieved_docs.append({ 32 | "Content": content, 33 | "Source": source 34 | }) 35 | doc_string = "\n\n".join([format_doc(doc) for doc in retrieved_docs]) 36 | return doc_string 37 | -------------------------------------------------------------------------------- /sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.promptflow/lkg_sources/modify_query_with_history.jinja2: -------------------------------------------------------------------------------- 1 | system: 2 | Given the following conversation history and the users next question,rephrase the question to be a stand alone question. 3 | If the conversation is irrelevant or empty, just restate the original question. 4 | Do not add more details than necessary to the question. 5 | conversation: 6 | 7 | chat history: 8 | {% for item in chat_history %} user: 9 | {{ item.inputs.question }} 10 | assistant: 11 | {{ item.outputs.output }} 12 | {% endfor %} 13 | 14 | Follow up Input: {{question}} 15 | Standalone Question: -------------------------------------------------------------------------------- /sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/.promptflow/lkg_sources/requirements.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/Prompt_variants__Variant_0.jinja2: -------------------------------------------------------------------------------- 1 | system: 2 | You are an AI assistant that helps users answer questions given a specific context and conversation history. You will be given a context and chat history, and then asked a question based on that context and history. Your answer should be as precise as possible, and should only come from the context. 3 | Please add citation after each sentence when possible in a form "(Source: citation)". 
4 | 
5 | user:
6 | {{contexts}}
7 | 
8 | chat history:
9 | {% for item in chat_history %} user:
10 | {{ item.inputs.question }}
11 | assistant:
12 | {{ item.outputs.output }}
13 | {% endfor %}
14 | user: {{question}}
15 | assistant:
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/__pycache__/.generate_prompt_context.tmp.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/__pycache__/.generate_prompt_context.tmp.cpython-39.pyc
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/__pycache__/generate_prompt_context.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/promptflow-resource-hub/4d445d6969534fa1e39a589eae54b1329955fc54/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/__pycache__/generate_prompt_context.cpython-39.pyc
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/answer_the_question_with_context.jinja2:
--------------------------------------------------------------------------------
1 | {{prompt_text}}
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/flow.dag.yaml:
--------------------------------------------------------------------------------
1 | inputs:
2 |   question:
3 |     type: string
4 |     default: tell me how the economy goes...
5 |     is_chat_input: true
6 |   chat_history:
7 |     type: list
8 |     default:
9 |     - inputs:
10 |         question: tell me how the economy goes...
11 |       outputs:
12 |         line_number: 0
13 |         output: "I apologize for any confusion earlier.
As I mentioned earlier, the 14 | IMF's October 2023 report states that the global economy is recovering 15 | slowly from various crises, with slow and uneven growth and growing 16 | divergences. However, the situation is complex and dynamic, and the 17 | extent and severity of these risks may vary depending on the country 18 | or region. It is important to monitor ongoing developments and consult 19 | with experts for a more nuanced and up-to-date understanding of the 20 | global economic situation. (Source: IMFOctober.pdf)" 21 | is_chat_input: false 22 | is_chat_history: true 23 | outputs: 24 | output: 25 | type: string 26 | reference: ${answer_the_question_with_context.output} 27 | evaluation_only: false 28 | is_chat_output: true 29 | nodes: 30 | - name: modify_query_with_history 31 | type: llm 32 | source: 33 | type: code 34 | path: modify_query_with_history.jinja2 35 | inputs: 36 | deployment_name: gpt-35-turbo-02 37 | temperature: 0.7 38 | top_p: 1 39 | stop: "" 40 | max_tokens: 0 41 | presence_penalty: 0 42 | frequency_penalty: 0 43 | logit_bias: "" 44 | chat_history: ${inputs.chat_history} 45 | question: ${flow.question} 46 | provider: AzureOpenAI 47 | connection: aoai_ozguler_02 48 | api: chat 49 | module: promptflow.tools.aoai 50 | use_variants: false 51 | - name: embed_the_question 52 | type: python 53 | source: 54 | type: package 55 | tool: promptflow.tools.embedding.embedding 56 | inputs: 57 | connection: aoai_ozguler_02 58 | deployment_name: text-embedding-ada-002 59 | input: ${modify_query_with_history.output} 60 | aggregation: false 61 | use_variants: false 62 | - name: generate_prompt_context 63 | type: python 64 | source: 65 | type: code 66 | path: generate_prompt_context.py 67 | inputs: 68 | search_result: ${vectordblookup.output} 69 | aggregation: false 70 | use_variants: false 71 | - name: Prompt_variants 72 | type: prompt 73 | source: 74 | type: code 75 | path: Prompt_variants__Variant_0.jinja2 76 | inputs: 77 | chat_history: ${flow.chat_history} 78 | contexts: ${generate_prompt_context.output} 79 | question: ${flow.question} 80 | aggregation: false 81 | use_variants: false 82 | - name: answer_the_question_with_context 83 | type: llm 84 | source: 85 | type: code 86 | path: answer_the_question_with_context.jinja2 87 | inputs: 88 | deployment_name: gpt-35-turbo-02 89 | temperature: 0.7 90 | top_p: 1 91 | stop: "" 92 | max_tokens: 3000 93 | presence_penalty: 0 94 | frequency_penalty: 0 95 | logit_bias: "" 96 | prompt_text: ${Prompt_variants.output} 97 | provider: AzureOpenAI 98 | connection: aoai_ozguler_02 99 | api: chat 100 | module: promptflow.tools.aoai 101 | aggregation: false 102 | use_variants: false 103 | - name: vectordblookup 104 | type: python 105 | source: 106 | type: package 107 | tool: promptflow_vectordb.tool.vector_db_lookup.VectorDBLookup.search 108 | inputs: 109 | connection: ACS_conn 110 | index_name: worldeconomyoctober 111 | search_filters: 112 | filter: "" 113 | search_params: 114 | select: "" 115 | text_field: '""' 116 | vector_field: content_vector_open_ai 117 | top_k: 3 118 | vector: ${embed_the_question.output} 119 | use_variants: false 120 | node_variants: {} 121 | environment: 122 | python_requirements_txt: requirements.txt 123 | -------------------------------------------------------------------------------- /sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/generate_prompt_context.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 
2 | # Licensed under the MIT License.
3 | """File for context getting tool."""
4 | from typing import List
5 | from promptflow import tool
6 | from promptflow_vectordb.core.contracts import SearchResultEntity
7 | 
8 | 
9 | @tool
10 | def generate_prompt_context(search_result: List[dict]) -> str:
11 |     """Generate the context for the prompt."""
12 |     def format_doc(doc: dict):
13 |         """Format Doc."""
14 |         return f"Content: {doc['Content']}\nSource: {doc['Source']}"
15 | 
16 |     SOURCE_KEY = "source"
17 |     URL_KEY = "url"
18 | 
19 |     retrieved_docs = []
20 |     for item in search_result:
21 | 
22 |         entity = SearchResultEntity.from_dict(item)
23 |         content = entity.text or ""
24 | 
25 |         source = ""
26 |         if entity.metadata is not None:
27 |             if SOURCE_KEY in entity.metadata:
28 |                 if URL_KEY in entity.metadata[SOURCE_KEY]:
29 |                     source = entity.metadata[SOURCE_KEY][URL_KEY] or ""
30 | 
31 |         retrieved_docs.append({
32 |             "Content": content,
33 |             "Source": source
34 |         })
35 |     doc_string = "\n\n".join([format_doc(doc) for doc in retrieved_docs])
36 |     return doc_string
37 | 
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/modify_query_with_history.jinja2:
--------------------------------------------------------------------------------
1 | system:
2 | Given the following conversation history and the user's next question, rephrase the question to be a standalone question.
3 | If the conversation is irrelevant or empty, just restate the original question.
4 | Do not add more details than necessary to the question.
5 | conversation:
6 | 
7 | chat history:
8 | {% for item in chat_history %} user:
9 | {{ item.inputs.question }}
10 | assistant:
11 | {{ item.outputs.output }}
12 | {% endfor %}
13 | 
14 | Follow up Input: {{question}}
15 | Standalone Question:
--------------------------------------------------------------------------------
/sample_gallery/query_existing_vector_index/vectordb-lookup-simplified/requirements.txt:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------