├── .dockerignore ├── .env.sample ├── .gitattributes ├── .github ├── issue_template │ ├── bug_request.md │ └── feature_request.md ├── pull_request_template.md └── workflows │ ├── azd_deploy.yml │ └── main_staging_ci.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .vscode ├── .settings.json ├── launch.json └── settings.json ├── 01-indexing-policies.ipynb ├── 02-test-pa-agentic-rag.ipynb ├── 03-test-pa-e2e.ipynb ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── SECURITY.md ├── SUPPORT.md ├── app ├── __init__.py ├── backend │ ├── Dockerfile │ ├── api │ │ └── __init__.py │ ├── core │ │ ├── __init__.py │ │ ├── config.py │ │ ├── constants.py │ │ ├── database.py │ │ ├── exceptions.py │ │ └── security.py │ ├── main.py │ ├── paprocessing │ │ ├── __init__.py │ │ ├── models.py │ │ └── test.http │ └── users │ │ ├── __init__.py │ │ ├── auth.py │ │ ├── manager.py │ │ └── schemas.py └── frontend │ ├── Dockerfile │ ├── components │ ├── __init__.py │ └── managers.py │ ├── deployapp.sh │ ├── settings │ └── config.toml │ └── streamlit │ ├── Home.py │ └── pages │ ├── 🏢 Payor.py │ └── 👩🏽‍⚕️ Provider.py ├── azure.yaml ├── docker-compose.yml ├── docs ├── .archive │ └── deployment.md ├── CODEOWNERS ├── _config.yaml ├── architecture.md ├── autoauth_sdk.md ├── azd_deployment.md ├── challenges.md ├── images │ ├── ai_foundry_evaluations.png │ ├── azp_help_containerjoblogs_1.png │ ├── azp_help_containerlogs_1.png │ ├── azp_help_containerlogs_2.png │ ├── azp_help_deployments.png │ ├── azure_logo.png │ ├── deploytoazure.svg │ ├── diagram.png │ ├── diagram_latest.png │ ├── flow.png │ ├── paworflow.png │ ├── prior_auth.png │ ├── prior_auth_flow.png │ └── visualizebutton.svg ├── index.md └── solution.md ├── environment.yaml ├── evals ├── README.md └── cases │ ├── _.yaml.example │ ├── agentic-rag-policies-001.yaml │ ├── agentic-rag-reasoning-001.yaml │ ├── autodetermination-decision-001.yaml │ ├── autodetermination-decision-002.yaml │ ├── 
autodetermination-decision-003.yaml │ ├── autodetermination-decision-004.yaml │ ├── autodetermination-decision-005.yaml │ ├── autodetermination-reasoning-001.yaml │ ├── autodetermination-reasoning-002.yaml │ ├── autodetermination-reasoning-003.yaml │ ├── autodetermination-reasoning-004.yaml │ ├── autodetermination-reasoning-005.yaml │ ├── ocr-ner-001-a.yaml │ ├── ocr-ner-001-b.yaml │ └── ocr-ner-002-a.yaml ├── infra ├── README.md ├── main.bicep ├── main.json ├── main.parameters.json ├── modules │ ├── ai │ │ ├── README.md │ │ ├── aifoundry.bicep │ │ ├── aifoundry.json │ │ ├── docintelligence.bicep │ │ ├── docintelligence.json │ │ ├── mais.bicep │ │ ├── mais.json │ │ ├── openai.bicep │ │ └── openai.json │ ├── compute │ │ ├── README.md │ │ ├── fetch-container-image.bicep │ │ └── fetch-container-image.json │ ├── data │ │ ├── README.md │ │ ├── cosmos-mongo-ru.bicep │ │ ├── cosmos-mongo-ru.json │ │ ├── cosmos-mongo.bicep │ │ ├── cosmos-mongo.json │ │ ├── search.bicep │ │ ├── search.json │ │ ├── storage.bicep │ │ └── storage.json │ └── security │ │ ├── README.md │ │ ├── aca.bicep │ │ ├── aca.json │ │ ├── appregistration.bicep │ │ ├── appregistration.json │ │ ├── appupdate.bicep │ │ ├── appupdate.json │ │ └── bicepconfig.json ├── resources.bicep └── resources.json ├── notebooks ├── 01-build-ground-truth-dataset.ipynb ├── 01-indexing-policies.ipynb ├── 02-test-pa-agentic-rag.ipynb ├── 03-test-pa-e2e.ipynb ├── __init__.py └── dev │ ├── EvaluationTesting.ipynb │ ├── GetPolicyText.ipynb │ └── testing.ipynb ├── pyproject.toml ├── requirements-codequality.txt ├── requirements.txt ├── shared ├── CODE_OF_CONDUCT.md ├── LICENSE └── SECURITY.md ├── src ├── __init__.py ├── agenticai │ ├── __init__.py │ ├── agents.py │ ├── plugins │ │ ├── plugins_store │ │ │ ├── __init__.py │ │ │ ├── hello_world │ │ │ │ ├── limerick │ │ │ │ │ ├── config.json │ │ │ │ │ └── skprompt.txt │ │ │ │ ├── plugins_store │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── main │ │ │ │ │ │ └── weather.py │ │ │ │ └── prompt │ 
│ │ │ │ └── test.yaml │ │ │ └── retrieval │ │ │ │ ├── __init__.py │ │ │ │ ├── aievaluator.py │ │ │ │ ├── aisearch.py │ │ │ │ └── aisearchclassification.py │ │ └── readme.md │ ├── sk_helper │ │ ├── __init__.py │ │ └── services.py │ ├── skills.py │ └── utils.py ├── aifoundry │ ├── __init__.py │ ├── aifoundry_helper.py │ └── telemetry.py ├── aoai │ ├── __init__.py │ ├── aoai_helper.py │ ├── test.py │ ├── tokenizer.py │ └── utils.py ├── cosmosdb │ ├── __init__.py │ ├── cosmos_db_store_inputs.py │ ├── cosmosdb_helper.py │ └── cosmosmongodb_helper.py ├── documentintelligence │ ├── __init__.py │ └── document_intelligence_helper.py ├── entraid │ ├── __init__.py │ └── generate_id.py ├── evals │ ├── __init__.py │ ├── case.py │ ├── custom │ │ ├── __init__.py │ │ ├── custom_evaluator.py │ │ ├── factual_correctness_evaluator.py │ │ ├── fuzzy_evaluator.py │ │ └── similarity_evaluator.py │ ├── pipeline.py │ └── sdk │ │ └── custom_azure_ai_evaluations.py ├── extractors │ ├── __init__.py │ ├── blob_data_extractor.py │ ├── pdf_data_extractor.py │ ├── pdfhandler.py │ └── utils.py ├── fabric │ └── __init__.py ├── ocr │ ├── __init__.py │ └── document_intelligence.py ├── pipeline │ ├── __init__.py │ ├── agenticRag │ │ ├── __init__.py │ │ ├── evaluator.py │ │ ├── run.py │ │ └── settings.yaml │ ├── autoDetermination │ │ ├── __init__.py │ │ ├── evaluator.py │ │ ├── run.py │ │ └── settings.yaml │ ├── clinicalExtractor │ │ ├── __init__.py │ │ ├── evaluator.py │ │ ├── run.py │ │ └── settings.yaml │ ├── paprocessing │ │ ├── __init__.py │ │ ├── evals.py │ │ ├── evaluator.py │ │ ├── run.py │ │ ├── run_deprecated.py │ │ ├── settings.yaml │ │ └── utils.py │ ├── policyIndexer │ │ ├── __init__.py │ │ ├── indexerSetup.py │ │ ├── run.py │ │ └── settings.yaml │ ├── promptEngineering │ │ ├── models.py │ │ ├── prompt_manager.py │ │ └── templates │ │ │ ├── evaluator_system_prompt.jinja │ │ │ ├── evaluator_user_prompt.jinja │ │ │ ├── formulator_system_prompt.jinja │ │ │ ├── formulator_user_prompt.jinja │ │ 
│ ├── ner_clinician_system.jinja │ │ │ ├── ner_clinician_user.jinja │ │ │ ├── ner_patient_system.jinja │ │ │ ├── ner_patient_user.jinja │ │ │ ├── ner_physician_system.jinja │ │ │ ├── ner_physician_user.jinja │ │ │ ├── ner_system_prompt.jinja │ │ │ ├── ner_user_prompt.jinja │ │ │ ├── prior_auth_o1_user_prompt.jinja │ │ │ ├── prior_auth_o1_user_prompt_b.jinja │ │ │ ├── prior_auth_system_prompt.jinja │ │ │ ├── prior_auth_user_prompt.jinja │ │ │ ├── query_classificator_system_prompt.jinja │ │ │ ├── query_classificator_user_prompt.jinja │ │ │ ├── query_expansion_system_prompt.jinja │ │ │ ├── query_expansion_user_prompt.jinja │ │ │ ├── summarize_autodetermination_system.jinja │ │ │ ├── summarize_autodetermination_user.jinja │ │ │ ├── summarize_policy_system.jinja │ │ │ ├── summarize_policy_user.jinja │ │ │ ├── transform_determination_markdown_system_prompt.jinja │ │ │ └── transform_determination_markdown_user_prompt.jinja │ └── utils.py ├── storage │ ├── __init__.py │ └── blob_helper.py ├── utils.py └── utils │ ├── __init__.py │ └── ml_logging.py ├── tests ├── README.md ├── __init__.py ├── conftest.py ├── evals │ ├── TEST-001-a │ │ └── 20241228_130935 │ │ │ └── evaluation_results.json │ ├── TEST-001-b │ │ └── 20241228_130935 │ │ │ └── evaluation_results.json │ ├── TEST-002-a │ │ └── 20241228_130935 │ │ │ └── evaluation_results.json │ ├── test_agenticRag.py │ └── test_autoDetermination.py └── utils │ ├── __init__.py │ └── test_logging.py └── utils ├── azd └── hooks │ ├── postdeploy.ps1 │ ├── postdeploy.sh │ ├── postprovision.ps1 │ ├── postprovision.sh │ ├── preprovision.ps1 │ └── preprovision.sh ├── data ├── cases │ ├── 001 │ │ ├── a │ │ │ ├── doctor_notes │ │ │ │ └── 01_a_notes.pdf │ │ │ ├── imaging │ │ │ │ └── 01_a_imaging.pdf │ │ │ ├── labs │ │ │ │ └── 01_a_labs.pdf │ │ │ ├── pa_form │ │ │ │ └── 01_a_form.pdf │ │ │ └── results.json │ │ └── b │ │ │ ├── doctor_notes │ │ │ └── 01_b_notes.pdf │ │ │ ├── imaging │ │ │ └── 01_b_imaging.pdf │ │ │ ├── labs │ │ │ └── 
01_b_labs.pdf │ │ │ ├── pa_form │ │ │ └── 01_b_form.pdf │ │ │ └── results.json │ ├── 002 │ │ ├── a │ │ │ ├── doctor_notes │ │ │ │ └── 002_a (note).pdf │ │ │ ├── imaging │ │ │ │ └── 002_a (imaging).pdf │ │ │ ├── labs │ │ │ │ └── 002_a (labs) .pdf │ │ │ ├── pa_form │ │ │ │ └── 002_a (form).pdf │ │ │ └── results.json │ │ └── b │ │ │ ├── doctor_notes │ │ │ └── 002_b (note).pdf │ │ │ ├── imaging │ │ │ └── 002_b (imaging).pdf │ │ │ ├── labs │ │ │ └── 002_b (labs).pdf │ │ │ ├── pa_form │ │ │ └── 002_b (form).pdf │ │ │ └── results.json │ ├── 003 │ │ ├── a │ │ │ ├── doctor_notes │ │ │ │ └── 003_a (note) .pdf │ │ │ ├── labs │ │ │ │ └── 003_a (labs).pdf │ │ │ ├── pa_form │ │ │ │ └── 003_a (form).pdf │ │ │ └── results.json │ │ └── b │ │ │ ├── doctor_notes │ │ │ └── 003_b (note) .pdf │ │ │ ├── labs │ │ │ └── 003_b (labs) .pdf │ │ │ ├── pa_form │ │ │ └── 003_b (form).pdf │ │ │ └── results.json │ ├── 004 │ │ ├── a │ │ │ ├── doctor_notes │ │ │ │ └── 004_a (note).pdf │ │ │ ├── pa_form │ │ │ │ └── 004_a (form).pdf │ │ │ └── results.json │ │ └── b │ │ │ ├── doctor_notes │ │ │ └── 004_b (note).pdf │ │ │ ├── pa_form │ │ │ └── 004_b (form).pdf │ │ │ └── results.json │ ├── 005 │ │ ├── a │ │ │ ├── doctor_notes │ │ │ │ └── 005_a (note) .pdf │ │ │ ├── imaging │ │ │ │ └── 005_a (imaging).pdf │ │ │ ├── labs │ │ │ │ └── 005_a (labs).pdf │ │ │ ├── pa_form │ │ │ │ └── 005_a (form).pdf │ │ │ └── results.json │ │ └── b │ │ │ ├── doctor_notes │ │ │ └── 005_b (note).pdf │ │ │ ├── imaging │ │ │ └── 005_b (imaging).pdf │ │ │ ├── labs │ │ │ └── 005_b (labs).pdf │ │ │ ├── pa_form │ │ │ └── 005_b (form).pdf │ │ │ └── results.json │ ├── ground_truth.json │ └── policies │ │ ├── 001.pdf │ │ ├── 002.pdf │ │ ├── 003.pdf │ │ ├── 004.pdf │ │ └── 005.pdf └── pdfs │ ├── 001_a │ ├── 01_a_form │ │ ├── page_1.jpeg │ │ ├── page_1.png │ │ ├── page_2.jpeg │ │ └── page_2.png │ ├── 01_a_imaging │ │ ├── page_1.jpeg │ │ └── page_1.png │ ├── 01_a_labs │ │ ├── page_1.jpeg │ │ ├── page_1.png │ │ ├── page_2.jpeg │ │ ├── 
page_2.png │ │ ├── page_3.jpeg │ │ └── page_3.png │ └── 01_a_notes │ │ ├── page_1.jpeg │ │ ├── page_1.png │ │ ├── page_2.jpeg │ │ ├── page_2.png │ │ ├── page_3.jpeg │ │ └── page_3.png │ ├── 001_b │ ├── 001_inflammatory_Conditions-page-1.png │ ├── 001_inflammatory_Conditions-page-10.png │ ├── 001_inflammatory_Conditions-page-11.png │ ├── 001_inflammatory_Conditions-page-12.png │ ├── 001_inflammatory_Conditions-page-13.png │ ├── 001_inflammatory_Conditions-page-14.png │ ├── 001_inflammatory_Conditions-page-15.png │ ├── 001_inflammatory_Conditions-page-16.png │ ├── 001_inflammatory_Conditions-page-17.png │ ├── 001_inflammatory_Conditions-page-18.png │ ├── 001_inflammatory_Conditions-page-2.png │ ├── 001_inflammatory_Conditions-page-3.png │ ├── 001_inflammatory_Conditions-page-4.png │ ├── 001_inflammatory_Conditions-page-5.png │ ├── 001_inflammatory_Conditions-page-6.png │ ├── 001_inflammatory_Conditions-page-7.png │ ├── 001_inflammatory_Conditions-page-8.png │ ├── 001_inflammatory_Conditions-page-9.png │ ├── 01_a_form-page-1.png │ ├── 01_a_form-page-2.png │ ├── 01_a_imaging-page-1.png │ ├── 01_a_labs-page-1.png │ ├── 01_a_labs-page-2.png │ ├── 01_a_labs-page-3.png │ ├── 01_a_notes-page-1.png │ ├── 01_a_notes-page-2.png │ └── 01_a_notes-page-3.png │ └── 001_b_rejected │ ├── 01_b_form-page-1.png │ ├── 01_b_form-page-2.png │ ├── 01_b_imaging-page-1.png │ ├── 01_b_labs-page-1.png │ ├── 01_b_labs-page-2.png │ ├── 01_b_labs-page-3.png │ ├── 01_b_notes-page-1.png │ └── 01_b_notes-page-2.png ├── images ├── AI_HLS_AutoAuth.jpg ├── azure_logo.png ├── deploytoazure.svg ├── diagram.png ├── diagram_latest.png ├── flow.png ├── paworflow.png ├── prior_auth.png ├── prior_auth_flow.png ├── vimeo_video.png └── visualizebutton.svg ├── llm └── readme.md ├── pylint_report └── pylint_report.txt └── scripts ├── README.md ├── cleanup.sh ├── create-application.sh ├── deploy.sh └── generate-bicep-documentation.ps1 /.dockerignore: 
-------------------------------------------------------------------------------- 1 | # Ignore node_modules directory 2 | node_modules 3 | 4 | # Ignore Python virtual environments 5 | venv 6 | env 7 | __pycache__ 8 | paprocessing_env 9 | 10 | # Ignore Git files 11 | .git 12 | .gitignore 13 | 14 | # Ignore Docker files 15 | .dockerignore 16 | Dockerfile 17 | # Ignore other unnecessary files and directories 18 | *.log 19 | *.tmp 20 | *.bak 21 | *.swp 22 | *.swo 23 | *.DS_Store 24 | -------------------------------------------------------------------------------- /.env.sample: -------------------------------------------------------------------------------- 1 | AZURE_OPENAI_KEY= 2 | AZURE_OPENAI_ENDPOINT= 3 | AZURE_OPENAI_API_VERSION= 4 | AZURE_OPENAI_EMBEDDING_DEPLOYMENT= 5 | AZURE_OPENAI_CHAT_DEPLOYMENT_ID= 6 | AZURE_OPENAI_EMBEDDING_DIMENSIONS= 7 | AZURE_OPENAI_CHAT_DEPLOYMENT_01= 8 | # AZURE_OPENAI_API_VERSION_01= 9 | 10 | AZURE_SEARCH_SERVICE_NAME= 11 | AZURE_SEARCH_INDEX_NAME= 12 | AZURE_AI_SEARCH_ADMIN_KEY= 13 | AZURE_AI_SEARCH_SERVICE_ENDPOINT= 14 | 15 | AZURE_STORAGE_ACCOUNT_KEY= 16 | AZURE_BLOB_CONTAINER_NAME= 17 | AZURE_STORAGE_ACCOUNT_NAME= 18 | AZURE_STORAGE_CONNECTION_STRING= 19 | 20 | AZURE_AI_SERVICES_KEY= 21 | 22 | #TODO: cleanup these vars 23 | # AZURE_COSMOS_DB_ENDPOINT= 24 | # AZURE_COSMOS_DB_KEY= 25 | AZURE_COSMOS_DB_DATABASE_NAME= 26 | AZURE_COSMOS_DB_COLLECTION_NAME= 27 | AZURE_COSMOS_CONNECTION_STRING= 28 | 29 | # Azure Document Intelligence API Configuration 30 | AZURE_DOCUMENT_INTELLIGENCE_ENDPOINT= 31 | AZURE_DOCUMENT_INTELLIGENCE_KEY= 32 | APPLICATIONINSIGHTS_CONNECTION_STRING= 33 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Ensure shell scripts use LF line endings 2 | *.sh text eol=lf 3 | 4 | # Ensure Python files use LF line endings 5 | *.py text eol=lf 6 | 7 | # Ensure YAML files use LF line endings 8 
| *.yaml text eol=lf 9 | *.yml text eol=lf 10 | 11 | # Ensure Dockerfiles use LF line endings 12 | Dockerfile text eol=lf 13 | 14 | # Ensure JSON files use LF line endings 15 | *.json text eol=lf 16 | 17 | # Ensure Markdown files use LF line endings 18 | *.md text eol=lf 19 | 20 | # Ensure PowerShell scripts use CRLF line endings 21 | *.ps1 text eol=crlf 22 | -------------------------------------------------------------------------------- /.github/issue_template/bug_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Issue Report 3 | about: Create a report to address issues and enhance software projects, Azure AI services, or LLMs 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Description** 11 | Please provide a detailed description of the issue you have encountered in our software project, Azure AI service, or LLM. 12 | 13 | **Reproduction Steps** 14 | Please outline the steps necessary to reproduce the issue: 15 | 1. ... 16 | 2. ... 17 | 3. ... 18 | 4. ... 19 | 20 | **Expected Outcome** 21 | Describe what you expected to occur when using the software project, Azure AI service, or LLM. 22 | 23 | **Screenshots or Code Snippets (if applicable)** 24 | Include any relevant screenshots, code snippets, or error messages that can help clarify the issue. 25 | 26 | **Environment Information** 27 | 28 | *Software/Application/Model Information:* 29 | - Name/Version: 30 | - Framework/Library Used: 31 | - Azure AI Service Used (if applicable): 32 | - LLM Used (if applicable): 33 | 34 | *Execution Environment (if applicable):* 35 | - Operating System: 36 | - Python Version: 37 | - GPU/CPU (if relevant): 38 | 39 | **Additional Context** 40 | Feel free to provide any additional context or information that might assist in understanding and resolving the issue. 
41 | -------------------------------------------------------------------------------- /.github/issue_template/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Propose an enhancement or new feature related to the software project or Azure AI services 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Problem Description** 11 | Please provide a clear and concise description of the problem or limitation you've encountered or identified in the software project or Azure AI service. Explain why this feature is needed. 12 | 13 | **Proposed Solution** 14 | Describe in detail the solution or feature you would like to see implemented in the project. Be specific about its functionality and how it would address the problem or enhance the project or Azure AI service. 15 | 16 | **Alternative Approaches** 17 | If you have considered any alternative solutions or features, please briefly describe them here. 18 | 19 | **Impact on Software Components or Azure AI Services** 20 | Indicate which components of the software project or Azure AI services your feature request may impact and how it might affect each one. For example, changes to data processing, UI/UX, performance, or integration with Azure AI services. 21 | 22 | **Additional Context** 23 | Provide any extra context, examples, or screenshots that can help in understanding and evaluating your feature request. 24 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Project Enhancement Request 2 | 3 | ## Description 4 | 5 | This request aims to introduce changes that enhance our software project with a focus on Azure AI services. Below is a summary of the changes, related issues, and the importance of this enhancement for our project. 
Additionally, any dependencies required for this change are mentioned. 6 | 7 | Fixes #(issue) 8 | 9 | ## Type of Change 10 | 11 | Please select the relevant option(s) and delete those that are not applicable: 12 | 13 | - [ ] Bug fix (non-breaking change that resolves an issue) 14 | - [ ] New feature (non-breaking change that introduces functionality) 15 | - [ ] Breaking change (fix or feature causing existing functionality to break) 16 | - [ ] Azure AI service integration (Specify: Cognitive Services, Machine Learning, etc.) 17 | - [ ] Code optimization or technical debt resolution 18 | - [ ] Documentation update required for the changes 19 | 20 | # Checklist 21 | 22 | - [ ] I have performed a self-review of my code. 23 | - [ ] I have successfully executed and passed all new and existing tests and checks relevant to the project. 24 | - [ ] I have updated the documentation if necessary. 25 | - [ ] I have tested the changes in an environment representative of the deployment environment. 26 | - [ ] New and existing unit tests pass locally with my changes. 27 | - [ ] I have opened an Issue reporting a Bug, New Feature, Breaking Change, and/or Documentation Update (https://docs.github.com/en/issues/tracking-your-work-with-issues/creating-an-issue). 28 | 29 | ## Azure AI Service Integration Details (if applicable): 30 | 31 | - [ ] I have integrated a new Azure AI service. 32 | - [ ] I have updated the configuration or usage of an existing Azure AI service. 33 | - [ ] The integration has been tested and documented. 34 | - [ ] Changes to the integration are backwards-compatible with previous versions. 35 | 36 | ## Context 37 | 38 | Provide additional context on the changes you are proposing, the problem they address, and their significance for the project. 
39 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # See https://pre-commit.com for more information 2 | # See https://pre-commit.com/hooks.html for more hooks 3 | repos: 4 | ## Commenting out code linting pre-commit hooks for now 5 | - repo: local 6 | hooks: 7 | - id: flake8 8 | name: flake8 9 | stages: [commit] 10 | language: system 11 | entry: flake8 12 | types: [python] 13 | exclude: setup.py 14 | args: 15 | - "--extend-ignore" 16 | - "E501" # Ignore line length 17 | - "--config" 18 | - "pyproject.toml" 19 | - "--max-line-length=124" # Adjust the line length as needed 20 | 21 | # - repo: https://github.com/pre-commit/mirrors-mypy 22 | # rev: 'v0.910' # Use the ref you want to point at 23 | # hooks: 24 | # - id: mypy 25 | # args: ["--config", "pyproject.toml"] 26 | 27 | # Specific tools with their configurations 28 | - repo: https://github.com/charliermarsh/ruff-pre-commit 29 | rev: "v0.0.275" 30 | hooks: 31 | - id: ruff 32 | args: [--fix, --exit-non-zero-on-fix] 33 | 34 | # - repo: https://github.com/pre-commit/mirrors-isort 35 | # rev: "v5.10.1" 36 | # hooks: 37 | # - id: isort 38 | # args: ["--profile", "black"] 39 | 40 | - repo: https://github.com/psf/black 41 | rev: 25.1.0 42 | hooks: 43 | - id: black 44 | args: ["--config", "pyproject.toml"] 45 | 46 | - repo: https://github.com/econchick/interrogate 47 | rev: 1.7.0 48 | hooks: 49 | - id: interrogate 50 | args: [src, -v, -i, --fail-under=70, '-e', '**/__init__.py'] 51 | pass_filenames: false 52 | 53 | - repo: https://github.com/PyCQA/bandit 54 | rev: '1.7.5' 55 | hooks: 56 | - id: bandit 57 | args: ['-q', '-c', 'pyproject.toml'] 58 | additional_dependencies: [".[toml]"] 59 | 60 | 61 | - repo: https://github.com/pre-commit/pre-commit-hooks 62 | rev: v4.4.0 63 | hooks: 64 | - id: check-yaml 65 | - id: debug-statements 66 | - id: end-of-file-fixer 67 | - id: 
trailing-whitespace 68 | 69 | # Bicep/Infra-specific Hooks 70 | # - repo: https://github.com/Azure4DevOps/check-azure-bicep 71 | # rev: v0.5.9 # ${LATEST_SHA_OR_VERSION} 72 | # hooks: 73 | # # - id: check-azure-bicep 74 | # - id: check-azure-bicep-format 75 | 76 | # - repo: local 77 | # hooks: 78 | # - id: check-azure-bicep-docs 79 | # name: bicep docs generate 80 | # language: script 81 | # require_serial: true 82 | # entry: ./docs/scripts/GenerateBicepDocs.ps1 83 | # files: .*\.bicep$ 84 | -------------------------------------------------------------------------------- /.vscode/.settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.formatOnSave": true, 3 | "editor.formatOnPaste": true, 4 | "files.trimTrailingWhitespace": true, 5 | "files.autoSave": "onFocusChange", 6 | "git.autofetch": true, 7 | "[jsonc]": { 8 | "editor.defaultFormatter": "vscode.json-language-features" 9 | }, 10 | "[python]": { 11 | "editor.defaultFormatter": "ms-python.black-formatter" 12 | }, 13 | "python.defaultInterpreterPath": "/usr/local/bin/python", 14 | "python.formatting.provider": "black", 15 | "python.testing.unittestEnabled": false, 16 | "python.testing.pytestEnabled": true, 17 | "pylint.args": [ 18 | "--rcfile=pyproject.toml" 19 | ], 20 | "black-formatter.args": [ 21 | "--config=pyproject.toml" 22 | ], 23 | "flake8.args": [ 24 | "--toml-config=pyproject.toml" 25 | ], 26 | "isort.args": [ 27 | "--settings-path=pyproject.toml" 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.1.0", 3 | "configurations": [ 4 | { 5 | "name": "Python: Streamlit (Frontend)", 6 | "type": "python", 7 | "request": "launch", 8 | "module": "streamlit", 9 | "args": [ 10 | "run", 11 | "${workspaceFolder}/app/frontend/streamlit/Home.py", 12 | ], 13 | }, 14 | { 15 | "name": 
"Python: API (Backend)", 16 | "type": "python", 17 | "request": "launch", 18 | "args": [ 19 | "run", 20 | "${workspaceFolder}/app/backend/app.py", 21 | ], 22 | }, 23 | { 24 | "name": "Python: Debug Tests", 25 | "type": "python", 26 | "request": "launch", 27 | "program": "${file}", 28 | "purpose": [ 29 | "debug-test" 30 | ], 31 | "console": "integratedTerminal", 32 | "env": { 33 | // "PYTEST_ADDOPTS": "--no-cov -n0 --dist no" 34 | } 35 | } 36 | ] 37 | } 38 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.terminal.activateEnvironment": true, 3 | "python.testing.pytestArgs": [ 4 | "tests" 5 | ], 6 | "python.testing.unittestEnabled": false, 7 | "python.testing.pytestEnabled": true 8 | } 9 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below.
8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 
36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). 40 | 41 | 42 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | # Support 2 | 3 | ## How to file issues and get help 4 | 5 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing 6 | issues before filing new issues to avoid duplicates. For new issues, file your bug or 7 | feature request as a new Issue in the [GitHub repository](https://github.com/Azure-Samples/autoauth-solution-accelerator/issues). 8 | 9 | For help and questions about using the AutoAuth Solution Accelerator, you can: 10 | 11 | - Check the [project documentation](https://github.com/Azure-Samples/autoauth-solution-accelerator/blob/main/README.md) 12 | - Post questions tagged with `autoauth-solution-accelerator` on [Stack Overflow](https://stackoverflow.com/questions/tagged/azure-solution-accelerator) 13 | - Open a new [discussion](https://github.com/Azure-Samples/autoauth-solution-accelerator/discussions) in the GitHub repository 14 | 15 | The project maintainers will respond to issues and discussions as time allows. 16 | 17 | ## Microsoft Support Policy 18 | 19 | The AutoAuth Solution Accelerator is an open-source project released as a sample/accelerator. It is not covered by the standard Microsoft support policy. Support is provided through the GitHub repository issues and community channels listed above. 20 | 21 | This project is maintained by Microsoft, but there is no guaranteed response time for issues or feature requests. For production deployments requiring guaranteed support, please consider official Azure services with support plans. 
22 | -------------------------------------------------------------------------------- /app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/app/__init__.py -------------------------------------------------------------------------------- /app/backend/Dockerfile: -------------------------------------------------------------------------------- 1 | #TODO: Test Dockerfile 2 | 3 | # Use an official Python runtime as a parent image 4 | FROM python:3.11-slim-buster 5 | 6 | # Set the working directory in the container to /app 7 | WORKDIR /app 8 | 9 | # Add the current directory contents into the container at /app 10 | COPY src /app/src 11 | COPY app /app/app 12 | COPY requirements.txt /app/requirements.txt 13 | 14 | # Install any needed packages specified in requirements.txt 15 | RUN pip install --no-cache-dir -r requirements.txt 16 | 17 | # Run the backend entry point when the container launches. NOTE(review): exec-form CMD takes the script as a single path argument; the original split the path into separate words ("app" "backend" ...), so python3 tried to run a script named "app". The repo has no app/backend/paprocessing/app.py; app/backend/main.py is the visible backend entry point -- confirm. 18 | CMD ["python3", "app/backend/main.py"] 19 | -------------------------------------------------------------------------------- /app/backend/api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/app/backend/api/__init__.py -------------------------------------------------------------------------------- /app/backend/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/app/backend/core/__init__.py -------------------------------------------------------------------------------- /app/backend/core/config.py: -------------------------------------------------------------------------------- 1 | from
typing import Any 2 | 3 | from pydantic import MongoDsn, model_validator 4 | from pydantic_settings import BaseSettings, SettingsConfigDict 5 | 6 | from .constants import Environment 7 | 8 | 9 | class CustomBaseSettings(BaseSettings): 10 | model_config = SettingsConfigDict( 11 | env_file=".env", env_file_encoding="utf-8", extra="ignore" 12 | ) 13 | 14 | 15 | class Config(CustomBaseSettings): 16 | # 17 | DATABASE_URL: MongoDsn 18 | DATABASE_ASYNC_URL: MongoDsn 19 | AZURE_COSMOS_DB_DATABASE_NAME: str = "" 20 | 21 | ENVIRONMENT: Environment = Environment.LOCAL 22 | 23 | SENTRY_DSN: str | None = None 24 | 25 | CORS_ORIGINS: list[str] = ["*"] 26 | CORS_ORIGINS_REGEX: str | None = None 27 | CORS_HEADERS: list[str] = ["*"] 28 | 29 | APP_VERSION: str = "0.1" 30 | 31 | SECRET_KEY: str 32 | 33 | @model_validator(mode="after") 34 | def validate_sentry_non_local(self) -> "Config": 35 | if self.ENVIRONMENT.is_deployed and not self.SENTRY_DSN: 36 | raise ValueError("Sentry is not set") 37 | # 38 | return self 39 | 40 | 41 | settings = Config() 42 | # 43 | # print("settings.DATABASE_URL:", settings.DATABASE_URL) 44 | # print("settings.DATABASE_ASYNC_URL:", settings.DATABASE_ASYNC_URL) 45 | 46 | app_configs: dict[str, Any] = {"title": "Prior Auth API App"} 47 | 48 | if settings.ENVIRONMENT.is_deployed: 49 | app_configs["root_path"] = f"/v{settings.APP_VERSION}" 50 | 51 | if not settings.ENVIRONMENT.is_debug: 52 | app_configs["openapi_url"] = None # hide docs 53 | -------------------------------------------------------------------------------- /app/backend/core/constants.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class Environment(str, Enum): 5 | LOCAL = "LOCAL" 6 | TESTING = "TESTING" 7 | STAGING = "STAGING" 8 | PRODUCTION = "PRODUCTION" 9 | 10 | @property 11 | def is_debug(self): 12 | return self in (self.LOCAL, self.STAGING, self.TESTING) 13 | 14 | @property 15 | def is_testing(self): 16 | return 
self == self.TESTING 17 | 18 | @property 19 | def is_deployed(self) -> bool: 20 | return self in (self.STAGING, self.PRODUCTION) 21 | -------------------------------------------------------------------------------- /app/backend/core/database.py: -------------------------------------------------------------------------------- 1 | # 2 | import motor.motor_asyncio 3 | from fastapi_users_db_beanie import BeanieBaseUserDocument, BeanieUserDatabase 4 | 5 | # 6 | from .config import settings 7 | 8 | DATABASE_URL = str(settings.DATABASE_ASYNC_URL) 9 | DATABASE_NAME = settings.AZURE_COSMOS_DB_DATABASE_NAME 10 | 11 | client = motor.motor_asyncio.AsyncIOMotorClient( 12 | DATABASE_URL, uuidRepresentation="standard" 13 | ) 14 | 15 | db = client[DATABASE_NAME] 16 | 17 | 18 | class User(BeanieBaseUserDocument): 19 | pass 20 | 21 | 22 | async def get_user_db(): 23 | yield BeanieUserDatabase(User) 24 | -------------------------------------------------------------------------------- /app/backend/core/exceptions.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from fastapi import HTTPException, status 4 | 5 | 6 | class DetailedHTTPException(HTTPException): 7 | STATUS_CODE = status.HTTP_500_INTERNAL_SERVER_ERROR 8 | DETAIL = "Server error" 9 | 10 | def __init__(self, **kwargs: dict[str, Any]) -> None: 11 | super().__init__(status_code=self.STATUS_CODE, detail=self.DETAIL, **kwargs) 12 | 13 | 14 | class PermissionDenied(DetailedHTTPException): 15 | STATUS_CODE = status.HTTP_403_FORBIDDEN 16 | DETAIL = "Permission denied" 17 | 18 | 19 | class NotFound(DetailedHTTPException): 20 | STATUS_CODE = status.HTTP_404_NOT_FOUND 21 | 22 | 23 | class BadRequest(DetailedHTTPException): 24 | STATUS_CODE = status.HTTP_400_BAD_REQUEST 25 | DETAIL = "Bad Request" 26 | 27 | 28 | class NotAuthenticated(DetailedHTTPException): 29 | STATUS_CODE = status.HTTP_401_UNAUTHORIZED 30 | DETAIL = "User not authenticated" 31 | 32 | def 
__init__(self) -> None: 33 | super().__init__(headers={"WWW-Authenticate": "Bearer"}) 34 | -------------------------------------------------------------------------------- /app/backend/core/security.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/app/backend/core/security.py -------------------------------------------------------------------------------- /app/backend/paprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/app/backend/paprocessing/__init__.py -------------------------------------------------------------------------------- /app/backend/paprocessing/models.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | 3 | from pydantic import BaseModel 4 | 5 | 6 | # 7 | class PAProcessingRequest(BaseModel): 8 | """ 9 | Request body format for initiating the PA Processing Pipeline. 
10 | """ 11 | 12 | uploaded_files: List[str] 13 | use_o1: bool = False 14 | caseId: Optional[str] = None 15 | streamlit: bool = False 16 | -------------------------------------------------------------------------------- /app/backend/paprocessing/test.http: -------------------------------------------------------------------------------- 1 | POST http://localhost:8000/process_pa HTTP/1.1 2 | Host: localhost:8000 3 | Content-Type: application/json 4 | 5 | { 6 | "uploaded_files": [ 7 | "https://storageaeastusfactory.blob.core.windows.net/pre-auth-policies/pa_proccesing_runs/eac70ae6/raw_uploaded_files/01_b_notes.pdf", 8 | "https://storageaeastusfactory.blob.core.windows.net/pre-auth-policies/pa_proccesing_runs/eac70ae6/raw_uploaded_files/01_b_imaging.pdf", 9 | "https://storageaeastusfactory.blob.core.windows.net/pre-auth-policies/pa_proccesing_runs/eac70ae6/raw_uploaded_files/01_b_labs.pdf", 10 | "https://storageaeastusfactory.blob.core.windows.net/pre-auth-policies/pa_proccesing_runs/eac70ae6/raw_uploaded_files/01_b_form.pdf" 11 | ], 12 | "caseId": "CASE-ABC123", 13 | "use_o1": true, 14 | "streamlit": false 15 | } 16 | -------------------------------------------------------------------------------- /app/backend/users/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/app/backend/users/__init__.py -------------------------------------------------------------------------------- /app/backend/users/auth.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/app/backend/users/auth.py -------------------------------------------------------------------------------- /app/backend/users/manager.py: 
-------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from beanie import PydanticObjectId 4 | from fastapi import Depends, Request 5 | from fastapi_users import BaseUserManager, FastAPIUsers 6 | from fastapi_users.authentication import ( 7 | AuthenticationBackend, 8 | BearerTransport, 9 | JWTStrategy, 10 | ) 11 | 12 | # 13 | from fastapi_users_db_beanie import BeanieUserDatabase, ObjectIDIDMixin 14 | 15 | # 16 | from ..core.config import settings 17 | from ..core.database import User, get_user_db 18 | 19 | SECRET = settings.SECRET_KEY 20 | 21 | 22 | class UserManager(ObjectIDIDMixin, BaseUserManager[User, PydanticObjectId]): 23 | reset_password_token_secret = SECRET 24 | verification_token_secret = SECRET 25 | 26 | async def on_after_register(self, user: User, request: Optional[Request] = None): 27 | print(f"User {user.id} has registered.") 28 | 29 | async def on_after_forgot_password( 30 | self, user: User, token: str, request: Optional[Request] = None 31 | ): 32 | print(f"User {user.id} has forgot their password. Reset token: {token}") 33 | 34 | async def on_after_request_verify( 35 | self, user: User, token: str, request: Optional[Request] = None 36 | ): 37 | print(f"Verification requested for user {user.id}. 
Verification token: {token}") 38 | 39 | 40 | async def get_user_manager(user_db: BeanieUserDatabase = Depends(get_user_db)): 41 | yield UserManager(user_db) 42 | 43 | 44 | bearer_transport = BearerTransport(tokenUrl="auth/jwt/login") 45 | 46 | 47 | def get_jwt_strategy() -> JWTStrategy: 48 | return JWTStrategy(secret=SECRET, lifetime_seconds=3600) 49 | 50 | 51 | auth_backend = AuthenticationBackend( 52 | name="jwt", 53 | transport=bearer_transport, 54 | get_strategy=get_jwt_strategy, 55 | ) 56 | 57 | fastapi_users = FastAPIUsers[User, PydanticObjectId](get_user_manager, [auth_backend]) 58 | 59 | current_active_user = fastapi_users.current_user(active=True) 60 | -------------------------------------------------------------------------------- /app/backend/users/schemas.py: -------------------------------------------------------------------------------- 1 | # 2 | from beanie import PydanticObjectId 3 | from fastapi_users import schemas 4 | 5 | 6 | class UserRead(schemas.BaseUser[PydanticObjectId]): 7 | pass 8 | 9 | 10 | class UserCreate(schemas.BaseUserCreate): 11 | pass 12 | 13 | 14 | class UserUpdate(schemas.BaseUserUpdate): 15 | pass 16 | -------------------------------------------------------------------------------- /app/frontend/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11-slim-bullseye 2 | 3 | WORKDIR /app 4 | 5 | ENV PYTHONPATH "${PYTHONPATH}:/app" 6 | 7 | COPY src /app/src 8 | COPY app /app/app 9 | COPY evals /app/evals 10 | COPY utils/images/ /app/utils/images/ 11 | COPY utils/data/cases/policies /app/utils/data/cases/policies 12 | COPY requirements.txt /app/requirements.txt 13 | 14 | COPY ["utils/data/cases/003/b/doctor_notes/003_b (note) .pdf", "/app/utils/data/cases/003/b/doctor_notes/003_b (note) .pdf"] 15 | COPY ["utils/data/cases/003/b/labs/003_b (labs) .pdf", "/app/utils/data/cases/003/b/labs/003_b (labs) .pdf"] 16 | COPY ["utils/data/cases/003/b/pa_form/003_b (form).pdf", 
"/app/utils/data/cases/003/b/pa_form/003_b (form).pdf"] 17 | COPY ["utils/data/cases/003/a/doctor_notes/003_a (note) .pdf", "/app/utils/data/cases/003/a/doctor_notes/003_a (note) .pdf"] 18 | COPY ["utils/data/cases/003/a/labs/003_a (labs).pdf", "/app/utils/data/cases/003/a/labs/003_a (labs).pdf"] 19 | COPY ["utils/data/cases/003/a/pa_form/003_a (form).pdf", "/app/utils/data/cases/003/a/pa_form/003_a (form).pdf"] 20 | 21 | # Build Streamlit 22 | RUN apt-get update && \ 23 | apt-get -y install gcc mono-mcs python3-dev && \ 24 | rm -rf /var/lib/apt/lists/* 25 | RUN mkdir -p /app/.streamlit 26 | COPY app/frontend/settings/config.toml /app/.streamlit/config.toml 27 | RUN pip install --no-cache-dir -r requirements.txt 28 | ENV STREAMLIT_CONFIG_FILE=/app/.streamlit/config.toml 29 | 30 | EXPOSE 8501 31 | 32 | ENTRYPOINT ["streamlit", "run"] 33 | CMD ["app/frontend/streamlit/Home.py"] 34 | -------------------------------------------------------------------------------- /app/frontend/components/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/app/frontend/components/__init__.py -------------------------------------------------------------------------------- /app/frontend/components/managers.py: -------------------------------------------------------------------------------- 1 | from src.aoai.aoai_helper import AzureOpenAIManager 2 | 3 | 4 | def create_azure_openai_manager( 5 | api_key: str, azure_endpoint: str, api_version: str, deployment_id: str 6 | ) -> AzureOpenAIManager: 7 | """ 8 | Create a new Azure OpenAI Manager instance. 9 | 10 | :param api_key: API key for Azure OpenAI. 11 | :param azure_endpoint: API endpoint for Azure OpenAI. 12 | :param api_version: API version for Azure OpenAI. 13 | :param deployment_id: Deployment ID for Azure OpenAI. 14 | :return: AzureOpenAIManager instance. 
15 | """ 16 | return AzureOpenAIManager( 17 | api_key=api_key, 18 | azure_endpoint=azure_endpoint, 19 | api_version=api_version, 20 | chat_model_name=deployment_id, 21 | ) 22 | -------------------------------------------------------------------------------- /app/frontend/settings/config.toml: -------------------------------------------------------------------------------- 1 | [theme] 2 | primaryColor="#1F77B4" # A professional blue tone 3 | backgroundColor="#F7F9FC" # A light, clean background color 4 | secondaryBackgroundColor="#E9EEF3" # A subtle secondary background color 5 | textColor="#2C3E50" # A dark, professional text color 6 | font="sans serif" 7 | 8 | [server] 9 | enableXsrfProtection=false 10 | -------------------------------------------------------------------------------- /app/frontend/streamlit/pages/👩🏽‍⚕️ Provider.py: -------------------------------------------------------------------------------- 1 | import streamlit as st 2 | 3 | st.warning( 4 | "🚧 We're working on it! This feature will be available soon for providers. Stay tuned!" 
5 | ) 6 | -------------------------------------------------------------------------------- /azure.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/Azure/azure-dev/main/schemas/v1.0/azure.yaml.json 2 | 3 | name: gbb-ai-hls-factory-prior-auth 4 | metadata: 5 | template: priorAuth-aca@1.11.0 6 | services: 7 | frontend: # <-- This is the name of your service (tag:azdServiceName in bicep config) 8 | project: app/frontend 9 | host: containerapp 10 | language: python 11 | docker: 12 | path: Dockerfile 13 | context: ../../ 14 | remoteBuild: true 15 | hooks: 16 | postdeploy: 17 | posix: 18 | shell: sh 19 | interactive: true 20 | continueOnError: false 21 | run: ../../utils/azd/hooks/postdeploy.sh 22 | 23 | windows: 24 | shell: pwsh 25 | interactive: true 26 | continueOnError: false 27 | run: ..\..\utils\azd\hooks\postdeploy.ps1 28 | 29 | # backend: # <-- This is the name of your service (tag:azdServiceName in bicep config) 30 | # project: app/backend 31 | # host: containerapp 32 | # language: python 33 | # docker: 34 | # path: Dockerfile 35 | # context: ../../ 36 | # remoteBuild: true 37 | 38 | # pipeline: 39 | # provider: github 40 | hooks: 41 | preprovision: 42 | posix: 43 | shell: sh 44 | interactive: true 45 | continueOnError: false 46 | run: | 47 | echo "Setting executable permissions on hook scripts..." 
48 | chmod +x ./utils/azd/hooks/*.sh 49 | utils/azd/hooks/preprovision.sh 50 | windows: 51 | shell: pwsh 52 | interactive: true 53 | continueOnError: false 54 | run: utils\azd\hooks\preprovision.ps1 55 | 56 | postprovision: 57 | posix: 58 | shell: sh 59 | interactive: true 60 | continueOnError: false 61 | run: utils/azd/hooks/postprovision.sh 62 | 63 | windows: 64 | shell: pwsh 65 | interactive: true 66 | continueOnError: false 67 | run: utils\azd\hooks\postprovision.ps1 68 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # For local development purposes only. Use the azure.yaml to configure the e2e deployment 2 | services: 3 | frontend: 4 | build: 5 | context: . 6 | dockerfile: ./app/frontend/Dockerfile 7 | ports: 8 | - "8081:8501" 9 | env_file: 10 | - .env 11 | backend: 12 | build: 13 | context: . 14 | dockerfile: ./app/backend/Dockerfile 15 | ports: 16 | - "9000:9000" 17 | env_file: 18 | - .env 19 | -------------------------------------------------------------------------------- /docs/CODEOWNERS: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/CODEOWNERS -------------------------------------------------------------------------------- /docs/_config.yaml: -------------------------------------------------------------------------------- 1 | title: "AutoAuth Documentation" 2 | description: "Streamlining Prior Authorization with Azure AI" 3 | remote_theme: just-the-docs/just-the-docs 4 | markdown: kramdown 5 | -------------------------------------------------------------------------------- /docs/architecture.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: "Technical Architecture" 4 | nav_order: 5 5 | 
--- 6 | 7 | # ⚙️ Technical Architecture 8 | 9 | AutoAuth’s architecture orchestrates multiple Azure services and techniques to seamlessly process requests, retrieve policies, and generate recommendations. 10 | 11 | ![Architecture](./images/diagram_latest.png) 12 | 13 | ## High-Level Overview 14 | 15 | - **Knowledge Base Construction**: Establish a centralized repository of Prior Authorization (PA) policies and guidelines to streamline the decision-making process. 16 | - **Unstructured Clinical Data Processing**: Extract and structure patient-specific clinical information from raw data sources using advanced Large Language Model (LLM)-based techniques. 17 | - **Agentic RAG**: Identify the most relevant PA policy for a clinical case using a multi-layered retrieval approach, supported by Azure AI Search and LLM as the formulator and judge, guided by agentic pipelines. 18 | - **Claims Processing**: Leverage Azure OpenAI to evaluate policies against clinical inputs, cross-reference patient, physician, and clinical details against policy criteria. Classify the Prior Authorization (PA) claim as Approved, Denied, or Needs More Information, providing clear, evidence-based explanations and policy references to support a comprehensive human final determination. 
19 | 20 | ## Components 21 | 22 | | Component | Role | 23 | |---------------------------|-------------------------------------| 24 | | Azure OpenAI | LLMs for reasoning and decision logic | 25 | | Azure Cognitive Search | Hybrid retrieval (semantic + keyword) | 26 | | Document Intelligence | OCR and data extraction | 27 | | Azure Storage | Document storage | 28 | | Azure Bicep Templates | Automated infrastructure deployment | 29 | | Semantic Kernel | Agentic orchestration of retrieval and reasoning | 30 | | Azure AI Studio (LLMOps) | Model evaluation, prompt optimization, and performance logging | 31 | 32 | This integrated design enables a dynamic, AI-driven PA process that is scalable, auditable, and ready for continuous improvement. 33 | -------------------------------------------------------------------------------- /docs/images/ai_foundry_evaluations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/ai_foundry_evaluations.png -------------------------------------------------------------------------------- /docs/images/azp_help_containerjoblogs_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/azp_help_containerjoblogs_1.png -------------------------------------------------------------------------------- /docs/images/azp_help_containerlogs_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/azp_help_containerlogs_1.png -------------------------------------------------------------------------------- /docs/images/azp_help_containerlogs_2.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/azp_help_containerlogs_2.png -------------------------------------------------------------------------------- /docs/images/azp_help_deployments.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/azp_help_deployments.png -------------------------------------------------------------------------------- /docs/images/azure_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/azure_logo.png -------------------------------------------------------------------------------- /docs/images/diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/diagram.png -------------------------------------------------------------------------------- /docs/images/diagram_latest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/diagram_latest.png -------------------------------------------------------------------------------- /docs/images/flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/flow.png 
-------------------------------------------------------------------------------- /docs/images/paworflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/paworflow.png -------------------------------------------------------------------------------- /docs/images/prior_auth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/prior_auth.png -------------------------------------------------------------------------------- /docs/images/prior_auth_flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/docs/images/prior_auth_flow.png -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: "Home" 4 | nav_order: 1 5 | description: "Streamlining Prior Authorization with Azure AI" 6 | permalink: / 7 | --- 8 | 9 | # 🚀 Streamlining Prior Authorization with Azure AI 10 | 11 | Welcome to the documentation for **AutoAuth**, a solution designed to modernize and streamline the Prior Authorization (PA) process in healthcare using Azure AI services, advanced LLM reasoning, and agentic workflows. 12 | 13 | ## Why AutoAuth? 14 | 15 | - **Faster Approvals**: Shrink the PA cycle from days to hours. 16 | - **Cost Savings**: Reduce operational expenses tied to manual review processes. 17 | - **Patient-Centric**: Improve patient outcomes by minimizing delays in critical treatments. 18 | 19 | ## Getting Started 20 | 21 | 1. 
**[Challenges & Opportunities](challenges.md)**: Understand the pain points and real-world impact of inefficient PA. 22 | 2. **[AutoAuth Solution](solution.md)**: Learn how our approach uses AI, LLMOps, and research-backed prompts to solve these challenges. 23 | 3. **[Technical Architecture](architecture.md)**: Dive into the components and data flow powering AutoAuth. 24 | 4. **[Deployment Guide](deployment.md)**: Follow step-by-step instructions for one-click deployment into your Azure environment. 25 | 4.1 **[Azure Developer CLI (azd) Deployment Guide](azd_deployment.md)**: Follow step-by-step instructions for end-to-end infra and app deployment using azd. 26 | 27 | **Ready to begin?** Jump into [Challenges & Opportunities](challenges.md) or [Deploy Now](deployment.md) or [Deploy Now with AZD](azd_deployment.md). 28 | -------------------------------------------------------------------------------- /environment.yaml: -------------------------------------------------------------------------------- 1 | name: pa-ai-env 2 | channels: 3 | - conda-forge 4 | - defaults 5 | - jupyter 6 | dependencies: 7 | - python=3.10 8 | - pip 9 | - ipykernel 10 | - pip: 11 | - -r requirements.txt 12 | - -r requirements-codequality.txt 13 | -------------------------------------------------------------------------------- /infra/main.parameters.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "enableEasyAuth": { 6 | "value": "${ENABLE_EASY_AUTH:-}" 7 | }, 8 | "disableIngress": { 9 | "value": "${DISABLE_INGRESS:-}" 10 | }, 11 | "environmentName": { 12 | "value": "${AZURE_ENV_NAME:-defaultEnv}" 13 | }, 14 | "location": { 15 | "value": "${AZURE_LOCATION:-eastus2}" 16 | }, 17 | "priorAuthName": { 18 | "value": "${PRIOR_AUTH_NAME:-autoauth}" 19 | }, 20 | "frontendExists": { 21 | "value": 
"${SERVICE_FRONTEND_RESOURCE_EXISTS:-false}" 22 | }, 23 | "backendExists": { 24 | "value": "${SERVICE_BACKEND_RESOURCE_EXISTS:-false}" 25 | }, 26 | "acrContainerImage": { 27 | "value": "${ACR_CONTAINER_IMAGE:-}" 28 | }, 29 | "GIT_HASH": { 30 | "value": "${GIT_HASH:-}" 31 | }, 32 | "openAiApiVersion": { 33 | "value": "2025-01-01-preview" 34 | }, 35 | "chatModel": { 36 | "value": { 37 | "name": "gpt-4o", 38 | "version": "2024-08-06", 39 | "skuName": "Standard", 40 | "capacity": 50 41 | } 42 | }, 43 | "reasoningModel": { 44 | "value": { 45 | "name": "o1", 46 | "version": "2024-12-17", 47 | "skuName": "GlobalStandard", 48 | "capacity": 50 49 | } 50 | }, 51 | "embeddingModel": { 52 | "value": { 53 | "name": "text-embedding-3-large", 54 | "version": "1", 55 | "skuName": "Standard", 56 | "capacity": 55 57 | } 58 | }, 59 | "embeddingModelDimension": { 60 | "value": "${EMBEDDING_MODEL_DIMENSION:-3072}" 61 | }, 62 | "storageBlobContainerName": { 63 | "value": "${STORAGE_BLOB_CONTAINER_NAME:-default}" 64 | } 65 | , 66 | "tags": { 67 | "value": { 68 | "project": "${PROJECT_NAME:-priorAuth}", 69 | "owner": "${OWNER_NAME:-contoso}", 70 | "costCenter": "${COST_CENTER:-00000}" 71 | } 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /infra/modules/ai/docintelligence.bicep: -------------------------------------------------------------------------------- 1 | @description('Azure region of the deployment') 2 | param location string 3 | 4 | @description('Tags to add to the resources') 5 | param tags object 6 | 7 | @description('Name of the AI service') 8 | param aiServiceName string 9 | 10 | @allowed([ 11 | 'S0' 12 | ]) 13 | @description('AI service SKU') 14 | param aiServiceSkuName string = 'S0' 15 | 16 | var aiServiceNameCleaned = replace(aiServiceName, '-', '') 17 | 18 | resource aiServices 'Microsoft.CognitiveServices/accounts@2023-05-01' = { 19 | name: aiServiceNameCleaned 20 | location: location 21 | sku: { 22 | name: 
aiServiceSkuName 23 | } 24 | kind: 'AIServices' 25 | properties: { 26 | publicNetworkAccess: 'Enabled' 27 | disableLocalAuth: false 28 | apiProperties: { 29 | } 30 | customSubDomainName: aiServiceNameCleaned 31 | } 32 | identity: { 33 | type: 'SystemAssigned' 34 | } 35 | tags: tags 36 | } 37 | 38 | var docAiKeys = aiServices.listKeys() 39 | 40 | output aiServicesId string = aiServices.id 41 | output aiServicesEndpoint string = aiServices.properties.endpoint 42 | output aiServiceDocIntelligenceEndpoint string = 'https://${aiServiceNameCleaned}.cognitiveservices.azure.com' 43 | output aiServicesName string = aiServices.name 44 | output aiServicesPrincipalId string = aiServices.identity.principalId 45 | output aiServicesKey string = docAiKeys.key1 46 | -------------------------------------------------------------------------------- /infra/modules/ai/docintelligence.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", 3 | "contentVersion": "1.0.0.0", 4 | "metadata": { 5 | "_generator": { 6 | "name": "bicep", 7 | "version": "0.33.93.31351", 8 | "templateHash": "9467403434055282454" 9 | } 10 | }, 11 | "parameters": { 12 | "location": { 13 | "type": "string", 14 | "metadata": { 15 | "description": "Azure region of the deployment" 16 | } 17 | }, 18 | "tags": { 19 | "type": "object", 20 | "metadata": { 21 | "description": "Tags to add to the resources" 22 | } 23 | }, 24 | "aiServiceName": { 25 | "type": "string", 26 | "metadata": { 27 | "description": "Name of the AI service" 28 | } 29 | }, 30 | "aiServiceSkuName": { 31 | "type": "string", 32 | "defaultValue": "S0", 33 | "allowedValues": [ 34 | "S0" 35 | ], 36 | "metadata": { 37 | "description": "AI service SKU" 38 | } 39 | } 40 | }, 41 | "variables": { 42 | "aiServiceNameCleaned": "[replace(parameters('aiServiceName'), '-', '')]" 43 | }, 44 | "resources": [ 45 | { 46 | "type": 
"Microsoft.CognitiveServices/accounts", 47 | "apiVersion": "2023-05-01", 48 | "name": "[variables('aiServiceNameCleaned')]", 49 | "location": "[parameters('location')]", 50 | "sku": { 51 | "name": "[parameters('aiServiceSkuName')]" 52 | }, 53 | "kind": "AIServices", 54 | "properties": { 55 | "publicNetworkAccess": "Enabled", 56 | "disableLocalAuth": false, 57 | "apiProperties": { 58 | "statisticsEnabled": false 59 | }, 60 | "customSubDomainName": "[variables('aiServiceNameCleaned')]" 61 | }, 62 | "identity": { 63 | "type": "SystemAssigned" 64 | }, 65 | "tags": "[parameters('tags')]" 66 | } 67 | ], 68 | "outputs": { 69 | "aiServicesId": { 70 | "type": "string", 71 | "value": "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServiceNameCleaned'))]" 72 | }, 73 | "aiServicesEndpoint": { 74 | "type": "string", 75 | "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServiceNameCleaned')), '2023-05-01').endpoint]" 76 | }, 77 | "aiServiceDocIntelligenceEndpoint": { 78 | "type": "string", 79 | "value": "[format('https://{0}.cognitiveservices.azure.com', variables('aiServiceNameCleaned'))]" 80 | }, 81 | "aiServicesName": { 82 | "type": "string", 83 | "value": "[variables('aiServiceNameCleaned')]" 84 | }, 85 | "aiServicesPrincipalId": { 86 | "type": "string", 87 | "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServiceNameCleaned')), '2023-05-01', 'full').identity.principalId]" 88 | }, 89 | "aiServicesKey": { 90 | "type": "string", 91 | "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServiceNameCleaned')), '2023-05-01').key1]" 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /infra/modules/ai/mais.bicep: -------------------------------------------------------------------------------- 1 | @description('Azure region of the deployment') 2 | param location string 3 | 4 | @description('Tags to add to 
the resources') 5 | param tags object 6 | 7 | @description('Name of the AI service') 8 | param aiServiceName string 9 | 10 | @allowed([ 11 | 'S0' 12 | ]) 13 | @description('AI service SKU') 14 | param aiServiceSkuName string = 'S0' 15 | 16 | var aiServiceNameCleaned = replace(aiServiceName, '-', '') 17 | 18 | resource aiServices 'Microsoft.CognitiveServices/accounts@2023-05-01' = { 19 | name: aiServiceNameCleaned 20 | location: location 21 | sku: { 22 | name: aiServiceSkuName 23 | } 24 | kind: 'CognitiveServices' 25 | properties: { 26 | publicNetworkAccess: 'Enabled' 27 | disableLocalAuth: false 28 | apiProperties: { 29 | } 30 | customSubDomainName: aiServiceNameCleaned 31 | } 32 | identity: { 33 | type: 'SystemAssigned' 34 | } 35 | tags: tags 36 | } 37 | 38 | var maisKey = aiServices.listKeys() 39 | 40 | output aiServicesId string = aiServices.id 41 | output aiServicesEndpoint string = aiServices.properties.endpoint 42 | output aiServicesName string = aiServices.name 43 | output aiServicesPrincipalId string = aiServices.identity.principalId 44 | output aiServicesPrimaryKey string = maisKey.key1 45 | -------------------------------------------------------------------------------- /infra/modules/ai/mais.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", 3 | "contentVersion": "1.0.0.0", 4 | "metadata": { 5 | "_generator": { 6 | "name": "bicep", 7 | "version": "0.33.93.31351", 8 | "templateHash": "14264198139338104743" 9 | } 10 | }, 11 | "parameters": { 12 | "location": { 13 | "type": "string", 14 | "metadata": { 15 | "description": "Azure region of the deployment" 16 | } 17 | }, 18 | "tags": { 19 | "type": "object", 20 | "metadata": { 21 | "description": "Tags to add to the resources" 22 | } 23 | }, 24 | "aiServiceName": { 25 | "type": "string", 26 | "metadata": { 27 | "description": "Name of the AI service" 28 | } 29 | }, 30 | 
"aiServiceSkuName": { 31 | "type": "string", 32 | "defaultValue": "S0", 33 | "allowedValues": [ 34 | "S0" 35 | ], 36 | "metadata": { 37 | "description": "AI service SKU" 38 | } 39 | } 40 | }, 41 | "variables": { 42 | "aiServiceNameCleaned": "[replace(parameters('aiServiceName'), '-', '')]" 43 | }, 44 | "resources": [ 45 | { 46 | "type": "Microsoft.CognitiveServices/accounts", 47 | "apiVersion": "2023-05-01", 48 | "name": "[variables('aiServiceNameCleaned')]", 49 | "location": "[parameters('location')]", 50 | "sku": { 51 | "name": "[parameters('aiServiceSkuName')]" 52 | }, 53 | "kind": "CognitiveServices", 54 | "properties": { 55 | "publicNetworkAccess": "Enabled", 56 | "disableLocalAuth": false, 57 | "apiProperties": { 58 | "statisticsEnabled": false 59 | }, 60 | "customSubDomainName": "[variables('aiServiceNameCleaned')]" 61 | }, 62 | "identity": { 63 | "type": "SystemAssigned" 64 | }, 65 | "tags": "[parameters('tags')]" 66 | } 67 | ], 68 | "outputs": { 69 | "aiServicesId": { 70 | "type": "string", 71 | "value": "[resourceId('Microsoft.CognitiveServices/accounts', variables('aiServiceNameCleaned'))]" 72 | }, 73 | "aiServicesEndpoint": { 74 | "type": "string", 75 | "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServiceNameCleaned')), '2023-05-01').endpoint]" 76 | }, 77 | "aiServicesName": { 78 | "type": "string", 79 | "value": "[variables('aiServiceNameCleaned')]" 80 | }, 81 | "aiServicesPrincipalId": { 82 | "type": "string", 83 | "value": "[reference(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServiceNameCleaned')), '2023-05-01', 'full').identity.principalId]" 84 | }, 85 | "aiServicesPrimaryKey": { 86 | "type": "string", 87 | "value": "[listKeys(resourceId('Microsoft.CognitiveServices/accounts', variables('aiServiceNameCleaned')), '2023-05-01').key1]" 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /infra/modules/ai/openai.bicep: 
-------------------------------------------------------------------------------- 1 | @description('Azure region of the deployment') 2 | param location string 3 | 4 | @description('Tags to add to the resources') 5 | param tags object 6 | 7 | @description('Name of the AI service') 8 | param aiServiceName string 9 | 10 | @allowed([ 11 | 'S0' 12 | ]) 13 | @description('AI service SKU') 14 | param aiServiceSkuName string = 'S0' 15 | 16 | @description('List of chat completion models to be deployed to the OpenAI account.') 17 | param chatCompletionModels array = [ 18 | { 19 | name: 'gpt-4o' 20 | version: '2024-08-06' 21 | skuName: 'GlobalStandard' 22 | capacity: 25 23 | } 24 | ] 25 | 26 | @description('List of embedding models to be deployed to the OpenAI account.') 27 | param embeddingModel object = { 28 | name: 'text-embedding-ada-002' 29 | version: '2' 30 | skuName: 'Standard' 31 | capacity: 250 32 | } 33 | 34 | var combinedModels = concat(chatCompletionModels, [embeddingModel]) 35 | 36 | var aiServiceNameCleaned = replace(aiServiceName, '-', '') 37 | 38 | resource openAiService 'Microsoft.CognitiveServices/accounts@2023-05-01' = { 39 | name: aiServiceNameCleaned 40 | location: location 41 | sku: { 42 | name: aiServiceSkuName 43 | } 44 | kind: 'OpenAI' 45 | properties: { 46 | publicNetworkAccess: 'Enabled' 47 | disableLocalAuth: false 48 | apiProperties: { 49 | } 50 | customSubDomainName: aiServiceNameCleaned 51 | } 52 | identity: { 53 | type: 'SystemAssigned' 54 | } 55 | tags: tags 56 | } 57 | 58 | @batchSize(1) 59 | resource modelDeployments 'Microsoft.CognitiveServices/accounts/deployments@2024-06-01-preview' = [for (model, i) in combinedModels: { 60 | parent: openAiService 61 | name: '${model.name}' 62 | sku: { 63 | name: model.skuName 64 | capacity: model.capacity 65 | } 66 | properties: { 67 | model: { 68 | format: 'OpenAI' 69 | name: model.name 70 | version: model.version 71 | } 72 | currentCapacity: model.capacity 73 | } 74 | }] 75 | 76 | var openAiKeys = 
openAiService.listKeys() 77 | 78 | output aiServicesId string = openAiService.id 79 | output aiServicesEndpoint string = endsWith(openAiService.properties.endpoint, '/') 80 | ? substring(openAiService.properties.endpoint, 0, length(openAiService.properties.endpoint) - 1) 81 | : openAiService.properties.endpoint 82 | output aiServicesName string = openAiService.name 83 | output aiServicesPrincipalId string = openAiService.identity.principalId 84 | output aiServicesKey string = openAiKeys.key1 85 | -------------------------------------------------------------------------------- /infra/modules/compute/README.md: -------------------------------------------------------------------------------- 1 | # Documentation for the Bicep modules in this directory 2 | 3 | 4 | 5 | ## Table of Contents 6 | - [fetch-container-image](#fetch-container-image) 7 | - [Parameters](#parameters) 8 | - [Outputs](#outputs) 9 | - [Snippets](#snippets) 10 | 11 | # fetch-container-image 12 | 13 | ## Parameters 14 | 15 | Parameter name | Required | Description 16 | -------------- | -------- | ----------- 17 | exists | Yes | 18 | name | Yes | 19 | 20 | ### exists 21 | 22 | ![Parameter Setting](https://img.shields.io/badge/parameter-required-orange?style=flat-square) 23 | 24 | 25 | 26 | ### name 27 | 28 | ![Parameter Setting](https://img.shields.io/badge/parameter-required-orange?style=flat-square) 29 | 30 | 31 | 32 | ## Outputs 33 | 34 | Name | Type | Description 35 | ---- | ---- | ----------- 36 | containers | array | 37 | 38 | ## Snippets 39 | 40 | ### Parameter file 41 | 42 | ```json 43 | { 44 | "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", 45 | "contentVersion": "1.0.0.0", 46 | "metadata": { 47 | "template": "infra/modules/compute/fetch-container-image.json" 48 | }, 49 | "parameters": { 50 | "exists": { 51 | "value": null 52 | }, 53 | "name": { 54 | "value": "" 55 | } 56 | } 57 | } 58 | ``` 59 | 
-------------------------------------------------------------------------------- /infra/modules/compute/fetch-container-image.bicep: -------------------------------------------------------------------------------- 1 | param exists bool 2 | param name string 3 | 4 | resource existingApp 'Microsoft.App/containerApps@2023-05-02-preview' existing = if (exists) { 5 | name: name 6 | } 7 | 8 | output containers array = exists ? existingApp.properties.template.containers : [] 9 | -------------------------------------------------------------------------------- /infra/modules/compute/fetch-container-image.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", 3 | "contentVersion": "1.0.0.0", 4 | "metadata": { 5 | "_generator": { 6 | "name": "bicep", 7 | "version": "0.33.93.31351", 8 | "templateHash": "9643843979796575983" 9 | } 10 | }, 11 | "parameters": { 12 | "exists": { 13 | "type": "bool" 14 | }, 15 | "name": { 16 | "type": "string" 17 | } 18 | }, 19 | "resources": [], 20 | "outputs": { 21 | "containers": { 22 | "type": "array", 23 | "value": "[if(parameters('exists'), reference(resourceId('Microsoft.App/containerApps', parameters('name')), '2023-05-02-preview').template.containers, createArray())]" 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /infra/modules/data/cosmos-mongo-ru.bicep: -------------------------------------------------------------------------------- 1 | @description('Azure region of the deployment') 2 | param location string 3 | 4 | @description('Tags to add to the resources') 5 | param tags object 6 | 7 | @description('Name of the Mongo cluster') 8 | param aiServiceName string 9 | 10 | // @description('Administrator username for the Mongo cluster') 11 | // param cosmosAdministratorUsername string = 'adminuser' // Default username, can be overridden 12 | 13 | 
// @description('Admin password for the cluster') 14 | // @secure() 15 | // param cosmosAdministratorPassword string 16 | 17 | var mongoNameCleaned = replace(aiServiceName, '-', '') 18 | 19 | resource mongoCluster 'Microsoft.DocumentDB/databaseAccounts@2024-11-15' = { 20 | name: mongoNameCleaned 21 | location: location 22 | tags: tags 23 | kind: 'MongoDB' 24 | properties: { 25 | databaseAccountOfferType: 'Standard' 26 | locations: [ 27 | { 28 | locationName: location 29 | failoverPriority: 0 30 | } 31 | ] 32 | // administrator: { 33 | // userName: cosmosAdministratorUsername 34 | // password: cosmosAdministratorPassword 35 | // } 36 | apiProperties: { 37 | serverVersion: '7.0' 38 | } 39 | 40 | capabilities: [ 41 | { 42 | name: 'EnableMongo' 43 | } 44 | ] 45 | consistencyPolicy: { 46 | defaultConsistencyLevel: 'Session' 47 | } 48 | publicNetworkAccess: 'Enabled' 49 | } 50 | } 51 | 52 | 53 | 54 | output mongoClusterId string = mongoCluster.id 55 | output mongoClusterName string = mongoCluster.name 56 | 57 | // Variable: Encoded Cosmos Administrator Password 58 | // var encodedPassword = uriComponent(cosmosAdministratorPassword) 59 | 60 | // output mongoConnectionString string = 'mongodb+srv://${cosmosAdministratorUsername}:${encodedPassword}@${mongoNameCleaned}.mongo.cosmos.azure.com/?tls=true&authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000' 61 | output mongoConnectionString string = mongoCluster.listConnectionStrings().connectionStrings[0].connectionString 62 | -------------------------------------------------------------------------------- /infra/modules/data/cosmos-mongo-ru.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", 3 | "contentVersion": "1.0.0.0", 4 | "metadata": { 5 | "_generator": { 6 | "name": "bicep", 7 | "version": "0.33.93.31351", 8 | "templateHash": "2865037101428017356" 9 | } 10 | }, 11 | 
"parameters": { 12 | "location": { 13 | "type": "string", 14 | "metadata": { 15 | "description": "Azure region of the deployment" 16 | } 17 | }, 18 | "tags": { 19 | "type": "object", 20 | "metadata": { 21 | "description": "Tags to add to the resources" 22 | } 23 | }, 24 | "aiServiceName": { 25 | "type": "string", 26 | "metadata": { 27 | "description": "Name of the Mongo cluster" 28 | } 29 | } 30 | }, 31 | "variables": { 32 | "mongoNameCleaned": "[replace(parameters('aiServiceName'), '-', '')]" 33 | }, 34 | "resources": [ 35 | { 36 | "type": "Microsoft.DocumentDB/databaseAccounts", 37 | "apiVersion": "2024-11-15", 38 | "name": "[variables('mongoNameCleaned')]", 39 | "location": "[parameters('location')]", 40 | "tags": "[parameters('tags')]", 41 | "kind": "MongoDB", 42 | "properties": { 43 | "databaseAccountOfferType": "Standard", 44 | "locations": [ 45 | { 46 | "locationName": "[parameters('location')]", 47 | "failoverPriority": 0 48 | } 49 | ], 50 | "apiProperties": { 51 | "serverVersion": "7.0" 52 | }, 53 | "capabilities": [ 54 | { 55 | "name": "EnableMongo" 56 | } 57 | ], 58 | "consistencyPolicy": { 59 | "defaultConsistencyLevel": "Session" 60 | }, 61 | "publicNetworkAccess": "Enabled" 62 | } 63 | } 64 | ], 65 | "outputs": { 66 | "mongoClusterId": { 67 | "type": "string", 68 | "value": "[resourceId('Microsoft.DocumentDB/databaseAccounts', variables('mongoNameCleaned'))]" 69 | }, 70 | "mongoClusterName": { 71 | "type": "string", 72 | "value": "[variables('mongoNameCleaned')]" 73 | }, 74 | "mongoConnectionString": { 75 | "type": "string", 76 | "value": "[listConnectionStrings(resourceId('Microsoft.DocumentDB/databaseAccounts', variables('mongoNameCleaned')), '2024-11-15').connectionStrings[0].connectionString]" 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /infra/modules/data/cosmos-mongo.bicep: -------------------------------------------------------------------------------- 1 | @description('Azure region 
of the deployment') 2 | param location string 3 | 4 | @description('Tags to add to the resources') 5 | param tags object 6 | 7 | @description('Name of the Mongo cluster') 8 | param aiServiceName string 9 | 10 | @description('Administrator username for the Mongo cluster') 11 | param cosmosAdministratorUsername string = 'adminuser' // Default username, can be overridden 12 | 13 | @description('Admin password for the cluster') 14 | @secure() 15 | param cosmosAdministratorPassword string 16 | 17 | var mongoNameCleaned = replace(aiServiceName, '-', '') 18 | 19 | resource mongoCluster 'Microsoft.DocumentDB/mongoClusters@2024-07-01' = { 20 | name: mongoNameCleaned 21 | location: location 22 | tags: tags 23 | properties: { 24 | administrator: { 25 | userName: cosmosAdministratorUsername 26 | password: cosmosAdministratorPassword 27 | } 28 | serverVersion: '7.0' 29 | compute: { 30 | tier: 'M30' 31 | } 32 | storage: { 33 | sizeGb: 32 34 | } 35 | sharding: { 36 | shardCount: 1 37 | } 38 | highAvailability: { 39 | targetMode: 'Disabled' 40 | } 41 | publicNetworkAccess: 'Enabled' 42 | previewFeatures: [ 43 | 'GeoReplicas' 44 | ] 45 | } 46 | } 47 | 48 | output mongoClusterId string = mongoCluster.id 49 | output mongoClusterName string = mongoCluster.name 50 | 51 | // Variable: Encoded Cosmos Administrator Password 52 | var encodedPassword = uriComponent(cosmosAdministratorPassword) 53 | 54 | output mongoConnectionString string = 'mongodb+srv://${cosmosAdministratorUsername}:${encodedPassword}@${mongoNameCleaned}.mongocluster.cosmos.azure.com/?tls=true&authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000' 55 | -------------------------------------------------------------------------------- /infra/modules/data/cosmos-mongo.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", 3 | "contentVersion": "1.0.0.0", 4 | "metadata": { 5 | 
"_generator": { 6 | "name": "bicep", 7 | "version": "0.33.93.31351", 8 | "templateHash": "12353008776147836803" 9 | } 10 | }, 11 | "parameters": { 12 | "location": { 13 | "type": "string", 14 | "metadata": { 15 | "description": "Azure region of the deployment" 16 | } 17 | }, 18 | "tags": { 19 | "type": "object", 20 | "metadata": { 21 | "description": "Tags to add to the resources" 22 | } 23 | }, 24 | "aiServiceName": { 25 | "type": "string", 26 | "metadata": { 27 | "description": "Name of the Mongo cluster" 28 | } 29 | }, 30 | "cosmosAdministratorUsername": { 31 | "type": "string", 32 | "defaultValue": "adminuser", 33 | "metadata": { 34 | "description": "Administrator username for the Mongo cluster" 35 | } 36 | }, 37 | "cosmosAdministratorPassword": { 38 | "type": "securestring", 39 | "metadata": { 40 | "description": "Admin password for the cluster" 41 | } 42 | } 43 | }, 44 | "variables": { 45 | "mongoNameCleaned": "[replace(parameters('aiServiceName'), '-', '')]", 46 | "encodedPassword": "[uriComponent(parameters('cosmosAdministratorPassword'))]" 47 | }, 48 | "resources": [ 49 | { 50 | "type": "Microsoft.DocumentDB/mongoClusters", 51 | "apiVersion": "2024-07-01", 52 | "name": "[variables('mongoNameCleaned')]", 53 | "location": "[parameters('location')]", 54 | "tags": "[parameters('tags')]", 55 | "properties": { 56 | "administrator": { 57 | "userName": "[parameters('cosmosAdministratorUsername')]", 58 | "password": "[parameters('cosmosAdministratorPassword')]" 59 | }, 60 | "serverVersion": "7.0", 61 | "compute": { 62 | "tier": "M30" 63 | }, 64 | "storage": { 65 | "sizeGb": 32 66 | }, 67 | "sharding": { 68 | "shardCount": 1 69 | }, 70 | "highAvailability": { 71 | "targetMode": "Disabled" 72 | }, 73 | "publicNetworkAccess": "Enabled", 74 | "previewFeatures": [ 75 | "GeoReplicas" 76 | ] 77 | } 78 | } 79 | ], 80 | "outputs": { 81 | "mongoClusterId": { 82 | "type": "string", 83 | "value": "[resourceId('Microsoft.DocumentDB/mongoClusters', 
variables('mongoNameCleaned'))]" 84 | }, 85 | "mongoClusterName": { 86 | "type": "string", 87 | "value": "[variables('mongoNameCleaned')]" 88 | }, 89 | "mongoConnectionString": { 90 | "type": "string", 91 | "value": "[format('mongodb+srv://{0}:{1}@{2}.mongocluster.cosmos.azure.com/?tls=true&authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000', parameters('cosmosAdministratorUsername'), variables('encodedPassword'), variables('mongoNameCleaned'))]" 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /infra/modules/data/search.bicep: -------------------------------------------------------------------------------- 1 | @description('Azure region of the deployment') 2 | param location string 3 | 4 | @description('Tags to add to the resources') 5 | param tags object 6 | 7 | @description('Name of the Search service') 8 | param aiServiceName string 9 | 10 | @allowed([ 11 | 'basic' 12 | ]) 13 | @description('Search service SKU') 14 | param aiServiceSkuName string = 'basic' 15 | 16 | var searchNameCleaned = replace(aiServiceName, '-', '') 17 | 18 | resource searchService 'Microsoft.Search/searchServices@2024-06-01-preview' = { 19 | name: searchNameCleaned 20 | location: location 21 | sku: { 22 | name: aiServiceSkuName 23 | } 24 | identity: { 25 | type: 'SystemAssigned' 26 | } 27 | properties: { 28 | publicNetworkAccess: 'Enabled' 29 | hostingMode: 'default' 30 | } 31 | tags: tags 32 | } 33 | 34 | var searchKeys = searchService.listAdminKeys() 35 | 36 | output searchServiceIdentityPrincipalId string = searchService.identity.principalId 37 | output searchServiceId string = searchService.id 38 | output searchServiceName string = searchService.name 39 | output searchServicePrimaryKey string = searchKeys.primaryKey 40 | output searchServiceEndpoint string = 'https://${searchService.name}.search.windows.net' 41 | -------------------------------------------------------------------------------- 
/infra/modules/data/search.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", 3 | "contentVersion": "1.0.0.0", 4 | "metadata": { 5 | "_generator": { 6 | "name": "bicep", 7 | "version": "0.33.93.31351", 8 | "templateHash": "9778162961165438260" 9 | } 10 | }, 11 | "parameters": { 12 | "location": { 13 | "type": "string", 14 | "metadata": { 15 | "description": "Azure region of the deployment" 16 | } 17 | }, 18 | "tags": { 19 | "type": "object", 20 | "metadata": { 21 | "description": "Tags to add to the resources" 22 | } 23 | }, 24 | "aiServiceName": { 25 | "type": "string", 26 | "metadata": { 27 | "description": "Name of the Search service" 28 | } 29 | }, 30 | "aiServiceSkuName": { 31 | "type": "string", 32 | "defaultValue": "basic", 33 | "allowedValues": [ 34 | "basic" 35 | ], 36 | "metadata": { 37 | "description": "Search service SKU" 38 | } 39 | } 40 | }, 41 | "variables": { 42 | "searchNameCleaned": "[replace(parameters('aiServiceName'), '-', '')]" 43 | }, 44 | "resources": [ 45 | { 46 | "type": "Microsoft.Search/searchServices", 47 | "apiVersion": "2024-06-01-preview", 48 | "name": "[variables('searchNameCleaned')]", 49 | "location": "[parameters('location')]", 50 | "sku": { 51 | "name": "[parameters('aiServiceSkuName')]" 52 | }, 53 | "identity": { 54 | "type": "SystemAssigned" 55 | }, 56 | "properties": { 57 | "publicNetworkAccess": "Enabled", 58 | "hostingMode": "default" 59 | }, 60 | "tags": "[parameters('tags')]" 61 | } 62 | ], 63 | "outputs": { 64 | "searchServiceIdentityPrincipalId": { 65 | "type": "string", 66 | "value": "[reference(resourceId('Microsoft.Search/searchServices', variables('searchNameCleaned')), '2024-06-01-preview', 'full').identity.principalId]" 67 | }, 68 | "searchServiceId": { 69 | "type": "string", 70 | "value": "[resourceId('Microsoft.Search/searchServices', variables('searchNameCleaned'))]" 71 | 
}, 72 | "searchServiceName": { 73 | "type": "string", 74 | "value": "[variables('searchNameCleaned')]" 75 | }, 76 | "searchServicePrimaryKey": { 77 | "type": "string", 78 | "value": "[listAdminKeys(resourceId('Microsoft.Search/searchServices', variables('searchNameCleaned')), '2024-06-01-preview').primaryKey]" 79 | }, 80 | "searchServiceEndpoint": { 81 | "type": "string", 82 | "value": "[format('https://{0}.search.windows.net', variables('searchNameCleaned'))]" 83 | } 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /infra/modules/data/storage.bicep: -------------------------------------------------------------------------------- 1 | @description('Azure region of the deployment') 2 | param location string 3 | 4 | @description('Tags to add to the resources') 5 | param tags object 6 | 7 | @description('Name of the Storage account') 8 | param aiServiceName string 9 | 10 | @allowed([ 11 | 'Standard_LRS' 12 | 'Standard_GRS' 13 | 'Standard_RAGRS' 14 | 'Standard_ZRS' 15 | ]) 16 | @description('Storage SKU') 17 | param aiServiceSkuName string = 'Standard_LRS' 18 | 19 | var storageNameCleaned = toLower(replace(aiServiceName, '-', '')) 20 | // Storage account names must be between 3-24 chars and alphanumeric lowercase 21 | // Ensure the passed name meets these constraints or implement truncation if needed. 
22 | 23 | resource storageAccount 'Microsoft.Storage/storageAccounts@2023-05-01' = { 24 | name: take(storageNameCleaned, 24) 25 | location: location 26 | sku: { 27 | name: aiServiceSkuName 28 | } 29 | kind: 'StorageV2' 30 | properties: { 31 | minimumTlsVersion: 'TLS1_2' 32 | publicNetworkAccess: 'Enabled' 33 | supportsHttpsTrafficOnly: true 34 | defaultToOAuthAuthentication: false 35 | allowSharedKeyAccess: true 36 | } 37 | tags: tags 38 | } 39 | 40 | // Blob service resource under the storage account 41 | resource blobService 'Microsoft.Storage/storageAccounts/blobServices@2023-01-01' = { 42 | parent: storageAccount 43 | name: 'default' 44 | properties: { 45 | deleteRetentionPolicy: { 46 | enabled: true 47 | days: 7 48 | } 49 | } 50 | } 51 | 52 | // Default container under the blob service 53 | resource defaultContainer 'Microsoft.Storage/storageAccounts/blobServices/containers@2023-01-01' = { 54 | parent: blobService 55 | name: 'default' 56 | properties: {} 57 | } 58 | 59 | resource preAuthPoliciesContainer 'Microsoft.Storage/storageAccounts/blobServices/containers@2023-01-01' = { 60 | parent: blobService 61 | name: 'pre-auth-policies' 62 | properties: {} 63 | } 64 | 65 | var storageKeys = storageAccount.listKeys() 66 | var primaryKey = storageKeys.keys[0].value 67 | var storageAccountPrimaryConnectionString = 'DefaultEndpointsProtocol=https;AccountName=${storageAccount.name};AccountKey=${primaryKey};EndpointSuffix=core.windows.net' 68 | 69 | output storageAccountId string = storageAccount.id 70 | output storageAccountName string = storageAccount.name 71 | output storageAccountPrimaryKey string = primaryKey 72 | output storageAccountPrimaryConnectionString string = storageAccountPrimaryConnectionString 73 | -------------------------------------------------------------------------------- /infra/modules/security/aca.bicep: -------------------------------------------------------------------------------- 1 | // resource acaIdentity 
'Microsoft.ManagedIdentity/userAssignedIdentities@2023-01-31' = { 2 | // name: identityName 3 | // location: location 4 | // } 5 | 6 | output identityPrincipalId string = '48275e14-c183-4fd6-8081-4301098be101' // client ID <-- use this one 7 | // output identityPrincipalId string = '1a351ceb-aa4d-4b1d-91c5-d95f593bb3ae' // object (principal) ID 8 | output name string = 'pe-fe-priorauth-liocb4m' 9 | output uri string = 'https://pe-fe-priorauth-liocb4m.blackgrass-c8d1222d.eastus2.azurecontainerapps.io' 10 | output imageName string = 'gbb-ai-hls-factory-prior-auth/frontend-local-dev:azd-deploy-1741105197' 11 | output identityResourceId string = '/subscriptions/63862159-43c8-47f7-9f6f-6c63d56b0e17/resourcegroups/rg-priorauth-eastus2-local-dev/providers/microsoft.managedidentity/userassignedidentities/uai-app-priorauth-liocb4m' 12 | -------------------------------------------------------------------------------- /infra/modules/security/aca.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", 3 | "languageVersion": "2.1-experimental", 4 | "contentVersion": "1.0.0.0", 5 | "metadata": { 6 | "_EXPERIMENTAL_WARNING": "This template uses ARM features that are experimental. Experimental features should be enabled for testing purposes only, as there are no guarantees about the quality or stability of these features. 
Do not enable these settings for any production usage, or your production environment may be subject to breaking.", 7 | "_EXPERIMENTAL_FEATURES_ENABLED": [ 8 | "Extensibility" 9 | ], 10 | "_generator": { 11 | "name": "bicep", 12 | "version": "0.33.93.31351", 13 | "templateHash": "4954823199384560641" 14 | } 15 | }, 16 | "resources": {}, 17 | "outputs": { 18 | "identityPrincipalId": { 19 | "type": "string", 20 | "value": "48275e14-c183-4fd6-8081-4301098be101" 21 | }, 22 | "name": { 23 | "type": "string", 24 | "value": "pe-fe-priorauth-liocb4m" 25 | }, 26 | "uri": { 27 | "type": "string", 28 | "value": "https://pe-fe-priorauth-liocb4m.blackgrass-c8d1222d.eastus2.azurecontainerapps.io" 29 | }, 30 | "imageName": { 31 | "type": "string", 32 | "value": "gbb-ai-hls-factory-prior-auth/frontend-local-dev:azd-deploy-1741105197" 33 | }, 34 | "identityResourceId": { 35 | "type": "string", 36 | "value": "/subscriptions/63862159-43c8-47f7-9f6f-6c63d56b0e17/resourcegroups/rg-priorauth-eastus2-local-dev/providers/microsoft.managedidentity/userassignedidentities/uai-app-priorauth-liocb4m" 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /infra/modules/security/appregistration.bicep: -------------------------------------------------------------------------------- 1 | extension microsoftGraphV1 2 | 3 | @description('Specifies the name of cloud environment to run this deployment in.') 4 | param cloudEnvironment string = environment().name 5 | 6 | // NOTE: Microsoft Graph Bicep file deployment is only supported in Public Cloud 7 | @description('Audience uris for public and national clouds') 8 | param audiences object = { 9 | AzureCloud: { 10 | uri: 'api://AzureADTokenExchange' 11 | } 12 | AzureUSGovernment: { 13 | uri: 'api://AzureADTokenExchangeUSGov' 14 | } 15 | USNat: { 16 | uri: 'api://AzureADTokenExchangeUSNat' 17 | } 18 | USSec: { 19 | uri: 'api://AzureADTokenExchangeUSSec' 20 | } 21 | AzureChinaCloud: { 22 | uri: 
'api://AzureADTokenExchangeChina' 23 | } 24 | } 25 | 26 | @description('Specifies the ID of the user-assigned managed identity.') 27 | param webAppIdentityId string 28 | 29 | @description('Specifies the unique name for the client application.') 30 | param clientAppName string 31 | 32 | @description('Specifies the display name for the client application') 33 | param clientAppDisplayName string 34 | 35 | @description('Specifies the scopes that the client application requires.') 36 | param clientAppScopes array = ['User.Read', 'offline_access', 'openid', 'profile'] 37 | 38 | param serviceManagementReference string = '' 39 | 40 | param issuer string 41 | 42 | param webAppEndpoint string 43 | 44 | // Get the MS Graph Service Principal based on its application ID: 45 | // https://learn.microsoft.com/troubleshoot/entra/entra-id/governance/verify-first-party-apps-sign-in 46 | var msGraphAppId = '00000003-0000-0000-c000-000000000000' 47 | resource msGraphSP 'Microsoft.Graph/servicePrincipals@v1.0' existing = { 48 | appId: msGraphAppId 49 | } 50 | 51 | var graphScopes = msGraphSP.oauth2PermissionScopes 52 | resource clientApp 'Microsoft.Graph/applications@v1.0' = { 53 | uniqueName: clientAppName 54 | displayName: clientAppDisplayName 55 | signInAudience: 'AzureADMyOrg' 56 | serviceManagementReference: empty(serviceManagementReference) ? 
null : serviceManagementReference 57 | web: { 58 | redirectUris: [ 59 | 'http://localhost:8051/.auth/login/aad/callback' // NOTE(review): Streamlit's default port is 8501 — confirm 8051 is intended and matches the local dev server 60 | '${webAppEndpoint}/.auth/login/aad/callback' 61 | ] 62 | implicitGrantSettings: { enableIdTokenIssuance: true } 63 | } 64 | requiredResourceAccess: [ 65 | { 66 | resourceAppId: msGraphAppId 67 | resourceAccess: [ 68 | for (scope, i) in clientAppScopes: { 69 | id: filter(graphScopes, graphScopes => graphScopes.value == scope)[0].id 70 | type: 'Scope' 71 | } 72 | ] 73 | } 74 | ] 75 | 76 | resource clientAppFic 'federatedIdentityCredentials@v1.0' = { 77 | name: '${clientApp.uniqueName}/miAsFic' 78 | audiences: [ 79 | audiences[cloudEnvironment].uri 80 | ] 81 | issuer: issuer 82 | subject: webAppIdentityId 83 | } 84 | } 85 | 86 | resource clientSp 'Microsoft.Graph/servicePrincipals@v1.0' = { 87 | appId: clientApp.appId 88 | } 89 | 90 | output clientAppId string = clientApp.appId 91 | output clientSpId string = clientSp.id 92 | -------------------------------------------------------------------------------- /infra/modules/security/appupdate.bicep: -------------------------------------------------------------------------------- 1 | metadata description = 'Creates an Azure Container Apps Auth Config using Microsoft Entra as Identity Provider.'
2 | 3 | @description('The name of the container apps resource within the current resource group scope') 4 | param containerAppName string 5 | 6 | @description('The client ID of the Microsoft Entra application.') 7 | param clientId string 8 | 9 | param openIdIssuer string 10 | 11 | @description('Enable token store for the Container App.') 12 | param includeTokenStore bool = false 13 | 14 | @description('The URI of the Azure Blob Storage container to be used for token storage.') 15 | param blobContainerUri string = '' 16 | @description('The resource ID of the managed identity to be used for accessing the Azure Blob Storage.') 17 | param appIdentityResourceId string = '' 18 | 19 | resource app 'Microsoft.App/containerApps@2023-05-01' existing = { 20 | name: containerAppName 21 | } 22 | 23 | resource auth 'Microsoft.App/containerApps/authConfigs@2024-10-02-preview' = { 24 | parent: app 25 | name: 'current' 26 | properties: { 27 | platform: { 28 | enabled: true 29 | } 30 | globalValidation: { 31 | redirectToProvider: 'azureactivedirectory' 32 | unauthenticatedClientAction: 'RedirectToLoginPage' 33 | } 34 | identityProviders: { 35 | azureActiveDirectory: { 36 | enabled: true 37 | registration: { 38 | clientId: clientId 39 | clientSecretSettingName: 'override-use-mi-fic-assertion-client-id' 40 | openIdIssuer: openIdIssuer 41 | } 42 | validation: { 43 | defaultAuthorizationPolicy: { 44 | allowedApplications: [ 45 | ] 46 | } 47 | } 48 | } 49 | } 50 | login: { 51 | // https://learn.microsoft.com/azure/container-apps/token-store 52 | tokenStore: { 53 | enabled: includeTokenStore 54 | azureBlobStorage: includeTokenStore ? 
{ 55 | blobContainerUri: blobContainerUri 56 | managedIdentityResourceId: appIdentityResourceId 57 | } : {} 58 | } 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /infra/modules/security/bicepconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "experimentalFeaturesEnabled": { 3 | "extensibility": true 4 | }, 5 | // specify an alias for the version of the v1.0 dynamic types package you want to use 6 | "extensions": { 7 | "microsoftGraphV1": "br:mcr.microsoft.com/bicep/extensions/microsoftgraph/v1.0:0.1.8-preview" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /notebooks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/notebooks/__init__.py -------------------------------------------------------------------------------- /notebooks/dev/testing.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "\n", 11 | "# Define the target directory\n", 12 | "target_directory = r\"C:\\Users\\pablosal\\Desktop\\gbbai-azure-ai-search-indexing\" # change your directory here\n", 13 | "\n", 14 | "# Check if the directory exists\n", 15 | "if os.path.exists(target_directory):\n", 16 | " # Change the current working directory\n", 17 | " os.chdir(target_directory)\n", 18 | " print(f\"Directory changed to {os.getcwd()}\")\n", 19 | "else:\n", 20 | " print(f\"Directory {target_directory} does not exist.\")" 21 | ] 22 | } 23 | ], 24 | "metadata": { 25 | "language_info": { 26 | "name": "python" 27 | } 28 | }, 29 | "nbformat": 4, 30 | "nbformat_minor": 2 31 | } 32 | 
-------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["flit_core >=2,<4"] 3 | build-backend = "flit_core.buildapi" 4 | 5 | [project] 6 | name = "example-project" 7 | authors = [ 8 | {name = "Pablo Salvador", email = "pablosalvadorlopez11@gmail.com"}, 9 | {name = "Marcin Jimenez", email = "marcin.jimenez@gmail.com"} 10 | ] 11 | description = "An example project demonstrating pyproject.toml" 12 | readme = "README.md" 13 | classifiers = [ 14 | "Development Status :: 3 - Alpha", 15 | "Intended Audience :: Developers", 16 | "License :: OSI Approved :: MIT License", 17 | "Programming Language :: Python :: 3.8" 18 | ] 19 | requires-python = ">=3.8,<4.0" 20 | dynamic = ["version"] 21 | 22 | [project.optional-dependencies] 23 | extras = [ 24 | "optional_package_1", 25 | "optional_package_2" 26 | ] 27 | 28 | [project.urls] 29 | homepage = "https://example.com" 30 | repository = "https://github.com/username/example-project" 31 | documentation = "https://example.com/docs" 32 | 33 | [tool.flit.metadata] 34 | module = "example_project" 35 | author-email = "john.doe@example.com" 36 | 37 | [tool.flit.scripts] 38 | example-script = "example_project:main" 39 | 40 | [tool.bandit] 41 | exclude_dirs = ["tests/"] 42 | tests = ["B201", "B301"] 43 | skips = ["B101", "B601"] 44 | 45 | [tool.black] 46 | line-length = 88 47 | 48 | [tool.mypy] 49 | ignore_missing_imports = true 50 | files = "src/" 51 | exclude = "utils|tests" 52 | 53 | [tool.ruff] 54 | line-length = 124 55 | ignore = ["E501"] 56 | 57 | [tool.isort] 58 | profile = "black" 59 | 60 | [tool.flake8] 61 | max-line-length = 124 62 | 63 | [tool.pytest] 64 | addopts = "-vv --cov=. 
--cov-report xml:/tmp/coverage.xml --cov-report html:/tmp/coverage_html --cov-report term-missing" 65 | log_cli = true 66 | log_cli_level = "DEBUG" 67 | norecursedirs = ".git __pycache__ docs" 68 | log_format = "%(asctime)s - %(levelname)-8s %(message)s (%(filename)s:%(funcName)s:%(lineno)d)" 69 | log_level = "info" 70 | 71 | [tool.pytest.ini_options] 72 | markers = [ 73 | "evaluation: mark test as an evaluation test", 74 | ] 75 | -------------------------------------------------------------------------------- /requirements-codequality.txt: -------------------------------------------------------------------------------- 1 | isort==5.9.3 2 | black==25.1.0 3 | flake8==3.9.2 4 | interrogate==1.4.0 5 | pre-commit==2.14.0 6 | types-requests 7 | ruff 8 | bandit 9 | pylint 10 | pytest 11 | pytest-cov 12 | black[jupyter] 13 | types-PyYAML 14 | 15 | # For Evals 16 | jq 17 | semantic_kernel 18 | azure-monitor-opentelemetry 19 | azure-ai-inference[opentelemetry] 20 | opentelemetry-exporter-otlp 21 | azure-ai-projects 22 | opentelemetry-instrumentation-httpx 23 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Streamlit and related visualization tools 2 | streamlit==1.39.0 3 | streamlit-chat==0.1.1 4 | matplotlib==3.9.2 5 | seaborn==0.13.2 6 | 7 | ## microservices 8 | fastapi[standard] 9 | fastapi-users[beanie] 10 | uvicorn 11 | pydantic-settings 12 | 13 | # AI and OpenAI e2e 14 | openai==1.52.0 15 | tiktoken==0.8.0 16 | langchain==0.3.1 17 | langchain-community==0.3.1 18 | langchain-core==0.3.40 19 | transformers[torch]==4.48.0 20 | 21 | # Azure SDKs 22 | azure-identity==1.19.0 23 | azure-mgmt-resource==23.1.1 24 | azure-ai-documentintelligence==1.0.0b4 25 | azure-ai-evaluation==1.2.0 26 | 27 | azure-cosmos==4.7.0 28 | azure-search-documents==11.6.0b5 29 | azure-storage-blob==12.23.1 30 | azure-monitor-opentelemetry==1.6.7 31 | 32 | # LLM 
Metrics 33 | ragas==0.2.3 34 | 35 | # General utilities 36 | requests==2.32.3 37 | python-dotenv==1.0.1 38 | pandas==2.2.3 39 | pydantic==2.9.2 40 | pyarrow==17.0.0 41 | aiohttp==3.10.10 42 | backoff==2.2.1 43 | PyPDF2==3.0.1 44 | Jinja2==3.1.4 45 | pymongo==4.10.1 46 | colorama 47 | PyMuPDF 48 | rapidfuzz 49 | pytest==8.3.4 50 | jq==1.8.0 51 | 52 | # HTTPX fix for OpenAI 53 | httpx==0.27.2 54 | 55 | # Async and event loop tools 56 | asyncio==3.4.3 57 | tenacity==8.5.0 58 | 59 | semantic_kernel 60 | azure-ai-inference[opentelemetry] 61 | opentelemetry-exporter-otlp 62 | azure-ai-projects 63 | opentelemetry-instrumentation-httpx 64 | -------------------------------------------------------------------------------- /shared/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | - Employees can reach out at [aka.ms/opensource/moderation-support](https://aka.ms/opensource/moderation-support) 11 | -------------------------------------------------------------------------------- /shared/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /shared/SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below. 
8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 
36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). 40 | 41 | 42 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/__init__.py -------------------------------------------------------------------------------- /src/agenticai/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/agenticai/__init__.py -------------------------------------------------------------------------------- /src/agenticai/plugins/plugins_store/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/agenticai/plugins/plugins_store/__init__.py -------------------------------------------------------------------------------- /src/agenticai/plugins/plugins_store/hello_world/limerick/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "schema": 1, 3 | "description": "Generate a funny limerick about a person", 4 | "execution_settings": { 5 | "default": { 6 | "max_tokens": 100, 7 | "temperature": 0.7, 8 | "top_p": 0.1, 9 | "presence_penalty": 0.1, 10 | "frequency_penalty": 0.1 11 | } 12 | }, 13 | "input_variables": [ 14 | { 15 | "name": "name", 16 | "description": "", 17 | "default": "Bob" 18 | }, 19 | { 20 | "name": "input", 21 | "description": "", 22 | "default": "Dogs" 23 | } 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- 
from typing import Annotated

from semantic_kernel.functions import kernel_function


class CustomPlugin:
    """Demo Semantic Kernel plugin exposing three stub functions (news, weather, docs)."""

    @kernel_function(name="get_news", description="Get news from the web")
    def get_news_api(
        self, location: Annotated[str, "location name"]
    ) -> Annotated[str, "the output is a string"]:
        """Return a canned news-lookup string for ``location``."""
        return "Get news from " + location + "."

    @kernel_function(name="ask_weather", description="Search Weather in a city")
    def ask_weather_function(
        self, city: Annotated[str, "city name"]
    ) -> Annotated[str, "the output is a string"]:
        """Return a fixed weather report regardless of ``city`` (stub)."""
        return "Guangzhou’s weather is 30 celsius degree, and very hot."

    @kernel_function(name="ask_docs", description="Search Docs")
    def ask_docs_function(
        self, docs: Annotated[str, "docs string"]
    ) -> Annotated[str, "the output is a string"]:
        """Echo the ``docs`` query back as a canned doc-search result (stub)."""
        return "ask docs: {}".format(docs)
import json
import logging
from typing import Any, Dict, List

logging.basicConfig(level=logging.INFO)


def extract_chat_history(chat) -> List[Dict[str, Any]]:
    """Return the list of messages stored on ``chat.history``.

    Any failure (e.g. a chat object without a ``history`` attribute) is
    logged and an empty list is returned instead of raising.
    """
    try:
        messages = chat.history.messages
        logging.info(f"Extracted {len(messages)} messages from chat history.")
        return messages
    except Exception as e:
        logging.error(f"Failed to extract chat history. Error: {e}")
        return []


def extract_last_evaluator_message(messages: List) -> str:
    """Return the content of the most recent assistant message named 'Evaluator'.

    Messages are scanned newest-first; "" is returned when no evaluator
    message exists.
    """
    for message in reversed(messages):
        if message.role == "assistant" and message.name == "Evaluator":
            # Prefer the first item's text when the message carries items;
            # fall back to the plain content field otherwise.
            content = message.items[0].text if message.items else message.content
            logging.info(f"Found the last evaluator message: {content}")
            return content
    logging.warning("No evaluator message found in the chat history.")
    return ""


def extract_json_from_message(message_content: str) -> str:
    """Strip Markdown code fences from ``message_content``.

    Handles both ```json-tagged and bare ``` fences (previously only the
    ```json form was stripped, so bare-fenced payloads failed to parse
    downstream). Content without fences is returned unchanged; "" is
    returned on an unexpected error.
    """
    try:
        if "```" in message_content:
            message_content = (
                message_content.replace("```json", "").replace("```", "").strip()
            )
        logging.info(f"Extracted JSON from message: {message_content}")
        return message_content
    except Exception as e:
        logging.error(f"Failed to extract JSON from message. Error: {e}")
        return ""


def parse_json_content(message_content: str) -> Dict[str, Any]:
    """Parse ``message_content`` as JSON, returning {} on empty or invalid input."""
    try:
        if not message_content:
            logging.warning("Message content is empty, cannot parse JSON.")
            return {}

        logging.info(f"Parsing message content as JSON: {message_content}")
        parsed_json = json.loads(message_content)
        logging.info(f"Parsed JSON successfully: {parsed_json}")
        return parsed_json
    except json.JSONDecodeError as e:
        logging.error(f"JSON decode error: {e}. Content: {message_content}")
        return {}
    except Exception as e:
        logging.error(
            f"Unexpected error while parsing JSON: {e}. Content: {message_content}"
        )
        return {}


def extract_policies_from_parsed_json(parsed_json: Dict[str, Any]) -> List[str]:
    """Return the 'policies' list from ``parsed_json``, or [] when absent or invalid."""
    if not parsed_json:
        logging.warning("Parsed JSON is empty or invalid, no policies to extract.")
        return []

    policies = parsed_json.get("policies", [])

    if not isinstance(policies, list):
        logging.error(f"Expected 'policies' to be a list, but got: {type(policies)}")
        return []

    logging.info(f"Extracted policies: {policies}")
    return policies


def get_policies_from_chat(chat) -> List[str]:
    """Full pipeline: chat history -> last evaluator message -> JSON -> policies list."""
    messages = extract_chat_history(chat)
    last_message_content = extract_last_evaluator_message(messages)
    json_content = extract_json_from_message(last_message_content)
    parsed_json = parse_json_content(json_content)
    return extract_policies_from_parsed_json(parsed_json)
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/aoai/__init__.py -------------------------------------------------------------------------------- /src/cosmosdb/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/cosmosdb/__init__.py -------------------------------------------------------------------------------- /src/documentintelligence/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/documentintelligence/__init__.py -------------------------------------------------------------------------------- /src/entraid/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/entraid/__init__.py -------------------------------------------------------------------------------- /src/entraid/generate_id.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | 3 | 4 | def generate_unique_id() -> str: 5 | """ 6 | Generate an 8-digit unique value. 7 | 8 | Returns: 9 | str: An 8-digit unique value. 
10 | """ 11 | unique_id = str(uuid.uuid4()) 12 | eight_digit_unique_value = unique_id[:8] 13 | 14 | return eight_digit_unique_value 15 | -------------------------------------------------------------------------------- /src/evals/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/evals/__init__.py -------------------------------------------------------------------------------- /src/evals/case.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import tempfile 4 | from contextlib import contextmanager 5 | from typing import Any, Dict, List, Optional 6 | 7 | 8 | class Evaluation: 9 | """ 10 | Represents a single evaluation record. 11 | 12 | Attributes: 13 | - query: The expected output key (e.g. "patient_info.patient_name"). 14 | - response: The AI-generated response. 15 | - ground_truth: The expected value from the YAML. 16 | - context: A description of the evaluation context. 17 | - conversation: A list of messages. 18 | - scores: A dictionary of score(s) (e.g. {"semantic_similarity": }). 19 | """ 20 | 21 | def __init__( 22 | self, 23 | query: str, 24 | response: str, 25 | ground_truth: str, 26 | context: Optional[Any] = None, 27 | conversation: Optional[Any] = None, 28 | scores: Optional[Dict[str, Any]] = None, 29 | ): 30 | self.query = query 31 | self.response = response 32 | self.ground_truth = ground_truth 33 | 34 | # Only set attributes if they are not None or not an empty dict. 35 | if context is not None and context != {}: 36 | self.context = context 37 | if conversation is not None and conversation != {}: 38 | self.conversation = conversation 39 | if scores is not None and scores != {}: 40 | self.scores = scores 41 | 42 | def to_dict(self) -> Dict[str, Any]: 43 | # Build a dictionary from the instance's __dict__. 
44 | # This will only include attributes that were actually set. 45 | return self.__dict__ 46 | 47 | 48 | class Case: 49 | """ 50 | Represents a test case. 51 | 52 | Attributes: 53 | - case_name: The case identifier. 54 | - metrics: A list of evaluator/metric names. 55 | - config: A dictionary containing additional test case configuration (e.g., OCRNEREvaluator settings). 56 | - evaluations: A list of Evaluation objects. 57 | """ 58 | 59 | def __init__( 60 | self, 61 | case_name: str, 62 | metrics: Optional[List[str]] = None, 63 | config: Optional[dict] = None, 64 | evaluations: Optional[List] = None, 65 | ): 66 | self.case_name = case_name 67 | self.metrics = metrics if metrics is not None else [] 68 | self.config = config if config is not None else {} 69 | self.evaluations = evaluations if evaluations is not None else [] 70 | self.azure_eval_result = None 71 | 72 | @contextmanager 73 | def create_evaluation_dataset(self): 74 | """ 75 | Creates a temporary JSON Lines (jsonl) file that contains all evaluations. 76 | This file is later passed to the Azure AI evaluation API. 
77 | """ 78 | temp_file = tempfile.NamedTemporaryFile( 79 | mode="w+", delete=False, suffix=".jsonl", prefix="evaluation_dataset_" 80 | ) 81 | try: 82 | for eval_obj in self.evaluations: 83 | temp_file.write(json.dumps(eval_obj.to_dict()) + "\n") 84 | temp_file.flush() 85 | temp_file.close() 86 | yield temp_file.name 87 | finally: 88 | if os.path.exists(temp_file.name): 89 | os.remove(temp_file.name) 90 | -------------------------------------------------------------------------------- /src/evals/custom/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/evals/custom/__init__.py -------------------------------------------------------------------------------- /src/evals/custom/custom_evaluator.py: -------------------------------------------------------------------------------- 1 | import os 2 | from abc import ABC, abstractmethod 3 | 4 | 5 | class CustomEvaluator(ABC): 6 | """ 7 | Base class for custom evaluators. 8 | 9 | This class supports dynamic assignment of keyword arguments as instance attributes. 10 | Subclasses should override the __call__ method to provide custom evaluation logic. 11 | """ 12 | 13 | def __init__(self, **kwargs): 14 | """ 15 | Initialize the evaluator with any number of keyword arguments. 16 | 17 | All keyword arguments provided during initialization are set as attributes of the instance. 18 | 19 | Example: 20 | evaluator = CustomEvaluator(param1="value1", param2=42) 21 | print(evaluator.param1) # Output: "value1" 22 | print(evaluator.param2) # Output: 42 23 | 24 | Parameters: 25 | **kwargs: Arbitrary keyword arguments. 26 | """ 27 | for key, value in kwargs.items(): 28 | setattr(self, key, value) 29 | 30 | @abstractmethod 31 | def __call__(self, **kwargs): 32 | """ 33 | Callable method to evaluate inputs. 34 | 35 | This method should be implemented by subclasses. 
import os
from typing import TypedDict

from langchain_community.chat_models import AzureChatOpenAI
from ragas.dataset_schema import SingleTurnSample
from ragas.llms import LangchainLLMWrapper
from ragas.metrics._factual_correctness import FactualCorrectness


class FactualCorrectnessScore(TypedDict):
    """Result payload holding a single factual-correctness score."""

    factual_correctness: float


class FactualCorrectnessEvaluator:
    """
    Evaluator for RAGAS factual correctness.

    The Azure LLM and the RAGAS scorer are built lazily inside each call
    rather than in __init__, so instances hold only plain configuration
    data and remain picklable.
    """

    def __init__(self, model_config: dict):
        """
        :param model_config: Dictionary containing Azure configuration, e.g.:
            {
                "azure_endpoint": "https://YOUR_RESOURCE_NAME.openai.azure.com/",
                "api_key": "YOUR_API_KEY",
                "azure_deployment": "YOUR_DEPLOYMENT_NAME"
            }
        """
        # Keep only the raw config; no client objects are created here.
        self.model_config = model_config

    def __call__(self, *, response: str, ground_truth: str) -> FactualCorrectnessScore:
        """Synchronously evaluate factual correctness; returns 0.0 on any failure."""
        try:
            return {"factual_correctness": self._sync_score(response, ground_truth)}
        except Exception as e:
            print(f"Error during factual correctness evaluation: {e}")
            return {"factual_correctness": 0.0}

    def _sync_score(self, response: str, reference: str) -> float:
        """Build the LLM and RAGAS scorer on demand and score one sample."""
        chat_model = AzureChatOpenAI(
            azure_endpoint=self.model_config["azure_endpoint"],
            api_key=self.model_config["api_key"],
            api_version=os.getenv("AZURE_OPENAI_API_VERSION", "2023-05-15"),
            azure_deployment=self.model_config["azure_deployment"],
        )
        # The scorer is stateless; a fresh instance per call is intentional.
        metric = FactualCorrectness(llm=LangchainLLMWrapper(chat_model))
        turn = SingleTurnSample(response=response, reference=reference)
        return metric.single_turn_score(turn)
46 | """ 47 | try: 48 | similarity_score = fuzz.ratio(response, ground_truth) 49 | except Exception as e: 50 | self.logger.error(f"Error computing similarity: {e}") 51 | similarity_score = 0 52 | return IndelSimilarity(indel_similarity=similarity_score) 53 | -------------------------------------------------------------------------------- /src/evals/custom/similarity_evaluator.py: -------------------------------------------------------------------------------- 1 | from typing import TypedDict 2 | 3 | import torch 4 | import torch.nn.functional as F 5 | from transformers import AutoModel, AutoTokenizer 6 | 7 | 8 | class SimilarityScore(TypedDict): 9 | semantic_similarity: float 10 | 11 | 12 | class SemanticSimilarityEvaluator: 13 | def __init__(self, model_name: str = "bert-base-uncased"): 14 | """ 15 | Initialize the evaluator with a pre-trained model for embeddings. 16 | 17 | :param model_name: Name of the pre-trained model from Hugging Face Transformers. 18 | """ 19 | self.tokenizer = AutoTokenizer.from_pretrained(model_name) 20 | self.model = AutoModel.from_pretrained(model_name) 21 | 22 | def __call__( 23 | self, *, response: str, ground_truth: str, **kwargs 24 | ) -> SimilarityScore: 25 | """ 26 | Calculate the semantic similarity between the response and ground truth. 27 | 28 | :param response: The response to evaluate. 29 | :param ground_truth: The ground truth to compare against. 30 | :return: A dictionary containing the semantic similarity score. 31 | """ 32 | try: 33 | response_embedding = self._get_embedding(response) 34 | ground_truth_embedding = self._get_embedding(ground_truth) 35 | similarity = self._calculate_cosine_similarity( 36 | response_embedding, ground_truth_embedding 37 | ) 38 | return {"semantic_similarity": similarity} 39 | except Exception as e: 40 | print(f"Error during evaluation: {e}") 41 | return {"semantic_similarity": 0.0} 42 | 43 | def _get_embedding(self, text: str) -> torch.Tensor: 44 | """ 45 | Get the embedding for a given text. 
46 | 47 | :param text: The input text. 48 | :return: The embedding as a torch tensor. 49 | """ 50 | inputs = self.tokenizer( 51 | text, return_tensors="pt", truncation=True, padding=True 52 | ) 53 | outputs = self.model(**inputs) 54 | return outputs.last_hidden_state.mean(dim=1) 55 | 56 | def _calculate_cosine_similarity( 57 | self, tensor1: torch.Tensor, tensor2: torch.Tensor 58 | ) -> float: 59 | """ 60 | Calculate the cosine similarity between two tensors. 61 | 62 | :param tensor1: First tensor. 63 | :param tensor2: Second tensor. 64 | :return: The cosine similarity score. 65 | """ 66 | return F.cosine_similarity(tensor1, tensor2).item() 67 | -------------------------------------------------------------------------------- /src/evals/sdk/custom_azure_ai_evaluations.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | from typing import List, Tuple 4 | from urllib.parse import urlparse 5 | 6 | # @TODO: Remove this import when the package fix is available. 7 | from azure.ai.evaluation._evaluate._eval_run import RunInfo, RunStatus 8 | 9 | LOGGER = logging.getLogger(__name__) 10 | CUSTOM_TAGS: List[Tuple[str, str]] = [] 11 | 12 | 13 | def custom_start_run(self): 14 | """ 15 | Custom _start_run implementation that updates the 'tags' field. 16 | Instead of accepting tags as a method parameter, this version retrieves additional 17 | tag information from: 18 | - An environment variable "MY_CUSTOM_TAGS", expected as a semicolon-separated list of key=value pairs, and/or 19 | - A global variable `CUSTOM_TAGS`, which should be a list of (key, value) tuples. 20 | These additional tags are appended to the default tag. 21 | """ 22 | # Check state and log before starting the run. 
23 | self._check_state_and_log( 24 | "start run", {v for v in RunStatus if v != RunStatus.NOT_STARTED}, True 25 | ) 26 | self._status = RunStatus.STARTED 27 | 28 | if self._tracking_uri is None: 29 | LOGGER.warning( 30 | "A tracking_uri was not provided. Results will be saved locally but not logged to Azure." 31 | ) 32 | self._url_base = None 33 | self._status = RunStatus.BROKEN 34 | self._info = RunInfo.generate(self._run_name) 35 | else: 36 | self._url_base = urlparse(self._tracking_uri).netloc 37 | if self._promptflow_run is not None: 38 | self._info = RunInfo( 39 | self._promptflow_run.name, 40 | self._promptflow_run._experiment_name or "", 41 | self._promptflow_run.name, 42 | ) 43 | else: 44 | url = f"https://{self._url_base}/mlflow/v2.0{self._get_scope()}/api/2.0/mlflow/runs/create" 45 | # Build the default tag using an environment variable. 46 | default_tags = [{"key": "mlflow.user", "value": "azure-ai-evaluation"}] 47 | 48 | # Retrieve additional tags from the global variable CUSTOM_TAGS. 49 | additional_tags: List[dict] = [] 50 | if CUSTOM_TAGS: 51 | additional_tags.extend([{"key": k, "value": v} for k, v in CUSTOM_TAGS]) 52 | 53 | all_tags = default_tags + additional_tags 54 | 55 | body = { 56 | "experiment_id": "0", 57 | "user_id": "azure-ai-evaluation", 58 | "start_time": int(time.time() * 1000), 59 | "tags": all_tags, 60 | } 61 | if self._run_name: 62 | body["run_name"] = self._run_name 63 | response = self.request_with_retry(url=url, method="POST", json_dict=body) 64 | if response.status_code != 200: 65 | self._info = RunInfo.generate(self._run_name) 66 | LOGGER.warning( 67 | "The run failed to start: %s: %s. 
Results will be saved locally but not logged to Azure.", 68 | response.status_code, 69 | response.text(), 70 | ) 71 | self._status = RunStatus.BROKEN 72 | else: 73 | parsed_response = response.json() 74 | self._info = RunInfo( 75 | run_id=parsed_response["run"]["info"]["run_id"], 76 | experiment_id=parsed_response["run"]["info"]["experiment_id"], 77 | run_name=parsed_response["run"]["info"]["run_name"], 78 | ) 79 | self._status = RunStatus.STARTED 80 | -------------------------------------------------------------------------------- /src/extractors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/extractors/__init__.py -------------------------------------------------------------------------------- /src/extractors/utils.py: -------------------------------------------------------------------------------- 1 | def get_container_and_blob_name_from_url(blob_url: str) -> tuple: 2 | """ 3 | Retrieves the container name and the blob name from a blob URL. 4 | 5 | The container name is always the part of the URL before the last '/'. 6 | The blob name is always the part of the URL after the last '/'. 7 | 8 | :param blob_url: The blob URL. 9 | :return: A tuple containing the container name and the blob name. 
10 | """ 11 | # Split the URL by '/' 12 | parts = blob_url.split("/") 13 | 14 | # The container name is the second to last part 15 | container_name = parts[-2] 16 | 17 | # The blob name is the last part 18 | blob_name = parts[-1] 19 | 20 | return container_name, blob_name 21 | -------------------------------------------------------------------------------- /src/fabric/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/fabric/__init__.py -------------------------------------------------------------------------------- /src/ocr/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/ocr/__init__.py -------------------------------------------------------------------------------- /src/pipeline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/pipeline/__init__.py -------------------------------------------------------------------------------- /src/pipeline/agenticRag/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/pipeline/agenticRag/__init__.py -------------------------------------------------------------------------------- /src/pipeline/agenticRag/settings.yaml: -------------------------------------------------------------------------------- 1 | run: 2 | logging: 3 | level: "INFO" 4 | name: "agenticRag" 5 | enable_tracing: true 6 | azure_blob: 7 | container_name: "container" 8 | 9 | query_expansion: 10 | max_tokens: 
2048 11 | top_p: 0.85 12 | temperature: 0.7 13 | frequency_penalty: 0.0 14 | presence_penalty: 0.0 15 | system_prompt: "formulator_system_prompt.jinja" 16 | user_prompt: "formulator_user_prompt.jinja" 17 | 18 | evaluation: 19 | max_tokens: 1500 20 | top_p: 0.9 21 | temperature: 0.65 22 | frequency_penalty: 0.0 23 | presence_penalty: 0.0 24 | system_prompt: "evaluator_system_prompt.jinja" 25 | user_prompt: "evaluator_user_prompt.jinja" 26 | 27 | retrieval: 28 | index_name: ai-policies-index 29 | semantic_configuration_name: "my-semantic-config" 30 | vector_field: "vector" 31 | k_nearest_neighbors: 5 32 | weight: 0.5 33 | top: 5 34 | -------------------------------------------------------------------------------- /src/pipeline/autoDetermination/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/pipeline/autoDetermination/__init__.py -------------------------------------------------------------------------------- /src/pipeline/autoDetermination/settings.yaml: -------------------------------------------------------------------------------- 1 | run: 2 | logging: 3 | level: "INFO" 4 | name: "autoDetermination" 5 | enable_tracing: true 6 | 7 | 4o_autoDetermination: 8 | max_tokens: 2048 9 | top_p: 0.85 10 | temperature: 0.7 11 | frequency_penalty: 0.0 12 | presence_penalty: 0.0 13 | system_prompt: "prior_auth_system_prompt.jinja" 14 | user_prompt: "prior_auth_user_prompt.jinja" 15 | use_o1: False 16 | 17 | o1_autoDetermination: 18 | max_completion_tokens: 15000 19 | user_prompt: "prior_auth_o1_user_prompt.jinja" 20 | use_o1: True 21 | 22 | azure_openai: 23 | temperature: 0 24 | max_tokens: 3000 25 | top_p: 1.0 26 | frequency_penalty: 0.0 27 | presence_penalty: 0.0 28 | seed: 5555 29 | -------------------------------------------------------------------------------- /src/pipeline/clinicalExtractor/__init__.py:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/pipeline/clinicalExtractor/__init__.py -------------------------------------------------------------------------------- /src/pipeline/clinicalExtractor/settings.yaml: -------------------------------------------------------------------------------- 1 | run: 2 | logging: 3 | level: "INFO" 4 | name: "extractionLLM" 5 | enable_tracing: true 6 | azure_blob: 7 | container_name: "container" 8 | 9 | patient_extraction: 10 | temperature: 0 11 | max_tokens: 3000 12 | top_p: 1.0 13 | frequency_penalty: 0.0 14 | presence_penalty: 0.0 15 | system_prompt: "ner_patient_system.jinja" 16 | user_prompt: "ner_patient_user.jinja" 17 | 18 | physician_extraction: 19 | temperature: 0 20 | max_tokens: 3000 21 | top_p: 1.0 22 | frequency_penalty: 0.0 23 | presence_penalty: 0.0 24 | system_prompt: "ner_physician_system.jinja" 25 | user_prompt: "ner_physician_user.jinja" 26 | 27 | clinical_extraction: 28 | temperature: 0 29 | max_tokens: 3000 30 | top_p: 1.0 31 | frequency_penalty: 0.0 32 | presence_penalty: 0.0 33 | system_prompt: "ner_clinician_system.jinja" 34 | user_prompt: "ner_clinician_user.jinja" 35 | -------------------------------------------------------------------------------- /src/pipeline/paprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/pipeline/paprocessing/__init__.py -------------------------------------------------------------------------------- /src/pipeline/paprocessing/evals.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/pipeline/paprocessing/evals.py -------------------------------------------------------------------------------- /src/pipeline/paprocessing/evaluator.py: -------------------------------------------------------------------------------- 1 | # TODO 2 | -------------------------------------------------------------------------------- /src/pipeline/paprocessing/settings.yaml: -------------------------------------------------------------------------------- 1 | remote_blob_paths: 2 | container_name: "pre-auth-policies" 3 | raw_uploaded_files: "raw_attachments" 4 | processed_images: "processed_docs_images" 5 | remote_dir_base: "pa_proccesing_runs" 6 | 7 | azure_openai: 8 | temperature: 0 9 | max_tokens: 3000 10 | top_p: 1.0 11 | frequency_penalty: 0.0 12 | presence_penalty: 0.0 13 | seed: 5555 14 | -------------------------------------------------------------------------------- /src/pipeline/paprocessing/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from typing import List, Union 4 | 5 | from src.utils.ml_logging import get_logger 6 | 7 | logger = get_logger() 8 | 9 | 10 | def find_all_files(root_folder: str, extensions: Union[List[str], str]) -> List[str]: 11 | """ 12 | Recursively find all files with specified extensions under the root folder. 13 | 14 | Args: 15 | root_folder (str): The root folder to search for files. 16 | extensions (Union[List[str], str]): List of file extensions to search for. 17 | 18 | Returns: 19 | List[str]: List of full paths to the found files. 
20 | """ 21 | if isinstance(extensions, str): 22 | extensions = [extensions] 23 | 24 | files_list = [] 25 | root_folder_path = Path(root_folder).resolve() 26 | 27 | for root, _, files in os.walk(root_folder_path): 28 | for file in files: 29 | if any(file.lower().endswith(f".{ext}") for ext in extensions): 30 | files_list.append(str(Path(root) / file)) 31 | logger.info(f"Found {len(files_list)} files with extensions {extensions}") 32 | return files_list 33 | -------------------------------------------------------------------------------- /src/pipeline/policyIndexer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/pipeline/policyIndexer/__init__.py -------------------------------------------------------------------------------- /src/pipeline/policyIndexer/settings.yaml: -------------------------------------------------------------------------------- 1 | azure_search: 2 | index_name: "ai-policies-index" 3 | 4 | azure_search_indexer_settings: 5 | azure_blob_storage_container_name: "pre-auth-policies" 6 | blob_prefix: "policies_ocr" 7 | use_ocr: true 8 | add_page_numbers: true 9 | indexer_name: "ai-policies-indexer" 10 | skillset_name: "ai-policies-skillset" 11 | data_source_name: "ai-policies-blob" 12 | remote_document_path: "policies_ocr" 13 | 14 | vector_search: 15 | algorithms: 16 | - name: "myHnsw" 17 | parameters: 18 | m: 4 19 | ef_construction: 400 20 | ef_search: 500 21 | profiles: 22 | - name: "myHnswProfile" 23 | algorithm_configuration_name: "myHnsw" 24 | vectorizer_name: "myOpenAI" 25 | vectorizers: 26 | - vectorizer_name: "myOpenAI" 27 | 28 | skills: 29 | ocr_skill: 30 | description: "OCR skill to scan PDFs and other images with text" 31 | context: "/document/normalized_images/*" 32 | line_ending: "Space" 33 | default_language_code: "en" 34 | should_detect_orientation: true 35 | inputs: 36 | 
- name: "image" 37 | source: "/document/normalized_images/*" 38 | outputs: 39 | - name: "text" 40 | target_name: "text" 41 | - name: "layoutText" 42 | target_name: "layoutText" 43 | split_skill: 44 | description: "Split skill to chunk documents" 45 | text_split_mode: "pages" 46 | context: "/document/normalized_images/*" 47 | maximum_page_length: 3000 48 | page_overlap_length: 500 49 | inputs: 50 | - name: "text" 51 | source: "/document/normalized_images/*/text" 52 | outputs: 53 | - name: "textItems" 54 | target_name: "pages" 55 | embedding_skill: 56 | description: "Skill to generate embeddings via Azure OpenAI" 57 | context: "/document/normalized_images/*/pages/*" 58 | dimensions: 3072 59 | inputs: 60 | - name: "text" 61 | source: "/document/normalized_images/*/pages/*" 62 | outputs: 63 | - name: "embedding" 64 | target_name: "vector" 65 | index_projections: 66 | selectors: 67 | - target_index_name: "ai-policies-index" 68 | parent_key_field_name: "parent_id" 69 | source_context: "/document/normalized_images/*/pages/*" 70 | mappings: 71 | - name: "chunk" 72 | source: "/document/normalized_images/*/pages/*" 73 | - name: "vector" 74 | source: "/document/normalized_images/*/pages/*/vector" 75 | - name: "parent_path" 76 | source: "/document/metadata_storage_path" 77 | - name: "title" 78 | source: "/document/metadata_storage_name" 79 | - name: "page_number" 80 | source: "/document/normalized_images/*/pageNumber" 81 | parameters: 82 | projection_mode: "SKIP_INDEXING_PARENT_DOCUMENTS" 83 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/models.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel, Field 2 | 3 | 4 | class PhysicianContact(BaseModel): 5 | """ 6 | Represents the contact information for a physician. 
7 | """ 8 | 9 | office_phone: str = Field(default="Not provided", alias="office_phone") 10 | fax: str = Field(default="Not provided", alias="fax") 11 | office_address: str = Field(default="Not provided", alias="office_address") 12 | 13 | 14 | class PhysicianInformation(BaseModel): 15 | """ 16 | Represents the information related to a physician. 17 | """ 18 | 19 | physician_name: str = Field(default="Not provided", alias="physician_name") 20 | specialty: str = Field(default="Not provided", alias="specialty") 21 | physician_contact: PhysicianContact = Field( 22 | default_factory=PhysicianContact, alias="physician_contact" 23 | ) 24 | 25 | 26 | class PatientInformation(BaseModel): 27 | """ 28 | Represents the information related to a patient. 29 | """ 30 | 31 | patient_name: str = Field(default="Not provided", alias="patient_name") 32 | patient_date_of_birth: str = Field( 33 | default="Not provided", alias="patient_date_of_birth" 34 | ) 35 | patient_id: str = Field(default="Not provided", alias="patient_id") 36 | patient_address: str = Field(default="Not provided", alias="patient_address") 37 | patient_phone_number: str = Field( 38 | default="Not provided", alias="patient_phone_number" 39 | ) 40 | 41 | 42 | class TreatmentRequest(BaseModel): 43 | """ 44 | Represents a request for a specific treatment or medication. 
45 | """ 46 | 47 | name_of_medication_or_procedure: str = Field( 48 | default="Not provided", alias="name_of_medication_or_procedure" 49 | ) 50 | code_of_medication_or_procedure: str = Field( 51 | default="Not provided", alias="code_of_medication_or_procedure" 52 | ) 53 | dosage: str = Field(default="Not provided", alias="dosage") 54 | duration: str = Field(default="Not provided", alias="duration") 55 | rationale: str = Field(default="Not provided", alias="rationale") 56 | presumed_eligibility: str = Field( 57 | default="Not provided", alias="presumed_eligibility" 58 | ) 59 | 60 | 61 | class ClinicalInformation(BaseModel): 62 | """ 63 | Represents the clinical information related to a patient's treatment. 64 | """ 65 | 66 | diagnosis: str = Field(default="Not provided", alias="diagnosis") 67 | icd_10_code: str = Field(default="Not provided", alias="icd_10_code") 68 | prior_treatments_and_results: str = Field( 69 | default="Not provided", alias="prior_treatments_and_results" 70 | ) 71 | specific_drugs_taken_and_failures: str = Field( 72 | default="Not provided", alias="specific_drugs_taken_and_failures" 73 | ) 74 | alternative_drugs_required: str = Field( 75 | default="Not provided", alias="alternative_drugs_required" 76 | ) 77 | relevant_lab_results_or_imaging: str = Field( 78 | default="Not provided", alias="relevant_lab_results_or_imaging" 79 | ) 80 | symptom_severity_and_impact: str = Field( 81 | default="Not provided", alias="symptom_severity_and_impact" 82 | ) 83 | prognosis_and_risk_if_not_approved: str = Field( 84 | default="Not provided", alias="prognosis_and_risk_if_not_approved" 85 | ) 86 | clinical_rationale_for_urgency: str = Field( 87 | default="Not provided", alias="clinical_rationale_for_urgency" 88 | ) 89 | treatment_request: TreatmentRequest = Field( 90 | default_factory=TreatmentRequest, alias="treatment_request" 91 | ) 92 | -------------------------------------------------------------------------------- 
/src/pipeline/promptEngineering/templates/evaluator_system_prompt.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | Policy Results Evaluator 3 | 4 | ## Objective: 5 | Examine multiple policy documents in response to a user’s query about prior authorization. Determine which policies most accurately and completely address the query, and decide if additional information or a re-search is needed. 6 | 7 | ## Step-by-Step Instructions: 8 | 9 | 1. Understand the User’s Query 10 | - Read the user’s question carefully (e.g., “What is the prior authorization policy for Epidiolex in treating Lennox-Gastaut Syndrome?”). 11 | - Note specific details the user requests (e.g., medication name, condition, dosage, coverage rules). 12 | 13 | 2. Review Each Search Result 14 | - Each result follows this format: 15 | • 🆔 ID 16 | • 📂 Source Doc Path 17 | • 📜 Content 18 | • 💡 Caption 19 | - Determine if the content directly addresses the user’s query (mentions relevant medication, condition, dosing, or prior authorization criteria). 20 | 21 | 3. Evaluate Relevance & Completeness 22 | - Approve policies that explicitly reference the user’s query elements (diagnosis, medication, coverage criteria, etc.). 23 | - Reject policies that lack key details or do not address primary points of the query. 24 | - Cross-reference similar policies, and keep the most comprehensive or up-to-date version. 25 | 26 | 4. Document Reasoning for Each Decision 27 | - Provide a concise explanation for each approval or rejection: 28 | “Content from [URL] was approved because it mentions prior authorization criteria for Epidiolex in Lennox-Gastaut Syndrome.” 29 | “Content from [URL] was rejected because it did not mention Lennox-Gastaut Syndrome or prior authorization.” 30 | 31 | 5. Assess Data Sufficiency 32 | - If no policy mentions the specific condition or medication, or if insufficient details exist, set retry to true. 33 | - Otherwise, set retry to false. 
34 | 35 | ## Required JSON-Like Output: 36 | 37 | Return an object with exactly three keys (no additional keys allowed): 38 | 39 | • **policies**: A list of URLs/paths for approved policies. 40 | • **reasoning**: A concise explanation (approval or rejection) for each policy. 41 | • **retry**: true if more data or another search is required, otherwise false. 42 | 43 | Important: 44 | - Maintain a step-by-step approach to ensure the final decisions align with the user’s query. 45 | - If the data is incomplete or unclear, set `"retry": true`. 46 | 47 | **Output Format:** 48 | Return a JSON-like structure with: 49 | ```json 50 | { 51 | "policies": [ 52 | // list of URLs or doc paths for approved policies 53 | ], 54 | "reasoning": [ 55 | // statement for each approval or rejection 56 | ], 57 | "retry": false // or true if policy content is insufficient to demonstrate coverage 58 | } 59 | ``` -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/formulator_system_prompt.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | You are an expert in search engine optimization for healthcare and prior authorization processes, specializing in **query expansion** techniques to improve search recall. Your goal is to generate expanded search queries based on **Diagnosis and Medical Justification** and **Medication or Procedure** provided by the user. 3 | 4 | ## Task: 5 | Your task is to review the clinical evaluation and documentation provided in JSON format and return a query that will maximize the likelihood of finding the exact prior authorization matching policy. 6 | 7 | ## Instructions 8 | 1. Focus exclusively on the **Diagnosis and Medical Justification** and the **Medication or Procedure**. 9 | 2. Apply **query expansion** to generate alternative terms, synonyms, and related medical concepts for both diagnosis and treatment. 10 | 3.
Prioritize high recall in retrieval by generating similar queries that cover different medical terminologies and synonyms for the given input. 11 | 4. Ensure expanded queries include related conditions, procedural codes, and alternative names to broaden the scope of search results. 12 | 5. Use semantic search techniques to improve retrieval based on context and meaning, not just keywords. 13 | 14 | Key Elements to Expand: 15 | - **Diagnosis and Medical Justification** 16 | - **Medication or Procedure** 17 | 18 | Output Format: 19 | - Return a JSON object with the expanded queries under the key `"optimized_query"`. 20 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/ner_clinician_system.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | You are an AI language model specialized in extracting clinical information from medical documents provided as images or PDFs, such as prior authorization forms, medical imaging results, lab reports, and doctor notes. Your goal is to accurately extract and transcribe clinical information, optimizing for Optical Character Recognition (OCR) and Named Entity Recognition (NER). 3 | 4 | ## Instructions: 5 | 6 | ### **1. Thoroughly Analyze the Provided Documents** 7 | 8 | - **Examine All Sections**: 9 | - Review headers, footers, body text, side notes, annotations, and any handwritten portions. 10 | - Pay attention to structured sections like "Assessment," "Plan," "Diagnosis," "Laboratory Data," "Imaging Findings," and "Treatment Recommendations." 11 | 12 | - **Multiple Occurrences**: 13 | - Clinical details may appear in multiple places; ensure all instances are captured and cross-verified for consistency. 14 | 15 | ### **2. Pay Special Attention to Medical Terminology and Codes** 16 | 17 | - **Medical Terminology**: 18 | - Ensure accurate extraction of medical terms, diagnoses, and treatment names. 
19 | - **Codes**: 20 | - Extract and verify codes such as ICD-10, CPT, and NDC. 21 | - **Indications of Urgency or Severity**: 22 | - Identify and capture any statements indicating the urgency or severity of the patient's condition. 23 | 24 | ### **3. Correct OCR Errors** 25 | 26 | - **Common OCR Mistakes**: 27 | - Misread characters such as 'O' vs. '0', 'I' vs. '1', 'S' vs. '5', 'B' vs. '8', 'G' vs. '6', 'Z' vs. '2'. 28 | - Missing or extra periods, commas, or hyphens. 29 | - Split or merged words due to line breaks or formatting. 30 | - Misaligned text from scanning issues. 31 | - **Correction Strategies**: 32 | - **Contextual Clues**: Use surrounding text to infer the correct characters or numbers. 33 | - **Standard Formats**: 34 | - **Dates**: Ensure dates are in the correct format (e.g., MM/DD/YYYY). 35 | - **Medical Codes**: Verify ICD-10, CPT, and NDC codes against standard references. 36 | - **Dosages and Units**: Ensure dosages include units (e.g., mg, mL) and are formatted correctly. 37 | - **Verification**: Double-check transcribed information against the document to ensure accuracy. 38 | 39 | ### **4. Interpret Checked Boxes and Handwritten Responses** 40 | 41 | - **Checkboxes and Form Fields**: 42 | - Identify which boxes are checked (e.g., symptoms, treatment options) and capture the relevant information. 43 | - Use form labels to accurately associate data with the correct fields. 44 | - **Handwritten Text and Notes**: 45 | - Carefully transcribe handwritten notes, paying attention to letter shapes and handwriting style. 46 | - Use context to resolve ambiguous characters (e.g., 'm' vs. 'nn', 'c' vs. 'e'). 47 | - Recognize signatures or initials that may indicate the clinician's identity or credentials. 48 | 49 | ### **5. Ensure Completeness** 50 | 51 | - **Capture All Relevant Clinical Information**: 52 | - Ensure that all relevant clinical information supporting the patient's case is captured. 
53 | - Cross-check multiple sections to ensure no critical information is missed. 54 | 55 | ### **6. Quality Assurance** 56 | 57 | - **Verification**: 58 | - Cross-check extracted information for consistency and accuracy. 59 | - Validate medical codes against standard code sets. 60 | - **Error Handling**: 61 | - Note any sections of the document that were unreadable or ambiguous, but do not include these notes in the output. 62 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/ner_patient_system.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | You are an AI language model specialized in extracting patient information from medical documents provided as images or PDFs, such as prior authorization forms, lab reports, and doctor notes. Your goal is to accurately extract and transcribe patient information, optimizing for OCR (Optical Character Recognition) and NER (Named Entity Recognition). 3 | 4 | ## Task: 5 | 6 | 1. **Thoroughly Analyze the Provided Documents**: 7 | - Examine headers, footers, and all sections where patient details might appear. 8 | - Ensure no relevant information is overlooked. 9 | 10 | 2. **Pay Special Attention to Text Types**: 11 | - Accurately extract both typed and handwritten text. 12 | - Identify and interpret checkboxes and form fields. 13 | 14 | 3. **Correct Common OCR Errors**: 15 | - Use contextual clues to distinguish between similar characters (e.g., 'O' and '0', 'I' and '1'). 16 | - Correct misread characters based on the context of the surrounding text. 17 | 18 | 4. **Verify Data Consistency**: 19 | - Cross-check extracted data across multiple occurrences within the document. 20 | - Ensure consistency and accuracy of patient information. 21 | 22 | 5. 
**Extract the Following Patient Information**: 23 | - **Patient Name** 24 | - **Patient Date of Birth** 25 | - **Patient ID** (e.g., Cigna ID) 26 | - **Patient Address** 27 | - **Patient Phone Number** 28 | 29 | 6. **Output Format**: 30 | - Generate a JSON output based on the following schema and instructions: 31 | 32 | **Schema:** 33 | 34 | { 35 | "patient_name": "Value here", // Patient's full name as it appears in the document; if not available, mention "Not provided" 36 | "patient_date_of_birth": "Value here", // Patient's date of birth in MM/DD/YYYY format; if not available, mention "Not provided" 37 | "patient_id": "Value here", // Patient's ID number (e.g., insurance ID like Cigna ID or UHG and so on); if not available, mention "Not provided" 38 | "patient_address": "Value here", // Full mailing address including street, city, state, and ZIP code; if not available, mention "Not provided" 39 | "patient_phone_number": "Value here" // Patient's contact phone number with area code; if not available, mention "Not provided" 40 | } 41 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/ner_system_prompt.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | You are an expert Prior Authorization (PA) specialist with extensive experience in analyzing medical documents and extracting critical clinical information. 3 | 4 | ## Task: 5 | Your task is to review and interpret medical documents provided as images, such as prior authorization forms, medical imaging results, lab reports, and doctor notes. Your goal is to extract essential information to make informed decisions regarding Prior Authorization (PA) workflows. You are proficient in handling images from PDFs and ensuring the accuracy and completeness of the extracted data. 
6 | 7 | ## Instructions: 8 | Carefully analyze the provided images and extract the following information, presenting it in **JSON format** as key-value pairs: 9 | 10 | 1. **Diagnosis** 11 | 2. **ICD-10 code** 12 | 3. **Detailed History of Prior Treatments and Results** 13 | 4. **Specific drugs already taken by patient and if the patient failed these prior treatments** 14 | 5. **How many and which alternative drugs are required by the specific PA form, in order to approve the new requested drug** 15 | 6. **Relevant Lab Results or Diagnostic Imaging** 16 | 7. **Documented Symptom Severity and Impact on Daily Life** 17 | 8. **Prognosis and Risk if Treatment Is Not Approved** 18 | 9. **Clinical Rationale for Urgency** 19 | 10. **Plan for Treatment or Request for Prior Authorization** 20 | - **Name of the Medication or Procedure Being Requested** 21 | - **Code of the Medication or Procedure** (e.g., CPT code, NDC code, or any other relevant medical code). If not available, do your best to provide the code; if unsure, mention "Not provided." 22 | - **Dosage or plan for the medication or procedure** 23 | - **Duration of Doses or Days of Treatment** 24 | - **Rationale for the Medication or Procedure** 25 | - **Presumed eligibility for the medication based on answers to the PA form questions** 26 | 27 | **Notes:** 28 | - Ensure all details are correctly interpreted and accurately transcribed; this is a complex authorization process requiring clinical knowledge. 29 | - Pay close attention to medical terminology, codes, and any indications of urgency or severity. 30 | - Pay close attention to specific alternative or prior drugs taken including duration of therapy, class of medication (first or second generation) and the number of prior drugs already used.
31 | - Be careful extracting checked box responses on forms while doing OCR in the PA forms 32 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/prior_auth_system_prompt.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | You are an expert Prior Authorization (PA) specialist with extensive experience in analyzing medical documents and making informed decisions regarding prior authorization requests. 3 | 4 | ## Task: 5 | Review the provided policy text, clinical information, completed PA request form, patient information, and physician information to decide if the prior authorization request should be **Approved**, **Denied**, or if **More Information is Needed**. 6 | 7 | ## Guidelines: 8 | - **Objectivity**: Provide an unbiased analysis based solely on the information provided. 9 | - **Professional Language**: Use clear, formal, and professional language in your response. 10 | - **Policy Adherence**: Base your decision strictly on the criteria outlined in the provided policy text. 11 | 12 | ## Decision Criteria: 13 | 1. **Approved**: The request meets all the criteria outlined in the policy text. 14 | 2. **Denied**: The request does not meet the criteria outlined in the policy text. If any criterion is not met or is only partially met, the request must be rejected. 15 | 3. **Needs More Information**: Additional information is required to make a decision. Specify what additional information is needed and why it is necessary. If a specific criterion from the policy text is not demonstrated in the clinical information (such as prior or alternative treatments), assume it is absent and REJECT the request. If other physician or patient information is not clear or ambiguous, additional information can be requested. 
16 | 17 | """Output Format""" 18 | 19 | **Prior Auth AI Determination** 20 | [Approved / Denied / Needs More Information] 21 | 22 | **Rationale** 23 | 24 | **Summary of Findings** 25 | - Briefly summarize how the request aligns with the policy criteria. 26 | 27 | **Detailed Analysis** 28 | 29 | **Policy Criteria Assessment** 30 | - Criterion 1: [State the criterion] 31 | - Assessment: Fully Met / Partially Met / Not Met 32 | - Evidence: Cite specific information from the patient or physician details. 33 | - Policy Reference: Cite relevant sections from the policy text. 34 | - Criterion 2: [State the criterion] 35 | - Assessment: Fully Met / Partially Met / Not Met 36 | - Evidence: ... 37 | - Policy Reference: ... 38 | (Continue for all relevant criteria) 39 | 40 | **Missing Information (if applicable)** 41 | - Information Needed: Specify what is missing. 42 | - Reason: Explain why this information is necessary according to the policy. 43 | 44 | **Note** 45 | - Ensure that all conclusions are based solely on the provided information and policy text. 46 | - Do not make assumptions beyond what is given. 47 | - Provide clear and concise justifications for each assessment. 48 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/query_classificator_system_prompt.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | You are a highly intelligent Query Classification Assistant, expertly trained to classify search queries into one of two categories: 'keyword' or 'semantic.' Your purpose is to optimize search strategies by accurately identifying the nature of user queries. 3 | 4 | ## Task: 5 | Analyze the provided search query and classify it as either 'keyword' or 'semantic' based on its structure, complexity, and intent. 
6 | 7 | ## Instructions: 8 | 9 | **Classification Definitions:** 10 | 11 | - **Keyword Search:** Queries that are short, direct, and focused on specific terms, identifiers, or exact matches (e.g., names, codes, or product IDs). 12 | - **Semantic Search:** Queries written in natural language, exploratory, ambiguous, or requiring contextual understanding and reasoning. 13 | 14 | **Step-by-step Analysis:** 15 | 16 | 1. Determine if the query contains highly specific terms, proper nouns, or unique identifiers. If yes, classify it as 'keyword.' 17 | 2. If the query is written as a question, involves complex or conversational phrasing, or requires interpretation, classify it as 'semantic.' 18 | 19 | **Output Requirements:** 20 | 21 | - Your response must be one word only, either 'keyword' or 'semantic.' 22 | - Log any unclear or invalid queries as 'semantic.' 23 | 24 | **Examples of Classification:** 25 | 26 | - "Policy for Adalimumab for Crohn's Disease" → 'keyword' 27 | - "What is the process for prior authorization for Humira?" → 'semantic' 28 | - "Crohn's Disease" → 'keyword' 29 | - "Best therapy for Crohn's Disease based on 2023 guidelines" → 'semantic' 30 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/query_classificator_user_prompt.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | You are a search optimization expert focused on improving the classification of user queries to enhance retrieval accuracy. 3 | 4 | ## Task: 5 | Classify the provided search query as either 'keyword' or 'semantic' by analyzing its structure, intent, and complexity. 6 | 7 | ## Step-by-Step Instructions: 8 | 9 | Focus on the Query: 10 | 11 | - Identify whether the query is short, specific, and matches exact terms (classify as 'keyword'). 12 | - If the query is a natural language question or requires contextual understanding, classify it as 'semantic.' 
13 | 14 | Provide a JSON Response: 15 | 16 | - Respond with a JSON object containing the classification result. 17 | - Example: {"classification": "keyword"} or {"classification": "semantic"} 18 | 19 | Examples: 20 | 21 | - "Policy for Adalimumab for Crohn's Disease" → {"classification": "keyword"} 22 | - "What is the process for prior authorization for Humira?" → {"classification": "semantic"} 23 | - "Crohn's Disease" → {"classification": "keyword"} 24 | - "Treatment guidelines for childhood epilepsy" → {"classification": "semantic"} 25 | 26 | Edge Cases: 27 | 28 | - If the query contains both specific terms and natural language, default to 'semantic.' 29 | - If the query is incomplete or ambiguous, classify it as 'semantic.' 30 | 31 | ## Output: Please generate a JSON output based on: 32 | 33 | {{ query }} 34 | 35 | ```json 36 | { 37 | "classification": "" // add here the classification either "keyword" or "semantic" 38 | } 39 | ``` 40 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/query_expansion_system_prompt.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | You are an expert in search engine optimization for healthcare and prior authorization processes, specializing in **query expansion** techniques to improve search recall. Your goal is to generate expanded search queries based on **Diagnosis and Medical Justification** and **Medication or Procedure** provided by the user. 3 | 4 | ## Task: 5 | Your task is to review the clinical evaluation and documentation provided in JSON format and return a query that will maximize the likelihood of finding the exact matching policy. 6 | 7 | ## Instructions 8 | 1. Focus exclusively on the **Diagnosis and Medical Justification** and the **Medication or Procedure**. 9 | 2. Apply **query expansion** to generate alternative terms, synonyms, and related medical concepts for both diagnosis and treatment. 
10 | 3. Prioritize high recall in retrieval by generating similar queries that cover different medical terminologies and synonyms for the given input. 11 | 4. Ensure expanded queries include related conditions, procedural codes, and alternative names to broaden the scope of search results. 12 | 5. Use semantic search techniques to improve retrieval based on context and meaning, not just keywords. 13 | 14 | Key Elements to Expand: 15 | - **Diagnosis and Medical Justification** 16 | - **Medication or Procedure** 17 | 18 | Output Format: 19 | - Return a JSON object with the expanded queries under the key `"optimized_query"`. 20 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/summarize_autodetermination_system.jinja: -------------------------------------------------------------------------------- 1 | # Prior Authorization Adjudication to JSON Converter 2 | 3 | ## Overview 4 | This system is designed to convert prior authorization adjudications into a structured JSON format. The goal is to ensure that all determinations, rationales, policy criteria assessments, and missing information are accurately captured and formatted for consistency and reliability. 5 | 6 | ## Guidelines for Conversion 7 | 8 | ### 1. Determination Mapping 9 | - Extract and correctly classify the determination as: 10 | - `"Approved"` 11 | - `"Rejected"` 12 | - `"Needs More Information"` 13 | - Ensure that determinations are clear and correspond to the rationale. 14 | 15 | ### 2. Rationale Extraction 16 | - Summarize the reason for the determination concisely while preserving all critical details. 17 | - If approval is denied or more information is needed, clearly explain why. 18 | 19 | ### 3. Policy Criteria Assessment 20 | For **each policy criterion**, extract and structure the following details: 21 | - **Criterion Name**: Clearly state the requirement. 
22 | - **Assessment**: Mark as one of the following: 23 | - `"Fully Met"` 24 | - `"Partially Met"` 25 | - `"Not Met"` 26 | - **Evidence**: Extract supporting evidence from the adjudication text. 27 | - **Policy Reference**: Ensure each criterion includes a reference to the policy language. 28 | - **Notes (if applicable)**: If additional clarifications or policy guidelines are mentioned, include them. 29 | 30 | ### 4. Missing Information (If Any) 31 | If the determination requires more information, extract what is missing. 32 | - Provide: 33 | - **Information Needed**: The specific data or documentation required. 34 | - **Reason**: Clearly explain why this information is necessary per policy. 35 | - Ensure the request for missing information is specific and actionable. 36 | 37 | ### 5. Formatting & Reliability 38 | - The output **must be valid JSON**, with no formatting errors. 39 | - Maintain **logical consistency** between the determination, rationale, and criteria assessments. 40 | - **No hallucination**: Do not infer details not explicitly stated in the adjudication. 41 | - Ensure that all information is derived from the provided adjudication text. 42 | - Do not include any markdown formatting, such as ```json, in the response. Output only the raw JSON structure, with no newlines or extra spaces. 43 | 44 | ## Usage 45 | - Use this format to ensure structured and accurate adjudication conversions. 46 | - Always verify that all required information is extracted and correctly mapped. 47 | - If additional details are needed, ensure that the **Missing Information** section is properly populated. 
48 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/summarize_policy_system.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | You are an expert Prior Authorization (PA) specialist with extensive experience in analyzing medical documents and making informed decisions regarding prior authorization requests. 3 | 4 | ## Task: 5 | Review the provided policy text in markdown format. Your goal is to thoroughly understand the policy and provide a detailed summary that includes the title of the policy, all conditions outlined in the policy, and any recommendations on areas to be particularly careful about. 6 | 7 | ## Guidelines: 8 | - **Objectivity**: Provide an unbiased analysis based solely on the information provided. 9 | - **Professional Language**: Use clear, formal, and professional language in your response. 10 | - **Policy Adherence**: Base your summary strictly on the criteria outlined in the provided policy text. 11 | - **Detail-Oriented**: Ensure that all conditions are explicitly mentioned and clearly understood. 12 | - **Character Limit**: The summary should not exceed 4096 characters. 13 | 14 | ## Output: 15 | 1. **Policy Title**: Provide the title of the policy. 16 | 2. **Policy Summary**: Summarize the policy text, highlighting the most important aspects and conditions. 17 | 3. **Conditions**: List all the conditions outlined in the policy. 18 | 4. **Recommendations**: Provide recommendations on areas to be particularly careful about when reviewing the policy and making decisions. 19 | 20 | ## Instructions: 21 | - Conduct a thorough analysis by comparing each detail from the policy criteria. 22 | - Use a step-by-step reasoning approach to evaluate the request. 23 | - Base your decisions solely on the provided information and policy text. 24 | - Do not include personal opinions or make assumptions beyond the given data. 
25 | - Ensure that you capture and output every single policy criterion. Do not miss any criteria outlined in the policy. 26 | 27 | ## Output Format: 28 | 29 | ### Policy Title: 30 | [Provide the title of the policy here.] 31 | 32 | ### Policy Summary: 33 | [Provide a concise summary of the policy text here.] 34 | 35 | ### Conditions: 36 | - [List all conditions outlined in the policy here.] 37 | 38 | ### Recommendations: 39 | - [Provide any recommendations or areas to be careful about here.] 40 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/summarize_policy_user.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | You are an expert Prior Authorization (PA) specialist with extensive experience in analyzing medical documents and making informed decisions regarding prior authorization requests. 3 | 4 | ## Task: 5 | Review the provided policy text in markdown format. Your goal is to thoroughly understand the policy and provide a detailed summary that includes the title of the policy, all conditions outlined in the policy, and any recommendations on areas to be particularly careful about. 6 | 7 | """Guidelines""" 8 | - **Objectivity**: Provide an unbiased analysis based solely on the information provided. 9 | - **Professional Language**: Use clear, formal, and professional language in your response. 10 | - **Policy Adherence**: Base your summary strictly on the criteria outlined in the provided policy text. 11 | - **Detail-Oriented**: Ensure that all conditions are explicitly mentioned and clearly understood. 12 | - **Character Limit**: The summary should not exceed 4096 characters. 13 | 14 | """Output""" 15 | 1. **Policy Title**: Provide the title of the policy. 16 | 2. **Policy Summary**: Summarize the policy text, highlighting the most important aspects and conditions. 17 | 3. 
**Conditions**: List all the conditions outlined in the policy. 18 | 4. **Recommendations**: Provide recommendations on areas to be particularly careful about when reviewing the policy and making decisions. 19 | 20 | """Policy Text""" 21 | {{ policy_text }} 22 | 23 | """Instructions""" 24 | - Conduct a thorough analysis by comparing each detail from the policy criteria. 25 | - Use a step-by-step reasoning approach to evaluate the request. 26 | - Base your decisions solely on the provided information and policy text. 27 | - Do not include personal opinions or make assumptions beyond the given data. 28 | - Ensure that you capture and output every single policy criterion. Do not miss any criteria outlined in the policy. 29 | 30 | """Output Format""" 31 | 32 | ### Policy Title: 33 | [Provide the title of the policy here.] 34 | 35 | ### Policy Summary: 36 | [Provide a concise summary of the policy text here.] 37 | 38 | ### Conditions: 39 | - [List all conditions outlined in the policy here.] 40 | 41 | ### Recommendations: 42 | - [Provide any recommendations or areas to be careful about here.] 43 | -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/transform_determination_markdown_system_prompt.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | 3 | You are a text transformation assistant whose primary goal is to format the provided text into Markdown compliant text **without altering the user’s original content**. -------------------------------------------------------------------------------- /src/pipeline/promptEngineering/templates/transform_determination_markdown_user_prompt.jinja: -------------------------------------------------------------------------------- 1 | ## Role: 2 | 3 | You are a text transformation assistant whose primary goal is to format a user’s text into Markdown compliant text **without altering the user’s original content**. 
4 | 5 | ## Task: 6 | 7 | As part of the text transformation, you must do the following: 8 | 9 | 1. **Preserve the user’s text exactly** as given (no changes to words or meaning or order). 10 | 2. **Add or correct Markdown formatting** so that the final output is consistent with markdown formatting. 11 | 3. Use **bold text**, **bullet points**, and **section headings** as appropriate. 12 | 4. Only **add formatting** (e.g., headings, bold, lists) – do **not** remove or rephrase any original sentences or content. 13 | 14 | If the original text is already in Markdown text, do nothing and pass the input through as the output, as is. 15 | 16 | ## Transform the text: 17 | 18 | {{ autodetermination_text }} -------------------------------------------------------------------------------- /src/pipeline/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Any, Dict 3 | 4 | import yaml 5 | 6 | from src.utils.ml_logging import get_logger 7 | 8 | # Set up logging 9 | logger = get_logger() 10 | 11 | 12 | def load_config(config_file: str = "config.yaml") -> Dict[str, Any]: 13 | """ 14 | Safely loads the YAML configuration file. 15 | 16 | Args: 17 | config_file (str): Relative or absolute path to the YAML configuration file. 18 | Defaults to "config.yaml". 19 | 20 | Returns: 21 | Dict[str, Any]: Configuration dictionary. Returns an empty dict on error. 
22 | """ 23 | # Convert to absolute path if necessary 24 | if not os.path.isabs(config_file): 25 | base_dir = os.path.dirname(__file__) 26 | config_file = os.path.abspath(os.path.join(base_dir, config_file)) 27 | 28 | if not os.path.exists(config_file): 29 | logger.error(f"Configuration file not found: {config_file}") 30 | return {} 31 | 32 | try: 33 | with open(config_file, "r", encoding="utf-8") as file: 34 | data = yaml.safe_load(file) 35 | if not data: 36 | logger.warning( 37 | f"Configuration file is empty or invalid YAML: {config_file}" 38 | ) 39 | return {} 40 | return data 41 | except yaml.YAMLError as yaml_error: 42 | logger.error(f"Error parsing YAML content in {config_file}: {yaml_error}") 43 | except Exception as e: 44 | logger.error(f"Unexpected error reading {config_file}: {e}") 45 | 46 | return {} 47 | -------------------------------------------------------------------------------- /src/storage/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/storage/__init__.py -------------------------------------------------------------------------------- /src/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/src/utils/__init__.py -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # tests 2 | 3 | usage: 4 | 5 | ```bash 6 | pytest 7 | ``` 8 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a configuration file for pytest containing customizations and fixtures. 3 | 4 | In VSCode, Code Coverage is recorded in config.xml. Delete this file to reset reporting. 5 | """ 6 | 7 | from __future__ import annotations 8 | 9 | import os 10 | 11 | import pytest 12 | from _pytest.nodes import Item 13 | 14 | 15 | def pytest_collection_modifyitems(items: list[Item]): 16 | """ 17 | Auto-mark tests based on node ID: 18 | - If "spark" is in the test path, mark as 'spark' 19 | - If "_int_" is in the test path, mark as 'e2e' 20 | """ 21 | for item in items: 22 | if "spark" in item.nodeid: 23 | item.add_marker(pytest.mark.spark) 24 | elif "_int_" in item.nodeid: 25 | item.add_marker(pytest.mark.integration) 26 | 27 | 28 | @pytest.fixture(scope="function") 29 | def evaluation_setup(monkeypatch): 30 | required_envs = [ 31 | "AZURE_OPENAI_ENDPOINT", 32 | "AZURE_OPENAI_KEY", 33 | "AZURE_AI_FOUNDRY_CONNECTION_STRING", 34 | "AZURE_OPENAI_CHAT_DEPLOYMENT_ID", 35 | ] 36 | 37 | missing_envs = [] 38 | for env_var in required_envs: 39 | value = os.environ.get(env_var, "") 40 | if not value: 41 | missing_envs.append(env_var) 42 | 43 | if missing_envs: 44 | missing_list = ", ".join(missing_envs) 45 | raise EnvironmentError( 46 | f"The following environment variables are not set or are empty: {missing_list}" 47 | ) 48 | 49 | print("[evaluation_setup] Environment variables validated.") 50 | yield 51 | print("[evaluation_setup] Evaluation tests completed.") 52 | 53 | 54 | @pytest.fixture(autouse=True, scope="session") 55 | def add_cwd_to_pythonpath(): 56 | # Get the current working directory 57 | cwd = os.getcwd() 58 | # Retrieve the current PYTHONPATH (if 
any) 59 | current_pythonpath = os.environ.get("PYTHONPATH", "") 60 | # Set PYTHONPATH to include the current working directory 61 | new_pythonpath = f"{cwd}:{current_pythonpath}" if current_pythonpath else cwd 62 | os.environ["PYTHONPATH"] = new_pythonpath 63 | # Optionally, yield to run tests, and then restore the original value if needed 64 | yield 65 | -------------------------------------------------------------------------------- /tests/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/tests/utils/__init__.py -------------------------------------------------------------------------------- /tests/utils/test_logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import pytest 4 | 5 | from src.utils.ml_logging import KEYINFO_LEVEL_NUM, get_logger 6 | 7 | 8 | # Patch the logging module during the tests to capture log records 9 | @pytest.fixture 10 | def caplog(caplog): 11 | caplog.set_level(logging.DEBUG) 12 | return caplog 13 | 14 | 15 | def test_get_logger_default_level(caplog): 16 | logger = get_logger() 17 | test_message = "This is an INFO message" 18 | 19 | logger.info(test_message) 20 | 21 | assert len(caplog.records) == 1 22 | assert caplog.records[0].levelname == "INFO" 23 | assert caplog.records[0].msg == test_message 24 | 25 | 26 | def test_get_logger_custom_level(caplog): 27 | logger = get_logger(level=logging.WARNING) 28 | test_message = "This is a WARNING message" 29 | 30 | logger.warning(test_message) 31 | 32 | assert len(caplog.records) == 1 33 | assert caplog.records[0].levelname == "WARNING" 34 | assert caplog.records[0].msg == test_message 35 | 36 | 37 | def test_get_logger_keyinfo_level(caplog): 38 | logger = get_logger(level=KEYINFO_LEVEL_NUM) 39 | test_message = "This is a KEYINFO message" 40 | 41 | 
logger.log(KEYINFO_LEVEL_NUM, test_message) 42 | 43 | assert len(caplog.records) == 1 44 | assert caplog.records[0].levelname == "KEYINFO" 45 | assert caplog.records[0].msg == test_message 46 | -------------------------------------------------------------------------------- /utils/azd/hooks/postprovision.ps1: -------------------------------------------------------------------------------- 1 | 2 | 3 | Write-Output " 4 | AZURE_OPENAI_ENDPOINT=$(azd env get-value AZURE_OPENAI_ENDPOINT) 5 | AZURE_OPENAI_API_VERSION=$(azd env get-value AZURE_OPENAI_API_VERSION) 6 | AZURE_OPENAI_EMBEDDING_DEPLOYMENT=$(azd env get-value AZURE_OPENAI_EMBEDDING_DEPLOYMENT) 7 | AZURE_OPENAI_CHAT_DEPLOYMENT_ID=$(azd env get-value AZURE_OPENAI_CHAT_DEPLOYMENT_ID) 8 | AZURE_OPENAI_EMBEDDING_DIMENSIONS=$(azd env get-value AZURE_OPENAI_EMBEDDING_DIMENSIONS) 9 | AZURE_SEARCH_SERVICE_NAME=$(azd env get-value AZURE_SEARCH_SERVICE_NAME) 10 | AZURE_SEARCH_INDEX_NAME=$(azd env get-value AZURE_SEARCH_INDEX_NAME) 11 | AZURE_OPENAI_CHAT_DEPLOYMENT_01=$(azd env get-value AZURE_OPENAI_CHAT_DEPLOYMENT_01) 12 | AZURE_OPENAI_API_VERSION_01=$(azd env get-value AZURE_OPENAI_API_VERSION_01) 13 | AZURE_AI_SEARCH_SERVICE_ENDPOINT=$(azd env get-value AZURE_AI_SEARCH_SERVICE_ENDPOINT) 14 | AZURE_AI_SEARCH_ADMIN_KEY=$(azd env get-value AZURE_AI_SEARCH_ADMIN_KEY) 15 | AZURE_BLOB_CONTAINER_NAME=$(azd env get-value AZURE_BLOB_CONTAINER_NAME) 16 | AZURE_STORAGE_ACCOUNT_NAME=$(azd env get-value AZURE_STORAGE_ACCOUNT_NAME) 17 | AZURE_STORAGE_ACCOUNT_KEY=$(azd env get-value AZURE_STORAGE_ACCOUNT_KEY) 18 | AZURE_AI_SERVICES_KEY=$(azd env get-value AZURE_AI_SERVICES_KEY) 19 | AZURE_STORAGE_CONNECTION_STRING=$(azd env get-value AZURE_STORAGE_CONNECTION_STRING) 20 | AZURE_COSMOS_DB_DATABASE_NAME=$(azd env get-value AZURE_COSMOS_DB_DATABASE_NAME) 21 | AZURE_COSMOS_DB_COLLECTION_NAME=$(azd env get-value AZURE_COSMOS_DB_COLLECTION_NAME) 22 | AZURE_COSMOS_CONNECTION_STRING=$(azd env get-value AZURE_COSMOS_CONNECTION_STRING) 23 | 
AZURE_DOCUMENT_INTELLIGENCE_ENDPOINT=$(azd env get-value AZURE_DOCUMENT_INTELLIGENCE_ENDPOINT) 24 | AZURE_DOCUMENT_INTELLIGENCE_KEY=$(azd env get-value AZURE_DOCUMENT_INTELLIGENCE_KEY) 25 | APPLICATIONINSIGHTS_CONNECTION_STRING=$(azd env get-value APPLICATIONINSIGHTS_CONNECTION_STRING) 26 | AZURE_CONTAINER_REGISTRY_ENDPOINT=$(azd env get-value AZURE_CONTAINER_REGISTRY_ENDPOINT) 27 | AZURE_AI_FOUNDRY_CONNECTION_STRING=$(azd env get-value AZURE_AI_FOUNDRY_CONNECTION_STRING) 28 | " > .env 29 | 30 | $disableIngress = azd env get-value DISABLE_INGRESS 31 | if ($disableIngress -eq "true") { 32 | Write-Host "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" -ForegroundColor Red 33 | Write-Host "WARNING: Ingress is disabled. The application endpoint will NOT be reachable!" -ForegroundColor Red 34 | Write-Host "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" -ForegroundColor Red 35 | Write-Host "However, your local debugger should work fine." -ForegroundColor Yellow 36 | } 37 | -------------------------------------------------------------------------------- /utils/azd/hooks/postprovision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Generating .env file..." 
3 | echo " 4 | AZURE_OPENAI_ENDPOINT=$(azd env get-value AZURE_OPENAI_ENDPOINT) 5 | AZURE_OPENAI_API_VERSION=$(azd env get-value AZURE_OPENAI_API_VERSION) 6 | AZURE_OPENAI_EMBEDDING_DEPLOYMENT=$(azd env get-value AZURE_OPENAI_EMBEDDING_DEPLOYMENT) 7 | AZURE_OPENAI_CHAT_DEPLOYMENT_ID=$(azd env get-value AZURE_OPENAI_CHAT_DEPLOYMENT_ID) 8 | AZURE_OPENAI_EMBEDDING_DIMENSIONS=$(azd env get-value AZURE_OPENAI_EMBEDDING_DIMENSIONS) 9 | AZURE_SEARCH_SERVICE_NAME=$(azd env get-value AZURE_SEARCH_SERVICE_NAME) 10 | AZURE_SEARCH_INDEX_NAME=$(azd env get-value AZURE_SEARCH_INDEX_NAME) 11 | AZURE_OPENAI_CHAT_DEPLOYMENT_01=$(azd env get-value AZURE_OPENAI_CHAT_DEPLOYMENT_01) 12 | AZURE_OPENAI_API_VERSION_01=$(azd env get-value AZURE_OPENAI_API_VERSION_01) 13 | AZURE_AI_SEARCH_SERVICE_ENDPOINT=$(azd env get-value AZURE_AI_SEARCH_SERVICE_ENDPOINT) 14 | AZURE_AI_SEARCH_ADMIN_KEY=$(azd env get-value AZURE_AI_SEARCH_ADMIN_KEY) 15 | AZURE_BLOB_CONTAINER_NAME=$(azd env get-value AZURE_BLOB_CONTAINER_NAME) 16 | AZURE_STORAGE_ACCOUNT_NAME=$(azd env get-value AZURE_STORAGE_ACCOUNT_NAME) 17 | AZURE_STORAGE_ACCOUNT_KEY=$(azd env get-value AZURE_STORAGE_ACCOUNT_KEY) 18 | AZURE_AI_SERVICES_KEY=$(azd env get-value AZURE_AI_SERVICES_KEY) 19 | AZURE_STORAGE_CONNECTION_STRING=$(azd env get-value AZURE_STORAGE_CONNECTION_STRING) 20 | AZURE_COSMOS_DB_DATABASE_NAME=$(azd env get-value AZURE_COSMOS_DB_DATABASE_NAME) 21 | AZURE_COSMOS_DB_COLLECTION_NAME=$(azd env get-value AZURE_COSMOS_DB_COLLECTION_NAME) 22 | AZURE_COSMOS_CONNECTION_STRING=$(azd env get-value AZURE_COSMOS_CONNECTION_STRING) 23 | AZURE_DOCUMENT_INTELLIGENCE_ENDPOINT=$(azd env get-value AZURE_DOCUMENT_INTELLIGENCE_ENDPOINT) 24 | AZURE_DOCUMENT_INTELLIGENCE_KEY=$(azd env get-value AZURE_DOCUMENT_INTELLIGENCE_KEY) 25 | APPLICATIONINSIGHTS_CONNECTION_STRING=$(azd env get-value APPLICATIONINSIGHTS_CONNECTION_STRING) 26 | AZURE_CONTAINER_REGISTRY_ENDPOINT=$(azd env get-value AZURE_CONTAINER_REGISTRY_ENDPOINT) 27 | 
AZURE_AI_FOUNDRY_CONNECTION_STRING=$(azd env get-value AZURE_AI_FOUNDRY_CONNECTION_STRING) 28 | " > .env 29 | 30 | disableIngress=$(azd env get-value DISABLE_INGRESS) 31 | if [ "$disableIngress" = "true" ]; then 32 | echo -e "\033[31m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\033[0m" 33 | echo -e "\033[31mWARNING: Ingress is disabled. The application endpoint will NOT be reachable!\033[0m" 34 | echo -e "\033[31m!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\033[0m" 35 | echo -e "\033[33mHowever, your local debugger should work fine.\033[0m" 36 | fi 37 | -------------------------------------------------------------------------------- /utils/azd/hooks/preprovision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CURRENT_USER_CLIENT_ID=$(az ad signed-in-user show --query id -o tsv) 4 | GIT_HASH=$(git rev-parse --short HEAD) 5 | azd env set PRINCIPAL_ID $CURRENT_USER_CLIENT_ID 6 | azd env set GIT_HASH $GIT_HASH 7 | 8 | echo "=======================================" 9 | echo " Current User Client ID: $CURRENT_USER_CLIENT_ID" 10 | echo " Git Commit Hash: $GIT_HASH" 11 | echo "=======================================" 12 | 13 | # Check if ENABLE_EASY_AUTH and DISABLE_INGRESS are set 14 | ENABLE_EASY_AUTH=$(azd env get-value ENABLE_EASY_AUTH 2>/dev/null) || ENABLE_EASY_AUTH="" 15 | DISABLE_INGRESS=$(azd env get-value DISABLE_INGRESS 2>/dev/null) || DISABLE_INGRESS="" 16 | echo "=======================================" 17 | echo " ENABLE_EASY_AUTH: $ENABLE_EASY_AUTH" 18 | echo " DISABLE_INGRESS: $DISABLE_INGRESS" 19 | echo "=======================================" 20 | if [[ -z "$ENABLE_EASY_AUTH" || -z "$DISABLE_INGRESS" ]]; then 21 | while true; do 22 | read -p "Would you like to enable Easy Auth for your Container App? 
(y/n): " enable_easy_auth 23 | if [[ "$enable_easy_auth" =~ ^[Yy]$ ]]; then 24 | azd env set ENABLE_EASY_AUTH true 25 | azd env set DISABLE_INGRESS false 26 | break 27 | elif [[ "$enable_easy_auth" =~ ^[Nn]$ ]]; then 28 | echo "====================================================================" 29 | echo "⚠️ WARNING: You are deploying a publicly exposed frontend application" 30 | echo "without authentication. This poses significant security risks!" 31 | echo "====================================================================" 32 | while true; do 33 | read -p "Would you like to disable ingress by default to mitigate this risk? (y/n): " disable_ingress 34 | if [[ "$disable_ingress" =~ ^[Yy]$ ]]; then 35 | azd env set ENABLE_EASY_AUTH false 36 | azd env set DISABLE_INGRESS true 37 | break 38 | elif [[ "$disable_ingress" =~ ^[Nn]$ ]]; then 39 | break 40 | else 41 | echo "Please enter 'y' or 'n'." 42 | fi 43 | done 44 | break 45 | else 46 | echo "Please enter 'y' or 'n'." 47 | fi 48 | done 49 | elif [[ "$(azd env get-value ENABLE_EASY_AUTH)" == "true" ]]; then 50 | azd env set DISABLE_INGRESS false 51 | else 52 | current_easy_auth=$(azd env get-value ENABLE_EASY_AUTH) 53 | current_disable_ingress=$(azd env get-value DISABLE_INGRESS) 54 | echo "=======================================" 55 | echo "Environment variables already set:" 56 | echo " ENABLE_EASY_AUTH: $current_easy_auth" 57 | echo " DISABLE_INGRESS: $current_disable_ingress" 58 | echo "To change these settings, run:" 59 | echo " azd env set ENABLE_EASY_AUTH " 60 | echo " azd env set DISABLE_INGRESS " 61 | echo "=======================================" 62 | fi 63 | -------------------------------------------------------------------------------- /utils/data/cases/001/a/doctor_notes/01_a_notes.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/001/a/doctor_notes/01_a_notes.pdf -------------------------------------------------------------------------------- /utils/data/cases/001/a/imaging/01_a_imaging.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/001/a/imaging/01_a_imaging.pdf -------------------------------------------------------------------------------- /utils/data/cases/001/a/labs/01_a_labs.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/001/a/labs/01_a_labs.pdf -------------------------------------------------------------------------------- /utils/data/cases/001/a/pa_form/01_a_form.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/001/a/pa_form/01_a_form.pdf -------------------------------------------------------------------------------- /utils/data/cases/001/a/results.json: -------------------------------------------------------------------------------- 1 | { 2 | "case_id": "001_a", 3 | "evaluation_time": "2023-10-01T12:00:00Z", 4 | "decision": "rejected", 5 | "notes": "Manually evaluated by MD based on the policies."
6 | } 7 | -------------------------------------------------------------------------------- /utils/data/cases/001/b/doctor_notes/01_b_notes.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/001/b/doctor_notes/01_b_notes.pdf -------------------------------------------------------------------------------- /utils/data/cases/001/b/imaging/01_b_imaging.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/001/b/imaging/01_b_imaging.pdf -------------------------------------------------------------------------------- /utils/data/cases/001/b/labs/01_b_labs.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/001/b/labs/01_b_labs.pdf -------------------------------------------------------------------------------- /utils/data/cases/001/b/pa_form/01_b_form.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/001/b/pa_form/01_b_form.pdf -------------------------------------------------------------------------------- /utils/data/cases/001/b/results.json: -------------------------------------------------------------------------------- 1 | { 2 | "case_id": "001_b", 3 | "evaluation_time": "2023-10-01T12:00:00Z", 4 | "decision": "approved", 5 | "notes": "Manually evaluated by MD based on the policies." 
6 | } 7 | -------------------------------------------------------------------------------- /utils/data/cases/002/a/doctor_notes/002_a (note).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/002/a/doctor_notes/002_a (note).pdf -------------------------------------------------------------------------------- /utils/data/cases/002/a/imaging/002_a (imaging).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/002/a/imaging/002_a (imaging).pdf -------------------------------------------------------------------------------- /utils/data/cases/002/a/labs/002_a (labs) .pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/002/a/labs/002_a (labs) .pdf -------------------------------------------------------------------------------- /utils/data/cases/002/a/pa_form/002_a (form).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/002/a/pa_form/002_a (form).pdf -------------------------------------------------------------------------------- /utils/data/cases/002/a/results.json: -------------------------------------------------------------------------------- 1 | { 2 | "case_id": "002_a", 3 | "evaluation_time": "2023-10-01T12:00:00Z", 4 | "decision": "approved", 5 | "notes": "Manually evaluated by MD based on the policies." 
6 | } 7 | -------------------------------------------------------------------------------- /utils/data/cases/002/b/doctor_notes/002_b (note).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/002/b/doctor_notes/002_b (note).pdf -------------------------------------------------------------------------------- /utils/data/cases/002/b/imaging/002_b (imaging).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/002/b/imaging/002_b (imaging).pdf -------------------------------------------------------------------------------- /utils/data/cases/002/b/labs/002_b (labs).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/002/b/labs/002_b (labs).pdf -------------------------------------------------------------------------------- /utils/data/cases/002/b/pa_form/002_b (form).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/002/b/pa_form/002_b (form).pdf -------------------------------------------------------------------------------- /utils/data/cases/002/b/results.json: -------------------------------------------------------------------------------- 1 | { 2 | "case_id": "002_b", 3 | "evaluation_time": "2023-10-01T12:00:00Z", 4 | "decision": "approved", 5 | "notes": "Manually evaluated by MD based on the policies."
6 | } 7 | -------------------------------------------------------------------------------- /utils/data/cases/003/a/doctor_notes/003_a (note) .pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/003/a/doctor_notes/003_a (note) .pdf -------------------------------------------------------------------------------- /utils/data/cases/003/a/labs/003_a (labs).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/003/a/labs/003_a (labs).pdf -------------------------------------------------------------------------------- /utils/data/cases/003/a/pa_form/003_a (form).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/003/a/pa_form/003_a (form).pdf -------------------------------------------------------------------------------- /utils/data/cases/003/a/results.json: -------------------------------------------------------------------------------- 1 | { 2 | "case_id": "003_a", 3 | "evaluation_time": "2023-10-01T12:00:00Z", 4 | "decision": "approved", 5 | "notes": "Manually evaluated by MD based on the policies."
6 | } 7 | -------------------------------------------------------------------------------- /utils/data/cases/003/b/doctor_notes/003_b (note) .pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/003/b/doctor_notes/003_b (note) .pdf -------------------------------------------------------------------------------- /utils/data/cases/003/b/labs/003_b (labs) .pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/003/b/labs/003_b (labs) .pdf -------------------------------------------------------------------------------- /utils/data/cases/003/b/pa_form/003_b (form).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/003/b/pa_form/003_b (form).pdf -------------------------------------------------------------------------------- /utils/data/cases/003/b/results.json: -------------------------------------------------------------------------------- 1 | { 2 | "case_id": "003_b", 3 | "evaluation_time": "2023-10-01T12:00:00Z", 4 | "decision": "approved", 5 | "notes": "Manually evaluated by MD based on the policies."
6 | } 7 | -------------------------------------------------------------------------------- /utils/data/cases/004/a/doctor_notes/004_a (note).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/004/a/doctor_notes/004_a (note).pdf -------------------------------------------------------------------------------- /utils/data/cases/004/a/pa_form/004_a (form).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/004/a/pa_form/004_a (form).pdf -------------------------------------------------------------------------------- /utils/data/cases/004/a/results.json: -------------------------------------------------------------------------------- 1 | { 2 | "case_id": "004_a", 3 | "evaluation_time": "2023-10-01T12:00:00Z", 4 | "decision": "approved", 5 | "notes": "Manually evaluated by MD based on the policies."
6 | } 7 | -------------------------------------------------------------------------------- /utils/data/cases/004/b/doctor_notes/004_b (note).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/004/b/doctor_notes/004_b (note).pdf -------------------------------------------------------------------------------- /utils/data/cases/004/b/pa_form/004_b (form).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/004/b/pa_form/004_b (form).pdf -------------------------------------------------------------------------------- /utils/data/cases/004/b/results.json: -------------------------------------------------------------------------------- 1 | { 2 | "case_id": "004_b", 3 | "evaluation_time": "2023-10-01T12:00:00Z", 4 | "decision": "approved", 5 | "notes": "Manually evaluated by MD based on the policies."
6 | } 7 | -------------------------------------------------------------------------------- /utils/data/cases/005/a/doctor_notes/005_a (note) .pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/005/a/doctor_notes/005_a (note) .pdf -------------------------------------------------------------------------------- /utils/data/cases/005/a/imaging/005_a (imaging).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/005/a/imaging/005_a (imaging).pdf -------------------------------------------------------------------------------- /utils/data/cases/005/a/labs/005_a (labs).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/005/a/labs/005_a (labs).pdf -------------------------------------------------------------------------------- /utils/data/cases/005/a/pa_form/005_a (form).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/005/a/pa_form/005_a (form).pdf -------------------------------------------------------------------------------- /utils/data/cases/005/a/results.json: -------------------------------------------------------------------------------- 1 | { 2 | "case_id": "005_a", 3 | "evaluation_time": "2023-10-01T12:00:00Z", 4 | "decision": "approved", 5 | "notes": "Manually evaluated by MD based on the policies."
6 | } 7 | -------------------------------------------------------------------------------- /utils/data/cases/005/b/doctor_notes/005_b (note).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/005/b/doctor_notes/005_b (note).pdf -------------------------------------------------------------------------------- /utils/data/cases/005/b/imaging/005_b (imaging).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/005/b/imaging/005_b (imaging).pdf -------------------------------------------------------------------------------- /utils/data/cases/005/b/labs/005_b (labs).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/005/b/labs/005_b (labs).pdf -------------------------------------------------------------------------------- /utils/data/cases/005/b/pa_form/005_b (form).pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/005/b/pa_form/005_b (form).pdf -------------------------------------------------------------------------------- /utils/data/cases/005/b/results.json: -------------------------------------------------------------------------------- 1 | { 2 | "case_id": "005_b", 3 | "evaluation_time": "2023-10-01T12:00:00Z", 4 | "decision": "approved", 5 | "notes": "Manually evaluated by MD based on the policies."
6 | } 7 | -------------------------------------------------------------------------------- /utils/data/cases/policies/001.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/policies/001.pdf -------------------------------------------------------------------------------- /utils/data/cases/policies/002.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/policies/002.pdf -------------------------------------------------------------------------------- /utils/data/cases/policies/003.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/policies/003.pdf -------------------------------------------------------------------------------- /utils/data/cases/policies/004.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/policies/004.pdf -------------------------------------------------------------------------------- /utils/data/cases/policies/005.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/cases/policies/005.pdf -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_form/page_1.jpeg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_form/page_1.jpeg -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_form/page_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_form/page_1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_form/page_2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_form/page_2.jpeg -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_form/page_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_form/page_2.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_imaging/page_1.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_imaging/page_1.jpeg -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_imaging/page_1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_imaging/page_1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_labs/page_1.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_labs/page_1.jpeg -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_labs/page_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_labs/page_1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_labs/page_2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_labs/page_2.jpeg -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_labs/page_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_labs/page_2.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_labs/page_3.jpeg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_labs/page_3.jpeg -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_labs/page_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_labs/page_3.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_notes/page_1.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_notes/page_1.jpeg -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_notes/page_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_notes/page_1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_notes/page_2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_notes/page_2.jpeg -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_notes/page_2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_notes/page_2.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_notes/page_3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_notes/page_3.jpeg -------------------------------------------------------------------------------- /utils/data/pdfs/001_a/01_a_notes/page_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_a/01_a_notes/page_3.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-10.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-11.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-11.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-12.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-13.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-14.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-15.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-16.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-16.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-17.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-17.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-18.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-2.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-3.png -------------------------------------------------------------------------------- 
/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-4.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-5.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-6.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-7.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-8.png 
-------------------------------------------------------------------------------- /utils/data/pdfs/001_b/001_inflammatory_Conditions-page-9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/001_inflammatory_Conditions-page-9.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/01_a_form-page-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/01_a_form-page-1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/01_a_form-page-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/01_a_form-page-2.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/01_a_imaging-page-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/01_a_imaging-page-1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/01_a_labs-page-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/01_a_labs-page-1.png -------------------------------------------------------------------------------- 
/utils/data/pdfs/001_b/01_a_labs-page-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/01_a_labs-page-2.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/01_a_labs-page-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/01_a_labs-page-3.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/01_a_notes-page-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/01_a_notes-page-1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/01_a_notes-page-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/01_a_notes-page-2.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b/01_a_notes-page-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b/01_a_notes-page-3.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b_rejected/01_b_form-page-1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b_rejected/01_b_form-page-1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b_rejected/01_b_form-page-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b_rejected/01_b_form-page-2.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b_rejected/01_b_imaging-page-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b_rejected/01_b_imaging-page-1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b_rejected/01_b_labs-page-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b_rejected/01_b_labs-page-1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b_rejected/01_b_labs-page-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b_rejected/01_b_labs-page-2.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b_rejected/01_b_labs-page-3.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b_rejected/01_b_labs-page-3.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b_rejected/01_b_notes-page-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b_rejected/01_b_notes-page-1.png -------------------------------------------------------------------------------- /utils/data/pdfs/001_b_rejected/01_b_notes-page-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/data/pdfs/001_b_rejected/01_b_notes-page-2.png -------------------------------------------------------------------------------- /utils/images/AI_HLS_AutoAuth.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/images/AI_HLS_AutoAuth.jpg -------------------------------------------------------------------------------- /utils/images/azure_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/images/azure_logo.png -------------------------------------------------------------------------------- /utils/images/diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/images/diagram.png 
-------------------------------------------------------------------------------- /utils/images/diagram_latest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/images/diagram_latest.png -------------------------------------------------------------------------------- /utils/images/flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/images/flow.png -------------------------------------------------------------------------------- /utils/images/paworflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/images/paworflow.png -------------------------------------------------------------------------------- /utils/images/prior_auth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/images/prior_auth.png -------------------------------------------------------------------------------- /utils/images/prior_auth_flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/images/prior_auth_flow.png -------------------------------------------------------------------------------- /utils/images/vimeo_video.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/images/vimeo_video.png -------------------------------------------------------------------------------- /utils/scripts/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure-Samples/autoauth-solution-accelerator/83d5ccfbce9f17b8d2506ed411a63f813283d6f5/utils/scripts/README.md -------------------------------------------------------------------------------- /utils/scripts/create-application.sh: -------------------------------------------------------------------------------- 1 | # Create the Azure AD application. 2 | # Example web redirect uri: "https://mycontainer.somerevision.westus.azurecontainerapps.io/.auth/login/aad/callback" 3 | application=$(az ad app create --display-name $AzureADApplicationName \ 4 | --web-redirect-uris $AzureADApplicationRedirectUri \ 5 | --enable-id-token-issuance true \ 6 | --sign-in-audience AzureADMyOrg ) 7 | 8 | 9 | applicationObjectId=$(jq -r '.id' <<< "$application") 10 | applicationClientId=$(jq -r '.appId' <<< "$application") 11 | 12 | az ad app update --id $applicationObjectId --identifier-uris "api://$applicationClientId" 13 | 14 | # Generate a new GUID for the oauth2PermissionScope id 15 | newScopeId=$(uuidgen) 16 | jsonApiScopes=' 17 | { 18 | "oauth2PermissionScopes": [ 19 | { 20 | "adminConsentDescription": "Allow the application to access priorAuth-container-app on behalf of the signed-in user.", 21 | "adminConsentDisplayName": "Access priorAuth-container-app", 22 | "id": "'$newScopeId'", 23 | "isEnabled": true, 24 | "type": "User", 25 | "userConsentDescription": "Allow the application to access container-app on your behalf.", 26 | "userConsentDisplayName": "Access priorAuth-container-app", 27 | "value": "user_impersonation" 28 | } 29 | ] 30 | }' 31 | apiScopes=$(echo $jsonApiScopes | jq -c '.') 32 | 33 | # Add the oauth 
permission scope to the application. 34 | az ad app update --id $applicationClientId --set api=$apiScopes --verbose 35 | 36 | # Add User.Read permission to the application. 37 | az ad app permission add --id $applicationObjectId \ 38 | --api 00000003-0000-0000-c000-000000000000 \ 39 | --api-permissions e1fe6dd8-ba31-4d61-89e7-88639da4683d=Scope 40 | 41 | # # Grant admin consent for the required permissions. 42 | az ad app permission admin-consent --id $applicationObjectId 43 | 44 | # # Create a service principal for the application. 45 | # servicePrincipal=$(az ad sp create --id $applicationObjectId) 46 | # servicePrincipalObjectId=$(jq -r '.id' <<< "$servicePrincipal") 47 | 48 | # # Save the important properties as depoyment script outputs. 49 | outputJson=$(jq -n \ 50 | --arg applicationObjectId "$applicationObjectId" \ 51 | --arg applicationClientId "$applicationClientId" \ 52 | '{applicationObjectId: $applicationObjectId, applicationClientId: $applicationClientId}' ) 53 | # --arg servicePrincipalObjectId "$servicePrincipalObjectId" \ 54 | 55 | # Add the applicationObjectId and applicationClientId to azd environment 56 | azd env set AZURE_AD_APP_OBJECT_ID $applicationObjectId 57 | azd env set AZURE_AD_APP_CLIENT_ID $applicationClientId 58 | 59 | if [ -n "$AZ_SCRIPTS_OUTPUT_PATH" ]; then 60 | echo $outputJson > $AZ_SCRIPTS_OUTPUT_PATH 61 | else 62 | echo $outputJson 63 | fi 64 | --------------------------------------------------------------------------------