├── tests ├── __init__.py └── test_friendly_case.py ├── docs ├── source │ ├── modules.rst │ ├── index.rst │ └── conf.py ├── requirements.txt ├── Makefile └── make.bat ├── src └── sempy_labs │ ├── tom │ └── __init__.py │ ├── report │ ├── _bpareporttemplate │ │ ├── definition │ │ │ ├── version.json │ │ │ ├── pages │ │ │ │ ├── 92735ae19b31712208ad │ │ │ │ │ ├── page.json │ │ │ │ │ └── visuals │ │ │ │ │ │ └── 66e60dfb526437cd78d1 │ │ │ │ │ │ └── visual.json │ │ │ │ ├── d37dce724a0ccc30044b │ │ │ │ │ ├── page.json │ │ │ │ │ └── visuals │ │ │ │ │ │ └── ce8532a7e25020271077 │ │ │ │ │ │ └── visual.json │ │ │ │ ├── pages.json │ │ │ │ ├── 01d72098bda5055bd500 │ │ │ │ │ ├── page.json │ │ │ │ │ └── visuals │ │ │ │ │ │ ├── 3b1182230aa6c600b43a │ │ │ │ │ │ └── visual.json │ │ │ │ │ │ ├── b6a80ee459e716e170b1 │ │ │ │ │ │ └── visual.json │ │ │ │ │ │ └── 1b08bce3bebabb0a27a8 │ │ │ │ │ │ └── visual.json │ │ │ │ └── c597da16dc7e63222a82 │ │ │ │ │ ├── page.json │ │ │ │ │ └── visuals │ │ │ │ │ ├── 0c171de9d2683d10b930 │ │ │ │ │ └── visual.json │ │ │ │ │ ├── 88d8141cb8500b60030c │ │ │ │ │ └── visual.json │ │ │ │ │ └── b8fdc82cddd61ac447bc │ │ │ │ │ └── visual.json │ │ │ └── report.json │ │ ├── .platform │ │ ├── definition.pbir │ │ └── .pbi │ │ │ └── localSettings.json │ ├── __init__.py │ ├── _paginated.py │ ├── _download_report.py │ ├── _report_list_functions.py │ └── _report_bpa_rules.py │ ├── graph │ ├── __init__.py │ └── _teams.py │ ├── lakehouse │ ├── __init__.py │ └── _get_lakehouse_columns.py │ ├── directlake │ ├── _get_shared_expression.py │ ├── __init__.py │ ├── _get_directlake_lakehouse.py │ ├── _list_directlake_model_calc_tables.py │ ├── _guardrails.py │ ├── _generate_shared_expression.py │ └── _show_unsupported_directlake_objects.py │ ├── migration │ ├── __init__.py │ ├── _migration_validation.py │ └── _direct_lake_to_import.py │ ├── _utils.py │ ├── _mirrored_warehouses.py │ ├── _dashboards.py │ ├── _dax_query_view.py │ ├── admin │ ├── _artifacts.py │ ├── _git.py │ ├── _shared.py │ ├── _external_data_share.py │ ├── __init__.py │ ├── _apps.py │ ├── _scanner.py │ ├── _users.py │ └── _workspaces.py │ ├── _workspace_identity.py │ ├── _graphQL.py │ ├── _ml_models.py │ ├── _ml_experiments.py │ ├── _eventstreams.py │ ├── _kql_querysets.py │ ├── _icons.py │ ├── _mounted_data_factories.py │ ├── _kql_databases.py │ ├── _semantic_models.py │ ├── _workloads.py │ └── _data_pipelines.py ├── .vscode └── settings.json ├── environment.yml ├── CODE_OF_CONDUCT.md ├── .github ├── ISSUE_TEMPLATE │ ├── issue--question---advice-needed.md │ ├── feature_request.md │ └── bug_report.md └── workflows │ ├── codeql.yaml │ └── build.yaml ├── .readthedocs.yaml ├── LICENSE ├── SUPPORT.md ├── pyproject.toml ├── SECURITY.md ├── notebooks ├── Best Practice Analyzer Report.ipynb ├── Delta Analyzer.ipynb ├── Semantic Model Management.ipynb └── Semantic Model Refresh.ipynb └── .gitignore /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/source/modules.rst: -------------------------------------------------------------------------------- 1 | sempy_labs 2 | ========== 3 | 4 | .. 
toctree:: 5 | :maxdepth: 4 6 | 7 | sempy_labs 8 | -------------------------------------------------------------------------------- /src/sempy_labs/tom/__init__.py: -------------------------------------------------------------------------------- 1 | from sempy_labs.tom._model import TOMWrapper, connect_semantic_model 2 | 3 | __all__ = ["TOMWrapper", "connect_semantic_model"] 4 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.testing.pytestArgs": [ 3 | "tests" 4 | ], 5 | "python.testing.unittestEnabled": false, 6 | "python.testing.pytestEnabled": true 7 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/version.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/versionMetadata/1.0.0/schema.json", 3 | "version": "2.0.0" 4 | } -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: fabric 2 | dependencies: 3 | - flake8 4 | - mypy 5 | - pytest 6 | - pytest-cov 7 | - pytest-mock 8 | - pip: 9 | - semantic-link-sempy>=0.9.3 10 | - azure-identity==1.7.1 11 | - azure-storage-blob>=12.9.0 12 | - pandas-stubs 13 | - types-tqdm -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | semantic-link-sempy 2 | sphinx_rtd_theme 3 | pandas==2.0.3 4 | numpy<2.0.0 # 2.0.0 breaks the build 5 | # pyspark==3.5.0 6 | azure-identity==1.16.1 7 | azure-keyvault-secrets 8 | azure-storage-file-datalake==12.3.1 9 | azure-storage-blob>=12.9.0 10 | anytree 11 | IPython 12 | polib 13 | powerbiclient 14 | jsonpath_ng -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/92735ae19b31712208ad/page.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/page/1.1.0/schema.json", 3 | "name": "92735ae19b31712208ad", 4 | "displayName": "Rules", 5 | "displayOption": "FitToPage", 6 | "height": 720, 7 | "width": 1280 8 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/.platform: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/gitIntegration/platformProperties/2.0.0/schema.json", 3 | "metadata": { 4 | "type": "Report", 5 | "displayName": "BPAReport" 6 | }, 7 | "config": { 8 | "version": "2.0", 9 | "logicalId": "a201f2cd-fd25-465f-bfbc-33b151e38b31" 10 | } 11 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/page.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/page/1.1.0/schema.json", 3 | "name": "d37dce724a0ccc30044b", 4 | "displayName": "Rule Tooltip", 5 | 
"displayOption": "ActualSize", 6 | "height": 240, 7 | "width": 320, 8 | "visibility": "HiddenInViewMode" 9 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/pages.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/pagesMetadata/1.0.0/schema.json", 3 | "pageOrder": [ 4 | "01d72098bda5055bd500", 5 | "c597da16dc7e63222a82", 6 | "92735ae19b31712208ad", 7 | "d37dce724a0ccc30044b" 8 | ], 9 | "activePageName": "01d72098bda5055bd500" 10 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/page.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/page/1.1.0/schema.json", 3 | "name": "01d72098bda5055bd500", 4 | "displayName": "Main View", 5 | "displayOption": "FitToPage", 6 | "height": 720, 7 | "width": 1280, 8 | "filterConfig": { 9 | "filterSortOrder": "Custom" 10 | } 11 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/page.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/page/1.1.0/schema.json", 3 | "name": "c597da16dc7e63222a82", 4 | "displayName": "Model View", 5 | "displayOption": "FitToPage", 6 | "height": 720, 7 | "width": 1280, 8 | "filterConfig": { 9 | "filterSortOrder": "Custom" 10 | } 11 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition.pbir: -------------------------------------------------------------------------------- 1 | { 2 | "version": "4.0", 3 | "datasetReference": { 4 | "byPath": null, 5 | "byConnection": { 6 | "connectionString": null, 7 | "pbiServiceModelId": null, 8 | "pbiModelVirtualServerName": "sobe_wowvirtualserver", 9 | "pbiModelDatabaseName": null, 10 | "name": null, 11 | "connectionType": "pbiServiceXmlaStyleLive" 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /tests/test_friendly_case.py: -------------------------------------------------------------------------------- 1 | from sempy_labs._helper_functions import convert_to_friendly_case 2 | 3 | 4 | def test_convert_to_friendly_case(): 5 | 6 | assert convert_to_friendly_case('MyNewTable34') == 'My New Table34' 7 | 8 | assert convert_to_friendly_case('Testing_my_new function') == 'Testing My New Function' 9 | 10 | assert convert_to_friendly_case('testingMyNewFunction') == 'Testing My New Function' 11 | assert convert_to_friendly_case(None) is None 12 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. semantic-link-labs documentation master file, created by 2 | sphinx-quickstart on Thu Jun 6 15:52:49 2024. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to semantic-link-labs's documentation! 7 | ============================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | :caption: Contents: 12 | 13 | modules 14 | 15 | Indices and tables 16 | ================== 17 | 18 | * :ref:`genindex` 19 | * :ref:`modindex` 20 | * :ref:`search` 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/issue--question---advice-needed.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 'Issue: Question / Advice needed' 3 | about: Ask a question about this project 4 | title: '' 5 | labels: question 6 | assignees: '' 7 | 8 | --- 9 | 10 | **What are you trying to achieve?** 11 | Provide a brief background description of the problem you're trying to solve. Include information about the semantic engine you're working on (Power BI Desktop, Azure Analysis Services, etc.), as not everything is possible across engines. 12 | 13 | **What have you tried so far?** 14 | If applicable, describe the steps you have tried so far. For scripting-related questions, show the code that you have written. 15 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [main] 4 | pull_request: 5 | branches: [main] 6 | schedule: 7 | - cron: '20 14 * * 1' 8 | 9 | permissions: 10 | actions: read 11 | contents: read 12 | security-events: write # To upload sarif files 13 | 14 | jobs: 15 | analyze: 16 | name: Analyze 17 | runs-on: ubuntu-latest 18 | 19 | steps: 20 | - uses: actions/checkout@v3 21 | 22 | - name: Initialize CodeQL 23 | uses: github/codeql-action/init@v3 24 | with: 25 | languages: python 26 | 27 | - name: Perform CodeQL Analysis 28 | uses: github/codeql-action/analyze@v3 29 | 30 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /src/sempy_labs/graph/__init__.py: -------------------------------------------------------------------------------- 1 | from sempy_labs.graph._groups import ( 2 | list_groups, 3 | list_group_owners, 4 | list_group_members, 5 | add_group_members, 6 | add_group_owners, 7 | resolve_group_id, 8 | renew_group, 9 | ) 10 | from sempy_labs.graph._users import ( 11 | resolve_user_id, 12 | get_user, 13 | list_users, 14 | send_mail, 15 | ) 16 | from sempy_labs.graph._teams import ( 17 | list_teams, 18 | ) 19 | 20 | __all__ = [ 21 | "list_groups", 22 | "list_group_owners", 23 | "list_group_members", 24 | "add_group_members", 25 | "add_group_owners", 26 | "renew_group", 27 | "resolve_group_id", 28 | "resolve_user_id", 29 | "get_user", 30 | "list_users", 31 | "send_mail", 32 | "list_teams", 33 | ] 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Additional context** 32 | Add any other context about the problem here. 
33 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the OS, Python version and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.12" 13 | # You can also specify other tool versions: 14 | # nodejs: "19" 15 | # rust: "1.64" 16 | # golang: "1.19" 17 | jobs: 18 | pre_build: 19 | - sphinx-apidoc -f -o docs/source src/sempy_labs/ 20 | 21 | # Build documentation in the "docs/" directory with Sphinx 22 | sphinx: 23 | configuration: docs/source/conf.py 24 | 25 | # Optionally build your docs in additional formats such as PDF and ePub 26 | # formats: 27 | # - pdf 28 | # - epub 29 | 30 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 31 | python: 32 | install: 33 | - requirements: docs/requirements.txt -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 
21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /src/sempy_labs/lakehouse/__init__.py: -------------------------------------------------------------------------------- 1 | from sempy_labs.lakehouse._get_lakehouse_columns import ( 2 | get_lakehouse_columns, 3 | ) 4 | from sempy_labs.lakehouse._get_lakehouse_tables import ( 5 | get_lakehouse_tables, 6 | ) 7 | from sempy_labs.lakehouse._lakehouse import ( 8 | lakehouse_attached, 9 | optimize_lakehouse_tables, 10 | vacuum_lakehouse_tables, 11 | run_table_maintenance, 12 | ) 13 | from sempy_labs.lakehouse._shortcuts import ( 14 | # create_shortcut, 15 | create_shortcut_onelake, 16 | delete_shortcut, 17 | reset_shortcut_cache, 18 | list_shortcuts, 19 | ) 20 | from sempy_labs.lakehouse._blobs import ( 21 | recover_lakehouse_object, 22 | list_blobs, 23 | ) 24 | 25 | __all__ = [ 26 | "get_lakehouse_columns", 27 | "get_lakehouse_tables", 28 | "lakehouse_attached", 29 | "optimize_lakehouse_tables", 30 | # create_shortcut, 31 | "create_shortcut_onelake", 32 | "delete_shortcut", 33 | "vacuum_lakehouse_tables", 34 | "reset_shortcut_cache", 35 | "run_table_maintenance", 36 | "list_shortcuts", 37 | "recover_lakehouse_object", 38 | "list_blobs", 39 | ] 40 | -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/0c171de9d2683d10b930/visual.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/visualContainer/1.1.0/schema.json", 3 | "name": "0c171de9d2683d10b930", 4 | "position": { 5 | "x": 25, 6 | "y": 120, 7 | "z": 6003, 8 | "height": 53.75, 9 | "width": 562.5, 10 | "tabOrder": 6003 11 | }, 12 | "visual": { 13 | "visualType": "textbox", 14 | "objects": { 15 | "general": [ 16 | { 17 | "properties": { 18 | "paragraphs": [ 19 | { 20 | "textRuns": [ 21 | { 22 | "value": "Select a model within a workspace ---------------------------->", 23 | "textStyle": { 24 | "fontWeight": "bold", 25 | "fontSize": "14pt" 26 | } 27 | } 28 | ] 29 | } 30 | ] 31 | } 32 | } 33 | ] 34 | }, 35 | "drillFilterOtherVisuals": true 36 | } 37 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/d37dce724a0ccc30044b/visuals/ce8532a7e25020271077/visual.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/visualContainer/1.1.0/schema.json", 3 | "name": "ce8532a7e25020271077", 4 | "position": { 5 | "x": 10, 6 | "y": 0, 7 | "z": 0, 8 | "height": 220, 9 | "width": 280, 10 | "tabOrder": 0 11 | }, 12 | "visual": { 13 | "visualType": "tableEx", 14 | "query": { 15 | "queryState": { 16 | "Values": { 17 | "projections": [ 18 | { 19 | "field": { 20 | "Column": { 21 | "Expression": { 22 | "SourceRef": { 23 | "Entity": "BPAResults" 24 | } 25 | }, 26 | "Property": "Description" 27 | } 28 | }, 29 | "queryRef": "BPAResults.Description", 30 | "nativeQueryRef": "Description" 
31 |             }
32 |           ]
33 |         }
34 |       }
35 |     },
36 |     "drillFilterOtherVisuals": true
37 |   }
38 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) Microsoft Corporation.
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/src/sempy_labs/directlake/_get_shared_expression.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 | from uuid import UUID
3 | 
4 | 
5 | def get_shared_expression(
6 |     lakehouse: Optional[str] = None, workspace: Optional[str | UUID] = None
7 | ) -> str:
8 |     """
9 |     Dynamically generates the M expression used by a Direct Lake model for a given lakehouse.
10 | 
11 |     Parameters
12 |     ----------
13 |     lakehouse : str, default=None
14 |         The Fabric lakehouse used by the Direct Lake semantic model.
15 |         Defaults to None which resolves to the lakehouse attached to the notebook.
16 |     workspace : str | uuid.UUID, default=None
17 |         The Fabric workspace name or ID used by the lakehouse.
18 |         Defaults to None which resolves to the workspace of the attached lakehouse
19 |         or if no lakehouse attached, resolves to the workspace of the notebook.
20 | 
21 |     Returns
22 |     -------
23 |     str
24 |         The M expression which can be used to connect a Direct Lake semantic model to its SQL Endpoint.
25 |     """
26 |     from sempy_labs.directlake._generate_shared_expression import (
27 |         generate_shared_expression,
28 |     )
29 | 
30 |     return generate_shared_expression(
31 |         item_name=lakehouse, item_type="Lakehouse", workspace=workspace
32 |     )
33 | 
--------------------------------------------------------------------------------
/SUPPORT.md:
--------------------------------------------------------------------------------
1 | # TODO: The maintainer of this repo has not yet edited this file
2 | 
3 | **REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project?
4 | 
5 | - **No CSS support:** Fill out this template with information about how to file issues and get help.
6 | - **Yes CSS support:** Fill out an intake form at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). CSS will work with/help you to determine next steps.
7 | - **Not sure?** Fill out an intake as though the answer were "Yes". CSS will help you decide.
8 | 9 | *Then remove this first heading from this SUPPORT.MD file before publishing your repo.* 10 | 11 | # Support 12 | 13 | ## How to file issues and get help 14 | 15 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing 16 | issues before filing new issues to avoid duplicates. For new issues, file your bug or 17 | feature request as a new Issue. 18 | 19 | For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE 20 | FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER 21 | CHANNEL. WHERE WILL YOU HELP PEOPLE?**. 22 | 23 | ## Microsoft Support Policy 24 | 25 | Support for this **PROJECT or PRODUCT** is limited to the resources listed above. 26 | -------------------------------------------------------------------------------- /src/sempy_labs/migration/__init__.py: -------------------------------------------------------------------------------- 1 | from sempy_labs.migration._create_pqt_file import create_pqt_file 2 | from sempy_labs.migration._migrate_calctables_to_lakehouse import ( 3 | migrate_calc_tables_to_lakehouse, 4 | migrate_field_parameters, 5 | ) 6 | from sempy_labs.migration._migrate_calctables_to_semantic_model import ( 7 | migrate_calc_tables_to_semantic_model, 8 | ) 9 | from sempy_labs.migration._migrate_model_objects_to_semantic_model import ( 10 | migrate_model_objects_to_semantic_model, 11 | ) 12 | from sempy_labs.migration._migrate_tables_columns_to_semantic_model import ( 13 | migrate_tables_columns_to_semantic_model, 14 | ) 15 | from sempy_labs.migration._migration_validation import ( 16 | migration_validation, 17 | ) 18 | from sempy_labs.migration._refresh_calc_tables import ( 19 | refresh_calc_tables, 20 | ) 21 | from sempy_labs.migration._direct_lake_to_import import ( 22 | migrate_direct_lake_to_import, 23 | ) 24 | 25 | __all__ = [ 26 | "create_pqt_file", 27 | "migrate_calc_tables_to_lakehouse", 28 | "migrate_field_parameters", 29 | "migrate_calc_tables_to_semantic_model", 30 | "migrate_model_objects_to_semantic_model", 31 | "migrate_tables_columns_to_semantic_model", 32 | "migration_validation", 33 | "refresh_calc_tables", 34 | "migrate_direct_lake_to_import", 35 | ] 36 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "setuptools-scm"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name="semantic-link-labs" 7 | authors = [ 8 | { name = "Microsoft Corporation" }, 9 | ] 10 | version="0.9.9" 11 | description="Semantic Link Labs for Microsoft Fabric" 12 | readme="README.md" 13 | requires-python=">=3.10,<3.12" 14 | classifiers = [ 15 | "Development Status :: 3 - Alpha", 16 | "Intended Audience :: Developers", 17 | "Intended Audience :: Education", 18 | "Intended Audience :: Science/Research", 19 | "Programming Language :: Python :: 3.10", 20 | "Programming Language :: Python :: 3 :: Only", 21 | "Framework :: Jupyter" 22 | ] 23 | license= { text = "MIT License" } 24 | 25 | dependencies = [ 26 | "semantic-link-sempy>=0.9.3", 27 | "anytree", 28 | "powerbiclient", 29 | "polib", 30 | "jsonpath_ng", 31 | ] 32 | 33 | [tool.setuptools.packages.find] 34 | where = ["src"] 35 | 36 | [tool.setuptools.package-data] 37 | "*" = ["*.*"] 38 | 39 | [project.optional-dependencies] 40 | test = [ 41 | "pytest>=8.2.1", 42 | ] 43 | 44 | [project.urls] 45 | Repository = 
"https://github.com/microsoft/semantic-link-labs.git" 46 | 47 | [[tool.mypy.overrides]] 48 | module = "sempy.*,Microsoft.*,System.*,anytree.*,powerbiclient.*,synapse.ml.services.*,polib.*,jsonpath_ng.*" 49 | ignore_missing_imports = true 50 | 51 | [tool.flake8] 52 | max-line-length = 200 -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/.pbi/localSettings.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.0", 3 | "remoteArtifacts": [ 4 | { 5 | "reportId": "6a91c344-dba8-4ebf-bedb-e07134f2a204" 6 | } 7 | ], 8 | "securityBindingsSignature": "AQAAANCMnd8BFdERjHoAwE/Cl+sBAAAAMAVu7l76YU6Sl11KOiJdgQAAAAACAAAAAAAQZgAAAAEAACAAAAD1Ty+c7tZLT9/Sjguxhn/5ivfLWfUMNtgudyJ3BKzzZgAAAAAOgAAAAAIAACAAAABAyGf+iKMwvmNtcoRczjgebeIm0nlc9SFYTBYv3N7yvVADAADQN3JsIsvJUcKKc9WMo2EhiE21odezpd35tb+yudHwA/RYhypMA3fwiCTwArLefBZQ3vZ7KYh4MjihXS07i9o1XVqxAmDoli83Yhs/Wei+0HIfYOT5HOVHLUEul5x41Yx/7Bdfhc881SK6IoaJogBdwsiJVxPne+niMYqJQA6qLEPyJ33g6ucUxLA40lwdbN2cMWFzRn6tymmicDPwH0hcGPDMWwseAU+OuUeidkneRWhUGs6lkiiXLiO6kmY5RKq+S4FdtR19/e1B6EjAd94zSw+M5jQzYxn4eCZzWYiB+8Zd/jy07lfyLoGwagNqiQzbcNONqQd5w0n+8/+n4zGkBi2UojfRXoGaYDirQeZMTbt3pfPx2PArxsJ8dF0iT634pHiCF1ZFdtY+79JaFLUUG+Yf7JJv8IxuuuF74tAp4NYmuOij4hTDaf8Jafa5IoRVh7ICkwrjJyVQ8dG7I3tr0VvR+toBPG3Zlbm9BijcaBxhh1AINhnRAIkENOnPFQVH7l3Ml7B60H8Tst6ic3ihCCMYjtmN+NNWqFrJKT2trilh5TAxN+ei4H5fPwM9S7zb2bH5jhExcYTtoe7iCzxOvBsoYoFM+7FMjn9R2FATNICktYdbKDo1Of+u4oZ1+RsvBHQBVaMhSCoZ7+K5T5pZayNK3V2UID3wOuLOYvouxXXr4NVFsdgiV2oMuxTWeqmd/4bLxeqe3uTkGFmQU4mumF2YVsNbdO3IcRXhhrCCZ27ffzXBsH+lE3EhusD37Z0dsVbVVlG8AHXCh7Atgd8n73/eSI5mvj36DCOSRBVauItIATIa2FXueKA7vU6lRDYBSX8FCC2qkeN6dWpMoN5uXXEBsb5Yot1Fgrovcyl5lk7rh772Xon4FaIYFHZpklsY3JK5EXp3bF8UOE6ByN1ZucmkGgYRcTT/up/Uc86TLN6env9XXL4FQYPlReiOGWKBLVi9OoXGRLDshspniULtV3EwQ6WsjF2AyQ+WdLj3bbWKzG5Mg9jvANLrjycZAGWskh4X5JDGiv4TiJmnYQ/xPZAKKiowpVIHikLeG76uXFI+bxtpihV9+DaEJy4UxisHQxwuvUsQs38u3SHgpJmT8CNssZl41+T/IJdoQwJFLUAAAACnUQZGV9DvcOyrj8HBpXBVB5PuOQDxLB4HZOevHqCB5dc5z787E93B51QmN7I15fF6GCdWwN5f94gv1er2dtN3" 9 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/__init__.py: -------------------------------------------------------------------------------- 1 | from sempy_labs.report._save_report import ( 2 | save_report_as_pbip, 3 | ) 4 | from sempy_labs.report._reportwrapper import ( 5 | ReportWrapper, 6 | ) 7 | from sempy_labs.report._paginated import ( 8 | get_report_datasources, 9 | ) 10 | from sempy_labs.report._generate_report import ( 11 | create_report_from_reportjson, 12 | get_report_definition, 13 | update_report_from_reportjson, 14 | create_model_bpa_report, 15 | ) 16 | from sempy_labs.report._download_report import download_report 17 | from sempy_labs.report._report_functions import ( 18 | get_report_json, 19 | # report_dependency_tree, 20 | clone_report, 21 | launch_report, 22 | # translate_report_titles 23 | ) 24 | from sempy_labs.report._report_rebind import ( 25 | report_rebind, 26 | report_rebind_all, 27 | ) 28 | from sempy_labs.report._report_bpa_rules import report_bpa_rules 29 | from sempy_labs.report._report_bpa import run_report_bpa 30 | from sempy_labs.report._export_report import ( 31 | export_report, 32 | ) 33 | 34 | __all__ = [ 35 | "create_report_from_reportjson", 36 | "update_report_from_reportjson", 37 | "get_report_json", 38 | # report_dependency_tree, 39 | "export_report", 40 | "clone_report", 41 | "launch_report", 42 | # translate_report_titles, 43 | 
"report_rebind", 44 | "report_rebind_all", 45 | "get_report_definition", 46 | "create_model_bpa_report", 47 | "ReportWrapper", 48 | "report_bpa_rules", 49 | "run_report_bpa", 50 | "get_report_datasources", 51 | "download_report", 52 | "save_report_as_pbip", 53 | ] 54 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | sys.path.insert(0, os.path.abspath('../../src/')) 4 | 5 | # Configuration file for the Sphinx documentation builder. 6 | # 7 | # For the full list of built-in configuration values, see the documentation: 8 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 9 | 10 | # -- Project information ----------------------------------------------------- 11 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 12 | 13 | project = 'semantic-link-labs' 14 | copyright = '2024, Microsoft and community' 15 | author = 'Microsoft and community' 16 | release = '0.9.9' 17 | 18 | # -- General configuration --------------------------------------------------- 19 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 20 | 21 | extensions = [ 22 | 'sphinx.ext.autodoc', 23 | 'sphinx.ext.napoleon', 24 | "sphinx.ext.intersphinx", 25 | ] 26 | 27 | intersphinx_mapping = { 28 | 'python': ('http://docs.python.org/', None), 29 | 'numpy': ('https://numpy.org/doc/stable/', None), 30 | 'pandas': ('http://pandas.pydata.org/pandas-docs/dev', None) 31 | } 32 | 33 | templates_path = ['_templates'] 34 | exclude_patterns = [] 35 | 36 | 37 | # -- Options for HTML output ------------------------------------------------- 38 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 39 | 40 | html_theme = 'sphinx_rtd_theme' 41 | html_static_path = ['_static'] 42 | 43 | # List of packages we don't want to install in the environment 44 | autodoc_mock_imports = ['delta', 'synapse', 'jwt', 'semantic-link-sempy', 'pyspark', 'powerbiclient'] 45 | 46 | napoleon_numpy_docstring = True -------------------------------------------------------------------------------- /src/sempy_labs/_utils.py: -------------------------------------------------------------------------------- 1 | item_types = { 2 | "Dashboard": ["Dashboard", "dashboards"], 3 | "DataPipeline": ["Data Pipeline", "dataPipelines", "pipeline-content.json"], 4 | "Datamart": ["Datamart", "datamarts"], 5 | "Environment": ["Environment", "environments"], 6 | "Eventhouse": ["Eventhouse", "eventhouses", "EventhouseProperties.json"], 7 | "Eventstream": ["Eventstream", "eventstreams", "eventstream.json"], 8 | "GraphQLApi": ["GraphQL Api", "GraphQLApis"], 9 | "KQLDashboard": ["KQL Dashboard", "kqlDashboards", "RealTimeDashboard.json"], 10 | "KQLDatabase": [ 11 | "KQL Database", 12 | "kqlDatabases", 13 | ], # "DatabaseProperties.json", "DatabaseSchema.kql" 14 | "KQLQueryset": ["KQL Queryset", "kqlQuerysets", "RealTimeQueryset.json"], 15 | "Lakehouse": ["Lakehouse", "lakehouses"], 16 | "MLExperiment": ["ML Experiment", "mlExperiments"], 17 | "MLModel": ["ML Model", "mlModels"], 18 | "MirroredDatabase": [ 19 | "Mirrored Database", 20 | "mirroredDatabases", 21 | "mirroredDatabase.json", 22 | ], 23 | "MirroredWarehouse": ["Mirrored Warehouse", "mirroredWarehouses"], 24 | "MountedDataFactory": [ 25 | "Mounted Data Factory", 26 | "mountedDataFactories", 27 | "mountedDataFactory-content.json", 28 | ], 29 | "Notebook": 
["Notebook", "notebooks"], 30 | "PaginatedReport": ["Paginated Report", "paginatedReports"], 31 | "Reflex": ["Reflex", "reflexes", "ReflexEntities.json"], 32 | "Report": ["Report", "reports", "report.json"], 33 | "SQLDatabase": ["SQL Database", "sqlDatabases"], 34 | "SQLEndpoint": ["SQL Endpoint", "sqlEndpoints"], 35 | "SemanticModel": ["Semantic Model", "semanticModels", "model.bim"], 36 | "SparkJobDefinition": [ 37 | "Spark Job Definition", 38 | "sparkJobDefinitions", 39 | "SparkJobDefinitionV1.json", 40 | ], 41 | "Warehouse": ["Warehouse", "warehouses"], 42 | } 43 | -------------------------------------------------------------------------------- /src/sempy_labs/_mirrored_warehouses.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Optional 3 | from sempy_labs._helper_functions import ( 4 | resolve_workspace_name_and_id, 5 | _base_api, 6 | _create_dataframe, 7 | ) 8 | from uuid import UUID 9 | 10 | 11 | def list_mirrored_warehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame: 12 | """ 13 | Shows the mirrored warehouses within a workspace. 14 | 15 | This is a wrapper function for the following API: `Items - List Mirrored Warehouses `_. 16 | 17 | Parameters 18 | ---------- 19 | workspace : str | uuid.UUID, default=None 20 | The Fabric workspace name or ID. 21 | Defaults to None which resolves to the workspace of the attached lakehouse 22 | or if no lakehouse attached, resolves to the workspace of the notebook. 23 | 24 | Returns 25 | ------- 26 | pandas.DataFrame 27 | A pandas dataframe showing the mirrored warehouses within a workspace. 28 | """ 29 | 30 | columns = { 31 | "Mirrored Warehouse Name": "string", 32 | "Mirrored Warehouse Id": "string", 33 | "Description": "string", 34 | } 35 | df = _create_dataframe(columns=columns) 36 | 37 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 38 | responses = _base_api( 39 | request=f"/v1/workspaces/{workspace_id}/mirroredWarehouses", 40 | status_codes=200, 41 | uses_pagination=True, 42 | ) 43 | 44 | for r in responses: 45 | for v in r.get("value", []): 46 | new_data = { 47 | "Mirrored Warehouse Name": v.get("displayName"), 48 | "Mirrored Warehouse Id": v.get("id"), 49 | "Description": v.get("description"), 50 | } 51 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 52 | 53 | return df 54 | -------------------------------------------------------------------------------- /.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | name: Semantic Link Labs 2 | 3 | on: [push] 4 | 5 | permissions: 6 | contents: write # This is required for actions/checkout@v1 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v3 14 | - name: Set up Python 15 | uses: actions/setup-python@v4 16 | with: 17 | python-version: "3.10" 18 | 19 | - name: Setup dotnet 20 | uses: actions/setup-dotnet@v4 21 | with: 22 | dotnet-version: | 23 | 8.0.x 24 | 25 | - name: Get Date 26 | id: get-date 27 | run: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT 28 | shell: bash 29 | 30 | - name: Cache conda 31 | uses: actions/cache@v3 32 | env: 33 | # Increase this value to reset cache if environment.yml has not changed 34 | CACHE_NUMBER: 0 35 | with: 36 | path: ~/conda_pkgs_dir 37 | key: 38 | ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-${{ steps.get-date.outputs.today }}-${{ hashFiles('environment.yml') }} 39 | 40 | - uses: 
conda-incubator/setup-miniconda@v3 41 | with: 42 | auto-update-conda: true 43 | activate-environment: fabric 44 | python-version: "3.10" 45 | environment-file: environment.yml 46 | channel-priority: strict 47 | 48 | - name: Install package 49 | shell: bash -el {0} 50 | run: | 51 | conda info 52 | pip install -e . 53 | 54 | # - name: Lint with flake8 55 | # shell: bash -el {0} 56 | # run: | 57 | # flake8 sempy_labs tests --count --show-source --statistics 58 | # continue-on-error: false 59 | 60 | # - name: Lint with mypy 61 | # shell: bash -el {0} 62 | # run: | 63 | # mypy sempy_labs tests 64 | # continue-on-error: false 65 | 66 | - name: Test with pytest 67 | shell: bash -el {0} 68 | run: | 69 | pytest -s tests/ -------------------------------------------------------------------------------- /src/sempy_labs/_dashboards.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | from uuid import UUID 3 | import pandas as pd 4 | from sempy_labs._helper_functions import ( 5 | _create_dataframe, 6 | _base_api, 7 | resolve_workspace_name_and_id, 8 | _update_dataframe_datatypes, 9 | ) 10 | 11 | 12 | def list_dashboards(workspace: Optional[str | UUID] = None) -> pd.DataFrame: 13 | """ 14 | Shows a list of the dashboards within a workspace. 15 | 16 | Parameters 17 | ---------- 18 | workspace : str | uuid.UUID, default=None 19 | The Fabric workspace name or ID. 20 | Defaults to None which resolves to the workspace of the attached lakehouse 21 | or if no lakehouse attached, resolves to the workspace of the notebook. 22 | 23 | Returns 24 | ------- 25 | pandas.DataFrame 26 | A pandas dataframe showing the dashboards within a workspace. 27 | """ 28 | 29 | columns = { 30 | "Dashboard ID": "string", 31 | "Dashboard Name": "string", 32 | "Read Only": "bool", 33 | "Web URL": "string", 34 | "Embed URL": "string", 35 | "Data Classification": "string", 36 | "Users": "string", 37 | "Subscriptions": "string", 38 | } 39 | df = _create_dataframe(columns=columns) 40 | 41 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 42 | 43 | response = _base_api(request=f"/v1.0/myorg/groups/{workspace_id}/dashboards") 44 | 45 | for v in response.json().get("value", []): 46 | new_data = { 47 | "Dashboard ID": v.get("id"), 48 | "Dashboard Name": v.get("displayName"), 49 | "Read Only": v.get("isReadOnly"), 50 | "Web URL": v.get("webUrl"), 51 | "Embed URL": v.get("embedUrl"), 52 | "Data Classification": v.get("dataClassification"), 53 | "Users": v.get("users"), 54 | "Subscriptions": v.get("subscriptions"), 55 | } 56 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 57 | 58 | _update_dataframe_datatypes(dataframe=df, column_map=columns) 59 | 60 | return df 61 | -------------------------------------------------------------------------------- /src/sempy_labs/_dax_query_view.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | from uuid import UUID 3 | from sempy_labs._helper_functions import ( 4 | resolve_dataset_id, 5 | _get_fabric_context_setting, 6 | resolve_workspace_id, 7 | ) 8 | from sempy._utils._log import log 9 | import gzip 10 | import base64 11 | import urllib.parse 12 | 13 | 14 | @log 15 | def generate_dax_query_view_url( 16 | dataset: str | UUID, dax_string: str, workspace: Optional[str | UUID] = None 17 | ): 18 | """ 19 | Prints a URL based on query provided. 
This URL opens `DAX query view `_ in the Power BI service, connected to the semantic model and using the query provided. 20 | 21 | Parameters 22 | ---------- 23 | dataset : str | uuid.UUID 24 | The semantic model name or ID. 25 | dax_string : str 26 | The DAX query string. 27 | workspace : str | uuid.UUID, default=None 28 | The workspace name or ID. 29 | Defaults to None which resolves to the workspace of the attached lakehouse 30 | or if no lakehouse attached, resolves to the workspace of the notebook. 31 | """ 32 | 33 | workspace_id = resolve_workspace_id(workspace=workspace) 34 | dataset_id = resolve_dataset_id(dataset=dataset, workspace=workspace_id) 35 | 36 | prefix = _get_fabric_context_setting(name="spark.trident.pbienv").lower() 37 | 38 | if prefix == "prod": 39 | prefix = "app" 40 | 41 | def gzip_base64_urlsafe(input_string): 42 | # Compress the string with gzip 43 | compressed_data = gzip.compress(input_string.encode("utf-8")) 44 | 45 | # Encode the compressed data in base64 46 | base64_data = base64.b64encode(compressed_data) 47 | 48 | # Make the base64 string URL-safe 49 | urlsafe_data = urllib.parse.quote_plus(base64_data.decode("utf-8")) 50 | 51 | return urlsafe_data 52 | 53 | formatted_query = gzip_base64_urlsafe(dax_string) 54 | 55 | url = f"https://{prefix}.powerbi.com/groups/{workspace_id}/modeling/{dataset_id}/daxQueryView?query={formatted_query}" 56 | 57 | print(url) 58 | -------------------------------------------------------------------------------- /src/sempy_labs/directlake/__init__.py: -------------------------------------------------------------------------------- 1 | from sempy_labs.directlake._generate_shared_expression import generate_shared_expression 2 | from sempy_labs.directlake._directlake_schema_compare import direct_lake_schema_compare 3 | from sempy_labs.directlake._directlake_schema_sync import direct_lake_schema_sync 4 | from sempy_labs.directlake._dl_helper import ( 5 | check_fallback_reason, 6 | generate_direct_lake_semantic_model, 7 | get_direct_lake_source, 8 | ) 9 | from sempy_labs.directlake._get_directlake_lakehouse import get_direct_lake_lakehouse 10 | from sempy_labs.directlake._get_shared_expression import get_shared_expression 11 | from sempy_labs.directlake._guardrails import ( 12 | get_direct_lake_guardrails, 13 | get_sku_size, 14 | get_directlake_guardrails_for_sku, 15 | ) 16 | from sempy_labs.directlake._list_directlake_model_calc_tables import ( 17 | list_direct_lake_model_calc_tables, 18 | ) 19 | from sempy_labs.directlake._show_unsupported_directlake_objects import ( 20 | show_unsupported_direct_lake_objects, 21 | ) 22 | from sempy_labs.directlake._update_directlake_model_lakehouse_connection import ( 23 | update_direct_lake_model_lakehouse_connection, 24 | update_direct_lake_model_connection, 25 | ) 26 | from sempy_labs.directlake._update_directlake_partition_entity import ( 27 | update_direct_lake_partition_entity, 28 | add_table_to_direct_lake_semantic_model, 29 | ) 30 | from sempy_labs.directlake._warm_cache import ( 31 | warm_direct_lake_cache_isresident, 32 | warm_direct_lake_cache_perspective, 33 | ) 34 | 35 | __all__ = [ 36 | "generate_shared_expression", 37 | "direct_lake_schema_compare", 38 | "direct_lake_schema_sync", 39 | "check_fallback_reason", 40 | "get_direct_lake_lakehouse", 41 | "get_shared_expression", 42 | "get_direct_lake_guardrails", 43 | "get_sku_size", 44 | "get_directlake_guardrails_for_sku", 45 | "list_direct_lake_model_calc_tables", 46 | "show_unsupported_direct_lake_objects", 47 | 
"update_direct_lake_model_lakehouse_connection", 48 | "update_direct_lake_partition_entity", 49 | "warm_direct_lake_cache_isresident", 50 | "warm_direct_lake_cache_perspective", 51 | "add_table_to_direct_lake_semantic_model", 52 | "generate_direct_lake_semantic_model", 53 | "get_direct_lake_source", 54 | "update_direct_lake_model_connection", 55 | ] 56 | -------------------------------------------------------------------------------- /src/sempy_labs/report/_paginated.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | import pandas as pd 3 | from uuid import UUID 4 | from sempy_labs._helper_functions import ( 5 | resolve_workspace_name_and_id, 6 | _base_api, 7 | resolve_item_id, 8 | _create_dataframe, 9 | ) 10 | 11 | 12 | def get_report_datasources( 13 | report: str | UUID, 14 | workspace: Optional[str | UUID] = None, 15 | ) -> pd.DataFrame: 16 | """ 17 | Returns a list of data sources for the specified paginated report (RDL) from the specified workspace. 18 | 19 | Parameters 20 | ---------- 21 | report : str | uuid.UUID 22 | Name or ID of the Power BI report. 23 | workspace : str | uuid.UUID, default=None 24 | The name or ID of the Fabric workspace in which the report resides. 25 | Defaults to None which resolves to the workspace of the attached lakehouse 26 | or if no lakehouse attached, resolves to the workspace of the notebook. 27 | 28 | Returns 29 | ------- 30 | pandas.DataFrame 31 | A pandas dataframe showing a list of data sources for the specified paginated report (RDL) from the specified workspace. 32 | """ 33 | 34 | columns = { 35 | "Report Name": "str", 36 | "Report Id": "str", 37 | "Datasource Id": "str", 38 | "Datasource Type": "str", 39 | "Gateway Id": "str", 40 | "Server": "str", 41 | "Database": "str", 42 | } 43 | df = _create_dataframe(columns=columns) 44 | 45 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 46 | report_id = resolve_item_id( 47 | item=report, type="PaginatedReport", workspace=workspace 48 | ) 49 | 50 | response = _base_api( 51 | request=f"v1.0/myorg/groups/{workspace_id}/reports/{report_id}/datasources" 52 | ) 53 | 54 | for i in response.json().get("value", []): 55 | conn = i.get("connectionDetails", {}) 56 | new_data = { 57 | "Report Name": report, 58 | "Report Id": report_id, 59 | "Datasource Id": i.get("datasourceId"), 60 | "Datasource Type": i.get("datasourceType"), 61 | "Gateway Id": i.get("gatewayId"), 62 | "Server": conn.get("server") if conn else None, 63 | "Database": conn.get("database") if conn else None, 64 | } 65 | 66 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 67 | 68 | return df 69 | -------------------------------------------------------------------------------- /src/sempy_labs/admin/_artifacts.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from sempy_labs._helper_functions import ( 3 | _base_api, 4 | ) 5 | from uuid import UUID 6 | from typing import Optional 7 | from sempy_labs.admin._basic_functions import ( 8 | _resolve_workspace_name_and_id, 9 | _create_dataframe, 10 | _update_dataframe_datatypes, 11 | ) 12 | 13 | 14 | def list_unused_artifacts(workspace: Optional[str | UUID] = None) -> pd.DataFrame: 15 | """ 16 | Returns a list of datasets, reports, and dashboards that have not been used within 30 days for the specified workspace. 17 | 18 | This is a wrapper function for the following API: `Admin - Groups GetUnusedArtifactsAsAdmin `_. 
19 | 
20 |     Service Principal Authentication is supported (see `here `_ for examples).
21 | 
22 |     Parameters
23 |     ----------
24 |     workspace : str | uuid.UUID, default=None
25 |         The Fabric workspace name or ID.
26 |         Defaults to None which resolves to the workspace of the attached lakehouse
27 |         or if no lakehouse attached, resolves to the workspace of the notebook.
28 | 
29 |     Returns
30 |     -------
31 |     pandas.DataFrame
32 |         A pandas dataframe showing a list of datasets, reports, and dashboards that have not been used within 30 days for the specified workspace.
33 |     """
34 | 
35 |     (workspace_name, workspace_id) = _resolve_workspace_name_and_id(workspace)
36 | 
37 |     columns = {
38 |         "Artifact Name": "string",
39 |         "Artifact Id": "string",
40 |         "Artifact Type": "string",
41 |         "Artifact Size in MB": "int",
42 |         "Created Date Time": "datetime",
43 |         "Last Accessed Date Time": "datetime",
44 |     }
45 | 
46 |     df = _create_dataframe(columns=columns)
47 | 
48 |     responses = _base_api(
49 |         request=f"/v1.0/myorg/admin/groups/{workspace_id}/unused",
50 |         client="fabric_sp",
51 |         uses_pagination=True,
52 |     )
53 | 
54 |     for r in responses:
55 |         for i in r.get("unusedArtifactEntities", []):
56 |             # Map the artifact display name and ID to their correct columns.
57 |             new_data = {
58 |                 "Artifact Name": i.get("displayName"),
59 |                 "Artifact Id": i.get("artifactId"),
60 |                 "Artifact Type": i.get("artifactType"),
61 |                 "Artifact Size in MB": i.get("artifactSizeInMB"),
62 |                 "Created Date Time": i.get("createdDateTime"),
63 |                 "Last Accessed Date Time": i.get("lastAccessedDateTime"),
64 |             }
65 | 
66 |             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
67 | 
68 |     _update_dataframe_datatypes(dataframe=df, column_map=columns)
69 | 
70 |     return df
71 | 
--------------------------------------------------------------------------------
/src/sempy_labs/admin/_git.py:
--------------------------------------------------------------------------------
1 | from sempy_labs._helper_functions import (
2 |     _base_api,
3 |     _create_dataframe,
4 | )
5 | import pandas as pd
6 | from sempy_labs.admin._basic_functions import list_workspaces
7 | 
8 | 
9 | def list_git_connections() -> pd.DataFrame:
10 |     """
11 |     Shows a list of Git connections.
12 | 
13 |     This is a wrapper function for the following API: `Workspaces - List Git Connections `_.
14 | 
15 |     Service Principal Authentication is supported (see `here `_ for examples).
16 | 
17 |     Returns
18 |     -------
19 |     pandas.DataFrame
20 |         A pandas dataframe showing a list of Git connections.
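21 | 
22 |     Examples
23 |     --------
24 |     A minimal usage sketch; it assumes the function is exported from
25 |     ``sempy_labs.admin`` and that the caller has tenant admin (or service principal) access:
26 | 
27 |     >>> from sempy_labs.admin import list_git_connections
28 |     >>> df = list_git_connections()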
21 | """ 22 | 23 | columns = { 24 | "Workspace Id": "string", 25 | "Organization Name": "string", 26 | "Owner Name": "string", 27 | "Project Name": "string", 28 | "Git Provider Type": "string", 29 | "Repository Name": "string", 30 | "Branch Name": "string", 31 | "Directory Name": "string", 32 | } 33 | df = _create_dataframe(columns=columns) 34 | 35 | responses = _base_api( 36 | request="/v1/admin/workspaces/discoverGitConnections", 37 | client="fabric_sp", 38 | uses_pagination=True, 39 | ) 40 | 41 | for r in responses: 42 | for v in r.get("value", []): 43 | git = v.get("gitProviderDetails", {}) 44 | new_data = { 45 | "Workspace Id": v.get("workspaceId"), 46 | "Organization Name": git.get("organizationName"), 47 | "Owner Name": git.get("ownerName"), 48 | "Project Name": git.get("projectName"), 49 | "Git Provider Type": git.get("gitProviderType"), 50 | "Repository Name": git.get("repositoryName"), 51 | "Branch Name": git.get("branchName"), 52 | "Directory Name": git.get("directoryName"), 53 | } 54 | 55 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 56 | 57 | dfW = list_workspaces() 58 | df = pd.merge( 59 | df, dfW[["Id", "Name"]], left_on="Workspace Id", right_on="Id", how="left" 60 | ) 61 | new_col_name = "Workspace Name" 62 | df = df.rename(columns={"Name": new_col_name}) 63 | df.insert(1, new_col_name, df.pop(new_col_name)) 64 | 65 | df = df.drop(columns=["Id"]) 66 | 67 | return df 68 | -------------------------------------------------------------------------------- /src/sempy_labs/directlake/_get_directlake_lakehouse.py: -------------------------------------------------------------------------------- 1 | import sempy.fabric as fabric 2 | from sempy_labs._helper_functions import ( 3 | resolve_lakehouse_id, 4 | resolve_lakehouse_name, 5 | get_direct_lake_sql_endpoint, 6 | resolve_workspace_name_and_id, 7 | resolve_dataset_name_and_id, 8 | ) 9 | from typing import Optional, Tuple 10 | from uuid import UUID 11 | import sempy_labs._icons as icons 12 | 13 | 14 | def get_direct_lake_lakehouse( 15 | dataset: str | UUID, 16 | workspace: Optional[str | UUID] = None, 17 | lakehouse: Optional[str] = None, 18 | lakehouse_workspace: Optional[str | UUID] = None, 19 | ) -> Tuple[str, UUID]: 20 | """ 21 | Identifies the lakehouse used by a Direct Lake semantic model. 22 | 23 | Parameters 24 | ---------- 25 | dataset : str | uuid.UUID 26 | Name or ID of the semantic model. 27 | workspace : str | uuid.UUID, default=None 28 | The Fabric workspace name or ID. 29 | Defaults to None which resolves to the workspace of the attached lakehouse 30 | or if no lakehouse attached, resolves to the workspace of the notebook. 31 | lakehouse : str, default=None 32 | The Fabric lakehouse used by the Direct Lake semantic model. 33 | Defaults to None which resolves to the lakehouse attached to the notebook. 34 | lakehouse_workspace : str | uuid.UUID, default=None 35 | The Fabric workspace name or ID used by the lakehouse. 36 | Defaults to None which resolves to the workspace of the attached lakehouse 37 | or if no lakehouse attached, resolves to the workspace of the notebook. 38 | 39 | Returns 40 | ------- 41 | Tuple[str, uuid.UUID] 42 | The lakehouse name and lakehouse ID. 
43 | """ 44 | 45 | from sempy_labs.directlake._dl_helper import get_direct_lake_source 46 | 47 | artifact_type, artifact_name, artifact_id, workspace_id = get_direct_lake_source( 48 | dataset=dataset, workspace=workspace 49 | ) 50 | 51 | if artifact_type in ["Lakehouse", "Warehouse"]: 52 | return artifact_name, artifact_id 53 | else: 54 | fabric.refresh_tom_cache(workspace=workspace) 55 | dfP = fabric.list_partitions(dataset=dataset, workspace=workspace) 56 | dfP_filt = dfP[dfP["Mode"] == "DirectLake"] 57 | if dfP_filt.empty: 58 | raise ValueError( 59 | f"{icons.red_dot} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode." 60 | ) 61 | lakehouse_id = resolve_lakehouse_id( 62 | lakehouse=lakehouse, workspace=lakehouse_workspace 63 | ) 64 | return lakehouse, lakehouse_id 65 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs. 
32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd). 40 | 41 | 42 | -------------------------------------------------------------------------------- /src/sempy_labs/directlake/_list_directlake_model_calc_tables.py: -------------------------------------------------------------------------------- 1 | import sempy.fabric as fabric 2 | import pandas as pd 3 | from sempy_labs._list_functions import list_tables 4 | from sempy_labs.tom import connect_semantic_model 5 | from typing import Optional 6 | from sempy._utils._log import log 7 | import sempy_labs._icons as icons 8 | from uuid import UUID 9 | from sempy_labs._helper_functions import ( 10 | resolve_dataset_name_and_id, 11 | resolve_workspace_name_and_id, 12 | ) 13 | 14 | 15 | @log 16 | def list_direct_lake_model_calc_tables( 17 | dataset: str | UUID, workspace: Optional[str | UUID] = None 18 | ) -> pd.DataFrame: 19 | """ 20 | Shows the calculated tables and their respective DAX expression for a Direct Lake model (which has been migrated from import/DirectQuery). 21 | 22 | Parameters 23 | ---------- 24 | dataset : str | uuid.UUID 25 | Name or ID of the semantic model. 26 | workspace : str | uuid.UUID, default=None 27 | The Fabric workspace name or ID. 28 | Defaults to None which resolves to the workspace of the attached lakehouse 29 | or if no lakehouse attached, resolves to the workspace of the notebook. 30 | 31 | Returns 32 | ------- 33 | pandas.DataFrame 34 | A pandas dataframe showing the calculated tables which were migrated to Direct Lake and whose DAX expressions are stored as model annotations. 35 | """ 36 | 37 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 38 | (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id) 39 | 40 | df = pd.DataFrame(columns=["Table Name", "Source Expression"]) 41 | 42 | with connect_semantic_model( 43 | dataset=dataset_id, readonly=True, workspace=workspace_id 44 | ) as tom: 45 | 46 | is_direct_lake = tom.is_direct_lake() 47 | 48 | if not is_direct_lake: 49 | raise ValueError( 50 | f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake mode." 51 | ) 52 | else: 53 | fabric.refresh_tom_cache(workspace=workspace) 54 | dfA = fabric.list_annotations(dataset=dataset_id, workspace=workspace_id) 55 | dfT = list_tables(dataset_id, workspace_id) 56 | dfA_filt = dfA[ 57 | (dfA["Object Type"] == "Model") 58 | & (dfA["Annotation Name"].isin(dfT["Name"])) 59 | ] 60 | 61 | for i, r in dfA_filt.iterrows(): 62 | tName = r["Annotation Name"] 63 | se = r["Annotation Value"] 64 | 65 | new_data = {"Table Name": tName, "Source Expression": se} 66 | df = pd.concat( 67 | [df, pd.DataFrame(new_data, index=[0])], ignore_index=True 68 | ) 69 | 70 | return df 71 | -------------------------------------------------------------------------------- /src/sempy_labs/_workspace_identity.py: -------------------------------------------------------------------------------- 1 | from sempy_labs._helper_functions import ( 2 | resolve_workspace_name_and_id, 3 | _base_api, 4 | ) 5 | from typing import Optional 6 | import sempy_labs._icons as icons 7 | from uuid import UUID 8 | 9 | 10 | def provision_workspace_identity(workspace: Optional[str | UUID] = None): 11 | """ 12 | Provisions a workspace identity for a workspace. 
13 | 14 | This is a wrapper function for the following API: `Workspaces - Provision Identity `_. 15 | 16 | Service Principal Authentication is supported (see `here `_ for examples). 17 | 18 | Parameters 19 | ---------- 20 | workspace : str | uuid.UUID, default=None 21 | The Fabric workspace name or ID. 22 | Defaults to None which resolves to the workspace of the attached lakehouse 23 | or if no lakehouse attached, resolves to the workspace of the notebook. 24 | """ 25 | 26 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 27 | 28 | _base_api( 29 | request=f"/v1/workspaces/{workspace_id}/provisionIdentity", 30 | method="post", 31 | lro_return_status_code=True, 32 | status_codes=None, 33 | client="fabric_sp", 34 | ) 35 | 36 | print( 37 | f"{icons.green_dot} A workspace identity has been provisioned for the '{workspace_name}' workspace." 38 | ) 39 | 40 | 41 | def deprovision_workspace_identity(workspace: Optional[str | UUID] = None): 42 | """ 43 | Deprovisions a workspace identity for a workspace. 44 | 45 | This is a wrapper function for the following API: `Workspaces - Deprovision Identity `_. 46 | 47 | Service Principal Authentication is supported (see `here `_ for examples). 48 | 49 | Parameters 50 | ---------- 51 | workspace : str | uuid.UUID, default=None 52 | The Fabric workspace name or ID. 53 | Defaults to None which resolves to the workspace of the attached lakehouse 54 | or if no lakehouse attached, resolves to the workspace of the notebook. 55 | """ 56 | 57 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 58 | 59 | _base_api( 60 | request=f"/v1/workspaces/{workspace_id}/deprovisionIdentity", 61 | method="post", 62 | lro_return_status_code=True, 63 | status_codes=None, 64 | client="fabric_sp", 65 | ) 66 | 67 | print( 68 | f"{icons.green_dot} The workspace identity has been deprovisioned from the '{workspace_name}' workspace." 69 | ) 70 | -------------------------------------------------------------------------------- /src/sempy_labs/migration/_migration_validation.py: -------------------------------------------------------------------------------- 1 | import sempy.fabric as fabric 2 | import pandas as pd 3 | from typing import Optional 4 | from sempy_labs._list_functions import list_semantic_model_objects 5 | from sempy._utils._log import log 6 | import sempy_labs._icons as icons 7 | 8 | 9 | @log 10 | def migration_validation( 11 | dataset: str, 12 | new_dataset: str, 13 | workspace: Optional[str] = None, 14 | new_dataset_workspace: Optional[str] = None, 15 | ) -> pd.DataFrame: 16 | """ 17 | Shows the objects in the original semantic model and whether they were migrated successfully or not. 18 | 19 | Parameters 20 | ---------- 21 | dataset : str 22 | Name of the import/DirectQuery semantic model. 23 | new_dataset : str 24 | Name of the Direct Lake semantic model. 25 | workspace : str, default=None 26 | The Fabric workspace name in which the import/DirectQuery semantic model exists. 27 | Defaults to None which resolves to the workspace of the attached lakehouse 28 | or if no lakehouse attached, resolves to the workspace of the notebook. 29 | new_dataset_workspace : str, default=None 30 | The Fabric workspace name in which the Direct Lake semantic model will be created. 31 | Defaults to None which resolves to the workspace of the attached lakehouse 32 | or if no lakehouse attached, resolves to the workspace of the notebook.
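    Examples
    --------
    A minimal usage sketch; the semantic model and workspace names are
    hypothetical placeholders::

        df = migration_validation(
            dataset="Sales Model",
            new_dataset="Sales Model DL",
            workspace="Sales Workspace",
            new_dataset_workspace="Sales Workspace",
        )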
33 | 34 | Returns 35 | ------- 36 | pandas.DataFrame 37 | A pandas dataframe showing a list of objects and whether they were successfully migrated. Also shows the % of objects which were migrated successfully. 38 | """ 39 | 40 | if dataset == new_dataset: 41 | raise ValueError( 42 | f"{icons.red_dot} The 'dataset' and 'new_dataset' parameters are both set to '{dataset}'. These parameters must be set to different values." 43 | ) 44 | 45 | icons.sll_tags.append("DirectLakeMigration") 46 | 47 | dfA = list_semantic_model_objects(dataset=dataset, workspace=workspace) 48 | dfB = list_semantic_model_objects( 49 | dataset=new_dataset, workspace=new_dataset_workspace 50 | ) 51 | 52 | def is_migrated(row): 53 | if row["Object Type"] == "Calculated Table": 54 | return ( 55 | (dfB["Parent Name"] == row["Parent Name"]) 56 | & (dfB["Object Name"] == row["Object Name"]) 57 | & (dfB["Object Type"].isin(["Calculated Table", "Table"])) 58 | ).any() 59 | else: 60 | return ( 61 | (dfB["Parent Name"] == row["Parent Name"]) 62 | & (dfB["Object Name"] == row["Object Name"]) 63 | & (dfB["Object Type"] == row["Object Type"]) 64 | ).any() 65 | 66 | dfA["Migrated"] = dfA.apply(is_migrated, axis=1) 67 | 68 | denom = len(dfA) 69 | num = len(dfA[dfA["Migrated"]]) 70 | print(f"{100 * round(num / denom, 2)}% migrated") 71 | 72 | return dfA 73 | -------------------------------------------------------------------------------- /src/sempy_labs/report/_download_report.py: -------------------------------------------------------------------------------- 1 | import sempy.fabric as fabric 2 | import sempy_labs._icons as icons 3 | from typing import Optional 4 | from sempy_labs._helper_functions import ( 5 | resolve_workspace_name_and_id, 6 | resolve_lakehouse_name_and_id, 7 | _base_api, 8 | resolve_item_id, 9 | _mount, 10 | resolve_workspace_name, 11 | ) 12 | from sempy_labs.lakehouse._lakehouse import lakehouse_attached 13 | from uuid import UUID 14 | 15 | 16 | def download_report( 17 | report: str | UUID, 18 | file_name: Optional[str] = None, 19 | download_type: str = "LiveConnect", 20 | workspace: Optional[str | UUID] = None, 21 | ): 22 | """ 23 | Downloads the specified report from the specified workspace to a Power BI .pbix file. 24 | 25 | This is a wrapper function for the following API: `Reports - Export Report In Group `_. 26 | 27 | Parameters 28 | ---------- 29 | report: str | uuid.UUID 30 | Name or ID of the report. 31 | file_name : str, default=None 32 | Name of the .pbix file to be saved. 33 | Defaults to None which resolves to the name of the report. 34 | download_type : str, default="LiveConnect" 35 | The type of download. Valid values are "LiveConnect" and "IncludeModel". 36 | workspace : str | uuid.UUID, default=None 37 | The Fabric workspace name or ID. 38 | Defaults to None which resolves to the workspace of the attached lakehouse 39 | or if no lakehouse attached, resolves to the workspace of the notebook. 40 | """ 41 | 42 | if not lakehouse_attached(): 43 | raise ValueError( 44 | f"{icons.red_dot} A lakehouse must be attached to the notebook." 45 | ) 46 | 47 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 48 | (lakehouse_name, lakehouse_id) = resolve_lakehouse_name_and_id() 49 | lakehouse_workspace = resolve_workspace_name() 50 | 51 | download_types = ["LiveConnect", "IncludeModel"] 52 | if download_type not in download_types: 53 | raise ValueError( 54 | f"{icons.red_dot} Invalid download_type parameter. Valid options: {download_types}."
55 | ) 56 | 57 | file_name = file_name or report 58 | report_id = resolve_item_id(item=report, type="Report", workspace=workspace) 59 | 60 | response = _base_api( 61 | request=f"v1.0/myorg/groups/{workspace_id}/reports/{report_id}/Export?downloadType={download_type}" 62 | ) 63 | 64 | # Save file to the attached lakehouse 65 | local_path = _mount() 66 | save_file = f"{local_path}/Files/{file_name}.pbix" 67 | with open(save_file, "wb") as file: 68 | file.write(response.content) 69 | 70 | print( 71 | f"{icons.green_dot} The '{report}' report within the '{workspace_name}' workspace has been exported as the '{file_name}' file in the '{lakehouse_name}' lakehouse within the '{lakehouse_workspace}' workspace." 72 | ) 73 | -------------------------------------------------------------------------------- /src/sempy_labs/_graphQL.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from uuid import UUID 3 | from typing import Optional 4 | from sempy_labs._helper_functions import ( 5 | _base_api, 6 | _create_dataframe, 7 | resolve_workspace_name_and_id, 8 | create_item, 9 | ) 10 | 11 | 12 | def list_graphql_apis(workspace: Optional[str | UUID] = None) -> pd.DataFrame: 13 | """ 14 | Shows the GraphQL APIs within a workspace. 15 | 16 | This is a wrapper function for the following API: `Items - List GraphQLApis `_. 17 | 18 | Service Principal Authentication is supported (see `here `_ for examples). 19 | 20 | Parameters 21 | ---------- 22 | workspace : str | uuid.UUID, default=None 23 | The Fabric workspace name or ID. 24 | Defaults to None which resolves to the workspace of the attached lakehouse 25 | or if no lakehouse attached, resolves to the workspace of the notebook. 26 | 27 | Returns 28 | ------- 29 | pandas.DataFrame 30 | A pandas dataframe showing the GraphQL APIs within a workspace. 31 | """ 32 | 33 | columns = { 34 | "GraphQL API Name": "string", 35 | "GraphQL API Id": "string", 36 | "Description": "string", 37 | } 38 | df = _create_dataframe(columns=columns) 39 | 40 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 41 | 42 | responses = _base_api( 43 | request=f"/v1/workspaces/{workspace_id}/GraphQLApis", 44 | uses_pagination=True, 45 | client="fabric_sp", 46 | ) 47 | 48 | for r in responses: 49 | for v in r.get("value", []): 50 | new_data = { 51 | "GraphQL API Name": v.get("displayName"), 52 | "GraphQL API Id": v.get("id"), 53 | "Description": v.get("description"), 54 | } 55 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 56 | 57 | return df 58 | 59 | 60 | def create_graphql_api( 61 | name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None 62 | ): 63 | """ 64 | Creates a GraphQL API. 65 | 66 | This is a wrapper function for the following API: `Items - Create GraphQLApi `_. 67 | 68 | Parameters 69 | ---------- 70 | name: str 71 | Name of the GraphQL API. 72 | description : str, default=None 73 | A description of the GraphQL API. 74 | workspace : str | uuid.UUID, default=None 75 | The Fabric workspace name or ID. 76 | Defaults to None which resolves to the workspace of the attached lakehouse 77 | or if no lakehouse attached, resolves to the workspace of the notebook.
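    Examples
    --------
    A minimal usage sketch; the API name, description, and workspace below are
    hypothetical placeholders::

        create_graphql_api(
            name="SalesGraphQL",
            description="GraphQL API for the Sales lakehouse",
            workspace="Sales Workspace",
        )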
78 | """ 79 | 80 | create_item( 81 | name=name, description=description, type="GraphQLApi", workspace=workspace 82 | ) 83 | -------------------------------------------------------------------------------- /src/sempy_labs/lakehouse/_get_lakehouse_columns.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from sempy_labs._helper_functions import ( 3 | format_dax_object_name, 4 | resolve_workspace_name_and_id, 5 | resolve_lakehouse_name_and_id, 6 | _create_dataframe, 7 | _create_spark_session, 8 | ) 9 | from typing import Optional 10 | from sempy._utils._log import log 11 | from uuid import UUID 12 | 13 | 14 | @log 15 | def get_lakehouse_columns( 16 | lakehouse: Optional[str | UUID] = None, workspace: Optional[str | UUID] = None 17 | ) -> pd.DataFrame: 18 | """ 19 | Shows the tables and columns of a lakehouse and their respective properties. 20 | 21 | Parameters 22 | ---------- 23 | lakehouse : str | uuid.UUID, default=None 24 | The Fabric lakehouse name or ID. 25 | Defaults to None which resolves to the lakehouse attached to the notebook. 26 | lakehouse_workspace : str | uuid.UUID, default=None 27 | The Fabric workspace name or ID used by the lakehouse. 28 | Defaults to None which resolves to the workspace of the attached lakehouse 29 | or if no lakehouse attached, resolves to the workspace of the notebook. 30 | 31 | Returns 32 | ------- 33 | pandas.DataFrame 34 | Shows the tables/columns within a lakehouse and their properties. 35 | """ 36 | from sempy_labs.lakehouse._get_lakehouse_tables import get_lakehouse_tables 37 | from delta import DeltaTable 38 | 39 | columns = { 40 | "Workspace Name": "string", 41 | "Lakehouse Name": "string", 42 | "Table Name": "string", 43 | "Column Name": "string", 44 | "Full Column Name": "string", 45 | "Data Type": "string", 46 | } 47 | df = _create_dataframe(columns=columns) 48 | 49 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 50 | (lakehouse_name, lakehouse_id) = resolve_lakehouse_name_and_id( 51 | lakehouse=lakehouse, workspace=workspace_id 52 | ) 53 | 54 | spark = _create_spark_session() 55 | 56 | tables = get_lakehouse_tables( 57 | lakehouse=lakehouse_id, workspace=workspace_id, extended=False, count_rows=False 58 | ) 59 | tables_filt = tables[tables["Format"] == "delta"] 60 | 61 | for _, r in tables_filt.iterrows(): 62 | table_name = r["Table Name"] 63 | path = r["Location"] 64 | delta_table = DeltaTable.forPath(spark, path) 65 | sparkdf = delta_table.toDF() 66 | 67 | for col_name, data_type in sparkdf.dtypes: 68 | full_column_name = format_dax_object_name(table_name, col_name) 69 | new_data = { 70 | "Workspace Name": workspace_name, 71 | "Lakehouse Name": lakehouse, 72 | "Table Name": table_name, 73 | "Column Name": col_name, 74 | "Full Column Name": full_column_name, 75 | "Data Type": data_type, 76 | } 77 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 78 | 79 | return df 80 | -------------------------------------------------------------------------------- /src/sempy_labs/directlake/_guardrails.py: -------------------------------------------------------------------------------- 1 | import sempy.fabric as fabric 2 | import pandas as pd 3 | from typing import Optional 4 | import sempy_labs._icons as icons 5 | from uuid import UUID 6 | from sempy_labs._helper_functions import ( 7 | resolve_workspace_name_and_id, 8 | ) 9 | 10 | 11 | def get_direct_lake_guardrails() -> pd.DataFrame: 12 | """ 13 | Shows the guardrails for when Direct Lake 
semantic models will fall back to Direct Query 14 | based on Microsoft's `online documentation `_. 15 | 16 | Returns 17 | ------- 18 | pandas.DataFrame 19 | A table showing the Direct Lake guardrails by SKU. 20 | """ 21 | 22 | url = "https://learn.microsoft.com/power-bi/enterprise/directlake-overview" 23 | 24 | tables = pd.read_html(url) 25 | for df in tables: 26 | first_column_name = df.columns[0] 27 | if first_column_name.startswith("Fabric"): 28 | df[first_column_name] = df[first_column_name].str.split("/") 29 | df = df.explode(first_column_name, ignore_index=True) 30 | break 31 | 32 | return df 33 | 34 | 35 | def get_sku_size(workspace: Optional[str | UUID] = None) -> str: 36 | """ 37 | Shows the SKU size for a workspace. 38 | 39 | Parameters 40 | ---------- 41 | workspace : str | uuid.UUID, default=None 42 | The Fabric workspace name or ID. 43 | Defaults to None which resolves to the workspace of the attached lakehouse 44 | or if no lakehouse attached, resolves to the workspace of the notebook. 45 | 46 | Returns 47 | ------- 48 | str 49 | The SKU size for a workspace. 50 | """ 51 | from sempy_labs._capacities import list_capacities 52 | 53 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 54 | 55 | dfW = fabric.list_workspaces(filter=f"id eq '{workspace_id}'") 56 | 57 | if len(dfW) == 0: 58 | raise ValueError( 59 | f"{icons.red_dot} The '{workspace_name}' is not a valid workspace." 60 | ) 61 | 62 | capacity_id = dfW["Capacity Id"].iloc[0] 63 | dfC = list_capacities() 64 | dfC_filt = dfC[dfC["Id"] == capacity_id] 65 | 66 | if len(dfC_filt) == 0: 67 | raise ValueError( 68 | f"{icons.red_dot} The '{capacity_id}' Id is not a valid capacity Id." 69 | ) 70 | 71 | return dfC_filt["Sku"].iloc[0] 72 | 73 | 74 | def get_directlake_guardrails_for_sku(sku_size: str) -> pd.DataFrame: 75 | """ 76 | Shows the guardrails for Direct Lake based on the SKU used by your workspace's capacity. 77 | *Use the result of the 'get_sku_size' function as an input for this function's sku_size parameter.* 78 | 79 | Parameters 80 | ---------- 81 | sku_size : str 82 | Sku size of a workspace/capacity. 83 | 84 | Returns 85 | ------- 86 | pandas.DataFrame 87 | A table showing the Direct Lake guardrails for the given SKU. 88 | """ 89 | 90 | df = get_direct_lake_guardrails() 91 | col_name = df.columns[0] 92 | filtered_df = df[df[col_name] == sku_size] 93 | 94 | return filtered_df 95 | -------------------------------------------------------------------------------- /src/sempy_labs/admin/_shared.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from sempy_labs._helper_functions import ( 3 | _base_api, 4 | _create_dataframe, 5 | ) 6 | 7 | 8 | def list_widely_shared_artifacts( 9 | api_name: str = "LinksSharedToWholeOrganization", 10 | ) -> pd.DataFrame: 11 | """ 12 | Returns a list of Power BI reports that are shared with the whole organization through links or a list of Power BI items (such as reports or dashboards) that are published to the web. 13 | 14 | This is a wrapper function for the following APIs: 15 | `Admin - WidelySharedArtifacts LinksSharedToWholeOrganization `_. 16 | `Admin - WidelySharedArtifacts PublishedToWeb `_. 17 | 18 | Service Principal Authentication is supported (see `here `_ for examples). 19 | 20 | Parameters 21 | ---------- 22 | api_name : str, default="LinksSharedToWholeOrganization" 23 | The name of the API to call. Either "LinksSharedToWholeOrganization" or "PublishedToWeb".
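    Examples
    --------
    A minimal usage sketch showing both supported API names::

        df_links = list_widely_shared_artifacts()
        df_web = list_widely_shared_artifacts(api_name="PublishedToWeb")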
24 | 25 | Returns 26 | ------- 27 | pandas.DataFrame 28 | A pandas dataframe showing a list of Power BI reports that are shared with the whole organization through links or a list of Power BI items (such as reports or dashboards) that are published to the web. 29 | """ 30 | 31 | columns = { 32 | "Artifact Id": "string", 33 | "Artifact Name": "string", 34 | "Artifact Type": "string", 35 | "Access Right": "string", 36 | "Share Type": "string", 37 | "Sharer Name": "string", 38 | "Sharer Email Address": "string", 39 | "Sharer Identifier": "string", 40 | "Sharer Graph Id": "string", 41 | "Sharer Principal Type": "string", 42 | } 43 | 44 | df = _create_dataframe(columns=columns) 45 | 46 | api = ( 47 | "linksSharedToWholeOrganization" 48 | if api_name == "LinksSharedToWholeOrganization" 49 | else "publishedToWeb" 50 | ) 51 | 52 | responses = _base_api( 53 | request=f"/v1.0/myorg/admin/widelySharedArtifacts/{api}", 54 | client="fabric_sp", 55 | uses_pagination=True, 56 | ) 57 | 58 | for r in responses: 59 | for v in r.get("ArtifactAccessEntities", []): 60 | sharer = v.get("sharer", {}) 61 | new_data = { 62 | "Artifact Id": v.get("artifactId"), 63 | "Artifact Name": v.get("displayName"), 64 | "Artifact Type": v.get("artifactType"), 65 | "Access Right": v.get("accessRight"), 66 | "Share Type": v.get("shareType"), 67 | "Sharer Name": sharer.get("displayName"), 68 | "Sharer Email Address": sharer.get("emailAddress"), 69 | "Sharer Identifier": sharer.get("identifier"), 70 | "Sharer Graph Id": sharer.get("graphId"), 71 | "Sharer Principal Type": sharer.get("principalType"), 72 | } 73 | 74 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 75 | 76 | return df 77 | -------------------------------------------------------------------------------- /notebooks/Best Practice Analyzer Report.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"markdown","id":"5c27dfd1-4fe0-4a97-92e6-ddf78889aa93","metadata":{"nteract":{"transient":{"deleting":false}}},"source":["### Install the latest .whl package\n","\n","Check [here](https://pypi.org/project/semantic-link-labs/) to see the latest version."]},{"cell_type":"code","execution_count":null,"id":"d5cae9db-cef9-48a8-a351-9c5fcc99645c","metadata":{"jupyter":{"outputs_hidden":true,"source_hidden":false},"nteract":{"transient":{"deleting":false}}},"outputs":[],"source":["%pip install semantic-link-labs"]},{"cell_type":"markdown","id":"b195eae8","metadata":{},"source":["### Import the library and necessary packages"]},{"cell_type":"code","execution_count":null,"id":"1344e286","metadata":{},"outputs":[],"source":["import sempy_labs as labs\n","import sempy_labs.report as rep"]},{"cell_type":"markdown","id":"5a3fe6e8-b8aa-4447-812b-7931831e07fe","metadata":{"nteract":{"transient":{"deleting":false}}},"source":["### Collect semantic model Best Practice Analyzer stats"]},{"cell_type":"markdown","id":"8702e95b","metadata":{},"source":["#### Collect stats for all semantic models within a single workspace"]},{"cell_type":"code","execution_count":null,"id":"9e349954","metadata":{},"outputs":[],"source":["labs.run_model_bpa_bulk(workspace='Workspace 1')"]},{"cell_type":"markdown","id":"8281d30d","metadata":{},"source":["#### Collect stats for all semantic models within a list of workspaces"]},{"cell_type":"code","execution_count":null,"id":"d6b09b86","metadata":{},"outputs":[],"source":["labs.run_model_bpa_bulk(workspace=['Workspace 1', 'Workspace 
2'])"]},{"cell_type":"markdown","id":"ec9109e4","metadata":{},"source":["#### Collect stats for all semantic models within all accessible workspaces"]},{"cell_type":"code","execution_count":null,"id":"e08860da","metadata":{},"outputs":[],"source":["labs.run_model_bpa_bulk(workspace=None)"]},{"cell_type":"markdown","id":"113b04a7","metadata":{},"source":["#### Create a Direct Lake semantic model (called 'ModelBPA') for analyzing the Best Practice Analyzer results"]},{"cell_type":"code","execution_count":null,"id":"b4e1296b","metadata":{},"outputs":[],"source":["labs.create_model_bpa_semantic_model()"]},{"cell_type":"markdown","id":"7f94b13a","metadata":{},"source":["#### Create a Power BI report called 'ModelBPA' based semantic model created in the previous cell, which can be used to analyze the Best Practice Analyzer results"]},{"cell_type":"code","execution_count":null,"id":"17565d35","metadata":{},"outputs":[],"source":["rep.create_model_bpa_report()"]},{"cell_type":"markdown","id":"d41bdae4","metadata":{},"source":["
\n","Note: The 'BPAReport' Power BI report is located within the workspace in which the default lakehouse attached to this notebook resides. Navigate to this workspace to open the report and view the Best Practice Analyzer results.\n","
\n","\n","Going forward, you just need to run the 'run_model_bpa_bulk' function which will append BPA results to the 'modelbparesults' delta table in your lakehouse. Since the 'BPAModel' semantic model is in Direct Lake mode, the data will appear in the semantic model and report automatically without any need for processing the semantic model.\n","\n"]}],"metadata":{"kernel_info":{"name":"synapse_pyspark"},"kernelspec":{"display_name":"Synapse PySpark","language":"Python","name":"synapse_pyspark"},"language_info":{"name":"python"},"microsoft":{"language":"python"},"nteract":{"version":"nteract-front-end@1.0.0"},"spark_compute":{"compute_id":"/trident/default"},"synapse_widget":{"state":{},"version":"0.1"},"widgets":{}},"nbformat":4,"nbformat_minor":5} 2 | -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/88d8141cb8500b60030c/visual.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/visualContainer/1.1.0/schema.json", 3 | "name": "88d8141cb8500b60030c", 4 | "position": { 5 | "x": 879.058334055647, 6 | "y": 110.76987085030771, 7 | "z": 2000, 8 | "height": 191.71708416399412, 9 | "width": 217.27936205252666, 10 | "tabOrder": 2000 11 | }, 12 | "visual": { 13 | "visualType": "slicer", 14 | "query": { 15 | "queryState": { 16 | "Values": { 17 | "projections": [ 18 | { 19 | "field": { 20 | "Column": { 21 | "Expression": { 22 | "SourceRef": { 23 | "Entity": "BPAResults" 24 | } 25 | }, 26 | "Property": "Category" 27 | } 28 | }, 29 | "queryRef": "BPAResults.Category", 30 | "nativeQueryRef": "Category", 31 | "active": true 32 | } 33 | ] 34 | } 35 | } 36 | }, 37 | "objects": { 38 | "data": [ 39 | { 40 | "properties": { 41 | "mode": { 42 | "expr": { 43 | "Literal": { 44 | "Value": "'Basic'" 45 | } 46 | } 47 | } 48 | } 49 | } 50 | ], 51 | "general": [ 52 | { 53 | "properties": { 54 | "orientation": { 55 | "expr": { 56 | "Literal": { 57 | "Value": "0D" 58 | } 59 | } 60 | } 61 | } 62 | } 63 | ], 64 | "header": [ 65 | { 66 | "properties": { 67 | "textSize": { 68 | "expr": { 69 | "Literal": { 70 | "Value": "14D" 71 | } 72 | } 73 | } 74 | } 75 | } 76 | ] 77 | }, 78 | "visualContainerObjects": { 79 | "border": [ 80 | { 81 | "properties": { 82 | "show": { 83 | "expr": { 84 | "Literal": { 85 | "Value": "true" 86 | } 87 | } 88 | }, 89 | "color": { 90 | "solid": { 91 | "color": { 92 | "expr": { 93 | "ThemeDataColor": { 94 | "ColorId": 0, 95 | "Percent": 0 96 | } 97 | } 98 | } 99 | } 100 | }, 101 | "radius": { 102 | "expr": { 103 | "Literal": { 104 | "Value": "20D" 105 | } 106 | } 107 | } 108 | } 109 | } 110 | ], 111 | "dropShadow": [ 112 | { 113 | "properties": { 114 | "show": { 115 | "expr": { 116 | "Literal": { 117 | "Value": "true" 118 | } 119 | } 120 | } 121 | } 122 | } 123 | ] 124 | }, 125 | "drillFilterOtherVisuals": true 126 | } 127 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/3b1182230aa6c600b43a/visual.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/visualContainer/1.1.0/schema.json", 3 | "name": "3b1182230aa6c600b43a", 4 | "position": { 5 | "x": 873.37782785819536, 6 | "y": 12.781138944266274, 7 | 
"z": 2000, 8 | "height": 191.71708416399412, 9 | "width": 217.27936205252666, 10 | "tabOrder": 2000 11 | }, 12 | "visual": { 13 | "visualType": "slicer", 14 | "query": { 15 | "queryState": { 16 | "Values": { 17 | "projections": [ 18 | { 19 | "field": { 20 | "Column": { 21 | "Expression": { 22 | "SourceRef": { 23 | "Entity": "BPAResults" 24 | } 25 | }, 26 | "Property": "Category" 27 | } 28 | }, 29 | "queryRef": "BPAResults.Category", 30 | "nativeQueryRef": "Category", 31 | "active": true 32 | } 33 | ] 34 | } 35 | } 36 | }, 37 | "objects": { 38 | "data": [ 39 | { 40 | "properties": { 41 | "mode": { 42 | "expr": { 43 | "Literal": { 44 | "Value": "'Basic'" 45 | } 46 | } 47 | } 48 | } 49 | } 50 | ], 51 | "general": [ 52 | { 53 | "properties": { 54 | "orientation": { 55 | "expr": { 56 | "Literal": { 57 | "Value": "0D" 58 | } 59 | } 60 | } 61 | } 62 | } 63 | ], 64 | "header": [ 65 | { 66 | "properties": { 67 | "textSize": { 68 | "expr": { 69 | "Literal": { 70 | "Value": "14D" 71 | } 72 | } 73 | } 74 | } 75 | } 76 | ] 77 | }, 78 | "visualContainerObjects": { 79 | "border": [ 80 | { 81 | "properties": { 82 | "show": { 83 | "expr": { 84 | "Literal": { 85 | "Value": "true" 86 | } 87 | } 88 | }, 89 | "color": { 90 | "solid": { 91 | "color": { 92 | "expr": { 93 | "ThemeDataColor": { 94 | "ColorId": 0, 95 | "Percent": 0 96 | } 97 | } 98 | } 99 | } 100 | }, 101 | "radius": { 102 | "expr": { 103 | "Literal": { 104 | "Value": "20D" 105 | } 106 | } 107 | } 108 | } 109 | } 110 | ], 111 | "dropShadow": [ 112 | { 113 | "properties": { 114 | "show": { 115 | "expr": { 116 | "Literal": { 117 | "Value": "true" 118 | } 119 | } 120 | } 121 | } 122 | } 123 | ] 124 | }, 125 | "drillFilterOtherVisuals": true 126 | } 127 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/b6a80ee459e716e170b1/visual.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/visualContainer/1.1.0/schema.json", 3 | "name": "b6a80ee459e716e170b1", 4 | "position": { 5 | "x": 1114.7993412498915, 6 | "y": 12.781138944266274, 7 | "z": 3000, 8 | "height": 191.71708416399412, 9 | "width": 153.37366733119529, 10 | "tabOrder": 3000 11 | }, 12 | "visual": { 13 | "visualType": "slicer", 14 | "query": { 15 | "queryState": { 16 | "Values": { 17 | "projections": [ 18 | { 19 | "field": { 20 | "Column": { 21 | "Expression": { 22 | "SourceRef": { 23 | "Entity": "BPAResults" 24 | } 25 | }, 26 | "Property": "Severity" 27 | } 28 | }, 29 | "queryRef": "BPAResults.RuleSeverity", 30 | "nativeQueryRef": "RuleSeverity", 31 | "active": true 32 | } 33 | ] 34 | } 35 | } 36 | }, 37 | "objects": { 38 | "data": [ 39 | { 40 | "properties": { 41 | "mode": { 42 | "expr": { 43 | "Literal": { 44 | "Value": "'Basic'" 45 | } 46 | } 47 | } 48 | } 49 | } 50 | ], 51 | "general": [ 52 | { 53 | "properties": { 54 | "orientation": { 55 | "expr": { 56 | "Literal": { 57 | "Value": "0D" 58 | } 59 | } 60 | } 61 | } 62 | } 63 | ], 64 | "header": [ 65 | { 66 | "properties": { 67 | "textSize": { 68 | "expr": { 69 | "Literal": { 70 | "Value": "14D" 71 | } 72 | } 73 | } 74 | } 75 | } 76 | ] 77 | }, 78 | "visualContainerObjects": { 79 | "border": [ 80 | { 81 | "properties": { 82 | "show": { 83 | "expr": { 84 | "Literal": { 85 | "Value": "true" 86 | } 87 | } 88 | }, 89 | "color": { 90 | "solid": { 91 | "color": { 92 | "expr": { 93 | "ThemeDataColor": 
{ 94 | "ColorId": 0, 95 | "Percent": 0 96 | } 97 | } 98 | } 99 | } 100 | }, 101 | "radius": { 102 | "expr": { 103 | "Literal": { 104 | "Value": "20D" 105 | } 106 | } 107 | } 108 | } 109 | } 110 | ], 111 | "dropShadow": [ 112 | { 113 | "properties": { 114 | "show": { 115 | "expr": { 116 | "Literal": { 117 | "Value": "true" 118 | } 119 | } 120 | } 121 | } 122 | } 123 | ] 124 | }, 125 | "drillFilterOtherVisuals": true 126 | } 127 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/c597da16dc7e63222a82/visuals/b8fdc82cddd61ac447bc/visual.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/visualContainer/1.1.0/schema.json", 3 | "name": "b8fdc82cddd61ac447bc", 4 | "position": { 5 | "x": 1114.7993412498915, 6 | "y": 110.76987085030771, 7 | "z": 3000, 8 | "height": 191.71708416399412, 9 | "width": 153.37366733119529, 10 | "tabOrder": 3000 11 | }, 12 | "visual": { 13 | "visualType": "slicer", 14 | "query": { 15 | "queryState": { 16 | "Values": { 17 | "projections": [ 18 | { 19 | "field": { 20 | "Column": { 21 | "Expression": { 22 | "SourceRef": { 23 | "Entity": "BPAResults" 24 | } 25 | }, 26 | "Property": "Severity" 27 | } 28 | }, 29 | "queryRef": "BPAResults.RuleSeverity", 30 | "nativeQueryRef": "RuleSeverity", 31 | "active": true 32 | } 33 | ] 34 | } 35 | } 36 | }, 37 | "objects": { 38 | "data": [ 39 | { 40 | "properties": { 41 | "mode": { 42 | "expr": { 43 | "Literal": { 44 | "Value": "'Basic'" 45 | } 46 | } 47 | } 48 | } 49 | } 50 | ], 51 | "general": [ 52 | { 53 | "properties": { 54 | "orientation": { 55 | "expr": { 56 | "Literal": { 57 | "Value": "0D" 58 | } 59 | } 60 | } 61 | } 62 | } 63 | ], 64 | "header": [ 65 | { 66 | "properties": { 67 | "textSize": { 68 | "expr": { 69 | "Literal": { 70 | "Value": "14D" 71 | } 72 | } 73 | } 74 | } 75 | } 76 | ] 77 | }, 78 | "visualContainerObjects": { 79 | "border": [ 80 | { 81 | "properties": { 82 | "show": { 83 | "expr": { 84 | "Literal": { 85 | "Value": "true" 86 | } 87 | } 88 | }, 89 | "color": { 90 | "solid": { 91 | "color": { 92 | "expr": { 93 | "ThemeDataColor": { 94 | "ColorId": 0, 95 | "Percent": 0 96 | } 97 | } 98 | } 99 | } 100 | }, 101 | "radius": { 102 | "expr": { 103 | "Literal": { 104 | "Value": "20D" 105 | } 106 | } 107 | } 108 | } 109 | } 110 | ], 111 | "dropShadow": [ 112 | { 113 | "properties": { 114 | "show": { 115 | "expr": { 116 | "Literal": { 117 | "Value": "true" 118 | } 119 | } 120 | } 121 | } 122 | } 123 | ] 124 | }, 125 | "drillFilterOtherVisuals": true 126 | } 127 | } -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/92735ae19b31712208ad/visuals/66e60dfb526437cd78d1/visual.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/visualContainer/1.1.0/schema.json", 3 | "name": "66e60dfb526437cd78d1", 4 | "position": { 5 | "x": 10, 6 | "y": 0, 7 | "z": 0, 8 | "height": 578.75, 9 | "width": 1270, 10 | "tabOrder": 0 11 | }, 12 | "visual": { 13 | "visualType": "tableEx", 14 | "query": { 15 | "queryState": { 16 | "Values": { 17 | "projections": [ 18 | { 19 | "field": { 20 | "Column": { 21 | "Expression": { 22 | "SourceRef": { 23 | "Entity": "BPAResults" 24 | } 25 | }, 26 | "Property": "Category" 27 
| } 28 | }, 29 | "queryRef": "BPAResults.Category", 30 | "nativeQueryRef": "Category" 31 | }, 32 | { 33 | "field": { 34 | "Column": { 35 | "Expression": { 36 | "SourceRef": { 37 | "Entity": "BPAResults" 38 | } 39 | }, 40 | "Property": "Severity" 41 | } 42 | }, 43 | "queryRef": "BPAResults.Severity", 44 | "nativeQueryRef": "Severity" 45 | }, 46 | { 47 | "field": { 48 | "Column": { 49 | "Expression": { 50 | "SourceRef": { 51 | "Entity": "BPAResults" 52 | } 53 | }, 54 | "Property": "Rule Name" 55 | } 56 | }, 57 | "queryRef": "BPAResults.Rule Name", 58 | "nativeQueryRef": "Rule Name" 59 | }, 60 | { 61 | "field": { 62 | "Column": { 63 | "Expression": { 64 | "SourceRef": { 65 | "Entity": "BPAResults" 66 | } 67 | }, 68 | "Property": "Description" 69 | } 70 | }, 71 | "queryRef": "BPAResults.Description", 72 | "nativeQueryRef": "Description" 73 | }, 74 | { 75 | "field": { 76 | "Column": { 77 | "Expression": { 78 | "SourceRef": { 79 | "Entity": "BPAResults" 80 | } 81 | }, 82 | "Property": "URL" 83 | } 84 | }, 85 | "queryRef": "BPAResults.URL", 86 | "nativeQueryRef": "URL" 87 | } 88 | ] 89 | } 90 | } 91 | }, 92 | "objects": { 93 | "columnWidth": [ 94 | { 95 | "properties": { 96 | "value": { 97 | "expr": { 98 | "Literal": { 99 | "Value": "452.5D" 100 | } 101 | } 102 | } 103 | }, 104 | "selector": { 105 | "metadata": "BPAResults.Rule Name" 106 | } 107 | } 108 | ] 109 | }, 110 | "drillFilterOtherVisuals": true 111 | } 112 | } -------------------------------------------------------------------------------- /src/sempy_labs/graph/_teams.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from uuid import UUID 3 | from sempy._utils._log import log 4 | from sempy_labs._helper_functions import ( 5 | _base_api, 6 | _create_dataframe, 7 | _update_dataframe_datatypes, 8 | ) 9 | 10 | 11 | @log 12 | def list_teams() -> pd.DataFrame: 13 | """ 14 | Shows a list of teams and their properties. 15 | 16 | This is a wrapper function for the following API: `List teams `_. 17 | 18 | Service Principal Authentication is required (see `here `_ for examples). 19 | 20 | Returns 21 | ------- 22 | pandas.DataFrame 23 | A pandas dataframe showing a list of teams and their properties. 
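    Examples
    --------
    A minimal usage sketch; this call assumes the session is authenticated with a
    service principal that has the required Microsoft Graph permissions::

        df = list_teams()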
24 | """ 25 | 26 | result = _base_api(request="teams", client="graph").json() 27 | 28 | columns = { 29 | "Team Id": "str", 30 | "Team Name": "str", 31 | "Description": "str", 32 | "Creation Date Time": "datetime", 33 | "Classification": "str", 34 | "Specialization": "str", 35 | "Visibility": "str", 36 | "Web Url": "str", 37 | "Archived": "bool", 38 | "Favorite By Me": "bool", 39 | "Discoverable By Me": "bool", 40 | "Member Count": "int_fillna", 41 | } 42 | 43 | df = _create_dataframe(columns=columns) 44 | 45 | for v in result.get("value"): 46 | new_data = { 47 | "Team Id": v.get("id"), 48 | "Team Name": v.get("displayName"), 49 | "Description": v.get("description"), 50 | "Creation Date Time": v.get("createdDateTime"), 51 | "Classification": v.get("classification"), 52 | "Specialization": v.get("specialization"), 53 | "Visibility": v.get("visibility"), 54 | "Web Url": v.get("webUrl"), 55 | "Archived": v.get("isArchived"), 56 | "Favorite By Me": v.get("isFavoriteByMe"), 57 | "Discoverable By Me": v.get("isDiscoverableByMe"), 58 | "Member Count": v.get("memberCount"), 59 | } 60 | 61 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 62 | 63 | _update_dataframe_datatypes(dataframe=df, column_map=columns) 64 | 65 | return df 66 | 67 | 68 | def list_chats(user: str | UUID) -> pd.DataFrame: 69 | """ 70 | In progress... 71 | """ 72 | 73 | from sempy_labs.graph._users import resolve_user_id 74 | 75 | user_id = resolve_user_id(user=user) 76 | result = _base_api(request=f"users/{user_id}/chats", client="graph").json() 77 | 78 | columns = { 79 | "Chat Id": "str", 80 | "Type": "str", 81 | "Members": "str", 82 | } 83 | 84 | df = _create_dataframe(columns=columns) 85 | 86 | for v in result.get("value"): 87 | new_data = { 88 | "Chat Id": v.get("id"), 89 | "Type": v.get("chatType"), 90 | "Members": v.get("members"), 91 | } 92 | 93 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 94 | 95 | return df 96 | 97 | 98 | def send_teams_message(chat_id: str, message: str): 99 | """ 100 | In progress... 101 | """ 102 | 103 | payload = { 104 | "body": { 105 | "content": message, 106 | } 107 | } 108 | 109 | _base_api( 110 | request=f"chats/{chat_id}/messages", 111 | client="graph", 112 | method="post", 113 | payload=payload, 114 | status_codes=201, 115 | ) 116 | -------------------------------------------------------------------------------- /src/sempy_labs/directlake/_generate_shared_expression.py: -------------------------------------------------------------------------------- 1 | from sempy_labs._helper_functions import ( 2 | resolve_workspace_name_and_id, 3 | _base_api, 4 | resolve_lakehouse_name_and_id, 5 | resolve_item_name_and_id, 6 | ) 7 | from typing import Optional 8 | import sempy_labs._icons as icons 9 | from uuid import UUID 10 | 11 | 12 | def generate_shared_expression( 13 | item_name: Optional[str] = None, 14 | item_type: str = "Lakehouse", 15 | workspace: Optional[str | UUID] = None, 16 | use_sql_endpoint: bool = True, 17 | ) -> str: 18 | """ 19 | Dynamically generates the M expression used by a Direct Lake model for a given lakehouse/warehouse. 20 | 21 | Parameters 22 | ---------- 23 | item_name : str, default=None 24 | The Fabric lakehouse or warehouse name. 25 | Defaults to None which resolves to the lakehouse attached to the notebook. 26 | item_type : str, default="Lakehouse" 27 | The Fabric item name. Valid options: 'Lakehouse', 'Warehouse'. 28 | workspace : str | uuid.UUID, default=None 29 | The Fabric workspace name or ID used by the item. 
30 | Defaults to None which resolves to the workspace of the attached lakehouse 31 | or if no lakehouse attached, resolves to the workspace of the notebook. 32 | use_sql_endpoint : bool, default=True 33 | Whether to use the SQL Endpoint for the lakehouse/warehouse. 34 | If False, the expression will be generated without using the SQL Endpoint. 35 | 36 | Returns 37 | ------- 38 | str 39 | Shows the expression which can be used to connect a Direct Lake semantic model to its SQL Endpoint. 40 | """ 41 | 42 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 43 | item_types = ["Lakehouse", "Warehouse"] 44 | item_type = item_type.capitalize() 45 | if item_type not in item_types: 46 | raise ValueError( 47 | f"{icons.red_dot} Invalid item type. Valid options: {item_types}." 48 | ) 49 | 50 | if item_type == "Lakehouse": 51 | (item_name, item_id) = resolve_lakehouse_name_and_id( 52 | lakehouse=item_name, workspace=workspace_id 53 | ) 54 | else: 55 | (item_name, item_id) = resolve_item_name_and_id( 56 | item=item_name, type=item_type, workspace=workspace_id 57 | ) 58 | 59 | item_type_rest = f"{item_type.lower()}s" 60 | response = _base_api( 61 | request=f"/v1/workspaces/{workspace_id}/{item_type_rest}/{item_id}" 62 | ) 63 | 64 | prop = response.json().get("properties") 65 | 66 | if item_type == "Lakehouse": 67 | sqlprop = prop.get("sqlEndpointProperties") 68 | sqlEPCS = sqlprop.get("connectionString") 69 | sqlepid = sqlprop.get("id") 70 | provStatus = sqlprop.get("provisioningStatus") 71 | elif item_type == "Warehouse": 72 | sqlEPCS = prop.get("connectionString") 73 | sqlepid = item_id 74 | provStatus = None 75 | 76 | if provStatus == "InProgress": 77 | raise ValueError( 78 | f"{icons.red_dot} The SQL Endpoint for the '{item_name}' lakehouse within the '{workspace_name}' workspace has not yet been provisioned. Please wait until it has been provisioned." 79 | ) 80 | 81 | start_expr = "let\n\tdatabase = " 82 | end_expr = "\nin\n\tdatabase" 83 | mid_expr = f'Sql.Database("{sqlEPCS}", "{sqlepid}")' 84 | 85 | # Build DL/OL expression 86 | if not use_sql_endpoint and item_type == "Lakehouse": 87 | return f'AzureDataLakeStorage{{"server":"onelake.dfs.fabric.microsoft.com","path":"/{workspace_id}/{item_id}/"}}' 88 | else: 89 | return f"{start_expr}{mid_expr}{end_expr}" 90 | -------------------------------------------------------------------------------- /src/sempy_labs/admin/_external_data_share.py: -------------------------------------------------------------------------------- 1 | from uuid import UUID 2 | import sempy_labs._icons as icons 3 | import pandas as pd 4 | from sempy_labs.admin._basic_functions import _resolve_workspace_name_and_id 5 | from sempy_labs._helper_functions import ( 6 | _base_api, 7 | _create_dataframe, 8 | _update_dataframe_datatypes, 9 | ) 10 | 11 | 12 | def list_external_data_shares() -> pd.DataFrame: 13 | """ 14 | Lists external data shares in the tenant. This function is for admins. 15 | 16 | This is a wrapper function for the following API: `External Data Shares - List External Data Shares `_. 17 | 18 | Returns 19 | ------- 20 | pandas.DataFrame 21 | A pandas dataframe showing a list of external data shares in the tenant. 
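    Examples
    --------
    A minimal usage sketch; requires Fabric administrator permissions::

        df = list_external_data_shares()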
22 | """ 23 | 24 | columns = { 25 | "External Data Share Id": "string", 26 | "Paths": "string", 27 | "Creater Principal Id": "string", 28 | "Creater Principal Name": "string", 29 | "Creater Principal Type": "string", 30 | "Creater Principal UPN": "string", 31 | "Recipient UPN": "string", 32 | "Status": "string", 33 | "Expiration Time UTC": "datetime", 34 | "Workspace Id": "string", 35 | "Item Id": "string", 36 | "Invitation URL": "string", 37 | } 38 | df = _create_dataframe(columns=columns) 39 | 40 | response = _base_api(request="/v1/admin/items/externalDataShares") 41 | 42 | for i in response.json().get("value", []): 43 | cp = i.get("creatorPrincipal", {}) 44 | new_data = { 45 | "External Data Share Id": i.get("id"), 46 | "Paths": [i.get("paths", [])], 47 | "Creater Principal Id": cp.get("id"), 48 | "Creater Principal Name": cp.get("displayName"), 49 | "Creater Principal Type": cp.get("type"), 50 | "Creater Principal UPN": cp.get("userDetails", {}).get("userPrincipalName"), 51 | "Recipient UPN": i.get("recipient", {}).get("userPrincipalName"), 52 | "Status": i.get("status"), 53 | "Expiration Time UTC": i.get("expirationTimeUtc"), 54 | "Workspace Id": i.get("workspaceId"), 55 | "Item Id": i.get("itemId"), 56 | "Invitation URL": i.get("invitationUrl"), 57 | } 58 | 59 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 60 | 61 | _update_dataframe_datatypes(dataframe=df, column_map=columns) 62 | 63 | return df 64 | 65 | 66 | def revoke_external_data_share( 67 | external_data_share_id: UUID, item_id: UUID, workspace: str | UUID 68 | ): 69 | """ 70 | Revokes the specified external data share. Note: This action cannot be undone. 71 | 72 | This is a wrapper function for the following API: `External Data Shares - Revoke External Data Share `_. 73 | 74 | Parameters 75 | ---------- 76 | external_data_share_id : uuid.UUID 77 | The external data share ID. 78 | item_id : uuid.UUID, default=None 79 | The Item ID 80 | workspace : str | uuid.UUID 81 | The Fabric workspace name or id. 82 | """ 83 | (workspace, workspace_id) = _resolve_workspace_name_and_id(workspace) 84 | 85 | _base_api( 86 | request=f"/v1/admin/workspaces/{workspace_id}/items/{item_id}/externalDataShares/{external_data_share_id}/revoke", 87 | method="post", 88 | ) 89 | 90 | print( 91 | f"{icons.green_dot} The '{external_data_share_id}' external data share for the '{item_id}' item within the '{workspace}' workspace has been revoked." 92 | ) 93 | -------------------------------------------------------------------------------- /src/sempy_labs/_ml_models.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Optional 3 | from sempy_labs._helper_functions import ( 4 | resolve_workspace_name_and_id, 5 | _base_api, 6 | delete_item, 7 | _create_dataframe, 8 | create_item, 9 | ) 10 | from uuid import UUID 11 | 12 | 13 | def list_ml_models(workspace: Optional[str | UUID] = None) -> pd.DataFrame: 14 | """ 15 | Shows the ML models within a workspace. 16 | 17 | This is a wrapper function for the following API: `Items - List ML Models `_. 18 | 19 | Parameters 20 | ---------- 21 | workspace : str | uuid.UUID, default=None 22 | The Fabric workspace name or ID. 23 | Defaults to None which resolves to the workspace of the attached lakehouse 24 | or if no lakehouse attached, resolves to the workspace of the notebook. 25 | 26 | Returns 27 | ------- 28 | pandas.DataFrame 29 | A pandas dataframe showing the ML models within a workspace. 
30 | """ 31 | 32 | columns = { 33 | "ML Model Name": "string", 34 | "ML Model Id": "string", 35 | "Description": "string", 36 | } 37 | df = _create_dataframe(columns=columns) 38 | 39 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 40 | 41 | responses = _base_api( 42 | request=f"/v1/workspaces/{workspace_id}/mlModels", 43 | status_codes=200, 44 | uses_pagination=True, 45 | ) 46 | 47 | for r in responses: 48 | for v in r.get("value", []): 49 | model_id = v.get("id") 50 | modelName = v.get("displayName") 51 | desc = v.get("description") 52 | 53 | new_data = { 54 | "ML Model Name": modelName, 55 | "ML Model Id": model_id, 56 | "Description": desc, 57 | } 58 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 59 | 60 | return df 61 | 62 | 63 | def create_ml_model( 64 | name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None 65 | ): 66 | """ 67 | Creates a Fabric ML model. 68 | 69 | This is a wrapper function for the following API: `Items - Create ML Model `_. 70 | 71 | Parameters 72 | ---------- 73 | name: str 74 | Name of the ML model. 75 | description : str, default=None 76 | A description of the ML model. 77 | workspace : str | uuid.UUID, default=None 78 | The Fabric workspace name or ID. 79 | Defaults to None which resolves to the workspace of the attached lakehouse 80 | or if no lakehouse attached, resolves to the workspace of the notebook. 81 | """ 82 | 83 | create_item(name=name, description=description, type="MLModel", workspace=workspace) 84 | 85 | 86 | def delete_ml_model(name: str | UUID, workspace: Optional[str | UUID] = None): 87 | """ 88 | Deletes a Fabric ML model. 89 | 90 | This is a wrapper function for the following API: `Items - Delete ML Model `_. 91 | 92 | Parameters 93 | ---------- 94 | name: str | uuid.UUID 95 | Name or ID of the ML model. 96 | workspace : str | uuid.UUID, default=None 97 | The Fabric workspace name or ID. 98 | Defaults to None which resolves to the workspace of the attached lakehouse 99 | or if no lakehouse attached, resolves to the workspace of the notebook. 100 | """ 101 | 102 | delete_item(item=name, type="MLModel", workspace=workspace) 103 | -------------------------------------------------------------------------------- /src/sempy_labs/directlake/_show_unsupported_directlake_objects.py: -------------------------------------------------------------------------------- 1 | import sempy.fabric as fabric 2 | import pandas as pd 3 | from sempy_labs._helper_functions import ( 4 | format_dax_object_name, 5 | resolve_dataset_name_and_id, 6 | resolve_workspace_name_and_id, 7 | ) 8 | from typing import Optional, Tuple 9 | from sempy._utils._log import log 10 | from uuid import UUID 11 | 12 | 13 | @log 14 | def show_unsupported_direct_lake_objects( 15 | dataset: str | UUID, workspace: Optional[str | UUID] = None 16 | ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: 17 | """ 18 | Returns a list of a semantic model's objects which are not supported by Direct Lake based on 19 | `official documentation `_. 20 | 21 | Parameters 22 | ---------- 23 | dataset : str | uuid.UUID 24 | Name or ID of the semantic model. 25 | workspace : str | uuid.UUID, default=None 26 | The Fabric workspace name or ID. 27 | Defaults to None which resolves to the workspace of the attached lakehouse 28 | or if no lakehouse attached, resolves to the workspace of the notebook. 
29 | 30 | Returns 31 | ------- 32 | pandas.DataFrame, pandas.DataFrame, pandas.DataFrame 33 | 3 pandas dataframes showing objects in a semantic model which are not supported by Direct Lake. 34 | """ 35 | 36 | pd.options.mode.chained_assignment = None 37 | 38 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 39 | (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id) 40 | 41 | fabric.refresh_tom_cache(workspace=workspace) 42 | 43 | dfT = fabric.list_tables(dataset=dataset_id, workspace=workspace_id) 44 | dfC = fabric.list_columns(dataset=dataset_id, workspace=workspace_id) 45 | dfR = fabric.list_relationships(dataset=dataset_id, workspace=workspace_id) 46 | 47 | # Calc tables 48 | dfT_filt = dfT[dfT["Type"] == "Calculated Table"] 49 | dfT_filt.rename(columns={"Name": "Table Name"}, inplace=True) 50 | t = dfT_filt[["Table Name", "Type"]] 51 | 52 | # Calc columns 53 | dfC_filt = dfC[(dfC["Type"] == "Calculated") | (dfC["Data Type"] == "Binary")] 54 | c = dfC_filt[["Table Name", "Column Name", "Type", "Data Type", "Source"]] 55 | 56 | # Relationships 57 | dfC["Column Object"] = format_dax_object_name(dfC["Table Name"], dfC["Column Name"]) 58 | dfR["From Object"] = format_dax_object_name(dfR["From Table"], dfR["From Column"]) 59 | dfR["To Object"] = format_dax_object_name(dfR["To Table"], dfR["To Column"]) 60 | merged_from = pd.merge( 61 | dfR, dfC, left_on="From Object", right_on="Column Object", how="left" 62 | ) 63 | merged_to = pd.merge( 64 | dfR, dfC, left_on="To Object", right_on="Column Object", how="left" 65 | ) 66 | 67 | dfR["From Column Data Type"] = merged_from["Data Type"] 68 | dfR["To Column Data Type"] = merged_to["Data Type"] 69 | 70 | dfR_filt = dfR[(dfR["From Column Data Type"] != dfR["To Column Data Type"])] 71 | r = dfR_filt[ 72 | [ 73 | "From Table", 74 | "From Column", 75 | "To Table", 76 | "To Column", 77 | "From Column Data Type", 78 | "To Column Data Type", 79 | ] 80 | ] 81 | 82 | # print('Calculated Tables are not supported...') 83 | # display(t) 84 | # print("Learn more about Direct Lake limitations here: https://learn.microsoft.com/power-bi/enterprise/directlake-overview#known-issues-and-limitations") 85 | # print('Calculated columns are not supported. Columns of binary data type are not supported.') 86 | # display(c) 87 | # print('Columns used for relationship cannot be of data type datetime and they also must be of the same data type.') 88 | # display(r) 89 | 90 | return t, c, r 91 | -------------------------------------------------------------------------------- /src/sempy_labs/_ml_experiments.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Optional 3 | from sempy_labs._helper_functions import ( 4 | resolve_workspace_name_and_id, 5 | _base_api, 6 | delete_item, 7 | _create_dataframe, 8 | create_item, 9 | ) 10 | from uuid import UUID 11 | 12 | 13 | def list_ml_experiments(workspace: Optional[str | UUID] = None) -> pd.DataFrame: 14 | """ 15 | Shows the ML experiments within a workspace. 16 | 17 | This is a wrapper function for the following API: `Items - List ML Experiments `_. 18 | 19 | Parameters 20 | ---------- 21 | workspace : str | uuid.UUID, default=None 22 | The Fabric workspace name or ID. 23 | Defaults to None which resolves to the workspace of the attached lakehouse 24 | or if no lakehouse attached, resolves to the workspace of the notebook. 
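    Examples
    --------
    A minimal usage sketch; with no arguments the workspace is resolved from the
    notebook context::

        df = list_ml_experiments()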
25 | 26 | Returns 27 | ------- 28 | pandas.DataFrame 29 | A pandas dataframe showing the ML experiments within a workspace. 30 | """ 31 | 32 | columns = { 33 | "ML Experiment Name": "string", 34 | "ML Experiment Id": "string", 35 | "Description": "string", 36 | } 37 | df = _create_dataframe(columns=columns) 38 | 39 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 40 | 41 | responses = _base_api( 42 | request=f"/v1/workspaces/{workspace_id}/mlExperiments", 43 | status_codes=200, 44 | uses_pagination=True, 45 | ) 46 | 47 | for r in responses: 48 | for v in r.get("value", []): 49 | model_id = v.get("id") 50 | modelName = v.get("displayName") 51 | desc = v.get("description") 52 | 53 | new_data = { 54 | "ML Experiment Name": modelName, 55 | "ML Experiment Id": model_id, 56 | "Description": desc, 57 | } 58 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 59 | 60 | return df 61 | 62 | 63 | def create_ml_experiment( 64 | name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None 65 | ): 66 | """ 67 | Creates a Fabric ML experiment. 68 | 69 | This is a wrapper function for the following API: `Items - Create ML Experiment `_. 70 | 71 | Parameters 72 | ---------- 73 | name: str 74 | Name of the ML experiment. 75 | description : str, default=None 76 | A description of the ML experiment. 77 | workspace : str | uuid.UUID, default=None 78 | The Fabric workspace name or ID. 79 | Defaults to None which resolves to the workspace of the attached lakehouse 80 | or if no lakehouse attached, resolves to the workspace of the notebook. 81 | """ 82 | 83 | create_item( 84 | name=name, description=description, type="MLExperiment", workspace=workspace 85 | ) 86 | 87 | 88 | def delete_ml_experiment(name: str, workspace: Optional[str | UUID] = None): 89 | """ 90 | Deletes a Fabric ML experiment. 91 | 92 | This is a wrapper function for the following API: `Items - Delete ML Experiment `_. 93 | 94 | Parameters 95 | ---------- 96 | name: str 97 | Name of the ML experiment. 98 | workspace : str | uuid.UUID, default=None 99 | The Fabric workspace name or ID. 100 | Defaults to None which resolves to the workspace of the attached lakehouse 101 | or if no lakehouse attached, resolves to the workspace of the notebook. 102 | """ 103 | 104 | delete_item(item=name, type="MLExperiment", workspace=workspace) 105 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/
161 | --------------------------------------------------------------------------------
/src/sempy_labs/_eventstreams.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from typing import Optional
3 | from sempy_labs._helper_functions import (
4 |     resolve_workspace_name_and_id,
5 |     _base_api,
6 |     delete_item,
7 |     _create_dataframe,
8 |     create_item,
9 | )
10 | from uuid import UUID
11 | import sempy_labs._icons as icons
12 | 
13 | 
14 | def list_eventstreams(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
15 |     """
16 |     Shows the eventstreams within a workspace.
17 | 
18 |     This is a wrapper function for the following API: `Items - List Eventstreams `_.
19 | 
20 |     Parameters
21 |     ----------
22 |     workspace : str | uuid.UUID, default=None
23 |         The Fabric workspace name or ID.
24 |         Defaults to None which resolves to the workspace of the attached lakehouse
25 |         or if no lakehouse attached, resolves to the workspace of the notebook.
26 | 
27 |     Returns
28 |     -------
29 |     pandas.DataFrame
30 |         A pandas dataframe showing the eventstreams within a workspace.
31 |     """
32 | 
33 |     columns = {
34 |         "Eventstream Name": "string",
35 |         "Eventstream Id": "string",
36 |         "Description": "string",
37 |     }
38 |     df = _create_dataframe(columns=columns)
39 | 
40 |     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
41 |     responses = _base_api(
42 |         request=f"/v1/workspaces/{workspace_id}/eventstreams", uses_pagination=True
43 |     )
44 | 
45 |     for r in responses:
46 |         for v in r.get("value", []):
47 |             new_data = {
48 |                 "Eventstream Name": v.get("displayName"),
49 |                 "Eventstream Id": v.get("id"),
50 |                 "Description": v.get("description"),
51 |             }
52 |             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
53 | 
54 |     return df
55 | 
56 | 
57 | def create_eventstream(
58 |     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
59 | ):
60 |     """
61 |     Creates a Fabric eventstream.
62 | 
63 |     This is a wrapper function for the following API: `Items - Create Eventstream `_.
64 | 
65 |     Parameters
66 |     ----------
67 |     name: str
68 |         Name of the eventstream.
69 |     description : str, default=None
70 |         A description of the eventstream.
71 |     workspace : str | uuid.UUID, default=None
72 |         The Fabric workspace name or ID.
73 |         Defaults to None which resolves to the workspace of the attached lakehouse
74 |         or if no lakehouse attached, resolves to the workspace of the notebook.
75 |     """
76 | 
77 |     create_item(
78 |         name=name, description=description, type="Eventstream", workspace=workspace
79 |     )
80 | 
81 | 
82 | def delete_eventstream(
83 |     eventstream: str | UUID, workspace: Optional[str | UUID] = None, **kwargs
84 | ):
85 |     """
86 |     Deletes a Fabric eventstream.
87 | 
88 |     This is a wrapper function for the following API: `Items - Delete Eventstream `_.
89 | 
90 |     Parameters
91 |     ----------
92 |     eventstream: str | uuid.UUID
93 |         Name or ID of the eventstream.
94 |     workspace : str | uuid.UUID, default=None
95 |         The Fabric workspace name or ID.
96 |         Defaults to None which resolves to the workspace of the attached lakehouse
97 |         or if no lakehouse attached, resolves to the workspace of the notebook.
98 |     """
99 | 
100 |     if "name" in kwargs:
101 |         eventstream = kwargs["name"]
102 |         print(
103 |             f"{icons.warning} The 'name' parameter is deprecated. Please use 'eventstream' instead."
104 |         )
105 | 
106 |     delete_item(item=eventstream, type="Eventstream", workspace=workspace)
107 | --------------------------------------------------------------------------------
/src/sempy_labs/_kql_querysets.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import sempy_labs._icons as icons
3 | from typing import Optional
4 | from sempy_labs._helper_functions import (
5 |     resolve_workspace_name_and_id,
6 |     _base_api,
7 |     _create_dataframe,
8 |     delete_item,
9 |     create_item,
10 | )
11 | from uuid import UUID
12 | 
13 | 
14 | def list_kql_querysets(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
15 |     """
16 |     Shows the KQL querysets within a workspace.
17 | 
18 |     This is a wrapper function for the following API: `Items - List KQL Querysets `_.
19 | 
20 |     Parameters
21 |     ----------
22 |     workspace : str | uuid.UUID, default=None
23 |         The Fabric workspace name or ID.
24 |         Defaults to None which resolves to the workspace of the attached lakehouse
25 |         or if no lakehouse attached, resolves to the workspace of the notebook.
26 | 
27 |     Returns
28 |     -------
29 |     pandas.DataFrame
30 |         A pandas dataframe showing the KQL querysets within a workspace.
31 |     """
32 | 
33 |     columns = {
34 |         "KQL Queryset Name": "string",
35 |         "KQL Queryset Id": "string",
36 |         "Description": "string",
37 |     }
38 |     df = _create_dataframe(columns=columns)
39 | 
40 |     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
41 | 
42 |     responses = _base_api(
43 |         request=f"v1/workspaces/{workspace_id}/kqlQuerysets", uses_pagination=True
44 |     )
45 | 
46 |     for r in responses:
47 |         for v in r.get("value", []):
48 |             new_data = {
49 |                 "KQL Queryset Name": v.get("displayName"),
50 |                 "KQL Queryset Id": v.get("id"),
51 |                 "Description": v.get("description"),
52 |             }
53 |             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
54 | 
55 |     return df
56 | 
57 | 
58 | def create_kql_queryset(
59 |     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
60 | ):
61 |     """
62 |     Creates a KQL queryset.
63 | 
64 |     This is a wrapper function for the following API: `Items - Create KQL Queryset `_.
65 | 
66 |     Parameters
67 |     ----------
68 |     name: str
69 |         Name of the KQL queryset.
70 |     description : str, default=None
71 |         A description of the KQL queryset.
72 |     workspace : str | uuid.UUID, default=None
73 |         The Fabric workspace name or ID.
74 |         Defaults to None which resolves to the workspace of the attached lakehouse
75 |         or if no lakehouse attached, resolves to the workspace of the notebook.
76 |     """
77 | 
78 |     create_item(
79 |         name=name, description=description, type="KQLQueryset", workspace=workspace
80 |     )
81 | 
82 | 
83 | def delete_kql_queryset(
84 |     kql_queryset: str | UUID, workspace: Optional[str | UUID] = None, **kwargs
85 | ):
86 |     """
87 |     Deletes a KQL queryset.
88 | 
89 |     This is a wrapper function for the following API: `Items - Delete KQL Queryset `_.
90 | 
91 |     Parameters
92 |     ----------
93 |     kql_queryset: str | uuid.UUID
94 |         Name or ID of the KQL queryset.
95 |     workspace : str | uuid.UUID, default=None
96 |         The Fabric workspace name or ID.
97 |         Defaults to None which resolves to the workspace of the attached lakehouse
98 |         or if no lakehouse attached, resolves to the workspace of the notebook.
99 |     """
100 | 
101 |     if "name" in kwargs:
102 |         kql_queryset = kwargs["name"]
103 |         print(
104 |             f"{icons.warning} The 'name' parameter is deprecated. Please use 'kql_queryset' instead."
105 | ) 106 | 107 | delete_item(item=kql_queryset, type="KQLQueryset", workspace=workspace) 108 | -------------------------------------------------------------------------------- /src/sempy_labs/_icons.py: -------------------------------------------------------------------------------- 1 | green_dot = "\U0001f7e2" 2 | yellow_dot = "\U0001f7e1" 3 | red_dot = "\U0001f534" 4 | in_progress = "⌛" 5 | checked = "\u2611" 6 | unchecked = "\u2610" 7 | start_bold = "\033[1m" 8 | end_bold = "\033[0m" 9 | bullet = "\u2022" 10 | warning = "⚠️" 11 | error = "\u274c" 12 | info = "ℹ️" 13 | measure_icon = "\u2211" 14 | table_icon = "\u229e" 15 | column_icon = "\u229f" 16 | model_bpa_name = "ModelBPA" 17 | report_bpa_name = "ReportBPA" 18 | severity_mapping = {warning: "Warning", error: "Error", info: "Info"} 19 | special_characters = ['"', "/", '"', ":", "|", "<", ">", "*", "?", "'", "!"] 20 | 21 | language_map = { 22 | "it-IT": "Italian", 23 | "es-ES": "Spanish", 24 | "he-IL": "Hebrew", 25 | "pt-PT": "Portuguese", 26 | "zh-CN": "Chinese", 27 | "fr-FR": "French", 28 | "da-DK": "Danish", 29 | "cs-CZ": "Czech", 30 | "de-DE": "German", 31 | "el-GR": "Greek", 32 | "fa-IR": "Persian", 33 | "ga-IE": "Irish", 34 | "hi-IN": "Hindi", 35 | "hu-HU": "Hungarian", 36 | "is-IS": "Icelandic", 37 | "ja-JP": "Japanese", 38 | "nl-NL": "Dutch", 39 | "pl-PL": "Polish", 40 | "pt-BR": "Portuguese", 41 | "ru-RU": "Russian", 42 | "te-IN": "Telugu", 43 | "ta-IN": "Tamil", 44 | "th-TH": "Thai", 45 | "zu-ZA": "Zulu", 46 | "am-ET": "Amharic", 47 | "ar-AE": "Arabic", 48 | "sv-SE": "Swedish", 49 | "ko-KR": "Korean", 50 | "id-ID": "Indonesian", 51 | "mt-MT": "Maltese", 52 | "ro-RO": "Romanian", 53 | "sk-SK": "Slovak", 54 | "sl-SL": "Slovenian", 55 | "tr-TR": "Turkish", 56 | "uk-UA": "Ukrainian", 57 | "bg-BG": "Bulgarian", 58 | "ca-ES": "Catalan", 59 | "fi-FI": "Finnish", 60 | } 61 | workspace_roles = ["Admin", "Member", "Viewer", "Contributor"] 62 | principal_types = ["App", "Group", "None", "User"] 63 | azure_api_version = "2023-11-01" 64 | migrate_capacity_suffix = "fsku" 65 | sku_mapping = { 66 | "A1": "F8", 67 | "EM1": "F8", 68 | "A2": "F16", 69 | "EM2": "F16", 70 | "A3": "F32", 71 | "EM3": "F32", 72 | "A4": "F64", 73 | "P1": "F64", 74 | "A5": "F128", 75 | "P2": "F128", 76 | "A6": "F256", 77 | "P3": "F256", 78 | "A7": "F512", 79 | "P4": "F512", 80 | "P5": "F1024", 81 | } 82 | 83 | refresh_type_mapping = { 84 | "full": "full", 85 | "auto": "automatic", 86 | "data": "dataOnly", 87 | "calc": "calculate", 88 | "clear": "clearValues", 89 | "defrag": "defragment", 90 | } 91 | 92 | itemTypes = { 93 | "DataPipeline": "dataPipelines", 94 | "Eventstream": "eventstreams", 95 | "KQLDatabase": "kqlDatabases", 96 | "KQLQueryset": "kqlQuerysets", 97 | "Lakehouse": "lakehouses", 98 | "MLExperiment": "mlExperiments", 99 | "MLModel": "mlModels", 100 | "Notebook": "notebooks", 101 | "Warehouse": "warehouses", 102 | } 103 | default_schema = "dbo" 104 | 105 | data_type_string = "string" 106 | data_type_long = "long" 107 | data_type_timestamp = "timestamp" 108 | data_type_double = "double" 109 | data_type_bool = "bool" 110 | int_format = "int" 111 | pct_format = "pct" 112 | no_format = "" 113 | 114 | bpa_schema = { 115 | "Capacity Name": data_type_string, 116 | "Capacity Id": data_type_string, 117 | "Workspace Name": data_type_string, 118 | "Workspace Id": data_type_string, 119 | "Dataset Name": data_type_string, 120 | "Dataset Id": data_type_string, 121 | "Configured By": data_type_string, 122 | "Rule Name": data_type_string, 123 | "Category": 
data_type_string, 124 | "Severity": data_type_string, 125 | "Object Type": data_type_string, 126 | "Object Name": data_type_string, 127 | "Description": data_type_string, 128 | "URL": data_type_string, 129 | "RunId": data_type_long, 130 | "Timestamp": data_type_timestamp, 131 | } 132 | 133 | sll_ann_name = "PBI_ProTooling" 134 | sll_prefix = "SLL_" 135 | sll_tags = [] 136 | base_cols = ["EventClass", "EventSubclass", "CurrentTime", "TextData"] 137 | end_cols = base_cols + [ 138 | "StartTime", 139 | "EndTime", 140 | "Duration", 141 | "CpuTime", 142 | "Success", 143 | "IntegerData", 144 | "ObjectID", 145 | ] 146 | refresh_event_schema = { 147 | "JobGraph": base_cols, 148 | "ProgressReportEnd": end_cols, 149 | } 150 | -------------------------------------------------------------------------------- /src/sempy_labs/admin/__init__.py: -------------------------------------------------------------------------------- 1 | from sempy_labs.admin._users import ( 2 | list_access_entities, 3 | list_user_subscriptions, 4 | ) 5 | from sempy_labs.admin._workspaces import ( 6 | add_user_to_workspace, 7 | delete_user_from_workspace, 8 | restore_deleted_workspace, 9 | ) 10 | from sempy_labs.admin._artifacts import ( 11 | list_unused_artifacts, 12 | ) 13 | from sempy_labs.admin._shared import ( 14 | list_widely_shared_artifacts, 15 | ) 16 | from sempy_labs.admin._datasets import ( 17 | list_datasets, 18 | list_dataset_users, 19 | ) 20 | from sempy_labs.admin._apps import ( 21 | list_apps, 22 | list_app_users, 23 | ) 24 | from sempy_labs.admin._reports import ( 25 | list_reports, 26 | list_report_users, 27 | list_report_subscriptions, 28 | ) 29 | from sempy_labs.admin._activities import ( 30 | list_activity_events, 31 | ) 32 | from sempy_labs.admin._scanner import ( 33 | scan_workspaces, 34 | ) 35 | from sempy_labs.admin._capacities import ( 36 | patch_capacity, 37 | list_capacities, 38 | get_capacity_assignment_status, 39 | get_capacity_state, 40 | list_capacity_users, 41 | ) 42 | from sempy_labs.admin._tenant import ( 43 | list_tenant_settings, 44 | delete_capacity_tenant_setting_override, 45 | update_tenant_setting, 46 | update_capacity_tenant_setting_override, 47 | list_workspaces_tenant_settings_overrides, 48 | list_capacity_tenant_settings_overrides, 49 | list_capacities_delegated_tenant_settings, 50 | list_domain_tenant_settings_overrides, 51 | ) 52 | from sempy_labs.admin._basic_functions import ( 53 | assign_workspaces_to_capacity, 54 | unassign_workspaces_from_capacity, 55 | list_workspaces, 56 | list_workspace_access_details, 57 | list_modified_workspaces, 58 | list_workspace_users, 59 | ) 60 | from sempy_labs.admin._domains import ( 61 | list_domains, 62 | list_domain_workspaces, 63 | assign_domain_workspaces, 64 | assign_domain_workspaces_by_capacities, 65 | create_domain, 66 | update_domain, 67 | delete_domain, 68 | resolve_domain_id, 69 | unassign_domain_workspaces, 70 | unassign_all_domain_workspaces, 71 | ) 72 | from sempy_labs.admin._items import ( 73 | list_item_access_details, 74 | list_items, 75 | ) 76 | from sempy_labs.admin._external_data_share import ( 77 | list_external_data_shares, 78 | revoke_external_data_share, 79 | ) 80 | from sempy_labs.admin._git import ( 81 | list_git_connections, 82 | ) 83 | 84 | __all__ = [ 85 | "list_items", 86 | "list_workspace_access_details", 87 | "list_access_entities", 88 | "list_item_access_details", 89 | "list_datasets", 90 | "list_workspaces", 91 | "assign_workspaces_to_capacity", 92 | "list_capacities", 93 | "list_tenant_settings", 94 | 
"list_domains", 95 | "list_domain_workspaces", 96 | "assign_domain_workspaces", 97 | "assign_domain_workspaces_by_capacities", 98 | "create_domain", 99 | "update_domain", 100 | "delete_domain", 101 | "resolve_domain_id", 102 | "unassign_domain_workspaces", 103 | "unassign_all_domain_workspaces", 104 | "list_capacities_delegated_tenant_settings", 105 | "unassign_workspaces_from_capacity", 106 | "list_external_data_shares", 107 | "revoke_external_data_share", 108 | "list_activity_events", 109 | "list_modified_workspaces", 110 | "list_git_connections", 111 | "list_reports", 112 | "get_capacity_assignment_status", 113 | "scan_workspaces", 114 | "get_capacity_state", 115 | "list_apps", 116 | "list_app_users", 117 | "list_dataset_users", 118 | "list_report_users", 119 | "patch_capacity", 120 | "list_workspace_users", 121 | "list_widely_shared_artifacts", 122 | "delete_capacity_tenant_setting_override", 123 | "update_tenant_setting", 124 | "update_capacity_tenant_setting_override", 125 | "list_workspaces_tenant_settings_overrides", 126 | "list_capacity_tenant_settings_overrides", 127 | "list_capacities_delegated_tenant_settings", 128 | "list_domain_tenant_settings_overrides", 129 | "list_unused_artifacts", 130 | "add_user_to_workspace", 131 | "delete_user_from_workspace", 132 | "restore_deleted_workspace", 133 | "list_capacity_users", 134 | "list_user_subscriptions", 135 | "list_report_subscriptions", 136 | ] 137 | -------------------------------------------------------------------------------- /src/sempy_labs/migration/_direct_lake_to_import.py: -------------------------------------------------------------------------------- 1 | import sempy 2 | from uuid import UUID 3 | import sempy_labs._icons as icons 4 | from typing import Optional 5 | 6 | 7 | def migrate_direct_lake_to_import( 8 | dataset: str | UUID, 9 | workspace: Optional[str | UUID] = None, 10 | mode: str = "import", 11 | ): 12 | """ 13 | Migrates a semantic model or specific table(s) from a Direct Lake mode to import or DirectQuery mode. After running this function, you must go to the semantic model settings and update the cloud connection. Not doing so will result in an inablity to refresh/use the semantic model. 14 | 15 | Parameters 16 | ---------- 17 | dataset : str | uuid.UUID 18 | Name or ID of the semantic model. 19 | workspace : str | uuid.UUID, default=None 20 | The Fabric workspace name or ID. 21 | Defaults to None which resolves to the workspace of the attached lakehouse 22 | or if no lakehouse attached, resolves to the workspace of the notebook. 23 | mode : str, default="import" 24 | The mode to migrate to. Can be either "import" or "directquery". 25 | """ 26 | 27 | sempy.fabric._client._utils._init_analysis_services() 28 | import Microsoft.AnalysisServices.Tabular as TOM 29 | from sempy_labs.tom import connect_semantic_model 30 | 31 | modes = { 32 | "import": "Import", 33 | "directquery": "DirectQuery", 34 | "dq": "DirectQuery", 35 | } 36 | 37 | # Resolve mode 38 | mode = mode.lower() 39 | actual_mode = modes.get(mode) 40 | if actual_mode is None: 41 | raise ValueError(f"Invalid mode '{mode}'. Must be one of {list(modes.keys())}.") 42 | 43 | # if isinstance(tables, str): 44 | # tables = [tables] 45 | 46 | with connect_semantic_model( 47 | dataset=dataset, workspace=workspace, readonly=False 48 | ) as tom: 49 | 50 | if not tom.is_direct_lake(): 51 | print( 52 | f"{icons.warning} The '{dataset}' semantic model within the '{workspace}' workspace is not in Direct Lake mode." 
53 | ) 54 | return 55 | 56 | # if tables is None: 57 | table_list = [t for t in tom.model.Tables] 58 | # else: 59 | # table_list = [t for t in tom.model.Tables if t.Name in tables] 60 | # if not table_list: 61 | # raise ValueError(f"{icons.red_dot} No tables found to migrate.") 62 | 63 | for t in table_list: 64 | table_name = t.Name 65 | if t.Partitions.Count == 1 and all( 66 | p.Mode == TOM.ModeType.DirectLake for p in t.Partitions 67 | ): 68 | p = next(p for p in t.Partitions) 69 | partition_name = p.Name 70 | entity_name = p.Source.EntityName 71 | schema_name = p.Source.SchemaName or "dbo" 72 | # Rename Direct Lake partition 73 | t.Partitions[partition_name].Name = f"{partition_name}_remove" 74 | 75 | # Generate M expression for import partition 76 | expression = f"""let\n\tSource = DatabaseQuery,\n\tData = Source{{[Schema="{schema_name}",Item="{entity_name}"]}}[Data]\nin\n\tData""" 77 | 78 | # Generate M partition 79 | tom.add_m_partition( 80 | table_name=table_name, 81 | partition_name=partition_name, 82 | expression=expression, 83 | mode=actual_mode, 84 | ) 85 | # Remove Direct Lake partition 86 | tom.remove_object(object=p) 87 | # if tables is not None: 88 | # print( 89 | # f"{icons.green_dot} The '{table_name}' table has been migrated to '{actual_mode}' mode." 90 | # ) 91 | 92 | tom.model.Model.DefaultMode = TOM.ModeType.Import 93 | # if tables is None: 94 | print( 95 | f"{icons.green_dot} All tables which were in Direct Lake mode have been migrated to '{actual_mode}' mode." 96 | ) 97 | 98 | # Check 99 | # for t in tom.model.Tables: 100 | # if t.Partitions.Count == 1 and all(p.Mode == TOM.ModeType.Import for p in t.Partitions) and t.CalculationGroup is None: 101 | # p = next(p for p in t.Partitions) 102 | # print(p.Name) 103 | # print(p.Source.Expression) 104 | -------------------------------------------------------------------------------- /notebooks/Delta Analyzer.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c27dfd1-4fe0-4a97-92e6-ddf78889aa93", 6 | "metadata": { 7 | "nteract": { 8 | "transient": { 9 | "deleting": false 10 | } 11 | } 12 | }, 13 | "source": [ 14 | "### Install the latest .whl package\n", 15 | "\n", 16 | "Check [here](https://pypi.org/project/semantic-link-labs/) to see the latest version." 
17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "id": "d5cae9db-cef9-48a8-a351-9c5fcc99645c", 23 | "metadata": { 24 | "jupyter": { 25 | "outputs_hidden": true, 26 | "source_hidden": false 27 | }, 28 | "nteract": { 29 | "transient": { 30 | "deleting": false 31 | } 32 | } 33 | }, 34 | "outputs": [], 35 | "source": [ 36 | "%pip install semantic-link-labs" 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "id": "b195eae8", 42 | "metadata": {}, 43 | "source": [ 44 | "### Import the library" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "id": "1344e286", 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "import sempy_labs as labs\n", 55 | "table_name = 'MyTable' # Enter the name of the delta table\n", 56 | "lakehouse = 'MyLakehouse' # Enter the name or ID of the lakehouse in which the delta table resides\n", 57 | "workspace = 'MyWorkspace' # Enter the name or ID of the workspace in which the lakehouse resides" 58 | ] 59 | }, 60 | { 61 | "cell_type": "markdown", 62 | "id": "baa24264", 63 | "metadata": {}, 64 | "source": [ 65 | "### Run Delta Analyzer for a given table in the lakehouse attached to your notebook" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": null, 71 | "id": "0a1903c0", 72 | "metadata": {}, 73 | "outputs": [], 74 | "source": [ 75 | "x = labs.delta_analyzer(\n", 76 | " table_name=table_name,\n", 77 | " lakehouse=lakehouse,\n", 78 | " workspace=workspace\n", 79 | ")" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": null, 85 | "id": "285986d1", 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "for name, df in x.items():\n", 90 | " print(name)\n", 91 | " display(df)" 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "id": "e1d118dd", 97 | "metadata": {}, 98 | "source": [ 99 | "### Get actual (not approximate) distinct counts" 100 | ] 101 | }, 102 | { 103 | "cell_type": "code", 104 | "execution_count": null, 105 | "id": "d1ecf538", 106 | "metadata": {}, 107 | "outputs": [], 108 | "source": [ 109 | "x = labs.delta_analyzer(\n", 110 | " table_name=table_name,\n", 111 | " approx_distinct_count=False,\n", 112 | " lakehouse=lakehouse,\n", 113 | " workspace=workspace\n", 114 | " )" 115 | ] 116 | }, 117 | { 118 | "cell_type": "markdown", 119 | "id": "60aa6592", 120 | "metadata": {}, 121 | "source": [ 122 | "### Export the results of Delta Analyzer to a set of delta tables in your lakehouse\n", 123 | "The export always appends results to the delta tables" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": null, 129 | "id": "1d4235ff", 130 | "metadata": {}, 131 | "outputs": [], 132 | "source": [ 133 | "x = labs.delta_analyzer(\n", 134 | " table_name=table_name,\n", 135 | " lakehouse=lakehouse,\n", 136 | " workspace=workspace,\n", 137 | " export=True\n", 138 | " )" 139 | ] 140 | } 141 | ], 142 | "metadata": { 143 | "kernel_info": { 144 | "name": "synapse_pyspark" 145 | }, 146 | "kernelspec": { 147 | "display_name": "Synapse PySpark", 148 | "language": "Python", 149 | "name": "synapse_pyspark" 150 | }, 151 | "language_info": { 152 | "name": "python" 153 | }, 154 | "microsoft": { 155 | "language": "python" 156 | }, 157 | "nteract": { 158 | "version": "nteract-front-end@1.0.0" 159 | }, 160 | "spark_compute": { 161 | "compute_id": "/trident/default" 162 | }, 163 | "synapse_widget": { 164 | "state": {}, 165 | "version": "0.1" 166 | }, 167 | "widgets": {} 168 | }, 169 | "nbformat": 4, 170 | "nbformat_minor": 5 171 | } 
172 | --------------------------------------------------------------------------------
/src/sempy_labs/_mounted_data_factories.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import json
3 | from typing import Optional
4 | from sempy_labs._helper_functions import (
5 |     resolve_workspace_name_and_id,
6 |     _base_api,
7 |     _create_dataframe,
8 |     _update_dataframe_datatypes,
9 |     resolve_item_id,
10 |     _decode_b64,
11 |     delete_item,
12 |     get_item_definition,
13 | )
14 | 
15 | from uuid import UUID
16 | 
17 | 
18 | def list_mounted_data_factories(
19 |     workspace: Optional[str | UUID] = None,
20 | ) -> pd.DataFrame:
21 |     """
22 |     Shows a list of mounted data factories from the specified workspace.
23 | 
24 |     This is a wrapper function for the following API: `Items - List Mounted Data Factories `_.
25 | 
26 |     Parameters
27 |     ----------
28 |     workspace : str | uuid.UUID, default=None
29 |         The Fabric workspace name or ID.
30 |         Defaults to None which resolves to the workspace of the attached lakehouse
31 |         or if no lakehouse attached, resolves to the workspace of the notebook.
32 | 
33 |     Returns
34 |     -------
35 |     pandas.DataFrame
36 |         A pandas dataframe showing a list of mounted data factories from the specified workspace.
37 |     """
38 | 
39 |     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
40 | 
41 |     columns = {
42 |         "Mounted Data Factory Name": "str",
43 |         "Mounted Data Factory Id": "str",
44 |         "Description": "str",
45 |     }
46 | 
47 |     df = _create_dataframe(columns=columns)
48 |     responses = _base_api(
49 |         request=f"/v1/workspaces/{workspace_id}/mountedDataFactories",
50 |         uses_pagination=True,
51 |     )
52 | 
53 |     for r in responses:
54 |         for v in r.get("value", []):
55 |             new_data = {
56 |                 "Mounted Data Factory Name": v.get("displayName"),
57 |                 "Mounted Data Factory Id": v.get("id"),
58 |                 "Description": v.get("description"),
59 |             }
60 | 
61 |             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
62 | 
63 |     _update_dataframe_datatypes(dataframe=df, column_map=columns)
64 | 
65 |     return df
66 | 
67 | 
68 | def get_mounted_data_factory_definition(
69 |     mounted_data_factory: str | UUID, workspace: Optional[str | UUID] = None
70 | ) -> dict:
71 |     """
72 |     Returns the specified MountedDataFactory public definition.
73 | 
74 |     This is a wrapper function for the following API: `Items - Get Mounted Data Factory Definition `_.
75 | 
76 |     Parameters
77 |     ----------
78 |     mounted_data_factory : str | uuid.UUID
79 |         The name or ID of the mounted data factory.
80 |     workspace : str | uuid.UUID, default=None
81 |         The Fabric workspace name or ID.
82 |         Defaults to None which resolves to the workspace of the attached lakehouse
83 |         or if no lakehouse attached, resolves to the workspace of the notebook.
84 | 
85 |     Returns
86 |     -------
87 |     dict
88 |         The 'mountedDataFactory-content.json' file from the mounted data factory definition.
89 |     """
90 | 
91 |     return get_item_definition(
92 |         item=mounted_data_factory,
93 |         type="MountedDataFactory",
94 |         workspace=workspace,
95 |         return_dataframe=False,
96 |     )
97 | 
98 | 
99 | def delete_mounted_data_factory(
100 |     mounted_data_factory: str | UUID, workspace: Optional[str | UUID] = None
101 | ):
102 |     """
103 |     Deletes the specified mounted data factory.
104 | 
105 |     This is a wrapper function for the following API: `Items - Delete Mounted Data Factory `_.
106 | 
107 |     Parameters
108 |     ----------
109 |     mounted_data_factory : str | uuid.UUID
110 |         The name or ID of the mounted data factory.
111 | workspace : str | uuid.UUID, default=None 112 | The Fabric workspace name or ID. 113 | Defaults to None which resolves to the workspace of the attached lakehouse 114 | or if no lakehouse attached, resolves to the workspace of the notebook. 115 | """ 116 | 117 | delete_item( 118 | item=mounted_data_factory, type="MountedDataFactory", workspace=workspace 119 | ) 120 | -------------------------------------------------------------------------------- /notebooks/Semantic Model Management.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c27dfd1-4fe0-4a97-92e6-ddf78889aa93", 6 | "metadata": { 7 | "nteract": { 8 | "transient": { 9 | "deleting": false 10 | } 11 | } 12 | }, 13 | "source": [ 14 | "### Install the latest .whl package\n", 15 | "\n", 16 | "Check [here](https://pypi.org/project/semantic-link-labs/) to see the latest version." 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "id": "d5cae9db-cef9-48a8-a351-9c5fcc99645c", 23 | "metadata": { 24 | "jupyter": { 25 | "outputs_hidden": true, 26 | "source_hidden": false 27 | }, 28 | "nteract": { 29 | "transient": { 30 | "deleting": false 31 | } 32 | } 33 | }, 34 | "outputs": [], 35 | "source": [ 36 | "%pip install semantic-link-labs" 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "id": "b195eae8", 42 | "metadata": {}, 43 | "source": [ 44 | "### Import the library and necessary packages" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "id": "1344e286", 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "import sempy_labs as labs\n", 55 | "source_dataset = '' # Name of the semantic model to backup\n", 56 | "target_dataset = '' # Name of the semantic model to restore\n", 57 | "source_workspace = '' # Name of the workspace in which the semantic model resides\n", 58 | "target_workspace = '' # Destination workspace of the semantic model\n", 59 | "source_file_path = '' # Name/path of the backup file to create\n", 60 | "target_file_path = '' # Name/path of the backup file to be copied to the target workspace\n", 61 | "storage_account = '' # Name of the ADLS Gen2 storage account associated with both source & target workspaces" 62 | ] 63 | }, 64 | { 65 | "cell_type": "markdown", 66 | "id": "d4f5356a", 67 | "metadata": {}, 68 | "source": [ 69 | "#### Prerequisites\n", 70 | "* [Create an ADLS Gen2 storage account (in the Azure Portal)](https://learn.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal)\n", 71 | "* Assign the ADLS Gen2 storage account to both source and target workspaces\n", 72 | " * Navigate to your workspace.\n", 73 | " * Select 'Workspace settings'.\n", 74 | " * Select 'Azure connections'.\n", 75 | " * Within 'Azure Data Lake Gen2 Storage' click 'Configure'.\n", 76 | " * Enter your Subscription, Resource Group and Storage Account.\n", 77 | " * Click 'Save'.\n" 78 | ] 79 | }, 80 | { 81 | "cell_type": "markdown", 82 | "id": "55e5ca67", 83 | "metadata": {}, 84 | "source": [ 85 | "### Backup, copy and restore a semantic model to a new workspace" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "id": "5a985c1f", 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "labs.backup_semantic_model(\n", 96 | " dataset=source_dataset,\n", 97 | " file_path=source_file_path,\n", 98 | " workspace=source_workspace,\n", 99 | ")\n", 100 | "labs.copy_semantic_model_backup_file(\n", 101 | " 
source_workspace=source_workspace,\n", 102 | " target_workspace=target_workspace,\n", 103 | " source_file_name=source_file_path,\n", 104 | " target_file_name=target_file_path,\n", 105 | " storage_account=storage_account,\n", 106 | ")\n", 107 | "labs.restore_semantic_model(\n", 108 | " dataset=target_dataset,\n", 109 | " file_path=target_file_path,\n", 110 | " workspace=target_workspace,\n", 111 | ")" 112 | ] 113 | } 114 | ], 115 | "metadata": { 116 | "kernel_info": { 117 | "name": "synapse_pyspark" 118 | }, 119 | "kernelspec": { 120 | "display_name": "Synapse PySpark", 121 | "language": "Python", 122 | "name": "synapse_pyspark" 123 | }, 124 | "language_info": { 125 | "name": "python" 126 | }, 127 | "microsoft": { 128 | "language": "python" 129 | }, 130 | "nteract": { 131 | "version": "nteract-front-end@1.0.0" 132 | }, 133 | "spark_compute": { 134 | "compute_id": "/trident/default" 135 | }, 136 | "synapse_widget": { 137 | "state": {}, 138 | "version": "0.1" 139 | }, 140 | "widgets": {} 141 | }, 142 | "nbformat": 4, 143 | "nbformat_minor": 5 144 | } 145 | -------------------------------------------------------------------------------- /src/sempy_labs/report/_report_list_functions.py: -------------------------------------------------------------------------------- 1 | import sempy.fabric as fabric 2 | from typing import Optional 3 | import pandas as pd 4 | from sempy_labs._helper_functions import ( 5 | format_dax_object_name, 6 | resolve_workspace_name_and_id, 7 | resolve_dataset_name_and_id, 8 | ) 9 | from sempy_labs.report._reportwrapper import ReportWrapper 10 | from sempy_labs._list_functions import list_reports_using_semantic_model 11 | from uuid import UUID 12 | 13 | 14 | def list_unused_objects_in_reports( 15 | dataset: str | UUID, workspace: Optional[str | UUID] = None 16 | ) -> pd.DataFrame: 17 | """ 18 | Shows a list of all columns in the semantic model which are not used in any related Power BI reports (including dependencies). 19 | Note: As with all functions which rely on the ReportWrapper, this function requires the report to be in the 'PBIR' format. 20 | 21 | Parameters 22 | ---------- 23 | dataset : str | uuid.UUID 24 | Name or ID of the semantic model. 25 | workspace : str | uuid.UUID, default=None 26 | The Fabric workspace name or ID. 27 | Defaults to None which resolves to the workspace of the attached lakehouse 28 | or if no lakehouse attached, resolves to the workspace of the notebook. 29 | 30 | Returns 31 | ------- 32 | pandas.DataFrame 33 | A pandas dataframe showing a list of all columns in the semantic model which are not used in any related Power BI reports (including dependencies). 34 | """ 35 | 36 | # TODO: what about relationships/RLS? 
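    # One possible direction for the TODO above (untested sketch, not part of the shipped
    # logic): relationship key columns could also be treated as "used" by unioning them
    # into the set of used objects before the final filter, e.g.:
    #     dfRel = fabric.list_relationships(dataset=dataset_id, workspace=workspace_id)
    #     used_cols = set(format_dax_object_name(dfRel["From Table"], dfRel["From Column"]))
    #     used_cols |= set(format_dax_object_name(dfRel["To Table"], dfRel["To Column"]))
    # RLS would additionally require parsing role filter expressions for column references.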
37 | 38 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 39 | (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id) 40 | 41 | fabric.refresh_tom_cache(workspace=workspace) 42 | 43 | dfR = _list_all_report_semantic_model_objects( 44 | dataset=dataset_id, workspace=workspace_id 45 | ) 46 | dfR_filt = ( 47 | dfR[dfR["Object Type"] == "Column"][["Table Name", "Object Name"]] 48 | .drop_duplicates() 49 | .reset_index(drop=True) 50 | ) 51 | dfR_filt["Column Object"] = format_dax_object_name( 52 | dfR_filt["Table Name"], dfR_filt["Object Name"] 53 | ) 54 | 55 | dfC = fabric.list_columns(dataset=dataset_id, workspace=workspace_id) 56 | dfC["Column Object"] = format_dax_object_name(dfC["Table Name"], dfC["Column Name"]) 57 | 58 | df = dfC[~(dfC["Column Object"].isin(dfR_filt["Column Object"].values))] 59 | df = df.drop("Column Object", axis=1) 60 | 61 | return df 62 | 63 | 64 | def _list_all_report_semantic_model_objects( 65 | dataset: str | UUID, workspace: Optional[str | UUID] = None 66 | ) -> pd.DataFrame: 67 | """ 68 | Shows a unique list of all semantic model objects (columns, measures, hierarchies) which are used in all reports which leverage the semantic model. 69 | Note: As with all functions which rely on the ReportWrapper, this function requires the report to be in the 'PBIR' format. 70 | 71 | Parameters 72 | ---------- 73 | dataset : str | uuid.UUID 74 | Name or ID of the semantic model. 75 | workspace : str | uuid.UUID, default=None 76 | The Fabric workspace name or ID. 77 | Defaults to None which resolves to the workspace of the attached lakehouse 78 | or if no lakehouse attached, resolves to the workspace of the notebook. 79 | 80 | Returns 81 | ------- 82 | pandas.DataFrame 83 | A pandas dataframe. 
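        One row per semantic model object (column, measure or hierarchy) referenced in a related report, with 'Report Name' and 'Report Workspace' columns identifying the report in which the object is used.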
84 | """ 85 | 86 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 87 | (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id) 88 | 89 | dfR = list_reports_using_semantic_model(dataset=dataset_id, workspace=workspace_id) 90 | dfs = [] 91 | 92 | for _, r in dfR.iterrows(): 93 | report_name = r["Report Name"] 94 | report_workspace = r["Report Workspace Name"] 95 | 96 | rpt = ReportWrapper(report=report_name, workspace=report_workspace) 97 | 98 | new_data = rpt._list_all_semantic_model_objects() 99 | new_data["Report Name"] = report_name 100 | new_data["Report Workspace"] = report_workspace 101 | dfs.append(new_data) 102 | 103 | df = pd.concat(dfs, ignore_index=True) 104 | 105 | colName = "Report Name" 106 | df.insert(2, colName, df.pop(colName)) 107 | colName = "Report Workspace" 108 | df.insert(3, colName, df.pop(colName)) 109 | 110 | return df 111 | -------------------------------------------------------------------------------- /notebooks/Semantic Model Refresh.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"markdown","id":"5c27dfd1-4fe0-4a97-92e6-ddf78889aa93","metadata":{"nteract":{"transient":{"deleting":false}}},"source":["### Install the latest .whl package\n","\n","Check [here](https://pypi.org/project/semantic-link-labs/) to see the latest version."]},{"cell_type":"code","execution_count":null,"id":"d5cae9db-cef9-48a8-a351-9c5fcc99645c","metadata":{"jupyter":{"outputs_hidden":true,"source_hidden":false},"nteract":{"transient":{"deleting":false}}},"outputs":[],"source":["%pip install semantic-link-labs"]},{"cell_type":"markdown","id":"b195eae8","metadata":{},"source":["### Import the library and set the initial parameters"]},{"cell_type":"code","execution_count":null,"id":"1344e286","metadata":{},"outputs":[],"source":["import sempy.fabric as fabric\n","import sempy_labs as labs\n","dataset = '' # Enter your dataset name\n","workspace = None # Enter your workspace name (if set to None it will use the workspace in which the notebook is running)"]},{"cell_type":"markdown","id":"5a3fe6e8-b8aa-4447-812b-7931831e07fe","metadata":{"nteract":{"transient":{"deleting":false}}},"source":["### Refresh a semantic model"]},{"cell_type":"code","execution_count":null,"id":"9e349954","metadata":{},"outputs":[],"source":["labs.refresh_semantic_model(dataset=dataset, workspace=workspace)"]},{"cell_type":"markdown","id":"65db8187","metadata":{},"source":["#### Visualize the refresh of a semantic model\n","The resulting dataframe returns the trace logs capturing the details of the refresh operation"]},{"cell_type":"code","execution_count":null,"id":"24de391e","metadata":{},"outputs":[],"source":["df = labs.refresh_semantic_model(dataset=dataset, workspace=workspace, visualize=True)"]},{"cell_type":"markdown","id":"113b04a7","metadata":{},"source":["#### Refresh specific tables"]},{"cell_type":"code","execution_count":null,"id":"b4e1296b","metadata":{},"outputs":[],"source":["labs.refresh_semantic_model(dataset=dataset, workspace=workspace, tables=['Sales', 'Geography'])"]},{"cell_type":"markdown","id":"7f94b13a","metadata":{},"source":["#### Refresh specific partitions"]},{"cell_type":"code","execution_count":null,"id":"17565d35","metadata":{},"outputs":[],"source":["labs.refresh_semantic_model(dataset=dataset, workspace=workspace, partitions=[\"'Sales'[Sales FY20]\", \"'Sales'[Sales FY21]\"])"]},{"cell_type":"markdown","id":"aab5ca7c","metadata":{},"source":["#### Refresh a 
combination of tables and partitions"]},{"cell_type":"code","execution_count":null,"id":"e5818bd1","metadata":{},"outputs":[],"source":["labs.refresh_semantic_model(dataset=dataset, workspace=workspace, tables=['Geography', 'Calendar'], partitions=[\"'Sales'[Sales FY20]\", \"'Sales'[Sales FY21]\"])"]},{"cell_type":"markdown","id":"7f7074ea","metadata":{},"source":["#### Clear the values of a table"]},{"cell_type":"code","execution_count":null,"id":"3b1eb772","metadata":{},"outputs":[],"source":["labs.refresh_semantic_model(dataset=dataset, workspace=workspace, refresh_type='clearValues')"]},{"cell_type":"markdown","id":"29afede1","metadata":{},"source":["### View semantic model refreshes"]},{"cell_type":"code","execution_count":null,"id":"95c52cc0","metadata":{},"outputs":[],"source":["fabric.list_refresh_requests(dataset=dataset, workspace=workspace)"]},{"cell_type":"markdown","id":"fa7c525c","metadata":{},"source":["### Cancel a semantic model refresh"]},{"cell_type":"code","execution_count":null,"id":"5bb6f79f","metadata":{},"outputs":[],"source":["labs.cancel_dataset_refresh(dataset=dataset, workspace=workspace)"]},{"cell_type":"code","execution_count":null,"id":"acd34900","metadata":{},"outputs":[],"source":["# Specify the request_id based on a value from list_refresh_requests\n","labs.cancel_dataset_refresh(dataset=dataset, workspace=workspace, request_id='')"]},{"cell_type":"markdown","id":"26300103","metadata":{},"source":["### View a semantic model's refresh history"]},{"cell_type":"code","execution_count":null,"id":"60cec3f8","metadata":{},"outputs":[],"source":["labs.get_semantic_model_refresh_history(dataset=dataset, workspace=workspace)"]},{"cell_type":"markdown","id":"538d5f1e","metadata":{},"source":["### View details of a specific semantic model refresh"]},{"cell_type":"code","execution_count":null,"id":"c043f9bc","metadata":{},"outputs":[],"source":["labs.get_semantic_model_refresh_history(dataset=dataset, workspace=workspace, request_id='')"]}],"metadata":{"kernel_info":{"name":"synapse_pyspark"},"kernelspec":{"display_name":"Synapse PySpark","language":"Python","name":"synapse_pyspark"},"language_info":{"name":"python"},"microsoft":{"language":"python"},"nteract":{"version":"nteract-front-end@1.0.0"},"spark_compute":{"compute_id":"/trident/default"},"synapse_widget":{"state":{},"version":"0.1"},"widgets":{}},"nbformat":4,"nbformat_minor":5} 2 | -------------------------------------------------------------------------------- /src/sempy_labs/admin/_apps.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Optional 3 | from sempy_labs._helper_functions import ( 4 | _build_url, 5 | _base_api, 6 | _create_dataframe, 7 | _update_dataframe_datatypes, 8 | _is_valid_uuid, 9 | ) 10 | from uuid import UUID 11 | import sempy_labs._icons as icons 12 | 13 | 14 | def list_apps( 15 | top: Optional[int] = 1000, 16 | skip: Optional[int] = None, 17 | ) -> pd.DataFrame: 18 | """ 19 | Shows a list of apps in the organization. 20 | 21 | This is a wrapper function for the following API: `Admin - Apps GetAppsAsAdmin `_. 22 | 23 | Service Principal Authentication is supported (see `here `_ for examples). 24 | 25 | Parameters 26 | ---------- 27 | top : int, default=1000 28 | Returns only the first n results. 29 | skip : int, default=None 30 | Skips the first n results. 31 | 32 | Returns 33 | ------- 34 | pandas.DataFrame 35 | A pandas dataframe showing a list of apps in the organization. 
36 | """ 37 | 38 | columns = { 39 | "App Name": "string", 40 | "App Id": "string", 41 | "Description": "string", 42 | "Published By": "string", 43 | "Last Update": "datetime_coerce", 44 | } 45 | 46 | df = _create_dataframe(columns=columns) 47 | 48 | params = {} 49 | url = "/v1.0/myorg/admin/apps" 50 | 51 | params["$top"] = top 52 | 53 | if skip is not None: 54 | params["$skip"] = skip 55 | 56 | url = _build_url(url, params) 57 | response = _base_api(request=url, client="fabric_sp") 58 | 59 | rows = [] 60 | for v in response.json().get("value", []): 61 | rows.append( 62 | { 63 | "App Name": v.get("name"), 64 | "App Id": v.get("id"), 65 | "Description": v.get("description"), 66 | "Published By": v.get("publishedBy"), 67 | "Last Update": v.get("lastUpdate"), 68 | } 69 | ) 70 | 71 | if rows: 72 | df = pd.DataFrame(rows, columns=list(columns.keys())) 73 | 74 | _update_dataframe_datatypes(dataframe=df, column_map=columns) 75 | 76 | return df 77 | 78 | 79 | def _resolve_app_id(app: str | UUID) -> str: 80 | if _is_valid_uuid(app): 81 | return app 82 | else: 83 | df = list_apps() 84 | df_filt = df[df["App Name"] == app] 85 | if df_filt.empty: 86 | raise ValueError(f"{icons.red_dot} The '{app}' app does not exist.") 87 | return df_filt["App Id"].iloc[0] 88 | 89 | 90 | def list_app_users(app: str | UUID) -> pd.DataFrame: 91 | """ 92 | Shows a list of users that have access to the specified app. 93 | 94 | This is a wrapper function for the following API: `Admin - Apps GetAppUsersAsAdmin `_. 95 | 96 | Service Principal Authentication is supported (see `here `_ for examples). 97 | 98 | Parameters 99 | ---------- 100 | app : str | uuid.UUID 101 | The name or ID of the app. 102 | 103 | Returns 104 | ------- 105 | pandas.DataFrame 106 | A pandas dataframe showing a list of users that have access to the specified app. 107 | """ 108 | 109 | app_id = _resolve_app_id(app) 110 | 111 | columns = { 112 | "User Name": "string", 113 | "Email Address": "string", 114 | "App User Access Right": "string", 115 | "Identifier": "string", 116 | "Graph Id": "string", 117 | "Principal Type": "string", 118 | } 119 | 120 | df = _create_dataframe(columns=columns) 121 | 122 | url = f"/v1.0/myorg/admin/apps/{app_id}/users" 123 | response = _base_api(request=url, client="fabric_sp") 124 | 125 | rows = [] 126 | for v in response.json().get("value", []): 127 | rows.append( 128 | { 129 | "User Name": v.get("displayName"), 130 | "Email Address": v.get("emailAddress"), 131 | "App User Access Right": v.get("appUserAccessRight"), 132 | "Identifier": v.get("identifier"), 133 | "Graph Id": v.get("graphId"), 134 | "Principal Type": v.get("principalType"), 135 | } 136 | ) 137 | 138 | if rows: 139 | df = pd.DataFrame(rows, columns=list(columns.keys())) 140 | 141 | _update_dataframe_datatypes(dataframe=df, column_map=columns) 142 | 143 | return df 144 | -------------------------------------------------------------------------------- /src/sempy_labs/_kql_databases.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Optional 3 | from sempy_labs._helper_functions import ( 4 | resolve_workspace_name_and_id, 5 | _base_api, 6 | _create_dataframe, 7 | delete_item, 8 | create_item, 9 | ) 10 | from uuid import UUID 11 | import sempy_labs._icons as icons 12 | 13 | 14 | def list_kql_databases(workspace: Optional[str | UUID] = None) -> pd.DataFrame: 15 | """ 16 | Shows the KQL databases within a workspace. 
17 | 
18 |     This is a wrapper function for the following API: `Items - List KQL Databases `_.
19 | 
20 |     Service Principal Authentication is supported (see `here `_ for examples).
21 | 
22 |     Parameters
23 |     ----------
24 |     workspace : str | uuid.UUID, default=None
25 |         The Fabric workspace name or ID.
26 |         Defaults to None which resolves to the workspace of the attached lakehouse
27 |         or if no lakehouse attached, resolves to the workspace of the notebook.
28 | 
29 |     Returns
30 |     -------
31 |     pandas.DataFrame
32 |         A pandas dataframe showing the KQL databases within a workspace.
33 |     """
34 | 
35 |     columns = {
36 |         "KQL Database Name": "string",
37 |         "KQL Database Id": "string",
38 |         "Description": "string",
39 |         "Parent Eventhouse Item Id": "string",
40 |         "Query Service URI": "string",
41 |         "Ingestion Service URI": "string",
42 |         "Database Type": "string",
43 |     }
44 |     df = _create_dataframe(columns=columns)
45 | 
46 |     (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
47 | 
48 |     responses = _base_api(
49 |         request=f"v1/workspaces/{workspace_id}/kqlDatabases",
50 |         uses_pagination=True,
51 |         client="fabric_sp",
52 |     )
53 | 
54 |     for r in responses:
55 |         for v in r.get("value", []):
56 |             prop = v.get("properties", {})
57 | 
58 |             new_data = {
59 |                 "KQL Database Name": v.get("displayName"),
60 |                 "KQL Database Id": v.get("id"),
61 |                 "Description": v.get("description"),
62 |                 "Parent Eventhouse Item Id": prop.get("parentEventhouseItemId"),
63 |                 "Query Service URI": prop.get("queryServiceUri"),
64 |                 "Ingestion Service URI": prop.get("ingestionServiceUri"),
65 |                 "Database Type": prop.get("databaseType"),
66 |             }
67 |             df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
68 | 
69 |     return df
70 | 
71 | 
72 | def _create_kql_database(
73 |     name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None
74 | ):
75 |     """
76 |     Creates a KQL database.
77 | 
78 |     This is a wrapper function for the following API: `Items - Create KQL Database `_.
79 | 
80 |     Parameters
81 |     ----------
82 |     name: str
83 |         Name of the KQL database.
84 |     description : str, default=None
85 |         A description of the KQL database.
86 |     workspace : str | uuid.UUID, default=None
87 |         The Fabric workspace name or ID.
88 |         Defaults to None which resolves to the workspace of the attached lakehouse
89 |         or if no lakehouse attached, resolves to the workspace of the notebook.
90 |     """
91 | 
92 |     create_item(
93 |         name=name, description=description, type="KQLDatabase", workspace=workspace
94 |     )
95 | 
96 | 
97 | def delete_kql_database(
98 |     kql_database: str | UUID,
99 |     workspace: Optional[str | UUID] = None,
100 |     **kwargs,
101 | ):
102 |     """
103 |     Deletes a KQL database.
104 | 
105 |     This is a wrapper function for the following API: `Items - Delete KQL Database `_.
106 | 
107 |     Parameters
108 |     ----------
109 |     kql_database: str | uuid.UUID
110 |         Name or ID of the KQL database.
111 |     workspace : str | uuid.UUID, default=None
112 |         The Fabric workspace name or ID.
113 |         Defaults to None which resolves to the workspace of the attached lakehouse
114 |         or if no lakehouse attached, resolves to the workspace of the notebook.
115 |     """
116 | 
117 |     if "name" in kwargs:
118 |         kql_database = kwargs["name"]
119 |         print(
120 |             f"{icons.warning} The 'name' parameter is deprecated. Please use 'kql_database' instead."
121 | ) 122 | 123 | delete_item(item=kql_database, type="KQLDatabase", workspace=workspace) 124 | -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/report.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/report/1.1.0/schema.json", 3 | "themeCollection": { 4 | "baseTheme": { 5 | "name": "CY24SU06", 6 | "reportVersionAtImport": "5.56", 7 | "type": "SharedResources" 8 | } 9 | }, 10 | "layoutOptimization": "None", 11 | "filterConfig": { 12 | "filters": [ 13 | { 14 | "name": "5b2185023b97e36774cc", 15 | "ordinal": 0, 16 | "field": { 17 | "Column": { 18 | "Expression": { 19 | "SourceRef": { 20 | "Entity": "BPAResults" 21 | } 22 | }, 23 | "Property": "RunId" 24 | } 25 | }, 26 | "howCreated": "User", 27 | "objects": { 28 | "general": [ 29 | { 30 | "properties": {} 31 | } 32 | ] 33 | } 34 | }, 35 | { 36 | "name": "415731532a7ccb2104eb", 37 | "ordinal": 1, 38 | "field": { 39 | "Column": { 40 | "Expression": { 41 | "SourceRef": { 42 | "Entity": "BPAResults" 43 | } 44 | }, 45 | "Property": "Workspace" 46 | } 47 | }, 48 | "howCreated": "User" 49 | }, 50 | { 51 | "name": "3f38df1aeb48209d5d17", 52 | "ordinal": 2, 53 | "field": { 54 | "Column": { 55 | "Expression": { 56 | "SourceRef": { 57 | "Entity": "BPAResults" 58 | } 59 | }, 60 | "Property": "Model" 61 | } 62 | }, 63 | "howCreated": "User" 64 | }, 65 | { 66 | "name": "39a759a0bca91d4c0e20", 67 | "ordinal": 3, 68 | "field": { 69 | "Column": { 70 | "Expression": { 71 | "SourceRef": { 72 | "Entity": "BPAResults" 73 | } 74 | }, 75 | "Property": "Model Owner" 76 | } 77 | }, 78 | "howCreated": "User" 79 | }, 80 | { 81 | "name": "2dd3fa2832177488d851", 82 | "ordinal": 4, 83 | "field": { 84 | "Column": { 85 | "Expression": { 86 | "SourceRef": { 87 | "Entity": "BPAResults" 88 | } 89 | }, 90 | "Property": "Category" 91 | } 92 | }, 93 | "howCreated": "User" 94 | }, 95 | { 96 | "name": "5c24dae01050ee86d5d0", 97 | "ordinal": 5, 98 | "field": { 99 | "Column": { 100 | "Expression": { 101 | "SourceRef": { 102 | "Entity": "BPAResults" 103 | } 104 | }, 105 | "Property": "Severity" 106 | } 107 | }, 108 | "howCreated": "User" 109 | }, 110 | { 111 | "name": "e7fda6595b95a0e13898", 112 | "ordinal": 6, 113 | "field": { 114 | "Column": { 115 | "Expression": { 116 | "SourceRef": { 117 | "Entity": "BPAResults" 118 | } 119 | }, 120 | "Property": "Rule Name" 121 | } 122 | }, 123 | "howCreated": "User" 124 | } 125 | ], 126 | "filterSortOrder": "Custom" 127 | }, 128 | "objects": { 129 | "section": [ 130 | { 131 | "properties": { 132 | "verticalAlignment": { 133 | "expr": { 134 | "Literal": { 135 | "Value": "'Top'" 136 | } 137 | } 138 | } 139 | } 140 | } 141 | ], 142 | "outspacePane": [ 143 | { 144 | "properties": { 145 | "expanded": { 146 | "expr": { 147 | "Literal": { 148 | "Value": "true" 149 | } 150 | } 151 | } 152 | } 153 | } 154 | ] 155 | }, 156 | "resourcePackages": [ 157 | { 158 | "name": "SharedResources", 159 | "type": "SharedResources", 160 | "items": [ 161 | { 162 | "name": "CY24SU06", 163 | "path": "BaseThemes/CY24SU06.json", 164 | "type": "BaseTheme" 165 | } 166 | ] 167 | } 168 | ], 169 | "settings": { 170 | "useStylableVisualContainerHeader": true, 171 | "defaultDrillFilterOtherVisuals": true, 172 | "allowChangeFilterTypes": true, 173 | "useEnhancedTooltips": true, 174 | "useDefaultAggregateDisplayName": true 175 | } 176 | } 
--------------------------------------------------------------------------------
/src/sempy_labs/admin/_scanner.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, List
2 | from uuid import UUID
3 | from sempy.fabric.exceptions import FabricHTTPException
4 | import time
5 | import sempy_labs._icons as icons
6 | from sempy_labs.admin._basic_functions import list_workspaces
7 | from sempy._utils._log import log
8 | from sempy_labs._helper_functions import (
9 |     _base_api,
10 |     _is_valid_uuid,
11 |     _build_url,
12 |     resolve_workspace_name,
13 | )
14 | 
15 | 
16 | @log
17 | def scan_workspaces(
18 |     data_source_details: bool = False,
19 |     dataset_schema: bool = False,
20 |     dataset_expressions: bool = False,
21 |     lineage: bool = False,
22 |     artifact_users: bool = False,
23 |     workspace: Optional[str | List[str] | UUID | List[UUID]] = None,
24 | ) -> dict:
25 |     """
26 |     Initiates a scan of the specified workspaces, waits for it to complete, and returns the scan result.
27 | 
28 |     This is a wrapper function for the following APIs:
29 |         `Admin - WorkspaceInfo PostWorkspaceInfo `_.
30 |         `Admin - WorkspaceInfo GetScanStatus `_.
31 |         `Admin - WorkspaceInfo GetScanResult `_.
32 | 
33 |     Service Principal Authentication is supported (see `here `_ for examples).
34 | 
35 |     Parameters
36 |     ----------
37 |     data_source_details : bool, default=False
38 |         Whether to return data source details.
39 |     dataset_schema : bool, default=False
40 |         Whether to return dataset schema (tables, columns and measures). If you set this parameter to true, you must fully enable metadata scanning in order for data to be returned. For more information, see Enable tenant settings for metadata scanning.
41 |     dataset_expressions : bool, default=False
42 |         Whether to return dataset expressions (DAX and Mashup queries). If you set this parameter to true, you must fully enable metadata scanning in order for data to be returned. For more information, see Enable tenant settings for metadata scanning.
43 |     lineage : bool, default=False
44 |         Whether to return lineage info (upstream dataflows, tiles, data source IDs).
45 |     artifact_users : bool, default=False
46 |         Whether to return user details for a Power BI item (such as a report or a dashboard).
47 |     workspace : str | List[str] | UUID | List[UUID], default=None
48 |         The workspace name(s) or ID(s) to be scanned; names are resolved to IDs, as the API accepts only IDs in GUID format. A maximum of 100 workspaces is supported per scan.
49 | 
50 |     Returns
51 |     -------
52 |     dict
53 |         A json object with the scan result.
54 |     """
55 | 
56 |     if workspace is None:
57 |         workspace = resolve_workspace_name()
58 | 
59 |     if isinstance(workspace, str):
60 |         workspace = [workspace]
61 | 
62 |     if len(workspace) > 100:
63 |         print(
64 |             f"{icons.yellow_dot} More than 100 workspaces were provided. Truncating to the first 100."
65 | ) 66 | workspace = workspace[:100] 67 | 68 | workspace_list = [] 69 | 70 | for w in workspace: 71 | if _is_valid_uuid(w): 72 | workspace_list.append(w) 73 | else: 74 | dfW = list_workspaces(workspace=w) 75 | workspace_list = ( 76 | workspace_list + dfW[dfW["Name"].isin(workspace)]["Id"].tolist() 77 | ) 78 | 79 | url = "/v1.0/myorg/admin/workspaces/getInfo" 80 | params = {} 81 | params["lineage"] = lineage 82 | params["datasourceDetails"] = data_source_details 83 | params["datasetSchema"] = dataset_schema 84 | params["datasetExpressions"] = dataset_expressions 85 | params["getArtifactUsers"] = artifact_users 86 | 87 | url = _build_url(url, params) 88 | 89 | payload = {"workspaces": workspace_list} 90 | 91 | response = _base_api( 92 | request=url, 93 | method="post", 94 | payload=payload, 95 | status_codes=202, 96 | client="fabric_sp", 97 | ) 98 | 99 | scan_id = response.json()["id"] 100 | scan_status = response.json().get("status") 101 | 102 | while scan_status not in ["Succeeded", "Failed"]: 103 | time.sleep(1) 104 | response = _base_api( 105 | request=f"/v1.0/myorg/admin/workspaces/scanStatus/{scan_id}", 106 | client="fabric_sp", 107 | ) 108 | scan_status = response.json().get("status") 109 | 110 | if scan_status == "Failed": 111 | raise FabricHTTPException(response) 112 | 113 | response = _base_api( 114 | request=f"/v1.0/myorg/admin/workspaces/scanResult/{scan_id}", 115 | client="fabric_sp", 116 | ) 117 | 118 | return response.json() 119 | -------------------------------------------------------------------------------- /src/sempy_labs/_semantic_models.py: -------------------------------------------------------------------------------- 1 | from uuid import UUID 2 | from typing import Optional 3 | import pandas as pd 4 | from sempy_labs._helper_functions import ( 5 | _create_dataframe, 6 | _base_api, 7 | _update_dataframe_datatypes, 8 | resolve_workspace_name_and_id, 9 | resolve_dataset_name_and_id, 10 | delete_item, 11 | ) 12 | import sempy_labs._icons as icons 13 | 14 | 15 | def get_semantic_model_refresh_schedule( 16 | dataset: str | UUID, workspace: Optional[str | UUID] = None 17 | ) -> pd.DataFrame: 18 | """ 19 | Gets the refresh schedule for the specified dataset from the specified workspace. 20 | 21 | Parameters 22 | ---------- 23 | dataset : str | uuid.UUID 24 | Name or ID of the semantic model. 25 | workspace : str | uuid.UUID, default=None 26 | The workspace name or ID. 27 | Defaults to None which resolves to the workspace of the attached lakehouse 28 | or if no lakehouse attached, resolves to the workspace of the notebook. 29 | 30 | Returns 31 | ------- 32 | pandas.DataFrame 33 | Shows the refresh schedule for the specified dataset from the specified workspace. 
34 | """ 35 | 36 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 37 | (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace) 38 | 39 | columns = { 40 | "Days": "str", 41 | "Times": "str", 42 | "Enabled": "bool", 43 | "Local Time Zone Id": "str", 44 | "Notify Option": "str", 45 | } 46 | 47 | column_map = { 48 | "days": "Days", 49 | "times": "Times", 50 | "enabled": "Enabled", 51 | "localTimeZoneId": "Local Time Zone Id", 52 | "notifyOption": "Notify Option", 53 | } 54 | 55 | df = _create_dataframe(columns) 56 | 57 | result = _base_api( 58 | request=f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule" 59 | ).json() 60 | 61 | df = ( 62 | pd.json_normalize(result) 63 | .drop(columns=["@odata.context"], errors="ignore") 64 | .rename(columns=column_map) 65 | ) 66 | 67 | _update_dataframe_datatypes(dataframe=df, column_map=columns) 68 | 69 | return df 70 | 71 | 72 | def enable_semantic_model_scheduled_refresh( 73 | dataset: str | UUID, 74 | workspace: Optional[str | UUID] = None, 75 | enable: bool = True, 76 | ): 77 | """ 78 | Enables the scheduled refresh for the specified dataset from the specified workspace. 79 | 80 | Parameters 81 | ---------- 82 | dataset : str | uuid.UUID 83 | Name or ID of the semantic model. 84 | workspace : str | uuid.UUID, default=None 85 | The workspace name or ID. 86 | Defaults to None which resolves to the workspace of the attached lakehouse 87 | or if no lakehouse attached, resolves to the workspace of the notebook. 88 | enable : bool, default=True 89 | If True, enables the scheduled refresh. 90 | If False, disables the scheduled refresh. 91 | """ 92 | 93 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 94 | (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace) 95 | 96 | df = get_semantic_model_refresh_schedule(dataset=dataset, workspace=workspace) 97 | status = df["Enabled"].iloc[0] 98 | 99 | if enable and status: 100 | print( 101 | f"{icons.info} Scheduled refresh for the '{dataset_name}' within the '{workspace_name}' workspace is already enabled." 102 | ) 103 | elif not enable and not status: 104 | print( 105 | f"{icons.info} Scheduled refresh for the '{dataset_name}' within the '{workspace_name}' workspace is already disabled." 106 | ) 107 | else: 108 | payload = {"value": {"enabled": enable}} 109 | 110 | _base_api( 111 | request=f"/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule", 112 | method="patch", 113 | payload=payload, 114 | ) 115 | 116 | print( 117 | f"{icons.green_dot} Scheduled refresh for the '{dataset_name}' within the '{workspace_name}' workspace has been enabled." 118 | ) 119 | 120 | 121 | def delete_semantic_model(dataset: str | UUID, workspace: Optional[str | UUID] = None): 122 | """ 123 | Deletes a semantic model. 124 | 125 | This is a wrapper function for the following API: `Items - Delete Semantic Model `_. 126 | 127 | Parameters 128 | ---------- 129 | dataset: str | uuid.UUID 130 | Name or ID of the semantic model. 131 | workspace : str | uuid.UUID, default=None 132 | The Fabric workspace name or ID. 133 | Defaults to None which resolves to the workspace of the attached lakehouse 134 | or if no lakehouse attached, resolves to the workspace of the notebook. 
135 | """ 136 | 137 | delete_item(item=dataset, type="SemanticModel", workspace=workspace) 138 | -------------------------------------------------------------------------------- /src/sempy_labs/admin/_users.py: -------------------------------------------------------------------------------- 1 | from sempy_labs._helper_functions import ( 2 | _base_api, 3 | _create_dataframe, 4 | _update_dataframe_datatypes, 5 | ) 6 | from uuid import UUID 7 | import pandas as pd 8 | 9 | 10 | def list_access_entities( 11 | user_email_address: str, 12 | ) -> pd.DataFrame: 13 | """ 14 | Shows a list of permission details for Fabric and Power BI items the specified user can access. 15 | 16 | This is a wrapper function for the following API: `Users - List Access Entities `_. 17 | 18 | Service Principal Authentication is supported (see `here `_ for examples). 19 | 20 | Parameters 21 | ---------- 22 | user_email_address : str 23 | The user's email address. 24 | 25 | Returns 26 | ------- 27 | pandas.DataFrame 28 | A pandas dataframe showing a list of permission details for Fabric and Power BI items the specified user can access. 29 | """ 30 | 31 | columns = { 32 | "Item Id": "string", 33 | "Item Name": "string", 34 | "Item Type": "string", 35 | "Permissions": "string", 36 | "Additional Permissions": "string", 37 | } 38 | df = _create_dataframe(columns=columns) 39 | 40 | responses = _base_api( 41 | request=f"/v1/admin/users/{user_email_address}/access", 42 | client="fabric_sp", 43 | uses_pagination=True, 44 | ) 45 | 46 | for r in responses: 47 | for v in r.get("accessEntities", []): 48 | new_data = { 49 | "Item Id": v.get("id"), 50 | "Item Name": v.get("displayName"), 51 | "Item Type": v.get("itemAccessDetails", {}).get("type"), 52 | "Permissions": v.get("itemAccessDetails", {}).get("permissions"), 53 | "Additional Permissions": v.get("itemAccessDetails", {}).get( 54 | "additionalPermissions" 55 | ), 56 | } 57 | df = pd.concat([df, pd.DataFrame([new_data])], ignore_index=True) 58 | 59 | return df 60 | 61 | 62 | def list_user_subscriptions(user: str | UUID) -> pd.DataFrame: 63 | """ 64 | Shows a list of subscriptions for the specified user. This is a preview API call. 65 | 66 | This is a wrapper function for the following API: `Admin - Users GetUserSubscriptionsAsAdmin `_. 67 | 68 | Service Principal Authentication is supported (see `here `_ for examples). 69 | 70 | Parameters 71 | ---------- 72 | user : str | uuid.UUID 73 | The graph ID or user principal name (UPN) of the user. 74 | 75 | Returns 76 | ------- 77 | pandas.DataFrame 78 | A pandas dataframe showing a list of subscriptions for the specified user. This is a preview API call. 
79 | """ 80 | 81 | columns = { 82 | "Subscription Id": "string", 83 | "Title": "string", 84 | "Artifact Id": "string", 85 | "Artifact Name": "string", 86 | "Sub Artifact Name": "string", 87 | "Artifact Type": "string", 88 | "Is Enabled": "bool", 89 | "Frequency": "string", 90 | "Start Date": "datetime", 91 | "End Date": "string", 92 | "Link To Content": "bool", 93 | "Preview Image": "bool", 94 | "Attachment Format": "string", 95 | "Users": "string", 96 | } 97 | 98 | df = _create_dataframe(columns=columns) 99 | 100 | responses = _base_api( 101 | request=f"/v1.0/myorg/admin/users/{user}/subscriptions", 102 | client="fabric_sp", 103 | uses_pagination=True, 104 | ) 105 | 106 | rows = [] 107 | for r in responses: 108 | for v in r.get("subscriptionEntities", []): 109 | rows.append( 110 | { 111 | "Subscription Id": v.get("id"), 112 | "Title": v.get("title"), 113 | "Artifact Id": v.get("artifactId"), 114 | "Artifact Name": v.get("artifactDisplayName"), 115 | "Sub Artifact Name": v.get("subArtifactDisplayName"), 116 | "Artifact Type": v.get("artifactType"), 117 | "Is Enabled": v.get("isEnabled"), 118 | "Frequency": v.get("frequency"), 119 | "Start Date": v.get("startDate"), 120 | "End Date": v.get("endDate"), 121 | "Link To Content": v.get("linkToContent"), 122 | "Preview Image": v.get("previewImage"), 123 | "Attachment Format": v.get("attachmentFormat"), 124 | "Users": str(v.get("users")), 125 | } 126 | ) 127 | 128 | if rows: 129 | df = pd.DataFrame(rows, columns=list(columns.keys())) 130 | 131 | _update_dataframe_datatypes(dataframe=df, column_map=columns) 132 | 133 | return df 134 | -------------------------------------------------------------------------------- /src/sempy_labs/_workloads.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Optional 3 | import sempy_labs._icons as icons 4 | from sempy_labs._helper_functions import ( 5 | _update_dataframe_datatypes, 6 | _base_api, 7 | _create_dataframe, 8 | ) 9 | from uuid import UUID 10 | 11 | 12 | def list_workloads(capacity: str | UUID, **kwargs) -> pd.DataFrame: 13 | """ 14 | Returns the current state of the specified capacity workloads. 15 | If a workload is enabled, the percentage of maximum memory that the workload can consume is also returned. 16 | 17 | This is a wrapper function for the following API: `Capacities - Get Workloads `_. 18 | 19 | Parameters 20 | ---------- 21 | capacity : str | uuid.UUID 22 | The capacity name or ID. 23 | 24 | Returns 25 | ------- 26 | pandas.DataFrame 27 | A pandas dataframe showing the current state of the specified capacity workloads. 28 | """ 29 | 30 | from sempy_labs._helper_functions import resolve_capacity_id 31 | 32 | if "capacity_name" in kwargs: 33 | capacity = kwargs["capacity_name"] 34 | print( 35 | f"{icons.warning} The 'capacity_name' parameter is deprecated. Please use 'capacity' instead." 
36 | ) 37 | 38 | columns = { 39 | "Workload Name": "string", 40 | "State": "string", 41 | "Max Memory Percentage Set By User": "int", 42 | } 43 | df = _create_dataframe(columns=columns) 44 | 45 | capacity_id = resolve_capacity_id(capacity=capacity) 46 | 47 | response = _base_api(request=f"/v1.0/myorg/capacities/{capacity_id}/Workloads") 48 | 49 | for v in response.json().get("value", []): 50 | new_data = { 51 | "Workload Name": v.get("name"), 52 | "State": v.get("state"), 53 | "Max Memory Percentage Set By User": v.get("maxMemoryPercentageSetByUser"), 54 | } 55 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 56 | 57 | _update_dataframe_datatypes(dataframe=df, column_map=columns) 58 | 59 | return df 60 | 61 | 62 | def patch_workload( 63 | capacity: str | UUID, 64 | workload_name: str, 65 | state: Optional[str] = None, 66 | max_memory_percentage: Optional[int] = None, 67 | **kwargs, 68 | ): 69 | """ 70 | Changes the state of a specific workload to Enabled or Disabled. 71 | When enabling a workload, specify the percentage of maximum memory that the workload can consume. 72 | 73 | This is a wrapper function for the following API: `Capacities - Patch Workload `_. 74 | 75 | Parameters 76 | ---------- 77 | capacity : str | uuid.UUID 78 | The capacity name or ID. 79 | workload_name : str 80 | The workload name. 81 | state : str, default=None 82 | The capacity workload state. 83 | max_memory_percentage : int, default=None 84 | The percentage of the maximum memory that a workload can consume (set by the user). 85 | """ 86 | 87 | from sempy_labs._helper_functions import resolve_capacity_id 88 | 89 | if "capacity_name" in kwargs: 90 | capacity = kwargs["capacity_name"] 91 | print( 92 | f"{icons.warning} The 'capacity_name' parameter is deprecated. Please use 'capacity' instead." 93 | ) 94 | 95 | capacity_id = resolve_capacity_id(capacity=capacity) 96 | 97 | states = ["Disabled", "Enabled", "Unsupported"] 98 | state = state.capitalize() if state is not None else None 99 | if state is not None and state not in states: 100 | raise ValueError( 101 | f"{icons.red_dot} Invalid 'state' parameter. Please choose from these options: {states}." 102 | ) 103 | if max_memory_percentage is not None and ( 104 | max_memory_percentage < 0 or max_memory_percentage > 100 105 | ): 106 | raise ValueError( 107 | f"{icons.red_dot} Invalid max memory percentage. Must be a value between 0-100." 108 | ) 109 | 110 | url = f"/v1.0/myorg/capacities/{capacity_id}/Workloads/{workload_name}" 111 | 112 | get_response = _base_api(request=url) 113 | get_json = get_response.json().get("value") 114 | current_state = get_json.get("state") 115 | current_max_memory = get_json.get("maxMemoryPercentageSetByUser") 116 | 117 | if current_state == state and str(current_max_memory) == str(max_memory_percentage): 118 | print( 119 | f"{icons.info} The current workload settings are the same as those specified in the parameters of this function. The workload has not been updated." 120 | ) 121 | return 122 | 123 | payload = {} 124 | if state is not None: 125 | payload["state"] = state 126 | else: 127 | payload["state"] = current_state 128 | if max_memory_percentage is not None: 129 | payload["maxMemoryPercentageSetByUser"] = max_memory_percentage 130 | else: 131 | payload["maxMemoryPercentageSetByUser"] = current_max_memory 132 | 133 | _base_api(request=url, method="patch", payload=payload) 134 | 135 | print( 136 | f"{icons.green_dot} The '{workload_name}' workload within the '{capacity}' capacity has been updated accordingly."
137 | ) 138 | -------------------------------------------------------------------------------- /src/sempy_labs/report/_report_bpa_rules.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | 4 | def report_bpa_rules() -> pd.DataFrame: 5 | """ 6 | Shows the default rules for the report BPA used by the run_report_bpa function. 7 | 8 | Returns 9 | ------- 10 | pandas.DataFrame 11 | A pandas dataframe containing the default rules for the run_report_bpa function. 12 | """ 13 | 14 | rules = pd.DataFrame( 15 | [ 16 | ( 17 | "Error Prevention", 18 | "Semantic Model", 19 | "Error", 20 | "Fix report objects which reference invalid semantic model objects", 21 | lambda df: df["Valid Semantic Model Object"] == False, 22 | "This rule highlights visuals, report filters, page filters or visual filters which reference an invalid semantic model object (i.e. Measure/Column/Hierarchy).", 23 | ), 24 | ( 25 | "Performance", 26 | "Custom Visual", 27 | "Warning", 28 | "Remove custom visuals which are not used in the report", 29 | lambda df: df["Used in Report"] == False, 30 | "Removing unused custom visuals from a report may lead to faster report performance.", 31 | ), 32 | ( 33 | "Performance", 34 | "Page", 35 | "Warning", 36 | "Reduce the number of visible visuals on the page", 37 | lambda df: df["Visible Visual Count"] > 15, 38 | "Reducing the number of visible visuals on a page will lead to faster report performance. This rule flags pages with over 15 visible visuals.", 39 | ), 40 | ( 41 | "Performance", 42 | "Visual", 43 | "Warning", 44 | "Reduce the number of objects within visuals", 45 | lambda df: df["Visual Object Count"] > 5, 46 | "Reducing the number of objects (i.e. measures, columns) which are used in a visual will lead to faster report performance.", 47 | ), 48 | ( 49 | "Performance", 50 | ["Report Filter", "Page Filter", "Visual Filter"], 51 | "Warning", 52 | "Reduce usage of filters on measures", 53 | lambda df: df["Object Type"] == "Measure", 54 | "Measure filters may cause performance degradation, especially against a large semantic model.", 55 | ), 56 | ( 57 | "Performance", 58 | "Visual", 59 | "Warning", 60 | "Avoid setting 'Show items with no data' on columns", 61 | lambda df: df["Show Items With No Data"], 62 | "This setting will show all column values for all columns in the visual which may lead to performance degradation.", 63 | "https://learn.microsoft.com/power-bi/create-reports/desktop-show-items-no-data", 64 | ), 65 | ( 66 | "Performance", 67 | "Page", 68 | "Warning", 69 | "Avoid tall report pages with vertical scrolling", 70 | lambda df: df["Height"] > 720, 71 | "Report pages are designed to be in a single view and not scroll.
A page that requires scrolling is an indicator that it contains too many elements.", 72 | ), 73 | ( 74 | "Performance", 75 | "Custom Visual", 76 | "Info", 77 | "Reduce usage of custom visuals", 78 | lambda df: df["Custom Visual Name"] == df["Custom Visual Name"], 79 | "Using custom visuals may lead to performance degradation.", 80 | ), 81 | ( 82 | "Maintenance", 83 | "Report Level Measure", 84 | "Info", 85 | "Move report-level measures into the semantic model.", 86 | lambda df: df["Measure Name"] == df["Measure Name"], 87 | "It is a best practice to keep measures defined in the semantic model and not in the report.", 88 | ), 89 | ( 90 | "Performance", 91 | ["Report Filter", "Page Filter", "Visual Filter"], 92 | "Info", 93 | "Reduce usage of TopN filtering within visuals", 94 | lambda df: df["Type"] == "TopN", 95 | "TopN filtering may cause performance degradation, especially against a high cardinality column.", 96 | ), 97 | # ('Performance', 'Custom Visual', 'Warning', "Set 'Edit Interactions' for non-data visuals to 'none'", 98 | # lambda df: df['Custom Visual Name'] == df['Custom Visual Name'], 99 | # "Setting 'Edit Interactions' to 'None' for non-data visuals may improve performance (since these visuals do not necessitate interactions between other visuals). 'Edit Interactions' may be found in the 'Format' tab of the ribbon in Power BI Desktop.", 100 | # ) 101 | ], 102 | columns=[ 103 | "Category", 104 | "Scope", 105 | "Severity", 106 | "Rule Name", 107 | "Expression", 108 | "Description", 109 | "URL", 110 | ], 111 | ) 112 | 113 | return rules 114 | -------------------------------------------------------------------------------- /src/sempy_labs/_data_pipelines.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from typing import Optional 3 | from sempy_labs._helper_functions import ( 4 | resolve_workspace_name_and_id, 5 | _decode_b64, 6 | _base_api, 7 | resolve_item_id, 8 | _create_dataframe, 9 | delete_item, 10 | create_item, 11 | ) 12 | from uuid import UUID 13 | 14 | 15 | def list_data_pipelines(workspace: Optional[str | UUID] = None) -> pd.DataFrame: 16 | """ 17 | Shows the data pipelines within a workspace. 18 | 19 | This is a wrapper function for the following API: `Items - List Data Pipelines `_. 20 | 21 | Parameters 22 | ---------- 23 | workspace : str | uuid.UUID, default=None 24 | The Fabric workspace name or ID. 25 | Defaults to None which resolves to the workspace of the attached lakehouse 26 | or if no lakehouse attached, resolves to the workspace of the notebook. 27 | 28 | Returns 29 | ------- 30 | pandas.DataFrame 31 | A pandas dataframe showing the data pipelines within a workspace.
32 | """ 33 | 34 | columns = { 35 | "Data Pipeline Name": "string", 36 | "Data Pipeline ID": "string", 37 | "Description": "string", 38 | } 39 | df = _create_dataframe(columns=columns) 40 | 41 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 42 | 43 | responses = _base_api( 44 | request=f"/v1/workspaces/{workspace_id}/dataPipelines", uses_pagination=True 45 | ) 46 | 47 | for r in responses: 48 | for v in r.get("value", []): 49 | new_data = { 50 | "Data Pipeline Name": v.get("displayName"), 51 | "Data Pipeline ID": v.get("id"), 52 | "Description": v.get("description"), 53 | } 54 | df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) 55 | 56 | return df 57 | 58 | 59 | def create_data_pipeline( 60 | name: str, description: Optional[str] = None, workspace: Optional[str | UUID] = None 61 | ): 62 | """ 63 | Creates a Fabric data pipeline. 64 | 65 | This is a wrapper function for the following API: `Items - Create Data Pipeline `_. 66 | 67 | Parameters 68 | ---------- 69 | name: str 70 | Name of the data pipeline. 71 | description : str, default=None 72 | A description of the environment. 73 | workspace : str | uuid.UUID, default=None 74 | The Fabric workspace name or ID. 75 | Defaults to None which resolves to the workspace of the attached lakehouse 76 | or if no lakehouse attached, resolves to the workspace of the notebook. 77 | """ 78 | 79 | create_item( 80 | name=name, description=description, type="DataPipeline", workspace=workspace 81 | ) 82 | 83 | 84 | def delete_data_pipeline(name: str | UUID, workspace: Optional[str | UUID] = None): 85 | """ 86 | Deletes a Fabric data pipeline. 87 | 88 | This is a wrapper function for the following API: `Items - Delete Data Pipeline `_. 89 | 90 | Parameters 91 | ---------- 92 | name: str | uuid.UUID 93 | Name or ID of the data pipeline. 94 | workspace : str | uuid.UUID, default=None 95 | The Fabric workspace name. 96 | Defaults to None which resolves to the workspace of the attached lakehouse 97 | or if no lakehouse attached, resolves to the workspace of the notebook. 98 | """ 99 | 100 | delete_item(item=name, type="DataPipeline", workspace=workspace) 101 | 102 | 103 | def get_data_pipeline_definition( 104 | name: str | UUID, workspace: Optional[str | UUID] = None, decode: bool = True 105 | ) -> dict | pd.DataFrame: 106 | """ 107 | Obtains the definition of a data pipeline. 108 | 109 | Parameters 110 | ---------- 111 | name : str or uuid.UUID 112 | The name or ID of the data pipeline. 113 | workspace : str | uuid.UUID, default=None 114 | The Fabric workspace name or ID. 115 | Defaults to None which resolves to the workspace of the attached lakehouse 116 | or if no lakehouse attached, resolves to the workspace of the notebook. 117 | decode : bool, default=True 118 | decode : bool, default=True 119 | If True, decodes the data pipeline definition file into .json format. 120 | If False, obtains the data pipeline definition file a pandas DataFrame format. 121 | 122 | Returns 123 | ------- 124 | dict | pandas.DataFrame 125 | A pandas dataframe showing the data pipelines within a workspace. 
126 | """ 127 | 128 | (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) 129 | 130 | item_id = resolve_item_id(item=name, type="DataPipeline", workspace=workspace) 131 | result = _base_api( 132 | request=f"/v1/workspaces/{workspace_id}/dataPipelines/{item_id}/getDefinition", 133 | method="post", 134 | lro_return_json=True, 135 | status_codes=None, 136 | ) 137 | df = pd.json_normalize(result["definition"]["parts"]) 138 | 139 | if not decode: 140 | return df 141 | content = df[df["path"] == "pipeline-content.json"] 142 | payload = content["payload"].iloc[0] 143 | 144 | return _decode_b64(payload) 145 | -------------------------------------------------------------------------------- /src/sempy_labs/report/_bpareporttemplate/definition/pages/01d72098bda5055bd500/visuals/1b08bce3bebabb0a27a8/visual.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://developer.microsoft.com/json-schemas/fabric/item/report/definition/visualContainer/1.1.0/schema.json", 3 | "name": "1b08bce3bebabb0a27a8", 4 | "position": { 5 | "x": 610.65441622605533, 6 | "y": 12.781138944266274, 7 | "z": 7000, 8 | "height": 191.71708416399412, 9 | "width": 247.10201958914797, 10 | "tabOrder": 7000 11 | }, 12 | "visual": { 13 | "visualType": "slicer", 14 | "query": { 15 | "queryState": { 16 | "Values": { 17 | "projections": [ 18 | { 19 | "field": { 20 | "Column": { 21 | "Expression": { 22 | "SourceRef": { 23 | "Entity": "BPAResults" 24 | } 25 | }, 26 | "Property": "Workspace" 27 | } 28 | }, 29 | "queryRef": "BPAResults.Workspace Name", 30 | "nativeQueryRef": "Workspace", 31 | "active": true 32 | }, 33 | { 34 | "field": { 35 | "Column": { 36 | "Expression": { 37 | "SourceRef": { 38 | "Entity": "BPAResults" 39 | } 40 | }, 41 | "Property": "Model" 42 | } 43 | }, 44 | "queryRef": "BPAResults.Model Name", 45 | "nativeQueryRef": "Model", 46 | "active": true 47 | } 48 | ] 49 | } 50 | } 51 | }, 52 | "expansionStates": [ 53 | { 54 | "roles": [ 55 | "Values" 56 | ], 57 | "levels": [ 58 | { 59 | "queryRefs": [ 60 | "BPAResults.Workspace Name" 61 | ], 62 | "isCollapsed": true, 63 | "identityKeys": [ 64 | { 65 | "Column": { 66 | "Expression": { 67 | "SourceRef": { 68 | "Entity": "BPAResults" 69 | } 70 | }, 71 | "Property": "Workspace" 72 | } 73 | } 74 | ], 75 | "isPinned": true 76 | }, 77 | { 78 | "queryRefs": [ 79 | "BPAResults.Model Name" 80 | ], 81 | "isCollapsed": true, 82 | "isPinned": true 83 | } 84 | ], 85 | "root": { 86 | "children": [ 87 | { 88 | "identityValues": [ 89 | { 90 | "Literal": { 91 | "Value": "'MK Demo 4'" 92 | } 93 | } 94 | ], 95 | "isToggled": true 96 | } 97 | ] 98 | } 99 | } 100 | ], 101 | "objects": { 102 | "data": [ 103 | { 104 | "properties": { 105 | "mode": { 106 | "expr": { 107 | "Literal": { 108 | "Value": "'Basic'" 109 | } 110 | } 111 | } 112 | } 113 | } 114 | ], 115 | "general": [ 116 | { 117 | "properties": { 118 | "orientation": { 119 | "expr": { 120 | "Literal": { 121 | "Value": "0D" 122 | } 123 | } 124 | } 125 | } 126 | } 127 | ], 128 | "header": [ 129 | { 130 | "properties": { 131 | "textSize": { 132 | "expr": { 133 | "Literal": { 134 | "Value": "14D" 135 | } 136 | } 137 | } 138 | } 139 | } 140 | ] 141 | }, 142 | "visualContainerObjects": { 143 | "border": [ 144 | { 145 | "properties": { 146 | "show": { 147 | "expr": { 148 | "Literal": { 149 | "Value": "true" 150 | } 151 | } 152 | }, 153 | "color": { 154 | "solid": { 155 | "color": { 156 | "expr": { 157 | "ThemeDataColor": { 158 | "ColorId": 0, 159 | "Percent": 0 160 | } 
161 | } 162 | } 163 | } 164 | }, 165 | "radius": { 166 | "expr": { 167 | "Literal": { 168 | "Value": "20D" 169 | } 170 | } 171 | } 172 | } 173 | } 174 | ], 175 | "dropShadow": [ 176 | { 177 | "properties": { 178 | "show": { 179 | "expr": { 180 | "Literal": { 181 | "Value": "true" 182 | } 183 | } 184 | } 185 | } 186 | } 187 | ] 188 | }, 189 | "drillFilterOtherVisuals": true 190 | } 191 | } -------------------------------------------------------------------------------- /src/sempy_labs/admin/_workspaces.py: -------------------------------------------------------------------------------- 1 | from sempy_labs._helper_functions import ( 2 | _base_api, 3 | _build_url, 4 | _encode_user, 5 | ) 6 | from uuid import UUID 7 | from typing import Optional 8 | from sempy_labs.admin._basic_functions import ( 9 | _resolve_workspace_name_and_id, 10 | ) 11 | import sempy_labs._icons as icons 12 | 13 | 14 | def add_user_to_workspace( 15 | user: str | UUID, 16 | role: str = "Member", 17 | principal_type: str = "User", 18 | workspace: Optional[str | UUID] = None, 19 | ): 20 | """ 21 | Grants user permissions to the specified workspace. 22 | 23 | This is a wrapper function for the following API: `Admin - Groups AddUserAsAdmin `_. 24 | 25 | Parameters 26 | ---------- 27 | user : str | uuid.UUID 28 | The user identifier or email address. For service principals and groups you must use the user identifier. 29 | role : str, default="Member" 30 | The role of the user in the workspace. Options are: 'Admin', 'Contributor', 'Member', 'None', 'Viewer'. 31 | principal_type : str, default="User" 32 | The principal type of the user. Options are: 'App', 'Group', 'None', 'User'. 33 | workspace : str | uuid.UUID, default=None 34 | The Fabric workspace name or ID. 35 | Defaults to None which resolves to the workspace of the attached lakehouse 36 | or if no lakehouse attached, resolves to the workspace of the notebook. 37 | """ 38 | 39 | (workspace_name, workspace_id) = _resolve_workspace_name_and_id(workspace) 40 | 41 | # Validation 42 | role = role.capitalize() 43 | roles = ["Admin", "Contributor", "Member", "None", "Viewer"] 44 | if role not in roles: 45 | raise ValueError(f"{icons.red_dot} Invalid role. Please choose from {roles}") 46 | principal_types = ["App", "Group", "None", "User"] 47 | if principal_type not in principal_types: 48 | raise ValueError( 49 | f"{icons.red_dot} Invalid principal type. Please choose from {principal_types}" 50 | ) 51 | 52 | user = _encode_user(user) 53 | 54 | payload = { 55 | "identifier": user, # identifier or emailAddress? 56 | "principalType": principal_type, 57 | "groupUserAccessRight": role, 58 | } 59 | 60 | _base_api( 61 | request=f"/v1.0/myorg/admin/groups/{workspace_id}/users", 62 | method="post", 63 | payload=payload, 64 | ) 65 | 66 | print( 67 | f"{icons.green_dot} The '{user}' user has been added with '{role.lower()}' permissions to the '{workspace_name}' workspace." 68 | ) 69 | 70 | 71 | def delete_user_from_workspace( 72 | user: str | UUID, 73 | workspace: Optional[str | UUID] = None, 74 | is_group: Optional[bool] = None, 75 | profile_id: Optional[str] = None, 76 | ): 77 | """ 78 | Removes user permissions from the specified workspace. 79 | 80 | This is a wrapper function for the following API: `Admin - Groups DeleteUserAsAdmin `_. 81 | 82 | Parameters 83 | ---------- 84 | user : str | uuid.UUID 85 | The user identifier or email address. For service principals and groups you must use the user identifier. 
86 | workspace : str | uuid.UUID, default=None 87 | The Fabric workspace name or ID. 88 | Defaults to None which resolves to the workspace of the attached lakehouse 89 | or if no lakehouse attached, resolves to the workspace of the notebook. 90 | is_group : bool, default=None 91 | Whether the given user is a group. This parameter is required when the user to delete is a group. 92 | profile_id : str, default=None 93 | The service principal profile ID to delete. 94 | """ 95 | 96 | (workspace_name, workspace_id) = _resolve_workspace_name_and_id(workspace) 97 | 98 | user = _encode_user(user) 99 | url = f"/v1.0/myorg/admin/groups/{workspace_id}/users/{user}" 100 | 101 | params = {} 102 | if profile_id is not None: 103 | params["profileId"] = profile_id 104 | if is_group is not None: 105 | params["isGroup"] = is_group 106 | 107 | url = _build_url(url, params) 108 | 109 | _base_api( 110 | request=url, 111 | method="delete", 112 | ) 113 | 114 | print( 115 | f"{icons.green_dot} The '{user}' user has been removed from the '{workspace_name}' workspace." 116 | ) 117 | 118 | 119 | def restore_deleted_workspace(workspace_id: UUID, name: str, email_address: str): 120 | """ 121 | Restores a deleted workspace. 122 | 123 | This is a wrapper function for the following API: `Admin - Groups RestoreDeletedGroupAsAdmin `_. 124 | 125 | Parameters 126 | ---------- 127 | workspace_id : uuid.UUID 128 | The ID of the workspace to restore. 129 | name : str 130 | The name of the group to be restored. 131 | email_address : str 132 | The email address of the owner of the group to be restored. 133 | """ 134 | 135 | payload = { 136 | "name": name, 137 | "emailAddress": email_address, 138 | } 139 | 140 | _base_api( 141 | request=f"/v1.0/myorg/admin/groups/{workspace_id}/restore", 142 | method="post", 143 | payload=payload, 144 | ) 145 | 146 | print( 147 | f"{icons.green_dot} The '{workspace_id}' workspace has been restored as '{name}'." 148 | ) 149 | --------------------------------------------------------------------------------
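To close, a minimal usage sketch for a few of the functions defined above. It assumes a Fabric notebook session and that these functions are re-exported from the package's top-level and admin __init__ modules, as the repository layout suggests; the workspace, model and capacity names are placeholders.

from sempy_labs import (
    get_semantic_model_refresh_schedule,
    enable_semantic_model_scheduled_refresh,
    list_workloads,
)
from sempy_labs.admin import scan_workspaces

# Run a tenant metadata scan of one workspace, including schema and lineage.
scan = scan_workspaces(dataset_schema=True, lineage=True, workspace="Sales")
print(len(scan.get("workspaces", [])), "workspace(s) scanned")

# Inspect a model's refresh schedule, then switch scheduled refresh off.
schedule = get_semantic_model_refresh_schedule(dataset="Sales Model", workspace="Sales")
print(schedule[["Days", "Times", "Enabled"]])
enable_semantic_model_scheduled_refresh(
    dataset="Sales Model", workspace="Sales", enable=False
)

# Review the current workload settings of a capacity.
print(list_workloads(capacity="MyCapacity"))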