├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── LICENSE-SAMPLECODE ├── LICENSE-SUMMARY ├── README.md ├── content ├── adopting-lake-formation │ ├── considerations-when-adopting-lake-formation.md │ ├── general-best-practices.md │ ├── images │ │ ├── crossaccount-hybridaccessmode.png │ │ ├── hybridaccessmode.png │ │ ├── lakeformationmode.png │ │ └── oneaccount-hybridaccessmode.png │ ├── lake-formation-adoption-modes.md │ └── overview.md ├── data-sharing │ ├── cross-account-faq.md │ ├── cross-account-versions.md │ ├── general-data-sharing.md │ ├── images │ │ └── RAM-setting-for-sharing.png │ ├── overview.md │ └── upgrading-cross-account-versions.md ├── index.md └── lf-tags │ ├── basics.md │ ├── best-practices.md │ ├── common-ontologies.md │ ├── developing-an-lftag-ontology.md │ ├── example-usecase.md │ ├── images │ ├── lf-tags-example.png │ ├── lf-tags-expression-grouping-example.png │ ├── lf-tags-expression-principal-mapping.png │ ├── lf-tags-grouping.png │ ├── lf-tags-resource-mapping.png │ └── lf-tags-vs-named-resources-example.png │ ├── limitations.md │ └── overview.md ├── mkdocs.yml ├── poetry.lock └── pyproject.toml /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | #IDE 34 | .idea/ 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *,cover 49 | .hypothesis/ 50 | 51 | # Translations 52 | *.mo 53 | *.pot 54 | 55 | # Scrapy stuff: 56 | .scrapy 57 | 58 | # PyBuilder 59 | target/ 60 | 61 | # IPython Notebook 62 | .ipynb_checkpoints 63 | 64 | # pyenv 65 | .python-version 66 | 67 | # virtualenv 68 | venv/ 69 | ENV/ 70 | .venv/ 71 | pyvenv.cfg 72 | 73 | # MkDocs documentation 74 | site/ 75 | .DS_Store 76 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 
5 | 
6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 
7 | information to effectively respond to your bug report or contribution. 
8 | 
9 | 
10 | ## Reporting Bugs/Feature Requests 
11 | 
12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 
13 | 
14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 
15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 
16 | 
17 | * A reproducible test case or series of steps 
18 | * The version of our code being used 
19 | * Any modifications you've made relevant to the bug 
20 | * Anything unusual about your environment or deployment 
21 | 
22 | 
23 | ## Contributing via Pull Requests 
24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 
25 | 
26 | 1. You are working against the latest source on the *main* branch. 
27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 
28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 
29 | 
30 | To send us a pull request, please: 
31 | 
32 | 1. Fork the repository. 
33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 
34 | 3. Ensure local tests pass. 
35 | 4. Commit to your fork using clear commit messages. 
36 | 5. Send us a pull request, answering any default questions in the pull request interface. 
37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 
38 | 
39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 
40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 
41 | 
42 | 
43 | ## Finding contributions to work on 
44 | Looking at the existing issues is a great way to find something to contribute to. As our projects use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 
45 | 
46 | 
47 | ## Code of Conduct 
48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 
49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 
50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 
51 | 
52 | 
53 | ## Security issue notifications 
54 | If you discover a potential security issue in this project, we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. 
55 | 
56 | 
57 | ## Licensing 
58 | 
59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Creative Commons Attribution-ShareAlike 4.0 International Public License 2 | 3 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. 4 | 5 | Section 1 – Definitions. 6 | 7 | a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. 8 | 9 | b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. 10 | 11 | c. BY-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License. 12 | 13 | d. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. 14 | 15 | e. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. 16 | 17 | f. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. 18 | 19 | g. License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution and ShareAlike. 20 | 21 | h. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. 22 | 23 | i. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. 24 | 25 | j. Licensor means the individual(s) or entity(ies) granting rights under this Public License. 26 | 27 | k. 
Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. 28 | 29 | l. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. 30 | 31 | m. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. 32 | 33 | Section 2 – Scope. 34 | 35 | a. License grant. 36 | 37 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: 38 | 39 | A. reproduce and Share the Licensed Material, in whole or in part; and 40 | 41 | B. produce, reproduce, and Share Adapted Material. 42 | 43 | 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 44 | 45 | 3. Term. The term of this Public License is specified in Section 6(a). 46 | 47 | 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 48 | 49 | 5. Downstream recipients. 50 | 51 | A. Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. 52 | 53 | B. Additional offer from the Licensor – Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply. 54 | 55 | C. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 56 | 57 | 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). 58 | 59 | b. Other rights. 60 | 61 | 1. 
Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 62 | 63 | 2. Patent and trademark rights are not licensed under this Public License. 64 | 65 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties. 66 | 67 | Section 3 – License Conditions. 68 | 69 | Your exercise of the Licensed Rights is expressly made subject to the following conditions. 70 | 71 | a. Attribution. 72 | 73 | 1. If You Share the Licensed Material (including in modified form), You must: 74 | 75 | A. retain the following if it is supplied by the Licensor with the Licensed Material: 76 | 77 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); 78 | 79 | ii. a copyright notice; 80 | 81 | iii. a notice that refers to this Public License; 82 | 83 | iv. a notice that refers to the disclaimer of warranties; 84 | 85 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; 86 | 87 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and 88 | 89 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 90 | 91 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 92 | 93 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 94 | 95 | b. ShareAlike.In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 96 | 97 | 1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License. 98 | 99 | 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 100 | 101 | 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. 102 | 103 | Section 4 – Sui Generis Database Rights. 104 | 105 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: 106 | 107 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database; 108 | 109 | b. 
if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and 110 | 111 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. 112 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 113 | 114 | Section 5 – Disclaimer of Warranties and Limitation of Liability. 115 | 116 | a. Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You. 117 | 118 | b. To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You. 119 | 120 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. 121 | 122 | Section 6 – Term and Termination. 123 | 124 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. 125 | 126 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 127 | 128 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 129 | 130 | 2. upon express reinstatement by the Licensor. 131 | 132 | c. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. 133 | 134 | d. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. 135 | 136 | e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. 137 | 138 | Section 7 – Other Terms and Conditions. 139 | 140 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. 141 | 142 | b. 
Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. 143 | 144 | Section 8 – Interpretation. 145 | 146 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. 147 | 148 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. 149 | 150 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. 151 | 152 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. 153 | -------------------------------------------------------------------------------- /LICENSE-SAMPLECODE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this 4 | software and associated documentation files (the "Software"), to deal in the Software 5 | without restriction, including without limitation the rights to use, copy, modify, 6 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 7 | permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 10 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 11 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 12 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 13 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 14 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 15 | -------------------------------------------------------------------------------- /LICENSE-SUMMARY: -------------------------------------------------------------------------------- 1 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | The documentation is made available under the Creative Commons Attribution-ShareAlike 4.0 International License. See the LICENSE file. 4 | 5 | The sample code within this documentation is made available under the MIT-0 license. See the LICENSE-SAMPLECODE file. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## AWS Lake Formation Best Practices 2 | 3 | A best practices guide for using Lake Formation. 4 | 5 | This guide is open for anyone to make changes and suggest new content. If there is new content that you would like to see or inaccurate information that needs to be fixed, please open an Issue or contribute it. 6 | 7 | ## Adding new content 8 | 9 | This repo uses mkdocs and is hosted on Github. 
To get started creating content, the easiest way is to use [poetry](https://python-poetry.org/). The following commands should get you started: 
10 | * You can install poetry using ```brew install poetry``` if you are running on macOS. If you are using a different OS, please see the [installation instructions](https://python-poetry.org/docs/#installation) for your OS. 
11 | * Run ```poetry install```, which will create a virtual Python environment and install the necessary dependencies. 
12 | * To run the local version of mkdocs and preview changes in real time, run ```poetry shell``` and, once in the virtual environment, run ```mkdocs serve```. 
13 | * Make your changes, preferably on a branch for large updates. 
14 | * Create a GitHub pull request with your changes to be reviewed and merged. 
15 | 
16 | ## Rules on contributions 
17 | 
18 | 1. We will try to review and provide comments within one week. If we do not meet this timeline, or if you have an urgent request, please email us (see below). 
19 | 2. If we feel that content is best served within our public documentation, we will submit your content to our doc writers and close the pull request. 
20 | 3. We will not accept utility code contributions from those outside of AWS, as anything we publish here must go through security reviews within AWS. 
21 | 4. Before writing any significant content, it is recommended that you open an Issue to validate the content that you intend to write. 
22 | 5. We currently have a backlog of content that we want to write. If you are interested in contributing but do not know what topics to write about, please email us (see below). 
23 | 
24 | Return to [Live Docs](https://aws.github.io/aws-lakeformation-best-practices/). 
25 | 
26 | ## Contact 
27 | 
28 | If you have any feedback or wish to contact the owners of this repo, please email aws-bda-lf+best-practices AT amazon.com. 
29 | 
30 | ## License Summary 
31 | 
32 | The documentation is made available under the Creative Commons Attribution-ShareAlike 4.0 International License. See the LICENSE file. 
33 | 
34 | The sample code within this documentation is made available under the MIT-0 license. See the LICENSE-SAMPLECODE file. 
35 | 
-------------------------------------------------------------------------------- /content/adopting-lake-formation/considerations-when-adopting-lake-formation.md: -------------------------------------------------------------------------------- 
1 | # Considerations when adopting Lake Formation 
2 | 
3 | [AWS Lake Formation](https://aws.amazon.com/lake-formation/) centralizes permissions management of your data and makes it easier to share data across your organization and with external customers. With the AWS Glue Data Catalog as the central point of metadata for data in AWS data sources such as Amazon S3, you can manage fine-grained data lake access permissions using familiar database-like features of Lake Formation. Lake Formation manages the metadata permissions of tables in the Data Catalog and the corresponding data permissions for S3 in one place. When you set up permissions for users, you define Grant/Revoke style policies for the databases and tables and register the data location with Lake Formation. Further, Lake Formation provides LF-Tags to scale permissions policies and provides CloudTrail logging for auditing and reports. 
4 | 
5 | We recommend considering the following points while you are evaluating and choosing to adopt Lake Formation: 
6 | 
7 | 1. Take inventory of your data stores.
Lake Formation provides data governance for data on S3, while extending it to Amazon Redshift data shares and extending the Data Catalog to federate into external Hive metastores. 
8 | 2. Review your current data access management model. What is your current data access model? Is it Apache Ranger, or a combination of S3 bucket policies, IAM policies, and Glue resource policies? 
9 | 3. Review what analytics engines and AWS services you use for your data analysis and ETL pipelines. Validate that your query engines are supported with the granularity of permissions that you need (for example, table-level, column-level, or row-level support, or support for Glue Views). AWS analytics services that integrate with Lake Formation are detailed in [this documentation page](https://docs.aws.amazon.com/lake-formation/latest/dg/service-integrations.html). 
10 | 4. Are you using open source transactional table formats such as Apache Iceberg, Apache Hudi, or Linux Foundation's Delta Lake? You can look at what levels of Lake Formation fine-grained permissions are supported for each table format by each integrated analytics service in [our documentation](https://docs.aws.amazon.com/lake-formation/latest/dg/working-with-services.html). 
11 | 5. What permissions model suits your data platform? Do you want to keep your existing model, or do you want to adopt Lake Formation? Many customers who choose Lake Formation do so because they want to adopt permissions based on tagging (that is, LF-Tags), want a model that is more centralized around the metadata in the Glue Data Catalog, or want to integrate tightly with AWS analytics services. You could continue to use your existing permissions model if it provides constructs and policy mechanisms not yet supported by Lake Formation (for example, attribute-based access control). 
12 | 6. What is your account topology? You may consider splitting monolithic AWS accounts into smaller accounts and leveraging Lake Formation's cross-account sharing capabilities. This provides benefits like reducing single points of failure, allowing teams or organizations to own and manage their data processing infrastructure, and scaling their usage of AWS services. 
13 | 7. Do you want to apply Lake Formation permissions to all resources, a subset of resources, all users, or a subset of users? Please see [Lake Formation Adoption Modes](lake-formation-adoption-modes.md). 
14 | 8. Lastly, if you are moving from a different authorization engine, how will you model the permissions for your users? Lake Formation can create policies against AWS IAM users and roles, Amazon QuickSight authors, and AWS IAM Identity Center users and groups. If you need identity-based policies, then you will need to integrate with IAM Identity Center using a feature called [trusted identity propagation](https://docs.aws.amazon.com/singlesignon/latest/userguide/trustedidentitypropagation-overview.html). At the time of writing, not all AWS services support IAM Identity Center, so please refer to the [public docs](https://docs.aws.amazon.com/singlesignon/latest/userguide/trustedidentitypropagation-integrations.html). 
15 | 
16 | 
17 | Once all of these points have been considered, the general process to adopt Lake Formation can be found in detail in the [Lake Formation public documentation](https://docs.aws.amazon.com/lake-formation/latest/dg/upgrade-glue-lake-formation.html).
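
As a starting point for steps 1 and 2, you can script a quick inventory of what is already registered with Lake Formation and what lives in the Data Catalog. The following is a minimal boto3 sketch; it assumes credentials with `lakeformation:ListResources` and `glue:GetDatabases` permissions, and pagination is omitted for brevity.

```python
import boto3

lakeformation = boto3.client("lakeformation")
glue = boto3.client("glue")

# S3 locations already registered with Lake Formation, along with the
# IAM role each location was registered with.
registered = lakeformation.list_resources()
for resource in registered["ResourceInfoList"]:
    print(resource["ResourceArn"], resource.get("RoleArn"))

# Databases in the Glue Data Catalog, so each one can be mapped to its
# current access-control mechanism (IAM, bucket policy, or Lake Formation).
databases = glue.get_databases()
for database in databases["DatabaseList"]:
    print(database["Name"], database.get("LocationUri"))
```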
18 | 
-------------------------------------------------------------------------------- /content/adopting-lake-formation/general-best-practices.md: -------------------------------------------------------------------------------- 
1 | # General Best Practices 
2 | 
3 | ## Lake Formation Service Linked Role vs Customer Managed Roles 
4 | 
5 | We do not recommend using Lake Formation's Service Linked Role (SLR) in production environments. Although the SLR is provided for convenience, there are a few points to consider if you choose it. You cannot edit the SLR's policy if needed. Encrypted catalogs are not supported with the SLR for cross-account sharing. A significantly large number of S3 locations registered with Lake Formation may cause IAM policy limits to be breached. EMR on EC2 does not support SLR-registered locations for data access. Lastly, SLRs do not adhere to [AWS Service Control Policies](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scps.html). Hence, we recommend always registering your data locations with Lake Formation using a custom IAM role. 
6 | 
7 | ## Using Lake Formation Administrator Role 
8 | 
9 | Lake Formation administrator permissions could be over-permissive for specific operations on the data lake. There is also a limit of 30 on the number of administrators that can be registered. We recommend creating roles with limited IAM and Lake Formation permissions for targeted operations. Limit the Lake Formation administrator role to those who need to administer the AWS account and to processes that must have full access to the Glue Data Catalog. For read-only access to the entire Glue Data Catalog, we recommend using the read-only Lake Formation administrator for operations such as auditing that do not require write access. For use cases where a user needs to be able to grant access to other users within the account, provide them with grantable permissions on the resources that they manage. 
10 | 
11 | Below are examples of when not to use Lake Formation administrator: 
12 | 
13 | * Glue crawler roles - Do not make Glue crawler roles Lake Formation administrators. Grant permissions to the crawler role to create tables in select databases in the catalog. Grant data location permission to the crawler role on the specific S3 locations to which the databases point. 
14 | * Data producer/owner/steward - Grant data location permission to create databases and tables in specific S3 locations. Provide grantable permissions to these personas on the databases and tables that they manage. 
15 | 
16 | ## When to use Data Filters versus a Glue View? 
17 | 
18 | A data filter can be created only on an individual table, while a Glue View can be created across multiple tables. A data filter could be used when you have simple and specific row and column filtering needs on a table. 
19 | 
20 | [Glue Views](https://docs.aws.amazon.com/athena/latest/ug/views-glue.html) are different from the standard table views that you create in [Amazon Athena](https://docs.aws.amazon.com/athena/latest/ug/create-view.html), [Apache Hive](https://docs.aws.amazon.com/athena/latest/ug/hive-views.html), Apache Spark, Presto, Trino, and so on. Glue Views are a feature of the Data Catalog, currently in preview, and are based on definer semantics, where access to a Glue View is defined by the user who creates the view. 
21 | 
22 | You can share a Glue View without sharing the underlying tables. The recipient can query only the Glue View and not access the tables.
You can share a Glue View from one account to another, but you cannot create a new Glue View that contains tables shared from other accounts. 
23 | 
24 | Glue Views are currently in [preview](https://docs.aws.amazon.com/lake-formation/latest/dg/working-with-views.html). We will add more best practices as the feature becomes generally available. 
25 | 
26 | 
-------------------------------------------------------------------------------- /content/adopting-lake-formation/images/crossaccount-hybridaccessmode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/aws-lakeformation-best-practices/010d6a82bcd2b3980e06657e7340eb88d65d3854/content/adopting-lake-formation/images/crossaccount-hybridaccessmode.png -------------------------------------------------------------------------------- /content/adopting-lake-formation/images/hybridaccessmode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/aws-lakeformation-best-practices/010d6a82bcd2b3980e06657e7340eb88d65d3854/content/adopting-lake-formation/images/hybridaccessmode.png -------------------------------------------------------------------------------- /content/adopting-lake-formation/images/lakeformationmode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/aws-lakeformation-best-practices/010d6a82bcd2b3980e06657e7340eb88d65d3854/content/adopting-lake-formation/images/lakeformationmode.png -------------------------------------------------------------------------------- /content/adopting-lake-formation/images/oneaccount-hybridaccessmode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/aws-lakeformation-best-practices/010d6a82bcd2b3980e06657e7340eb88d65d3854/content/adopting-lake-formation/images/oneaccount-hybridaccessmode.png -------------------------------------------------------------------------------- /content/adopting-lake-formation/lake-formation-adoption-modes.md: -------------------------------------------------------------------------------- 
1 | # Lake Formation adoption modes 
2 | 
3 | Before Lake Formation was available, users of the Glue Data Catalog could use a combination of Data Catalog resource policies and IAM permissions (that is, S3 bucket policies and IAM principal policies) to manage access to both the metadata in the catalog and the data in S3 that the metadata points to. The combination of S3 bucket policies, IAM principal policies, and Glue catalog policies can be referred to as resource based policies. 
4 | 
5 | However, you do not have to adopt Lake Formation completely in order to use it. You can mix and match permission models between Lake Formation and resource based policies for your catalog resources. We will go through some of the best practices and reasons to choose one over the other. 
6 | 
7 | ## Hybrid access mode versus Lake Formation mode - what to choose? 
8 | 
9 | There are two modes in which you can register an S3 data location with Lake Formation - Lake Formation mode and [hybrid access mode](https://docs.aws.amazon.com/lake-formation/latest/dg/hybrid-access-mode.html). When you register a database or table location in Lake Formation mode, you are choosing Lake Formation as the only permissions model to manage all permissions to those catalog resources.
In this case, after setting up permissions for users, you need to revoke the resource based policies. 
10 | 
11 | ![Lake formation mode](images/lakeformationmode.png) 
12 | 
13 | When you register a database or table location with Lake Formation in hybrid access mode, you can choose to provide Lake Formation permissions to some users and maintain the resource based policy permissions for others, without interruptions. One user gets access to the catalog resources through only one permissions model. 
14 | 
15 | ![hybrid access mode](images/hybridaccessmode.png) 
16 | 
17 | 
18 | ## Use-cases to adopt Hybrid access mode 
19 | 
20 | When you have existing users of the Data Catalog with resource based policy permissions and you want to adopt Lake Formation, we recommend using hybrid access mode. You can select the databases and tables that you want to use Lake Formation permissions with and register them with Lake Formation as hybrid access mode resources (the RegisterResource() API). Then, you can start adding Lake Formation permissions for new users on these resources. You can also gradually add Lake Formation permissions for your existing users and remove their resource based policy permissions. Hybrid access mode requires minimal operational changes, as your existing users' access is not interrupted when you adopt Lake Formation. 
21 | 
22 | For example, if you have extract, transform, and load (ETL) processes currently in production that use resource based policy permissions, enabling Lake Formation permissions on these jobs can impact them. You can onboard new team members like data scientists and data analysts to these same databases and tables with Lake Formation permissions while continuing to use resource based policy permissions for your production workflows without interruption. Some ETL workflows that require full table access could use the resource based policy permissions, while you add fine-grained access on the same tables for your data scientists and analysts. You do not have to maintain two copies of the same tables in order to share them with different users. 
23 | 
24 | The picture below illustrates hybrid access mode permissions on the same catalog resource within an account for a data-analyst role versus a data-engineer role. 
25 | 
26 | ![single account hybrid access mode](images/oneaccount-hybridaccessmode.png) 
27 | 
28 | The picture below illustrates hybrid access mode permissions on the same catalog resource shared cross-account for a data-analyst role versus a data-engineer role. 
29 | 
30 | ![cross account hybrid access mode](images/crossaccount-hybridaccessmode.png) 
31 | 
32 | For setting up Lake Formation permissions in hybrid access mode, refer to the [documentation](https://docs.aws.amazon.com/lake-formation/latest/dg/hybrid-access-setup.html). 
33 | 
34 | ## Use-cases to adopt Lake Formation mode 
35 | 
36 | If you are new to the Data Catalog, if your current users and ETL workloads do not heavily use the Data Catalog with resource based policy permissions, and if you prefer to have only one permissions model for ease of maintenance, we recommend Lake Formation mode. You can crawl the new data sets in S3 using Glue crawlers, create databases and tables in the Glue Data Catalog, set up Lake Formation permissions for your users, and register the locations with Lake Formation in Lake Formation mode. Once the Lake Formation permissions are verified to work, you may clean up the IAM policy permissions, Data Catalog resource policies, and S3 bucket policies. A minimal API sketch of this registration and grant flow is shown below.
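
The following boto3 sketch illustrates that flow under stated assumptions: the bucket, database, table, role, and account ID are illustrative placeholders, and a custom registration IAM role has already been created (see the general best practices page for why the service linked role is discouraged).

```python
import boto3

lakeformation = boto3.client("lakeformation")

# Register the S3 location in Lake Formation mode using a custom IAM role.
# All ARNs below are placeholders.
lakeformation.register_resource(
    ResourceArn="arn:aws:s3:::example-data-lake-bucket/sales/",
    UseServiceLinkedRole=False,
    RoleArn="arn:aws:iam::111122223333:role/LakeFormationRegistrationRole",
)

# Grant Lake Formation permissions to a principal on a table whose
# data lives under the registered location.
lakeformation.grant_permissions(
    Principal={
        "DataLakePrincipalIdentifier": "arn:aws:iam::111122223333:role/data-analyst"
    },
    Resource={"Table": {"DatabaseName": "sales_db", "Name": "orders"}},
    Permissions=["SELECT"],
)
```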
37 | 
38 | Large organizations that use IAM-based policy permissions for Glue Data Catalog and S3 access sharing may have come across S3 bucket policy limits and IAM policy limits. Lake Formation is designed to help avoid those limitations. 
39 | 
40 | We also recommend continuing to use Lake Formation mode on a database once it is set up, and not changing it to hybrid access mode. We have provided [step by step instructions](https://docs.aws.amazon.com/lake-formation/latest/dg/hybrid-access-mode-update.html) if you do prefer to change a Glue resource to a hybrid resource. In order to avoid impacting the existing Lake Formation users, we recommend doing it during offline hours. 
41 | 
42 | For setting up Lake Formation permissions in Lake Formation mode, refer to the [documentation](https://docs.aws.amazon.com/lake-formation/latest/dg/initial-LF-setup.html). 
43 | 
44 | 
45 | ## Using LF-Tags along with hybrid access mode 
46 | 
47 | 
48 | You can grant permissions on a database or table using either the named resource method or LF-Tags. For a hybrid resource, you cannot opt in a principal (that is, use the [CreateLakeFormationOptIn()](https://docs.aws.amazon.com/lake-formation/latest/APIReference/API_CreateLakeFormationOptIn.html) API) to Lake Formation permissions by specifying LF-Tags. You can only select the databases and tables by name and opt in the principal to make Lake Formation permissions effective immediately. 
49 | 
50 | If you are using LF-Tags for granting permissions, then please follow the steps below to opt in. 
51 | 
52 | 1. List the catalog objects with which the LF-Tags are associated. 
53 | 2. List the principals who were granted permissions on those resources. 
54 | 3. Opt in the principals and resources as a separate step. For this, you can use the Hybrid access mode page in the left navigation bar of the console, or use the SDK/API CreateLakeFormationOptIn(). 
55 | 
56 | 
57 | ## Limitations 
58 | 
59 | Please refer to our public docs for the current list of [considerations and limitations](https://docs.aws.amazon.com/lake-formation/latest/dg/notes-hybrid.html). 
60 | 
-------------------------------------------------------------------------------- /content/adopting-lake-formation/overview.md: -------------------------------------------------------------------------------- 
1 | # Adopting Lake Formation Best Practices 
2 | 
3 | To successfully adopt Lake Formation, there are many different considerations to take into account. This section will go over some of the key areas, with best practices, on how to adopt Lake Formation. 
4 | 
5 | * See [Considerations when adopting Lake Formation](considerations-when-adopting-lake-formation.md) for a non-exhaustive list of areas that will need to be looked at when beginning to adopt Lake Formation. 
6 | * See [General best practices](general-best-practices.md) for high-level best practices when configuring Lake Formation. 
7 | * See [Lake Formation adoption modes](lake-formation-adoption-modes.md) for a look into Lake Formation's hybrid access mode. 
8 | 
-------------------------------------------------------------------------------- /content/data-sharing/cross-account-faq.md: -------------------------------------------------------------------------------- 
1 | # Cross Account FAQs 
2 | 
3 | ## 1. What are the various cross-account topologies in which customers can share resources? 
4 | 
5 | The Lake Formation cross-account sharing feature is used for sharing catalog resources and underlying data lake access.
Customers can share resources without duplicating data, avoiding extensive pipelines to make the data available to other lines of business. These models enable collaboration across various lines of business and require planning to standardize enterprise-level adoption. 
6 | 
7 | ### Peer to Peer (Pure Data Mesh) 
8 | 
9 | The peer to peer model is one of the data mesh patterns used by customers who have a high degree of autonomy established within their domains and want to operate in a decentralized fashion. In this model, data producers own data products, and the data ownership remains with the data producer generating them. Data product owners manage the lifecycle of the data and also control access to the data when sharing it with consumers. Data producers and data consumers operate independently and don't depend on a central governance platform/team for data distribution/fulfillment. There is a lot of information that we have left out, but if you wish to learn more about data meshes, a great resource is [Data Mesh Principles and Logical Architecture by Zhamak Dehghani](https://martinfowler.com/articles/data-mesh-principles.html). 
10 | 
11 | To see how to build peer to peer data meshes, refer to the blog: 
12 | [Securely share your data across AWS accounts using AWS Lake Formation](https://aws.amazon.com/blogs/big-data/securely-share-your-data-across-aws-accounts-using-aws-lake-formation/). 
13 | 
14 | ### Hub and Spoke 
15 | 
16 | This topology can be generalized as a central AWS account (hub) that always owns the catalog, with nodes (spokes) that interact with the central account. However, specific implementations differ on two choices: 1/ who owns the data sets and 2/ who permissions access to those data sets. 
17 | 
18 | When looking at the decision on who owns the data sets, there are two possibilities: 1/ the central account owns the data, and data producers write their data to the central account, or 2/ the producers own the data in their own accounts. When the central account owns all the data within the hub and spoke, there can be very strict controls on who can access it and what data is written. For example, a central team can continuously monitor the data to ensure that PII (personally identifiable information) is properly cataloged and secured. When producers own their own data, they have more freedom to do what they want with that data. 
19 | 
20 | When looking at the decision on who governs the data sets, there are two possibilities: 1/ a central governance team in the central account decides whether a consumer has the ability to consume a data set, or 2/ data producers make those decisions. 
21 | 
22 | Based on these two decisions, customers can achieve different business goals. Some customers choose to store all data in the central account and govern all decisions on who is allowed to access a dataset centrally. This provides the greatest amount of control over the participants within the organization. Other customers have increasingly decided to decentralize as much as possible by having producers own their datasets and make decisions on who can access them, similar to how data meshes operate, as in the next section. 
23 | 
24 | ### Centrally governed data mesh 
25 | 
26 | This is a hybrid of both the hub and spoke model and the data mesh. Customers choose this topology when they want to centralize metadata discovery and permission management in a governance account and make the data available to other AWS accounts.
In this model, producers publish their data assets to a central governance account that hosts the metadata, so organizations can discover and request access to the resources. Producers, like in data meshes, grant permissions to consumers. The central governance account provides better auditability of resource access and scales permissions across the enterprise, while proper compliance controls can take place. 
27 | 
28 | Like data meshes, this requires organizations to use a different paradigm that some organizations may not be used to. However, if this model is attractive, we highly recommend that you take a look at [Amazon DataZone](https://aws.amazon.com/datazone/), which leverages Lake Formation for security management and the Glue Data Catalog for technical data management, while offering a business catalog, approval workflows, and interoperability with third-party vendors. 
29 | 
30 | However, if you wish to implement this model without DataZone, please refer to the blog [Design a data mesh architecture using AWS Lake Formation and AWS Glue](https://aws.amazon.com/blogs/big-data/design-a-data-mesh-architecture-using-aws-lake-formation-and-aws-glue/). 
31 | 
32 | ## 2. What are the various cross account sharing versions available in Lake Formation? 
33 | 
34 | See [Cross Account versions](cross-account-versions.md) for the various cross account versions available. 
35 | 
36 | 
37 | ## 3. How does Lake Formation manage the AWS RAM resource shares across cross account sharing versions? 
38 | 
39 | In version 1, one RAM share is created for every catalog object shared between two accounts. For example, if account A shares database1 with DESCRIBE permission to account B, one RAM share is created. Next, if account A shares table1 from database1, a second RAM share is created. When table2 from database1 is shared, a third RAM share is created. If account A shares database1.ALL_Tables, then a 4th RAM share is created, and so on. If account A shares a second database2 with account B, a new RAM share is created. In short, as many RAM shares are created as there are Lake Formation cross account grants per Data Catalog object between the two accounts. 
40 | 
41 | In version 2, RAM shares are optimized, and they continue to be optimized in version 3 and version 4. A set of RAM shares is created in the source account for every recipient account. Between version 2 and version 3 (or version 4), the number of RAM shares created is the same when the named resource method is used for cross account sharing, and it differs slightly for LF-TBAC. 
42 | 
43 | One RAM share is created in the source account for all databases shared with a recipient account. This database-level RAM share gets reused by attaching all new databases shared with the same recipient account. One RAM share is created in the source account for all tables shared with a recipient account. This table-level RAM share gets reused by attaching all new tables shared with the same recipient account. If the source account chooses to share a database with the ALL_Tables option, then a new RAM share is created. So, using the named resource method for cross account grants, a total of up to 3 RAM shares can be created for every account pair. If the source account does not choose to grant cross account permissions using ALL_Tables, then only 2 RAM shares are created for every account pair. 
44 | 
45 | For LF-Tags based cross account sharing, two additional RAM shares are created - one for databases and one for tables. Any additional LF-Tag based grants between the same two accounts reuse these 2 RAM shares.
46 | 
47 | So, if you are using a combination of LF-TBAC and the named resource method for cross account sharing, version 3 and version 4 can create up to 5 RAM shares in the source account for any source and recipient account pair. 
48 | 
49 | 
50 | ## 4. What are the factors to consider when choosing account-level sharing vs direct sharing with a principal? 
51 | 
52 | | | Account to Account | Account to Principal | 
53 | | -------- | ------- | -------- | 
54 | | Control model | When a source account (producer) wants to share a resource with a target account (consumer) but expects the consumer's data lake administrator to manage the resource permissions within their account, the source account can grant access at the account level rather than specifying individual users. | When the source account (producer) wants tighter control over resource permissions and wants to completely control who has access to the resource, it can share the resource directly with principals in the target account. | 
55 | | Cross-Account version requirement | All cross-account versions support this model | Cross-account version V3 and above supports this model | 
56 | | Receiver policies | Since the resource is shared at the account level, the administrator of the target account's data lake should have policies to create a resource link and delegate permissions. No additional policy requirements exist for other principals that are delegated permissions to the resource in the target account. For the various personas and policies, please refer to the [public documentation](https://docs.aws.amazon.com/lake-formation/latest/dg/permissions-reference.html#lf-permissions-tables) | Since the resource is shared cross account to a principal, each receiving principal needs the Lake Formation CREATE_TABLE or CREATE_DATABASE permission to create the resource link. They also need the glue:CreateTable or glue:CreateDatabase IAM permission in their IAM policy (based on the resource type that is shared). | 
57 | | Auditability | To understand who has access to a given dataset, you need to review the permissions defined in both the source account where the dataset originated and the target account with which the dataset has been shared. | To understand who has access to a given dataset, you can determine this information entirely from the source account. | 
58 | 
59 | For more details on direct sharing with a principal, refer to the blog [Enable cross-account sharing with direct IAM principals using AWS Lake Formation Tags](https://aws.amazon.com/blogs/big-data/enable-cross-account-sharing-with-direct-iam-principals-using-aws-lake-formation-tags/) 
60 | 
61 | ## 5. Can I share both my Glue Data Catalog resources (catalog, database, table) and Redshift tables using Lake Formation cross account? 
62 | 
63 | Yes, you can share both data lake resources cataloged in the Glue Data Catalog with storage on S3 and Redshift tables shared with the Glue Data Catalog via [data shares](https://docs.aws.amazon.com/redshift/latest/dg/lf_datashare_overview.html), using Lake Formation for cross account access. 
64 | 
65 | For more details, refer to the blog [Implement tag-based access control for your data lake and Amazon Redshift data sharing with AWS Lake Formation](https://aws.amazon.com/blogs/big-data/implement-tag-based-access-control-for-your-data-lake-and-amazon-redshift-data-sharing-with-aws-lake-formation/) 
66 | 
67 | ## 6. Can I share my resource cross account, cross Region? 
68 | 
69 | Yes, Lake Formation allows querying Data Catalog tables across Regions using Athena, EMR, and Glue ETL.
By creating resource links in other Regions pointing to the source databases and tables, you can access data across Regions without copying the underlying data or metadata into the Data Catalog. Engines that query the data need network connectivity to endpoints within the Region to access S3 buckets. For example, EMR clusters or Glue ETL jobs running in a private subnet within an AWS VPC may require a NAT gateway, VPC peering, or a transit gateway to reach external resources. Any network traffic between the source instance and any AWS endpoint stays within the AWS network and does not go over the internet. Refer to the [AWS VPC FAQs](https://aws.amazon.com/vpc/faqs/) for details on network traffic and communication paths. 
70 | 
71 | For more details on the cross-Region setup, refer to the blog [Configure cross-Region table access with the AWS Glue Catalog and AWS Lake Formation](https://aws.amazon.com/blogs/big-data/configure-cross-region-table-access-with-the-aws-glue-catalog-and-aws-lake-formation/) 
72 | 
73 | ## 7. Can I share a resource cross account using Lake Formation when resource management within that account is not done using Lake Formation? 
74 | 
75 | Yes, you can use hybrid access mode to register the S3 bucket containing your data with Lake Formation in the source account (grantor) and opt in the cross-account (target) principal to use Lake Formation mode for sharing. This enables the target (receiver) account to access the shared resource using Lake Formation. 
76 | 
77 | For more details on how hybrid access mode works, refer to the blog [Introducing hybrid access mode for AWS Glue Data Catalog to secure access using AWS Lake Formation and IAM and Amazon S3 policies](https://aws.amazon.com/blogs/big-data/introducing-hybrid-access-mode-for-aws-glue-data-catalog-to-secure-access-using-aws-lake-formation-and-iam-and-amazon-s3-policies/) 
78 | 
79 | 
80 | ## Other References 
81 | 
82 | - For details on Lake Formation cross account sharing, refer to the [public documentation](https://docs.aws.amazon.com/lake-formation/latest/dg/cross-account-permissions.html) 
83 | - For more details on cross account sharing versions, refer to the [public documentation](https://docs.aws.amazon.com/lake-formation/latest/dg/optimize-ram.html). 
-------------------------------------------------------------------------------- /content/data-sharing/cross-account-versions.md: -------------------------------------------------------------------------------- 
1 | # Cross Account Versions 
2 | 
3 | Lake Formation improves its sharing features over time, which sometimes requires changes that are not backwards compatible. To give customers flexibility in choosing when to upgrade, Lake Formation uses versioned sharing modes. Each new version retains the benefits of previous versions while adding enhancements. 
4 | 
5 | To maximize the benefits of cross-account sharing, it is recommended to use the newest version of cross-account sharing (currently Version 4) when first setting up Lake Formation permissions or when upgrading from older versions. 
6 | 
7 | ## Version 1: 
8 | 
9 | This version creates an AWS Resource Access Manager (RAM) resource share for every cross-account Lake Formation permission granted through the named resource method. If there are many shares, this can exceed limits on the number of resource shares. For Lake Formation tag-based data sharing, cross-account permissions don't use AWS RAM; they rely on the Data Catalog resource policy instead. 
10 | 
11 | To avoid hitting any limits, it is best to upgrade to a newer version at your earliest convenience. You can check which version an account currently uses from its data lake settings, as in the sketch below.
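
The following minimal boto3 sketch reads the account-level data lake settings; the `CROSS_ACCOUNT_VERSION` parameter key is an assumption based on the current API shape, so verify it against the Lake Formation API reference before relying on it.

```python
import boto3

lakeformation = boto3.client("lakeformation")

# The cross-account sharing version is stored in the Parameters map of
# the account-level data lake settings (key name assumed here).
settings = lakeformation.get_data_lake_settings()["DataLakeSettings"]
version = settings.get("Parameters", {}).get("CROSS_ACCOUNT_VERSION", "1")
print(f"Cross-account sharing version: {version}")
```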
## Version 2:

This version streamlines cross-account permission grants between two AWS accounts by using a single AWS RAM resource share to represent multiple grants. This mapping dramatically reduces the number of required resource shares compared to individually sharing each permission grant, making cross-account sharing more scalable. While named resources use AWS RAM for cross-account sharing, LF-Tags based sharing still relies on Data Catalog resource policies. Lake Formation allows sharing catalog resources at the external account level but not at the IAM principal level.

If your system utilizes LF-Tags for sharing data, it is strongly advised that you upgrade to a more recent version.

## Version 3:

This version streamlines cross-account resource sharing by using AWS RAM to map multiple cross-account permissions to a single resource share, reducing the number of shares needed for cross-account access. Further, this version allows sharing resources directly to external account IAM principals as well as sharing at the account level. Additionally, LF-Tags based access control now uses AWS RAM for sharing, eliminating the need to update Glue Data Catalog resource policies and streamlining it in the same way as named-resource sharing. You can also establish tag-based cross-account sharing to AWS Organizations or organizational units.

## Version 4:

This version includes the benefits of V3. Additionally, it enables resources to be managed in hybrid access mode and also allows sharing hybrid resources across accounts.

For more details, refer to the [public documentation](https://docs.aws.amazon.com/lake-formation/latest/dg/hybrid-access-mode.html)

--------------------------------------------------------------------------------
/content/data-sharing/general-data-sharing.md:
--------------------------------------------------------------------------------
# General best practices when using Lake Formation data sharing

## Transitive sharing

Sharing of catalog objects across accounts using Lake Formation is not transitive. For example, if account A shares a database with account B, then account B cannot share it with account C. In this case, account A has to share the database with account B and account C individually. This is designed to maintain secure ownership of the data assets for the producer account.


## Cross account sharing scope and AWS RAM shares

When sharing a database or table using Lake Formation, we recommend sharing to AWS organizational units first, then account IDs, and then AWS IAM principals in the external account, in that order of preference. Setting up AWS Organizations provides an option in AWS RAM to enable sharing with AWS Organizations. A screenshot is shown below.

![RAM settings for sharing](images/RAM-setting-for-sharing.png)

By checking the box, you enable sharing with AWS Organizations. Sharing to an AWS Organization in Lake Formation then auto-accepts the RAM invites created by Lake Formation.

* If resources are shared at the account level, the shared resources are visible to the Lake Formation admin of the consumer account without having to accept the RAM invites. Using automation, you can create resource links and cascade permissions to additional users in the consumer account.
* If resources are shared directly to a cross-account IAM principal, the shared resources are not visible to the Lake Formation admin in the recipient account; only the IAM principal in the recipient account sees the shared resources. For an IAM principal level cross-account share, the recipient principal needs permissions to create resource links: CREATE_DATABASE permission to create a resource link for a shared database, or CREATE_TABLE permission inside a database to create a resource link for a shared table.


## Sharing Data Catalog databases versus tables

When you share a database or table through Lake Formation from one account to another, we recommend sharing at the database level rather than at the individual table level wherever you can. This is because you need to create a resource link for the shared resource in the consumer account if you plan to use Athena or Redshift Spectrum to query it. Creating a single database resource link is much easier, and all tables can then be referenced as rl_source_database.source_tablename, compared to creating resource links for each individually shared table.


## Resource Links

* Athena and Redshift Spectrum require a resource link to query a cross-account shared resource - either a database resource link or a table resource link. This is because the Athena SQL query editor and the Redshift Query Editor V2 console do not provide options to directly access shared database or table names that are not part of the consumer (recipient) account's catalog. Also, SQL queries cannot reference shared database and table names by catalog ID. A resource link, on the other hand, is an object in the recipient account's catalog that points to the shared resource, so Athena and Spectrum queries can use it to refer to the shared resource.

In the example below, rl_database_name refers to the resource link name for the shared database.

```sql
SELECT column_name1,column_name2 FROM rl_database_name.table_name;
```

* Spark jobs in Glue ETL and EMR can refer to the shared resource by providing the catalog ID of the source account. Hence, resource links are not required if your workflow uses only Glue ETL and EMR.

In the example below, you point your query in the recipient account directly at the shared database and table names by providing the catalog_id of the sharing (source) account.

```python
AWSGlueDataCatalog_node1709588807746 = glueContext.create_dynamic_frame_from_catalog(
    database="source_account_dbname",
    table_name="source_account_tablename",
    transformation_ctx="AWSGlueDataCatalog_node1709588807746",
    catalog_id="123456789012",  # catalog ID (account ID) of the source account, as a string
)
```

## Recipient account considerations

Data sharing through Lake Formation requires a few actions to be performed on the consumer (recipient) account side. These can be automated but cannot be avoided entirely.

The steps for the recipient of a shared resource are as follows:

1. Accept the RAM invite(s) if the account is not part of an AWS Organization, or if the organization is not configured to automatically accept invites.
2. If you plan to have the shared resource accessible to Athena and/or Redshift Spectrum, create a resource link to the shared database or table using Lake Formation administrator permissions, and grant DESCRIBE permission on the resource link to all the principals within the account that need access (see the sketch after this list).
3. In addition, grant permissions on the shared resources themselves to the principals within the account that need access to them.
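The following is a minimal boto3 sketch of steps 2 and 3 for a shared database. The database, table, account IDs, and role name are placeholders; run it as a Lake Formation administrator in the recipient account.

```python
import boto3

glue = boto3.client("glue")
lakeformation = boto3.client("lakeformation")

# Step 2a: create a database resource link that points to the shared database.
glue.create_database(
    DatabaseInput={
        "Name": "rl_salesdb",
        "TargetDatabase": {
            "CatalogId": "123456789012",  # source (owner) account
            "DatabaseName": "salesdb",
        },
    }
)

# Step 2b: grant DESCRIBE on the resource link to a local principal.
lakeformation.grant_permissions(
    Principal={"DataLakePrincipalIdentifier": "arn:aws:iam::111122223333:role/AnalystRole"},
    Resource={"Database": {"Name": "rl_salesdb"}},
    Permissions=["DESCRIBE"],
)

# Step 3: grant SELECT on the shared table itself to the same principal.
lakeformation.grant_permissions(
    Principal={"DataLakePrincipalIdentifier": "arn:aws:iam::111122223333:role/AnalystRole"},
    Resource={
        "Table": {
            "CatalogId": "123456789012",
            "DatabaseName": "salesdb",
            "Name": "customer_sales",
        }
    },
    Permissions=["SELECT"],
)
```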
If resources are shared with a new account for the first time under Lake Formation cross-account sharing Version 3 or higher, two RAM invites are sent from the owner to the consumer account: one for the database level and one for the table level. The consumer account has to accept both RAM invites if AWS Organizations is not set up as described earlier.

Lake Formation users who use automation to add and share resources can pass the RAM share ARN created by Lake Formation for each cross-account share from the producer to the consumer. The consumer can verify the passed ARN against the ARN showing up in AWS RAM under the 'Resources shared with me' tab and accept the RAM invite. For any additional resources shared between the same producer and consumer, the new databases and tables are automatically added to the already accepted RAM share, and no further action is needed from the consumer admin.

--------------------------------------------------------------------------------
/content/data-sharing/images/RAM-setting-for-sharing.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-lakeformation-best-practices/010d6a82bcd2b3980e06657e7340eb88d65d3854/content/data-sharing/images/RAM-setting-for-sharing.png
--------------------------------------------------------------------------------
/content/data-sharing/overview.md:
--------------------------------------------------------------------------------
# Cross Account Data Sharing

Lake Formation's cross-account features enable secure data sharing across multiple AWS accounts, AWS Organizations, or directly with IAM users in other accounts. This allows granular access to the Data Catalog metadata without duplicating the underlying data.

* See [Cross Account Versions](cross-account-versions.md) for the various cross account versions available.
* See [Upgrading Cross Account Versions](upgrading-cross-account-versions.md) for steps on how to upgrade the cross account version.
* See [Cross Account FAQS](cross-account-faq.md) for cross account related FAQs.
--------------------------------------------------------------------------------
/content/data-sharing/upgrading-cross-account-versions.md:
--------------------------------------------------------------------------------
# Upgrading Cross Account Versions

Lake Formation improves its sharing features over time, which sometimes requires changes that are not backwards compatible. To give customers flexibility in choosing when to upgrade, Lake Formation uses versioned sharing modes. Each new version retains the benefits of previous versions while adding enhancements.

To maximize the benefits of cross-account sharing, it is recommended to use the newest version of cross-account sharing (currently Version 4) when first setting up Lake Formation permissions or when upgrading from older versions.

## Cross Account Versions

See [Cross account Versions](cross-account-versions.md) for a summary of cross account versions and the [cross account public documentation](https://docs.aws.amazon.com/lake-formation/latest/dg/cross-account-permissions.html) for detailed information.
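Before upgrading, it can help to confirm which version an account currently uses. Below is a minimal boto3 sketch; the CROSS_ACCOUNT_VERSION settings parameter comes from the Lake Formation documentation, and defaulting to "1" for accounts that never changed the setting is an assumption.

```python
import boto3

lakeformation = boto3.client("lakeformation")

# Read the current cross-account version from the data lake settings;
# accounts that never set it are assumed to be on version "1".
settings = lakeformation.get_data_lake_settings()["DataLakeSettings"]
current = settings.get("Parameters", {}).get("CROSS_ACCOUNT_VERSION", "1")
print(f"Current cross-account version: {current}")

# To upgrade, write the settings back with the new value.
settings.setdefault("Parameters", {})["CROSS_ACCOUNT_VERSION"] = "4"
lakeformation.put_data_lake_settings(DataLakeSettings=settings)
```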
## Cross Account Compatibility Table

The table below summarizes which combinations of source and target account cross-account versions are supported.

| Source account\Target account | Target account(receiver) - V1 | Target account(receiver) - V2| Target account(receiver) - V3| Target account(receiver) - V4|
| -------- | ------- | -------- | -------- | -------- |
| Source Account(grantor) - V1 | supported | supported | not supported | not supported |
| Source Account(grantor) - V2 | supported | supported | Sharing via Named Resources supported, LF-Tags not supported | Sharing via Named Resources supported, LF-Tags not supported |
| Source Account(grantor) - V3 | supported | supported | supported | supported |
| Source Account(grantor) - V4 | supported | supported | supported | supported |

**Note:**
Modifying the grantor account's cross-account version settings does not change existing permissions the receiver account has on shared resources. However, if you are using V1 sharing and hitting your RAM resource share limit, consider revoking and re-granting cross-account permissions in the source account. Doing so will consolidate multiple RAM resource shares into fewer shares, allowing you to share more resources.

## Steps to be taken in Source Account:

1. The IAM role or IAM user granting the permissions (that is, the grantor) must have the permissions in the AWS managed policy arn:aws:iam::aws:policy/AWSLakeFormationCrossAccountManager to grant cross-account permissions; you can attach this managed policy to your role.
2. If you currently use a Glue catalog resource policy, add the following statement to the resource policy as well. Replace `<account-id>` with the AWS account ID of the grantor account, and `<region>` with the AWS Region where your resources are located. If you do not have an existing Glue catalog resource policy, no additional steps are required.

```json
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "glue:ShareResource"
            ],
            "Principal": {"Service": [
                "ram.amazonaws.com"
            ]},
            "Resource": [
                "arn:aws:glue:<region>:<account-id>:catalog",
                "arn:aws:glue:<region>:<account-id>:database/*",
                "arn:aws:glue:<region>:<account-id>:table/*"
            ]
        }
    ]
}
```

3. Update the cross-account version of the source (grantor) account to V4.

Once you choose Version 4, all new named resource grants will go through the new cross-account grant mode when using Lake Formation sharing.

Optionally, to optimize AWS RAM usage for your existing cross-account shares, you can follow the steps below:

1. Get the list of permissions defined in the account using the list-permissions API, and filter for the cross-account permissions on resources owned by the account. In the case of named resource sharing, you can filter for the permissions that have a ResourceShare listed under AdditionalDetails. For more details refer to [List Permissions](https://docs.aws.amazon.com/cli/latest/reference/lakeformation/list-permissions.html)

Example output of list-permissions for a cross-account share.
```json
{
    "Principal": {
        "DataLakePrincipalIdentifier": "987654321012"
    },
    "Resource": {
        "Database": {
            "CatalogId": "123456789012",
            "Name": "salesdb"
        }
    },
    "Permissions": [
        "DESCRIBE"
    ],
    "PermissionsWithGrantOption": [
        "DESCRIBE"
    ],
    "AdditionalDetails": {
        "ResourceShare": [
            "arn:aws:ram:us-east-1:123456789012:resource-share/15bc4e61-1423-4c44-9452-c23fda161f3f"
        ]
    },
    "LastUpdated": "2024-03-14T19:07:25.687000+00:00",
    "LastUpdatedBy": "arn:aws:iam::123456789012:role/LFAdmin"
}
```

2. Revoke the permissions granted on the resources to the cross-account principal. This removes the V1 RAM share corresponding to the resources.
Caution: Once access is revoked for the external account from the source account, any principals in the target account who have cascaded access will lose their access to the resource.

3. Re-grant the permissions on the resources. This establishes a V4 RAM share for the shared resources.

Note: You can also revoke or grant permissions incrementally using batch APIs for each database and its tables that are shared across accounts, and validate that the shared resource is accessible in the target account. For more information refer to the [public documentation](https://docs.aws.amazon.com/lake-formation/latest/dg/cross-account-notes.html)

4. Optionally, you can validate the resources that are shared through RAM invites by invoking the RAM API for named resource sharing. See the [public documentation](https://docs.aws.amazon.com/cli/latest/reference/ram/) for more information.

Note: Permissions granted cross-account on LF-Tags will stay intact.

## Steps to be taken in Target Account:

1. To receive and accept resource shares using AWS RAM, data lake administrators in target accounts must have an additional policy that grants permission to accept AWS RAM resource share invitations and to enable resource sharing with organizations. For information on how to enable sharing with organizations, see Enable sharing with AWS Organizations in the AWS RAM User Guide.

```json
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "ram:AcceptResourceShareInvitation",
                "ram:RejectResourceShareInvitation",
                "ec2:DescribeAvailabilityZones",
                "ram:EnableSharingWithAwsOrganization"
            ],
            "Resource": "*"
        }
    ]
}
```

2. If the source and target accounts are part of an AWS Organization and resource sharing is enabled, the accounts within that Organization automatically have access to the shared resources without needing RAM invitations.
3. When resource sharing with the organization is not enabled, the target account will see new RAM invitations for the resources shared with it. The data lake admin needs to accept these RAM invites; this establishes the Glue catalog resource policies that let the target account access the shared resources.
4. In the case of a re-grant of an existing shared resource from the source account (grantor) with the new cross-account version, the existing resource links and the permissions on those resource links remain intact for the account admin and other principals in the target account (receiver).
5. Validate access to the shared resource in the target account.
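As an illustration of accepting the RAM invites (step 3 above), here is a minimal boto3 sketch, run as the target account's data lake administrator:

```python
import boto3

ram = boto3.client("ram")

# Accept any pending resource share invitations in the target account.
invites = ram.get_resource_share_invitations()["resourceShareInvitations"]
for invite in invites:
    if invite["status"] == "PENDING":
        ram.accept_resource_share_invitation(
            resourceShareInvitationArn=invite["resourceShareInvitationArn"]
        )
        print(f"Accepted share: {invite['resourceShareArn']}")
```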
### Note:
In order to query a shared database or table using Amazon Athena in the recipient account, you need a resource link. In the case where the database or table is shared to a direct IAM principal in the recipient account, the principal needs the Lake Formation CREATE_TABLE or CREATE_DATABASE permission to create the resource link. They also need the glue:CreateTable or glue:CreateDatabase IAM permission in their IAM policy (based on the resource type that is shared).
For more information on this topic, refer to the blog: [Introducing hybrid access mode for AWS Glue Data Catalog to secure access using AWS Lake Formation and IAM and Amazon S3 policies](https://aws.amazon.com/blogs/big-data/introducing-hybrid-access-mode-for-aws-glue-data-catalog-to-secure-access-using-aws-lake-formation-and-iam-and-amazon-s3-policies/)

## Other References:

- [Cross Account Permissions](https://docs.aws.amazon.com/lake-formation/latest/dg/cross-account-permissions.html)

--------------------------------------------------------------------------------
/content/index.md:
--------------------------------------------------------------------------------
# AWS Lake Formation

AWS Lake Formation helps you centrally govern, secure, and globally share data for analytics and machine learning. This guide provides best practices for using various features and aspects of Lake Formation.

This guide is broken down into the following topics:

- [Best Practices when adopting Lake Formation](adopting-lake-formation/overview.md)
- [Best Practices when using Lake Formation's data sharing feature](data-sharing/overview.md)
- [Best Practices when using LF-Tags](lf-tags/overview.md)
--------------------------------------------------------------------------------
/content/lf-tags/basics.md:
--------------------------------------------------------------------------------
# Lake Formation Tags (LF-Tags) Basics

## Named Resource policies vs LF-tag policies

When creating an access policy within Lake Formation, there are 3 parts:

1) the principals (e.g. IAM users, IAM roles, external accounts, SAML users/groups, etc.) that you want to grant permissions to.
2) the resources you want to provide access to (e.g. database db1, table tableA, etc.)
3) the permissions you want to grant (e.g. SELECT, DESCRIBE, ALTER, etc. on a table)

The difference between Named Resource policies and LF-Tag policies is #2, the resources you want to provide permissions on.

With Named Resource policies, you specify a specific resource (database, table, column, etc.). With LF-Tags, you provide an LF-Tag expression which is matched against resources to determine permissions. This makes LF-Tags much more scalable, as you can grant permissions on many resources at once, and permissions are updated accordingly as tags on resources are added, changed, or removed.

## Why use LF-Tags?

LF-Tags are a mechanism for grouping similar resources together and granting permissions on the group as a whole. For example, if you have multiple databases and tables that are used by a wide variety of different groups of users, you can tag the databases and tables that belong together and grant full read-write permissions on all of them using a single grant.

![image](images/lf-tags-grouping.png)

Using LF-Tags can greatly reduce the number of grants compared to using Named Resource policies.
See the example below:

![image](images/lf-tags-vs-named-resources-example.png)

In the above example, with 4 users and 5 tables, if you were to use Named Resource policies, you would need to issue 20 grants. However, if you used LF-Tags and tagged all the tables with the same LF-Tag, you would need only one grant per user to the tag, for a total of 4 grants. If there were hundreds of users and thousands of resources, you would need 100 x 1,000 = 100,000 grants using named resources, but only 100 grants (one per user) plus 1,000 tagging operations (one per resource) using LF-Tags.

LF-Tags are also hierarchical. If you tag a database, all tables and columns within the database inherit the LF-Tag. If you tag a table, then all of its columns inherit the tag. You can also override inherited tags: if you tag a database with AccessLevel = 'public' but want to change the tag value for one table to AccessLevel = 'private', then that table and all of its columns carry the overridden value.

## How LF-Tag expressions work

When granting permissions using LF-Tags, you need to provide an LF-Tag expression. If this expression evaluates to true for a resource, the principal is granted access to that resource. LF-Tag expressions contain one or more LF-Tag names and, for each LF-Tag, one or more values. The tag names are AND'ed together in the expression, and the values within each tag are OR'ed; for example, LF-Tag1 = 'abc' AND LF-Tag2 = ('edf' OR 'ghi'). Separate LF-Tags cannot be OR'ed with each other.

Let's look at an example:

![image](images/lf-tags-example.png)

In the above example, we have three tables (although the same applies to columns and databases as well).

If you were to create a grant with the LF-Tag expression Sensitivity = "Public", then this expression would be true for the Customer table and the Sales table. Notice that a resource can match even when it carries additional tags that are not part of the expression.

If you were to create a grant with the LF-Tag expression Audit = "true", then only the Sales table would evaluate to true.

If you were to create a grant with the LF-Tag expression Owner = 'Aarthi' and Audit = 'true', then again, only the Sales table would evaluate to true.

## Limitations
Before using LF-Tags, please see the Lake Formation documentation for the current limitations, located at https://docs.aws.amazon.com/lake-formation/latest/dg/TBAC-notes.html
--------------------------------------------------------------------------------
/content/lf-tags/best-practices.md:
--------------------------------------------------------------------------------
# LF-Tag best practices

## Integrating with LF-Tags

When integrating with LF-Tags, there are a few suggestions on how to adopt them successfully. Consider the following advice:

* Define your LF-Tag ontology for your current and future needs. Adding or changing the ontology will require additional work in the future.

* Make note of the limitations that currently exist. For example, LF-Tags currently do not support tagging resources with multiple values from the same LF-Tag. See the [limitations](limitations.md) for details.

* Keep the tagging ontology as simple as possible. Having too many LF-Tags can make it difficult to manage and track permissions.

* Document the tagging ontology and share it with data stewards so that they are very clear on when and how to use the LF-Tags.

* LF-Tag permissioning and Named Resource policies can exist together.
The effective permissions for a user are the union of the permissions from all named resource policies and LF-Tag policies.
--------------------------------------------------------------------------------
/content/lf-tags/common-ontologies.md:
--------------------------------------------------------------------------------
# Common LF-Tag Ontologies

Here is a list of common LF-Tags, making up a tagging ontology, that are commonly seen among customers:

1. Environment (e.g. dev, beta, gamma, prod) - LF-Tag that denotes the stage in which a particular resource exists, for example granting developers access to resources in dev and beta, but only application access in gamma and prod.
2. Departments (e.g. sales, marketing, engineering, etc.) - LF-Tag that groups department datasets using a single tag. This can be used for sharing purposes (i.e., departmentA shares with departmentB).
3. Product (e.g. dataproduct1, dataproduct2, etc.) - LF-Tag that groups similar datasets under a data product that can be used for permissioning and sharing.
4. Owner (e.g. GroupA, GroupB, GroupC) - LF-Tag that gives certain groups of principals a different set of permissions (like write/delete permissions) than what is granted for sharing purposes.
5. Roles (e.g. data scientist, data engineer, application, etc.) - LF-Tag that provides different levels of permissions based on role. For example, a data steward could have grantable full access to a dataset, whereas a data scientist could have read-only permissions.
6. Data Classification (e.g. SSN, Address, PhoneNumber, etc.) - LF-Tag that allows you to grant permissions differently based on the classification of the data. More sensitive classifications can be restricted to privileged consumers.
7. Data Sensitivity (e.g. public, private, PII, etc.) - LF-Tag that allows different groups of principals to access columns or tables based on their sensitivity, to meet compliance rules like GDPR.
8. Sharable (e.g. sharable, notsharable) - LF-Tag that groups resources that can be shared with other departments.
--------------------------------------------------------------------------------
/content/lf-tags/developing-an-lftag-ontology.md:
--------------------------------------------------------------------------------
# Developing an LF-Tag Ontology

Using LF-Tags can make permissioning much easier and more scalable than using named resource policies. However, it may be a little tricky to figure out what your LF-Tag ontology should look like. Here are some approaches and best practices for working out a tag ontology.

1. The best way to think of LF-Tags is as a mechanism to group resources together so that permissions can then be granted on the group. Permissions are granted based on the intersection of the tags on the group of resources, like the diagram below:

![image](./images/lf-tags-expression-grouping-example.png)

2. There are two high-level approaches you can take: starting from scratch, or trying to replicate the current permissions using LF-Tags. By starting from scratch, your design will tend to be simpler and more intuitive because the design is not constrained to fit an existing permissioning model. The drawback of this approach is that some users may get more or fewer permissions than before. Some customers want this, as it is an opportunity to simplify the permissions model.
3. Start with the permissions requirements, document them, and build on top of that.
From your requirements, what categories can you create? For example, what levels of data sensitivity exist (e.g. PII/Confidential/Public)? What groups of users need access to the same resources (e.g. users within a department of your organization)? What different roles are there, and what levels of access do they need (e.g. Data Stewards, Data Engineers, Data Scientists)? Do you need finer-grained permissions, such as users within a project needing access to only project-level resources? See https://aws.github.io/aws-lakeformation-best-practices/lf-tags/common-ontologies/ for common tags that other users have created.
4. Create a table in a spreadsheet with roles/users on one axis and resources (tables/databases) on the other axis. Mark which resources should be accessible to which roles. This will help you visualize and identify patterns that could lead to additional categories or help simplify permissions.
5. One important thing to remember is that LF-Tags can be used to define a very broad range of resources or a very narrow one. One LF-Tag can be used to group a very large set of resources, while additional tags can further split the large group into smaller groups. Balancing the two will ensure that your ontology gets neither too complex nor too simple.
6. Lastly, although it's impossible to predict the future, consider any additional requirements that may come up and how they may impact your ontology.

#### Simple Example:

Consider AnyCompany, which has the following objects in its Catalog:

| Database | Table |
|:------|:---------|
| marketing | ads |
| marketing | ad_campaigns |
| sales | inventory |
| sales | customer_sales |
| hr | people |
| hr | salaries |

The roles that want to access these datasets are:

* marketing data engineers - have the ability to read/write any marketing datasets
* marketing data analysts - have the ability to read any marketing datasets
* sales data engineers - have the ability to read/write any sales datasets
* sales data analysts - have the ability to read any sales datasets
* CFO office analysts - have the ability to read any marketing and sales datasets
* HR engineers - have the ability to read/write only HR datasets
* HR analysts - have the ability to read HR datasets, but only non-confidential information; salaries are considered confidential
* HR managers - have the ability to read HR datasets, both confidential and non-confidential data
* data platform engineers - have the ability to describe all datasets, but are not able to read/write them
They are required to have the following access:

| | | marketing data engineers | marketing data analysts | sales data engineers | sales data analysts | CFO office analysts | HR engineers | HR analysts | HR Managers | data platform engineers |
|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|:--|
| marketing | ads | read/write | read | | | read | | | | describe |
| marketing | ad_campaigns | read/write | read | | | read | | | | describe |
| sales | inventory | | | read/write | read | read | | | | describe |
| sales | customer_sales | | | read/write | read | read | | | | describe |
| hr | people | | | | | | read/write | read | read | describe |
| hr | salaries | | | | | | read/write | | read | describe |

From this, we can group some of these accesses together:

* Department = Marketing, Sales, HR, where Marketing is for the marketing database, Sales for the sales database, and HR for the hr database.
* Roles = Engineers, Analysts, Platform, where Engineers have read/write access, Analysts have read access, and Platform has describe access.
* Confidentiality = Public, Confidential, where Public means anyone can read the data and Confidential means access is restricted (for example, to a subset of the columns in a table).

From the information above, we tag the resources in the following way:

| | | Tag 1 | Tag 2 |
|:--|:--|:--|:-- |
| marketing | ads | Department = Marketing | Confidentiality=Public |
| marketing | ad_campaigns | Department = Marketing | Confidentiality=Public |
| sales | inventory | Department = Sales | Confidentiality=Public |
| sales | customer_sales | Department = Sales | Confidentiality=Public |
| hr | people | Department = HR | Confidentiality=Public |
| hr | salaries | Department = HR | Confidentiality=Confidential |

**Note 1:** if there are any confidential columns in these datasets, you can set the table to Confidentiality=Public and override the sensitive columns to Confidentiality=Confidential.
**Note 2:** Tables inherit the tags from their databases, and columns inherit their tags from their table. In this case, we would set the Department tag on only the respective database, and the tables will inherit the values.

And then we can grant permissions in the following way:

| Principal: | Actions | LF Expression Grant |
|:--|:--|:--|
| marketing data engineers | read/write | Department = Marketing |
| marketing data analysts | read | Department = Marketing |
| sales data engineers | read/write | Department = Sales |
| sales data analysts | read | Department = Sales |
| CFO office analysts | read | Department = (Marketing OR Sales) |
| HR engineers | read/write | Department = HR |
| HR analysts | read | Department = HR and Confidentiality = Public |
| HR manager | read | Department = HR |
| data platform engineers | describe | Department = (Sales OR Marketing OR HR) |

**Note 1:** you will notice that Confidentiality is not specified in many of the grants above. If Confidentiality is not specified, permission is granted to the resource regardless of whether the resource has the tag, and regardless of its value.

**Note 2:** for some complex use cases, you can grant access to multiple subsets of resources using multiple grants. For example, if the data platform engineers needed read/write access to the sales and marketing databases, but only needed describe permissions on the hr database, this can be achieved by having one grant for the sales and marketing databases for read/write access, and a separate grant to the hr database for describe.
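To make this concrete, here is a minimal boto3 sketch of how one of the tags and grants above could be created. The principal ARN and account ID are placeholders, "read" is mapped to SELECT for illustration, and the sketch assumes the LF-Tag does not already exist.

```python
import boto3

lakeformation = boto3.client("lakeformation")

# Define the Department tag with its allowed values.
lakeformation.create_lf_tag(
    TagKey="Department", TagValues=["Marketing", "Sales", "HR"]
)

# Tag the marketing database; its tables inherit the value.
lakeformation.add_lf_tags_to_resource(
    Resource={"Database": {"Name": "marketing"}},
    LFTags=[{"TagKey": "Department", "TagValues": ["Marketing"]}],
)

# Grant the marketing data analysts read access (SELECT) on all tables
# that match the tag expression Department = Marketing.
lakeformation.grant_permissions(
    Principal={
        "DataLakePrincipalIdentifier": "arn:aws:iam::123456789012:role/MarketingDataAnalysts"
    },
    Resource={
        "LFTagPolicy": {
            "ResourceType": "TABLE",
            "Expression": [{"TagKey": "Department", "TagValues": ["Marketing"]}],
        }
    },
    Permissions=["SELECT"],
)
```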
For a more complex example, please see https://aws.github.io/aws-lakeformation-best-practices/lf-tags/example-usecase/
--------------------------------------------------------------------------------
/content/lf-tags/example-usecase.md:
--------------------------------------------------------------------------------
# Example Customer use case
Customer X has a marketing department with the following personas and requirements:

- Executive : Should be able to access all the marketing datasets.

- Insight Analyst : Should have read access to all non-sensitive datasets that are shared with the department.

- Application Manager : Manages 2 teams - AppTeamA & AppTeamB - and should have read/write access to all the datasets both teams use.

- AppTeamA :
Developers : Need read/write access to the datasets used by project A.
Data Scientists : Need read/write access to the non-sensitive datasets used by project A.

- AppTeamB :
Developers : Need read/write access to the datasets used by project B.
Data Scientists : Need read/write access to the non-sensitive datasets used by project B.

The marketing department has the following datasets in the MarketingDB database to cater to the needs of these users:

| Table Name | Table Description |
|:-----------|:------------------|
|Customer|Contains customer details including sensitive information|
|Product|Contains product details that are maintained by the department.|
|Campaigns|Contains campaign information that is available for each of the products.|
|Recommendation|Contains customer preferences and recommendations based on user profile.|
|ExecutiveDashboard|Contains revenue reports for executive access.|

The following LF-Tag ontology is defined for the datasets:

|Tag Key|Tag Values|
|:------|:---------|
|Domain| • Marketing - the only allowed value for now |
|level| • 1 - Executive only <br> • 2 - Non-executive users in the department <br> • 3 - open to all users |
|Dataset| • Shared - Common datasets that are shared with other departments <br> • Non-Shared - Datasets that are specific to the department |
|Classification| • Restricted - Contains sensitive data <br> • Public - Contains non-sensitive data |

Team A and team B might have overlapping dataset needs. Say, for example, we have a tag AppTeam with the values TeamA and TeamB, and both teams need access to the same table. The limitation that currently exists is that you can't associate multiple values of the same tag key with a given table. The workaround is to create a tag for each team, as shown below (a sketch of applying this workaround appears at the end of this page):

|Tag Key|Tag Values|
|:------|:---------|
|AppTeam:TeamA| yes - For datasets used in Project A|
|AppTeam:TeamB| yes - For datasets used in Project B|

Tags are associated with resources as shown below:
![image](images/lf-tags-resource-mapping.png)

Resource access is granted to users using LF-Tag expressions as shown below:
![image](images/lf-tags-expression-principal-mapping.png)

Let us review the LF-Tag expression permissions granted to the personas and the effective resource access available to them:

**Executive** - gets read/write access to all tables under MarketingDB.

**Insight Analyst** - gets read access to the Product and Campaigns tables, which are shared datasets available to non-executive users.

**Application Manager** - has 2 sets of permissions granted to provide access to both the AppTeamA and AppTeamB datasets. So the effective permission is the union of all the resources granted to each team, which includes read/write access to the Customer, Product, Campaigns, and Recommendation tables.

**AppTeamA:**

*Developers* - get read/write access to the tables tagged with AppTeam:TeamA - Customer, Product, Recommendation.

*Data Scientists* - get read/write access to the non-sensitive data in the tables tagged with AppTeam:TeamA - Customer (all columns except SSN, EMAIL, NAME), Product, Recommendation.

**AppTeamB:**
*Developers* - get read/write access to the tables tagged with AppTeam:TeamB - Product, Campaigns.

*Data Scientists* - get read/write access to the non-sensitive data in the tables tagged with AppTeam:TeamB - Product, Campaigns.
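As referenced above, here is a minimal boto3 sketch of the per-team tag workaround. The database and table names come from the example and should be treated as placeholders.

```python
import boto3

lakeformation = boto3.client("lakeformation")

# One boolean-style tag per team works around the single-value-per-tag-key limitation.
for team_tag in ("AppTeam:TeamA", "AppTeam:TeamB"):
    lakeformation.create_lf_tag(TagKey=team_tag, TagValues=["yes"])

# The Product table is used by both projects, so it carries both tags.
lakeformation.add_lf_tags_to_resource(
    Resource={"Table": {"DatabaseName": "marketingdb", "Name": "product"}},
    LFTags=[
        {"TagKey": "AppTeam:TeamA", "TagValues": ["yes"]},
        {"TagKey": "AppTeam:TeamB", "TagValues": ["yes"]},
    ],
)
```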
--------------------------------------------------------------------------------
/content/lf-tags/images/lf-tags-example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-lakeformation-best-practices/010d6a82bcd2b3980e06657e7340eb88d65d3854/content/lf-tags/images/lf-tags-example.png
--------------------------------------------------------------------------------
/content/lf-tags/images/lf-tags-expression-grouping-example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-lakeformation-best-practices/010d6a82bcd2b3980e06657e7340eb88d65d3854/content/lf-tags/images/lf-tags-expression-grouping-example.png
--------------------------------------------------------------------------------
/content/lf-tags/images/lf-tags-expression-principal-mapping.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-lakeformation-best-practices/010d6a82bcd2b3980e06657e7340eb88d65d3854/content/lf-tags/images/lf-tags-expression-principal-mapping.png
--------------------------------------------------------------------------------
/content/lf-tags/images/lf-tags-grouping.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-lakeformation-best-practices/010d6a82bcd2b3980e06657e7340eb88d65d3854/content/lf-tags/images/lf-tags-grouping.png
--------------------------------------------------------------------------------
/content/lf-tags/images/lf-tags-resource-mapping.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-lakeformation-best-practices/010d6a82bcd2b3980e06657e7340eb88d65d3854/content/lf-tags/images/lf-tags-resource-mapping.png
--------------------------------------------------------------------------------
/content/lf-tags/images/lf-tags-vs-named-resources-example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-lakeformation-best-practices/010d6a82bcd2b3980e06657e7340eb88d65d3854/content/lf-tags/images/lf-tags-vs-named-resources-example.png
--------------------------------------------------------------------------------
/content/lf-tags/limitations.md:
--------------------------------------------------------------------------------
# LF-Tag limitations

Please see Lake Formation's public documentation for the current limitations, as they may change. Many of the limits associated with LF-Tags are soft limits, so if you need an increase, please open a support case.

## Limitation #1 - You can only attach a single value of an LF-Tag to a resource

Suppose you want to create an LF-Tag, *AccessibleRoles*, that contains a list of roles - *Engineering, Sales, Marketing* - that can access a resource. If you need multiple roles to access a single resource, you would need to attach multiple values of the tag to the resource, which is not supported today.

The solution here is to create multiple LF-Tags that represent each value. From the above example, we can create the LF-Tags *AccessibleRoles:Engineering*, *AccessibleRoles:Sales*, and *AccessibleRoles:Marketing*, each with a single value of *true*. You can then tag a resource with the appropriate LF-Tags.

## Limitation #2 - No OR expression in grant expression for multiple LF-Tags

Suppose we take the example in limitation #1, where we have the LF-Tags *AccessibleRoles:Engineering*, *AccessibleRoles:Sales*, and *AccessibleRoles:Marketing*, and you want to grant a principal access to all resources that are tagged with *AccessibleRoles:Sales* or *AccessibleRoles:Marketing*. LF-Tag expressions do not support OR operators between LF-Tags (i.e., granting user User1 access to all resources tagged with *AccessibleRoles:Sales = true OR AccessibleRoles:Marketing = true*). This is currently not supported with LF-Tags.

The solution here is to perform multiple grant operations, one for each LF-Tag. For example, we can have one grant expression of *AccessibleRoles:Sales = true*, and another of *AccessibleRoles:Marketing = true*, as sketched below.
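A minimal boto3 sketch of this multi-grant workaround (the principal ARN is a placeholder, and SELECT is used as the example permission):

```python
import boto3

lakeformation = boto3.client("lakeformation")

# One grant per LF-Tag mimics an OR across separate tags.
for tag_key in ("AccessibleRoles:Sales", "AccessibleRoles:Marketing"):
    lakeformation.grant_permissions(
        Principal={
            "DataLakePrincipalIdentifier": "arn:aws:iam::123456789012:user/User1"
        },
        Resource={
            "LFTagPolicy": {
                "ResourceType": "TABLE",
                "Expression": [{"TagKey": tag_key, "TagValues": ["true"]}],
            }
        },
        Permissions=["SELECT"],
    )
```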
## Limitation #3 - No less-than operator

Some customers may want to create an LF-Tag that has a scale or hierarchy, where a grant at one level should give the user permissions at that level and everything below it. Since LF-Tag expressions support the OR operator for LF-Tag values, you can mimic this behavior. For example, if you have a *SensitivityLevel* tag with values *1,2,3,4,5* and you want to grant a user the resources at *SensitivityLevel* = 3 and below, your grant expression can be *SensitivityLevel = 1 OR 2 OR 3*.
--------------------------------------------------------------------------------
/content/lf-tags/overview.md:
--------------------------------------------------------------------------------
# Lake Formation Tags (LF-Tags)

Lake Formation Tags (LF-Tags) were created to help customers scale their permissions by providing an alternative way of creating access policies to using named resources.

* See [LF-Tag Basics](basics.md) for how LF-Tags work.
* See [Common LF-Tag Ontologies](common-ontologies.md) for common tags customers tend to use.
* See [Best Practices](best-practices.md) for best practices when using LF-Tags.
* See [Developing an LF-Tag Ontology](developing-an-lftag-ontology.md) for best practices when trying to develop an LF-Tag ontology.
* See [Example Usage](example-usecase.md) for an example usage of LF-Tags.
* See [Limitations](limitations.md) for limitations to be aware of.
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
site_name: "AWS Lake Formation Best Practices Guides"
repo_name: "aws/aws-lakeformation-best-practices"
repo_url: "https://github.com/aws/aws-lakeformation-best-practices"
docs_dir: "content"
theme:
  name: material
  features:
    - tabs
    - content.code.copy
    - content.code.select
nav:
  - Introduction: 'index.md'
  - 'Adopting Lake Formation':
    - 'Overview': 'adopting-lake-formation/overview.md'
    - 'Considerations when adopting Lake Formation': 'adopting-lake-formation/considerations-when-adopting-lake-formation.md'
    - 'Lake formation Adoption Modes': 'adopting-lake-formation/lake-formation-adoption-modes.md'
    - 'General Best Practices': 'adopting-lake-formation/general-best-practices.md'
  - 'Data Sharing':
    - 'Overview': 'data-sharing/overview.md'
    - 'General Best Practices when using Lake Formation Data Sharing': 'data-sharing/general-data-sharing.md'
    - 'Cross Account Versions': 'data-sharing/cross-account-versions.md'
    - 'Upgrading Cross Account Version': 'data-sharing/upgrading-cross-account-versions.md'
    - 'Cross Account FAQS': 'data-sharing/cross-account-faq.md'
  - 'LF-Tags':
    - 'Overview': 'lf-tags/overview.md'
    - 'LF-Tags Basics': 'lf-tags/basics.md'
    - 'Common Ontologies': 'lf-tags/common-ontologies.md'
    - 'Developing an LF-Tag Ontology': 'lf-tags/developing-an-lftag-ontology.md'
    - 'Best Practices': 'lf-tags/best-practices.md'
    - 'Example Usecase': 'lf-tags/example-usecase.md'
    - 'Limitations': 'lf-tags/limitations.md'
markdown_extensions:
  - toc:
      permalink: true
  - admonition
  - codehilite
  - pymdownx.highlight:
      anchor_linenums: true
      line_spans: __span
      pygments_lang_class: true
  - pymdownx.inlinehilite
  - pymdownx.snippets
  - pymdownx.superfences
  - attr_list
  - sane_lists
  - tables
--------------------------------------------------------------------------------
/poetry.lock:
--------------------------------------------------------------------------------
1 | # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
2 |
3 | [[package]]
4 | name = "babel"
5 | version = "2.15.0"
6 | description = "Internationalization utilities"
7 | optional = false
8 | python-versions = ">=3.8"
9 | files = [
10 |     {file = "Babel-2.15.0-py3-none-any.whl", hash = "sha256:08706bdad8d0a3413266ab61bd6c34d0c28d6e1e7badf40a2cebe67644e2e1fb"},
11 |     {file = "babel-2.15.0.tar.gz", hash = "sha256:8daf0e265d05768bc6c7a314cf1321e9a123afc328cc635c18622a2f30a04413"},
12 | ]
13 |
14 | [package.extras]
15 | dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
16 |
17 | [[package]]
18 | name = "certifi"
19 | version = "2024.7.4"
20 | description = "Python package for providing Mozilla's CA Bundle."
21 | optional = false 22 | python-versions = ">=3.6" 23 | files = [ 24 | {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, 25 | {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, 26 | ] 27 | 28 | [[package]] 29 | name = "charset-normalizer" 30 | version = "3.3.2" 31 | description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 32 | optional = false 33 | python-versions = ">=3.7.0" 34 | files = [ 35 | {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, 36 | {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, 37 | {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, 38 | {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, 39 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, 40 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, 41 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, 42 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, 43 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, 44 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, 45 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, 46 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, 47 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, 48 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, 49 | {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, 50 | {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, 51 | {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, 52 | {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, 53 | {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, 54 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, 55 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, 56 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, 57 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, 58 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, 59 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, 60 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, 61 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, 62 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, 63 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, 64 | {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, 65 | {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, 66 | {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, 67 | {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, 68 | {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, 69 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, 70 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, 71 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, 72 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, 73 | {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, 74 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, 75 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, 76 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, 77 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, 78 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, 79 | {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, 80 | {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, 81 | {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, 82 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, 83 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, 84 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, 85 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, 86 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, 87 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, 88 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, 89 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, 90 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, 91 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, 92 | {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, 93 | {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, 94 | {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, 95 | {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, 96 | {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, 97 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, 98 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, 99 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, 100 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, 101 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, 102 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, 103 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, 104 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, 105 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, 106 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, 107 | {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, 108 | {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, 109 | {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, 110 | {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, 111 | {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, 112 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, 113 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, 114 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, 115 | {file = 
"charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, 116 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, 117 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, 118 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, 119 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, 120 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, 121 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, 122 | {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, 123 | {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, 124 | {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, 125 | ] 126 | 127 | [[package]] 128 | name = "click" 129 | version = "8.1.7" 130 | description = "Composable command line interface toolkit" 131 | optional = false 132 | python-versions = ">=3.7" 133 | files = [ 134 | {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, 135 | {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, 136 | ] 137 | 138 | [package.dependencies] 139 | colorama = {version = "*", markers = "platform_system == \"Windows\""} 140 | 141 | [[package]] 142 | name = "colorama" 143 | version = "0.4.6" 144 | description = "Cross-platform colored terminal text." 145 | optional = false 146 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" 147 | files = [ 148 | {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, 149 | {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, 150 | ] 151 | 152 | [[package]] 153 | name = "ghp-import" 154 | version = "2.1.0" 155 | description = "Copy your docs directly to the gh-pages branch." 
156 | optional = false 157 | python-versions = "*" 158 | files = [ 159 | {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, 160 | {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, 161 | ] 162 | 163 | [package.dependencies] 164 | python-dateutil = ">=2.8.1" 165 | 166 | [package.extras] 167 | dev = ["flake8", "markdown", "twine", "wheel"] 168 | 169 | [[package]] 170 | name = "idna" 171 | version = "3.7" 172 | description = "Internationalized Domain Names in Applications (IDNA)" 173 | optional = false 174 | python-versions = ">=3.5" 175 | files = [ 176 | {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, 177 | {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, 178 | ] 179 | 180 | [[package]] 181 | name = "jinja2" 182 | version = "3.1.4" 183 | description = "A very fast and expressive template engine." 184 | optional = false 185 | python-versions = ">=3.7" 186 | files = [ 187 | {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, 188 | {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, 189 | ] 190 | 191 | [package.dependencies] 192 | MarkupSafe = ">=2.0" 193 | 194 | [package.extras] 195 | i18n = ["Babel (>=2.7)"] 196 | 197 | [[package]] 198 | name = "markdown" 199 | version = "3.6" 200 | description = "Python implementation of John Gruber's Markdown." 201 | optional = false 202 | python-versions = ">=3.8" 203 | files = [ 204 | {file = "Markdown-3.6-py3-none-any.whl", hash = "sha256:48f276f4d8cfb8ce6527c8f79e2ee29708508bf4d40aa410fbc3b4ee832c850f"}, 205 | {file = "Markdown-3.6.tar.gz", hash = "sha256:ed4f41f6daecbeeb96e576ce414c41d2d876daa9a16cb35fa8ed8c2ddfad0224"}, 206 | ] 207 | 208 | [package.extras] 209 | docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] 210 | testing = ["coverage", "pyyaml"] 211 | 212 | [[package]] 213 | name = "markupsafe" 214 | version = "2.1.5" 215 | description = "Safely add untrusted strings to HTML/XML markup." 
216 | optional = false 217 | python-versions = ">=3.7" 218 | files = [ 219 | {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, 220 | {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, 221 | {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, 222 | {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, 223 | {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, 224 | {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, 225 | {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, 226 | {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, 227 | {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, 228 | {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, 229 | {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, 230 | {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, 231 | {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, 232 | {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, 233 | {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, 234 | {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, 235 | {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, 236 | {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, 237 | {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, 238 | {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, 239 | {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, 240 | {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, 241 | {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, 242 | {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, 243 | {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, 244 | {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, 245 | {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, 246 | {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, 247 | {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, 248 | {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, 249 | {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, 250 | {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, 251 | {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, 252 | {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, 253 | {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, 254 | {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, 255 | {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, 256 | {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, 257 | {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, 258 | {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, 259 | {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, 260 | {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, 261 | {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, 262 | {file = 
"MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, 263 | {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, 264 | {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, 265 | {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, 266 | {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, 267 | {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, 268 | {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, 269 | {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, 270 | {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, 271 | {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, 272 | {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, 273 | {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, 274 | {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, 275 | {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, 276 | {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, 277 | {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, 278 | {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, 279 | ] 280 | 281 | [[package]] 282 | name = "mergedeep" 283 | version = "1.3.4" 284 | description = "A deep merge function for 🐍." 285 | optional = false 286 | python-versions = ">=3.6" 287 | files = [ 288 | {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, 289 | {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, 290 | ] 291 | 292 | [[package]] 293 | name = "mkdocs" 294 | version = "1.6.0" 295 | description = "Project documentation with Markdown." 
296 | optional = false 297 | python-versions = ">=3.8" 298 | files = [ 299 | {file = "mkdocs-1.6.0-py3-none-any.whl", hash = "sha256:1eb5cb7676b7d89323e62b56235010216319217d4af5ddc543a91beb8d125ea7"}, 300 | {file = "mkdocs-1.6.0.tar.gz", hash = "sha256:a73f735824ef83a4f3bcb7a231dcab23f5a838f88b7efc54a0eef5fbdbc3c512"}, 301 | ] 302 | 303 | [package.dependencies] 304 | click = ">=7.0" 305 | colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} 306 | ghp-import = ">=1.0" 307 | jinja2 = ">=2.11.1" 308 | markdown = ">=3.3.6" 309 | markupsafe = ">=2.0.1" 310 | mergedeep = ">=1.3.4" 311 | mkdocs-get-deps = ">=0.2.0" 312 | packaging = ">=20.5" 313 | pathspec = ">=0.11.1" 314 | pyyaml = ">=5.1" 315 | pyyaml-env-tag = ">=0.1" 316 | watchdog = ">=2.0" 317 | 318 | [package.extras] 319 | i18n = ["babel (>=2.9.0)"] 320 | min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.4)", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] 321 | 322 | [[package]] 323 | name = "mkdocs-get-deps" 324 | version = "0.2.0" 325 | description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" 326 | optional = false 327 | python-versions = ">=3.8" 328 | files = [ 329 | {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, 330 | {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, 331 | ] 332 | 333 | [package.dependencies] 334 | mergedeep = ">=1.3.4" 335 | platformdirs = ">=2.2.0" 336 | pyyaml = ">=5.1" 337 | 338 | [[package]] 339 | name = "mkdocs-material" 340 | version = "9.5.29" 341 | description = "Documentation that simply works" 342 | optional = false 343 | python-versions = ">=3.8" 344 | files = [ 345 | {file = "mkdocs_material-9.5.29-py3-none-any.whl", hash = "sha256:afc1f508e2662ded95f0a35a329e8a5acd73ee88ca07ba73836eb6fcdae5d8b4"}, 346 | {file = "mkdocs_material-9.5.29.tar.gz", hash = "sha256:3e977598ec15a4ddad5c4dfc9e08edab6023edb51e88f0729bd27be77e3d322a"}, 347 | ] 348 | 349 | [package.dependencies] 350 | babel = ">=2.10,<3.0" 351 | colorama = ">=0.4,<1.0" 352 | jinja2 = ">=3.0,<4.0" 353 | markdown = ">=3.2,<4.0" 354 | mkdocs = ">=1.6,<2.0" 355 | mkdocs-material-extensions = ">=1.3,<2.0" 356 | paginate = ">=0.5,<1.0" 357 | pygments = ">=2.16,<3.0" 358 | pymdown-extensions = ">=10.2,<11.0" 359 | regex = ">=2022.4" 360 | requests = ">=2.26,<3.0" 361 | 362 | [package.extras] 363 | git = ["mkdocs-git-committers-plugin-2 (>=1.1,<2.0)", "mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"] 364 | imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"] 365 | recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"] 366 | 367 | [[package]] 368 | name = "mkdocs-material-extensions" 369 | version = "1.3.1" 370 | description = "Extension pack for Python Markdown and MkDocs Material." 
371 | optional = false 372 | python-versions = ">=3.8" 373 | files = [ 374 | {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, 375 | {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, 376 | ] 377 | 378 | [[package]] 379 | name = "mkdocs-pymdownx-material-extras" 380 | version = "2.5.6" 381 | description = "Plugin to extend MkDocs Material theme." 382 | optional = false 383 | python-versions = ">=3.8" 384 | files = [ 385 | {file = "mkdocs_pymdownx_material_extras-2.5.6-py3-none-any.whl", hash = "sha256:ed5bfc23c6f42f485603e05abc22926c27c2b31ef9972a0132582b73f49557e9"}, 386 | {file = "mkdocs_pymdownx_material_extras-2.5.6.tar.gz", hash = "sha256:e0cf0aa4f284a78ecab9caf0bc62a12d8b836a5abbd49f9638cc03b2698a021c"}, 387 | ] 388 | 389 | [package.dependencies] 390 | mkdocs-material = ">=8.3.3" 391 | 392 | [[package]] 393 | name = "packaging" 394 | version = "24.1" 395 | description = "Core utilities for Python packages" 396 | optional = false 397 | python-versions = ">=3.8" 398 | files = [ 399 | {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, 400 | {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, 401 | ] 402 | 403 | [[package]] 404 | name = "paginate" 405 | version = "0.5.6" 406 | description = "Divides large result sets into pages for easier browsing" 407 | optional = false 408 | python-versions = "*" 409 | files = [ 410 | {file = "paginate-0.5.6.tar.gz", hash = "sha256:5e6007b6a9398177a7e1648d04fdd9f8c9766a1a945bceac82f1929e8c78af2d"}, 411 | ] 412 | 413 | [[package]] 414 | name = "pathspec" 415 | version = "0.12.1" 416 | description = "Utility library for gitignore style pattern matching of file paths." 417 | optional = false 418 | python-versions = ">=3.8" 419 | files = [ 420 | {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, 421 | {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, 422 | ] 423 | 424 | [[package]] 425 | name = "platformdirs" 426 | version = "4.2.2" 427 | description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 428 | optional = false 429 | python-versions = ">=3.8" 430 | files = [ 431 | {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, 432 | {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, 433 | ] 434 | 435 | [package.extras] 436 | docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] 437 | test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] 438 | type = ["mypy (>=1.8)"] 439 | 440 | [[package]] 441 | name = "pygments" 442 | version = "2.18.0" 443 | description = "Pygments is a syntax highlighting package written in Python." 
444 | optional = false 445 | python-versions = ">=3.8" 446 | files = [ 447 | {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, 448 | {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, 449 | ] 450 | 451 | [package.extras] 452 | windows-terminal = ["colorama (>=0.4.6)"] 453 | 454 | [[package]] 455 | name = "pymdown-extensions" 456 | version = "10.8.1" 457 | description = "Extension pack for Python Markdown." 458 | optional = false 459 | python-versions = ">=3.8" 460 | files = [ 461 | {file = "pymdown_extensions-10.8.1-py3-none-any.whl", hash = "sha256:f938326115884f48c6059c67377c46cf631c733ef3629b6eed1349989d1b30cb"}, 462 | {file = "pymdown_extensions-10.8.1.tar.gz", hash = "sha256:3ab1db5c9e21728dabf75192d71471f8e50f216627e9a1fa9535ecb0231b9940"}, 463 | ] 464 | 465 | [package.dependencies] 466 | markdown = ">=3.6" 467 | pyyaml = "*" 468 | 469 | [package.extras] 470 | extra = ["pygments (>=2.12)"] 471 | 472 | [[package]] 473 | name = "python-dateutil" 474 | version = "2.9.0.post0" 475 | description = "Extensions to the standard Python datetime module" 476 | optional = false 477 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" 478 | files = [ 479 | {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, 480 | {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, 481 | ] 482 | 483 | [package.dependencies] 484 | six = ">=1.5" 485 | 486 | [[package]] 487 | name = "pyyaml" 488 | version = "6.0.1" 489 | description = "YAML parser and emitter for Python" 490 | optional = false 491 | python-versions = ">=3.6" 492 | files = [ 493 | {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, 494 | {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, 495 | {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, 496 | {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, 497 | {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, 498 | {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, 499 | {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, 500 | {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, 501 | {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, 502 | {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, 503 | {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, 
504 | {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, 505 | {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, 506 | {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, 507 | {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, 508 | {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, 509 | {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, 510 | {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, 511 | {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, 512 | {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, 513 | {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, 514 | {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, 515 | {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, 516 | {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, 517 | {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, 518 | {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, 519 | {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, 520 | {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, 521 | {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, 522 | {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, 523 | {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, 524 | {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, 525 | {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, 526 | {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = 
"sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, 527 | {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, 528 | {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, 529 | {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, 530 | {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, 531 | {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, 532 | {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, 533 | {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, 534 | {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, 535 | {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, 536 | {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, 537 | {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, 538 | {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, 539 | {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, 540 | {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, 541 | {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, 542 | {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, 543 | {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, 544 | ] 545 | 546 | [[package]] 547 | name = "pyyaml-env-tag" 548 | version = "0.1" 549 | description = "A custom YAML tag for referencing environment variables in YAML files. " 550 | optional = false 551 | python-versions = ">=3.6" 552 | files = [ 553 | {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, 554 | {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, 555 | ] 556 | 557 | [package.dependencies] 558 | pyyaml = "*" 559 | 560 | [[package]] 561 | name = "regex" 562 | version = "2024.5.15" 563 | description = "Alternative regular expression module, to replace re." 
564 | optional = false 565 | python-versions = ">=3.8" 566 | files = [ 567 | {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a81e3cfbae20378d75185171587cbf756015ccb14840702944f014e0d93ea09f"}, 568 | {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b59138b219ffa8979013be7bc85bb60c6f7b7575df3d56dc1e403a438c7a3f6"}, 569 | {file = "regex-2024.5.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0bd000c6e266927cb7a1bc39d55be95c4b4f65c5be53e659537537e019232b1"}, 570 | {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eaa7ddaf517aa095fa8da0b5015c44d03da83f5bd49c87961e3c997daed0de7"}, 571 | {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba68168daedb2c0bab7fd7e00ced5ba90aebf91024dea3c88ad5063c2a562cca"}, 572 | {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e8d717bca3a6e2064fc3a08df5cbe366369f4b052dcd21b7416e6d71620dca1"}, 573 | {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1337b7dbef9b2f71121cdbf1e97e40de33ff114801263b275aafd75303bd62b5"}, 574 | {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9ebd0a36102fcad2f03696e8af4ae682793a5d30b46c647eaf280d6cfb32796"}, 575 | {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9efa1a32ad3a3ea112224897cdaeb6aa00381627f567179c0314f7b65d354c62"}, 576 | {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1595f2d10dff3d805e054ebdc41c124753631b6a471b976963c7b28543cf13b0"}, 577 | {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b802512f3e1f480f41ab5f2cfc0e2f761f08a1f41092d6718868082fc0d27143"}, 578 | {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a0981022dccabca811e8171f913de05720590c915b033b7e601f35ce4ea7019f"}, 579 | {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:19068a6a79cf99a19ccefa44610491e9ca02c2be3305c7760d3831d38a467a6f"}, 580 | {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1b5269484f6126eee5e687785e83c6b60aad7663dafe842b34691157e5083e53"}, 581 | {file = "regex-2024.5.15-cp310-cp310-win32.whl", hash = "sha256:ada150c5adfa8fbcbf321c30c751dc67d2f12f15bd183ffe4ec7cde351d945b3"}, 582 | {file = "regex-2024.5.15-cp310-cp310-win_amd64.whl", hash = "sha256:ac394ff680fc46b97487941f5e6ae49a9f30ea41c6c6804832063f14b2a5a145"}, 583 | {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f5b1dff3ad008dccf18e652283f5e5339d70bf8ba7c98bf848ac33db10f7bc7a"}, 584 | {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c6a2b494a76983df8e3d3feea9b9ffdd558b247e60b92f877f93a1ff43d26656"}, 585 | {file = "regex-2024.5.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a32b96f15c8ab2e7d27655969a23895eb799de3665fa94349f3b2fbfd547236f"}, 586 | {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10002e86e6068d9e1c91eae8295ef690f02f913c57db120b58fdd35a6bb1af35"}, 587 | {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec54d5afa89c19c6dd8541a133be51ee1017a38b412b1321ccb8d6ddbeb4cf7d"}, 588 | {file = 
"regex-2024.5.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10e4ce0dca9ae7a66e6089bb29355d4432caed736acae36fef0fdd7879f0b0cb"}, 589 | {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e507ff1e74373c4d3038195fdd2af30d297b4f0950eeda6f515ae3d84a1770f"}, 590 | {file = "regex-2024.5.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1f059a4d795e646e1c37665b9d06062c62d0e8cc3c511fe01315973a6542e40"}, 591 | {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0721931ad5fe0dda45d07f9820b90b2148ccdd8e45bb9e9b42a146cb4f695649"}, 592 | {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:833616ddc75ad595dee848ad984d067f2f31be645d603e4d158bba656bbf516c"}, 593 | {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:287eb7f54fc81546346207c533ad3c2c51a8d61075127d7f6d79aaf96cdee890"}, 594 | {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:19dfb1c504781a136a80ecd1fff9f16dddf5bb43cec6871778c8a907a085bb3d"}, 595 | {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:119af6e56dce35e8dfb5222573b50c89e5508d94d55713c75126b753f834de68"}, 596 | {file = "regex-2024.5.15-cp311-cp311-win32.whl", hash = "sha256:1c1c174d6ec38d6c8a7504087358ce9213d4332f6293a94fbf5249992ba54efa"}, 597 | {file = "regex-2024.5.15-cp311-cp311-win_amd64.whl", hash = "sha256:9e717956dcfd656f5055cc70996ee2cc82ac5149517fc8e1b60261b907740201"}, 598 | {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:632b01153e5248c134007209b5c6348a544ce96c46005d8456de1d552455b014"}, 599 | {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e64198f6b856d48192bf921421fdd8ad8eb35e179086e99e99f711957ffedd6e"}, 600 | {file = "regex-2024.5.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68811ab14087b2f6e0fc0c2bae9ad689ea3584cad6917fc57be6a48bbd012c49"}, 601 | {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ec0c2fea1e886a19c3bee0cd19d862b3aa75dcdfb42ebe8ed30708df64687a"}, 602 | {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0c0c0003c10f54a591d220997dd27d953cd9ccc1a7294b40a4be5312be8797b"}, 603 | {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2431b9e263af1953c55abbd3e2efca67ca80a3de8a0437cb58e2421f8184717a"}, 604 | {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a605586358893b483976cffc1723fb0f83e526e8f14c6e6614e75919d9862cf"}, 605 | {file = "regex-2024.5.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391d7f7f1e409d192dba8bcd42d3e4cf9e598f3979cdaed6ab11288da88cb9f2"}, 606 | {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9ff11639a8d98969c863d4617595eb5425fd12f7c5ef6621a4b74b71ed8726d5"}, 607 | {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4eee78a04e6c67e8391edd4dad3279828dd66ac4b79570ec998e2155d2e59fd5"}, 608 | {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8fe45aa3f4aa57faabbc9cb46a93363edd6197cbc43523daea044e9ff2fea83e"}, 609 | {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:d0a3d8d6acf0c78a1fff0e210d224b821081330b8524e3e2bc5a68ef6ab5803d"}, 610 | {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c486b4106066d502495b3025a0a7251bf37ea9540433940a23419461ab9f2a80"}, 611 | {file = "regex-2024.5.15-cp312-cp312-win32.whl", hash = "sha256:c49e15eac7c149f3670b3e27f1f28a2c1ddeccd3a2812cba953e01be2ab9b5fe"}, 612 | {file = "regex-2024.5.15-cp312-cp312-win_amd64.whl", hash = "sha256:673b5a6da4557b975c6c90198588181029c60793835ce02f497ea817ff647cb2"}, 613 | {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:87e2a9c29e672fc65523fb47a90d429b70ef72b901b4e4b1bd42387caf0d6835"}, 614 | {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c3bea0ba8b73b71b37ac833a7f3fd53825924165da6a924aec78c13032f20850"}, 615 | {file = "regex-2024.5.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bfc4f82cabe54f1e7f206fd3d30fda143f84a63fe7d64a81558d6e5f2e5aaba9"}, 616 | {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5bb9425fe881d578aeca0b2b4b3d314ec88738706f66f219c194d67179337cb"}, 617 | {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64c65783e96e563103d641760664125e91bd85d8e49566ee560ded4da0d3e704"}, 618 | {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf2430df4148b08fb4324b848672514b1385ae3807651f3567871f130a728cc3"}, 619 | {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5397de3219a8b08ae9540c48f602996aa6b0b65d5a61683e233af8605c42b0f2"}, 620 | {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:455705d34b4154a80ead722f4f185b04c4237e8e8e33f265cd0798d0e44825fa"}, 621 | {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2b6f1b3bb6f640c1a92be3bbfbcb18657b125b99ecf141fb3310b5282c7d4ed"}, 622 | {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3ad070b823ca5890cab606c940522d05d3d22395d432f4aaaf9d5b1653e47ced"}, 623 | {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5b5467acbfc153847d5adb21e21e29847bcb5870e65c94c9206d20eb4e99a384"}, 624 | {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e6662686aeb633ad65be2a42b4cb00178b3fbf7b91878f9446075c404ada552f"}, 625 | {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:2b4c884767504c0e2401babe8b5b7aea9148680d2e157fa28f01529d1f7fcf67"}, 626 | {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3cd7874d57f13bf70078f1ff02b8b0aa48d5b9ed25fc48547516c6aba36f5741"}, 627 | {file = "regex-2024.5.15-cp38-cp38-win32.whl", hash = "sha256:e4682f5ba31f475d58884045c1a97a860a007d44938c4c0895f41d64481edbc9"}, 628 | {file = "regex-2024.5.15-cp38-cp38-win_amd64.whl", hash = "sha256:d99ceffa25ac45d150e30bd9ed14ec6039f2aad0ffa6bb87a5936f5782fc1569"}, 629 | {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13cdaf31bed30a1e1c2453ef6015aa0983e1366fad2667657dbcac7b02f67133"}, 630 | {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cac27dcaa821ca271855a32188aa61d12decb6fe45ffe3e722401fe61e323cd1"}, 631 | {file = "regex-2024.5.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7dbe2467273b875ea2de38ded4eba86cbcbc9a1a6d0aa11dcf7bd2e67859c435"}, 632 | 
{file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64f18a9a3513a99c4bef0e3efd4c4a5b11228b48aa80743be822b71e132ae4f5"}, 633 | {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d347a741ea871c2e278fde6c48f85136c96b8659b632fb57a7d1ce1872547600"}, 634 | {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1878b8301ed011704aea4c806a3cadbd76f84dece1ec09cc9e4dc934cfa5d4da"}, 635 | {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4babf07ad476aaf7830d77000874d7611704a7fcf68c9c2ad151f5d94ae4bfc4"}, 636 | {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35cb514e137cb3488bce23352af3e12fb0dbedd1ee6e60da053c69fb1b29cc6c"}, 637 | {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cdd09d47c0b2efee9378679f8510ee6955d329424c659ab3c5e3a6edea696294"}, 638 | {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:72d7a99cd6b8f958e85fc6ca5b37c4303294954eac1376535b03c2a43eb72629"}, 639 | {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a094801d379ab20c2135529948cb84d417a2169b9bdceda2a36f5f10977ebc16"}, 640 | {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c0c18345010870e58238790a6779a1219b4d97bd2e77e1140e8ee5d14df071aa"}, 641 | {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:16093f563098448ff6b1fa68170e4acbef94e6b6a4e25e10eae8598bb1694b5d"}, 642 | {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e38a7d4e8f633a33b4c7350fbd8bad3b70bf81439ac67ac38916c4a86b465456"}, 643 | {file = "regex-2024.5.15-cp39-cp39-win32.whl", hash = "sha256:71a455a3c584a88f654b64feccc1e25876066c4f5ef26cd6dd711308aa538694"}, 644 | {file = "regex-2024.5.15-cp39-cp39-win_amd64.whl", hash = "sha256:cab12877a9bdafde5500206d1020a584355a97884dfd388af3699e9137bf7388"}, 645 | {file = "regex-2024.5.15.tar.gz", hash = "sha256:d3ee02d9e5f482cc8309134a91eeaacbdd2261ba111b0fef3748eeb4913e6a2c"}, 646 | ] 647 | 648 | [[package]] 649 | name = "requests" 650 | version = "2.32.3" 651 | description = "Python HTTP for Humans." 
652 | optional = false 653 | python-versions = ">=3.8" 654 | files = [ 655 | {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, 656 | {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, 657 | ] 658 | 659 | [package.dependencies] 660 | certifi = ">=2017.4.17" 661 | charset-normalizer = ">=2,<4" 662 | idna = ">=2.5,<4" 663 | urllib3 = ">=1.21.1,<3" 664 | 665 | [package.extras] 666 | socks = ["PySocks (>=1.5.6,!=1.5.7)"] 667 | use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] 668 | 669 | [[package]] 670 | name = "six" 671 | version = "1.16.0" 672 | description = "Python 2 and 3 compatibility utilities" 673 | optional = false 674 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" 675 | files = [ 676 | {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, 677 | {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, 678 | ] 679 | 680 | [[package]] 681 | name = "urllib3" 682 | version = "2.2.2" 683 | description = "HTTP library with thread-safe connection pooling, file post, and more." 684 | optional = false 685 | python-versions = ">=3.8" 686 | files = [ 687 | {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, 688 | {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, 689 | ] 690 | 691 | [package.extras] 692 | brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] 693 | h2 = ["h2 (>=4,<5)"] 694 | socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] 695 | zstd = ["zstandard (>=0.18.0)"] 696 | 697 | [[package]] 698 | name = "watchdog" 699 | version = "4.0.1" 700 | description = "Filesystem events monitoring" 701 | optional = false 702 | python-versions = ">=3.8" 703 | files = [ 704 | {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:da2dfdaa8006eb6a71051795856bedd97e5b03e57da96f98e375682c48850645"}, 705 | {file = "watchdog-4.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e93f451f2dfa433d97765ca2634628b789b49ba8b504fdde5837cdcf25fdb53b"}, 706 | {file = "watchdog-4.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef0107bbb6a55f5be727cfc2ef945d5676b97bffb8425650dadbb184be9f9a2b"}, 707 | {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:17e32f147d8bf9657e0922c0940bcde863b894cd871dbb694beb6704cfbd2fb5"}, 708 | {file = "watchdog-4.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03e70d2df2258fb6cb0e95bbdbe06c16e608af94a3ffbd2b90c3f1e83eb10767"}, 709 | {file = "watchdog-4.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:123587af84260c991dc5f62a6e7ef3d1c57dfddc99faacee508c71d287248459"}, 710 | {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:093b23e6906a8b97051191a4a0c73a77ecc958121d42346274c6af6520dec175"}, 711 | {file = "watchdog-4.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:611be3904f9843f0529c35a3ff3fd617449463cb4b73b1633950b3d97fa4bfb7"}, 712 | {file = "watchdog-4.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62c613ad689ddcb11707f030e722fa929f322ef7e4f18f5335d2b73c61a85c28"}, 713 | {file = "watchdog-4.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d4925e4bf7b9bddd1c3de13c9b8a2cdb89a468f640e66fbfabaf735bd85b3e35"}, 714 | {file = 
"watchdog-4.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cad0bbd66cd59fc474b4a4376bc5ac3fc698723510cbb64091c2a793b18654db"}, 715 | {file = "watchdog-4.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a3c2c317a8fb53e5b3d25790553796105501a235343f5d2bf23bb8649c2c8709"}, 716 | {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c9904904b6564d4ee8a1ed820db76185a3c96e05560c776c79a6ce5ab71888ba"}, 717 | {file = "watchdog-4.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:667f3c579e813fcbad1b784db7a1aaa96524bed53437e119f6a2f5de4db04235"}, 718 | {file = "watchdog-4.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d10a681c9a1d5a77e75c48a3b8e1a9f2ae2928eda463e8d33660437705659682"}, 719 | {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0144c0ea9997b92615af1d94afc0c217e07ce2c14912c7b1a5731776329fcfc7"}, 720 | {file = "watchdog-4.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:998d2be6976a0ee3a81fb8e2777900c28641fb5bfbd0c84717d89bca0addcdc5"}, 721 | {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e7921319fe4430b11278d924ef66d4daa469fafb1da679a2e48c935fa27af193"}, 722 | {file = "watchdog-4.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f0de0f284248ab40188f23380b03b59126d1479cd59940f2a34f8852db710625"}, 723 | {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bca36be5707e81b9e6ce3208d92d95540d4ca244c006b61511753583c81c70dd"}, 724 | {file = "watchdog-4.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ab998f567ebdf6b1da7dc1e5accfaa7c6992244629c0fdaef062f43249bd8dee"}, 725 | {file = "watchdog-4.0.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dddba7ca1c807045323b6af4ff80f5ddc4d654c8bce8317dde1bd96b128ed253"}, 726 | {file = "watchdog-4.0.1-py3-none-manylinux2014_armv7l.whl", hash = "sha256:4513ec234c68b14d4161440e07f995f231be21a09329051e67a2118a7a612d2d"}, 727 | {file = "watchdog-4.0.1-py3-none-manylinux2014_i686.whl", hash = "sha256:4107ac5ab936a63952dea2a46a734a23230aa2f6f9db1291bf171dac3ebd53c6"}, 728 | {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64.whl", hash = "sha256:6e8c70d2cd745daec2a08734d9f63092b793ad97612470a0ee4cbb8f5f705c57"}, 729 | {file = "watchdog-4.0.1-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f27279d060e2ab24c0aa98363ff906d2386aa6c4dc2f1a374655d4e02a6c5e5e"}, 730 | {file = "watchdog-4.0.1-py3-none-manylinux2014_s390x.whl", hash = "sha256:f8affdf3c0f0466e69f5b3917cdd042f89c8c63aebdb9f7c078996f607cdb0f5"}, 731 | {file = "watchdog-4.0.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ac7041b385f04c047fcc2951dc001671dee1b7e0615cde772e84b01fbf68ee84"}, 732 | {file = "watchdog-4.0.1-py3-none-win32.whl", hash = "sha256:206afc3d964f9a233e6ad34618ec60b9837d0582b500b63687e34011e15bb429"}, 733 | {file = "watchdog-4.0.1-py3-none-win_amd64.whl", hash = "sha256:7577b3c43e5909623149f76b099ac49a1a01ca4e167d1785c76eb52fa585745a"}, 734 | {file = "watchdog-4.0.1-py3-none-win_ia64.whl", hash = "sha256:d7b9f5f3299e8dd230880b6c55504a1f69cf1e4316275d1b215ebdd8187ec88d"}, 735 | {file = "watchdog-4.0.1.tar.gz", hash = "sha256:eebaacf674fa25511e8867028d281e602ee6500045b57f43b08778082f7f8b44"}, 736 | ] 737 | 738 | [package.extras] 739 | watchmedo = ["PyYAML (>=3.10)"] 740 | 741 | [metadata] 742 | lock-version = "2.0" 743 | python-versions = "^3.12" 744 | content-hash = "3b80b3b24c7f314cc42389077e8604779b1830df23075c5430808375bb204e18" 745 | 
-------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "aws-lakeformation-best-practices-clean" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["Your Name "] 6 | license = "Creative Commons Attribution-ShareAlike 4.0 International Public License" 7 | readme = "README.md" 8 | package-mode = false 9 | 10 | [tool.poetry.dependencies] 11 | python = "^3.12" 12 | mkdocs = "^1.6.0" 13 | mkdocs-material = "^9.5.29" 14 | mkdocs-pymdownx-material-extras = "^2.5.6" 15 | 16 | 17 | [build-system] 18 | requires = ["poetry-core"] 19 | build-backend = "poetry.core.masonry.api" 20 | --------------------------------------------------------------------------------
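Note on building the docs: the pyproject.toml above, together with the pinned versions in poetry.lock, defines a Poetry-managed MkDocs build. The package-mode = false setting marks the project as dependencies-only, which requires Poetry 1.8 or newer. A minimal sketch of a local build, assuming Poetry and Python 3.12 are installed:

    # install the exact dependency versions recorded in poetry.lock
    poetry install
    # serve the site with live reload (defaults to http://127.0.0.1:8000)
    poetry run mkdocs serve
    # or render the static site into the site/ directory
    poetry run mkdocs build

On recent Poetry versions, poetry check --lock can verify that poetry.lock is still consistent with pyproject.toml before building.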