├── VERSION ├── tst ├── __init__.py ├── tools │ ├── __init__.py │ └── test_api_coverage.py ├── generated │ ├── __init__.py │ ├── test_config_schema.py │ ├── test_shapes.py │ ├── test_user_agent.py │ └── test_logs.py └── test_codec.py ├── workflow_helper ├── __init__.py ├── compute_resource_coverage.py └── compute_boto_api_coverage.py ├── src └── sagemaker_core │ ├── __init__.py │ ├── helper │ └── __init__.py │ ├── main │ ├── __init__.py │ ├── code_injection │ │ ├── __init__.py │ │ ├── constants.py │ │ ├── base.py │ │ └── codec.py │ ├── user_agent.py │ ├── exceptions.py │ ├── logs.py │ └── default_configs_helper.py │ ├── shapes │ └── __init__.py │ ├── resources │ └── __init__.py │ ├── tools │ ├── api_coverage.json │ ├── __init__.py │ ├── method.py │ ├── codegen.py │ ├── data_extractor.py │ ├── constants.py │ └── shapes_codegen.py │ └── _version.py ├── CODEOWNERS ├── .env ├── docs ├── requirements.txt ├── index.rst ├── Makefile ├── make.bat └── conf.py ├── sample ├── sagemaker-edge │ └── 2020-09-23 │ │ ├── paginators-1.json │ │ ├── examples-1.json │ │ └── endpoint-rule-set-1.json ├── sagemaker-metrics │ └── 2022-09-30 │ │ ├── paginators-1.json │ │ └── service-2.json ├── sagemaker-runtime │ └── 2017-05-13 │ │ ├── paginators-1.json │ │ └── examples-1.json ├── sagemaker-featurestore-runtime │ └── 2020-07-01 │ │ ├── paginators-1.json │ │ └── examples-1.json ├── sagemaker │ └── 2017-07-24 │ │ ├── examples-1.json │ │ ├── default-configs.json │ │ ├── waiters-2.json │ │ └── paginators-1.json ├── sagemaker-a2i-runtime │ └── 2019-11-07 │ │ ├── examples-1.json │ │ └── paginators-1.json └── sagemaker-geospatial │ └── 2020-05-27 │ └── paginators-1.json ├── .gitignore ├── setup.cfg ├── branding └── icon │ └── sagemaker-banner.png ├── example_notebooks └── images │ ├── experiment_created.png │ ├── experiment_run_metrics.png │ ├── experiment_run_parameters.png │ ├── experiment_run_analyze_plot.png │ └── experiment_runs_comparison.png ├── .github ├── workflows │ ├── auto-approve.yml │ ├── botocore-sync.yml │ ├── create-release.yml │ ├── codeql.yml │ ├── auto-merge.yml │ └── pr-checks.yml └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── .readthedocs.yaml ├── CONTRIBUTING.md ├── pyproject.toml ├── .pylintrc ├── README.rst ├── integ ├── sagemaker_cleaner.py └── test_codegen.py ├── LICENSE └── CHANGELOG.md /VERSION: -------------------------------------------------------------------------------- 1 | 1.0.72 -------------------------------------------------------------------------------- /tst/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tst/tools/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tst/generated/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /workflow_helper/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/sagemaker_core/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/sagemaker_core/helper/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/sagemaker_core/main/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @aws/sagemaker-ml-frameworks -------------------------------------------------------------------------------- /.env: -------------------------------------------------------------------------------- 1 | export PYTHONPATH="$PYTHONPATH:$PWD/src" 2 | -------------------------------------------------------------------------------- /src/sagemaker_core/main/code_injection/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx==7.4.7 2 | sphinx-rtd-theme==2.0.0 -------------------------------------------------------------------------------- /src/sagemaker_core/shapes/__init__.py: -------------------------------------------------------------------------------- 1 | from ..main.shapes import * 2 | -------------------------------------------------------------------------------- /src/sagemaker_core/resources/__init__.py: -------------------------------------------------------------------------------- 1 | from ..main.resources import * 2 | -------------------------------------------------------------------------------- /sample/sagemaker-edge/2020-09-23/paginators-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "pagination": {} 3 | } 4 | -------------------------------------------------------------------------------- /sample/sagemaker-metrics/2022-09-30/paginators-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "pagination": {} 3 | } 4 | -------------------------------------------------------------------------------- /sample/sagemaker-runtime/2017-05-13/paginators-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "pagination": {} 3 | } 4 | -------------------------------------------------------------------------------- /src/sagemaker_core/tools/api_coverage.json: -------------------------------------------------------------------------------- 1 | {"SupportedAPIs": 371, "UnsupportedAPIs": 17} -------------------------------------------------------------------------------- /src/sagemaker_core/tools/__init__.py: -------------------------------------------------------------------------------- 1 | from ..main.code_injection.codec import pascal_to_snake 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/*.pyc 2 | **.pyc 3 | build/ 4 | dist/ 5 | *.egg-info/ 6 | *_build/ 7 | *html/ 8 | data/ -------------------------------------------------------------------------------- /sample/sagemaker-featurestore-runtime/2020-07-01/paginators-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "pagination": {} 3 | } 4 | -------------------------------------------------------------------------------- 
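A note on the `pascal_to_snake` re-export in `src/sagemaker_core/tools/__init__.py` above: the helper itself is defined in `src/sagemaker_core/main/code_injection/codec.py`, whose body is not included in this listing. A minimal sketch of what a PascalCase-to-snake_case converter of this kind typically looks like — an illustrative assumption, not the actual implementation:

```python
import re


def pascal_to_snake(pascal_case_string: str) -> str:
    """Convert a PascalCase name such as 'EndpointConfig' to 'endpoint_config'.

    Sketch only; the real helper lives in code_injection/codec.py and may differ.
    """
    # Insert an underscore before each capital letter that follows a lowercase
    # letter or a digit, then lowercase the whole string.
    return re.sub(r"(?<=[a-z0-9])([A-Z])", r"_\1", pascal_case_string).lower()


assert pascal_to_snake("EndpointConfig") == "endpoint_config"
assert pascal_to_snake("S3Uri") == "s3_uri"
```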
/sample/sagemaker/2017-07-24/examples-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.0", 3 | "examples": { 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /sample/sagemaker-edge/2020-09-23/examples-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.0", 3 | "examples": { 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [options] 2 | packages = find: 3 | package_dir = =src 4 | 5 | [options.packages.find] 6 | where = src -------------------------------------------------------------------------------- /sample/sagemaker-runtime/2017-05-13/examples-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.0", 3 | "examples": { 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /branding/icon/sagemaker-banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/sagemaker-core/HEAD/branding/icon/sagemaker-banner.png -------------------------------------------------------------------------------- /sample/sagemaker-a2i-runtime/2019-11-07/examples-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.0", 3 | "examples": { 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /sample/sagemaker-featurestore-runtime/2020-07-01/examples-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.0", 3 | "examples": { 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /src/sagemaker_core/_version.py: -------------------------------------------------------------------------------- 1 | import importlib.metadata 2 | 3 | __version__ = importlib.metadata.version("sagemaker_core") 4 | -------------------------------------------------------------------------------- /example_notebooks/images/experiment_created.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/sagemaker-core/HEAD/example_notebooks/images/experiment_created.png -------------------------------------------------------------------------------- /example_notebooks/images/experiment_run_metrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/sagemaker-core/HEAD/example_notebooks/images/experiment_run_metrics.png -------------------------------------------------------------------------------- /example_notebooks/images/experiment_run_parameters.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/sagemaker-core/HEAD/example_notebooks/images/experiment_run_parameters.png -------------------------------------------------------------------------------- /example_notebooks/images/experiment_run_analyze_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/sagemaker-core/HEAD/example_notebooks/images/experiment_run_analyze_plot.png 
-------------------------------------------------------------------------------- /example_notebooks/images/experiment_runs_comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/sagemaker-core/HEAD/example_notebooks/images/experiment_runs_comparison.png -------------------------------------------------------------------------------- /sample/sagemaker-a2i-runtime/2019-11-07/paginators-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "pagination": { 3 | "ListHumanLoops": { 4 | "input_token": "NextToken", 5 | "output_token": "NextToken", 6 | "limit_key": "MaxResults", 7 | "result_key": "HumanLoopSummaries" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../README.rst 2 | 3 | ######################## 4 | SageMaker Core Resources 5 | ######################## 6 | 7 | .. automodule:: sagemaker_core.main.resources 8 | :members: 9 | :noindex: 10 | 11 | 12 | ######################## 13 | SageMaker Core Shapes 14 | ######################## 15 | 16 | .. automodule:: sagemaker_core.main.shapes 17 | :members: 18 | :noindex: -------------------------------------------------------------------------------- /.github/workflows/auto-approve.yml: -------------------------------------------------------------------------------- 1 | name: Auto Approve 2 | on: 3 | pull_request_target: 4 | types: [ labeled, unlabeled, opened, synchronize, reopened, ready_for_review, review_requested ] 5 | 6 | jobs: 7 | auto-approve: 8 | if: contains(github.event.pull_request.labels.*.name, 'auto-approve') && github.event.pull_request.user.login == 'sagemaker-bot' 9 | runs-on: ubuntu-latest 10 | permissions: 11 | pull-requests: write 12 | steps: 13 | - uses: hmarr/auto-approve-action@v4.0.0 14 | with: 15 | github-token: "${{ secrets.GITHUB_TOKEN }}" 16 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # ReadTheDocs environment customization for the doc build (dependencies are 2 | # installed with pip per the config below). See: 3 | # https://docs.readthedocs.io/en/latest/config-file/v2.html 4 | 5 | version: 2 6 | 7 | build: 8 | os: ubuntu-22.04 9 | tools: 10 | python: "3.10" 11 | 12 | 13 | python: 14 | install: 15 | - method: pip 16 | path: .
17 | - requirements: docs/requirements.txt 18 | 19 | 20 | sphinx: 21 | configuration: docs/conf.py 22 | fail_on_warning: true # http://www.sphinx-doc.org/en/master/man/sphinx-build.html#id6 -------------------------------------------------------------------------------- /workflow_helper/compute_resource_coverage.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | 4 | def main(): 5 | """ 6 | Every run of the pytest-cov command writes a per-file coverage report to a JSON file (coverage.json). 7 | This file is parsed to print the coverage for resources.py, which is used as a tracking metric. 8 | """ 9 | json_file = "coverage.json" 10 | 11 | with open(json_file, "r") as f: 12 | data = json.load(f) 13 | print(data["files"]["src/sagemaker_core/main/resources.py"]["summary"]["percent_covered"]) 14 | 15 | 16 | if __name__ == "__main__": 17 | main() 18 | -------------------------------------------------------------------------------- /workflow_helper/compute_boto_api_coverage.py: -------------------------------------------------------------------------------- 1 | from sagemaker_core.main.utils import configure_logging 2 | from sagemaker_core.tools.resources_extractor import ResourcesExtractor 3 | 4 | 5 | def main(): 6 | """ 7 | This function computes the number of Botocore APIs that are covered and uncovered by sagemaker-core. 8 | """ 9 | configure_logging("ERROR") # Disable other log messages 10 | resources_extractor = ResourcesExtractor() 11 | # Print the number of unsupported Botocore APIs and supported Botocore APIs 12 | print(len(resources_extractor.actions), len(resources_extractor.actions_under_resource)) 13 | 14 | 15 | if __name__ == "__main__": 16 | main() 17 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /sample/sagemaker/2017-07-24/default-configs.json: -------------------------------------------------------------------------------- 1 | { 2 | "SageMaker": { 3 | "PythonSDK": { 4 | "Resources": { 5 | "GlobalDefaults": { 6 | "vpc_config": { 7 | "security_group_ids": [ 8 | "group-1", 9 | "group-2" 10 | ], 11 | "subnets": [ 12 | "subnet-1", 13 | "subnet-2" 14 | ] 15 | } 16 | }, 17 | "Cluster": { 18 | "vpc_config": { 19 | "security_group_ids": [ 20 | "group-4" 21 | ], 22 | "subnets": [ 23 | "subnet-4" 24 | ] 25 | } 26 | } 27 | } 28 | } 29 | } 30 | } -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest new functionality for this library 4 | title: '' 5 | labels: 'feature request' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the feature you'd like** 11 | A clear and concise description of the functionality you want. 12 | 13 | **How would this feature be used? Please describe.** 14 | A clear and concise description of the use case for this feature. Please provide an example, if possible. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /sample/sagemaker-geospatial/2020-05-27/paginators-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "pagination": { 3 | "ListEarthObservationJobs": { 4 | "input_token": "NextToken", 5 | "output_token": "NextToken", 6 | "limit_key": "MaxResults", 7 | "result_key": "EarthObservationJobSummaries" 8 | }, 9 | "ListRasterDataCollections": { 10 | "input_token": "NextToken", 11 | "output_token": "NextToken", 12 | "limit_key": "MaxResults", 13 | "result_key": "RasterDataCollectionSummaries" 14 | }, 15 | "ListVectorEnrichmentJobs": { 16 | "input_token": "NextToken", 17 | "output_token": "NextToken", 18 | "limit_key": "MaxResults", 19 | "result_key": "VectorEnrichmentJobSummaries" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /.github/workflows/botocore-sync.yml: -------------------------------------------------------------------------------- 1 | name: Daily Sync with Botocore 2 | 3 | on: 4 | schedule: 5 | # Every Monday to Friday at 10:00 UTC (3:00 PDT) 6 | - cron: 00 10 * * 1-5 7 | 8 | permissions: 9 | id-token: write # This is required for requesting the JWT 10 | 11 | jobs: 12 | sync-with-botocore: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Configure AWS Credentials 16 | uses: aws-actions/configure-aws-credentials@v4 17 | with: 18 | role-to-assume: ${{ secrets.CODEBUILD_ROLE_ARN }} 19 | role-duration-seconds: 10800 20 | aws-region: us-west-2 21 | 22 | - name: Run CodeBuild 23 | uses: aws-actions/aws-codebuild-run-build@v1 24 | with: 25 | project-name: sagemaker-core-botocore-sync 26 | 27 | -------------------------------------------------------------------------------- /tst/generated/test_config_schema.py: 
-------------------------------------------------------------------------------- 1 | from sagemaker_core.main.config_schema import SAGEMAKER_PYTHON_SDK_CONFIG_SCHEMA 2 | 3 | 4 | def test_config_schema(): 5 | assert SAGEMAKER_PYTHON_SDK_CONFIG_SCHEMA 6 | 7 | assert SAGEMAKER_PYTHON_SDK_CONFIG_SCHEMA["required"] == ["SageMaker"] 8 | assert SAGEMAKER_PYTHON_SDK_CONFIG_SCHEMA["properties"]["SageMaker"] 9 | assert SAGEMAKER_PYTHON_SDK_CONFIG_SCHEMA["properties"]["SageMaker"]["required"] == [ 10 | "PythonSDK" 11 | ] 12 | 13 | assert SAGEMAKER_PYTHON_SDK_CONFIG_SCHEMA["properties"]["SageMaker"]["properties"]["PythonSDK"] 14 | assert SAGEMAKER_PYTHON_SDK_CONFIG_SCHEMA["properties"]["SageMaker"]["properties"]["PythonSDK"][ 15 | "required" 16 | ] == ["Resources"] 17 | 18 | assert SAGEMAKER_PYTHON_SDK_CONFIG_SCHEMA["properties"]["SageMaker"]["properties"]["PythonSDK"][ 19 | "properties" 20 | ]["Resources"] 21 | -------------------------------------------------------------------------------- /.github/workflows/create-release.yml: -------------------------------------------------------------------------------- 1 | name: Create Release 2 | 3 | on: 4 | pull_request: 5 | types: [closed] 6 | branches: 7 | - main 8 | 9 | permissions: 10 | id-token: write # This is required for requesting the JWT 11 | 12 | jobs: 13 | create-release: 14 | if: github.event.pull_request.merged == true && startsWith(github.event.pull_request.title, 'Daily Sync with Botocore') && github.event.pull_request.user.login == 'sagemaker-bot' 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Configure AWS Credentials 18 | uses: aws-actions/configure-aws-credentials@v4 19 | with: 20 | role-to-assume: ${{ secrets.CODEBUILD_ROLE_ARN }} 21 | role-duration-seconds: 10800 22 | aws-region: us-west-2 23 | 24 | - name: Run CodeBuild 25 | uses: aws-actions/aws-codebuild-run-build@v1 26 | with: 27 | project-name: sagemaker-core-auto-release 28 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=python -msphinx 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=sagemaker-core 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The Sphinx module was not found. Make sure you have Sphinx installed, 20 | echo.then set the SPHINXBUILD environment variable to point to the full 21 | echo.path of the 'sphinx-build' executable. Alternatively you may add the 22 | echo.Sphinx directory to PATH. 23 | echo. 
24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /src/sagemaker_core/tools/method.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | from sagemaker_core.main.utils import remove_html_tags 4 | 5 | 6 | class MethodType(Enum): 7 | CLASS = "class" 8 | OBJECT = "object" 9 | STATIC = "static" 10 | 11 | 12 | class Method: 13 | """ 14 | A class to store the information of methods to be generated 15 | """ 16 | 17 | operation_name: str 18 | resource_name: str 19 | method_name: str 20 | return_type: str 21 | method_type: MethodType 22 | service_name: str 23 | docstring_title: str 24 | 25 | def __init__(self, **kwargs): 26 | self.__dict__.update(kwargs) 27 | 28 | def get_docstring_title(self, operation): 29 | documentation = operation.get("documentation") 30 | title = remove_html_tags(documentation) if documentation else None 31 | self.docstring_title = title.split(".")[0] + "." if title else None 32 | 33 | # TODO: add some templates for common methods 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: File a report to help us reproduce and fix the problem 4 | title: '' 5 | labels: 'bug' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To reproduce** 14 | A clear, step-by-step set of instructions to reproduce the bug. 15 | The provided code needs to be **complete** and **runnable**; if additional data is needed, please include it in the issue. 16 | 17 | **Expected behavior** 18 | A clear and concise description of what you expected to happen. 19 | 20 | **Screenshots or logs** 21 | If applicable, add screenshots or logs to help explain your problem. 22 | 23 | **Bug information** 24 | A description of your system. Please provide: 25 | - **SageMaker Core version**: 26 | - **Python version**: 27 | - **Is the issue with the autogen tooling or with the generated code?**: 28 | 29 | 30 | **Additional context** 31 | Add any other context about the problem here.
32 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: ["main"] 7 | pull_request: 8 | branches: ["main"] 9 | schedule: 10 | - cron: "0 0 * * 5" 11 | 12 | permissions: "read-all" 13 | 14 | jobs: 15 | analyze: 16 | name: "Analyze" 17 | runs-on: "ubuntu-latest" 18 | permissions: 19 | actions: read 20 | contents: read 21 | security-events: write 22 | steps: 23 | - name: "Checkout repository" 24 | uses: "actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608" 25 | 26 | - name: "Run CodeQL init" 27 | uses: "github/codeql-action/init@9fdb3e49720b44c48891d036bb502feb25684276" 28 | with: 29 | languages: "python" 30 | 31 | - name: "Run CodeQL autobuild" 32 | uses: "github/codeql-action/autobuild@9fdb3e49720b44c48891d036bb502feb25684276" 33 | 34 | - name: "Run CodeQL analyze" 35 | uses: "github/codeql-action/analyze@9fdb3e49720b44c48891d036bb502feb25684276" 36 | -------------------------------------------------------------------------------- /tst/tools/test_api_coverage.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from sagemaker_core.tools.constants import API_COVERAGE_JSON_FILE_PATH 4 | from sagemaker_core.tools.resources_extractor import ResourcesExtractor 5 | 6 | 7 | class TestAPICoverage: 8 | def test_api_coverage(self): 9 | with open(API_COVERAGE_JSON_FILE_PATH, "r") as file: 10 | coverage_json = json.load(file) 11 | previous_supported_apis = coverage_json["SupportedAPIs"] 12 | previous_unsupported_apis = coverage_json["UnsupportedAPIs"] 13 | resources_extractor = ResourcesExtractor() 14 | current_supported_apis = len(resources_extractor.actions_under_resource) 15 | current_unsupported_apis = len(resources_extractor.actions) 16 | # Check the numbers of current and previous apis being the same here 17 | # to ensure that developers update api_coverage.json when updating codegen 18 | assert current_supported_apis == previous_supported_apis 19 | assert current_unsupported_apis == previous_unsupported_apis 20 | -------------------------------------------------------------------------------- /src/sagemaker_core/main/code_injection/constants.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You 4 | # may not use this file except in compliance with the License. A copy of 5 | # the License is located at 6 | # 7 | # http://aws.amazon.com/apache2.0/ 8 | # 9 | # or in the "license" file accompanying this file. This file is 10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 | # ANY KIND, either express or implied. See the License for the specific 12 | # language governing permissions and limitations under the License. 
13 | """Constants used in the code_injection modules.""" 14 | from enum import Enum 15 | 16 | BASIC_TYPES = ["string", "boolean", "integer", "long", "double", "timestamp", "float"] 17 | STRUCTURE_TYPE = "structure" 18 | MAP_TYPE = "map" 19 | LIST_TYPE = "list" 20 | 21 | 22 | class Color(Enum): 23 | RED = "rgb(215,0,0)" 24 | GREEN = "rgb(0,135,0)" 25 | BLUE = "rgb(0,105,255)" 26 | YELLOW = "rgb(215,175,0)" 27 | PURPLE = "rgb(225,0,225)" 28 | BRIGHT_RED = "rgb(255,0,0)" 29 | -------------------------------------------------------------------------------- /.github/workflows/auto-merge.yml: -------------------------------------------------------------------------------- 1 | name: Merge Botocore Sync PR 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - "main*" 7 | paths: 8 | - "sample/**/*.json" 9 | 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.head_ref }} 12 | cancel-in-progress: true 13 | 14 | permissions: 15 | id-token: write # This is required for requesting the JWT 16 | contents: read 17 | 18 | jobs: 19 | merge-botocore-sync-pr: 20 | runs-on: ubuntu-latest 21 | if: github.event.pull_request.user.login == 'sagemaker-bot' && startsWith(github.event.pull_request.head.ref, 'botocore-sync') && startsWith(github.event.pull_request.title, 'Daily Sync with Botocore') 22 | steps: 23 | - name: Configure AWS Credentials 24 | uses: aws-actions/configure-aws-credentials@v4 25 | with: 26 | role-to-assume: ${{ secrets.CODEBUILD_ROLE_ARN }} 27 | role-duration-seconds: 10800 28 | aws-region: us-west-2 29 | - name: Auto Merge Botocore Sync PRs 30 | uses: aws-actions/aws-codebuild-run-build@v1 31 | with: 32 | project-name: sagemaker-core-merge-botocore-pr 33 | env-vars-for-codebuild: | 34 | PR_NUMBER, 35 | COMMIT_SHA 36 | env: 37 | PR_NUMBER: ${{ github.event.pull_request.number }} 38 | COMMIT_SHA: ${{ github.event.pull_request.head.sha }} 39 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution guidelines for sagemaker-code-gen 2 | 3 | ## Setting up Enviornment using Pyenv 4 | * Set up prerequisites following guide here - https://github.com/pyenv/pyenv/wiki#suggested-build-environment 5 | 6 | * Install Pyenv 7 | ``` 8 | curl https://pyenv.run | bash 9 | ``` 10 | 11 | * Add the following to ~/.zshrc to load Pyenv automatically 12 | ``` 13 | export PYENV_ROOT="$HOME/.pyenv" 14 | export PATH="$PYENV_ROOT/bin:$PATH" 15 | eval "$(pyenv init path)" 16 | eval "$(pyenv init -)" 17 | eval "$(pyenv virtualenv-init -)" 18 | ``` 19 | 20 | * Install Python Version and setup virtual-env 21 | ``` 22 | pyenv install 3.10.14 23 | pyenv virtualenv 3.10.14 py3.10.14 24 | pyenv activate py3.10.14 25 | ``` 26 | 27 | * Install dependencies required for CodeGen and set PYTHONPATH 28 | ``` 29 | pip install ".[codegen]" 30 | source .env 31 | ``` 32 | 33 | ## Run CodeGen 34 | * To generate all CodeGen code run the below 35 | ``` 36 | python src/sagemaker_core/tools/codegen.py 37 | ``` 38 | 39 | ## Testing 40 | * To check for regressions in existing flows, make sure to run: `pytest tst`. For new unit test coverage added make sure `pytest tst` validates them. 41 | ``` 42 | pytest tst 43 | ``` 44 | * Use Pylint to detect errors and improve code quality. For code style errors use `black` to format the files. 45 | ``` 46 | black . 
47 | pylint **/*.py 48 | ``` 49 | 50 | ## Building Distribution 51 | * To build a distribution of SageMakerCore, run the below 52 | ``` 53 | pip install --upgrade build 54 | python -m build 55 | ``` -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=64"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "sagemaker-core" 7 | dynamic = ["version"] 8 | description = "A Python package for SageMaker core functionalities" 9 | authors = [ 10 | {name = "AWS", email = "sagemaker-interests@amazon.com"} 11 | ] 12 | readme = "README.rst" 13 | dependencies = [ 14 | # Add your dependencies here (Include lower and upper bounds as applicable) 15 | "boto3>=1.35.36,<2.0.0", 16 | "pydantic>=2.0.0,<3.0.0", 17 | "PyYAML>=6.0, <7.0", 18 | "jsonschema<5.0.0", 19 | "platformdirs>=4.0.0, <5.0.0", 20 | "rich>=13.0.0, <15.0.0", 21 | "mock>4.0, <5.0", 22 | "importlib-metadata<=9.0,>=1.4.0", 23 | ] 24 | requires-python = ">=3.8" 25 | classifiers = [ 26 | "Development Status :: 3 - Alpha", 27 | "Intended Audience :: Developers", 28 | "License :: OSI Approved :: Apache Software License", 29 | "Programming Language :: Python :: 3.8", 30 | "Programming Language :: Python :: 3.9", 31 | "Programming Language :: Python :: 3.10", 32 | "Programming Language :: Python :: 3.11", 33 | "Programming Language :: Python :: 3.12", 34 | ] 35 | 36 | [project.optional-dependencies] 37 | codegen = [ 38 | "black>=24.3.0, <25.0.0", 39 | "pandas>=2.0.0, <3.0.0", 40 | "pytest>=8.0.0, <9.0.0", 41 | "pylint>=3.0.0, <4.0.0" 42 | ] 43 | 44 | [project.urls] 45 | Repository = "https://github.com/aws/sagemaker-core.git" 46 | 47 | [tool.black] 48 | line-length = 100 49 | exclude = '\.ipynb$' 50 | 51 | [tool.setuptools.dynamic] 52 | version = { file = "VERSION"} 53 | -------------------------------------------------------------------------------- /src/sagemaker_core/main/code_injection/base.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You 4 | # may not use this file except in compliance with the License. A copy of 5 | # the License is located at 6 | # 7 | # http://aws.amazon.com/apache2.0/ 8 | # 9 | # or in the "license" file accompanying this file. This file is 10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 | # ANY KIND, either express or implied. See the License for the specific 12 | # language governing permissions and limitations under the License.
13 | import os 14 | import boto3 15 | from botocore.config import Config 16 | 17 | 18 | class Base: 19 | def __init__(self, session=None, region=None): 20 | aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID") 21 | aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY") 22 | aws_session_token = os.getenv("AWS_SESSION_TOKEN") 23 | profile_name = os.getenv("AWS_PROFILE") 24 | 25 | if session is None: 26 | if all([aws_access_key_id, aws_secret_access_key, aws_session_token]): 27 | self.session = boto3.Session( 28 | aws_access_key_id=aws_access_key_id, 29 | aws_secret_access_key=aws_secret_access_key, 30 | aws_session_token=aws_session_token, 31 | ) 32 | elif profile_name: 33 | self.session = boto3.Session(profile_name=profile_name) 34 | else: 35 | self.session = boto3.Session() 36 | else: 37 | # Use the session that the caller passed in 38 | self.session = session 39 | 40 | self.region = region if region else os.getenv("AWS_REGION") 41 | 42 | # Create a custom config with the user agent 43 | custom_config = Config(region_name=self.region, user_agent_extra="SageMakerSDK/3.0") 44 | 45 | self.client = self.session.client("sagemaker", config=custom_config) 46 | --------------------------------------------------------------------------------
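`Base` above resolves credentials in a fixed order: an explicitly passed `boto3.Session`, then the `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY`/`AWS_SESSION_TOKEN` environment variables, then `AWS_PROFILE`, and finally boto3's default chain. A minimal usage sketch — the region and profile values are hypothetical, and it assumes valid AWS credentials are available:

```python
import boto3

from sagemaker_core.main.code_injection.base import Base

# No session passed: falls back to env keys, AWS_PROFILE, or boto3 defaults.
base = Base(region="us-west-2")  # hypothetical region
print(base.client.meta.region_name)

# Passing an explicit session short-circuits the environment lookup.
session = boto3.Session(profile_name="my-profile")  # hypothetical profile
base_with_session = Base(session=session, region="us-west-2")
```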
/.pylintrc: -------------------------------------------------------------------------------- 1 | [MESSAGES CONTROL] 2 | 3 | # Only show warnings with the listed confidence levels. Leave empty to show 4 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED 5 | confidence= 6 | 7 | # Enable the message, report, category or checker with the given id(s). You can 8 | # either give multiple identifiers separated by comma (,) or put this option 9 | # multiple times (only on the command line, not in the configuration file where 10 | # it should appear only once). See also the "--disable" option for examples. 11 | #enable= 12 | 13 | # Disable the message, report, category or checker with the given id(s). You 14 | # can either give multiple identifiers separated by comma (,) or put this 15 | # option multiple times (only on the command line, not in the configuration 16 | # file where it should appear only once). You can also use "--disable=all" to 17 | # disable everything first and then reenable specific checks. For example, if 18 | # you want to run only the similarities checker, you can use "--disable=all 19 | # --enable=similarities". If you want to run only the classes checker, but have 20 | # no Warning level messages displayed, use "--disable=all --enable=classes 21 | # --disable=W" 22 | disable= 23 | duplicate-code, 24 | fixme, 25 | line-too-long, 26 | logging-fstring-interpolation, 27 | missing-class-docstring, 28 | missing-function-docstring, 29 | missing-module-docstring, 30 | too-few-public-methods, 31 | too-many-arguments, 32 | too-many-branches, 33 | too-many-instance-attributes, 34 | too-many-lines, 35 | too-many-locals, 36 | too-many-nested-blocks, 37 | unidiomatic-typecheck, 38 | unspecified-encoding, 39 | unused-wildcard-import, 40 | wrong-import-order, 41 | 42 | [REPORTS] 43 | # Set the output format. Available formats are text, parseable, colorized, msvs 44 | # (visual studio) and html. You can also give a reporter class, eg 45 | # mypackage.mymodule.MyReporterClass. 46 | output-format=colorized 47 | 48 | # Tells whether to display a full report or only the messages 49 | reports=no 50 | -------------------------------------------------------------------------------- /src/sagemaker_core/tools/codegen.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You 4 | # may not use this file except in compliance with the License. A copy of 5 | # the License is located at 6 | # 7 | # http://aws.amazon.com/apache2.0/ 8 | # 9 | # or in the "license" file accompanying this file. This file is 10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 | # ANY KIND, either express or implied. See the License for the specific 12 | # language governing permissions and limitations under the License. 13 | """Generates the code for the service model.""" 14 | from sagemaker_core.main.utils import reformat_file_with_black 15 | from sagemaker_core.tools.shapes_codegen import ShapesCodeGen 16 | from sagemaker_core.tools.resources_codegen import ResourcesCodeGen 17 | from typing import Optional 18 | 19 | from sagemaker_core.tools.data_extractor import ServiceJsonData, load_service_jsons 20 | 21 | 22 | def generate_code( 23 | shapes_code_gen: Optional[ShapesCodeGen] = None, 24 | resources_code_gen: Optional[ResourcesCodeGen] = None, 25 | ) -> None: 26 | """ 27 | Generates the code for the given code generators. If any code generator is not 28 | provided when calling this function, the function will instantiate it. 29 | 30 | Note: ordering is important; generate the utils and lower-level classes first, 31 | then generate the higher-level classes. 32 | 33 | Args: 34 | shapes_code_gen (ShapesCodeGen): The code generator for shape classes. 35 | resources_code_gen (ResourcesCodeGen): The code generator for resource classes. 36 | 37 | Returns: 38 | None 39 | """ 40 | service_json_data: ServiceJsonData = load_service_jsons() 41 | 42 | shapes_code_gen = shapes_code_gen or ShapesCodeGen() 43 | resources_code_gen = resources_code_gen or ResourcesCodeGen( 44 | service_json=service_json_data.sagemaker 45 | ) 46 | 47 | shapes_code_gen.generate_shapes() 48 | reformat_file_with_black(".") 49 | 50 | 51 | """ 52 | Initializes all the code generator classes and triggers generation.
53 | """ 54 | if __name__ == "__main__": 55 | generate_code() 56 | -------------------------------------------------------------------------------- /src/sagemaker_core/tools/data_extractor.py: -------------------------------------------------------------------------------- 1 | import json 2 | from functools import lru_cache 3 | 4 | from pydantic import BaseModel 5 | 6 | from sagemaker_core.tools.constants import ( 7 | ADDITIONAL_OPERATION_FILE_PATH, 8 | FEATURE_STORE_SERVICE_JSON_FILE_PATH, 9 | METRICS_SERVICE_JSON_FILE_PATH, 10 | SERVICE_JSON_FILE_PATH, 11 | RUNTIME_SERVICE_JSON_FILE_PATH, 12 | ) 13 | 14 | 15 | class ServiceJsonData(BaseModel): 16 | sagemaker: dict 17 | sagemaker_runtime: dict 18 | sagemaker_feature_store: dict 19 | sagemaker_metrics: dict 20 | 21 | 22 | @lru_cache(maxsize=1) 23 | def load_service_jsons() -> ServiceJsonData: 24 | with open(SERVICE_JSON_FILE_PATH, "r") as file: 25 | service_json = json.load(file) 26 | with open(RUNTIME_SERVICE_JSON_FILE_PATH, "r") as file: 27 | runtime_service_json = json.load(file) 28 | with open(FEATURE_STORE_SERVICE_JSON_FILE_PATH, "r") as file: 29 | feature_store_service_json = json.load(file) 30 | with open(METRICS_SERVICE_JSON_FILE_PATH, "r") as file: 31 | metrics_service_json = json.load(file) 32 | return ServiceJsonData( 33 | sagemaker=service_json, 34 | sagemaker_runtime=runtime_service_json, 35 | sagemaker_feature_store=feature_store_service_json, 36 | sagemaker_metrics=metrics_service_json, 37 | ) 38 | 39 | 40 | @lru_cache(maxsize=1) 41 | def load_combined_shapes_data() -> dict: 42 | service_json_data = load_service_jsons() 43 | return { 44 | **service_json_data.sagemaker_runtime["shapes"], 45 | **service_json_data.sagemaker_feature_store["shapes"], 46 | **service_json_data.sagemaker_metrics["shapes"], 47 | **service_json_data.sagemaker["shapes"], 48 | } 49 | 50 | 51 | @lru_cache(maxsize=1) 52 | def load_combined_operations_data() -> dict: 53 | service_json_data = load_service_jsons() 54 | return { 55 | **service_json_data.sagemaker_runtime["operations"], 56 | **service_json_data.sagemaker_feature_store["operations"], 57 | **service_json_data.sagemaker_metrics["operations"], 58 | **service_json_data.sagemaker["operations"], 59 | } 60 | 61 | 62 | @lru_cache(maxsize=1) 63 | def load_additional_operations_data() -> dict: 64 | with open(ADDITIONAL_OPERATION_FILE_PATH, "r") as file: 65 | additional_operation_json = json.load(file) 66 | return additional_operation_json 67 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Project information ----------------------------------------------------- 7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 8 | from datetime import datetime 9 | 10 | project = "sagemaker-core" 11 | copyright = ( 12 | "%s, Amazon Web Services, Inc. or its affiliates. All rights reserved." 
/docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Project information ----------------------------------------------------- 7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 8 | from datetime import datetime 9 | 10 | project = "sagemaker-core" 11 | copyright = ( 12 | "%s, Amazon Web Services, Inc. or its affiliates. All rights reserved." % datetime.now().year 13 | ) 14 | author = "Amazon Web Services" 15 | 16 | # -- General configuration --------------------------------------------------- 17 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 18 | 19 | pygments_style = "sphinx" 20 | 21 | extensions = [ 22 | "sphinx.ext.autodoc", 23 | "sphinx.ext.autosummary", 24 | "sphinx.ext.coverage", 25 | "sphinx.ext.napoleon", 26 | "sphinx.ext.intersphinx", 27 | "sphinx.ext.viewcode", 28 | "sphinx.ext.doctest", 29 | "sphinx_rtd_theme", 30 | ] 31 | 32 | # -- Options for HTML output ------------------------------------------------- 33 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 34 | 35 | html_theme = "sphinx_rtd_theme" 36 | 37 | html_theme_options = { 38 | "collapse_navigation": True, 39 | "sticky_navigation": True, 40 | "navigation_depth": 6, 41 | "includehidden": True, 42 | "titles_only": False, 43 | } 44 | 45 | htmlhelp_basename = "%sdocs" % project 46 | 47 | # Add any paths that contain custom static files (such as style sheets) here, 48 | # relative to this directory. They are copied after the builtin static files, 49 | # so a file named "default.css" will overwrite the builtin "default.css". 50 | html_static_path = [] 51 | 52 | # Example configuration for intersphinx: refer to the Python standard library. 53 | intersphinx_mapping = {"python": ("http://docs.python.org/", None)} 54 | 55 | # -- Options for autodoc ---------------------------------------------------- 56 | # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#configuration 57 | 58 | # Automatically extract typehints when specified and place them in 59 | # descriptions of the relevant function/method. 60 | autodoc_typehints = "description" 61 | 62 | # autosummary 63 | autosummary_generate = True 64 | 65 | # autosectionlabel 66 | autosectionlabel_prefix_document = True 67 | -------------------------------------------------------------------------------- /tst/generated/test_shapes.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import unittest 3 | 4 | from pydantic import BaseModel, ValidationError 5 | 6 | from sagemaker_core.main.shapes import Base, AdditionalS3DataSource 7 | from sagemaker_core.main.utils import Unassigned 8 | from sagemaker_core.tools.constants import GENERATED_CLASSES_LOCATION, SHAPES_CODEGEN_FILE_NAME 9 | 10 | FILE_NAME = GENERATED_CLASSES_LOCATION + "/" + SHAPES_CODEGEN_FILE_NAME 11 | 12 | 13 | class TestGeneratedShape(unittest.TestCase): 14 | def test_generated_shapes_have_pydantic_enabled(self): 15 | # This test ensures that all main shapes inherit Base which inherits BaseModel, thereby forcing pydantic validation 16 | assert issubclass(Base, BaseModel) 17 | assert ( 18 | self._fetch_number_of_classes_in_file_not_inheriting_a_class(FILE_NAME, "Base") == 1 19 | ) # 1 because the Base class itself does not inherit 20 | 21 | def test_pydantic_validation_for_generated_class_success(self): 22 | additional_s3_data_source = AdditionalS3DataSource( 23 | s3_data_type="filestring", s3_uri="s3/uri" 24 | ) 25 | assert isinstance(additional_s3_data_source.s3_data_type, str) 26 | assert isinstance(additional_s3_data_source.s3_uri, str) 27 | assert isinstance(additional_s3_data_source.compression_type, Unassigned) 28 | 29 | def test_pydantic_validation_for_generated_class_success_with_optional_attributes_provided( 30 | self, 31 | ): 32 | additional_s3_data_source = AdditionalS3DataSource( 33 |
s3_data_type="filestring", s3_uri="s3/uri", compression_type="zip" 34 | ) 35 | assert isinstance(additional_s3_data_source.s3_data_type, str) 36 | assert isinstance(additional_s3_data_source.s3_uri, str) 37 | assert isinstance(additional_s3_data_source.compression_type, str) 38 | 39 | def test_pydantic_validation_for_generated_class_throws_error_for_incorrect_input( 40 | self, 41 | ): 42 | with self.assertRaises(ValidationError): 43 | AdditionalS3DataSource(s3_data_type="str", s3_uri=12) 44 | 45 | def _fetch_number_of_classes_in_file_not_inheriting_a_class( 46 | self, filepath: str, base_class_name: str 47 | ): 48 | count = 0 49 | with open(filepath, "r") as file: 50 | tree = ast.parse(file.read(), filename=filepath) 51 | for node in tree.body: 52 | if isinstance(node, ast.ClassDef): 53 | if not any(base_class.id == base_class_name for base_class in node.bases): 54 | count = count + 1 55 | return count 56 | -------------------------------------------------------------------------------- /src/sagemaker_core/main/user_agent.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You 4 | # may not use this file except in compliance with the License. A copy of 5 | # the License is located at 6 | # 7 | # http://aws.amazon.com/apache2.0/ 8 | # 9 | # or in the "license" file accompanying this file. This file is 10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 | # ANY KIND, either express or implied. See the License for the specific 12 | # language governing permissions and limitations under the License. 13 | from __future__ import absolute_import 14 | 15 | import json 16 | import os 17 | 18 | import importlib_metadata 19 | 20 | SagemakerCore_PREFIX = "AWS-SageMakerCore" 21 | STUDIO_PREFIX = "AWS-SageMaker-Studio" 22 | NOTEBOOK_PREFIX = "AWS-SageMaker-Notebook-Instance" 23 | 24 | NOTEBOOK_METADATA_FILE = "/etc/opt/ml/sagemaker-notebook-instance-version.txt" 25 | STUDIO_METADATA_FILE = "/opt/ml/metadata/resource-metadata.json" 26 | 27 | SagemakerCore_VERSION = importlib_metadata.version("sagemaker-core") 28 | 29 | 30 | def process_notebook_metadata_file() -> str: 31 | """Check if the platform is SageMaker Notebook, if yes, return the InstanceType 32 | 33 | Returns: 34 | str: The InstanceType of the SageMaker Notebook if it exists, otherwise None 35 | """ 36 | if os.path.exists(NOTEBOOK_METADATA_FILE): 37 | with open(NOTEBOOK_METADATA_FILE, "r") as sagemaker_nbi_file: 38 | return sagemaker_nbi_file.read().strip() 39 | 40 | return None 41 | 42 | 43 | def process_studio_metadata_file() -> str: 44 | """Check if the platform is SageMaker Studio, if yes, return the AppType 45 | 46 | Returns: 47 | str: The AppType of the SageMaker Studio if it exists, otherwise None 48 | """ 49 | if os.path.exists(STUDIO_METADATA_FILE): 50 | with open(STUDIO_METADATA_FILE, "r") as sagemaker_studio_file: 51 | metadata = json.load(sagemaker_studio_file) 52 | return metadata.get("AppType") 53 | 54 | return None 55 | 56 | 57 | def get_user_agent_extra_suffix() -> str: 58 | """Get the user agent extra suffix string specific to SageMakerCore 59 | 60 | Adhers to new boto recommended User-Agent 2.0 header format 61 | 62 | Returns: 63 | str: The user agent extra suffix string to be appended 64 | """ 65 | suffix = "lib/{}#{}".format(SagemakerCore_PREFIX, SagemakerCore_VERSION) 66 | 67 | # Get the notebook instance type and 
append it to the user agent string if it exists 68 | notebook_instance_type = process_notebook_metadata_file() 69 | if notebook_instance_type: 70 | suffix = "{} md/{}#{}".format(suffix, NOTEBOOK_PREFIX, notebook_instance_type) 71 | 72 | # Get the studio app type and append it to the user agent string if it exists 73 | studio_app_type = process_studio_metadata_file() 74 | if studio_app_type: 75 | suffix = "{} md/{}#{}".format(suffix, STUDIO_PREFIX, studio_app_type) 76 | 77 | return suffix 78 | --------------------------------------------------------------------------------
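The suffix built by `get_user_agent_extra_suffix` above is designed to be attached to a boto3 client through botocore's `Config`, in the same way `Base` attaches its own `user_agent_extra` earlier in this listing. A minimal wiring sketch — an illustration, not the library's own client setup:

```python
import boto3
from botocore.config import Config

from sagemaker_core.main.user_agent import get_user_agent_extra_suffix

# Append the SageMakerCore lib/md segments to every request's User-Agent header.
config = Config(user_agent_extra=get_user_agent_extra_suffix())
client = boto3.Session().client("sagemaker", config=config)
```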
"sagemaker_core.main.user_agent.process_studio_metadata_file", return_value="studio_type" 76 | ): 77 | assert ( 78 | get_user_agent_extra_suffix() 79 | == f"lib/{SagemakerCore_PREFIX}#{SagemakerCore_VERSION} md/{STUDIO_PREFIX}#studio_type" 80 | ) 81 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | .. image:: https://github.com/aws/sagemaker-python-sdk/raw/master/branding/icon/sagemaker-banner.png 2 | :height: 100px 3 | :alt: SageMaker 4 | 5 | ==================== 6 | SageMaker Core 7 | ==================== 8 | 9 | .. image:: https://img.shields.io/pypi/v/sagemaker-core.svg 10 | :target: https://pypi.python.org/pypi/sagemaker-core 11 | :alt: Latest Version 12 | 13 | .. image:: https://img.shields.io/pypi/pyversions/sagemaker-core.svg 14 | :target: https://pypi.python.org/pypi/sagemaker-core 15 | :alt: Supported Python Versions 16 | 17 | 18 | 19 | Introduction 20 | ------------ 21 | 22 | Welcome to the sagemaker-core Python SDK, an SDK designed to provide an object-oriented interface for interacting with Amazon SageMaker resources. It offers full parity with SageMaker APIs, allowing developers to leverage all SageMaker capabilities directly through the SDK. sagemaker-core introduces features such as dedicated resource classes, resource chaining, auto code completion, comprehensive documentation and type hints to enhance the developer experience as well as productivity. 23 | 24 | 25 | Key Features 26 | ------------ 27 | 28 | * **Object-Oriented Interface**: Provides a structured way to interact with SageMaker resources, making it easier to manage them using familiar object-oriented programming techniques. 29 | * **Resource Chaining**: Allows seamless connection of SageMaker resources by passing outputs as inputs between them, simplifying workflows and reducing the complexity of parameter management. 30 | * **Full Parity with SageMaker APIs**: Ensures access to all SageMaker capabilities through the SDK, providing a comprehensive toolset for building and deploying machine learning models. 31 | * **Abstraction of Low-Level Details**: Automatically handles resource state transitions and polling logic, freeing developers from managing these intricacies and allowing them to focus on higher-level tasks. 32 | * **Auto Code Completion**: Enhances the developer experience by offering real-time suggestions and completions in popular IDEs, reducing syntax errors and speeding up the coding process. 33 | * **Comprehensive Documentation and Type Hints**: Provides detailed guidance and type hints to help developers understand functionalities, write code faster, and reduce errors without complex API navigation. 34 | * **Incorporation of Default Configs**: Integrates the previous SageMaker SDK feature of default configs, allowing developers to set default values for parameters like IAM roles and VPC configurations. This streamlines the setup process, enabling developers to focus on customizations specific to their use case. 35 | 36 | 37 | Benefits 38 | -------- 39 | 40 | * **Simplified Development**: By abstracting low-level details and providing default configs, developers can focus on building and deploying machine learning models without getting bogged down by repetitive tasks. 41 | * **Increased Productivity**: The SDK's features, such as auto code completion and type hints, help developers write code faster and with fewer errors. 
42 | * **Enhanced Readability**: Resource chaining and dedicated resource classes result in more readable and maintainable code. 43 | 44 | 45 | Docs and Examples 46 | ----------------- 47 | Learn more about the sagemaker-core SDK and its features by visiting the `What's New Announcement `_. 48 | 49 | For examples and walkthroughs, see the `SageMaker Core Examples `_. 50 | 51 | For detailed documentation, including the API reference, see `Read the Docs `_. 52 | --------------------------------------------------------------------------------
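To make the README's resource-chaining claim concrete, here is a hedged sketch in the spirit of the example notebooks. The `Model`, `EndpointConfig`, and `Endpoint` resource classes and the `create`/`wait_for_status` method names are taken from this repo (`integ/sagemaker_cleaner.py` and `tools/constants.py`), but the snake_case parameter names and shape fields below are assumptions modeled on the SageMaker API, not confirmed signatures:

```python
from sagemaker_core.main.resources import Endpoint, EndpointConfig, Model
from sagemaker_core.main.shapes import ContainerDefinition, ProductionVariant

# All names, ARNs, and image URIs below are hypothetical placeholders.
model = Model.create(
    model_name="my-model",
    execution_role_arn="arn:aws:iam::111122223333:role/SageMakerRole",
    primary_container=ContainerDefinition(
        image="111122223333.dkr.ecr.us-west-2.amazonaws.com/my-image:latest"
    ),
)

endpoint_config = EndpointConfig.create(
    endpoint_config_name="my-endpoint-config",
    production_variants=[
        ProductionVariant(
            variant_name="AllTraffic",
            model_name=model.model_name,  # chaining: the Model's output feeds the config
            instance_type="ml.m5.large",
            initial_instance_count=1,
        )
    ],
)

# Chaining again: the EndpointConfig's name feeds straight into the Endpoint.
endpoint = Endpoint.create(
    endpoint_name="my-endpoint",
    endpoint_config_name=endpoint_config.endpoint_config_name,
)
endpoint.wait_for_status("InService")  # target status value is an assumption
```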
71 | """ 72 | 73 | LOGGER_STRING = """ 74 | logger = get_textual_rich_logger(__name__) 75 | 76 | """ 77 | 78 | # TODO: The file name should be injected, we should update it to be more generic 79 | ADDITIONAL_OPERATION_FILE_PATH = ( 80 | os.getcwd() + "/src/sagemaker_core/tools/additional_operations.json" 81 | ) 82 | SERVICE_JSON_FILE_PATH = os.getcwd() + "/sample/sagemaker/2017-07-24/service-2.json" 83 | RUNTIME_SERVICE_JSON_FILE_PATH = os.getcwd() + "/sample/sagemaker-runtime/2017-05-13/service-2.json" 84 | FEATURE_STORE_SERVICE_JSON_FILE_PATH = ( 85 | os.getcwd() + "/sample/sagemaker-featurestore-runtime/2020-07-01/service-2.json" 86 | ) 87 | METRICS_SERVICE_JSON_FILE_PATH = os.getcwd() + "/sample/sagemaker-metrics/2022-09-30/service-2.json" 88 | 89 | GENERATED_CLASSES_LOCATION = os.getcwd() + "/src/sagemaker_core/main" 90 | UTILS_CODEGEN_FILE_NAME = "utils.py" 91 | DEFAULT_CONFIGS_CODEGEN_FILE_NAME = "default_configs_helper.py" 92 | 93 | RESOURCES_CODEGEN_FILE_NAME = "resources.py" 94 | 95 | SHAPES_CODEGEN_FILE_NAME = "shapes.py" 96 | 97 | CONFIG_SCHEMA_FILE_NAME = "config_schema.py" 98 | 99 | API_COVERAGE_JSON_FILE_PATH = os.getcwd() + "/src/sagemaker_core/tools/api_coverage.json" 100 | 101 | SHAPES_WITH_JSON_FIELD_ALIAS = ["MonitoringDatasetFormat"] # Shapes with field name with "json" 102 | -------------------------------------------------------------------------------- /tst/generated/test_logs.py: -------------------------------------------------------------------------------- 1 | import botocore 2 | import pytest 3 | from unittest.mock import patch, MagicMock 4 | from sagemaker_core.main.logs import LogStreamHandler, MultiLogStreamHandler 5 | 6 | 7 | def test_single_stream_handler_get_latest(): 8 | mock_log_events = [ 9 | { 10 | "nextForwardToken": "nextToken1", 11 | "events": [ 12 | {"ingestionTime": 123456789, "message": "test message", "timestamp": 123456789} 13 | ], 14 | }, 15 | {"nextForwardToken": "nextToken2", "events": []}, 16 | ] 17 | 18 | log_stream_handler = LogStreamHandler("logGroupName", "logStreamName", 0) 19 | 20 | with patch.object(log_stream_handler, "cw_client") as mock_cw_client: 21 | mock_cw_client.get_log_events.side_effect = mock_log_events 22 | events = log_stream_handler.get_latest_log_events() 23 | 24 | result = next(events) 25 | 26 | assert result == ( 27 | "logStreamName", 28 | {"ingestionTime": 123456789, "message": "test message", "timestamp": 123456789}, 29 | ) 30 | 31 | mock_cw_client.get_log_events.assert_called_once_with( 32 | logGroupName="logGroupName", logStreamName="logStreamName", startFromHead=True 33 | ) 34 | 35 | with pytest.raises(StopIteration): 36 | next(events) 37 | 38 | 39 | @patch("sagemaker_core.main.logs.MultiLogStreamHandler.ready", autospec=True) 40 | def test_multi_stream_handler_get_latest(mock_ready): 41 | mock_ready.return_value = True 42 | 43 | mock_stream = MagicMock(spec=LogStreamHandler) 44 | mock_stream.get_latest_log_events.return_value = iter( 45 | [ 46 | ( 47 | "streamName", 48 | {"ingestionTime": 123456789, "message": "test message", "timestamp": 123456789}, 49 | ) 50 | ] 51 | ) 52 | 53 | mulit_log_stream_handler = MultiLogStreamHandler("log_group_name", "training_job_name", 1) 54 | mulit_log_stream_handler.streams = [mock_stream] 55 | 56 | events = mulit_log_stream_handler.get_latest_log_events() 57 | 58 | result = next(events) 59 | 60 | assert result == ( 61 | "streamName", 62 | {"ingestionTime": 123456789, "message": "test message", "timestamp": 123456789}, 63 | ) 64 | 65 | with pytest.raises(StopIteration): 66 | 
66 |         next(events)
67 | 
68 | 
69 | def test_ready():
70 |     mock_streams = {
71 |         "logStreams": [{"logStreamName": "streamName"}],
72 |         "nextToken": None,
73 |     }
74 | 
75 |     multi_log_stream_handler = MultiLogStreamHandler("logGroupName", "logStreamNamePrefix", 1)
76 |     with patch.object(multi_log_stream_handler, "cw_client") as mock_cw_client:
77 |         mock_cw_client.describe_log_streams.return_value = mock_streams
78 | 
79 |         result = multi_log_stream_handler.ready()
80 | 
81 |         assert result is True
82 |         mock_cw_client.describe_log_streams.assert_called_once()
83 | 
84 | 
85 | def test_ready_streams_set():
86 |     log_stream = LogStreamHandler("logGroupName", "logStreamName", 0)
87 |     multi_log_stream_handler = MultiLogStreamHandler("logGroupName", "logStreamNamePrefix", 1)
88 |     multi_log_stream_handler.streams = [log_stream]
89 | 
90 |     with patch.object(multi_log_stream_handler, "cw_client") as mock_cw_client:
91 |         result = multi_log_stream_handler.ready()
92 | 
93 |         assert result is True
94 |         mock_cw_client.describe_log_streams.assert_not_called()
95 | 
96 | 
97 | def test_not_ready():
98 |     mock_streams = {"logStreams": [], "nextToken": None}
99 | 
100 |     multi_log_stream_handler = MultiLogStreamHandler("logGroupName", "logStreamNamePrefix", 1)
101 |     with patch.object(multi_log_stream_handler, "cw_client") as mock_cw_client:
102 |         mock_cw_client.describe_log_streams.return_value = mock_streams
103 | 
104 |         result = multi_log_stream_handler.ready()
105 | 
106 |         assert result is False
107 |         mock_cw_client.describe_log_streams.assert_called_once()
108 | 
109 | 
110 | def test_ready_resource_not_found():
111 | 
112 |     multi_log_stream_handler = MultiLogStreamHandler("logGroupName", "logStreamNamePrefix", 1)
113 |     with patch.object(multi_log_stream_handler, "cw_client") as mock_cw_client:
114 |         mock_cw_client.describe_log_streams.side_effect = botocore.exceptions.ClientError(
115 |             error_response={"Error": {"Code": "ResourceNotFoundException"}}, operation_name="test"
116 |         )
117 | 
118 |         result = multi_log_stream_handler.ready()
119 | 
120 |         assert result is False
121 |         mock_cw_client.describe_log_streams.assert_called_once()
122 | 
--------------------------------------------------------------------------------
/integ/sagemaker_cleaner.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | from sagemaker_core.main.resources import Model, EndpointConfig, Endpoint
3 | 
4 | 
5 | class SageMakerCleaner:
6 |     """Provides methods to clean up SageMaker resources"""
7 | 
8 |     def __init__(self):
9 |         """Initialize a SageMakerCleaner
10 | 
11 |         Sets up a resource tracker that counts the total number of
12 |         deleted and failed resources across cleanup runs.
13 |         """
14 |         self.resource_tracker = {"total_deleted": 0, "total_failed": 0}
15 | 
16 |     def handle_cleanup(self, before_timestamp, after_timestamp):
17 |         """Handles deletion for SageMaker resources
18 | 
19 |         Args:
20 |             before_timestamp (datetime): timestamp for 'CreationTimeBefore' or 'CreatedBefore' boto3 parameter
21 |             after_timestamp (datetime): timestamp for 'CreationTimeAfter' or 'CreatedAfter' boto3 parameter
22 |         """
23 |         RESOURCE_TYPE_ORDER = [
24 |             "Endpoints",
25 |             "EndpointConfigs",
26 |             "Models",
27 |         ]
28 | 
29 |         CLEANUP_METHODS = {
30 |             "Endpoints": self.cleanup_endpoints,
31 |             "EndpointConfigs": self.cleanup_endpoint_configs,
32 |             "Models": self.cleanup_models,
33 |         }
34 |         for resource_type in RESOURCE_TYPE_ORDER:
35 |             CLEANUP_METHODS[resource_type](before_timestamp, after_timestamp)
36 | 
37 |     def cleanup_endpoints(self, creation_time_before, creation_time_after):
38 |         """Deletes Endpoints created between the given timestamps
39 | 
40 |         Args:
41 |             creation_time_before (datetime): timestamp for 'CreationTimeBefore' or 'CreatedBefore' boto3 parameter
42 |             creation_time_after (datetime): timestamp for 'CreationTimeAfter' or 'CreatedAfter' boto3 parameter
43 |         """
44 |         endpoints = Endpoint.get_all(
45 |             creation_time_before=creation_time_before, creation_time_after=creation_time_after
46 |         )
47 |         for endpoint in endpoints:
48 |             try:
49 |                 endpoint.delete()
50 |                 self._track_resource(deleted=1)
51 |             except Exception:
52 |                 self._track_resource(failed=1)
53 | 
54 |     def cleanup_endpoint_configs(self, creation_time_before, creation_time_after):
55 |         """Deletes EndpointConfigs created between the given timestamps
56 | 
57 |         Args:
58 |             creation_time_before (datetime): timestamp for 'CreationTimeBefore' or 'CreatedBefore' boto3 parameter
59 |             creation_time_after (datetime): timestamp for 'CreationTimeAfter' or 'CreatedAfter' boto3 parameter
60 |         """
61 |         endpoint_configs = EndpointConfig.get_all(
62 |             creation_time_before=creation_time_before, creation_time_after=creation_time_after
63 |         )
64 |         for endpoint_config in endpoint_configs:
65 |             try:
66 |                 endpoint_config.delete()
67 |                 self._track_resource(deleted=1)
68 |             except Exception:
69 |                 self._track_resource(failed=1)
70 | 
71 |     def cleanup_models(self, creation_time_before, creation_time_after):
72 |         """Deletes Models created between the given timestamps
73 | 
74 |         Args:
75 |             creation_time_before (datetime): timestamp for 'CreationTimeBefore' or 'CreatedBefore' boto3 parameter
76 |             creation_time_after (datetime): timestamp for 'CreationTimeAfter' or 'CreatedAfter' boto3 parameter
77 |         """
78 |         models = Model.get_all(
79 |             creation_time_before=creation_time_before, creation_time_after=creation_time_after
80 |         )
81 |         for model in models:
82 |             try:
83 |                 model.delete()
84 |                 self._track_resource(deleted=1)
85 |             except Exception:
86 |                 self._track_resource(failed=1)
87 | 
88 |     def _track_resource(self, deleted=0, failed=0):
89 |         """Updates the resource tracker with the number of deleted or failed resources
90 | 
91 |         Args:
92 |             deleted (int): number of deleted resources to add to the tracker
93 |             failed (int): number of failed resources to add to the tracker
94 |         """
95 |         self.resource_tracker["total_deleted"] += deleted
96 |         self.resource_tracker["total_failed"] += failed
97 | 
98 | 
99 | def handle_cleanup():
100 |     region = "us-west-2"
101 |     print(f"\n\n=========== Cleaning SageMaker Resources in {region} ===========")
102 |     before_timestamp = datetime.datetime.now(datetime.timezone.utc)
103 |     after_timestamp = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(weeks=2)
104 | 
105 |     sagemaker_cleaner = SageMakerCleaner()
106 |     sagemaker_cleaner.handle_cleanup(before_timestamp, after_timestamp)
107 | 
108 |     print(f"resource_tracker: {sagemaker_cleaner.resource_tracker}")
109 | 
--------------------------------------------------------------------------------
/src/sagemaker_core/main/exceptions.py:
--------------------------------------------------------------------------------
1 | class SageMakerCoreError(Exception):
2 |     """Base class for all exceptions in SageMaker Core"""
3 | 
4 |     fmt = "An unspecified error occurred."
5 | 
6 |     def __init__(self, **kwargs):
7 |         """Initialize a SageMakerCoreError exception.
8 | 
9 |         Args:
10 |             **kwargs: Keyword arguments to be formatted into the custom error message template.
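
        Example (editor's illustration, not part of the original docstring;
        the subclass and keyword are hypothetical). A subclass supplies the
        ``fmt`` template, and the keyword arguments fill it in:

            class MyError(SageMakerCoreError):
                fmt = "Something went wrong with {name}."

            raise MyError(name="my-resource")
            # message rendered by __init__: "Something went wrong with my-resource."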
11 | """ 12 | msg = self.fmt.format(**kwargs) 13 | Exception.__init__(self, msg) 14 | 15 | 16 | ### Generic Validation Errors 17 | class ValidationError(SageMakerCoreError): 18 | """Raised when a validation error occurs.""" 19 | 20 | fmt = "An error occurred while validating user input/setup. {message}" 21 | 22 | def __init__(self, message="", **kwargs): 23 | """Initialize a ValidationError exception. 24 | 25 | Args: 26 | message (str): A message describing the error. 27 | """ 28 | super().__init__(message=message, **kwargs) 29 | 30 | 31 | ### Waiter Errors 32 | class WaiterError(SageMakerCoreError): 33 | """Raised when an error occurs while waiting.""" 34 | 35 | fmt = "An error occurred while waiting for {resource_type}. Final Resource State: {status}." 36 | 37 | def __init__(self, resource_type="(Unkown)", status="(Unkown)", **kwargs): 38 | """Initialize a WaiterError exception. 39 | 40 | Args: 41 | resource_type (str): The type of resource being waited on. 42 | status (str): The final status of the resource. 43 | """ 44 | super().__init__(resource_type=resource_type, status=status, **kwargs) 45 | 46 | 47 | class FailedStatusError(WaiterError): 48 | """Raised when a resource enters a failed state.""" 49 | 50 | fmt = "Encountered unexpected failed state while waiting for {resource_type}. Final Resource State: {status}. Failure Reason: {reason}" 51 | 52 | def __init__(self, resource_type="(Unkown)", status="(Unkown)", reason="(Unkown)"): 53 | """Initialize a FailedStatusError exception. 54 | 55 | Args: 56 | resource_type (str): The type of resource being waited on. 57 | status (str): The final status of the resource. 58 | reason (str): The reason the resource entered a failed state. 59 | """ 60 | super().__init__(resource_type=resource_type, status=status, reason=reason) 61 | 62 | 63 | class DeleteFailedStatusError(WaiterError): 64 | """Raised when a resource enters a delete_failed state.""" 65 | 66 | fmt = "Encountered unexpected delete_failed state while deleting {resource_type}. Failure Reason: {reason}" 67 | 68 | def __init__(self, resource_type="(Unkown)", reason="(Unkown)"): 69 | """Initialize a FailedStatusError exception. 70 | 71 | Args: 72 | resource_type (str): The type of resource being waited on. 73 | status (str): The final status of the resource. 74 | reason (str): The reason the resource entered a failed state. 75 | """ 76 | super().__init__(resource_type=resource_type, reason=reason) 77 | 78 | 79 | class TimeoutExceededError(WaiterError): 80 | """Raised when a specified timeout is exceeded""" 81 | 82 | fmt = "Timeout exceeded while waiting for {resource_type}. Final Resource State: {status}. Increase the timeout and try again." 83 | 84 | def __init__(self, resource_type="(Unkown)", status="(Unkown)", reason="(Unkown)"): 85 | """Initialize a TimeoutExceededError exception. 86 | Args: 87 | resource_type (str): The type of resource being waited on. 88 | status (str): The final status of the resource. 89 | reason (str): The reason the resource entered a failed state. 90 | """ 91 | super().__init__(resource_type=resource_type, status=status, reason=reason) 92 | 93 | 94 | ### Default Configs Errors 95 | class DefaultConfigsError(SageMakerCoreError): 96 | """Raised when an error occurs in the Default Configs""" 97 | 98 | fmt = "An error occurred while loading Default Configs. {message}" 99 | 100 | def __init__(self, message="", **kwargs): 101 | """Initialize an DefaultConfigsError exception. 102 | Args: 103 | message (str): A message describing the error. 
104 | """ 105 | super().__init__(message=message, **kwargs) 106 | 107 | 108 | class LocalConfigNotFoundError(DefaultConfigsError): 109 | """Raised when a configuration file is not found in local file system""" 110 | 111 | fmt = "Failed to load configuration file from location: {file_path}. {message}" 112 | 113 | def __init__(self, file_path="(Unkown)", message=""): 114 | """Initialize a LocalConfigNotFoundError exception. 115 | Args: 116 | file_path (str): The path to the configuration file. 117 | message (str): A message describing the error. 118 | """ 119 | super().__init__(file_path=file_path, message=message) 120 | 121 | 122 | class S3ConfigNotFoundError(DefaultConfigsError): 123 | """Raised when a configuration file is not found in S3""" 124 | 125 | fmt = "Failed to load configuration file from S3 location: {s3_uri}. {message}" 126 | 127 | def __init__(self, s3_uri="(Unkown)", message=""): 128 | """Initialize a S3ConfigNotFoundError exception. 129 | Args: 130 | s3_uri (str): The S3 URI path to the configuration file. 131 | message (str): A message describing the error. 132 | """ 133 | super().__init__(s3_uri=s3_uri, message=message) 134 | 135 | 136 | class ConfigSchemaValidationError(DefaultConfigsError, ValidationError): 137 | """Raised when a configuration file does not adhere to the schema""" 138 | 139 | fmt = "Failed to validate configuration file from location: {file_path}. {message}" 140 | 141 | def __init__(self, file_path="(Unkown)", message=""): 142 | """Initialize a ConfigSchemaValidationError exception. 143 | Args: 144 | file_path (str): The path to the configuration file. 145 | message (str): A message describing the error. 146 | """ 147 | super().__init__(file_path=file_path, message=message) 148 | -------------------------------------------------------------------------------- /src/sagemaker_core/main/logs.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import botocore 3 | 4 | from boto3.session import Session 5 | import botocore.client 6 | from botocore.config import Config 7 | from typing import Generator, Tuple, List 8 | from sagemaker_core.main.utils import SingletonMeta 9 | 10 | 11 | class CloudWatchLogsClient(metaclass=SingletonMeta): 12 | """ 13 | A singleton class for creating a CloudWatchLogs client. 14 | """ 15 | 16 | client: botocore.client = None 17 | 18 | def __init__(self): 19 | if not self.client: 20 | session = Session() 21 | self.client = session.client( 22 | "logs", 23 | session.region_name, 24 | config=Config(retries={"max_attempts": 10, "mode": "standard"}), 25 | ) 26 | 27 | 28 | class LogStreamHandler: 29 | log_group_name: str = None 30 | log_stream_name: str = None 31 | stream_id: int = None 32 | next_token: str = None 33 | cw_client = None 34 | 35 | def __init__(self, log_group_name: str, log_stream_name: str, stream_id: int): 36 | self.log_group_name = log_group_name 37 | self.log_stream_name = log_stream_name 38 | self.cw_client = CloudWatchLogsClient().client 39 | self.stream_id = stream_id 40 | 41 | def get_latest_log_events(self) -> Generator[Tuple[str, dict], None, None]: 42 | """ 43 | This method gets all the latest log events for this stream that exist at this moment in time. 44 | 45 | cw_client.get_log_events() always returns a nextForwardToken even if the current batch of events is empty. 46 | You can keep calling cw_client.get_log_events() with the same token until a new batch of log events exist. 
47 | 
48 |         API Reference: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs/client/get_log_events.html
49 | 
50 |         Returns:
51 |             Generator[tuple[str, dict], None, None]: Generator that yields a tuple consisting of two values
52 |                 str: stream_name,
53 |                 dict: event dict in format
54 |                     {
55 |                         "ingestionTime": number,
56 |                         "message": "string",
57 |                         "timestamp": number
58 |                     }
59 |         """
60 |         while True:
61 |             if not self.next_token:
62 |                 token_args = {}
63 |             else:
64 |                 token_args = {"nextToken": self.next_token}
65 | 
66 |             response = self.cw_client.get_log_events(
67 |                 logGroupName=self.log_group_name,
68 |                 logStreamName=self.log_stream_name,
69 |                 startFromHead=True,
70 |                 **token_args,
71 |             )
72 | 
73 |             self.next_token = response["nextForwardToken"]
74 |             if not response["events"]:
75 |                 break
76 | 
77 |             for event in response["events"]:
78 |                 yield self.log_stream_name, event
79 | 
80 | 
81 | class MultiLogStreamHandler:
82 |     log_group_name: str = None
83 |     log_stream_name_prefix: str = None
84 |     expected_stream_count: int = None
85 |     streams: List[LogStreamHandler] = []
86 |     cw_client = None
87 | 
88 |     def __init__(
89 |         self, log_group_name: str, log_stream_name_prefix: str, expected_stream_count: int
90 |     ):
91 |         self.log_group_name = log_group_name
92 |         self.log_stream_name_prefix = log_stream_name_prefix
93 |         self.expected_stream_count = expected_stream_count
94 |         self.cw_client = CloudWatchLogsClient().client
95 | 
96 |     def get_latest_log_events(self) -> Generator[Tuple[str, dict], None, None]:
97 |         """
98 |         This method gets all the latest log events from each stream that exist at this moment.
99 | 
100 |         Returns:
101 |             Generator[tuple[str, dict], None, None]: Generator that yields a tuple consisting of two values
102 |                 str: stream_name,
103 |                 dict: event dict in format -
104 |                     {
105 |                         "ingestionTime": number,
106 |                         "message": "string",
107 |                         "timestamp": number
108 |                     }
109 |         """
110 |         if not self.ready():
111 |             return []
112 | 
113 |         for stream in self.streams:
114 |             yield from stream.get_latest_log_events()
115 | 
116 |     def ready(self) -> bool:
117 |         """
118 |         Checks whether or not MultiLogStreamHandler is ready to serve new log events at this moment.
119 | 
120 |         If self.streams is already set, return True.
121 |         Otherwise, check if the current number of log streams in the log group matches the expected stream count.
122 | 
123 |         Returns:
124 |             bool: Whether or not MultiLogStreamHandler is ready to serve new log events.
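
        Example (editor's illustration with hypothetical names, not part of
        the original docstring):

            handler = MultiLogStreamHandler("/aws/sagemaker/TrainingJobs", "my-job", 2)
            if handler.ready():
                for stream_name, event in handler.get_latest_log_events():
                    print(stream_name, event["message"])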
125 | """ 126 | 127 | if len(self.streams) >= self.expected_stream_count: 128 | return True 129 | 130 | try: 131 | response = self.cw_client.describe_log_streams( 132 | logGroupName=self.log_group_name, 133 | logStreamNamePrefix=self.log_stream_name_prefix + "/", 134 | orderBy="LogStreamName", 135 | ) 136 | stream_names = [stream["logStreamName"] for stream in response["logStreams"]] 137 | 138 | next_token = response.get("nextToken") 139 | while next_token: 140 | response = self.cw_client.describe_log_streams( 141 | logGroupName=self.log_group_name, 142 | logStreamNamePrefix=self.log_stream_name_prefix + "/", 143 | orderBy="LogStreamName", 144 | nextToken=next_token, 145 | ) 146 | stream_names.extend([stream["logStreamName"] for stream in response["logStreams"]]) 147 | next_token = response.get("nextToken", None) 148 | 149 | if len(stream_names) >= self.expected_stream_count: 150 | self.streams = [ 151 | LogStreamHandler(self.log_group_name, log_stream_name, index) 152 | for index, log_stream_name in enumerate(stream_names) 153 | ] 154 | 155 | return True 156 | else: 157 | # Log streams are created whenever a container starts writing to stdout/err, 158 | # so if the stream count is less than the expected number, return False 159 | return False 160 | 161 | except botocore.exceptions.ClientError as e: 162 | # On the very first training job run on an account, there's no log group until 163 | # the container starts logging, so ignore any errors thrown about that 164 | if e.response["Error"]["Code"] == "ResourceNotFoundException": 165 | return False 166 | else: 167 | raise 168 | -------------------------------------------------------------------------------- /sample/sagemaker/2017-07-24/waiters-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 2, 3 | "waiters": { 4 | "NotebookInstanceInService": { 5 | "delay": 30, 6 | "maxAttempts": 60, 7 | "operation": "DescribeNotebookInstance", 8 | "acceptors": [ 9 | { 10 | "expected": "InService", 11 | "matcher": "path", 12 | "state": "success", 13 | "argument": "NotebookInstanceStatus" 14 | }, 15 | { 16 | "expected": "Failed", 17 | "matcher": "path", 18 | "state": "failure", 19 | "argument": "NotebookInstanceStatus" 20 | } 21 | ] 22 | }, 23 | "NotebookInstanceStopped": { 24 | "delay": 30, 25 | "operation": "DescribeNotebookInstance", 26 | "maxAttempts": 60, 27 | "acceptors": [ 28 | { 29 | "expected": "Stopped", 30 | "matcher": "path", 31 | "state": "success", 32 | "argument": "NotebookInstanceStatus" 33 | }, 34 | { 35 | "expected": "Failed", 36 | "matcher": "path", 37 | "state": "failure", 38 | "argument": "NotebookInstanceStatus" 39 | } 40 | ] 41 | }, 42 | "NotebookInstanceDeleted": { 43 | "delay": 30, 44 | "maxAttempts": 60, 45 | "operation": "DescribeNotebookInstance", 46 | "acceptors": [ 47 | { 48 | "expected": "ValidationException", 49 | "matcher": "error", 50 | "state": "success" 51 | }, 52 | { 53 | "expected": "Failed", 54 | "matcher": "path", 55 | "state": "failure", 56 | "argument": "NotebookInstanceStatus" 57 | } 58 | ] 59 | }, 60 | "TrainingJobCompletedOrStopped": { 61 | "delay": 120, 62 | "maxAttempts": 180, 63 | "operation": "DescribeTrainingJob", 64 | "acceptors": [ 65 | { 66 | "expected": "Completed", 67 | "matcher": "path", 68 | "state": "success", 69 | "argument": "TrainingJobStatus" 70 | }, 71 | { 72 | "expected": "Stopped", 73 | "matcher": "path", 74 | "state": "success", 75 | "argument": "TrainingJobStatus" 76 | }, 77 | { 78 | "expected": "Failed", 79 | "matcher": "path", 80 | 
"state": "failure", 81 | "argument": "TrainingJobStatus" 82 | }, 83 | { 84 | "expected": "ValidationException", 85 | "matcher": "error", 86 | "state": "failure" 87 | } 88 | ] 89 | }, 90 | "EndpointInService": { 91 | "delay": 30, 92 | "maxAttempts": 120, 93 | "operation": "DescribeEndpoint", 94 | "acceptors": [ 95 | { 96 | "expected": "InService", 97 | "matcher": "path", 98 | "state": "success", 99 | "argument": "EndpointStatus" 100 | }, 101 | { 102 | "expected": "Failed", 103 | "matcher": "path", 104 | "state": "failure", 105 | "argument": "EndpointStatus" 106 | }, 107 | { 108 | "expected": "ValidationException", 109 | "matcher": "error", 110 | "state": "failure" 111 | } 112 | ] 113 | }, 114 | "EndpointDeleted": { 115 | "delay": 30, 116 | "maxAttempts": 60, 117 | "operation": "DescribeEndpoint", 118 | "acceptors": [ 119 | { 120 | "expected": "ValidationException", 121 | "matcher": "error", 122 | "state": "success" 123 | }, 124 | { 125 | "expected": "Failed", 126 | "matcher": "path", 127 | "state": "failure", 128 | "argument": "EndpointStatus" 129 | } 130 | ] 131 | }, 132 | "TransformJobCompletedOrStopped": { 133 | "delay": 60, 134 | "maxAttempts": 60, 135 | "operation": "DescribeTransformJob", 136 | "acceptors": [ 137 | { 138 | "expected": "Completed", 139 | "matcher": "path", 140 | "state": "success", 141 | "argument": "TransformJobStatus" 142 | }, 143 | { 144 | "expected": "Stopped", 145 | "matcher": "path", 146 | "state": "success", 147 | "argument": "TransformJobStatus" 148 | }, 149 | { 150 | "expected": "Failed", 151 | "matcher": "path", 152 | "state": "failure", 153 | "argument": "TransformJobStatus" 154 | }, 155 | { 156 | "expected": "ValidationException", 157 | "matcher": "error", 158 | "state": "failure" 159 | } 160 | ] 161 | }, 162 | "ProcessingJobCompletedOrStopped": { 163 | "delay": 60, 164 | "maxAttempts": 60, 165 | "operation": "DescribeProcessingJob", 166 | "acceptors": [ 167 | { 168 | "expected": "Completed", 169 | "matcher": "path", 170 | "state": "success", 171 | "argument": "ProcessingJobStatus" 172 | }, 173 | { 174 | "expected": "Stopped", 175 | "matcher": "path", 176 | "state": "success", 177 | "argument": "ProcessingJobStatus" 178 | }, 179 | { 180 | "expected": "Failed", 181 | "matcher": "path", 182 | "state": "failure", 183 | "argument": "ProcessingJobStatus" 184 | }, 185 | { 186 | "expected": "ValidationException", 187 | "matcher": "error", 188 | "state": "failure" 189 | } 190 | ] 191 | }, 192 | "ImageCreated": { 193 | "delay": 60, 194 | "maxAttempts": 60, 195 | "operation": "DescribeImage", 196 | "acceptors": [ 197 | { 198 | "expected": "CREATED", 199 | "matcher": "path", 200 | "state": "success", 201 | "argument": "ImageStatus" 202 | }, 203 | { 204 | "expected": "CREATE_FAILED", 205 | "matcher": "path", 206 | "state": "failure", 207 | "argument": "ImageStatus" 208 | }, 209 | { 210 | "expected": "ValidationException", 211 | "matcher": "error", 212 | "state": "failure" 213 | } 214 | ] 215 | }, 216 | "ImageUpdated": { 217 | "delay": 60, 218 | "maxAttempts": 60, 219 | "operation": "DescribeImage", 220 | "acceptors": [ 221 | { 222 | "expected": "CREATED", 223 | "matcher": "path", 224 | "state": "success", 225 | "argument": "ImageStatus" 226 | }, 227 | { 228 | "expected": "UPDATE_FAILED", 229 | "matcher": "path", 230 | "state": "failure", 231 | "argument": "ImageStatus" 232 | }, 233 | { 234 | "expected": "ValidationException", 235 | "matcher": "error", 236 | "state": "failure" 237 | } 238 | ] 239 | }, 240 | "ImageDeleted": { 241 | "delay": 60, 242 | "maxAttempts": 60, 
243 | "operation": "DescribeImage", 244 | "acceptors": [ 245 | { 246 | "expected": "ResourceNotFoundException", 247 | "matcher": "error", 248 | "state": "success" 249 | }, 250 | { 251 | "expected": "DELETE_FAILED", 252 | "matcher": "path", 253 | "state": "failure", 254 | "argument": "ImageStatus" 255 | }, 256 | { 257 | "expected": "ValidationException", 258 | "matcher": "error", 259 | "state": "failure" 260 | } 261 | ] 262 | }, 263 | "ImageVersionCreated": { 264 | "delay": 60, 265 | "maxAttempts": 60, 266 | "operation": "DescribeImageVersion", 267 | "acceptors": [ 268 | { 269 | "expected": "CREATED", 270 | "matcher": "path", 271 | "state": "success", 272 | "argument": "ImageVersionStatus" 273 | }, 274 | { 275 | "expected": "CREATE_FAILED", 276 | "matcher": "path", 277 | "state": "failure", 278 | "argument": "ImageVersionStatus" 279 | }, 280 | { 281 | "expected": "ValidationException", 282 | "matcher": "error", 283 | "state": "failure" 284 | } 285 | ] 286 | }, 287 | "ImageVersionDeleted": { 288 | "delay": 60, 289 | "maxAttempts": 60, 290 | "operation": "DescribeImageVersion", 291 | "acceptors": [ 292 | { 293 | "expected": "ResourceNotFoundException", 294 | "matcher": "error", 295 | "state": "success" 296 | }, 297 | { 298 | "expected": "DELETE_FAILED", 299 | "matcher": "path", 300 | "state": "failure", 301 | "argument": "ImageVersionStatus" 302 | }, 303 | { 304 | "expected": "ValidationException", 305 | "matcher": "error", 306 | "state": "failure" 307 | } 308 | ] 309 | } 310 | } 311 | } 312 | -------------------------------------------------------------------------------- /src/sagemaker_core/main/default_configs_helper.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You 4 | # may not use this file except in compliance with the License. A copy of 5 | # the License is located at 6 | # 7 | # http://aws.amazon.com/apache2.0/ 8 | # 9 | # or in the "license" file accompanying this file. This file is 10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 | # ANY KIND, either express or implied. See the License for the specific 12 | # language governing permissions and limitations under the License. 13 | 14 | import logging 15 | 16 | import os 17 | import jsonschema 18 | import boto3 19 | import yaml 20 | import pathlib 21 | 22 | from functools import lru_cache 23 | from typing import List 24 | from platformdirs import site_config_dir, user_config_dir 25 | 26 | from botocore.utils import merge_dicts 27 | from six.moves.urllib.parse import urlparse 28 | from sagemaker_core.main.config_schema import SAGEMAKER_PYTHON_SDK_CONFIG_SCHEMA 29 | from sagemaker_core.main.exceptions import ( 30 | LocalConfigNotFoundError, 31 | S3ConfigNotFoundError, 32 | DefaultConfigsError, 33 | ConfigSchemaValidationError, 34 | ) 35 | from sagemaker_core.main.utils import get_textual_rich_logger 36 | 37 | logger = get_textual_rich_logger(__name__) 38 | 39 | 40 | _APP_NAME = "sagemaker" 41 | # The default name of the config file. 42 | _CONFIG_FILE_NAME = "config.yaml" 43 | # The default config file location of the Administrator provided config file. This path can be 44 | # overridden with `SAGEMAKER_CORE_ADMIN_CONFIG_OVERRIDE` environment variable. 45 | _DEFAULT_ADMIN_CONFIG_FILE_PATH = os.path.join(site_config_dir(_APP_NAME), _CONFIG_FILE_NAME) 46 | # The default config file location of the user provided config file. 
This path can be
47 | # overridden with the `SAGEMAKER_CORE_USER_CONFIG_OVERRIDE` environment variable.
48 | _DEFAULT_USER_CONFIG_FILE_PATH = os.path.join(user_config_dir(_APP_NAME), _CONFIG_FILE_NAME)
49 | # The default config file location of the local mode.
50 | _DEFAULT_LOCAL_MODE_CONFIG_FILE_PATH = os.path.join(
51 |     os.path.expanduser("~"), ".sagemaker", _CONFIG_FILE_NAME
52 | )
53 | ENV_VARIABLE_ADMIN_CONFIG_OVERRIDE = "SAGEMAKER_CORE_ADMIN_CONFIG_OVERRIDE"
54 | ENV_VARIABLE_USER_CONFIG_OVERRIDE = "SAGEMAKER_CORE_USER_CONFIG_OVERRIDE"
55 | 
56 | S3_PREFIX = "s3://"
57 | 
58 | 
59 | def load_default_configs(additional_config_paths: List[str] = None, s3_resource=None):
60 |     default_config_path = os.getenv(
61 |         ENV_VARIABLE_ADMIN_CONFIG_OVERRIDE, _DEFAULT_ADMIN_CONFIG_FILE_PATH
62 |     )
63 |     user_config_path = os.getenv(ENV_VARIABLE_USER_CONFIG_OVERRIDE, _DEFAULT_USER_CONFIG_FILE_PATH)
64 | 
65 |     config_paths = [default_config_path, user_config_path]
66 |     if additional_config_paths:
67 |         config_paths += additional_config_paths
68 |     config_paths = list(filter(lambda item: item is not None, config_paths))
69 |     merged_config = {}
70 |     for file_path in config_paths:
71 |         config_from_file = {}
72 |         if file_path.startswith(S3_PREFIX):
73 |             config_from_file = _load_config_from_s3(file_path, s3_resource)
74 |         else:
75 |             try:
76 |                 config_from_file = _load_config_from_file(file_path)
77 |             except ValueError:
78 |                 error = LocalConfigNotFoundError(file_path=file_path)
79 |                 if file_path not in (
80 |                     _DEFAULT_ADMIN_CONFIG_FILE_PATH,
81 |                     _DEFAULT_USER_CONFIG_FILE_PATH,
82 |                 ):
83 |                     # Throw an exception only when a user-provided file path is invalid.
84 |                     # If there are no files in the default config file locations, don't
85 |                     # throw exceptions.
86 |                     raise error
87 | 
88 |                 logger.debug(error)
89 |         if config_from_file:
90 |             try:
91 |                 validate_sagemaker_config(config_from_file)
92 |             except jsonschema.exceptions.ValidationError as error:
93 |                 raise ConfigSchemaValidationError(file_path=file_path, message=str(error))
94 |             merge_dicts(merged_config, config_from_file)
95 |             logger.debug("Fetched defaults config from location: %s", file_path)
96 |         else:
97 |             logger.debug("Not applying SDK defaults from location: %s", file_path)
98 | 
99 |     return merged_config
100 | 
101 | 
102 | def validate_sagemaker_config(sagemaker_config: dict = None):
103 |     """Validates whether a given dictionary adheres to the schema.
104 | 
105 |     Args:
106 |         sagemaker_config: A dictionary containing default values for the
107 |             SageMaker Python SDK. (default: None).
108 |     """
109 |     jsonschema.validate(sagemaker_config, SAGEMAKER_PYTHON_SDK_CONFIG_SCHEMA)
110 | 
111 | 
112 | def _load_config_from_s3(s3_uri, s3_resource_for_config) -> dict:
113 |     """Loads a configuration file from the given S3 URI."""
114 |     if not s3_resource_for_config:
115 |         # Constructing a default Boto3 S3 Resource from a default Boto3 session.
116 |         boto_session = boto3.DEFAULT_SESSION or boto3.Session()
117 |         boto_region_name = boto_session.region_name
118 |         if boto_region_name is None:
119 |             raise DefaultConfigsError(
120 |                 message=(
121 |                     "Valid region is not provided in the Boto3 session. "
122 |                     + "Set up local AWS configuration with a valid region supported by SageMaker."
123 | ) 124 | ) 125 | s3_resource_for_config = boto_session.resource("s3", region_name=boto_region_name) 126 | 127 | logger.debug("Fetching defaults config from location: %s", s3_uri) 128 | inferred_s3_uri = _get_inferred_s3_uri(s3_uri, s3_resource_for_config) 129 | parsed_url = urlparse(inferred_s3_uri) 130 | bucket, key_prefix = parsed_url.netloc, parsed_url.path.lstrip("/") 131 | s3_object = s3_resource_for_config.Object(bucket, key_prefix) 132 | s3_file_content = s3_object.get()["Body"].read() 133 | return yaml.safe_load(s3_file_content.decode("utf-8")) 134 | 135 | 136 | def _get_inferred_s3_uri(s3_uri, s3_resource_for_config): 137 | """Placeholder docstring""" 138 | parsed_url = urlparse(s3_uri) 139 | bucket, key_prefix = parsed_url.netloc, parsed_url.path.lstrip("/") 140 | s3_bucket = s3_resource_for_config.Bucket(name=bucket) 141 | s3_objects = s3_bucket.objects.filter(Prefix=key_prefix).all() 142 | s3_files_with_same_prefix = [ 143 | "{}{}/{}".format(S3_PREFIX, bucket, s3_object.key) for s3_object in s3_objects 144 | ] 145 | if len(s3_files_with_same_prefix) == 0: 146 | # Customer provided us with an incorrect s3 path. 147 | raise S3ConfigNotFoundError( 148 | s3_uri=s3_uri, 149 | message="Provide a valid S3 URI in the format s3:////{_CONFIG_FILE_NAME}.", 150 | ) 151 | if len(s3_files_with_same_prefix) > 1: 152 | # Customer has provided us with a S3 URI which points to a directory 153 | # search for s3:///directory-key-prefix/config.yaml 154 | inferred_s3_uri = str(pathlib.PurePosixPath(s3_uri, _CONFIG_FILE_NAME)).replace( 155 | "s3:/", "s3://" 156 | ) 157 | if inferred_s3_uri not in s3_files_with_same_prefix: 158 | # We don't know which file we should be operating with. 159 | raise S3ConfigNotFoundError( 160 | s3_uri=s3_uri, 161 | message="Provide an S3 URI pointing to a directory that contains a {_CONFIG_FILE_NAME} file.", 162 | ) 163 | # Customer has a config.yaml present in the directory that was provided as the S3 URI 164 | return inferred_s3_uri 165 | return s3_uri 166 | 167 | 168 | def _load_config_from_file(file_path: str) -> dict: 169 | """Placeholder docstring""" 170 | inferred_file_path = file_path 171 | if os.path.isdir(file_path): 172 | inferred_file_path = os.path.join(file_path, _CONFIG_FILE_NAME) 173 | if not os.path.exists(inferred_file_path): 174 | raise ValueError 175 | logger.debug("Fetching defaults config from location: %s", file_path) 176 | with open(inferred_file_path, "r") as f: 177 | content = yaml.safe_load(f) 178 | return content 179 | 180 | 181 | @lru_cache(maxsize=None) 182 | def load_default_configs_for_resource_name(resource_name: str): 183 | configs_data = load_default_configs() 184 | if not configs_data: 185 | logger.debug("No default configurations found for resource: %s", resource_name) 186 | return {} 187 | return configs_data["SageMaker"]["PythonSDK"]["Resources"].get(resource_name) 188 | 189 | 190 | def get_config_value(attribute, resource_defaults, global_defaults): 191 | if resource_defaults and attribute in resource_defaults: 192 | return resource_defaults[attribute] 193 | if global_defaults and attribute in global_defaults: 194 | return global_defaults[attribute] 195 | logger.debug( 196 | f"Configurable value {attribute} not entered in parameters or present in the Config" 197 | ) 198 | return None 199 | -------------------------------------------------------------------------------- /integ/test_codegen.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import json 3 | import logging 
4 | import time
5 | import unittest
6 | import pandas as pd
7 | import os
8 | 
9 | from sklearn.datasets import load_iris
10 | from sklearn.model_selection import train_test_split
11 | 
12 | from sagemaker_cleaner import handle_cleanup
13 | from sagemaker_core.main.shapes import (
14 |     ContainerDefinition,
15 |     ProductionVariant,
16 |     ProfilerConfig,
17 |     AlgorithmSpecification,
18 |     Channel,
19 |     DataSource,
20 |     S3DataSource,
21 |     OutputDataConfig,
22 |     ResourceConfig,
23 |     StoppingCondition,
24 | )
25 | from sagemaker_core.main.resources import (
26 |     TrainingJob,
27 |     Model,
28 |     EndpointConfig,
29 |     Endpoint,
30 | )
31 | from sagemaker_core.helper.session_helper import Session, get_execution_role
32 | 
33 | logger = logging.getLogger()
34 | 
35 | sagemaker_session = Session()
36 | region = sagemaker_session.boto_region_name
37 | role = get_execution_role()
38 | bucket = sagemaker_session.default_bucket()
39 | 
40 | iris = load_iris()
41 | iris_df = pd.DataFrame(iris.data, columns=iris.feature_names)
42 | iris_df["target"] = iris.target
43 | 
44 | # Prepare Data
45 | os.makedirs("./data", exist_ok=True)
46 | iris_df = iris_df[["target"] + [col for col in iris_df.columns if col != "target"]]
47 | train_data, test_data = train_test_split(iris_df, test_size=0.2, random_state=42)
48 | train_data.to_csv("./data/train.csv", index=False, header=False)
49 | 
50 | # Upload Data
51 | prefix = "DEMO-scikit-iris"
52 | TRAIN_DATA = "train.csv"
53 | DATA_DIRECTORY = "data"
54 | 
55 | train_input = sagemaker_session.upload_data(
56 |     DATA_DIRECTORY, bucket=bucket, key_prefix="{}/{}".format(prefix, DATA_DIRECTORY)
57 | )
58 | s3_input_path = "s3://{}/{}/data/{}".format(bucket, prefix, TRAIN_DATA)
59 | s3_output_path = "s3://{}/{}/output".format(bucket, prefix)
60 | image = "433757028032.dkr.ecr.us-west-2.amazonaws.com/xgboost:latest"
61 | 
62 | # To be replaced with actual subnet and security group values when executing from a personal account
63 | SUBNET_ONE = os.environ["SUBNET_ONE"]
64 | SUBNET_TWO = os.environ["SUBNET_TWO"]
65 | SECURITY_GROUP_ONE = os.environ["SECURITY_GROUP_ONE"]
66 | 
67 | 
68 | class TestSageMakerCore(unittest.TestCase):
69 | 
70 |     def test_training_and_inference(self):
71 |         os.environ["SAGEMAKER_CORE_ADMIN_CONFIG_OVERRIDE"] = ""
72 |         job_name_v3 = "xgboost-iris-" + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
73 |         training_job = TrainingJob.create(
74 |             training_job_name=job_name_v3,
75 |             hyper_parameters={
76 |                 "objective": "multi:softmax",
77 |                 "num_class": "3",
78 |                 "num_round": "10",
79 |                 "eval_metric": "merror",
80 |             },
81 |             algorithm_specification=AlgorithmSpecification(
82 |                 training_image=image, training_input_mode="File"
83 |             ),
84 |             role_arn=role,
85 |             input_data_config=[
86 |                 Channel(
87 |                     channel_name="train",
88 |                     content_type="csv",
89 |                     compression_type="None",
90 |                     record_wrapper_type="None",
91 |                     data_source=DataSource(
92 |                         s3_data_source=S3DataSource(
93 |                             s3_data_type="S3Prefix",
94 |                             s3_uri=s3_input_path,
95 |                             s3_data_distribution_type="FullyReplicated",
96 |                         )
97 |                     ),
98 |                 )
99 |             ],
100 |             profiler_config=ProfilerConfig(profiling_interval_in_milliseconds=1000),
101 |             output_data_config=OutputDataConfig(s3_output_path=s3_output_path),
102 |             resource_config=ResourceConfig(
103 |                 instance_type="ml.m4.xlarge",
104 |                 instance_count=1,
105 |                 volume_size_in_gb=30,
106 |             ),
107 |             stopping_condition=StoppingCondition(max_runtime_in_seconds=600),
108 |         )
109 |         training_job.wait()
110 | 
111 |         fetched_training_job = TrainingJob.get(training_job_name=job_name_v3)
112 |         assert fetched_training_job.output_data_config.s3_output_path == s3_output_path
113 |         assert fetched_training_job.profiler_config.profiling_interval_in_milliseconds == 1000
114 | 
115 |         creation_time_after = datetime.datetime.now() - datetime.timedelta(days=5)
116 | 
117 |         resource_iterator = TrainingJob.get_all(creation_time_after=creation_time_after)
118 |         training_jobs = [job.training_job_name for job in resource_iterator]
119 | 
120 |         assert len(training_jobs) > 1
121 |         assert job_name_v3 in training_jobs
122 | 
123 |         model_data_url = fetched_training_job.model_artifacts.s3_model_artifacts
124 | 
125 |         key = f'xgboost-iris-test-{time.strftime("%H-%M-%S", time.gmtime())}'
126 |         print("Key:", key)
127 | 
128 |         model = Model.create(
129 |             model_name=key,
130 |             primary_container=ContainerDefinition(
131 |                 image=image,
132 |                 model_data_url=model_data_url,
133 |             ),
134 |             execution_role_arn=role,
135 |         )
136 | 
137 |         # Testing Resource Chaining
138 |         endpoint_config = EndpointConfig.create(
139 |             endpoint_config_name=key,
140 |             production_variants=[
141 |                 ProductionVariant(
142 |                     variant_name=key,
143 |                     initial_instance_count=1,
144 |                     instance_type="ml.m5.xlarge",
145 |                     model_name=model,  # Pass the `Model` object created above
146 |                 )
147 |             ],
148 |         )
149 |         endpoint: Endpoint = Endpoint.create(
150 |             endpoint_name=key,
151 |             # Pass the `EndpointConfig` object created above
152 |             endpoint_config_name=endpoint_config,
153 |         )
154 |         endpoint.wait_for_status("InService")
155 | 
156 |     def test_default_configs(self):
157 |         os.environ["SAGEMAKER_CORE_ADMIN_CONFIG_OVERRIDE"] = (
158 |             self._setup_default_configs_and_fetch_path()
159 |         )
160 |         job_name_v3 = "xgboost-test-default-configs-" + time.strftime(
161 |             "%Y-%m-%d-%H-%M-%S", time.gmtime()
162 |         )
163 | 
164 |         training_job = TrainingJob.create(
165 |             training_job_name=job_name_v3,
166 |             hyper_parameters={
167 |                 "objective": "multi:softmax",
168 |                 "num_class": "3",
169 |                 "num_round": "10",
170 |                 "eval_metric": "merror",
171 |             },
172 |             algorithm_specification=AlgorithmSpecification(
173 |                 training_image=image, training_input_mode="File"
174 |             ),
175 |             role_arn=role,
176 |             input_data_config=[
177 |                 Channel(
178 |                     channel_name="train",
179 |                     content_type="csv",
180 |                     compression_type="None",
181 |                     record_wrapper_type="None",
182 |                     data_source=DataSource(
183 |                         s3_data_source=S3DataSource(
184 |                             s3_data_type="S3Prefix",
185 |                             s3_uri=s3_input_path,
186 |                             s3_data_distribution_type="FullyReplicated",
187 |                         )
188 |                     ),
189 |                 )
190 |             ],
191 |             output_data_config=OutputDataConfig(s3_output_path=s3_output_path),
192 |             resource_config=ResourceConfig(
193 |                 instance_type="ml.m4.xlarge",
194 |                 instance_count=1,
195 |                 volume_size_in_gb=30,
196 |             ),
197 |             stopping_condition=StoppingCondition(max_runtime_in_seconds=600),
198 |         )
199 |         training_job.wait()
200 | 
201 |         assert training_job.vpc_config.subnets == [
202 |             SUBNET_ONE,
203 |             SUBNET_TWO,
204 |         ]
205 |         assert training_job.vpc_config.security_group_ids == [SECURITY_GROUP_ONE]
206 | 
207 |     def tearDown(self) -> None:
208 |         handle_cleanup()
209 | 
210 |     def _setup_default_configs_and_fetch_path(self) -> str:
211 |         DEFAULTS_CONTENT = {
212 |             "SchemaVersion": "1.0",
213 |             "SageMaker": {
214 |                 "PythonSDK": {
215 |                     "Resources": {
216 |                         "GlobalDefaults": {
217 |                             "vpc_config": {
218 |                                 "security_group_ids": [SECURITY_GROUP_ONE],
219 |                                 "subnets": [SUBNET_ONE, SUBNET_TWO],
220 |                             }
221 |                         },
222 |                         "TrainingJob": {
223 |                             "role_arn": role,
224 |                             "output_data_config": {"s3_output_path": s3_output_path},
225 |                         },
226 |                     }
227 |                 }
228 |             },
229 |         }
230 | 
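        # Editor's note (illustrative, not part of the original test): the file written
        # below is pointed to by the SAGEMAKER_CORE_ADMIN_CONFIG_OVERRIDE environment
        # variable in test_default_configs, and load_default_configs() merges the admin
        # config first, then layers any user config on top, e.g.:
        #
        #     os.environ["SAGEMAKER_CORE_ADMIN_CONFIG_OVERRIDE"] = path_to_defaults
        #     merged = load_default_configs()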
231 |         path_to_defaults = os.path.join(DATA_DIRECTORY, "defaults.json")
232 |         with open(path_to_defaults, "w") as f:
233 |             json.dump(DEFAULTS_CONTENT, f, indent=4)
234 |         return path_to_defaults
235 | 
--------------------------------------------------------------------------------
/src/sagemaker_core/main/code_injection/codec.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"). You
4 | # may not use this file except in compliance with the License. A copy of
5 | # the License is located at
6 | #
7 | # http://aws.amazon.com/apache2.0/
8 | #
9 | # or in the "license" file accompanying this file. This file is
10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 | # ANY KIND, either express or implied. See the License for the specific
12 | # language governing permissions and limitations under the License.
13 | import logging
14 | 
15 | from dataclasses import asdict
16 | import re
17 | 
18 | from sagemaker_core.main.code_injection.shape_dag import SHAPE_DAG
19 | from sagemaker_core.main.code_injection.constants import (
20 |     BASIC_TYPES,
21 |     STRUCTURE_TYPE,
22 |     LIST_TYPE,
23 |     MAP_TYPE,
24 | )
25 | from io import BytesIO
26 | 
27 | 
28 | def pascal_to_snake(pascal_str):
29 |     """
30 |     Converts a PascalCase string to snake_case.
31 | 
32 |     Args:
33 |         pascal_str (str): The PascalCase string to be converted.
34 | 
35 |     Returns:
36 |         str: The converted snake_case string.
37 |     """
38 |     snake_case = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", pascal_str)
39 |     return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake_case).lower()
40 | 
41 | 
42 | def deserialize(data, cls) -> object:
43 |     """
44 |     Deserialize the given data into an instance of the specified class.
45 | 
46 |     Args:
47 |         data (dict): The data to be deserialized.
48 |         cls (str or type): The class or class name to deserialize into.
49 | 
50 |     Returns:
51 |         object: An instance of the specified class with the deserialized data.
52 |     """
53 |     # Convert the keys to snake_case
54 |     logging.debug(f"Deserialize: pascal cased data: {data}")
55 |     data = {pascal_to_snake(k): v for k, v in data.items()}
56 |     logging.debug(f"Deserialize: snake cased data: {data}")
57 | 
58 |     # Get the class from the cls_name string
59 |     if isinstance(cls, str):
60 |         cls = globals()[cls]
61 | 
62 |     # Create a new instance of the class
63 |     instance = cls(**data)
64 | 
65 |     return instance
66 | 
67 | 
68 | def snake_to_pascal(snake_str):
69 |     """
70 |     Convert a snake_case string to PascalCase.
71 | 
72 |     Args:
73 |         snake_str (str): The snake_case string to be converted.
74 | 
75 |     Returns:
76 |         str: The PascalCase string.
77 | 
78 |     """
79 |     components = snake_str.split("_")
80 |     return "".join(x.title() for x in components)
81 | 
82 | 
83 | def serialize(data) -> object:
84 |     """
85 |     Serializes the given data object into a dictionary.
86 | 
87 |     Args:
88 |         data: The data object to be serialized.
89 | 
90 |     Returns:
91 |         A dictionary containing the serialized data.
92 | 
93 |     """
94 |     data_dict = asdict(data)
95 | 
96 |     # Convert the keys to PascalCase
97 |     data_dict = {snake_to_pascal(k): v for k, v in data_dict.items() if v is not None}
98 | 
99 |     return data_dict
100 | 
101 | 
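# Editor's note: illustrative round trips of the two case converters above (not
# part of the original module):
#
#     pascal_to_snake("TrainingJobName")   # -> "training_job_name"
#     snake_to_pascal("training_job_name") # -> "TrainingJobName"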
102 | def _evaluate_list_type(raw_list, shape) -> list:
103 |     """
104 |     Evaluates a list type based on the given shape.
105 | 
106 |     Args:
107 |         raw_list (list): The raw list to be evaluated.
108 |         shape (dict): The shape of the list.
109 | 
110 |     Returns:
111 |         list: The evaluated list based on the shape.
112 | 
113 |     Raises:
114 |         ValueError: If an unhandled list member type is encountered.
115 | 
116 |     """
117 |     _shape_member_type = shape["member_type"]
118 |     _shape_member_shape = shape["member_shape"]
119 |     _evaluated_list = []
120 |     if _shape_member_type in BASIC_TYPES:
121 |         # if basic types directly assign list value.
122 |         _evaluated_list = raw_list
123 |     elif _shape_member_type == STRUCTURE_TYPE:
124 |         # if structure type, transform each list item and assign value.
125 |         # traverse through response list and evaluate item
126 |         for item in raw_list:
127 |             _evaluated_item = transform(item, _shape_member_shape)
128 |             _evaluated_list.append(_evaluated_item)
129 |     elif _shape_member_type == LIST_TYPE:
130 |         # if list type, transform each list item and assign value.
131 |         # traverse through response list and evaluate item
132 |         for item in raw_list:
133 |             _list_type_shape = SHAPE_DAG[_shape_member_shape]
134 |             _evaluated_item = _evaluate_list_type(item, _list_type_shape)
135 |             _evaluated_list.append(_evaluated_item)
136 |     elif _shape_member_type == MAP_TYPE:
137 |         # if map type, transform each list item and assign value.
138 |         # traverse through response list and evaluate item
139 |         for item in raw_list:
140 |             _map_type_shape = SHAPE_DAG[_shape_member_shape]
141 |             _evaluated_item = _evaluate_map_type(item, _map_type_shape)
142 |             _evaluated_list.append(_evaluated_item)
143 |     else:
144 |         raise ValueError(
145 |             f"Unhandled List member type "
146 |             f"[{_shape_member_type}] encountered. "
147 |             "Needs additional logic for support"
148 |         )
149 |     return _evaluated_list
150 | 
151 | 
152 | def _evaluate_map_type(raw_map, shape) -> dict:
153 |     """
154 |     Evaluates a map type based on the given shape.
155 | 
156 |     Args:
157 |         raw_map (dict): The raw map to be evaluated.
158 |         shape (dict): The shape of the map.
159 | 
160 |     Returns:
161 |         dict: The evaluated map.
162 | 
163 |     Raises:
164 |         ValueError: If an unhandled map key type or value type is encountered.
165 |     """
166 |     _shape_key_type = shape["key_type"]
167 |     _shape_value_type = shape["value_type"]
168 |     _shape_value_shape = shape["value_shape"]
169 |     if _shape_key_type != "string":
170 |         raise ValueError(
171 |             f"Unhandled Map key type "
172 |             f"[{_shape_key_type}] encountered. "
173 |             "Needs additional logic for support"
174 |         )
175 | 
176 |     _evaluated_map = {}
177 |     if _shape_value_type in BASIC_TYPES:
178 |         # if basic types directly assign value.
179 |         # Ex. response["map_member"] = {"key":"value"}
180 |         _evaluated_map = raw_map
181 |     elif _shape_value_type == STRUCTURE_TYPE:
182 |         # if structure type loop through and evaluate values
183 |         for k, v in raw_map.items():
184 |             _evaluated_value = transform(v, _shape_value_shape)
185 |             _evaluated_map[k] = _evaluated_value
186 |     elif _shape_value_type == LIST_TYPE:
187 |         for k, v in raw_map.items():
188 |             _list_type_shape = SHAPE_DAG[_shape_value_shape]
189 |             evaluated_values = _evaluate_list_type(v, _list_type_shape)
190 |             _evaluated_map[k] = evaluated_values
191 |     elif _shape_value_type == MAP_TYPE:
192 |         for k, v in raw_map.items():
193 |             _map_type_shape = SHAPE_DAG[_shape_value_shape]
194 |             evaluated_values = _evaluate_map_type(v, _map_type_shape)
195 |             _evaluated_map[k] = evaluated_values
196 |     else:
197 |         raise ValueError(
198 |             f"Unhandled Map value type "
199 |             f"[{_shape_value_type}] encountered. 
" 200 | "Needs additional logic for support" 201 | ) 202 | 203 | return _evaluated_map 204 | 205 | 206 | def transform(data, shape, object_instance=None) -> dict: 207 | """ 208 | Transforms the given data based on the given shape. 209 | 210 | Args: 211 | data (dict): The data to be transformed. 212 | shape (str): The shape of the data. 213 | object_instance (object): The object to be transformed. (Optional) 214 | 215 | Returns: 216 | dict: The transformed data. 217 | 218 | Raises: 219 | ValueError: If an unhandled shape type is encountered. 220 | """ 221 | result = {} 222 | _shape = SHAPE_DAG[shape] 223 | 224 | if _shape["type"] in BASIC_TYPES: 225 | raise ValueError("Unexpected low-level operation model shape") 226 | 227 | for member in _shape["members"]: 228 | _member_name = member["name"] 229 | _member_shape = member["shape"] 230 | _member_type = member["type"] 231 | if data.get(_member_name) is None: 232 | # skip members that are not in the response 233 | continue 234 | # 1. set snake case attribute name 235 | attribute_name = pascal_to_snake(_member_name) 236 | # 2. assign response value 237 | if _member_type in BASIC_TYPES: 238 | evaluated_value = data[_member_name] 239 | elif _member_type == STRUCTURE_TYPE: 240 | evaluated_value = transform(data[_member_name], _member_shape) 241 | elif _member_type == LIST_TYPE: 242 | _list_type_shape = SHAPE_DAG[_member_shape] 243 | # 2. assign response value 244 | evaluated_value = _evaluate_list_type(data[_member_name], _list_type_shape) 245 | elif _member_type == MAP_TYPE: 246 | _map_type_shape = SHAPE_DAG[_member_shape] 247 | evaluated_value = _evaluate_map_type(data[_member_name], _map_type_shape) 248 | elif _member_type == "blob": 249 | blob_data = data[_member_name] 250 | if isinstance(blob_data, bytes): 251 | evaluated_value = BytesIO(blob_data) 252 | elif hasattr(blob_data, "read"): 253 | # If it's already a file-like object, use it as is 254 | evaluated_value = blob_data 255 | else: 256 | raise ValueError(f"Unexpected blob data type: {type(blob_data)}") 257 | else: 258 | raise ValueError(f"Unexpected member type encountered: {_member_type}") 259 | 260 | result[attribute_name] = evaluated_value 261 | if object_instance: 262 | # 3. set attribute value 263 | setattr(object_instance, attribute_name, evaluated_value) 264 | 265 | return result 266 | -------------------------------------------------------------------------------- /sample/sagemaker-metrics/2022-09-30/service-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "version":"2.0", 3 | "metadata":{ 4 | "apiVersion":"2022-09-30", 5 | "endpointPrefix":"metrics.sagemaker", 6 | "protocol":"rest-json", 7 | "protocols":["rest-json"], 8 | "serviceAbbreviation":"SageMaker Metrics", 9 | "serviceFullName":"Amazon SageMaker Metrics Service", 10 | "serviceId":"SageMaker Metrics", 11 | "signatureVersion":"v4", 12 | "signingName":"sagemaker", 13 | "uid":"sagemaker-metrics-2022-09-30", 14 | "auth":["aws.auth#sigv4"] 15 | }, 16 | "operations":{ 17 | "BatchGetMetrics":{ 18 | "name":"BatchGetMetrics", 19 | "http":{ 20 | "method":"POST", 21 | "requestUri":"/BatchGetMetrics" 22 | }, 23 | "input":{"shape":"BatchGetMetricsRequest"}, 24 | "output":{"shape":"BatchGetMetricsResponse"}, 25 | "documentation":"

Used to retrieve training metrics from SageMaker.

" 26 | }, 27 | "BatchPutMetrics":{ 28 | "name":"BatchPutMetrics", 29 | "http":{ 30 | "method":"PUT", 31 | "requestUri":"/BatchPutMetrics" 32 | }, 33 | "input":{"shape":"BatchPutMetricsRequest"}, 34 | "output":{"shape":"BatchPutMetricsResponse"}, 35 | "documentation":"

Used to ingest training metrics into SageMaker. These metrics can be visualized in SageMaker Studio.

" 36 | } 37 | }, 38 | "shapes":{ 39 | "BatchGetMetricsRequest":{ 40 | "type":"structure", 41 | "required":["MetricQueries"], 42 | "members":{ 43 | "MetricQueries":{ 44 | "shape":"MetricQueryList", 45 | "documentation":"

Queries made to retrieve training metrics from SageMaker.

" 46 | } 47 | } 48 | }, 49 | "BatchGetMetricsResponse":{ 50 | "type":"structure", 51 | "members":{ 52 | "MetricQueryResults":{ 53 | "shape":"MetricQueryResultList", 54 | "documentation":"

The results of a query to retrieve training metrics from SageMaker.

" 55 | } 56 | } 57 | }, 58 | "BatchPutMetricsError":{ 59 | "type":"structure", 60 | "members":{ 61 | "Code":{ 62 | "shape":"PutMetricsErrorCode", 63 | "documentation":"

The error code of an error that occured when attempting to put metrics.

  • METRIC_LIMIT_EXCEEDED: The maximum amount of metrics per resource is exceeded.

  • INTERNAL_ERROR: An internal error occured.

  • VALIDATION_ERROR: The metric data failed validation.

  • CONFLICT_ERROR: Multiple requests attempted to modify the same data simultaneously.

" 64 | }, 65 | "MetricIndex":{ 66 | "shape":"Integer", 67 | "documentation":"

An index that corresponds to the metric in the request.

" 68 | } 69 | }, 70 | "documentation":"

An error that occured when putting the metric data.

" 71 | }, 72 | "BatchPutMetricsErrorList":{ 73 | "type":"list", 74 | "member":{"shape":"BatchPutMetricsError"}, 75 | "max":10, 76 | "min":1 77 | }, 78 | "BatchPutMetricsRequest":{ 79 | "type":"structure", 80 | "required":[ 81 | "TrialComponentName", 82 | "MetricData" 83 | ], 84 | "members":{ 85 | "TrialComponentName":{ 86 | "shape":"ExperimentEntityName", 87 | "documentation":"
The name of the Trial Component to associate with the metrics. The Trial Component name must be entirely lowercase.
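
A quick sketch of validating a candidate name client-side; the regex is copied from the ExperimentEntityName shape later in this file, and the sample names are hypothetical:

import re

# Lowercase alphanumerics with optional hyphen separators (pattern from the
# ExperimentEntityName shape; the shape's min/max additionally cap length at 1-120).
ENTITY_NAME = re.compile(r"^[a-z0-9](-*[a-z0-9]){0,119}$")

assert ENTITY_NAME.match("my-trial-component")
assert not ENTITY_NAME.match("MyTrialComponent")  # uppercase is rejected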
" 88 | }, 89 | "MetricData":{ 90 | "shape":"RawMetricDataList", 91 | "documentation":"
A list of raw metric values to put.
" 92 | } 93 | } 94 | }, 95 | "BatchPutMetricsResponse":{ 96 | "type":"structure", 97 | "members":{ 98 | "Errors":{ 99 | "shape":"BatchPutMetricsErrorList", 100 | "documentation":"
Lists any errors that occur when inserting metric data.
" 101 | } 102 | } 103 | }, 104 | "Double":{"type":"double"}, 105 | "ExperimentEntityName":{ 106 | "type":"string", 107 | "max":120, 108 | "min":1, 109 | "pattern":"^[a-z0-9](-*[a-z0-9]){0,119}" 110 | }, 111 | "Integer":{"type":"integer"}, 112 | "Long":{"type":"long"}, 113 | "Message":{ 114 | "type":"string", 115 | "max":2048, 116 | "pattern":".*" 117 | }, 118 | "MetricName":{ 119 | "type":"string", 120 | "max":255, 121 | "min":1, 122 | "pattern":".+" 123 | }, 124 | "MetricQuery":{ 125 | "type":"structure", 126 | "required":[ 127 | "MetricName", 128 | "ResourceArn", 129 | "MetricStat", 130 | "Period", 131 | "XAxisType" 132 | ], 133 | "members":{ 134 | "MetricName":{ 135 | "shape":"MetricName", 136 | "documentation":"
The name of the metric to retrieve.
" 137 | }, 138 | "ResourceArn":{ 139 | "shape":"SageMakerResourceArn", 140 | "documentation":"
The ARN of the SageMaker resource to retrieve metrics for.
" 141 | }, 142 | "MetricStat":{ 143 | "shape":"MetricStatistic", 144 | "documentation":"
The statistic type of the metrics to retrieve.
" 145 | }, 146 | "Period":{ 147 | "shape":"Period", 148 | "documentation":"
The time period of metrics to retrieve.
" 149 | }, 150 | "XAxisType":{ 151 | "shape":"XAxisType", 152 | "documentation":"
The x-axis type of metrics to retrieve.
" 153 | }, 154 | "Start":{ 155 | "shape":"Long", 156 | "documentation":"
The start time of metrics to retrieve.
", 157 | "box":true 158 | }, 159 | "End":{ 160 | "shape":"Long", 161 | "documentation":"
The end time of metrics to retrieve.
", 162 | "box":true 163 | } 164 | }, 165 | "documentation":"
Specifies a query to retrieve training metrics from SageMaker.
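
A minimal sketch of issuing such a query through BatchGetMetrics (the client and operation exist in boto3; the metric name and ARN are hypothetical, and all five query fields below are required by the shape definition above):

import boto3

client = boto3.client("sagemaker-metrics")
response = client.batch_get_metrics(
    MetricQueries=[
        {
            "MetricName": "train:loss",
            "ResourceArn": "arn:aws:sagemaker:us-west-2:111122223333:training-job/my-job",  # hypothetical
            "MetricStat": "Avg",       # one of the MetricStatistic enum values
            "Period": "OneMinute",     # one of the Period enum values
            "XAxisType": "Timestamp",  # one of the XAxisType enum values
        }
    ],
)
# Results arrive in the same order as the queries, each with a status and the
# x-axis/metric value series described by the MetricQueryResult shape.
for result in response.get("MetricQueryResults", []):
    print(result["Status"], result["XAxisValues"], result["MetricValues"])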
" 166 | }, 167 | "MetricQueryList":{ 168 | "type":"list", 169 | "member":{"shape":"MetricQuery"}, 170 | "max":100, 171 | "min":1 172 | }, 173 | "MetricQueryResult":{ 174 | "type":"structure", 175 | "required":[ 176 | "Status", 177 | "XAxisValues", 178 | "MetricValues" 179 | ], 180 | "members":{ 181 | "Status":{ 182 | "shape":"MetricQueryResultStatus", 183 | "documentation":"
The status of the metric query.
" 184 | }, 185 | "Message":{ 186 | "shape":"Message", 187 | "documentation":"
A message describing the status of the metric query.
" 188 | }, 189 | "XAxisValues":{ 190 | "shape":"XAxisValues", 191 | "documentation":"
The values for the x-axis of the metrics.
" 192 | }, 193 | "MetricValues":{ 194 | "shape":"MetricValues", 195 | "documentation":"
The metric values retrieved by the query.
" 196 | } 197 | }, 198 | "documentation":"
The result of a query to retrieve training metrics from SageMaker.
" 199 | }, 200 | "MetricQueryResultList":{ 201 | "type":"list", 202 | "member":{"shape":"MetricQueryResult"}, 203 | "max":100, 204 | "min":1 205 | }, 206 | "MetricQueryResultStatus":{ 207 | "type":"string", 208 | "enum":[ 209 | "Complete", 210 | "Truncated", 211 | "InternalError", 212 | "ValidationError" 213 | ] 214 | }, 215 | "MetricStatistic":{ 216 | "type":"string", 217 | "enum":[ 218 | "Min", 219 | "Max", 220 | "Avg", 221 | "Count", 222 | "StdDev", 223 | "Last" 224 | ] 225 | }, 226 | "MetricValues":{ 227 | "type":"list", 228 | "member":{"shape":"Double"} 229 | }, 230 | "Period":{ 231 | "type":"string", 232 | "enum":[ 233 | "OneMinute", 234 | "FiveMinute", 235 | "OneHour", 236 | "IterationNumber" 237 | ] 238 | }, 239 | "PutMetricsErrorCode":{ 240 | "type":"string", 241 | "enum":[ 242 | "METRIC_LIMIT_EXCEEDED", 243 | "INTERNAL_ERROR", 244 | "VALIDATION_ERROR", 245 | "CONFLICT_ERROR" 246 | ] 247 | }, 248 | "RawMetricData":{ 249 | "type":"structure", 250 | "required":[ 251 | "MetricName", 252 | "Timestamp", 253 | "Value" 254 | ], 255 | "members":{ 256 | "MetricName":{ 257 | "shape":"MetricName", 258 | "documentation":"
The name of the metric.
" 259 | }, 260 | "Timestamp":{ 261 | "shape":"Timestamp", 262 | "documentation":"
The time that the metric was recorded.
" 263 | }, 264 | "Step":{ 265 | "shape":"Step", 266 | "documentation":"
The metric step (epoch).
" 267 | }, 268 | "Value":{ 269 | "shape":"Double", 270 | "documentation":"
The metric value.
" 271 | } 272 | }, 273 | "documentation":"
The raw metric data to associate with the resource.
" 274 | }, 275 | "RawMetricDataList":{ 276 | "type":"list", 277 | "member":{"shape":"RawMetricData"}, 278 | "max":10, 279 | "min":1 280 | }, 281 | "SageMakerResourceArn":{ 282 | "type":"string", 283 | "max":2048, 284 | "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:[a-z\\-].*/.*" 285 | }, 286 | "Step":{ 287 | "type":"integer", 288 | "min":0 289 | }, 290 | "Timestamp":{"type":"timestamp"}, 291 | "XAxisType":{ 292 | "type":"string", 293 | "enum":[ 294 | "IterationNumber", 295 | "Timestamp" 296 | ] 297 | }, 298 | "XAxisValues":{ 299 | "type":"list", 300 | "member":{"shape":"Long"} 301 | } 302 | }, 303 | "documentation":"
Contains all data plane API operations and data types for Amazon SageMaker Metrics. Use these APIs to put and retrieve (get) metrics related to your training run.
" 304 | } 305 | -------------------------------------------------------------------------------- /.github/workflows/pr-checks.yml: -------------------------------------------------------------------------------- 1 | name: PR Checks 2 | 3 | on: 4 | pull_request_target: 5 | branches: 6 | - "main*" 7 | 8 | permissions: 9 | id-token: write # This is required for requesting the JWT 10 | contents: read 11 | packages: write 12 | 13 | 14 | jobs: 15 | collab-check: 16 | runs-on: ubuntu-latest 17 | outputs: 18 | approval-env: ${{ steps.collab-check.outputs.result }} 19 | steps: 20 | - name: Collaborator Check 21 | uses: actions/github-script@v7 22 | id: collab-check 23 | with: 24 | github-token: ${{ secrets.GITHUB_TOKEN }} 25 | result-encoding: string 26 | script: | 27 | try { 28 | const res = await github.rest.repos.checkCollaborator({ 29 | owner: context.repo.owner, 30 | repo: context.repo.repo, 31 | username: "${{ github.event.pull_request.user.login }}", 32 | }); 33 | console.log("Verifed ${{ github.event.pull_request.user.login }} is a repo collaborator. Auto Approving PR Checks.") 34 | return res.status == "204" ? "auto-approve" : "manual-approval" 35 | } catch (error) { 36 | console.log(error) 37 | console.log("${{ github.event.pull_request.user.login }} is not a collaborator for "+ context.repo.repo + ". Requiring Manual Approval to run PR Checks.") 38 | return "manual-approval" 39 | } 40 | wait-for-approval: 41 | runs-on: ubuntu-latest 42 | needs: [ collab-check ] 43 | environment: ${{ needs.collab-check.outputs.approval-env }} 44 | steps: 45 | - run: echo "Workflow Approved! Starting Integration tests" 46 | integ-tests: 47 | runs-on: ubuntu-latest 48 | needs: [wait-for-approval] 49 | strategy: 50 | fail-fast: false 51 | matrix: 52 | python-version: [ "3.12" ] 53 | env: 54 | SUBNET_ONE: ${{ secrets.SUBNET_ONE }} 55 | SUBNET_TWO: ${{ secrets.SUBNET_TWO }} 56 | SECURITY_GROUP_ONE: ${{ secrets.SECURITY_GROUP_ONE }} 57 | steps: 58 | - uses: actions/checkout@v4 59 | with: 60 | ref: ${{ github.event.pull_request.head.sha }} 61 | - name: Set up Python ${{ matrix.python-version }} 62 | uses: actions/setup-python@v5 63 | with: 64 | python-version: ${{ matrix.python-version }} 65 | - name: Install dependencies 66 | run: | 67 | export PYTHONPATH="$PYTHONPATH:$PWD" 68 | python -m pip install --upgrade pip 69 | pip install scikit-learn setuptools awscli 70 | pip install -e ".[codegen]" 71 | - name: Configure AWS Credentials 72 | uses: aws-actions/configure-aws-credentials@v4 73 | with: 74 | role-to-assume: ${{ secrets.INTEG_TEST_ROLE_ARN }} 75 | aws-region: us-west-2 76 | role-duration-seconds: 10800 77 | - name: Run Integration Tests 78 | id: run-integration-tests 79 | run: | 80 | pytest integ 81 | - name: Put Failure Metrics to CloudWatch 82 | if: ${{ failure() }} 83 | run: | 84 | if [[ "${{ github.event.pull_request.user.login }}" == ${{ secrets.SAGEMAKER_BOT_USER_LOGIN }} ]]; then 85 | echo "Integration test Failed. 
Putting Failure Metrics = 1 and Success = 0 onto Cloudwatch" 86 | aws cloudwatch put-metric-data --metric-name IntegrationTestFailure --namespace SageMakerPySdkCoreMonitoringMetrics --value 1 --unit Count --dimensions MetricCategory=Integration 87 | aws cloudwatch put-metric-data --metric-name IntegrationTestSuccess --namespace SageMakerPySdkCoreMonitoringMetrics --value 0 --unit Count --dimensions MetricCategory=Integration 88 | else 89 | echo "Putting metrics has been skipped" 90 | fi 91 | - name: Put Success Metrics to CloudWatch 92 | if: ${{ success() }} 93 | run: | 94 | if [[ "${{ github.event.pull_request.user.login }}" == ${{ secrets.SAGEMAKER_BOT_USER_LOGIN }} ]]; then 95 | echo "Integration test Succeeded. Putting Success Metrics = 1 and Failure = 0 onto Cloudwatch" 96 | aws cloudwatch put-metric-data --metric-name IntegrationTestSuccess --namespace SageMakerPySdkCoreMonitoringMetrics --value 1 --unit Count --dimensions MetricCategory=Integration 97 | aws cloudwatch put-metric-data --metric-name IntegrationTestFailure --namespace SageMakerPySdkCoreMonitoringMetrics --value 0 --unit Count --dimensions MetricCategory=Integration 98 | else 99 | echo "Putting metrics has been skipped" 100 | fi 101 | unit-tests: 102 | runs-on: ubuntu-latest 103 | needs: [wait-for-approval] 104 | strategy: 105 | fail-fast: false 106 | matrix: 107 | python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ] 108 | 109 | steps: 110 | - uses: actions/checkout@v4 111 | with: 112 | ref: ${{ github.event.pull_request.head.sha }} 113 | - name: Set up Python ${{ matrix.python-version }} 114 | uses: actions/setup-python@v5 115 | with: 116 | python-version: ${{ matrix.python-version }} 117 | - name: Install dependencies 118 | run: | 119 | export PYTHONPATH="$PYTHONPATH:$PWD" 120 | python -m pip install --upgrade pip 121 | pip install -e ".[codegen]" 122 | - name: Configure AWS Credentials 123 | uses: aws-actions/configure-aws-credentials@v4 124 | with: 125 | role-to-assume: ${{ secrets.INTEG_TEST_ROLE_ARN }} 126 | aws-region: us-west-2 127 | role-duration-seconds: 10800 128 | - name: Run Unit Tests 129 | run: | 130 | pytest tst 131 | - name: Put Failure Metrics 132 | if: ${{ failure() }} 133 | run: | 134 | if [[ "${{ github.event.pull_request.user.login }}" == ${{ secrets.SAGEMAKER_BOT_USER_LOGIN }} ]]; then 135 | echo "Unit test run Failed. Putting Failure Metrics = 1 and Success = 0 onto Cloudwatch" 136 | aws cloudwatch put-metric-data --metric-name UnitTestFailure --namespace SageMakerPySdkCoreMonitoringMetrics --value 1 --unit Count --dimensions MetricCategory=Unit-${{ matrix.python-version }} 137 | aws cloudwatch put-metric-data --metric-name UnitTestSuccess --namespace SageMakerPySdkCoreMonitoringMetrics --value 0 --unit Count --dimensions MetricCategory=Unit-${{ matrix.python-version }} 138 | else 139 | echo "Putting metrics has been skipped" 140 | fi 141 | - name: Put Success Metrics 142 | if: ${{ success() }} 143 | run: | 144 | if [[ "${{ github.event.pull_request.user.login }}" == ${{ secrets.SAGEMAKER_BOT_USER_LOGIN }} ]]; then 145 | echo "Unit test run Succeeded. 
Putting Success Metrics = 1 and Failure = 0 onto Cloudwatch" 146 | aws cloudwatch put-metric-data --metric-name UnitTestSuccess --namespace SageMakerPySdkCoreMonitoringMetrics --value 1 --unit Count --dimensions MetricCategory=Unit-${{ matrix.python-version }} 147 | aws cloudwatch put-metric-data --metric-name UnitTestFailure --namespace SageMakerPySdkCoreMonitoringMetrics --value 0 --unit Count --dimensions MetricCategory=Unit-${{ matrix.python-version }} 148 | else 149 | echo "Putting metrics has been skipped" 150 | fi 151 | 152 | resources-coverage: 153 | runs-on: ubuntu-latest 154 | needs: [wait-for-approval] 155 | strategy: 156 | fail-fast: false 157 | matrix: 158 | python-version: [ "3.12" ] 159 | steps: 160 | - uses: actions/checkout@v3 161 | with: 162 | ref: ${{ github.event.pull_request.head.sha }} 163 | - name: Set up Python ${{ matrix.python-version }} 164 | uses: actions/setup-python@v5 165 | with: 166 | python-version: ${{ matrix.python-version }} 167 | - name: Configure AWS Credentials 168 | uses: aws-actions/configure-aws-credentials@v4 169 | with: 170 | role-to-assume: ${{ secrets.INTEG_TEST_ROLE_ARN }} 171 | aws-region: us-west-2 172 | role-duration-seconds: 10800 173 | - name: Install dependencies 174 | run: | 175 | export PYTHONPATH="$PYTHONPATH:$PWD" 176 | python -m pip install --upgrade pip 177 | pip install coverage 178 | pip install pytest-cov 179 | pip install -e ".[codegen]" 180 | - name: Run Unit Tests 181 | run: | 182 | pytest --cov-report json --cov=src tst/generated/test_resources.py 183 | - name: Execute Python Coverage compute script and capture output 184 | id: execute_coverage_parse_script 185 | run: | 186 | if [[ "${{ github.event.pull_request.user.login }}" == ${{ secrets.SAGEMAKER_BOT_USER_LOGIN }} ]]; then 187 | aws cloudwatch put-metric-data --metric-name UnitTestCoverage --namespace SageMakerPySdkCoreMonitoringMetrics --value $(python workflow_helper/compute_resource_coverage.py) --unit Count --dimensions MetricCategory=Unit-${{ matrix.python-version }} 188 | else 189 | echo "Putting metrics has been skipped" 190 | fi 191 | put-api-coverage: 192 | runs-on: ubuntu-latest 193 | needs: [wait-for-approval] 194 | steps: 195 | - uses: actions/checkout@v3 196 | with: 197 | ref: ${{ github.event.pull_request.head.sha }} 198 | - name: Install dependencies 199 | run: | 200 | export PYTHONPATH="$PYTHONPATH:$PWD" 201 | python -m pip install --upgrade pip 202 | pip install -e ".[codegen]" 203 | - name: Configure AWS Credentials 204 | uses: aws-actions/configure-aws-credentials@v4 205 | with: 206 | role-to-assume: ${{ secrets.INTEG_TEST_ROLE_ARN }} 207 | aws-region: us-west-2 208 | role-duration-seconds: 10800 209 | - name: Execute Boto API Coverage compute script and capture output 210 | run: | 211 | if [[ "${{ github.event.pull_request.user.login }}" == ${{ secrets.SAGEMAKER_BOT_USER_LOGIN }} ]]; then 212 | output=$(python workflow_helper/compute_boto_api_coverage.py) 213 | unsupported_apis=$(echo $output | cut -d ' ' -f1) 214 | supported_apis=$(echo $output | cut -d ' ' -f2) 215 | aws cloudwatch put-metric-data --metric-name UnsupportedAPIs --namespace SageMakerPySdkCoreMonitoringMetrics --value $unsupported_apis --unit Count --dimensions MetricCategory=BotocoreAPICoverage 216 | aws cloudwatch put-metric-data --metric-name SupportedAPIs --namespace SageMakerPySdkCoreMonitoringMetrics --value $supported_apis --unit Count --dimensions MetricCategory=BotocoreAPICoverage 217 | else 218 | echo "Putting metrics has been skipped" 219 | fi 220 | pylint-codestyle: 221 | 
runs-on: ubuntu-latest 222 | needs: [wait-for-approval] 223 | steps: 224 | - uses: actions/checkout@v4 225 | with: 226 | ref: ${{ github.event.pull_request.head.sha }} 227 | - name: Set up Python 228 | uses: actions/setup-python@v5 229 | with: 230 | python-version: '3.10' 231 | - name: Install dependencies 232 | run: | 233 | export PYTHONPATH="$PYTHONPATH:$PWD" 234 | python -m pip install --upgrade pip 235 | pip install -e ".[codegen]" 236 | - name: Run Pylint 237 | run: | 238 | python_files=$(find . -name '*.py') 239 | pylint $python_files --persistent=y --fail-under=8 240 | - name: Run Black Check 241 | run: | 242 | black --check . 243 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## v1.0.72 (2025-12-18) 4 | 5 | * Daily Sync with Botocore v1.42.12 on 2025/12/18 (#356) 6 | 7 | ## v1.0.71 (2025-12-03) 8 | 9 | * Daily Sync with Botocore v1.42.2 on 2025/12/03 (#355) 10 | 11 | ## v1.0.70 (2025-12-03) 12 | 13 | * Daily Sync with Botocore v1.42.1 on 2025/12/03 (#354) 14 | * prepare release v1.0.69 15 | 16 | ## v1.0.69 (2025-11-24) 17 | 18 | * Daily Sync with Botocore v1.41.2 on 2025/11/24 (#353) 19 | 20 | ## v1.0.68 (2025-11-21) 21 | 22 | * Daily Sync with Botocore v1.41.1 on 2025/11/21 (#352) 23 | 24 | ## v1.0.67 (2025-11-20) 25 | 26 | * Daily Sync with Botocore v1.41.0 on 2025/11/20 (#351) 27 | 28 | ## v1.0.66 (2025-11-14) 29 | 30 | * Daily Sync with Botocore v1.40.73 on 2025/11/14 (#350) 31 | 32 | ## v1.0.65 (2025-11-13) 33 | 34 | * Daily Sync with Botocore v1.40.72 on 2025/11/13 (#349) 35 | 36 | ## v1.0.64 (2025-11-07) 37 | 38 | * Daily Sync with Botocore v1.40.68 on 2025/11/07 (#348) 39 | 40 | ## v1.0.63 (2025-11-06) 41 | 42 | * Daily Sync with Botocore v1.40.67 on 2025/11/06 (#347) 43 | 44 | ## v1.0.62 (2025-11-03) 45 | 46 | * Daily Sync with Botocore v1.40.64 on 2025/11/03 (#346) 47 | 48 | ## v1.0.61 (2025-10-29) 49 | 50 | * Daily Sync with Botocore v1.40.61 on 2025/10/29 (#345) 51 | 52 | ## v1.0.60 (2025-10-27) 53 | 54 | * Daily Sync with Botocore v1.40.59 on 2025/10/27 (#344) 55 | 56 | ## v1.0.59 (2025-09-10) 57 | 58 | * Daily Sync with Botocore v1.40.27 on 2025/09/10 (#343) 59 | * chore: remove deleted bucket (#342) 60 | * Fix missing put metrics (#341) 61 | 62 | ## v1.0.58 (2025-09-08) 63 | 64 | * Daily Sync with Botocore v1.40.25 on 2025/09/08 (#340) 65 | * add both failure and success update (#339) 66 | 67 | ## v1.0.57 (2025-08-28) 68 | 69 | * Daily Sync with Botocore v1.40.19 on 2025/08/28 (#338) 70 | 71 | ## v1.0.56 (2025-08-26) 72 | 73 | * Daily Sync with Botocore v1.40.17 on 2025/08/26 (#336) 74 | 75 | ## v1.0.55 (2025-08-25) 76 | 77 | * Daily Sync with Botocore v1.40.16 on 2025/08/25 (#335) 78 | 79 | ## v1.0.54 (2025-08-21) 80 | 81 | * Daily Sync with Botocore v1.40.14 on 2025/08/21 (#333) 82 | 83 | ## v1.0.53 (2025-08-19) 84 | 85 | * Daily Sync with Botocore v1.40.12 on 2025/08/19 (#332) 86 | 87 | ## v1.0.52 (2025-08-14) 88 | 89 | * Daily Sync with Botocore v1.40.9 on 2025/08/14 (#330) 90 | 91 | ## v1.0.51 (2025-08-13) 92 | 93 | * Daily Sync with Botocore v1.40.8 on 2025/08/13 (#329) 94 | 95 | ## v1.0.50 (2025-08-11) 96 | 97 | * Daily Sync with Botocore v1.40.6 on 2025/08/11 (#328) 98 | 99 | ## v1.0.49 (2025-08-06) 100 | 101 | * Daily Sync with Botocore v1.40.3 on 2025/08/06 (#327) 102 | 103 | ## v1.0.48 (2025-08-05) 104 | 105 | * Daily Sync with Botocore v1.40.2 on 2025/08/05 (#326) 106 | * Update Rich logging to be enabled only when required. 
(#325) 107 | 108 | ## v1.0.47 (2025-07-23) 109 | 110 | * Update boto3 version constraint (#324) 111 | 112 | ## v1.0.46 (2025-07-22) 113 | 114 | * Daily Sync with Botocore v1.39.10 on 2025/07/22 (#323) 115 | 116 | ## v1.0.45 (2025-07-18) 117 | 118 | * Update rich boundary (#322) 119 | * Address pylint issue in PR:320 (#321) 120 | 121 | ## v1.0.44 (2025-07-17) 122 | 123 | * Fix: Do not generate empty classes (#320) 124 | * Update importlib-metadata version boundary (#319) 125 | 126 | ## v1.0.43 (2025-07-16) 127 | 128 | * Daily Sync with Botocore v1.39.6 on 2025/07/16 (#316) 129 | 130 | ## v1.0.42 (2025-07-04) 131 | 132 | * Daily Sync with Botocore v1.39.3 on 2025/07/04 (#314) 133 | 134 | ## v1.0.41 (2025-07-02) 135 | 136 | * Daily Sync with Botocore v1.39.1 on 2025/07/02 (#313) 137 | 138 | ## v1.0.40 (2025-06-20) 139 | 140 | * Daily Sync with Botocore v1.38.40 on 2025/06/20 (#311) 141 | 142 | ## v1.0.39 (2025-06-19) 143 | 144 | * Daily Sync with Botocore v1.38.39 on 2025/06/19 (#310) 145 | 146 | ## v1.0.38 (2025-06-17) 147 | 148 | * Daily Sync with Botocore v1.38.37 on 2025/06/17 (#309) 149 | 150 | ## v1.0.37 (2025-06-05) 151 | 152 | * Daily Sync with Botocore v1.38.30 on 2025/06/05 (#307) 153 | * Refactoring Intelligent Defaults for parity with documentations (#304) 154 | 155 | ## v1.0.36 (2025-06-02) 156 | 157 | * Daily Sync with Botocore v1.38.27 on 2025/06/02 (#305) 158 | 159 | ## v1.0.35 (2025-05-30) 160 | 161 | * Daily Sync with Botocore v1.38.26 on 2025/05/30 (#302) 162 | * Fix: next_token causing infinite loop in fetching Clusters (#301) 163 | * Add blob handler to transform endpoint output method (#299) 164 | 165 | ## v1.0.34 (2025-05-13) 166 | 167 | * Daily Sync with Botocore v1.38.14 on 2025/05/13 (#298) 168 | 169 | ## v1.0.33 (2025-05-08) 170 | 171 | * Support rich 14.0.0 (#297) 172 | * Add extract_name_mapping logic to fix get_all method (#294) 173 | 174 | ## v1.0.32 (2025-05-08) 175 | 176 | * Daily Sync with Botocore v1.38.11 on 2025/05/08 (#296) 177 | 178 | ## v1.0.31 (2025-05-02) 179 | 180 | * Daily Sync with Botocore v1.38.7 on 2025/05/02 (#295) 181 | 182 | ## v1.0.30 (2025-04-30) 183 | 184 | * Daily Sync with Botocore v1.38.5 on 2025/04/30 (#292) 185 | * Daily Sync with Botocore v1.37.37 on 2025/04/21 (#290) 186 | * Fix logging behavior (#287) 187 | * fix: explicit reference of shapes in resources.py (#285) 188 | * fix: Changes to address the field name error: json shadows a BaseModel attribute (#270) 189 | * fix: include version file in the distribution (#249) 190 | 191 | ## v1.0.29 (2025-04-10) 192 | 193 | * Daily Sync with Botocore v1.37.31 on 2025/04/10 (#284) 194 | * Add auto-merge for botocore sync PRs (#281) 195 | 196 | ## v1.0.28 (2025-04-04) 197 | 198 | * Daily Sync with Botocore v1.37.26 on 2025/04/03 (#277) 199 | 200 | ## v1.0.27 (2025-03-27) 201 | 202 | * Daily Sync with Botocore v1.37.21 on 2025/03/27 (#271) 203 | * Remove sagemaker from integ test dependency (#272) 204 | 205 | ## v1.0.26 (2025-03-25) 206 | 207 | * Daily Sync with Botocore v1.37.19 on 2025/03/25 (#267) 208 | * UNIT TEST FIXES FROM BOTOCORE GENERATION (#263) 209 | * Add CODEOWNERS file (#240) 210 | 211 | ## v1.0.25 (2025-02-27) 212 | 213 | * Daily Sync with Botocore v1.37.2 on 2025/02/27 (#251) 214 | 215 | ## v1.0.24 (2025-02-21) 216 | 217 | * Daily Sync with Botocore v1.36.25 on 2025/02/21 (#247) 218 | 219 | ## v1.0.23 (2025-02-20) 220 | 221 | * Daily Sync with Botocore v1.36.24 on 2025/02/20 (#246) 222 | 223 | ## v1.0.22 (2025-02-14) 224 | 225 | * Daily Sync with Botocore v1.36.20 on 
2025/02/14 (#245) 226 | 227 | ## v1.0.21 (2025-02-05) 228 | 229 | * Daily Sync with Botocore v1.36.13 on 2025/02/05 (#239) 230 | 231 | ## v1.0.20 (2025-02-03) 232 | 233 | * Daily Sync with Botocore v1.36.11 on 2025/02/03 (#238) 234 | 235 | ## v1.0.19 (2025-01-20) 236 | 237 | * Daily Sync with Botocore v1.36.2 on 2025/01/20 (#236) 238 | 239 | ## v1.0.18 (2025-01-17) 240 | 241 | * Daily Sync with Botocore v1.36.1 on 2025/01/17 (#235) 242 | * Latest Botocore changes and unit test updates (#233) 243 | * fix: fetch version dynamically for useragent string and fix workflows (#230) 244 | * Use OIDC Role in workflows (#229) 245 | 246 | ## v1.0.17 (2024-12-04) 247 | 248 | * Daily Sync with Botocore v1.35.75 on 2024/12/04 (#227) 249 | * Add support for map-in-list, list-in-list structures (#224) 250 | 251 | ## v1.0.16 (2024-11-25) 252 | 253 | * Daily Sync with Botocore v1.35.68 on 2024/11/25 (#223) 254 | 255 | ## v1.0.15 (2024-11-19) 256 | 257 | * fix: update pydantic dep version (#222) 258 | 259 | ## v1.0.14 (2024-11-15) 260 | 261 | * Daily Sync with Botocore v1.35.62 on 2024/11/15 (#221) 262 | * Support BatchDeleteClusterNodes from sagemaker (#220) 263 | 264 | ## v1.0.13 (2024-11-01) 265 | 266 | * Daily Sync with Botocore v1.35.53 on 2024/11/01 (#219) 267 | 268 | ## v1.0.12 (2024-10-31) 269 | 270 | * Daily Sync with Botocore v1.35.52 on 2024/10/31 (#218) 271 | 272 | ## v1.0.11 (2024-10-30) 273 | 274 | * Daily Sync with Botocore v1.35.51 on 2024/10/30 (#217) 275 | 276 | ## v1.0.10 (2024-10-03) 277 | 278 | * Daily Sync with Botocore v1.35.32 on 2024/10/03 (#215) 279 | * fix: set rich panel to transient (#214) 280 | * Feature: Add wait with logs to subset of Job types (#201) 281 | 282 | ## v1.0.9 (2024-09-27) 283 | 284 | * Daily Sync with Botocore v1.35.28 on 2024/09/27 (#210) 285 | 286 | ## v1.0.8 (2024-09-25) 287 | 288 | * Daily Sync with Botocore v1.35.26 on 2024/09/25 (#209) 289 | * Support BatchGetMetrics from sagemaker-metrics (#207) 290 | 291 | ## v1.0.7 (2024-09-23) 292 | 293 | * Daily Sync with Botocore v1.35.24 on 2024/09/23 (#206) 294 | 295 | ## v1.0.6 (2024-09-20) 296 | 297 | * Daily Sync with Botocore v1.35.23 on 2024/09/20 (#205) 298 | * Update all PR Checks to have collab check (#202) 299 | 300 | ## v1.0.5 (2024-09-16) 301 | 302 | * Daily Sync with Botocore v1.35.19 on 2024/09/16 (#200) 303 | * Issue template (#196) 304 | * Collab check (#195) 305 | * Support APIs from sagemaker-featurestore-runtime and sagemaker-metrics (#181) 306 | 307 | ## v1.0.4 (2024-09-10) 308 | 309 | * Daily Sync with Botocore v1.35.15 on 2024/09/10 (#182) 310 | 311 | ## v1.0.3 (2024-09-06) 312 | 313 | * Daily Sync with Botocore v1.35.13 on 2024/09/06 (#180) 314 | * Add test to check API coverage (#165) 315 | * Update README.rst (#178) 316 | 317 | ## v1.0.2 (2024-09-04) 318 | 319 | * Daily Sync with Botocore v1.35.11 on 2024/09/04 (#179) 320 | * Add serialization for all methods (#177) 321 | * Add forbid extra for pydantic BaseModel (#173) 322 | * Add black check (#174) 323 | 324 | ## v1.0.1 (2024-08-30) 325 | 326 | * fix: SMD pydantic issue (#170) 327 | * feat: Add get started notebook (#160) 328 | * update notebooks (#168) 329 | * fix pyproject.toml (#167) 330 | 331 | ## v0.1.10 (2024-08-28) 332 | 333 | 334 | ## v0.1.9 (2024-08-28) 335 | 336 | * Update counting method of botocore api coverage (#159) 337 | * Example notebook for tracking local pytorch experiment (#158) 338 | * Add gen AI examples (#155) 339 | * Fix _serialize_args() for dict parameters (#157) 340 | 341 | ## v0.1.8 (2024-08-21) 342 | 343 | * 
Daily Sync with Botocore v1.35.2 on 2024/08/21 (#153) 344 | 345 | ## v0.1.7 (2024-08-13) 346 | 347 | * Daily Sync with Botocore v1.34.159 on 2024/08/13 (#150) 348 | * feat: add param validation with pydantic validate_call (#149) 349 | * Update create-release.yml 350 | * Support textual rich logging for wait methods (#146) 351 | * Refactor Package structure (#144) 352 | * Separate environment variable for Sagemaker Core (#147) 353 | * Add styling for textual rich logging (#145) 354 | * Replace all Sagemaker V2 Calls (#142) 355 | * Daily Sync with Botocore v1.34.153 on 2024/08/05 (#143) 356 | * Update auto-approve.yml 357 | * Use textual rich logging handler for all loggers (#138) 358 | * Update auto-approve.yml 359 | * Add user agent to Sagemaker Core (#140) 360 | * Switch to sagemaker-bot account (#137) 361 | * Metrics for boto API coverage (#136) 362 | * Fix volume_size_in_g_b attribute in example notebooks (#130) 363 | 364 | ## v0.1.6 (2024-07-25) 365 | 366 | * Add private preview feedback for denesting simplifications (#128) 367 | * Put Metrics only for Daily Sync API (#125) 368 | 369 | ## v0.1.5 (2024-07-22) 370 | 371 | * Daily Sync with Botocore v1.34.145 on 2024/07/22 (#127) 372 | 373 | ## v0.1.4 (2024-07-22) 374 | 375 | * Cleanup Resources created by Integration tests (#120) 376 | * Enable Botocore sync workflow (#92) 377 | 378 | ## v0.1.3 (2024-07-18) 379 | 380 | * Daily Sync with Botocore v1.34.143 on 2024/07/11 (#91) 381 | * Update license classifier (#119) 382 | * Metrics (#118) 383 | * Support wait_for_delete method (#114) 384 | 385 | ## v0.1.2 (2024-07-08) 386 | 387 | * Add additional methods to the unit test framework (#83) 388 | * Integration tests (#82) 389 | * Add exception and return type docstring for additional methods (#58) 390 | * Support SagemakerServicecatalogPortfolio resource (#49) 391 | * Support last few additional methods (#52) 392 | * Integration tests (#53) 393 | * Fix Intelligent defaults decorator conversion (#51) 394 | * Fix for Issues that came up in integration tests (#50) 395 | * Resource Unit test Framework and tests (#46) 396 | * Support resources by create method and move methods under EdgeDeploymentStage to EdgeDeploymentPlan (#48) 397 | * Support resources that have the List operation but do not have the Describe operation (#45) 398 | * Fix class method (#44) 399 | * Update docstring for additional methods (#43) 400 | * Add Python 3.11 and 3.12 to PR checks (#42) 401 | * change: update s3 bucket in notebooks and add cell to delete resources (#41) 402 | * Fix pascal_to_snake for consecutive capitalized characters (#38) 403 | * Intelligent Defaults with Snake cased arguments (#40) 404 | 405 | ## v0.1.1 (2024-06-14) 406 | 407 | * Rollback CHANGELOG.md 408 | * Rollback VERSION 409 | * prepare release v0.1.2 410 | * prepare release v0.1.1 411 | * Add resource class docstring (#37) 412 | * Create CHANGELOG.md (#39) 413 | 414 | ## v0.1.0 (2024-06-14) 415 | 416 | * Initial release of SageMaker Core 417 | -------------------------------------------------------------------------------- /tst/test_codec.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | from dateutil.tz import tzlocal 3 | from pprint import pprint 4 | import unittest 5 | from sagemaker_core.main.code_injection.codec import pascal_to_snake 6 | from sagemaker_core.main.code_injection.codec import transform 7 | from sagemaker_core.main.resources import Model, TrialComponent, AutoMLJobV2 8 | 9 | 10 | class TestConversion(unittest.TestCase): 
11 | def test_pascal_to_snake(self): 12 | self.assertEqual(pascal_to_snake("PascalCase"), "pascal_case") 13 | self.assertEqual(pascal_to_snake("AnotherExample"), "another_example") 14 | self.assertEqual(pascal_to_snake("test"), "test") 15 | self.assertEqual(pascal_to_snake("AutoMLJob"), "auto_ml_job") 16 | 17 | 18 | class DummyResourceClass: 19 | pass 20 | 21 | 22 | def test_deserializer_for_structure_type(): 23 | """Validate deserializer() - for structure type""" 24 | # The test validates the following relations 25 | # StructA → basic_type_member 26 | # StructA → StructB -> basic_type_member 27 | describe_model_response = { 28 | "CreationTime": datetime.datetime(2024, 3, 13, 15, 7, 44, 459000, tzinfo=tzlocal()), 29 | "DeploymentRecommendation": { 30 | "RealTimeInferenceRecommendations": [], 31 | "RecommendationStatus": "COMPLETED", 32 | }, 33 | "EnableNetworkIsolation": False, 34 | "ExecutionRoleArn": "arn:aws:iam::616250812882:role/SageMakerRole", 35 | "ModelArn": "arn:aws:sagemaker:us-west-2:616250812882:model/lmi-model-falcon-7b-1710367662-a49c", 36 | "ModelName": "lmi-model-falcon-7b-1710367662-a49c", 37 | "PrimaryContainer": { 38 | "Environment": {}, 39 | "Image": "763104351884.dkr.ecr.us-west-2.amazonaws.com/djl-inference:0.23.0-deepspeed0.9.5-cu118", 40 | "Mode": "SingleModel", 41 | "ModelDataSource": { 42 | "S3DataSource": { 43 | "CompressionType": "Gzip", 44 | "S3DataType": "S3Object", 45 | "S3Uri": "s3://sagemaker-us-west-2-616250812882/session-default-prefix/large-model-lmi/code/mymodel-7B.tar.gz", 46 | } 47 | }, 48 | "ModelDataUrl": "s3://sagemaker-us-west-2-616250812882/session-default-prefix/large-model-lmi/code/mymodel-7B.tar.gz", 49 | }, 50 | } 51 | transformed_data = transform(describe_model_response, "DescribeModelOutput") 52 | pprint(transformed_data) 53 | instance = Model(**transformed_data) 54 | assert instance.execution_role_arn == "arn:aws:iam::616250812882:role/SageMakerRole" 55 | assert not instance.enable_network_isolation 56 | assert instance.primary_container.model_data_source.s3_data_source.s3_data_type == "S3Object" 57 | 58 | 59 | def test_deserializer_for_list_type(): 60 | """Validate deserializer() - for list type""" 61 | # The test validates the following relations 62 | # StructA → StructB -> list(structure) 63 | # ToDo: Struct -> list(basic types) 64 | # StructA → StructB -> list(structure) -> map(string, string) 65 | describe_model_response = { 66 | "CreationTime": datetime.datetime(2024, 3, 13, 15, 7, 44, 459000, tzinfo=tzlocal()), 67 | "DeploymentRecommendation": { 68 | "RealTimeInferenceRecommendations": [ 69 | { 70 | "RecommendationId": "dummy-recomm-id-1", 71 | "InstanceType": "mlt4", 72 | "Environment": {"ENV_VAR_1": "ENV_VAR_1_VALUE"}, 73 | }, 74 | { 75 | "RecommendationId": "dummy-recomm-id-2", 76 | "InstanceType": "mlm4", 77 | "Environment": {"ENV_VAR_2": "ENV_VAR_2_VALUE"}, 78 | }, 79 | ], 80 | "RecommendationStatus": "COMPLETED", 81 | }, 82 | "ModelArn": "arn:aws:sagemaker:us-west-2:616250812882:model/lmi-model-falcon-7b-1710367662-a49c", 83 | "ModelName": "lmi-model-falcon-7b-1710367662-a49c", 84 | } 85 | transformed_data = transform(describe_model_response, "DescribeModelOutput") 86 | pprint(transformed_data) 87 | instance = Model(**transformed_data) 88 | real_time_inference_recommendations = ( 89 | instance.deployment_recommendation.real_time_inference_recommendations 90 | ) 91 | assert type(real_time_inference_recommendations) == list 92 | assert real_time_inference_recommendations[0].recommendation_id == "dummy-recomm-id-1" 93 | assert 
real_time_inference_recommendations[1].instance_type == "mlm4" 94 | assert real_time_inference_recommendations[1].environment == {"ENV_VAR_2": "ENV_VAR_2_VALUE"} 95 | 96 | 97 | def test_deserializer_for_map_type(): 98 | """Validate deserializer() - for map type""" 99 | # The test validates the following relations 100 | # StructA → map(string, structure) 101 | describe_trial_component_response = { 102 | "CreatedBy": {}, 103 | "DisplayName": "huggingface-pytorch-training-2024-01-10-02-32-59-730-aws-training-job", 104 | "OutputArtifacts": { 105 | "SageMaker.DebugHookOutput": { 106 | "Value": "s3://sagemaker-us-west-2-616250812882/session-default-prefix/" 107 | }, 108 | "SageMaker.ModelArtifact": { 109 | "Value": "s3://sagemaker-us-west-2-616250812882/session-default-prefix/huggingface-pytorch-training-2024-01-10-02-32-59-730/output/model.tar.gz" 110 | }, 111 | }, 112 | "Parameters": { 113 | "SageMaker.ImageUri": { 114 | "StringValue": "763104351884.dkr.ecr.us-west-2.amazonaws.com/huggingface-pytorch-training:2.0.0-transformers4.28.1-gpu-py310-cu118-ubuntu20.04" 115 | }, 116 | "SageMaker.InstanceCount": {"NumberValue": 1.0}, 117 | "SageMaker.InstanceType": {"StringValue": "ml.g5.4xlarge"}, 118 | }, 119 | "TrialComponentArn": "arn:aws:sagemaker:us-west-2:616250812882:experiment-trial-component/huggingface-pytorch-training-2024-01-10-02-32-59-730-aws-training-job", 120 | "TrialComponentName": "huggingface-pytorch-training-2024-01-10-02-32-59-730-aws-training-job", 121 | } 122 | transformed_data = transform( 123 | describe_trial_component_response, "DescribeTrialComponentResponse" 124 | ) 125 | pprint(transformed_data) 126 | instance = TrialComponent(**transformed_data) 127 | parameters = instance.parameters 128 | assert type(parameters) == dict 129 | assert parameters["SageMaker.InstanceType"].string_value == "ml.g5.4xlarge" 130 | assert parameters["SageMaker.InstanceCount"].number_value == 1.0 131 | output_artifacts = instance.output_artifacts 132 | assert type(output_artifacts) == dict 133 | assert ( 134 | output_artifacts["SageMaker.DebugHookOutput"].value 135 | == "s3://sagemaker-us-west-2-616250812882/session-default-prefix/" 136 | ) 137 | 138 | # StructA -> map(string, list) -> list(structure) -> map(string, string) 139 | describe_auto_ml_job_v2_response = { 140 | "AutoMLJobArn": "arn:aws:sagemaker:us-west-2:616250812882:automl-job/python-sdk-integ-test-base-job", 141 | "AutoMLJobInputDataConfig": [ 142 | { 143 | "ChannelType": "training", 144 | "ContentType": "text/csv;header=present", 145 | "DataSource": { 146 | "S3DataSource": { 147 | "S3DataType": "S3Prefix", 148 | "S3Uri": "s3://sagemaker-us-west-2-616250812882/sagemaker/beta-automl-xgboost/input/iris_training.csv", 149 | } 150 | }, 151 | } 152 | ], 153 | "AutoMLJobName": "python-sdk-integ-test-base-job", 154 | "AutoMLJobSecondaryStatus": "Completed", 155 | "AutoMLJobStatus": "Completed", 156 | "AutoMLProblemTypeConfig": { 157 | "TabularJobConfig": { 158 | "CompletionCriteria": {"MaxCandidates": 3}, 159 | "GenerateCandidateDefinitionsOnly": False, 160 | "TargetAttributeName": "virginica", 161 | }, 162 | "TimeSeriesForecastingJobConfig": { 163 | "Transformations": {"Filling": {"map1_key": {"map2_key": "map2_val"}}}, 164 | "ForecastFrequency": "dummy", 165 | "ForecastHorizon": 20, 166 | "TimeSeriesConfig": { 167 | "TargetAttributeName": "dummy", 168 | "TimestampAttributeName": "dummy", 169 | "ItemIdentifierAttributeName": "dummy", 170 | "GroupingAttributeNames": ["dummy"], 171 | }, 172 | }, 173 | }, 174 | "BestCandidate": { 175 | 
"CandidateName": "python-sdk-integ-test-base-jobTA-001-143b672d", 176 | "CandidateStatus": "Completed", 177 | "CandidateSteps": [ 178 | { 179 | "CandidateStepArn": "arn:aws:sagemaker:us-west-2:616250812882:processing-job/python-sdk-integ-test-base-job-db-1-0661642ca7be48d280cb7fe6197", 180 | "CandidateStepName": "python-sdk-integ-test-base-job-db-1-0661642ca7be48d280cb7fe6197", 181 | "CandidateStepType": "AWS::SageMaker::ProcessingJob", 182 | }, 183 | { 184 | "CandidateStepArn": "arn:aws:sagemaker:us-west-2:616250812882:training-job/python-sdk-integ-test-base-job-dpp1-1-e49c814570994bd98293d0087", 185 | "CandidateStepName": "python-sdk-integ-test-base-job-dpp1-1-e49c814570994bd98293d0087", 186 | "CandidateStepType": "AWS::SageMaker::TrainingJob", 187 | }, 188 | { 189 | "CandidateStepArn": "arn:aws:sagemaker:us-west-2:616250812882:transform-job/python-sdk-integ-test-base-job-dpp1-csv-1-73af2590ca7a4719988c3", 190 | "CandidateStepName": "python-sdk-integ-test-base-job-dpp1-csv-1-73af2590ca7a4719988c3", 191 | "CandidateStepType": "AWS::SageMaker::TransformJob", 192 | }, 193 | { 194 | "CandidateStepArn": "arn:aws:sagemaker:us-west-2:616250812882:training-job/python-sdk-integ-test-base-jobta-001-143b672d", 195 | "CandidateStepName": "python-sdk-integ-test-base-jobTA-001-143b672d", 196 | "CandidateStepType": "AWS::SageMaker::TrainingJob", 197 | }, 198 | ], 199 | "CreationTime": datetime.datetime(2021, 10, 4, 11, 5, 38, tzinfo=tzlocal()), 200 | "InferenceContainerDefinitions": { 201 | "def1": [ 202 | { 203 | "Image": "dummy-image-1", 204 | "ModelDataUrl": "dummy-model-data-url-1", 205 | "Environment": {"ENV_VAR_1": "ENV_VAR_1_VALUE"}, 206 | }, 207 | { 208 | "Image": "dummy-image-2", 209 | "ModelDataUrl": "dummy-model-data-url-2", 210 | "Environment": {"ENV_VAR_2": "ENV_VAR_2_VALUE"}, 211 | }, 212 | ] 213 | }, 214 | "LastModifiedTime": datetime.datetime(2021, 10, 4, 11, 8, 9, 941000, tzinfo=tzlocal()), 215 | "ObjectiveStatus": "Succeeded", 216 | }, 217 | "OutputDataConfig": {"S3OutputPath": "s3://sagemaker-us-west-2-616250812882/"}, 218 | "RoleArn": "arn:aws:iam::616250812882:role/SageMakerRole", 219 | "CreationTime": datetime.datetime(2024, 3, 13, 15, 7, 44, 459000, tzinfo=tzlocal()), 220 | "LastModifiedTime": datetime.datetime(2021, 10, 4, 11, 8, 9, 941000, tzinfo=tzlocal()), 221 | } 222 | transformed_data = transform(describe_auto_ml_job_v2_response, "DescribeAutoMLJobV2Response") 223 | instance = AutoMLJobV2(**transformed_data) 224 | best_candidate = instance.best_candidate 225 | inference_container_definitions = best_candidate.inference_container_definitions 226 | assert type(inference_container_definitions) == dict 227 | assert best_candidate.candidate_name == "python-sdk-integ-test-base-jobTA-001-143b672d" 228 | inference_container_definitions_def1 = inference_container_definitions["def1"] 229 | assert type(inference_container_definitions_def1) == list 230 | assert inference_container_definitions_def1[0].image == "dummy-image-1" 231 | assert inference_container_definitions_def1[1].environment == {"ENV_VAR_2": "ENV_VAR_2_VALUE"} 232 | # StructA -> map(string, map) 233 | assert ( 234 | instance.auto_ml_problem_type_config.time_series_forecasting_job_config.transformations.filling 235 | == {"map1_key": {"map2_key": "map2_val"}} 236 | ) 237 | 238 | 239 | if __name__ == "__main__": 240 | unittest.main() 241 | -------------------------------------------------------------------------------- /src/sagemaker_core/tools/shapes_codegen.py: 
-------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You 4 | # may not use this file except in compliance with the License. A copy of 5 | # the License is located at 6 | # 7 | # http://aws.amazon.com/apache2.0/ 8 | # 9 | # or in the "license" file accompanying this file. This file is 10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 | # ANY KIND, either express or implied. See the License for the specific 12 | # language governing permissions and limitations under the License. 13 | """A class for generating class structure from Service Model JSON. 14 | 15 | To run the script be sure to set the PYTHONPATH 16 | export PYTHONPATH=:$PYTHONPATH 17 | """ 18 | import os 19 | 20 | from sagemaker_core.main.code_injection.codec import pascal_to_snake 21 | from sagemaker_core.tools.constants import ( 22 | LICENCES_STRING, 23 | GENERATED_CLASSES_LOCATION, 24 | SHAPES_CODEGEN_FILE_NAME, 25 | SHAPES_WITH_JSON_FIELD_ALIAS, 26 | ) 27 | from sagemaker_core.tools.shapes_extractor import ShapesExtractor 28 | from sagemaker_core.main.utils import ( 29 | add_indent, 30 | convert_to_snake_case, 31 | remove_html_tags, 32 | escape_special_rst_characters, 33 | ) 34 | from sagemaker_core.tools.templates import SHAPE_CLASS_TEMPLATE, SHAPE_BASE_CLASS_TEMPLATE 35 | from sagemaker_core.tools.data_extractor import ( 36 | load_combined_shapes_data, 37 | load_combined_operations_data, 38 | ) 39 | from .resources_extractor import ResourcesExtractor 40 | 41 | 42 | class ShapesCodeGen: 43 | """ 44 | Generates shape classes based on an input Botocore service.json. 45 | 46 | Args: 47 | service_json (dict): The Botocore service.json containing the shape definitions. 48 | 49 | Attributes: 50 | service_json (dict): The Botocore service.json containing the shape definitions. 51 | shapes_extractor (ShapesExtractor): An instance of the ShapesExtractor class. 52 | shape_dag (dict): Shape DAG generated from service.json 53 | 54 | Methods: 55 | build_graph(): Builds a directed acyclic graph (DAG) representing the dependencies between shapes. 56 | topological_sort(): Performs a topological sort on the DAG to determine the order in which shapes should be generated. 57 | generate_data_class_for_shape(shape): Generates a data class for a given shape. 58 | _generate_doc_string_for_shape(shape): Generates the docstring for a given shape. 59 | generate_imports(): Generates the import statements for the generated shape classes. 60 | generate_base_class(): Generates the base class for the shape classes. 61 | _filter_input_output_shapes(shape): Filters out shapes that are used as input or output for operations. 62 | generate_shapes(output_folder): Generates the shape classes and writes them to the specified output folder. 63 | """ 64 | 65 | def __init__(self): 66 | self.combined_shapes = load_combined_shapes_data() 67 | self.combined_operations = load_combined_operations_data() 68 | self.shapes_extractor = ShapesExtractor() 69 | self.shape_dag = self.shapes_extractor.get_shapes_dag() 70 | self.resources_extractor = ResourcesExtractor() 71 | self.resources_plan = self.resources_extractor.get_resource_plan() 72 | self.resource_methods = self.resources_extractor.get_resource_methods() 73 | 74 | def build_graph(self): 75 | """ 76 | Builds a directed acyclic graph (DAG) representing the dependencies between shapes. 77 | 78 | Steps: 79 | 1. 
Loop over the Service Json shapes. 80 | 1.1. If dependency(members) found, add association of node -> dependency. 81 | 1.1.1. Sometimes members are not shape themselves, but have associated links to actual shapes. 82 | In that case add link to node -> dependency (actual) 83 | CreateExperimentRequest -> [ExperimentEntityName, ExperimentDescription, TagList] 84 | 1.2. else leaf node found (no dependent members), add association of node -> None. 85 | 86 | :return: A dict which defines the structure of the DAG in the format: 87 | {key : [dependencies]} 88 | Example input: 89 | {'CreateExperimentRequest': ['ExperimentEntityName', 'ExperimentEntityName', 90 | 'ExperimentDescription', 'TagList'], 91 | 'CreateExperimentResponse': ['ExperimentArn'], 92 | 'DeleteExperimentRequest': ['ExperimentEntityName'], 93 | 'DeleteExperimentResponse': ['ExperimentArn']} 94 | """ 95 | graph = {} 96 | 97 | for node, attributes in self.combined_shapes.items(): 98 | if "members" in attributes: 99 | for member, member_attributes in attributes["members"].items(): 100 | # add shapes and not shape attribute 101 | # i.e. ExperimentEntityName taken over ExperimentName 102 | if member_attributes["shape"] in self.combined_shapes.keys(): 103 | node_deps = graph.get(node, []) 104 | # evaluate the member shape and then append to node deps 105 | member_shape = self.combined_shapes[member_attributes["shape"]] 106 | if member_shape["type"] == "list": 107 | node_deps.append(member_shape["member"]["shape"]) 108 | elif member_shape["type"] == "map": 109 | node_deps.append(member_shape["key"]["shape"]) 110 | node_deps.append(member_shape["value"]["shape"]) 111 | else: 112 | node_deps.append(member_attributes["shape"]) 113 | graph[node] = node_deps 114 | else: 115 | graph[node] = None 116 | return graph 117 | 118 | def topological_sort(self): 119 | """ 120 | Performs a topological sort on the DAG to determine the order in which shapes should be generated. 121 | 122 | :return: A list of shape names in the order of topological sort. 123 | """ 124 | graph = self.build_graph() 125 | visited = set() 126 | stack = [] 127 | 128 | def dfs(node): 129 | visited.add(node) 130 | # unless leaf node is reached do dfs 131 | if graph.get(node) is not None: 132 | for neighbor in graph.get(node, []): 133 | if neighbor not in visited: 134 | dfs(neighbor) 135 | stack.append(node) 136 | 137 | for node in graph: 138 | if node not in visited: 139 | dfs(node) 140 | 141 | return stack 142 | 143 | def generate_data_class_for_shape(self, shape): 144 | """ 145 | Generates a data class for a given shape. 146 | 147 | :param shape: The name of the shape. 148 | :return: The generated data class as a string. 149 | """ 150 | class_name = shape 151 | init_data = self.shapes_extractor.generate_data_shape_string_body( 152 | shape, self.resources_plan, add_shapes_prefix=False 153 | ) 154 | try: 155 | data_class_members = add_indent(init_data, 4) 156 | except Exception: 157 | print("DEBUG HELP\n", init_data) 158 | raise 159 | return SHAPE_CLASS_TEMPLATE.format( 160 | class_name=class_name + "(Base)", 161 | data_class_members=data_class_members, 162 | docstring=self._generate_doc_string_for_shape(shape), 163 | class_name_snake=pascal_to_snake(class_name), 164 | ) 165 | 166 | def _generate_doc_string_for_shape(self, shape): 167 | """ 168 | Generates the docstring for a given shape. 169 | 170 | :param shape: The name of the shape. 171 | :return: The generated docstring as a string. 
172 | """ 173 | shape_dict = self.combined_shapes[shape] 174 | 175 | docstring = f"{shape}" 176 | if "documentation" in shape_dict: 177 | docstring += f"\n {shape_dict['documentation']}" 178 | 179 | docstring += "\n\nAttributes" 180 | docstring += "\n----------------------" 181 | 182 | if "members" in shape_dict: 183 | for member, member_attributes in shape_dict["members"].items(): 184 | # Add alias if field name is json, to address the Bug: https://github.com/aws/sagemaker-python-sdk/issues/4944 185 | if shape in SHAPES_WITH_JSON_FIELD_ALIAS and member == "Json": 186 | updated_member = "JsonFormat" 187 | docstring += f"\n{convert_to_snake_case(updated_member)}" 188 | else: 189 | docstring += f"\n{convert_to_snake_case(member)}" 190 | 191 | if "documentation" in member_attributes: 192 | docstring += f": {member_attributes['documentation']}" 193 | 194 | docstring = remove_html_tags(docstring) 195 | return escape_special_rst_characters(docstring) 196 | 197 | def generate_license(self): 198 | """ 199 | Generates the license string. 200 | 201 | Returns: 202 | str: The license string. 203 | """ 204 | return LICENCES_STRING 205 | 206 | def generate_imports(self): 207 | """ 208 | Generates the import statements for the generated shape classes. 209 | 210 | :return: The generated import statements as a string. 211 | """ 212 | imports = "import datetime\n" 213 | imports += "\n" 214 | imports += "from pydantic import BaseModel, ConfigDict, Field\n" 215 | imports += "from typing import List, Dict, Optional, Any, Union\n" 216 | imports += "from sagemaker_core.main.utils import Unassigned" 217 | imports += "\n" 218 | return imports 219 | 220 | def generate_base_class(self): 221 | """ 222 | Generates the base class for the shape classes. 223 | 224 | :return: The generated base class as a string. 225 | """ 226 | # more customizations would be added later 227 | return SHAPE_BASE_CLASS_TEMPLATE.format( 228 | class_name="Base(BaseModel)", 229 | ) 230 | 231 | def _filter_input_output_shapes(self, shape): 232 | """ 233 | Filters out shapes that are used as input or output for operations. 234 | 235 | :param shape: The name of the shape. 236 | :return: True if the shape should be generated, False otherwise. 237 | """ 238 | operation_input_output_shapes = set() 239 | for operation, attrs in self.combined_operations.items(): 240 | if attrs.get("input"): 241 | operation_input_output_shapes.add(attrs["input"]["shape"]) 242 | if attrs.get("output"): 243 | operation_input_output_shapes.add(attrs["output"]["shape"]) 244 | 245 | required_output_shapes = set() 246 | for resource_name in self.resource_methods: 247 | for method in self.resource_methods[resource_name].values(): 248 | required_output_shapes.add(method.return_type) 249 | 250 | if shape in operation_input_output_shapes and shape not in required_output_shapes: 251 | return False 252 | return True 253 | 254 | def generate_shapes( 255 | self, 256 | output_folder=GENERATED_CLASSES_LOCATION, 257 | file_name=SHAPES_CODEGEN_FILE_NAME, 258 | ) -> None: 259 | """ 260 | Generates the shape classes and writes them to the specified output folder. 261 | 262 | :param output_folder: The path to the output folder. 
263 | """ 264 | # Check if the output folder exists, if not, create it 265 | os.makedirs(output_folder, exist_ok=True) 266 | 267 | # Create the full path for the output file 268 | output_file = os.path.join(output_folder, file_name) 269 | 270 | # Open the output file 271 | with open(output_file, "w") as file: 272 | # Generate and write the license to the file 273 | license = self.generate_license() 274 | file.write(license) 275 | 276 | # Generate and write the imports to the file 277 | imports = self.generate_imports() 278 | file.write(imports) 279 | 280 | # Generate and write Base Class 281 | base_class = self.generate_base_class() 282 | file.write(base_class) 283 | file.write("\n\n") 284 | 285 | # Iterate through shapes in topological order and generate classes 286 | topological_order = self.topological_sort() 287 | for shape in topological_order: 288 | 289 | # Extract the necessary data for the shape 290 | if self._filter_input_output_shapes(shape): 291 | shape_dict = self.combined_shapes[shape] 292 | shape_type = shape_dict["type"] 293 | if shape_type == "structure": 294 | 295 | # Generate and write data class for shape 296 | shape_class = self.generate_data_class_for_shape(shape) 297 | file.write(shape_class) 298 | -------------------------------------------------------------------------------- /sample/sagemaker/2017-07-24/paginators-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "pagination": { 3 | "ListTrainingJobs": { 4 | "result_key": "TrainingJobSummaries", 5 | "output_token": "NextToken", 6 | "input_token": "NextToken", 7 | "limit_key": "MaxResults" 8 | }, 9 | "ListEndpoints": { 10 | "result_key": "Endpoints", 11 | "output_token": "NextToken", 12 | "input_token": "NextToken", 13 | "limit_key": "MaxResults" 14 | }, 15 | "ListEndpointConfigs": { 16 | "result_key": "EndpointConfigs", 17 | "output_token": "NextToken", 18 | "input_token": "NextToken", 19 | "limit_key": "MaxResults" 20 | }, 21 | "ListNotebookInstances": { 22 | "result_key": "NotebookInstances", 23 | "output_token": "NextToken", 24 | "input_token": "NextToken", 25 | "limit_key": "MaxResults" 26 | }, 27 | "ListTags": { 28 | "result_key": "Tags", 29 | "output_token": "NextToken", 30 | "input_token": "NextToken", 31 | "limit_key": "MaxResults" 32 | }, 33 | "ListModels": { 34 | "result_key": "Models", 35 | "output_token": "NextToken", 36 | "input_token": "NextToken", 37 | "limit_key": "MaxResults" 38 | }, 39 | "ListAlgorithms": { 40 | "input_token": "NextToken", 41 | "limit_key": "MaxResults", 42 | "output_token": "NextToken", 43 | "result_key": "AlgorithmSummaryList" 44 | }, 45 | "ListCodeRepositories": { 46 | "input_token": "NextToken", 47 | "limit_key": "MaxResults", 48 | "output_token": "NextToken", 49 | "result_key": "CodeRepositorySummaryList" 50 | }, 51 | "ListCompilationJobs": { 52 | "input_token": "NextToken", 53 | "limit_key": "MaxResults", 54 | "output_token": "NextToken", 55 | "result_key": "CompilationJobSummaries" 56 | }, 57 | "ListHyperParameterTuningJobs": { 58 | "input_token": "NextToken", 59 | "limit_key": "MaxResults", 60 | "output_token": "NextToken", 61 | "result_key": "HyperParameterTuningJobSummaries" 62 | }, 63 | "ListLabelingJobs": { 64 | "input_token": "NextToken", 65 | "limit_key": "MaxResults", 66 | "output_token": "NextToken", 67 | "result_key": "LabelingJobSummaryList" 68 | }, 69 | "ListLabelingJobsForWorkteam": { 70 | "input_token": "NextToken", 71 | "limit_key": "MaxResults", 72 | "output_token": "NextToken", 73 | "result_key": 
"LabelingJobSummaryList" 74 | }, 75 | "ListModelPackages": { 76 | "input_token": "NextToken", 77 | "limit_key": "MaxResults", 78 | "output_token": "NextToken", 79 | "result_key": "ModelPackageSummaryList" 80 | }, 81 | "ListNotebookInstanceLifecycleConfigs": { 82 | "input_token": "NextToken", 83 | "limit_key": "MaxResults", 84 | "output_token": "NextToken", 85 | "result_key": "NotebookInstanceLifecycleConfigs" 86 | }, 87 | "ListSubscribedWorkteams": { 88 | "input_token": "NextToken", 89 | "limit_key": "MaxResults", 90 | "output_token": "NextToken", 91 | "result_key": "SubscribedWorkteams" 92 | }, 93 | "ListTrainingJobsForHyperParameterTuningJob": { 94 | "input_token": "NextToken", 95 | "limit_key": "MaxResults", 96 | "output_token": "NextToken", 97 | "result_key": "TrainingJobSummaries" 98 | }, 99 | "ListTransformJobs": { 100 | "input_token": "NextToken", 101 | "limit_key": "MaxResults", 102 | "output_token": "NextToken", 103 | "result_key": "TransformJobSummaries" 104 | }, 105 | "ListWorkteams": { 106 | "input_token": "NextToken", 107 | "limit_key": "MaxResults", 108 | "output_token": "NextToken", 109 | "result_key": "Workteams" 110 | }, 111 | "Search": { 112 | "input_token": "NextToken", 113 | "limit_key": "MaxResults", 114 | "output_token": "NextToken", 115 | "result_key": "Results" 116 | }, 117 | "ListApps": { 118 | "input_token": "NextToken", 119 | "output_token": "NextToken", 120 | "limit_key": "MaxResults", 121 | "result_key": "Apps" 122 | }, 123 | "ListAutoMLJobs": { 124 | "input_token": "NextToken", 125 | "output_token": "NextToken", 126 | "limit_key": "MaxResults", 127 | "result_key": "AutoMLJobSummaries" 128 | }, 129 | "ListCandidatesForAutoMLJob": { 130 | "input_token": "NextToken", 131 | "output_token": "NextToken", 132 | "limit_key": "MaxResults", 133 | "result_key": "Candidates" 134 | }, 135 | "ListDomains": { 136 | "input_token": "NextToken", 137 | "output_token": "NextToken", 138 | "limit_key": "MaxResults", 139 | "result_key": "Domains" 140 | }, 141 | "ListExperiments": { 142 | "input_token": "NextToken", 143 | "output_token": "NextToken", 144 | "limit_key": "MaxResults", 145 | "result_key": "ExperimentSummaries" 146 | }, 147 | "ListFlowDefinitions": { 148 | "input_token": "NextToken", 149 | "output_token": "NextToken", 150 | "limit_key": "MaxResults", 151 | "result_key": "FlowDefinitionSummaries" 152 | }, 153 | "ListHumanTaskUis": { 154 | "input_token": "NextToken", 155 | "output_token": "NextToken", 156 | "limit_key": "MaxResults", 157 | "result_key": "HumanTaskUiSummaries" 158 | }, 159 | "ListMonitoringExecutions": { 160 | "input_token": "NextToken", 161 | "output_token": "NextToken", 162 | "limit_key": "MaxResults", 163 | "result_key": "MonitoringExecutionSummaries" 164 | }, 165 | "ListMonitoringSchedules": { 166 | "input_token": "NextToken", 167 | "output_token": "NextToken", 168 | "limit_key": "MaxResults", 169 | "result_key": "MonitoringScheduleSummaries" 170 | }, 171 | "ListProcessingJobs": { 172 | "input_token": "NextToken", 173 | "output_token": "NextToken", 174 | "limit_key": "MaxResults", 175 | "result_key": "ProcessingJobSummaries" 176 | }, 177 | "ListTrialComponents": { 178 | "input_token": "NextToken", 179 | "output_token": "NextToken", 180 | "limit_key": "MaxResults", 181 | "result_key": "TrialComponentSummaries" 182 | }, 183 | "ListTrials": { 184 | "input_token": "NextToken", 185 | "output_token": "NextToken", 186 | "limit_key": "MaxResults", 187 | "result_key": "TrialSummaries" 188 | }, 189 | "ListUserProfiles": { 190 | "input_token": "NextToken", 191 | 
"output_token": "NextToken", 192 | "limit_key": "MaxResults", 193 | "result_key": "UserProfiles" 194 | }, 195 | "ListWorkforces": { 196 | "input_token": "NextToken", 197 | "output_token": "NextToken", 198 | "limit_key": "MaxResults", 199 | "result_key": "Workforces" 200 | }, 201 | "ListImageVersions": { 202 | "input_token": "NextToken", 203 | "output_token": "NextToken", 204 | "limit_key": "MaxResults", 205 | "result_key": "ImageVersions" 206 | }, 207 | "ListImages": { 208 | "input_token": "NextToken", 209 | "output_token": "NextToken", 210 | "limit_key": "MaxResults", 211 | "result_key": "Images" 212 | }, 213 | "ListActions": { 214 | "input_token": "NextToken", 215 | "output_token": "NextToken", 216 | "limit_key": "MaxResults", 217 | "result_key": "ActionSummaries" 218 | }, 219 | "ListAppImageConfigs": { 220 | "input_token": "NextToken", 221 | "output_token": "NextToken", 222 | "limit_key": "MaxResults", 223 | "result_key": "AppImageConfigs" 224 | }, 225 | "ListArtifacts": { 226 | "input_token": "NextToken", 227 | "output_token": "NextToken", 228 | "limit_key": "MaxResults", 229 | "result_key": "ArtifactSummaries" 230 | }, 231 | "ListAssociations": { 232 | "input_token": "NextToken", 233 | "output_token": "NextToken", 234 | "limit_key": "MaxResults", 235 | "result_key": "AssociationSummaries" 236 | }, 237 | "ListContexts": { 238 | "input_token": "NextToken", 239 | "output_token": "NextToken", 240 | "limit_key": "MaxResults", 241 | "result_key": "ContextSummaries" 242 | }, 243 | "ListFeatureGroups": { 244 | "input_token": "NextToken", 245 | "output_token": "NextToken", 246 | "limit_key": "MaxResults", 247 | "result_key": "FeatureGroupSummaries" 248 | }, 249 | "ListModelPackageGroups": { 250 | "input_token": "NextToken", 251 | "output_token": "NextToken", 252 | "limit_key": "MaxResults", 253 | "result_key": "ModelPackageGroupSummaryList" 254 | }, 255 | "ListPipelineExecutionSteps": { 256 | "input_token": "NextToken", 257 | "output_token": "NextToken", 258 | "limit_key": "MaxResults", 259 | "result_key": "PipelineExecutionSteps" 260 | }, 261 | "ListPipelineExecutions": { 262 | "input_token": "NextToken", 263 | "output_token": "NextToken", 264 | "limit_key": "MaxResults", 265 | "result_key": "PipelineExecutionSummaries" 266 | }, 267 | "ListPipelineParametersForExecution": { 268 | "input_token": "NextToken", 269 | "output_token": "NextToken", 270 | "limit_key": "MaxResults", 271 | "result_key": "PipelineParameters" 272 | }, 273 | "ListPipelines": { 274 | "input_token": "NextToken", 275 | "output_token": "NextToken", 276 | "limit_key": "MaxResults", 277 | "result_key": "PipelineSummaries" 278 | }, 279 | "ListDataQualityJobDefinitions": { 280 | "input_token": "NextToken", 281 | "output_token": "NextToken", 282 | "limit_key": "MaxResults", 283 | "result_key": "JobDefinitionSummaries" 284 | }, 285 | "ListDeviceFleets": { 286 | "input_token": "NextToken", 287 | "output_token": "NextToken", 288 | "limit_key": "MaxResults", 289 | "result_key": "DeviceFleetSummaries" 290 | }, 291 | "ListDevices": { 292 | "input_token": "NextToken", 293 | "output_token": "NextToken", 294 | "limit_key": "MaxResults", 295 | "result_key": "DeviceSummaries" 296 | }, 297 | "ListEdgePackagingJobs": { 298 | "input_token": "NextToken", 299 | "output_token": "NextToken", 300 | "limit_key": "MaxResults", 301 | "result_key": "EdgePackagingJobSummaries" 302 | }, 303 | "ListModelBiasJobDefinitions": { 304 | "input_token": "NextToken", 305 | "output_token": "NextToken", 306 | "limit_key": "MaxResults", 307 | "result_key": 
"JobDefinitionSummaries" 308 | }, 309 | "ListModelExplainabilityJobDefinitions": { 310 | "input_token": "NextToken", 311 | "output_token": "NextToken", 312 | "limit_key": "MaxResults", 313 | "result_key": "JobDefinitionSummaries" 314 | }, 315 | "ListModelQualityJobDefinitions": { 316 | "input_token": "NextToken", 317 | "output_token": "NextToken", 318 | "limit_key": "MaxResults", 319 | "result_key": "JobDefinitionSummaries" 320 | }, 321 | "ListStudioLifecycleConfigs": { 322 | "input_token": "NextToken", 323 | "output_token": "NextToken", 324 | "limit_key": "MaxResults", 325 | "result_key": "StudioLifecycleConfigs" 326 | }, 327 | "ListInferenceRecommendationsJobs": { 328 | "input_token": "NextToken", 329 | "output_token": "NextToken", 330 | "limit_key": "MaxResults", 331 | "result_key": "InferenceRecommendationsJobs" 332 | }, 333 | "ListLineageGroups": { 334 | "input_token": "NextToken", 335 | "output_token": "NextToken", 336 | "limit_key": "MaxResults", 337 | "result_key": "LineageGroupSummaries" 338 | }, 339 | "ListModelMetadata": { 340 | "input_token": "NextToken", 341 | "output_token": "NextToken", 342 | "limit_key": "MaxResults", 343 | "result_key": "ModelMetadataSummaries" 344 | }, 345 | "ListEdgeDeploymentPlans": { 346 | "input_token": "NextToken", 347 | "output_token": "NextToken", 348 | "limit_key": "MaxResults", 349 | "result_key": "EdgeDeploymentPlanSummaries" 350 | }, 351 | "ListStageDevices": { 352 | "input_token": "NextToken", 353 | "output_token": "NextToken", 354 | "limit_key": "MaxResults", 355 | "result_key": "DeviceDeploymentSummaries" 356 | }, 357 | "ListInferenceRecommendationsJobSteps": { 358 | "input_token": "NextToken", 359 | "output_token": "NextToken", 360 | "limit_key": "MaxResults", 361 | "result_key": "Steps" 362 | }, 363 | "ListInferenceExperiments": { 364 | "input_token": "NextToken", 365 | "output_token": "NextToken", 366 | "limit_key": "MaxResults", 367 | "result_key": "InferenceExperiments" 368 | }, 369 | "ListModelCardExportJobs": { 370 | "input_token": "NextToken", 371 | "output_token": "NextToken", 372 | "limit_key": "MaxResults", 373 | "result_key": "ModelCardExportJobSummaries" 374 | }, 375 | "ListModelCardVersions": { 376 | "input_token": "NextToken", 377 | "output_token": "NextToken", 378 | "limit_key": "MaxResults", 379 | "result_key": "ModelCardVersionSummaryList" 380 | }, 381 | "ListModelCards": { 382 | "input_token": "NextToken", 383 | "output_token": "NextToken", 384 | "limit_key": "MaxResults", 385 | "result_key": "ModelCardSummaries" 386 | }, 387 | "ListMonitoringAlertHistory": { 388 | "input_token": "NextToken", 389 | "output_token": "NextToken", 390 | "limit_key": "MaxResults", 391 | "result_key": "MonitoringAlertHistory" 392 | }, 393 | "ListMonitoringAlerts": { 394 | "input_token": "NextToken", 395 | "output_token": "NextToken", 396 | "limit_key": "MaxResults", 397 | "result_key": "MonitoringAlertSummaries" 398 | }, 399 | "ListSpaces": { 400 | "input_token": "NextToken", 401 | "output_token": "NextToken", 402 | "limit_key": "MaxResults", 403 | "result_key": "Spaces" 404 | }, 405 | "ListAliases": { 406 | "input_token": "NextToken", 407 | "output_token": "NextToken", 408 | "limit_key": "MaxResults", 409 | "result_key": "SageMakerImageVersionAliases" 410 | }, 411 | "ListResourceCatalogs": { 412 | "input_token": "NextToken", 413 | "output_token": "NextToken", 414 | "limit_key": "MaxResults", 415 | "result_key": "ResourceCatalogs" 416 | }, 417 | "ListClusterNodes": { 418 | "input_token": "NextToken", 419 | "output_token": "NextToken", 420 | 
"limit_key": "MaxResults", 421 | "result_key": "ClusterNodeSummaries" 422 | }, 423 | "ListClusters": { 424 | "input_token": "NextToken", 425 | "output_token": "NextToken", 426 | "limit_key": "MaxResults", 427 | "result_key": "ClusterSummaries" 428 | }, 429 | "ListInferenceComponents": { 430 | "input_token": "NextToken", 431 | "output_token": "NextToken", 432 | "limit_key": "MaxResults", 433 | "result_key": "InferenceComponents" 434 | } 435 | } 436 | } 437 | -------------------------------------------------------------------------------- /sample/sagemaker-edge/2020-09-23/endpoint-rule-set-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.0", 3 | "parameters": { 4 | "Region": { 5 | "builtIn": "AWS::Region", 6 | "required": false, 7 | "documentation": "The AWS region used to dispatch the request.", 8 | "type": "String" 9 | }, 10 | "UseDualStack": { 11 | "builtIn": "AWS::UseDualStack", 12 | "required": true, 13 | "default": false, 14 | "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", 15 | "type": "Boolean" 16 | }, 17 | "UseFIPS": { 18 | "builtIn": "AWS::UseFIPS", 19 | "required": true, 20 | "default": false, 21 | "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", 22 | "type": "Boolean" 23 | }, 24 | "Endpoint": { 25 | "builtIn": "SDK::Endpoint", 26 | "required": false, 27 | "documentation": "Override the endpoint used to send this request", 28 | "type": "String" 29 | } 30 | }, 31 | "rules": [ 32 | { 33 | "conditions": [ 34 | { 35 | "fn": "isSet", 36 | "argv": [ 37 | { 38 | "ref": "Endpoint" 39 | } 40 | ] 41 | } 42 | ], 43 | "type": "tree", 44 | "rules": [ 45 | { 46 | "conditions": [ 47 | { 48 | "fn": "booleanEquals", 49 | "argv": [ 50 | { 51 | "ref": "UseFIPS" 52 | }, 53 | true 54 | ] 55 | } 56 | ], 57 | "error": "Invalid Configuration: FIPS and custom endpoint are not supported", 58 | "type": "error" 59 | }, 60 | { 61 | "conditions": [ 62 | { 63 | "fn": "booleanEquals", 64 | "argv": [ 65 | { 66 | "ref": "UseDualStack" 67 | }, 68 | true 69 | ] 70 | } 71 | ], 72 | "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", 73 | "type": "error" 74 | }, 75 | { 76 | "conditions": [], 77 | "endpoint": { 78 | "url": { 79 | "ref": "Endpoint" 80 | }, 81 | "properties": {}, 82 | "headers": {} 83 | }, 84 | "type": "endpoint" 85 | } 86 | ] 87 | }, 88 | { 89 | "conditions": [ 90 | { 91 | "fn": "isSet", 92 | "argv": [ 93 | { 94 | "ref": "Region" 95 | } 96 | ] 97 | } 98 | ], 99 | "type": "tree", 100 | "rules": [ 101 | { 102 | "conditions": [ 103 | { 104 | "fn": "aws.partition", 105 | "argv": [ 106 | { 107 | "ref": "Region" 108 | } 109 | ], 110 | "assign": "PartitionResult" 111 | } 112 | ], 113 | "type": "tree", 114 | "rules": [ 115 | { 116 | "conditions": [ 117 | { 118 | "fn": "booleanEquals", 119 | "argv": [ 120 | { 121 | "ref": "UseFIPS" 122 | }, 123 | true 124 | ] 125 | }, 126 | { 127 | "fn": "booleanEquals", 128 | "argv": [ 129 | { 130 | "ref": "UseDualStack" 131 | }, 132 | true 133 | ] 134 | } 135 | ], 136 | "type": "tree", 137 | "rules": [ 138 | { 139 | "conditions": [ 140 | { 141 | "fn": "booleanEquals", 142 | "argv": [ 143 | true, 144 | { 145 | "fn": "getAttr", 146 | "argv": [ 147 | { 148 | "ref": "PartitionResult" 149 | }, 150 | "supportsFIPS" 151 | ] 152 
| } 153 | ] 154 | }, 155 | { 156 | "fn": "booleanEquals", 157 | "argv": [ 158 | true, 159 | { 160 | "fn": "getAttr", 161 | "argv": [ 162 | { 163 | "ref": "PartitionResult" 164 | }, 165 | "supportsDualStack" 166 | ] 167 | } 168 | ] 169 | } 170 | ], 171 | "type": "tree", 172 | "rules": [ 173 | { 174 | "conditions": [], 175 | "endpoint": { 176 | "url": "https://edge.sagemaker-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", 177 | "properties": {}, 178 | "headers": {} 179 | }, 180 | "type": "endpoint" 181 | } 182 | ] 183 | }, 184 | { 185 | "conditions": [], 186 | "error": "FIPS and DualStack are enabled, but this partition does not support one or both", 187 | "type": "error" 188 | } 189 | ] 190 | }, 191 | { 192 | "conditions": [ 193 | { 194 | "fn": "booleanEquals", 195 | "argv": [ 196 | { 197 | "ref": "UseFIPS" 198 | }, 199 | true 200 | ] 201 | } 202 | ], 203 | "type": "tree", 204 | "rules": [ 205 | { 206 | "conditions": [ 207 | { 208 | "fn": "booleanEquals", 209 | "argv": [ 210 | true, 211 | { 212 | "fn": "getAttr", 213 | "argv": [ 214 | { 215 | "ref": "PartitionResult" 216 | }, 217 | "supportsFIPS" 218 | ] 219 | } 220 | ] 221 | } 222 | ], 223 | "type": "tree", 224 | "rules": [ 225 | { 226 | "conditions": [], 227 | "endpoint": { 228 | "url": "https://edge.sagemaker-fips.{Region}.{PartitionResult#dnsSuffix}", 229 | "properties": {}, 230 | "headers": {} 231 | }, 232 | "type": "endpoint" 233 | } 234 | ] 235 | }, 236 | { 237 | "conditions": [], 238 | "error": "FIPS is enabled but this partition does not support FIPS", 239 | "type": "error" 240 | } 241 | ] 242 | }, 243 | { 244 | "conditions": [ 245 | { 246 | "fn": "booleanEquals", 247 | "argv": [ 248 | { 249 | "ref": "UseDualStack" 250 | }, 251 | true 252 | ] 253 | } 254 | ], 255 | "type": "tree", 256 | "rules": [ 257 | { 258 | "conditions": [ 259 | { 260 | "fn": "booleanEquals", 261 | "argv": [ 262 | true, 263 | { 264 | "fn": "getAttr", 265 | "argv": [ 266 | { 267 | "ref": "PartitionResult" 268 | }, 269 | "supportsDualStack" 270 | ] 271 | } 272 | ] 273 | } 274 | ], 275 | "type": "tree", 276 | "rules": [ 277 | { 278 | "conditions": [], 279 | "endpoint": { 280 | "url": "https://edge.sagemaker.{Region}.{PartitionResult#dualStackDnsSuffix}", 281 | "properties": {}, 282 | "headers": {} 283 | }, 284 | "type": "endpoint" 285 | } 286 | ] 287 | }, 288 | { 289 | "conditions": [], 290 | "error": "DualStack is enabled but this partition does not support DualStack", 291 | "type": "error" 292 | } 293 | ] 294 | }, 295 | { 296 | "conditions": [], 297 | "endpoint": { 298 | "url": "https://edge.sagemaker.{Region}.{PartitionResult#dnsSuffix}", 299 | "properties": {}, 300 | "headers": {} 301 | }, 302 | "type": "endpoint" 303 | } 304 | ] 305 | } 306 | ] 307 | }, 308 | { 309 | "conditions": [], 310 | "error": "Invalid Configuration: Missing Region", 311 | "type": "error" 312 | } 313 | ] 314 | } --------------------------------------------------------------------------------
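The code generation order in src/sagemaker_core/tools/shapes_codegen.py above is driven by build_graph and topological_sort: structure shapes point at the shapes of their members (list and map members are unwrapped to their element shapes), and the DFS-based sort emits a shape only after everything it depends on. The following sketch is not a repository file; it is a minimal, self-contained reduction of those two methods run against a hypothetical Botocore-style shape map (the shape definitions below are illustrative, not repository data):

# Minimal standalone reduction of ShapesCodeGen.build_graph and
# ShapesCodeGen.topological_sort; toy shapes only.
combined_shapes = {
    "Tag": {"type": "structure", "members": {"Key": {"shape": "TagKey"}}},
    "TagKey": {"type": "string"},
    "TagList": {"type": "list", "member": {"shape": "Tag"}},
    "CreateExperimentRequest": {
        "type": "structure",
        "members": {"Tags": {"shape": "TagList"}},
    },
}

def build_graph(shapes):
    graph = {}
    for node, attributes in shapes.items():
        if "members" in attributes:
            deps = graph.get(node, [])
            for member_attributes in attributes["members"].values():
                member_shape = shapes[member_attributes["shape"]]
                # list/map members depend on their inner element shapes
                if member_shape["type"] == "list":
                    deps.append(member_shape["member"]["shape"])
                elif member_shape["type"] == "map":
                    deps.append(member_shape["key"]["shape"])
                    deps.append(member_shape["value"]["shape"])
                else:
                    deps.append(member_attributes["shape"])
            graph[node] = deps
        else:
            graph[node] = None  # leaf shape: no dependent members
    return graph

def topological_sort(graph):
    visited, stack = set(), []

    def dfs(node):
        visited.add(node)
        if graph.get(node) is not None:
            for neighbor in graph[node]:
                if neighbor not in visited:
                    dfs(neighbor)
        stack.append(node)  # emitted only after all of its dependencies

    for node in graph:
        if node not in visited:
            dfs(node)
    return stack

print(topological_sort(build_graph(combined_shapes)))
# -> ['TagKey', 'Tag', 'TagList', 'CreateExperimentRequest']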
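The paginator definitions in sample/sagemaker/2017-07-24/paginators-1.json tell botocore how to page each List* operation: input_token/output_token name the continuation token, limit_key bounds the page size, and result_key names the list of items to collect from each page. A usage sketch for the ListTrainingJobs entry (assuming boto3 is installed and AWS credentials and a region are configured):

import boto3

client = boto3.client("sagemaker")
# Backed by the "ListTrainingJobs" paginator entry: NextToken is threaded
# through automatically, MaxResults (the limit_key) bounds each page, and
# TrainingJobSummaries (the result_key) holds the items on every page.
paginator = client.get_paginator("list_training_jobs")
for page in paginator.paginate(PaginationConfig={"PageSize": 10}):
    for summary in page["TrainingJobSummaries"]:
        print(summary["TrainingJobName"])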
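The endpoint rule set in sample/sagemaker-edge/2020-09-23/endpoint-rule-set-1.json resolves the service endpoint from the Region, UseFIPS, and UseDualStack parameters; botocore evaluates these rules internally. The sketch below is only an illustration of how the four non-error URL templates combine for the standard aws partition (whose dnsSuffix is amazonaws.com and dualStackDnsSuffix is api.aws); it ignores the custom-endpoint and unsupported-partition error branches:

def resolve_edge_endpoint(region: str, use_fips: bool = False, use_dualstack: bool = False) -> str:
    # Mirrors the rule set's URL templates, e.g.
    # "https://edge.sagemaker-fips.{Region}.{PartitionResult#dnsSuffix}".
    host = "edge.sagemaker-fips" if use_fips else "edge.sagemaker"
    suffix = "api.aws" if use_dualstack else "amazonaws.com"
    return f"https://{host}.{region}.{suffix}"

assert resolve_edge_endpoint("us-west-2") == "https://edge.sagemaker.us-west-2.amazonaws.com"
assert resolve_edge_endpoint("us-west-2", use_fips=True) == (
    "https://edge.sagemaker-fips.us-west-2.amazonaws.com"
)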