├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── components ├── artifacts │ ├── aws.sagemaker.edgeManager │ │ └── 0.1.0 │ │ │ └── sagemaker_edge_config.json │ ├── aws.sagemaker.edgeManagerPythonClient │ │ └── 0.1.0 │ │ │ ├── edge_manager_python_client.py │ │ │ ├── rush_hour.jpg │ │ │ └── rush_hour.jpg.license │ ├── com.model.darknet │ │ └── 0.1.0 │ │ │ ├── rush_hour.jpg │ │ │ └── rush_hour.jpg.license │ └── com.model.mxnet_gluoncv_ssd │ │ └── 0.1.0 │ │ ├── rush_hour.jpg │ │ └── rush_hour.jpg.license └── recipes │ ├── aws.sagemaker.edgeManager-0.1.0.yaml │ ├── aws.sagemaker.edgeManagerPythonClient-0.1.0.yaml │ ├── com.model.darknet-0.1.0.yaml │ ├── com.model.keras-0.1.0.yaml │ └── com.model.mxnet_gluoncv_ssd-0.1.0.yaml ├── examples ├── mlops-console-example │ ├── CameraStreamimx8mplus.md │ ├── ImageClassificationimx8mqevkorvirtual.md │ ├── README.md │ ├── components │ │ ├── artifacts │ │ │ ├── aws.sagemaker.edgeManagerClientCameraIntegration │ │ │ │ └── 0.1.0 │ │ │ │ │ ├── agent_pb2.py │ │ │ │ │ ├── agent_pb2_grpc.py │ │ │ │ │ └── camera_integration_edgemanger_client.py │ │ │ └── aws.sagemaker.edgeManagerPythonClient │ │ │ │ └── 0.1.0 │ │ │ │ ├── agent_pb2.py │ │ │ │ ├── agent_pb2_grpc.py │ │ │ │ ├── dog.jpeg │ │ │ │ ├── edge_manager_python_client.py │ │ │ │ ├── frog.jpeg │ │ │ │ ├── rainbow.jpeg │ │ │ │ └── tomato.jpeg │ │ └── recipes │ │ │ ├── aws.sagemaker.edgeManagerClientCameraIntegration-0.1.0.yaml │ │ │ └── aws.sagemaker.edgeManagerPythonClient-0.1.0.yaml │ ├── images │ │ ├── Architecture.png │ │ ├── GGCore.png │ │ ├── applicationcomponent-imx8mplus.png │ │ ├── applicationcomponent.png │ │ ├── applicationcomponentconfiguration.png │ │ ├── applicationcomponentconfigurationcapture-imx8mplus.png │ │ ├── compilationjob-mobilenetv2.png │ │ ├── completedneojob.png │ │ ├── configureemcomponent.png │ │ ├── createdeployment.png │ │ ├── createedgepackagingjob-imx8mplus.png │ │ ├── createedgepackagingjob2-imx8mplus.png │ │ ├── createedgepackagingjob2.png │ │ ├── createedgepackagingjob2.png.png │ │ ├── createedgepackingjob.png │ │ ├── createemrecipe.png │ │ ├── deployableemcomponent.png │ │ ├── dog.jpeg │ │ ├── edgemanagerfleetoutput.png │ │ ├── edgemanagerpackagingiamrole.png │ │ ├── emconfiguration.png │ │ ├── emdevicefleet.png │ │ ├── emdevicefleet2.png │ │ ├── frog.jpeg │ │ ├── healthycore-imx8mplus.png │ │ ├── healthycore.png │ │ ├── mlops.png │ │ ├── mobilenetv2-gg-component-imx8mplus.png │ │ ├── modelandapplicationdeployment-imx8mplus.png │ │ ├── modelandapplicationdeployment.png │ │ ├── mqttresults-imx8mplus.png │ │ ├── mqttresults.png │ │ ├── neocompilationjob.png │ │ ├── rainbow.jpeg │ │ ├── registerdevice-imx8mplus.png │ │ ├── registerdevice.png │ │ ├── registerdevice2.png │ │ ├── s3compiledmodel.png │ │ ├── s3compiledmodel8mplus.png │ │ ├── s3inferenceresults-imx8mplus.png │ │ ├── s3inferenceresults.png │ │ ├── s3packagedjob.png │ │ ├── tomato.jpeg │ │ ├── trainingjob.png │ │ ├── viewdevicestatus.png │ │ └── viewfleetstatus.png │ ├── model-training │ │ └── Image-classification-fulltraining-highlevel.ipynb │ └── scripts │ │ └── setupresources.sh └── mxnet_gluon_ssd_lambda_function │ ├── README.md │ ├── greengrass_lambda.py │ ├── requirements.txt │ └── sagemaker_edge_example.ipynb └── scripts ├── add_agent_artifact.sh ├── compile_add_python_stub_artifacts.sh ├── create_device_fleet_register_device.sh ├── create_neo_compilation_job.sh ├── create_sagemaker_role.sh ├── delete_component.sh ├── download_edge_manager_package.sh ├── 
download_upload_sample_model.sh ├── install-ggv2-ssh-existing-role.sh ├── install-ggv2-ssh.sh ├── package_neo_model.sh └── upload_component_version.sh /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.toptal.com/developers/gitignore/api/pycharm,visualstudiocode,python 3 | # Edit at https://www.toptal.com/developers/gitignore?templates=pycharm,visualstudiocode,python 4 | 5 | ### PyCharm ### 6 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 7 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 8 | 9 | # User-specific stuff 10 | .idea/**/workspace.xml 11 | .idea/**/tasks.xml 12 | .idea/**/usage.statistics.xml 13 | .idea/**/dictionaries 14 | .idea/**/shelf 15 | 16 | # Generated files 17 | .idea/**/contentModel.xml 18 | 19 | # Sensitive or high-churn files 20 | .idea/**/dataSources/ 21 | .idea/**/dataSources.ids 22 | .idea/**/dataSources.local.xml 23 | .idea/**/sqlDataSources.xml 24 | .idea/**/dynamic.xml 25 | .idea/**/uiDesigner.xml 26 | .idea/**/dbnavigator.xml 27 | 28 | # Gradle 29 | .idea/**/gradle.xml 30 | .idea/**/libraries 31 | 32 | # Gradle and Maven with auto-import 33 | # When using Gradle or Maven with auto-import, you should exclude module files, 34 | # since they will be recreated, and may cause churn. Uncomment if using 35 | # auto-import. 36 | # .idea/artifacts 37 | # .idea/compiler.xml 38 | # .idea/jarRepositories.xml 39 | # .idea/modules.xml 40 | # .idea/*.iml 41 | # .idea/modules 42 | # *.iml 43 | # *.ipr 44 | 45 | # CMake 46 | cmake-build-*/ 47 | 48 | # Mongo Explorer plugin 49 | .idea/**/mongoSettings.xml 50 | 51 | # File-based project format 52 | *.iws 53 | 54 | # IntelliJ 55 | out/ 56 | 57 | # mpeltonen/sbt-idea plugin 58 | .idea_modules/ 59 | 60 | # JIRA plugin 61 | atlassian-ide-plugin.xml 62 | 63 | # Cursive Clojure plugin 64 | .idea/replstate.xml 65 | 66 | # Crashlytics plugin (for Android Studio and IntelliJ) 67 | com_crashlytics_export_strings.xml 68 | crashlytics.properties 69 | crashlytics-build.properties 70 | fabric.properties 71 | 72 | # Editor-based Rest Client 73 | .idea/httpRequests 74 | 75 | # Android studio 3.1+ serialized cache file 76 | .idea/caches/build_file_checksums.ser 77 | 78 | ### PyCharm Patch ### 79 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 80 | 81 | # *.iml 82 | # modules.xml 83 | # .idea/misc.xml 84 | # *.ipr 85 | 86 | # Sonarlint plugin 87 | # https://plugins.jetbrains.com/plugin/7973-sonarlint 88 | .idea/**/sonarlint/ 89 | 90 | # SonarQube Plugin 91 | # https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin 92 | .idea/**/sonarIssues.xml 93 | 94 | # Markdown Navigator plugin 95 | # https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced 96 | .idea/**/markdown-navigator.xml 97 | .idea/**/markdown-navigator-enh.xml 98 | .idea/**/markdown-navigator/ 99 | 100 | # Cache file creation bug 101 | # See https://youtrack.jetbrains.com/issue/JBR-2257 102 | .idea/$CACHE_FILE$ 103 | 104 | # CodeStream plugin 105 | # https://plugins.jetbrains.com/plugin/12206-codestream 106 | .idea/codestream.xml 107 | 108 | ### Python ### 109 | # Byte-compiled / optimized / DLL files 110 | __pycache__/ 111 | *.py[cod] 112 | *$py.class 113 | 114 | # C extensions 115 | *.so 116 | 117 | # Distribution / packaging 118 | .Python 119 | build/ 120 | develop-eggs/ 121 | dist/ 122 | downloads/ 123 | eggs/ 124 | .eggs/ 125 | lib/ 
126 | lib64/ 127 | parts/ 128 | sdist/ 129 | var/ 130 | wheels/ 131 | pip-wheel-metadata/ 132 | share/python-wheels/ 133 | *.egg-info/ 134 | .installed.cfg 135 | *.egg 136 | MANIFEST 137 | 138 | # PyInstaller 139 | # Usually these files are written by a python script from a template 140 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 141 | *.manifest 142 | *.spec 143 | 144 | # Installer logs 145 | pip-log.txt 146 | pip-delete-this-directory.txt 147 | 148 | # Unit test / coverage reports 149 | htmlcov/ 150 | .tox/ 151 | .nox/ 152 | .coverage 153 | .coverage.* 154 | .cache 155 | nosetests.xml 156 | coverage.xml 157 | *.cover 158 | *.py,cover 159 | .hypothesis/ 160 | .pytest_cache/ 161 | pytestdebug.log 162 | 163 | # Translations 164 | *.mo 165 | *.pot 166 | 167 | # Django stuff: 168 | *.log 169 | local_settings.py 170 | db.sqlite3 171 | db.sqlite3-journal 172 | 173 | # Flask stuff: 174 | instance/ 175 | .webassets-cache 176 | 177 | # Scrapy stuff: 178 | .scrapy 179 | 180 | # Sphinx documentation 181 | docs/_build/ 182 | doc/_build/ 183 | 184 | # PyBuilder 185 | target/ 186 | 187 | # Jupyter Notebook 188 | .ipynb_checkpoints 189 | 190 | # IPython 191 | profile_default/ 192 | ipython_config.py 193 | 194 | # pyenv 195 | .python-version 196 | 197 | # pipenv 198 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 199 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 200 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 201 | # install all needed dependencies. 202 | #Pipfile.lock 203 | 204 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 205 | __pypackages__/ 206 | 207 | # Celery stuff 208 | celerybeat-schedule 209 | celerybeat.pid 210 | 211 | # SageMath parsed files 212 | *.sage.py 213 | 214 | # Environments 215 | .env 216 | .venv 217 | env/ 218 | venv/ 219 | ENV/ 220 | env.bak/ 221 | venv.bak/ 222 | pythonenv* 223 | 224 | # Spyder project settings 225 | .spyderproject 226 | .spyproject 227 | 228 | # Rope project settings 229 | .ropeproject 230 | 231 | # mkdocs documentation 232 | /site 233 | 234 | # mypy 235 | .mypy_cache/ 236 | .dmypy.json 237 | dmypy.json 238 | 239 | # Pyre type checker 240 | .pyre/ 241 | 242 | # pytype static type analyzer 243 | .pytype/ 244 | 245 | # profiling data 246 | .prof 247 | 248 | ### VisualStudioCode ### 249 | .vscode/* 250 | !.vscode/tasks.json 251 | !.vscode/launch.json 252 | *.code-workspace 253 | 254 | ### VisualStudioCode Patch ### 255 | # Ignore all local history of files 256 | .history 257 | .ionide 258 | 259 | # End of https://www.toptal.com/developers/gitignore/api/pycharm,visualstudiocode,python 260 | 261 | linux-armv8/ 262 | models/ 263 | .idea/ 264 | 265 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 
5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. 
We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 10 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 11 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 12 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 13 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 14 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 15 | 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## greengrass-v2-sagemaker-edge-manager-python 2 | 3 | This code sample demonstrates how to integrate SageMaker Edge Manager with Greengrass v2 via components. At the end of the sample, you will have a Python-based component running inference at the edge with the SageMaker Edge Manager binary agent, and a YOLOv3 Darknet model. 4 | 5 | In the folder examples/mlops-console-example, there are two additional examples that use SageMaker Edge Manager and Greengrass v2 to implement a machine learning operations flow on NXP or virtual devices. They include step-by-step implementation instructions using the AWS Console. 6 | 7 | ### AWS CLI setup 8 | 9 | Ensure you have the AWS CLI installed, an IAM user with an access key, and a named profile configured: 10 | 11 | * [Installing, updating, and uninstalling the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) 12 | * [Configuration basics](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) 13 | * [Named profiles](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) 14 | 15 | ### Define key variables 16 | > NOTE: In this demo, we are using an NVIDIA Xavier NX / AGX Xavier development kit. Please adjust PLATFORM as required for your device. 17 | 18 | Valid PLATFORM options include linux-armv8, linux-x64, windows-x86, and windows-x64. 19 | 20 | Valid TARGET_DEVICE options can be found here: [Supported edge devices for SageMaker Neo](https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-devices-edge.html).
21 | ```console 22 | export AWS_PROFILE= 23 | export AWS_REGION= 24 | export PLATFORM=linux-armv8 25 | export TARGET_DEVICE=jetson_xavier 26 | export SSH_USER= 27 | export SSH_HOST= 28 | export IOT_THING_NAME= 29 | export BUCKET_NAME= 30 | ``` 31 | 32 | ### Allow executions of scripts 33 | ```console 34 | chmod +x ./scripts/*.sh 35 | ``` 36 | 37 | ### Make S3 bucket for your custom components 38 | ```console 39 | aws s3 mb s3://$BUCKET_NAME --profile $AWS_PROFILE --region $AWS_REGION 40 | ``` 41 | 42 | ### Download/upload Darknet sample model 43 | ```console 44 | ./scripts/download_upload_sample_model.sh $AWS_PROFILE $BUCKET_NAME 45 | ``` 46 | 47 | ### Create SageMaker Execution Role 48 | ```console 49 | export SM_ROLE_NAME=smem-role 50 | ./scripts/create_sagemaker_role.sh $AWS_PROFILE $SM_ROLE_NAME 51 | ``` 52 | 53 | ### Compile model with SageMaker Neo 54 | ```console 55 | ./scripts/create_neo_compilation_job.sh $AWS_PROFILE $BUCKET_NAME $AWS_REGION $SM_ROLE_NAME $TARGET_DEVICE 56 | ``` 57 | 58 | ### Package Neo model in SageMaker Edge Manager 59 | ```console 60 | ./scripts/package_neo_model.sh $AWS_PROFILE $BUCKET_NAME $AWS_REGION $SM_ROLE_NAME 61 | ``` 62 | 63 | ### Download, install, provision, and start Greengrass v2 64 | > NOTE: this is done over SSH to avoid installing AWS CLI and credentials directly on the device. 65 | ```console 66 | ./scripts/install-ggv2-ssh.sh $AWS_PROFILE $SSH_USER $SSH_HOST $AWS_REGION $IOT_THING_NAME 67 | ``` 68 | 69 | ### Download SageMaker Edge Manager archive 70 | ```console 71 | ./scripts/download_edge_manager_package.sh $AWS_PROFILE $PLATFORM 72 | ``` 73 | 74 | ### Add SageMaker Edge Manager agent binary to artifacts 75 | ```console 76 | ./scripts/add_agent_artifact.sh $AWS_PROFILE $PLATFORM 0.1.0 $AWS_REGION 77 | ``` 78 | 79 | ### Compile and add SageMaker Edge Manager Python client stubs to artifacts 80 | ```console 81 | pip install grpcio-tools 82 | pip install --upgrade protobuf 83 | ./scripts/compile_add_python_stub_artifacts.sh $PLATFORM aws.sagemaker.edgeManagerPythonClient 0.1.0 84 | ``` 85 | 86 | ### Create device fleet in SageMaker Edge Manager, and add device to fleet 87 | ```console 88 | ./scripts/create_device_fleet_register_device.sh $AWS_PROFILE $AWS_REGION $BUCKET_NAME $IOT_THING_NAME 89 | ``` 90 | 91 | ### Update recipes 92 | * In all of the recipe files, replace **YOUR_BUCKET_NAME** with the value assigned to $BUCKET_NAME 93 | 94 | 95 | ### Upload your custom components to S3 bucket 96 | ```console 97 | ./scripts/upload_component_version.sh $AWS_PROFILE com.model.darknet 0.1.0 $BUCKET_NAME $AWS_REGION 98 | ./scripts/upload_component_version.sh $AWS_PROFILE aws.sagemaker.edgeManagerPythonClient 0.1.0 $BUCKET_NAME $AWS_REGION 99 | ``` 100 | 101 | > NOTE: you cannot overwrite an existing component version. To upload a new version, you will need to update the version number in the artifact directory, the recipe file name, and the version numbers in the recipe file. 102 | > As an alternative, you can also delete a specific component version. 
For this, use the following command: 103 | ```console 104 | ./delete_component.sh $AWS_PROFILE $AWS_REGION 105 | ``` 106 | 107 | ## Create/Update your Greengrass v2 deployment 108 | 109 | Create a new Greengrass v2 deployment, including the following components: 110 | * com.model.darknet (v0.1.0) 111 | * aws.greengrass.SageMakerEdgeManager (>=1.0.2) 112 | * aws.sagemaker.edgeManagerPythonClient (v0.1.0) 113 | 114 | ### Configure SageMaker Edge Manager public component 115 | 116 | Configure the **aws.greengrass.SageMakerEdgeManager** component, and use the following JSON as the **Configuration to merge** value. Be sure to update the ```BucketName``` attribute. 117 | 118 | ```json 119 | { 120 | "CaptureDataPeriodicUpload": "false", 121 | "CaptureDataPeriodicUploadPeriodSeconds": "8", 122 | "DeviceFleetName": "ggv2-smem-fleet", 123 | "BucketName": "", 124 | "CaptureDataBase64EmbedLimit": "3072", 125 | "CaptureDataPushPeriodSeconds": "4", 126 | "SagemakerEdgeLogVerbose": "false", 127 | "CaptureDataBatchSize": "10", 128 | "CaptureDataDestination": "Cloud", 129 | "FolderPrefix": "sme-capture", 130 | "UnixSocketName": "/tmp/sagemaker_edge_agent_example.sock", 131 | "CaptureDataBufferSize": "30" 132 | } 133 | ``` 134 | 135 | ## Security 136 | 137 | See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. 138 | 139 | ## License 140 | 141 | This library is licensed under the MIT-0 License. See the LICENSE file. 142 | 143 | -------------------------------------------------------------------------------- /components/artifacts/aws.sagemaker.edgeManager/0.1.0/sagemaker_edge_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "sagemaker_edge_core_device_uuid": "DEVICE_NAME", 3 | "sagemaker_edge_core_device_fleet_name": "DEVICE_FLEET_NAME", 4 | "sagemaker_edge_core_capture_data_buffer_size": 30, 5 | "sagemaker_edge_core_capture_data_batch_size": 10, 6 | "sagemaker_edge_core_capture_data_push_period_seconds": 4, 7 | "sagemaker_edge_core_folder_prefix": "", 8 | "sagemaker_edge_core_region": "AWS_REGION", 9 | "sagemaker_edge_provider_aws_ca_cert_file": "/greengrass/v2/rootCA.pem", 10 | "sagemaker_edge_provider_aws_cert_file": "/greengrass/v2/thingCert.crt", 11 | "sagemaker_edge_provider_aws_cert_pk_file": "/greengrass/v2/privKey.key", 12 | "sagemaker_edge_provider_aws_iot_cred_endpoint": "ENDPOINT", 13 | "sagemaker_edge_provider_provider": "Aws", 14 | "sagemaker_edge_core_root_certs_path": "./certificates", 15 | "sagemaker_edge_provider_s3_bucket_name": "bucket", 16 | "sagemaker_edge_core_capture_data_destination": "Cloud" 17 | } -------------------------------------------------------------------------------- /components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/edge_manager_python_client.py: -------------------------------------------------------------------------------- 1 | import random 2 | import sys 3 | 4 | import agent_pb2_grpc 5 | import cv2 6 | import grpc 7 | import numpy as np 8 | from agent_pb2 import (ListModelsRequest, LoadModelRequest, PredictRequest, 9 | UnLoadModelRequest, DescribeModelRequest, Tensor, TensorMetadata) 10 | 11 | model_url = '../com.model.darknet' 12 | model_name = 'darknet-model' 13 | tensor_name = 'data' 14 | SIZE = 416 15 | tensor_shape = [1, 3, SIZE, SIZE] 16 | image_url = sys.argv[1] 17 | 18 | print('IMAGE URL IS {}'.format(image_url)) 19 | 20 | 21 | def run(): 22 | with grpc.insecure_channel('unix:///tmp/sagemaker_edge_agent_example.sock') as channel: 23 | 24 | 
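        # The Edge Manager agent serves its gRPC API over this Unix domain socket;
        # the path must match the UnixSocketName configured for the SageMaker Edge
        # Manager component. AgentStub (created below) exposes the RPCs defined in
        # agent.proto that this script uses: LoadModel, ListModels, DescribeModel,
        # Predict, and UnLoadModel.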
edge_manager_client = agent_pb2_grpc.AgentStub(channel) 25 | 26 | try: 27 | response = edge_manager_client.LoadModel( 28 | LoadModelRequest(url=model_url, name=model_name)) 29 | except Exception as e: 30 | print(e) 31 | print('Model already loaded!') 32 | 33 | response = edge_manager_client.ListModels(ListModelsRequest()) 34 | 35 | response = edge_manager_client.DescribeModel( 36 | DescribeModelRequest(name=model_name)) 37 | 38 | # Mean and Std deviation of the RGB colors (collected from Imagenet dataset) 39 | mean = [123.68, 116.779, 103.939] 40 | std = [58.393, 57.12, 57.375] 41 | 42 | img = cv2.imread(image_url) 43 | frame = resize_short_within(img, short=SIZE, max_size=SIZE * 2) 44 | nn_input_size = SIZE 45 | nn_input = cv2.resize(frame, (nn_input_size, int(nn_input_size / 4 * 3))) 46 | nn_input = cv2.copyMakeBorder(nn_input, int(nn_input_size / 8), int(nn_input_size / 8), 47 | 0, 0, cv2.BORDER_CONSTANT, value=(0, 0, 0)) 48 | copy_frame = nn_input[:] 49 | nn_input = nn_input.astype('float32') 50 | nn_input = nn_input.reshape((nn_input_size * nn_input_size, 3)) 51 | scaled_frame = np.transpose(nn_input) 52 | scaled_frame[0, :] = scaled_frame[0, :] - mean[0] 53 | scaled_frame[0, :] = scaled_frame[0, :] / std[0] 54 | scaled_frame[1, :] = scaled_frame[1, :] - mean[1] 55 | scaled_frame[1, :] = scaled_frame[1, :] / std[1] 56 | scaled_frame[2, :] = scaled_frame[2, :] - mean[2] 57 | scaled_frame[2, :] = scaled_frame[2, :] / std[2] 58 | 59 | request = PredictRequest(name=model_name, tensors=[Tensor(tensor_metadata=TensorMetadata( 60 | name=bytes(tensor_name, 'utf-8'), data_type=5, shape=tensor_shape), byte_data=scaled_frame.tobytes())]) 61 | 62 | response = edge_manager_client.Predict(request) 63 | 64 | # read output tensors 65 | i = 0 66 | detections = [] 67 | 68 | for t in response.tensors: 69 | print("Flattened RAW Output Tensor : " + str(i + 1)) 70 | i += 1 71 | deserialized_bytes = np.frombuffer(t.byte_data, dtype=np.float32) 72 | detections.append(np.asarray(deserialized_bytes)) 73 | 74 | print(detections) 75 | # convert the bounding boxes 76 | new_list = [] 77 | for index, item in enumerate(detections[2]): 78 | if index % 4 == 0: 79 | new_list.append(detections[2][index - 4:index]) 80 | detections[2] = new_list[1:] 81 | 82 | # get objects, scores, bboxes 83 | objects = detections[0] 84 | scores = detections[1] 85 | bounding_boxes = new_list[1:] 86 | 87 | print(objects) 88 | print(scores) 89 | print(bounding_boxes) 90 | 91 | response = edge_manager_client.UnLoadModel( 92 | UnLoadModelRequest(name=model_name)) 93 | 94 | 95 | def _get_interp_method(interp, sizes=()): 96 | """Get the interpolation method for resize functions. 97 | The major purpose of this function is to wrap a random interp method selection 98 | and a auto-estimation method. 99 | ​ 100 | Parameters 101 | ---------- 102 | interp : int 103 | interpolation method for all resizing operations 104 | ​ 105 | Possible values: 106 | 0: Nearest Neighbors Interpolation. 107 | 1: Bilinear interpolation. 108 | 2: Area-based (resampling using pixel area relation). It may be a 109 | preferred method for image decimation, as it gives moire-free 110 | results. But when the image is zoomed, it is similar to the Nearest 111 | Neighbors method. (used by default). 112 | 3: Bicubic interpolation over 4x4 pixel neighborhood. 113 | 4: Lanczos interpolation over 8x8 pixel neighborhood. 114 | 9: Cubic for enlarge, area for shrink, bilinear for others 115 | 10: Random select from interpolation method metioned above. 
116 | Note: 117 | When shrinking an image, it will generally look best with AREA-based 118 | interpolation, whereas, when enlarging an image, it will generally look best 119 | with Bicubic (slow) or Bilinear (faster but still looks OK). 120 | More details can be found in the documentation of OpenCV, please refer to 121 | http://docs.opencv.org/master/da/d54/group__imgproc__transform.html. 122 | sizes : tuple of int 123 | (old_height, old_width, new_height, new_width), if None provided, auto(9) 124 | will return Area(2) anyway. 125 | ​ 126 | Returns 127 | ------- 128 | int 129 | interp method from 0 to 4 130 | """ 131 | if interp == 9: 132 | if sizes: 133 | assert len(sizes) == 4 134 | oh, ow, nh, nw = sizes 135 | if nh > oh and nw > ow: 136 | return 2 137 | elif nh < oh and nw < ow: 138 | return 3 139 | else: 140 | return 1 141 | else: 142 | return 2 143 | if interp == 10: 144 | return random.randint(0, 4) 145 | if interp not in (0, 1, 2, 3, 4): 146 | raise ValueError('Unknown interp method %d' % interp) 147 | 148 | 149 | def resize_short_within(img, short=512, max_size=1024, mult_base=32, interp=2): 150 | """ 151 | resizes the short side of the image so the aspect ratio remains the same AND the short 152 | side matches the convolutional layer for the network 153 | ​ 154 | Args: 155 | ----- 156 | img: np.array 157 | image you want to resize 158 | short: int 159 | the size to reshape the image to 160 | max_size: int 161 | the max size of the short side 162 | mult_base: int 163 | the size scale to readjust the resizer 164 | interp: int 165 | see '_get_interp_method' 166 | Returns: 167 | -------- 168 | img: np.array 169 | the resized array 170 | """ 171 | h, w, _ = img.shape 172 | im_size_min, im_size_max = (h, w) if w > h else (w, h) 173 | scale = float(short) / float(im_size_min) 174 | if np.round(scale * im_size_max / mult_base) * mult_base > max_size: 175 | # fit in max_size 176 | scale = float(np.floor(max_size / mult_base) * mult_base) / float(im_size_max) 177 | new_w, new_h = ( 178 | int(np.round(w * scale / mult_base) * mult_base), 179 | int(np.round(h * scale / mult_base) * mult_base) 180 | ) 181 | img = cv2.resize(img, (new_w, new_h), 182 | interpolation=_get_interp_method(interp, (h, w, new_h, new_w))) 183 | return img 184 | 185 | 186 | if __name__ == '__main__': 187 | run() 188 | -------------------------------------------------------------------------------- /components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/rush_hour.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/rush_hour.jpg -------------------------------------------------------------------------------- /components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/rush_hour.jpg.license: -------------------------------------------------------------------------------- 1 | SPDX-FileCopyrightText: "Rush hour on Seymour going north at Dunsmuir" by roland (https://www.flickr.com/photos/35034347371@N01) 2 | 3 | SPDX-License-Identifier: CC0 1.0 -------------------------------------------------------------------------------- /components/artifacts/com.model.darknet/0.1.0/rush_hour.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/components/artifacts/com.model.darknet/0.1.0/rush_hour.jpg -------------------------------------------------------------------------------- /components/artifacts/com.model.darknet/0.1.0/rush_hour.jpg.license: -------------------------------------------------------------------------------- 1 | SPDX-FileCopyrightText: "Rush hour on Seymour going north at Dunsmuir" by roland (https://www.flickr.com/photos/35034347371@N01) 2 | 3 | SPDX-License-Identifier: CC0 1.0 -------------------------------------------------------------------------------- /components/artifacts/com.model.mxnet_gluoncv_ssd/0.1.0/rush_hour.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/components/artifacts/com.model.mxnet_gluoncv_ssd/0.1.0/rush_hour.jpg -------------------------------------------------------------------------------- /components/artifacts/com.model.mxnet_gluoncv_ssd/0.1.0/rush_hour.jpg.license: -------------------------------------------------------------------------------- 1 | SPDX-FileCopyrightText: "Rush hour on Seymour going north at Dunsmuir" by roland (https://www.flickr.com/photos/35034347371@N01) 2 | 3 | SPDX-License-Identifier: CC0 1.0 -------------------------------------------------------------------------------- /components/recipes/aws.sagemaker.edgeManager-0.1.0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | RecipeFormatVersion: 2020-01-25 3 | ComponentName: aws.sagemaker.edgeManager 4 | ComponentVersion: 0.1.0 5 | ComponentDescription: Deploys Sagemaker Edge Manager binary 6 | ComponentPublisher: Amazon Web Services, Inc. 7 | ComponentConfiguration: 8 | DefaultConfiguration: 9 | deviceName: sagemaker-ggv2-smem-device-012345678 10 | deviceFleetName: ggv2-smem-fleet 11 | bucketName: YOUR_BUCKET_NAME 12 | endpoint: arn:aws:iot:::rolealias/SageMakerEdge-ggv2-smem-fleet 13 | ComponentDependencies: 14 | aws.greengrass.TokenExchangeService: 15 | VersionRequirement: '>=0.0.0' 16 | DependencyType: HARD 17 | Manifests: 18 | - Platform: 19 | os: linux 20 | architecture: aarch64 21 | Lifecycle: 22 | install: |- 23 | chmod +x {artifacts:path}/sagemaker_edge_agent_binary 24 | mkdir -p certificates 25 | rm -f certificates/* 26 | cp {artifacts:path}/$AWS_REGION.pem ./certificates/$AWS_REGION.pem 27 | rm sagemaker_edge_config.json 28 | cp {artifacts:path}/sagemaker_edge_config.json . 
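        # The steps below make the copied config writable, then use sed to fill in
        # its placeholders: the AWS region (from AWS_DEFAULT_REGION) and the device
        # name, device fleet name, bucket name, and IoT credentials endpoint from
        # this component's configuration.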
29 | chmod +w sagemaker_edge_config.json 30 | sed -i -e "s|AWS_REGION|$AWS_DEFAULT_REGION|" sagemaker_edge_config.json 31 | sed -i -e "s|DEVICE_NAME|{configuration:/deviceName}|" sagemaker_edge_config.json 32 | sed -i -e "s|DEVICE_FLEET_NAME|{configuration:/deviceFleetName}|" sagemaker_edge_config.json 33 | sed -i -e "s|BUCKET_NAME|{configuration:/bucketName}|" sagemaker_edge_config.json 34 | sed -i -e "s|ENDPOINT|{configuration:/endpoint}|" sagemaker_edge_config.json 35 | run: 36 | script: |- 37 | rm /tmp/sagemaker_edge_agent_example.sock 38 | {artifacts:path}/sagemaker_edge_agent_binary -a /tmp/sagemaker_edge_agent_example.sock -c sagemaker_edge_config.json 39 | Artifacts: 40 | - URI: s3://YOUR_BUCKET_NAME/artifacts/aws.sagemaker.edgeManager/0.1.0/sagemaker_edge_agent_binary 41 | Permission: 42 | Execute: OWNER 43 | - URI: s3://YOUR_BUCKET_NAME/artifacts/aws.sagemaker.edgeManager/0.1.0/sagemaker_edge_config.json 44 | Permission: 45 | Execute: OWNER 46 | - URI: s3://YOUR_BUCKET_NAME/artifacts/aws.sagemaker.edgeManager/0.1.0/.pem -------------------------------------------------------------------------------- /components/recipes/aws.sagemaker.edgeManagerPythonClient-0.1.0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | RecipeFormatVersion: 2020-01-25 3 | ComponentName: aws.sagemaker.edgeManagerPythonClient 4 | ComponentVersion: 0.1.0 5 | ComponentDescription: Deploys Sagemaker Edge Manager Python client 6 | ComponentPublisher: Amazon Web Services, Inc. 7 | ComponentDependencies: 8 | aws.greengrass.TokenExchangeService: 9 | VersionRequirement: '>=0.0.0' 10 | DependencyType: HARD 11 | aws.greengrass.SageMakerEdgeManager: 12 | VersionRequirement: '>=1.0.2' 13 | DependencyType: HARD 14 | com.model.darknet: 15 | VersionRequirement: '~0.1.0' 16 | DependencyType: HARD 17 | Manifests: 18 | - Platform: 19 | os: linux 20 | architecture: aarch64 21 | Lifecycle: 22 | run: 23 | script: |- 24 | sleep 5 && sudo python3 {artifacts:path}/edge_manager_python_client.py {artifacts:path}/rush_hour.jpg 25 | Artifacts: 26 | - URI: s3://YOUR_BUCKET_NAME/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/edge_manager_python_client.py 27 | Permission: 28 | Execute: OWNER 29 | - URI: s3://YOUR_BUCKET_NAME/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/agent_pb2.py 30 | Permission: 31 | Execute: OWNER 32 | - URI: s3://YOUR_BUCKET_NAME/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/agent_pb2_grpc.py 33 | Permission: 34 | Execute: OWNER 35 | - URI: s3://YOUR_BUCKET_NAME/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/rush_hour.jpg 36 | Permission: 37 | Execute: OWNER 38 | - URI: s3://YOUR_BUCKET_NAME/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/rush_hour.jpg.license 39 | Permission: 40 | Execute: OWNER -------------------------------------------------------------------------------- /components/recipes/com.model.darknet-0.1.0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | RecipeFormatVersion: 2020-01-25 3 | ComponentName: com.model.darknet 4 | ComponentVersion: 0.1.0 5 | ComponentDescription: A demo Darknet model for SageMaker Edge Manager. 6 | ComponentPublisher: Amazon Web Services, Inc. 
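# The Install step below unpacks the packaged Darknet model archive into the
# component's work directory; the Edge Manager client component loads the model
# from that location through the agent at runtime.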
7 | Manifests: 8 | - Platform: 9 | os: linux 10 | architecture: aarch64 11 | Lifecycle: 12 | Install: |- 13 | tar xf {artifacts:path}/darknet-model-1.0.tar.gz 14 | Artifacts: 15 | - URI: s3://YOUR_BUCKET_NAME/models/packaged/darknet-model-1.0.tar.gz 16 | - URI: s3://YOUR_BUCKET_NAME/artifacts/com.model.darknet/0.1.0/rush_hour.jpg 17 | - URI: s3://YOUR_BUCKET_NAME/artifacts/com.model.darknet/0.1.0/rush_hour.jpg.license -------------------------------------------------------------------------------- /components/recipes/com.model.keras-0.1.0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | RecipeFormatVersion: 2020-01-25 3 | ComponentName: com.model.keras 4 | ComponentVersion: 0.1.0 5 | ComponentDescription: A demo Keras model for SageMaker Edge Manager. 6 | ComponentPublisher: Amazon Web Services, Inc. 7 | Manifests: 8 | - Platform: 9 | os: linux 10 | architecture: aarch64 11 | Lifecycle: 12 | Install: |- 13 | tar xf {artifacts:path}/keras-model-1.0.tar.gz 14 | Artifacts: 15 | - URI: s3://YOUR_BUCKET_NAME/models/packaged/keras-model-1.0.tar.gz 16 | - URI: s3://YOUR_BUCKET_NAME/artifacts/com.model.keras/0.1.0/rush_hour.jpg 17 | - URI: s3://YOUR_BUCKET_NAME/artifacts/com.model.keras/0.1.0/rush_hour.jpg.license -------------------------------------------------------------------------------- /components/recipes/com.model.mxnet_gluoncv_ssd-0.1.0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | RecipeFormatVersion: 2020-01-25 3 | ComponentName: com.model.mxnet_gluoncv_ssd 4 | ComponentVersion: 0.1.0 5 | ComponentDescription: A demo MXNet + GluonCV object detection model for SageMaker Edge Manager. 6 | ComponentPublisher: Amazon Web Services, Inc. 7 | Manifests: 8 | - Platform: 9 | os: linux 10 | architecture: aarch64 11 | Lifecycle: 12 | Install: |- 13 | tar xf {artifacts:path}/gluoncv-model-1.0.tar.gz 14 | Artifacts: 15 | - URI: s3://YOUR_BUCKET_NAME/models/packaged/gluoncv-model-1.0.tar.gz 16 | - URI: s3://YOUR_BUCKET_NAME/artifacts/com.model.mxnet_gluoncv_ssd/0.1.0/rush_hour.jpg 17 | - URI: s3://YOUR_BUCKET_NAME/artifacts/com.model.mxnet_gluoncv_ssd/0.1.0/rush_hour.jpg.license -------------------------------------------------------------------------------- /examples/mlops-console-example/CameraStreamimx8mplus.md: -------------------------------------------------------------------------------- 1 | # AWS IoT Greengrass V2 and Amazon SageMaker Edge Manager with the NXP i.MX 8M Plus 2 | 3 | ![Architecture](images/Architecture.png) 4 | 5 | This workshop walks you through the end to end flow using a pre-trained mobilenetv2 image classification model to perform image classification at the edge on images captured from an RTSP stream. Inference is performed on the Neural Processing Unit of the i.MX8MPlus which allows for up to a 50x performance increase compared to running on a CPU only. Results are uploaded to AWS IoT and input and output tensors are uploaded to Amazon S3. 6 | 7 | This is an advanced workshop intended for those already familiar with basic AWS IoT Greengrass and AWS Cloud concepts. 8 | 9 | ## **Preparation:** 10 | 11 | This workshop must be run on an NXP i.MX 8M Plus EVK. 12 | 13 | You will need to build a Linux image for the i.MX8MPEVK that includes AWS IoT Greengrass V2. 
Follow the steps outlined in [https://github.com/aws-samples/meta-aws-demos/tree/master/nxp/imx8m](https://github.com/aws-samples/meta-aws-demos/tree/master/nxp/imx8m) to include the Greengrass V2 software with your [i.MX Yocto Linux build](https://www.nxp.com/docs/en/user-guide/IMX_YOCTO_PROJECT_USERS_GUIDE.pdf). This image will include all necessary dependencies to run this workshop. 14 | 15 | ![Greengrass Core](images/GGCore.png) 16 | 17 | **Requirements:** 18 | 19 | * NXP i.MX8MPEVK with [AWS IoT Greengrass V2](https://docs.aws.amazon.com/greengrass/v2/developerguide/install-greengrass-core-v2.html) installed 20 | * Follow the instructions here: [https://github.com/aws-samples/meta-aws-demos/tree/master/nxp/imx8m](https://github.com/aws-samples/meta-aws-demos/tree/master/nxp/imx8m) to build an image for the i.MX8MPEVK with all required dependencies. 21 | * A certificate and private key provisioned to your device. 22 | * Your device is connected and appears as a Greengrass Core device in AWS IoT Greengrass cloud service. 23 | * The Greengrass device has an IoT Thing Name that matches the regex: ``^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$`` due to SageMaker Edge Manager limitation 24 | * Host/development machine with a Unix terminal (Linux or Mac OS) 25 | * [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) installed and configured on host machine 26 | * An IP camera that has an RTSP stream accessible by the NXP i.MX8MPEVK 27 | 28 | ## **Clone the Example on Host** 29 | 30 | ``` 31 | git clone https://github.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python 32 | ``` 33 | 34 | ## **Setup workshop resources automatically** 35 | 36 | The following script will setup some necessary resources automatically, as long as you have already set up your AWS IoT Greengrass Core device per the pre-requisites. If you wish to perform these steps manually from the console, or have not set up an IoT Role Alias for your Greengrass device, you should skip this step and proceed to **Create or edit an IoT Role Alias for SageMaker Edge Manager**. 37 | 38 | Run the following script to perform the following: 39 | * Modify an existing IoT Role Alias IAM Role with the correct permissions for the workshop 40 | * Create an Amazon S3 bucket for edge inference results 41 | * Create an Amazon S3 bucket for Greengrass components 42 | * Create an Edge Manager Fleet 43 | * Register your Greengrass Core as a device in Edge Manager 44 | 45 | ``` 46 | cd ~/greengrass-v2-sagemaker-edge-manager-python/examples/mlops-console-example/scripts 47 | sh setupresources.sh 48 | ``` 49 | * Replace ```` with the name of your AWS IoT Role Alias that your Greengrass device is configured to use. 50 | * Replace ```` with the IoT Thing Name of your Greengrass Core device 51 | * Replace ```` with the region in which your Greengrass device is connected 52 | 53 | Once the script has successfully completed, you can skip to the step **Deploy the SageMaker Edge Manager Agent Greengrass component to the device**. 54 | 55 | If you wish to perform these steps manually, follow the steps below. 56 | 57 | ## **Create or edit an IoT Role Alias for SageMaker Edge Manager** 58 | 59 | The SageMaker Edge Manager Agent on the device will need to access resources in the Cloud. It uses the AWS IoT Credential Provider Role Alias to perform actions. 60 | 61 | ## **Setup resources from the AWS Management Console** 62 | 63 | First, we will need to create or edit the Role in IAM. 
When you set up your AWS IoT Greengrass Core device, you may have already performed this step. 64 | 65 | ### **If you already set up a Role for your AWS IoT Greengrass device:** 66 | 67 | If you have completed all of the necessary prerequisites for this workshop, your Greengrass device should already have an IoT Role Alias. 68 | 69 | Navigate to **AWS IoT Core Console → Secure → Role Aliases --> your Role Alias --> Edit IAM Role** 70 | 71 | Go to **Setup permissions for the IAM Role** 72 | 73 | ### **If you have not set up an IAM Role yet:** 74 | 75 | Navigate to **AWS IAM Console → Roles → Create role** 76 | 77 | **Select type of trusted entity → AWS service** 78 | Choose **IoT** and under **‘Select your use case’** choose **IoT.** 79 | 80 | Click on **‘Next: Permissions’, ‘Next: Tags’, ‘Next': Review'.** 81 | 82 | * Role name: SageMaker-IoTRole 83 | 84 | Click on ‘**Create role**’ 85 | 86 | ### Setup permissions for the IAM Role 87 | 88 | Once the role is created, we need to attach policies and authorize the IoT Credential Provider to access it. 89 | 90 | Choose your created role from the list of IAM Roles. Click on **‘Attach Policies’** and attach the following policies: 91 | 92 | * AmazonSageMakerEdgeDeviceFleetPolicy 93 | * AmazonS3FullAccess 94 | 95 | In addition, check that the following policies are attached: 96 | * AWSIoTLogging 97 | * AWSIoTRuleActions 98 | * AWSIoTThingsRegistration 99 | 100 | Click on **‘Trust relationships’ → ‘Edit trust relationship’** 101 | 102 | Add the following to the Policy Document and click on '**Update Trust Policy'**: 103 | 104 | ``` 105 | { 106 | "Version": "2012-10-17", 107 | "Statement": [ 108 | { 109 | "Effect": "Allow", 110 | "Principal": { 111 | "Service": "credentials.iot.amazonaws.com" 112 | }, 113 | "Action": "sts:AssumeRole" 114 | }, 115 | { 116 | "Effect": "Allow", 117 | "Principal": { 118 | "Service": "sagemaker.amazonaws.com" 119 | }, 120 | "Action": "sts:AssumeRole" 121 | } 122 | ] 123 | } 124 | ``` 125 | 126 | Copy the **Role ARN** for use in the next steps when you create the Edge Manager device fleet. 127 | 128 | ## **Setup S3 bucket for Inference Results** 129 | 130 | Edge Manager will upload inference results to this Amazon S3 Bucket. 131 | 132 | Amazon S3 bucket names need to be globally unique, so create a unique identifier to prepend the name of your S3 bucket and replace in the command below with this identifier. Replace with the region in which your AWS IoT Greengrass device is connected. The string `sagemaker` must also be present in the bucket name. 133 | 134 | Run the following command from your host machine: 135 | 136 | ``` 137 | aws s3 mb s3://-sagemaker-inference-results --region 138 | ``` 139 | 140 | _It is best practice to restrict all Amazon S3 buckets from Public Access. You should enable this security feature for all Amazon S3 buckets created in this workshop. For more information, refer to [documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html)._ 141 | 142 | ## **Create the Edge Manager Device Fleet** 143 | 144 | Navigate to **Amazon Sagemaker Console → Edge Manager → Edge device fleets → Create device fleet** 145 | 146 | **Device fleet properties:** 147 | * **Device fleet name:** greengrassv2fleet 148 | * **IAM Role:** The ARN of the IAM Role modified in the previous step. It is the same one that is linked to your IoT Role Alias. 
149 | * **Create IAM role alias:** If you do not already have an AWS IoT Role Alias for your Greengrass device, select this option to create it and attach the provided IAM Role. 150 | 151 | ![Create Device Fleet](images/emdevicefleet.png) 152 | 153 | Click ‘Next’. 154 | 155 | **Output configuration:** 156 | * **S3 bucket URI**: ``s3://`` 157 | * **Encryption key**: No Custom Encryption 158 | 159 | ![Create Device Fleet Output Configuration](images/edgemanagerfleetoutput.png) 160 | 161 | Click ‘Submit’ to finish creating the Edge Manager Fleet. 162 | 163 | Navigate to **AWS IoT Console → Secure → Role Aliases** 164 | 165 | You will see a role named SageMakerEdge-greengrassv2fleet with your IAM role attached if you chose to create a new IoT Role Alias. 166 | 167 | ## **Add your AWS IoT Greengrass Core Device to the Edge Manager Fleet** 168 | 169 | Open the Amazon Sagemaker AWS Console. Navigate to **Edge Manager → Edge devices → Register devices** 170 | 171 | **Device fleet name:** greengrassv2fleet (the name of your fleet) 172 | 173 | Click ‘Next’ 174 | 175 | * **Device Name:** The name of your AWS IoT Core Greengrass Core Thing Name (MUST MATCH regex: ``^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$``) 176 | * **IoT Name:** The name of your AWS IoT Core Greengrass Core Thing Name (Should be the same as Device Name) 177 | 178 | ![Register EM Device](images/registerdevice-imx8mplus.png) 179 | 180 | Click ‘Submit’ 181 | 182 | ## **Setup S3 Bucket for Greengrass Component Resources** 183 | 184 | Artifacts that will be deployed to the Greengrass device, such as scripts, need to be stored in an Amazon S3 bucket where the device will download them during a Greengrass deployment. 185 | 186 | Run this from your host machine: 187 | 188 | ``` 189 | aws s3 mb s3://-gg-components --region 190 | ``` 191 | 192 | _It is best practice to restrict all Amazon S3 buckets from Public Access. You should enable this security feature for all Amazon S3 buckets created in this workshop. For more information, refer to [documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html)._ 193 | 194 | ## **Deploy the SageMaker Edge Manager Agent Greengrass component to the device** 195 | 196 | Next, we will deploy the Edge Manager Agent to the Greengrass device. 197 | 198 | Navigate to the **AWS IoT Console → Greengrass → Deployments → Create** 199 | 200 | * **Name:** ‘Deployment for ML using EM’ 201 | * **Deployment target:** Choose either ‘Core device’ to deploy this to a single IoT Thing, or ‘Thing group’ to deploy it to all Greengrass Cores in a Thing Group. In a production environment, Thing Groups can be used to deploy components to a fleet of devices 202 | 203 | ![Create Deployment](images/createdeployment.png) 204 | 205 | Click ‘Next’, and then select the components you want to include in your Greengrass Deployment. 206 | 207 | **Public components:** 208 | 209 | * **aws.greengrass.Nucleus** - this will update your Greengrass Nucleus to the latest version compatible with SageMaker Edge Manager. 210 | * **aws.greengrass.TokenExchangeService** - the Edge Manager component depends on this to access the Amazon S3 bucket. 211 | * **aws.greengrass.SageMakerEdgeManager** - this component deploys the SageMaker Edge Manager Agent component 212 | * _OPTIONAL **aws.greengrass.CLI**_ - you can use this component to see the status of your components and to deploy new components locally on your device. 
This is not required for the workshop, but can be helpful for advanced development and debugging. For more information, see the [documentation](https://docs.aws.amazon.com/greengrass/v2/developerguide/gg-cli-reference.html) 213 | 214 | Click ‘Next’ 215 | 216 | ![Configure component](images/configureemcomponent.png) 217 | 218 | aws.greengrass.SageMakerEdgeManager component needs to be configured. Select the component and then click on ‘Configure Component’. In the ‘Configuration update’ field, input the following and change the values to match your environment and setup: 219 | 220 | ``` 221 | { 222 | "CaptureDataPeriodicUpload": "false", 223 | "CaptureDataPeriodicUploadPeriodSeconds": "8", 224 | "DeviceFleetName": "", 225 | "BucketName": "", 226 | "CaptureDataBase64EmbedLimit": "3072", 227 | "CaptureDataPushPeriodSeconds": "4", 228 | "SagemakerEdgeLogVerbose": "false", 229 | "CaptureDataBatchSize": "10", 230 | "CaptureDataDestination": "Cloud", 231 | "FolderPrefix": "sme-capture", 232 | "UnixSocketName": "/tmp/sagemaker_edge_agent_example.sock", 233 | "CaptureDataBufferSize": "30" 234 | } 235 | ``` 236 | 237 | * **DeviceFleetName** is the name of your Edge Manager Device Fleet name created in the step Create the Edge Manager Device Fleet 238 | * **BucketName** is where your inference results will be uploaded to. This is the bucket you created in the step Setup S3 bucket for Inference Results. 239 | * **UnixSocketName** is the name of the socket in which other Greengrass compoenents can communicate with the Edge Manager Agent. Ensure it is set to ``/tmp/sagemaker_edge_agent_example.sock``. 240 | 241 | For a full list of SageMaker Edge Manager Agent Component configuration parameters, [see the documentation.](https://docs.aws.amazon.com/greengrass/v2/developerguide/sagemaker-edge-manager-component.html) 242 | 243 | ![Configured component](images/emconfiguration.png) 244 | 245 | Once you have configured your Greengrass component, click ‘Confirm’. Then click ‘Next’. If you do not wish to configure any advanced settings, click ‘Next’ again. Review the deployment for any errors, then click on ‘Deploy’. 246 | 247 | Monitor the state of the deployment from the AWS IoT Greengrass console. You can also monitor the state of the deployment from the device by running: 248 | 249 | ``` 250 | sudo tail -f /greengrass/v2/logs/greengrass.log 251 | ``` 252 | 253 | The output of the Greengrass log should contain lines similar to the following: 254 | 255 | ``` 256 | 2021-04-07T21:05:41.142Z [INFO] (pool-2-thread-19) com.aws.greengrass.componentmanager.ComponentStore: delete-component-start. {componentIdentifier=aws.greengrass.SageMakerEdgeManager-v0.1.0} 257 | 2021-04-07T21:05:41.157Z [INFO] (pool-2-thread-19) com.aws.greengrass.componentmanager.ComponentStore: delete-component-finish. {componentIdentifier=aws.greengrass.SageMakerEdgeManager-v0.1.0} 258 | 2021-04-07T21:05:44.580Z [INFO] (pool-2-thread-12) com.aws.greengrass.deployment.DeploymentService: Current deployment finished. {DeploymentId=be1736e2-cb47-4a6a-a561-5e089dd4822f, serviceName=DeploymentService, currentState=RUNNING} 259 | 2021-04-07T21:05:44.663Z [INFO] (pool-2-thread-12) com.aws.greengrass.deployment.IotJobsHelper: Updating status of persisted deployment. {Status=SUCCEEDED, StatusDetails={detailed-deployment-status=SUCCESSFUL}, ThingName=iMX8MQEVK_GG_Core_001, JobId=be1736e2-cb47-4a6a-a561-5e089dd4822f} 260 | 2021-04-07T21:05:49.410Z [INFO] (Thread-4) com.aws.greengrass.deployment.IotJobsHelper: Job status update was accepted. 
{Status=SUCCEEDED, ThingName=iMX8MQEVK_GG_Core_001, JobId=be1736e2-cb47-4a6a-a561-5e089dd4822f} 261 | 2021-04-07T21:05:49.849Z [INFO] (pool-2-thread-12) com.aws.greengrass.status.FleetStatusService: fss-status-update-published. Status update published to FSS. {serviceName=FleetStatusService, currentState=RUNNING} 262 | ``` 263 | 264 | To check if the Edge Manager Agent was successfully deployed, tail the component log on the device: 265 | 266 | ``` 267 | sudo tail -f /greengrass/v2/logs/aws.greengrass.SageMakerEdgeManager.log 268 | ``` 269 | 270 | You should see the Edge Manager Agent parse the configuration file and open a socket on the device. 271 | 272 | ``` 273 | 2021-07-13T19:43:13.511Z [INFO] (Copier) aws.greengrass.SageMakerEdgeManager: stdout. Server listening on unix:///tmp/sagemaker_edge_agent_example.sock. {scriptName=services.aws.greengrass.SageMakerEdgeManager.lifecycle.run.script, serviceName=aws.greengrass.SageMakerEdgeManager, currentState=RUNNING} 274 | ``` 275 | 276 | This socket is used by the Edge Manager Agent to send and receive requests using GRPC. Additional Greengrass v2 components developed to talk to the Edge Manager Agent should use this socket to load ML models and request predictions from those models. 277 | 278 | ## Upload the uncompiled mobilenetv2 model to Amazon S3 279 | 280 | In this workshop, we will use a pre-trained model distributed by TensorFlow. Navigate to [TensorFlow Lite Sample Models](https://www.tensorflow.org/lite/guide/hosted_models) and download Mobilenet_v2_1.0_224_quant by clicking 'tflite&pb'. 281 | 282 | Upload the model to Amazon S3: 283 | 284 | ``` 285 | aws s3 cp mobilenet_v2_1.0_224_quant.tgz s3:///models/uncompiled/mobilenet_v2_1.0_224_quant.tgz 286 | ``` 287 | 288 | Replace with the name of your bucket created in the step **Setup S3 Bucket for Greengrass Component Resources**. 289 | 290 | ## Compile the TFLite mobilenetv2 model using SageMaker Neo 291 | 292 | Next, we will compile the model to optimize it for, and take advantage of, the architecture and hardware acceleration of the device at the edge. SageMaker Neo utilizes the Apache TVM open source compiler for CPU, GPU, and NPU machine learning accelerators. 293 | 294 | Open the **Amazon SageMaker console → Inference → Compilation jobs → Create compilation job** 295 | 296 | * **Job settings:** 297 | * **Job name:** mobilenetv2-Quantized-model 298 | * **IAM Role:** 299 | * Create a new role 300 | * **S3 buckets you specify:** Any S3 Bucket 301 | * Click on ‘Create role’ 302 | * **Input configuration:** 303 | * **Location of model artifacts:** ``s3:///models/uncompiled/mobilenet_v2_1.0_224_quant.tgz`` 304 | * This is the location of the uncompiled model from the previous step. 305 | * **Data input configuration:** {"input":[1, 224, 224, 3]} 306 | * This is the input shape of the data for the model. 307 | * **Machine learning framework:** TFLite 308 | 309 | ![Neo compilation job](images/compilationjob-mobilenetv2.png) 310 | 311 | * **Output configuration:** 312 | * **Target device:** imx8mplus 313 | * **S3 Output location:** ``s3:///models/compiled`` 314 | 315 | Click on ‘Submit’. The compilation job will take 2-3 minutes. When it is finished, the Status will change to ‘COMPLETED’. 316 | 317 | Open the compilation job from the Amazon SageMaker console and note the compiled model artifact's S3 URI. Check that the compiled model is in the specified Amazon S3 bucket.
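If you prefer to script this step instead of clicking through the console, the same compilation job can be created with the SageMaker API. The following is a rough boto3 sketch, not the workshop's prescribed path: the region, role ARN, and bucket name are placeholders to replace with your own values, and the `TFLITE` and `imx8mplus` identifiers should be checked against the current API enums.

```python
import boto3

sagemaker = boto3.client('sagemaker', region_name='<region>')

bucket = '<unique-id>-gg-components'                                     # placeholder
role_arn = 'arn:aws:iam::<account-id>:role/<sagemaker-execution-role>'   # placeholder

# Create the same TFLite -> imx8mplus compilation job described above.
sagemaker.create_compilation_job(
    CompilationJobName='mobilenetv2-Quantized-model',
    RoleArn=role_arn,
    InputConfig={
        'S3Uri': f's3://{bucket}/models/uncompiled/mobilenet_v2_1.0_224_quant.tgz',
        'DataInputConfig': '{"input":[1, 224, 224, 3]}',   # model input shape
        'Framework': 'TFLITE',
    },
    OutputConfig={
        'S3OutputLocation': f's3://{bucket}/models/compiled',
        'TargetDevice': 'imx8mplus',
    },
    StoppingCondition={'MaxRuntimeInSeconds': 900},
)

# Poll the job status until it reaches COMPLETED or FAILED.
status = sagemaker.describe_compilation_job(
    CompilationJobName='mobilenetv2-Quantized-model')['CompilationJobStatus']
print(status)
```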
318 | 319 | ![Compiled Model in S3](images/s3compiledmodel8mplus.png) 320 | 321 | ## Package the compiled model for Edge Manager 322 | 323 | Next, we will prepare the model to integrate with the Edge Manager Agent. The packaging job signs the model’s hash so that the device can verify its integrity. 324 | 325 | Open the **Amazon SageMaker console** **→ Edge Manager → Edge packaging job → Create Edge packaging job** 326 | 327 | * **Edge packaging job name:** imx8mplus-mobilenetv2-packaging-001 328 | * **Model name:** mobilenetv2-224-10-quant 329 | * **Model version:** 1.0 330 | * **IAM role:** 331 | * Create a new role 332 | * Any S3 bucket 333 | * Create Role 334 | * After the role is created, click on the created role, which should have a name similar to: AmazonSageMaker-ExecutionRole-20210803T115161. Click on 'Attach policies' and add 'AWSGreengrassFullAccess'. 335 | 336 | ![Edge Packaging Job IAM Role](images/edgemanagerpackagingiamrole.png) 337 | 338 | ![Create Edge Packaging Job](images/createedgepackagingjob-imx8mplus.png) 339 | 340 | Click ‘Next’ 341 | 342 | * **Compilation job name:** mobilenetv2-Quantized-model (this is the name of the SageMaker Neo compilation job from the previous step) 343 | 344 | Click ‘Next’ 345 | 346 | * **S3 bucket URI:** ``s3:///models/packaged/`` 347 | * Click on 'Greengrass V2 component' 348 | * **Component name:** mobilenetv2_224_quantized_model_component 349 | * **Component description:** Packaged Mobilenetv2 uint8 quantized model 350 | * **Component version:** 1.0.0 351 | 352 | ![Create Edge Packaging Job screen 2](images/createedgepackagingjob2-imx8mplus.png) 353 | 354 | Click ‘Submit’. The packaging job will take approximately 2-3 minutes. When it is done, the Status will change to ‘COMPLETED’. 355 | 356 | Check that the packaged model is present in the Amazon S3 output location provided above. The Greengrass component should also be created. Navigate to the **AWS IoT Console → Greengrass → Components** and check that 'mobilenetv2_224_quantized_model_component' is present. 357 | 358 | ![mobilenetv2 Greengrass Component](images/mobilenetv2-gg-component-imx8mplus.png) 359 | 360 | ## **Create the Greengrass component for the application** 361 | 362 | The application implements the Edge Manager Agent client, opens the RTSP stream, pre-processes frames, and post-processes inference results. 363 | 364 | The Edge Manager Agent communicates using Protobuf messages over gRPC, so the application needs to talk to the Edge Manager Agent over gRPC and implement the correct Protobuf calls. 365 | 366 | Review the code in the camera_integration_edgemanger_client.py script. Note the lifecycle of the model (load, predict, optionally capture data, unload); a simplified sketch of this lifecycle follows below. 
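As context for that review, here is a simplified sketch of the calls the client makes against the Edge Manager Agent over its Unix socket. It is illustrative only, not the full component: it reuses the socket path, request types, model names, and the data_type/shape values from the repository's client script, omits error handling and the capture-data path, and uses a zero-filled placeholder instead of a real camera frame.

```
import grpc
import numpy as np

import agent_pb2_grpc
from agent_pb2 import LoadModelRequest, PredictRequest, UnLoadModelRequest, Tensor, TensorMetadata

# Connect to the Edge Manager Agent over its local Unix domain socket
channel = grpc.insecure_channel('unix:///tmp/sagemaker_edge_agent_example.sock')
client = agent_pb2_grpc.AgentStub(channel)

model_name = 'mobilenetv2-224-10-quant'
model_url = '/greengrass/v2/work/mobilenetv2_224_quantized_model_component'

# 1. Load the model that the model component unpacked on disk
client.LoadModel(LoadModelRequest(url=model_url, name=model_name))

# 2. Run inference on a preprocessed frame; data_type and shape mirror the client script
frame = np.zeros((1, 224, 224, 3), dtype=np.uint8)  # placeholder for a preprocessed camera frame
request = PredictRequest(
    name=model_name,
    tensors=[Tensor(
        tensor_metadata=TensorMetadata(name='input', data_type=5, shape=[1, 224, 224, 3]),
        byte_data=frame.tobytes())])
response = client.Predict(request)  # response.tensors holds the output tensor bytes

# 3. Unload the model when the component stops
client.UnLoadModel(UnLoadModelRequest(name=model_name))
```

In the actual component, the UnLoadModel call happens in a SIGTERM handler so the model is released when Greengrass stops the component.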
367 | 368 | 369 | Upload the gRPC client stubs and the Python application to Amazon S3 from your host machine: 370 | 371 | ``` 372 | cd ~/greengrass-v2-sagemaker-edge-manager-python 373 | 374 | aws s3api put-object --bucket --key artifacts/aws.sagemaker.edgeManagerClientCameraIntegration/0.1.0/agent_pb2_grpc.py --body components/artifacts/aws.sagemaker.edgeManagerClientCameraIntegration/0.1.0/agent_pb2_grpc.py 375 | aws s3api put-object --bucket --key artifacts/aws.sagemaker.edgeManagerClientCameraIntegration/0.1.0/agent_pb2.py --body components/artifacts/aws.sagemaker.edgeManagerClientCameraIntegration/0.1.0/agent_pb2.py 376 | aws s3api put-object --bucket --key artifacts/aws.sagemaker.edgeManagerClientCameraIntegration/0.1.0/camera_integration_edgemanger_client.py --body components/artifacts/aws.sagemaker.edgeManagerClientCameraIntegration/0.1.0/camera_integration_edgemanger_client.py 377 | ``` 378 | 379 | Open the file ``components/recipes/aws.sagemaker.edgeManagerClientCameraIntegration-0.1.0.yaml``. Change the URIs under ‘Artifacts’ to include the correct S3 bucket name where you uploaded the artifacts. Change this to the name of your Greengrass component bucket. 380 | 381 | Navigate to the **AWS IoT Console → Greengrass → Components → Create Component** 382 | 383 | Choose ‘Enter recipe as YAML’ and copy the contents of ``components/recipes/aws.sagemaker.edgeManagerClientCameraIntegration-0.1.0.yaml`` into the Recipe text box. 384 | 385 | Click on ‘Create Component’, and then check that the Status of the component is ‘Deployable’. Review the component description for any errors. 386 | 387 | ![Application Component](images/applicationcomponent-imx8mplus.png) 388 | 389 | ## **Deploy Model and Application Components to the Edge** 390 | 391 | Navigate to the **AWS IoT Console → Greengrass → Deployments** 392 | 393 | Click the checkmark box next to the previously created deployment ‘Deployment for ML using EM’ and then click on ‘Revise’. 394 | 395 | Click ‘Next’ and, on the ‘Select components’ menu, turn off the option ‘Show only selected components’ for ‘My components’. 396 | 397 | Select ‘mobilenetv2_224_quantized_model_component’ and ‘aws.sagemaker.edgeManagerClientCameraIntegration’. 398 | 399 | ![Model and Application Deployment](images/modelandapplicationdeployment-imx8mplus.png) 400 | 401 | Click ‘Next’, and select the aws.sagemaker.edgeManagerClientCameraIntegration component. Click on ‘Configure Component’. In the ‘Configuration update’ field, input the following, and update the RTSP stream URL to point to the stream from your IP camera. 402 | 403 | ``` 404 | { 405 | "rtspStreamURL": "rtspt://", 406 | "modelComponentName": "mobilenetv2_224_quantized_model_component", 407 | "modelName": "mobilenetv2-224-10-quant", 408 | "quantization": "True", 409 | "captureData": "False" 410 | } 411 | ``` 412 | 413 | Then click ‘Confirm’ and then 'Next'. Leave the advanced settings as defaults and then click ‘Next’ again. 414 | 415 | Review the deployment and, when you are ready to deploy the components to the device, click on ‘Deploy’. 
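Optionally, you can also watch the deployment from the command line on your host machine. This is a sketch; replace MyGreengrassCore with the IoT thing name of your own core device, as used elsewhere in this workshop.

```
# Shows the deployments targeting the core device and their statuses
aws greengrassv2 list-effective-deployments --core-device-thing-name MyGreengrassCore

# Shows the components (and versions) currently installed on the core device
aws greengrassv2 list-installed-components --core-device-thing-name MyGreengrassCore
```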
416 | 417 | Wait 2-3 minutes and then check that your Greengrass Core device is HEALTHY from the AWS IoT Greengrass console: 418 | 419 | ![Healthy GG Core](images/healthycore-imx8mplus.png) 420 | 421 | To check if the application was successfully deployed, tail the SageMaker component log on the device: 422 | 423 | ``` 424 | sudo tail -f /greengrass/v2/logs/aws.greengrass.SageMakerEdgeManager.log 425 | ``` 426 | 427 | If the LoadModel request from the application was successful, the Edge Manager Agent log will show the meta data about the machine learning model: 428 | 429 | ``` 430 | 2021-10-05T22:05:18.387Z [INFO] (Copier) aws.greengrass.SageMakerEdgeManager: stdout. {"version":"1.20210512.96da6cc"}[2021-10-05T22:05:18.387][I] backend name is tvm. {scriptName=services.aws.greengrass.SageMakerEdgeManager.lifecycle.run.script, serviceName=aws.greengrass.SageMakerEdgeManager, currentState=RUNNING} 431 | 2021-10-05T22:05:18.387Z [INFO] (Copier) aws.greengrass.SageMakerEdgeManager: stdout. {"version":"1.20210512.96da6cc"}[2021-10-05T22:05:18.387][I] DLR backend = kTVM. {scriptName=services.aws.greengrass.SageMakerEdgeManager.lifecycle.run.script, serviceName=aws.greengrass.SageMakerEdgeManager, currentState=RUNNING} 432 | 2021-10-05T22:05:18.387Z [INFO] (Copier) aws.greengrass.SageMakerEdgeManager: stdout. {"version":"1.20210512.96da6cc"}[2021-10-05T22:05:18.387][I] Finished populating metadata. {scriptName=services.aws.greengrass.SageMakerEdgeManager.lifecycle.run.script, serviceName=aws.greengrass.SageMakerEdgeManager, currentState=RUNNING} 433 | 2021-10-05T22:05:18.387Z [INFO] (Copier) aws.greengrass.SageMakerEdgeManager: stdout. {"version":"1.20210512.96da6cc"}[2021-10-05T22:05:18.387][I] Model:mobilenetv2-224-10-quant loaded!. {scriptName=services.aws.greengrass.SageMakerEdgeManager.lifecycle.run.script, serviceName=aws.greengrass.SageMakerEdgeManager, currentState=RUNNING} 434 | ``` 435 | 436 | Next, check the logs of the application on the device: 437 | ``` 438 | sudo tail -f /greengrass/v2/logs/aws.sagemaker.edgeManagerClientCameraIntegration.log 439 | ``` 440 | You will see the prediction requests and results in the log. 441 | 442 | ## **Get the Inference Results on AWS IoT Core** 443 | 444 | Inference results should be published to AWS IoT Core. 445 | 446 | To check the inference results arriving in AWS IoT Core, Navigate to the **AWS IoT Console → Test → MQTT test client.** 447 | 448 | Under ‘Subscribe to a topic’, type in ‘em/inference’. Every second, inference results should arrive on the ‘em/inference’ topic with the result and confidence level. 449 | 450 | ![MQTT messages](images/mqttresults-imx8mplus.png) 451 | 452 | The result is an index to the Imagenet1000 image classification label. For example, '968' corresponds to 'cup'. 453 | 454 | ## **Turn on inference data capture** 455 | 456 | After your device is deployed to the field, you may want to see how your model is performing at the edge. You can turn on data capture in the Edge Manager Python application to have raw input and output tensors, as well as meta data, published for each inference call. 457 | 458 | First, modify your Greengrass v2 deployment to turn on this feature. 459 | 460 | Navigate to the **AWS IoT Console → Greengrass → Deployments** 461 | 462 | Click the checkmark box next to the previously created deployment ‘Deployment for ML using EM’ and then click on ‘Revise’. 463 | 464 | Click ‘Next’ and on the ‘Select components’ menu, and then 'Next' again to leave the same components in the deployment. 
465 | 466 | Select 'aws.sagemaker.edgeManagerClientCameraIntegration' and then click on 'Configure component'. 467 | 468 | Under 'Configuration to merge', input the following to turn on data capture: 469 | ``` 470 | { 471 | "captureData": "True" 472 | } 473 | ``` 474 | ![Configure the application component](images/applicationcomponentconfigurationcapture-imx8mplus.png) 475 | 476 | To check the inference result metadata and the input and output tensors in Amazon S3, navigate to the **Amazon S3 console → .** 477 | 478 | In this S3 bucket, the following folder hierarchy should be present: 479 | 480 | * ‘sme-capture’ (the folder prefix you specified in your Edge Manager Greengrass component configuration and Edge Manager device setup) 481 | * greengrassv2fleet (the name of your Edge Manager device fleet) 482 | * mobilenetv2-224-10-quant (the name of your model from the Edge Manager packaging job) 483 | * year 484 | * month 485 | * day 486 | * hour 487 | 488 | Inside the ‘hour’ folder there will be .jsonl objects. These .jsonl files contain metadata about each inference prediction and its results. The output tensor is stored in the .jsonl file as a base64-encoded array of confidence levels, one per classification label. Under raw-data/input-tensors you will find additional data, including the input data shape and the raw image. 489 | 490 | ![S3 Inference Results](images/s3inferenceresults-imx8mplus.png) 491 | 492 | ## **Conclusion** 493 | With AWS IoT Greengrass v2 and Amazon SageMaker, you can build models in the cloud, deploy them to the edge, and monitor them in the cloud. Taking advantage of the NPU on the i.MX8MPlus allows you to run fast, energy-efficient inference at the edge. This completes the full Machine Learning Operations pipeline to manage your IoT ML fleets at scale. 494 | 495 | ## Clean up 496 | When you are done with this workshop example, you should clean up the AWS resources in your account to prevent any additional costs. 497 | 498 | ### Device 499 | First, shut down AWS IoT Greengrass on the device. Run the following command on the device to stop Greengrass from running and prevent it from starting up on device reboot: 500 | ``` 501 | sudo systemctl stop greengrass 502 | sudo systemctl disable greengrass 503 | ``` 504 | 505 | This stops data from being published to AWS IoT Core, stops SageMaker Edge Manager uploads to Amazon S3, and stops the application from running. 506 | 507 | ### Storage of Inference Results 508 | Delete the Amazon S3 bucket containing inference data by following the instructions [in the documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/delete-bucket.html) for the bucket named ```-sagemaker-inference-results```. 509 | 510 | ### AWS IoT Greengrass 511 | You might also wish to delete the AWS IoT Greengrass cloud resources. Leaving them provisioned in AWS IoT will not incur additional charges. 512 | 513 | #### Delete the Core 514 | From the host machine, run the following command: 515 | ``` 516 | aws greengrassv2 delete-core-device --core-device-thing-name MyGreengrassCore 517 | ``` 518 | Replace 'MyGreengrassCore' with the IoT thing name of your Greengrass device. 519 | 520 | #### Delete the Components 521 | Delete the Amazon S3 bucket containing the Greengrass component artifacts by following the instructions [in the documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/delete-bucket.html) for the bucket named ```-gg-components```. 
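If you prefer the AWS CLI, you can remove a bucket and all of its contents in one command. This is a sketch only; the bucket name below is a placeholder for your components bucket, and the operation is irreversible, so double-check the name before running it.

```
# Deletes every object in the bucket and then the bucket itself
aws s3 rb s3://<your-prefix>-gg-components --force
```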
522 | 523 | Navigate to **AWS IoT Console --> Greengrass --> Components** 524 | 525 | Click on 'aws.sagemaker.edgeManagerClientCameraIntegration' and then 'Delete version' then confirm by clicking 'Delete' 526 | 527 | Click on 'mobilenetv2_224_quantized_model_component' and then 'Delete version' then confirm by clicking 'Delete' 528 | 529 | ### Delete SageMaker Edge Manager resources 530 | 531 | Navigate to **Amazon SageMaker Console --> Edge Manager --> Edge devices** 532 | 533 | Click on the device name to delete, and then click on 'Deregister'. Follow the prompt to deregister the device. -------------------------------------------------------------------------------- /examples/mlops-console-example/README.md: -------------------------------------------------------------------------------- 1 | # AWS IoT Greengrass V2 and Amazon SageMaker Edge Manager 2 | 3 | ![Architecture](images/Architecture.png) 4 | 5 | This workshop takes you through 2 different examples depending on the type of device you have. 6 | 7 | If you have an NXP i.MX8MQEVK, or would like to run this workshop on a virtual machine, follow the [Image Classification example](ImageClassificationimx8mqevkorvirtual.md) 8 | 9 | If you have an NXP i.MX8M Plus EVK and an IP camera, follow the [Camera Integration example with NPU inference](CameraStreamimx8mplus.md) -------------------------------------------------------------------------------- /examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerClientCameraIntegration/0.1.0/agent_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | """Client and server classes corresponding to protobuf-defined services.""" 3 | import grpc 4 | 5 | import agent_pb2 as agent__pb2 6 | 7 | 8 | class AgentStub(object): 9 | """Missing associated documentation comment in .proto file.""" 10 | 11 | def __init__(self, channel): 12 | """Constructor. 13 | 14 | Args: 15 | channel: A grpc.Channel. 
16 | """ 17 | self.Predict = channel.unary_unary( 18 | '/AWS.SageMaker.Edge.Agent/Predict', 19 | request_serializer=agent__pb2.PredictRequest.SerializeToString, 20 | response_deserializer=agent__pb2.PredictResponse.FromString, 21 | ) 22 | self.LoadModel = channel.unary_unary( 23 | '/AWS.SageMaker.Edge.Agent/LoadModel', 24 | request_serializer=agent__pb2.LoadModelRequest.SerializeToString, 25 | response_deserializer=agent__pb2.LoadModelResponse.FromString, 26 | ) 27 | self.UnLoadModel = channel.unary_unary( 28 | '/AWS.SageMaker.Edge.Agent/UnLoadModel', 29 | request_serializer=agent__pb2.UnLoadModelRequest.SerializeToString, 30 | response_deserializer=agent__pb2.UnLoadModelResponse.FromString, 31 | ) 32 | self.ListModels = channel.unary_unary( 33 | '/AWS.SageMaker.Edge.Agent/ListModels', 34 | request_serializer=agent__pb2.ListModelsRequest.SerializeToString, 35 | response_deserializer=agent__pb2.ListModelsResponse.FromString, 36 | ) 37 | self.DescribeModel = channel.unary_unary( 38 | '/AWS.SageMaker.Edge.Agent/DescribeModel', 39 | request_serializer=agent__pb2.DescribeModelRequest.SerializeToString, 40 | response_deserializer=agent__pb2.DescribeModelResponse.FromString, 41 | ) 42 | self.CaptureData = channel.unary_unary( 43 | '/AWS.SageMaker.Edge.Agent/CaptureData', 44 | request_serializer=agent__pb2.CaptureDataRequest.SerializeToString, 45 | response_deserializer=agent__pb2.CaptureDataResponse.FromString, 46 | ) 47 | self.GetCaptureDataStatus = channel.unary_unary( 48 | '/AWS.SageMaker.Edge.Agent/GetCaptureDataStatus', 49 | request_serializer=agent__pb2.GetCaptureDataStatusRequest.SerializeToString, 50 | response_deserializer=agent__pb2.GetCaptureDataStatusResponse.FromString, 51 | ) 52 | 53 | 54 | class AgentServicer(object): 55 | """Missing associated documentation comment in .proto file.""" 56 | 57 | def Predict(self, request, context): 58 | """ 59 | perform inference on a model. 60 | 61 | Note: 62 | 1. users can chose to send the tensor data in the protobuf message or 63 | through a shared memory segment on a per tensor basis, the Predict 64 | method with handle the decode transparently. 65 | 2. serializing large tensors into the protobuf message can be quite expensive, 66 | based on our measurements it is recommended to use shared memory of 67 | tenors larger than 256KB. 68 | 3. SMEdge IPC server will not use shared memory for returning output tensors, 69 | i.e., the output tensor data will always send in byte form encoded 70 | in the tensors of PredictResponse. 71 | 4. currently SMEdge IPC server cannot handle concurrent predict calls, all 72 | these call will be serialized under the hood. this shall be addressed 73 | in a later release. 74 | Status Codes: 75 | 1. OK - prediction is successful 76 | 2. UNKNOWN - unknown error has occurred 77 | 3. INTERNAL - an internal error has occurred 78 | 4. NOT_FOUND - when model not found 79 | 5. INVALID_ARGUMENT - when tenors types mismatch 80 | 81 | """ 82 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 83 | context.set_details('Method not implemented!') 84 | raise NotImplementedError('Method not implemented!') 85 | 86 | def LoadModel(self, request, context): 87 | """ 88 | perform load for a model 89 | Note: 90 | 1. currently only local filesystem paths are supported for loading models. 91 | 2. currently only one model could be loaded at any time, loading of multiple 92 | models simultaneously shall be implemented in the future. 93 | 3. users are required to unload any loaded model to load another model. 94 | Status Codes: 95 | 1. 
OK - load is successful 96 | 2. UNKNOWN - unknown error has occurred 97 | 3. INTERNAL - an internal error has occurred 98 | 4. NOT_FOUND - model doesn't exist at the url 99 | 5. ALREADY_EXISTS - model with the same name is already loaded 100 | 6. RESOURCE_EXHAUSTED - memory is not available to load the model 101 | 7. FAILED_PRECONDITION - model package could not be loaded 102 | 103 | """ 104 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 105 | context.set_details('Method not implemented!') 106 | raise NotImplementedError('Method not implemented!') 107 | 108 | def UnLoadModel(self, request, context): 109 | """ 110 | perform unload for a model 111 | Status Codes: 112 | 1. OK - unload is successful 113 | 2. UNKNOWN - unknown error has occurred 114 | 3. INTERNAL - an internal error has occurred 115 | 4. NOT_FOUND - model doesn't exist 116 | 117 | """ 118 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 119 | context.set_details('Method not implemented!') 120 | raise NotImplementedError('Method not implemented!') 121 | 122 | def ListModels(self, request, context): 123 | """ 124 | lists the loaded models 125 | Status Codes: 126 | 1. OK - unload is successful 127 | 2. UNKNOWN - unknown error has occurred 128 | 3. INTERNAL - an internal error has occurred 129 | 130 | """ 131 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 132 | context.set_details('Method not implemented!') 133 | raise NotImplementedError('Method not implemented!') 134 | 135 | def DescribeModel(self, request, context): 136 | """ 137 | describes a model 138 | Status Codes: 139 | 1. OK - load is successful 140 | 2. UNKNOWN - unknown error has occurred 141 | 3. INTERNAL - an internal error has occurred 142 | 4. NOT_FOUND - model doesn't exist at the url 143 | 144 | """ 145 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 146 | context.set_details('Method not implemented!') 147 | raise NotImplementedError('Method not implemented!') 148 | 149 | def CaptureData(self, request, context): 150 | """ 151 | allows users to capture input and output tensors along with auxiliary data. 152 | Status Codes: 153 | 1. OK - data capture successfully initiated 154 | 2. UNKNOWN - unknown error has occurred 155 | 3. INTERNAL - an internal error has occurred 156 | 5. ALREADY_EXISTS - capture initiated for the given `capture_id` 157 | 6. RESOURCE_EXHAUSTED - buffer is full cannot accept any more requests. 158 | 7. OUT_OF_RANGE - timestamp is in the future. 159 | 8. INVALID_ARGUMENT - capture_id is not of expected format or input tensor paramater is invalid 160 | 9. FAILED_PRECONDITION - Indicates failed network access, when using cloud for capture data. 161 | 162 | """ 163 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 164 | context.set_details('Method not implemented!') 165 | raise NotImplementedError('Method not implemented!') 166 | 167 | def GetCaptureDataStatus(self, request, context): 168 | """ 169 | allows users to query status of capture data operation 170 | Status Codes: 171 | 1. OK - data capture successfully initiated 172 | 2. UNKNOWN - unknown error has occurred 173 | 3. INTERNAL - an internal error has occurred 174 | 4. NOT_FOUND - given capture id doesn't exist. 
175 | 176 | """ 177 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 178 | context.set_details('Method not implemented!') 179 | raise NotImplementedError('Method not implemented!') 180 | 181 | 182 | def add_AgentServicer_to_server(servicer, server): 183 | rpc_method_handlers = { 184 | 'Predict': grpc.unary_unary_rpc_method_handler( 185 | servicer.Predict, 186 | request_deserializer=agent__pb2.PredictRequest.FromString, 187 | response_serializer=agent__pb2.PredictResponse.SerializeToString, 188 | ), 189 | 'LoadModel': grpc.unary_unary_rpc_method_handler( 190 | servicer.LoadModel, 191 | request_deserializer=agent__pb2.LoadModelRequest.FromString, 192 | response_serializer=agent__pb2.LoadModelResponse.SerializeToString, 193 | ), 194 | 'UnLoadModel': grpc.unary_unary_rpc_method_handler( 195 | servicer.UnLoadModel, 196 | request_deserializer=agent__pb2.UnLoadModelRequest.FromString, 197 | response_serializer=agent__pb2.UnLoadModelResponse.SerializeToString, 198 | ), 199 | 'ListModels': grpc.unary_unary_rpc_method_handler( 200 | servicer.ListModels, 201 | request_deserializer=agent__pb2.ListModelsRequest.FromString, 202 | response_serializer=agent__pb2.ListModelsResponse.SerializeToString, 203 | ), 204 | 'DescribeModel': grpc.unary_unary_rpc_method_handler( 205 | servicer.DescribeModel, 206 | request_deserializer=agent__pb2.DescribeModelRequest.FromString, 207 | response_serializer=agent__pb2.DescribeModelResponse.SerializeToString, 208 | ), 209 | 'CaptureData': grpc.unary_unary_rpc_method_handler( 210 | servicer.CaptureData, 211 | request_deserializer=agent__pb2.CaptureDataRequest.FromString, 212 | response_serializer=agent__pb2.CaptureDataResponse.SerializeToString, 213 | ), 214 | 'GetCaptureDataStatus': grpc.unary_unary_rpc_method_handler( 215 | servicer.GetCaptureDataStatus, 216 | request_deserializer=agent__pb2.GetCaptureDataStatusRequest.FromString, 217 | response_serializer=agent__pb2.GetCaptureDataStatusResponse.SerializeToString, 218 | ), 219 | } 220 | generic_handler = grpc.method_handlers_generic_handler( 221 | 'AWS.SageMaker.Edge.Agent', rpc_method_handlers) 222 | server.add_generic_rpc_handlers((generic_handler,)) 223 | 224 | 225 | # This class is part of an EXPERIMENTAL API. 
226 | class Agent(object): 227 | """Missing associated documentation comment in .proto file.""" 228 | 229 | @staticmethod 230 | def Predict(request, 231 | target, 232 | options=(), 233 | channel_credentials=None, 234 | call_credentials=None, 235 | insecure=False, 236 | compression=None, 237 | wait_for_ready=None, 238 | timeout=None, 239 | metadata=None): 240 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/Predict', 241 | agent__pb2.PredictRequest.SerializeToString, 242 | agent__pb2.PredictResponse.FromString, 243 | options, channel_credentials, 244 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 245 | 246 | @staticmethod 247 | def LoadModel(request, 248 | target, 249 | options=(), 250 | channel_credentials=None, 251 | call_credentials=None, 252 | insecure=False, 253 | compression=None, 254 | wait_for_ready=None, 255 | timeout=None, 256 | metadata=None): 257 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/LoadModel', 258 | agent__pb2.LoadModelRequest.SerializeToString, 259 | agent__pb2.LoadModelResponse.FromString, 260 | options, channel_credentials, 261 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 262 | 263 | @staticmethod 264 | def UnLoadModel(request, 265 | target, 266 | options=(), 267 | channel_credentials=None, 268 | call_credentials=None, 269 | insecure=False, 270 | compression=None, 271 | wait_for_ready=None, 272 | timeout=None, 273 | metadata=None): 274 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/UnLoadModel', 275 | agent__pb2.UnLoadModelRequest.SerializeToString, 276 | agent__pb2.UnLoadModelResponse.FromString, 277 | options, channel_credentials, 278 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 279 | 280 | @staticmethod 281 | def ListModels(request, 282 | target, 283 | options=(), 284 | channel_credentials=None, 285 | call_credentials=None, 286 | insecure=False, 287 | compression=None, 288 | wait_for_ready=None, 289 | timeout=None, 290 | metadata=None): 291 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/ListModels', 292 | agent__pb2.ListModelsRequest.SerializeToString, 293 | agent__pb2.ListModelsResponse.FromString, 294 | options, channel_credentials, 295 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 296 | 297 | @staticmethod 298 | def DescribeModel(request, 299 | target, 300 | options=(), 301 | channel_credentials=None, 302 | call_credentials=None, 303 | insecure=False, 304 | compression=None, 305 | wait_for_ready=None, 306 | timeout=None, 307 | metadata=None): 308 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/DescribeModel', 309 | agent__pb2.DescribeModelRequest.SerializeToString, 310 | agent__pb2.DescribeModelResponse.FromString, 311 | options, channel_credentials, 312 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 313 | 314 | @staticmethod 315 | def CaptureData(request, 316 | target, 317 | options=(), 318 | channel_credentials=None, 319 | call_credentials=None, 320 | insecure=False, 321 | compression=None, 322 | wait_for_ready=None, 323 | timeout=None, 324 | metadata=None): 325 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/CaptureData', 326 | agent__pb2.CaptureDataRequest.SerializeToString, 327 | agent__pb2.CaptureDataResponse.FromString, 328 | options, channel_credentials, 329 | insecure, 
call_credentials, compression, wait_for_ready, timeout, metadata) 330 | 331 | @staticmethod 332 | def GetCaptureDataStatus(request, 333 | target, 334 | options=(), 335 | channel_credentials=None, 336 | call_credentials=None, 337 | insecure=False, 338 | compression=None, 339 | wait_for_ready=None, 340 | timeout=None, 341 | metadata=None): 342 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/GetCaptureDataStatus', 343 | agent__pb2.GetCaptureDataStatusRequest.SerializeToString, 344 | agent__pb2.GetCaptureDataStatusResponse.FromString, 345 | options, channel_credentials, 346 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 347 | -------------------------------------------------------------------------------- /examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerClientCameraIntegration/0.1.0/camera_integration_edgemanger_client.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2010-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # 4 | import cv2 5 | import numpy as np 6 | import random 7 | import argparse 8 | import time 9 | import json 10 | import awsiot.greengrasscoreipc 11 | from awsiot.greengrasscoreipc.model import ( 12 | QOS, 13 | PublishToIoTCoreRequest 14 | ) 15 | import math 16 | import agent_pb2_grpc 17 | import grpc 18 | from agent_pb2 import (ListModelsRequest, LoadModelRequest, PredictRequest, 19 | UnLoadModelRequest, DescribeModelRequest, CaptureDataRequest, Tensor, 20 | TensorMetadata, Timestamp) 21 | import signal 22 | import sys 23 | import uuid 24 | 25 | parser = argparse.ArgumentParser() 26 | parser.add_argument('-s', '--stream', action='store', type=str, required=True, dest='stream_path', help='RTSP Stream URL') 27 | parser.add_argument('-c', '--model-component', action='store', type=str, required=True, dest='model_component_name', help='Name of the GGv2 component containing the model') 28 | parser.add_argument('-m', '--model-name', action='store', type=str, required=True, dest='model_name', help='Friendly name of the model from Edge Packaging Job') 29 | parser.add_argument('-q', '--quantized', action='store', type=str, required=True, dest='quant', help='Is the model quantized?') 30 | parser.add_argument('-a', '--capture', action='store', type=str, required=True, dest='capture_data', default=False, help='Capture inference metadata and raw output') 31 | 32 | args = parser.parse_args() 33 | 34 | stream_path = args.stream_path 35 | model_component_name = args.model_component_name 36 | model_name = args.model_name 37 | quant = args.quant == 'True' 38 | capture_inference = args.capture_data == 'True' 39 | 40 | print ('RTSP stream is at ' + stream_path) 41 | print ('Model Greengrass v2 component name is ' + model_component_name) 42 | print ('Model name is ' + model_name) 43 | print ('Model is quantized: ' + str(quant)) 44 | 45 | model_url = '/greengrass/v2/work/' + model_component_name 46 | tensor_name = 'input' 47 | SIZE = 224 48 | tensor_shape = [1, SIZE, SIZE, 3] 49 | 50 | inference_result_topic = "em/inference" 51 | ipc_client = awsiot.greengrasscoreipc.connect() 52 | 53 | channel = grpc.insecure_channel('unix:///tmp/sagemaker_edge_agent_example.sock') 54 | edge_manager_client = agent_pb2_grpc.AgentStub(channel) 55 | 56 | # When the component is stopped. 
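# The handler below runs on SIGINT/SIGTERM and unloads the model from the Edge Manager Agent before exiting, so the agent does not keep the model loaded after the component stops.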
57 | def sigterm_handler(signum, frame): 58 | global edge_manager_client 59 | try: 60 | response = edge_manager_client.UnLoadModel(UnLoadModelRequest(name=model_name)) 61 | print ('Model unloaded.') 62 | sys.exit(0) 63 | except Exception as e: 64 | print ('Model failed to unload') 65 | print (e) 66 | sys.exit(-1) 67 | 68 | signal.signal(signal.SIGINT, sigterm_handler) 69 | signal.signal(signal.SIGTERM, sigterm_handler) 70 | 71 | def preprocess_frame(captured_frame): 72 | 73 | if not quant: 74 | frame = resize_short_within(captured_frame, short=SIZE, max_size=SIZE * 2) 75 | scaled_frame = cv2.resize(frame, (SIZE, int(SIZE/4 * 3 ))) 76 | scaled_frame = cv2.copyMakeBorder(scaled_frame, int(SIZE / 8), int(SIZE / 8), 77 | 0, 0, cv2.BORDER_CONSTANT, value=(0, 0, 0)) 78 | scaled_frame = np.asarray(scaled_frame) 79 | # normalization according to https://github.com/tensorflow/tensorflow/blob/a4dfb8d1a71385bd6d122e4f27f86dcebb96712d/tensorflow/python/keras/applications/imagenet_utils.py#L259 80 | scaled_frame = (scaled_frame/127.5).astype(np.float32) 81 | scaled_frame -= 1. 82 | return scaled_frame 83 | 84 | else: 85 | scaled_frame = cv2.resize(captured_frame, (SIZE,SIZE)) 86 | return scaled_frame 87 | 88 | def build_message(detections, performance, model_name): 89 | if not quant: 90 | index = np.argmax(detections[0]) 91 | confidence = detections[0][index] 92 | else: 93 | index = np.argmax(detections) 94 | confidence = (detections[0][index]/255) 95 | 96 | return { 97 | "index" : str(index-1), 98 | "confidence" : str(confidence), 99 | "performance" : str(performance), 100 | "model_name" : model_name 101 | } 102 | 103 | # read output tensors and append them to matrix 104 | def process_output_tensor(response): 105 | 106 | detections = [] 107 | for t in response.tensors: 108 | deserialized_bytes = np.frombuffer(t.byte_data, dtype=np.uint8) 109 | detections.append(np.asarray(deserialized_bytes)) 110 | 111 | return (detections) 112 | 113 | # IPC publish to IoT Core 114 | def publish_results_to_iot_core (message): 115 | # Publish highest confidence result to AWS IoT Core 116 | global ipc_client 117 | request = PublishToIoTCoreRequest() 118 | request.topic_name = inference_result_topic 119 | request.payload = bytes(json.dumps(message), "utf-8") 120 | request.qos = QOS.AT_LEAST_ONCE 121 | operation = ipc_client.new_publish_to_iot_core() 122 | operation.activate(request) 123 | future = operation.get_response() 124 | future.result(10) 125 | 126 | def run(): 127 | global edge_manager_client 128 | try: 129 | cap = cv2.VideoCapture(stream_path) 130 | fps = cap.get(cv2.CAP_PROP_FPS) 131 | ret, captured_frame = cap.read() 132 | except Exception as e: 133 | print('Stream failed to open.') 134 | cap.release() 135 | print(e) 136 | exit (-1) 137 | 138 | try: 139 | response = edge_manager_client.LoadModel( 140 | LoadModelRequest(url=model_url, name=model_name)) 141 | except Exception as e: 142 | print('Model failed to load.') 143 | print(e) 144 | 145 | while (cap.isOpened() and ret): 146 | 147 | frameId = cap.get(1) 148 | ret, frame = cap.read() 149 | 150 | #perform inference once per second 151 | if (frameId % math.floor(fps) == 0 ): 152 | img = preprocess_frame(frame) 153 | 154 | try: 155 | before = time.time() 156 | request = PredictRequest(name=model_name, tensors=[Tensor(tensor_metadata=TensorMetadata( 157 | name=tensor_name, data_type=5, shape=tensor_shape), byte_data=img.tobytes())]) 158 | response = edge_manager_client.Predict(request) 159 | after = time.time() 160 | performance = ((after)-(before))*1000 161 | 
detections = process_output_tensor(response) 162 | message = build_message(detections, performance, model_name) 163 | publish_results_to_iot_core (message) 164 | 165 | except Exception as e: 166 | print('Prediction failed') 167 | print(e) 168 | 169 | if capture_inference: 170 | print ('Capturing inference data in Amazon S3') 171 | now = time.time() 172 | seconds = int(now) 173 | nanos = int((now - seconds) * 10**9) 174 | timestamp = Timestamp(seconds=seconds, nanos=nanos) 175 | request = CaptureDataRequest( 176 | model_name=model_name, 177 | capture_id=str(uuid.uuid4()), 178 | inference_timestamp=timestamp, 179 | input_tensors=[Tensor(tensor_metadata=TensorMetadata(name="input", data_type=5, shape=tensor_shape), 180 | byte_data=img.tobytes())], 181 | output_tensors=[Tensor(tensor_metadata=TensorMetadata(name="output", data_type=5, shape=[1,257]), 182 | byte_data=detections[0].tobytes())] 183 | ) 184 | try: 185 | response = edge_manager_client.CaptureData(request) 186 | except Exception as e: 187 | print('CaptureData request failed') 188 | print(e) 189 | 190 | 191 | ## Scaling functions 192 | def _get_interp_method(interp, sizes=()): 193 | """Get the interpolation method for resize functions. 194 | The major purpose of this function is to wrap a random interp method selection 195 | and a auto-estimation method. 196 | ​ 197 | Parameters 198 | ---------- 199 | interp : int 200 | interpolation method for all resizing operations 201 | ​ 202 | Possible values: 203 | 0: Nearest Neighbors Interpolation. 204 | 1: Bilinear interpolation. 205 | 2: Area-based (resampling using pixel area relation). It may be a 206 | preferred method for image decimation, as it gives moire-free 207 | results. But when the image is zoomed, it is similar to the Nearest 208 | Neighbors method. (used by default). 209 | 3: Bicubic interpolation over 4x4 pixel neighborhood. 210 | 4: Lanczos interpolation over 8x8 pixel neighborhood. 211 | 9: Cubic for enlarge, area for shrink, bilinear for others 212 | 10: Random select from interpolation method metioned above. 213 | Note: 214 | When shrinking an image, it will generally look best with AREA-based 215 | interpolation, whereas, when enlarging an image, it will generally look best 216 | with Bicubic (slow) or Bilinear (faster but still looks OK). 217 | More details can be found in the documentation of OpenCV, please refer to 218 | http://docs.opencv.org/master/da/d54/group__imgproc__transform.html. 219 | sizes : tuple of int 220 | (old_height, old_width, new_height, new_width), if None provided, auto(9) 221 | will return Area(2) anyway. 
222 | ​ 223 | Returns 224 | ------- 225 | int 226 | interp method from 0 to 4 227 | """ 228 | if interp == 9: 229 | if sizes: 230 | assert len(sizes) == 4 231 | oh, ow, nh, nw = sizes 232 | if nh > oh and nw > ow: 233 | return 2 234 | elif nh < oh and nw < ow: 235 | return 3 236 | else: 237 | return 1 238 | else: 239 | return 2 240 | if interp == 10: 241 | return random.randint(0, 4) 242 | if interp not in (0, 1, 2, 3, 4): 243 | raise ValueError('Unknown interp method %d' % interp) 244 | 245 | 246 | def resize_short_within(img, short=512, max_size=1024, mult_base=32, interp=2): 247 | """ 248 | resizes the short side of the image so the aspect ratio remains the same AND the short 249 | side matches the convolutional layer for the network 250 | ​ 251 | Args: 252 | ----- 253 | img: np.array 254 | image you want to resize 255 | short: int 256 | the size to reshape the image to 257 | max_size: int 258 | the max size of the short side 259 | mult_base: int 260 | the size scale to readjust the resizer 261 | interp: int 262 | see '_get_interp_method' 263 | Returns: 264 | -------- 265 | img: np.array 266 | the resized array 267 | """ 268 | h, w, _ = img.shape 269 | im_size_min, im_size_max = (h, w) if w > h else (w, h) 270 | scale = float(short) / float(im_size_min) 271 | if np.round(scale * im_size_max / mult_base) * mult_base > max_size: 272 | # fit in max_size 273 | scale = float(np.floor(max_size / mult_base) * mult_base) / float(im_size_max) 274 | new_w, new_h = ( 275 | int(np.round(w * scale / mult_base) * mult_base), 276 | int(np.round(h * scale / mult_base) * mult_base) 277 | ) 278 | img = cv2.resize(img, (new_w, new_h), 279 | interpolation=_get_interp_method(interp, (h, w, new_h, new_w))) 280 | return img 281 | 282 | 283 | if __name__ == '__main__': 284 | run() -------------------------------------------------------------------------------- /examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/agent_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | """Client and server classes corresponding to protobuf-defined services.""" 3 | import grpc 4 | 5 | import agent_pb2 as agent__pb2 6 | 7 | 8 | class AgentStub(object): 9 | """Missing associated documentation comment in .proto file.""" 10 | 11 | def __init__(self, channel): 12 | """Constructor. 13 | 14 | Args: 15 | channel: A grpc.Channel. 
16 | """ 17 | self.Predict = channel.unary_unary( 18 | '/AWS.SageMaker.Edge.Agent/Predict', 19 | request_serializer=agent__pb2.PredictRequest.SerializeToString, 20 | response_deserializer=agent__pb2.PredictResponse.FromString, 21 | ) 22 | self.LoadModel = channel.unary_unary( 23 | '/AWS.SageMaker.Edge.Agent/LoadModel', 24 | request_serializer=agent__pb2.LoadModelRequest.SerializeToString, 25 | response_deserializer=agent__pb2.LoadModelResponse.FromString, 26 | ) 27 | self.UnLoadModel = channel.unary_unary( 28 | '/AWS.SageMaker.Edge.Agent/UnLoadModel', 29 | request_serializer=agent__pb2.UnLoadModelRequest.SerializeToString, 30 | response_deserializer=agent__pb2.UnLoadModelResponse.FromString, 31 | ) 32 | self.ListModels = channel.unary_unary( 33 | '/AWS.SageMaker.Edge.Agent/ListModels', 34 | request_serializer=agent__pb2.ListModelsRequest.SerializeToString, 35 | response_deserializer=agent__pb2.ListModelsResponse.FromString, 36 | ) 37 | self.DescribeModel = channel.unary_unary( 38 | '/AWS.SageMaker.Edge.Agent/DescribeModel', 39 | request_serializer=agent__pb2.DescribeModelRequest.SerializeToString, 40 | response_deserializer=agent__pb2.DescribeModelResponse.FromString, 41 | ) 42 | self.CaptureData = channel.unary_unary( 43 | '/AWS.SageMaker.Edge.Agent/CaptureData', 44 | request_serializer=agent__pb2.CaptureDataRequest.SerializeToString, 45 | response_deserializer=agent__pb2.CaptureDataResponse.FromString, 46 | ) 47 | self.GetCaptureDataStatus = channel.unary_unary( 48 | '/AWS.SageMaker.Edge.Agent/GetCaptureDataStatus', 49 | request_serializer=agent__pb2.GetCaptureDataStatusRequest.SerializeToString, 50 | response_deserializer=agent__pb2.GetCaptureDataStatusResponse.FromString, 51 | ) 52 | 53 | 54 | class AgentServicer(object): 55 | """Missing associated documentation comment in .proto file.""" 56 | 57 | def Predict(self, request, context): 58 | """ 59 | perform inference on a model. 60 | 61 | Note: 62 | 1. users can chose to send the tensor data in the protobuf message or 63 | through a shared memory segment on a per tensor basis, the Predict 64 | method with handle the decode transparently. 65 | 2. serializing large tensors into the protobuf message can be quite expensive, 66 | based on our measurements it is recommended to use shared memory of 67 | tenors larger than 256KB. 68 | 3. SMEdge IPC server will not use shared memory for returning output tensors, 69 | i.e., the output tensor data will always send in byte form encoded 70 | in the tensors of PredictResponse. 71 | 4. currently SMEdge IPC server cannot handle concurrent predict calls, all 72 | these call will be serialized under the hood. this shall be addressed 73 | in a later release. 74 | Status Codes: 75 | 1. OK - prediction is successful 76 | 2. UNKNOWN - unknown error has occurred 77 | 3. INTERNAL - an internal error has occurred 78 | 4. NOT_FOUND - when model not found 79 | 5. INVALID_ARGUMENT - when tenors types mismatch 80 | 81 | """ 82 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 83 | context.set_details('Method not implemented!') 84 | raise NotImplementedError('Method not implemented!') 85 | 86 | def LoadModel(self, request, context): 87 | """ 88 | perform load for a model 89 | Note: 90 | 1. currently only local filesystem paths are supported for loading models. 91 | 2. currently only one model could be loaded at any time, loading of multiple 92 | models simultaneously shall be implemented in the future. 93 | 3. users are required to unload any loaded model to load another model. 94 | Status Codes: 95 | 1. 
OK - load is successful 96 | 2. UNKNOWN - unknown error has occurred 97 | 3. INTERNAL - an internal error has occurred 98 | 4. NOT_FOUND - model doesn't exist at the url 99 | 5. ALREADY_EXISTS - model with the same name is already loaded 100 | 6. RESOURCE_EXHAUSTED - memory is not available to load the model 101 | 7. FAILED_PRECONDITION - model package could not be loaded 102 | 103 | """ 104 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 105 | context.set_details('Method not implemented!') 106 | raise NotImplementedError('Method not implemented!') 107 | 108 | def UnLoadModel(self, request, context): 109 | """ 110 | perform unload for a model 111 | Status Codes: 112 | 1. OK - unload is successful 113 | 2. UNKNOWN - unknown error has occurred 114 | 3. INTERNAL - an internal error has occurred 115 | 4. NOT_FOUND - model doesn't exist 116 | 117 | """ 118 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 119 | context.set_details('Method not implemented!') 120 | raise NotImplementedError('Method not implemented!') 121 | 122 | def ListModels(self, request, context): 123 | """ 124 | lists the loaded models 125 | Status Codes: 126 | 1. OK - unload is successful 127 | 2. UNKNOWN - unknown error has occurred 128 | 3. INTERNAL - an internal error has occurred 129 | 130 | """ 131 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 132 | context.set_details('Method not implemented!') 133 | raise NotImplementedError('Method not implemented!') 134 | 135 | def DescribeModel(self, request, context): 136 | """ 137 | describes a model 138 | Status Codes: 139 | 1. OK - load is successful 140 | 2. UNKNOWN - unknown error has occurred 141 | 3. INTERNAL - an internal error has occurred 142 | 4. NOT_FOUND - model doesn't exist at the url 143 | 144 | """ 145 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 146 | context.set_details('Method not implemented!') 147 | raise NotImplementedError('Method not implemented!') 148 | 149 | def CaptureData(self, request, context): 150 | """ 151 | allows users to capture input and output tensors along with auxiliary data. 152 | Status Codes: 153 | 1. OK - data capture successfully initiated 154 | 2. UNKNOWN - unknown error has occurred 155 | 3. INTERNAL - an internal error has occurred 156 | 5. ALREADY_EXISTS - capture initiated for the given `capture_id` 157 | 6. RESOURCE_EXHAUSTED - buffer is full cannot accept any more requests. 158 | 7. OUT_OF_RANGE - timestamp is in the future. 159 | 8. INVALID_ARGUMENT - capture_id is not of expected format or input tensor paramater is invalid 160 | 9. FAILED_PRECONDITION - Indicates failed network access, when using cloud for capture data. 161 | 162 | """ 163 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 164 | context.set_details('Method not implemented!') 165 | raise NotImplementedError('Method not implemented!') 166 | 167 | def GetCaptureDataStatus(self, request, context): 168 | """ 169 | allows users to query status of capture data operation 170 | Status Codes: 171 | 1. OK - data capture successfully initiated 172 | 2. UNKNOWN - unknown error has occurred 173 | 3. INTERNAL - an internal error has occurred 174 | 4. NOT_FOUND - given capture id doesn't exist. 
175 | 176 | """ 177 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 178 | context.set_details('Method not implemented!') 179 | raise NotImplementedError('Method not implemented!') 180 | 181 | 182 | def add_AgentServicer_to_server(servicer, server): 183 | rpc_method_handlers = { 184 | 'Predict': grpc.unary_unary_rpc_method_handler( 185 | servicer.Predict, 186 | request_deserializer=agent__pb2.PredictRequest.FromString, 187 | response_serializer=agent__pb2.PredictResponse.SerializeToString, 188 | ), 189 | 'LoadModel': grpc.unary_unary_rpc_method_handler( 190 | servicer.LoadModel, 191 | request_deserializer=agent__pb2.LoadModelRequest.FromString, 192 | response_serializer=agent__pb2.LoadModelResponse.SerializeToString, 193 | ), 194 | 'UnLoadModel': grpc.unary_unary_rpc_method_handler( 195 | servicer.UnLoadModel, 196 | request_deserializer=agent__pb2.UnLoadModelRequest.FromString, 197 | response_serializer=agent__pb2.UnLoadModelResponse.SerializeToString, 198 | ), 199 | 'ListModels': grpc.unary_unary_rpc_method_handler( 200 | servicer.ListModels, 201 | request_deserializer=agent__pb2.ListModelsRequest.FromString, 202 | response_serializer=agent__pb2.ListModelsResponse.SerializeToString, 203 | ), 204 | 'DescribeModel': grpc.unary_unary_rpc_method_handler( 205 | servicer.DescribeModel, 206 | request_deserializer=agent__pb2.DescribeModelRequest.FromString, 207 | response_serializer=agent__pb2.DescribeModelResponse.SerializeToString, 208 | ), 209 | 'CaptureData': grpc.unary_unary_rpc_method_handler( 210 | servicer.CaptureData, 211 | request_deserializer=agent__pb2.CaptureDataRequest.FromString, 212 | response_serializer=agent__pb2.CaptureDataResponse.SerializeToString, 213 | ), 214 | 'GetCaptureDataStatus': grpc.unary_unary_rpc_method_handler( 215 | servicer.GetCaptureDataStatus, 216 | request_deserializer=agent__pb2.GetCaptureDataStatusRequest.FromString, 217 | response_serializer=agent__pb2.GetCaptureDataStatusResponse.SerializeToString, 218 | ), 219 | } 220 | generic_handler = grpc.method_handlers_generic_handler( 221 | 'AWS.SageMaker.Edge.Agent', rpc_method_handlers) 222 | server.add_generic_rpc_handlers((generic_handler,)) 223 | 224 | 225 | # This class is part of an EXPERIMENTAL API. 
226 | class Agent(object): 227 | """Missing associated documentation comment in .proto file.""" 228 | 229 | @staticmethod 230 | def Predict(request, 231 | target, 232 | options=(), 233 | channel_credentials=None, 234 | call_credentials=None, 235 | insecure=False, 236 | compression=None, 237 | wait_for_ready=None, 238 | timeout=None, 239 | metadata=None): 240 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/Predict', 241 | agent__pb2.PredictRequest.SerializeToString, 242 | agent__pb2.PredictResponse.FromString, 243 | options, channel_credentials, 244 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 245 | 246 | @staticmethod 247 | def LoadModel(request, 248 | target, 249 | options=(), 250 | channel_credentials=None, 251 | call_credentials=None, 252 | insecure=False, 253 | compression=None, 254 | wait_for_ready=None, 255 | timeout=None, 256 | metadata=None): 257 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/LoadModel', 258 | agent__pb2.LoadModelRequest.SerializeToString, 259 | agent__pb2.LoadModelResponse.FromString, 260 | options, channel_credentials, 261 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 262 | 263 | @staticmethod 264 | def UnLoadModel(request, 265 | target, 266 | options=(), 267 | channel_credentials=None, 268 | call_credentials=None, 269 | insecure=False, 270 | compression=None, 271 | wait_for_ready=None, 272 | timeout=None, 273 | metadata=None): 274 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/UnLoadModel', 275 | agent__pb2.UnLoadModelRequest.SerializeToString, 276 | agent__pb2.UnLoadModelResponse.FromString, 277 | options, channel_credentials, 278 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 279 | 280 | @staticmethod 281 | def ListModels(request, 282 | target, 283 | options=(), 284 | channel_credentials=None, 285 | call_credentials=None, 286 | insecure=False, 287 | compression=None, 288 | wait_for_ready=None, 289 | timeout=None, 290 | metadata=None): 291 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/ListModels', 292 | agent__pb2.ListModelsRequest.SerializeToString, 293 | agent__pb2.ListModelsResponse.FromString, 294 | options, channel_credentials, 295 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 296 | 297 | @staticmethod 298 | def DescribeModel(request, 299 | target, 300 | options=(), 301 | channel_credentials=None, 302 | call_credentials=None, 303 | insecure=False, 304 | compression=None, 305 | wait_for_ready=None, 306 | timeout=None, 307 | metadata=None): 308 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/DescribeModel', 309 | agent__pb2.DescribeModelRequest.SerializeToString, 310 | agent__pb2.DescribeModelResponse.FromString, 311 | options, channel_credentials, 312 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 313 | 314 | @staticmethod 315 | def CaptureData(request, 316 | target, 317 | options=(), 318 | channel_credentials=None, 319 | call_credentials=None, 320 | insecure=False, 321 | compression=None, 322 | wait_for_ready=None, 323 | timeout=None, 324 | metadata=None): 325 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/CaptureData', 326 | agent__pb2.CaptureDataRequest.SerializeToString, 327 | agent__pb2.CaptureDataResponse.FromString, 328 | options, channel_credentials, 329 | insecure, 
call_credentials, compression, wait_for_ready, timeout, metadata) 330 | 331 | @staticmethod 332 | def GetCaptureDataStatus(request, 333 | target, 334 | options=(), 335 | channel_credentials=None, 336 | call_credentials=None, 337 | insecure=False, 338 | compression=None, 339 | wait_for_ready=None, 340 | timeout=None, 341 | metadata=None): 342 | return grpc.experimental.unary_unary(request, target, '/AWS.SageMaker.Edge.Agent/GetCaptureDataStatus', 343 | agent__pb2.GetCaptureDataStatusRequest.SerializeToString, 344 | agent__pb2.GetCaptureDataStatusResponse.FromString, 345 | options, channel_credentials, 346 | insecure, call_credentials, compression, wait_for_ready, timeout, metadata) 347 | -------------------------------------------------------------------------------- /examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/dog.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/dog.jpeg -------------------------------------------------------------------------------- /examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/edge_manager_python_client.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2010-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # 4 | import agent_pb2_grpc 5 | import cv2 6 | from agent_pb2 import (ListModelsRequest, LoadModelRequest, PredictRequest, 7 | UnLoadModelRequest, DescribeModelRequest, CaptureDataRequest, Tensor, 8 | TensorMetadata, Timestamp) 9 | import grpc 10 | import numpy as np 11 | import random 12 | import uuid 13 | import argparse 14 | import time 15 | import signal 16 | import sys 17 | import json 18 | import awsiot.greengrasscoreipc 19 | from awsiot.greengrasscoreipc.model import ( 20 | QOS, 21 | PublishToIoTCoreRequest 22 | ) 23 | 24 | parser = argparse.ArgumentParser() 25 | parser.add_argument('-i', '--image-path', action='store', type=str, required=True, dest='image_path', help='Path to Sample Images') 26 | parser.add_argument('-c', '--model-component', action='store', type=str, required=True, dest='model_component_name', help='Name of the GGv2 component containing the model') 27 | parser.add_argument('-m', '--model-name', action='store', type=str, required=True, dest='model_name', help='Friendly name of the model from Edge Packaging Job') 28 | parser.add_argument('-a', '--capture', action='store', type=str, required=True, dest='capture_data', default=False, help='Capture inference metadata and raw output') 29 | 30 | args = parser.parse_args() 31 | 32 | image_path = args.image_path 33 | model_component_name = args.model_component_name 34 | model_name = args.model_name 35 | capture_inference = args.capture_data == 'True' 36 | 37 | print ('Images stored in ' + image_path) 38 | print ('Model Greengrass v2 component name is ' + model_component_name) 39 | print ('Model name is ' + model_name) 40 | print ('Capture inference data is set to ' + str(capture_inference)) 41 | 42 | model_url = '/greengrass/v2/work/' + model_component_name 43 | tensor_name = 'data' 44 | SIZE = 224 45 | tensor_shape = [1, 3, SIZE, SIZE] 46 | image_urls = [image_path +'/rainbow.jpeg', image_path+'/tomato.jpeg', image_path+'/dog.jpeg', image_path+'/frog.jpeg'] 47 | 48 | 
channel = grpc.insecure_channel('unix:///tmp/sagemaker_edge_agent_example.sock') 49 | 50 | inference_result_topic = "em/inference" 51 | edge_manager_client = agent_pb2_grpc.AgentStub(channel) 52 | 53 | # When the component is stopped. 54 | def sigterm_handler(signum, frame): 55 | global edge_manager_client 56 | try: 57 | response = edge_manager_client.UnLoadModel(UnLoadModelRequest(name=model_name)) 58 | print ('Model unloaded.') 59 | sys.exit(0) 60 | except Exception as e: 61 | print ('Model failed to unload') 62 | print (e) 63 | sys.exit(-1) 64 | 65 | signal.signal(signal.SIGINT, sigterm_handler) 66 | signal.signal(signal.SIGTERM, sigterm_handler) 67 | 68 | # classifications for the model 69 | class_labels = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat', 70 | 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101', 71 | 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101', 72 | 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire', 73 | 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks', 74 | 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor', 75 | 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe', 76 | 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell', 77 | 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern', 78 | 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight', 79 | 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan', 80 | 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose', 81 | 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock', 82 | 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus', 83 | 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass', 84 | 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris', 85 | 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder', 86 | 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning', 87 | 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope', 88 | 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels', 89 | 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder', 90 | 'pci-card', 'penguin', 'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card', 91 | 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator', 92 | 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus', 93 | 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', 94 | 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile', 95 | 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass', 96 | 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 
'superman', 'sushi', 'swan', 97 | 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee', 98 | 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato', 99 | 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops', 100 | 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn', 101 | 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask', 102 | 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101', 103 | 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter'] 104 | 105 | def run(): 106 | global edge_manager_client 107 | ipc_client = awsiot.greengrasscoreipc.connect() 108 | 109 | try: 110 | response = edge_manager_client.LoadModel( 111 | LoadModelRequest(url=model_url, name=model_name)) 112 | except Exception as e: 113 | print('Model failed to load.') 114 | print(e) 115 | 116 | while (True): 117 | time.sleep(30) 118 | print('New prediction') 119 | 120 | image_url = image_urls[random.randint(0,3)] 121 | print ('Picked ' + image_url + ' to perform inference on') 122 | 123 | # Scale image / preprocess 124 | img = cv2.imread(image_url) 125 | frame = resize_short_within(img, short=SIZE, max_size=SIZE * 2) 126 | nn_input_size = SIZE 127 | nn_input = cv2.resize(frame, (nn_input_size, int(nn_input_size/4 * 3 ))) 128 | nn_input = cv2.copyMakeBorder(nn_input, int(nn_input_size / 8), int(nn_input_size / 8), 129 | 0, 0, cv2.BORDER_CONSTANT, value=(0, 0, 0)) 130 | copy_frame = nn_input[:] 131 | nn_input = nn_input.astype('float32') 132 | nn_input = nn_input.reshape((nn_input_size * nn_input_size, 3)) 133 | scaled_frame = np.transpose(nn_input) 134 | 135 | # Call prediction 136 | request = PredictRequest(name=model_name, tensors=[Tensor(tensor_metadata=TensorMetadata( 137 | name=tensor_name, data_type=5, shape=tensor_shape), byte_data=scaled_frame.tobytes())]) 138 | 139 | try: 140 | response = edge_manager_client.Predict(request) 141 | except Exception as e: 142 | print('Prediction failed') 143 | print(e) 144 | 145 | # read output tensors and append them to matrix 146 | detections = [] 147 | for t in response.tensors: 148 | deserialized_bytes = np.frombuffer(t.byte_data, dtype=np.float32) 149 | detections.append(np.asarray(deserialized_bytes)) 150 | 151 | # Get the highest confidence inference result 152 | index = np.argmax(detections[0]) 153 | result = class_labels[index] 154 | confidence = detections[0][index] 155 | 156 | # Print results in local log 157 | print('Result is ', result) 158 | print('Confidence is ', confidence) 159 | 160 | # Publish highest confidence result to AWS IoT Core 161 | print ('Got inference results, publishing to AWS IoT Core') 162 | message = { 163 | "result" : result, 164 | "confidence" : str(confidence) 165 | } 166 | request = PublishToIoTCoreRequest() 167 | request.topic_name = inference_result_topic 168 | request.payload = bytes(json.dumps(message), "utf-8") 169 | request.qos = QOS.AT_LEAST_ONCE 170 | operation = ipc_client.new_publish_to_iot_core() 171 | operation.activate(request) 172 | future = operation.get_response() 173 | future.result(10) 174 | 175 | # capture inference results in S3 if enabled 176 | if capture_inference: 177 | print ('Capturing inference data in Amazon S3') 178 | now = time.time() 179 | seconds = int(now) 180 | nanos = int((now - seconds) * 10**9) 181 | timestamp = 
Timestamp(seconds=seconds, nanos=nanos) 182 | request = CaptureDataRequest( 183 | model_name=model_name, 184 | capture_id=str(uuid.uuid4()), 185 | inference_timestamp=timestamp, 186 | input_tensors=[Tensor(tensor_metadata=TensorMetadata(name="input", data_type=5, shape=tensor_shape), 187 | byte_data=scaled_frame.tobytes())], 188 | output_tensors=[Tensor(tensor_metadata=TensorMetadata(name="output", data_type=5, shape=[1,257]), 189 | byte_data=detections[0].tobytes())] 190 | ) 191 | try: 192 | response = edge_manager_client.CaptureData(request) 193 | except Exception as e: 194 | print('CaptureData request failed') 195 | print(e) 196 | 197 | ## Scaling functions 198 | def _get_interp_method(interp, sizes=()): 199 | """Get the interpolation method for resize functions. 200 | The major purpose of this function is to wrap a random interp method selection 201 | and an auto-estimation method. 202 | 203 | Parameters 204 | ---------- 205 | interp : int 206 | interpolation method for all resizing operations 207 | 208 | Possible values: 209 | 0: Nearest Neighbors Interpolation. 210 | 1: Bilinear interpolation. 211 | 2: Area-based (resampling using pixel area relation). It may be a 212 | preferred method for image decimation, as it gives moire-free 213 | results. But when the image is zoomed, it is similar to the Nearest 214 | Neighbors method. (used by default). 215 | 3: Bicubic interpolation over 4x4 pixel neighborhood. 216 | 4: Lanczos interpolation over 8x8 pixel neighborhood. 217 | 9: Cubic for enlarge, area for shrink, bilinear for others 218 | 10: Random select from the interpolation methods mentioned above. 219 | Note: 220 | When shrinking an image, it will generally look best with AREA-based 221 | interpolation, whereas, when enlarging an image, it will generally look best 222 | with Bicubic (slow) or Bilinear (faster but still looks OK). 223 | More details can be found in the documentation of OpenCV, please refer to 224 | http://docs.opencv.org/master/da/d54/group__imgproc__transform.html. 225 | sizes : tuple of int 226 | (old_height, old_width, new_height, new_width), if None provided, auto(9) 227 | will return Area(2) anyway.
228 | 229 | Returns 230 | ------- 231 | int 232 | interp method from 0 to 4 233 | """ 234 | if interp == 9: 235 | if sizes: 236 | assert len(sizes) == 4 237 | oh, ow, nh, nw = sizes 238 | if nh > oh and nw > ow: 239 | return 2 240 | elif nh < oh and nw < ow: 241 | return 3 242 | else: 243 | return 1 244 | else: 245 | return 2 246 | if interp == 10: 247 | return random.randint(0, 4) 248 | if interp not in (0, 1, 2, 3, 4): 249 | raise ValueError('Unknown interp method %d' % interp) 250 | return interp 251 | 252 | def resize_short_within(img, short=512, max_size=1024, mult_base=32, interp=2): 253 | """ 254 | resizes the short side of the image so the aspect ratio remains the same AND the short 255 | side matches the convolutional layer for the network 256 | 257 | Args: 258 | ----- 259 | img: np.array 260 | image you want to resize 261 | short: int 262 | the size to reshape the image to 263 | max_size: int 264 | the maximum size allowed for the longer side after rescaling 265 | mult_base: int 266 | the size scale to readjust the resizer 267 | interp: int 268 | see '_get_interp_method' 269 | Returns: 270 | -------- 271 | img: np.array 272 | the resized array 273 | """ 274 | h, w, _ = img.shape 275 | im_size_min, im_size_max = (h, w) if w > h else (w, h) 276 | scale = float(short) / float(im_size_min) 277 | if np.round(scale * im_size_max / mult_base) * mult_base > max_size: 278 | # fit in max_size 279 | scale = float(np.floor(max_size / mult_base) * mult_base) / float(im_size_max) 280 | new_w, new_h = ( 281 | int(np.round(w * scale / mult_base) * mult_base), 282 | int(np.round(h * scale / mult_base) * mult_base) 283 | ) 284 | img = cv2.resize(img, (new_w, new_h), 285 | interpolation=_get_interp_method(interp, (h, w, new_h, new_w))) 286 | return img 287 | 288 | 289 | if __name__ == '__main__': 290 | run() -------------------------------------------------------------------------------- /examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/frog.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/frog.jpeg -------------------------------------------------------------------------------- /examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/rainbow.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/rainbow.jpeg -------------------------------------------------------------------------------- /examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/tomato.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/components/artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/tomato.jpeg -------------------------------------------------------------------------------- /examples/mlops-console-example/components/recipes/aws.sagemaker.edgeManagerClientCameraIntegration-0.1.0.yaml:
-------------------------------------------------------------------------------- 1 | --- 2 | RecipeFormatVersion: "2020-01-25" 3 | ComponentName: "aws.sagemaker.edgeManagerClientCameraIntegration" 4 | ComponentVersion: "0.1.0" 5 | ComponentType: "aws.greengrass.generic" 6 | ComponentDescription: "Deploys Edge Manager client with rtsp stream" 7 | ComponentPublisher: "Amazon Web Services, Inc." 8 | ComponentConfiguration: 9 | DefaultConfiguration: 10 | accessControl: 11 | aws.greengrass.ipc.mqttproxy: 12 | aws.sagemaker.edgeManagerClientCameraIntegration:pubsub:1: 13 | policyDescription: "Allows access to publish to em/inference" 14 | operations: 15 | - "aws.greengrass#PublishToIoTCore" 16 | resources: 17 | - "em/inference" 18 | rtspStreamURL: "rtspt://" 19 | modelComponentName: "mobilenetv2_224_quantized_model_component" 20 | modelName: "mobilenetv2-224-10-quant" 21 | quantization: "True" 22 | captureData: "False" 23 | Manifests: 24 | - Platform: 25 | os: "linux" 26 | architecture: "aarch64" 27 | Lifecycle: 28 | run: 29 | script: "sleep 5 && python3 -u {artifacts:path}/camera_integration_edgemanger_client.py\ 30 | \ -s '{configuration:/rtspStreamURL}' -c '{configuration:/modelComponentName}'\ 31 | \ -m '{configuration:/modelName}' -q '{configuration:/quantization}'\ 32 | \ -a '{configuration:/captureData}'" 33 | RequiresPrivilege: true 34 | Artifacts: 35 | - Uri: "s3:///artifacts/aws.sagemaker.edgeManagerClientCameraIntegration/0.1.0/camera_integration_edgemanger_client.py" 36 | Permission: 37 | Execute: OWNER 38 | - URI: "s3:///artifacts/aws.sagemaker.edgeManagerClientCameraIntegration/0.1.0/agent_pb2.py" 39 | Permission: 40 | Execute: OWNER 41 | - URI: "s3:///artifacts/aws.sagemaker.edgeManagerClientCameraIntegration/0.1.0/agent_pb2_grpc.py" 42 | Permission: 43 | Execute: OWNER 44 | Lifecycle: {} 45 | -------------------------------------------------------------------------------- /examples/mlops-console-example/components/recipes/aws.sagemaker.edgeManagerPythonClient-0.1.0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | RecipeFormatVersion: "2020-01-25" 3 | ComponentName: "aws.sagemaker.edgeManagerPythonClient" 4 | ComponentVersion: "0.1.0" 5 | ComponentType: "aws.greengrass.generic" 6 | ComponentDescription: "Deploys Sagemaker Edge Manager Python client and example application" 7 | ComponentPublisher: "Amazon Web Services, Inc." 
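# Note on the configuration that follows (descriptive comment, not present in the original recipe):
# the DefaultConfiguration below grants this component permission to publish inference results to the
# em/inference topic through the Greengrass IPC MQTT proxy, and names the model component and model
# that edge_manager_python_client.py expects as command-line arguments. The s3:/// artifact URIs further
# down are placeholders: prefix them with your Greengrass components bucket (for example, the
# <timestamp>-gg-components bucket created by scripts/setupresources.sh) before uploading this recipe
# and its artifacts.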
8 | ComponentConfiguration: 9 | DefaultConfiguration: 10 | accessControl: 11 | aws.greengrass.ipc.mqttproxy: 12 | aws.sagemaker.edgemanagerPythonClient:pubsub:1: 13 | policyDescription: "Allows access to publish to em/inference" 14 | operations: 15 | - "aws.greengrass#PublishToIoTCore" 16 | resources: 17 | - "em/inference" 18 | modelComponentName: "SMEM-Image-Classification-Model" 19 | modelName: "mxnetclassifier" 20 | captureInference: "False" 21 | 22 | ComponentDependencies: 23 | aws.greengrass.SageMakerEdgeManager: 24 | VersionRequirement: ">=1.0.0" 25 | DependencyType: "HARD" 26 | Manifests: 27 | - Platform: 28 | os: "linux" 29 | architecture: "aarch64" 30 | Lifecycle: 31 | run: 32 | script: "sleep 5 && python3 -u {artifacts:path}/edge_manager_python_client.py -i {artifacts:path} -c '{configuration:/modelComponentName}' -m '{configuration:/modelName}' -a {configuration:/captureInference}" 33 | RequiresPrivilege: true 34 | Artifacts: 35 | - URI: s3:///artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/edge_manager_python_client.py 36 | Permission: 37 | Execute: OWNER 38 | - URI: s3:///artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/agent_pb2.py 39 | Permission: 40 | Execute: OWNER 41 | - URI: s3:///artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/agent_pb2_grpc.py 42 | Permission: 43 | Execute: OWNER 44 | - URI: s3:///artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/dog.jpeg 45 | Permission: 46 | Execute: OWNER 47 | - URI: s3:///artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/frog.jpeg 48 | Permission: 49 | Execute: OWNER 50 | - URI: s3:///artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/tomato.jpeg 51 | Permission: 52 | Execute: OWNER 53 | - URI: s3:///artifacts/aws.sagemaker.edgeManagerPythonClient/0.1.0/rainbow.jpeg 54 | Permission: 55 | Execute: OWNER 56 | -------------------------------------------------------------------------------- /examples/mlops-console-example/images/Architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/Architecture.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/GGCore.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/GGCore.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/applicationcomponent-imx8mplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/applicationcomponent-imx8mplus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/applicationcomponent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/applicationcomponent.png 
-------------------------------------------------------------------------------- /examples/mlops-console-example/images/applicationcomponentconfiguration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/applicationcomponentconfiguration.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/applicationcomponentconfigurationcapture-imx8mplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/applicationcomponentconfigurationcapture-imx8mplus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/compilationjob-mobilenetv2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/compilationjob-mobilenetv2.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/completedneojob.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/completedneojob.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/configureemcomponent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/configureemcomponent.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/createdeployment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/createdeployment.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/createedgepackagingjob-imx8mplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/createedgepackagingjob-imx8mplus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/createedgepackagingjob2-imx8mplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/createedgepackagingjob2-imx8mplus.png 
-------------------------------------------------------------------------------- /examples/mlops-console-example/images/createedgepackagingjob2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/createedgepackagingjob2.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/createedgepackagingjob2.png.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/createedgepackagingjob2.png.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/createedgepackingjob.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/createedgepackingjob.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/createemrecipe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/createemrecipe.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/deployableemcomponent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/deployableemcomponent.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/dog.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/dog.jpeg -------------------------------------------------------------------------------- /examples/mlops-console-example/images/edgemanagerfleetoutput.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/edgemanagerfleetoutput.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/edgemanagerpackagingiamrole.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/edgemanagerpackagingiamrole.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/emconfiguration.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/emconfiguration.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/emdevicefleet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/emdevicefleet.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/emdevicefleet2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/emdevicefleet2.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/frog.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/frog.jpeg -------------------------------------------------------------------------------- /examples/mlops-console-example/images/healthycore-imx8mplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/healthycore-imx8mplus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/healthycore.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/healthycore.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/mlops.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/mlops.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/mobilenetv2-gg-component-imx8mplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/mobilenetv2-gg-component-imx8mplus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/modelandapplicationdeployment-imx8mplus.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/modelandapplicationdeployment-imx8mplus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/modelandapplicationdeployment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/modelandapplicationdeployment.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/mqttresults-imx8mplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/mqttresults-imx8mplus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/mqttresults.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/mqttresults.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/neocompilationjob.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/neocompilationjob.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/rainbow.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/rainbow.jpeg -------------------------------------------------------------------------------- /examples/mlops-console-example/images/registerdevice-imx8mplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/registerdevice-imx8mplus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/registerdevice.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/registerdevice.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/registerdevice2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/registerdevice2.png 
-------------------------------------------------------------------------------- /examples/mlops-console-example/images/s3compiledmodel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/s3compiledmodel.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/s3compiledmodel8mplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/s3compiledmodel8mplus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/s3inferenceresults-imx8mplus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/s3inferenceresults-imx8mplus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/s3inferenceresults.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/s3inferenceresults.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/s3packagedjob.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/s3packagedjob.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/tomato.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/tomato.jpeg -------------------------------------------------------------------------------- /examples/mlops-console-example/images/trainingjob.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/trainingjob.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/viewdevicestatus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/viewdevicestatus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/images/viewfleetstatus.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/be633143f56be2ef56270ec6744c70df851e85f1/examples/mlops-console-example/images/viewfleetstatus.png -------------------------------------------------------------------------------- /examples/mlops-console-example/model-training/Image-classification-fulltraining-highlevel.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Model Optimization with an Image Classification Example\n", 8 | "1. [Introduction](#Introduction)\n", 9 | "2. [Prerequisites and Preprocessing](#Prerequisites-and-Preprocessing)\n", 10 | "3. [Train the model](#Train-the-model)" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "## Introduction\n", 18 | "\n", 19 | "***\n", 20 | "\n", 21 | "Welcome to our model optimization example for image classification. In this demo, we will use the Amazon SageMaker Image Classification algorithm to train on the [caltech-256 dataset](http://www.vision.caltech.edu/Image_Datasets/Caltech256/)." 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "## Prerequisites and Preprocessing\n", 29 | "\n", 30 | "***\n", 31 | "\n", 32 | "### Setup\n", 33 | "\n", 34 | "To get started, we need to define a few variables and obtain certain permissions that will be needed later in the example. These are:\n", 35 | "* A SageMaker session\n", 36 | "* An IAM role that grants training, storage & hosting access to your data\n", 37 | "* An S3 bucket, a folder & sub folders that will be used to store data and artifacts\n", 38 | "* SageMaker's Image Classification training image, which should not be changed\n", 39 | "\n", 40 | "We also need to upgrade the [SageMaker SDK for Python](https://sagemaker.readthedocs.io/en/stable/v2.html) to v2.33.0 or greater and restart the kernel."
41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "!~/anaconda3/envs/mxnet_p36/bin/pip install --upgrade 'sagemaker>=2.33.0'" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": null, 55 | "metadata": {}, 56 | "outputs": [], 57 | "source": [ 58 | "import sagemaker\n", 59 | "from sagemaker import session, get_execution_role\n", 60 | "\n", 61 | "role = get_execution_role()\n", 62 | "sagemaker_session = session.Session()" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "metadata": {}, 69 | "outputs": [], 70 | "source": [ 71 | "# S3 bucket and folders for saving code and model artifacts.\n", 72 | "# Change to the name of your Greengrass Component bucket.\n", 73 | "bucket = '-gg-components'\n", 74 | "folder = 'models/uncompiled'\n", 75 | "model_with_custom_code_sub_folder = folder + '/model-with-custom-code'\n", 76 | "validation_data_sub_folder = folder + '/validation-data'\n", 77 | "training_data_sub_folder = folder + '/training-data'\n", 78 | "training_output_sub_folder = folder + '/training-output'" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": null, 84 | "metadata": {}, 85 | "outputs": [], 86 | "source": [ 87 | "from sagemaker import session, get_execution_role\n", 88 | "from sagemaker.amazon.amazon_estimator import get_image_uri\n", 89 | "\n", 90 | "# S3 Location to save the model artifact after training\n", 91 | "s3_training_output_location = 's3://{}/{}'.format(bucket, training_output_sub_folder)\n", 92 | "\n", 93 | "# S3 Location to save your custom code in tar.gz format\n", 94 | "s3_model_with_custom_code_location = 's3://{}/{}'.format(bucket, model_with_custom_code_sub_folder)" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "from sagemaker.image_uris import retrieve\n", 104 | "aws_region = sagemaker_session.boto_region_name\n", 105 | "training_image = retrieve(framework='image-classification', region=aws_region, image_scope='training')" 106 | ] 107 | }, 108 | { 109 | "cell_type": "markdown", 110 | "metadata": {}, 111 | "source": [ 112 | "### Data preparation\n", 113 | "\n", 114 | "In this demo, we are using the [Caltech-256](http://www.vision.caltech.edu/Image_Datasets/Caltech256/) dataset, pre-converted into `RecordIO` format using MXNet's [im2rec](https://mxnet.apache.org/versions/1.7/api/faq/recordio) tool. The Caltech-256 dataset contains 30608 images across 256 object categories. For the training and validation data, the split follows this [MXNet example](https://github.com/apache/incubator-mxnet/blob/8ecdc49cf99ccec40b1e342db1ac6791aa97865d/example/image-classification/data/caltech256.sh), which randomly selects 60 images per class for training and uses the remaining data for validation. It takes around 50 seconds to convert the entire Caltech-256 dataset (~1.2GB) into `RecordIO` format on a p2.xlarge instance. SageMaker's training algorithm takes `RecordIO` files as input. For this demo, we will download the `RecordIO` files and upload them to S3. We then store the object categories in a variable."
115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "metadata": {}, 121 | "outputs": [], 122 | "source": [ 123 | "import os \n", 124 | "import urllib.request\n", 125 | "\n", 126 | "def download(url):\n", 127 | " filename = url.split(\"/\")[-1]\n", 128 | " if not os.path.exists(filename):\n", 129 | " urllib.request.urlretrieve(url, filename)" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [ 138 | "# Dowload caltech-256 data files from MXNet's website\n", 139 | "download('http://data.mxnet.io/data/caltech-256/caltech-256-60-train.rec')\n", 140 | "download('http://data.mxnet.io/data/caltech-256/caltech-256-60-val.rec')\n", 141 | "\n", 142 | "# Upload the file to S3\n", 143 | "s3_training_data_location = sagemaker_session.upload_data('caltech-256-60-train.rec', bucket, training_data_sub_folder)\n", 144 | "s3_validation_data_location = sagemaker_session.upload_data('caltech-256-60-val.rec', bucket, validation_data_sub_folder)" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": null, 150 | "metadata": {}, 151 | "outputs": [], 152 | "source": [ 153 | "class_labels = ['ak47', 'american-flag', 'backpack', 'baseball-bat', 'baseball-glove', 'basketball-hoop', 'bat',\n", 154 | " 'bathtub', 'bear', 'beer-mug', 'billiards', 'binoculars', 'birdbath', 'blimp', 'bonsai-101',\n", 155 | " 'boom-box', 'bowling-ball', 'bowling-pin', 'boxing-glove', 'brain-101', 'breadmaker', 'buddha-101',\n", 156 | " 'bulldozer', 'butterfly', 'cactus', 'cake', 'calculator', 'camel', 'cannon', 'canoe', 'car-tire',\n", 157 | " 'cartman', 'cd', 'centipede', 'cereal-box', 'chandelier-101', 'chess-board', 'chimp', 'chopsticks',\n", 158 | " 'cockroach', 'coffee-mug', 'coffin', 'coin', 'comet', 'computer-keyboard', 'computer-monitor',\n", 159 | " 'computer-mouse', 'conch', 'cormorant', 'covered-wagon', 'cowboy-hat', 'crab-101', 'desk-globe',\n", 160 | " 'diamond-ring', 'dice', 'dog', 'dolphin-101', 'doorknob', 'drinking-straw', 'duck', 'dumb-bell',\n", 161 | " 'eiffel-tower', 'electric-guitar-101', 'elephant-101', 'elk', 'ewer-101', 'eyeglasses', 'fern',\n", 162 | " 'fighter-jet', 'fire-extinguisher', 'fire-hydrant', 'fire-truck', 'fireworks', 'flashlight',\n", 163 | " 'floppy-disk', 'football-helmet', 'french-horn', 'fried-egg', 'frisbee', 'frog', 'frying-pan',\n", 164 | " 'galaxy', 'gas-pump', 'giraffe', 'goat', 'golden-gate-bridge', 'goldfish', 'golf-ball', 'goose',\n", 165 | " 'gorilla', 'grand-piano-101', 'grapes', 'grasshopper', 'guitar-pick', 'hamburger', 'hammock',\n", 166 | " 'harmonica', 'harp', 'harpsichord', 'hawksbill-101', 'head-phones', 'helicopter-101', 'hibiscus',\n", 167 | " 'homer-simpson', 'horse', 'horseshoe-crab', 'hot-air-balloon', 'hot-dog', 'hot-tub', 'hourglass',\n", 168 | " 'house-fly', 'human-skeleton', 'hummingbird', 'ibis-101', 'ice-cream-cone', 'iguana', 'ipod', 'iris',\n", 169 | " 'jesus-christ', 'joy-stick', 'kangaroo-101', 'kayak', 'ketch-101', 'killer-whale', 'knife', 'ladder',\n", 170 | " 'laptop-101', 'lathe', 'leopards-101', 'license-plate', 'lightbulb', 'light-house', 'lightning',\n", 171 | " 'llama-101', 'mailbox', 'mandolin', 'mars', 'mattress', 'megaphone', 'menorah-101', 'microscope',\n", 172 | " 'microwave', 'minaret', 'minotaur', 'motorbikes-101', 'mountain-bike', 'mushroom', 'mussels',\n", 173 | " 'necktie', 'octopus', 'ostrich', 'owl', 'palm-pilot', 'palm-tree', 'paperclip', 'paper-shredder',\n", 174 | " 'pci-card', 'penguin', 
'people', 'pez-dispenser', 'photocopier', 'picnic-table', 'playing-card',\n", 175 | " 'porcupine', 'pram', 'praying-mantis', 'pyramid', 'raccoon', 'radio-telescope', 'rainbow', 'refrigerator',\n", 176 | " 'revolver-101', 'rifle', 'rotary-phone', 'roulette-wheel', 'saddle', 'saturn', 'school-bus',\n", 177 | " 'scorpion-101', 'screwdriver', 'segway', 'self-propelled-lawn-mower', 'sextant', 'sheet-music', \n", 178 | " 'skateboard', 'skunk', 'skyscraper', 'smokestack', 'snail', 'snake', 'sneaker', 'snowmobile',\n", 179 | " 'soccer-ball', 'socks', 'soda-can', 'spaghetti', 'speed-boat', 'spider', 'spoon', 'stained-glass',\n", 180 | " 'starfish-101', 'steering-wheel', 'stirrups', 'sunflower-101', 'superman', 'sushi', 'swan',\n", 181 | " 'swiss-army-knife', 'sword', 'syringe', 'tambourine', 'teapot', 'teddy-bear', 'teepee',\n", 182 | " 'telephone-box', 'tennis-ball', 'tennis-court', 'tennis-racket', 'theodolite', 'toaster', 'tomato',\n", 183 | " 'tombstone', 'top-hat', 'touring-bike', 'tower-pisa', 'traffic-light', 'treadmill', 'triceratops',\n", 184 | " 'tricycle', 'trilobite-101', 'tripod', 't-shirt', 'tuning-fork', 'tweezer', 'umbrella-101', 'unicorn',\n", 185 | " 'vcr', 'video-projector', 'washing-machine', 'watch-101', 'waterfall', 'watermelon', 'welding-mask',\n", 186 | " 'wheelbarrow', 'windmill', 'wine-bottle', 'xylophone', 'yarmulke', 'yo-yo', 'zebra', 'airplanes-101',\n", 187 | " 'car-side-101', 'faces-easy-101', 'greyhound', 'tennis-shoes', 'toad', 'clutter']" 188 | ] 189 | }, 190 | { 191 | "cell_type": "markdown", 192 | "metadata": {}, 193 | "source": [ 194 | "## Train the model\n", 195 | "\n", 196 | "***\n", 197 | "\n", 198 | "Now that we are done with all the setup that is needed, we are ready to train our object detector. To begin, let us create a ``sagemaker.estimator.Estimator`` object. This estimator is required to launch the training job.\n", 199 | "\n", 200 | "We specify the following parameters while creating the estimator:\n", 201 | "\n", 202 | "* ``image_uri``: This is set to the training_image uri we defined previously. Once set, this image will be used later while running the training job.\n", 203 | "* ``role``: This is the IAM role which we defined previously.\n", 204 | "* ``instance_count``: This is the number of instances on which to run the training. When the number of instances is greater than one, then the image classification algorithm will run in distributed settings. \n", 205 | "* ``instance_type``: This indicates the type of machine on which to run the training. For this example we will use `ml.p3.8xlarge`.\n", 206 | "* ``volume_size``: This is the size in GB of the EBS volume to use for storing input data during training. Must be large enough to store training data as File Mode is used.\n", 207 | "* ``max_run``: This is the timeout value in seconds for training. After this amount of time SageMaker terminates the job regardless of its current status.\n", 208 | "* ``input_mode``: This is set to `File` in this example. SageMaker copies the training dataset from the S3 location to a local directory.\n", 209 | "* ``output_path``: This is the S3 path in which the training output is stored. 
We are assigning it to `s3_training_output_location` defined previously.\n" 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": null, 215 | "metadata": {}, 216 | "outputs": [], 217 | "source": [ 218 | "ic_estimator = sagemaker.estimator.Estimator(image_uri=training_image,\n", 219 | "                                             role=role,\n", 220 | "                                             instance_count=1,\n", 221 | "                                             instance_type='ml.p3.8xlarge',\n", 222 | "                                             volume_size = 50,\n", 223 | "                                             max_run = 360000,\n", 224 | "                                             input_mode= 'File',\n", 225 | "                                             output_path=s3_training_output_location,\n", 226 | "                                             base_job_name='img-classification-training'\n", 227 | "                                             )" 228 | ] 229 | }, 230 | { 231 | "cell_type": "markdown", 232 | "metadata": {}, 233 | "source": [ 234 | "The following hyperparameters, which are specific to the algorithm, are also set:\n", 235 | "\n", 236 | "* ``num_layers``: The number of layers (depth) for the network. We use 18 in this sample, but other values such as 50 or 152 can be used.\n", 237 | "* ``image_shape``: The input image dimensions, 'num_channels, height, width', for the network. It should be no larger than the actual image size. The number of channels should be the same as the actual image.\n", 238 | "* ``num_classes``: This is the number of output classes for the new dataset. ImageNet was trained with 1000 output classes, but the number of output classes can be changed for fine-tuning. For Caltech-256, we use 257 because it has 256 object categories + 1 clutter class.\n", 239 | "* ``num_training_samples``: This is the total number of training samples. It is set to 15420 for the Caltech-256 dataset with the current split.\n", 240 | "* ``mini_batch_size``: The number of training samples used for each mini batch. In distributed training, the number of training samples used per batch will be N * mini_batch_size, where N is the number of hosts on which training is run.\n", 241 | "* ``epochs``: Number of training epochs.\n", 242 | "* ``learning_rate``: Learning rate for training.\n", 243 | "* ``top_k``: Report the top-k accuracy during training.\n", 244 | "* ``precision_dtype``: Training datatype precision (default: float32). If set to 'float16', the training will be done in mixed_precision mode and will be faster than float32 mode." 245 | ] 246 | }, 247 | { 248 | "cell_type": "code", 249 | "execution_count": null, 250 | "metadata": {}, 251 | "outputs": [], 252 | "source": [ 253 | "ic_estimator.set_hyperparameters(num_layers=18,\n", 254 | "                                 image_shape = \"3,224,224\",\n", 255 | "                                 num_classes=257,\n", 256 | "                                 num_training_samples=15420,\n", 257 | "                                 mini_batch_size=128,\n", 258 | "                                 epochs=5,\n", 259 | "                                 learning_rate=0.01,\n", 260 | "                                 top_k=2,\n", 261 | "                                 use_pretrained_model=1,\n", 262 | "                                 precision_dtype='float32')" 263 | ] 264 | }, 265 | { 266 | "cell_type": "markdown", 267 | "metadata": {}, 268 | "source": [ 269 | "Next, we set up the input ``data_channels`` to be used later for training."
270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "execution_count": null, 275 | "metadata": {}, 276 | "outputs": [], 277 | "source": [ 278 | "train_data = sagemaker.inputs.TrainingInput(s3_training_data_location,\n", 279 | " content_type='application/x-recordio',\n", 280 | " s3_data_type='S3Prefix')\n", 281 | "\n", 282 | "validation_data = sagemaker.inputs.TrainingInput(s3_validation_data_location,\n", 283 | " content_type='application/x-recordio',\n", 284 | " s3_data_type='S3Prefix')\n", 285 | "\n", 286 | "data_channels = {'train': train_data, 'validation': validation_data}" 287 | ] 288 | }, 289 | { 290 | "cell_type": "markdown", 291 | "metadata": {}, 292 | "source": [ 293 | "After we've created the estimator object, we can train the model using ``fit()`` API" 294 | ] 295 | }, 296 | { 297 | "cell_type": "code", 298 | "execution_count": null, 299 | "metadata": { 300 | "scrolled": true 301 | }, 302 | "outputs": [], 303 | "source": [ 304 | "ic_estimator.fit(inputs=data_channels, logs=True)" 305 | ] 306 | }, 307 | { 308 | "cell_type": "markdown", 309 | "metadata": {}, 310 | "source": [ 311 | "After the training job completes, your trained model will be stored in the bucket specified above. This should be in your Greengrass Components bucket models/uncompiled folder. Check in S3 that you can see the output of the training job." 312 | ] 313 | } 314 | ], 315 | "metadata": { 316 | "kernelspec": { 317 | "display_name": "conda_mxnet_p36", 318 | "language": "python", 319 | "name": "conda_mxnet_p36" 320 | }, 321 | "language_info": { 322 | "codemirror_mode": { 323 | "name": "ipython", 324 | "version": 3 325 | }, 326 | "file_extension": ".py", 327 | "mimetype": "text/x-python", 328 | "name": "python", 329 | "nbconvert_exporter": "python", 330 | "pygments_lexer": "ipython3", 331 | "version": "3.6.13" 332 | }, 333 | "notice": "Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License." 334 | }, 335 | "nbformat": 4, 336 | "nbformat_minor": 4 337 | } 338 | -------------------------------------------------------------------------------- /examples/mlops-console-example/scripts/setupresources.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright 2010-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
4 | # 5 | 6 | if [ $# -ne 3 ]; then 7 | echo 1>&2 "Usage: $0 ROLE_ALIAS_NAME IOT_THING_NAME REGION" 8 | exit 3 9 | fi 10 | 11 | # Arguments 12 | ROLE_ALIAS_NAME=$1 13 | echo "ROLE ALIAS: $ROLE_ALIAS_NAME" 14 | IOT_THING_NAME=$2 15 | echo "GREENGRASS CORE THING NAME: $IOT_THING_NAME" 16 | REGION=$3 17 | echo "REGION: $REGION" 18 | 19 | # Trust document 20 | ASSUME_POLICY_DOCUMENT="{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"credentials.iot.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"},{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"sagemaker.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}" 21 | 22 | # Edge Manager device fleet parameters 23 | DEVICE_FLEET_NAME=greengrassv2fleet 24 | DEVICE_NAME=$IOT_THING_NAME 25 | 26 | # Get the role name from the IoT Role Alias 27 | IOT_ROLE_ALIAS_IAM_ROLE=$(aws iot describe-role-alias --role-alias $ROLE_ALIAS_NAME --region $REGION | grep roleArn) 28 | IAM_ROLE_ARN=$(echo "$IOT_ROLE_ALIAS_IAM_ROLE" | sed -e 's/\(^.*\"roleArn\"\:\ \"\)\(.*\)\(".*$\)/\2/') 29 | IAM_ROLE_NAME=$(echo "$IOT_ROLE_ALIAS_IAM_ROLE" | sed -e 's/\(^.*role\/\)\(.*\)\(".*$\)/\2/') 30 | 31 | echo "IAM Role Name : $IAM_ROLE_NAME" 32 | echo "IAM Role ARN : $IAM_ROLE_ARN" 33 | 34 | # Attaching policies to IAM Role 35 | echo "Attaching the following roles: AmazonSageMakerEdgeDeviceFleetPolicy, AmazonSageMakerFullAccess, AmazonS3FullAccess, AWSIoTFullAccess" 36 | aws iam attach-role-policy --role-name $IAM_ROLE_NAME \ 37 | --policy-arn arn:aws:iam::aws:policy/service-role/AmazonSageMakerEdgeDeviceFleetPolicy 38 | 39 | aws iam attach-role-policy --role-name $IAM_ROLE_NAME \ 40 | --policy-arn arn:aws:iam::aws:policy/AmazonSageMakerFullAccess 41 | 42 | aws iam attach-role-policy --role-name $IAM_ROLE_NAME \ 43 | --policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess 44 | 45 | aws iam attach-role-policy --role-name $IAM_ROLE_NAME \ 46 | --policy-arn arn:aws:iam::aws:policy/AWSIoTFullAccess 47 | 48 | # Update trust relationship 49 | echo "Updating trust relationship" 50 | aws iam update-assume-role-policy --role-name $IAM_ROLE_NAME \ 51 | --policy-document $ASSUME_POLICY_DOCUMENT 52 | 53 | DATE=$(date +%s) 54 | 55 | # Create edge inference bucket 56 | EDGE_INFERENCE_BUCKET_NAME=$DATE-sagemaker-inference-results 57 | echo "Creating edge inference bucket: $EDGE_INFERENCE_BUCKET_NAME" 58 | aws s3 mb s3://$EDGE_INFERENCE_BUCKET_NAME --region $REGION 59 | 60 | #Create greengrass component bucket 61 | GG_COMPONENTS_BUCKET_NAME=$DATE-gg-components 62 | echo "Creating Greengrass components bucket: $GG_COMPONENTS_BUCKET_NAME" 63 | aws s3 mb s3://$GG_COMPONENTS_BUCKET_NAME --region $REGION 64 | 65 | # Create device fleet 66 | echo "Creating Edge Manager device fleet $DEVICE_FLEET_NAME" 67 | aws sagemaker create-device-fleet --region $REGION --device-fleet-name $DEVICE_FLEET_NAME \ 68 | --role-arn $IAM_ROLE_ARN --output-config "{\"S3OutputLocation\":\"s3://$EDGE_INFERENCE_BUCKET_NAME\"}" 69 | 70 | # Register device 71 | echo "Registering GG Core device $IOT_THING_NAME to Edge Manager device fleet" 72 | aws sagemaker register-devices --region $REGION --device-fleet-name $DEVICE_FLEET_NAME \ 73 | --devices "[{\"DeviceName\":\"$DEVICE_NAME\",\"IotThingName\":\"$IOT_THING_NAME\"}]" -------------------------------------------------------------------------------- /examples/mxnet_gluon_ssd_lambda_function/README.md: -------------------------------------------------------------------------------- 1 | # Python SageMaker Edge Manager Agent + Greengrass V2 
Example 2 | This example uses boto3 APIs to create a model that we can deploy to an edge device. For deploying the artifacts with Greengrass V2, this guide walks you through the steps to follow in the AWS Console. If you want to use the AWS CLI instead, please follow https://github.com/aws-samples/greengrass-v2-sagemaker-edge-manager-python/blob/main/README.md. 3 | 4 | ## Pre-Requisites 5 | To run this example end to end, you will need an edge device (NVIDIA Jetson TX2/Xavier) that has internet connectivity. To start, clone this repo and open sagemaker_edge_example.ipynb on either a SageMaker Studio instance or a classic Jupyter notebook instance in SageMaker. 6 | 7 | ## Model and Agent Artifacts 8 | Follow sagemaker_edge_example.ipynb to create and compile a sample model for the edge device. In this example, we will use "jetson_xavier" as the target device. This notebook shows how you can use a pretrained darknet (object detection) or a pretrained keras (image classification) model. If you want to experiment with the gluoncv_ssd_mobilenet model, use this Jupyter notebook: https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker_neo_compilation_jobs/gluoncv_ssd_mobilenet/gluoncv_ssd_mobilenet_neo.ipynb 9 | 10 | ## Greengrass V2 Lambda function Component 11 | 12 | ### Write business/inference logic in the greengrass_lambda function 13 | 1. See greengrass_lambda.py for an example. Package this as a Lambda function and upload it via AWS Console > AWS Lambda > Create Function. 14 | 2. Choose Python 3.7 as the runtime. 15 | 3. Click Deploy. 16 | 4. Click Actions > Publish new version. 17 | 18 | ### Now you are ready to deploy this function as a Greengrass v2 Component 19 | 1. Go to the AWS IoT Greengrass console. 20 | 2. This step assumes a Greengrass Core device has already been created and that we are deploying to that Greengrass Core. 21 | 3. Go to Greengrass > Components > Create component. 22 | 4. Import from Lambda function. 23 | 5. Choose the Lambda function and version that you created earlier. You can run it in Greengrass container mode (requires explicit access to devices) or in No container mode (has access to devices connected to the core). 24 | 6. Add a dependency: aws.sagemaker.edgeManager (Component), >=0.1.0 (Version). 25 | 7. Create component. 26 | 27 | ### Update your Greengrass v2 deployment to include all the required components 28 | 29 | - com.model.darknet v0.1.0 30 | - com.model.mxnet_gluoncv_ssd v0.1.0 31 | - aws.sagemaker.edgeManager v0.1.0 32 | - greengrass_lambda v0.1.0 33 | 34 | -------------------------------------------------------------------------------- /examples/mxnet_gluon_ssd_lambda_function/greengrass_lambda.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
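# Overview (descriptive comment added for readability): this Lambda-based Greengrass component connects
# to the SageMaker Edge Manager agent over its Unix socket (/tmp/sagemaker_edge_agent_example.sock),
# loads the compiled com.model.mxnet_gluoncv_ssd model, grabs frames from a USB camera (or optionally a
# video file), runs Predict on each preprocessed frame, draws the detected bounding boxes, and saves the
# annotated result to output.jpg roughly every two seconds.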
3 | # 4 | import grpc 5 | import cv2 6 | import numpy as np 7 | import agent_pb2_grpc 8 | from agent_pb2 import (ListModelsRequest, LoadModelRequest, PredictRequest, 9 | UnLoadModelRequest, DescribeModelRequest, Tensor, TensorMetadata) 10 | import logging 11 | import sys 12 | from threading import Timer 13 | import platform 14 | import os 15 | import re 16 | import subprocess 17 | import traceback 18 | import time 19 | import random 20 | 21 | #use USB Camera 1 22 | cap = cv2.VideoCapture(1,cv2.CAP_V4L) 23 | #use video file 24 | #cap = cv2.VideoCapture("") 25 | 26 | # Setup logging to stdout 27 | logger = logging.getLogger(__name__) 28 | logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) 29 | 30 | #set up Model parameters 31 | #model_url = '/greengrass/v2/work/com.model.darknet/' 32 | model_url = '/greengrass/v2/work/com.model.mxnet_gluoncv_ssd/' 33 | #model_name = 'demo-darknet' 34 | model_name = 'demo-ssd-new' 35 | tensor_name = 'data' 36 | #tensor_shape = [1, 3, 416, 416] 37 | tensor_shape = [1, 3, 512, 512] 38 | input_size = 512 39 | object_categories = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 40 | 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 41 | 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] 42 | #img_url = '/greengrass-ml/images/darknet_original.bmp' 43 | 44 | # Mean and Std deviation of the RGB colors (collected from Imagenet dataset) 45 | mean=[123.68,116.779,103.939] 46 | std=[58.393,57.12,57.375] 47 | 48 | #Need to wait for sagemaker.edge.agent to start serving requests 49 | print("sleeping for sometime before loading") 50 | #time.sleep(30) 51 | print("Waking up") 52 | 53 | #Create a channel 54 | channel = grpc.insecure_channel('unix:///tmp/sagemaker_edge_agent_example.sock') 55 | print('getting stubs!') 56 | edge_manager_client = agent_pb2_grpc.AgentStub(channel) 57 | 58 | print('calling LoadModel!') 59 | try: 60 | response = edge_manager_client.LoadModel( 61 | LoadModelRequest(url=model_url, name=model_name)) 62 | except Exception as e: 63 | print(e) 64 | print('model already loaded!') 65 | 66 | print('calling ListModels!') 67 | response = edge_manager_client.ListModels(ListModelsRequest()) 68 | 69 | print('calling DescribeModel') 70 | response = edge_manager_client.DescribeModel( 71 | DescribeModelRequest(name=model_name)) 72 | 73 | def greengrass_hello_world_run(): 74 | global edge_manager_client 75 | print('running now!') 76 | try: 77 | if not cap.isOpened(): 78 | print("Cannot open camera\n") 79 | exit(1) 80 | 81 | ret, img = cap.read() 82 | #if reading from file 83 | #img = cv2.imread(img_url) 84 | 85 | print('calling PredictRequest on images !') 86 | #resize input before serving 87 | frame = resize_short_within(img, short=512) 88 | nn_input_size = input_size 89 | nn_input=cv2.resize(frame, (nn_input_size,int(nn_input_size/4*3))) 90 | nn_input=cv2.copyMakeBorder(nn_input,int(nn_input_size/8),int(nn_input_size/8), 91 | 0,0,cv2.BORDER_CONSTANT,value=(0,0,0)) 92 | copy_frame = nn_input[:] 93 | nn_input=nn_input.astype('float32') 94 | nn_input=nn_input.reshape((nn_input_size*nn_input_size ,3)) 95 | scaled_frame=np.transpose(nn_input) 96 | scaled_frame[0,:] = scaled_frame[0,:]-mean[0] 97 | scaled_frame[0,:] = scaled_frame[0,:]/std[0] 98 | scaled_frame[1,:] = scaled_frame[1,:]-mean[1] 99 | scaled_frame[1,:] = scaled_frame[1,:]/std[1] 100 | scaled_frame[2,:] = scaled_frame[2,:]-mean[2] 101 | scaled_frame[2,:] = scaled_frame[2,:]/std[2] 102 | print("SHAPE:" + str(img.shape)) 103 | 104 | request = 
PredictRequest(name=model_name, 105 | tensors=[Tensor(tensor_metadata=TensorMetadata( 106 | name=tensor_name, data_type=5, shape=tensor_shape), byte_data=scaled_frame.tobytes())]) 107 | 108 | print("Calling Predict on the Image...") 109 | response = edge_manager_client.Predict(request) 110 | 111 | #read output tensors 112 | i = 0 113 | output_detections = [] 114 | 115 | for t in response.tensors: 116 | print("Flattened RAW Output Tensor : " + str(i+1)) 117 | i += 1 118 | deserialized_bytes = np.frombuffer(t.byte_data, dtype=np.float32) 119 | output_detections.append(np.asarray(deserialized_bytes)) 120 | 121 | print(output_detections) 122 | #convert the bounding boxes 123 | new_list = [] 124 | for index,item in enumerate(output_detections[2]): 125 | if index % 4 == 0: 126 | new_list.append(output_detections[2][index-4:index]) 127 | output_detections[2] = new_list[1:] 128 | #write to an input image 129 | visualize_detection(copy_frame, output_detections, classes=object_categories, thresh=0.2) 130 | 131 | #save outputs 132 | save_path = os.path.join(os.getcwd(), "./", "output.jpg") 133 | cv2.imwrite(save_path, copy_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) 134 | except Exception as e: 135 | traceback.print_exc() 136 | channel.close() 137 | logger.error("Failed to Run: " + repr(e)) 138 | 139 | # Asynchronously schedule this function to be run again in 2 seconds 140 | Timer(2, greengrass_hello_world_run).start() 141 | 142 | 143 | def visualize_detection(img, dets, classes=[], thresh=0.): 144 | """ 145 | visualize detections in one image 146 | Parameters: 147 | ---------- 148 | img : numpy.array 149 | image 150 | dets : numpy.array 151 | ssd detections 152 | each row is one object 153 | classes : tuple or list of str 154 | class names 155 | thresh : float 156 | score threshold 157 | """ 158 | COLORS = np.random.uniform(0, 255, size=(len(classes), 3)) 159 | 160 | colors = dict() 161 | klasses = dets[0] 162 | scores = dets[1] 163 | bbox = dets[2] 164 | for i in range(len(bbox)):  # iterate over every detection, not over the 3 output tensors 165 | klass = klasses[i] 166 | score = scores[i] 167 | print(bbox[i]) 168 | x0, y0, x1, y1 = bbox[i] 169 | if score < thresh: 170 | continue 171 | cls_id = int(klass) 172 | if cls_id not in colors: 173 | colors[cls_id] = (random.random(), random.random(), random.random()) 174 | 175 | xmin, ymin, xmax, ymax = int(x0), int(y0), int(x1), int(y1) 176 | if classes and len(classes) > cls_id: 177 | class_name = classes[cls_id] 178 | # display the prediction 179 | label = "{}: {:.2f}%".format(classes[cls_id], score * 100) 180 | print("[INFO] {}".format(label)) 181 | cv2.rectangle(img, (xmin, ymin), 182 | (xmax, ymax), 183 | COLORS[cls_id], 2) 184 | y = ymin - 15 if ymin - 15 > 15 else ymin + 15 185 | cv2.putText(img, label, (xmin, y), 186 | cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[cls_id], 2) 187 | 188 | 189 | def _get_interp_method(interp, sizes=()): 190 | """Get the interpolation method for resize functions. 191 | The major purpose of this function is to wrap a random interp method selection 192 | and an auto-estimation method. 193 | 194 | Parameters 195 | ---------- 196 | interp : int 197 | interpolation method for all resizing operations 198 | 199 | Possible values: 200 | 0: Nearest Neighbors Interpolation. 201 | 1: Bilinear interpolation. 202 | 2: Area-based (resampling using pixel area relation). It may be a 203 | preferred method for image decimation, as it gives moire-free 204 | results. But when the image is zoomed, it is similar to the Nearest 205 | Neighbors method. (used by default).
206 | 3: Bicubic interpolation over 4x4 pixel neighborhood. 207 | 4: Lanczos interpolation over 8x8 pixel neighborhood. 208 | 9: Cubic for enlarge, area for shrink, bilinear for others 209 | 10: Randomly select from the interpolation methods mentioned above. 210 | Note: 211 | When shrinking an image, it will generally look best with AREA-based 212 | interpolation, whereas, when enlarging an image, it will generally look best 213 | with Bicubic (slow) or Bilinear (faster but still looks OK). 214 | More details can be found in the documentation of OpenCV, please refer to 215 | http://docs.opencv.org/master/da/d54/group__imgproc__transform.html. 216 | sizes : tuple of int 217 | (old_height, old_width, new_height, new_width), if None provided, auto(9) 218 | will return Area(2) anyway. 219 | 220 | Returns 221 | ------- 222 | int 223 | interp method from 0 to 4 224 | """ 225 | if interp == 9: 226 | if sizes: 227 | assert len(sizes) == 4 228 | oh, ow, nh, nw = sizes 229 | if nh > oh and nw > ow: 230 | return 2 231 | elif nh < oh and nw < ow: 232 | return 3 233 | else: 234 | return 1 235 | else: 236 | return 2 237 | if interp == 10: 238 | return random.randint(0, 4) 239 | if interp not in (0, 1, 2, 3, 4): 240 | raise ValueError('Unknown interp method %d' % interp) 241 | return interp 242 | 243 | 244 | def resize_short_within(img, short=512, max_size=1024, mult_base=32, interp=2): 245 | """ 246 | resizes the short side of the image so the aspect ratio remains the same AND the short 247 | side matches the convolutional layer for the network 248 | 249 | Args: 250 | ----- 251 | img: np.array 252 | image you want to resize 253 | short: int 254 | the size to reshape the image to 255 | max_size: int 256 | the maximum allowed size of the longer side 257 | mult_base: int 258 | the size scale to readjust the resizer 259 | interp: int 260 | see '_get_interp_method' 261 | Returns: 262 | -------- 263 | img: np.array 264 | the resized array 265 | """ 266 | h, w, _ = img.shape 267 | im_size_min, im_size_max = (h, w) if w > h else (w, h) 268 | scale = float(short) / float(im_size_min) 269 | if np.round(scale * im_size_max / mult_base) * mult_base > max_size: 270 | # fit in max_size 271 | scale = float(np.floor(max_size / mult_base) * mult_base) / float(im_size_max) 272 | new_w, new_h = ( 273 | int(np.round(w * scale / mult_base) * mult_base), 274 | int(np.round(h * scale / mult_base) * mult_base) 275 | ) 276 | img = cv2.resize(img, (new_w, new_h), 277 | interpolation=_get_interp_method(interp, (h, w, new_h, new_w))) 278 | return img 279 | 280 | # Start executing the function above 281 | greengrass_hello_world_run() 282 | 283 | # This is a dummy handler and will not be invoked 284 | # Instead the code above will be executed in an infinite loop for our example 285 | def function_handler(event, context): 286 | return 287 | -------------------------------------------------------------------------------- /examples/mxnet_gluon_ssd_lambda_function/requirements.txt: -------------------------------------------------------------------------------- 1 | awscli==1.18.69 2 | botocore==1.16.19 3 | grpcio==1.53.2 4 | grpcio-tools==1.34.1 5 | numpy==1.22.0 6 | protobuf==3.18.3 7 | scipy==1.10.0 8 | tensorrt==7.1.3.0 9 | uff==0.6.9 -------------------------------------------------------------------------------- /scripts/add_agent_artifact.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 4 ]; then 2 | echo 1>&2 "Usage: $0 AWS-PROFILE PLATFORM EDGE-MANAGER-COMPONENT-VERSION REGION" 3 | exit 3 4 | fi 5 
| 6 | AWS_PROFILE=$1 7 | PLATFORM=$2 8 | VERSION=$3 9 | AWS_REGION=$4 10 | 11 | cp ./$PLATFORM/edge-manager-package/bin/sagemaker_edge_agent_binary ./components/artifacts/aws.sagemaker.edgeManager/$VERSION/ 12 | 13 | aws s3 cp s3://sagemaker-edge-release-store-us-west-2-linux-x64/Certificates/$AWS_REGION/$AWS_REGION.pem ./components/artifacts/aws.sagemaker.edgeManager/$VERSION/ --profile $AWS_PROFILE -------------------------------------------------------------------------------- /scripts/compile_add_python_stub_artifacts.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 3 ]; then 2 | echo 1>&2 "Usage: $0 PLATFORM COMPONENT-NAME COMPONENT-VERSION" 3 | exit 3 4 | fi 5 | 6 | PLATFORM=$1 7 | COMPONENT=$2 8 | VERSION=$3 9 | 10 | python -m grpc_tools.protoc -I=$PLATFORM/edge-manager-package/docs/api/ \ 11 | --python_out=components/artifacts/$COMPONENT/$VERSION \ 12 | --grpc_python_out=components/artifacts/$COMPONENT/$VERSION agent.proto -------------------------------------------------------------------------------- /scripts/create_device_fleet_register_device.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 4 ]; then 2 | echo 1>&2 "Usage: $0 AWS-PROFILE-NAME AWS-REGION OUTPUT-S3-BUCKET IOT-THING-NAME" 3 | exit 3 4 | fi 5 | 6 | AWS_PROFILE=$1 7 | AWS_REGION=$2 8 | OUTPUT_BUCKET=$3 9 | IOT_THING_NAME=$4 10 | DEVICE_NAME=sagemaker-ggv2-smem-device-012345678 11 | 12 | ROLE_NAME=SageMakerDeviceFleetRole 13 | ASSUME_POLICY_DOCUMENT="{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"credentials.iot.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"},{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"sagemaker.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}" 14 | DEVICE_FLEET_NAME=ggv2-smem-fleet 15 | 16 | 17 | aws iam create-role --role-name $ROLE_NAME --assume-role-policy-document $ASSUME_POLICY_DOCUMENT --profile $AWS_PROFILE 18 | 19 | if [[ $? -ne 0 ]] 20 | then 21 | echo 'Role already exists, continue...' 
22 | fi 23 | 24 | 25 | # Attach IAM policies 26 | 27 | aws iam attach-role-policy --profile $AWS_PROFILE --role-name $ROLE_NAME \ 28 | --policy-arn arn:aws:iam::aws:policy/service-role/AmazonSageMakerEdgeDeviceFleetPolicy 29 | 30 | aws iam attach-role-policy --profile $AWS_PROFILE --role-name $ROLE_NAME \ 31 | --policy-arn arn:aws:iam::aws:policy/AmazonSageMakerFullAccess 32 | 33 | aws iam attach-role-policy --profile $AWS_PROFILE --role-name $ROLE_NAME \ 34 | --policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess 35 | 36 | aws iam attach-role-policy --profile $AWS_PROFILE --role-name $ROLE_NAME \ 37 | --policy-arn arn:aws:iam::aws:policy/AWSIoTFullAccess 38 | 39 | ROLE_ARN=$(aws iam get-role --role-name $ROLE_NAME --profile $AWS_PROFILE | jq -r .Role.Arn) 40 | 41 | # Create device fleet 42 | 43 | aws sagemaker create-device-fleet --profile $AWS_PROFILE --region $AWS_REGION --device-fleet-name $DEVICE_FLEET_NAME \ 44 | --role-arn $ROLE_ARN --output-config "{\"S3OutputLocation\":\"s3://$OUTPUT_BUCKET/collected_sample_data/\"}" 45 | 46 | # Register device 47 | 48 | aws sagemaker register-devices --profile $AWS_PROFILE --region $AWS_REGION --device-fleet-name $DEVICE_FLEET_NAME \ 49 | --devices "[{\"DeviceName\":\"$DEVICE_NAME\",\"IotThingName\":\"$IOT_THING_NAME\"}]" -------------------------------------------------------------------------------- /scripts/create_neo_compilation_job.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 5 ]; then 2 | echo 1>&2 "Usage: $0 AWS-PROFILE-NAME S3-BUCKET AWS-REGION SAGEMAKER-ROLE-NAME TARGET-DEVICE" 3 | exit 3 4 | fi 5 | 6 | AWS_PROFILE=$1 7 | BUCKET_NAME=$2 8 | AWS_REGION=$3 9 | ROLE_NAME=$4 10 | TARGET_DEVICE=$5 11 | 12 | ROLE_ARN=$(aws iam get-role --role-name $ROLE_NAME | jq -r .Role.Arn) 13 | 14 | aws sagemaker create-compilation-job --compilation-job-name jetson-xavier-darknet-001 --role-arn $ROLE_ARN \ 15 | --input-config "{\"S3Uri\": \"s3:\/\/$BUCKET_NAME\/models\/yolo3-tiny.tar.gz\",\"DataInputConfig\": \"{\\\"data\\\":[1,3,416,416]}\",\"Framework\": \"DARKNET\"}" \ 16 | --output-config "{\"S3OutputLocation\":\"s3:\/\/$BUCKET_NAME\/models\/neo-compiled\/\",\"TargetDevice\":\"$TARGET_DEVICE\"}" \ 17 | --stopping-condition "{\"MaxWaitTimeInSeconds\":60,\"MaxRuntimeInSeconds\":900}" --profile $AWS_PROFILE --region $AWS_REGION 18 | 19 | 20 | -------------------------------------------------------------------------------- /scripts/create_sagemaker_role.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 2 ]; then 2 | echo 1>&2 "Usage: $0 AWS-PROFILE-NAME ROLE-NAME" 3 | exit 3 4 | fi 5 | 6 | AWS_PROFILE=$1 7 | SM_ROLE_NAME=$2 8 | 9 | aws iam create-role --profile $AWS_PROFILE --role-name $SM_ROLE_NAME --assume-role-policy-document "{\"Version\": \"2012-10-17\",\"Statement\":[{\"Effect\": \"Allow\",\"Principal\":{\"Service\":\"sagemaker.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}" 10 | 11 | aws iam attach-role-policy --profile $AWS_PROFILE --role-name $SM_ROLE_NAME \ 12 | --policy-arn arn:aws:iam::aws:policy/AmazonSageMakerFullAccess 13 | 14 | aws iam attach-role-policy --profile $AWS_PROFILE --role-name $SM_ROLE_NAME \ 15 | --policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess -------------------------------------------------------------------------------- /scripts/delete_component.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 4 ]; then 2 | echo 1>&2 "Usage: $0 AWS-PROFILE-NAME 
COMPONENT_NAME COMPONENT_VERSION AWS_REGION" 3 | exit 3 4 | fi 5 | 6 | AWS_PROFILE=$1 7 | COMPONENT_NAME=$2 8 | COMPONENT_VERSION=$3 9 | AWS_REGION=$4 10 | 11 | ACCOUNT=$(aws sts get-caller-identity --profile $AWS_PROFILE | jq -r '.Account') 12 | 13 | aws greengrassv2 delete-component --profile $AWS_PROFILE --arn arn:aws:greengrass:$AWS_REGION:$ACCOUNT:components:$COMPONENT_NAME:versions:$COMPONENT_VERSION --region $AWS_REGION 14 | -------------------------------------------------------------------------------- /scripts/download_edge_manager_package.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 2 ]; then 2 | echo 1>&2 "Usage: $0 AWS-PROFILE-NAME PLATFORM" 3 | exit 3 4 | fi 5 | 6 | AWS_PROFILE=$1 7 | PLATFORM=$2 8 | 9 | export VERSION=$(aws s3 ls s3://sagemaker-edge-release-store-us-west-2-$PLATFORM/Releases/ --profile $AWS_PROFILE | sort -r | sed -n 2p | xargs | cut -d' ' -f2 | cut -d'/' -f1) 10 | mkdir -p $PLATFORM/edge-manager-package 11 | aws s3 cp s3://sagemaker-edge-release-store-us-west-2-$PLATFORM/Releases/$VERSION/$VERSION.tgz ./$PLATFORM --profile $AWS_PROFILE 12 | aws s3 cp s3://sagemaker-edge-release-store-us-west-2-$PLATFORM/Releases/$VERSION/sha256_hex.shasum ./$PLATFORM --profile $AWS_PROFILE 13 | tar zxvf $PLATFORM/$VERSION.tgz -C $PLATFORM/edge-manager-package -------------------------------------------------------------------------------- /scripts/download_upload_sample_model.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 2 ]; then 2 | echo 1>&2 "Usage: $0 AWS-PROFILE-NAME S3-BUCKET" 3 | exit 3 4 | fi 5 | 6 | AWS_PROFILE=$1 7 | BUCKET_NAME=$2 8 | 9 | mkdir -p ./models 10 | 11 | wget -O ./models/yolov3-tiny.cfg https://github.com/pjreddie/darknet/blob/master/cfg/yolov3-tiny.cfg?raw=true 12 | wget -O ./models/yolov3-tiny.weights https://pjreddie.com/media/files/yolov3-tiny.weights 13 | 14 | tar -czvf yolo3-tiny.tar.gz ./models/yolov3-tiny.cfg ./models/yolov3-tiny.weights 15 | mv yolo3-tiny.tar.gz ./models 16 | rm ./models/yolov3-tiny.cfg ./models/yolov3-tiny.weights 17 | 18 | aws s3 cp ./models/yolo3-tiny.tar.gz s3://$BUCKET_NAME/models/yolo3-tiny.tar.gz --profile $AWS_PROFILE -------------------------------------------------------------------------------- /scripts/install-ggv2-ssh-existing-role.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 7 ]; then 2 | echo 1>&2 "Usage: $0 AWS-PROFILE-NAME SSH-USER SSH-HOST AWS-REGION IOT-THING-NAME TES-ROLE-NAME TES-ROLE-ALIAS" 3 | exit 3 4 | fi 5 | 6 | AWS_PROFILE=$1 7 | USER=$2 8 | HOST=$3 9 | AWS_REGION=$4 10 | IOT_THING_NAME=$5 11 | ROLE_NAME=$6 12 | ROLE_ALIAS=$7 13 | 14 | HOST_STRING="${USER}@${HOST}" 15 | 16 | echo "\n===[ Installing prerequisites ]===\n" 17 | ssh -t $HOST_STRING "sudo apt-get update && sudo apt-get install -y openjdk-8-jdk curl unzip" 18 | 19 | echo "\n===[ Downloading Greengrass v2 ]===\n" 20 | ssh -t $HOST_STRING "cd ~ && curl -s https://d2s8p88vqu9w66.cloudfront.net/releases/greengrass-nucleus-latest.zip > \\ 21 | greengrass-nucleus-latest.zip && yes | unzip greengrass-nucleus-latest.zip -d GreengrassCore && \\ 22 | rm greengrass-nucleus-latest.zip" 23 | 24 | export AWS_ACCESS_KEY_ID=$(aws configure get $AWS_PROFILE.aws_access_key_id) 25 | export AWS_SECRET_ACCESS_KEY=$(aws configure get $AWS_PROFILE.aws_secret_access_key) 26 | #export AWS_SESSION_TOKEN=$(aws configure get $AWS_PROFILE.aws_session_token) 27 | 28 | echo "\n===[ Installing and provisioning Greengrass v2 ]===\n" 29 
| ssh -t $HOST_STRING "AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \\ 30 | sudo -E java -Droot='/greengrass/v2' -Dlog.store=FILE -jar ./GreengrassCore/lib/Greengrass.jar \\ 31 | --aws-region ${AWS_REGION} --thing-name ${IOT_THING_NAME} --thing-group-name GreengrassEdgeManagerGroup \\ 32 | --tes-role-name ${ROLE_NAME} --tes-role-alias-name ${ROLE_ALIAS} \\ 33 | --component-default-user root:root --provision false --setup-system-service true --deploy-dev-tools true" 34 | 35 | echo "\n===[ Greengrass v2 started ]===\n" 36 | 37 | echo "\n===[ Adding S3 read permissions to TES role ]===\n" 38 | 39 | aws iam attach-role-policy --profile $AWS_PROFILE --role-name ${ROLE_NAME} \ 40 | --policy-arn arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess 41 | 42 | echo "\n===[ Complete ]===\n" 43 | -------------------------------------------------------------------------------- /scripts/install-ggv2-ssh.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 5 ]; then 2 | echo 1>&2 "Usage: $0 AWS-PROFILE-NAME SSH-USER SSH-HOST AWS-REGION IOT-THING-NAME" 3 | exit 3 4 | fi 5 | 6 | AWS_PROFILE=$1 7 | USER=$2 8 | HOST=$3 9 | AWS_REGION=$4 10 | IOT_THING_NAME=$5 11 | 12 | HOST_STRING="${USER}@${HOST}" 13 | 14 | echo "\n===[ Installing prerequisites ]===\n" 15 | ssh -t $HOST_STRING "sudo apt-get update && sudo apt-get install -y openjdk-8-jdk curl unzip" 16 | 17 | echo "\n===[ Downloading Greengrass v2 ]===\n" 18 | ssh -t $HOST_STRING "cd ~ && curl -s https://d2s8p88vqu9w66.cloudfront.net/releases/greengrass-nucleus-latest.zip > \\ 19 | greengrass-nucleus-latest.zip && yes | unzip greengrass-nucleus-latest.zip -d GreengrassCore && \\ 20 | rm greengrass-nucleus-latest.zip" 21 | 22 | export AWS_ACCESS_KEY_ID=$(aws configure get $AWS_PROFILE.aws_access_key_id) 23 | export AWS_SECRET_ACCESS_KEY=$(aws configure get $AWS_PROFILE.aws_secret_access_key) 24 | #export AWS_SESSION_TOKEN=$(aws configure get $AWS_PROFILE.aws_session_token) 25 | 26 | echo "\n===[ Installing and provisioning Greengrass v2 ]===\n" 27 | ssh -t $HOST_STRING "AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \\ 28 | sudo -E java -Droot='/greengrass/v2' -Dlog.store=FILE -jar ./GreengrassCore/lib/Greengrass.jar \\ 29 | --aws-region ${AWS_REGION} --thing-name ${IOT_THING_NAME} --thing-group-name GreengrassEdgeManagerGroup \\ 30 | --tes-role-name MyGreengrassV2TokenExchangeRole --tes-role-alias-name MyGreengrassCoreTokenExchangeRoleAlias \\ 31 | --component-default-user root:root --provision true --setup-system-service true --deploy-dev-tools true" 32 | 33 | echo "\n===[ Greengrass v2 started ]===\n" 34 | 35 | echo "\n===[ Adding S3 read permissions to TES role ]===\n" 36 | 37 | aws iam attach-role-policy --profile $AWS_PROFILE --role-name MyGreengrassV2TokenExchangeRole \ 38 | --policy-arn arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess 39 | 40 | echo "\n===[ Complete ]===\n" 41 | -------------------------------------------------------------------------------- /scripts/package_neo_model.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 4 ]; then 2 | echo 1>&2 "Usage: $0 AWS-PROFILE-NAME S3-BUCKET AWS-REGION SAGEMAKER-ROLE-NAME" 3 | exit 3 4 | fi 5 | 6 | AWS_PROFILE=$1 7 | BUCKET_NAME=$2 8 | AWS_REGION=$3 9 | ROLE_NAME=$4 10 | 11 | ROLE_ARN=$(aws iam get-role --role-name $ROLE_NAME | jq -r .Role.Arn) 12 | 13 | PACKAGED_MODEL_NAME=darknet-model 14 | PACKAGED_MODEL_VERSION=1.0 15 | 
MODEL_PACKAGE=$PACKAGED_MODEL_NAME-$PACKAGED_MODEL_VERSION.tar.gz 16 | COMPILATION_JOB_NAME=jetson-xavier-darknet-001 17 | PACKAGING_JOB_NAME=$COMPILATION_JOB_NAME-packaging 18 | 19 | aws sagemaker create-edge-packaging-job --edge-packaging-job-name $PACKAGING_JOB_NAME \ 20 | --compilation-job-name $COMPILATION_JOB_NAME --model-name $PACKAGED_MODEL_NAME --model-version $PACKAGED_MODEL_VERSION \ 21 | --role-arn $ROLE_ARN --output-config "{\"S3OutputLocation\":\"s3:\/\/$BUCKET_NAME\/models\/packaged\/\"}" --region $AWS_REGION --profile $AWS_PROFILE -------------------------------------------------------------------------------- /scripts/upload_component_version.sh: -------------------------------------------------------------------------------- 1 | if [ $# -ne 5 ]; then 2 | echo 1>&2 "Usage: $0 AWS-PROFILE-NAME COMPONENT_NAME COMPONENT_VERSION BUCKET_NAME AWS_REGION" 3 | exit 3 4 | fi 5 | 6 | AWS_PROFILE=$1 7 | COMPONENT_NAME=$2 8 | COMPONENT_VERSION=$3 9 | S3_BUCKET_NAME=$4 10 | AWS_REGION=$5 11 | 12 | cd components/artifacts/$COMPONENT_NAME/$COMPONENT_VERSION 13 | 14 | for FILE in *; do aws s3api put-object --profile $AWS_PROFILE --bucket $S3_BUCKET_NAME --key artifacts/$COMPONENT_NAME/$COMPONENT_VERSION/$FILE --body $FILE; done 15 | 16 | cd ../../.. 17 | 18 | aws greengrassv2 create-component-version --profile $AWS_PROFILE --inline-recipe fileb://recipes/$COMPONENT_NAME-$COMPONENT_VERSION.yaml --region $AWS_REGION --------------------------------------------------------------------------------
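For reference, the artifact-upload and component-registration flow that upload_component_version.sh performs with the AWS CLI can also be done with boto3, which this example already uses elsewhere. The sketch below is illustrative only: the profile, region, bucket, and component values are placeholders, and it assumes the recipe's artifact URIs already point at the bucket and keys being uploaded.

```python
"""Minimal boto3 sketch of the upload_component_version.sh flow (placeholders, not repo code)."""
from pathlib import Path

import boto3

PROFILE = "default"                         # assumed AWS profile
REGION = "us-west-2"                        # assumed region
BUCKET = "my-greengrass-artifacts-bucket"   # assumed artifact bucket
COMPONENT = "aws.sagemaker.edgeManagerPythonClient"
VERSION = "0.1.0"

session = boto3.Session(profile_name=PROFILE, region_name=REGION)
s3 = session.client("s3")
ggv2 = session.client("greengrassv2")

# 1. Upload every artifact file for this component version to S3, mirroring the
#    artifacts/<component>/<version>/ key layout that the recipes reference.
artifact_dir = Path("components/artifacts") / COMPONENT / VERSION
for artifact in sorted(artifact_dir.iterdir()):
    if artifact.is_file():
        key = f"artifacts/{COMPONENT}/{VERSION}/{artifact.name}"
        s3.upload_file(str(artifact), BUCKET, key)
        print(f"uploaded s3://{BUCKET}/{key}")

# 2. Register a new Greengrass v2 component version from the inline recipe.
recipe_bytes = Path(f"components/recipes/{COMPONENT}-{VERSION}.yaml").read_bytes()
response = ggv2.create_component_version(inlineRecipe=recipe_bytes)
print(response["arn"], response["status"]["componentState"])
```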