├── NOTICE ├── 02_deploy_and_monitor ├── postprocessor.py ├── source_dir │ └── deploy_xgboost.py ├── monitoringjob_utils.py └── deploy_and_monitor.ipynb ├── images ├── jupyter_clone.png ├── jupyter_lab_screen.png ├── search_sagemaker.png ├── jupyter_new_terminal.png ├── jupyter_terminal_tab.png ├── sagemaker_dashboard.png ├── notebook_instance_pending.png ├── notebook_instances_screen.png ├── notebook_instance_in_service.png ├── create_notebook_instance_role.png └── create_notebook_instance_screen.png ├── CODE_OF_CONDUCT.md ├── 01_train_and_debug ├── source_dir │ ├── train_xgboost_no_debug.py │ └── train_xgboost.py └── train_and_debug.ipynb ├── CONTRIBUTING.md ├── README.md └── LICENSE /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | -------------------------------------------------------------------------------- /02_deploy_and_monitor/postprocessor.py: -------------------------------------------------------------------------------- 1 | def postprocess_handler(): 2 | print("Hello from post-proc script!") -------------------------------------------------------------------------------- /images/jupyter_clone.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/HEAD/images/jupyter_clone.png -------------------------------------------------------------------------------- /images/jupyter_lab_screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/HEAD/images/jupyter_lab_screen.png -------------------------------------------------------------------------------- /images/search_sagemaker.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/HEAD/images/search_sagemaker.png -------------------------------------------------------------------------------- /images/jupyter_new_terminal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/HEAD/images/jupyter_new_terminal.png -------------------------------------------------------------------------------- /images/jupyter_terminal_tab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/HEAD/images/jupyter_terminal_tab.png -------------------------------------------------------------------------------- /images/sagemaker_dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/HEAD/images/sagemaker_dashboard.png -------------------------------------------------------------------------------- /images/notebook_instance_pending.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/HEAD/images/notebook_instance_pending.png -------------------------------------------------------------------------------- /images/notebook_instances_screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/HEAD/images/notebook_instances_screen.png -------------------------------------------------------------------------------- /images/notebook_instance_in_service.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/HEAD/images/notebook_instance_in_service.png -------------------------------------------------------------------------------- /images/create_notebook_instance_role.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/HEAD/images/create_notebook_instance_role.png -------------------------------------------------------------------------------- /images/create_notebook_instance_screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/reinvent2019-aim362-sagemaker-debugger-model-monitor/HEAD/images/create_notebook_instance_screen.png -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 
5 | -------------------------------------------------------------------------------- /02_deploy_and_monitor/source_dir/deploy_xgboost.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle as pkl 3 | import numpy as np 4 | 5 | from sagemaker_containers.beta.framework import ( 6 | content_types, encoders, env, modules, transformer, worker) 7 | 8 | def model_fn(model_dir): 9 | model_file = model_dir + '/model.bin' 10 | model = pkl.load(open(model_file, 'rb')) 11 | return model 12 | 13 | def output_fn(prediction, accept): 14 | 15 | pred_array_value = np.array(prediction) 16 | pred_value = int(pred_array_value[0]) 17 | 18 | return worker.Response(str(pred_value), accept, mimetype=accept) 19 | -------------------------------------------------------------------------------- /01_train_and_debug/source_dir/train_xgboost_no_debug.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import os 4 | import random 5 | import pandas as pd 6 | import glob 7 | import pickle as pkl 8 | 9 | import xgboost 10 | 11 | def parse_args(): 12 | 13 | parser = argparse.ArgumentParser() 14 | 15 | parser.add_argument("--max_depth", type=int, default=5) 16 | parser.add_argument("--eta", type=float, default=0.05) # 0.2 17 | parser.add_argument("--gamma", type=int, default=4) 18 | parser.add_argument("--min_child_weight", type=int, default=6) 19 | parser.add_argument("--silent", type=int, default=0) 20 | parser.add_argument("--objective", type=str, default="multi:softmax") 21 | parser.add_argument("--num_class", type=int, default=15) 22 | parser.add_argument("--num_round", type=int, default=10) 23 | 24 | parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN')) 25 | parser.add_argument('--validation', type=str, default=os.environ.get('SM_CHANNEL_VALIDATION')) 26 | 27 | args = parser.parse_args() 28 | 29 | return args 30 | 31 | def 
model_fn(model_dir): 32 | model_file = model_dir + '/model.bin' 33 | model = pkl.load(open(model_file, 'rb')) 34 | return model 35 | 36 | def main(): 37 | 38 | args = parse_args() 39 | train_files_path, validation_files_path = args.train, args.validation 40 | 41 | train_files_list = glob.glob(train_files_path + '/*.*') 42 | print(train_files_list) 43 | 44 | val_files_list = glob.glob(validation_files_path + '/*.*') 45 | print(val_files_list) 46 | 47 | print('Loading training dataframe...') 48 | df_train = pd.concat(map(pd.read_csv, train_files_list)) 49 | print('Loading validation dataframe...') 50 | df_val = pd.concat(map(pd.read_csv, val_files_list)) 51 | print('Data loading completed.') 52 | 53 | y = df_train.Target.values 54 | X = df_train.drop(['Target'], axis=1).values 55 | val_y = df_val.Target.values 56 | val_X = df_val.drop(['Target'], axis=1).values 57 | 58 | dtrain = xgboost.DMatrix(X, label=y) 59 | dval = xgboost.DMatrix(val_X, label=val_y) 60 | 61 | watchlist = [(dtrain, "train"), (dval, "validation")] 62 | 63 | params = { 64 | "max_depth": args.max_depth, 65 | "eta": args.eta, 66 | "gamma": args.gamma, 67 | "min_child_weight": args.min_child_weight, 68 | "silent": args.silent, 69 | "objective": args.objective, 70 | "num_class": args.num_class 71 | } 72 | 73 | bst = xgboost.train( 74 | params=params, 75 | dtrain=dtrain, 76 | evals=watchlist, 77 | num_boost_round=args.num_round) 78 | 79 | model_dir = os.environ.get('SM_MODEL_DIR') 80 | pkl.dump(bst, open(model_dir + '/model.bin', 'wb')) 81 | 82 | if __name__ == "__main__": 83 | main() 84 | -------------------------------------------------------------------------------- /01_train_and_debug/source_dir/train_xgboost.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import bz2 3 | import json 4 | import os 5 | import pickle 6 | import random 7 | import tempfile 8 | import urllib.request 9 | import pandas as pd 10 | import glob 11 | import pickle as 
pkl 12 | 13 | import xgboost 14 | 15 | from smdebug import SaveConfig 16 | from smdebug.xgboost import Hook 17 | 18 | def parse_args(): 19 | 20 | parser = argparse.ArgumentParser() 21 | 22 | parser.add_argument("--max_depth", type=int, default=5) 23 | parser.add_argument("--eta", type=float, default=0.05) # 0.2 24 | parser.add_argument("--gamma", type=int, default=4) 25 | parser.add_argument("--min_child_weight", type=int, default=6) 26 | parser.add_argument("--silent", type=int, default=0) 27 | parser.add_argument("--objective", type=str, default="multi:softmax") 28 | parser.add_argument("--num_class", type=int, default=15) 29 | parser.add_argument("--num_round", type=int, default=10) 30 | 31 | parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN')) 32 | parser.add_argument('--validation', type=str, default=os.environ.get('SM_CHANNEL_VALIDATION')) 33 | 34 | args = parser.parse_args() 35 | 36 | return args 37 | 38 | def main(): 39 | 40 | args = parse_args() 41 | train_files_path, validation_files_path = args.train, args.validation 42 | 43 | train_files_list = glob.glob(train_files_path + '/*.*') 44 | print(train_files_list) 45 | 46 | val_files_list = glob.glob(validation_files_path + '/*.*') 47 | print(val_files_list) 48 | 49 | print('Loading training data...') 50 | df_train = pd.concat(map(pd.read_csv, train_files_list)) 51 | print('Loading validation data...') 52 | df_val = pd.concat(map(pd.read_csv, val_files_list)) 53 | print('Data loading completed.') 54 | 55 | y = df_train.Target.values 56 | X = df_train.drop(['Target'], axis=1).values 57 | val_y = df_val.Target.values 58 | val_X = df_val.drop(['Target'], axis=1).values 59 | 60 | dtrain = xgboost.DMatrix(X, label=y) 61 | dval = xgboost.DMatrix(val_X, label=val_y) 62 | 63 | params = { 64 | "max_depth": args.max_depth, 65 | "eta": args.eta, 66 | "gamma": args.gamma, 67 | "min_child_weight": args.min_child_weight, 68 | "silent": args.silent, 69 | "objective": args.objective, 70 | 
"num_class": args.num_class} 71 | 72 | hook = Hook.create_from_json_file() 73 | hook.train_data = dtrain 74 | hook.validation_data = dval 75 | 76 | watchlist = [(dtrain, "train"), (dval, "validation")] 77 | 78 | bst = xgboost.train( 79 | params=params, 80 | dtrain=dtrain, 81 | evals=watchlist, 82 | num_boost_round=args.num_round, 83 | callbacks=[hook]) 84 | 85 | model_dir = os.environ.get('SM_MODEL_DIR') 86 | pkl.dump(bst, open(model_dir + '/model.bin', 'wb')) 87 | 88 | if __name__ == "__main__": 89 | main() -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *master* branch. 27 | 2. 
GitHub provides additional documentation on
# 'us-east-2': '680080141114',  # FIXME: duplicate key — silently overridden by the 'us-east-2' entry below; the region label here is likely wrong, verify against the Model Monitor image-URI table
data_capture_path[data_capture_path.rfind('datacapture/') :] 40 | data_capture_sub_path = data_capture_sub_path[data_capture_sub_path.find('/') + 1 :] 41 | processing_output_paths = reports_path + '/' + data_capture_sub_path 42 | 43 | input_1 = ProcessingInput(input_name='input_1', 44 | source=data_capture_path, 45 | destination='/opt/ml/processing/input/endpoint/' + data_capture_sub_path, 46 | s3_data_type='S3Prefix', 47 | s3_input_mode='File') 48 | 49 | baseline = ProcessingInput(input_name='baseline', 50 | source=statistics_path, 51 | destination='/opt/ml/processing/baseline/stats', 52 | s3_data_type='S3Prefix', 53 | s3_input_mode='File') 54 | 55 | constraints = ProcessingInput(input_name='constraints', 56 | source=constraints_path, 57 | destination='/opt/ml/processing/baseline/constraints', 58 | s3_data_type='S3Prefix', 59 | s3_input_mode='File') 60 | 61 | outputs = ProcessingOutput(output_name='result', 62 | source='/opt/ml/processing/output', 63 | destination=processing_output_paths, 64 | s3_upload_mode='Continuous') 65 | 66 | env = {'baseline_constraints': '/opt/ml/processing/baseline/constraints/' + get_file_name(constraints_path), 67 | 'baseline_statistics': '/opt/ml/processing/baseline/stats/' + get_file_name(statistics_path), 68 | 'dataset_format': '{"sagemakerCaptureJson":{"captureIndexNames":["endpointInput","endpointOutput"]}}', 69 | 'dataset_source': '/opt/ml/processing/input/endpoint', 70 | 'output_path': '/opt/ml/processing/output', 71 | 'publish_cloudwatch_metrics': publish_cloudwatch_metrics } 72 | 73 | inputs=[input_1, baseline, constraints] 74 | 75 | if postprocessor_path: 76 | env['post_analytics_processor_script'] = '/opt/ml/processing/code/postprocessing/' + get_file_name(postprocessor_path) 77 | 78 | post_processor_script = ProcessingInput(input_name='post_processor_script', 79 | source=postprocessor_path, 80 | destination='/opt/ml/processing/code/postprocessing', 81 | s3_data_type='S3Prefix', 82 | s3_input_mode='File') 83 | 
We will use Amazon SageMaker to build
Once trained, we will then show how to deploy the model and monitor its performance.
Wait until the notebook instance status is _In Service_
This workshop consists of 2 modules:
112 | [Paul Armstrong](https://www.linkedin.com/in/paul-armstrong-532bb41) - Principal Solutions Architect - Amazon Web Services EMEA 113 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 
176 | -------------------------------------------------------------------------------- /02_deploy_and_monitor/deploy_and_monitor.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Deploying and Monitoring\n", 8 | "\n", 9 | "In this notebook we will deploy the network traffic classification model that we have trained in the previous steps to Amazon SageMaker hosting, which will expose a fully-managed real-time endpoint to execute inferences.\n", 10 | "\n", 11 | "Amazon SageMaker is adding new capabilities that monitor ML models while in production and detect deviations in data quality in comparison to a baseline dataset (e.g. training data set). They enable you to capture the metadata and the input and output for invocations of the models that you deploy with Amazon SageMaker. They also enable you to analyze the data and monitor its quality. \n", 12 | "\n", 13 | "We will deploy the model to a real-time endpoint with data capture enabled and start collecting some inference inputs/outputs. Then, we will create a baseline and finally enable model monitoring to compare inference data with respect to the baseline and analyze the quality." 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "First, we set some variables, including the AWS region we are working in, the IAM execution role of the notebook instance and the Amazon S3 bucket where we will store data and outputs." 
21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "import os\n", 30 | "import boto3\n", 31 | "import sagemaker\n", 32 | "\n", 33 | "region = boto3.Session().region_name\n", 34 | "role = sagemaker.get_execution_role()\n", 35 | "sagemaker_session = sagemaker.Session()\n", 36 | "bucket_name = sagemaker_session.default_bucket()\n", 37 | "prefix = 'aim362'\n", 38 | "\n", 39 | "print(region)\n", 40 | "print(role)\n", 41 | "print(bucket_name)" 42 | ] 43 | }, 44 | { 45 | "cell_type": "markdown", 46 | "metadata": {}, 47 | "source": [ 48 | "## Deployment with Data Capture\n", 49 | "\n", 50 | "We are going to deploy the latest network traffic classification model that we have trained. To deploy a model using the SM Python SDK, we need to make sure we have the Amazon S3 URI where the model artifacts are stored and the URI of the Docker container that will be used for hosting this model.\n", 51 | "\n", 52 | "First, let's determine the Amazon S3 URI of the model artifacts by using a couple of utility functions which query Amazon SageMaker service to get the latest training job whose name starts with 'nw-traffic-classification-xgb' and then describing the training job." 
53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "import boto3\n", 62 | "\n", 63 | "def get_latest_training_job_name(base_job_name):\n", 64 | " client = boto3.client('sagemaker')\n", 65 | " response = client.list_training_jobs(NameContains=base_job_name, SortBy='CreationTime', \n", 66 | " SortOrder='Descending', StatusEquals='Completed')\n", 67 | " if len(response['TrainingJobSummaries']) > 0 :\n", 68 | " return response['TrainingJobSummaries'][0]['TrainingJobName']\n", 69 | " else:\n", 70 | " raise Exception('Training job not found.')\n", 71 | "\n", 72 | "def get_training_job_s3_model_artifacts(job_name):\n", 73 | " client = boto3.client('sagemaker')\n", 74 | " response = client.describe_training_job(TrainingJobName=job_name)\n", 75 | " s3_model_artifacts = response['ModelArtifacts']['S3ModelArtifacts']\n", 76 | " return s3_model_artifacts\n", 77 | "\n", 78 | "latest_training_job_name = get_latest_training_job_name('nw-traffic-classification-xgb')\n", 79 | "print(latest_training_job_name)\n", 80 | "model_path = get_training_job_s3_model_artifacts(latest_training_job_name)\n", 81 | "print(model_path)" 82 | ] 83 | }, 84 | { 85 | "cell_type": "markdown", 86 | "metadata": {}, 87 | "source": [ 88 | "For this model, we are going to use the same XGBoost Docker container we used for training, which also offers inference capabilities. As a consequence, we can just create the XGBoostModel object of the Amazon SageMaker Python SDK and then invoke its .deploy() method to execute deployment." 89 | ] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "metadata": {}, 94 | "source": [ 95 | "We will also provide an entrypoint script to be invoked at deployment/inference time. The purpose of this code is deserializing and loading the XGB model. In addition, we are re-defining the output functions as we want to extract the class value from the default array output. 
For example, for class 3 the XGB container would output [3.] but we want to extract only the 3 value." 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": null, 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [ 104 | "!pygmentize source_dir/deploy_xgboost.py" 105 | ] 106 | }, 107 | { 108 | "cell_type": "markdown", 109 | "metadata": {}, 110 | "source": [ 111 | "Now we are ready to create the XGBoostModel object." 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": null, 117 | "metadata": {}, 118 | "outputs": [], 119 | "source": [ 120 | "from time import gmtime, strftime\n", 121 | "from sagemaker.xgboost import XGBoostModel\n", 122 | "\n", 123 | "model_name = 'nw-traffic-classification-xgb-model-' + strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\n", 124 | "\n", 125 | "code_location = 's3://{0}/{1}/code'.format(bucket_name, prefix)\n", 126 | "xgboost_model = XGBoostModel(model_data=model_path,\n", 127 | " entry_point='deploy_xgboost.py',\n", 128 | " source_dir='source_dir/',\n", 129 | " name=model_name,\n", 130 | " code_location=code_location,\n", 131 | " framework_version='0.90-2',\n", 132 | " role=role, \n", 133 | " sagemaker_session=sagemaker_session)" 134 | ] 135 | }, 136 | { 137 | "cell_type": "markdown", 138 | "metadata": {}, 139 | "source": [ 140 | "Finally we create an endpoint with data capture enabled, for monitoring the model data quality.\n", 141 | "Data capture is enabled at endpoint configuration level for the Amazon SageMaker real-time endpoint. You can choose to capture the request payload, the response payload or both and captured data is stored in JSON format."
142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "from time import gmtime, strftime\n", 151 | "from sagemaker.model_monitor import DataCaptureConfig\n", 152 | "\n", 153 | "s3_capture_upload_path = 's3://{}/{}/monitoring/datacapture'.format(bucket_name, prefix)\n", 154 | "print(s3_capture_upload_path)\n", 155 | "\n", 156 | "endpoint_name = 'nw-traffic-classification-xgb-ep-' + strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\n", 157 | "print(endpoint_name)\n", 158 | "\n", 159 | "pred = xgboost_model.deploy(initial_instance_count=1,\n", 160 | " instance_type='ml.m5.xlarge',\n", 161 | " endpoint_name=endpoint_name,\n", 162 | " data_capture_config=DataCaptureConfig(\n", 163 | " enable_capture=True,\n", 164 | " sampling_percentage=100,\n", 165 | " destination_s3_uri=s3_capture_upload_path))" 166 | ] 167 | }, 168 | { 169 | "cell_type": "markdown", 170 | "metadata": {}, 171 | "source": [ 172 | "After the deployment has been completed, we can leverage the RealTimePredictor object to execute HTTPS requests against the deployed endpoint and get inference results."
173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": null, 178 | "metadata": {}, 179 | "outputs": [], 180 | "source": [ 181 | "from sagemaker.predictor import RealTimePredictor\n", 182 | "\n", 183 | "pred = RealTimePredictor(endpoint_name)\n", 184 | "pred.content_type = 'text/csv'\n", 185 | "pred.accept = 'text/csv'\n", 186 | "\n", 187 | "# Expecting class 4\n", 188 | "test_values = \"80,1056736,3,4,20,964,20,0,6.666666667,11.54700538,964,0,241.0,482.0,931.1691850999999,6.6241710320000005,176122.6667,\\\n", 189 | "431204.4454,1056315,2,394,197.0,275.77164469999997,392,2,1056733,352244.3333,609743.1115,1056315,24,0,0,0,0,72,92,\\\n", 190 | "2.8389304419999997,3.78524059,0,964,123.0,339.8873763,115523.4286,0,0,1,1,0,0,0,1,1.0,140.5714286,6.666666667,\\\n", 191 | "241.0,0.0,0.0,0.0,0.0,0.0,0.0,3,20,4,964,8192,211,1,20,0.0,0.0,0,0,0.0,0.0,0,0,20,2,2018,1,0,1,0\"\n", 192 | "\n", 193 | "result = pred.predict(test_values)\n", 194 | "print(result)\n", 195 | "\n", 196 | "# Expecting class 7\n", 197 | "test_values = \"80,10151,2,0,0,0,0,0,0.0,0.0,0,0,0.0,0.0,0.0,197.0249237,10151.0,0.0,10151,10151,10151,10151.0,0.0,10151,10151,0,0.0,\\\n", 198 | "0.0,0,0,0,0,0,0,40,0,197.0249237,0.0,0,0,0.0,0.0,0.0,0,0,0,0,1,0,0,0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2,0,0,0,32738,\\\n", 199 | "-1,0,20,0.0,0.0,0,0,0.0,0.0,0,0,21,2,2018,2,0,1,0\"\n", 200 | "\n", 201 | "result = pred.predict(test_values)\n", 202 | "print(result)\n", 203 | "\n", 204 | "# Expecting class 0\n", 205 | "test_values = \"80,54322832,2,0,0,0,0,0,0.0,0.0,0,0,0.0,0.0,0.0,0.0368169318,54322832.0,0.0,54322832,54322832,54322832,54322832.0,0.0,\\\n", 206 | "54322832,54322832,0,0.0,0.0,0,0,0,0,0,0,40,0,0.0368169318,0.0,0,0,0.0,0.0,0.0,0,0,0,0,1,0,0,0,0.0,0.0,0.0,0.0,0.0,0.0,\\\n", 207 | "0.0,0.0,0.0,0.0,2,0,0,0,279,-1,0,20,0.0,0.0,0,0,0.0,0.0,0,0,23,2,2018,4,0,1,0\"\n", 208 | "\n", 209 | "result = pred.predict(test_values)\n", 210 | "print(result)" 211 | ] 212 | }, 213 | { 214 | "cell_type": 
"markdown", 215 | "metadata": {}, 216 | "source": [ 217 | "Now let's list the data capture files stored in S3. You should expect to see different files from different time periods organized based on the hour in which the invocation occurred.\n", 218 | "\n", 219 | "**Note that the delivery of capture data to Amazon S3 can require a couple of minutes so next cell might error. If this happens, please retry after a minute.**" 220 | ] 221 | }, 222 | { 223 | "cell_type": "code", 224 | "execution_count": null, 225 | "metadata": {}, 226 | "outputs": [], 227 | "source": [ 228 | "s3_client = boto3.Session().client('s3')\n", 229 | "current_endpoint_capture_prefix = '{}/monitoring/datacapture/{}'.format(prefix, endpoint_name)\n", 230 | "\n", 231 | "result = s3_client.list_objects(Bucket=bucket_name, Prefix=current_endpoint_capture_prefix)\n", 232 | "capture_files = ['s3://{0}/{1}'.format(bucket_name, capture_file.get(\"Key\")) for capture_file in result.get('Contents')]\n", 233 | "\n", 234 | "print(\"Capture Files: \")\n", 235 | "print(\"\\n \".join(capture_files))" 236 | ] 237 | }, 238 | { 239 | "cell_type": "markdown", 240 | "metadata": {}, 241 | "source": [ 242 | "We can also read the contents of one of these files and see how capture records are organized in JSON lines format." 
243 | ] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": null, 248 | "metadata": {}, 249 | "outputs": [], 250 | "source": [ 251 | "!aws s3 cp {capture_files[0]} datacapture/captured_data_example.jsonl\n", 252 | "!head datacapture/captured_data_example.jsonl" 253 | ] 254 | }, 255 | { 256 | "cell_type": "markdown", 257 | "metadata": {}, 258 | "source": [ 259 | "In addition, we can better understand the content of each JSON line like follows:" 260 | ] 261 | }, 262 | { 263 | "cell_type": "code", 264 | "execution_count": null, 265 | "metadata": {}, 266 | "outputs": [], 267 | "source": [ 268 | "import json\n", 269 | "with open (\"datacapture/captured_data_example.jsonl\", \"r\") as myfile:\n", 270 | " data=myfile.read()\n", 271 | "\n", 272 | "print(json.dumps(json.loads(data.split('\\n')[0]), indent=2))" 273 | ] 274 | }, 275 | { 276 | "cell_type": "markdown", 277 | "metadata": {}, 278 | "source": [ 279 | "For each inference request, we get input data, output data and some metadata like the inference time captured and saved." 280 | ] 281 | }, 282 | { 283 | "cell_type": "markdown", 284 | "metadata": {}, 285 | "source": [ 286 | "## Baselining" 287 | ] 288 | }, 289 | { 290 | "cell_type": "markdown", 291 | "metadata": {}, 292 | "source": [ 293 | "From our validation dataset let's ask Amazon SageMaker to suggest a set of baseline constraints and generate descriptive statistics for our features. Note that we are using the validation dataset for this workshop to make sure baselining time is short, and that file extension needs to be changed since the baselining jobs require .CSV file extension as default.\n", 294 | "In reality, you might be willing to use a larger dataset as baseline." 
295 | ] 296 | }, 297 | { 298 | "cell_type": "code", 299 | "execution_count": null, 300 | "metadata": {}, 301 | "outputs": [], 302 | "source": [ 303 | "import boto3\n", 304 | "\n", 305 | "s3 = boto3.resource('s3')\n", 306 | "\n", 307 | "bucket_key_prefix = \"aim362/data/val/\"\n", 308 | "bucket = s3.Bucket(bucket_name)\n", 309 | "\n", 310 | "for s3_object in bucket.objects.filter(Prefix=bucket_key_prefix):\n", 311 | " target_key = s3_object.key.replace('data/val/', 'monitoring/baselining/data/').replace('.part', '.csv')\n", 312 | " print('Copying {0} to {1} ...'.format(s3_object.key, target_key))\n", 313 | " \n", 314 | " copy_source = {\n", 315 | " 'Bucket': bucket_name,\n", 316 | " 'Key': s3_object.key\n", 317 | " }\n", 318 | " s3.Bucket(bucket_name).copy(copy_source, target_key)" 319 | ] 320 | }, 321 | { 322 | "cell_type": "code", 323 | "execution_count": null, 324 | "metadata": {}, 325 | "outputs": [], 326 | "source": [ 327 | "baseline_data_path = 's3://{0}/{1}/monitoring/baselining/data'.format(bucket_name, prefix)\n", 328 | "baseline_results_path = 's3://{0}/{1}/monitoring/baselining/results'.format(bucket_name, prefix)\n", 329 | "\n", 330 | "print(baseline_data_path)\n", 331 | "print(baseline_results_path)" 332 | ] 333 | }, 334 | { 335 | "cell_type": "markdown", 336 | "metadata": {}, 337 | "source": [ 338 | "Please note that running the baselining job will require 8-10 minutes. 
In the meantime, you can take a look at the Deequ library, used to execute these analyses with the default Model Monitor container: https://github.com/awslabs/deequ" 339 | ] 340 | }, 341 | { 342 | "cell_type": "code", 343 | "execution_count": null, 344 | "metadata": {}, 345 | "outputs": [], 346 | "source": [ 347 | "from sagemaker.model_monitor import DefaultModelMonitor\n", 348 | "from sagemaker.model_monitor.dataset_format import DatasetFormat\n", 349 | "\n", 350 | "my_default_monitor = DefaultModelMonitor(\n", 351 | " role=role,\n", 352 | " instance_count=1,\n", 353 | " instance_type='ml.c5.4xlarge',\n", 354 | " volume_size_in_gb=20,\n", 355 | " max_runtime_in_seconds=3600,\n", 356 | ")" 357 | ] 358 | }, 359 | { 360 | "cell_type": "code", 361 | "execution_count": null, 362 | "metadata": {}, 363 | "outputs": [], 364 | "source": [ 365 | "my_default_monitor.suggest_baseline(\n", 366 | " baseline_dataset=baseline_data_path,\n", 367 | " dataset_format=DatasetFormat.csv(header=True),\n", 368 | " output_s3_uri=baseline_results_path,\n", 369 | " wait=True\n", 370 | ")" 371 | ] 372 | }, 373 | { 374 | "cell_type": "markdown", 375 | "metadata": {}, 376 | "source": [ 377 | "Let's display the statistics that were generated by the baselining job." 378 | ] 379 | }, 380 | { 381 | "cell_type": "code", 382 | "execution_count": null, 383 | "metadata": {}, 384 | "outputs": [], 385 | "source": [ 386 | "import pandas as pd\n", 387 | "\n", 388 | "baseline_job = my_default_monitor.latest_baselining_job\n", 389 | "schema_df = pd.io.json.json_normalize(baseline_job.baseline_statistics().body_dict[\"features\"])\n", 390 | "schema_df.head(10)" 391 | ] 392 | }, 393 | { 394 | "cell_type": "markdown", 395 | "metadata": {}, 396 | "source": [ 397 | "Then, we can also visualize the constraints." 
398 | ] 399 | }, 400 | { 401 | "cell_type": "code", 402 | "execution_count": null, 403 | "metadata": {}, 404 | "outputs": [], 405 | "source": [ 406 | "constraints_df = pd.io.json.json_normalize(baseline_job.suggested_constraints().body_dict[\"features\"])\n", 407 | "constraints_df.head(10)" 408 | ] 409 | }, 410 | { 411 | "cell_type": "markdown", 412 | "metadata": {}, 413 | "source": [ 414 | "#### Results\n", 415 | "\n", 416 | "The baselining job has inspected the validation dataset and generated constraints and statistics, that will be used to monitor our endpoint." 417 | ] 418 | }, 419 | { 420 | "cell_type": "markdown", 421 | "metadata": {}, 422 | "source": [ 423 | "## Generating violations artificially" 424 | ] 425 | }, 426 | { 427 | "cell_type": "markdown", 428 | "metadata": {}, 429 | "source": [ 430 | "In order to get some result relevant to monitoring analysis, we are going to generate artificially some inferences with feature values causing specific violations, and then invoke the endpoint with this data.\n", 431 | "\n", 432 | "This requires about 2 minutes for 1000 inferences." 
433 | ] 434 | }, 435 | { 436 | "cell_type": "code", 437 | "execution_count": null, 438 | "metadata": {}, 439 | "outputs": [], 440 | "source": [ 441 | "import time\n", 442 | "import numpy as np\n", 443 | "dist_values = np.random.normal(1, 0.2, 1000)\n", 444 | "\n", 445 | "# Tot Fwd Pkts -> set to float (expected integer) [second feature]\n", 446 | "# Flow Duration -> set to empty (missing value) [third feature]\n", 447 | "# Fwd Pkt Len Mean -> sampled from random normal distribution [ninth feature]\n", 448 | "\n", 449 | "artificial_values = \"22,,40.3,0,0,0,0,0,{0},0.0,0,0,0.0,0.0,0.0,0.0368169318,54322832.0,0.0,54322832,54322832,54322832,54322832.0,0.0,\\\n", 450 | "54322832,54322832,0,0.0,0.0,0,0,0,0,0,0,40,0,0.0368169318,0.0,0,0,0.0,0.0,0.0,0,0,0,0,1,0,0,0,0.0,0.0,0.0,0.0,0.0,0.0,\\\n", 451 | "0.0,0.0,0.0,0.0,2,0,0,0,279,-1,0,20,0.0,0.0,0,0,0.0,0.0,0,0,23,2,2018,4,0,1,0\"\n", 452 | "\n", 453 | "for i in range(1000):\n", 454 | " pred.predict(artificial_values.format(str(dist_values[i])))\n", 455 | " time.sleep(0.15)\n", 456 | " if i > 0 and i % 100 == 0 :\n", 457 | " print('Executed {0} inferences.'.format(i))" 458 | ] 459 | }, 460 | { 461 | "cell_type": "markdown", 462 | "metadata": {}, 463 | "source": [ 464 | "## Monitoring" 465 | ] 466 | }, 467 | { 468 | "cell_type": "markdown", 469 | "metadata": {}, 470 | "source": [ 471 | "Once we have built the baseline for our data, we can enable endpoint monitoring by creating a monitoring schedule.\n", 472 | "When the schedule fires, a monitoring job will be kicked-off and will inspect the data captured at the endpoint with respect to the baseline; then it will generate some report files that can be used to analyze monitoring results."
473 | ] 474 | }, 475 | { 476 | "cell_type": "markdown", 477 | "metadata": {}, 478 | "source": [ 479 | "### Create Monitoring Schedule" 480 | ] 481 | }, 482 | { 483 | "cell_type": "markdown", 484 | "metadata": {}, 485 | "source": [ 486 | "Let's create the monitoring schedule for the previously created endpoint. When we create the schedule, we can also specify two scripts that will preprocess the records before the analysis takes place and execute post-processing at the end.\n", 487 | "For this example, we are not going to use a record preprocessor, and we are just specifying a post-processor that outputs some text for demo purposes." 488 | ] 489 | }, 490 | { 491 | "cell_type": "code", 492 | "execution_count": null, 493 | "metadata": {}, 494 | "outputs": [], 495 | "source": [ 496 | "!pygmentize postprocessor.py" 497 | ] 498 | }, 499 | { 500 | "cell_type": "markdown", 501 | "metadata": {}, 502 | "source": [ 503 | "We copy the script to Amazon S3 and specify the path where the monitoring reports will be saved to." 504 | ] 505 | }, 506 | { 507 | "cell_type": "code", 508 | "execution_count": null, 509 | "metadata": {}, 510 | "outputs": [], 511 | "source": [ 512 | "import boto3\n", 513 | "\n", 514 | "monitoring_code_prefix = '{0}/monitoring/code'.format(prefix)\n", 515 | "print(monitoring_code_prefix)\n", 516 | "\n", 517 | "boto3.Session().resource('s3').Bucket(bucket_name).Object(monitoring_code_prefix + '/postprocessor.py').upload_file('postprocessor.py')\n", 518 | "postprocessor_path = 's3://{0}/{1}/monitoring/code/postprocessor.py'.format(bucket_name, prefix)\n", 519 | "print(postprocessor_path)\n", 520 | "\n", 521 | "reports_path = 's3://{0}/{1}/monitoring/reports'.format(bucket_name, prefix)\n", 522 | "print(reports_path)" 523 | ] 524 | }, 525 | { 526 | "cell_type": "markdown", 527 | "metadata": {}, 528 | "source": [ 529 | "Finally, we create the monitoring schedule with hourly schedule execution." 
530 | ] 531 | }, 532 | { 533 | "cell_type": "code", 534 | "execution_count": null, 535 | "metadata": {}, 536 | "outputs": [], 537 | "source": [ 538 | "from sagemaker.model_monitor import CronExpressionGenerator\n", 539 | "from time import gmtime, strftime\n", 540 | "\n", 541 | "endpoint_name = pred.endpoint\n", 542 | "\n", 543 | "mon_schedule_name = 'nw-traffic-classification-xgb-mon-sch-' + strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\n", 544 | "my_default_monitor.create_monitoring_schedule(\n", 545 | " monitor_schedule_name=mon_schedule_name,\n", 546 | " endpoint_input=endpoint_name,\n", 547 | " post_analytics_processor_script=postprocessor_path,\n", 548 | " output_s3_uri=reports_path,\n", 549 | " statistics=my_default_monitor.baseline_statistics(),\n", 550 | " constraints=my_default_monitor.suggested_constraints(),\n", 551 | " schedule_cron_expression=CronExpressionGenerator.hourly(),\n", 552 | " enable_cloudwatch_metrics=True\n", 553 | ")" 554 | ] 555 | }, 556 | { 557 | "cell_type": "markdown", 558 | "metadata": {}, 559 | "source": [ 560 | "### Describe Monitoring Schedule" 561 | ] 562 | }, 563 | { 564 | "cell_type": "code", 565 | "execution_count": null, 566 | "metadata": {}, 567 | "outputs": [], 568 | "source": [ 569 | "desc_schedule_result = my_default_monitor.describe_schedule()\n", 570 | "desc_schedule_result" 571 | ] 572 | }, 573 | { 574 | "cell_type": "markdown", 575 | "metadata": {}, 576 | "source": [ 577 | "### Delete Monitoring Schedule\n", 578 | "\n", 579 | "Once the schedule is created, it will kick of jobs at specified intervals. Note that if you are kicking this off after creating the hourly schedule, you might find the executions empty. \n", 580 | "You might have to wait till you cross the hour boundary (in UTC) to see executions kick off. 
Since we don't want to wait for the hour in this example we can delete the schedule and use the code in next steps to simulate what will happen when a schedule is triggered, by running an Amazon SageMaker Processing Job." 581 | ] 582 | }, 583 | { 584 | "cell_type": "code", 585 | "execution_count": null, 586 | "metadata": {}, 587 | "outputs": [], 588 | "source": [ 589 | "# Note: this is just for the purpose of running this example.\n", 590 | "my_default_monitor.delete_monitoring_schedule()" 591 | ] 592 | }, 593 | { 594 | "cell_type": "markdown", 595 | "metadata": {}, 596 | "source": [ 597 | "### Triggering execution manually\n", 598 | "\n", 599 | "In order to trigger the execution manually, we first get all paths to data capture, baseline statistics, baseline constraints, etc.\n", 600 | "Then, we use a utility function, defined in monitoringjob_utils.py, to run the processing job." 601 | ] 602 | }, 603 | { 604 | "cell_type": "code", 605 | "execution_count": null, 606 | "metadata": {}, 607 | "outputs": [], 608 | "source": [ 609 | "result = s3_client.list_objects(Bucket=bucket_name, Prefix=current_endpoint_capture_prefix)\n", 610 | "capture_files = ['s3://{0}/{1}'.format(bucket_name, capture_file.get(\"Key\")) for capture_file in result.get('Contents')]\n", 611 | "\n", 612 | "print(\"Capture Files: \")\n", 613 | "print(\"\\n \".join(capture_files))\n", 614 | "\n", 615 | "data_capture_path = capture_files[len(capture_files) - 1][: capture_files[len(capture_files) - 1].rfind('/')]\n", 616 | "statistics_path = baseline_results_path + '/statistics.json'\n", 617 | "constraints_path = baseline_results_path + '/constraints.json'\n", 618 | "\n", 619 | "print(data_capture_path)\n", 620 | "print(postprocessor_path)\n", 621 | "print(statistics_path)\n", 622 | "print(constraints_path)\n", 623 | "print(reports_path)" 624 | ] 625 | }, 626 | { 627 | "cell_type": "code", 628 | "execution_count": null, 629 | "metadata": {}, 630 | "outputs": [], 631 | "source": [ 632 | "from 
monitoringjob_utils import run_model_monitor_job_processor\n", 633 | "\n", 634 | "run_model_monitor_job_processor(region, 'ml.m5.xlarge', role, data_capture_path, statistics_path, constraints_path, reports_path,\n", 635 | " postprocessor_path=postprocessor_path)" 636 | ] 637 | }, 638 | { 639 | "cell_type": "markdown", 640 | "metadata": {}, 641 | "source": [ 642 | "### Analysis" 643 | ] 644 | }, 645 | { 646 | "cell_type": "markdown", 647 | "metadata": {}, 648 | "source": [ 649 | "When the monitoring job completes, monitoring reports are saved to Amazon S3. Let's list the generated reports." 650 | ] 651 | }, 652 | { 653 | "cell_type": "code", 654 | "execution_count": null, 655 | "metadata": {}, 656 | "outputs": [], 657 | "source": [ 658 | "s3_client = boto3.Session().client('s3')\n", 659 | "monitoring_reports_prefix = '{}/monitoring/reports/{}'.format(prefix, pred.endpoint)\n", 660 | "\n", 661 | "result = s3_client.list_objects(Bucket=bucket_name, Prefix=monitoring_reports_prefix)\n", 662 | "try:\n", 663 | " monitoring_reports = ['s3://{0}/{1}'.format(bucket_name, capture_file.get(\"Key\")) for capture_file in result.get('Contents')]\n", 664 | " print(\"Monitoring Reports Files: \")\n", 665 | " print(\"\\n \".join(monitoring_reports))\n", 666 | "except:\n", 667 | " print('No monitoring reports found.')" 668 | ] 669 | }, 670 | { 671 | "cell_type": "markdown", 672 | "metadata": {}, 673 | "source": [ 674 | "We then copy monitoring reports locally." 675 | ] 676 | }, 677 | { 678 | "cell_type": "code", 679 | "execution_count": null, 680 | "metadata": {}, 681 | "outputs": [], 682 | "source": [ 683 | "!aws s3 cp {monitoring_reports[0]} monitoring/\n", 684 | "!aws s3 cp {monitoring_reports[1]} monitoring/\n", 685 | "!aws s3 cp {monitoring_reports[2]} monitoring/" 686 | ] 687 | }, 688 | { 689 | "cell_type": "markdown", 690 | "metadata": {}, 691 | "source": [ 692 | "Let's display the violations identified by the monitoring execution." 
693 | ] 694 | }, 695 | { 696 | "cell_type": "code", 697 | "execution_count": null, 698 | "metadata": {}, 699 | "outputs": [], 700 | "source": [ 701 | "import pandas as pd\n", 702 | "pd.set_option('display.max_colwidth', -1)\n", 703 | "\n", 704 | "file = open('monitoring/constraint_violations.json', 'r')\n", 705 | "data = file.read()\n", 706 | "\n", 707 | "violations_df = pd.io.json.json_normalize(json.loads(data)['violations'])\n", 708 | "violations_df.head(10)" 709 | ] 710 | }, 711 | { 712 | "cell_type": "markdown", 713 | "metadata": {}, 714 | "source": [ 715 | "We can see that the violations identified correspond to the ones that we artificially generated and that there is a feature that is generating some drift from the baseline." 716 | ] 717 | }, 718 | { 719 | "cell_type": "markdown", 720 | "metadata": {}, 721 | "source": [ 722 | "### Advanced Hints" 723 | ] 724 | }, 725 | { 726 | "cell_type": "markdown", 727 | "metadata": {}, 728 | "source": [ 729 | "You might be asking yourself what are the type of violations that are monitored and how drift from the baseline is computed.\n", 730 | "\n", 731 | "The types of violations monitored are listed here: https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-interpreting-violations.html. Most of them use configurable thresholds, that are specified in the monitoring configuration section of the baseline constraints JSON. 
Let's take a look at this configuration from the baseline constraints file:" 732 | ] 733 | }, 734 | { 735 | "cell_type": "code", 736 | "execution_count": null, 737 | "metadata": {}, 738 | "outputs": [], 739 | "source": [ 740 | "!aws s3 cp {statistics_path} baseline/\n", 741 | "!aws s3 cp {constraints_path} baseline/" 742 | ] 743 | }, 744 | { 745 | "cell_type": "code", 746 | "execution_count": null, 747 | "metadata": {}, 748 | "outputs": [], 749 | "source": [ 750 | "import json\n", 751 | "with open (\"baseline/constraints.json\", \"r\") as myfile:\n", 752 | " data=myfile.read()\n", 753 | "\n", 754 | "print(json.dumps(json.loads(data)['monitoring_config'], indent=2))" 755 | ] 756 | }, 757 | { 758 | "cell_type": "markdown", 759 | "metadata": {}, 760 | "source": [ 761 | "This configuration is interpreted when the monitoring job is executed and used to compare captured data to the baseline. If you want to customize this section, you will have to update the **constraints.json** file and upload it back to Amazon S3 before launching the monitoring job.\n", 762 | "\n", 763 | "When data distributions are compared to detect potential drift, you can choose to use either a _Simple_ or _Robust_ comparison method, where the latter has to be preferred when dealing with small datasets. Additional info: https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-byoc-constraints.html." 764 | ] 765 | }, 766 | { 767 | "cell_type": "markdown", 768 | "metadata": {}, 769 | "source": [ 770 | "## Delete Endpoint" 771 | ] 772 | }, 773 | { 774 | "cell_type": "markdown", 775 | "metadata": {}, 776 | "source": [ 777 | "Finally we can delete the endpoint to free-up resources."
778 | ] 779 | }, 780 | { 781 | "cell_type": "code", 782 | "execution_count": null, 783 | "metadata": {}, 784 | "outputs": [], 785 | "source": [ 786 | "pred.delete_endpoint()\n", 787 | "pred.delete_model()" 788 | ] 789 | }, 790 | { 791 | "cell_type": "markdown", 792 | "metadata": {}, 793 | "source": [ 794 | "## References\n", 795 | "\n", 796 | "A Realistic Cyber Defense Dataset (CSE-CIC-IDS2018) https://registry.opendata.aws/cse-cic-ids2018/" 797 | ] 798 | } 799 | ], 800 | "metadata": { 801 | "kernelspec": { 802 | "display_name": "conda_python3", 803 | "language": "python", 804 | "name": "conda_python3" 805 | }, 806 | "language_info": { 807 | "codemirror_mode": { 808 | "name": "ipython", 809 | "version": 3 810 | }, 811 | "file_extension": ".py", 812 | "mimetype": "text/x-python", 813 | "name": "python", 814 | "nbconvert_exporter": "python", 815 | "pygments_lexer": "ipython3", 816 | "version": "3.6.5" 817 | } 818 | }, 819 | "nbformat": 4, 820 | "nbformat_minor": 4 821 | } 822 | -------------------------------------------------------------------------------- /01_train_and_debug/train_and_debug.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Introduction\n", 8 | "\n", 9 | "In this workshop, we will go through the steps of training, debugging, deploying and monitoring a **network traffic classification model**.\n", 10 | "\n", 11 | "For training our model we will be using datasets CSE-CIC-IDS2018 by CIC and ISCX which are used for security testing and malware prevention.\n", 12 | "These datasets include a huge amount of raw network traffic logs, plus pre-processed data where network connections have been reconstructed and relevant features have been extracted using CICFlowMeter, a tool that outputs network connection features as CSV files. 
Each record is classified as benign traffic, or it can be malicious traffic, with a total number of 15 classes.\n", 13 | "\n", 14 | "Starting from this featurized dataset, we have executed additional pre-processing for the purpose of this lab:\n", 15 | "\n", 23 | "\n", 24 | "Classes are represented and have been encoded as follows (train + validation):\n", 25 | "\n", 26 | "\n", 27 | "| Label | Encoded | N. records |\n", 28 | "|:-------------------------|:-------:|-----------:|\n", 29 | "| Benign | 0 | 1000000 |\n", 30 | "| Bot | 1 | 200000 |\n", 31 | "| DoS attacks-GoldenEye | 2 | 40000 |\n", 32 | "| DoS attacks-Slowloris | 3 | 10000 |\n", 33 | "| DDoS attacks-LOIC-HTTP | 4 | 300000 |\n", 34 | "| Infilteration | 5 | 150000 |\n", 35 | "| DDOS attack-LOIC-UDP | 6 | 1730 |\n", 36 | "| DDOS attack-HOIC | 7 | 300000 |\n", 37 | "| Brute Force -Web | 8 | 611 |\n", 38 | "| Brute Force -XSS | 9 | 230 |\n", 39 | "| SQL Injection | 10 | 87 |\n", 40 | "| DoS attacks-SlowHTTPTest | 11 | 100000 |\n", 41 | "| DoS attacks-Hulk | 12 | 250000 |\n", 42 | "| FTP-BruteForce | 13 | 150000 |\n", 43 | "| SSH-Bruteforce | 14 | 150000 | \n", 44 | "\n", 45 | "The final pre-processed dataset has been saved to a public Amazon S3 bucket for your convenience, and will represent the inputs to the training processes.\n", 46 | "\n", 47 | "### Let's get started!\n", 48 | "\n", 49 | "First, we set some variables, including the AWS region we are working in, the IAM (Identity and Access Management) execution role of the notebook instance and the Amazon S3 bucket where we will store data, models, outputs, etc. We will use the Amazon SageMaker default bucket for the selected AWS region, and then define a key prefix to make sure all objects share the same prefix for easier discoverability."
50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": null, 55 | "metadata": {}, 56 | "outputs": [], 57 | "source": [ 58 | "import os\n", 59 | "import boto3\n", 60 | "import sagemaker\n", 61 | "\n", 62 | "region = boto3.Session().region_name\n", 63 | "role = sagemaker.get_execution_role()\n", 64 | "sagemaker_session = sagemaker.Session()\n", 65 | "bucket_name = sagemaker.Session().default_bucket()\n", 66 | "prefix = 'aim362'\n", 67 | "os.environ[\"AWS_REGION\"] = region\n", 68 | "\n", 69 | "print(region)\n", 70 | "print(role)\n", 71 | "print(bucket_name)" 72 | ] 73 | }, 74 | { 75 | "cell_type": "markdown", 76 | "metadata": {}, 77 | "source": [ 78 | "Now we can copy the dataset from the public Amazon S3 bucket to the Amazon SageMaker default bucket used in this workshop. To do this, we will leverage on the AWS Python SDK (boto3) as follows:" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": null, 84 | "metadata": {}, 85 | "outputs": [], 86 | "source": [ 87 | "import boto3\n", 88 | "\n", 89 | "s3 = boto3.resource('s3')\n", 90 | "\n", 91 | "source_bucket_name = \"endtoendmlapp\"\n", 92 | "source_bucket_prefix = \"aim362/data/\"\n", 93 | "source_bucket = s3.Bucket(source_bucket_name)\n", 94 | "\n", 95 | "for s3_object in source_bucket.objects.filter(Prefix=source_bucket_prefix):\n", 96 | " copy_source = {\n", 97 | " 'Bucket': source_bucket_name,\n", 98 | " 'Key': s3_object.key\n", 99 | " }\n", 100 | " print('Copying {0} ...'.format(s3_object.key))\n", 101 | " s3.Bucket(bucket_name).copy(copy_source, s3_object.key)" 102 | ] 103 | }, 104 | { 105 | "cell_type": "markdown", 106 | "metadata": {}, 107 | "source": [ 108 | "Let's download some of the data to the notebook to quickly explore the dataset structure:" 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": null, 114 | "metadata": {}, 115 | "outputs": [], 116 | "source": [ 117 | "train_file_path = 's3://' + bucket_name + '/' + prefix + 
'/data/train/0.part'\n", 118 | "val_file_path = 's3://' + bucket_name + '/' + prefix + '/data/val/0.part'" 119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": null, 124 | "metadata": {}, 125 | "outputs": [], 126 | "source": [ 127 | "!mkdir -p data/train/ data/val/\n", 128 | "!aws s3 cp {train_file_path} data/train/\n", 129 | "!aws s3 cp {val_file_path} data/val/" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "metadata": {}, 136 | "outputs": [], 137 | "source": [ 138 | "import pandas as pd\n", 139 | "pd.options.display.max_columns = 100\n", 140 | "\n", 141 | "df = pd.read_csv('data/train/0.part')\n", 142 | "df.head(10)" 143 | ] 144 | }, 145 | { 146 | "cell_type": "markdown", 147 | "metadata": {}, 148 | "source": [ 149 | "# Training and Debugging\n", 150 | "\n", 151 | "The network traffic classification model will be trained using the Amazon SageMaker framework container for XGBoost (https://github.com/aws/sagemaker-xgboost-container). Using XGBoost as a framework provides more flexibility than using it as a built-in algorithm as it enables more advanced scenarios that allow pre-processing and post-processing scripts or any kind of custom logic to be incorporated into your training script.\n", 152 | "\n", 153 | "First, we will execute basic training to make sure our training script works as expected and we are able to fit the model successfully, and then we will go through the steps for enabling debugging using Amazon SageMaker Debugger." 
154 | ] 155 | }, 156 | { 157 | "cell_type": "markdown", 158 | "metadata": {}, 159 | "source": [ 160 | "## Basic Training\n", 161 | "\n", 162 | "We will execute the training script in local mode while building our model: local mode is a functionality enabled by the Amazon SageMaker Python SDK that allows running the same training code and container that will be used in Amazon SageMaker locally on the notebook instance, in order to speed-up experimentation and quickly fix errors before running training with Amazon SageMaker training.\n", 163 | "\n", 164 | "For local mode training, we can re-use the training and validation files downloaded on the notebook instance in the previous steps, as local file inputs." 165 | ] 166 | }, 167 | { 168 | "cell_type": "markdown", 169 | "metadata": {}, 170 | "source": [ 171 | "Let's take a look at our training script." 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": null, 177 | "metadata": {}, 178 | "outputs": [], 179 | "source": [ 180 | "!pygmentize source_dir/train_xgboost_no_debug.py" 181 | ] 182 | }, 183 | { 184 | "cell_type": "markdown", 185 | "metadata": {}, 186 | "source": [ 187 | "The script parses arguments that are passed when the XGBoost Docker container code invokes the script for execution. These arguments represent the hyperparameters that you specify when starting the training job plus the location of training and validation data; this behavior, named Script Mode execution, is enabled by a library that is installed in the XGBoost container (sagemaker-containers, https://github.com/aws/sagemaker-containers) and facilitates the development of SageMaker-compatible Docker containers.\n", 188 | "\n", 189 | "Then, we load training and validation data and execute XGBoost training with the provided parameters."
190 | ] 191 | }, 192 | { 193 | "cell_type": "markdown", 194 | "metadata": {}, 195 | "source": [ 196 | "Once we have our script ready, we can leverage on the XGBoost estimator of the Amazon SageMaker Python SDK to start training locally." 197 | ] 198 | }, 199 | { 200 | "cell_type": "code", 201 | "execution_count": null, 202 | "metadata": {}, 203 | "outputs": [], 204 | "source": [ 205 | "from sagemaker.xgboost import XGBoost\n", 206 | "\n", 207 | "hyperparameters = {\n", 208 | " \"max_depth\": \"3\",\n", 209 | " \"eta\": \"0.1\",\n", 210 | " \"gamma\": \"6\",\n", 211 | " \"min_child_weight\": \"6\",\n", 212 | " \"silent\": \"0\",\n", 213 | " \"objective\": \"multi:softmax\",\n", 214 | " \"num_class\": \"15\",\n", 215 | " \"num_round\": \"10\"\n", 216 | "}\n", 217 | "\n", 218 | "entry_point='train_xgboost_no_debug.py'\n", 219 | "source_dir='source_dir/'\n", 220 | "output_path = 's3://{0}/{1}/output/'.format(bucket_name, prefix)\n", 221 | "code_location = 's3://{0}/{1}/code'.format(bucket_name, prefix)\n", 222 | "\n", 223 | "estimator = XGBoost(\n", 224 | " base_job_name=\"nw-traffic-classification-xgb\",\n", 225 | " entry_point=entry_point,\n", 226 | " source_dir=source_dir,\n", 227 | " output_path=output_path,\n", 228 | " code_location=code_location,\n", 229 | " hyperparameters=hyperparameters,\n", 230 | " train_instance_type=\"local\", # Specifying local as instance type to run local-mode training\n", 231 | " train_instance_count=1,\n", 232 | " framework_version=\"0.90-2\",\n", 233 | " py_version=\"py3\",\n", 234 | " role=role\n", 235 | ")\n", 236 | "\n", 237 | "train_config = 'file://data/train/'\n", 238 | "val_config = 'file://data/val/'\n", 239 | "\n", 240 | "estimator.fit({'train': train_config, 'validation': val_config })" 241 | ] 242 | }, 243 | { 244 | "cell_type": "markdown", 245 | "metadata": {}, 246 | "source": [ 247 | "In order to make sure that our code works for inference, we can deploy the trained model locally and execute some inferences." 
248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": null, 253 | "metadata": {}, 254 | "outputs": [], 255 | "source": [ 256 | "predictor = estimator.deploy(initial_instance_count=1,\n", 257 | " instance_type='local') # Using local-mode deployment" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": null, 263 | "metadata": {}, 264 | "outputs": [], 265 | "source": [ 266 | "from sagemaker.predictor import csv_serializer, json_deserializer\n", 267 | "from sagemaker.predictor import RealTimePredictor\n", 268 | "\n", 269 | "predictor.content_type = 'text/csv'\n", 270 | "predictor.serializer = csv_serializer\n", 271 | "predictor.deserializer = json_deserializer\n", 272 | "\n", 273 | "# We expect 4 - DDoS attacks-LOIC-HTTP as the predicted class for this instance.\n", 274 | "test_values = [80,1056736,3,4,20,964,20,0,6.666666667,11.54700538,964,0,241.0,482.0,931.1691850999999,6.6241710320000005,176122.6667,431204.4454,1056315,2,394,197.0,275.77164469999997,392,2,1056733,352244.3333,609743.1115,1056315,24,0,0,0,0,72,92,2.8389304419999997,3.78524059,0,964,123.0,339.8873763,115523.4286,0,0,1,1,0,0,0,1,1.0,140.5714286,6.666666667,241.0,0.0,0.0,0.0,0.0,0.0,0.0,3,20,4,964,8192,211,1,20,0.0,0.0,0,0,0.0,0.0,0,0,20,2,2018,1,0,1,0]\n", 275 | "result = predictor.predict(test_values)\n", 276 | "print(result)" 277 | ] 278 | }, 279 | { 280 | "cell_type": "markdown", 281 | "metadata": {}, 282 | "source": [ 283 | "Finally, let's gracefully stop the deployed local endpoint." 
284 | ] 285 | }, 286 | { 287 | "cell_type": "code", 288 | "execution_count": null, 289 | "metadata": {}, 290 | "outputs": [], 291 | "source": [ 292 | "predictor.delete_endpoint()" 293 | ] 294 | }, 295 | { 296 | "cell_type": "markdown", 297 | "metadata": {}, 298 | "source": [ 299 | "#### Results\n", 300 | "During training, we have seen that both the train-merror and validation-merror are decreasing, although we don't have details on the accuracy per-class (we will address this later). We have also successfully deployed the model to a local endpoint and executed inferences." 301 | ] 302 | }, 303 | { 304 | "cell_type": "markdown", 305 | "metadata": {}, 306 | "source": [ 307 | "## Debugging" 308 | ] 309 | }, 310 | { 311 | "cell_type": "markdown", 312 | "metadata": {}, 313 | "source": [ 314 | "### LossNotDecreasing\n", 315 | "\n", 316 | "Once we are confident our training script is working as expected and there are no major errors preventing its execution, we can enable debugging.\n", 317 | "\n", 318 | "During training, we will save the state of the tensors using Amazon SageMaker debugging features, and then analyze debugging outputs with jobs that are run while the training job is executed. For XGBoost, Amazon SageMaker debugging supports saving evaluation metrics, labels and predictions, feature importances, and SHAP values.\n", 319 | "\n", 320 | "First, we need to modify our training script to enable Amazon SageMaker debugging. Note that this is required for the XGBoost framework, whilst for MXNet and Tensorflow debugging works also with no code changes.\n", 321 | "\n", 322 | "We created a Hook object which we pass as a callback function when creating a Booster. The Hook object is created by loading a JSON configuration that is available in a specific path in the Docker container (opt/ml/input/config/debughookconfig.json); this file is generated by Amazon SageMaker from the CreateTrainingJob() API call configuration. 
Note that Amazon SageMaker debugging is highly configurable, you can choose exactly what to save.\n", 323 | "\n", 324 | "Let's look at the modified script:" 325 | ] 326 | }, 327 | { 328 | "cell_type": "code", 329 | "execution_count": null, 330 | "metadata": {}, 331 | "outputs": [], 332 | "source": [ 333 | "!pygmentize source_dir/train_xgboost.py" 334 | ] 335 | }, 336 | { 337 | "cell_type": "markdown", 338 | "metadata": {}, 339 | "source": [ 340 | "The modified script allows us to **capture tensors** and **save to Amazon S3**, but doing this will not cause any debug analysis to run. In order to analyze debug outputs we need to configure the XGBoost estimator to define **a collection of rules that will be run while the training job is executed**." 341 | ] 342 | }, 343 | { 344 | "cell_type": "markdown", 345 | "metadata": {}, 346 | "source": [ 347 | "We are enabling a built-in (1P) debug rule named **LossNotDecreasing** which checks if the loss is not decreasing across steps. In this scenario, we have chosen to run this rule at every step on the validation-merror metric values: this means that the new merror values must always go down at each step.\n", 348 | "\n", 349 | "When the estimator fit() method is called, Amazon SageMaker will start two jobs: a **Training Job**, where we also capture and save tensors, and a debug **Processing Job** (powered by **Amazon SageMaker Processing Jobs**), which will run in parallel and analyze tensor data to check if the rule conditions are met.\n", 350 | "\n", 351 | "Note that we are passing the **wait=False** parameter to the fit() method to avoid waiting for the training job to complete and just fire and forget the API call."
352 | ] 353 | }, 354 | { 355 | "cell_type": "code", 356 | "execution_count": null, 357 | "metadata": {}, 358 | "outputs": [], 359 | "source": [ 360 | "from sagemaker.xgboost import XGBoost\n", 361 | "from sagemaker.debugger import Rule, rule_configs, DebuggerHookConfig, CollectionConfig\n", 362 | "\n", 363 | "hyperparameters = {\n", 364 | " \"max_depth\": \"10\",\n", 365 | " \"eta\": \"0.2\",\n", 366 | " \"gamma\": \"1\",\n", 367 | " \"min_child_weight\": \"6\",\n", 368 | " \"silent\": \"0\",\n", 369 | " \"objective\": \"multi:softmax\",\n", 370 | " \"num_class\": \"15\",\n", 371 | " \"num_round\": \"20\"\n", 372 | "}\n", 373 | "\n", 374 | "entry_point='train_xgboost.py'\n", 375 | "source_dir='source_dir/'\n", 376 | "output_path = 's3://{0}/{1}/output/'.format(bucket_name, prefix)\n", 377 | "debugger_output_path = 's3://{0}/{1}/output/debug'.format(bucket_name, prefix) # Path where we save debug outputs\n", 378 | "code_location = 's3://{0}/{1}/code'.format(bucket_name, prefix)\n", 379 | "\n", 380 | "hook_config = DebuggerHookConfig(\n", 381 | " s3_output_path=debugger_output_path,\n", 382 | " hook_parameters={\n", 383 | " \"save_interval\": \"1\"\n", 384 | " },\n", 385 | " collection_configs=[\n", 386 | " CollectionConfig(\"hyperparameters\"),\n", 387 | " CollectionConfig(\"metrics\"),\n", 388 | " CollectionConfig(\"predictions\"),\n", 389 | " CollectionConfig(\"labels\"),\n", 390 | " CollectionConfig(\"feature_importance\")\n", 391 | " ]\n", 392 | ")\n", 393 | "\n", 394 | "estimator = XGBoost(\n", 395 | " base_job_name=\"nw-traffic-classification-xgb\",\n", 396 | " entry_point=entry_point,\n", 397 | " source_dir=source_dir,\n", 398 | " output_path=output_path,\n", 399 | " code_location=code_location,\n", 400 | " hyperparameters=hyperparameters,\n", 401 | " train_instance_type=\"ml.m5.4xlarge\",\n", 402 | " train_instance_count=1,\n", 403 | " framework_version=\"0.90-2\",\n", 404 | " py_version=\"py3\",\n", 405 | " role=role,\n", 406 | " \n", 407 | " # Initialize 
your hook.\n", 408 | " debugger_hook_config=hook_config,\n", 409 | " \n", 410 | " # Initialize your rules. These will read data for analyses from the path specified\n", 411 | " # for the hook\n", 412 | " rules=[Rule.sagemaker(rule_configs.loss_not_decreasing(),\n", 413 | " rule_parameters={\n", 414 | " # Rule does not use the default losses collection,\n", 415 | " # but uses a regex to look for specific tensor values\n", 416 | " \"use_losses_collection\": \"False\",\n", 417 | " \"tensor_regex\": \"validation-merror\",\n", 418 | " # Num steps is used to specify when to evaluate this rule (every num_steps)\n", 419 | " \"num_steps\" : \"1\"}\n", 420 | " )]\n", 421 | ")\n", 422 | "\n", 423 | "train_config = sagemaker.session.s3_input('s3://{0}/{1}/data/train/'.format(\n", 424 | " bucket_name, prefix), content_type='text/csv')\n", 425 | "val_config = sagemaker.session.s3_input('s3://{0}/{1}/data/val/'.format(\n", 426 | " bucket_name, prefix), content_type='text/csv')\n", 427 | "\n", 428 | "estimator.fit({'train': train_config, 'validation': val_config }, wait=False)" 429 | ] 430 | }, 431 | { 432 | "cell_type": "markdown", 433 | "metadata": {}, 434 | "source": [ 435 | "Once the training job has started, we can check its debug configuration and status:" 436 | ] 437 | }, 438 | { 439 | "cell_type": "code", 440 | "execution_count": null, 441 | "metadata": {}, 442 | "outputs": [], 443 | "source": [ 444 | "import time\n", 445 | "client = estimator.sagemaker_session.sagemaker_client\n", 446 | "\n", 447 | "description = client.describe_training_job(TrainingJobName=estimator.latest_training_job.name)\n", 448 | "print('Debug Hook configuration: ')\n", 449 | "print(description['DebugHookConfig'])\n", 450 | "print()\n", 451 | "print('Debug rules configuration: ')\n", 452 | "print(description['DebugRuleConfigurations'])\n", 453 | "print()\n", 454 | "print('Training job status')\n", 455 | "print(description['TrainingJobStatus'])" 456 | ] 457 | }, 458 | { 459 | "cell_type": "markdown", 
460 | "metadata": {}, 461 | "source": [ 462 | "We can also get all the logs for the training job being executed:" 463 | ] 464 | }, 465 | { 466 | "cell_type": "code", 467 | "execution_count": null, 468 | "metadata": {}, 469 | "outputs": [], 470 | "source": [ 471 | "sagemaker_session.logs_for_job(estimator.latest_training_job.name)" 472 | ] 473 | }, 474 | { 475 | "cell_type": "markdown", 476 | "metadata": {}, 477 | "source": [ 478 | "At the same time, we can check the status of the rule execution job as follows. Note that this requires some time, so you might be interested in looking at the SageMaker Debugger documentation while this runs: https://github.com/awslabs/sagemaker-debugger." 479 | ] 480 | }, 481 | { 482 | "cell_type": "code", 483 | "execution_count": null, 484 | "metadata": {}, 485 | "outputs": [], 486 | "source": [ 487 | "import time\n", 488 | "\n", 489 | "client = estimator.sagemaker_session.sagemaker_client\n", 490 | "\n", 491 | "iterate = True\n", 492 | "while(iterate):\n", 493 | " description = client.describe_training_job(TrainingJobName=estimator.latest_training_job.name)\n", 494 | " eval_status = description['DebugRuleEvaluationStatuses'][0]\n", 495 | " print(eval_status)\n", 496 | " if eval_status['RuleEvaluationStatus'] != 'InProgress':\n", 497 | " iterate = False\n", 498 | " else:\n", 499 | " time.sleep(60)" 500 | ] 501 | }, 502 | { 503 | "cell_type": "markdown", 504 | "metadata": {}, 505 | "source": [ 506 | "The rule execution job raised an error since the rule evaluation condition is met. 
Let's review the configuration and logs of the rule execution job, executed by Amazon SageMaker Processing Jobs:" 507 | ] 508 | }, 509 | { 510 | "cell_type": "code", 511 | "execution_count": null, 512 | "metadata": {}, 513 | "outputs": [], 514 | "source": [ 515 | "processing_job_arn = eval_status['RuleEvaluationJobArn']\n", 516 | "processing_job_name = processing_job_arn[processing_job_arn.rfind('/') + 1 :]\n", 517 | "print(processing_job_name)\n", 518 | "\n", 519 | "client = estimator.sagemaker_session.sagemaker_client\n", 520 | "descr = client.describe_processing_job(ProcessingJobName=processing_job_name)\n", 521 | "descr" 522 | ] 523 | }, 524 | { 525 | "cell_type": "code", 526 | "execution_count": null, 527 | "metadata": {}, 528 | "outputs": [], 529 | "source": [ 530 | "sagemaker_session.logs_for_processing_job(descr['ProcessingJobName'])" 531 | ] 532 | }, 533 | { 534 | "cell_type": "markdown", 535 | "metadata": {}, 536 | "source": [ 537 | "#### Results\n", 538 | "We can see that the condition is being met at step 7 when validation-merror is not decreasing. When this happens, we might be interested in stopping training earlier. You can also leverage on Amazon CloudWatch Events to detect the rule condition met event and take specific actions automatically. " 539 | ] 540 | }, 541 | { 542 | "cell_type": "markdown", 543 | "metadata": {}, 544 | "source": [ 545 | "### Debugging - Confusion" 546 | ] 547 | }, 548 | { 549 | "cell_type": "markdown", 550 | "metadata": {}, 551 | "source": [ 552 | "As another example of using a first party (1P) rule provided by Amazon SageMaker debugging, let us again train and use a 1P rule `Confusion` to monitor the training job in realtime.\n", 553 | "\n", 554 | "During training, `Confusion` Rule job will monitor whether you are running into a situation where the ratio of on-diagonal and off-diagonal values in the confusion matrix is not within a specified range. 
In other words, this rule evaluates the goodness of a confusion matrix for a classification problem. It creates a matrix of size `category_no` $\\times$ `category_no` and populates it with data coming from (`y`, `y_hat`) pairs. For each (`y`, `y_hat`) pair the count in `confusion[y][y_hat]` is incremented by 1. Once the matrix is fully populated, the ratio of data on- and off-diagonal will be evaluated according to:\n", 555 | "\n", 556 | "- For elements on the diagonal:\n", 557 | "\n", 558 | "$$ \\frac{ \\text{confusion}_{ii} }{ \\sum_j \\text{confusion}_{jj} } \\geq \\text{min_diag} $$\n", 559 | "\n", 560 | "- For elements off the diagonal:\n", 561 | "\n", 562 | "$$ \\frac{ \\text{confusion}_{ji} }{ \\sum_j \\text{confusion}_{ji} } \\leq \\text{max_off_diag} $$" 563 | ] 564 | }, 565 | { 566 | "cell_type": "markdown", 567 | "metadata": {}, 568 | "source": [ 569 | "Please note that in this case we are setting the `start_step` and `end_step` rule parameters, to make sure the rule is evaluated only during the latest steps."
570 | ] 571 | }, 572 | { 573 | "cell_type": "code", 574 | "execution_count": null, 575 | "metadata": {}, 576 | "outputs": [], 577 | "source": [ 578 | "from sagemaker.xgboost import XGBoost\n", 579 | "from sagemaker.debugger import Rule, rule_configs, DebuggerHookConfig\n", 580 | "\n", 581 | "hyperparameters = {\n", 582 | " \"max_depth\": \"10\",\n", 583 | " \"eta\": \"0.2\",\n", 584 | " \"gamma\": \"1\",\n", 585 | " \"min_child_weight\": \"6\",\n", 586 | " \"silent\": \"0\",\n", 587 | " \"objective\": \"multi:softmax\",\n", 588 | " \"num_class\": \"15\",\n", 589 | " \"num_round\": \"20\"\n", 590 | "}\n", 591 | "\n", 592 | "entry_point='train_xgboost.py'\n", 593 | "source_dir='source_dir/'\n", 594 | "output_path = 's3://{0}/{1}/output/'.format(bucket_name, prefix)\n", 595 | "debugger_output_path = 's3://{0}/{1}/output/debug'.format(bucket_name, prefix)\n", 596 | "code_location = 's3://{0}/{1}/code'.format(bucket_name, prefix)\n", 597 | "\n", 598 | "hook_config = DebuggerHookConfig(\n", 599 | " s3_output_path=debugger_output_path,\n", 600 | " hook_parameters={\n", 601 | " \"save_interval\": \"1\"\n", 602 | " },\n", 603 | " collection_configs=[\n", 604 | " CollectionConfig(\"hyperparameters\"),\n", 605 | " CollectionConfig(\"metrics\"),\n", 606 | " CollectionConfig(\"predictions\"),\n", 607 | " CollectionConfig(\"labels\"),\n", 608 | " CollectionConfig(\"feature_importance\")\n", 609 | " ]\n", 610 | ")\n", 611 | "\n", 612 | "estimator = XGBoost(\n", 613 | " base_job_name=\"nw-traffic-classification-xgb\",\n", 614 | " entry_point=entry_point,\n", 615 | " source_dir=source_dir,\n", 616 | " output_path=output_path,\n", 617 | " code_location=code_location,\n", 618 | " hyperparameters=hyperparameters,\n", 619 | " train_instance_type=\"ml.m5.4xlarge\",\n", 620 | " train_instance_count=1,\n", 621 | " framework_version=\"0.90-2\",\n", 622 | " py_version=\"py3\",\n", 623 | " role=role,\n", 624 | " \n", 625 | " # Initialize your hook.\n", 626 | " 
debugger_hook_config=hook_config,\n", 627 | " \n", 628 | " # Initialize your rules. These will read data for analyses from the path specified\n", 629 | " # for the hook\n", 630 | " rules=[Rule.sagemaker(rule_configs.confusion(),\n", 631 | " rule_parameters={\n", 632 | " \"category_no\": \"15\",\n", 633 | " \"min_diag\": \"0.7\",\n", 634 | " \"max_off_diag\": \"0.3\",\n", 635 | " \"start_step\": \"17\",\n", 636 | " \"end_step\": \"19\"}\n", 637 | " )]\n", 638 | ")\n", 639 | "\n", 640 | "train_config = sagemaker.session.s3_input('s3://{0}/{1}/data/train/'.format(\n", 641 | " bucket_name, prefix), content_type='text/csv')\n", 642 | "val_config = sagemaker.session.s3_input('s3://{0}/{1}/data/val/'.format(\n", 643 | " bucket_name, prefix), content_type='text/csv')\n", 644 | "\n", 645 | "estimator.fit({'train': train_config, 'validation': val_config }, wait=False)" 646 | ] 647 | }, 648 | { 649 | "cell_type": "markdown", 650 | "metadata": {}, 651 | "source": [ 652 | "Again, let's review the training job status, configuration and logs:" 653 | ] 654 | }, 655 | { 656 | "cell_type": "code", 657 | "execution_count": null, 658 | "metadata": {}, 659 | "outputs": [], 660 | "source": [ 661 | "import time\n", 662 | "client = estimator.sagemaker_session.sagemaker_client\n", 663 | "\n", 664 | "description = client.describe_training_job(TrainingJobName=estimator.latest_training_job.name)\n", 665 | "print('Debug Hook configuration: ')\n", 666 | "print(description['DebugHookConfig'])\n", 667 | "print()\n", 668 | "print('Debug rules configuration: ')\n", 669 | "print(description['DebugRuleConfigurations'])\n", 670 | "print()\n", 671 | "print('Training job status')\n", 672 | "print(description['TrainingJobStatus'])" 673 | ] 674 | }, 675 | { 676 | "cell_type": "code", 677 | "execution_count": null, 678 | "metadata": {}, 679 | "outputs": [], 680 | "source": [ 681 | "sagemaker_session.logs_for_job(estimator.latest_training_job.name)" 682 | ] 683 | }, 684 | { 685 | "cell_type": "markdown", 686
| "metadata": {}, 687 | "source": [ 688 | "Then, we can wait for the rule execution to complete:" 689 | ] 690 | }, 691 | { 692 | "cell_type": "code", 693 | "execution_count": null, 694 | "metadata": {}, 695 | "outputs": [], 696 | "source": [ 697 | "import time\n", 698 | "\n", 699 | "client = estimator.sagemaker_session.sagemaker_client\n", 700 | "\n", 701 | "iterate = True\n", 702 | "while(iterate):\n", 703 | " description = client.describe_training_job(TrainingJobName=estimator.latest_training_job.name)\n", 704 | " eval_status = description['DebugRuleEvaluationStatuses'][0]\n", 705 | " print(eval_status)\n", 706 | " if eval_status['RuleEvaluationStatus'] != 'InProgress':\n", 707 | " iterate = False\n", 708 | " else:\n", 709 | " time.sleep(60)" 710 | ] 711 | }, 712 | { 713 | "cell_type": "markdown", 714 | "metadata": {}, 715 | "source": [ 716 | "Let's review the rule execution job, executed by Amazon SageMaker Processing:" 717 | ] 718 | }, 719 | { 720 | "cell_type": "code", 721 | "execution_count": null, 722 | "metadata": {}, 723 | "outputs": [], 724 | "source": [ 725 | "processing_job_arn = eval_status['RuleEvaluationJobArn']\n", 726 | "processing_job_name = processing_job_arn[processing_job_arn.rfind('/') + 1 :]\n", 727 | "print(processing_job_name)\n", 728 | "\n", 729 | "client = estimator.sagemaker_session.sagemaker_client\n", 730 | "descr = client.describe_processing_job(ProcessingJobName=processing_job_name)\n", 731 | "descr" 732 | ] 733 | }, 734 | { 735 | "cell_type": "code", 736 | "execution_count": null, 737 | "metadata": {}, 738 | "outputs": [], 739 | "source": [ 740 | "sagemaker_session.logs_for_processing_job(descr['ProcessingJobName'])" 741 | ] 742 | }, 743 | { 744 | "cell_type": "markdown", 745 | "metadata": {}, 746 | "source": [ 747 | "Let's also make sure the training job is completed." 
748 | ] 749 | }, 750 | { 751 | "cell_type": "code", 752 | "execution_count": null, 753 | "metadata": {}, 754 | "outputs": [], 755 | "source": [ 756 | "import time\n", 757 | "\n", 758 | "client = estimator.sagemaker_session.sagemaker_client\n", 759 | "\n", 760 | "iterate = True\n", 761 | "while(iterate):\n", 762 | " description = client.describe_training_job(TrainingJobName=estimator.latest_training_job.name)\n", 763 | " training_job_status = description['TrainingJobStatus']\n", 764 | " print(training_job_status)\n", 765 | " if training_job_status != 'InProgress':\n", 766 | " iterate = False\n", 767 | " else:\n", 768 | " time.sleep(60)" 769 | ] 770 | }, 771 | { 772 | "cell_type": "markdown", 773 | "metadata": {}, 774 | "source": [ 775 | "#### Results\n", 776 | "We can see that the condition is being met and this gives evidence that our confusion matrix is not matching our thresholds.\n", 777 | "Let's review the confusion matrix by analyzing the debug outputs in the next section." 778 | ] 779 | }, 780 | { 781 | "cell_type": "markdown", 782 | "metadata": {}, 783 | "source": [ 784 | "## Analyzing Debug Outputs\n", 785 | "\n", 786 | "In this section we will see how you can use the SDK to manually analyze debug outputs.\n", 787 | "\n", 788 | "First thing is creating a trial, which is the construct that allows accessing tensors for a single training run."
789 | ] 790 | }, 791 | { 792 | "cell_type": "code", 793 | "execution_count": null, 794 | "metadata": {}, 795 | "outputs": [], 796 | "source": [ 797 | "!pip install smdebug" 798 | ] 799 | }, 800 | { 801 | "cell_type": "code", 802 | "execution_count": null, 803 | "metadata": {}, 804 | "outputs": [], 805 | "source": [ 806 | "from smdebug.trials import create_trial\n", 807 | "\n", 808 | "s3_output_path = description[\"DebugHookConfig\"][\"S3OutputPath\"] + '/' + estimator.latest_training_job.name + '/debug-output/'\n", 809 | "print(s3_output_path)\n", 810 | "trial = create_trial(s3_output_path)" 811 | ] 812 | }, 813 | { 814 | "cell_type": "markdown", 815 | "metadata": {}, 816 | "source": [ 817 | "Now we can print the list of all the tensors that were saved." 818 | ] 819 | }, 820 | { 821 | "cell_type": "code", 822 | "execution_count": null, 823 | "metadata": {}, 824 | "outputs": [], 825 | "source": [ 826 | "trial.tensor_names()" 827 | ] 828 | }, 829 | { 830 | "cell_type": "markdown", 831 | "metadata": {}, 832 | "source": [ 833 | "Given a specific tensor, we can ask at which steps we have data for the tensor. In this case, we have data for all steps since the frequency was set to 1." 
834 | ] 835 | }, 836 | { 837 | "cell_type": "code", 838 | "execution_count": null, 839 | "metadata": {}, 840 | "outputs": [], 841 | "source": [ 842 | "trial.tensor(\"validation-merror\").steps()" 843 | ] 844 | }, 845 | { 846 | "cell_type": "markdown", 847 | "metadata": {}, 848 | "source": [ 849 | "We can also get the value of a specific tensor for a specific step as a numpy.array" 850 | ] 851 | }, 852 | { 853 | "cell_type": "code", 854 | "execution_count": null, 855 | "metadata": {}, 856 | "outputs": [], 857 | "source": [ 858 | "trial.tensor(\"train-merror\").value(5)" 859 | ] 860 | }, 861 | { 862 | "cell_type": "markdown", 863 | "metadata": {}, 864 | "source": [ 865 | "### Performance metrics" 866 | ] 867 | }, 868 | { 869 | "cell_type": "markdown", 870 | "metadata": {}, 871 | "source": [ 872 | "We can also create a simple function that visualizes the training and validation errors as the training progresses. We expect the training errors to get smaller over time, as the system converges to a good solution. Now, remember that this is an interactive analysis - we are showing these tensors to give an idea of the data."
873 | ] 874 | }, 875 | { 876 | "cell_type": "code", 877 | "execution_count": null, 878 | "metadata": {}, 879 | "outputs": [], 880 | "source": [ 881 | "import matplotlib.pyplot as plt\n", 882 | "import seaborn as sns\n", 883 | "\n", 884 | "# Define a function that, for the given tensor name, walks through all \n", 885 | "# the iterations for which we have data and fetches the value.\n", 886 | "# Returns the set of steps and the values\n", 887 | "def get_data(trial, tname):\n", 888 | " tensor = trial.tensor(tname)\n", 889 | " steps = tensor.steps()\n", 890 | " vals = [tensor.value(s) for s in steps]\n", 891 | " return steps, vals" 892 | ] 893 | }, 894 | { 895 | "cell_type": "code", 896 | "execution_count": null, 897 | "metadata": {}, 898 | "outputs": [], 899 | "source": [ 900 | "metrics_to_plot = [\"train-merror\", \"validation-merror\"]\n", 901 | "for metric in metrics_to_plot:\n", 902 | " steps, data = get_data(trial, metric)\n", 903 | " plt.plot(steps, data, label=metric)\n", 904 | "plt.xlabel('Iteration')\n", 905 | "plt.ylabel('Classification error')\n", 906 | "plt.legend()\n", 907 | "plt.show()" 908 | ] 909 | }, 910 | { 911 | "cell_type": "markdown", 912 | "metadata": {}, 913 | "source": [ 914 | "### Feature importance" 915 | ] 916 | }, 917 | { 918 | "cell_type": "markdown", 919 | "metadata": {}, 920 | "source": [ 921 | "We can also visualize the feature importances as determined by xgboost.get_fscore(). Note that feature importances with zero values are not included here (which means that those features were not used in any split conditions).\n", 922 | "\n", 923 | "For more information on the metrics related to feature importance in XGBoost, please visit: https://towardsdatascience.com/be-careful-when-interpreting-your-features-importance-in-xgboost-6e16132588e7\n", 924 | "\n", 925 | "`weight` is the number of times a feature is used to split the data across all trees
\n", 926 | "`gain` represents fractional contribution of each feature to the model based on the total gain of this feature's splits. Higher percentage means a more important predictive feature
 \n", 927 | "`cover` is a metric of the number of observations related to this feature
\n", 928 | "`total_gain` is the total gain across all splits the feature is used in
\n", 929 | "`total_cover` is the total coverage across all splits the feature is used in
" 930 | ] 931 | }, 932 | { 933 | "cell_type": "code", 934 | "execution_count": null, 935 | "metadata": {}, 936 | "outputs": [], 937 | "source": [ 938 | "import matplotlib.pyplot as plt\n", 939 | "import seaborn as sns\n", 940 | " \n", 941 | "def plot_feature_importance(trial, collection_name, step, metric):\n", 942 | " feature_importance_tensors = trial.collection(collection_name).tensor_names\n", 943 | "\n", 944 | " feature_names = []\n", 945 | " feature_values = []\n", 946 | " \n", 947 | " plt.subplots(figsize=(18,7))\n", 948 | " \n", 949 | " for tensor_name in feature_importance_tensors:\n", 950 | " if tensor_name.find('/' + metric) >= 0:\n", 951 | " index = tensor_name.rfind('/')\n", 952 | " feature_name = tensor_name[index+1:]\n", 953 | " feature_names.append(feature_name)\n", 954 | " tensor = trial.tensor(tensor_name)\n", 955 | " value_at_step = tensor.value(step)[0]\n", 956 | " feature_values.append(value_at_step)\n", 957 | "\n", 958 | " pos = range(len(feature_values))\n", 959 | " plt.bar(pos, feature_values, color='g')\n", 960 | " plt.xlabel('Features', fontsize=16)\n", 961 | " plt.ylabel('Feature Importance ({0})'.format(metric), fontsize=16)\n", 962 | " plt.xticks(pos, feature_names)\n", 963 | " plt.show()\n", 964 | " " 965 | ] 966 | }, 967 | { 968 | "cell_type": "code", 969 | "execution_count": null, 970 | "metadata": {}, 971 | "outputs": [], 972 | "source": [ 973 | "plot_feature_importance(trial, \"feature_importance\", 19, \"gain\")\n", 974 | "plot_feature_importance(trial, \"feature_importance\", 19, \"cover\")" 975 | ] 976 | }, 977 | { 978 | "cell_type": "markdown", 979 | "metadata": {}, 980 | "source": [ 981 | "### Confusion Matrix\n", 982 | "\n", 983 | "Finally, since we were logging labels and predictions, we can visualize the confusion matrix of the last step." 
984 | ] 985 | }, 986 | { 987 | "cell_type": "code", 988 | "execution_count": null, 989 | "metadata": {}, 990 | "outputs": [], 991 | "source": [ 992 | "import matplotlib.pyplot as plt\n", 993 | "import seaborn as sns\n", 994 | "import numpy as np\n", 995 | "from sklearn.metrics import confusion_matrix\n", 996 | "from IPython.display import display, clear_output\n", 997 | "\n", 998 | "fig, ax = plt.subplots(figsize=(15,10))\n", 999 | "step = 19\n", 1000 | "\n", 1001 | "cm = confusion_matrix(\n", 1002 | " trial.tensor('labels').value(step),\n", 1003 | " trial.tensor('predictions').value(step)\n", 1004 | ")\n", 1005 | "normalized_cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n", 1006 | "sns.heatmap(normalized_cm, ax=ax, annot=cm, fmt='')\n", 1007 | "plt.show()" 1008 | ] 1009 | }, 1010 | { 1011 | "cell_type": "markdown", 1012 | "metadata": {}, 1013 | "source": [ 1014 | "You can now move to Deploy and Monitor to see how to deploy this model and monitor its inference performance over time using Amazon SageMaker Model Monitor." 1015 | ] 1016 | }, 1017 | { 1018 | "cell_type": "markdown", 1019 | "metadata": {}, 1020 | "source": [ 1021 | "## References\n", 1022 | "\n", 1023 | "A Realistic Cyber Defense Dataset (CSE-CIC-IDS2018) https://registry.opendata.aws/cse-cic-ids2018/" 1024 | ] 1025 | } 1026 | ], 1027 | "metadata": { 1028 | "kernelspec": { 1029 | "display_name": "conda_python3", 1030 | "language": "python", 1031 | "name": "conda_python3" 1032 | }, 1033 | "language_info": { 1034 | "codemirror_mode": { 1035 | "name": "ipython", 1036 | "version": 3 1037 | }, 1038 | "file_extension": ".py", 1039 | "mimetype": "text/x-python", 1040 | "name": "python", 1041 | "nbconvert_exporter": "python", 1042 | "pygments_lexer": "ipython3", 1043 | "version": "3.6.5" 1044 | } 1045 | }, 1046 | "nbformat": 4, 1047 | "nbformat_minor": 4 1048 | } 1049 | --------------------------------------------------------------------------------