├── workshop_3_mlops ├── utils │ ├── __init__.py │ ├── deploy_handler.py │ └── deploy_step.py ├── imgs │ ├── overview.png │ └── pipeline.png ├── README.md ├── scripts │ ├── evaluate.py │ ├── preprocessing.py │ └── train.py └── lab_1_sagemaker_pipeline.ipynb ├── imgs ├── cover.png ├── setup1.png ├── setup10.png ├── setup11.png ├── setup2.png ├── setup3.png ├── setup4.png ├── setup5.png ├── setup6.png ├── setup7.png ├── setup8.png ├── setup9.png ├── sm-endpoint.png ├── emotion-widget.png ├── batch-transform-v2.png ├── hf-inference-toolkit.png └── sagemaker-platform.png ├── workshop_2_going_production ├── imgs │ ├── sm-endpoint.png │ ├── scaling-options.jpeg │ ├── batch-transform-v2.png │ ├── autoscaling-endpoint.png │ ├── hf-inference-toolkit.png │ └── model-monitoring-dashboard.png ├── README.md ├── lab1_real_time_endpoint.ipynb ├── lab2_batch_transform.ipynb └── lab3_autoscaling.ipynb ├── workshop_1_getting_started_with_amazon_sagemaker ├── README.md ├── scripts │ └── train.py └── lab_3_spot_instances.ipynb ├── LICENSE ├── .gitignore └── README.md /workshop_3_mlops/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /imgs/cover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/cover.png -------------------------------------------------------------------------------- /imgs/setup1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/setup1.png -------------------------------------------------------------------------------- /imgs/setup10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/setup10.png -------------------------------------------------------------------------------- /imgs/setup11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/setup11.png -------------------------------------------------------------------------------- /imgs/setup2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/setup2.png -------------------------------------------------------------------------------- /imgs/setup3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/setup3.png -------------------------------------------------------------------------------- /imgs/setup4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/setup4.png -------------------------------------------------------------------------------- /imgs/setup5.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/setup5.png -------------------------------------------------------------------------------- /imgs/setup6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/setup6.png -------------------------------------------------------------------------------- /imgs/setup7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/setup7.png -------------------------------------------------------------------------------- /imgs/setup8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/setup8.png -------------------------------------------------------------------------------- /imgs/setup9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/setup9.png -------------------------------------------------------------------------------- /imgs/sm-endpoint.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/sm-endpoint.png -------------------------------------------------------------------------------- /imgs/emotion-widget.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/emotion-widget.png -------------------------------------------------------------------------------- /imgs/batch-transform-v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/batch-transform-v2.png -------------------------------------------------------------------------------- /imgs/hf-inference-toolkit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/hf-inference-toolkit.png -------------------------------------------------------------------------------- /imgs/sagemaker-platform.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/imgs/sagemaker-platform.png -------------------------------------------------------------------------------- /workshop_3_mlops/imgs/overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/workshop_3_mlops/imgs/overview.png -------------------------------------------------------------------------------- /workshop_3_mlops/imgs/pipeline.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/workshop_3_mlops/imgs/pipeline.png -------------------------------------------------------------------------------- /workshop_2_going_production/imgs/sm-endpoint.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/workshop_2_going_production/imgs/sm-endpoint.png -------------------------------------------------------------------------------- /workshop_2_going_production/imgs/scaling-options.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/workshop_2_going_production/imgs/scaling-options.jpeg -------------------------------------------------------------------------------- /workshop_2_going_production/imgs/batch-transform-v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/workshop_2_going_production/imgs/batch-transform-v2.png -------------------------------------------------------------------------------- /workshop_2_going_production/imgs/autoscaling-endpoint.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/workshop_2_going_production/imgs/autoscaling-endpoint.png -------------------------------------------------------------------------------- /workshop_2_going_production/imgs/hf-inference-toolkit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/workshop_2_going_production/imgs/hf-inference-toolkit.png -------------------------------------------------------------------------------- /workshop_2_going_production/imgs/model-monitoring-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/workshop_2_going_production/imgs/model-monitoring-dashboard.png -------------------------------------------------------------------------------- /workshop_3_mlops/README.md: -------------------------------------------------------------------------------- 1 | ## Workshop 3: **MLOps: End-to-End Hugging Face Transformers with the Hub & SageMaker Pipelines** 2 | 3 | In Workshop 3 learn how to build an End-to-End MLOps Pipeline for Hugging Face Transformers from training to production using Amazon SageMaker. 
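At its core, the workshop wires the individual SageMaker steps together with the SageMaker Python SDK. The snippet below is only a minimal sketch of that wiring — the processing, training, evaluation and conditional-deployment step objects are placeholders for the steps built in `lab_1_sagemaker_pipeline.ipynb` and `utils/deploy_step.py`, and the pipeline name is illustrative:

```python
import sagemaker
from sagemaker.workflow.pipeline import Pipeline

# The step objects below (processing_step, training_step, evaluation_step, condition_step)
# are assumed to be defined beforehand, as done in lab_1_sagemaker_pipeline.ipynb.
pipeline = Pipeline(
    name="huggingface-mlops-pipeline",  # illustrative pipeline name
    steps=[processing_step, training_step, evaluation_step, condition_step],
)

# Create (or update) the pipeline definition in SageMaker and kick off one execution.
pipeline.upsert(role_arn=sagemaker.get_execution_role())
execution = pipeline.start()
execution.wait()
```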
4 | 5 | We are going to create an automated SageMaker Pipeline which: 6 | 7 | - processes a dataset and uploads it to s3 8 | - fine-tunes a Hugging Face Transformer model with the processed dataset 9 | - evaluates the model against an evaluation set 10 | - deploys the model if it performed better than a certain threshold 11 | 12 | --- 13 | 14 | 🧑🏻‍💻 Code Assets: [https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_3_mlops](https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_3_mlops) 15 | 16 | 📺 Youtube: [https://www.youtube.com/watch?v=XGyt8gGwbY0&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=7](https://www.youtube.com/watch?v=XGyt8gGwbY0&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=7) 17 | -------------------------------------------------------------------------------- /workshop_2_going_production/README.md: -------------------------------------------------------------------------------- 1 | ## Workshop 2: **Going Production: Deploying, Scaling & Monitoring Hugging Face Transformer models with Amazon SageMaker** 2 | 3 | In Workshop 2 Learn how to use Amazon SageMaker to deploy, scale & monitor your Hugging Face Transformer models for production workloads. 4 | 5 | - Run Batch Prediction on JSON files using a Batch Transform 6 | - Deploy a model from [hf.co/models](https://hf.co/models) to Amazon SageMaker and run predictions 7 | - Configure autoscaling for the deployed model 8 | - Monitor the model to see avg. request time and set up alarms 9 | 10 | --- 11 | 12 | 🧑🏻‍💻 Code Assets: [https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_2_going_production](https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_2_going_production) 13 | 14 | 📺 Youtube: [https://www.youtube.com/watch?v=whwlIEITXoY&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=6&t=61s](https://www.youtube.com/watch?v=whwlIEITXoY&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=6&t=61s) 15 | -------------------------------------------------------------------------------- /workshop_3_mlops/scripts/evaluate.py: -------------------------------------------------------------------------------- 1 | """Evaluation script for measuring mean squared error.""" 2 | 3 | import subprocess 4 | import sys 5 | import json 6 | import logging 7 | import pathlib 8 | import tarfile 9 | import os 10 | 11 | import numpy as np 12 | import pandas as pd 13 | 14 | 15 | logger = logging.getLogger() 16 | logger.setLevel(logging.INFO) 17 | logger.addHandler(logging.StreamHandler()) 18 | 19 | if __name__ == "__main__": 20 | logger.debug("Starting evaluation.") 21 | model_path = "/opt/ml/processing/model/model.tar.gz" 22 | with tarfile.open(model_path) as tar: 23 | tar.extractall(path="./hf_model") 24 | 25 | logger.debug(os.listdir("./hf_model")) 26 | 27 | with open("./hf_model/evaluation.json") as f: 28 | eval_result = json.load(f) 29 | 30 | logger.debug(eval_result) 31 | output_dir = "/opt/ml/processing/evaluation" 32 | pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True) 33 | 34 | evaluation_path = f"{output_dir}/evaluation.json" 35 | with open(evaluation_path, "w") as f: 36 | f.write(json.dumps(eval_result)) 37 | -------------------------------------------------------------------------------- /workshop_1_getting_started_with_amazon_sagemaker/README.md: -------------------------------------------------------------------------------- 1 | ## Workshop 1: **Getting Started with Amazon SageMaker: Training your first NLP 
Transformer model with Hugging Face and deploying it** 2 | 3 | In Workshop 1 you will learn how to use Amazon SageMaker to train a Hugging Face Transformer model and deploy it afterwards. 4 | 5 | - Prepare and upload a test dataset to S3 6 | - Prepare a fine-tuning script to be used with Amazon SageMaker Training jobs 7 | - Launch a training job and store the trained model into S3 8 | - Deploy the model after successful training 9 | 10 | --- 11 | 12 | 🧑🏻‍💻 Code Assets: [https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_1_getting_started_with_amazon_sagemaker](https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_1_getting_started_with_amazon_sagemaker) 13 | 14 | 📺 Youtube: [https://www.youtube.com/watch?v=pYqjCzoyWyo&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=6&t=5s&ab_channel=HuggingFace](https://www.youtube.com/watch?v=pYqjCzoyWyo&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=6&t=5s&ab_channel=HuggingFace) 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Philipp Schmid 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /workshop_3_mlops/utils/deploy_handler.py: -------------------------------------------------------------------------------- 1 | import json 2 | import boto3 3 | 4 | 5 | def lambda_handler(event, context): 6 | """ """ 7 | sm_client = boto3.client("sagemaker") 8 | 9 | # The name of the model created in the Pipeline CreateModelStep 10 | model_name = event["model_name"] 11 | model_package_arn = event["model_package_arn"] 12 | endpoint_config_name = event["endpoint_config_name"] 13 | endpoint_name = event["endpoint_name"] 14 | endpoint_instance_type = event["endpoint_instance_type"] 15 | role = event["role"] 16 | 17 | container = {"ModelPackageName": model_package_arn} 18 | 19 | create_model_respose = sm_client.create_model(ModelName=model_name, ExecutionRoleArn=role, Containers=[container]) 20 | 21 | create_endpoint_config_response = sm_client.create_endpoint_config( 22 | EndpointConfigName=endpoint_config_name, 23 | ProductionVariants=[ 24 | { 25 | "InstanceType": endpoint_instance_type, 26 | "InitialVariantWeight": 1, 27 | "InitialInstanceCount": 1, 28 | "ModelName": model_name, 29 | "VariantName": "AllTraffic", 30 | } 31 | ], 32 | ) 33 | 34 | create_endpoint_response = sm_client.create_endpoint( 35 | EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name 36 | ) 37 | 38 | return { 39 | "statusCode": 200, 40 | "body": json.dumps("Created Endpoint!"), 41 | "other_key": "example_value", 42 | } 43 | -------------------------------------------------------------------------------- /workshop_3_mlops/scripts/preprocessing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import pandas as pd 4 | import subprocess 5 | import sys 6 | import argparse 7 | import logging 8 | 9 | 10 | def install(package): 11 | subprocess.check_call([sys.executable, "-m", "pip", "install", package]) 12 | 13 | 14 | if __name__ == "__main__": 15 | parser = argparse.ArgumentParser() 16 | 17 | parser.add_argument("--model_id", type=str) 18 | parser.add_argument("--dataset_name", type=str) 19 | parser.add_argument("--transformers_version", type=str) 20 | parser.add_argument("--pytorch_version", type=str) 21 | 22 | args, _ = parser.parse_known_args() 23 | 24 | install(f"torch=={args.pytorch_version}") 25 | install(f"transformers=={args.transformers_version}") 26 | install("datasets[s3]") 27 | 28 | from datasets import load_dataset 29 | from transformers import AutoTokenizer 30 | 31 | # Set up logging 32 | logger = logging.getLogger(__name__) 33 | 34 | logging.basicConfig( 35 | level=logging.getLevelName("INFO"), 36 | handlers=[logging.StreamHandler(sys.stdout)], 37 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", 38 | ) 39 | 40 | # download tokenizer 41 | tokenizer = AutoTokenizer.from_pretrained(args.model_id) 42 | 43 | # tokenizer helper function 44 | def tokenize(batch): 45 | return tokenizer(batch["text"], padding="max_length", truncation=True) 46 | 47 | # load dataset 48 | train_dataset, test_dataset = load_dataset(args.dataset_name, split=["train", "test"]) 49 | test_dataset = test_dataset.shuffle().select(range(1000)) # smaller the size for test dataset to 1k 50 | 51 | # tokenize dataset 52 | train_dataset = train_dataset.map(tokenize, batched=True) 53 | test_dataset = test_dataset.map(tokenize, batched=True) 54 | 55 | # set format for pytorch 56 | train_dataset = train_dataset.rename_column("label", "labels") 57 | 
train_dataset.set_format("torch", columns=["input_ids", "attention_mask", "labels"]) 58 | 59 | test_dataset = test_dataset.rename_column("label", "labels") 60 | test_dataset.set_format("torch", columns=["input_ids", "attention_mask", "labels"]) 61 | 62 | train_dataset.save_to_disk("/opt/ml/processing/train") 63 | test_dataset.save_to_disk("/opt/ml/processing/test") 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /workshop_3_mlops/utils/deploy_step.py: -------------------------------------------------------------------------------- 1 | import time 2 | import json 3 | import boto3 4 | import os 5 | from sagemaker.workflow.step_collections import StepCollection 6 | from sagemaker.workflow._utils import _RegisterModelStep 7 | from sagemaker.lambda_helper import Lambda 8 | from sagemaker.workflow.lambda_step import ( 9 | LambdaStep, 10 | LambdaOutput, 11 | LambdaOutputTypeEnum, 12 | ) 13 | 14 | 15 | class ModelDeployment(StepCollection): 16 | """custom step to deploy model as SageMaker Endpoint""" 17 | 18 | def __init__( 19 | self, 20 | model_name: str, 21 | registered_model: _RegisterModelStep, 22 | endpoint_instance_type, 23 | sagemaker_endpoint_role: str, 24 | autoscaling_policy: dict = None, 25 | ): 26 | self.name = "sagemaker-pipelines-model-deployment" 27 | self.model_package_arn = registered_model.properties.ModelPackageArn 28 | self.lambda_role = self.create_lambda_role(self.name) 29 | # Use the current time to define unique names for the resources created 30 | current_time = time.strftime("%m-%d-%H-%M-%S", time.localtime()) 31 | 32 | steps = [] 33 | lambda_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "deploy_handler.py") 34 | # Lambda helper class can be used to create the Lambda function 35 | self.func = Lambda( 36 | function_name=f"{self.name}-{current_time}", 37 | execution_role_arn=self.lambda_role, 38 | script=lambda_file, 39 | handler="deploy_handler.lambda_handler", 40 | timeout=600, 41 | memory_size=256, 42 | ) 43 | 44 | # The dictionary retured by the Lambda function is captured by LambdaOutput, each key in the dictionary corresponds to a 45 | # LambdaOutput 46 | 47 | output_param_1 = LambdaOutput(output_name="statusCode", output_type=LambdaOutputTypeEnum.String) 48 | output_param_2 = LambdaOutput(output_name="body", output_type=LambdaOutputTypeEnum.String) 49 | output_param_3 = LambdaOutput(output_name="other_key", output_type=LambdaOutputTypeEnum.String) 50 | 51 | # The inputs provided to the Lambda function can be retrieved via the `event` object within the `lambda_handler` function 52 | # in the Lambda 53 | lambda_step = LambdaStep( 54 | name="HuggingFaceModelDeployment", 55 | lambda_func=self.func, 56 | inputs={ 57 | "model_name": model_name + current_time, 58 | "endpoint_config_name": model_name + current_time, 59 | "endpoint_name": model_name, 60 | "endpoint_instance_type": endpoint_instance_type, 61 | "model_package_arn": self.model_package_arn, 62 | "role": sagemaker_endpoint_role, 63 | }, 64 | outputs=[output_param_1, output_param_2, output_param_3], 65 | ) 66 | steps.append(lambda_step) 67 | self.steps = steps 68 | 69 | def create_lambda_role(self, name): 70 | """ 71 | Create a role for the Lambda function 72 | """ 73 | role_name = f"{name}-role" 74 | iam = boto3.client("iam") 75 | try: 76 | 
response = iam.create_role( 77 | RoleName=role_name, 78 | AssumeRolePolicyDocument=json.dumps( 79 | { 80 | "Version": "2012-10-17", 81 | "Statement": [ 82 | { 83 | "Effect": "Allow", 84 | "Principal": {"Service": "lambda.amazonaws.com"}, 85 | "Action": "sts:AssumeRole", 86 | } 87 | ], 88 | } 89 | ), 90 | Description="Role for Lambda to call ECS Fargate task", 91 | ) 92 | 93 | role_arn = response["Role"]["Arn"] 94 | 95 | response = iam.attach_role_policy( 96 | RoleName=role_name, PolicyArn="arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" 97 | ) 98 | 99 | response = iam.attach_role_policy( 100 | PolicyArn="arn:aws:iam::aws:policy/AmazonSageMakerFullAccess", RoleName=role_name 101 | ) 102 | 103 | return role_arn 104 | 105 | except iam.exceptions.EntityAlreadyExistsException: 106 | print(f"Using ARN from existing role: {role_name}") 107 | response = iam.get_role(RoleName=role_name) 108 | return response["Role"]["Arn"] 109 | -------------------------------------------------------------------------------- /workshop_3_mlops/scripts/train.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import os 4 | import random 5 | import sys 6 | 7 | import numpy as np 8 | import torch 9 | import json 10 | from datasets import load_from_disk, load_metric 11 | from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments 12 | from transformers.trainer_utils import get_last_checkpoint 13 | 14 | if __name__ == "__main__": 15 | 16 | parser = argparse.ArgumentParser() 17 | 18 | # hyperparameters sent by the client are passed as command-line arguments to the script. 19 | parser.add_argument("--epochs", type=int, default=3) 20 | parser.add_argument("--train_batch_size", type=int, default=32) 21 | parser.add_argument("--eval_batch_size", type=int, default=64) 22 | parser.add_argument("--warmup_steps", type=int, default=500) 23 | parser.add_argument("--model_id", type=str) 24 | parser.add_argument("--learning_rate", type=str, default=5e-5) 25 | parser.add_argument("--fp16", type=bool, default=True) 26 | 27 | # Data, model, and output directories 28 | parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"]) 29 | parser.add_argument("--output_dir", type=str, default=os.environ["SM_MODEL_DIR"]) 30 | parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"]) 31 | parser.add_argument("--training_dir", type=str, default=os.environ["SM_CHANNEL_TRAIN"]) 32 | parser.add_argument("--test_dir", type=str, default=os.environ["SM_CHANNEL_TEST"]) 33 | 34 | args, _ = parser.parse_known_args() 35 | 36 | # Set up logging 37 | logger = logging.getLogger(__name__) 38 | 39 | logging.basicConfig( 40 | level=logging.getLevelName("INFO"), 41 | handlers=[logging.StreamHandler(sys.stdout)], 42 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", 43 | ) 44 | 45 | # load datasets 46 | train_dataset = load_from_disk(args.training_dir) 47 | test_dataset = load_from_disk(args.test_dir) 48 | 49 | logger.info(f" loaded train_dataset length is: {len(train_dataset)}") 50 | logger.info(f" loaded test_dataset length is: {len(test_dataset)}") 51 | 52 | metric = load_metric("accuracy") 53 | 54 | def compute_metrics(eval_pred): 55 | predictions, labels = eval_pred 56 | predictions = np.argmax(predictions, axis=1) 57 | return metric.compute(predictions=predictions, references=labels) 58 | 59 | # Prepare model labels - useful in inference API 60 | labels = 
train_dataset.features["labels"].names 61 | num_labels = len(labels) 62 | label2id, id2label = dict(), dict() 63 | for i, label in enumerate(labels): 64 | label2id[label] = str(i) 65 | id2label[str(i)] = label 66 | 67 | # download model from model hub 68 | model = AutoModelForSequenceClassification.from_pretrained( 69 | args.model_id, num_labels=num_labels, label2id=label2id, id2label=id2label 70 | ) 71 | tokenizer = AutoTokenizer.from_pretrained(args.model_id) 72 | 73 | # define training args 74 | training_args = TrainingArguments( 75 | output_dir=args.output_dir, 76 | overwrite_output_dir=True if get_last_checkpoint(args.output_dir) is not None else False, 77 | num_train_epochs=int(args.epochs), 78 | per_device_train_batch_size=int(args.train_batch_size), 79 | per_device_eval_batch_size=int(args.eval_batch_size), 80 | warmup_steps=args.warmup_steps, 81 | fp16=True, 82 | evaluation_strategy="epoch", 83 | save_strategy="epoch", 84 | save_total_limit=2, 85 | logging_dir=f"{args.output_data_dir}/logs", 86 | learning_rate=float(args.learning_rate), 87 | load_best_model_at_end=True, 88 | metric_for_best_model="accuracy", 89 | ) 90 | 91 | # create Trainer instance 92 | trainer = Trainer( 93 | model=model, 94 | args=training_args, 95 | compute_metrics=compute_metrics, 96 | train_dataset=train_dataset, 97 | eval_dataset=test_dataset, 98 | tokenizer=tokenizer, 99 | ) 100 | 101 | # train model 102 | if get_last_checkpoint(args.output_dir) is not None: 103 | logger.info("***** continue training *****") 104 | last_checkpoint = get_last_checkpoint(args.output_dir) 105 | trainer.train(resume_from_checkpoint=last_checkpoint) 106 | else: 107 | trainer.train() 108 | 109 | # evaluate model 110 | eval_result = trainer.evaluate(eval_dataset=test_dataset) 111 | 112 | # writes eval result to file which can be accessed later in s3 ouput 113 | with open(os.path.join(os.environ["SM_MODEL_DIR"], "evaluation.json"), "w") as writer: 114 | print(f"***** Eval results *****") 115 | print(eval_result) 116 | writer.write(json.dumps(eval_result)) 117 | 118 | # Saves the model to s3 uses os.environ["SM_MODEL_DIR"] to make sure checkpointing works 119 | trainer.save_model(os.environ["SM_MODEL_DIR"]) 120 | -------------------------------------------------------------------------------- /workshop_1_getting_started_with_amazon_sagemaker/scripts/train.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import os 4 | import random 5 | import sys 6 | 7 | import numpy as np 8 | import torch 9 | from datasets import load_from_disk, load_metric 10 | from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments 11 | from transformers.trainer_utils import get_last_checkpoint 12 | 13 | if __name__ == "__main__": 14 | 15 | parser = argparse.ArgumentParser() 16 | 17 | # hyperparameters sent by the client are passed as command-line arguments to the script. 
18 | parser.add_argument("--epochs", type=int, default=3) 19 | parser.add_argument("--train_batch_size", type=int, default=32) 20 | parser.add_argument("--eval_batch_size", type=int, default=64) 21 | parser.add_argument("--warmup_steps", type=int, default=500) 22 | parser.add_argument("--model_id", type=str) 23 | parser.add_argument("--learning_rate", type=str, default=5e-5) 24 | parser.add_argument("--fp16", type=bool, default=True) 25 | 26 | # Data, model, and output directories 27 | parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"]) 28 | parser.add_argument("--output_dir", type=str, default=os.environ["SM_MODEL_DIR"]) 29 | parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"]) 30 | parser.add_argument("--training_dir", type=str, default=os.environ["SM_CHANNEL_TRAIN"]) 31 | parser.add_argument("--test_dir", type=str, default=os.environ["SM_CHANNEL_TEST"]) 32 | 33 | args, _ = parser.parse_known_args() 34 | 35 | # Set up logging 36 | logger = logging.getLogger(__name__) 37 | 38 | logging.basicConfig( 39 | level=logging.getLevelName("INFO"), 40 | handlers=[logging.StreamHandler(sys.stdout)], 41 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", 42 | ) 43 | 44 | # load datasets 45 | train_dataset = load_from_disk(args.training_dir) 46 | test_dataset = load_from_disk(args.test_dir) 47 | 48 | logger.info(f" loaded train_dataset length is: {len(train_dataset)}") 49 | logger.info(f" loaded test_dataset length is: {len(test_dataset)}") 50 | 51 | metric = load_metric("accuracy") 52 | 53 | def compute_metrics(eval_pred): 54 | predictions, labels = eval_pred 55 | predictions = np.argmax(predictions, axis=1) 56 | return metric.compute(predictions=predictions, references=labels) 57 | 58 | # Prepare model labels - useful in inference API 59 | labels = train_dataset.features["labels"].names 60 | num_labels = len(labels) 61 | label2id, id2label = dict(), dict() 62 | for i, label in enumerate(labels): 63 | label2id[label] = str(i) 64 | id2label[str(i)] = label 65 | 66 | # download model from model hub 67 | model = AutoModelForSequenceClassification.from_pretrained( 68 | args.model_id, num_labels=num_labels, label2id=label2id, id2label=id2label 69 | ) 70 | tokenizer = AutoTokenizer.from_pretrained(args.model_id) 71 | 72 | # define training args 73 | training_args = TrainingArguments( 74 | output_dir=args.output_dir, 75 | overwrite_output_dir=True if get_last_checkpoint(args.output_dir) is not None else False, 76 | num_train_epochs=args.epochs, 77 | per_device_train_batch_size=args.train_batch_size, 78 | per_device_eval_batch_size=args.eval_batch_size, 79 | warmup_steps=args.warmup_steps, 80 | fp16=args.fp16, 81 | evaluation_strategy="epoch", 82 | save_strategy="epoch", 83 | save_total_limit=2, 84 | logging_dir=f"{args.output_data_dir}/logs", 85 | learning_rate=float(args.learning_rate), 86 | load_best_model_at_end=True, 87 | metric_for_best_model="accuracy", 88 | ) 89 | 90 | # create Trainer instance 91 | trainer = Trainer( 92 | model=model, 93 | args=training_args, 94 | compute_metrics=compute_metrics, 95 | train_dataset=train_dataset, 96 | eval_dataset=test_dataset, 97 | tokenizer=tokenizer, 98 | ) 99 | 100 | # train model 101 | if get_last_checkpoint(args.output_dir) is not None: 102 | logger.info("***** continue training *****") 103 | last_checkpoint = get_last_checkpoint(args.output_dir) 104 | trainer.train(resume_from_checkpoint=last_checkpoint) 105 | else: 106 | trainer.train() 107 | 108 | # evaluate model 109 | 
eval_result = trainer.evaluate(eval_dataset=test_dataset) 110 | 111 | # writes eval result to file which can be accessed later in s3 ouput 112 | with open(os.path.join(args.output_data_dir, "eval_results.txt"), "w") as writer: 113 | print(f"***** Eval results *****") 114 | for key, value in sorted(eval_result.items()): 115 | writer.write(f"{key} = {value}\n") 116 | print(f"{key} = {value}\n") 117 | 118 | # Saves the model to s3 uses os.environ["SM_MODEL_DIR"] to make sure checkpointing works 119 | trainer.save_model(os.environ["SM_MODEL_DIR"]) 120 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Workshop: Enterprise-Scale NLP with Hugging Face & Amazon SageMaker 2 | 3 | ![](./imgs/cover.png) 4 | 5 | Earlier this year we announced a strategic collaboration with Amazon to make it easier for companies to use Hugging Face Transformers in Amazon SageMaker, and ship cutting-edge Machine Learning features faster. We introduced new Hugging Face Deep Learning Containers (DLCs) to train and deploy Hugging Face Transformers in Amazon SageMaker. 6 | 7 | In addition to the Hugging Face Inference DLCs, we created a [Hugging Face Inference Toolkit for SageMaker](https://github.com/aws/sagemaker-huggingface-inference-toolkit). This Inference Toolkit leverages the `pipelines` from the `transformers` library to allow zero-code deployments of models, without requiring any code for pre-or post-processing. 8 | 9 | In October and November, we held a workshop series on “**Enterprise-Scale NLP with Hugging Face & Amazon SageMaker**”. This workshop series consisted out of 3 parts and covers: 10 | 11 | - Getting Started with Amazon SageMaker: Training your first NLP Transformer model with Hugging Face and deploying it 12 | - Going Production: Deploying, Scaling & Monitoring Hugging Face Transformer models with Amazon SageMaker 13 | - MLOps: End-to-End Hugging Face Transformers with the Hub & SageMaker Pipelines 14 | 15 | We recorded all of them so you are now able to do the whole workshop series on your own to enhance your Hugging Face Transformers skills with Amazon SageMaker or vice-versa. 16 | 17 | Below you can find all the details of each workshop and how to get started. 18 | 19 | 🧑🏻‍💻 Github Repository: https://github.com/philschmid/huggingface-sagemaker-workshop-series 20 | 21 | 📺  Youtube Playlist: [https://www.youtube.com/playlist?list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ](https://www.youtube.com/playlist?list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ) 22 | 23 | *Note: The Repository contains instructions on how to access a temporary AWS, which was available during the workshops. To be able to do the workshop now you need to use your own or your company AWS Account.* 24 | 25 | In Addition to the workshop we created a fully dedicated [Documentation](https://huggingface.co/docs/sagemaker/main) for Hugging Face and Amazon SageMaker, which includes all the necessary information. 26 | If the workshop is not enough for you we also have 15 additional getting samples [Notebook Github repository](https://github.com/huggingface/notebooks/tree/master/sagemaker), which cover topics like distributed training or leveraging [Spot Instances](https://aws.amazon.com/ec2/spot/?nc1=h_ls&cards.sort-by=item.additionalFields.startDateTime&cards.sort-order=asc). 
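Before the per-workshop overviews below, here is a condensed sketch of the zero-code deployment flow described above, adapted from the Workshop 2 lab notebooks in this repository (the model id, task and instance type are just examples):

```python
import sagemaker
from sagemaker.huggingface import HuggingFaceModel

role = sagemaker.get_execution_role()

# Hub model configuration: the Inference Toolkit pulls the model and builds the pipeline for us.
hub = {
    "HF_MODEL_ID": "distilbert-base-uncased-distilled-squad",  # example model from hf.co/models
    "HF_TASK": "question-answering",                           # example pipeline task
}

# Create the Hugging Face Model class backed by the Hugging Face Inference DLC.
huggingface_model = HuggingFaceModel(
    env=hub,
    role=role,                   # IAM role with permissions to create an endpoint
    transformers_version="4.6",  # versions used in the workshop notebooks
    pytorch_version="1.7",
    py_version="py36",
)

# Deploy to a real-time endpoint and run a prediction -- no inference code required.
predictor = huggingface_model.deploy(initial_instance_count=1, instance_type="ml.m5.xlarge")

predictor.predict({
    "inputs": {
        "question": "What is used for inference?",
        "context": "This model is used with SageMaker for inference.",
    }
})

# Clean up the endpoint when done.
predictor.delete_endpoint()
```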
27 | 28 | 29 | ## Workshop 1: **Getting Started with Amazon SageMaker: Training your first NLP Transformer model with Hugging Face and deploying it** 30 | 31 | In Workshop 1 you will learn how to use Amazon SageMaker to train a Hugging Face Transformer model and deploy it afterwards. 32 | 33 | - Prepare and upload a test dataset to S3 34 | - Prepare a fine-tuning script to be used with Amazon SageMaker Training jobs 35 | - Launch a training job and store the trained model into S3 36 | - Deploy the model after successful training 37 | 38 | --- 39 | 40 | 🧑🏻‍💻 Code Assets: [https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_1_getting_started_with_amazon_sagemaker](https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_1_getting_started_with_amazon_sagemaker) 41 | 42 | 📺 Youtube: [https://www.youtube.com/watch?v=pYqjCzoyWyo&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=6&t=5s&ab_channel=HuggingFace](https://www.youtube.com/watch?v=pYqjCzoyWyo&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=6&t=5s&ab_channel=HuggingFace) 43 | 44 | ## Workshop 2: **Going Production: Deploying, Scaling & Monitoring Hugging Face Transformer models with Amazon SageMaker** 45 | 46 | In Workshop 2 learn how to use Amazon SageMaker to deploy, scale & monitor your Hugging Face Transformer models for production workloads. 47 | 48 | - Run Batch Prediction on JSON files using a Batch Transform 49 | - Deploy a model from [hf.co/models](https://hf.co/models) to Amazon SageMaker and run predictions 50 | - Configure autoscaling for the deployed model 51 | - Monitor the model to see avg. request time and set up alarms 52 | 53 | --- 54 | 55 | 🧑🏻‍💻 Code Assets: [https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_2_going_production](https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_2_going_production) 56 | 57 | 📺 Youtube: [https://www.youtube.com/watch?v=whwlIEITXoY&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=6&t=61s](https://www.youtube.com/watch?v=whwlIEITXoY&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=6&t=61s) 58 | 59 | ## Workshop 3: **MLOps: End-to-End Hugging Face Transformers with the Hub & SageMaker Pipelines** 60 | 61 | In Workshop 3 learn how to build an End-to-End MLOps Pipeline for Hugging Face Transformers from training to production using Amazon SageMaker. 62 | 63 | We are going to create an automated SageMaker Pipeline which: 64 | 65 | - processes a dataset and uploads it to s3 66 | - fine-tunes a Hugging Face Transformer model with the processed dataset 67 | - evaluates the model against an evaluation set 68 | - deploys the model if it performed better than a certain threshold 69 | 70 | --- 71 | 72 | 🧑🏻‍💻 Code Assets: [https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_3_mlops](https://github.com/philschmid/huggingface-sagemaker-workshop-series/tree/main/workshop_3_mlops) 73 | 74 | 📺 Youtube: [https://www.youtube.com/watch?v=XGyt8gGwbY0&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=7](https://www.youtube.com/watch?v=XGyt8gGwbY0&list=PLo2EIpI_JMQtPhGR5Eo2Ab0_Vb89XfhDJ&index=7) 75 | 76 | # Access Workshop AWS Account 77 | 78 | For this workshop you’ll get access to a temporary AWS Account already pre-configured with Amazon SageMaker Notebook Instances. Follow the steps in this section to login to your AWS Account and download the workshop material. 79 | 80 | 81 | ### 1. 
To get started navigate to - https://dashboard.eventengine.run/login 82 | 83 | ![setup1](./imgs/setup1.png) 84 | 85 | Click on Accept Terms & Login 86 | 87 | ### 2. Click on Email One-Time OTP (Allow for up to 2 mins to receive the passcode) 88 | 89 | ![setup2](./imgs/setup2.png) 90 | 91 | ### 3. Provide your email address 92 | 93 | ![setup3](./imgs/setup3.png) 94 | 95 | ### 4. Enter your OTP code 96 | 97 | ![setup4](./imgs/setup4.png) 98 | 99 | ### 5. Click on AWS Console 100 | 101 | ![setup5](./imgs/setup5.png) 102 | 103 | ### 6. Click on Open AWS Console 104 | 105 | ![setup6](./imgs/setup6.png) 106 | 107 | ### 7. In the AWS Console click on Amazon SageMaker 108 | 109 | ![setup7](./imgs/setup7.png) 110 | 111 | ### 8. Click on Notebook and then on Notebook instances 112 | 113 | ![setup8](./imgs/setup8.png) 114 | 115 | ### 9. Create a new Notebook instance 116 | 117 | ![setup9](./imgs/setup9.png) 118 | 119 | ### 10. Configure Notebook instances 120 | 121 | * Make sure to increase the Volume Size of the Notebook if you want to work with big models and datasets 122 | * Add your IAM_Role with permissions to run your SageMaker Training And Inference Jobs 123 | * Add the Workshop Github Repository to the Notebook to preload the notebooks: `https://github.com/philschmid/huggingface-sagemaker-workshop-series.git` 124 | 125 | ![setup10](./imgs/setup10.png) 126 | 127 | 128 | ### 11. Open the Lab and select the right kernel you want to do and have fun! 129 | 130 | Open the workshop you want to do (`workshop_1_getting_started_with_amazon_sagemaker/`) and select the pytorch kernel 131 | 132 | ![setup11](./imgs/setup11.png) 133 | 134 | -------------------------------------------------------------------------------- /workshop_2_going_production/lab1_real_time_endpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "00d2464c", 6 | "metadata": {}, 7 | "source": [ 8 | "# Huggingface Sagemaker-sdk - Deploy 🤗 Transformers for inference\n" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "a8ce3cec", 14 | "metadata": {}, 15 | "source": [ 16 | "Welcome to this getting started guide, we will use the new Hugging Face Inference DLCs and Amazon SageMaker Python SDK to deploy a transformer model for inference. \n", 17 | "In this example we directly deploy one of the 10 000+ Hugging Face Transformers from the [Hub](https://huggingface.co/models) to Amazon SageMaker for Inference." 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "id": "b7272df2", 23 | "metadata": {}, 24 | "source": [ 25 | "## API - [SageMaker Hugging Face Inference Toolkit](https://github.com/aws/sagemaker-huggingface-inference-toolkit)\n" 26 | ] 27 | }, 28 | { 29 | "cell_type": "markdown", 30 | "id": "6daeacf3", 31 | "metadata": {}, 32 | "source": [ 33 | "Using the `transformers pipelines`, we designed an API, which makes it easy for you to benefit from all `pipelines` features. The API is oriented at the API of the [🤗 Accelerated Inference API](https://api-inference.huggingface.co/docs/python/html/detailed_parameters.html), meaning your inputs need to be defined in the `inputs` key and if you want additional supported `pipelines` parameters you can add them in the `parameters` key. Below you can find examples for requests. \n", 34 | "\n", 35 | "**text-classification request body**\n", 36 | "```python\n", 37 | "{\n", 38 | "\t\"inputs\": \"Camera - You are awarded a SiPix Digital Camera! call 09061221066 fromm landline. 
Delivery within 28 days.\"\n", 39 | "}\n", 40 | "```\n", 41 | "**question-answering request body**\n", 42 | "```python\n", 43 | "{\n", 44 | "\t\"inputs\": {\n", 45 | "\t\t\"question\": \"What is used for inference?\",\n", 46 | "\t\t\"context\": \"My Name is Philipp and I live in Nuremberg. This model is used with sagemaker for inference.\"\n", 47 | "\t}\n", 48 | "}\n", 49 | "```\n", 50 | "**zero-shot classification request body**\n", 51 | "```python\n", 52 | "{\n", 53 | "\t\"inputs\": \"Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!\",\n", 54 | "\t\"parameters\": {\n", 55 | "\t\t\"candidate_labels\": [\n", 56 | "\t\t\t\"refund\",\n", 57 | "\t\t\t\"legal\",\n", 58 | "\t\t\t\"faq\"\n", 59 | "\t\t]\n", 60 | "\t}\n", 61 | "}\n", 62 | "```" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "id": "03d984c3", 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "!pip install \"sagemaker>=2.48.0\" --upgrade" 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "id": "53db7eca", 78 | "metadata": {}, 79 | "source": [ 80 | "## Deploy one of the 10 000+ Hugging Face Transformers to Amazon SageMaker for Inference\n", 81 | "\n", 82 | "_This is an experimental feature, where the model will be loaded after the endpoint is created. This could lead to errors, e.g. models > 10GB_\n", 83 | "\n", 84 | "To deploy a model directly from the Hub to SageMaker we need to define 2 environment variables when creating the `HuggingFaceModel` . We need to define:\n", 85 | "\n", 86 | "- `HF_MODEL_ID`: defines the model id, which will be automatically loaded from [huggingface.co/models](http://huggingface.co/models) when creating or SageMaker Endpoint. The 🤗 Hub provides +10 000 models all available through this environment variable.\n", 87 | "- `HF_TASK`: defines the task for the used 🤗 Transformers pipeline. A full list of tasks can be find [here](https://huggingface.co/transformers/main_classes/pipelines.html)." 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": null, 93 | "id": "8c03085f", 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "from sagemaker.huggingface import HuggingFaceModel\n", 98 | "import sagemaker \n", 99 | "\n", 100 | "role = sagemaker.get_execution_role()\n", 101 | "\n", 102 | "# Hub Model configuration. 
https://huggingface.co/models\n", 103 | "hub = {\n", 104 | " 'HF_MODEL_ID':'distilbert-base-uncased-distilled-squad', # model_id from hf.co/models\n", 105 | " 'HF_TASK':'question-answering' # NLP task you want to use for predictions\n", 106 | "}\n", 107 | "\n", 108 | "# create Hugging Face Model Class\n", 109 | "huggingface_model = HuggingFaceModel(\n", 110 | " env=hub,\n", 111 | " role=role, # iam role with permissions to create an Endpoint\n", 112 | " transformers_version=\"4.6\", # transformers version used\n", 113 | " pytorch_version=\"1.7\", # pytorch version used\n", 114 | " py_version=\"py36\", # python version of the DLC\n", 115 | ")" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": null, 121 | "id": "1704b52b", 122 | "metadata": {}, 123 | "outputs": [], 124 | "source": [ 125 | "# deploy model to SageMaker Inference\n", 126 | "predictor = huggingface_model.deploy(\n", 127 | " initial_instance_count=1,\n", 128 | " instance_type=\"ml.m5.xlarge\"\n", 129 | ")" 130 | ] 131 | }, 132 | { 133 | "cell_type": "markdown", 134 | "id": "9a84c3f8", 135 | "metadata": {}, 136 | "source": [ 137 | "**Architecture**\n", 138 | "\n", 139 | "The [Hugging Face Inference Toolkit for SageMaker](https://github.com/aws/sagemaker-huggingface-inference-toolkit) is an open-source library for serving Hugging Face transformer models on SageMaker. It utilizes the SageMaker Inference Toolkit for starting up the model server, which is responsible for handling inference requests. The SageMaker Inference Toolkit uses [Multi Model Server (MMS)](https://github.com/awslabs/multi-model-server) for serving ML models. It bootstraps MMS with a configuration and settings that make it compatible with SageMaker and allow you to adjust important performance parameters, such as the number of workers per model, depending on the needs of your scenario.\n", 140 | "\n", 141 | "![](./imgs/hf-inference-toolkit.png)\n", 142 | "\n", 143 | "**Deploying a model using SageMaker hosting services is a three-step process:**\n", 144 | "\n", 145 | "1. **Create a model in SageMaker** —By creating a model, you tell SageMaker where it can find the model components. \n", 146 | "2. **Create an endpoint configuration for an HTTPS endpoint** —You specify the name of one or more models in production variants and the ML compute instances that you want SageMaker to launch to host each production variant.\n", 147 | "3. **Create an HTTPS endpoint** —Provide the endpoint configuration to SageMaker. The service launches the ML compute instances and deploys the model or models as specified in the configuration\n", 148 | "\n", 149 | "![](./imgs/sm-endpoint.png)\n" 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": null, 155 | "id": "11a1a1cb", 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [ 159 | "# example request, you always need to define \"inputs\"\n", 160 | "data = {\n", 161 | "\"inputs\": {\n", 162 | " \"question\": \"What is used for inference?\",\n", 163 | " \"context\": \"My Name is Philipp and I live in Nuremberg. 
This model is used with sagemaker for inference.\"\n", 164 | " }\n", 165 | "}\n", 166 | "\n", 167 | "# request\n", 168 | "predictor.predict(data)" 169 | ] 170 | }, 171 | { 172 | "cell_type": "markdown", 173 | "id": "901166ce", 174 | "metadata": {}, 175 | "source": [ 176 | "## clean up" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": 8, 182 | "id": "6b1bf7e0", 183 | "metadata": {}, 184 | "outputs": [], 185 | "source": [ 186 | "# delete endpoint\n", 187 | "predictor.delete_endpoint()" 188 | ] 189 | } 190 | ], 191 | "metadata": { 192 | "interpreter": { 193 | "hash": "c281c456f1b8161c8906f4af2c08ed2c40c50136979eaae69688b01f70e9f4a9" 194 | }, 195 | "kernelspec": { 196 | "display_name": "conda_pytorch_latest_p36", 197 | "language": "python", 198 | "name": "conda_pytorch_latest_p36" 199 | }, 200 | "language_info": { 201 | "name": "python", 202 | "version": "" 203 | } 204 | }, 205 | "nbformat": 4, 206 | "nbformat_minor": 5 207 | } 208 | -------------------------------------------------------------------------------- /workshop_2_going_production/lab2_batch_transform.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "661cc03e", 6 | "metadata": {}, 7 | "source": [ 8 | "# Huggingface Sagemaker-sdk - Run a batch transform inference job with 🤗 Transformers\n" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "d5bbff57", 14 | "metadata": {}, 15 | "source": [ 16 | "1. [Introduction](#Introduction) \n", 17 | "2. [Run Batch Transform after training a model](#Run-Batch-Transform-after-training-a-model) \n", 18 | "3. [Run Batch Transform Inference Job with a fine-tuned model using `jsonl`](#Run-Batch-Transform-Inference-Job-with-a-fine-tuned-model-using-jsonl) \n", 19 | "\n", 20 | "Welcome to this getting started guide, we will use the new Hugging Face Inference DLCs and Amazon SageMaker Python SDK to deploy two transformer model for inference. \n", 21 | "In the first example we deploy a trained Hugging Face Transformer model on to SageMaker for inference.\n", 22 | "In the second example we directly deploy one of the 10 000+ Hugging Face Transformers from the [Hub](https://huggingface.co/models) to Amazon SageMaker for Inference.<" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "id": "1b1db259", 28 | "metadata": {}, 29 | "source": [ 30 | "## Run Batch Transform after training a model \n", 31 | "_not included in the notebook_\n", 32 | "\n", 33 | "After you train a model, you can use [Amazon SageMaker Batch Transform](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html) to perform inferences with the model. In Batch Transform you provide your inference data as a S3 uri and SageMaker will care of downloading it, running the prediction and uploading the results afterwards to S3 again. 
You can find more documentation for Batch Transform [here](https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html)\n", 34 | "\n", 35 | "If you trained the model using the **HuggingFace estimator**, you can invoke `transformer()` method to create a transform job for a model based on the training job.\n", 36 | "\n", 37 | "```python\n", 38 | "# create the Estimator\n", 39 | "huggingface_estimator = HuggingFace(\n", 40 | "....\n", 41 | ")\n", 42 | "\n", 43 | "# run training\n", 44 | "huggingface_estimator.fit(data)\n", 45 | "\n", 46 | "# create Transformers based on training\n", 47 | "batch_job = huggingface_estimator.transformer(\n", 48 | " instance_count=1,\n", 49 | " instance_type='ml.c5.2xlarge',\n", 50 | " strategy='SingleRecord')\n", 51 | "\n", 52 | "# run transform job\n", 53 | "batch_job.transform(\n", 54 | " data='s3://s3-uri-to-batch-data',\n", 55 | " content_type='application/json', \n", 56 | " split_type='Line')\n", 57 | "```\n", 58 | "For more details about what can be specified here, see [API docs](https://sagemaker.readthedocs.io/en/stable/overview.html#sagemaker-batch-transform).\n", 59 | "\n" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": null, 65 | "id": "69ac88e8", 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "!pip install \"sagemaker>=2.48.0\" \"datasets==1.11\" --upgrade" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "id": "cb8a47e4", 75 | "metadata": {}, 76 | "source": [ 77 | "# Run Batch Transform Inference Job with a fine-tuned model using `jsonl`" 78 | ] 79 | }, 80 | { 81 | "cell_type": "markdown", 82 | "id": "7d004b76", 83 | "metadata": {}, 84 | "source": [ 85 | "## Data Pre-Processing\n", 86 | "\n", 87 | "In this example we are using the provided `tweet_data.csv` as dataset. The `csv` contains ~1800 tweets about different airlines. The `csv` contains 1 column `\"inputs\"` with the tweets. To use this `csv` we need to convert it into a `jsonl` file and upload it to s3. Due to the complex structure of text are only `jsonl` file supported for batch transform. As pre-processing we are removing the `@` in the beginning of the tweet to get the names/identities correct.\n", 88 | "\n", 89 | "_**NOTE**: While preprocessing you need to make sure that your `inputs` fit the `max_length`." 
90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "id": "38ee4aaa", 96 | "metadata": {}, 97 | "outputs": [], 98 | "source": [ 99 | "import csv\n", 100 | "import json\n", 101 | "import sagemaker\n", 102 | "from sagemaker.s3 import S3Uploader,s3_path_join\n", 103 | "\n", 104 | "# get the s3 bucket\n", 105 | "sess = sagemaker.Session()\n", 106 | "role = sagemaker.get_execution_role()\n", 107 | "sagemaker_session_bucket = sess.default_bucket()\n", 108 | "\n", 109 | "# datset files\n", 110 | "dataset_csv_file=\"./data/tweet_data.csv\"\n", 111 | "dataset_jsonl_file=\"./tweet_data.jsonl\"\n", 112 | "\n", 113 | "with open(dataset_csv_file, \"r+\") as infile, open(dataset_jsonl_file, \"w+\") as outfile:\n", 114 | " reader = csv.DictReader(infile)\n", 115 | " for row in reader:\n", 116 | " # remove @\n", 117 | " row[\"inputs\"] = row[\"inputs\"].replace(\"@\",\"\")\n", 118 | " json.dump(row, outfile)\n", 119 | " outfile.write('\\n')\n", 120 | "\n", 121 | " \n", 122 | "# uploads a given file to S3.\n", 123 | "input_s3_path = s3_path_join(\"s3://\",sagemaker_session_bucket,\"batch_transform/input\")\n", 124 | "output_s3_path = s3_path_join(\"s3://\",sagemaker_session_bucket,\"batch_transform/output\")\n", 125 | "s3_file_uri = S3Uploader.upload(dataset_jsonl_file,input_s3_path)\n", 126 | "\n", 127 | "print(f\"{dataset_jsonl_file} uploaded to {s3_file_uri}\")" 128 | ] 129 | }, 130 | { 131 | "cell_type": "markdown", 132 | "id": "85f5b4f2", 133 | "metadata": {}, 134 | "source": [ 135 | "The created file looks like this\n", 136 | "\n", 137 | "```json\n", 138 | "{\"inputs\": \"VirginAmerica What dhepburn said.\"}\n", 139 | "{\"inputs\": \"VirginAmerica plus you've added commercials to the experience... tacky.\"}\n", 140 | "{\"inputs\": \"VirginAmerica I didn't today... Must mean I need to take another trip!\"}\n", 141 | "{\"inputs\": \"VirginAmerica it's really aggressive to blast obnoxious \\\"entertainment\\\"....\"}\n", 142 | "{\"inputs\": \"VirginAmerica and it's a really big bad thing about it\"}\n", 143 | "{\"inputs\": \"VirginAmerica seriously would pay $30 a flight for seats that didn't h....\"}\n", 144 | "{\"inputs\": \"VirginAmerica yes, nearly every time I fly VX this \\u201cear worm\\u201d won\\u2019t go away :)\"}\n", 145 | "{\"inputs\": \"VirginAmerica Really missed a prime opportunity for Men Without ...\"}\n", 146 | "{\"inputs\": \"virginamerica Well, I didn't\\u2026but NOW I DO! :-D\"}\n", 147 | "{\"inputs\": \"VirginAmerica it was amazing, and arrived an hour early. You're too good to me.\"}\n", 148 | "{\"inputs\": \"VirginAmerica did you know that suicide is the second leading cause of death among teens 10-24\"}\n", 149 | "{\"inputs\": \"VirginAmerica I <3 pretty graphics. so much better than minimal iconography. :D\"}\n", 150 | "{\"inputs\": \"VirginAmerica This is such a great deal! Already thinking about my 2nd trip ...\"}\n", 151 | "....\n", 152 | "```" 153 | ] 154 | }, 155 | { 156 | "cell_type": "markdown", 157 | "id": "a49400b8", 158 | "metadata": {}, 159 | "source": [ 160 | "## Create Inference Transformer to run the batch job\n", 161 | "\n", 162 | "We use the [twitter-roberta-base-sentiment](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment) model running our batch transform job. 
This is a RoBERTa-base model trained on ~58M tweets and fine-tuned for sentiment analysis with the TweetEval benchmark.\n" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": null, 168 | "id": "37897523", 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [ 172 | "from sagemaker.huggingface.model import HuggingFaceModel\n", 173 | "\n", 174 | "# Hub Model configuration. \n", 175 | "hub = {\n", 176 | " 'HF_MODEL_ID':'cardiffnlp/twitter-roberta-base-sentiment',\n", 177 | " 'HF_TASK':'text-classification'\n", 178 | "}\n", 179 | "\n", 180 | "# create Hugging Face Model Class\n", 181 | "huggingface_model = HuggingFaceModel(\n", 182 | " env=hub, # configuration for loading model from Hub\n", 183 | " role=role, # iam role with permissions to create an Endpoint\n", 184 | " transformers_version=\"4.6\", # transformers version used\n", 185 | " pytorch_version=\"1.7\", # pytorch version used\n", 186 | " py_version='py36', # python version used\n", 187 | ")\n", 188 | "\n", 189 | "# create Transformer to run our batch job\n", 190 | "batch_job = huggingface_model.transformer(\n", 191 | " instance_count=1, # number of instances used for running the batch job\n", 192 | " instance_type='ml.g4dn.xlarge',# instance type for the batch job\n", 193 | " output_path=output_s3_path, # s3 path where the predictions will be saved\n", 194 | " strategy='SingleRecord') # how records are batched into requests, here one record per request\n", 195 | "\n", 196 | "# starts batch transform job and uses s3 data as input\n", 197 | "batch_job.transform(\n", 198 | " data=s3_file_uri, # preprocessed file location on s3 \n", 199 | " content_type='application/json',# mime-type of the file \n", 200 | " split_type='Line') # how the datapoints are split, here lines since it is `.jsonl`" 201 | ] 202 | }, 203 | { 204 | "cell_type": "markdown", 205 | "id": "60ccea94", 206 | "metadata": {}, 207 | "source": [ 208 | "Use batch transform when you:\n", 209 | "\n", 210 | "* Want to get inferences for an entire dataset and index them to serve inferences in real time\n", 211 | "* Don't need a persistent endpoint that applications (for example, web or mobile apps) can call to get inferences\n", 212 | "* Don't need the subsecond latency that SageMaker hosted endpoints provide\n", 213 | "\n", 214 | "You can also use batch transform to preprocess your data before using it to train a new model or generate inferences.\n", 215 | "The following diagram shows the workflow of a batch transform job:\n", 216 | "\n", 217 | "![batch-transform](./imgs/batch-transform-v2.png)" 218 | ] 219 | }, 220 | { 221 | "cell_type": "markdown", 222 | "id": "4079348a", 223 | "metadata": {}, 224 | "source": [ 225 | "## Access Prediction file\n", 226 | "\n", 227 | "After the batch transform job has run successfully, it creates an output file with the same name and the `.out` file extension. For multiple input files, such as `input1.jsonl` and `input2.jsonl`, the output files are named `input1.jsonl.out` and `input2.jsonl.out`. The batch transform job stores the output files in the specified location in Amazon S3, such as `s3://awsexamplebucket/output/`.\n", 228 | "\n", 229 | "If you want to join each input record with its prediction in the output file, you need to use the `join_source` parameter when calling `transform()` on your `Transformer`.
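As a sketch of what that would look like (not executed in this lab), `join_source` and `output_filter` are arguments of `transform()`; with the objects defined above, it could be called like this — the filter value is an example, not the lab's default:

```python
# Illustrative variant of the transform call above: join every prediction with
# the input record that produced it.
batch_job.transform(
    data=s3_file_uri,                # preprocessed jsonl file on s3
    content_type='application/json', # mime-type of the file
    split_type='Line',               # one record per line
    join_source='Input',             # copy the input record into each output line
    output_filter='$'                # keep the full joined record (input + SageMakerOutput)
)
```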
You can read more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html)\n", 230 | "\n" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": 4, 236 | "id": "c192b7fd", 237 | "metadata": {}, 238 | "outputs": [ 239 | { 240 | "name": "stdout", 241 | "output_type": "stream", 242 | "text": [ 243 | "[{'label': 'LABEL_1', 'score': 0.766870379447937}, {'label': 'LABEL_0', 'score': 0.8912612199783325}, {'label': 'LABEL_1', 'score': 0.5760677456855774}]\n" 244 | ] 245 | } 246 | ], 247 | "source": [ 248 | "import json\n", 249 | "from sagemaker.s3 import S3Downloader\n", 250 | "from ast import literal_eval\n", 251 | "# creating s3 uri for result file -> input file + .out\n", 252 | "output_file = f\"{dataset_jsonl_file}.out\"\n", 253 | "output_path = s3_path_join(output_s3_path,output_file)\n", 254 | "\n", 255 | "# download file\n", 256 | "S3Downloader.download(output_path,'.')\n", 257 | "\n", 258 | "batch_transform_result = []\n", 259 | "with open(output_file) as f:\n", 260 | " for line in f:\n", 261 | " # converts jsonline array to normal array\n", 262 | " line = \"[\" + line.replace(\"[\",\"\").replace(\"]\",\",\") + \"]\"\n", 263 | " batch_transform_result = literal_eval(line) \n", 264 | " \n", 265 | "# print results \n", 266 | "print(batch_transform_result[:3])" 267 | ] 268 | }, 269 | { 270 | "cell_type": "code", 271 | "execution_count": null, 272 | "id": "64f180ed", 273 | "metadata": {}, 274 | "outputs": [], 275 | "source": [] 276 | } 277 | ], 278 | "metadata": { 279 | "interpreter": { 280 | "hash": "c281c456f1b8161c8906f4af2c08ed2c40c50136979eaae69688b01f70e9f4a9" 281 | }, 282 | "kernelspec": { 283 | "display_name": "conda_pytorch_latest_p36", 284 | "language": "python", 285 | "name": "conda_pytorch_latest_p36" 286 | }, 287 | "language_info": { 288 | "codemirror_mode": { 289 | "name": "ipython", 290 | "version": 3 291 | }, 292 | "file_extension": ".py", 293 | "mimetype": "text/x-python", 294 | "name": "python", 295 | "nbconvert_exporter": "python", 296 | "pygments_lexer": "ipython3", 297 | "version": "3.6.13" 298 | } 299 | }, 300 | "nbformat": 4, 301 | "nbformat_minor": 5 302 | } 303 | -------------------------------------------------------------------------------- /workshop_2_going_production/lab3_autoscaling.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "8fde41d2", 6 | "metadata": {}, 7 | "source": [ 8 | "# Going Production: Auto-scale Hugging Face Transformer Endpoints with Amazon SageMaker\n" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "552a996c", 14 | "metadata": {}, 15 | "source": [ 16 | "Welcome to this getting started guide, we will use the new Hugging Face Inference DLCs and Amazon SageMaker Python SDK to deploy a transformer model for real-time inference. \n", 17 | "In this example we are going to deploy a trained Hugging Face Transformer model on to SageMaker for inference." 
18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": null, 23 | "id": "89f94d7a", 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "!pip install \"sagemaker>=2.66.2\" --upgrade" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 2, 33 | "id": "6e4c1bbf", 34 | "metadata": {}, 35 | "outputs": [ 36 | { 37 | "data": { 38 | "text/plain": [ 39 | "'2.67.0'" 40 | ] 41 | }, 42 | "execution_count": 2, 43 | "metadata": {}, 44 | "output_type": "execute_result" 45 | } 46 | ], 47 | "source": [ 48 | "import sagemaker\n", 49 | "sagemaker.__version__" 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "id": "166feef3", 55 | "metadata": {}, 56 | "source": [ 57 | "## Deploy one of the 15 000+ Hugging Face Transformers to Amazon SageMaker for Inference\n", 58 | "\n", 59 | "To deploy a model directly from the Hub to SageMaker we need to define 2 environment variables when creating the `HuggingFaceModel` . We need to define:\n", 60 | "\n", 61 | "- `HF_MODEL_ID`: defines the model id, which will be automatically loaded from [huggingface.co/models](http://huggingface.co/models) when creating or SageMaker Endpoint. The 🤗 Hub provides +15 000 models all available through this environment variable.\n", 62 | "- `HF_TASK`: defines the task for the used 🤗 Transformers pipeline. A full list of tasks can be find [here](https://huggingface.co/transformers/main_classes/pipelines.html)." 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": 3, 68 | "id": "0119b2eb", 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "from sagemaker.huggingface import HuggingFaceModel\n", 73 | "from uuid import uuid4\n", 74 | "import sagemaker \n", 75 | "\n", 76 | "role = sagemaker.get_execution_role()\n", 77 | "\n", 78 | "# Hub Model configuration. https://huggingface.co/models\n", 79 | "hub = {\n", 80 | " 'HF_MODEL_ID':'yiyanghkust/finbert-tone', # model_id from hf.co/models\n", 81 | " 'HF_TASK':'text-classification' # NLP task you want to use for predictions\n", 82 | "}\n", 83 | "\n", 84 | "# endpoint name\n", 85 | "endpoint_name=f'{hub[\"HF_MODEL_ID\"].split(\"/\")[1]}-{str(uuid4())}' # model and endpoint name\n", 86 | "\n", 87 | "# create Hugging Face Model Class\n", 88 | "huggingface_model = HuggingFaceModel(\n", 89 | " env=hub,\n", 90 | " role=role, # iam role with permissions to create an Endpoint\n", 91 | " name=endpoint_name, # model and endpoint name\n", 92 | " transformers_version=\"4.11\", # transformers version used\n", 93 | " pytorch_version=\"1.9\", # pytorch version used\n", 94 | " py_version=\"py38\", # python version of the DLC\n", 95 | ")\n" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 4, 101 | "id": "653ca1e5", 102 | "metadata": {}, 103 | "outputs": [ 104 | { 105 | "name": "stdout", 106 | "output_type": "stream", 107 | "text": [ 108 | "------!" 
109 | ] 110 | } 111 | ], 112 | "source": [ 113 | "# deploy model to SageMaker Inference\n", 114 | "predictor = huggingface_model.deploy(\n", 115 | " initial_instance_count=1,\n", 116 | " instance_type=\"ml.c5.large\"\n", 117 | ")\n", 118 | "# get aws region for dashboards\n", 119 | "aws_region = predictor.sagemaker_session.boto_region_name" 120 | ] 121 | }, 122 | { 123 | "cell_type": "markdown", 124 | "id": "3f35b735", 125 | "metadata": {}, 126 | "source": [ 127 | "**Architecture**\n", 128 | "\n", 129 | "The [Hugging Face Inference Toolkit for SageMaker](https://github.com/aws/sagemaker-huggingface-inference-toolkit) is an open-source library for serving Hugging Face transformer models on SageMaker. It utilizes the SageMaker Inference Toolkit for starting up the model server, which is responsible for handling inference requests. The SageMaker Inference Toolkit uses [Multi Model Server (MMS)](https://github.com/awslabs/multi-model-server) for serving ML models. It bootstraps MMS with a configuration and settings that make it compatible with SageMaker and allow you to adjust important performance parameters, such as the number of workers per model, depending on the needs of your scenario.\n", 130 | "\n", 131 | "![](./imgs/hf-inference-toolkit.png)\n", 132 | "\n", 133 | "**Deploying a model using SageMaker hosting services is a three-step process:**\n", 134 | "\n", 135 | "1. **Create a model in SageMaker** —By creating a model, you tell SageMaker where it can find the model components. \n", 136 | "2. **Create an endpoint configuration for an HTTPS endpoint** —You specify the name of one or more models in production variants and the ML compute instances that you want SageMaker to launch to host each production variant.\n", 137 | "3. **Create an HTTPS endpoint** —Provide the endpoint configuration to SageMaker. The service launches the ML compute instances and deploys the model or models as specified in the configuration\n", 138 | "\n", 139 | "![](./imgs/sm-endpoint.png)\n" 140 | ] 141 | }, 142 | { 143 | "cell_type": "code", 144 | "execution_count": 5, 145 | "id": "ad5d7aa3", 146 | "metadata": {}, 147 | "outputs": [ 148 | { 149 | "data": { 150 | "text/plain": [ 151 | "[{'label': 'negative', 'score': 0.9870443940162659}]" 152 | ] 153 | }, 154 | "execution_count": 5, 155 | "metadata": {}, 156 | "output_type": "execute_result" 157 | } 158 | ], 159 | "source": [ 160 | "# example request, you always need to define \"inputs\"\n", 161 | "data = {\n", 162 | " \"inputs\": \"There is a shortage of capital for project SageMaker. 
We need extra financing\"\n", 163 | "}\n", 164 | "\n", 165 | "# request\n", 166 | "predictor.predict(data)" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": 6, 172 | "id": "a3fd586e", 173 | "metadata": {}, 174 | "outputs": [], 175 | "source": [ 176 | "for i in range(500):\n", 177 | " predictor.predict(data)" 178 | ] 179 | }, 180 | { 181 | "cell_type": "markdown", 182 | "id": "55f5bb84", 183 | "metadata": {}, 184 | "source": [ 185 | "## Model Monitoring\n", 186 | "\n" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": 34, 192 | "id": "8977a0b3", 193 | "metadata": {}, 194 | "outputs": [ 195 | { 196 | "name": "stdout", 197 | "output_type": "stream", 198 | "text": [ 199 | "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#metricsV2:graph=~(metrics~(~(~'AWS*2fSageMaker~'ModelLatency~'EndpointName~'finbert-tone-2d863b7d-3aa4-47c8-a8ca-dd-2021-11-02-07-20-52-760~'VariantName~'AllTraffic))~view~'timeSeries~stacked~false~start~'-PT15M~end~'P0D~region~'us-east-1~stat~'SampleCount~period~30);query=~'*7bAWS*2fSageMaker*2cEndpointName*2cVariantName*7d*20finbert-tone-2d863b7d-3aa4-47c8-a8ca-dd-2021-11-02-07-20-52-760\n" 200 | ] 201 | } 202 | ], 203 | "source": [ 204 | "print(f\"https://console.aws.amazon.com/cloudwatch/home?region={aws_region}#metricsV2:graph=~(metrics~(~(~'AWS*2fSageMaker~'ModelLatency~'EndpointName~'{predictor.endpoint_name}~'VariantName~'AllTraffic))~view~'timeSeries~stacked~false~start~'-PT15M~end~'P0D~region~'{aws_region}~stat~'SampleCount~period~30);query=~'*7bAWS*2fSageMaker*2cEndpointName*2cVariantName*7d*20{predictor.endpoint_name}\")\n" 205 | ] 206 | }, 207 | { 208 | "cell_type": "markdown", 209 | "id": "f8caddfd", 210 | "metadata": {}, 211 | "source": [ 212 | "![model-monitoring-dashboard](./imgs/model-monitoring-dashboard.png)" 213 | ] 214 | }, 215 | { 216 | "cell_type": "markdown", 217 | "id": "a1f65f0e", 218 | "metadata": {}, 219 | "source": [ 220 | "# Auto Scaling your Model\n", 221 | "\n", 222 | "[Amazon SageMaker](https://aws.amazon.com/sagemaker/) is a fully managed service that provides every developer and data scientist with the ability to quickly build, train, and deploy machine learning (ML) models at scale.\n", 223 | "\n", 224 | "Autoscaling is an out-of-the-box feature that monitors your workloads and dynamically adjusts the capacity to maintain steady and predictable performance at the possible lowest cost.\n", 225 | "\n", 226 | "The following diagram is a sample architecture that showcases how a model is served as a endpoint with autoscaling enabled.\n", 227 | "\n", 228 | "\n", 229 | "\n", 230 | "![autoscaling-endpoint](./imgs/autoscaling-endpoint.png)\n", 231 | "\n", 232 | "\n", 233 | "### Reference Blog post [Configuring autoscaling inference endpoints in Amazon SageMaker](https://aws.amazon.com/de/blogs/machine-learning/configuring-autoscaling-inference-endpoints-in-amazon-sagemaker/)" 234 | ] 235 | }, 236 | { 237 | "cell_type": "markdown", 238 | "id": "bc744d93", 239 | "metadata": {}, 240 | "source": [ 241 | "## Configure Autoscaling for our Endpoint\n", 242 | "\n", 243 | "You can define minimum, desired, and maximum number of instances per endpoint and, based on the autoscaling configurations, instances are managed dynamically. The following diagram illustrates this architecture. 
\n", 244 | "\n", 245 | "![scaling-options](./imgs/scaling-options.jpeg)\n", 246 | "\n", 247 | "AWS offers many different [ways to auto-scale your endpoints](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html). One of them Simple-Scaling, where you scale the instance capacity based on `CPUUtilization` of the instances or `SageMakerVariantInvocationsPerInstance`. \n", 248 | "\n", 249 | "In this example we are going to use `SageMakerVariantInvocationsPerInstance` to auto-scale our Endpoint\n", 250 | "\n" 251 | ] 252 | }, 253 | { 254 | "cell_type": "code", 255 | "execution_count": 9, 256 | "id": "7b733d07", 257 | "metadata": {}, 258 | "outputs": [], 259 | "source": [ 260 | "import boto3\n", 261 | "\n", 262 | "# Let us define a client to play with autoscaling options\n", 263 | "asg_client = boto3.client('application-autoscaling') # Common class representing Application Auto Scaling for SageMaker amongst other services\n", 264 | "\n", 265 | "# here resource type is variant and the unique identifier is the resource ID.\n", 266 | "# Example: endpoint/my-bert-fine-tuned/variant/AllTraffic .\n", 267 | "resource_id=f\"endpoint/{predictor.endpoint_name}/variant/AllTraffic\"\n", 268 | "\n", 269 | "# scaling configuration\n", 270 | "response = asg_client.register_scalable_target(\n", 271 | " ServiceNamespace='sagemaker', #\n", 272 | " ResourceId=resource_id,\n", 273 | " ScalableDimension='sagemaker:variant:DesiredInstanceCount', \n", 274 | " MinCapacity=1,\n", 275 | " MaxCapacity=4\n", 276 | ")\n" 277 | ] 278 | }, 279 | { 280 | "cell_type": "markdown", 281 | "id": "38ce53a0", 282 | "metadata": {}, 283 | "source": [ 284 | "Create Scaling Policy with configuration details, e.g. `TargetValue` when the instance should be scaled." 
285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": 10, 290 | "id": "fcc92f18", 291 | "metadata": {}, 292 | "outputs": [], 293 | "source": [ 294 | "response = asg_client.put_scaling_policy(\n", 295 | " PolicyName=f'Request-ScalingPolicy-{predictor.endpoint_name}',\n", 296 | " ServiceNamespace='sagemaker',\n", 297 | " ResourceId=resource_id,\n", 298 | " ScalableDimension='sagemaker:variant:DesiredInstanceCount',\n", 299 | " PolicyType='TargetTrackingScaling',\n", 300 | " TargetTrackingScalingPolicyConfiguration={\n", 301 | " 'TargetValue': 10.0, # Threshold\n", 302 | " 'PredefinedMetricSpecification': {\n", 303 | " 'PredefinedMetricType': 'SageMakerVariantInvocationsPerInstance',\n", 304 | " },\n", 305 | " 'ScaleInCooldown': 300, # duration until scale in\n", 306 | " 'ScaleOutCooldown': 60 # duration between scale out\n", 307 | " }\n", 308 | ")" 309 | ] 310 | }, 311 | { 312 | "cell_type": "markdown", 313 | "id": "3a13397e", 314 | "metadata": {}, 315 | "source": [ 316 | "stress test the endpoint with threaded requests" 317 | ] 318 | }, 319 | { 320 | "cell_type": "code", 321 | "execution_count": null, 322 | "id": "de4c3f4f", 323 | "metadata": {}, 324 | "outputs": [], 325 | "source": [ 326 | "import time\n", 327 | "\n", 328 | "request_duration_in_seconds = 4*65\n", 329 | "end_time = time.time() + request_duration_in_seconds\n", 330 | "\n", 331 | "print(f\"test will run {request_duration_in_seconds} seconds\")\n", 332 | "\n", 333 | "while time.time() < end_time:\n", 334 | " predictor.predict(data)" 335 | ] 336 | }, 337 | { 338 | "cell_type": "markdown", 339 | "id": "63a324ee", 340 | "metadata": {}, 341 | "source": [ 342 | "Monitor the `InvocationsPerInstance` in cloudwatch " 343 | ] 344 | }, 345 | { 346 | "cell_type": "code", 347 | "execution_count": null, 348 | "id": "a75a6b3e", 349 | "metadata": {}, 350 | "outputs": [], 351 | "source": [ 352 | "print(f\"https://console.aws.amazon.com/cloudwatch/home?region={aws_region}#metricsV2:graph=~(metrics~(~(~'AWS*2fSageMaker~'InvocationsPerInstance~'EndpointName~'{predictor.endpoint_name}~'VariantName~'AllTraffic))~view~'timeSeries~stacked~false~region~'{aws_region}~start~'-PT15M~end~'P0D~stat~'SampleCount~period~60);query=~'*7bAWS*2fSageMaker*2cEndpointName*2cVariantName*7d*20{predictor.endpoint_name}\")" 353 | ] 354 | }, 355 | { 356 | "cell_type": "markdown", 357 | "id": "d17a0eff", 358 | "metadata": {}, 359 | "source": [ 360 | "check the endpoint instance_count number" 361 | ] 362 | }, 363 | { 364 | "cell_type": "code", 365 | "execution_count": null, 366 | "id": "34adaf5e", 367 | "metadata": {}, 368 | "outputs": [], 369 | "source": [ 370 | "bt_sm = boto3.client('sagemaker')\n", 371 | "response = bt_sm.describe_endpoint(EndpointName=predictor.endpoint_name)" 372 | ] 373 | }, 374 | { 375 | "cell_type": "code", 376 | "execution_count": 36, 377 | "id": "8b32b7e6", 378 | "metadata": {}, 379 | "outputs": [ 380 | { 381 | "name": "stdout", 382 | "output_type": "stream", 383 | "text": [ 384 | "Endpoint finbert-tone-2d863b7d-3aa4-47c8-a8ca-dd-2021-11-02-07-20-52-760 has \n", 385 | "Current Instance Count: 4\n", 386 | "With a desired instance count of 4\n" 387 | ] 388 | } 389 | ], 390 | "source": [ 391 | "print(f\"Endpoint {response['EndpointName']} has \\nCurrent Instance Count: {response['ProductionVariants'][0]['CurrentInstanceCount']}\\nWith a desired instance count of {response['ProductionVariants'][0]['DesiredInstanceCount']}\")" 392 | ] 393 | }, 394 | { 395 | "cell_type": "code", 396 | "execution_count": null, 397 | "id": 
"346260a9", 398 | "metadata": {}, 399 | "outputs": [], 400 | "source": [ 401 | "print(f\"https://console.aws.amazon.com/sagemaker/home?region={aws_region}#/endpoints/{predictor.endpoint_name}\")" 402 | ] 403 | }, 404 | { 405 | "cell_type": "markdown", 406 | "id": "cecb31f5", 407 | "metadata": {}, 408 | "source": [ 409 | "## Clean up" 410 | ] 411 | }, 412 | { 413 | "cell_type": "code", 414 | "execution_count": 33, 415 | "id": "27187fc3", 416 | "metadata": {}, 417 | "outputs": [], 418 | "source": [ 419 | "# delete endpoint\n", 420 | "predictor.delete_endpoint()" 421 | ] 422 | } 423 | ], 424 | "metadata": { 425 | "instance_type": "ml.t3.medium", 426 | "interpreter": { 427 | "hash": "ec1370a512a4612a2908be3c3c8b0de1730d00dc30104daff827065aeaf438b7" 428 | }, 429 | "kernelspec": { 430 | "display_name": "conda_pytorch_latest_p36", 431 | "language": "python", 432 | "name": "conda_pytorch_latest_p36" 433 | }, 434 | "language_info": { 435 | "codemirror_mode": { 436 | "name": "ipython", 437 | "version": 3 438 | }, 439 | "file_extension": ".py", 440 | "mimetype": "text/x-python", 441 | "name": "python", 442 | "nbconvert_exporter": "python", 443 | "pygments_lexer": "ipython3", 444 | "version": "3.6.13" 445 | } 446 | }, 447 | "nbformat": 4, 448 | "nbformat_minor": 5 449 | } 450 | -------------------------------------------------------------------------------- /workshop_3_mlops/lab_1_sagemaker_pipeline.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# MLOps: End-to-End Hugging Face Transformers with the Hub & SageMaker Pipelines\n", 8 | "\n", 9 | "This notebook demonstrates how to use [SageMaker Pipelines](https://docs.aws.amazon.com/sagemaker/latest/dg/pipelines-sdk.html) to train a [Hugging Face](https://docs.aws.amazon.com/sagemaker/latest/dg/hugging-face.html) Transformer model and deploy it. The SageMaker integration with Hugging Face makes it easy to train and deploy advanced NLP models. A Lambda step in SageMaker Pipelines enables you to easily do lightweight model deployments and other serverless operations.\n", 10 | "\n", 11 | "In this example we are going to fine-tune and deploy a DistilBERT model on the imdb dataset.\n", 12 | "\n", 13 | "**Prerequisites**: \n", 14 | "- Make sure your notebook environment has IAM managed policy `AmazonSageMakerPipelinesIntegrations` as well as `AmazonSageMakerFullAccess`\n", 15 | "\n", 16 | "**Blog Post**\n", 17 | "* [Use a SageMaker Pipeline Lambda step for lightweight model deployments](https://aws.amazon.com/de/blogs/machine-learning/use-a-sagemaker-pipeline-lambda-step-for-lightweight-model-deployments/)" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "# Development Environment and Permissions " 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "## Installation & Imports\n", 32 | "\n", 33 | "We'll start by updating the SageMaker SDK, and importing some necessary packages." 
34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": null, 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "!pip install \"sagemaker>=2.48.0\" --upgrade" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": 1, 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "import boto3\n", 52 | "import os\n", 53 | "import numpy as np\n", 54 | "import pandas as pd\n", 55 | "import sagemaker\n", 56 | "import sys\n", 57 | "import time\n", 58 | "\n", 59 | "from sagemaker.workflow.parameters import ParameterInteger, ParameterFloat, ParameterString\n", 60 | "\n", 61 | "from sagemaker.lambda_helper import Lambda\n", 62 | "\n", 63 | "from sagemaker.sklearn.processing import SKLearnProcessor\n", 64 | "\n", 65 | "from sagemaker.processing import ProcessingInput, ProcessingOutput\n", 66 | "from sagemaker.workflow.steps import CacheConfig, ProcessingStep\n", 67 | "\n", 68 | "from sagemaker.huggingface import HuggingFace, HuggingFaceModel\n", 69 | "import sagemaker.huggingface\n", 70 | "\n", 71 | "from sagemaker.inputs import TrainingInput\n", 72 | "from sagemaker.workflow.steps import TrainingStep\n", 73 | "\n", 74 | "from sagemaker.processing import ScriptProcessor\n", 75 | "from sagemaker.workflow.properties import PropertyFile\n", 76 | "from sagemaker.workflow.step_collections import CreateModelStep, RegisterModel\n", 77 | "\n", 78 | "from sagemaker.workflow.conditions import ConditionLessThanOrEqualTo,ConditionGreaterThanOrEqualTo\n", 79 | "from sagemaker.workflow.condition_step import ConditionStep, JsonGet\n", 80 | "\n", 81 | "from sagemaker.workflow.pipeline import Pipeline, PipelineExperimentConfig\n", 82 | "from sagemaker.workflow.execution_variables import ExecutionVariables" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "## Permissions" 90 | ] 91 | }, 92 | { 93 | "cell_type": "markdown", 94 | "metadata": {}, 95 | "source": [ 96 | "_If you are going to use Sagemaker in a local environment. You need access to an IAM Role with the required permissions for Sagemaker. 
You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it._" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": null, 102 | "metadata": {}, 103 | "outputs": [], 104 | "source": [ 105 | "import sagemaker\n", 106 | "\n", 107 | "sess = sagemaker.Session()\n", 108 | "region = sess.boto_region_name\n", 109 | "\n", 110 | "# sagemaker session bucket -> used for uploading data, models and logs\n", 111 | "# sagemaker will automatically create this bucket if it not exists\n", 112 | "sagemaker_session_bucket=None\n", 113 | "if sagemaker_session_bucket is None and sess is not None:\n", 114 | " # set to default bucket if a bucket name is not given\n", 115 | " sagemaker_session_bucket = sess.default_bucket()\n", 116 | "\n", 117 | "role = sagemaker.get_execution_role()\n", 118 | "sagemaker_session = sagemaker.Session(default_bucket=sagemaker_session_bucket)\n", 119 | "\n", 120 | "print(f\"sagemaker role arn: {role}\")\n", 121 | "print(f\"sagemaker bucket: {sagemaker_session.default_bucket()}\")\n", 122 | "print(f\"sagemaker session region: {sagemaker_session.boto_region_name}\")" 123 | ] 124 | }, 125 | { 126 | "cell_type": "markdown", 127 | "metadata": {}, 128 | "source": [ 129 | "# Pipeline Overview" 130 | ] 131 | }, 132 | { 133 | "cell_type": "markdown", 134 | "metadata": {}, 135 | "source": [ 136 | "![pipeline](./imgs/overview.png)" 137 | ] 138 | }, 139 | { 140 | "cell_type": "markdown", 141 | "metadata": {}, 142 | "source": [ 143 | "# Defining the Pipeline\n", 144 | "\n", 145 | "## 0. Pipeline parameters\n", 146 | "\n", 147 | "Before defining the pipeline, it is important to parameterize it. SageMaker Pipeline can directly be parameterized, including instance types and counts.\n", 148 | "\n", 149 | "Read more about Parameters in the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/build-and-manage-parameters.html)\n", 150 | "\n" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": 3, 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [ 159 | "# S3 prefix where every assets will be stored\n", 160 | "s3_prefix = \"hugging-face-pipeline-demo\"\n", 161 | "\n", 162 | "# s3 bucket used for storing assets and artifacts\n", 163 | "bucket = sagemaker_session.default_bucket()\n", 164 | "\n", 165 | "# aws region used\n", 166 | "region = sagemaker_session.boto_region_name\n", 167 | "\n", 168 | "# base name prefix for sagemaker jobs (training, processing, inference)\n", 169 | "base_job_prefix = s3_prefix\n", 170 | "\n", 171 | "# Cache configuration for workflow\n", 172 | "cache_config = CacheConfig(enable_caching=False, expire_after=\"30d\")\n", 173 | "\n", 174 | "\n", 175 | "# package versions\n", 176 | "transformers_version = \"4.11.0\"\n", 177 | "pytorch_version = \"1.9.0\"\n", 178 | "py_version = \"py38\"\n", 179 | "\n", 180 | "model_id_=\"distilbert-base-uncased\"\n", 181 | "dataset_name_=\"imdb\"\n", 182 | "\n", 183 | "model_id = ParameterString(name=\"ModelId\", default_value=\"distilbert-base-uncased\")\n", 184 | "dataset_name = ParameterString(name=\"DatasetName\", default_value=\"imdb\")" 185 | ] 186 | }, 187 | { 188 | "cell_type": "markdown", 189 | "metadata": {}, 190 | "source": [ 191 | "## 1. Processing Step\n", 192 | "\n", 193 | "A SKLearn Processing step is used to invoke a SageMaker Processing job with a custom python script - `preprocessing.py`. 
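The actual script ships with the workshop under `scripts/preprocessing.py`; the following is only a hypothetical sketch of the contract such a script has to fulfil — it parses the `job_arguments` passed by the `ProcessingStep` below and writes its splits to the `/opt/ml/processing/...` directories that the `ProcessingOutput`s point to. The tokenization details and the validation split handling are assumptions, not the workshop's exact code.

```python
# Hypothetical sketch of scripts/preprocessing.py (the real script is in the repo
# and may differ). It reads the job_arguments and writes the dataset splits to
# the /opt/ml/processing/* paths declared in the ProcessingStep below.
import argparse

from datasets import load_dataset       # assumed to be available in the processing container
from transformers import AutoTokenizer  # assumed to be available in the processing container

parser = argparse.ArgumentParser()
parser.add_argument("--model_id", type=str)
parser.add_argument("--dataset_name", type=str)
parser.add_argument("--transformers_version", type=str)
parser.add_argument("--pytorch_version", type=str)
args, _ = parser.parse_known_args()

tokenizer = AutoTokenizer.from_pretrained(args.model_id)
dataset = load_dataset(args.dataset_name)

def tokenize(batch):
    return tokenizer(batch["text"], padding="max_length", truncation=True)

tokenized = dataset.map(tokenize, batched=True)
tokenized["train"].save_to_disk("/opt/ml/processing/train")

# splitting the test set into test/validation halves is an assumption
test_valid = tokenized["test"].train_test_split(test_size=0.5)
test_valid["train"].save_to_disk("/opt/ml/processing/test")
test_valid["test"].save_to_disk("/opt/ml/processing/validation")
```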
" 194 | ] 195 | }, 196 | { 197 | "cell_type": "markdown", 198 | "metadata": {}, 199 | "source": [ 200 | "### Processing Parameter" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 5, 206 | "metadata": {}, 207 | "outputs": [], 208 | "source": [ 209 | "processing_instance_type = ParameterString(name=\"ProcessingInstanceType\", default_value=\"ml.c5.2xlarge\")\n", 210 | "processing_instance_count = ParameterInteger(name=\"ProcessingInstanceCount\", default_value=1)\n", 211 | "processing_script = ParameterString(name=\"ProcessingScript\", default_value=\"./scripts/preprocessing.py\")" 212 | ] 213 | }, 214 | { 215 | "cell_type": "markdown", 216 | "metadata": {}, 217 | "source": [ 218 | "### Processor" 219 | ] 220 | }, 221 | { 222 | "cell_type": "code", 223 | "execution_count": 6, 224 | "metadata": {}, 225 | "outputs": [], 226 | "source": [ 227 | "processing_output_destination = f\"s3://{bucket}/{s3_prefix}/data\"\n", 228 | "\n", 229 | "\n", 230 | "sklearn_processor = SKLearnProcessor(\n", 231 | " framework_version=\"0.23-1\",\n", 232 | " instance_type=processing_instance_type,\n", 233 | " instance_count=processing_instance_count,\n", 234 | " base_job_name=base_job_prefix + \"/preprocessing\",\n", 235 | " sagemaker_session=sagemaker_session,\n", 236 | " role=role,\n", 237 | ")\n", 238 | "\n", 239 | "step_process = ProcessingStep(\n", 240 | " name=\"ProcessDataForTraining\",\n", 241 | " cache_config=cache_config,\n", 242 | " processor=sklearn_processor,\n", 243 | " job_arguments=[\"--transformers_version\",transformers_version,\n", 244 | " \"--pytorch_version\",pytorch_version,\n", 245 | " \"--model_id\",model_id_,\n", 246 | " \"--dataset_name\",dataset_name_],\n", 247 | " outputs=[\n", 248 | " ProcessingOutput(\n", 249 | " output_name=\"train\",\n", 250 | " destination=f\"{processing_output_destination}/train\",\n", 251 | " source=\"/opt/ml/processing/train\",\n", 252 | " ),\n", 253 | " ProcessingOutput(\n", 254 | " output_name=\"test\",\n", 255 | " destination=f\"{processing_output_destination}/test\",\n", 256 | " source=\"/opt/ml/processing/test\",\n", 257 | " ),\n", 258 | " ProcessingOutput(\n", 259 | " output_name=\"validation\",\n", 260 | " destination=f\"{processing_output_destination}/test\",\n", 261 | " source=\"/opt/ml/processing/validation\",\n", 262 | " ),\n", 263 | " ],\n", 264 | " code=processing_script,\n", 265 | ")" 266 | ] 267 | }, 268 | { 269 | "cell_type": "markdown", 270 | "metadata": {}, 271 | "source": [ 272 | "## 2. Model Training Step\n", 273 | "\n", 274 | "We use SageMaker's [Hugging Face](https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/sagemaker.huggingface.html) Estimator class to create a model training step for the Hugging Face [DistilBERT](https://huggingface.co/distilbert-base-uncased) model. Transformer-based models such as the original BERT can be very large and slow to train. DistilBERT, however, is a small, fast, cheap and light Transformer model trained by distilling BERT base. It reduces the size of a BERT model by 40%, while retaining 97% of its language understanding capabilities and being 60% faster. " 275 | ] 276 | }, 277 | { 278 | "cell_type": "markdown", 279 | "metadata": {}, 280 | "source": [ 281 | "The Hugging Face estimator also takes hyperparameters as a dictionary. The training instance type and size are pipeline parameters that can be easily varied in future pipeline runs without changing any code. 
" 282 | ] 283 | }, 284 | { 285 | "cell_type": "markdown", 286 | "metadata": {}, 287 | "source": [ 288 | "### Training Parameter" 289 | ] 290 | }, 291 | { 292 | "cell_type": "code", 293 | "execution_count": 7, 294 | "metadata": {}, 295 | "outputs": [], 296 | "source": [ 297 | "# training step parameters\n", 298 | "training_entry_point = ParameterString(name=\"TrainingEntryPoint\", default_value=\"train.py\")\n", 299 | "training_source_dir = ParameterString(name=\"TrainingSourceDir\", default_value=\"./scripts\")\n", 300 | "training_instance_type = ParameterString(name=\"TrainingInstanceType\", default_value=\"ml.p3.2xlarge\")\n", 301 | "training_instance_count = ParameterInteger(name=\"TrainingInstanceCount\", default_value=1)\n", 302 | "\n", 303 | "# hyperparameters, which are passed into the training job\n", 304 | "epochs=ParameterString(name=\"Epochs\", default_value=\"1\")\n", 305 | "eval_batch_size=ParameterString(name=\"EvalBatchSize\", default_value=\"32\") \n", 306 | "train_batch_size=ParameterString(name=\"TrainBatchSize\", default_value=\"16\") \n", 307 | "learning_rate=ParameterString(name=\"LearningRate\", default_value=\"3e-5\") \n", 308 | "fp16=ParameterString(name=\"Fp16\", default_value=\"True\")" 309 | ] 310 | }, 311 | { 312 | "cell_type": "markdown", 313 | "metadata": {}, 314 | "source": [ 315 | "### Hugging Face Estimator" 316 | ] 317 | }, 318 | { 319 | "cell_type": "code", 320 | "execution_count": 8, 321 | "metadata": {}, 322 | "outputs": [], 323 | "source": [ 324 | "huggingface_estimator = HuggingFace(\n", 325 | " entry_point=training_entry_point,\n", 326 | " source_dir=training_source_dir,\n", 327 | " base_job_name=base_job_prefix + \"/training\",\n", 328 | " instance_type=training_instance_type,\n", 329 | " instance_count=training_instance_count,\n", 330 | " role=role,\n", 331 | " transformers_version=transformers_version,\n", 332 | " pytorch_version=pytorch_version,\n", 333 | " py_version=py_version,\n", 334 | " hyperparameters={\n", 335 | " 'epochs':epochs, \n", 336 | " 'eval_batch_size': eval_batch_size, \n", 337 | " 'train_batch_size': train_batch_size, \n", 338 | " 'learning_rate': learning_rate, \n", 339 | " 'model_id': model_id,\n", 340 | " 'fp16': fp16\n", 341 | " },\n", 342 | " sagemaker_session=sagemaker_session,\n", 343 | ")\n", 344 | "\n", 345 | "step_train = TrainingStep(\n", 346 | " name=\"TrainHuggingFaceModel\",\n", 347 | " estimator=huggingface_estimator,\n", 348 | " inputs={\n", 349 | " \"train\": TrainingInput(\n", 350 | " s3_data=step_process.properties.ProcessingOutputConfig.Outputs[\n", 351 | " \"train\"\n", 352 | " ].S3Output.S3Uri\n", 353 | " ),\n", 354 | " \"test\": TrainingInput(\n", 355 | " s3_data=step_process.properties.ProcessingOutputConfig.Outputs[\n", 356 | " \"test\"\n", 357 | " ].S3Output.S3Uri\n", 358 | " ),\n", 359 | " },\n", 360 | " cache_config=cache_config,\n", 361 | ")" 362 | ] 363 | }, 364 | { 365 | "cell_type": "markdown", 366 | "metadata": {}, 367 | "source": [ 368 | "## 3. Model evaluation Step\n", 369 | "\n", 370 | "A ProcessingStep is used to evaluate the performance of the trained model. Based on the results of the evaluation, either the model is created, registered, and deployed, or the pipeline stops.\n", 371 | "\n", 372 | "In the training job, the model was evaluated against the test dataset, and the result of the evaluation was stored in the `model.tar.gz` file saved by the training job. The results of that evaluation are copied into a `PropertyFile` in this ProcessingStep so that it can be used in the ConditionStep. 
" 373 | ] 374 | }, 375 | { 376 | "cell_type": "markdown", 377 | "metadata": {}, 378 | "source": [ 379 | "### Evaluation Parameter" 380 | ] 381 | }, 382 | { 383 | "cell_type": "code", 384 | "execution_count": 10, 385 | "metadata": {}, 386 | "outputs": [], 387 | "source": [ 388 | "evaluation_script = ParameterString(name=\"EvaluationScript\", default_value=\"./scripts/evaluate.py\")" 389 | ] 390 | }, 391 | { 392 | "cell_type": "markdown", 393 | "metadata": {}, 394 | "source": [ 395 | "### Evaluator" 396 | ] 397 | }, 398 | { 399 | "cell_type": "code", 400 | "execution_count": 11, 401 | "metadata": {}, 402 | "outputs": [], 403 | "source": [ 404 | "script_eval = SKLearnProcessor(\n", 405 | " framework_version=\"0.23-1\",\n", 406 | " instance_type=processing_instance_type,\n", 407 | " instance_count=processing_instance_count,\n", 408 | " base_job_name=base_job_prefix + \"/evaluation\",\n", 409 | " sagemaker_session=sagemaker_session,\n", 410 | " role=role,\n", 411 | ")\n", 412 | "\n", 413 | "evaluation_report = PropertyFile(\n", 414 | " name=\"HuggingFaceEvaluationReport\",\n", 415 | " output_name=\"evaluation\",\n", 416 | " path=\"evaluation.json\",\n", 417 | ")\n", 418 | "\n", 419 | "step_eval = ProcessingStep(\n", 420 | " name=\"HuggingfaceEvalLoss\",\n", 421 | " processor=script_eval,\n", 422 | " inputs=[\n", 423 | " ProcessingInput(\n", 424 | " source=step_train.properties.ModelArtifacts.S3ModelArtifacts,\n", 425 | " destination=\"/opt/ml/processing/model\",\n", 426 | " )\n", 427 | " ],\n", 428 | " outputs=[\n", 429 | " ProcessingOutput(\n", 430 | " output_name=\"evaluation\",\n", 431 | " source=\"/opt/ml/processing/evaluation\",\n", 432 | " destination=f\"s3://{bucket}/{s3_prefix}/evaluation_report\",\n", 433 | " ),\n", 434 | " ],\n", 435 | " code=evaluation_script,\n", 436 | " property_files=[evaluation_report],\n", 437 | " cache_config=cache_config,\n", 438 | ")" 439 | ] 440 | }, 441 | { 442 | "cell_type": "markdown", 443 | "metadata": {}, 444 | "source": [ 445 | "## 4. Register the model\n", 446 | "\n", 447 | "The trained model is registered in the Model Registry under a Model Package Group. Each time a new model is registered, it is given a new version number by default. The model is registered in the \"Approved\" state so that it can be deployed. Registration will only happen if the output of the [6. Condition for deployment](#6.-Condition-for-deployment) is true, i.e, the metrics being checked are within the threshold defined." 
448 | ] 449 | }, 450 | { 451 | "cell_type": "code", 452 | "execution_count": 12, 453 | "metadata": {}, 454 | "outputs": [], 455 | "source": [ 456 | "model = HuggingFaceModel(\n", 457 | " model_data=step_train.properties.ModelArtifacts.S3ModelArtifacts,\n", 458 | " role=role,\n", 459 | " transformers_version=transformers_version,\n", 460 | " pytorch_version=pytorch_version,\n", 461 | " py_version=py_version,\n", 462 | " sagemaker_session=sagemaker_session,\n", 463 | ")\n", 464 | "model_package_group_name = \"HuggingFaceModelPackageGroup\"\n", 465 | "step_register = RegisterModel(\n", 466 | " name=\"HuggingFaceRegisterModel\",\n", 467 | " model=model,\n", 468 | " content_types=[\"application/json\"],\n", 469 | " response_types=[\"application/json\"],\n", 470 | " inference_instances=[\"ml.g4dn.xlarge\", \"ml.m5.xlarge\"],\n", 471 | " transform_instances=[\"ml.g4dn.xlarge\", \"ml.m5.xlarge\"],\n", 472 | " model_package_group_name=model_package_group_name,\n", 473 | " approval_status=\"Approved\",\n", 474 | ")" 475 | ] 476 | }, 477 | { 478 | "cell_type": "markdown", 479 | "metadata": {}, 480 | "source": [ 481 | "## 5. Model Deployment\n", 482 | "\n", 483 | "We create a custom step `ModelDeployment` derived from the provided `LambdaStep`. This step will create a Lambda function and invoke it to deploy our model as a SageMaker Endpoint." 484 | ] 485 | }, 486 | { 487 | "cell_type": "code", 488 | "execution_count": 13, 489 | "metadata": {}, 490 | "outputs": [ 491 | { 492 | "name": "stdout", 493 | "output_type": "stream", 494 | "text": [ 495 | "Using ARN from existing role: sagemaker-pipelines-model-deployment-role\n" 496 | ] 497 | } 498 | ], 499 | "source": [ 500 | "# custom Helper Step for ModelDeployment\n", 501 | "from utils.deploy_step import ModelDeployment\n", 502 | "\n", 503 | "# we will use the iam role from the notebook session for the created endpoint\n", 504 | "# this role will be attached to our endpoint and needs permissions, e.g. to download assets from s3\n", 505 | "sagemaker_endpoint_role=sagemaker.get_execution_role()\n", 506 | "\n", 507 | "\n", 508 | "step_deployment = ModelDeployment(\n", 509 | " model_name=f\"{model_id_}-{dataset_name_}\",\n", 510 | " registered_model=step_register.steps[0],\n", 511 | " endpoint_instance_type=\"ml.g4dn.xlarge\",\n", 512 | " sagemaker_endpoint_role=sagemaker_endpoint_role,\n", 513 | " autoscaling_policy=None,\n", 514 | ")" 515 | ] 516 | }, 517 | { 518 | "cell_type": "markdown", 519 | "metadata": {}, 520 | "source": [ 521 | "## 6. Condition for deployment\n", 522 | "\n", 523 | "For the condition to be `True` and the steps after evaluation to run, the evaluated accuracy of the Hugging Face model must be greater than our `ThresholdAccuracy` parameter."
524 | ] 525 | }, 526 | { 527 | "cell_type": "markdown", 528 | "metadata": {}, 529 | "source": [ 530 | "### Condition Parameter" 531 | ] 532 | }, 533 | { 534 | "cell_type": "code", 535 | "execution_count": 14, 536 | "metadata": {}, 537 | "outputs": [], 538 | "source": [ 539 | "threshold_accuracy = ParameterFloat(name=\"ThresholdAccuracy\", default_value=0.8)" 540 | ] 541 | }, 542 | { 543 | "cell_type": "markdown", 544 | "metadata": {}, 545 | "source": [ 546 | "### Condition" 547 | ] 548 | }, 549 | { 550 | "cell_type": "code", 551 | "execution_count": 15, 552 | "metadata": {}, 553 | "outputs": [ 554 | { 555 | "name": "stderr", 556 | "output_type": "stream", 557 | "text": [ 558 | "The class JsonGet has been renamed in sagemaker>=2.\n", 559 | "See: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\n" 560 | ] 561 | } 562 | ], 563 | "source": [ 564 | "cond_gte = ConditionGreaterThanOrEqualTo(\n", 565 | " left=JsonGet(\n", 566 | " step=step_eval,\n", 567 | " property_file=evaluation_report,\n", 568 | " json_path=\"eval_accuracy\",\n", 569 | " ),\n", 570 | " right=threshold_accuracy,\n", 571 | ")\n", 572 | "\n", 573 | "step_cond = ConditionStep(\n", 574 | " name=\"CheckHuggingfaceEvalAccuracy\",\n", 575 | " conditions=[cond_gte],\n", 576 | " if_steps=[step_register, step_deployment],\n", 577 | " else_steps=[],\n", 578 | ")" 579 | ] 580 | }, 581 | { 582 | "cell_type": "markdown", 583 | "metadata": {}, 584 | "source": [ 585 | "# Pipeline definition and execution\n", 586 | "\n", 587 | "SageMaker Pipelines constructs the pipeline graph from the implicit definition created by the way pipeline steps inputs and outputs are specified. There's no need to specify that a step is a \"parallel\" or \"serial\" step. Steps such as model registration after the condition step are not listed in the pipeline definition because they do not run unless the condition is true. If so, they are run in order based on their specified inputs and outputs.\n", 588 | "\n", 589 | "Each Parameter we defined holds a default value, which can be overwritten before starting the pipeline. 
[Parameter Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/build-and-manage-parameters.html)\n", 590 | "\n", 591 | "\n" 592 | ] 593 | }, 594 | { 595 | "cell_type": "markdown", 596 | "metadata": {}, 597 | "source": [ 598 | "### Overwriting Parameters" 599 | ] 600 | }, 601 | { 602 | "cell_type": "code", 603 | "execution_count": 16, 604 | "metadata": {}, 605 | "outputs": [], 606 | "source": [ 607 | "# define parameter which should be overwritten\n", 608 | "pipeline_parameters=dict(\n", 609 | " ModelId=\"distilbert-base-uncased\",\n", 610 | " ThresholdAccuracy=0.7,\n", 611 | " Epochs=\"3\",\n", 612 | " TrainBatchSize=\"32\",\n", 613 | " EvalBatchSize=\"64\",\n", 614 | " )" 615 | ] 616 | }, 617 | { 618 | "cell_type": "markdown", 619 | "metadata": {}, 620 | "source": [ 621 | "### Create Pipeline" 622 | ] 623 | }, 624 | { 625 | "cell_type": "code", 626 | "execution_count": 17, 627 | "metadata": {}, 628 | "outputs": [], 629 | "source": [ 630 | "pipeline = Pipeline(\n", 631 | " name=f\"HuggingFaceDemoPipeline\",\n", 632 | " parameters=[\n", 633 | " model_id,\n", 634 | " dataset_name,\n", 635 | " processing_instance_type,\n", 636 | " processing_instance_count,\n", 637 | " processing_script,\n", 638 | " training_entry_point,\n", 639 | " training_source_dir,\n", 640 | " training_instance_type,\n", 641 | " training_instance_count,\n", 642 | " evaluation_script,\n", 643 | " threshold_accuracy,\n", 644 | " epochs,\n", 645 | " eval_batch_size,\n", 646 | " train_batch_size,\n", 647 | " learning_rate,\n", 648 | " fp16\n", 649 | " ],\n", 650 | " steps=[step_process, step_train, step_eval, step_cond],\n", 651 | " sagemaker_session=sagemaker_session,\n", 652 | ")" 653 | ] 654 | }, 655 | { 656 | "cell_type": "markdown", 657 | "metadata": {}, 658 | "source": [ 659 | "We can examine the pipeline definition in JSON format. You also can inspect the pipeline graph in SageMaker Studio by going to the page for your pipeline. " 660 | ] 661 | }, 662 | { 663 | "cell_type": "code", 664 | "execution_count": null, 665 | "metadata": {}, 666 | "outputs": [], 667 | "source": [ 668 | "import json\n", 669 | "\n", 670 | "json.loads(pipeline.definition())" 671 | ] 672 | }, 673 | { 674 | "cell_type": "markdown", 675 | "metadata": {}, 676 | "source": [ 677 | "![pipeline](./imgs/pipeline.png)" 678 | ] 679 | }, 680 | { 681 | "cell_type": "markdown", 682 | "metadata": {}, 683 | "source": [ 684 | "`upsert` creates or updates the pipeline." 685 | ] 686 | }, 687 | { 688 | "cell_type": "code", 689 | "execution_count": null, 690 | "metadata": {}, 691 | "outputs": [], 692 | "source": [ 693 | "pipeline.upsert(role_arn=role)" 694 | ] 695 | }, 696 | { 697 | "cell_type": "markdown", 698 | "metadata": {}, 699 | "source": [ 700 | "### Run the pipeline" 701 | ] 702 | }, 703 | { 704 | "cell_type": "code", 705 | "execution_count": 20, 706 | "metadata": {}, 707 | "outputs": [], 708 | "source": [ 709 | "execution = pipeline.start(parameters=pipeline_parameters)" 710 | ] 711 | }, 712 | { 713 | "cell_type": "code", 714 | "execution_count": null, 715 | "metadata": {}, 716 | "outputs": [], 717 | "source": [ 718 | "execution.wait()" 719 | ] 720 | }, 721 | { 722 | "cell_type": "markdown", 723 | "metadata": {}, 724 | "source": [ 725 | "## Getting predictions from the endpoint\n", 726 | "\n", 727 | "After the previous cell completes, you can check whether the endpoint has finished deploying.\n", 728 | "\n", 729 | "We can use the `endpoint_name` to create up a `HuggingFacePredictor` object that will be used to get predictions." 
730 | ] 731 | }, 732 | { 733 | "cell_type": "code", 734 | "execution_count": 24, 735 | "metadata": {}, 736 | "outputs": [], 737 | "source": [ 738 | "from sagemaker.huggingface import HuggingFacePredictor\n", 739 | "\n", 740 | "endpoint_name = f\"{model_id}-{dataset_name}\"\n", 741 | "\n", 742 | "# check if endpoint is up and running\n", 743 | "print(f\"https://console.aws.amazon.com/sagemaker/home?region={region}#/endpoints/{endpoint_name}\")\n" 744 | ] 745 | }, 746 | { 747 | "cell_type": "code", 748 | "execution_count": null, 749 | "metadata": {}, 750 | "outputs": [], 751 | "source": [ 752 | "hf_predictor = HuggingFacePredictor(endpoint_name,sagemaker_session=sagemaker_session)" 753 | ] 754 | }, 755 | { 756 | "cell_type": "markdown", 757 | "metadata": {}, 758 | "source": [ 759 | "### Test data\n", 760 | "\n", 761 | "Here are a couple of sample reviews we would like to classify as positive (`pos`) or negative (`neg`). Demonstrating the power of advanced Transformer-based models such as this Hugging Face model, the model should do quite well even though the reviews are mixed. " 762 | ] 763 | }, 764 | { 765 | "cell_type": "code", 766 | "execution_count": 25, 767 | "metadata": {}, 768 | "outputs": [ 769 | { 770 | "data": { 771 | "text/plain": [ 772 | "[{'label': 'pos', 'score': 0.9690886735916138}]" 773 | ] 774 | }, 775 | "execution_count": 25, 776 | "metadata": {}, 777 | "output_type": "execute_result" 778 | } 779 | ], 780 | "source": [ 781 | "sentiment_input1 = {\"inputs\":\"Although the movie had some plot weaknesses, it was engaging. Special effects were mind boggling. Can't wait to see what this creative team does next.\"}\n", 782 | "\n", 783 | "hf_predictor.predict(sentiment_input1)" 784 | ] 785 | }, 786 | { 787 | "cell_type": "code", 788 | "execution_count": 26, 789 | "metadata": {}, 790 | "outputs": [ 791 | { 792 | "data": { 793 | "text/plain": [ 794 | "[{'label': 'neg', 'score': 0.9938264489173889}]" 795 | ] 796 | }, 797 | "execution_count": 26, 798 | "metadata": {}, 799 | "output_type": "execute_result" 800 | } 801 | ], 802 | "source": [ 803 | "sentiment_input2 = {\"inputs\":\"There was some good acting, but the story was ridiculous. The other sequels in this franchise were better. It's time to take a break from this IP, but if they switch it up for the next one, I'll check it out.\"}\n", 804 | "\n", 805 | "hf_predictor.predict(sentiment_input2)" 806 | ] 807 | }, 808 | { 809 | "cell_type": "markdown", 810 | "metadata": {}, 811 | "source": [ 812 | "## Cleanup Resources\n", 813 | "\n", 814 | "The following cell will delete the resources created by the Lambda function and the Lambda itself. \n", 815 | "Deleting other resources such as the S3 bucket and the IAM role for the Lambda function are the responsibility of the notebook user. 
" 816 | ] 817 | }, 818 | { 819 | "cell_type": "code", 820 | "execution_count": null, 821 | "metadata": {}, 822 | "outputs": [], 823 | "source": [ 824 | "sm_client = boto3.client(\"sagemaker\")\n", 825 | "\n", 826 | "# Delete the Lambda function\n", 827 | "step_deployment.func.delete()\n", 828 | "\n", 829 | "# Delete the endpoint\n", 830 | "hf_predictor.delete_endpoint()" 831 | ] 832 | } 833 | ], 834 | "metadata": { 835 | "instance_type": "ml.t3.medium", 836 | "kernelspec": { 837 | "display_name": "Python 3", 838 | "language": "python", 839 | "name": "python3" 840 | }, 841 | "language_info": { 842 | "codemirror_mode": { 843 | "name": "ipython", 844 | "version": 3 845 | }, 846 | "file_extension": ".py", 847 | "mimetype": "text/x-python", 848 | "name": "python", 849 | "nbconvert_exporter": "python", 850 | "pygments_lexer": "ipython3", 851 | "version": "3.8.5" 852 | } 853 | }, 854 | "nbformat": 4, 855 | "nbformat_minor": 4 856 | } 857 | -------------------------------------------------------------------------------- /workshop_1_getting_started_with_amazon_sagemaker/lab_3_spot_instances.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Huggingface Sagemaker-sdk - Spot instances example\n", 8 | "### Binary Classification with `Trainer` and `imdb` dataset" 9 | ] 10 | }, 11 | { 12 | "attachments": { 13 | "image.png": { 14 | "image/png": "iVBORw0KGgoAAAANSUhEUgAABHQAAAKVCAYAAACuxLyrAAAgAElEQVR4Aezd8W8b573n+/4F/S0/LLABFrj9sbhYrIALaAOsigIRcFIBB9H+EAiLQAGCFVpACwNanC2EAJGDREcHsFodSHuuWmgT9sRRfBELcSOfXCnr61UTVes6Vipbsiu5sku7sqqkrCtbikPZtPS5eIYz5MxwhqRIipwR3wJYURI588zr+Q6b5+NnnvnWwcGBGvnY399Xsz2ePn0qHhhQA9QANUANUAPUADVADVAD1AA1QA3EuQaabSxvjreR+Yl/39/y/6IePzdDp8f5pKTt/J8KNUANUAPUADVADVAD1AA1QA1QA9RANTXQDOP+euQnxfZR90DnOHZqNUXOe/mQpAaoAWqAGqAGqAFqgBqgBqgBaoAaaIYaOI55QLHA5aj/VrdA57h0XDOcZBwj/2dCDVAD1AA1QA1QA9QANUANUAPUADVQjxo4LlmBOY6jDnD82z/yQCfOnVOP4mUffEhSA9QANUANUAPUADVADVAD1AA1QA1QA/kaiHOO4A9djvLnIwt04tgBnED5EwgLLKgBaoAaoAaoAWqAGqAGqAFqgBqgBqJQA3HMF44yyHG2XfNAJ27QUShO2sCHJDVADVAD1AA1QA1QA9QANUANUAPUADVQugbiljk44ctRfK9poBMHWE6Q0icIRhhRA9QANUANUAPUADVADVAD1AA1QA3EoQbikEMcRZhjtlmTQCfqgHEoQtrIhyU1QA1QA9QANUANUAPUADVADVAD1AA1UHkNRD2bqHWwU3WgE1UwToLKTwLssKMGqAFqgBqgBqgBaoAaoAaoAWqAGohzDUQ1q6hlqFNxoFMKx3T83t6eHj16pK+//lo7Ozt6+PChHjx4wAMDaoAaoAaoAWqAGqAGqAFqgBqgBqgBaoAaiH0NmJzD5B0m9zD5h8lBTB5SKjOpRbBTUaAT1rBMJqN0Oq3d3V2rU8x3czCPHz/OHZD4QgABBBBAAAEEEEAAAQQQQAABBI6BgMlHTIBjcg+Tf7jzEJOPmJwkLEOpNtQ5VKAT1gjT+G+++cYKcUwq9eTJE5mG8YUAAggggAACCCCAAAIIIIAAAgg0k4DJQ0wuYvIRc5WSyUuKzdqpNNgpO9AJCnP8QY55DV8IIIAAAggggAACCCCAAAIIIIAAArJm55hgx1yaZWbshAU7lYQ6ZQU6QWGOmTZkrhMz04nMc74QQAABBBBAAAEEEEAAAQQQQAABBAoFTJBj8hOTo4RdhnXYUKdkoBMU5phrw5xpQ4XN5DcIIIAAAggggAACCCCAAAIIIIAAAn4BZ7kac0lWUN5ymFAnNNAJ2rD5nZkiZMIcs3O+EEAAAQQQQAABBBBAAAEEEEAAAQTKFzB5inMJVlj2Uk6wExjohG3QmZlj/s4XAggggAACCCCAAAIIIIAAAggggMDhBcwlWGayjMlZwjKYUqFOQaATtiFzjZdJkFgv5/AdxTsQQAABBBBAAAEEEEAAAQQQQAABt4A7ZwnLYoqFOp5AJ2wDJjkyYY5JjvhCAAEEEEAAAQQQQAABBBBAAAEEEKheYG9vz1ooOezuVyanCQt1coFOWJhjfv/o0SPrUX1T2QICCCCAAAIIIIAAAggggAACCCCAgCNgFko2uUuxXCYo1LECnWJvcqYAmTfzhQACCCCAAAIIIIAAAggggAACCCBQOwGTt5iroorN0jG5jT/U+VaxMMf8zaREZgoQXwgggAACCCCAAAIIIIAAAggggAACtRcwuYuZqVMqo3GHOkUDHWd2Tu2byhYRQAABBBBAAAEEEEAAAQQQQAABBByBcmbpuGfqFA10TDpkHnwhgAACCCCAAAIIIIAAAggggAACCBydgMlf0ul0yVk6TqhTNNDZ3d3lNuVH11dsGQEEEEAAAQQQQAABBBBAAAEEELAEzFVSJocpddmV8/fQQMcsxvPgwQNYEUAAAQQQQAABBBBAAAEEEEAAAQTqIFDuZVcm1AkNdMyCPGZBZL4QQAABBBBAAAEEEEAAAQQQQAABBI5ewLkxlTMLp9j30E
DHbOTx48dH31r2gAACCCCAAAIIIIAAAggggAACCCBg3WXc5DHFghznb6GBztdff60nT57AiQACCCCAAAIIIIAAAggggAACCCBQBwGTw5g8xgltin0PDXR2dnZk1tHhCwEEEEAAAQQQQAABBBBAAAEEEEDg6AUOszByaKBjFuI5ODg4+tayBwQQQAABBBBAAAEEEEAAAQQQQAABa2aOyWOKzcxx/hYa6HCHKyoJAQQQQAABBBBAAAEEEEAAAQQQqK+AyWOc0KbYdwKd+vYLe0MAAQQQQAABBBBAAAEEEEAAAQRCBQh0Qmn4AwIIIIAAAggggAACCCCAAAIIIBBNAQKdaPYLrUIAAQQQQAABBBBAAAEEEEAAAQRCBQh0Qmn4AwIIIIAAAggggAACCCCAAAIIIBBNAQKdaPYLrUIAAQQQQAABBBBAAAEEEEAAAQRCBQh0Qmn4AwIIIIAAAggggAACCCCAAAIIIBBNAQKdaPYLrUIAAQQQQAABBBBAAAEEEEAAAQRCBQh0Qmn4AwIIIIAAAggggAACCCCAAAIIIBBNAQKdaPYLrUIAAQQQQAABBBBAAAEEEEAAAQRCBQh0Qmn4AwIIIIAAAggggAACCCCAAAIIIBBNgaoCnadPn8psgC8EEEAAAQQQQAABBBBAAAEEEEAAgfoJmDzG5DL7+/tFH98KegGBTv06ij0hgAACCCCAAAIIIIAAAggggAACjkDFgY4Jcwh0HEa+I4AAAggggAACCCCAAAIIIIAAAvUTcAKdUrN0PDN0nDCHQKd+HcWeEEAAAQQQQAABBBBAAAEEEEAAAUfAHegUC3UIdBwxvkdUIKPU+pKuLi3penI7om2sQbPSW7o+f14fnjmt90+f0fT5eV3fStdgw2wCAQQQQAABBBBAAAEEEEAgTgKHDnTcs3OiPkPnF7/4hWZnZ/WrX/3KevzmN7/RnTt3tL1d+wG/2abZttmHsz+zb9MGvuohkNLC+Em9dfKk3jq9okw9dlnTfaR159JZvZ84rekrGwqMaHZX9OGIfYzmOK3Hm3p/yby6jPfXtL012lhmS1+cO613T5/VQnK3RhtlMwgggAACCCCAAAIIIIDA8RfwBzphs3SsGTr+MCfqgc7JkydV7GHCll/+8pe6evWq0unAIXRgBZjXmveY95ptFNuH+Rtf9RCIeaCzdUHjuZBmXAsFmWNGt86eskOcU3p35oquryzpi/lLumVykJLvr0cfHH4f25cm7GM6qbdGzuve4TfBOxBAAAEEEEAAAQQQQACBphQICnSCQp1vBYU5cQ90/EHMz372M2t2TVC4Y35nZt6Y1/jfV+rnI6ms1JKmz5zRB+eXVDD2P5IdlrHRhrYp5oHO7pLefdOedTN0Rtf9+WJmXR8OZf8+cna9cAZSqfeX0X21fsnu6ow+OHNGH17ZCt10evWsTtlB1tDElejUcmiL+QMCCCCAAAIIIIAAAgggEA2BsEDHH+rEOtAZHx/X6uqq5ubmrEcikZD5XVgQ8w//8A/W7BtzGZV5mJk45ndhrzfbMtt0tm/25d7+kXT1uj0QjtKshoa2KeaBjqTM9rquLq3oznbABWO7l/SOHXy8a11iVVhVRd9f+PIj/829mRFr9s2ps6uFAVRu7xltb6zo6tK6UgGHnXsZTxBAAAEEEEAAAQQQQAABBDwCTRHomLAl7CuZTFpBjDuACQtunN+b15rwxrw37Mvs03l92Guq+X1m6XT2UpUIBTqNbVP8A52i9bA9rwk70Hl/JR7Jx62zQ2UEOkWPmj8igAACCCCAAAIIIIAAAgiECBQLdNyzdGI9Q6dYoON2MbNxZmZmNDQ0lAtjnFDG/M78zbymnK+jDnRya49EKNBpbJsIdMqpy/q9JqPrp98k0KkfOHtCAAEEEEAAAQQQQACBJhMg0AnocLNezpkzZ3KhjnketK5OwFtzvzqaQCetraV5XZw5rw/Hs7Mf3npzTO+fO6dp+/Hx/HrAHZJ2dW9pRh8mxjR2akinTo1oInFWnyxtqOC+QqklfWJt67y+2AieCZLZuKKPrddc0Np2pW3KURV5klFq9ZI+PjOh8ZFTOjU0pJGRcb1//oruFDa8vLtcpVO6deWCPkyMe7d5bl63gi51yrVuV3eunNcHE47hKY2Pn9b0/IpC7xqe3tDVmbN6d3xEI0N594tB7krr1vx5qx8/dq85kzG3Kb+gT85O5NaaGUs4/X1el5POYjsh78+1P/8kvbWihXOn9c6Y064xvXNmRlc3nG3lX2s9O5RZSmumvTPn9M4pe02gkUSuPk2dXlx1haKZDX1x3hyP+1h8+zc/WrdrP6f3x8c04qphYxnSamsj26sXrH3n9pkx7Turd+1jHxkZ07tnS/V9QHv4FQIIIIAAAggggAACCCDQYIFSgY4zS6cpZui4+8JcUuXMzjHPD/t1NIHOdj60yN0Rybl9tf19fF4pd2N3V/XxeHamRPY2197Xn5qY1z1PbmNmutivH5vx/c1sOP/3UwmziG0FbXK3L+z5blIXJ5y7OnnbbB3Hm2OaXnWnOiVm6GRSuj5zWmPOwsNBfkMJXfbg2Y1Lb+jihB2gBb5vXJ+se2OFzNYlvesEGgHvGRo7q+uuXCPrah+n+7br2/m1c4L6751LzkZKHL91KNtaOz+eC4YKtzekdy4k8wFJJWbpJb0fcLzufY2d38j3enpJ79qvf+eSuz9dL0le0Dv2gtDu7TjPh8bPa81hyL/NenbvvLOOz7rSW1f0wUjIuXDqtK6GbMO3SX5EAAEEEEAAAQQQQAABBCIhUE6gY0IdAp1DdtfRBDoZba1c0uVL8/rQCV2GJvTxvPmd/VjZyg/I0+uaHrFDglMJXVzZ0HY6o0x6W/dWZnIzKMxdk9xxRGbrgias4ONNTcx771C0u3Q6Gwi8OWGHH4dsUzmO6aQ+HnMG3kOaODuv68mUdbnb1voVfWzClSFn/84GSwQauyv6wAoFhjR+ZkZfrCa1ldrWdmpDa/OnNWaHCkNWSOVs03zPyFnc18yGml7a0q4JwDJpbSeX9EnilIZOndF1TxaRD73eGjmjLza2s4sCZ4z7BStUODVxyRu8WUFZQKBjZuiYvp05rRG7jeNn53P9fX3LSeNKHL92tZa77flJjUyc0+XVDaW2t5XaWNflsyboOaUPVlwHUpFZSmumvfPncyHM0PhZLTj1eemSriZd+ygR6GQ2ZjTuhHCnEvr4yrq2zGLlW0ldv+AK6EbO6Za7iO0udAKdt0bGNW76f2hMH1xY0lpyQ/fWV7RwZkRDtuvIuWSRxZvdNcFzBBBAAAEEEEAAAQQQQKDxAgQ6IX0QzRk6TmMzWrMXnH0rdA2djG6ds2e4FAQO2e2Y4GbcGsyOa8EzM8WEGGPZRZffTOgLZ+ZCJqmP7YBo/MKWb/BbTpuc9hf7ntEde1bFWyeH9P6VlG8/5r1ppVL+0XupQEPaXr2i6yG3Utq6MJ493pN+iw19Yh+zZ2ZJ7hAySqedUMX+ZW4B4yF9uOr7m/WStNL+5ocFOs5+cts8qeBFkYsff3rlTG5mztjZlcDbg
https://raw.githubusercontent.com/My-Machine-Learning-Projects-CT/huggingface-sagemaker-workshop-series/main/workshop_3_mlops/imgs/pipeline.png
fPfJAsmvxt+PQ1mueDFPjgh83k6ASc9nlygQdjZXVk9nW/ULkiDapkcLivOptmSQiHPuyNVGpNZKEU6FVdvIEneA57cvcjQd2XDrehnal4fw47wTvuTK7XGlAY7XJvEmfl6IjWWk66uHJBAEIQAACEIBASQT8ncyioMjhx6uhzNbXR8i1epPWgYrWqUTzGU1llfI4joWe1IdwsTpOXT+geTJK1cNEjtHoqw3ppD7iyX4uXRV1vInTtb8kXiRTKgEEnVJxlpvYPYKONky1kq6eB6VO1wQdzeiwlklPFd/QYyhw0evoS+xyyHLzcCnW0IkauzVPXLHZvBZ60ad0fxc1FvVlY1/XCws6pyh+R026RYYrjoKd1ppDx8OoGhHFXvLJgLd2Z31ZR7GIWmOT13VfNWWxXF85N48EE17UoyM9uUHP7Dg75tF9dr4GH7c0bf7nn3/a7vvnx4V0NZB3e+rEjjIvt4YMzn2m4rTN1pOCji/H/UYW4240Ily6y+Mdgo4f2UsQ0yfONl7wl6EHXGd+FlPMLpvDDK+ifTi6U/L5OMnSgqePNglvnzAjE27SMXTc7fZgx0UTkSgGUc3tsnYeEt6bXAaSFrEh3etJLyKr7OQKOklROSzFWQx2+ei+KgQdtcFbXjVm87e6Xdmz5dJkGQIQgAAEIACBJwn4e1k6XZr0v7ve6spofjlkueZ0f53qWvksJmhLEk2DuI5Tk2R9MkrLPjAn6qeWj4XASLbFbC/z9yGAoPM+9+KiJPcKOvri0IZvqSNh3RJ0olKfdiuZDjrStOHIazVpdqeJYYBN0KkPbndvElmFo0DlCToLa+SdInGjLh1zJTjMpKMC081hy0+yCAKyFhV0IjfHVwo6rprlWIi/1NGSalJLeEN8HUFHvWQ0rpCJKTpPe8mkvXDcBm96n+vdcG2fIta81WvnWt7OrSi0aK6456534WkmmjQyngkTdNxyJJYbbRkt055ldwg629Cek10IncuJBBoVb2yyykf6OoL9p0UU08fxODIvpFram8xSdIUb26Zz296UYU7/TAuwHos3vr0z8gO1H2ah90y9vzyLS3FlJ6fLVVroiYq5HYX2ma4gVSHoqPdN2pPMpaXLRQUdfRY0PSYIQAACEIAABCogcNzKctIPuqZbva3eSnn6OoJO4TpVVlFPB9ltVrKcT6TXCmMSDs7VNok9dGpdyfp2bW2zRDwdJ5/TQntX1MU+pDu7WHwjAgg6b3Qz0kW5R9DRxuytL7Pp9AutFxR04rSOW1mMzHsgOfT4Ke5yNXdi0MRnJhescVjryOzsIBAHS+7Fgo62/aKh0C2w8TEKFntT0Dl/ZU83ypKFCdf81SAUURKuh9WIKFcbzloci0eUuMZqypLF4hXb3G4k9oeYFnW0EavCjw7pnBYy3X3pGDp5+6oQc1ScmAfdreri9ccymUzOP3tWGm5g45CuCToNryu9Xi/49fsDGY4mMltuJOVtG92S4oJOLArWG9JsNjN+UYwsJ77UVbu0bpJukObjPBRX6ymxJDYgE27yPHTyBZ39xAtEt+YoUnxMyHXzj/OJFqzbpPsl6lFBZ/w6QaeIV00RQUftW58lV/xMI2IdAhCAAAQgAIESCPgH2cyHUddy7UI+lLUTmeLuOpUVyT/IejYK4gPGPSQaTWkEH9XrSa/vvDpOlNZ6GNX1Gln1wKY0GzZ6l9MYs3IwfxsCCDpvcysuC1JE0NEvrelG7mVKT2y5V9CJsjqth1HQLudreeQREHTJMAebnKJZwNhk9w3JFnRExOKJaBck3xqWCbEjOyNTptOjaWUdbS9ePfZc/GpEFMsrU7XXwn0DQUcvs4iok3WvHtlWjZgT9MEJu1vF3RKte6I7b0g/1e3KBJ0iYuP5eu8QdBa90Aup1ZPhaCSjnN94ee6+dNUu7blzBZVj5C1X7ycDDscFflzQsbK4gk5bGdcdD6E4n2ghGlI80fUtr7KTtz1KavtCQafIKFdFBB1GuUobBOsQgAAEIACBiglo3L1gMIW6dJ2BI6wek1nXz6pTaTEPKyeA8VCmi7Xsgi98FkbiTkEnGuCm2R3m1gO1fpiII1gxLpK/nwCCzv3MXnbGLUFHK/DaCK10igWdrjihNApkaQ1LjXERHR53v0gFMb5I7RwjpuF4B+hh1shNeOjoDn8rY3U11CCu+6X0VaUuIOiIxcXROCJXUVrMjrp0EyA+RtCxANPJ7jLVlOXi9rx4wytEnSwxp6zRray7VXO4CryI1JPI/e1m3cDzK8/WqxJ0YlGwEw6jXuS23l35UFEk+GKUFYdGc3xU0HG966JuZ25eOR+S1HU4CCTtjlSXJ9zkbY9AvVLQ0WdAPWvcwN/u/VL71cD4esy///3v3P8F8/RJe6y5abEMAQhAAAIQgEC5BKz94sa+vLtOJRYrUEez2qZ6Ozwm6FwtQ4kItP6hdZhKnRCi8moemldenanEy3qbpBB03uZWXBYkT9DRhyLdveTy7Pu3+KeT43kSnW/dGC5G4vFlv92lXiZunrtQYKk1xXpEiPhicSfqbpcH9zRd3k+lEzQCtbvF2RdGd9kL8ULQ0dgnq0EQKLY5GIdKeBFBJw6UWhNPvXvSZYnWj4teOMKVdeuKj9vKKOizmhUjxJdznI/UMIL7YqNcZar2cRceDXDmxlCppizxpX7gQpWiTpaYkxWE+bHLt+5W+V2HRLsIqr2rbTkGaLZemaBzioTP1EhW167z6h9/5tckE3Y1EHGWYvqgoHOyeDlu8L+DzINR79Kiq13RURa90HW4PXOemzzhJm97lFy+oBOO5lcs+LuV7fZcuxWqYJNVGdLnQ/8X7Jcl2Oh5en55tn27zBwBAQhAAAIQ+A4ETrttcrTQxEX7so48YeK4fwVi6ASj/bpezxINGJPZjf0xQSfuEZEIJ5EofCkrWv/QnzojVD2Zx7Lm910mBJ03vtNpQUdjhei2Kqb9oi+teksGS/fTtg5F3A4ewPRQxIdFT5o1jQkyk417SlA4X/bzSADRAMJOI1WOq8jtsCatweLi5efvFzIIBJKaNPtLSSdtjdwsQUfkEMcqqWsDuZCgIxIP9VdrSm92KVId1iNpBwJTXTqzc/eT8D5ogz18SSUb3iGDZvQCuxBmLM5PjmeQNZwb3dl59K7oxh+W/XCEo0ZPHM/NwNuhirJUYW+PpFmFqFOtmBN2twrEydTw2snrP8isozbUkP7yLHqYrSftKnnm5ZoJKC0ZuQOgXR6obm2xwNropoYtD4735XRyH97ziAwX9qzHZwo6YXfI4I+8lew7rqcclkPxgmcrL4ZOXdrj9Feoo6yHYfyaemcmjjQTi7q1Zl/S8aKPkeAbCGdnzE7AwFScnwcFHX/VD2NttZNly7wFd2xUEV+72OpohlmCzbWk9Hg9T8+v4mPAtbzZBwEIQAACEPjKBPzdNIiT02gPZbFzKxjhVQf7g7pOcrQoq+sXr1OtZajDiGt8Ubfyo9XN9TgKwnxfl6twBFD92JXl9aPlP0mqKvjQrUTQeQhb4ZMQdAqjev2BJuhoJbxqRfO0GcUNK683FO
1uMux5oVdKrXXhKeNvZ9IL+oOGDVGvN5DReCKT8VD6nWbYoNHz3OhfEcLTdhIHCKs329IbjqL82vEoWY3ORDaX78SrHjpB8rtJJL4UF3S0YasClIkvDa8ng9FYxsG1tKJrqYs3Wl8ITJqndamp1ZvSHU5kOh3LsNuSugpkg7CLx+XL2oZQ1tHAxjJbLGQ2mcXB0uwlH7wAG23pj6Yym01l3G9H96QpvUXqbV5RWV5v+fk5linqVC7mqGAx6wT2o94aSWkkeY2HaSSc9pex11v1gk7YVXHSDr1W6l5PRtO5LJdLWcwmMtDnuDWWrVNws8tLe84XdMTfyMgLRc96syPDySyw5VHPk3pNAzJr/nmCTniePpOj4LywXMFzUe/IdOcULkCqo95Fz2yjLYPJTObzmUwG7XiY98EqJRPnCTd526Nbl+eho0HaA4+rWkPaw5ksFnOZTlcJ4Sl594uvWQwcFWeyPHWyUtLj9HhlVtUHgax82QYBCEAAAhD4FgROGxl3wuDCKow02xqbUAfB0LaUtW3CD1RureX+OtVJVv0oiLHXl8l8IcvFTMZRfarVCoWZgRuT8UZdJrg/+5l0A6GoLhpLZ6rpLhcynw6l26pLc7CK66aP3k8EnUfJFTsPQacYpw85Sivf6h6vDc9XTMe1DnkXNu7swas12jJc7LMbo6edLMY98YIGWdjwsvOa7YHMtvnl9g8rmfRNMHLObXjSn6xyRvC53uUqZOTLJhpOuKiHjrE9bmYyaJsYdS6TNiY1MKz7ErZzwvlBFgMTfsLz6s2uTDZH8aMYPVkNYH+nopjLuxFHpreXfHu8kNnARJyoTHpPcstTflmS1/rxa1miziMN1fTQ5OV3RTl73iT+XLMQRkOE1zR4cPTYvETQ0bL4O1mMOrGYas9wrdmWwXSdeBbNLrPsOc9DJ7jc41rG3eSzpc/IeLUPAprnCzotGc4XMuo0A0HCytbwBlcC9B1lM+2LF1ROzs+xClbTS3fC0j109Hr3i4G0gq9xUf717OFCs0zh1jYVdVTkVxbaP/zHH3+8OEX/M3S77tfj9PhHnpGLhNkAAQhAAAIQgEAGgWMwolXXM2HHqX80OzJa7C7aEQ/VqbQ+FYtHYR4Nrx/Ub/QjogpKiTpnEUFHq4KHlYx7ybZMUH9Q4Wh1rQ2UgSJjk9XfqnZQ0KzpcuWL72f//pG3QxUhpmoIvErISZbel+N+K+v1WjbbfUFFVs/ZyXazlvVmK/tjvvSRzEvfIEfZbTeyXm9kuztevOwujn/BBv+4j69ld8gXpdJFOR12sgm4HQpy0xROstfr32yjKPVhqueXfNjFy8q02RZLu8yypK/zHdZdUedRIcZiiuifzKNpvAOL0srgH0NbrPBZ9O0Z2d2yY4utc449dDpsZaNl2xd9T0TPVnBORnyw0sDlJHQ6RO+RndzxGslJLLlZu01ZTB2rJKlAqQJOllBJN6skP9YgAAEIQAACVRGwOnvQttlXUf/QdlfUdtoXb6cUuV7/ZG2gTaJdUuTca8dYXUXrKT/88EPi98wHJz03nZ59zNI8v8uEh853udNc56cicBZ0UsGUP9VVVFtYFXW0UfvMpKIOYs4zBKs691LQqSqnz5yuPgP6JUqDIZvXjs51XbfrfiYIQAACEIAABCDwkQRM0MmaP+O143rjZKX9kdf8yrwRdF5Jm7wgUJAAgk5BUBz2RQkg6HzRG8tlQQACEIAABCDwzQhkiS22DUHneWNA0HmeISlAoHQCCDqlIyXBT0UAQedT3S4KCwEIQAACEIAABCDwIQQQdD4EO5lC4DoBBJ3rfNj71Qkg6Hz1O8z1QQACEIAABCAAAQg8TwBB53mGpAABCEAAAhCAAAQgAAEIQAACEIAABF5KAEHnpbjJDAIQgAAEIAABCEAAAhCAAAQgAAEIPE+giKDz3//+Vxi2/HnWpAABCEAAAhCAAAQgAAEIQAACEIAABEohgKBTCkYSgQAEIAABCEAAAhCAAAQgAAEIQAACryNwS9BR7xw8dF53P8gJAhCAAAQgAAEIQAACEIAABCAAAQjcJICgcxMRB0AAAhCAAAQgAAEIQAACEIAABCAAgfcigKDzXveD0kAAAhCAAAQgAAEIQAACEIAABCAAgZsErgk61t2KLlc3MXIABCAAAQhAAAIQgAAEIAABCEAAAhB4HYHCgo6qOr7vX/w0ASYIQAACEIAABCAAAQhAAAIQgAAEIACB1xHIE3Rc75zAQwdB53U3hZwgAAEIQAACEIAABCAAAQhAAAIQgMA1Agg61+iwDwIQgAAEIAABCEAAAhCAAAQgAAEIvCGBuwSdLC8duly94V2lSBCAAAQgAAEIQAACEIAABCAAAQh8aQJZgk66u1Xc5cp2uLF0EHS+tH1wcRCAAAQgAAEIQAACEIAABCAAAQi8IYG0oGOaTXr+D3cDgs4b3kmKBAEIQAACEIAABCAAAQhAAAIQgMC3IfCQoKPijok6eOh8G1vhQiEAAQhAAAIQgAAEIAABCEAAAhB4EwIIOm9yIygGBCAAAQhAAAIQgAAEIAABCEAAAhAoSsAVdNxeVenlRJcrPHSK4uU4CEAAAhCAAAQgAAEIQAACEIAABCBQPoGHBR0TdehyVf5NIUUIQAACEIAABCAAAQhAAAIQgAAEIHCNgAk6aY+c9PqFh44r6Pz999/X8mAfBCAAAQhAAAIQgAAEIAABCEAAAhCAQEkEVId5StBRUefXX38VnTNBAAIQgAAEIAABCEAAAhCAAAQgAAEIVE/A1WN0+dov00NHTzgej8GIV9UXlxwgAAEIQAACEIAABCAAAQhAAAIQgAAEdORx1WOuCTm2L1fQ+e233+Svv/6CJgQgAAEIQAACEIAABCAAAQhAAAIQgMALCKgOo3qMiTbX5rmCzul0kj/++OMFxSULCEAAAhCAAAQgAAEIQAACEIAABCAAgd9//11Uj7km5Ni+XEHnP//5j/zyyy/QhAAEIAABCEAAAhCAAAQgAAEIQAACEHgBAdVhVI8x0ebaPFfQ0ZM0MLL232KCAAQgAAEIQAACEIAABCAAAQhAAAIQqI6A6TDXRBx331VBR7tcqasPEwQgAAEIQAACEIAABCAAAQhAAAIQgEB1BFSD0Z8r2lxbviroWLcrHQedCQIQgAAEIAABCEAAAhCAAAQgAAEIQKB8Aqq73NPdSoWeq4KOHmABecovLilCAAIQgAAEIAABCEAAAhCAAAQgAAEI2MBU1zxy0vtuCjoaQ0dVov/9738QhgAEIAABCEAAAhCAAAQgAAEIQAACECiRgOotqruo/pIWba6t3xR09GTtw3U8HkssLklBAAIQgAAEIAABCEAAAhCAAAQgAAEIqN5SdKhyV+ApJOioSmQZgBoCEIAABCAAAQhAAAIQgAAEIAABCEDgeQIq5NgI465YU2S5kKCjCVmAZIYxf4rA1dMAAAViSURBVP6GkQIEIAABCEAAAhCAAAQgAAEIQAAC35uA6Sw6LyLgpI8pLOjoiX/99RfxdL63vXH1EIAABCAAAQhAAAIQgAAEIAABCDxJQ
DUWjZujOktaqCm6fpego4mqO9DPP/8cBOt5svycDgEIQAACEIAABCAAAQhAAAIQgAAEvhUB7fmkusojcXNM7NFAyncLOnryn3/+GWSuShITBCAAAQhAAAIQgAAEIAABCEAAAhCAwG0Cpqfo3MSZe+cq5jws6Ghm1tfrt99+Cwpxu9gcAQEIQAACEIAABCAAAQhAAAIQgAAEvh8B1VF0sCntZvVozBwTfp4WdDQhdRPSIc3VVej3338PFKLvd1u4YghAAAIQgAAEIAABCEAAAhCAAAQgcElAtRN1hFHdRPUT1VFMmHlkbmLOUx46bsZaIBV0tIBaUO2K9ffff19eCVsgAAEIQAACEIAABCAAAQhAAAIQgMAXJqB6iOoiJuSoXvKskGMaTELQ0RXb8exc3YZUcVI3IhV3dK79wixqs+bFBAEIQAACEIAABCAAAQhAAAIQgAAEvgIB01RU91D9w9VDVB95tnuVq9O4Yo4u/8M2uAc9s6yqk/600BqxWZUovaBff/016CumQg8/GGAD2AA2gA1gA9gANoANYAPYADaADWAD2MBntwGNiaN6h+oeqn+oDqJ6iGkjz+gr7rmm3bjzWNDRje7Bzy5b4cuaK5Aqf6qmveNPFT5+MMAGsAFsABvABrABbAAbwAawAWwAG3iFDbxju1jLVKUe4AowZWkYz2oq7vmuiOMuJwSddxd1FGzVN/FdjTddrlc8yOTBHwY2gA1gA9gANoANYAPYADaADWADX9MG0m3Md12vWgMoS8Bx03HFmDKWXRHHXf50gs4rRB01mHc15qLl4qX7NV+63FfuKzaADWAD2AA2gA1gA9gANoANFLGBom3Hdz2uaiFH03dFmDKXyxBxLA1XwEkvXwg6eoCdWMa8TChuWq+4uZbHuxp4WeUq8jLgGP40sAFsABvABrABbAAbwAawAWwAG/hYGyirDfiu6Vgb/BVzV18oc7kMHcXSSAs46fVMQeeziDoK/RU32s3jXQ2fcr1nDCTuC/cFG8AGsAFsABvABrABbAAbwAawgXwbcNvbr1guU7xJp2VCTBnztHiTtf4SQccuJn2xZa2/4qan8+CBzH8gYQMbbAAbwAawAWwAG8AGsAFsABvABrCBazaQbmO/Yr0sDSKdjmkeZc6zBJz0tlxBxw4ss0CaVvrCy1x/hQFk5XHNSNnHSwwbwAawAWwAG8AGsAFsABvABrABbAAbqH6kqqz2um4rU3dIp1W2ZmJaTJH5ywWdryrquIbDg8rLGhvABrABbAAbwAawAWwAG8AGsAFs4LvbgNtO/qjltABT5nrZYo6mV0TIsWNuCjp6YBWFLBNiVlofZSxZ+X73h5jr548MG8AGsAFsABvABrABbAAbwAawga9vA1nt4Y/alqUTlLmtCp3EhJqi80KCzmcVdfRmfZTxXMuXF9nXf5Fxj7nH2AA2gA1gA9gANoANYAPYADbw1W3gWrv3o/aVKdrkpfUOYo7qNIUFnapEHQWRB6nM7R9lTEXy/eoPOdfHHxk2gA1gA9gANoANYAPYADaADWADn98GirRvP+qYMvWDvLSqEHI0zaIeOenj7hJ0Pruoozflo4zr0Xx56X3+lx73kHuIDWAD2AA2gA1gA9gANoANYAOfxQYebbt+1Hl54kvZ299NzFF95m5B5yuIOp9R2Ek/HJ/lZUA5+ePCBrABbAAbwAawAWwAG8AGsAFs4P1sIN3G/GzrZQs219J7RzFHtZn/A+wx4wuEAlXlAAAAAElFTkSuQmCC" 15 | } 16 | }, 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "# Introduction\n", 21 | "\n", 22 | "Welcome to our end-to-end binary Text-Classification example. In this demo, we will use the Hugging Faces `transformers` and `datasets` library together with a custom Amazon sagemaker-sdk extension to fine-tune a pre-trained transformer on binary text classification. In particular, the pre-trained model will be fine-tuned using the `imdb` dataset. To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. This demo will also show you can use spot instances and continue training.\n", 23 | "\n", 24 | "![image.png](attachment:image.png)\n", 25 | "\n", 26 | "_**NOTE: You can run this demo in Sagemaker Studio, your local machine or Sagemaker Notebook Instances**_" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "metadata": {}, 32 | "source": [ 33 | "# Development Environment and Permissions " 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "metadata": {}, 39 | "source": [ 40 | "## Installation\n", 41 | "\n", 42 | "_*Note:* we only install the required libraries from Hugging Face and AWS. 
You also need PyTorch or TensorFlow, if you do not already have one of them installed._" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "!pip install \"sagemaker>=2.48.0\" \"transformers==4.6.1\" \"datasets[s3]==1.6.2\" --upgrade" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 1, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "import sagemaker.huggingface" 61 | ] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "## Permissions" 68 | ] 69 | }, 70 | { 71 | "cell_type": "markdown", 72 | "metadata": {}, 73 | "source": [ 74 | "_If you are going to use SageMaker in a local environment, you need access to an IAM role with the required permissions for SageMaker. You can find more about IAM roles for SageMaker [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html)._" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [ 83 | "import sagemaker\n", 84 | "\n", 85 | "sess = sagemaker.Session()\n", 86 | "# sagemaker session bucket -> used for uploading data, models and logs\n", 87 | "# sagemaker will automatically create this bucket if it does not exist\n", 88 | "sagemaker_session_bucket=None\n", 89 | "if sagemaker_session_bucket is None and sess is not None:\n", 90 | "    # set to default bucket if a bucket name is not given\n", 91 | "    sagemaker_session_bucket = sess.default_bucket()\n", 92 | "\n", 93 | "role = sagemaker.get_execution_role()\n", 94 | "sess = sagemaker.Session(default_bucket=sagemaker_session_bucket)\n", 95 | "\n", 96 | "print(f\"sagemaker role arn: {role}\")\n", 97 | "print(f\"sagemaker bucket: {sess.default_bucket()}\")\n", 98 | "print(f\"sagemaker session region: {sess.boto_region_name}\")" 99 | ] 100 | },
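If you run this notebook on your local machine instead of inside SageMaker, `sagemaker.get_execution_role()` is usually not available. A minimal sketch for that case, assuming you have already created a SageMaker execution role (the role name below is a placeholder, not something defined in this workshop), is to look the role ARN up via IAM:

```python
import boto3

# Local-environment sketch: fetch the ARN of an existing SageMaker execution role.
# "my-sagemaker-execution-role" is a placeholder - replace it with your own role name.
iam = boto3.client("iam")
role = iam.get_role(RoleName="my-sagemaker-execution-role")["Role"]["Arn"]
print(f"sagemaker role arn: {role}")
```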
101 | { 102 | "cell_type": "markdown", 103 | "metadata": {}, 104 | "source": [ 105 | "# Preprocessing\n", 106 | "\n", 107 | "We are using the `datasets` library to download and preprocess the `imdb` dataset. After preprocessing, the dataset will be uploaded to our `sagemaker_session_bucket` to be used within our training job. The [imdb](http://ai.stanford.edu/~amaas/data/sentiment/) dataset consists of 25,000 highly polar movie reviews for training and another 25,000 for testing." 108 | ] 109 | }, 110 | { 111 | "cell_type": "markdown", 112 | "metadata": {}, 113 | "source": [ 114 | "## Tokenization " 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": 5, 120 | "metadata": {}, 121 | "outputs": [], 122 | "source": [ 123 | "from datasets import load_dataset\n", 124 | "from transformers import AutoTokenizer\n", 125 | "\n", 126 | "# tokenizer used in preprocessing\n", 127 | "tokenizer_name = 'distilbert-base-uncased'\n", 128 | "\n", 129 | "# dataset used\n", 130 | "dataset_name = 'imdb'\n", 131 | "\n", 132 | "# s3 key prefix for the data\n", 133 | "s3_prefix = 'samples/datasets/imdb'" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": null, 139 | "metadata": {}, 140 | "outputs": [], 141 | "source": [ 142 | "# load dataset\n", 143 | "dataset = load_dataset(dataset_name)\n", 144 | "\n", 145 | "# download tokenizer\n", 146 | "tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n", 147 | "\n", 148 | "# tokenizer helper function\n", 149 | "def tokenize(batch):\n", 150 | "    return tokenizer(batch['text'], padding='max_length', truncation=True)\n", 151 | "\n", 152 | "# load the train and test splits\n", 153 | "train_dataset, test_dataset = load_dataset('imdb', split=['train', 'test'])\n", 154 | "test_dataset = test_dataset.shuffle().select(range(10000)) # reduce the test dataset size to 10k\n", 155 | "\n", 156 | "\n", 157 | "# tokenize dataset\n", 158 | "train_dataset = train_dataset.map(tokenize, batched=True)\n", 159 | "test_dataset = test_dataset.map(tokenize, batched=True)\n", 160 | "\n", 161 | "# set format for pytorch\n", 162 | "train_dataset = train_dataset.rename_column(\"label\", \"labels\")\n", 163 | "train_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])\n", 164 | "test_dataset = test_dataset.rename_column(\"label\", \"labels\")\n", 165 | "test_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])" 166 | ] 167 | }, 168 | { 169 | "cell_type": "markdown", 170 | "metadata": {}, 171 | "source": [ 172 | "## Uploading data to `sagemaker_session_bucket`\n", 173 | "\n", 174 | "After we have processed the datasets, we are going to use the new `FileSystem` [integration](https://huggingface.co/docs/datasets/filesystems.html) to upload our dataset to S3." 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": 8, 180 | "metadata": {}, 181 | "outputs": [], 182 | "source": [ 183 | "import botocore\n", 184 | "from datasets.filesystems import S3FileSystem\n", 185 | "\n", 186 | "s3 = S3FileSystem() \n", 187 | "\n", 188 | "# save train_dataset to s3\n", 189 | "training_input_path = f's3://{sess.default_bucket()}/{s3_prefix}/train'\n", 190 | "train_dataset.save_to_disk(training_input_path, fs=s3)\n", 191 | "\n", 192 | "# save test_dataset to s3\n", 193 | "test_input_path = f's3://{sess.default_bucket()}/{s3_prefix}/test'\n", 194 | "test_dataset.save_to_disk(test_input_path, fs=s3)\n" 195 | ] 196 | }, 197 | { 198 | "cell_type": "code", 199 | "execution_count": 6, 200 | "metadata": {}, 201 | "outputs": [], 202 | "source": [ 203 | "training_input_path = f's3://{sess.default_bucket()}/{s3_prefix}/train'\n", 204 | "test_input_path = f's3://{sess.default_bucket()}/{s3_prefix}/test'\n" 205 | ] 206 | },
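Before moving on, you can optionally confirm that the datasets actually landed in S3. This check is not part of the original lab; it is a small sketch using plain `boto3` together with the `sess` and `s3_prefix` variables defined above:

```python
import boto3

# Optional sanity check (sketch): list the objects that save_to_disk wrote to S3.
s3_client = boto3.client("s3")
for split in ["train", "test"]:
    response = s3_client.list_objects_v2(
        Bucket=sess.default_bucket(),   # the sagemaker session bucket used above
        Prefix=f"{s3_prefix}/{split}",  # e.g. samples/datasets/imdb/train
    )
    for obj in response.get("Contents", []):
        print(obj["Key"], obj["Size"])
```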
207 | { 208 | "cell_type": "markdown", 209 | "metadata": {}, 210 | "source": [ 211 | "# Fine-tuning & starting a SageMaker Training Job\n", 212 | "\n", 213 | "In order to create a SageMaker training job, we need a `HuggingFace` Estimator. The Estimator handles end-to-end Amazon SageMaker training and deployment tasks. In the Estimator we define which fine-tuning script should be used as `entry_point`, which `instance_type` should be used, which `hyperparameters` are passed in, and so on.\n", 214 | "\n", 215 | "\n", 216 | "\n", 217 | "```python\n", 218 | "huggingface_estimator = HuggingFace(entry_point='train.py',\n", 219 | "                                    source_dir='./scripts',\n", 220 | "                                    base_job_name='huggingface-sdk-extension',\n", 221 | "                                    instance_type='ml.p3.2xlarge',\n", 222 | "                                    instance_count=1,\n", 223 | "                                    transformers_version='4.4',\n", 224 | "                                    pytorch_version='1.6',\n", 225 | "                                    py_version='py36',\n", 226 | "                                    role=role,\n", 227 | "                                    hyperparameters = {'epochs': 1,\n", 228 | "                                                       'train_batch_size': 32,\n", 229 | "                                                       'model_name':'distilbert-base-uncased'\n", 230 | "                                                       })\n", 231 | "```\n", 232 | "\n", 233 | "When we create a SageMaker training job, SageMaker takes care of starting and managing all the required EC2 instances for us with the `huggingface` container, uploads the provided fine-tuning script `train.py` and downloads the data from our `sagemaker_session_bucket` into the container at `/opt/ml/input/data`. Then, it starts the training job by running:\n", 234 | "\n", 235 | "```python\n", 236 | "/opt/conda/bin/python train.py --epochs 1 --model_name distilbert-base-uncased --train_batch_size 32\n", 237 | "```\n", 238 | "\n", 239 | "The `hyperparameters` you define in the `HuggingFace` estimator are passed in as named arguments. \n", 240 | "\n", 241 | "SageMaker provides useful properties about the training environment through various environment variables, including the following:\n", 242 | "\n", 243 | "* `SM_MODEL_DIR`: A string that represents the path where the training job writes the model artifacts to. After training, artifacts in this directory are uploaded to S3 for model hosting.\n", 244 | "\n", 245 | "* `SM_NUM_GPUS`: An integer representing the number of GPUs available to the host.\n", 246 | "\n", 247 | "* `SM_CHANNEL_XXXX`: A string that represents the path to the directory that contains the input data for the specified channel. For example, if you specify two input channels in the HuggingFace estimator’s fit call, named `train` and `test`, the environment variables `SM_CHANNEL_TRAIN` and `SM_CHANNEL_TEST` are set.\n", 248 | "\n", 249 | "\n", 250 | "To run your training job locally, you can define `instance_type='local'` or `instance_type='local_gpu'` for GPU usage. 
_Note: this does not working within SageMaker Studio_\n" 251 | ] 252 | }, 253 | { 254 | "cell_type": "code", 255 | "execution_count": 7, 256 | "metadata": {}, 257 | "outputs": [ 258 | { 259 | "name": "stdout", 260 | "output_type": "stream", 261 | "text": [ 262 | "\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mtransformers\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m AutoModelForSequenceClassification, Trainer, TrainingArguments\r\n", 263 | "\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mtransformers\u001b[39;49;00m\u001b[04m\u001b[36m.\u001b[39;49;00m\u001b[04m\u001b[36mtrainer_utils\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m get_last_checkpoint\r\n", 264 | "\r\n", 265 | "\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36msklearn\u001b[39;49;00m\u001b[04m\u001b[36m.\u001b[39;49;00m\u001b[04m\u001b[36mmetrics\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m accuracy_score, precision_recall_fscore_support\r\n", 266 | "\u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mdatasets\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m load_from_disk\r\n", 267 | "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mlogging\u001b[39;49;00m\r\n", 268 | "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36msys\u001b[39;49;00m\r\n", 269 | "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36margparse\u001b[39;49;00m\r\n", 270 | "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mos\u001b[39;49;00m\r\n", 271 | "\r\n", 272 | "\u001b[37m# Set up logging\u001b[39;49;00m\r\n", 273 | "logger = logging.getLogger(\u001b[31m__name__\u001b[39;49;00m)\r\n", 274 | "\r\n", 275 | "logging.basicConfig(\r\n", 276 | " level=logging.getLevelName(\u001b[33m\"\u001b[39;49;00m\u001b[33mINFO\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m),\r\n", 277 | " handlers=[logging.StreamHandler(sys.stdout)],\r\n", 278 | " \u001b[36mformat\u001b[39;49;00m=\u001b[33m\"\u001b[39;49;00m\u001b[33m%(asctime)s\u001b[39;49;00m\u001b[33m - \u001b[39;49;00m\u001b[33m%(name)s\u001b[39;49;00m\u001b[33m - \u001b[39;49;00m\u001b[33m%(levelname)s\u001b[39;49;00m\u001b[33m - \u001b[39;49;00m\u001b[33m%(message)s\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m,\r\n", 279 | ")\r\n", 280 | "\r\n", 281 | "\u001b[34mif\u001b[39;49;00m \u001b[31m__name__\u001b[39;49;00m == \u001b[33m\"\u001b[39;49;00m\u001b[33m__main__\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m:\r\n", 282 | "\r\n", 283 | " logger.info(sys.argv)\r\n", 284 | "\r\n", 285 | " parser = argparse.ArgumentParser()\r\n", 286 | "\r\n", 287 | " \u001b[37m# hyperparameters sent by the client are passed as command-line arguments to the script.\u001b[39;49;00m\r\n", 288 | " parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--epochs\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m3\u001b[39;49;00m)\r\n", 289 | " parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--train-batch-size\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m32\u001b[39;49;00m)\r\n", 290 | " parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--eval-batch-size\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m64\u001b[39;49;00m)\r\n", 291 | " parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--warmup_steps\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m500\u001b[39;49;00m)\r\n", 292 | " 
parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--model_name\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m)\r\n", 293 | " parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--learning_rate\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=\u001b[34m5e-5\u001b[39;49;00m)\r\n", 294 | " parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--output_dir\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m)\r\n", 295 | "\r\n", 296 | " \u001b[37m# Data, model, and output directories\u001b[39;49;00m\r\n", 297 | " parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--output-data-dir\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=os.environ[\u001b[33m\"\u001b[39;49;00m\u001b[33mSM_OUTPUT_DATA_DIR\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m])\r\n", 298 | " parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--model-dir\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=os.environ[\u001b[33m\"\u001b[39;49;00m\u001b[33mSM_MODEL_DIR\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m])\r\n", 299 | " parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--n_gpus\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=os.environ[\u001b[33m\"\u001b[39;49;00m\u001b[33mSM_NUM_GPUS\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m])\r\n", 300 | " parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--training_dir\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=os.environ[\u001b[33m\"\u001b[39;49;00m\u001b[33mSM_CHANNEL_TRAIN\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m])\r\n", 301 | " parser.add_argument(\u001b[33m\"\u001b[39;49;00m\u001b[33m--test_dir\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=os.environ[\u001b[33m\"\u001b[39;49;00m\u001b[33mSM_CHANNEL_TEST\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m])\r\n", 302 | "\r\n", 303 | " args, _ = parser.parse_known_args()\r\n", 304 | "\r\n", 305 | " \u001b[37m# load datasets\u001b[39;49;00m\r\n", 306 | " train_dataset = load_from_disk(args.training_dir)\r\n", 307 | " test_dataset = load_from_disk(args.test_dir)\r\n", 308 | "\r\n", 309 | " logger.info(\u001b[33mf\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\u001b[33m loaded train_dataset length is: \u001b[39;49;00m\u001b[33m{\u001b[39;49;00m\u001b[36mlen\u001b[39;49;00m(train_dataset)\u001b[33m}\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n", 310 | " logger.info(\u001b[33mf\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\u001b[33m loaded test_dataset length is: \u001b[39;49;00m\u001b[33m{\u001b[39;49;00m\u001b[36mlen\u001b[39;49;00m(test_dataset)\u001b[33m}\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n", 311 | "\r\n", 312 | " \u001b[37m# compute metrics function for binary classification\u001b[39;49;00m\r\n", 313 | " \u001b[34mdef\u001b[39;49;00m \u001b[32mcompute_metrics\u001b[39;49;00m(pred):\r\n", 314 | " labels = pred.label_ids\r\n", 315 | " preds = pred.predictions.argmax(-\u001b[34m1\u001b[39;49;00m)\r\n", 316 | " precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, 
average=\u001b[33m\"\u001b[39;49;00m\u001b[33mbinary\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n", 317 | " acc = accuracy_score(labels, preds)\r\n", 318 | " \u001b[34mreturn\u001b[39;49;00m {\u001b[33m\"\u001b[39;49;00m\u001b[33maccuracy\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m: acc, \u001b[33m\"\u001b[39;49;00m\u001b[33mf1\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m: f1, \u001b[33m\"\u001b[39;49;00m\u001b[33mprecision\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m: precision, \u001b[33m\"\u001b[39;49;00m\u001b[33mrecall\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m: recall}\r\n", 319 | "\r\n", 320 | " \u001b[37m# download model from model hub\u001b[39;49;00m\r\n", 321 | " model = AutoModelForSequenceClassification.from_pretrained(args.model_name)\r\n", 322 | "\r\n", 323 | " \u001b[37m# define training args\u001b[39;49;00m\r\n", 324 | " training_args = TrainingArguments(\r\n", 325 | " output_dir=args.output_dir,\r\n", 326 | " num_train_epochs=args.epochs,\r\n", 327 | " per_device_train_batch_size=args.train_batch_size,\r\n", 328 | " per_device_eval_batch_size=args.eval_batch_size,\r\n", 329 | " warmup_steps=args.warmup_steps,\r\n", 330 | " evaluation_strategy=\u001b[33m\"\u001b[39;49;00m\u001b[33mepoch\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m,\r\n", 331 | " logging_dir=\u001b[33mf\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\u001b[33m{\u001b[39;49;00margs.output_data_dir\u001b[33m}\u001b[39;49;00m\u001b[33m/logs\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m,\r\n", 332 | " learning_rate=\u001b[36mfloat\u001b[39;49;00m(args.learning_rate),\r\n", 333 | " )\r\n", 334 | "\r\n", 335 | " \u001b[37m# create Trainer instance\u001b[39;49;00m\r\n", 336 | " trainer = Trainer(\r\n", 337 | " model=model,\r\n", 338 | " args=training_args,\r\n", 339 | " compute_metrics=compute_metrics,\r\n", 340 | " train_dataset=train_dataset,\r\n", 341 | " eval_dataset=test_dataset,\r\n", 342 | " )\r\n", 343 | "\r\n", 344 | " \u001b[37m# train model\u001b[39;49;00m\r\n", 345 | " \u001b[34mif\u001b[39;49;00m get_last_checkpoint(args.output_dir) \u001b[35mis\u001b[39;49;00m \u001b[35mnot\u001b[39;49;00m \u001b[34mNone\u001b[39;49;00m:\r\n", 346 | " logger.info(\u001b[33m\"\u001b[39;49;00m\u001b[33m***** continue training *****\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n", 347 | " trainer.train(resume_from_checkpoint=args.output_dir)\r\n", 348 | " \u001b[34melse\u001b[39;49;00m:\r\n", 349 | " trainer.train()\r\n", 350 | " \u001b[37m# evaluate model\u001b[39;49;00m\r\n", 351 | " eval_result = trainer.evaluate(eval_dataset=test_dataset)\r\n", 352 | "\r\n", 353 | " \u001b[37m# writes eval result to file which can be accessed later in s3 ouput\u001b[39;49;00m\r\n", 354 | " \u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(os.path.join(args.output_data_dir, \u001b[33m\"\u001b[39;49;00m\u001b[33meval_results.txt\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m), \u001b[33m\"\u001b[39;49;00m\u001b[33mw\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m writer:\r\n", 355 | " \u001b[36mprint\u001b[39;49;00m(\u001b[33mf\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\u001b[33m***** Eval results *****\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n", 356 | " \u001b[34mfor\u001b[39;49;00m key, value \u001b[35min\u001b[39;49;00m \u001b[36msorted\u001b[39;49;00m(eval_result.items()):\r\n", 357 | " writer.write(\u001b[33mf\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m\u001b[33m{\u001b[39;49;00mkey\u001b[33m}\u001b[39;49;00m\u001b[33m = 
\u001b[39;49;00m\u001b[33m{\u001b[39;49;00mvalue\u001b[33m}\u001b[39;49;00m\u001b[33m\\n\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\r\n", 358 | "\r\n", 359 | "    \u001b[37m# Saves the model to s3\u001b[39;49;00m\r\n", 360 | "    trainer.save_model(args.model_dir)\r\n" 361 | ] 362 | } 363 | ], 364 | "source": [ 365 | "!pygmentize ./scripts/train.py" 366 | ] 367 | }, 368 | { 369 | "cell_type": "markdown", 370 | "metadata": {}, 371 | "source": [ 372 | "## Creating an Estimator and starting a training job" 373 | ] 374 | }, 375 | { 376 | "cell_type": "code", 377 | "execution_count": 8, 378 | "metadata": {}, 379 | "outputs": [], 380 | "source": [ 381 | "from sagemaker.huggingface import HuggingFace\n", 382 | "import time\n", 383 | "\n", 384 | "# hyperparameters, which are passed into the training job\n", 385 | "hyperparameters={'epochs': 1,                              # number of training epochs\n", 386 | "                 'train_batch_size': 32,                   # batch size for training\n", 387 | "                 'eval_batch_size': 64,                    # batch size for evaluation\n", 388 | "                 'learning_rate': 3e-5,                    # learning rate used during training\n", 389 | "                 'model_name':'distilbert-base-uncased',   # pre-trained model\n", 390 | "                 'fp16': True,                             # Whether to use 16-bit (mixed) precision training\n", 391 | "                 'output_dir':'/opt/ml/checkpoints',       # output_dir where our checkpoints will be saved\n", 392 | "                 }" 393 | ] 394 | },
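One detail worth noting before creating the Estimator: the hyperparameters above include `'fp16': True`, but the `train.py` shown earlier does not declare an `--fp16` argument, so `parse_known_args()` simply ignores it. SageMaker passes every hyperparameter to the script as a command-line string, so if you extend the script to honor the flag, the boolean has to be parsed from the strings `"True"`/`"False"`. A minimal, self-contained sketch of that parsing pattern (an assumption about how you might extend the script, not part of the workshop code):

```python
import argparse

# Sketch: hyperparameters arrive as CLI strings, e.g. "--fp16 True".
# Note that argparse's type=bool would treat any non-empty string as True,
# so we parse the value as a string and compare it explicitly.
parser = argparse.ArgumentParser()
parser.add_argument("--fp16", type=str, default="False")
args, _ = parser.parse_known_args(["--fp16", "True"])

use_fp16 = args.fp16.lower() == "true"
print(use_fp16)  # True -> could be forwarded to TrainingArguments(fp16=use_fp16, ...)
```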
S3\n", 415 | " transformers_version = '4.6.1', # the transformers version used in the training job\n", 416 | " pytorch_version = '1.7.1', # the pytorch_version version used in the training job\n", 417 | " py_version = 'py36', # the python version used in the training job\n", 418 | " hyperparameters = hyperparameters, # the hyperparameter used for running the training job\n", 419 | " checkpoint_s3_uri = checkpoint_s3_uri, # s3 directory for our uploaded checkpoints\n", 420 | " use_spot_instances = True, # Wether to use spot instances or not\n", 421 | " max_wait = 3600, # This should be equal to or greater than max_run in seconds'\n", 422 | " max_run = 1000, # expected max run in seconds\n", 423 | ")" 424 | ] 425 | }, 426 | { 427 | "cell_type": "code", 428 | "execution_count": null, 429 | "metadata": {}, 430 | "outputs": [], 431 | "source": [ 432 | "# define a data input dictonary with our uploaded s3 uris\n", 433 | "data = {\n", 434 | " 'train': training_input_path,\n", 435 | " 'test': test_input_path\n", 436 | "}\n", 437 | "\n", 438 | "\n", 439 | "# starting the train job with our uploaded datasets as input\n", 440 | "huggingface_estimator.fit(data)\n", 441 | "\n", 442 | "# Training seconds: 874\n", 443 | "# Billable seconds: 262\n", 444 | "# Managed Spot Training savings: 70.0%" 445 | ] 446 | }, 447 | { 448 | "cell_type": "markdown", 449 | "metadata": {}, 450 | "source": [ 451 | "## Deploying the endpoint\n", 452 | "\n", 453 | "To deploy our endpoint, we call `deploy()` on our HuggingFace estimator object, passing in our desired number of instances and instance type." 454 | ] 455 | }, 456 | { 457 | "cell_type": "code", 458 | "execution_count": null, 459 | "metadata": {}, 460 | "outputs": [], 461 | "source": [ 462 | "predictor = huggingface_estimator.deploy(1,\"ml.g4dn.xlarge\")" 463 | ] 464 | }, 465 | { 466 | "cell_type": "markdown", 467 | "metadata": {}, 468 | "source": [ 469 | "Then, we use the returned predictor object to call the endpoint." 470 | ] 471 | }, 472 | { 473 | "cell_type": "code", 474 | "execution_count": null, 475 | "metadata": {}, 476 | "outputs": [], 477 | "source": [ 478 | "sentiment_input= {\"inputs\":\"I love using the new Inference DLC.\"}\n", 479 | "\n", 480 | "predictor.predict(sentiment_input)" 481 | ] 482 | }, 483 | { 484 | "cell_type": "markdown", 485 | "metadata": {}, 486 | "source": [ 487 | "Finally, we delete the endpoint again." 488 | ] 489 | }, 490 | { 491 | "cell_type": "code", 492 | "execution_count": 12, 493 | "metadata": {}, 494 | "outputs": [], 495 | "source": [ 496 | "predictor.delete_endpoint()" 497 | ] 498 | } 499 | ], 500 | "metadata": { 501 | "instance_type": "ml.t3.medium", 502 | "interpreter": { 503 | "hash": "c281c456f1b8161c8906f4af2c08ed2c40c50136979eaae69688b01f70e9f4a9" 504 | }, 505 | "kernelspec": { 506 | "display_name": "Python 3", 507 | "language": "python", 508 | "name": "python3" 509 | }, 510 | "language_info": { 511 | "codemirror_mode": { 512 | "name": "ipython", 513 | "version": 3 514 | }, 515 | "file_extension": ".py", 516 | "mimetype": "text/x-python", 517 | "name": "python", 518 | "nbconvert_exporter": "python", 519 | "pygments_lexer": "ipython3", 520 | "version": "3.8.5" 521 | } 522 | }, 523 | "nbformat": 4, 524 | "nbformat_minor": 4 525 | } 526 | --------------------------------------------------------------------------------