├── 1-KnowYourDevice
│ ├── 1-RegisterYourDevice
│ │ ├── README.md
│ │ └── assets
│ │ │ ├── certificate.png
│ │ │ ├── createstack.png
│ │ │ ├── createstack2.png
│ │ │ ├── createstack3.png
│ │ │ ├── createstack4.png
│ │ │ ├── device-verified.png
│ │ │ ├── deviceaccess.png
│ │ │ ├── deviceip.png
│ │ │ ├── deviceregs1.png
│ │ │ ├── ggcore.png
│ │ │ ├── launchstack.png
│ │ │ ├── namedevice.png
│ │ │ ├── networkedit.png
│ │ │ ├── readme.txt
│ │ │ ├── setupsummary.png
│ │ │ ├── ssh.png
│ │ │ └── ssid-connect.png
│ ├── 2-ObjectDetection
│ │ ├── README.md
│ │ └── assets
│ │ │ ├── dliottopic.png
│ │ │ ├── dlmessages.png
│ │ │ ├── hotdog.png
│ │ │ ├── projectcontent.png
│ │ │ ├── projecthome.png
│ │ │ ├── projecttemplate.png
│ │ │ └── targetdevice.png
│ ├── README.md
│ └── assets
│ │ └── dlgeneral.png
├── 2-SageMaker
│ ├── 1-FaceDetection
│ │ ├── FaceDetection.ipynb
│ │ ├── ReadMe.md
│ │ ├── assets
│ │ │ ├── sm01.png
│ │ │ ├── sm02.png
│ │ │ ├── sm03.png
│ │ │ ├── sm04.png
│ │ │ ├── sm05.png
│ │ │ ├── sm06.png
│ │ │ ├── sm07.png
│ │ │ ├── sm08.png
│ │ │ ├── sm09.png
│ │ │ ├── sm10.png
│ │ │ ├── sm11.png
│ │ │ ├── sm12.png
│ │ │ ├── sm13.png
│ │ │ └── sm132.png
│ │ └── jeff_portrait.jpg
│ ├── ReadMe.md
│ └── assets
│ │ └── sm.png
├── 3-SentimentAnalysis
│ ├── 1-DetectFaces
│ │ ├── README.md
│ │ ├── facecrop.py
│ │ └── images
│ │ │ ├── bucketname.png
│ │ │ ├── dlprojectcontent.png
│ │ │ ├── dlprojecthome.png
│ │ │ ├── dlprojecttargetdevice.png
│ │ │ ├── dlprojecttemplate.png
│ │ │ ├── faceiotoutput.png
│ │ │ ├── iotaddactionfinal.png
│ │ │ ├── iotinvokelambda.png
│ │ │ ├── iotlambdatestevent.png
│ │ │ ├── iotrulefinal01.png
│ │ │ ├── iotrulefinal02.png
│ │ │ ├── iotrulefinal03.png
│ │ │ ├── lambdaiottos3create.png
│ │ │ └── lambdaversion.png
│ ├── 2-IdentifyEmotions
│ │ ├── README.md
│ │ └── rekognize-emotions.py
│ ├── README.md
│ └── assets
│ │ └── sentiments.png
├── 4-FaceDetectionAndVerification
│ ├── 1-FaceDetection
│ │ ├── Architecture.png
│ │ ├── README.md
│ │ ├── facecrop.py
│ │ ├── images
│ │ │ ├── dlprojectcontent.png
│ │ │ ├── dlprojecthome.png
│ │ │ ├── dlprojecttargetdevice.png
│ │ │ ├── dlprojecttemplate.png
│ │ │ ├── faceiotoutput.png
│ │ │ ├── iotaddactionfinal.png
│ │ │ ├── iotinvokelambda.png
│ │ │ ├── iotlambdatestevent.png
│ │ │ ├── iotrulefinal01.png
│ │ │ ├── iotrulefinal02.png
│ │ │ ├── iotrulefinal03.png
│ │ │ ├── lambdaiottos3create.png
│ │ │ └── lambdaversion.png
│ │ └── iottos3.py
│ ├── 2-Rekognition
│ │ ├── README.md
│ │ ├── andy1.jpg
│ │ ├── andy2.png
│ │ ├── images
│ │ │ ├── Burner_Accounts.png
│ │ │ ├── Lambda_IAM_Screen1.png
│ │ │ ├── Lambda_IAM_Screen2_Policy.png
│ │ │ ├── Region.png
│ │ │ ├── celebrity.png
│ │ │ ├── facial.png
│ │ │ ├── facialanalysis.png
│ │ │ ├── listfaces.png
│ │ │ ├── objectscene.png
│ │ │ ├── persontracking.png
│ │ │ ├── searchfacebyimage.png
│ │ │ └── text.png
│ │ └── jb.jpg
│ ├── 3-ApprovalWorkflow
│ │ ├── APIGatewayToStepFunctions-respond-swagger-apigateway.yaml
│ │ ├── README.md
│ │ ├── images
│ │ │ ├── Approval_page.png
│ │ │ ├── Cognito_ID_Pool_ID.png
│ │ │ ├── Create_new_api.png
│ │ │ ├── Invoke_url.png
│ │ │ ├── ML_Bucket.png
│ │ │ ├── Make_public.png
│ │ │ ├── New_execution.png
│ │ │ ├── State_machines.png
│ │ │ ├── Step_fns.png
│ │ │ ├── Workflow_view.png
│ │ │ ├── approvalwebsite.png
│ │ │ └── s3public.png
│ │ └── index.html
│ ├── 4-BringingItAllTogether
│ │ ├── README.md
│ │ ├── images
│ │ │ ├── Basic_settings.png
│ │ │ ├── Configure_test_event.png
│ │ │ ├── Configure_test_event2.png
│ │ │ ├── Configure_triggers.png
│ │ │ ├── Function_code.png
│ │ │ ├── StartWorkflow.png
│ │ │ ├── approvalemail.png
│ │ │ ├── approvalwebsite.png
│ │ │ ├── lambdatest.png
│ │ │ ├── statemachinerunning.png
│ │ │ └── statemachinerunning2.png
│ │ ├── postapproval.py
│ │ └── startworkflow.py
│ ├── Architecture.png
│ └── README.md
├── 5-ProjectIdeas
│ ├── README.md
│ └── peoplecount.py
├── Debug.md
└── README.md
/1-KnowYourDevice/1-RegisterYourDevice/README.md:
--------------------------------------------------------------------------------
1 | # Register your device
2 |
3 | 1. Sign in to the AWS Management Console and open the AWS DeepLens console at https://console.aws.amazon.com/deeplens/home?region=us-east-1#firstrun.
4 | 2. Choose Register device.
5 | 3. For Device name, type a name for your AWS DeepLens, then choose Next. Use only alphanumeric characters and dashes (-).
6 |
7 | 
8 |
9 | 4. If this is your first time registering an AWS DeepLens device, create the following AWS Identity and Access Management (IAM) roles. They give AWS DeepLens the permissions it needs to perform tasks on your behalf. If you have already created these roles, skip to step 5; otherwise, follow the CloudFormation or manual steps below to create the required IAM roles.
10 |
11 | ### Steps to use CloudFormation to automatically create required IAM roles for DeepLens
12 |
13 | - [Click to launch CloudFormation Template](https://console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks/create/review?stackName=DeepLensRoles&templateURL=https://s3.amazonaws.com/deep-learning-with-deeplens/DeepLensRoles.json)
14 |
15 | 
16 |
17 | - Select the checkbox "I acknowledge that AWS CloudFormation might create IAM resources with custom names." and click Create.
18 |
19 | 
20 |
21 | - Wait a few seconds, then refresh the screen until the status is CREATE_COMPLETE.
22 |
23 | 
24 |
25 | - Go to https://console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks and select the checkbox next to stack DeepLensRoles.
26 |
27 | - Click on the Resources tab and you should see the five IAM roles that the CloudFormation template created.
28 |
29 | 
30 |
31 | - You can now move to step 5.
32 |
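If you prefer to script the stack creation instead of clicking through the console, the following is a minimal boto3 sketch of what the launch link does; it assumes your AWS credentials are already configured and reuses the template URL from the link above.

```
import boto3

cfn = boto3.client('cloudformation', region_name='us-east-1')

# CAPABILITY_NAMED_IAM corresponds to the "might create IAM resources
# with custom names" checkbox you acknowledge in the console.
cfn.create_stack(
    StackName='DeepLensRoles',
    TemplateURL='https://s3.amazonaws.com/deep-learning-with-deeplens/DeepLensRoles.json',
    Capabilities=['CAPABILITY_NAMED_IAM'],
)

# Block until the stack reaches CREATE_COMPLETE.
cfn.get_waiter('stack_create_complete').wait(StackName='DeepLensRoles')

# Print the created resources (the console's Resources tab).
for resource in cfn.describe_stack_resources(StackName='DeepLensRoles')['StackResources']:
    print(resource['LogicalResourceId'], resource['PhysicalResourceId'])
```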
33 |
34 | ### Manual steps to create IAM roles for DeepLens
35 |
36 | _Only use these steps if you did not use the CloudFormation template above to automatically create the required IAM roles for DeepLens. Otherwise, move to step 5. A scripted sketch of these role-creation steps follows the list below._
37 |
38 | - IAM role for AWS DeepLens
39 | - From the list, choose AWSDeepLensServiceRole. If AWSDeepLensServiceRole isn't listed, choose Create role in IAM and follow these steps in the IAM console.
40 | - Accept the DeepLens service and DeepLens use case by choosing Next: Permissions.
41 | - Accept the AWSDeepLensServiceRolePolicy policy by choosing Next: Review.
42 | - Accept the role name AWSDeepLensServiceRole and the provided description by choosing Create role. Do not change the role name.
43 | - Close the IAM window.
44 |
45 | - IAM role for AWS Greengrass service
46 | - From the list, choose AWSDeepLensGreengrassRole. If AWSDeepLensGreengrassRole isn't listed, choose Create role in IAM and follow these steps in the IAM console.
47 | - Accept the Greengrass service and Greengrass use case by choosing Next: Permissions.
48 | - Accept the AWSGreengrassResourceAccessRolePolicy policy by choosing Next: Review.
49 | - Accept the role name AWSDeepLensGreengrassRole and the provided description by choosing Create role. Do not change the role name.
50 | - Close the IAM window.
51 |
52 | - IAM role for AWS Greengrass device groups.
53 | - From the list, choose AWSDeepLensGreengrassGroupRole. If AWSDeepLensGreengrassGroupRole isn't listed, choose Create role in IAM and follow these steps in the IAM console.
54 | - Accept the DeepLens service and the DeepLens - Greengrass Lambda use case by choosing Next: Permissions.
55 | - Accept the AWSDeepLensLambdaFunctionAccessPolicy policy by choosing Next: Review.
56 | - Accept the role name AWSDeepLensGreengrassGroupRole and the provided description by choosing Create role. Do not change the role name.
57 | - Close the IAM window.
58 |
59 | - IAM role for Amazon SageMaker
60 | - From the list, choose AWSDeepLensSageMakerRole. If AWSDeepLensSageMakerRole isn't listed, choose Create role in IAM and follow these steps in the IAM console.
61 | - Accept the SageMaker service and the SageMaker - Execution use case by choosing Next: Permissions.
62 | - Accept the AmazonSageMakerFullAccess policy by choosing Next: Review.
63 | - Accept the role name AWSDeepLensSageMakerRole and the provided description by choosing Create role. Do not change the role name.
64 | - Close the IAM window.
65 |
66 | - IAM role for AWS Lambda
67 | - From the list, choose AWSDeepLensLambdaRole. If AWSDeepLensLambdaRole isn't listed, choose Create role in IAM and follow these steps in the IAM console.
68 | - Accept the Lambda service and the Lambda use case by choosing Next: Permissions.
69 | - Accept the AWSLambdaFullAccess policy by choosing Next: Review.
70 | - Accept the role name AWSDeepLensLambdaRole and the provided description by choosing Create role. Do not change the role name.
71 | - Close the IAM window.
72 |
73 |
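For reference, the console flow above boils down to two IAM calls per role. The boto3 sketch below shows them for the DeepLens service role; the trust principal and managed-policy ARN are assumptions inferred from the console steps, so verify them in IAM before relying on this.

```
import json
import boto3

iam = boto3.client('iam')

# Assumed trust policy: lets the DeepLens service assume the role.
trust_policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": "deeplens.amazonaws.com"},
        "Action": "sts:AssumeRole",
    }],
}

iam.create_role(
    RoleName='AWSDeepLensServiceRole',  # do not change the role name
    AssumeRolePolicyDocument=json.dumps(trust_policy),
)

# Assumed ARN for the managed policy named in the console steps.
iam.attach_role_policy(
    RoleName='AWSDeepLensServiceRole',
    PolicyArn='arn:aws:iam::aws:policy/service-role/AWSDeepLensServiceRolePolicy',
)
```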
74 |
75 | 5. In AWS DeepLens, on the Set permissions page, choose Refresh IAM roles, then do the following:
76 | - For IAM role for AWS DeepLens, choose AWSDeepLensServiceRole.
77 | - For IAM role for AWS Greengrass service, choose AWSDeepLensGreengrassRole.
78 | - For IAM role for AWS Greengrass device groups, choose AWSDeepLensGreengrassGroupRole.
79 | - For IAM role for Amazon SageMaker, choose AWSDeepLensSageMakerRole.
80 | - For IAM role for AWS Lambda, choose AWSDeepLensLambdaRole.
81 |
82 | _Important: Attach the roles exactly as described. Otherwise, you might have trouble deploying models to AWS DeepLens._
83 |
84 | If any of the lists do not have the specified role, find that role in step 4, follow the directions to create the role, choose Refresh IAM roles, and return to where you were in step 5.
85 |
86 | 6. Choose Next.
87 | 7. On the Download certificate page, choose Download certificate, then choose Save File. Note where you save the certificate file because you need it later.
88 | 8. After the certificate has been downloaded, choose Register. You should see a success message about your device registration like the one below.
89 |
90 | _Important: The certificate is a .zip file. You attach it to AWS DeepLens in .zip format, so don’t unzip it. Certificates aren't reusable. You need to generate a new certificate every time you register your device._
91 |
92 | 
93 |
94 | ## Connect Your AWS DeepLens Device
95 |
96 | 1. Start your AWS DeepLens device by plugging the power cord into an outlet and the other end into the back of your device. Turn on the AWS DeepLens by pressing the On/Off button on the front of the device.
97 | 2. On your computer, choose the SSID for your AWS DeepLens from the list of available networks. The SSID and password are on the bottom of your device.
98 |
99 | 
100 |
101 | 3. The Wi-Fi light should be blinking at this time. If the Wi-Fi light is not blinking, reset the device using a pin and restart it.
102 | 4. If the Wi-Fi light is blinking, connect to the device's Wi-Fi network.
103 |
104 | ## Set Up Your AWS DeepLens Device
105 |
106 | 1. In a browser, open a new tab and navigate to http://192.168.0.1.
107 |
108 | 2. If you see a Device setup summary like the one below, follow "Edit Pre-configured device"; otherwise follow "Setup new device".
109 |
110 | 
111 |
112 | ### Edit Pre-configured device
113 |
114 | - For Network Connection, click on Edit.
115 | - Under Connect to network, click on Use Ethernet. _In this lab, we are using Ethernet, so do not use Wi-Fi._
116 |
117 | 
118 |
119 | - For Certificate, click on Edit
120 | - Click Browse and select the certificate you downloaded during DeepLens registration and click Save.
121 | _Even if you see certificate.zip already populated, make sure you still browse and select the certificate you downloaded during DeepLens registration._
122 |
123 | 
124 |
125 | - You do not need to edit Device access. Just make sure that SSH is enabled under Device access.
126 |
127 | 
128 |
129 | - Click Finish.
130 |
131 |
132 | ### Setup new device
133 |
134 | On the Device page:
135 | - Connect to the network.
136 | - For this lab we will be using Ethernet so do not choose Wi-Fi. Choose the Ethernet option and then choose Next.
137 | - Upload the certificate.
138 | - Locate and choose the certificate that you downloaded from the AWS DeepLens console, then choose Upload certificate.
139 | - The certificate is saved as a .zip file in your Downloads directory. Don't unzip the file. You attach the certificate as a .zip file.
140 | - Configure device access.
141 | - Create a password for the device—you need this password to access and update your AWS DeepLens.
142 | - SSH server—enable SSH, as you will use it to connect to the device in later modules. SSH allows you to log in without using the AWS DeepLens console.
143 | - Automatic updates—enable this option to keep your device's software up to date.
144 | - Review the settings and finish setting up the device.
145 | - To modify settings, choose Edit for the setting that you want to change.
146 | - Click Finish.
147 |
148 |
149 | ## Verify That Your AWS DeepLens Is Connected
150 |
151 | After you set up your device, your computer automatically reconnects to the internet. This can take a few seconds. When your device is connected, you see the following message:
152 |
153 | After the connection is established, you can return to the AWS DeepLens console. You are now ready to deploy an AWS DeepLens project. For more information, see Creating and Deploying an AWS DeepLens Sample Project.
154 |
155 | 
156 |
157 | If you fail to establish a connection, return to Connect AWS DeepLens to the Network and repeat the steps for setting up your device and connecting it to the network.
158 |
159 | ## Get the IP of your DeepLens device
160 |
161 | 1. Go to IoT in AWS Console at https://console.aws.amazon.com/iot/home?region=us-east-1#/dashboard
162 | 2. In the left navigation, click on Greengrass then click on Cores.
163 | 3. Click on the Greengrass core that starts with deeplens_.
164 |
165 | 
166 |
167 | 4. On the Greengrass core details screen, click on Connectivity and note the IP address of your DeepLens device.
168 |
169 | 
170 |
171 | 5. You should now be able to SSH into the DeepLens device:
172 |
173 | ```
174 | ssh aws_cam@IP-ADDRESS-OF-YOUR-DEEPLENS-DEVICE
175 | ```
176 | ```
177 | Example: ssh aws_cam@10.0.1.3
178 | ```
179 |
180 | 
181 |
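If you prefer the command line, the boto3 sketch below looks up the same connectivity information as steps 1-4; it assumes the device's IoT thing name starts with deeplens_, as shown in the console.

```
import boto3

iot = boto3.client('iot', region_name='us-east-1')
greengrass = boto3.client('greengrass', region_name='us-east-1')

# Find DeepLens things and print each reported host address.
for thing in iot.list_things()['things']:
    if thing['thingName'].startswith('deeplens_'):
        info = greengrass.get_connectivity_info(ThingName=thing['thingName'])
        for endpoint in info['ConnectivityInfo']:
            print(thing['thingName'], endpoint['HostAddress'])
```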
182 | ## Completion
183 | You have successfully registered your DeepLens device. In the next module, [Object Detection](../2-ObjectDetection), you will learn how to deploy an object detection project to DeepLens and view its output.
184 |
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/certificate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/certificate.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/createstack.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/createstack.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/createstack2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/createstack2.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/createstack3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/createstack3.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/createstack4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/createstack4.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/device-verified.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/device-verified.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/deviceaccess.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/deviceaccess.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/deviceip.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/deviceip.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/deviceregs1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/deviceregs1.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/ggcore.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/ggcore.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/launchstack.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/launchstack.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/namedevice.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/namedevice.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/networkedit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/networkedit.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/readme.txt:
--------------------------------------------------------------------------------
1 | This directory is for hosting screenshots for the KnowYourDevice lab.
2 |
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/setupsummary.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/setupsummary.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/ssh.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/ssh.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/1-RegisterYourDevice/assets/ssid-connect.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/1-RegisterYourDevice/assets/ssid-connect.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/2-ObjectDetection/README.md:
--------------------------------------------------------------------------------
1 | # Object Detection Using Deep Lens
2 |
3 | ## Create Your Project
4 |
5 | 1. Using your browser, open the AWS DeepLens console at https://console.aws.amazon.com/deeplens/.
6 | 2. Choose Projects, then choose Create new project.
7 | 3. On the Choose project type screen
8 | - Choose Use a project template, then choose Object detection.
9 |
10 | 
11 |
12 | - Scroll to the bottom of the screen, then choose Next.
13 | 4. On the Specify project details screen
14 | - In the Project information section:
15 | - Either accept the default name for the project, or type a name you prefer.
16 | - Either accept the default description for the project, or type a description you prefer.
17 | - In the Project content section:
18 | - Model—make sure the model is deeplens-object-detection. If it isn't, remove the current model then choose Add model. From the list of models, choose deeplens-object-detection.
19 | - Function—make sure the function is deeplens-object-detection. If it isn't, remove the current function then choose Add function. From the list of functions, choose deeplens-object-detection.
20 |
21 | 
22 |
23 | - Choose Create.
24 |
25 | This returns you to the Projects screen where the project you just created is listed with your other projects.
26 |
27 | ## Deploy your project
28 |
29 | Next you will deploy the Object Detection project you just created.
30 |
31 | 1. From the DeepLens console, on the Projects screen, choose the radio button to the left of your project name, then choose Deploy to device.
32 |
33 | 
34 |
35 | 2. On the Target device screen, from the list of AWS DeepLens devices, choose the radio button to the left of the device that you want to deploy this project to. An AWS DeepLens device can have only one project deployed to it at a time.
36 |
37 | 
38 |
39 | 3. Choose Review.
40 |
41 | This will take you to the Review and deploy screen.
42 |
43 | If a project is already deployed to the device, you will see a warning message:
44 | "There is an existing project on this device. Do you want to replace it?
45 | If you Deploy, AWS DeepLens will remove the current project before deploying the new project."
46 |
47 | 4. On the Review and deploy screen, review your project and choose Deploy to deploy the project.
48 |
49 | This will take you to the device screen, which shows the progress of your project deployment.
50 |
51 | ## View your project output
52 |
53 | You need MPlayer to view the project output from the DeepLens device. Instructions are provided below for installing it on Mac and Windows.
54 |
55 | ### For Mac
56 | 1. Install MPlayer by using command below in the terminal window:
57 |
58 | ```
59 | brew install mplayer
60 | ```
61 |
62 | 2. Wait until the project is deployed and you see the message "Deployment of project Object-detection, version 0 succeeded". After the project is successfully deployed, use the command below from a terminal window to view the project output stream:
63 |
64 | ```
65 | ssh aws_cam@IP-ADDRESS-OF-YOUR-DEEPLENS-DEVICE cat /tmp/results.mjpeg | mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 -
66 | ```
67 | Example:
68 | ```
69 | ssh aws_cam@192.168.86.120 cat /tmp/results.mjpeg | mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 -
70 | ```
71 |
72 | ### For Windows
73 |
74 | 1. You will need to use 7zip to extract the MPlayer executable. You can install 7zip by running the executable at the following link:
75 | * 64-bit: http://www.7-zip.org/a/7z1801-x64.exe
76 | * 32-bit: http://www.7-zip.org/a/7z1801.exe
77 |
78 | 2. After you have installed 7zip, download the MPlayer 7z archive at the following link, and then use 7zip to extract the contents of the archive:
79 | * 64-bit: http://sourceforge.net/projects/mplayerwin/files/MPlayer-MEncoder/r38017/mplayer-svn-38017-x86_64.7z/download
80 | * 32-bit: http://sourceforge.net/projects/mplayerwin/files/MPlayer-MEncoder/r38017/mplayer-svn-38017.7z/download
81 |
82 | Remember the directory in which you extracted the contents of the archive, because in the next step, you will need to provide the full path to the MPlayer executable that you extracted from that archive.
83 |
84 | #### To view the project output stream
85 |
86 | Wait until the project is deployed and you see the message "Deployment of project Object-detection, version 0 succeeded". After the project is successfully deployed, follow these steps to view the project output stream:
87 |
88 | Option 1. If you want to use Windows Subsystem for Linux (e.g. running bash or any Linux distro installed from Windows Store), use the following command in your Windows command prompt:
89 | ```
90 | ssh aws_cam@IP-ADDRESS-OF-YOUR-DEEPLENS-DEVICE cat /tmp/results.mjpeg | /mnt/c/your-path-to-mplayer-exe-here/mplayer.exe -demuxer lavf -lavfdopts format=mjpeg:probesize=32 -
91 | ```
92 | Example:
93 | ```
94 | ssh aws_cam@10.57.226.58 cat /tmp/results.mjpeg | /mnt/c/your-path-to-mplayer-exe-here/mplayer.exe -demuxer lavf -lavfdopts format=mjpeg:probesize=32 -
95 | ```
96 |
97 | Option 2. If you have ssh installed directly in Windows 10 (using Fall Creators Update or later, in optional features), use the following command in your Windows command prompt:
98 | ```
99 | ssh aws_cam@IP-ADDRESS-OF-YOUR-DEEPLENS-DEVICE cat /tmp/results.mjpeg | c:\Your-path-to-mplayer-exe-here\mplayer.exe -demuxer lavf -lavfdopts format=mjpeg:probesize=32 -
100 | ```
101 | Example:
102 | ```
103 | ssh aws_cam@192.168.86.120 cat /tmp/results.mjpeg | c:\Your-path-to-mplayer-exe-here\mplayer.exe -demuxer lavf -lavfdopts format=mjpeg:probesize=32 -
104 | ```
105 |
106 | ## View your project log messages in IoT
107 |
108 | You can also view the log messages that your project's Lambda function, running on the DeepLens device, sends to the IoT topic.
109 |
110 | 1. Go to DeepLens in AWS console and then Devices at https://console.aws.amazon.com/deeplens/home?region=us-east-1#devices
111 | 2. Click on the name of your DeepLens device and on the next screen note the IoT topic under Device settings.
112 |
113 | 
114 |
115 | 3. Go to IoT in AWS Console at https://console.aws.amazon.com/iot/home?region=us-east-1#/dashboard
116 | 4. Click on Test in the left navigation.
117 | 5. Enter the IoT topic in the textbox under Subscription topic and click Subscribe to topic.
118 | 6. You should now see log messages published from the DeepLens device to IoT.
119 |
120 | 
121 |
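The messages you see are JSON maps of detected labels to confidence scores. As an illustration only, the sketch below filters one such payload for high-confidence detections; the sample payload here is made up.

```
import json

# Hypothetical message payload of the kind shown in the screenshot above.
payload = '{"person": 0.82, "chair": 0.41, "null": 0.0}'

detections = json.loads(payload)
for label, prob in detections.items():
    if label != 'null' and prob >= 0.5:
        print('{}: {:.2f}'.format(label, prob))
```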
122 | ## Completion
123 | You have created and deployed an object detection project to your DeepLens device.
124 |
--------------------------------------------------------------------------------
/1-KnowYourDevice/2-ObjectDetection/assets/dliottopic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/2-ObjectDetection/assets/dliottopic.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/2-ObjectDetection/assets/dlmessages.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/2-ObjectDetection/assets/dlmessages.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/2-ObjectDetection/assets/hotdog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/2-ObjectDetection/assets/hotdog.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/2-ObjectDetection/assets/projectcontent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/2-ObjectDetection/assets/projectcontent.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/2-ObjectDetection/assets/projecthome.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/2-ObjectDetection/assets/projecthome.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/2-ObjectDetection/assets/projecttemplate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/2-ObjectDetection/assets/projecttemplate.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/2-ObjectDetection/assets/targetdevice.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/2-ObjectDetection/assets/targetdevice.png
--------------------------------------------------------------------------------
/1-KnowYourDevice/README.md:
--------------------------------------------------------------------------------
1 | # Get to know your Device
2 |
3 | AWS DeepLens is a wireless video camera and API. It shows you how to use the latest Artificial Intelligence (AI) tools and technology to develop computer vision applications. Through examples and tutorials, AWS DeepLens gives you hands-on experience using a physical camera to run real-time computer vision models.
4 |
5 | The AWS DeepLens camera, or device, uses deep convolutional neural networks (CNNs) to analyze visual imagery. You use the device as a development environment to build computer vision applications.
6 |
7 | 
8 |
9 | AWS DeepLens supports the Apache MXNet framework. You can also use other AWS services with DeepLens, including:
10 | - Amazon SageMaker, for model training and validation
11 | - AWS Lambda, for running inference against CNN models
12 | - AWS Greengrass, for deploying updates and functions to your device
13 |
14 | ## AWS DeepLens Hardware
15 | The AWS DeepLens camera includes the following:
16 |
17 | - A 4-megapixel camera with MJPEG (Motion JPEG)
18 | - 8 GB of on-board memory
19 | - 16 GB of storage capacity
20 | - A 32-GB SD (Secure Digital) card
21 | - WiFi support for both 2.4 GHz and 5 GHz standard dual-band networking
22 | - A micro HDMI display port
23 | - Audio out and USB ports
24 |
25 | The AWS DeepLens camera is powered by an Intel® Atom processor, which can process 100 billion floating-point operations per second (100 GFLOPS). This gives you all of the compute power that you need to perform inference on your device. The micro HDMI display port, audio out, and USB ports allow you to attach peripherals, so you can get creative with your computer vision applications.
26 |
27 | ## Learning Objectives of This Lab
28 |
29 | In this lab you will learn the following:
30 |
31 | - Registering your DeepLens device.
32 | - Creating a DeepLens project using the object detection template.
33 | - Deploying the model to the device.
34 | - Checking the output on the video stream.
35 |
36 | ## Modules
37 |
38 | This lab has the following modules:
39 |
40 | - [Register Your Device](1-RegisterYourDevice)
41 | - [Object Detection](2-ObjectDetection)
42 |
43 | ## Clean Up
44 | After completing the labs in this workshop, ensure you delete all the resources created in your AWS account during the labs so that no further costs are incurred.
45 |
--------------------------------------------------------------------------------
/1-KnowYourDevice/assets/dlgeneral.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/1-KnowYourDevice/assets/dlgeneral.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/ReadMe.md:
--------------------------------------------------------------------------------
1 | # Build, train and deploy Face Detection model using Amazon SageMaker
2 |
3 | In this module, you will learn how to build and train a face detection model using Amazon SageMaker.
4 |
5 | ## Create SageMaker Notebook Instance
6 |
7 | 1. Go to SageMaker console at https://console.aws.amazon.com/sagemaker/home?region=us-east-1#/landing
8 | ___Make sure you have us-east-1 selected as region.___
9 |
10 | 2. Click on Create Notebook instance
11 |
12 | 
13 |
14 | 3. Under Notebook instance settings:
15 | - Notebook instance name: Enter a name, e.g. DeepLearning
16 | - Notebook instance type: ml.t2.medium
17 | - IAM role: Click on 'Create a new role'
18 | - Under Create an IAM role: Select "Any S3 bucket" and click "Create role".
19 |
20 | 
21 |
22 | - Leave defaults for VPC, Lifecycle configuration and encryption key and click 'Create notebook instance'.
23 |
24 | 
25 |
26 | - You should see a message on the next screen that your notebook instance is being created.
27 |
28 | 
29 |
30 | ## View Notebook Instance
31 |
32 | Once the status of your notebook instance is InService, click on the "Open" link under Actions.
33 |
34 | 
35 |
36 | You should see Jupyter UI with sample-notebooks folder that contains various sample notebooks.
37 |
38 | 
39 |
40 | ## Upload and Open Notebook
41 |
42 | You can upload individual notebooks using the Jupyter UI, but in this lab we will use git to bring the notebook into our SageMaker instance.
43 |
44 | - Click on New and choose Terminal from the drop down list.
45 |
46 | 
47 |
48 | - You should get a console in a new browser tab:
49 |
50 | 
51 |
52 | - Type "cd SageMaker" to go to the SageMaker directory.
53 | - Type "git clone https://github.com/darwaishx/Deep-Learning-With-Deep-Lens.git" to clone the GitHub repo.
54 | - Type "ls" and you should now see a folder "Deep-Learning-With-Deep-Lens".
55 |
56 | 
57 |
58 | - Go back to Jupyter UI, and you should see folder "Deep-Learning-With-Deep-Lens".
59 |
60 | 
61 |
62 | - Click on "Deep-Learning-With-Deep-Lens", then "2-SageMaker", then "1-FaceDetection", and click on the notebook FaceDetection.ipynb.
63 |
64 | 
65 |
66 | ## Execute Notebook
67 |
68 | - You can execute notebook cells by either clicking the Run button in the menu or pressing Shift+Enter on your keyboard.
69 |
70 | 
71 |
72 | - After you successfully train and deploy your face detection model, the last cell in the notebook will test the trained model.
73 |
74 | 
75 |
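Conceptually, that final test cell amounts to sending an image to the deployed endpoint. Below is a minimal boto3 sketch of such a call; the endpoint name is a placeholder, and the exact content type and response format are defined by the notebook, so treat this as an outline rather than the notebook's actual code.

```
import boto3

runtime = boto3.client('sagemaker-runtime', region_name='us-east-1')

# jeff_portrait.jpg ships with this module of the repo.
with open('jeff_portrait.jpg', 'rb') as f:
    payload = f.read()

response = runtime.invoke_endpoint(
    EndpointName='face-detection-endpoint',  # placeholder endpoint name
    ContentType='image/jpeg',                # assumed content type
    Body=payload,
)
print(response['Body'].read())
```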
76 | ## Completion
77 | You have successfully built, trained, and deployed a face detection model using SageMaker.
78 |
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm01.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm02.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm03.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm04.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm05.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm05.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm06.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm06.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm07.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm07.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm08.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm08.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm09.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm09.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm10.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm11.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm12.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm13.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/assets/sm132.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/assets/sm132.png
--------------------------------------------------------------------------------
/2-SageMaker/1-FaceDetection/jeff_portrait.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/1-FaceDetection/jeff_portrait.jpg
--------------------------------------------------------------------------------
/2-SageMaker/ReadMe.md:
--------------------------------------------------------------------------------
1 | # Amazon SageMaker
2 |
3 | Amazon SageMaker is a fully-managed platform that enables developers and data scientists to quickly and easily build, train, and deploy machine learning models at any scale. Amazon SageMaker removes all the barriers that typically slow down developers who want to use machine learning.
4 |
5 | Amazon SageMaker removes the complexity that holds back developer success at each of these steps. It includes modules that can be used together or independently to build, train, and deploy your machine learning models.
6 |
7 | 
8 |
9 | ## Learning Objectives of This Lab
10 |
11 | In this lab you will learn the following:
12 |
13 | - Launch your notebook instance and open its Jupyter interface
14 | - Amazon Algorithms - Reliable ML at Scale
15 | - Training Models with a Training Job
16 | - Tuning Models with Hyper-parameter Optimization (HPO)
17 | - Hosting Inference Endpoint
18 | - Build, train and deploy face detection model with Amazon SageMaker
19 |
20 | ## Modules
21 |
22 | This lab has the following module:
23 |
24 | - [Build Face Detection model using Amazon SageMaker](1-FaceDetection)
25 |
26 | ## Clean Up
27 | After completing the labs in this workshop, ensure you delete all the resources created in your AWS account during the labs so that no further costs are incurred. For SageMaker, you should delete the endpoint (this also deletes the ML compute instance or instances), the endpoint configuration, the model, and the notebook instance. You will need to stop the notebook instance before deleting it.
28 |
--------------------------------------------------------------------------------
/2-SageMaker/assets/sm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/2-SageMaker/assets/sm.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/README.md:
--------------------------------------------------------------------------------
1 | # Detect faces, crop them and store in S3 from DeepLens
2 |
3 | ## Create S3 Bucket
4 |
5 | _This is the bucket in which cropped faces coming from DeepLens will be stored._
6 |
7 | 1. Go to S3 in AWS Console at https://s3.console.aws.amazon.com/s3/home?region=us-east-1.
8 | 2. Click "Create bucket", and enter the following details:
9 | * Bucket name: _[Your name or username]-dl-faces_
10 | * Region: US East (N. Virginia)
11 | 3. Click "Create".
12 |
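Equivalently, you can create the bucket with boto3; this minimal sketch assumes the us-east-1 region (which needs no LocationConstraint) and uses a placeholder bucket name.

```
import boto3

s3 = boto3.client('s3', region_name='us-east-1')
s3.create_bucket(Bucket='your-name-dl-faces')  # placeholder bucket name
```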
13 | ## Inference Lambda function to Crop Faces and Send to S3
14 |
15 | In this section you will update the Lambda function that is part of the face detection project to crop faces and send them to S3.
16 |
17 | 1. Using your browser, open the AWS Lambda console at https://console.aws.amazon.com/lambda/home?region=us-east-1#/functions.
18 | 2. In the search box, type deeplens-face-detection to find the Lambda function for your project and click on its name.
19 | 3. Replace the code in the Lambda function with the code from [facecrop.py](facecrop.py).
20 | 4. Update the bucket_name variable with the name of the S3 bucket you created above, _[Your name or username]-dl-faces_.
21 |
22 | 
23 |
24 | 5. Click Save.
25 | 6. Click on Actions and then Publish new version.
26 | 7. Enter a version description and click Publish. A scripted sketch of these steps appears below.
27 |
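If you'd rather script steps 3-7, the boto3 sketch below uploads new code and publishes a version; the zip path is a placeholder for an archive containing facecrop.py.

```
import boto3

lam = boto3.client('lambda', region_name='us-east-1')

# Placeholder path: a zip archive you built containing facecrop.py.
with open('facecrop.zip', 'rb') as f:
    lam.update_function_code(FunctionName='deeplens-face-detection',
                             ZipFile=f.read())

lam.publish_version(FunctionName='deeplens-face-detection',
                    Description='Crop faces and push them to S3')
```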
28 | ## Create Your Project
29 |
30 | 1. Using your browser, open the AWS DeepLens console at https://console.aws.amazon.com/deeplens/.
31 | 2. Choose Projects, then choose Create new project.
32 | 3. On the Choose project type screen
33 | - Choose Use a project template, then choose Face detection.
34 |
35 | 
36 |
37 | - Scroll to the bottom of the screen, then choose Next.
38 | 4. On the Specify project details screen
39 | - In the Project information section:
40 | - Either accept the default name for the project, or type a name you prefer.
41 | - Either accept the default description for the project, or type a description you prefer.
42 | - In the Project content section:
43 | - Model—make sure the model is deeplens-face-detection. If it isn't, remove the current model then choose Add model. From the list of models, choose deeplens-face-detection.
44 | - Function—make sure the function is deeplens-face-detection. If it isn't, remove the current function then choose Add function. From the list of functions, choose deeplens-face-detection.
45 |
46 | 
47 |
48 | 
49 |
50 | - Under Version, select the current version that you just published.
51 | - Choose Create.
52 |
53 | This returns you to the Projects screen where the project you just created is listed with your other projects.
54 |
55 | ## Deploy your project
56 |
57 | Next you will deploy the Face Detection project you just created.
58 |
59 | 1. From the DeepLens console, on the Projects screen, choose the radio button to the left of your project name, then choose Deploy to device.
60 |
61 | 
62 |
63 | 2. On the Target device screen, from the list of AWS DeepLens devices, choose the radio button to the left of the device that you want to deploy this project to. An AWS DeepLens device can have only one project deployed to it at a time.
64 |
65 | 
66 |
67 | 3. Choose Review.
68 |
69 | This will take you to the Review and deploy screen.
70 |
71 | If a project is already deployed to the device, you will see a warning message:
72 | "There is an existing project on this device. Do you want to replace it?
73 | If you Deploy, AWS DeepLens will remove the current project before deploying the new project."
74 |
75 | 4. On the Review and deploy screen, review your project and choose Deploy to deploy the project.
76 |
77 | This will take you to the device screen, which shows the progress of your project deployment.
78 |
79 | ## View your project output
80 |
81 | 1. You need MPlayer to view the project output from the DeepLens device. For Windows, follow the installation instructions at this link: http://www.mplayerhq.hu/design7/dload.html
82 | For Mac, install MPlayer by using the command below in the terminal window:
83 |
84 | ```
85 | brew install mplayer
86 | ```
87 |
88 | 2. Wait until the project is deployed and you see the message "Deployment of project Face-detection, version 0 succeeded". After the project is successfully deployed, use the command below from a terminal window to view the project output stream:
89 |
90 | ```
91 | ssh aws_cam@IP-ADDRESS-OF-YOUR-DEEPLENS-DEVICE cat /tmp/results.mjpeg | mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 -
92 | ```
93 | Example:
94 | ```
95 | ssh aws_cam@192.168.86.120 cat /tmp/results.mjpeg | mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 -
96 | ```
97 |
98 | ### Verify DeepLens is sending faces to S3
99 | Go to the S3 bucket _[Your name or username]-dl-faces_ and you should now see images coming from DeepLens.
100 |
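As a quick alternative to browsing the console, this boto3 sketch lists the uploaded keys; the bucket name is a placeholder, and the faces/ prefix matches the key format used in facecrop.py.

```
import boto3

s3 = boto3.client('s3')

# List the cropped-face objects pushed by the device.
resp = s3.list_objects_v2(Bucket='your-name-dl-faces', Prefix='faces/')
for obj in resp.get('Contents', []):
    print(obj['Key'], obj['LastModified'])
```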
101 | ## Completion
102 | You have successfully created and deployed a face detection project on DeepLens. You also modified the default project so that when DeepLens detects a human face, it crops the face and stores it as an image in S3. In the next activity, [Identify Emotions](../2-IdentifyEmotions), you will learn how Amazon Rekognition provides deep-learning-based image and video analysis capabilities, including facial analysis, in the cloud.
103 |
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/facecrop.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright Amazon AWS DeepLens, 2017
3 | #
4 |
5 | import os
6 | import sys
7 | import datetime
8 | import greengrasssdk
9 | from threading import Timer
10 | import time
11 | import awscam
12 | import cv2
13 | from threading import Thread
14 | import urllib
15 | import zipfile
16 |
17 | # boto3 is not installed on the device by default.
18 |
19 | boto_dir = '/tmp/boto_dir'
20 | if not os.path.exists(boto_dir):
21 |     os.mkdir(boto_dir)
22 | urllib.urlretrieve("https://s3.amazonaws.com/dear-demo/boto_3_dist.zip", "/tmp/boto_3_dist.zip")
23 | with zipfile.ZipFile("/tmp/boto_3_dist.zip", "r") as zip_ref:
24 |     zip_ref.extractall(boto_dir)
25 | sys.path.append(boto_dir)
26 |
27 | import boto3
28 |
29 | # Creating a greengrass core sdk client
30 | client = greengrasssdk.client('iot-data')
31 |
32 | # The information exchanged between IoT and the cloud has
33 | # a topic and a message body.
34 | # This is the topic that this code uses to send messages to the cloud
35 | iotTopic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
36 |
37 | ret, frame = awscam.getLastFrame()
38 | ret, jpeg = cv2.imencode('.jpg', frame)
39 |
40 | Write_To_FIFO = True
41 |
42 | class FIFO_Thread(Thread):
43 |     def __init__(self):
44 |         ''' Constructor. '''
45 |         Thread.__init__(self)
46 |
47 |     def run(self):
48 |         fifo_path = "/tmp/results.mjpeg"
49 |         if not os.path.exists(fifo_path):
50 |             os.mkfifo(fifo_path)
51 |         f = open(fifo_path, 'w')
52 |         client.publish(topic=iotTopic, payload="Opened Pipe")
53 |         while Write_To_FIFO:
54 |             try:
55 |                 f.write(jpeg.tobytes())
56 |             except IOError as e:
57 |                 continue
58 |
59 | def push_to_s3(img, index):
60 |     try:
61 |         bucket_name = ""
62 |
63 |         timestamp = int(time.time())
64 |         now = datetime.datetime.now()
65 |         key = "faces/{}_{}/{}_{}/{}_{}.jpg".format(now.month, now.day,
66 |                                                    now.hour, now.minute,
67 |                                                    timestamp, index)
68 |
69 |         s3 = boto3.client('s3')
70 |
71 |         encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
72 |         _, jpg_data = cv2.imencode('.jpg', img, encode_param)
73 |         response = s3.put_object(ACL='public-read',
74 |                                  Body=jpg_data.tostring(),
75 |                                  Bucket=bucket_name,
76 |                                  Key=key)
77 |
78 |         client.publish(topic=iotTopic, payload="Response: {}".format(response))
79 |         client.publish(topic=iotTopic, payload="Face pushed to S3")
80 |     except Exception as e:
81 |         msg = "Pushing to S3 failed: " + str(e)
82 |         client.publish(topic=iotTopic, payload=msg)
83 |
84 | def greengrass_infinite_infer_run():
85 |     try:
86 |         modelPath = "/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml"
87 |         modelType = "ssd"
88 |         input_width = 300
89 |         input_height = 300
90 |         prob_thresh = 0.25
91 |         results_thread = FIFO_Thread()
92 |         results_thread.start()
93 |
94 |         # Send a starting message to the IoT console
95 |         client.publish(topic=iotTopic, payload="Face detection starts now")
96 |
97 |         # Load model to GPU (use {"GPU": 0} for CPU)
98 |         mcfg = {"GPU": 1}
99 |         model = awscam.Model(modelPath, mcfg)
100 |         client.publish(topic=iotTopic, payload="Model loaded")
101 |         ret, frame = awscam.getLastFrame()
102 |         if ret == False:
103 |             raise Exception("Failed to get frame from the stream")
104 |
105 |         yscale = float(frame.shape[0]/input_height)
106 |         xscale = float(frame.shape[1]/input_width)
107 |
108 |         doInfer = True
109 |         while doInfer:
110 |             # Get a frame from the video stream
111 |             ret, frame = awscam.getLastFrame()
112 |             # Raise an exception if failing to get a frame
113 |             if ret == False:
114 |                 raise Exception("Failed to get frame from the stream")
115 |
116 |             # Resize frame to fit model input requirement
117 |             frameResize = cv2.resize(frame, (input_width, input_height))
118 |
119 |             # Run model inference on the resized frame
120 |             inferOutput = model.doInference(frameResize)
121 |
122 |             # Output inference result to the fifo file so it can be viewed with mplayer
123 |             parsed_results = model.parseResult(modelType, inferOutput)['ssd']
124 |             # client.publish(topic=iotTopic, payload = json.dumps(parsed_results))
125 |             label = '{'
126 |             for i, obj in enumerate(parsed_results):
127 |                 if obj['prob'] < prob_thresh:
128 |                     break
129 |                 offset = 25
130 |                 xmin = int( xscale * obj['xmin'] ) + int((obj['xmin'] - input_width/2) + input_width/2)
131 |                 ymin = int( yscale * obj['ymin'] )
132 |                 xmax = int( xscale * obj['xmax'] ) + int((obj['xmax'] - input_width/2) + input_width/2)
133 |                 ymax = int( yscale * obj['ymax'] )
134 |
135 |                 crop_img = frame[ymin:ymax, xmin:xmax]
136 |
137 |                 push_to_s3(crop_img, i)
138 |
139 |                 cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 4)
140 |                 label += '"{}": {:.2f},'.format(str(obj['label']), obj['prob'] )
141 |                 label_show = '{}: {:.2f}'.format(str(obj['label']), obj['prob'] )
142 |                 cv2.putText(frame, label_show, (xmin, ymin-15),
143 |                             cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 165, 20), 4)
144 |             label += '"null": 0.0'
145 |             label += '}'
146 |             client.publish(topic=iotTopic, payload=label)
147 |             global jpeg
148 |             ret, jpeg = cv2.imencode('.jpg', frame)
149 |
150 |     except Exception as e:
151 |         msg = "Test failed: " + str(e)
152 |         client.publish(topic=iotTopic, payload=msg)
153 |
154 |     # Asynchronously schedule this function to be run again in 15 seconds
155 |     Timer(15, greengrass_infinite_infer_run).start()
156 |
157 |
158 | # Execute the function above
159 | greengrass_infinite_infer_run()
160 |
161 |
162 | # This is a dummy handler and will not be invoked
163 | # Instead the code above will be executed in an infinite loop for our example
164 | def function_handler(event, context):
165 |     return
166 |
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/bucketname.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/bucketname.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/dlprojectcontent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/dlprojectcontent.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/dlprojecthome.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/dlprojecthome.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/dlprojecttargetdevice.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/dlprojecttargetdevice.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/dlprojecttemplate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/dlprojecttemplate.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/faceiotoutput.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/faceiotoutput.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/iotaddactionfinal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/iotaddactionfinal.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/iotinvokelambda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/iotinvokelambda.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/iotlambdatestevent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/iotlambdatestevent.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/iotrulefinal01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/iotrulefinal01.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/iotrulefinal02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/iotrulefinal02.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/iotrulefinal03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/iotrulefinal03.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/lambdaiottos3create.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/lambdaiottos3create.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/1-DetectFaces/images/lambdaversion.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/1-DetectFaces/images/lambdaversion.png
--------------------------------------------------------------------------------
/3-SentimentAnalysis/2-IdentifyEmotions/README.md:
--------------------------------------------------------------------------------
1 | # Identify Emotions
2 |
3 | ## Create DynamoDB Table
4 |
5 | - Go to DynamoDB Console at https://console.aws.amazon.com/dynamodb/home?region=us-east-1#
6 | - Click on Create Table.
7 | - Table name: recognize-emotions-your-name
8 | - Primary key: s3key
9 | - Click "Create" to create DynamoDB table.
10 |
11 | ## Create a role for Lambda function
12 |
13 | - Go to IAM Console at https://console.aws.amazon.com/iam/home?region=us-east-1#
14 | - Choose 'Create Role'
15 | - Select “AWS Service”
16 | - Select “Lambda” and choose "Next:Permissions"
17 | - Attach the following policies:
18 | - AmazonDynamoDBFullAccess
19 | - AmazonS3FullAccess
20 | - AmazonRekognitionFullAccess
21 | - CloudWatchFullAccess
22 | - Click "Next: Review"
23 | - Provide a name for the role: rekognizeEmotions
24 | - Choose 'Create role'
25 |
26 | ## Create a Lambda function to Detect Emotions
27 |
28 | 1. Go to Lambda Console at https://console.aws.amazon.com/lambda/home?region=us-east-1
29 | 2. Click 'Create function'
30 | 3. Choose 'Author from scratch'
31 | - Name the function: recognize-emotion-your-name.
32 | - Runtime: Choose Python 2.7
33 | - Role: Choose an existing role
34 | - Existing role: rekognizeEmotions
35 | - Choose Create function
36 | 4. Replace the default script with the script in [rekognize-emotions.py](rekognize-emotions.py).
37 | 5. Update the table name in the lambda function with the name of the DynamoDB table you created earlier.
38 | 6. Next, we need to add the event that triggers this lambda function. This will be an “S3:ObjectCreated” event that fires every time a face is uploaded to the face S3 bucket. Add the S3 trigger from the designer section on the left.
39 | 7. Configure it with the following (a boto3 sketch of this trigger configuration follows this list):
40 | - Bucket name: face-detection-your-name (you created this bucket earlier)
41 | - Event type: Object Created
42 | - Prefix: faces/
43 | - Suffix: .jpg
44 | - Enable trigger: ON (keep the checkbox checked)
45 | 8. Save the lambda function.
46 | 9. Under the 'Actions' menu, choose **Publish**.
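
As a rough boto3 equivalent of the trigger configured above (the function ARN and the 111111111111 account id are placeholders you would substitute):

```
import boto3

s3 = boto3.client('s3')

# Fire the Lambda function for every .jpg created under the faces/ prefix.
s3.put_bucket_notification_configuration(
    Bucket='face-detection-your-name',
    NotificationConfiguration={
        'LambdaFunctionConfigurations': [{
            'LambdaFunctionArn': 'arn:aws:lambda:us-east-1:111111111111:function:recognize-emotion-your-name',
            'Events': ['s3:ObjectCreated:*'],
            'Filter': {'Key': {'FilterRules': [
                {'Name': 'prefix', 'Value': 'faces/'},
                {'Name': 'suffix', 'Value': '.jpg'},
            ]}}
        }]
    }
)
```

Note that S3 must also be allowed to invoke the function; the console trigger adds that Lambda resource-based permission for you, while with boto3 you would call the Lambda add_permission API first.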
47 |
48 | ## View emotions in CloudWatch Dashboard
49 |
50 | - Go to CloudWatch Console at https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#
51 | - Create a dashboard called “sentiment-dashboard-your-name”
52 | - Choose Line in the widget
53 | - Under Custom Namespaces, select “string”, “Metrics with no dimensions”, and then select all metrics.
54 | - Next, set “Auto-refresh” to the smallest interval available, and change the “Period” to whatever works best for you (1 second or 5 seconds)
55 |
56 | NOTE: These metrics will only appear once they have been sent to CloudWatch by the Rekognition Lambda. It may take some time for them to appear after your model is deployed and running locally. If they do not appear, there is a problem somewhere in the pipeline.
57 |
58 | ## Completion
59 | You have successfully created and deployed a face detection project on DeepLens. You also modified the default project to detect faces and perform sentiment analysis on those faces.
60 |
--------------------------------------------------------------------------------
/3-SentimentAnalysis/2-IdentifyEmotions/rekognize-emotions.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import boto3
4 | import urllib
5 | import datetime
6 |
7 | print('Loading function')
8 |
9 | rekognition = boto3.client('rekognition')
10 | cloudwatch = boto3.client('cloudwatch')
11 |
12 |
13 | # --------------- Helper Function to call CloudWatch APIs ------------------
14 |
15 | def push_to_cloudwatch(name, value):
16 | try:
17 | response = cloudwatch.put_metric_data(
18 | Namespace='string',
19 | MetricData=[
20 | {
21 | 'MetricName': name,
22 | 'Value': value,
23 | 'Unit': 'Percent'
24 | },
25 | ]
26 | )
27 | print("Metric pushed: {}".format(response))
28 | except Exception as e:
29 | print("Unable to push to cloudwatch\n e: {}".format(e))
30 | return True
31 |
32 | # --------------- Helper Functions to call Rekognition APIs ------------------
33 |
34 | def detect_faces(bucket, key):
35 | print("Key: {}".format(key))
36 | response = rekognition.detect_faces(Image={"S3Object":
37 | {"Bucket": bucket,
38 | "Name": key}},
39 | Attributes=['ALL'])
40 |
41 | if not response['FaceDetails']:
42 | print ("No Face Details Found!")
43 | return response
44 |
45 | push = False
46 | dynamo_obj = {}
47 | dynamo_obj['s3key'] = key
48 |
49 | for index, item in enumerate(response['FaceDetails'][0]['Emotions']):
50 | print("Item: {}".format(item))
51 | if int(item['Confidence']) > 10:
52 | push = True
53 | dynamo_obj[item['Type']] = str(round(item["Confidence"], 2))
54 | push_to_cloudwatch(item['Type'], round(item["Confidence"], 2))
55 |
56 |     if push:  # push only if at least one emotion was found
57 |         table = boto3.resource('dynamodb').Table('rekognize-faces')  # TODO: replace with your DynamoDB table name
58 | table.put_item(Item=dynamo_obj)
59 |
60 | return response
61 |
62 | # --------------- Main handler ------------------
63 |
64 |
65 | def lambda_handler(event, context):
66 | '''Demonstrates S3 trigger that uses
67 | Rekognition APIs to detect faces, labels and index faces in S3 Object.
68 | '''
69 |
70 | # Get the object from the event
71 | bucket = event['Records'][0]['s3']['bucket']['name']
72 | key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key'].encode('utf8'))
73 | try:
74 | # Calls rekognition DetectFaces API to detect faces in S3 object
75 | response = detect_faces(bucket, key)
76 |
77 | return response
78 | except Exception as e:
79 | print("Error processing object {} from bucket {}. ".format(key, bucket) +
80 | "Make sure your object and bucket exist and your bucket is in the same region as this function.")
81 | raise e
82 |
--------------------------------------------------------------------------------
/3-SentimentAnalysis/README.md:
--------------------------------------------------------------------------------
1 | # Build sentiment analysis system using DeepLens and Amazon Rekognition
2 |
3 | ### Learning Objectives of This Lab
4 | In this lab you will do the following:
5 | - Create an S3 bucket.
6 | - Modify the DeepLens inference lambda function to upload cropped faces to an S3 bucket.
7 | - Deploy the inference lambda function and face detection model to DeepLens.
8 | - Create a lambda function to call Amazon Rekognition and identify emotions of incoming faces.
9 | - Create a DynamoDB table to store recognized emotions.
10 | - Analyze results using CloudWatch.
11 |
12 | 
13 |
14 | ## Modules
15 |
16 | This lab has the following modules:
17 |
18 | - [Detect Faces](1-DetectFaces)
19 | - [Identify Emotions](2-IdentifyEmotions)
20 |
21 | ## Clean Up
22 | After completing the labs in this workshop, ensure you delete all the resources created in your AWS account during the labs so that no further costs are incurred.
23 |
--------------------------------------------------------------------------------
/3-SentimentAnalysis/assets/sentiments.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/3-SentimentAnalysis/assets/sentiments.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/Architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/Architecture.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/README.md:
--------------------------------------------------------------------------------
1 | # Lab 1 - Face Detection Using Deep Lens
2 |
3 | ## Create Your Project
4 |
5 | 1. Using your browser, open the AWS DeepLens console at https://console.aws.amazon.com/deeplens/.
6 | 2. Choose Projects, then choose Create new project.
7 | 3. On the Choose project type screen
8 | - Choose Use a project template, then choose Face detection.
9 |
10 | 
11 |
12 | - Scroll to the bottom of the screen, then choose Next.
13 | 4. On the Specify project details screen
14 | - In the Project information section:
15 | - Either accept the default name for the project, or type a name you prefer.
16 | - Either accept the default description for the project, or type a description you prefer.
17 | - In the Project content section:
18 | - Model—make sure the model is deeplens-face-detection. If it isn't, remove the current model then choose Add model. From the list of models, choose deeplens-face-detection.
19 | - Function—make sure the function is deeplens-face-detection. If it isn't, remove the current function then choose Add function. From the list of functions, choose deeplens-face-detection.
20 |
21 | 
22 |
23 | - Choose Create.
24 |
25 | This returns you to the Projects screen where the project you just created is listed with your other projects.
26 |
27 | ## Deploy your project
28 |
29 | Next, you will deploy the Face Detection project you just created.
30 |
31 | 1. From the DeepLens console, on the Projects screen, choose the radio button to the left of your project name, then choose Deploy to device.
32 |
33 | 
34 |
35 | 2. On the Target device screen, from the list of AWS DeepLens devices, choose the radio button to the left of the device that you want to deploy this project to. An AWS DeepLens device can have only one project deployed to it at a time.
36 |
37 | 
38 |
39 | 3. Choose Review.
40 |
41 | This will take you to the Review and deploy screen.
42 |
43 | If a project is already deployed to the device, you will see an error message
44 | "There is an existing project on this device. Do you want to replace it?
45 | If you Deploy, AWS DeepLens will remove the current project before deploying the new project."
46 |
47 | 4. On the Review and deploy screen, review your project and choose Deploy to deploy the project.
48 |
49 | This will take you to the device screen, which shows the progress of your project deployment.
50 |
51 | ## View your project output
52 |
53 | 1. You need mplayer to view the project output from the DeepLens device. For Windows, follow the installation instructions at this link: http://www.mplayerhq.hu/design7/dload.html
54 | For Mac, install mplayer by using the command below in a terminal window:
55 |
56 | ```
57 | brew install mplayer
58 | ```
59 |
60 | 2. Wait until the project is deployed and you see the message "Deployment of project Face-detection, version 0 succeeded". After the project is successfully deployed, use the command below from a terminal window to view the project output stream:
61 |
62 | ```
63 | ssh aws_cam@<your-device-ip> cat /tmp/results.mjpeg | mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 -
64 | ```
65 | Example:
66 | ```
67 | ssh aws_cam@192.168.86.120 cat /tmp/results.mjpeg | mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 -
68 | ```
69 |
70 | ## Crop face and send to cloud
71 |
72 | In this section you will update the lambda function that is part of the face detection project to crop each detected face and send it as an IoT message. You will then create a rule in IoT to save that image to S3 if the confidence level is above 80%.
73 |
74 | 1. Using your browser, open the AWS Lambda console at https://console.aws.amazon.com/lambda/home?region=us-east-1#/functions.
75 | 2. In the search box type deeplens-face-detection to find the lambda function for your project and click on the name of your lambda function.
76 | 3. Replace code in lambda function with code from [facecrop.py](facecrop.py) and click Save.
77 | 4. Click on Action and then Publish a new version.
78 | 5. Enter version description and click Publish.
79 | 6. Open the AWS DeepLens console at https://console.aws.amazon.com/deeplens/.
80 | 7. Choose Projects, then click on the project Face-detection.
81 | 8. On project details screen, click on Edit.
82 | 9. On Edit project screen, under Project content, click on Function to expand that section.
83 | 
84 | 10. Under Version, select the current version that you just published.
85 | 11. Click Save. This will take you back to your project screen.
86 |
87 | ### Deploy updated project
88 |
89 | Next you will deploy the Face Detection project you just updated.
90 |
91 | 1. From the DeepLens console, on the Projects screen, choose the radio button to the left of your project name, then choose Deploy to device.
92 |
93 | 2. On the Target device screen, from the list of AWS DeepLens devices, choose the radio button to the left of the device that you want to deploy this project to. An AWS DeepLens device can have only one project deployed to it at a time.
94 |
95 | 3. Choose Review.
96 |
97 | This will take you to the Review and deploy screen.
98 |
99 | Since the project is already deployed to the device, you will see an error message
100 | "There is an existing project on this device. Do you want to replace it?
101 | If you Deploy, AWS DeepLens will remove the current project before deploying the new project."
102 |
103 | 4. On the Review and deploy screen, review your project and choose Deploy to deploy the project.
104 |
105 | This will take you to the device screen, which shows the progress of your project deployment. Wait until the project is deployed and you see the message "Deployment of project Face-detection, version 1 succeeded".
106 |
107 | ### View face detection and cropped face output in IoT
108 |
109 | 1. Using your browser, open the AWS IoT console at https://console.aws.amazon.com/iot/home?region=us-east-1#/dashboard.
110 | 2. Click on Test and enter the subscription topic for your DeepLens device, which is in the format "$aws/things/deeplens_c2422202-e22f-4220-a333-9456789154f5/infer"
111 | 3. Click on Subscribe to topic. That will take you to the screen for the topic, where you can see all incoming messages.
112 | 4. As DeepLens detects a face, it will crop the face and send the image as a JSON message to IoT.
113 | 
114 |
115 | ## Save Images from IoT messages to S3
116 |
117 | In this section you will create resources including an S3 bucket, a Lambda function and an IoT rule to invoke the lambda function with image data if the confidence level is above 80%.
118 |
119 | ### Create S3 Bucket
120 |
121 | _This is the bucket in which cropped faces coming from DeepLens will be stored._
122 |
123 | 1. Go to S3 in AWS Console at https://s3.console.aws.amazon.com/s3/home?region=us-east-1.
124 | 2. Click "Create bucket", and enter the following details:
125 | * Bucket name: _[Your name or username]-dl-faces_
126 | * Region: US East (N. Virginia)
127 | 3. Click "Create".
128 |
129 | ### Create an IAM Role
130 |
131 | _This will allow our Lambda function (created in later steps) to access our S3 bucket, and other services that will be used in later parts of this workshop._
132 |
133 | 1. Go to IAM in AWS Console at https://console.aws.amazon.com/iam/home?region=us-east-1#/home
134 | 2. On the left-hand side of the screen, click "Roles", and then click “Create Role”.
135 | 3. Click “AWS service” and click “Lambda”.
136 | 4. Click “Next: Permissions” (at the bottom of the screen).
137 | 5. In the Search field, type _s3_, and then select “AmazonS3FullAccess” (i.e. click the checkbox to the left of “AmazonS3FullAccess”; see screenshot below).
138 | 6. Repeat for other required services as follows:
139 | - In the Search field, type _step_, and then select “AWSStepFunctionsFullAccess”.
140 | - In the Search field, type _rek_, and then select “AmazonRekognitionFullAccess”.
141 | - In the Search field, type _dyn_, and then select “AmazonDynamoDBFullAccess”.
142 | - In the Search field, type _ses_, and then select “AmazonSESFullAccess”.
143 | - In the Search field, type _cog_, and then select “AmazonCognitoPowerUser”.
144 | 7. Click “Next: Review” (at the bottom of the screen).
145 | 8. In the “Role name” text box, type DL-Lambda-Role
146 | 9. Click “Create role” (at the bottom of the screen).
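
For reference, here is a boto3 sketch that creates the same role and attaches the same managed policies (a sketch, assuming your credentials have IAM permissions; the role name matches the one above):

```
import boto3
import json

iam = boto3.client('iam')

# Trust policy that lets Lambda assume the role.
trust_policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": "lambda.amazonaws.com"},
        "Action": "sts:AssumeRole"
    }]
}

iam.create_role(RoleName='DL-Lambda-Role',
                AssumeRolePolicyDocument=json.dumps(trust_policy))

# The six managed policies selected in the console steps above.
for policy in ['AmazonS3FullAccess', 'AWSStepFunctionsFullAccess',
               'AmazonRekognitionFullAccess', 'AmazonDynamoDBFullAccess',
               'AmazonSESFullAccess', 'AmazonCognitoPowerUser']:
    iam.attach_role_policy(RoleName='DL-Lambda-Role',
                           PolicyArn='arn:aws:iam::aws:policy/' + policy)
```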
147 |
148 | ### Create Lambda Function
149 | 1. Using your browser, open the AWS Lambda console at https://console.aws.amazon.com/lambda/home?region=us-east-1#/.
150 | 2. On Create function screen, choose Author from scratch, and complete following:
151 | - Name: dlworkshop-iottos3.
152 | - Runtime: Python 3.6.
153 | - Role: Choose an existing role.
154 | - Existing role*: Select the IAM role DL-Lambda-Role that you just created.
155 | - Click Create function.
156 | 3. Replace code with code from [iottos3.py](iottos3.py).
157 | 4. Update the following line with the name of the S3 bucket _[Your name or username]-dl-faces_ that you created above to store images, then click Save.
158 | ```
159 | bucket = "YOUR-S3-BUCKET-NAME"
160 | ```
161 | 5. Click on the drop-down "Select a test event..." and click "Configure test events"
162 | - Leave the defaults as Create new test event and Event template Hello World
163 | - Event name: Test
164 | - Replace JSON in the text box with JSON below:
165 | ```
166 | { "face": "facedata" }
167 | ```
168 | 
169 | - Click Create
170 | 6. Click on Test.
171 | 7. Your lambda function should return the URL of the file it just created in S3.
172 | 8. Go to the S3 bucket _[Your name or username]-dl-faces_ and verify that you can see the file the lambda function just created.
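
You can also run the same test from your laptop with boto3 (a sketch, assuming the function name above and that your CLI credentials can invoke Lambda):

```
import boto3
import json

lam = boto3.client('lambda', region_name='us-east-1')

# Send the same test event that the console test used.
resp = lam.invoke(FunctionName='dlworkshop-iottos3',
                  Payload=json.dumps({'face': 'facedata'}))

# The function returns the URL of the object it wrote to S3.
print(resp['Payload'].read())
```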
173 |
174 | ### Create IoT Rule
175 | 1. Using your browser, open the AWS IoT console at https://console.aws.amazon.com/iot/home?region=us-east-1#/dashboard.
176 | 2. Click on Act and click "Create a rule".
177 | 3. On the Create a rule screen, complete the following (a boto3 sketch of the finished rule follows this list):
178 | - Name: FaceDetection
179 | 
180 | - Attribute: face
181 | - Topic filter: Enter the name of IoT topic for your Deeplens device (example: $aws/things/deeplens_c1234562-ea5f-47e0-abd5-96f8123456f5/infer)
182 | - Condition: confidence > 0.8
183 | 
184 | - Under Set one or more actions:
185 | - Click Add action
186 | - Under Select an action, choose Invoke a Lambda function passing the message.
187 | 
188 | - Scroll to the bottom of the screen and click Configure action.
189 | - On Configure action screen, under Function name, select the dlworkshop-iottos3 lambda function.
190 | 
191 | - Click Add action
192 | 
193 | - Click Create rule.
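
For reference, here is a boto3 sketch of the finished rule (the DeepLens topic and the 111111111111 account id in the function ARN are placeholders):

```
import boto3

iot = boto3.client('iot', region_name='us-east-1')

# Forward messages whose confidence exceeds 0.8 to the Lambda function.
iot.create_topic_rule(
    ruleName='FaceDetection',
    topicRulePayload={
        'sql': "SELECT face FROM '$aws/things/deeplens_YOUR_DEVICE_ID/infer' "
               "WHERE confidence > 0.8",
        'actions': [{'lambda': {
            'functionArn': 'arn:aws:lambda:us-east-1:111111111111:function:dlworkshop-iottos3'
        }}]
    }
)
```

As with an S3 trigger, IoT also needs a Lambda resource-based permission to invoke the function; the console adds it for you when you create the rule there.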
194 |
195 | ### Verify DeepLens is sending faces to S3
196 | Go to the S3 bucket _[Your name or username]-dl-faces_ and you should now see images coming from DeepLens.
197 |
198 | ## Completion
199 | You have successfully created and deployed a face detection project on DeepLens. You also modified the default project so that when DeepLens detects a human face, it crops the face and stores it as an image in S3. In the next activity, [Rekognition](../2-Rekognition), you will learn how Amazon Rekognition provides deep learning-based image and video analysis capabilities, including face verification, in the cloud.
200 |
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/facecrop.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright Amazon AWS DeepLens, 2017.
3 | #
4 |
5 | import os
6 | import greengrasssdk
7 | from threading import Timer
8 | import time
9 | import awscam
10 | import cv2
11 | from threading import Thread
12 | import base64
13 |
14 | # Creating a greengrass core sdk client
15 | client = greengrasssdk.client('iot-data')
16 |
17 | # The information exchanged between IoT and the cloud has
18 | # a topic and a message body.
19 | # This is the topic that this code uses to send messages to cloud
20 | iotTopic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
21 |
22 | def cropFace(img, x, y, w, h):
23 |
24 | #Crop face
25 | cimg = img[y:y+h, x:x+w]
26 |
27 | #Convert to jpeg
28 | ret,jpeg = cv2.imencode('.jpg', cimg)
29 | face = base64.b64encode(jpeg.tobytes())
30 |
31 | return face
32 |
33 | ret, frame = awscam.getLastFrame()
34 | ret,jpeg = cv2.imencode('.jpg', frame)
35 | Write_To_FIFO = True
36 | class FIFO_Thread(Thread):
37 | def __init__(self):
38 | ''' Constructor. '''
39 | Thread.__init__(self)
40 |
41 | def run(self):
42 | fifo_path = "/tmp/results.mjpeg"
43 | if not os.path.exists(fifo_path):
44 | os.mkfifo(fifo_path)
45 | f = open(fifo_path,'w')
46 | client.publish(topic=iotTopic, payload="Opened Pipe")
47 | while Write_To_FIFO:
48 | try:
49 | f.write(jpeg.tobytes())
50 | except IOError as e:
51 | continue
52 |
53 | def greengrass_infinite_infer_run():
54 | try:
55 | modelPath = "/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml"
56 | modelType = "ssd"
57 | input_width = 300
58 | input_height = 300
59 | prob_thresh = 0.25
60 | results_thread = FIFO_Thread()
61 | results_thread.start()
62 |
63 | # Send a starting message to IoT console
64 | client.publish(topic=iotTopic, payload="Face detection starts now")
65 |
66 | # Load model to GPU (use {"GPU": 0} for CPU)
67 | mcfg = {"GPU": 1}
68 | model = awscam.Model(modelPath, mcfg)
69 | client.publish(topic=iotTopic, payload="Model loaded")
70 | ret, frame = awscam.getLastFrame()
71 | if ret == False:
72 | raise Exception("Failed to get frame from the stream")
73 |
74 |         yscale = float(frame.shape[0]) / input_height  # convert before dividing; int/int floors in Python 2
75 |         xscale = float(frame.shape[1]) / input_width
76 |
77 | doInfer = True
78 | while doInfer:
79 | # Get a frame from the video stream
80 | ret, frame = awscam.getLastFrame()
81 | # Raise an exception if failing to get a frame
82 | if ret == False:
83 | raise Exception("Failed to get frame from the stream")
84 |
85 |
86 | # Resize frame to fit model input requirement
87 | frameResize = cv2.resize(frame, (input_width, input_height))
88 |
89 | # Run model inference on the resized frame
90 | inferOutput = model.doInference(frameResize)
91 |
92 |
93 | # Output inference result to the fifo file so it can be viewed with mplayer
94 | parsed_results = model.parseResult(modelType, inferOutput)['ssd']
95 | label = '{'
96 | for obj in parsed_results:
97 | if obj['prob'] < prob_thresh:
98 | break
99 | xmin = int( xscale * obj['xmin'] ) + int((obj['xmin'] - input_width/2) + input_width/2)
100 | ymin = int( yscale * obj['ymin'] )
101 | xmax = int( xscale * obj['xmax'] ) + int((obj['xmax'] - input_width/2) + input_width/2)
102 | ymax = int( yscale * obj['ymax'] )
103 |
104 | #Crop face
105 | ################
106 | client.publish(topic=iotTopic, payload = "cropping face")
107 | try:
108 | cimage = cropFace(frame, xmin, ymin, xmax-xmin, ymax-ymin)
109 | lblconfidence = '"confidence" : ' + str(obj['prob'])
110 | client.publish(topic=iotTopic, payload = '{ "face" : "' + cimage + '", ' + lblconfidence + '}')
111 | except Exception as e:
112 | msg = "Crop image failed: " + str(e)
113 | client.publish(topic=iotTopic, payload=msg)
114 | client.publish(topic=iotTopic, payload = "Crop face complete")
115 | ################
116 |
117 |
118 | cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 4)
119 | label += '"{}": {:.2f},'.format(str(obj['label']), obj['prob'] )
120 | label_show = '{}: {:.2f}'.format(str(obj['label']), obj['prob'] )
121 | cv2.putText(frame, label_show, (xmin, ymin-15),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 165, 20), 4)
122 | label += '"null": 0.0'
123 | label += '}'
124 | #client.publish(topic=iotTopic, payload = label)
125 | global jpeg
126 | ret,jpeg = cv2.imencode('.jpg', frame)
127 |
128 | except Exception as e:
129 | msg = "Test failed: " + str(e)
130 | client.publish(topic=iotTopic, payload=msg)
131 |
132 | # Asynchronously schedule this function to be run again in 15 seconds
133 | Timer(15, greengrass_infinite_infer_run).start()
134 |
135 |
136 | # Execute the function above
137 | greengrass_infinite_infer_run()
138 |
139 |
140 | # This is a dummy handler and will not be invoked
141 | # Instead the code above will be executed in an infinite loop for our example
142 | def function_handler(event, context):
143 | return
144 |
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/dlprojectcontent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/dlprojectcontent.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/dlprojecthome.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/dlprojecthome.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/dlprojecttargetdevice.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/dlprojecttargetdevice.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/dlprojecttemplate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/dlprojecttemplate.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/faceiotoutput.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/faceiotoutput.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/iotaddactionfinal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/iotaddactionfinal.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/iotinvokelambda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/iotinvokelambda.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/iotlambdatestevent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/iotlambdatestevent.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/iotrulefinal01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/iotrulefinal01.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/iotrulefinal02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/iotrulefinal02.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/iotrulefinal03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/iotrulefinal03.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/lambdaiottos3create.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/lambdaiottos3create.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/images/lambdaversion.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/1-FaceDetection/images/lambdaversion.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/1-FaceDetection/iottos3.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | import base64
3 | import time
4 |
5 | def lambda_handler(event, context):
6 |
7 | bucket = "YOUR-S3-BUCKET-NAME"
8 |
9 | face = base64.b64decode(event['face'])
10 |
11 | s3 = boto3.client('s3')
12 |
13 | file_name = 'image-'+time.strftime("%Y%m%d-%H%M%S")+'.jpg'
14 |
15 |     response = s3.put_object(ACL='public-read', Body=face, Bucket=bucket, Key=file_name)
16 |
17 | file_url = 'https://s3.amazonaws.com/' + bucket + '/' + file_name
18 |
19 | return file_url
20 |
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Rekognition (Deep learning-based image and video analysis)
3 |
4 | Amazon Rekognition provides deep learning-based image and video analysis. Its key features include:
5 | - Object, scene and activity detection
6 | 
7 | - Facial recognition
8 | 
9 | - Facial analysis
10 | 
11 | - Person tracking
12 | 
13 | - Unsafe content detection
14 | - Celebrity recognition
15 | 
16 | - Text in images
17 | 
18 |
19 | You will use Rekognition for face verification by saving faces to a collection and comparing new faces against the ones stored there.
20 |
21 | ## Rekognition Introduction
22 | 1. In the AWS Console, go to Rekognition at https://console.aws.amazon.com/rekognition/home?region=us-east-1#/.
23 | 2. In the left navigation, click on Object and scene detection and notice how Rekognition returns labels for objects and activities detected in the image.
24 | 3. In the left navigation, click on Face comparison to see how Rekognition lets you compare two faces and gives a confidence score on how closely they match.
25 | 4. You can explore other features, including Image moderation, Facial analysis, Celebrity recognition and Text in image.
26 |
27 | ## Rekognition Collections
28 | Amazon Rekognition can store information about detected faces in server-side containers known as collections. You can use the facial information stored in a collection to search for known faces in images, stored videos and streaming videos. You will use the AWS CLI to create and manage Rekognition collections.
29 |
30 | ### Configure AWS CLI
31 | 1. Install the AWS CLI by following the instructions at the following link: https://docs.aws.amazon.com/cli/latest/userguide/installing.html
32 | 2. Configure the AWS CLI by following the instructions at the following link: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html
33 |
34 | ### Create Rekognition Collection
35 |
36 | _Rekognition will be consulted in order to check whether a face in the image sent by DeepLens is recognized (i.e. whether it exists in our Rekognition collection)._
37 |
38 | 1. On your laptop, either open a terminal window (Mac) or cmd (Windows) in order to use the AWS CLI.
39 | 2. Type the following AWS CLI command to create a Rekognition collection:
40 | ```
41 | aws rekognition create-collection --collection-id "dl-faces" --region us-east-1
42 | ```
43 | 3. Verify that your Rekognition collection has been created:
44 | ```
45 | aws rekognition list-collections --region us-east-1
46 | ```
47 | ### View/Add Faces to the Rekognition Collection
48 |
49 | 1. With the following command, you will see that there are currently no faces in your newly-created collection:
50 | ```
51 | aws rekognition list-faces --collection-id "dl-faces" --region us-east-1
52 | ```
53 | 2. Upload images [jb.jpg](jb.jpg), [andy1.jpg](andy1.jpg) and [andy2.png](andy2.png) to your S3 bucket _[Your name or username]-dl-faces_.
54 |
55 | 3. Add both jb.jpg and andy1.jpg to the Rekognition collection:
56 | ```
57 | aws rekognition index-faces --image "{\"S3Object\":{\"Bucket\":\"[Your name or username]-dl-faces\",\"Name\":\"jb.jpg\"}}" --external-image-id "JB" --collection-id "dl-faces" --detection-attributes "ALL" --region us-east-1
58 | ```
59 | ```
60 | aws rekognition index-faces --image "{\"S3Object\":{\"Bucket\":\"[Your name or username]-dl-faces\",\"Name\":\"andy1.jpg\"}}" --external-image-id "Andy" --collection-id "dl-faces" --detection-attributes "ALL" --region us-east-1
61 | ```
62 | 4. Now list the faces in your collection again and you should see a JSON response with two faces in your Rekognition collection.
63 | ```
64 | aws rekognition list-faces --collection-id "dl-faces" --region us-east-1
65 | ```
66 | 
67 |
68 | ### Search Face in Rekognition Collection
69 | 1. We will now use andy2.png as the source image to search the faces stored in the Rekognition collection. You should see a JSON response similar to the one below, finding Andy's face with a 99.9% confidence level.
70 | ```
71 | aws rekognition search-faces-by-image --image "{\"S3Object\":{\"Bucket\":\"[Your name or username]-dl-faces\",\"Name\":\"andy2.png\"}}" --collection-id "dl-faces" --region us-east-1
72 | ```
73 | 
74 |
75 | ### Delete Face from Rekognition Collection
76 |
77 | 1. To delete a face from your collection, use the FaceId that you get from the list-faces command.
78 | ```
79 | aws rekognition delete-faces --collection-id "dl-faces" --face-ids "FACE-ID-To-DELETE, GET FaceID FROM list-faces"
80 | ```
81 |
82 | ## Completion
83 | You have successfully created a Rekognition collection, stored faces in the collection and searched for faces by providing an image from an S3 bucket. You will use this collection in the next modules to verify incoming faces from DeepLens. In the next module, [Approval Workflow](../3-ApprovalWorkflow), you will learn how to build an approval workflow before sending incoming images from DeepLens to the Rekognition collection.
84 |
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/andy1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/andy1.jpg
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/andy2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/andy2.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/Burner_Accounts.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/Burner_Accounts.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/Lambda_IAM_Screen1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/Lambda_IAM_Screen1.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/Lambda_IAM_Screen2_Policy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/Lambda_IAM_Screen2_Policy.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/Region.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/Region.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/celebrity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/celebrity.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/facial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/facial.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/facialanalysis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/facialanalysis.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/listfaces.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/listfaces.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/objectscene.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/objectscene.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/persontracking.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/persontracking.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/searchfacebyimage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/searchfacebyimage.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/images/text.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/images/text.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/2-Rekognition/jb.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/2-Rekognition/jb.jpg
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/APIGatewayToStepFunctions-respond-swagger-apigateway.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | swagger: "2.0"
3 | info:
4 | version: "2018-02-04T12:21:02Z"
5 | title: "APIGatewayToStepFunctions"
6 | host: "1o1bhkc8r9.execute-api.us-east-1.amazonaws.com"
7 | basePath: "/respond"
8 | schemes:
9 | - "https"
10 | paths:
11 | /fail:
12 | get:
13 | consumes:
14 | - "application/json"
15 | produces:
16 | - "application/json"
17 | responses:
18 | 200:
19 | description: "200 response"
20 | schema:
21 | $ref: "#/definitions/Empty"
22 | x-amazon-apigateway-integration:
23 | credentials: "arn:aws:iam::111111111111:role/APIGatewayToStepFunctions"
24 | responses:
25 | default:
26 | statusCode: "200"
27 | uri: "arn:aws:apigateway:us-east-1:states:action/SendTaskSuccess"
28 | passthroughBehavior: "when_no_match"
29 | httpMethod: "POST"
30 | requestTemplates:
31 | application/json: "{\n \"output\": \"{\\\"approval\\\":\\\"rejected\\\"\
32 | , \\\"ImageId\\\": \\\"$input.params('ImageId')\\\", \\\"taskToken\\\"\
33 | : \\\"$input.params('taskToken')\\\"}\",\n \n \"taskToken\": \"$input.params('taskToken')\"\
34 | \n}"
35 | type: "aws"
36 | /succeed:
37 | get:
38 | consumes:
39 | - "application/json"
40 | produces:
41 | - "application/json"
42 | responses:
43 | 200:
44 | description: "200 response"
45 | schema:
46 | $ref: "#/definitions/Empty"
47 | headers:
48 | Access-Control-Allow-Origin:
49 | type: "string"
50 | x-amazon-apigateway-integration:
51 | credentials: "arn:aws:iam::111111111111:role/APIGatewayToStepFunctions"
52 | responses:
53 | default:
54 | statusCode: "200"
55 | responseParameters:
56 | method.response.header.Access-Control-Allow-Origin: "'*'"
57 | uri: "arn:aws:apigateway:us-east-1:states:action/SendTaskSuccess"
58 | passthroughBehavior: "when_no_match"
59 | httpMethod: "POST"
60 | requestTemplates:
61 | application/json: "{\n \"output\": \"{\\\"approval\\\":\\\"approved\\\"\
62 | , \\\"name\\\": \\\"$input.params('name')\\\", \\\"ImageId\\\": \\\"$input.params('ImageId')\\\
63 | \", \\\"taskToken\\\": \\\"$input.params('taskToken')\\\"}\",\n \n \
64 | \ \"taskToken\": \"$input.params('taskToken')\"\n}"
65 | type: "aws"
66 | options:
67 | consumes:
68 | - "application/json"
69 | produces:
70 | - "application/json"
71 | responses:
72 | 200:
73 | description: "200 response"
74 | schema:
75 | $ref: "#/definitions/Empty"
76 | headers:
77 | Access-Control-Allow-Origin:
78 | type: "string"
79 | Access-Control-Allow-Methods:
80 | type: "string"
81 | Access-Control-Allow-Headers:
82 | type: "string"
83 | x-amazon-apigateway-integration:
84 | responses:
85 | default:
86 | statusCode: "200"
87 | responseParameters:
88 | method.response.header.Access-Control-Allow-Methods: "'GET,OPTIONS'"
89 | method.response.header.Access-Control-Allow-Headers: "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'"
90 | method.response.header.Access-Control-Allow-Origin: "'*'"
91 | passthroughBehavior: "when_no_match"
92 | requestTemplates:
93 | application/json: "{\"statusCode\": 200}"
94 | type: "mock"
95 | definitions:
96 | Empty:
97 | type: "object"
98 | title: "Empty Schema"
99 |
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/README.md:
--------------------------------------------------------------------------------
1 | # Approval Workflow (Step Functions, API GW and Approval Website)
2 |
3 | In this module you will learn how to build an approval workflow before sending incoming images from DeepLens to the Rekognition collection.
4 |
5 | ## Create IAM Roles for API Gateway and Step Functions
6 |
7 | ### Create IAM Role for API Gateway to Step Functions
8 |
9 | _As seen in the architecture diagram, API gateway will be used to invoke the 2nd Step Function. We need to create an IAM role to allow this to happen._
10 |
11 | 1. To access the IAM Console: In the AWS Console, click on “Services” in the top, left-hand corner of the screen, and click on “IAM” (you can find it by typing _iam_ into the search field at the top of the screen).
12 | 2. On the left-hand side of the screen, click "Roles", and then click “Create Role”.
13 | 3. Click “AWS service” and click “API Gateway”.
14 | 4. Click “Next: Permissions” (at the bottom of the screen).
15 | 5. Click “Next: Review” (at the bottom of the screen).
16 | 6. In the “Role name” text box, type _APIGatewayToStepFunctions_
17 | 7. Click “Create role” (at the bottom of the screen).
18 | 8. On the left-hand side of the screen, click "Roles", search for your role (APIGatewayToStepFunctions) and then click on that role.
19 | 9. On the Permissions tab, choose "Attach Policy".
20 | 10. On the Attach Policy page, type _step_ into the search field and click the checkbox to the left of "AWSStepFunctionsFullAccess".
21 | 11. Click "Attach Policy".
22 |
23 |
24 | ### IAM Role for Step Functions to Lambda
25 |
26 | _As seen in the architecture diagram, Step Functions will be used to invoke the PostApproval Lambda function. We need to create an IAM role to allow this to happen._
27 |
28 | 1. In the IAM Console, on the left-hand side of the screen, click "Roles", and then click “Create Role”
29 | 2. Click “AWS service” and click “Step Functions”.
30 | 3. Click “Next: Permissions” (at the bottom of the screen).
31 | 4. Click “Next: Review” (at the bottom of the screen).
32 | 5. In the “Role name” text box, type _StepFunctionstoLambda_
33 | 6. Click “Create role” (at the bottom of the screen).
34 |
35 | ## Configure Step Functions
36 |
37 | _We will use the AWS Step Functions service to define and control our overall workflow. (For more information: https://aws.amazon.com/step-functions)_
38 |
39 | 1. Go to Step Functions in the AWS Console at https://console.aws.amazon.com/states/home?region=us-east-1#/tasks
40 | 2. Click "Create new activity".
41 | 3. Type ManualStep in the "Activity Name" textbox, and then click "Create Activity".
42 | 4. In the left navigation, click on Dashboard and then click "Create a state machine".
43 | 5. Choose "Author from scratch" and for the name, type _MLApprovalProcess_.
44 | 6. For the IAM Role, choose the _StepFunctionstoLambda_ role that you created above.
45 | 7. In the "Code" section, paste the following code and **replace all instances of 111111111111 with your own AWS account ID**:
46 | ```
47 | {
48 | "Comment": "Image review process!",
49 | "StartAt": "ManualApproval",
50 | "States": {
51 | "ManualApproval": {
52 | "Type": "Task",
53 | "Resource": "arn:aws:states:us-east-1:111111111111:activity:ManualStep",
54 | "Next": "PostApproval"
55 | },
56 | "PostApproval": {
57 | "Type": "Task",
58 | "Resource": "arn:aws:lambda:us-east-1:111111111111:function:PostApproval",
59 | "End": true
60 | }
61 | }
62 | }
63 | ```
64 |
65 | 
66 |
67 | ### Test Step Functions
68 |
69 | 1. While still in the Step Functions console, on the left-hand side of the screen, click “Dashboard”.
70 | 2. Click directly on the state machine you just created (not on the radio button next to it) and then click “New execution”.
71 | 3. Enter a name for the execution test and then click “Start execution” (see screenshot below).
72 |
73 | 
74 |
75 | 4. The execution will simply enter the ManualApproval activity and wait there for now:
76 |
77 | 
78 |
79 | 5. Now click “Stop execution” in the top, right-hand corner of the screen.
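To make the moving parts concrete, here is a minimal boto3 sketch of the same flow: start an execution, poll the ManualStep activity for its task token, and complete the task the way the API Gateway integration will later do via SendTaskSuccess. The ARNs and input values are placeholders for your own.

```
import json
import uuid
import boto3

sfn = boto3.client('stepfunctions', region_name='us-east-1')

# Placeholders: substitute your own account ID.
STATE_MACHINE_ARN = 'arn:aws:states:us-east-1:111111111111:stateMachine:MLApprovalProcess'
ACTIVITY_ARN = 'arn:aws:states:us-east-1:111111111111:activity:ManualStep'

# Start an execution; it pauses at the ManualApproval state.
sfn.start_execution(
    stateMachineArn=STATE_MACHINE_ARN,
    name=str(uuid.uuid4()),
    input=json.dumps({'bucket': 'example-bucket', 'imageName': 'example.jpg'})
)

# Poll the activity to obtain the task token for the paused execution.
task = sfn.get_activity_task(activityArn=ACTIVITY_ARN, workerName='docs-example')

# Completing the task lets the state machine advance to PostApproval.
if task.get('taskToken'):
    sfn.send_task_success(
        taskToken=task['taskToken'],
        output=json.dumps({'approval': 'approved'})
    )
```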
80 |
81 | ## Configure API Gateway
82 |
83 | 1. Save the following Swagger file to your computer: [API Gateway Swagger File](./APIGatewayToStepFunctions-respond-swagger-apigateway.yaml), open the file for editing, and substitute all instances of the string **111111111111** with your own AWS account number.
84 | 2. In the AWS Console, click on “Services” in the top, left-hand corner of the screen, and click on “API Gateway” (you can find it by typing api into the search field at the top of the screen).
85 | 3. Click "Get Started", and then click "OK".
86 | 4. Select the "Import from Swagger" option (see screenshot below).
87 |
88 | 
89 |
90 | 5. Click “Select Swagger File” and upload the swagger file you created in step 1 above.
91 | 6. Click "Import" (in the bottom, right-hand corner of the screen).
92 | 7. In the "Resources" section for your API, click the "Actions" drop-down menu and select "Deploy API". Enter the following details:
93 | * Deployment stage: [New Stage]
94 | * Stage name: respond
95 | 8. Click "Deploy".
96 | 9. Note the "Invoke URL" that gets created for this API. Copy this to a text file because you will need it in a later step (see example screenshot below).
97 |
98 | 
99 |
100 | 10. Paste the invoke URL into a browser tab to ensure that the API is responding. For now it will just return the error {"message":"Missing Authentication Token"}, which is expected at this point because the request has not gone through the expected end-to-end workflow.
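If you want to exercise the API beyond the root URL, the /succeed resource defined in the Swagger file accepts name, ImageId and taskToken as query parameters. A hypothetical smoke test follows; the invoke URL and all parameter values are placeholders, and a made-up task token will be rejected by Step Functions, but the call confirms routing:

```
import urllib.parse
import urllib.request

# Placeholder invoke URL; use the one you copied in step 9.
invoke_url = 'https://YOUR-APIGW-ENDPOINT.execute-api.us-east-1.amazonaws.com/respond'

params = urllib.parse.urlencode({
    'name': 'Jane',
    'ImageId': 'example-image-id',
    'taskToken': 'TASK-TOKEN-FROM-ACTIVITY',
})

with urllib.request.urlopen(f'{invoke_url}/succeed?{params}') as resp:
    print(resp.status, resp.read().decode())
```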
101 |
102 | ## Approval Verification Website
103 |
104 | ### Create Cognito Identity Pool
105 |
106 | _Cognito will be used to assign temporary credentials for securely accessing AWS resources used in this workshop. (For more information: https://aws.amazon.com/cognito/)_
107 |
108 | 1. In the AWS Console, click on “Services” in the top, left-hand corner of the screen, and click on “Cognito” (you can find it by typing _cog_ into the search field at the top of the screen).
109 | 2. Click “Manage Federated Identities”, and then click “Create new identity pool”.
110 | 3. For “Identity pool name”, enter _ML_ID_Pool_
111 | 4. Select “Enable access to unauthenticated identities” from the “Unauthenticated identities” collapsible section.
112 | 5. Click “Create Pool”.
113 | 6. In the screen that appears, click “Allow” (in the bottom, right-hand corner of the screen).
114 | 7. Note the identity pool ID that is displayed in the center of the screen (please see the following example screenshot).
115 |
116 | 
117 |
118 | 8. Copy that into a text file because you will use it in a later step.
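The approval page will use this pool ID to obtain temporary AWS credentials. The page itself does this with the AWS JavaScript SDK; the equivalent unauthenticated flow in boto3 looks roughly like the sketch below (the pool ID is a placeholder):

```
import boto3
from botocore import UNSIGNED
from botocore.config import Config

# GetId / GetCredentialsForIdentity are unsigned, unauthenticated calls.
cognito = boto3.client('cognito-identity', region_name='us-east-1',
                       config=Config(signature_version=UNSIGNED))

identity = cognito.get_id(
    IdentityPoolId='us-east-1:00000000-0000-0000-0000-000000000000')
creds = cognito.get_credentials_for_identity(IdentityId=identity['IdentityId'])

# Temporary keys scoped by the pool's unauthenticated IAM role.
print(creds['Credentials']['AccessKeyId'])
```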
119 |
120 | ### Update the Cognito IAM Role to Allow Access to AWS Resources
121 |
122 | 1. In the AWS Console, click on “Services” in the top, left-hand corner of the screen, and click on “IAM” (you can find it by typing _iam_ into the search field at the top of the screen).
123 | 2. On the left-hand side of the screen, click "Roles".
124 | 3. In your list of roles, click on “Cognito_ML_ID_PoolUnauth_Role”, and click “Attach policy”.
125 | 4. In the Search field, type _s3_, and then select “AmazonS3FullAccess” (i.e. click the checkbox to the left of “AmazonS3FullAccess”).
126 | 5. In the Search field, type _step_, and then select “AWSStepFunctionsFullAccess”.
127 | 6. In the Search field, type _rek_, and then select “AmazonRekognitionFullAccess”.
128 | 7. In the Search field, type _dyn_, and then select “AmazonDynamoDBFullAccess”.
129 | 8. In the Search field, type _ses_, and then select “AmazonSESFullAccess”.
130 | 9. In the Search field, type _api_, and then select “AmazonAPIGatewayInvokeFullAccess”.
131 | 10. Click “Attach policy” (at the bottom, right-hand corner of the screen).
132 | 11. Repeat steps 1 to 10 for the “Cognito_ML_ID_PoolAuth_Role”.
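If you prefer to script these attachments, a boto3 sketch covering both roles (the role names assume the pool name used above):

```
import boto3

iam = boto3.client('iam')

policies = [
    'AmazonS3FullAccess',
    'AWSStepFunctionsFullAccess',
    'AmazonRekognitionFullAccess',
    'AmazonDynamoDBFullAccess',
    'AmazonSESFullAccess',
    'AmazonAPIGatewayInvokeFullAccess',
]

for role in ('Cognito_ML_ID_PoolUnauth_Role', 'Cognito_ML_ID_PoolAuth_Role'):
    for policy in policies:
        iam.attach_role_policy(RoleName=role,
                               PolicyArn='arn:aws:iam::aws:policy/' + policy)
```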
133 |
134 | ### Create S3 Bucket for Static Website Hosting
135 |
136 | _We will use a static website to host a web-page that will be used for approving unrecognized faces to be added to our Rekognition collection._
137 |
138 | 1. In the AWS Console, click on “Services” in the top, left-hand corner of the screen, and click on “S3” (you can find it by typing _s3_ into the search field at the top of the screen).
139 | 2. Click "Create bucket", and enter the following details:
140 | * Bucket name: [Your name or username]-dl-web
141 | * Region: US East (N. Virginia)
142 | 3. Click "Create".
143 | 4. Now, in your list of S3 buckets, click on the bucket you just created (i.e. [Your name or username]-dl-web).
144 | 5. Click on the "Properties" tab and click on "Static website hosting".
145 | 6. Select the "Use this bucket to host a website" option.
146 | 7. In the "Index document" textbox, type index.html
147 | 8. Click "Save".
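The console steps above are roughly equivalent to the following boto3 calls (the bucket name is a placeholder):

```
import boto3

s3 = boto3.client('s3', region_name='us-east-1')
bucket = 'YOURNAME-dl-web'  # placeholder

s3.create_bucket(Bucket=bucket)  # us-east-1 needs no LocationConstraint
s3.put_bucket_website(
    Bucket=bucket,
    WebsiteConfiguration={'IndexDocument': {'Suffix': 'index.html'}}
)
```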
148 |
149 | ### Create the Approval Static Web Page
150 |
151 | _The document at the following link contains the HTML code for the static web page that will be used for allowing manual approval of images to be added to the Rekognition collection: [index.html](./index.html)_
152 |
153 | 1. Copy the _[index.html](./index.html)_ file to your computer, save it as index.html, and make the following substitutions (fill in your Cognito identity pool ID and your API Gateway invoke URL):
154 | ```
155 | cognitoIdentityPoolId = 'YOUR-IdentityPoolId'
156 | apiGatewayUrl = 'https://{YOUR-APIGatewayEndPoint}.execute-api.us-east-1.amazonaws.com/respond/succeed'
157 | ```
158 | _Next, we will upload that file to S3 according to the following steps:_
159 |
160 | 1. Go to S3 in AWS Console at https://s3.console.aws.amazon.com/s3/home?region=us-east-1
161 | 2. In your list of S3 buckets, click on the bucket [Your name or username]-dl-web you created above and click Upload.
162 | 3. Click Add files and select the index.html from your computer.
163 | 4. Under Manage public permissions, select "Grant public read access to this object(s)".
164 |
165 | 
166 |
167 | 5. Click Next, Next, and then Upload (a scripted alternative appears after this list).
168 | 6. Replace [Your name or username]-dl-web in the URL below with the name of your S3 bucket that is hosting index.html:
169 | ```
170 | http://[Your name or username]-dl-web.s3-website-us-east-1.amazonaws.com
171 | ```
172 | 7. You should see an approval page like the one below:
173 | 
174 |
175 | ## Completion
176 | You have successfully created a state machine in Step Functions, an API Gateway API, and an approval website to manage the approval workflow for images coming from DeepLens. In the next module, [Bringing It All Together](../4-BringingItAllTogether), you will learn how to create a Lambda function to initiate the workflow.
177 |
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/.DS_Store
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Approval_page.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Approval_page.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Cognito_ID_Pool_ID.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Cognito_ID_Pool_ID.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Create_new_api.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Create_new_api.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Invoke_url.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Invoke_url.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/ML_Bucket.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/ML_Bucket.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Make_public.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Make_public.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/New_execution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/New_execution.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/State_machines.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/State_machines.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Step_fns.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Step_fns.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Workflow_view.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/Workflow_view.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/approvalwebsite.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/approvalwebsite.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/s3public.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/3-ApprovalWorkflow/images/s3public.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/3-ApprovalWorkflow/index.html:
--------------------------------------------------------------------------------
[Approval page markup not preserved in this extraction; only the page title "Image Approval" and the heading "Approval Confirmation" survive.]
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/.DS_Store
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/README.md:
--------------------------------------------------------------------------------
1 | # Bringing it All Together
2 |
3 | ## Register Your Email Address With SES
4 |
5 | _SES will be used to send an email from the “StartWorkflow” Lambda function we will create later in this workshop. In order to do this, your email address must be registered with SES._
6 |
7 | 1. In the AWS Console, click on “Services” in the top, left-hand corner of the screen, and click on “Simple Email Service” (you can find it by typing _ses_ into the search field at the top of the screen).
8 | 2. Click on “Manage Identities”.
9 | 3. On the left-hand side of the page, click “Email Addresses”, and then click “Verify a New Email Address”.
10 | 4. Enter your email address and click “Verify This Email Address”.
11 | 5. A verification email will be sent to your email address. Open your email and click the verification link in that email.
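The same registration can be scripted with boto3 if you prefer; SES still sends the verification email that you must click through (the address below is a placeholder):

```
import boto3

ses = boto3.client('ses', region_name='us-east-1')
ses.verify_email_identity(EmailAddress='you@example.com')  # placeholder address
```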
12 |
13 | ## Create DynamoDB Table
14 |
15 | 1. In the AWS Console, click on “Services” in the top, left-hand corner of the screen, and click on “DynamoDB” (you can find it by typing _dyn_ into the search field at the top of the screen).
16 | 2. Click "Create table", and enter the following details:
17 | * Table name: DLApprovals
18 | * Primary key: ImageId
19 | 3. Click "Create".
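For reference, the equivalent table definition in boto3 (on-demand billing is an assumption made here for simplicity):

```
import boto3

ddb = boto3.client('dynamodb', region_name='us-east-1')
ddb.create_table(
    TableName='DLApprovals',
    KeySchema=[{'AttributeName': 'ImageId', 'KeyType': 'HASH'}],
    AttributeDefinitions=[{'AttributeName': 'ImageId', 'AttributeType': 'S'}],
    BillingMode='PAY_PER_REQUEST'
)
```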
20 |
21 | ## Create Lambda Functions
22 |
23 | ### Create the “StartWorkflow” Lambda Function
24 |
25 | _This function will be the heart of the entire workflow. It will check to see if the face in the image sent from DeepLens exists in a Rekognition collection. If so, it will simply send an email to notify you that this person was seen by the DeepLens device. If not, it will send an email to ask if you want to approve this face and add it to your Rekognition collection._
26 |
27 | 1. Go to Lambda in AWS Console at https://console.aws.amazon.com/lambda/home?region=us-east-1#/functions.
28 | 2. Click “Create a function”, and enter the following details:
29 | * Name: StartWorkflow
30 | * Runtime: Python 3.6
31 | * Role: Choose an existing role
32 | * Role name: DL-Lambda-Role
33 | 3. Click “Create function”.
34 | 4. In the "Add Triggers" section on the left-hand side of the page, click "S3" (see screenshot below).
35 |
36 | 
37 |
38 | 5. In the "Configure triggers" section that appears at the bottom of the screen, configure the following details:
39 | * Bucket: _[Your name or username]-dl-faces_ (this is the bucket where images from DeepLens are being stored)
40 | * Event type: Object Created (All)
41 | * Enable trigger: Yes [Checked]
42 | 6. Click "Add" (at the bottom of the page), and then click "Save" (at the top of the page).
43 | 7. Now click on the "StartWorkflow" icon in the center of the screen, and a "Function code" section will appear at the bottom of the screen (scroll down).
44 | For example:
45 |
46 | 
47 |
48 | 8. Delete the existing code in that section, and replace it with the code from [startworkflow.py](startworkflow.py).
49 | 9. Update the following lines in the lambda_handler according to your environment:
50 | ```
51 | #########Update according to your environment #########################
52 | #API Gateway end point URL
53 | apiGatewayUrl = 'https://YOUR-APIGW-ENDPOINT.execute-api.us-east-1.amazonaws.com/respond/'
54 | #URL for S3 Hosted Approval Website
55 | approvalWebsiteUrl = 'http://YOUR-S3BUCKET-web.s3-website-us-east-1.amazonaws.com'
56 | #Rekognition Collection Name
57 | rekognitionCollection = 'YOUR-REKOGNITION-COLLECTION'
58 | #Step Function State Machine Arn
59 | stateMachineArn = 'arn:aws:states:us-east-1:YOUR-AWS-ACCOUNT-ID:stateMachine:MLApprovalProcess'
60 | #Step Function Activity Arn
61 | activityArn = 'arn:aws:states:us-east-1:YOUR-AWS-ACCOUNT-ID:activity:ManualStep'
62 | #Email information
63 | emailSender = "YOUR-EMAIL-ADDRESS"
64 | emailRecipient = "YOUR-EMAIL-ADDRESS"
65 | emailSubject = "Approval needed for image"
66 | #DynamoDB Table
67 | dynamoTable = 'YOUR-DYNAMODB-TABLE'
68 | #########Update according to your environment #########################
69 | ```
70 | 10. Scroll down further, and in the “Basic Settings” section, set the timeout to 5 minutes.
71 |
72 | 
73 |
74 | 11. Click "Save" (at the top of the screen).
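For reference, a minimal sketch of how a handler extracts the bucket and object key from the S3 trigger event configured above (this is the standard S3 put-event shape that the trigger delivers; names here are illustrative):

```
import urllib.parse

def lambda_handler(event, context):
    record = event['Records'][0]
    bucket = record['s3']['bucket']['name']
    # Object keys arrive URL-encoded (spaces become '+'), so decode first.
    key = urllib.parse.unquote_plus(record['s3']['object']['key'])
    print(bucket, key)
```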
75 |
76 | ### Test the “StartWorkflow” Lambda Function
77 |
78 | _We will create a test event within the Lambda console._
79 |
80 | 1. While still in the Lambda Console screen for the function you just created, scroll to the top of the web-page, and click the drop-down menu that says “Select a test event”, and then click “Configure test events”.
81 | 2. In the screen that appears (see screenshot below), click “Create new test event”.
82 | 3. In the “Event template” drop-down menu, select “S3 Put”.
83 |
84 | 
85 |
86 | 4. A set of auto-generated code will appear when you select that option. In that code, change the key to andy2.png and the bucket name to the S3 bucket where images from DeepLens are being stored _[YourName-dl-faces]_.
87 |
88 | 
89 |
90 | 5. Click “Save”.
91 | 6. Now, in the top, right-hand corner of the console screen, click the “Test” button.
92 | 7. At this point you should receive an email asking you to approve/reject the photo.
93 |
94 | 
95 |
96 | 8. Go ahead and click the Approve link and that should open the approval website.
97 |
98 | 
99 |
100 | 9. Go to Step Functions in the AWS Console at https://console.aws.amazon.com/states/home?region=us-east-1#/ and click on the state machine _MLApprovalProcess_.
101 | 10. You should see the current execution with status "Running".
102 |
103 | 
104 |
105 | 11. Click on the ARN of the execution to see more details.
106 |
107 | 
108 |
109 | 12. Go back to the approval email and click the Reject link. It should take you to API Gateway and return {} if the request is successful.
110 |
111 | ### Create the “PostApproval” Lambda Function
112 |
113 | _This function will actually add the image to our Rekognition collection, so that it will be recognized on subsequent attempts._
114 | 1. Go to Lambda in AWS Console at https://console.aws.amazon.com/lambda/home?region=us-east-1#/functions.
115 | 2. Click “Create a function”, and enter the following details:
116 | * Name: PostApproval
117 | * Runtime: Python 3.6
118 | * Role: Choose an existing role
119 | * Role name: DL-Lambda-Role
120 | 3. Click “Create function”.
121 | 4. Delete the existing code in the "Function code" section, and replace it with the code from [postapproval.py](postapproval.py).
122 | 5. Update the lines below with the names of your Rekognition collection and your DynamoDB table:
123 | ```
124 | rekognitionCollection = 'YOUR-REKOGNITION-COLLECTION'
125 | dynamodbTableName = 'YOUR-DYNAMODB-TABLE'
126 | ```
127 | 6. Click “Save”.
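If you want to test PostApproval in isolation, a hand-rolled Lambda test event matching the shape that the /succeed route produces as the state output (all values are placeholders; the ImageId must exist in your DynamoDB table for the lookup to succeed):

```
{
  "approval": "approved",
  "name": "Jane",
  "ImageId": "example-image-id",
  "taskToken": "TASK-TOKEN-FROM-ACTIVITY"
}
```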
128 |
129 | ## End to End Test
130 | When DeepLens now detects a face, you should get an email if the face already exists in your Rekognition collection. If the face does not exist, you should get an email asking you to approve or reject whether that face should be added to your Rekognition collection.
131 |
132 | ## Completion
133 | You have successfully built an end-to-end solution that detects faces using DeepLens and recognizes them using Rekognition.
134 |
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/.DS_Store
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/Basic_settings.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/Basic_settings.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/Configure_test_event.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/Configure_test_event.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/Configure_test_event2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/Configure_test_event2.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/Configure_triggers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/Configure_triggers.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/Function_code.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/Function_code.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/StartWorkflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/StartWorkflow.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/approvalemail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/approvalemail.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/approvalwebsite.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/approvalwebsite.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/lambdatest.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/lambdatest.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/statemachinerunning.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/statemachinerunning.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/statemachinerunning2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darwaishx/Deep-Learning-With-Deep-Lens/cfb3dd89718535cbf8054845f2d92beeae59ddf4/4-FaceDetectionAndVerification/4-BringingItAllTogether/images/statemachinerunning2.png
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/postapproval.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | import urllib.parse
3 |
4 | def saveFace(client, bucket, imageName, imageId, rekognitionCollection):
5 |
6 | print(bucket)
7 | print(imageName)
8 | print(imageId)
9 | print(rekognitionCollection)
10 |
11 | response = client.index_faces(
12 | CollectionId=rekognitionCollection,
13 | Image={
14 | 'S3Object': {
15 | 'Bucket': bucket,
16 | 'Name': imageName,
17 | }
18 | },
19 | ExternalImageId=imageId
20 | )
21 |
22 | def lambda_handler(event, context):
23 | rekognitionCollection = 'YOUR-REKOGNITION-COLLECTION'
24 | dynamodbTableName = 'YOUR-DYNAMODB-TABLE'
25 |
26 | approval = event['approval']
27 | urlEncodedTaskToken = event['taskToken']
28 | taskToken = urllib.parse.unquote(urlEncodedTaskToken)
29 | urlEncodedImageId = event['ImageId']
30 | imageId = urllib.parse.unquote(urlEncodedImageId)
31 |
32 | bucket = ""
33 | imageName = ""
34 |
35 | ddb = boto3.client('dynamodb')
36 | response = ddb.get_item(
37 | TableName=dynamodbTableName,
38 | Key={
39 | 'ImageId' : {'S': imageId}
40 | })
41 |
42 | if 'Item' in response:
43 | bucket = response['Item']['Bucket']['S']
44 | imageName = response['Item']['ImageName']['S']
45 |
46 | # NOTE: you should also verify that the ImageId and task token match; additional checks are left to the user
47 |
48 | if(approval == 'approved'):
49 | name = event['name']
50 | client = boto3.client('rekognition')
51 | saveFace(client, bucket, imageName, imageId, rekognitionCollection)
52 |
53 | # record the approved person's name on the existing DynamoDB item
54 | response = ddb.update_item(
55 | TableName=dynamodbTableName,
56 | Key={
57 | 'ImageId' : {'S': imageId}
58 | },
59 |
60 | UpdateExpression='SET PersonsName = :val1',
61 | ExpressionAttributeValues={
62 | ':val1': { 'S' : name}
63 | })
64 | else:
65 | print('Image was not approved; skipping indexing.')
66 |
67 | return bucket + '/' + imageName
68 |
--------------------------------------------------------------------------------
/4-FaceDetectionAndVerification/4-BringingItAllTogether/startworkflow.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | import uuid
3 | import json
4 | import urllib.parse
5 | from botocore.exceptions import ClientError
6 |
7 |
8 | def faceExists(client, bucket, imageName, rekognitionCollection):
9 | result = ""
10 |
11 | response = client.search_faces_by_image(
12 | CollectionId=rekognitionCollection,
13 | Image={
14 | 'S3Object': {
15 | 'Bucket': bucket,
16 | 'Name': imageName
17 | }
18 | },
19 | MaxFaces=1,
20 | FaceMatchThreshold=90
21 | )
22 |
23 | if(len(response["FaceMatches"]) > 0):
24 | result = response["FaceMatches"][0]["Face"]["ExternalImageId"]
25 |
26 | return result
27 |
28 | def runWorkflow(bucket, imageName, smArn):
29 | data = {}
30 | data['bucket'] = bucket
31 | data['imageName'] = imageName
32 | json_data = json.dumps(data)
33 |
34 | client = boto3.client(service_name='stepfunctions', region_name='us-east-1')
35 | response = client.start_execution(stateMachineArn=smArn,
36 | name= str(uuid.uuid1()),
37 | input= json_data)
38 |
39 | def getTask(actArn):
40 | client = boto3.client(service_name='stepfunctions', region_name='us-east-1')
41 | response = client.get_activity_task(
42 | activityArn=actArn,
43 | workerName='Lambda'
44 | )
45 |
46 | # parse the activity task input once and grab the callback token
47 | taskToken = response['taskToken']
48 | taskInput = json.loads(response['input'])
49 |
50 | return taskInput, taskToken
51 |
52 |
53 | def saveRequest(imageId, token, bucket, imageName, s3url, dynamoTable):
54 | client = boto3.client('dynamodb')
55 | response = client.put_item(
56 | TableName= dynamoTable,
57 | Item={
58 | 'ImageId' : {'S' : imageId},
59 | 'Token' : {'S': token},
60 | 'Bucket' : {'S': bucket},
61 | 'ImageName' : {'S': imageName},
62 | 'S3Url' : {'S': s3url}
63 | })
64 |
65 | def sendEmail(MESSAGE, SENDER, RECIPIENT, SUBJECT):
66 | BODY_HTML = MESSAGE
67 |
68 | # The character encoding for the email.
69 | CHARSET = "UTF-8"
70 |
71 | # Create a new SES resource and specify a region.
72 | client = boto3.client('ses',region_name='us-east-1')
73 |
74 | # Try to send the email.
75 | try:
76 | #Provide the contents of the email.
77 | response = client.send_email(
78 | Destination={
79 | 'ToAddresses': [
80 | RECIPIENT,
81 | ],
82 | },
83 | Message={
84 | 'Body': {
85 | 'Html': {
86 | 'Charset': CHARSET,
87 | 'Data': BODY_HTML,
88 | },
89 | },
90 | 'Subject': {
91 | 'Charset': CHARSET,
92 | 'Data': SUBJECT,
93 | },
94 | },
95 | Source=SENDER
96 | )
97 | # Display an error if something goes wrong.
98 | except ClientError as e:
99 | print(e.response['Error']['Message'])
100 | else:
101 | print("Email sent! Message ID:")
102 | print(response['ResponseMetadata']['RequestId'])
103 |
104 | def sendApprovalMessage(approveUrl, rejectUrl, s3url, SENDER, RECIPIENT, SUBJECT):
105 | BODY_HTML = """
106 |
107 |
108 |