├── .gitattributes
├── .gitignore
├── .idea
│   ├── PythonRekognitionDemo.iml
│   └── vcs.xml
├── README.md
├── boto_ex.py
├── celebs.py
├── face_detect.py
├── face_detect_graphical.py
├── image_helpers.py
├── labels.py
└── requirements.txt

/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto

# Custom for Visual Studio
*.cs diff=csharp

# Standard to msysgit
*.doc diff=astextplain
*.DOC diff=astextplain
*.docx diff=astextplain
*.DOCX diff=astextplain
*.dot diff=astextplain
*.DOT diff=astextplain
*.pdf diff=astextplain
*.PDF diff=astextplain
*.rtf diff=astextplain
*.RTF diff=astextplain

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff:
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries

# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.xml
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml

# Gradle:
.idea/**/gradle.xml
.idea/**/libraries

# CMake
cmake-build-debug/

# Mongo Explorer plugin:
.idea/**/mongoSettings.xml

## File-based project format:
*.iws

## Plugin-specific files:

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
--------------------------------------------------------------------------------
/.idea/PythonRekognitionDemo.iml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# PythonRekognitionDemo
Examples of using AWS Rekognition from Python

To install the necessary Python packages, run `pip install -r requirements.txt` in a terminal window inside this folder.
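
The scripts use boto3, so they also assume AWS credentials and a default region are already configured (for example with `aws configure`, which the `awscli` package in `requirements.txt` provides).

As a rough sketch of how the pieces fit together (this is not one of the scripts below), label detection can also be run against a local image instead of a URL; `photo.jpg` here is just a placeholder file name:

```python
# Minimal sketch: detect labels in a local file using the helper from
# image_helpers.py. Assumes AWS credentials are configured and that
# 'photo.jpg' (a placeholder name) exists in this folder.
import boto3
from pprint import pprint

import image_helpers

client = boto3.client('rekognition')
imgbytes = image_helpers.get_image_from_file('photo.jpg')
rekresp = client.detect_labels(Image={'Bytes': imgbytes}, MinConfidence=50)
pprint(rekresp['Labels'])
```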

Watch a video explaining this code by clicking the image below:

[![Video preview image](http://img.youtube.com/vi/f4NIuLb2QkI/0.jpg)](https://youtu.be/f4NIuLb2QkI)

--------------------------------------------------------------------------------
/boto_ex.py:
--------------------------------------------------------------------------------
import boto3
from pprint import pprint

s3 = boto3.resource('s3')
pprint(s3.buckets.all())

# error
# pprint(s3.buckets.all()[0])

print('S3 Buckets')
for bucket in s3.buckets.all():
    pprint(bucket)

ec2 = boto3.resource('ec2')

print('\nEC2 Instances:')
for ins in ec2.instances.all():
    pprint(ins)

print('\nEC2 Instances (id, state)')
for ins in ec2.instances.all():
    print(ins.id, ins.state)

--------------------------------------------------------------------------------
/celebs.py:
--------------------------------------------------------------------------------
import boto3
from pprint import pprint
import image_helpers

client = boto3.client('rekognition')

# grab the image from online
# imgurl = 'https://media1.popsugar-assets.com/files/thumbor/xptPz9chB_kMwxzqI9qMCZrK_YA/fit-in/1024x1024/filters:format_auto-!!-:strip_icc-!!-/2015/07/13/766/n/1922398/3d3a7ee5_11698501_923697884352975_2728822964439153485_n.jpg'
# imgurl = 'http://media.comicbook.com/uploads1/2015/07/fox-comic-con-panel-144933.jpg'
imgurl = 'https://blog.njsnet.co/content/images/2017/02/trumprecognition.png'

imgbytes = image_helpers.get_image_from_url(imgurl)

rekresp = client.recognize_celebrities(Image={'Bytes': imgbytes})
# pprint(rekresp['CelebrityFaces'])
for face in rekresp['CelebrityFaces']:
    print(face['Name'], 'confidence:', face['MatchConfidence'], 'url:', face['Urls'])

--------------------------------------------------------------------------------
/face_detect.py:
--------------------------------------------------------------------------------
import boto3
from pprint import pprint
import image_helpers

client = boto3.client('rekognition')

# imgurl = 'http://media.comicbook.com/uploads1/2015/07/fox-comic-con-panel-144933.jpg'
imgurl = 'https://blog.njsnet.co/content/images/2017/02/trumprecognition.png'

imgbytes = image_helpers.get_image_from_url(imgurl)

rekresp = client.detect_faces(Image={'Bytes': imgbytes},
                              Attributes=['ALL'])

# pprint(rekresp)

numfaces = len(rekresp['FaceDetails'])
print('Found', numfaces, end='')
if numfaces == 1:
    print(' face:')
else:
    print(' faces:')

for facedeets in rekresp['FaceDetails']:

    # construct a printf (almost) style format string for printing the info
    fmtstr = '{gender} age {lowage}-{highage},'

    # mustache and beard detection
    if facedeets['Mustache']['Value'] and facedeets['Beard']['Value']:
        fmtstr += ' with beard and mustache,'
    elif facedeets['Mustache']['Value']:
        fmtstr += ' with mustache,'
    elif facedeets['Beard']['Value']:
        fmtstr += ' with beard,'

    # sunglasses/eyeglasses detection
    if facedeets['Sunglasses']['Value']:
        fmtstr += ' wearing sunglasses,'
    elif facedeets['Eyeglasses']['Value']:
        fmtstr += ' wearing glasses,'

    fmtstr += ' looks {emotion}'

    print(
        fmtstr.format(
            gender=facedeets['Gender']['Value'],
            lowage=facedeets['AgeRange']['Low'],
            highage=facedeets['AgeRange']['High'],
            emotion=facedeets['Emotions'][0]['Type'].lower()
        )
    )
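
# A small helper as an optional variant (a sketch, not used by the loop above):
# rather than relying on the ordering of facedeets['Emotions'], pick the entry
# with the highest confidence explicitly.
def dominant_emotion(face_details):
    '''Return the most confident emotion label from one FaceDetails entry.'''
    return max(face_details['Emotions'], key=lambda e: e['Confidence'])['Type'].lower()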

--------------------------------------------------------------------------------
/face_detect_graphical.py:
--------------------------------------------------------------------------------
from PIL import Image, ImageDraw
import boto3
from pprint import pprint
from io import BytesIO
import image_helpers

def bbox_to_coords(bbox, img_width, img_height):
    '''Given a BoundingBox map (from Rekognition)
    return the corresponding coords
    suitable for use with ImageDraw rectangle.'''
    upper_left_x = bbox['Left'] * img_width
    upper_y = bbox['Top'] * img_height
    bottom_right_x = upper_left_x + (bbox['Width'] * img_width)
    bottom_y = upper_y + (bbox['Height'] * img_height)
    return [upper_left_x, upper_y, bottom_right_x, bottom_y]

client = boto3.client('rekognition')

# imgurl = 'http://media.comicbook.com/uploads1/2015/07/fox-comic-con-panel-144933.jpg'
imgurl = 'https://blog.njsnet.co/content/images/2017/02/trumprecognition.png'

imgbytes = image_helpers.get_image_from_url(imgurl)

rekresp = client.detect_faces(Image={'Bytes': imgbytes},
                              Attributes=['ALL'])

# load the image in Pillow for processing
img = Image.open(BytesIO(imgbytes))

(img_width, img_height) = img.size

# prepare to draw on the image
draw = ImageDraw.Draw(img)

# pprint(rekresp)
for facedeets in rekresp['FaceDetails']:
    bbox = facedeets['BoundingBox']
    draw.rectangle(bbox_to_coords(bbox, img_width, img_height),
                   outline=(0, 200, 0))
del draw

img.show()

--------------------------------------------------------------------------------
/image_helpers.py:
--------------------------------------------------------------------------------
import requests

def get_image_from_url(imgurl):
    resp = requests.get(imgurl)
    imgbytes = resp.content
    return imgbytes

def get_image_from_file(filename):
    '''Based on
    https://docs.aws.amazon.com/rekognition/latest/dg/example4.html,
    last access 10/3/2017'''
    with open(filename, 'rb') as imgfile:
        return imgfile.read()

--------------------------------------------------------------------------------
/labels.py:
--------------------------------------------------------------------------------
import boto3
from pprint import pprint
import image_helpers

client = boto3.client('rekognition')

imgurl = 'https://www.parrots.org/images/uploads/dreamstime_C_47716185.jpg'
imgurl = 'http://www.idothat.us/images/idothat-img/features/pool-patio-lanai/ft-pool-patio-lanai-2.jpg'

# grab the image from online
imgbytes = image_helpers.get_image_from_url(imgurl)

rekresp = client.detect_labels(Image={'Bytes': imgbytes},
                               MinConfidence=1)
pprint(rekresp['Labels'])

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
aws-shell==0.2.0
awscli==1.11.163
boto3==1.4.7
botocore==1.7.21
colorama==0.3.7
configobj==5.0.6
docutils==0.14
jmespath==0.9.3
prompt-toolkit==1.0.15
pyasn1==0.3.6
Pygments==2.2.0
python-dateutil==2.6.1
PyYAML==3.12
rsa==3.4.2
s3transfer==0.1.11
six==1.11.0
wcwidth==0.1.7
--------------------------------------------------------------------------------
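
A final variation worth noting (again just a sketch, not part of the repo's scripts): every Rekognition call above sends raw image bytes, but the `Image` parameter can instead reference an object that is already stored in S3, so nothing needs to be downloaded locally. The bucket and object names below are placeholders:

```python
import boto3
from pprint import pprint

client = boto3.client('rekognition')

# 'my-bucket' and 'photos/group.jpg' are placeholder names for an existing
# S3 bucket and object that the caller's credentials can read.
rekresp = client.detect_faces(
    Image={'S3Object': {'Bucket': 'my-bucket', 'Name': 'photos/group.jpg'}},
    Attributes=['ALL'])
pprint(rekresp['FaceDetails'])
```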