├── Driveway.py ├── FrontDoor.py ├── README.md ├── automl.py ├── pushbullet.py └── sighthound.py /Driveway.py: -------------------------------------------------------------------------------- 1 | from google.cloud import automl_v1beta1 as automl 2 | import os 3 | import glob 4 | from google.oauth2 import service_account 5 | import urllib.request 6 | import urllib.parse 7 | import pickle 8 | import json 9 | import requests 10 | from PIL import Image 11 | import base64 12 | import http.client 13 | import ssl 14 | from pushbullet import pushbullet 15 | from sighthound import sighthound 16 | from automl import visionclassification 17 | 18 | # For BlueIris I have to run this from the %/Users/Username directory for some reason to execute correctly 19 | # Create a blank file called data.pickle and prediction.pickle (for Sighthound and AutoML respectively), I'm too dumb to figure out how to create the file via Python 20 | avoid_duplicate = "True" # Conditional Argument For AutoML Function to Avoid Giving the same result twice (for example, a parked car in the driveway) 21 | token = "o.xxxxxxxxxxxxxxxxxxxx" # Pushbullet Token 22 | recipient = "" # Pushbullet Device Identifier. Not used currently, so instead it sends it to all devices with Pushbullet installed 23 | 24 | project_id = 'project_id' # AutoML Project_id 25 | model_id = 'model_id' # AutoML model_id 26 | min_confidence_score = 0.71 # Cut off for confidence score to be considered relevant 27 | 28 | list_of_files = glob.glob('H:\BlueIris\Alerts\*.jpg') # Replace with your Alert Images Directory 29 | local_image_path = max(list_of_files, key=os.path.getctime) 30 | print(local_image_path) 31 | 32 | # Use AutoML.py function visionclassification 33 | prediction = visionclassification(local_image_path, project_id, model_id, min_confidence_score, avoid_duplicate) 34 | 35 | # Various IF Conditions you can copy paste depending on what you have trained AutoML 36 | 37 | if prediction == "Bob Arriving": 38 | #contents = urllib.request.urlopen("http://url.com").read() #make an HTTP request for IFTTT or WebCoRE 39 | print("True: Bob is arriving") 40 | #pushbullet(prediction, token, local_image_path) 41 | 42 | if prediction == "Alice Arriving": 43 | print("True: Alice is arriving") 44 | #contents = urllib.request.urlopen("http://url.com").read() 45 | #pushbullet(prediction, token, local_image_path) 46 | 47 | # If Nothing is Detected or Other Car, run it against Sighthound's AI 48 | if prediction == "Nothing" or prediction == "Other Car": 49 | vehicle_detected_return = sighthound(local_image_path) 50 | if vehicle_detected_return is not None: 51 | #pushbullet(vehicle_detected_return, token, local_image_path) 52 | print(vehicle_detected_return) 53 | # Optionally below, is how I have formatted the script to pass an argument to WebCoRE so it can read it aloud on my Google Cast Devices with cast-web 54 | 55 | #vehicle_detected_speak = "Attention. There is a %s in the driveway" % vehicle_detected_return 56 | #contents = urllib.request.urlopen("http://smartthings.com" % urllib.parse.quote(vehicle_detected_speak)).read() 57 | -------------------------------------------------------------------------------- /FrontDoor.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import json 3 | import os 4 | import ssl 5 | import glob 6 | import urllib.request 7 | from automl import visionclassification 8 | from pushbullet import pushbullet 9 | 10 | project_id = 'project-id' # AutoML Project_id 11 | model_id = 'model-id' # AutoML model_id 12 | min_confidence_score = 0.71 13 | token = "o.xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # Pushbullet Token 14 | 15 | list_of_files = glob.glob('H:\BlueIris\FrontDoor\*.jpg') # Replace with your directory of Alert Images 16 | local_image_path = max(list_of_files, key=os.path.getctime) 17 | print(local_image_path) 18 | 19 | prediction = visionclassification(local_image_path, project_id, model_id, min_confidence_score) 20 | 21 | print(prediction) 22 | 23 | # Copy Paste depending on the classifications you create 24 | 25 | if prediction == "Person": 26 | #contents = urllib.request.urlopen("http://url.com").read() #Makes a Web Request depending on outcome 27 | print(contents) 28 | #pushbullet(prediction, token, local_image_path) #Uncomment to send pushbullet notification 29 | 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BlueIris-SmartDetection 2 | Python Script to Execute in BlueIris that submits triggered images to Google's AutoML Vision Classification cloud service and Sighthound's Vehicle Recognition service. Also with Pushbullet support. You can easily modify this script to detect people. I also have trained AutoML to detect people in my front door (FrontDoor.py) with only about 50 person images and 50 no person images. Sighthound's People Detection may or may not give you lots of false positives (it does in my experience). 3 | 4 | Excuse the mess of code, I am not a coder at all. I just mashed stuff together and kept executing it until it worked. 5 | 6 | I've commented where I can so it makes sense (because it is messy, and it's just made to my particular use case). 7 | 8 | I have BlueIris execute this python script on a trigger. It does various things such as send me a Pushbullet notification with the image, and execute an HTTP request (in my case a Smartthings action in my smarthome). 9 | 10 | I have trained AutoML Vision Image Classification to a bunch of different scenarios in my driveway from months of trigger images (such as "Me arriving" "Wife arriving" "Inlaws arriving" "Me leaving" "Wife leaving" "Unknown car" "Mailman" "Nothing"). 11 | 12 | I have probably anywhere from 20 to 80 images for either category to train the dataset. It's very easy to do all via a GUI, and the average user will never encounter any billing from this small use, if you do, it might be cents. 13 | 14 | If "Unknown Car" is the returned variable, then I pass it along to Sighthound to send me a Pusbullet notification me the Make, Model, Colour and Plate (if applicable) of the car detected. Maybe 5% of the time, something slips through that is not a car at all and just vision error. 15 | 16 | ## Instructions 17 | 18 | 1) Copy all the scripts to your Users\User Directory 19 | 2) Configure the global variables (most of them are at the top), namely you will need a Sighthound Cloud Developer Account Token, and Pushbullet Token, as well as the relevant information from Google's AutoML Vision Classification 20 | 3) Namely the one thing that is pretty specific to me, is the image cropping in Sighthound (I had to remove the section of the image that contains a street, otherwise it will just pick off any car that could potentially be driving by). You will probably have to delete that, and replace line 32 in sighthound.py to local_image_path 21 | 22 | Please feel free to contribute. God knows this could probably use some refining. 23 | -------------------------------------------------------------------------------- /automl.py: -------------------------------------------------------------------------------- 1 | from google.cloud import automl_v1beta1 as automl 2 | import os 3 | import glob 4 | from google.oauth2 import service_account 5 | import urllib.request 6 | import urllib.parse 7 | import pickle 8 | import json 9 | import requests 10 | from PIL import Image 11 | import base64 12 | import http.client 13 | import ssl 14 | 15 | def visionclassification(local_image_path, project_id, model_id, min_confidence_score, avoid_duplicate="False"): 16 | os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="file.json" # You need to generate and download a json key in AutoML, you only need one if you keep datasets in one project 17 | compute_region = 'us-central1' # This is by default 18 | prediction = "" 19 | automl_client = automl.AutoMlClient() 20 | 21 | # Get the full path of the model. 22 | model_full_id = automl_client.model_path(project_id, compute_region, model_id) 23 | 24 | # Google AutoML Vision - Upload Image and Read Result, Check if the Same 25 | prediction_client = automl.PredictionServiceClient() 26 | with open(local_image_path, 'rb') as f_in: 27 | image_bytes = f_in.read() 28 | payload = {'image': {'image_bytes': image_bytes}} 29 | result = prediction_client.predict(model_full_id, payload) 30 | 31 | 32 | for result in result.payload: 33 | print("Predicted class name: {}".format(result.display_name)) 34 | print("Predicted class score: {}".format(result.classification.score)) 35 | if result.classification.score > min_confidence_score: 36 | prediction = result.display_name 37 | if avoid_duplicate == "True": 38 | try: 39 | with open('prediction.pickle', 'rb') as f: 40 | previous_prediction = pickle.load(f) 41 | 42 | if result.display_name == previous_prediction: 43 | print('Data is the same!') 44 | raise SystemExit 45 | with open('prediction.pickle', 'wb') as f: 46 | pickle.dump(result.display_name, f, pickle.HIGHEST_PROTOCOL) 47 | except EOFError: 48 | with open('prediction.pickle', 'wb') as f: 49 | pickle.dump(result.display_name, f, pickle.HIGHEST_PROTOCOL) 50 | except FileNotFoundError: 51 | os.open("prediction.pickle", os.O_CREAT | os.O_EXCL) 52 | return prediction; 53 | -------------------------------------------------------------------------------- /pushbullet.py: -------------------------------------------------------------------------------- 1 | import urllib.request 2 | import urllib.parse 3 | import pickle 4 | import json 5 | import requests 6 | from PIL import Image 7 | import base64 8 | import http.client 9 | import ssl 10 | import os 11 | 12 | def pushbullet (title, token, local_image_path): 13 | jpgbase = os.path.basename(local_image_path) 14 | jpgimg = local_image_path 15 | res = requests.post("https://api.pushbullet.com/v2/upload-request", 16 | headers={ 17 | "Access-Token": token, 18 | "Content-Type": "application/json" 19 | }, 20 | data=json.dumps({"file_name": jpgbase, "file_type": "image/jpeg"})) 21 | 22 | js = json.loads(res.content) 23 | files = {'file': open(jpgimg, 'rb')} 24 | requests.post(js['upload_url'], files=files) 25 | fileurl = js['file_url'] 26 | res = requests.post("https://api.pushbullet.com/v2/pushes", 27 | headers={ 28 | "Access-Token": token, 29 | "Content-Type": "application/json"}, 30 | data=json.dumps({ 31 | #"device_iden": recipient, 32 | "type": "file", 33 | "title": title, 34 | "file_name": jpgbase, 35 | "file_type": "image/jpeg", 36 | "file_url": fileurl})) -------------------------------------------------------------------------------- /sighthound.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import json 3 | import requests 4 | from PIL import Image 5 | import base64 6 | import http.client 7 | import ssl 8 | import urllib.request 9 | import urllib.parse 10 | import os 11 | import glob 12 | 13 | def sighthound(local_image_path): 14 | sighthound_token = "insert-token-here" # Sighthound Token 15 | plate = "" 16 | vehicle_detected = "" 17 | # Comment or Delete the Below of not needed to crop image (to ignore other vehicles on a road, etc.) 18 | crop_image_location = "crop-image-directory-here" # Needed if cropping image 19 | im = Image.open(local_image_path) 20 | width = im.size[0] 21 | height = im.size[1] 22 | im2 = im.crop( 23 | ( 24 | width - 1920, 25 | height - 950, 26 | width, 27 | height 28 | ) 29 | ) 30 | im2.save(crop_image_location) # Delete until here if not needed to crop 31 | headers = {"Content-type": "application/json", "X-Access-Token": sighthound_token} 32 | image_data = base64.b64encode(open(crop_image_location, 'rb').read()) # Change crop_image_location to local_image_path if removing cropped image 33 | params = json.dumps({"image": image_data.decode('ascii')}) 34 | conn = http.client.HTTPSConnection("dev.sighthoundapi.com", context=ssl.SSLContext(ssl.PROTOCOL_TLSv1)) 35 | conn.request("POST", "/v1/recognition?objectType=vehicle,licenseplate", params, headers) 36 | 37 | # Parse the response and print the make and model for each vehicle found 38 | response = conn.getresponse() 39 | results = json.loads(response.read()) 40 | for obj in results["objects"]: 41 | if obj["objectType"] == "vehicle": 42 | make = obj["vehicleAnnotation"]["attributes"]["system"]["make"]["name"] 43 | model = obj["vehicleAnnotation"]["attributes"]["system"]["model"]["name"] 44 | color = obj["vehicleAnnotation"]["attributes"]["system"]["color"]["name"] 45 | if "licenseplate" in obj["vehicleAnnotation"]: 46 | plate = obj["vehicleAnnotation"]["licenseplate"]["attributes"]["system"]["string"]["name"] 47 | vehicle_detected = "%s %s %s %s" % (color, make, model, plate) 48 | print("Detected: %s %s %s %s" % (color, make, model, plate)) 49 | current_data = { 50 | 'make' : (make), 51 | 'model' : (model), 52 | 'color' : (color) 53 | } 54 | 55 | try: 56 | with open('data.pickle', 'rb') as f: # Using Pickle to make sure to not send duplicate notifications 57 | previous_data = pickle.load(f) 58 | 59 | if current_data == previous_data: 60 | print('Data is the same!') # If it it's the same, exit 61 | raise SystemExit 62 | with open('data.pickle', 'wb') as f: 63 | pickle.dump(current_data, f, pickle.HIGHEST_PROTOCOL) 64 | except EOFError: 65 | with open('data.pickle', 'wb') as f: 66 | pickle.dump(current_data, f, pickle.HIGHEST_PROTOCOL) 67 | except FileNotFoundError: 68 | os.open("prediction.pickle", os.O_CREAT | os.O_EXCL) 69 | if vehicle_detected != "": 70 | return vehicle_detected; 71 | --------------------------------------------------------------------------------