├── .gitignore
├── assets
│   └── cover.jpg
├── requirements.txt
├── README.md
├── run.py
└── utils.py

/.gitignore:
--------------------------------------------------------------------------------
__pycache__
venv/
license-plates/
--------------------------------------------------------------------------------
/assets/cover.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/theos-ai/license-plate-recognition/HEAD/assets/cover.jpg
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
certifi==2022.9.24
charset-normalizer==2.1.1
idna==3.4
numpy==1.23.3
opencv-python==4.6.0.66
requests==2.28.1
urllib3==1.26.12
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# License Plate Recognition System (ALPR / ANPR)

![License Plate Recognition System Made by Theos AI](assets/cover.jpg)

This is an Automatic License Plate Recognition System built using YOLOv7 in Python, made with ❤️ by [Theos AI](https://theos.ai).

Don't forget to read the [Blog Post](https://blog.theos.ai/articles/how-to-train-yolov7-on-a-custom-dataset-for-license-plate-recognition-in-python-anpr-alpr-tutorial) and watch the [YouTube Video](https://www.youtube.com/watch?v=GVLUVxTpqG0)!

Make sure you have a camera connected to your computer, then run the following commands to start recognizing license plates.

### Install all the dependencies

```
pip install -r requirements.txt
```

### Copy and paste your Theos deployment URL inside run.py

``` python
URL = ''
```

### Start the program

```
python run.py
```
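
### Use a video file instead of a webcam (optional)

If you don't have a camera available, OpenCV can also read frames from a video file. As a rough sketch, change this line in run.py (`cars.mp4` is only a placeholder, use any video that shows license plates):

``` python
camera = cv2.VideoCapture('cars.mp4')
```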
--------------------------------------------------------------------------------
/run.py:
--------------------------------------------------------------------------------
import utils
import cv2
import time
import os


URL = '' # copy and paste your Theos deployment URL here
OCR_MODEL = 'large'
OCR_CLASS = 'license-plate'
FOLDER_PATH = 'license-plates'

seconds_to_wait = 2 # how often a frame is sent for recognition

if not os.path.exists(FOLDER_PATH):
    os.makedirs(FOLDER_PATH)

camera = cv2.VideoCapture(0)

if camera.isOpened():
    camera_open, frame = camera.read()
else:
    camera_open = False

start = time.time()

while camera_open:
    recording, frame = camera.read()

    if not recording: # stop if the camera no longer provides frames
        break

    elapsed = time.time() - start

    if elapsed >= seconds_to_wait:
        image_bytes = cv2.imencode('.jpg', frame)[1].tobytes()
        now = utils.get_time()

        try:
            detections = utils.detect(image_bytes, url=URL, ocr_model=OCR_MODEL, ocr_classes=OCR_CLASS)

            if len(detections) > 0:
                detected_frame = utils.draw(frame, detections)
                license_plates = []

                for detection in detections:
                    if detection['class'] == OCR_CLASS and detection.get('text'):
                        license_plates.append(detection['text'].upper())

                if len(license_plates) > 0:
                    utils.save_frame(detected_frame, os.path.join(FOLDER_PATH, now + '.jpg'))
                    utils.save_json(detections, os.path.join(FOLDER_PATH, now + '.json'))
                    print(f'[{now}] [+] License plates saved:', ', '.join(license_plates))
        except Exception as error:
            print(f'[{now}] [!] Error:', error)

        start = time.time()

camera.release()
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
from datetime import datetime
import numpy as np
import requests
import json
import time
import cv2


def detect(image_bytes, url, ocr_model, ocr_classes, fallback_url=None, conf_thres=0.25, iou_thres=0.45, retries=10, delay=0):
    response = requests.post(url, data={'conf_thres':conf_thres, 'iou_thres':iou_thres, 'ocr_model':ocr_model, 'ocr_classes':ocr_classes}, files={'image':image_bytes})
    if response.status_code in [200, 500]:
        data = response.json()
        if 'error' in data:
            print('[!]', data['message'])
        else:
            return data
    elif response.status_code == 403:
        print('[!] You reached your monthly requests limit. Upgrade your plan to unlock unlimited requests.')
    elif retries > 0:
        if delay > 0:
            time.sleep(delay)
        return detect(image_bytes, url=fallback_url if fallback_url else url, ocr_model=ocr_model, ocr_classes=ocr_classes, fallback_url=fallback_url, conf_thres=conf_thres, iou_thres=iou_thres, retries=retries-1, delay=2)
    return []

def draw_border(image, top_left_point, bottom_right_point, color, thickness, radius=5, length=5):
    x1, y1 = top_left_point
    x2, y2 = bottom_right_point
    res_scale = (image.shape[0] + image.shape[1])/2000
    radius = int(radius * res_scale)

    # Top left
    cv2.line(image, (x1 + radius, y1), (x2 - radius - length, y1), color, thickness, cv2.LINE_AA)
    cv2.line(image, (x1, y1 + radius), (x1, y2 - radius - length), color, thickness, cv2.LINE_AA)
    cv2.ellipse(image, (x1 + radius, y1 + radius), (radius, radius), 180, 0, 90, color, thickness, cv2.LINE_AA)

    # Top right
    cv2.line(image, (x2 - radius, y1), (x1 + radius + length, y1), color, thickness, cv2.LINE_AA)
    cv2.line(image, (x2, y1 + radius), (x2, y2 - radius - length), color, thickness, cv2.LINE_AA)
    cv2.ellipse(image, (x2 - radius, y1 + radius), (radius, radius), 270, 0, 90, color, thickness, cv2.LINE_AA)

    # Bottom left
    cv2.line(image, (x1 + radius, y2), (x2 - radius - length, y2), color, thickness, cv2.LINE_AA)
    cv2.line(image, (x1, y2 - radius), (x1, y1 + radius + length), color, thickness, cv2.LINE_AA)
    cv2.ellipse(image, (x1 + radius, y2 - radius), (radius, radius), 90, 0, 90, color, thickness, cv2.LINE_AA)

    # Bottom right
    cv2.line(image, (x2 - radius, y2), (x1 + radius + length, y2), color, thickness, cv2.LINE_AA)
    cv2.line(image, (x2, y2 - radius), (x2, y1 + radius + length), color, thickness, cv2.LINE_AA)
    cv2.ellipse(image, (x2 - radius, y2 - radius), (radius, radius), 0, 0, 90, color, thickness, cv2.LINE_AA)

def plot_box(image, top_left_point, bottom_right_point, width, height, label, color=(210,240,0), padding=6, font_scale=0.375, alpha=0.15):
    if alpha > 1:
        alpha = 1

    if alpha > 0:
        # Tint the inside of the box with a translucent overlay
        box_crop = image[top_left_point['y']:top_left_point['y']+height, top_left_point['x']:top_left_point['x']+width]
        colored_rect = np.ones(box_crop.shape, dtype=np.uint8)
        colored_rect[:,:,0] = color[0] - 90 if color[0] - 90 >= 0 else 0
        colored_rect[:,:,1] = color[1] - 90 if color[1] - 90 >= 0 else 0
        colored_rect[:,:,2] = color[2] - 90 if color[2] - 90 >= 0 else 0
        box_crop_weighted = cv2.addWeighted(box_crop, 1 - alpha, colored_rect, alpha, 1.0)
        image[top_left_point['y']:top_left_point['y']+height, top_left_point['x']:top_left_point['x']+width] = box_crop_weighted

    cv2.rectangle(image, (top_left_point['x'] - 1, top_left_point['y']), (bottom_right_point['x'], bottom_right_point['y']), color, thickness=2, lineType=cv2.LINE_AA)
    res_scale = (image.shape[0] + image.shape[1])/1600
    font_scale = font_scale * res_scale
    font_width, font_height = 0, 0
    font_face = cv2.FONT_HERSHEY_SIMPLEX
    text_size = cv2.getTextSize(label, font_face, fontScale=font_scale, thickness=1)[0]

    if text_size[0] > font_width:
        font_width = text_size[0]
    if text_size[1] > font_height:
        font_height = text_size[1]
    if top_left_point['x'] - 1 < 0:
        top_left_point['x'] = 1
    if top_left_point['x'] + font_width + padding*2 > image.shape[1]:
        top_left_point['x'] = image.shape[1] - font_width - padding*2
    if top_left_point['y'] - font_height - padding*2 < 0:
        top_left_point['y'] = font_height + padding*2

    p3 = top_left_point['x'] + font_width + padding*2, top_left_point['y'] - font_height - padding*2
    cv2.rectangle(image, (top_left_point['x'] - 1, top_left_point['y']), p3, color, -1, lineType=cv2.LINE_AA)
    x = top_left_point['x'] + padding
    y = top_left_point['y'] - padding
    cv2.putText(image, label, (x, y), font_face, font_scale, [0, 0, 0], thickness=1, lineType=cv2.LINE_AA)

    return image

def draw(image, detections, classes=None, alpha=0.15):
    image_copy = image.copy()

    for box in detections:
        class_name = box['class']
        conf = box['confidence']
        label = f'{class_name} {int(conf*100)}%' + (f' | {box["text"].upper()}' if 'text' in box else '')
        width = box['width']
        height = box['height']
        top_left_point = {'x':box['x'], 'y':box['y']}
        bottom_right_point = {'x':box['x'] + width, 'y':box['y'] + height}
        if classes is None or class_name in classes:
            image_copy = plot_box(image_copy, top_left_point, bottom_right_point, width, height, label, alpha=alpha)

    return image_copy

def get_time():
    return datetime.now().strftime('%d-%m-%Y_%I:%M:%S_%p')

def save_frame(frame, path):
    cv2.imwrite(path, frame)

def save_json(data, path):
    with open(path, 'w') as json_file:
        json_file.write(json.dumps(data, indent=4))
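
# A minimal way to try these helpers on a single image instead of the live camera
# loop in run.py. The image path, output names, and the empty URL below are
# placeholders; swap in your own file and Theos deployment URL before running
# `python utils.py`.
if __name__ == '__main__':
    IMAGE_PATH = 'example.jpg' # placeholder: any photo that shows a license plate
    URL = '' # placeholder: your Theos deployment URL

    image = cv2.imread(IMAGE_PATH)

    if image is None:
        raise FileNotFoundError(f'could not read {IMAGE_PATH}')

    # Encode the image the same way run.py does before sending it to the API
    image_bytes = cv2.imencode('.jpg', image)[1].tobytes()
    detections = detect(image_bytes, url=URL, ocr_model='large', ocr_classes='license-plate')

    if detections:
        save_frame(draw(image, detections), 'example_output.jpg')
        save_json(detections, 'example_output.json')
        print('[+] Saved example_output.jpg and example_output.json')
    else:
        print('[!] No license plates detected.')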
--------------------------------------------------------------------------------