├── README.md
├── TF-Pi-Image-OD.py
├── TF-Pi-Video-OD.py
├── TF-PiCamera-OD.py
├── doc
│   ├── .gitignore
│   ├── Camera Interface.png
│   ├── Raspi vid.png
│   ├── Thumbnail.png
│   ├── Thumbnail2.png
│   ├── demo.png
│   ├── directory.png
│   └── modelzoo.png
├── get-prerequisites.sh
└── install-object-detection-api.sh
/README.md:
--------------------------------------------------------------------------------
1 | # Object-Detection-on-Raspberry-Pi
2 | [](https://github.com/tensorflow/tensorflow/releases/tag/v2.2.0)
3 | ### This Tutorial Covers How to deploy the New TensorFlow 2 Object Detection Models and Custom Object Detection Models on the Raspberry Pi
237 | Congratulations! This means we're successfully performing real-time object detection on the Raspberry Pi! Now that you've tried out the Pi Camera, why not give one of the other scripts a go? Over the next few weeks I'll keep adding to this repo and tinkering with the programs to make them better than ever! If you find something cool, feel free to share it so others can learn from it too! And if you run into any errors, just raise an issue and I'll be happy to take a look. Great work, and until next time, bye!
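
For quick reference, here is one way to launch each script once your model and label map are in place. These invocations are only a sketch: the paths shown are just the argparse defaults baked into the scripts below, so swap in your own model folder, label map, image, or video as needed.

```bash
# Single image (defaults shown explicitly; press any key to close the window)
python3 TF-Pi-Image-OD.py --model od-models/my_mobilenet_model --image test.png --threshold 0.5

# Video file (press 'q' to quit)
python3 TF-Pi-Video-OD.py --video test.mp4

# Live Pi Camera feed (press 'q' to quit)
python3 TF-PiCamera-OD.py --labels models/research/object_detection/data/mscoco_label_map.pbtxt
```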
--------------------------------------------------------------------------------
/TF-Pi-Image-OD.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 | """
4 | Object Detection (On Image) From TF2 Saved Model
5 | =====================================
6 | """
7 |
8 | import os
9 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1)
10 | import pathlib
11 | import tensorflow as tf
12 | import cv2
13 | import argparse
14 |
15 | tf.get_logger().setLevel('ERROR') # Suppress TensorFlow logging (2)
16 |
17 | parser = argparse.ArgumentParser()
18 | parser.add_argument('--model', help='Folder that the Saved Model is Located In',
19 | default='od-models/my_mobilenet_model')
20 | parser.add_argument('--labels', help='Where the Labelmap is Located',
21 | default='models/research/object_detection/data/mscoco_label_map.pbtxt')
22 | parser.add_argument('--image', help='Name of the single image to perform detection on',
23 | default='test.png')
24 | parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
25 | default=0.5)
26 |
27 | args = parser.parse_args()
28 | # Enable GPU dynamic memory allocation
29 | gpus = tf.config.experimental.list_physical_devices('GPU')
30 | for gpu in gpus:
31 | tf.config.experimental.set_memory_growth(gpu, True)
32 |
33 | # PROVIDE PATH TO THE IMAGE
34 | IMAGE_PATHS = args.image
35 |
36 |
37 | # PROVIDE PATH TO MODEL DIRECTORY
38 | PATH_TO_MODEL_DIR = args.model
39 |
40 | # PROVIDE PATH TO LABEL MAP
41 | PATH_TO_LABELS = args.labels
42 |
43 | # PROVIDE THE MINIMUM CONFIDENCE THRESHOLD
44 | MIN_CONF_THRESH = float(args.threshold)
45 |
46 | # LOAD THE MODEL
47 |
48 | import time
49 | from object_detection.utils import label_map_util
50 | from object_detection.utils import visualization_utils as viz_utils
51 |
52 | PATH_TO_SAVED_MODEL = PATH_TO_MODEL_DIR + "/saved_model"
53 |
54 | print('Loading model...', end='')
55 | start_time = time.time()
56 |
57 | # LOAD SAVED MODEL AND BUILD DETECTION FUNCTION
58 | detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL)
59 |
60 | end_time = time.time()
61 | elapsed_time = end_time - start_time
62 | print('Done! Took {} seconds'.format(elapsed_time))
63 |
64 | # LOAD LABEL MAP DATA FOR PLOTTING
65 |
66 | category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,
67 | use_display_name=True)
68 | import numpy as np
69 | import matplotlib.pyplot as plt
70 | import warnings
71 | warnings.filterwarnings('ignore') # Suppress Matplotlib warnings
72 |
73 |
74 | print('Running inference for {}... '.format(IMAGE_PATHS), end='')
75 |
76 | image = cv2.imread(IMAGE_PATHS)
77 | image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # OpenCV loads images as BGR; the model expects RGB
78 | imH, imW, _ = image.shape
79 |
80 |
81 | # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
82 | input_tensor = tf.convert_to_tensor(image_rgb)
83 | # The model expects a batch of images, so add an axis with `tf.newaxis`.
84 | input_tensor = input_tensor[tf.newaxis, ...]
85 |
86 |
87 | detections = detect_fn(input_tensor)
88 |
89 | # All outputs are batched tensors.
90 | # Convert to numpy arrays, and take index [0] to remove the batch dimension.
91 | # We're only interested in the first num_detections.
92 | num_detections = int(detections.pop('num_detections'))
93 | detections = {key: value[0, :num_detections].numpy()
94 | for key, value in detections.items()}
95 | detections['num_detections'] = num_detections
96 |
97 | # detection_classes should be ints.
98 | detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
99 | scores = detections['detection_scores']
100 | boxes = detections['detection_boxes']
101 | classes = detections['detection_classes']
102 | count = 0
103 | for i in range(len(scores)):
104 | if ((scores[i] > MIN_CONF_THRESH) and (scores[i] <= 1.0)):
105 | #increase count
106 | count += 1
107 | # Get bounding box coordinates and draw box
108 | # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
109 | ymin = int(max(1,(boxes[i][0] * imH)))
110 | xmin = int(max(1,(boxes[i][1] * imW)))
111 | ymax = int(min(imH,(boxes[i][2] * imH)))
112 | xmax = int(min(imW,(boxes[i][3] * imW)))
113 |
114 | cv2.rectangle(image, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
115 | # Draw label
116 | object_name = category_index[int(classes[i])]['name'] # Look up object name from "labels" array using class index
117 | label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
118 | labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
119 | label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
120 | cv2.rectangle(image, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
121 | cv2.putText(image, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
122 |
123 |
124 | cv2.putText(image, 'Objects Detected : ' + str(count), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (70, 235, 52), 2, cv2.LINE_AA)
125 | print('Done')
126 | # DISPLAYS OUTPUT IMAGE
127 | cv2.imshow('Object Counter', image)
128 | # CLOSES WINDOW ONCE KEY IS PRESSED
129 | cv2.waitKey(0)
130 | # CLEANUP
131 | cv2.destroyAllWindows()
132 |
133 |
--------------------------------------------------------------------------------
/TF-Pi-Video-OD.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 | """
4 | Object Detection (On Video) From TF2 Saved Model
5 | =====================================
6 | """
7 |
8 | import os
9 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1)
10 | import pathlib
11 | import tensorflow as tf
12 | import cv2
13 | import argparse
14 |
15 | tf.get_logger().setLevel('ERROR') # Suppress TensorFlow logging (2)
16 |
17 | parser = argparse.ArgumentParser()
18 | parser.add_argument('--model', help='Folder that the Saved Model is Located In',
19 | default='od-models/my_mobilenet_model')
20 | parser.add_argument('--labels', help='Where the Labelmap is Located',
21 | default='models/research/object_detection/data/mscoco_label_map.pbtxt')
22 | parser.add_argument('--video', help='Name of the video to perform detection on',
23 | default='test.mp4')
24 | parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
25 | default=0.5)
26 |
27 | args = parser.parse_args()
28 | # Enable GPU dynamic memory allocation
29 | gpus = tf.config.experimental.list_physical_devices('GPU')
30 | for gpu in gpus:
31 | tf.config.experimental.set_memory_growth(gpu, True)
32 |
33 | # PROVIDE PATH TO THE VIDEO
34 | VIDEO_PATHS = args.video
35 |
36 |
37 | # PROVIDE PATH TO MODEL DIRECTORY
38 | PATH_TO_MODEL_DIR = args.model
39 |
40 | # PROVIDE PATH TO LABEL MAP
41 | PATH_TO_LABELS = args.labels
42 |
43 | # PROVIDE THE MINIMUM CONFIDENCE THRESHOLD
44 | MIN_CONF_THRESH = float(args.threshold)
45 |
46 | # Load the model
47 | # ~~~~~~~~~~~~~~
48 | import time
49 | from object_detection.utils import label_map_util
50 | from object_detection.utils import visualization_utils as viz_utils
51 |
52 | PATH_TO_SAVED_MODEL = PATH_TO_MODEL_DIR + "/saved_model"
53 |
54 | print('Loading model...', end='')
55 | start_time = time.time()
56 |
57 | # Load saved model and build the detection function
58 | detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL)
59 |
60 | end_time = time.time()
61 | elapsed_time = end_time - start_time
62 | print('Done! Took {} seconds'.format(elapsed_time))
63 |
64 | # Load label map data (for plotting)
65 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
66 |
67 |
68 | category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,
69 | use_display_name=True)
70 |
71 | import numpy as np
72 | from PIL import Image
73 | import matplotlib.pyplot as plt
74 | import warnings
75 | warnings.filterwarnings('ignore') # Suppress Matplotlib warnings
76 |
77 | def load_image_into_numpy_array(path):
78 | """Load an image from file into a numpy array.
79 | Puts image into numpy array to feed into tensorflow graph.
80 | Note that by convention we put it into a numpy array with shape
81 | (height, width, channels), where channels=3 for RGB.
82 | Args:
83 | path: the file path to the image
84 | Returns:
85 | uint8 numpy array with shape (img_height, img_width, 3)
86 | """
87 | return np.array(Image.open(path))
88 |
89 |
90 |
91 |
92 | print('Running inference for {}... '.format(VIDEO_PATHS), end='')
93 |
94 | video = cv2.VideoCapture(VIDEO_PATHS)
95 | while(video.isOpened()):
96 |
97 | # Grab a frame from the video; OpenCV reads frames as BGR,
98 | # so convert to RGB before handing the frame to the model
99 | ret, frame = video.read()
100 | if not ret: break  # Stop when the video ends or a frame cannot be read
101 | frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
102 | imH, imW, _ = frame.shape
103 |
104 | # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
105 | input_tensor = tf.convert_to_tensor(frame_rgb)
106 | # The model expects a batch of images, so add an axis with `tf.newaxis`.
107 | input_tensor = input_tensor[tf.newaxis, ...]
108 |
109 |
110 | detections = detect_fn(input_tensor)
111 |
112 | # All outputs are batched tensors.
113 | # Convert to numpy arrays, and take index [0] to remove the batch dimension.
114 | # We're only interested in the first num_detections.
115 | num_detections = int(detections.pop('num_detections'))
116 | detections = {key: value[0, :num_detections].numpy()
117 | for key, value in detections.items()}
118 | detections['num_detections'] = num_detections
119 |
120 | # detection_classes should be ints.
121 | detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
122 |
123 |
124 | # SET MIN SCORE THRESH TO MINIMUM THRESHOLD FOR DETECTIONS
125 |
126 |
127 | scores = detections['detection_scores']
128 | boxes = detections['detection_boxes']
129 | classes = detections['detection_classes']
130 | count = 0
131 | for i in range(len(scores)):
132 | if ((scores[i] > MIN_CONF_THRESH) and (scores[i] <= 1.0)):
133 | #increase count
134 | count += 1
135 | # Get bounding box coordinates and draw box
136 | # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
137 | ymin = int(max(1,(boxes[i][0] * imH)))
138 | xmin = int(max(1,(boxes[i][1] * imW)))
139 | ymax = int(min(imH,(boxes[i][2] * imH)))
140 | xmax = int(min(imW,(boxes[i][3] * imW)))
141 |
142 | cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
143 | # Draw label
144 | object_name = category_index[int(classes[i])]['name'] # Look up object name from "labels" array using class index
145 | label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
146 | labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
147 | label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
148 | cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
149 | cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
150 |
151 |
152 | cv2.putText(frame, 'Objects Detected : ' + str(count), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (70, 235, 52), 2, cv2.LINE_AA)
153 | cv2.imshow('Object Detector', frame)
154 |
155 | if cv2.waitKey(1) == ord('q'):
156 | break
157 |
158 | video.release()
159 | cv2.destroyAllWindows()
160 | print("Done")
--------------------------------------------------------------------------------
/TF-PiCamera-OD.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding: utf-8
3 | """
4 | Object Detection (On Pi Camera) From TF2 Saved Model
5 | =====================================
6 | """
7 |
8 | import os
9 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1)
10 | import pathlib
11 | import tensorflow as tf
12 | import cv2
13 | import argparse
14 | from threading import Thread
15 |
16 | tf.get_logger().setLevel('ERROR') # Suppress TensorFlow logging (2)
17 | class VideoStream:
18 | """Camera object that controls video streaming from the Picamera"""
19 | def __init__(self,resolution=(640,480),framerate=30):
20 | # Initialize the PiCamera and the camera image stream
21 | self.stream = cv2.VideoCapture(0)
22 | ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
23 | ret = self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
24 | ret = self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
25 |
26 | # Read first frame from the stream
27 | (self.grabbed, self.frame) = self.stream.read()
28 |
29 | # Variable to control when the camera is stopped
30 | self.stopped = False
31 |
32 | def start(self):
33 | # Start the thread that reads frames from the video stream
34 | Thread(target=self.update,args=()).start()
35 | return self
36 |
37 | def update(self):
38 | # Keep looping indefinitely until the thread is stopped
39 | while True:
40 | # If the camera is stopped, stop the thread
41 | if self.stopped:
42 | # Close camera resources
43 | self.stream.release()
44 | return
45 |
46 | # Otherwise, grab the next frame from the stream
47 | (self.grabbed, self.frame) = self.stream.read()
48 |
49 | def read(self):
50 | # Return the most recent frame
51 | return self.frame
52 |
53 | def stop(self):
54 | # Indicate that the camera and thread should be stopped
55 | self.stopped = True
56 |
57 |
58 | parser = argparse.ArgumentParser()
59 | parser.add_argument('--model', help='Folder that the Saved Model is Located In',
60 | default='od-models/my_mobilenet_model')
61 | parser.add_argument('--labels', help='Where the Labelmap is Located',
62 | default='models/research/object_detection/data/mscoco_label_map.pbtxt')
63 | parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
64 | default=0.5)
65 |
66 | args = parser.parse_args()
67 |
68 |
69 | # PROVIDE PATH TO MODEL DIRECTORY
70 | PATH_TO_MODEL_DIR = args.model
71 |
72 | # PROVIDE PATH TO LABEL MAP
73 | PATH_TO_LABELS = args.labels
74 |
75 | # PROVIDE THE MINIMUM CONFIDENCE THRESHOLD
76 | MIN_CONF_THRESH = float(args.threshold)
77 |
78 | # Load the model
79 | # ~~~~~~~~~~~~~~
80 | import time
81 | from object_detection.utils import label_map_util
82 | from object_detection.utils import visualization_utils as viz_utils
83 |
84 | PATH_TO_SAVED_MODEL = PATH_TO_MODEL_DIR + "/saved_model"
85 |
86 | print('Loading model...', end='')
87 | start_time = time.time()
88 |
89 | # Load saved model and build the detection function
90 | detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL)
91 |
92 | end_time = time.time()
93 | elapsed_time = end_time - start_time
94 | print('Done! Took {} seconds'.format(elapsed_time))
95 |
96 | # Load label map data (for plotting)
97 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
98 |
99 |
100 | category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS,
101 | use_display_name=True)
102 |
103 | import numpy as np
104 | import matplotlib.pyplot as plt
105 | import warnings
106 | warnings.filterwarnings('ignore') # Suppress Matplotlib warnings
107 |
108 | print('Running inference for PiCamera')
109 | videostream = VideoStream(resolution=(640,480),framerate=30).start()
110 | while True:
111 |
112 | # Grab the latest frame from the camera stream; OpenCV captures frames
113 | # as BGR, so convert to RGB before handing the frame to the model
114 | frame = videostream.read()
115 | frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
116 | imH, imW, _ = frame.shape
117 |
118 |
119 | # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
120 | input_tensor = tf.convert_to_tensor(frame_rgb)
121 | # The model expects a batch of images, so add an axis with `tf.newaxis`.
122 | input_tensor = input_tensor[tf.newaxis, ...]
123 |
124 |
125 | detections = detect_fn(input_tensor)
126 |
127 | # All outputs are batched tensors.
128 | # Convert to numpy arrays, and take index [0] to remove the batch dimension.
129 | # We're only interested in the first num_detections.
130 | num_detections = int(detections.pop('num_detections'))
131 | detections = {key: value[0, :num_detections].numpy()
132 | for key, value in detections.items()}
133 | detections['num_detections'] = num_detections
134 |
135 | # detection_classes should be ints.
136 | detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
137 |
138 |
139 | # SET MIN SCORE THRESH TO MINIMUM THRESHOLD FOR DETECTIONS
140 |
141 |
142 | scores = detections['detection_scores']
143 | boxes = detections['detection_boxes']
144 | classes = detections['detection_classes']
145 | count = 0
146 | for i in range(len(scores)):
147 | if ((scores[i] > MIN_CONF_THRESH) and (scores[i] <= 1.0)):
148 | #increase count
149 | count += 1
150 | # Get bounding box coordinates and draw box
151 | # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
152 | ymin = int(max(1,(boxes[i][0] * imH)))
153 | xmin = int(max(1,(boxes[i][1] * imW)))
154 | ymax = int(min(imH,(boxes[i][2] * imH)))
155 | xmax = int(min(imW,(boxes[i][3] * imW)))
156 |
157 | cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
158 | # Draw label
159 | object_name = category_index[int(classes[i])]['name'] # Look up object name from "labels" array using class index
160 | label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
161 | labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
162 | label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
163 | cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
164 | cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
165 |
166 |
167 | cv2.putText(frame, 'Objects Detected : ' + str(count), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (70, 235, 52), 2, cv2.LINE_AA)
168 | cv2.imshow('Object Detector', frame)
169 |
170 | if cv2.waitKey(1) == ord('q'):
171 | break
172 |
173 | cv2.destroyAllWindows()
174 | videostream.stop()
175 | print("Done")
--------------------------------------------------------------------------------
/doc/.gitignore:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/doc/Camera Interface.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armaanpriyadarshan/Object-Detection-on-Raspberry-Pi/7f977cd306230f01773e6d771a26b55004d42e2b/doc/Camera Interface.png
--------------------------------------------------------------------------------
/doc/Raspi vid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armaanpriyadarshan/Object-Detection-on-Raspberry-Pi/7f977cd306230f01773e6d771a26b55004d42e2b/doc/Raspi vid.png
--------------------------------------------------------------------------------
/doc/Thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armaanpriyadarshan/Object-Detection-on-Raspberry-Pi/7f977cd306230f01773e6d771a26b55004d42e2b/doc/Thumbnail.png
--------------------------------------------------------------------------------
/doc/Thumbnail2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armaanpriyadarshan/Object-Detection-on-Raspberry-Pi/7f977cd306230f01773e6d771a26b55004d42e2b/doc/Thumbnail2.png
--------------------------------------------------------------------------------
/doc/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armaanpriyadarshan/Object-Detection-on-Raspberry-Pi/7f977cd306230f01773e6d771a26b55004d42e2b/doc/demo.png
--------------------------------------------------------------------------------
/doc/directory.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armaanpriyadarshan/Object-Detection-on-Raspberry-Pi/7f977cd306230f01773e6d771a26b55004d42e2b/doc/directory.png
--------------------------------------------------------------------------------
/doc/modelzoo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/armaanpriyadarshan/Object-Detection-on-Raspberry-Pi/7f977cd306230f01773e6d771a26b55004d42e2b/doc/modelzoo.png
--------------------------------------------------------------------------------
/get-prerequisites.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Installs prerequisites for OpenCV
3 | sudo apt-get install -y libhdf5-dev libhdf5-serial-dev libhdf5-103
4 | sudo apt-get install -y libqtgui4 libqtwebkit4 libqt4-test python3-pyqt5
5 | sudo apt-get install -y libatlas-base-dev
6 | sudo apt-get install -y libjasper-dev
7 |
8 | # Installs OpenCV pip package
9 |
10 | pip install opencv-python==4.1.0.25
11 |
12 | # Install prerequisites for TensorFlow 2.2.0
13 |
14 | sudo apt-get install -y gfortran
15 | sudo apt-get install -y libhdf5-dev libc-ares-dev libeigen3-dev
16 | sudo apt-get install -y libatlas-base-dev libopenblas-dev libblas-dev
17 | sudo apt-get install -y liblapack-dev cython
18 | sudo pip3 install pybind11
19 | sudo pip3 install h5py
20 | sudo pip3 install --upgrade setuptools
21 | pip install gdown
22 | sudo cp /home/pi/.local/bin/gdown /usr/local/bin/gdown
23 | gdown https://drive.google.com/uc?id=11mujzVaFqa7R1_lB7q0kVPW22Ol51MPg
24 |
25 | # Install the Downloaded Wheel
26 |
27 | pip install tensorflow-2.2.0-cp37-cp37m-linux_armv7l.whl
28 |
29 | # A Few more Prerequisites for the TensorFlow Object Detection API and Testing
30 |
31 | pip install matplotlib
32 | sudo apt-get install -y protobuf-compiler
33 |
34 | # Print Success Message
35 |
36 | echo "Prerequisites Downloaded Successfully"
37 |
--------------------------------------------------------------------------------
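One note on get-prerequisites.sh before moving on: as written, it assumes the stock `pi` user (it copies gdown out of `/home/pi/.local/bin`) and a Python 3.7 environment, since the wheel it downloads is built for cp37 on armv7l. Under those assumptions, a typical run looks like:

```bash
# Run from the root of the cloned repo; apt will prompt for your sudo password
chmod +x get-prerequisites.sh
./get-prerequisites.sh
```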
/install-object-detection-api.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Clone the TensorFlow Models repository, compile the protos, and add the object detection module to PYTHONPATH
3 | git clone https://github.com/tensorflow/models.git
4 | cd models/research
5 | protoc object_detection/protos/*.proto --python_out=.
6 | export PYTHONPATH=$PYTHONPATH:/home/pi/tensorflow/models/research:/home/pi/tensorflow/models/research/slim
7 | # Note: run this script with "source" so the export persists; adjust the paths above if you didn't clone under /home/pi/tensorflow
8 | # Return to our Directory
9 | cd ../..
10 |
11 | # Echo Success Message
12 |
13 | echo 'TensorFlow Object Detection API Set Up Successfully!'
14 |
--------------------------------------------------------------------------------
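Finally, because install-object-detection-api.sh sets PYTHONPATH with a plain `export`, the change only sticks in your current shell if the script is run with `source`. A quick sanity check after setup, using the same import the detection scripts rely on, might look like this (the `print('OK')` is just an illustrative success marker):

```bash
# Run the setup in the current shell so the PYTHONPATH export persists
source ./install-object-detection-api.sh

# Verify the Object Detection API is importable
python3 -c "from object_detection.utils import label_map_util; print('OK')"
```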