├── README.md ├── all_models ├── coco_labels.txt ├── labels_mobilenet_quant_v1_224.txt ├── mobilenet_ssd_v2_coco_quant_postprocess.tflite ├── mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite └── mobilenet_v1_1.0_224_quant.tflite └── earthrover ├── accelerometer ├── acc.js ├── ajax_acc.php └── index.html ├── camera_lights ├── ajax_camera.php ├── ajax_lights.php └── cam_server.py ├── compass ├── control1 │ └── control1.html ├── control2 │ ├── control2.html │ ├── followup │ │ ├── followup.html │ │ ├── followup.js │ │ ├── needle3.png │ │ ├── read_robot_heading.php │ │ └── scale3.png │ ├── load.gif │ ├── roundslider.min.css │ └── roundslider.min.js ├── index.html ├── robot_compass │ ├── ajax_heading.php │ ├── css │ │ └── app3.css │ ├── heading.txt │ ├── js │ │ └── app3.js │ └── robot_compass.html └── turn_degrees.php ├── control_panel ├── ajax_direction.php ├── ajax_speed.php ├── css │ ├── cp.css │ ├── images │ │ ├── acc.png │ │ ├── compass.png │ │ ├── earthrover.png │ │ ├── gestures_tm.jpg │ │ ├── human_follower.png │ │ ├── img_classification.png │ │ ├── obj_detection.png │ │ ├── obj_tracking.png │ │ └── speak.png │ └── remote.css ├── index.php ├── js │ ├── cp.js │ ├── jquery.min.js │ └── remote.js ├── misc │ ├── hw.php │ └── hw.py ├── pwm │ ├── generate_pwm.py │ ├── pwm1.txt │ └── pwm_control.py ├── readme │ ├── img │ │ ├── ai.jpg │ │ ├── camera.jpg │ │ ├── ckt.jpeg │ │ ├── dir.jpg │ │ ├── javascript.jpg │ │ ├── lights.jpg │ │ ├── range1.jpg │ │ ├── range2.jpg │ │ └── speaker.jpg │ └── index.html └── remote.php ├── human_following ├── README.md ├── common.py ├── human_follower.py ├── human_follower2.py ├── master.py ├── templates │ └── index.html └── web │ └── ajax_master.php ├── image_classification ├── image_recog.py ├── image_recog_cv2.py ├── master.py ├── templates │ └── index1.html └── web │ └── ajax_master.php ├── index.php ├── object_detection ├── README.md ├── common1.py ├── master.py ├── object_detection.py ├── object_detection_web1.py ├── object_detection_web2.py ├── templates │ ├── index1.html │ └── index2.html └── web │ ├── ajax_master.php │ ├── index.php │ ├── object_cmd.php │ ├── object_cmd.txt │ ├── object_found.php │ └── object_found.txt ├── object_tracking ├── README.md ├── common.py ├── master.py ├── object_tracking.py ├── templates │ └── index.html └── web │ └── ajax_master.php ├── range_sensor ├── avoid_collision.py ├── master.py ├── monitorSensor.py ├── range_sensor.py └── web │ ├── ajax_getRange.php │ ├── ajax_rangeSensor.php │ ├── range.txt │ ├── rangesensor.js │ └── test.php ├── speaker ├── sounds │ ├── horn.mp3 │ └── siren.mp3 ├── speaker_tts.py └── web │ ├── ajax_omx.php │ ├── ajax_tts.php │ └── speaker.js ├── tm ├── ajax_action.php ├── index.html ├── js │ ├── teachablemachine-image.min.js │ └── tf.min.js ├── metadata.json ├── model.json └── weights.bin ├── util.py ├── vars.php └── voice_control ├── action.php ├── images ├── rover.png └── speak.png ├── index.html └── script.js /README.md: -------------------------------------------------------------------------------- 1 |

2 | Visit Website: 3 | 4 | 5 | Youtube Channel: 6 | 7 | 8 | 9 |

10 | 11 | # Robotics Level 4 12 | 13 | This repo is an extension of the previous [level](https://github.com/jiteshsaini/robotics-level-3). The code of this robot is organised in various folders inside the directory 'earthrover'. The names of these folders briefly indicate the purpose of the code inside them. This repo focuses on the advanced capabilities embedded into the robot through pre-trained Machine Learning models provided by tensorflow.org, or custom models created with Google's online tool Teachable Machine. The following projects in this repo demonstrate how to integrate TensorFlow Lite and such Machine Learning models on a Raspberry Pi computer. You can read more about them in their individual README.md files. 14 | 15 | - Gesture Controls 16 | - Image Classification 17 | - Object Detection 18 | - Object Tracking 19 | - Human Following 20 | 21 | ## Download the code and configure your Raspberry Pi 22 | 23 | I have created a bash script that installs all the packages / libraries required to run this code on your Raspberry Pi. The script also downloads the code of this repo along with the ML models onto your device automatically. Follow the instructions at the link given below to configure your Raspberry Pi: 24 | 25 | https://helloworld.co.in/earthrover 26 | 27 | 28 | ## Object Detection 29 | 30 | The code for this project is placed in a directory named 'object_detection' inside the 'earthrover' directory. 31 | The ML model used in this project is placed inside the 'all_models' directory. 32 | 33 | The robot can spy on a particular object and raise an alarm on a remote Web Control Panel whenever the selected object appears in the frame. 34 | 35 | ## Object Tracking 36 | The code for this project is placed in a directory named 'object_tracking' inside the 'earthrover' directory. 37 | The ML model used in this project is placed inside the 'all_models' directory. 38 | 39 | The robot is made to track a ball and follow it. You can see the robot's camera view in a browser while it is tracking the ball. 40 | 41 | ## Human Following 42 | The code for this project is placed in a directory named 'human_following' inside the 'earthrover' directory. 43 | The ML model used in this project is placed inside the 'all_models' directory. 44 | 45 | The robot is made to follow a human. It is a good human follower :) 46 | 47 | ## Image Classification 48 | 49 | The code for this project is placed in a directory named 'image_classification' inside the 'earthrover' directory. 50 | The ML model used in this project is placed inside the 'all_models' directory. 51 | 52 | The robot's camera view is streamed over the LAN with overlays of the image classification output. Also, if an object is recognised, the robot speaks out its name. 53 | 54 | ## Gesture Control 55 | 56 | The code for this project is placed in a folder named 'tm' inside the 'earthrover' directory. 57 | The model used in this project is trained through the Teachable Machine online tool by Google. 58 | The model files are present in the same directory. Presently, the model is trained to recognise hand gestures. You can train your own model using Teachable Machine and replace the model files to customise the project. 
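Most of the projects above (all except Gesture Control, which runs in the browser) load one of the .tflite files from the 'all_models' directory with the TensorFlow Lite interpreter on the Raspberry Pi. As a rough illustration only (a minimal sketch, not code from this repo; it assumes the `tflite_runtime` package is installed and that a local image `test.jpg` exists), single-image classification with the bundled quantised MobileNet model looks like this:

```python
# Minimal sketch: classify one image with the bundled quantised MobileNet model.
import numpy as np
from PIL import Image
from tflite_runtime.interpreter import Interpreter

interpreter = Interpreter(model_path="all_models/mobilenet_v1_1.0_224_quant.tflite")
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]

# The quantised model expects a 224x224 RGB image as uint8, with a leading batch dimension.
img = Image.open("test.jpg").convert("RGB").resize((224, 224))
interpreter.set_tensor(inp["index"], np.expand_dims(np.asarray(img, dtype=np.uint8), axis=0))
interpreter.invoke()

scores = np.squeeze(interpreter.get_tensor(out["index"]))
labels = [line.strip() for line in open("all_models/labels_mobilenet_quant_v1_224.txt")]
top = int(np.argmax(scores))
print(labels[top], scores[top] / 255.0)  # uint8 scores, roughly rescaled to [0, 1]
```

The project folders wrap the same idea with a camera loop, overlays and motor commands.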
59 | -------------------------------------------------------------------------------- /all_models/coco_labels.txt: -------------------------------------------------------------------------------- 1 | 0 person 2 | 1 bicycle 3 | 2 car 4 | 3 motorcycle 5 | 4 airplane 6 | 5 bus 7 | 6 train 8 | 7 truck 9 | 8 boat 10 | 9 traffic light 11 | 10 fire hydrant 12 | 12 stop sign 13 | 13 parking meter 14 | 14 bench 15 | 15 bird 16 | 16 cat 17 | 17 dog 18 | 18 horse 19 | 19 sheep 20 | 20 cow 21 | 21 elephant 22 | 22 bear 23 | 23 zebra 24 | 24 giraffe 25 | 26 backpack 26 | 27 umbrella 27 | 30 handbag 28 | 31 tie 29 | 32 suitcase 30 | 33 frisbee 31 | 34 skis 32 | 35 snowboard 33 | 36 sports ball 34 | 37 kite 35 | 38 baseball bat 36 | 39 baseball glove 37 | 40 skateboard 38 | 41 surfboard 39 | 42 tennis racket 40 | 43 bottle 41 | 45 wine glass 42 | 46 cup 43 | 47 fork 44 | 48 knife 45 | 49 spoon 46 | 50 bowl 47 | 51 banana 48 | 52 apple 49 | 53 sandwich 50 | 54 orange 51 | 55 broccoli 52 | 56 carrot 53 | 57 hot dog 54 | 58 pizza 55 | 59 donut 56 | 60 cake 57 | 61 chair 58 | 62 couch 59 | 63 pot_plant 60 | 64 bed 61 | 66 dining table 62 | 69 toilet 63 | 71 tv 64 | 72 laptop 65 | 73 mouse 66 | 74 remote 67 | 75 keyboard 68 | 76 cell phone 69 | 77 microwave 70 | 78 oven 71 | 79 toaster 72 | 80 sink 73 | 81 refrigerator 74 | 83 book 75 | 84 clock 76 | 85 vase 77 | 86 scissors 78 | 87 teddy bear 79 | 88 hair drier 80 | 89 toothbrush -------------------------------------------------------------------------------- /all_models/mobilenet_ssd_v2_coco_quant_postprocess.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/all_models/mobilenet_ssd_v2_coco_quant_postprocess.tflite -------------------------------------------------------------------------------- /all_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/all_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite -------------------------------------------------------------------------------- /all_models/mobilenet_v1_1.0_224_quant.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/all_models/mobilenet_v1_1.0_224_quant.tflite -------------------------------------------------------------------------------- /earthrover/accelerometer/acc.js: -------------------------------------------------------------------------------- 1 | var xx,yy,zz; 2 | 3 | if (window.DeviceMotionEvent != undefined) { 4 | window.ondevicemotion = function(e) { 5 | sleep(250); 6 | xx=e.accelerationIncludingGravity.x.toFixed(2); 7 | yy=e.accelerationIncludingGravity.y.toFixed(2); 8 | zz=e.accelerationIncludingGravity.z.toFixed(2); 9 | 10 | document.getElementById("accelerationX").innerHTML = xx; 11 | document.getElementById("accelerationY").innerHTML = yy; 12 | document.getElementById("accelerationZ").innerHTML = zz; 13 | 14 | if(document.getElementById('acc').value == 'ON') 15 | send_acc_data(xx,yy,zz); 16 | } 17 | } 18 | 19 | function acc_toggle() 20 | { 21 | var id='acc'; 22 | //alert('acc toggle'); 23 | button_caption=document.getElementById(id).value; 24 | 25 | if(button_caption=="OFF"){ 26 | 
document.getElementById(id).value="ON"; 27 | document.getElementById(id).style.backgroundColor="#66ff66"; 28 | 29 | } 30 | if(button_caption=="ON"){ 31 | document.getElementById(id).value="OFF"; 32 | document.getElementById(id).style.backgroundColor="gray"; 33 | } 34 | } 35 | 36 | function sleep(milliseconds) { 37 | var start = new Date().getTime(); 38 | for (var i = 0; i < 1e7; i++) { 39 | if ((new Date().getTime() - start) > milliseconds){ 40 | break; 41 | } 42 | } 43 | } 44 | 45 | function send_acc_data(xx,yy,zz) 46 | { 47 | 48 | $.post("ajax_acc.php", 49 | { 50 | acc_x: xx, 51 | acc_y: yy, 52 | acc_z:zz 53 | } 54 | ); 55 | } 56 | -------------------------------------------------------------------------------- /earthrover/accelerometer/ajax_acc.php: -------------------------------------------------------------------------------- 1 | 4){ 9 | right(); 10 | } 11 | elseif($y<-4){ 12 | left(); 13 | } 14 | else{ 15 | 16 | if ($z>5){ 17 | forward(); 18 | } 19 | elseif ($z<-5){ 20 | back(); 21 | } 22 | else{ 23 | stop(); 24 | } 25 | } 26 | 27 | ?> 28 | -------------------------------------------------------------------------------- /earthrover/accelerometer/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Accelerometer Control 5 | 6 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 |
45 | 46 | 47 |

Earthrover control through smart phone's Accelerometer

48 | 49 |
50 | X 51 |
52 | 53 |
54 | Y 55 |
56 | 57 |
58 | Z 59 |
60 | 61 |
62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /earthrover/camera_lights/ajax_camera.php: -------------------------------------------------------------------------------- 1 | "; 16 | 17 | ?> 18 | -------------------------------------------------------------------------------- /earthrover/camera_lights/ajax_lights.php: -------------------------------------------------------------------------------- 1 | 27 | -------------------------------------------------------------------------------- /earthrover/camera_lights/cam_server.py: -------------------------------------------------------------------------------- 1 | # Web streaming example 2 | # Source code from the official PiCamera package 3 | # http://picamera.readthedocs.io/en/latest/recipes2.html#web-streaming 4 | 5 | import io 6 | import picamera 7 | import logging 8 | import socketserver 9 | from threading import Condition 10 | from http import server 11 | 12 | PAGE="""\ 13 | 14 | 15 | Rpi Cam-Server 16 | 17 | 18 | 19 |
20 | 21 | 22 | 23 | 24 | """ 25 | 26 | class StreamingOutput(object): 27 | def __init__(self): 28 | self.frame = None 29 | self.buffer = io.BytesIO() 30 | self.condition = Condition() 31 | 32 | def write(self, buf): 33 | if buf.startswith(b'\xff\xd8'): 34 | # New frame, copy the existing buffer's content and notify all 35 | # clients it's available 36 | self.buffer.truncate() 37 | with self.condition: 38 | self.frame = self.buffer.getvalue() 39 | self.condition.notify_all() 40 | self.buffer.seek(0) 41 | return self.buffer.write(buf) 42 | 43 | class StreamingHandler(server.BaseHTTPRequestHandler): 44 | def do_GET(self): 45 | if self.path == '/': 46 | self.send_response(301) 47 | self.send_header('Location', '/index.html') 48 | self.end_headers() 49 | elif self.path == '/index.html': 50 | content = PAGE.encode('utf-8') 51 | self.send_response(200) 52 | self.send_header('Content-Type', 'text/html') 53 | self.send_header('Content-Length', len(content)) 54 | self.end_headers() 55 | self.wfile.write(content) 56 | elif self.path == '/stream.mjpg': 57 | self.send_response(200) 58 | self.send_header('Age', 0) 59 | self.send_header('Cache-Control', 'no-cache, private') 60 | self.send_header('Pragma', 'no-cache') 61 | self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME') 62 | self.end_headers() 63 | try: 64 | while True: 65 | with output.condition: 66 | output.condition.wait() 67 | frame = output.frame 68 | self.wfile.write(b'--FRAME\r\n') 69 | self.send_header('Content-Type', 'image/jpeg') 70 | self.send_header('Content-Length', len(frame)) 71 | self.end_headers() 72 | self.wfile.write(frame) 73 | self.wfile.write(b'\r\n') 74 | except Exception as e: 75 | logging.warning( 76 | 'Removed streaming client %s: %s', 77 | self.client_address, str(e)) 78 | else: 79 | self.send_error(404) 80 | self.end_headers() 81 | 82 | class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer): 83 | allow_reuse_address = True 84 | daemon_threads = True 85 | 86 | with picamera.PiCamera(resolution='640x480', framerate=24) as camera: 87 | output = StreamingOutput() 88 | #Uncomment the next line to change your Pi's Camera rotation (in degrees) 89 | camera.rotation = 180 90 | camera.start_recording(output, format='mjpeg') 91 | try: 92 | address = ('', 8000) 93 | server = StreamingServer(address, StreamingHandler) 94 | server.serve_forever() 95 | finally: 96 | camera.stop_recording() 97 | -------------------------------------------------------------------------------- /earthrover/compass/control1/control1.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | Control-1 15 | 16 | 17 | 18 | 19 | 20 | 35 | 36 | 112 | 113 | 114 | 115 | 116 | 117 | 118 |
119 |

Remote Control

120 |
121 | 122 |
123 | 124 |
125 | 126 |
127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | N 137 | E 138 | S 139 | W 140 | 141 | 142 | 143 |
144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 |
154 | 155 | 156 |
157 | 158 | 159 | 166 |
167 | 168 | 169 | hey 170 | 171 | 172 |
173 | 174 | 175 |
176 |
177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | -------------------------------------------------------------------------------- /earthrover/compass/control2/control2.html: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | RoundSlider - A sample demo 5 | 6 | 59 | 60 | 61 | 62 | 63 | 149 | 150 | 151 | 152 | 153 | 154 |
155 | 156 |
157 | 158 |
159 | 160 |
161 |
162 | 163 |
164 | 165 |
166 | 167 | 168 |
169 | 170 |
171 | 172 |
173 | 174 | 175 |
176 | 177 | 178 |
179 |

180 |
181 | 182 | 183 | -------------------------------------------------------------------------------- /earthrover/compass/control2/followup/followup.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Animated Compass 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /earthrover/compass/control2/followup/followup.js: -------------------------------------------------------------------------------- 1 | // Global variable 2 | var img = null, 3 | needle = null, 4 | ctx = null, 5 | degrees = 0; 6 | 7 | 8 | function clearCanvas() { 9 | // clear canvas 10 | ctx.clearRect(0, 0, 200, 200); 11 | } 12 | 13 | function draw() { 14 | 15 | //degrees=get_data(); 16 | $.get("read_robot_heading.php", function(data, status){degrees = data;}); 17 | 18 | console.log("degrees: " + degrees); 19 | 20 | clearCanvas(); 21 | 22 | // Draw the compass onto the canvas 23 | ctx.drawImage(img, 0, 0); 24 | 25 | // Save the current drawing state 26 | ctx.save(); 27 | 28 | // Now move across and down half the 29 | ctx.translate(100, 100); 30 | 31 | // Rotate around this point 32 | ctx.rotate(degrees * (Math.PI / 180)); 33 | 34 | // Draw the image back and up 35 | ctx.drawImage(needle, -100, -100); 36 | 37 | // Restore the previous drawing state 38 | ctx.restore(); 39 | 40 | // Increment the angle of the needle by 5 degrees 41 | //degrees += 5; 42 | //degrees = 30; 43 | } 44 | 45 | 46 | 47 | function imgLoaded() { 48 | // Image loaded event complete. Start the timer 49 | setInterval(draw, 1000); 50 | } 51 | 52 | function init() { 53 | // Grab the compass element 54 | var canvas = document.getElementById('followup_compass'); 55 | 56 | // Canvas supported? 57 | if (canvas.getContext('2d')) { 58 | ctx = canvas.getContext('2d'); 59 | 60 | // Load the needle image 61 | needle = new Image(); 62 | needle.src = 'needle3.png'; 63 | 64 | // Load the compass image 65 | img = new Image(); 66 | img.src = 'scale3.png'; 67 | img.onload = imgLoaded; 68 | } else { 69 | alert("Canvas not supported!"); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /earthrover/compass/control2/followup/needle3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/compass/control2/followup/needle3.png -------------------------------------------------------------------------------- /earthrover/compass/control2/followup/read_robot_heading.php: -------------------------------------------------------------------------------- 1 | 9 | -------------------------------------------------------------------------------- /earthrover/compass/control2/followup/scale3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/compass/control2/followup/scale3.png -------------------------------------------------------------------------------- /earthrover/compass/control2/load.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/compass/control2/load.gif -------------------------------------------------------------------------------- 
/earthrover/compass/control2/roundslider.min.css: -------------------------------------------------------------------------------- 1 | /*! roundSlider v1.6.1 | (c) 2015-2020, Soundar | MIT license | http://roundsliderui.com/licence.html */ 2 | .rs-ie,.rs-edge,.rs-handle{-ms-touch-action:none;touch-action:none}.rs-control{position:relative;outline:0 none}.rs-container{position:relative}.rs-control *,.rs-control *:before,.rs-control *:after{-webkit-box-sizing:border-box;box-sizing:border-box}.rs-animation .rs-transition{transition:all 0.5s linear 0s}.rs-bar{-webkit-transform-origin:100% 50%;-ms-transform-origin:100% 50%;transform-origin:100% 50%}.rs-control .rs-split .rs-path,.rs-control .rs-overlay1,.rs-control .rs-overlay2{-webkit-transform-origin:50% 100%;-ms-transform-origin:50% 100%;transform-origin:50% 100%}.rs-control .rs-overlay{-webkit-transform-origin:100% 100%;-ms-transform-origin:100% 100%;transform-origin:100% 100%}.rs-rounded .rs-seperator,.rs-split .rs-path{-webkit-background-clip:padding-box;background-clip:padding-box}.rs-disabled{opacity:.35}.rs-inner-container{height:100%;width:100%;position:absolute;top:0;overflow:hidden}.rs-control .rs-quarter div.rs-block{height:200%;width:200%}.rs-control .rs-half.rs-top div.rs-block,.rs-control .rs-half.rs-bottom div.rs-block{height:200%;width:100%}.rs-control .rs-half.rs-left div.rs-block,.rs-control .rs-half.rs-right div.rs-block{height:100%;width:200%}.rs-control .rs-bottom .rs-block{top:auto;bottom:0}.rs-control .rs-right .rs-block{right:0}.rs-block.rs-outer{border-radius:1000px}.rs-block{height:100%;width:100%;display:block;position:absolute;top:0;overflow:hidden;z-index:3}.rs-block .rs-inner{border-radius:1000px;display:block;height:100%;width:100%;position:relative}.rs-overlay{width:50%}.rs-overlay1,.rs-overlay2{width:100%}.rs-overlay,.rs-overlay1,.rs-overlay2{position:absolute;background-color:#fff;z-index:3;top:0;height:50%}.rs-bar{display:block;position:absolute;bottom:0;height:0;z-index:10}.rs-bar.rs-rounded{z-index:5}.rs-bar .rs-seperator{height:0;display:block;float:left}.rs-bar:not(.rs-rounded) .rs-seperator{border-left:none;border-right:none}.rs-bar.rs-start .rs-seperator{border-top:none}.rs-bar.rs-end .rs-seperator{border-bottom:none}.rs-bar.rs-start.rs-rounded .rs-seperator{border-radius:0 0 1000px 1000px}.rs-bar.rs-end.rs-rounded .rs-seperator{border-radius:1000px 1000px 0 0}.rs-full .rs-bar,.rs-half .rs-bar{width:50%}.rs-half.rs-left .rs-bar,.rs-half.rs-right .rs-bar,.rs-quarter .rs-bar{width:100%}.rs-full .rs-bar,.rs-half.rs-left .rs-bar,.rs-half.rs-right .rs-bar{top:50%}.rs-bottom .rs-bar{top:0}.rs-half.rs-right .rs-bar,.rs-quarter.rs-right .rs-bar{right:100%}.rs-handle.rs-move{cursor:move}.rs-readonly .rs-handle.rs-move{cursor:default}.rs-classic-mode .rs-path{display:block;height:100%;width:100%}.rs-split .rs-path{border-radius:1000px 1000px 0 0;overflow:hidden;height:50%;position:absolute;top:0;z-index:2}.rs-control .rs-svg-container{display:block;position:absolute;top:0}.rs-control .rs-bottom .rs-svg-container{top:auto;bottom:0}.rs-control .rs-right .rs-svg-container{right:0}.rs-tooltip{position:absolute;cursor:default;border:1px solid transparent;z-index:10}.rs-full .rs-tooltip{top:50%;left:50%}.rs-bottom .rs-tooltip{top:0}.rs-top .rs-tooltip{bottom:0}.rs-right .rs-tooltip{left:0}.rs-left .rs-tooltip{right:0}.rs-half.rs-top .rs-tooltip,.rs-half.rs-bottom .rs-tooltip{left:50%}.rs-half.rs-left .rs-tooltip,.rs-half.rs-right .rs-tooltip{top:50%}.rs-tooltip .rs-input{outline:0 
none;border:none;background:transparent}.rs-tooltip-text{font-family:verdana;font-size:13px;border-radius:7px;text-align:center;color:inherit}.rs-tooltip.rs-edit{padding:5px 8px}.rs-tooltip.rs-hover,.rs-tooltip.rs-edit:hover{border:1px solid #AAA;cursor:pointer}.rs-readonly .rs-tooltip.rs-edit:hover{border-color:transparent;cursor:default}.rs-tooltip.rs-center{margin:0px!important}.rs-half.rs-top .rs-tooltip.rs-center,.rs-half.rs-bottom .rs-tooltip.rs-center{transform:translate(-50%,0)}.rs-half.rs-left .rs-tooltip.rs-center,.rs-half.rs-right .rs-tooltip.rs-center{transform:translate(0,-50%)}.rs-full .rs-tooltip.rs-center{transform:translate(-50%,-50%)}.rs-tooltip.rs-reset{margin:0px!important;top:0px!important;left:0px!important}.rs-handle{border-radius:1000px;outline:0 none;float:left}.rs-handle.rs-handle-square{border-radius:0}.rs-handle-dot{border:1px solid #AAA;padding:6px}.rs-handle-dot:after{display:block;content:"";border:1px solid #AAA;height:100%;width:100%;border-radius:1000px}.rs-seperator{border:1px solid #AAA}.rs-border{border:1px solid #AAA}.rs-path-color{background-color:#FFF}.rs-range-color{background-color:#54BBE0}.rs-bg-color{background-color:#FFF}.rs-handle{background-color:#838383}.rs-handle-dot{background-color:#FFF}.rs-handle-dot:after{background-color:#838383}.rs-path-inherited .rs-path{opacity:.2}.rs-svg-mode .rs-path{stroke:#FFF}.rs-svg-mode .rs-range{stroke:#54BBE0}.rs-svg-mode .rs-border{stroke:#AAA} -------------------------------------------------------------------------------- /earthrover/compass/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Compass Robot 6 | 7 | 39 | 40 | 41 | 42 | 43 | 44 |
45 | 46 |

Open this link on a phone and keep the phone on top of the robot. It acts as a compass for the robot

47 |

The Robot Compass

48 | 49 |
50 | 51 |
52 | 53 |

After placing the phone on the robot, use any of these Web Apps to control the robot

54 |
55 |

On another phone, open this web app to control the robot.

56 | The app uses the mobile phone's compass to generate command angle

57 | 58 |

Control Application 1

59 |
60 | 61 |
62 |

Use this app on mobile or laptop to control the robot.

63 | Command angle is generated through a Round Slider

64 |

Control Application 2

65 | 66 |
67 | 68 |
69 | 70 |
71 | 72 | Article


73 | Video 74 | 75 |
76 | 77 |
78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | -------------------------------------------------------------------------------- /earthrover/compass/robot_compass/ajax_heading.php: -------------------------------------------------------------------------------- 1 | 14 | -------------------------------------------------------------------------------- /earthrover/compass/robot_compass/css/app3.css: -------------------------------------------------------------------------------- 1 | @font-face { 2 | font-family: Muli; 3 | //src: url("../fonts/Muli-Light.ttf"); 4 | } 5 | 6 | 7 | * { 8 | box-sizing: border-box; 9 | } 10 | 11 | h1, h2, h3, h4 { 12 | font-family: Muli, serif; 13 | } 14 | 15 | h1 { 16 | font-size: 1.2rem; 17 | padding-left: 1.7rem; 18 | background: transparent url(../images/logo.png) 0% 50% no-repeat; 19 | background-size: 1.2rem; 20 | } 21 | 22 | html { 23 | font-family: Muli, sans-serif; 24 | font-size: 16px; 25 | background-color: white; 26 | color: white; 27 | 28 | //background: white url(../images/bg.jpg) 50% 50% no-repeat; 29 | background-size: cover; 30 | height: 100%; 31 | } 32 | 33 | body { 34 | padding: 0; 35 | margin: 0; 36 | height: 100%; 37 | } 38 | 39 | 40 | header { 41 | padding: 0.5rem; 42 | background-color: rgba(0, 0, 0, 0.2); 43 | position: fixed; 44 | top: 0; 45 | right: 0; 46 | left: 0; 47 | 48 | transition: -webkit-transform 0.5s ease-out 0s; 49 | transition: transform 0.5s ease-out 0s; 50 | } 51 | 52 | 53 | 54 | 55 | 56 | .container { 57 | padding: 5rem 1rem 8rem 1rem; 58 | overflow: hidden; 59 | 60 | height: 100%; 61 | transition: -webkit-transform 0.5s ease-out 0s; 62 | transition: transform 0.5s ease-out 0s; 63 | } 64 | 65 | 66 | 67 | 68 | 69 | .compass { 70 | position: relative; 71 | width: 100%; 72 | height: 100%; 73 | text-align: center; 74 | } 75 | 76 | .compass__rose { 77 | position: absolute; 78 | top: 0; 79 | left: 0; 80 | right: 0; 81 | bottom: 0; 82 | } 83 | 84 | .compass__rose__dial { 85 | height: 100%; 86 | width: 100%; 87 | } 88 | 89 | .compass__pointer { 90 | height: 100%; 91 | width: 100%; 92 | } 93 | 94 | 95 | .status { 96 | position: fixed; 97 | left: 0; 98 | right: 0; 99 | bottom: 0; 100 | } 101 | 102 | .position { 103 | padding-bottom: 0.1rem; 104 | text-transform: uppercase; 105 | text-align: center; 106 | } 107 | 108 | -------------------------------------------------------------------------------- /earthrover/compass/robot_compass/heading.txt: -------------------------------------------------------------------------------- 1 | 264 -------------------------------------------------------------------------------- /earthrover/compass/robot_compass/js/app3.js: -------------------------------------------------------------------------------- 1 | 2 | window.addEventListener("deviceorientation", onHeadingChange); 3 | // the outer part of the compass that rotates 4 | var rose = document.getElementById("rose"); 5 | 6 | var positionCurrent = null; 7 | 8 | // called on device orientation change 9 | function onHeadingChange(event) { 10 | var orientation = event.alpha; 11 | // var orientation = getBrowserOrientation(); 12 | if (typeof orientation !== "undefined" && orientation !== null) { 13 | positionCurrent = orientation; 14 | 15 | var phase = positionCurrent < 0 ? 
360 + positionCurrent : positionCurrent; 16 | var heading = 360 - phase | 0; 17 | // positionHng.textContent = heading + "°"; 18 | 19 | get_heading(heading); 20 | 21 | // apply rotation to compass rose 22 | if (typeof rose.style.transform !== "undefined") { 23 | rose.style.transform = "rotateZ(" + positionCurrent + "deg)"; 24 | } else if (typeof rose.style.webkitTransform !== "undefined") { 25 | rose.style.webkitTransform = "rotateZ(" + positionCurrent + "deg)"; 26 | } 27 | } 28 | else { 29 | // device can't show heading 30 | //positionHng.textContent = "not"; 31 | alert("No Orientation"); 32 | } 33 | } 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /earthrover/compass/robot_compass/robot_compass.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 8 | 9 | 10 | 11 | 12 | 13 | Compass 14 | 15 | 16 | 17 | 31 | 32 | 33 | 34 | 35 | 36 |
37 |

Earthrover Compass

38 |
39 | 40 | 41 |
42 | 43 |
44 | 45 |
46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | N 56 | E 57 | S 58 | W 59 | 60 | 61 | 62 |
63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 |
74 | 75 | 76 | 77 |
78 | 79 | 80 | 81 |
82 | hey 83 |
84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /earthrover/compass/turn_degrees.php: -------------------------------------------------------------------------------- 1 | "; 30 | 31 | usleep($delay * 1); 32 | move('s'); 33 | usleep(300*1000); //pause between turning moves 34 | 35 | $h=get_current_heading(); 36 | $h_current=intval($h); 37 | 38 | $k=$k+1; 39 | //if($k>30) 40 | //break; 41 | } 42 | $course=$h_current; 43 | //echo"

settled in attempts: $k, new course: $course

"; 44 | //set_speed('50'); 45 | } 46 | 47 | function get_accuracy_factor($deg){ 48 | if ($deg>90) 49 | $k=7000; 50 | 51 | if ($deg>=45 and $deg<=90) 52 | $k=6500; 53 | 54 | if ($deg>=6 and $deg<45) 55 | $k=2100; 56 | 57 | if ($deg<6) 58 | $k=1; 59 | 60 | return $k; 61 | } 62 | 63 | 64 | function get_dir_deg($current,$final){ 65 | $diff=abs($final-$current); 66 | 67 | if ($diff<=180){ 68 | if($final>$current) 69 | $dir='r'; 70 | else 71 | $dir='l'; 72 | 73 | $deg=$diff; 74 | } 75 | else{ 76 | if($final>$current){ 77 | //final is bigger 78 | $dir='l'; 79 | $deg=360-$final+$current; 80 | 81 | } 82 | else{ 83 | //current is bigger 84 | $dir='r'; 85 | $deg=360-$current+$final; 86 | } 87 | 88 | } 89 | 90 | $arr[0]=$dir; 91 | $arr[1]=$deg; 92 | 93 | return $arr; 94 | } 95 | 96 | 97 | function get_current_heading(){ 98 | $myFile = "robot_compass/heading.txt"; 99 | 100 | $fr=fopen($myFile, 'r') or die("can't open file"); 101 | $h=fread($fr, '5'); 102 | fclose($fr); 103 | 104 | return $h; 105 | } 106 | 107 | 108 | ?> 109 | -------------------------------------------------------------------------------- /earthrover/control_panel/ajax_direction.php: -------------------------------------------------------------------------------- 1 | "; 9 | 10 | ?> 11 | -------------------------------------------------------------------------------- /earthrover/control_panel/ajax_speed.php: -------------------------------------------------------------------------------- 1 | "; 9 | set_speed($pwm_val); 10 | 11 | ?> 12 | -------------------------------------------------------------------------------- /earthrover/control_panel/css/cp.css: -------------------------------------------------------------------------------- 1 | #box_outer{ 2 | text-align: center; 3 | width:100%; 4 | //height:100vh; 5 | overflow:auto; 6 | float:left; 7 | border:1px solid grey; 8 | 9 | } 10 | 11 | .box_inner{ 12 | width:100%; 13 | float:left; 14 | } 15 | 16 | #box_video{ 17 | background-color:lightgrey; 18 | text-align: center; 19 | width:70%; 20 | height:500px; 21 | float:left; 22 | border:1px solid lightgray; 23 | margin:0.25%; 24 | } 25 | 26 | #box_remote{ 27 | background-color:#f6f6ff; 28 | text-align: center; 29 | width:28%; 30 | height:500px; 31 | float:left; 32 | border:1px solid lightgray; 33 | margin:0.25%; 34 | } 35 | 36 | .box_controls{ 37 | background-color:#c2c9ff; //#e6e6ff; 38 | box-shadow: 4px 4px 3px #888888; 39 | float:left; 40 | padding: 0.25%; 41 | margin:0.5%; 42 | } 43 | 44 | .box_controls txt{ 45 | color: #999; 46 | font-size: 12px; 47 | float: left; 48 | margin-left: 25%; 49 | } 50 | 51 | .box_controls label { 52 | position: absolute; 53 | left: 8px; 54 | top: 12px; 55 | color: #999; 56 | font-size: 26px; 57 | display: inline-block; 58 | padding: 4px 10px; 59 | font-weight: 400; 60 | //background-color: rgba(255, 255, 255, 0); 61 | } 62 | .box_controls label.floatLabel { 63 | top: -15px; 64 | left: -100px; 65 | background-color: rgba(255, 255, 255, 0.6); 66 | font-size: 14px; 67 | border:1px solid #f0f0f5; 68 | } 69 | 70 | .box_controls zz { 71 | // margin: 0 0 3em 0; 72 | position: relative; 73 | } 74 | 75 | .box_controls input[type="submit"] { 76 | 77 | border-radius: 5px; 78 | font-size: 0.6em; 79 | text-shadow: 0 1px #68B25B; 80 | width:20%; 81 | height: 30px; 82 | float: left; 83 | margin-left: 20%; 84 | cursor: pointer; 85 | background-color:white; 86 | } 87 | .box_controls input[type="submit"]:hover { 88 | background: #1ad1ff; 89 | //text-shadow: 0 1px 3px rgba(70, 93, 41, 0.7); 90 | } 91 | 92 | button{ 93 | 94 | 
height: 2.5rem; 95 | min-width: 2rem; 96 | border: none; 97 | border-radius: 0.15rem; 98 | //margin-left: 25px; 99 | box-shadow: inset 0 -0.15rem 0 rgba(0, 0, 0, 0.2); 100 | cursor: pointer; 101 | float:left; 102 | background-color:white; 103 | } 104 | /* 105 | #box_top button:hover { 106 | background-color: #1ad1ff; 107 | } 108 | */ 109 | 110 | .tooltip { 111 | position: relative; 112 | display: inline-block; 113 | border: 0px solid black; 114 | } 115 | 116 | .tooltip .tooltiptext { 117 | visibility: hidden; 118 | width: 120px; 119 | background-color: #555; 120 | color: #fff; 121 | font-size:12px; 122 | text-align: center; 123 | border-radius: 6px; 124 | padding: 5px 0; 125 | position: absolute; 126 | z-index: 1; 127 | bottom: 125%; 128 | left: 50%; 129 | margin-left: -60px; 130 | opacity: 0; 131 | transition: opacity 0.5s; 132 | } 133 | 134 | .tooltip .tooltiptext a{ 135 | text-decoration: none; 136 | color:yellow 137 | } 138 | 139 | .tooltip .tooltiptext::after { 140 | content: ""; 141 | position: absolute; 142 | top: 100%; 143 | left: 50%; 144 | margin-left: -5px; 145 | border-width: 5px; 146 | border-style: solid; 147 | border-color: #555 transparent transparent transparent; 148 | } 149 | 150 | .tooltip:hover .tooltiptext { 151 | visibility: visible; 152 | opacity: 1; 153 | } 154 | -------------------------------------------------------------------------------- /earthrover/control_panel/css/images/acc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/css/images/acc.png -------------------------------------------------------------------------------- /earthrover/control_panel/css/images/compass.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/css/images/compass.png -------------------------------------------------------------------------------- /earthrover/control_panel/css/images/earthrover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/css/images/earthrover.png -------------------------------------------------------------------------------- /earthrover/control_panel/css/images/gestures_tm.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/css/images/gestures_tm.jpg -------------------------------------------------------------------------------- /earthrover/control_panel/css/images/human_follower.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/css/images/human_follower.png -------------------------------------------------------------------------------- /earthrover/control_panel/css/images/img_classification.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/css/images/img_classification.png 
-------------------------------------------------------------------------------- /earthrover/control_panel/css/images/obj_detection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/css/images/obj_detection.png -------------------------------------------------------------------------------- /earthrover/control_panel/css/images/obj_tracking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/css/images/obj_tracking.png -------------------------------------------------------------------------------- /earthrover/control_panel/css/images/speak.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/css/images/speak.png -------------------------------------------------------------------------------- /earthrover/control_panel/css/remote.css: -------------------------------------------------------------------------------- 1 | #box_outer{ 2 | background-color:#f6f6ff; 3 | text-align: center; 4 | width:100%; 5 | //height:100vh; 6 | float:left; 7 | //border:1px solid green; 8 | //padding:1% 9 | } 10 | 11 | .box_row{ 12 | //border:1px solid red; 13 | width:100%; 14 | height:20%; 15 | } 16 | 17 | .button{ 18 | border-width:2px; 19 | border-radius:15px; 20 | width:30%; 21 | height:100%; 22 | background-color:#93fbff; 23 | font-size: 20px; 24 | } 25 | 26 | #box_outer input[type="submit"]:hover { 27 | background: #1ad1ff; 28 | //text-shadow: 0 1px 3px rgba(70, 93, 41, 0.7); 29 | } 30 | 31 | /*------------slider-----------------------*/ 32 | .slidecontainer { 33 | width: 100%; 34 | } 35 | 36 | .slider { 37 | -webkit-appearance: none; 38 | width: 100%; 39 | height: 25px; 40 | background: #d3d3d3; 41 | outline: none; 42 | opacity: 0.7; 43 | -webkit-transition: .2s; 44 | transition: opacity .2s; 45 | } 46 | 47 | .slider:hover { 48 | opacity: 1; 49 | } 50 | 51 | .slider::-webkit-slider-thumb { 52 | -webkit-appearance: none; 53 | appearance: none; 54 | width: 25px; 55 | height: 25px; 56 | background: #4CAF50; 57 | cursor: pointer; 58 | } 59 | 60 | .slider::-moz-range-thumb { 61 | width: 25px; 62 | height: 25px; 63 | background: #4CAF50; 64 | cursor: pointer; 65 | } 66 | 67 | -------------------------------------------------------------------------------- /earthrover/control_panel/index.php: -------------------------------------------------------------------------------- 1 | 7 | 8 | 9 | Control Panel 10 | 11 | 12 | 13 | 17 | 18 | 19 | ";//------------------------ 25 | echo"Earth Rover"; 26 | echo""; 27 | echo"
";//------------------------ 28 | //Range Sensor block 29 | echo"
"; 30 | echo""; 31 | echo"
"; 32 | echo""; 33 | echo""; 34 | echo""; 35 | echo"
"; 36 | echo"
"; 37 | 38 | //Javascript Robotics block 39 | echo"
"; 40 | echo""; 41 | echo"
"; 42 | 43 | echo"
"; 44 | $w1="30%"; //width of tooltip 45 | $w2="80%"; //width of button inside tooltip 46 | 47 | $href_acc= 'https://'.$host."/earthrover/accelerometer"; 48 | $href_voice= 'https://'.$host."/earthrover/voice_control"; 49 | $href_obj= 'https://'.$host."/earthrover/compass"; 50 | 51 | //Accelerometer control 52 | echo"
53 | 54 | Control the robot with your Mobile phone's Accelerometer data 55 |
"; 56 | 57 | //Voice control (Web Speech API) 58 | echo"
59 | 60 | Control the robot through Voice commands 61 |
"; 62 | 63 | //Javascript Compass 64 | echo"
65 | 66 | Use mobile phone as Compass for the robot 67 |
"; 68 | 69 | echo"
"; 70 | echo"
"; 71 | echo"
"; 72 | 73 | //AI Robotics block 74 | echo"
"; 75 | echo""; 76 | echo"
"; 77 | 78 | echo"
"; 79 | 80 | $w1="19%"; //width of tooltip 81 | $w2="85%"; //width of button inside tooltip 82 | 83 | $href= 'https://'.$host."/earthrover/tm/"; 84 | 85 | //Gesture control 86 | echo"
87 | 88 | Model generated using Teachable Machine. Control the robot using hand gestures. 89 |
"; 90 | 91 | //Image Classification 92 | echo"
93 | 94 | Real-time Image Classification 95 |
"; 96 | 97 | //Object Detection 98 | echo"
99 | 100 | Robot Detects a selected object and raises alarm 101 |
"; 102 | 103 | //Object Tracking 104 | echo"
105 | 106 | Robot tracks and follows a small object such as ball 107 |
"; 108 | 109 | //Human Following 110 | echo"
111 | 112 | Robot tracks and follows a human 113 |
"; 114 | 115 | echo"
"; 116 | 117 | //Display the Green button 118 | echo"
"; 119 | $style_img="display:none;position:absolute;top:1px"; 120 | 121 | $href= 'http://'.$host.':2204'; 122 | echo""; 123 | 124 | $href= 'http://'.$host."/earthrover/object_detection/web"; 125 | echo""; 126 | 127 | $href= 'http://'.$host.':2204'; 128 | echo""; 129 | 130 | $href= 'http://'.$host.':2204'; 131 | echo""; 132 | 133 | echo"
"; 134 | 135 | echo"
"; 136 | echo"
"; 137 | 138 | echo"
"; 139 | 140 | //**************************************************************************** 141 | 142 | $link_remote= 'http://'.$host.$path.'/'."remote.php";//http://192.168.1.20/earthrover/remote.php 143 | $link_vid= 'http://'.$host.':8000';//http://192.168.1.20:8000 144 | 145 | echo" 146 | 147 | 148 | "; 149 | //**************************************************************************** 150 | 151 | echo"
";//------------------------ 152 | 153 | //Camera Controls block 154 | echo"
"; 155 | echo""; 156 | echo"
"; 157 | echo""; 158 | echo""; 159 | echo"
"; 160 | echo" ."; 161 | echo"
"; 162 | echo"
"; 163 | 164 | //Lights Controls block 165 | echo"
"; 166 | echo""; 167 | echo"
"; 168 | echo""; 169 | echo""; 170 | echo"
"; 171 | echo"Camera"; 172 | echo"Front"; 173 | echo"
"; 174 | echo"
"; 175 | 176 | //Sound Controls block 177 | echo"
"; 178 | echo""; 179 | echo"
"; 180 | echo""; 181 | echo"
"; 182 | echo"
"; 183 | echo" M "; 184 | echo" F "; 185 | echo""; 186 | echo"
"; 187 | echo"
"; 188 | echo""; 189 | echo""; 190 | echo"
"; 191 | 192 | echo"
"; 193 | echo"
"; 194 | 195 | 196 | echo"
"; 197 | 198 | echo"
";//------------------------ 199 | echo""; 200 | echo""; 201 | echo""; 202 | echo""; 203 | echo"
"; 204 | 205 | echo"
";//--box_outer--------------------------------------------------- 206 | 207 | ?> 208 | 209 | 210 | 211 | 212 | -------------------------------------------------------------------------------- /earthrover/control_panel/js/cp.js: -------------------------------------------------------------------------------- 1 | function toggle_light(id) 2 | { 3 | //alert(id); 4 | console.log(id); 5 | button_caption=document.getElementById(id).value; 6 | //alert(button_caption); 7 | if(button_caption=="OFF"){ 8 | document.getElementById(id).value="ON"; 9 | document.getElementById(id).style.backgroundColor="#66ff66"; 10 | //alert("hi"); 11 | set_lights(id,1); 12 | } 13 | if(button_caption=="ON"){ 14 | document.getElementById(id).value="OFF"; 15 | document.getElementById(id).style.backgroundColor="white"; 16 | set_lights(id,0); 17 | } 18 | 19 | } 20 | function set_lights(id,state) 21 | { 22 | $.post("/earthrover/camera_lights/ajax_lights.php", 23 | { 24 | light_id: id, 25 | state: state 26 | } 27 | ); 28 | 29 | } 30 | 31 | function camera(status) 32 | { 33 | //alert(status); 34 | if (status=="on"){ 35 | disable_buttons(); 36 | } 37 | else{ 38 | enable_buttons(); 39 | } 40 | 41 | $.post("/earthrover/camera_lights/ajax_camera.php", 42 | { 43 | camera:status 44 | } 45 | ); 46 | sleep(1000); 47 | location.reload(); 48 | } 49 | 50 | 51 | var z=1; 52 | function button_AI_action(id) 53 | { 54 | console.log(id + "************"); 55 | var path = "/earthrover/" + id + "/web/ajax_master.php" 56 | var id_img="img_" + id; 57 | 58 | if (z==1){ 59 | console.log(id + " ON !!!!!!!!!!!!"); 60 | z=z+1; 61 | document.getElementById(id).style.backgroundColor="#66ff66";//#66ff66 62 | disable_buttons(); 63 | document.getElementById(id).disabled=false; 64 | 65 | $.post(path,{state: 1}); 66 | 67 | sleep(2000); 68 | 69 | document.getElementById(id_img).style.display="block"; 70 | 71 | } 72 | else{ 73 | console.log(id + " OFF ###########"); 74 | z=1; 75 | document.getElementById(id).style.backgroundColor="white"; 76 | enable_buttons(); 77 | $.post(path,{state: 0}); 78 | 79 | document.getElementById(id_img).style.display="none"; 80 | } 81 | 82 | } 83 | 84 | function disable_buttons(){ 85 | console.log("disable_buttons"); 86 | 87 | document.getElementById("object_detection").disabled=true; 88 | document.getElementById("object_tracking").disabled=true; 89 | document.getElementById("human_following").disabled=true; 90 | document.getElementById("image_classification").disabled=true; 91 | document.getElementById("cam_on").disabled=true; 92 | 93 | //document.getElementById(id).disabled=false; 94 | 95 | } 96 | 97 | function enable_buttons(){ 98 | console.log("enable_buttons"); 99 | 100 | document.getElementById("object_detection").disabled=false; 101 | document.getElementById("object_tracking").disabled=false; 102 | document.getElementById("human_following").disabled=false; 103 | document.getElementById("image_classification").disabled=false; 104 | document.getElementById("cam_on").disabled=false; 105 | 106 | 107 | } 108 | 109 | function init(){ 110 | document.getElementById("hw_1").innerHTML="helloworld.co.in"; 111 | document.getElementById("hw_2").innerHTML="github.com/jiteshsaini"; 112 | document.getElementById("hw_3").innerHTML="YouTube"; 113 | document.getElementById("hw_4").innerHTML="BuyMeCoffee"; 114 | 115 | console.log(">>>>"); 116 | $.post("/earthrover/control_panel/misc/hw.php", 117 | { 118 | entry_by: 'control_panel', 119 | page: 'index.php' 120 | }); 121 | } 122 | 123 | function sleep(milliseconds) { 124 | var start = new 
Date().getTime(); 125 | for (var i = 0; i < 1e7; i++) { 126 | if ((new Date().getTime() - start) > milliseconds){ 127 | break; 128 | } 129 | } 130 | } 131 | 132 | -------------------------------------------------------------------------------- /earthrover/control_panel/js/remote.js: -------------------------------------------------------------------------------- 1 | $(document).keydown(function(e){ 2 | if (e.keyCode == 37) 3 | button_direction('l'); 4 | if (e.keyCode == 38) 5 | button_direction('f'); 6 | if (e.keyCode == 39) 7 | button_direction('r'); 8 | if (e.keyCode == 40) 9 | button_direction('b'); 10 | if (e.keyCode == 32) 11 | button_direction('s'); 12 | }); 13 | 14 | //---------DIRECTION--------------------------------- 15 | function button_direction(val) 16 | { 17 | console.log("button val:" + val); 18 | $.post("ajax_direction.php", 19 | { 20 | direction: val 21 | } 22 | ); 23 | } 24 | 25 | //---------SPEED-------------------------------------- 26 | function speed_slider(val) 27 | { 28 | console.log("slider val:" + val); 29 | $.post("ajax_speed.php", 30 | { 31 | speed:val 32 | } 33 | ); 34 | } 35 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /earthrover/control_panel/misc/hw.php: -------------------------------------------------------------------------------- 1 | 10 | -------------------------------------------------------------------------------- /earthrover/control_panel/misc/hw.py: -------------------------------------------------------------------------------- 1 | import sys, commands, os 2 | 3 | entry_by = sys.argv[1] 4 | web_page = sys.argv[2] 5 | ip_local=commands.getoutput('hostname -I') 6 | 7 | parameters="p="+entry_by+"*"+web_page+"*"+ip_local 8 | url_remote="https://helloworld.co.in/deploy/run.php?" 
+ parameters 9 | cmd="curl -s " + url_remote 10 | result=os.popen(cmd).read() 11 | -------------------------------------------------------------------------------- /earthrover/control_panel/pwm/generate_pwm.py: -------------------------------------------------------------------------------- 1 | ##########Project: Earthrover ##################### 2 | ##########Created by: Jitesh Saini ##################### 3 | 4 | import RPi.GPIO as GPIO 5 | from time import sleep # import sleep function from time module 6 | 7 | GPIO.setmode(GPIO.BCM) # choose BCM numbering scheme 8 | 9 | GPIO.setup(20, GPIO.OUT)# set GPIO 20 as output pin 10 | GPIO.setup(21, GPIO.OUT)# set GPIO 21 as output pin 11 | 12 | pin20 = GPIO.PWM(20, 100) # create object pin20 for PWM on port 20 at 100 Hertz 13 | pin21 = GPIO.PWM(21, 100) # create object pin21 for PWM on port 21 at 100 Hertz 14 | 15 | pin20.start(0) # start pin20 on 0 percent duty cycle (off) 16 | pin21.start(0) # start pin21 on 0 percent duty cycle (off) 17 | 18 | ###### read the disk file pwm1.txt for speed value######################### 19 | f0 = open("/var/www/html/earthrover/control_panel/pwm/pwm1.txt", "r+") 20 | str0 = f0.read(5) 21 | f0.close() 22 | str0=str0.strip() 23 | 24 | duty = float(str0) 25 | pin20.ChangeDutyCycle(duty) 26 | pin21.ChangeDutyCycle(duty) #same for pin21 27 | print("duty= ",duty) 28 | 29 | while True: 30 | sleep(1) 31 | -------------------------------------------------------------------------------- /earthrover/control_panel/pwm/pwm1.txt: -------------------------------------------------------------------------------- 1 | 50 -------------------------------------------------------------------------------- /earthrover/control_panel/pwm/pwm_control.py: -------------------------------------------------------------------------------- 1 | ##########Project: Earthrover ##################### 2 | ##########Created by: Jitesh Saini ##################### 3 | 4 | import os, time 5 | 6 | os.system("sudo pkill -f generate_pwm.py") 7 | print("stopped !!!") 8 | 9 | #time.sleep(0.1) 10 | 11 | print("starting pwm") 12 | os.system("python /var/www/html/earthrover/control_panel/pwm/generate_pwm.py &") 13 | print("started !!!") 14 | -------------------------------------------------------------------------------- /earthrover/control_panel/readme/img/ai.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/readme/img/ai.jpg -------------------------------------------------------------------------------- /earthrover/control_panel/readme/img/camera.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/readme/img/camera.jpg -------------------------------------------------------------------------------- /earthrover/control_panel/readme/img/ckt.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/readme/img/ckt.jpeg -------------------------------------------------------------------------------- /earthrover/control_panel/readme/img/dir.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/readme/img/dir.jpg -------------------------------------------------------------------------------- /earthrover/control_panel/readme/img/javascript.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/readme/img/javascript.jpg -------------------------------------------------------------------------------- /earthrover/control_panel/readme/img/lights.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/readme/img/lights.jpg -------------------------------------------------------------------------------- /earthrover/control_panel/readme/img/range1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/readme/img/range1.jpg -------------------------------------------------------------------------------- /earthrover/control_panel/readme/img/range2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/readme/img/range2.jpg -------------------------------------------------------------------------------- /earthrover/control_panel/readme/img/speaker.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/control_panel/readme/img/speaker.jpg -------------------------------------------------------------------------------- /earthrover/control_panel/readme/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Read me 6 | 7 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 |
46 |

Earth Rover - The Raspberry Pi Robot

47 | 48 |
49 | 50 |

Hardware connections

51 | 52 | 53 | 54 |

55 | Interface 12 V, 100 RPM DC Motors with Raspberry Pi using L293D based motor driver board.
56 | Attach the Pi Camera to the Raspberry Pi. Don't forget to enable it in the preferences. 57 | Read more about the hardware connections here 58 | and here 59 |

60 | 61 |
62 | 63 | 64 | 65 |

Basic Robot

66 | 67 |

Direction and Speed Controls

68 | 69 |

70 | Code Location: earthrover/control_panel 71 |

72 | The direction buttons send commands to GPIO pins 8 & 11 for motor 1 and GPIO pins 14 & 15 for motor 2.
73 | The speed slider sends a value between 0-100 (in increments of 10) to server. This value is used to generate PWM on pins 20 & 21 simultaneously, resulting in speed control of motors. 74 |

75 | 76 | read more 77 |
78 | 79 |
80 | 81 | 82 | 83 |

Additional Hardware

84 | 85 |

Camera and light Controls

86 | 87 | 88 |

89 | Code Location: earthrover/camera_lights 90 |

91 | When you press the camera 'ON' button, a python script 'earthrover/camera_lights/cam_server.py' is launched in the background and starts streaming the camera video.
92 | The Light buttons toggle the state of GPIO pins 17, 18 & 27. You can connect simple LEDs directly to these GPIO pins, or 12 V high-brightness LEDs through a transistor switching circuit. 93 |
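As a minimal sketch of the kind of GPIO call behind those buttons (assumes the RPi.GPIO library; the light names below are made up for illustration, and the actual handler in camera_lights/ajax_lights.php may drive the pins differently):

```python
import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)
LIGHT_PINS = {"front": 17, "rear": 18, "cabin": 27}   # hypothetical names for the three light pins
GPIO.setup(list(LIGHT_PINS.values()), GPIO.OUT)

GPIO.output(LIGHT_PINS["front"], GPIO.HIGH)   # light on
GPIO.output(LIGHT_PINS["front"], GPIO.LOW)    # light off
```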

94 | read more 95 |
96 | 97 | 98 |

Sound Controls

99 | 100 |

101 | Code Location: earthrover/speaker 102 |

103 | The robot can speak out written text via a Text to Speech engine called 'espeak'.
104 | Also, it can play pre-recorded mp3 files via the built-in 'omxplayer'. 105 |
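A minimal sketch of how both can be driven from Python (assumes the espeak and omxplayer binaries are installed; the repo's own speaker_tts.py and ajax handlers may differ):

```python
import os

# Text to speech via the espeak engine.
os.system('espeak "Hello, I am Earthrover"')

# Play a pre-recorded sound shipped with the repo via omxplayer.
os.system('omxplayer sounds/horn.mp3')
```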

106 | watch video 107 |
108 | 109 |

Distance Sensor Controls

110 | 111 | 112 |

113 | Code Location: earthrover/range_sensor< 114 | br>
115 | The toggle button launches the python script 'earthrover/range_sensor/range_sensor.py' in background.
116 | The measured distance value is shown on the Control Panel. 117 |

118 | 119 |

120 | In the above picture, the distance measured by the sensor is 65.6 cm.
121 | If the distance falls below 30 cm, the robot is programmed to move back automatically.
122 |
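A simplified sketch of this auto-reverse rule is given below. 'read_distance_cm' and 'robot' are placeholders for the sensor read and the motor helpers; the actual logic lives in 'earthrover/range_sensor' (range_sensor.py / avoid_collision.py):

```python
# Simplified sketch of the auto-reverse rule described above.
import time

SAFE_DISTANCE_CM = 30

def avoid_collision(read_distance_cm, robot):
    while True:
        distance = read_distance_cm()          # placeholder sensor read
        print("distance: {:.1f} cm".format(distance))
        if distance < SAFE_DISTANCE_CM:
            robot.back()                       # back off until clear
            time.sleep(0.5)
            robot.stop()
        time.sleep(0.2)                        # poll a few times per second
```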

123 | watch video 124 |
125 | 126 |
127 | 128 | 129 | 130 |

Javascript Robotics

131 | 132 |

133 | Javascript code running in the browser can access the hardware (accelerometer, microphone, orientation, etc.) of your mobile phone / laptop. 134 | However, to access these sensors, the webpage containing the Javascript code must originate from a server with 'https' enabled. That means the Apache webserver running on the Raspberry Pi must have https enabled.

135 | 136 | Each of the buttons below opens its respective https page. If you see a "NET::ERR_CERT_INVALID" error in Chrome and there is no "proceed to website" option, type "thisisunsafe" directly into Chrome on the same page. You should then be able to see the page. Refer to this blog 137 | 138 |

139 | 140 | 141 | 142 |
143 | 144 |

Accelerometer Controls

145 | 146 |

147 | Code Location: earthrover/accelerometer 148 |

149 | Open the earthrover control panel using a mobile browser and press the 'Accelerometer' icon. The webpage with relevant Javascript code will appear and start capturing and sending the accelerometer data of your mobile phone to the server (Raspberry Pi). 150 | You can now control the robot by tilting the phone. 151 |

152 | watch video 153 |
154 | 155 |

Voice Controls

156 | 157 |

158 | Code Location: earthrover/voice_control 159 |

160 | Open the earthrover control panel using a mobile/laptop browser (Chrome) and press the 'Voice Control' icon. The webpage with the relevant Javascript code will appear, which takes voice input, converts 161 | it to text and sends it to the server (Raspberry Pi). 162 | You can now control the robot by speaking the valid commands. The list of valid commands is described in the link below. 163 |

164 | read more 165 |
166 | 167 | 168 |
169 | 170 |

Javascript Compass

171 |

172 | Code Location: earthrover/compass 173 |

174 | Press the 'Compass' icon. Instructions for use are given on the webpage that appears. 175 | This is just an example to demonstrate how you can use the orientation sensor of a mobile phone to control the robot's direction precisely. 176 | 177 |

178 | read more 179 | 180 | 181 |
182 | 183 | 184 | 185 |
186 | 187 | 188 | 189 |

AI Robotics

190 | Create Custom Models with ease: Teachable Machine

191 | See various Pre-trained Models by Google Coral team: Pre-trained Models
192 |

193 | 194 | This section contains projects that involve deployment of a custom / pre-trained model on Raspberry Pi to achieve advanced functionalities. 195 |

196 | 197 |
198 | 199 |

Gesture Controls

200 |

201 | Code Location: earthrover/tm

202 | ML Model details: Custom model created using the Teachable Machine tool to recognise hand gestures.
203 | Inference: On your Laptop's browser using tensorflow.js
204 | Hardware Acceleration: Not implemented, since inference takes place in the browser.
205 |

206 | Using a laptop with a web-cam, open the Chrome browser. Load the earthrover control panel and press the 'Gesture Controls' button. A page will appear with the relevant functionality.
207 | Press the start button; the web-cam will turn on and start looking for hand gestures. If a gesture is recognised, the command corresponding to that gesture is sent to the server (Raspberry Pi) to actuate the GPIO pins. 208 | 209 | You may notice that this button has a different color than the rest of the buttons in this section, because this is the only case where inference happens in the browser. In all other cases, inference takes place on the Raspberry Pi. 210 |
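As an illustration only (the real entry point is the PHP handler 'tm/ajax_action.php', and the gesture labels used here are hypothetical), the server-side dispatch to the motor helpers in 'earthrover/util.py' could look roughly like this:

```python
# Illustrative sketch: map a gesture label received from the browser to the
# motor helpers in earthrover/util.py. The label names are hypothetical.
import sys
sys.path.insert(0, '/var/www/html/earthrover')
import util as ut

ut.init_gpio()

ACTIONS = {
    "forward": ut.forward,
    "back":    ut.back,
    "left":    ut.left,
    "right":   ut.right,
    "stop":    ut.stop,
}

def handle_gesture(label):
    # unknown or unrecognised gestures simply stop the robot
    ACTIONS.get(label, ut.stop)()

handle_gesture("forward")
```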

211 | read more 212 | 213 |
214 | 215 |

Image Classification

216 | 217 |

218 | Code Location: earthrover/image_classification

219 | ML Model details: Pre-trained Image Classification Model by coral.ai
220 | Inference: On Raspberry Pi using Tensorflow Lite
221 | Hardware Acceleration: Not Implemented 222 |

223 | On the control panel, press the 'Image Classification' button. When this button is pressed, a python script 'image_recog_cv2.py' is launched in the background. 224 | The camera view with the results overlay can be accessed by clicking the button. 225 | Try showing different objects to the camera. You will see the results in the browser, and the robot will speak out the object's name.
226 | To stop the background script, press the 'Image Classification' button once again. This will free up the camera for other tasks. 227 |

228 | watch video 229 | 230 |
231 | 232 |

Object Detection

233 | 234 |

235 | Code Location: earthrover/object_detection

236 | ML Model details: Pre-trained Object Detection Model by coral.ai
237 | Inference: On Raspberry Pi using Tensorflow Lite
238 | Hardware Acceleration: Improve inferencing speed by up to 10x by attaching a USB Coral Accelerator and setting the variable 'edgetpu' to '1' in the python file 'earthrover/util.py' 239 |
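In code terms, the 'edgetpu' switch simply decides which .tflite file is loaded and whether the EdgeTPU delegate is attached, roughly as sketched below (the repo's own version of this logic is in 'common1.py'):

```python
# Rough sketch of what the 'edgetpu' switch does: pick the EdgeTPU-compiled
# model and attach the libedgetpu delegate when the Coral stick is present.
import tflite_runtime.interpreter as tflite

MODEL_DIR = '/var/www/html/all_models'
edgetpu = 1   # set to 0 when no Coral USB Accelerator is attached

if edgetpu == 1:
    interpreter = tflite.Interpreter(
        model_path=MODEL_DIR + '/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite',
        experimental_delegates=[tflite.load_delegate('libedgetpu.so.1')])
else:
    interpreter = tflite.Interpreter(
        model_path=MODEL_DIR + '/mobilenet_ssd_v2_coco_quant_postprocess.tflite')

interpreter.allocate_tensors()
```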

240 | On the control panel, press the 'Object Detection' button. When this button is pressed, a python script 'object_detection_web2.py' is launched in the background. 241 | A button will appear. Click it to see the Web UI through which you can set the object of interest. 242 | To stop the background script, press the 'Object Detection' button once again. This will free up the camera for other tasks. 243 |

244 | read more 245 | 246 |
247 | 248 |

Object Tracking

249 | 250 |

251 | Code Location: earthrover/object_tracking

252 | ML Model details: Pre-trained Object Detection Model by coral.ai
253 | Inference: On Raspberry Pi using Tensorflow Lite
254 | Hardware Acceleration: Improve inferencing speed by up to 10x by attaching a USB Coral Accelerator and setting the variable 'edgetpu' to '1' in the python file 'earthrover/util.py' 255 |

256 | On the control panel, press the 'Object Tracking' button. When this button is pressed, a python script 'object_tracking.py' is launched in the background. 257 | A button will appear. Click it to see the robot's camera view while it tracks an object. 258 | To stop the background script, press the 'Object Tracking' button once again. This will free up the camera for other tasks. 259 |

260 | read more 261 | 262 | 263 |

Human Following

264 | 265 |

266 | Code Location: earthrover/human_following

267 | ML Model details: Pre-trained Object Detection Model by coral.ai
268 | Inference: On Raspberry Pi using Tensorflow Lite
269 | Hardware Acceleration: Improve inferencing speed by up to 10x by attaching a USB Coral Accelerator and setting the variable 'edgetpu' to '1' in the python file 'earthrover/util.py' 270 |

271 | On the control panel, press the 'Human Following' button. When this button is pressed, a python script 'human_follower.py' is launched in the background. 272 | A button will appear. Click it to see the robot's camera view while it tracks a person. 273 | To stop the background script, press the 'Human Following' button once again. This will free up the camera for other tasks. 274 |

275 | read more 276 |


277 | *Practical Observation: If the power supply to the Raspberry Pi is not adequate, the FPS drops. 278 | 279 |
280 | 281 | 282 | 283 | 284 | 285 | -------------------------------------------------------------------------------- /earthrover/control_panel/remote.php: -------------------------------------------------------------------------------- 1 | 7 | 8 | 9 | Remote 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 23 |
24 | 25 |
26 | 27 |
28 |
29 |
30 | 31 | 32 | 33 |
34 |
35 |
36 | 37 |
38 | 39 | 40 |

41 | 42 | 43 |
44 | 45 |
46 | 47 |

Speed: 50

48 |
49 | 59 |
60 | 61 |
62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /earthrover/human_following/README.md: -------------------------------------------------------------------------------- 1 | # Human Following AI-Robot 2 | 3 |

4 | Read the article: 5 | 6 | Watch the video on YouTube: 7 | 8 | 9 | 10 |

11 | 12 | 13 |

14 | 15 | 16 |

17 | 18 | ## Model files 19 | The ML model used in this project is placed in 'all_models' directory inside parent directory. 20 | 21 | ## Overview of the Project 22 | Robot detects presence of a person in camera frame using a Machine Learning model 'MobileNet SSD v1 (COCO)' and TensorFlow Lite interpreter. 23 | The code of Human following robot is partially derived from the Object Tracking code. However, there is a difference in the method which calculates distance from the object to generate forward motion command. 24 | 25 | Both the files 'human_follower.py' and 'human_follower2.py' files are identical in logic. One implements FLASK and the other doesn't, as mentioned below. 26 | 27 | ### 'human_follower.py' 28 | This file performs human following and streams the robot view over LAN using FLASK (Python's micro Web Framework). 29 | 30 | ### 'human_follower2.py' 31 | Pure human following logic. The code pertaing to FLASK is removed. 32 | 33 | ### 'common.py' 34 | This file contains the utility functions which are required for setting up of Tensorflow Lite interpreter. 35 | 36 | 37 | -------------------------------------------------------------------------------- /earthrover/human_following/common.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility functions for using TFLite Interpreter 3 | """ 4 | 5 | import numpy as np 6 | from PIL import Image 7 | import tflite_runtime.interpreter as tflite 8 | import platform 9 | 10 | 11 | EDGETPU_SHARED_LIB = { 12 | 'Linux': 'libedgetpu.so.1', 13 | 'Darwin': 'libedgetpu.1.dylib', 14 | 'Windows': 'edgetpu.dll' 15 | }[platform.system()] 16 | 17 | def make_interpreter_0(model_file): 18 | model_file, *device = model_file.split('@') 19 | return tflite.Interpreter(model_path=model_file) 20 | 21 | def make_interpreter_1(model_file): 22 | model_file, *device = model_file.split('@') 23 | return tflite.Interpreter( 24 | model_path=model_file, 25 | experimental_delegates=[ 26 | tflite.load_delegate(EDGETPU_SHARED_LIB, 27 | {'device': device[0]} if device else {}) 28 | ]) 29 | 30 | def set_input(interpreter, image, resample=Image.NEAREST): 31 | """Copies data to input tensor.""" 32 | image = image.resize((input_image_size(interpreter)[0:2]), resample) 33 | input_tensor(interpreter)[:, :] = image 34 | 35 | def input_image_size(interpreter): 36 | """Returns input image size as (width, height, channels) tuple.""" 37 | _, height, width, channels = interpreter.get_input_details()[0]['shape'] 38 | return width, height, channels 39 | 40 | def input_tensor(interpreter): 41 | """Returns input tensor view as numpy array of shape (height, width, 3).""" 42 | tensor_index = interpreter.get_input_details()[0]['index'] 43 | return interpreter.tensor(tensor_index)()[0] 44 | 45 | def output_tensor(interpreter, i): 46 | """Returns dequantized output tensor if quantized before.""" 47 | output_details = interpreter.get_output_details()[i] 48 | output_data = np.squeeze(interpreter.tensor(output_details['index'])()) 49 | if 'quantization' not in output_details: 50 | return output_data 51 | scale, zero_point = output_details['quantization'] 52 | if scale == 0: 53 | return output_data - zero_point 54 | return scale * (output_data - zero_point) 55 | 56 | import time 57 | def time_elapsed(start_time,event): 58 | time_now=time.time() 59 | duration = (time_now - start_time)*1000 60 | duration=round(duration,2) 61 | print (">>> ", duration, " ms (" ,event, ")") 62 | 63 | import os 64 | def load_model(model_dir,model, lbl, edgetpu): 65 | 66 
| print('Loading from directory: {} '.format(model_dir)) 67 | print('Loading Model: {} '.format(model)) 68 | print('Loading Labels: {} '.format(lbl)) 69 | 70 | model_path=os.path.join(model_dir,model) 71 | labels_path=os.path.join(model_dir,lbl) 72 | 73 | if(edgetpu==0): 74 | interpreter = make_interpreter_0(model_path) 75 | else: 76 | interpreter = make_interpreter_1(model_path) 77 | 78 | interpreter.allocate_tensors() 79 | 80 | labels = load_labels(labels_path) 81 | 82 | return interpreter, labels 83 | 84 | import re 85 | def load_labels(path): 86 | p = re.compile(r'\s*(\d+)(.+)') 87 | with open(path, 'r', encoding='utf-8') as f: 88 | lines = (p.match(line).groups() for line in f.readlines()) 89 | return {int(num): text.strip() for num, text in lines} 90 | 91 | #---------------------------------------------------------------------- 92 | import collections 93 | Object = collections.namedtuple('Object', ['id', 'score', 'bbox']) 94 | 95 | class BBox(collections.namedtuple('BBox', ['xmin', 'ymin', 'xmax', 'ymax'])): 96 | """Bounding box. 97 | Represents a rectangle which sides are either vertical or horizontal, parallel 98 | to the x or y axis. 99 | """ 100 | __slots__ = () 101 | 102 | def get_output(interpreter, score_threshold, top_k, image_scale=1.0): 103 | """Returns list of detected objects.""" 104 | boxes = output_tensor(interpreter, 0) 105 | class_ids = output_tensor(interpreter, 1) 106 | scores = output_tensor(interpreter, 2) 107 | count = int(output_tensor(interpreter, 3)) 108 | 109 | def make(i): 110 | ymin, xmin, ymax, xmax = boxes[i] 111 | return Object( 112 | id=int(class_ids[i]), 113 | score=scores[i], 114 | bbox=BBox(xmin=np.maximum(0.0, xmin), 115 | ymin=np.maximum(0.0, ymin), 116 | xmax=np.minimum(1.0, xmax), 117 | ymax=np.minimum(1.0, ymax))) 118 | 119 | return [make(i) for i in range(top_k) if scores[i] >= score_threshold] 120 | #-------------------------------------------------------------------- 121 | 122 | -------------------------------------------------------------------------------- /earthrover/human_following/human_follower.py: -------------------------------------------------------------------------------- 1 | """ 2 | Project: AI Robot - Human Following 3 | Author: Jitesh Saini 4 | Github: https://github.com/jiteshsaini 5 | website: https://helloworld.co.in 6 | 7 | - The robot uses PiCamera to capture a frame. 8 | - Presence of human in the frame is detected using Machine Learning moldel & TensorFlow Lite interpreter. 9 | - Using OpenCV, the frame is overlayed with information such as bounding boxes, center coordinates of the person, deviation of the person from center of the frame etc. 10 | - FLASK is used for streaming the robot's view over LAN (accessed via browser). 11 | - Google Coral USB Accelerator should be used to accelerate the inferencing process. 12 | 13 | When Coral USB Accelerator is connected, amend line 14 of util.py as:- 14 | edgetpu = 1 15 | 16 | When Coral USB Accelerator is not connected, amend line 14 of util.py as:- 17 | edgetpu = 0 18 | 19 | The code moves the robot in order to get closer to the person and bring the person towards center of the frame. 
20 | """ 21 | 22 | import common as cm 23 | import cv2 24 | import numpy as np 25 | from PIL import Image 26 | import time 27 | from threading import Thread 28 | 29 | import sys 30 | sys.path.insert(0, '/var/www/html/earthrover') 31 | import util as ut 32 | ut.init_gpio() 33 | 34 | cap = cv2.VideoCapture(0) 35 | threshold=0.2 36 | top_k=5 #first five objects with prediction probability above threshhold (0.2) to be considered 37 | #edgetpu=0 38 | 39 | model_dir = '/var/www/html/all_models' 40 | model = 'mobilenet_ssd_v2_coco_quant_postprocess.tflite' 41 | model_edgetpu = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite' 42 | lbl = 'coco_labels.txt' 43 | 44 | tolerance=0.1 45 | x_deviation=0 46 | y_max=0 47 | arr_track_data=[0,0,0,0,0,0] 48 | 49 | object_to_track='person' 50 | 51 | #---------Flask---------------------------------------- 52 | from flask import Flask, Response 53 | from flask import render_template 54 | 55 | app = Flask(__name__) 56 | 57 | @app.route('/') 58 | def index(): 59 | #return "Default Message" 60 | return render_template("index.html") 61 | 62 | @app.route('/video_feed') 63 | def video_feed(): 64 | #global cap 65 | return Response(main(), 66 | mimetype='multipart/x-mixed-replace; boundary=frame') 67 | 68 | #-----initialise motor speed----------------------------------- 69 | 70 | import RPi.GPIO as GPIO 71 | GPIO.setmode(GPIO.BCM) # choose BCM numbering scheme 72 | 73 | GPIO.setup(20, GPIO.OUT)# set GPIO 20 as output pin 74 | GPIO.setup(21, GPIO.OUT)# set GPIO 21 as output pin 75 | 76 | pin20 = GPIO.PWM(20, 100) # create object pin20 for PWM on port 20 at 100 Hertz 77 | pin21 = GPIO.PWM(21, 100) # create object pin21 for PWM on port 21 at 100 Hertz 78 | 79 | val=100 80 | pin20.start(val) # start pin20 on 0 percent duty cycle (off) 81 | pin21.start(val) # start pin21 on 0 percent duty cycle (off) 82 | 83 | print("speed set to: ", val) 84 | #------------------------------------------ 85 | 86 | def track_object(objs,labels): 87 | 88 | global x_deviation, y_max, tolerance, arr_track_data 89 | 90 | if(len(objs)==0): 91 | print("no objects to track") 92 | ut.stop() 93 | ut.red_light("OFF") 94 | arr_track_data=[0,0,0,0,0,0] 95 | return 96 | 97 | flag=0 98 | for obj in objs: 99 | lbl=labels.get(obj.id, obj.id) 100 | if (lbl==object_to_track): 101 | x_min, y_min, x_max, y_max = list(obj.bbox) 102 | flag=1 103 | break 104 | 105 | #print(x_min, y_min, x_max, y_max) 106 | if(flag==0): 107 | print("selected object no present") 108 | return 109 | 110 | x_diff=x_max-x_min 111 | y_diff=y_max-y_min 112 | print("x_diff: ",round(x_diff,5)) 113 | print("y_diff: ",round(y_diff,5)) 114 | 115 | 116 | obj_x_center=x_min+(x_diff/2) 117 | obj_x_center=round(obj_x_center,3) 118 | 119 | obj_y_center=y_min+(y_diff/2) 120 | obj_y_center=round(obj_y_center,3) 121 | 122 | #print("[",obj_x_center, obj_y_center,"]") 123 | 124 | x_deviation=round(0.5-obj_x_center,3) 125 | y_max=round(y_max,3) 126 | 127 | print("{",x_deviation,y_max,"}") 128 | 129 | thread = Thread(target = move_robot) 130 | thread.start() 131 | 132 | arr_track_data[0]=obj_x_center 133 | arr_track_data[1]=obj_y_center 134 | arr_track_data[2]=x_deviation 135 | arr_track_data[3]=y_max 136 | 137 | 138 | def move_robot(): 139 | global x_deviation, y_max, tolerance, arr_track_data 140 | 141 | print("moving robot .............!!!!!!!!!!!!!!") 142 | print(x_deviation, tolerance, arr_track_data) 143 | 144 | y=1-y_max #distance from bottom of the frame 145 | 146 | if(abs(x_deviation)=tolerance): 160 | cmd="Move Left" 161 | 
delay1=get_delay(x_deviation) 162 | 163 | ut.left() 164 | time.sleep(delay1) 165 | ut.stop() 166 | 167 | if(x_deviation<=-1*tolerance): 168 | cmd="Move Right" 169 | delay1=get_delay(x_deviation) 170 | 171 | ut.right() 172 | time.sleep(delay1) 173 | ut.stop() 174 | 175 | arr_track_data[4]=cmd 176 | arr_track_data[5]=delay1 177 | 178 | def get_delay(deviation): 179 | deviation=abs(deviation) 180 | if(deviation>=0.4): 181 | d=0.080 182 | elif(deviation>=0.35 and deviation<0.40): 183 | d=0.060 184 | elif(deviation>=0.20 and deviation<0.35): 185 | d=0.050 186 | else: 187 | d=0.040 188 | return d 189 | 190 | def main(): 191 | 192 | from util import edgetpu 193 | 194 | if (edgetpu==1): 195 | mdl = model_edgetpu 196 | else: 197 | mdl = model 198 | 199 | interpreter, labels =cm.load_model(model_dir,mdl,lbl,edgetpu) 200 | 201 | fps=1 202 | arr_dur=[0,0,0] 203 | 204 | while True: 205 | start_time=time.time() 206 | 207 | #----------------Capture Camera Frame----------------- 208 | start_t0=time.time() 209 | ret, frame = cap.read() 210 | if not ret: 211 | break 212 | 213 | cv2_im = frame 214 | cv2_im = cv2.flip(cv2_im, 0) 215 | cv2_im = cv2.flip(cv2_im, 1) 216 | 217 | cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB) 218 | pil_im = Image.fromarray(cv2_im_rgb) 219 | 220 | arr_dur[0]=time.time() - start_t0 221 | #---------------------------------------------------- 222 | 223 | #-------------------Inference--------------------------------- 224 | start_t1=time.time() 225 | cm.set_input(interpreter, pil_im) 226 | interpreter.invoke() 227 | objs = cm.get_output(interpreter, score_threshold=threshold, top_k=top_k) 228 | 229 | arr_dur[1]=time.time() - start_t1 230 | #---------------------------------------------------- 231 | 232 | #-----------------other------------------------------------ 233 | start_t2=time.time() 234 | track_object(objs,labels)#tracking <<<<<<< 235 | 236 | if cv2.waitKey(1) & 0xFF == ord('q'): 237 | break 238 | 239 | cv2_im = append_text_img1(cv2_im, objs, labels, arr_dur, arr_track_data) 240 | # cv2.imshow('Object Tracking - TensorFlow Lite', cv2_im) 241 | 242 | ret, jpeg = cv2.imencode('.jpg', cv2_im) 243 | pic = jpeg.tobytes() 244 | 245 | #Flask streaming 246 | yield (b'--frame\r\n' 247 | b'Content-Type: image/jpeg\r\n\r\n' + pic + b'\r\n\r\n') 248 | 249 | arr_dur[2]=time.time() - start_t2 250 | fps = round(1.0 / (time.time() - start_time),1) 251 | print("*********FPS: ",fps,"************") 252 | 253 | cap.release() 254 | cv2.destroyAllWindows() 255 | 256 | def append_text_img1(cv2_im, objs, labels, arr_dur, arr_track_data): 257 | height, width, channels = cv2_im.shape 258 | font=cv2.FONT_HERSHEY_SIMPLEX 259 | 260 | global tolerance 261 | 262 | #draw black rectangle on top 263 | cv2_im = cv2.rectangle(cv2_im, (0,0), (width, 24), (0,0,0), -1) 264 | 265 | #write processing durations 266 | cam=round(arr_dur[0]*1000,0) 267 | inference=round(arr_dur[1]*1000,0) 268 | other=round(arr_dur[2]*1000,0) 269 | text_dur = 'Camera: {}ms Inference: {}ms other: {}ms'.format(cam,inference,other) 270 | cv2_im = cv2.putText(cv2_im, text_dur, (int(width/4)-30, 16),font, 0.4, (255, 255, 255), 1) 271 | 272 | #write FPS 273 | total_duration=cam+inference+other 274 | fps=round(1000/total_duration,1) 275 | text1 = 'FPS: {}'.format(fps) 276 | cv2_im = cv2.putText(cv2_im, text1, (10, 20),font, 0.7, (150, 150, 255), 2) 277 | 278 | 279 | #draw black rectangle at bottom 280 | cv2_im = cv2.rectangle(cv2_im, (0,height-24), (width, height), (0,0,0), -1) 281 | 282 | #write deviations and tolerance 283 | str_tol='Tol : 
{}'.format(tolerance) 284 | cv2_im = cv2.putText(cv2_im, str_tol, (10, height-8),font, 0.55, (150, 150, 255), 2) 285 | 286 | x_dev=arr_track_data[2] 287 | str_x='X: {}'.format(x_dev) 288 | if(abs(x_dev)0.9): 297 | color_y=(0,255,0) 298 | else: 299 | color_y=(0,0,255) 300 | cv2_im = cv2.putText(cv2_im, str_y, (220, height-8),font, 0.55, color_y, 2) 301 | 302 | #write command, tracking status and speed 303 | cmd=arr_track_data[4] 304 | cv2_im = cv2.putText(cv2_im, str(cmd), (int(width/2) + 10, height-8),font, 0.68, (0, 255, 255), 2) 305 | 306 | delay1=arr_track_data[5] 307 | str_sp='Speed: {}%'.format(round(delay1/(0.1)*100,1)) 308 | cv2_im = cv2.putText(cv2_im, str_sp, (int(width/2) + 185, height-8),font, 0.55, (150, 150, 255), 2) 309 | 310 | if(cmd==0): 311 | str1="No object" 312 | elif(cmd=='Stop'): 313 | str1='Acquired' 314 | else: 315 | str1='Tracking' 316 | cv2_im = cv2.putText(cv2_im, str1, (width-140, 18),font, 0.7, (0, 255, 255), 2) 317 | 318 | #draw center cross lines 319 | cv2_im = cv2.rectangle(cv2_im, (0,int(height/2)-1), (width, int(height/2)+1), (255,0,0), -1) 320 | cv2_im = cv2.rectangle(cv2_im, (int(width/2)-1,0), (int(width/2)+1,height), (255,0,0), -1) 321 | 322 | #draw the center red dot on the object 323 | cv2_im = cv2.circle(cv2_im, (int(arr_track_data[0]*width),int(arr_track_data[1]*height)), 7, (0,0,255), -1) 324 | 325 | #draw the tolerance box 326 | cv2_im = cv2.rectangle(cv2_im, (int(width/2-tolerance*width),0), (int(width/2+tolerance*width),height), (0,255,0), 2) 327 | 328 | for obj in objs: 329 | x0, y0, x1, y1 = list(obj.bbox) 330 | x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height) 331 | percent = int(100 * obj.score) 332 | 333 | box_color, text_color, thickness=(0,150,255), (0,255,0),1 334 | 335 | 336 | text3 = '{}% {}'.format(percent, labels.get(obj.id, obj.id)) 337 | 338 | if(labels.get(obj.id, obj.id)=="person"): 339 | cv2_im = cv2.rectangle(cv2_im, (x0, y0), (x1, y1), box_color, thickness) 340 | cv2_im = cv2.putText(cv2_im, text3, (x0, y1-5),font, 0.5, text_color, thickness) 341 | 342 | return cv2_im 343 | 344 | if __name__ == '__main__': 345 | app.run(host='0.0.0.0', port=2204, threaded=True) # Run FLASK 346 | main() 347 | -------------------------------------------------------------------------------- /earthrover/human_following/human_follower2.py: -------------------------------------------------------------------------------- 1 | """ 2 | Project: AI Robot - Human Following 3 | Author: Jitesh Saini 4 | Github: https://github.com/jiteshsaini 5 | website: https://helloworld.co.in 6 | 7 | The code in this file is same as 'human_follower.py' file. However, code with respect to FLASK implementation has been removed. 8 | So there is no streaming of camera view. This is bare minimum human following robot. 
9 | """ 10 | 11 | import common as cm 12 | import cv2 13 | import numpy as np 14 | from PIL import Image 15 | import time 16 | from threading import Thread 17 | 18 | import sys 19 | sys.path.insert(0, '/var/www/html/earthrover') 20 | import util as ut 21 | ut.init_gpio() 22 | 23 | cap = cv2.VideoCapture(0) 24 | threshold=0.2 25 | top_k=5 #number of objects to be shown as detected 26 | edgetpu=1 27 | 28 | model_dir = '/var/www/html/all_models' 29 | model_edgetpu = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite' 30 | lbl = 'coco_labels.txt' 31 | 32 | tolerance=0.1 33 | x_deviation=0 34 | y_max=0 35 | 36 | object_to_track='person' 37 | 38 | #-----initialise motor speed----------------------------------- 39 | 40 | import RPi.GPIO as GPIO 41 | GPIO.setmode(GPIO.BCM) # choose BCM numbering scheme 42 | 43 | GPIO.setup(20, GPIO.OUT)# set GPIO 20 as output pin 44 | GPIO.setup(21, GPIO.OUT)# set GPIO 21 as output pin 45 | 46 | pin20 = GPIO.PWM(20, 100) # create object pin20 for PWM on port 20 at 100 Hertz 47 | pin21 = GPIO.PWM(21, 100) # create object pin21 for PWM on port 21 at 100 Hertz 48 | 49 | val=100 # maximum speed 50 | pin20.start(val) # start pin20 on 0 percent duty cycle (off) 51 | pin21.start(val) # start pin21 on 0 percent duty cycle (off) 52 | 53 | print("speed set to: ", val) 54 | #------------------------------------------ 55 | 56 | def track_object(objs,labels): 57 | 58 | #global delay 59 | global x_deviation, y_max, tolerance 60 | 61 | 62 | if(len(objs)==0): 63 | print("no objects to track") 64 | ut.stop() 65 | ut.red_light("OFF") 66 | return 67 | 68 | flag=0 69 | for obj in objs: 70 | lbl=labels.get(obj.id, obj.id) 71 | if (lbl==object_to_track): 72 | x_min, y_min, x_max, y_max = list(obj.bbox) 73 | flag=1 74 | break 75 | 76 | #print(x_min, y_min, x_max, y_max) 77 | if(flag==0): 78 | print("selected object no present") 79 | return 80 | 81 | x_diff=x_max-x_min 82 | y_diff=y_max-y_min 83 | 84 | obj_x_center=x_min+(x_diff/2) 85 | obj_x_center=round(obj_x_center,3) 86 | 87 | obj_y_center=y_min+(y_diff/2) 88 | obj_y_center=round(obj_y_center,3) 89 | 90 | x_deviation=round(0.5-obj_x_center,3) 91 | y_max=round(y_max,3) 92 | 93 | print("{",x_deviation,y_max,"}") 94 | 95 | thread = Thread(target = move_robot) 96 | thread.start() 97 | 98 | 99 | def move_robot(): 100 | global x_deviation, y_max, tolerance 101 | 102 | y=1-y_max #distance from bottom of the frame 103 | 104 | if(abs(x_deviation)=tolerance): 119 | delay1=get_delay(x_deviation) 120 | 121 | ut.left() 122 | time.sleep(delay1) 123 | ut.stop() 124 | print("moving robot ...Left....<<<<<<<<<<") 125 | 126 | 127 | if(x_deviation<=-1*tolerance): 128 | delay1=get_delay(x_deviation) 129 | 130 | ut.right() 131 | time.sleep(delay1) 132 | ut.stop() 133 | print("moving robot ...Right....>>>>>>>>") 134 | 135 | 136 | def get_delay(deviation): 137 | 138 | deviation=abs(deviation) 139 | 140 | if(deviation>=0.4): 141 | d=0.080 142 | elif(deviation>=0.35 and deviation<0.40): 143 | d=0.060 144 | elif(deviation>=0.20 and deviation<0.35): 145 | d=0.050 146 | else: 147 | d=0.040 148 | 149 | return d 150 | 151 | def main(): 152 | 153 | interpreter, labels =cm.load_model(model_dir,model_edgetpu,lbl,edgetpu) 154 | 155 | fps=1 156 | 157 | while True: 158 | start_time=time.time() 159 | 160 | #----------------Capture Camera Frame----------------- 161 | ret, frame = cap.read() 162 | if not ret: 163 | break 164 | 165 | cv2_im = frame 166 | cv2_im = cv2.flip(cv2_im, 0) 167 | cv2_im = cv2.flip(cv2_im, 1) 168 | 169 | cv2_im_rgb = cv2.cvtColor(cv2_im, 
cv2.COLOR_BGR2RGB) 170 | pil_im = Image.fromarray(cv2_im_rgb) 171 | 172 | #-------------------Inference--------------------------------- 173 | cm.set_input(interpreter, pil_im) 174 | interpreter.invoke() 175 | objs = cm.get_output(interpreter, score_threshold=threshold, top_k=top_k) 176 | 177 | #-----------------other------------------------------------ 178 | track_object(objs,labels)#tracking <<<<<<< 179 | 180 | fps = round(1.0 / (time.time() - start_time),1) 181 | print("*********FPS: ",fps,"************") 182 | 183 | cap.release() 184 | cv2.destroyAllWindows() 185 | 186 | if __name__ == '__main__': 187 | main() 188 | 189 | -------------------------------------------------------------------------------- /earthrover/human_following/master.py: -------------------------------------------------------------------------------- 1 | #Project: Earthrover 2 | #Created by: Jitesh Saini 3 | 4 | import time,os 5 | import sys 6 | 7 | local_path=os.path.dirname(os.path.realpath(__file__)) 8 | 9 | print ("local_path: ", local_path) 10 | 11 | status = sys.argv[1] 12 | 13 | file_name="human_follower.py" 14 | 15 | if (status=="1"): 16 | print "starting Object Detection script" 17 | cmd= "sudo python3 " + local_path + "/" + file_name + " &" 18 | print ("cmd: ", cmd) 19 | os.system(cmd) 20 | time.sleep(1) 21 | 22 | 23 | if (status=="0"): 24 | cmd= "sudo pkill -f " + file_name 25 | os.system(cmd) 26 | -------------------------------------------------------------------------------- /earthrover/human_following/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |
4 | 5 |
6 | 7 | 8 | -------------------------------------------------------------------------------- /earthrover/human_following/web/ajax_master.php: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /earthrover/image_classification/image_recog.py: -------------------------------------------------------------------------------- 1 | # python3 2 | # 3 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 4 | 5 | 6 | # Modified by: Jitesh Saini 7 | # Project: Earth Rover (Real Time Image classifiation) 8 | 9 | from tflite_runtime.interpreter import Interpreter 10 | import numpy as np 11 | import matplotlib.pyplot as plt 12 | from PIL import Image 13 | 14 | import picamera 15 | from picamera import PiCamera, Color 16 | from time import sleep 17 | 18 | def scale_image(frame, new_size=(224, 224)): 19 | # Get the dimensions 20 | height, width, _ = frame.shape # Image shape 21 | new_width, new_height = new_size # Target shape 22 | 23 | # Calculate the target image coordinates 24 | left = (width - new_width) // 2 25 | top = (height - new_height) // 2 26 | right = (width + new_width) // 2 27 | bottom = (height + new_height) // 2 28 | 29 | #print("left:", left) 30 | #print("right:", right) 31 | #print("top:", top) 32 | #print("bottom:", bottom) 33 | 34 | image = frame[left: right, top: bottom, :] 35 | return image 36 | 37 | def move_back(): 38 | ut.back() 39 | sleep(3) 40 | ut.stop() 41 | 42 | def move_forward(): 43 | ut.forward() 44 | sleep(3) 45 | ut.stop() 46 | 47 | def action(pred,lbl): 48 | #print("max_prediction: ", pred) 49 | #print("max_Label: ", lbl) 50 | 51 | if (pred < threshold): 52 | camera.annotate_text = "___" 53 | ut.camera_light("OFF") 54 | 55 | if (pred >= threshold): 56 | percent=round(pred*100) 57 | txt= "Saw a " + lbl + ", i am " + str(percent) + "% sure" 58 | camera.annotate_text = txt 59 | 60 | ut.speak_tts(lbl,"f") 61 | sleep(0.3) 62 | 63 | if (pred >= threshold and lbl=="mouse"): 64 | print(lbl) 65 | ut.camera_light("ON") 66 | move_back() 67 | 68 | if (pred >= threshold and lbl=="tennis ball"): 69 | print(lbl) 70 | ut.camera_light("ON") 71 | move_forward() 72 | 73 | 74 | #----initialise GPIO---------------------------- 75 | import sys 76 | sys.path.insert(0, '/var/www/html/earthrover') 77 | import util as ut 78 | ut.init_gpio() 79 | #----------------------------------------------- 80 | 81 | #-----initialise the Model and Load into interpreter------------------------- 82 | 83 | #specify the path of Model and Label file 84 | model_path = "/var/www/html/all_models/mobilenet_v1_1.0_224_quant.tflite" 85 | label_path = "/var/www/html/all_models/labels_mobilenet_quant_v1_224.txt" 86 | top_k_results = 3 87 | 88 | with open(label_path, 'r') as f: 89 | labels = list(map(str.strip, f.readlines())) 90 | 91 | # Load TFLite model and allocate tensors 92 | interpreter = Interpreter(model_path=model_path) 93 | interpreter.allocate_tensors() 94 | 95 | # Get input and output tensors. 
96 | input_details = interpreter.get_input_details() 97 | output_details = interpreter.get_output_details() 98 | 99 | ## Get input size 100 | input_shape = input_details[0]['shape'] 101 | #print(input_shape) 102 | size = input_shape[:2] if len(input_shape) == 3 else input_shape[1:3] 103 | #print(size) 104 | 105 | #prediction threshold for triggering actions 106 | threshold=0.5 107 | #----------------------------------------------------------- 108 | 109 | #-------Window to display camera view--------------------- 110 | plt.ion() 111 | plt.tight_layout() 112 | 113 | fig = plt.gcf() 114 | fig.canvas.set_window_title('TensorFlow Lite') 115 | fig.suptitle('Earth Rover: Image Classification') 116 | ax = plt.gca() 117 | ax.set_axis_off() 118 | tmp = np.zeros([480,640] + [3], np.uint8) 119 | preview = ax.imshow(tmp) 120 | #--------------------------------------------------------- 121 | 122 | with picamera.PiCamera() as camera: 123 | camera.framerate = 30 124 | camera.resolution = (640, 480) 125 | camera.annotate_foreground = Color('black') 126 | 127 | #loop continuously (press control + 'c' to exit program) 128 | while True: 129 | stream = np.empty((480, 640, 3), dtype=np.uint8) 130 | camera.capture(stream, 'rgb') 131 | 132 | img = scale_image(stream) 133 | 134 | # Add a batch dimension 135 | input_data = np.expand_dims(img, axis=0) 136 | #print(input_data) 137 | 138 | # feed data to input tensor and run the interpreter 139 | interpreter.set_tensor(input_details[0]['index'], input_data) 140 | interpreter.invoke() 141 | 142 | # Obtain results and map them to the classes 143 | predictions = interpreter.get_tensor(output_details[0]['index'])[0] 144 | 145 | # Get indices of the top k results 146 | top_k_indices = np.argsort(predictions)[::-1][:top_k_results] 147 | 148 | 149 | for i in range(top_k_results): 150 | pred=predictions[top_k_indices[i]]/255.0 151 | pred=round(pred,2) 152 | lbl=labels[top_k_indices[i]] 153 | print(lbl, "=", pred) 154 | 155 | print("-----------------------------------") 156 | 157 | pred_max=predictions[top_k_indices[0]]/255.0 158 | lbl_max=labels[top_k_indices[0]] 159 | 160 | #take action based on maximum prediction value 161 | action(pred_max,lbl_max) 162 | 163 | #update the window of camera view 164 | preview.set_data(stream) 165 | fig.canvas.get_tk_widget().update() 166 | 167 | camera.close() 168 | -------------------------------------------------------------------------------- /earthrover/image_classification/image_recog_cv2.py: -------------------------------------------------------------------------------- 1 | # Example code provided by TensorFlow Authors have been useful in creating this project. 
2 | # Author: Jitesh Saini 3 | # Project: Earth Rover (Real Time Image classifiation) 4 | 5 | from tflite_runtime.interpreter import Interpreter 6 | import numpy as np 7 | from PIL import Image 8 | from time import sleep 9 | import cv2 10 | import os 11 | 12 | cap = cv2.VideoCapture(0) 13 | 14 | font=cv2.FONT_HERSHEY_SIMPLEX 15 | text_overlay="" 16 | 17 | 18 | #---------Flask---------------------------------------- 19 | from flask import Flask, Response 20 | from flask import render_template 21 | 22 | app = Flask(__name__) 23 | 24 | @app.route('/') 25 | def index(): 26 | #return "Default Message" 27 | return render_template("index1.html") 28 | 29 | @app.route('/video_feed') 30 | def video_feed(): 31 | #global cap 32 | return Response(main(), 33 | mimetype='multipart/x-mixed-replace; boundary=frame') 34 | 35 | #------------------------------------------------------------- 36 | 37 | #-----initialise the Model and Load into interpreter------------------------- 38 | 39 | #specify the path of Model and Label file 40 | model_path = "/var/www/html/all_models/mobilenet_v1_1.0_224_quant.tflite" 41 | label_path = "/var/www/html/all_models/labels_mobilenet_quant_v1_224.txt" 42 | top_k_results = 3 43 | 44 | with open(label_path, 'r') as f: 45 | labels = list(map(str.strip, f.readlines())) 46 | 47 | # Load TFLite model and allocate tensors 48 | interpreter = Interpreter(model_path=model_path) 49 | interpreter.allocate_tensors() 50 | 51 | # Get input and output tensors. 52 | input_details = interpreter.get_input_details() 53 | output_details = interpreter.get_output_details() 54 | 55 | threshold=0.35 56 | #----------------------------------------------------------- 57 | 58 | #----initialise GPIO---------------------------- 59 | import sys 60 | sys.path.insert(0, '/var/www/html/earthrover') 61 | import util as ut 62 | ut.init_gpio() 63 | #----------------------------------------------- 64 | 65 | def action(pred,lbl): 66 | global text_overlay 67 | 68 | if (pred < threshold): 69 | text_overlay = "__" 70 | ut.camera_light("OFF") 71 | 72 | if (pred >= threshold): 73 | percent=round(pred*100) 74 | text_overlay= "Saw a " + lbl + ", i am " + str(percent) + "% sure" 75 | #ut.speak_tts(lbl,"f") 76 | #sleep(1) 77 | #text_to_speech(lbl,"f") 78 | ut.camera_light("ON") 79 | 80 | def input_image_size(interpreter): 81 | """Returns input image size as (width, height, channels) tuple.""" 82 | _, height, width, channels = interpreter.get_input_details()[0]['shape'] 83 | return width, height, channels 84 | 85 | def main(): 86 | while True: 87 | 88 | ret, frame = cap.read() 89 | if not ret: 90 | break 91 | 92 | cv2_im = frame 93 | cv2_im = cv2.flip(cv2_im, 0) 94 | cv2_im = cv2.flip(cv2_im, 1) 95 | 96 | cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB) 97 | pil_im = Image.fromarray(cv2_im_rgb) 98 | 99 | image = pil_im.resize((input_image_size(interpreter)[0:2]), Image.NEAREST) 100 | 101 | # Add a batch dimension 102 | input_data = np.expand_dims(image, axis=0) 103 | 104 | 105 | #print(input_data) 106 | 107 | # feed data to input tensor and run the interpreter 108 | interpreter.set_tensor(input_details[0]['index'], input_data) 109 | interpreter.invoke() 110 | 111 | # Obtain results and map them to the classes 112 | predictions = interpreter.get_tensor(output_details[0]['index'])[0] 113 | 114 | # Get indices of the top k results 115 | top_k_indices = np.argsort(predictions)[::-1][:top_k_results] 116 | 117 | j=0 118 | for i in range(top_k_results): 119 | pred=predictions[top_k_indices[i]]/255.0 120 | pred=round(pred,2) 121 | 
lbl=labels[top_k_indices[i]] 122 | print(lbl, "=", pred) 123 | 124 | txt1=lbl + "(" + str(pred) + ")" 125 | cv2_im = cv2.rectangle(cv2_im, (25,45 + j*35), (160, 65 + j*35), (0,0,0), -1) 126 | cv2_im = cv2.putText(cv2_im, txt1, (30, 60 + j*35),font, 0.5, (255, 255, 255), 1) 127 | j=j+1 128 | 129 | pred_max=predictions[top_k_indices[0]]/255.0 130 | lbl_max=labels[top_k_indices[0]] 131 | 132 | #print(lbl_max, "=", pred_max) 133 | 134 | #take action based on maximum prediction value 135 | action(pred_max,lbl_max) 136 | if cv2.waitKey(1) & 0xFF == ord('q'): 137 | break 138 | 139 | #cv2.imshow('Real-time Image Classification', cv2_im) 140 | cv2_im = cv2.putText(cv2_im, text_overlay, (60, 30),font, 0.8, (0, 0, 255), 2) 141 | 142 | ret, jpeg = cv2.imencode('.jpg', cv2_im) 143 | pic = jpeg.tobytes() 144 | 145 | #Flask streaming 146 | yield (b'--frame\r\n' 147 | b'Content-Type: image/jpeg\r\n\r\n' + pic + b'\r\n\r\n') 148 | 149 | print("-----------------------------------") 150 | 151 | #sleep(0.5) 152 | 153 | cap.release() 154 | cv2.destroyAllWindows() 155 | 156 | if __name__ == '__main__': 157 | app.run(host='0.0.0.0', port=2204, threaded=True) # Run FLASK 158 | main() 159 | -------------------------------------------------------------------------------- /earthrover/image_classification/master.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Project: AI Robot - Real-time Image Classification 3 | Author: Jitesh Saini 4 | ''' 5 | 6 | import time,os 7 | import sys 8 | 9 | local_path=os.path.dirname(os.path.realpath(__file__)) 10 | 11 | #print ("local_path: ", local_path) 12 | 13 | status = sys.argv[1] 14 | 15 | file_name="image_recog_cv2.py" 16 | 17 | if (status=="1"): 18 | cmd= "sudo python3 " + local_path + "/" + file_name + " &" 19 | print ("cmd: ", cmd) 20 | os.system(cmd) 21 | time.sleep(1) 22 | 23 | 24 | if (status=="0"): 25 | cmd= "sudo pkill -f " + file_name 26 | os.system(cmd) 27 | -------------------------------------------------------------------------------- /earthrover/image_classification/templates/index1.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | Image Classification 4 | 5 | 6 |
7 |

Real-time Image Classification

8 | 9 |
10 | 11 | 12 | -------------------------------------------------------------------------------- /earthrover/image_classification/web/ajax_master.php: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /earthrover/index.php: -------------------------------------------------------------------------------- 1 | 11 | -------------------------------------------------------------------------------- /earthrover/object_detection/README.md: -------------------------------------------------------------------------------- 1 | # AI Robot: Object Detection with TensorFlow Lite on Raspberry Pi & Live-Stream results on browser 2 | 3 |

4 | Read the article: 5 | 6 | Watch the video on YouTube: 7 | 8 | 9 | 10 |

11 | 12 |

13 | 14 |

15 | 16 | ## Code Files 17 | The ML model used in this project is placed in 'all_models' directory inside parent directory. 18 | 19 | 20 | ## Overview 21 | The code in this project is based on Google-Coral Object Detection example available at:-
22 | https://github.com/google-coral/examples-camera/tree/master/opencv 23 | 24 | A brief description of the files used in this project is mentioned below. 25 | 26 | 1. **object_detection.py** 27 | 28 | The file implements basic object detection using TensorFlow Lite API. Camera operation and generation of output window with text and figure overlays is carried out using OPENCV. 29 | Other customisations implemented are as follows:- 30 | - Object Detection with colour coded bounding boxes 31 | - Added information bar on top of the output window to show FPS, Processing duration and an Object Counter 32 | - Counter gets updated upon finding 'Person' in the frame 33 | 34 | 2. **object_detection_web1.py** 35 | 36 | This file covers all the features mentioned above. In addition, following features have been implemented:- 37 | - Integrated FLASK with the object detection code to stream the output window over LAN. 38 | - Created a directory named 'templates' and placed a html file named 'index1.html' in it. This html file is responsible for displaying the output streamed through FLASK. It can be viewd by typing the IP address of Raspberry Pi follwed by the port as mentioned in the code. 39 | 40 | 3. **object_detection_web2.py** 41 | 42 | This file covers all the features mentioned above. In addition, following features have been implemented:- 43 | 44 | - The object name to be monitored is supplied through Web GUI for updating the Object Counter during run time. 45 | - The code for Web GUI is present in 'web' directory. 46 | 47 | 4. **common1.py** 48 | 49 | This is a utility file which is imported in all the above three files. It contains functions to load a model, making interpreter, generating overlays on output window etc 50 | 51 | -------------------------------------------------------------------------------- /earthrover/object_detection/common1.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file has utility functions which are used in the following three files:- 3 | 1. object_detection.py 4 | 2. object_detection_web1.py 5 | 3. object_detection_web2.py 6 | 7 | This file is imported in all the above three files. 
8 | 9 | This code is based on Google-Coral Object Detection example code available at: 10 | https://github.com/google-coral/examples-camera/tree/master/opencv 11 | 12 | """ 13 | import numpy as np 14 | from PIL import Image 15 | import tflite_runtime.interpreter as tflite 16 | import platform 17 | 18 | 19 | EDGETPU_SHARED_LIB = { 20 | 'Linux': 'libedgetpu.so.1', 21 | 'Darwin': 'libedgetpu.1.dylib', 22 | 'Windows': 'edgetpu.dll' 23 | }[platform.system()] 24 | 25 | def make_interpreter_0(model_file): 26 | model_file, *device = model_file.split('@') 27 | return tflite.Interpreter(model_path=model_file) 28 | 29 | def make_interpreter_1(model_file): 30 | model_file, *device = model_file.split('@') 31 | return tflite.Interpreter( 32 | model_path=model_file, 33 | experimental_delegates=[ 34 | tflite.load_delegate(EDGETPU_SHARED_LIB, 35 | {'device': device[0]} if device else {}) 36 | ]) 37 | 38 | def set_input(interpreter, image, resample=Image.NEAREST): 39 | """Copies data to input tensor.""" 40 | image = image.resize((input_image_size(interpreter)[0:2]), resample) 41 | input_tensor(interpreter)[:, :] = image 42 | 43 | def input_image_size(interpreter): 44 | """Returns input image size as (width, height, channels) tuple.""" 45 | _, height, width, channels = interpreter.get_input_details()[0]['shape'] 46 | return width, height, channels 47 | 48 | def input_tensor(interpreter): 49 | """Returns input tensor view as numpy array of shape (height, width, 3).""" 50 | tensor_index = interpreter.get_input_details()[0]['index'] 51 | return interpreter.tensor(tensor_index)()[0] 52 | 53 | def output_tensor(interpreter, i): 54 | """Returns dequantized output tensor if quantized before.""" 55 | output_details = interpreter.get_output_details()[i] 56 | output_data = np.squeeze(interpreter.tensor(output_details['index'])()) 57 | if 'quantization' not in output_details: 58 | return output_data 59 | scale, zero_point = output_details['quantization'] 60 | if scale == 0: 61 | return output_data - zero_point 62 | return scale * (output_data - zero_point) 63 | 64 | import time 65 | def time_elapsed(start_time,event): 66 | time_now=time.time() 67 | duration = (time_now - start_time)*1000 68 | duration=round(duration,2) 69 | print (">>> ", duration, " ms (" ,event, ")") 70 | 71 | import os 72 | def load_model(model_dir,model, lbl, edgetpu): 73 | 74 | print('Loading from directory: {} '.format(model_dir)) 75 | print('Loading Model: {} '.format(model)) 76 | print('Loading Labels: {} '.format(lbl)) 77 | 78 | model_path=os.path.join(model_dir,model) 79 | labels_path=os.path.join(model_dir,lbl) 80 | 81 | if(edgetpu==0): 82 | interpreter = make_interpreter_0(model_path) 83 | else: 84 | interpreter = make_interpreter_1(model_path) 85 | 86 | interpreter.allocate_tensors() 87 | 88 | labels = load_labels(labels_path) 89 | 90 | return interpreter, labels 91 | 92 | import re 93 | def load_labels(path): 94 | p = re.compile(r'\s*(\d+)(.+)') 95 | with open(path, 'r', encoding='utf-8') as f: 96 | lines = (p.match(line).groups() for line in f.readlines()) 97 | return {int(num): text.strip() for num, text in lines} 98 | 99 | #---------------------------------------------------------------------- 100 | import collections 101 | Object = collections.namedtuple('Object', ['id', 'score', 'bbox']) 102 | 103 | class BBox(collections.namedtuple('BBox', ['xmin', 'ymin', 'xmax', 'ymax'])): 104 | """Bounding box. 105 | Represents a rectangle which sides are either vertical or horizontal, parallel 106 | to the x or y axis. 
107 | """ 108 | __slots__ = () 109 | 110 | def get_output(interpreter, score_threshold, top_k, image_scale=1.0): 111 | """Returns list of detected objects.""" 112 | boxes = output_tensor(interpreter, 0) 113 | class_ids = output_tensor(interpreter, 1) 114 | scores = output_tensor(interpreter, 2) 115 | count = int(output_tensor(interpreter, 3)) 116 | 117 | def make(i): 118 | ymin, xmin, ymax, xmax = boxes[i] 119 | return Object( 120 | id=int(class_ids[i]), 121 | score=scores[i], 122 | bbox=BBox(xmin=np.maximum(0.0, xmin), 123 | ymin=np.maximum(0.0, ymin), 124 | xmax=np.minimum(1.0, xmax), 125 | ymax=np.minimum(1.0, ymax))) 126 | 127 | return [make(i) for i in range(top_k) if scores[i] >= score_threshold] 128 | #-------------------------------------------------------------------- 129 | 130 | import cv2 131 | 132 | def append_text_img1(cv2_im, objs, labels, arr_dur, counter, selected_obj): 133 | height, width, channels = cv2_im.shape 134 | font=cv2.FONT_HERSHEY_SIMPLEX 135 | 136 | cam=round(arr_dur[0]*1000,0) 137 | inference=round(arr_dur[1]*1000,0) 138 | other=round(arr_dur[2]*1000,0) 139 | 140 | #total_duration=arr_dur[0] + arr_dur[1] + arr_dur[2] 141 | total_duration=cam+inference+other 142 | 143 | fps=round(1000/total_duration,1) 144 | 145 | cv2_im = cv2.rectangle(cv2_im, (0,0), (width, 24), (0,0,0), -1) 146 | 147 | text1 = 'FPS: {}'.format(fps) 148 | cv2_im = cv2.putText(cv2_im, text1, (10, 20),font, 0.7, (0, 0, 255), 2) 149 | 150 | #text_dur = 'Camera: {}ms, Inference {}ms, other {}ms'.format(arr_dur[0]*1000,arr_dur[1]*1000,arr_dur[2]*1000) 151 | text_dur = 'Camera: {}ms Inference: {}ms other: {}ms'.format(cam,inference,other) 152 | 153 | cv2_im = cv2.putText(cv2_im, text_dur, (int(width/4)-30, 16),font, 0.4, (255, 255, 255), 1) 154 | 155 | 156 | #object_name="person" 157 | text2 = selected_obj + ': {}'.format(counter) 158 | cv2_im = cv2.putText(cv2_im, text2, (width-140, 20),font, 0.6, (0, 255, 0), 2) 159 | 160 | for obj in objs: 161 | x0, y0, x1, y1 = list(obj.bbox) 162 | x0, y0, x1, y1 = int(x0*width), int(y0*height), int(x1*width), int(y1*height) 163 | percent = int(100 * obj.score) 164 | 165 | if (percent>=60): 166 | box_color, text_color, thickness=(0,255,0), (0,255,0),2 167 | elif (percent<60 and percent>40): 168 | box_color, text_color, thickness=(0,0,255), (0,0,255),2 169 | else: 170 | box_color, text_color, thickness=(255,0,0), (255,0,0),1 171 | 172 | 173 | text3 = '{}% {}'.format(percent, labels.get(obj.id, obj.id)) 174 | 175 | cv2_im = cv2.rectangle(cv2_im, (x0, y0), (x1, y1), box_color, thickness) 176 | cv2_im = cv2.putText(cv2_im, text3, (x0, y1-5),font, 0.5, text_color, thickness) 177 | 178 | return cv2_im 179 | -------------------------------------------------------------------------------- /earthrover/object_detection/master.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Project: AI Robot - Object Detection 3 | Author: Jitesh Saini 4 | ''' 5 | 6 | import time,os 7 | import sys 8 | 9 | local_path=os.path.dirname(os.path.realpath(__file__)) 10 | 11 | print ("local_path: ", local_path) 12 | 13 | status = sys.argv[1] 14 | 15 | file_name="object_detection_web2.py" 16 | 17 | if (status=="1"): 18 | print "starting Object Detection script" 19 | cmd= "sudo python3 " + local_path + "/" + file_name + " &" 20 | print ("cmd: ", cmd) 21 | os.system(cmd) 22 | time.sleep(1) 23 | 24 | 25 | if (status=="0"): 26 | cmd= "sudo pkill -f " + file_name 27 | os.system(cmd) 28 | 
-------------------------------------------------------------------------------- /earthrover/object_detection/object_detection.py: -------------------------------------------------------------------------------- 1 | """ 2 | Project: AI Robot - Object Detection 3 | Author: Jitesh Saini 4 | Github: https://github.com/jiteshsaini 5 | website: https://helloworld.co.in 6 | 7 | The code does following:- 8 | - The robot uses PiCamera to capture frames. 9 | - An object within the frame is detected using Machine Learning moldel & TensorFlow Lite interpreter. 10 | - Using OpenCV, the frame is overlayed with information such as: color coded bounding boxes, information bar to show FPS, Processing durations and an Object Counter. 11 | - Display the output window (camera view with overlays) locally on Raspberry Pi 12 | 13 | """ 14 | 15 | import common1 as cm 16 | import cv2 17 | import numpy as np 18 | from PIL import Image 19 | import time 20 | 21 | import sys 22 | sys.path.insert(0, '/var/www/html/earthrover') 23 | 24 | cap = cv2.VideoCapture(0) 25 | threshold=0.2 26 | top_k=10 #number of objects to be shown as detected 27 | edgetpu=0 28 | 29 | model_dir = '/var/www/html/all_models' 30 | model = 'mobilenet_ssd_v2_coco_quant_postprocess.tflite' 31 | model_edgetpu = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite' 32 | lbl = 'coco_labels.txt' 33 | 34 | counter=0 35 | prev_val=0 36 | 37 | selected_obj="person" 38 | 39 | def show_selected_object_counter(objs,labels): 40 | global counter, prev_val, selected_obj 41 | arr=[] 42 | for obj in objs: 43 | #print(obj.id) 44 | label = labels.get(obj.id, obj.id) 45 | #print(label) 46 | arr.append(label) 47 | 48 | print("arr:",arr) 49 | 50 | x = arr.count(selected_obj) 51 | 52 | diff=x - prev_val 53 | 54 | print("diff:",diff) 55 | if(diff>0): 56 | counter=counter + diff 57 | 58 | prev_val = x 59 | 60 | print("counter:",counter) 61 | 62 | 63 | def main(): 64 | from util import edgetpu 65 | 66 | if (edgetpu==1): 67 | mdl = model_edgetpu 68 | else: 69 | mdl = model 70 | 71 | interpreter, labels =cm.load_model(model_dir,mdl,lbl,edgetpu) 72 | 73 | fps=1 74 | arr_dur=[0,0,0] 75 | #while cap.isOpened(): 76 | while True: 77 | start_time=time.time() 78 | 79 | #----------------Capture Camera Frame----------------- 80 | start_t0=time.time() 81 | ret, frame = cap.read() 82 | if not ret: 83 | break 84 | 85 | cv2_im = frame 86 | cv2_im = cv2.flip(cv2_im, 0) 87 | cv2_im = cv2.flip(cv2_im, 1) 88 | 89 | cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB) 90 | pil_im = Image.fromarray(cv2_im_rgb) 91 | 92 | arr_dur[0]=time.time() - start_t0 93 | cm.time_elapsed(start_t0,"camera capture") 94 | #---------------------------------------------------- 95 | 96 | #-------------------Inference--------------------------------- 97 | start_t1=time.time() 98 | cm.set_input(interpreter, pil_im) 99 | interpreter.invoke() 100 | objs = cm.get_output(interpreter, score_threshold=threshold, top_k=top_k) 101 | 102 | arr_dur[1]=time.time() - start_t1 103 | cm.time_elapsed(start_t1,"inference") 104 | #---------------------------------------------------- 105 | 106 | #-----------------other------------------------------------ 107 | start_t2=time.time() 108 | show_selected_object_counter(objs,labels)#counter <<<<<<< 109 | 110 | if cv2.waitKey(1) & 0xFF == ord('q'): 111 | break 112 | 113 | cv2_im = cm.append_text_img1(cv2_im, objs, labels, arr_dur, counter,selected_obj) 114 | cv2.imshow('Object Detection - TensorFlow Lite', cv2_im) 115 | 116 | #time.sleep(0.5) 117 | 118 | arr_dur[2]=time.time() - 
start_t2 119 | cm.time_elapsed(start_t2,"other") 120 | cm.time_elapsed(start_time,"overall") 121 | 122 | print("arr_dur:",arr_dur) 123 | fps = round(1.0 / (time.time() - start_time),1) 124 | print("*********FPS: ",fps,"************") 125 | 126 | cap.release() 127 | cv2.destroyAllWindows() 128 | 129 | 130 | if __name__ == '__main__': 131 | main() 132 | -------------------------------------------------------------------------------- /earthrover/object_detection/object_detection_web1.py: -------------------------------------------------------------------------------- 1 | """ 2 | Project: AI Robot - Object Detection 3 | Author: Jitesh Saini 4 | Github: https://github.com/jiteshsaini 5 | website: https://helloworld.co.in 6 | 7 | The code does following:- 8 | - The robot uses PiCamera to capture frames. 9 | - An object within the frame is detected using Machine Learning moldel & TensorFlow Lite interpreter. 10 | - Using OpenCV, the frame is overlayed with information such as: color coded bounding boxes, information bar to show FPS, Processing durations and an Object Counter. 11 | - Stream the output window (camera view with overlays) over LAN through FLASK. 12 | - The Flask stream can be accessed at "http://192.168.1.20:2204". IP '192.168.1.20' should be replaced with your RPi's IP 13 | 14 | """ 15 | 16 | 17 | import common1 as cm 18 | import cv2 19 | import numpy as np 20 | from PIL import Image 21 | import time 22 | 23 | import sys 24 | sys.path.insert(0, '/var/www/html/earthrover') 25 | 26 | cap = cv2.VideoCapture(0) 27 | threshold=0.2 28 | top_k=5 #number of objects to be shown as detected 29 | 30 | model_dir = '/var/www/html/all_models' 31 | model = 'mobilenet_ssd_v2_coco_quant_postprocess.tflite' 32 | model_edgetpu = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite' 33 | lbl = 'coco_labels.txt' 34 | 35 | counter=0 36 | prev_val=0 37 | 38 | selected_obj="" 39 | 40 | #---------Flask---------------------------------------- 41 | from flask import Flask, Response 42 | from flask import render_template 43 | 44 | app = Flask(__name__) 45 | 46 | @app.route('/') 47 | def index(): 48 | #return "Default Message" 49 | return render_template("index1.html") 50 | 51 | @app.route('/video_feed') 52 | def video_feed(): 53 | #global cap 54 | return Response(main(), 55 | mimetype='multipart/x-mixed-replace; boundary=frame') 56 | 57 | #------------------------------------------------------------- 58 | 59 | def show_selected_object_counter(objs,labels): 60 | global counter, prev_val 61 | global selected_obj 62 | 63 | arr=[] 64 | for obj in objs: 65 | #print(obj.id) 66 | label = labels.get(obj.id, obj.id) 67 | #print(label) 68 | arr.append(label) 69 | 70 | print("arr:",arr) 71 | 72 | selected_obj="person" 73 | x = arr.count(selected_obj) 74 | diff=x - prev_val 75 | 76 | print("diff:",diff) 77 | if(diff>0): 78 | counter=counter + diff 79 | 80 | prev_val = x 81 | 82 | print("counter:",counter) 83 | 84 | 85 | def main(): 86 | from util import edgetpu 87 | 88 | if (edgetpu==1): 89 | mdl = model_edgetpu 90 | else: 91 | mdl = model 92 | 93 | interpreter, labels =cm.load_model(model_dir,mdl,lbl,edgetpu) 94 | 95 | fps=1 96 | arr_dur=[0,0,0] 97 | #while cap.isOpened(): 98 | while True: 99 | start_time=time.time() 100 | 101 | #----------------Capture Camera Frame----------------- 102 | start_t0=time.time() 103 | ret, frame = cap.read() 104 | if not ret: 105 | break 106 | 107 | cv2_im = frame 108 | cv2_im = cv2.flip(cv2_im, 0) 109 | cv2_im = cv2.flip(cv2_im, 1) 110 | 111 | cv2_im_rgb = cv2.cvtColor(cv2_im, 
cv2.COLOR_BGR2RGB) 112 | pil_im = Image.fromarray(cv2_im_rgb) 113 | 114 | arr_dur[0]=time.time() - start_t0 115 | cm.time_elapsed(start_t0,"camera capture") 116 | #---------------------------------------------------- 117 | 118 | #-------------------Inference--------------------------------- 119 | start_t1=time.time() 120 | cm.set_input(interpreter, pil_im) 121 | interpreter.invoke() 122 | objs = cm.get_output(interpreter, score_threshold=threshold, top_k=top_k) 123 | 124 | arr_dur[1]=time.time() - start_t1 125 | cm.time_elapsed(start_t1,"inference") 126 | #---------------------------------------------------- 127 | 128 | #-----------------other------------------------------------ 129 | start_t2=time.time() 130 | show_selected_object_counter(objs,labels)#counter <<<<<<< 131 | 132 | if cv2.waitKey(1) & 0xFF == ord('q'): 133 | break 134 | 135 | cv2_im = cm.append_text_img1(cv2_im, objs, labels, arr_dur, counter,selected_obj) 136 | #cv2.imshow('Object Detection - TensorFlow Lite', cv2_im) 137 | 138 | ret, jpeg = cv2.imencode('.jpg', cv2_im) 139 | pic = jpeg.tobytes() 140 | 141 | #Flask streaming 142 | yield (b'--frame\r\n' 143 | b'Content-Type: image/jpeg\r\n\r\n' + pic + b'\r\n\r\n') 144 | 145 | arr_dur[2]=time.time() - start_t2 146 | cm.time_elapsed(start_t2,"other") 147 | cm.time_elapsed(start_time,"overall") 148 | 149 | print("arr_dur:",arr_dur) 150 | fps = round(1.0 / (time.time() - start_time),1) 151 | print("*********FPS: ",fps,"************") 152 | 153 | cap.release() 154 | cv2.destroyAllWindows() 155 | 156 | 157 | if __name__ == '__main__': 158 | app.run(host='0.0.0.0', port=2204, threaded=True) # Run FLASK 159 | main() 160 | -------------------------------------------------------------------------------- /earthrover/object_detection/object_detection_web2.py: -------------------------------------------------------------------------------- 1 | """ 2 | Project: AI Robot - Object Detection 3 | Author: Jitesh Saini 4 | Github: https://github.com/jiteshsaini 5 | website: https://helloworld.co.in 6 | 7 | - The robot uses PiCamera to capture frames. 8 | - An object within the frame is detected using Machine Learning moldel & TensorFlow Lite interpreter. 9 | - Using OpenCV, the frame is overlayed with information such as: color coded bounding boxes, information bar to show FPS, Processing durations and an Object Counter. 10 | - The frame with overlays is streamed over LAN using FLASK. The Flask stream is embedded into a Web GUI which can be accessed at "http://192.168.1.20/earthrover/object_detection/web". IP '192.168.1.20' should be replaced with your RPi's IP 11 | - You can select an object through Web GUI to generate alarm on a specific object. 12 | - Google Coral USB Accelerator can be used to accelerate the inferencing process. 
13 | 14 | When Coral USB Accelerator is connected, amend line 14 of util.py as:- 15 | edgetpu = 1 16 | 17 | When Coral USB Accelerator is not connected, amend line 14 of util.py as:- 18 | edgetpu = 0 19 | 20 | """ 21 | 22 | import common1 as cm 23 | import cv2 24 | import numpy as np 25 | from PIL import Image 26 | import time 27 | 28 | import sys 29 | sys.path.insert(0, '/var/www/html/earthrover') 30 | import util as ut 31 | ut.init_gpio() 32 | 33 | cap = cv2.VideoCapture(0) 34 | threshold=0.2 35 | top_k=5 #number of objects to be shown as detected 36 | 37 | model_dir = '/var/www/html/all_models' 38 | model = 'mobilenet_ssd_v2_coco_quant_postprocess.tflite' 39 | model_edgetpu = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite' 40 | lbl = 'coco_labels.txt' 41 | 42 | counter=0 43 | prev_val=0 44 | 45 | file_path="/var/www/html/earthrover/object_detection/web/" 46 | selected_obj="" 47 | prev_val_obj="" 48 | 49 | #---------Flask---------------------------------------- 50 | from flask import Flask, Response 51 | from flask import render_template 52 | 53 | app = Flask(__name__) 54 | 55 | @app.route('/') 56 | def index(): 57 | #return "Default Message" 58 | return render_template("index2.html") 59 | 60 | @app.route('/video_feed') 61 | def video_feed(): 62 | #global cap 63 | return Response(main(), 64 | mimetype='multipart/x-mixed-replace; boundary=frame') 65 | 66 | #------------------------------------------------------------- 67 | 68 | 69 | def show_selected_object_counter(objs,labels): 70 | global counter, prev_val 71 | global file_path,selected_obj,prev_val_obj 72 | 73 | arr=[] 74 | for obj in objs: 75 | #print(obj.id) 76 | label = labels.get(obj.id, obj.id) 77 | #print(label) 78 | arr.append(label) 79 | 80 | print("arr:",arr) 81 | 82 | 83 | f0 = open(file_path + "object_cmd.txt", "r+") 84 | selected_obj = f0.read(20); 85 | f0.close() 86 | 87 | if(selected_obj!=prev_val_obj): 88 | counter=0 89 | 90 | prev_val_obj=selected_obj 91 | 92 | 93 | print("selected_obj: ",selected_obj) 94 | 95 | 96 | x = arr.count(selected_obj) #no of occurances of selected object in array of objects detected by the model 97 | f1 = open(file_path + "object_found.txt", "w") 98 | f1.write(str(x)) 99 | f1.close() 100 | 101 | if(x>0):#selected object present in frame. Make GPIO pin high 102 | ut.camera_light("ON") 103 | else:#selected object absent in frame. 
Make GPIO pin Low 104 | ut.camera_light("OFF") 105 | 106 | diff=x - prev_val #detect change in the no of occurances of selected object w.r.t previous frame 107 | 108 | print("diff:",diff) 109 | if(diff>0): #if there is an change then update counter 110 | counter=counter + diff 111 | 112 | prev_val = x 113 | 114 | print("counter:",counter) 115 | 116 | 117 | def main(): 118 | from util import edgetpu 119 | 120 | if (edgetpu==1): 121 | mdl = model_edgetpu 122 | else: 123 | mdl = model 124 | 125 | interpreter, labels =cm.load_model(model_dir,mdl,lbl,edgetpu) 126 | 127 | fps=1 128 | arr_dur=[0,0,0] 129 | #while cap.isOpened(): 130 | while True: 131 | start_time=time.time() 132 | 133 | #----------------Capture Camera Frame----------------- 134 | start_t0=time.time() 135 | ret, frame = cap.read() 136 | if not ret: 137 | break 138 | 139 | cv2_im = frame 140 | cv2_im = cv2.flip(cv2_im, 0) 141 | cv2_im = cv2.flip(cv2_im, 1) 142 | 143 | cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB) 144 | pil_im = Image.fromarray(cv2_im_rgb) 145 | 146 | arr_dur[0]=time.time() - start_t0 147 | cm.time_elapsed(start_t0,"camera capture") 148 | #---------------------------------------------------- 149 | 150 | #-------------------Inference--------------------------------- 151 | start_t1=time.time() 152 | cm.set_input(interpreter, pil_im) 153 | interpreter.invoke() 154 | objs = cm.get_output(interpreter, score_threshold=threshold, top_k=top_k) 155 | 156 | arr_dur[1]=time.time() - start_t1 157 | cm.time_elapsed(start_t1,"inference") 158 | #---------------------------------------------------- 159 | 160 | #-----------------other------------------------------------ 161 | start_t2=time.time() 162 | show_selected_object_counter(objs,labels)#counter <<<<<<< 163 | 164 | if cv2.waitKey(1) & 0xFF == ord('q'): 165 | break 166 | 167 | 168 | cv2_im = cm.append_text_img1(cv2_im, objs, labels, arr_dur, counter,selected_obj) 169 | #cv2.imshow('Object Detection - TensorFlow Lite', cv2_im) 170 | 171 | ret, jpeg = cv2.imencode('.jpg', cv2_im) 172 | pic = jpeg.tobytes() 173 | 174 | #Flask streaming 175 | yield (b'--frame\r\n' 176 | b'Content-Type: image/jpeg\r\n\r\n' + pic + b'\r\n\r\n') 177 | 178 | arr_dur[2]=time.time() - start_t2 179 | cm.time_elapsed(start_t2,"other") 180 | cm.time_elapsed(start_time,"overall") 181 | 182 | print("arr_dur:",arr_dur) 183 | fps = round(1.0 / (time.time() - start_time),1) 184 | print("*********FPS: ",fps,"************") 185 | 186 | cap.release() 187 | cv2.destroyAllWindows() 188 | 189 | 190 | if __name__ == '__main__': 191 | app.run(host='0.0.0.0', port=2204, threaded=True) # Run FLASK 192 | main() 193 | -------------------------------------------------------------------------------- /earthrover/object_detection/templates/index1.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | FLASK Stream 4 | 5 | 6 |

FLASK Web Streaming

7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /earthrover/object_detection/templates/index2.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /earthrover/object_detection/web/ajax_master.php: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /earthrover/object_detection/web/index.php: -------------------------------------------------------------------------------- 1 | 5 | 6 | 7 | Object detection 8 | 55 | 56 | 104 | 105 | 106 | ";//------------------------ 114 | echo"

Object Detection with TensorFlow Lite

"; 115 | //echo""; 116 | echo"
";//------------------------ 117 | echo""; 118 | echo"
"; 119 | 120 | echo"
";//------------------------ 121 | echo"

Select Object to Monitor

"; 122 | 123 | echo""; 124 | 125 | echo""; 126 | 127 | echo""; 128 | 129 | echo""; 130 | 131 | echo""; 132 | 133 | echo""; 134 | 135 | echo""; 136 | 137 | echo""; 138 | 139 | 140 | //echo""; 141 | 142 | echo"
"; 143 | 144 | echo"
";//------------------------ 145 | echo""; 146 | echo"
"; 147 | 148 | echo"
"; 149 | 150 | ?> 151 | 152 | 153 | 154 | -------------------------------------------------------------------------------- /earthrover/object_detection/web/object_cmd.php: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /earthrover/object_detection/web/object_cmd.txt: -------------------------------------------------------------------------------- 1 | person -------------------------------------------------------------------------------- /earthrover/object_detection/web/object_found.php: -------------------------------------------------------------------------------- 1 | 0){ 11 | //echo"
writing 0"; 12 | $myfile = fopen("object_found.txt", "w") or die("Unable to open file!"); 13 | fwrite($myfile,"0"); 14 | fclose($myfile); 15 | 16 | } 17 | */ 18 | ?> 19 | -------------------------------------------------------------------------------- /earthrover/object_detection/web/object_found.txt: -------------------------------------------------------------------------------- 1 | 0 -------------------------------------------------------------------------------- /earthrover/object_tracking/README.md: -------------------------------------------------------------------------------- 1 | # Object Tracking AI-Robot 2 | 3 |

4 | Read the article: 5 | 6 | Watch the video on YouTube: 7 | 8 | 9 | 10 |

11 | 12 |

13 | 14 |

15 | 16 | ## Model files 17 | The ML model used in this project is placed in the 'all_models' directory inside the parent directory. 18 | 19 | ## Overview of the Project 20 | The robot detects an object using the Machine Learning model 'MobileNet SSD v2 (COCO)' and the TensorFlow Lite interpreter. The robot follows the object and manoeuvres itself to keep the object in the center of the frame. While the robot is tracking / following the object, the working of the tracking algorithm and the robot's camera view can be accessed on a browser. The robot's view with information overlays is generated using OpenCV. The various overlays on a frame are shown in the picture below. 21 | 22 |

23 | 24 |

25 | 26 | 27 | When the object is present in the frame, information such as bounding boxes, the center of the object, the deviation of the object from the center of the frame, and the robot's direction and speed are updated, as shown in the picture below. In the example below, the X and Y values denote the deviation of the center of the object (the red dot) from the center of the frame. Since the horizontal deviation, i.e. the value of 'X', is above the tolerance value, the code generates a 'Move Left' command. 28 | 29 |
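The listing below is a minimal Python sketch of the decision logic described above, assuming the same 0.5 frame-centre reference and 0.1 tolerance used in 'object_tracking.py'; the function name 'command_from_deviation' is illustrative only. The full implementation, including the delay calculation and the motor-control thread, is in 'object_tracking.py' further down.

```python
# Minimal sketch of the deviation-to-command logic (illustrative only).
# The real implementation lives in object_tracking.py (track_object / move_robot).

def command_from_deviation(obj_x_center, obj_y_center, tolerance=0.1):
    # Deviation of the object's centre from the centre of the frame (0.5, 0.5)
    x_deviation = round(0.5 - obj_x_center, 3)
    y_deviation = round(0.5 - obj_y_center, 3)

    # Object close enough to the centre: no movement required
    if abs(x_deviation) < tolerance and abs(y_deviation) < tolerance:
        return "Stop"

    # Correct the larger deviation first, as the main script does
    if abs(x_deviation) > abs(y_deviation):
        return "Move Left" if x_deviation >= tolerance else "Move Right"
    return "Move Forward" if y_deviation >= tolerance else "Move Backward"

# Example: an X deviation of +0.2 exceeds the 0.1 tolerance
print(command_from_deviation(0.3, 0.5))  # -> "Move Left"
```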

30 | 31 |

32 | 33 | 34 | Python's micro Web Framework called "FLASK" is used for streaming the camera frame (or Robot's view) over LAN. 35 | -------------------------------------------------------------------------------- /earthrover/object_tracking/common.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file has utility functions which are used in 'object_tracking.py' file 3 | 4 | This code is based on Google-Coral Object Detection example code available at: 5 | https://github.com/google-coral/examples-camera/tree/master/opencv 6 | 7 | """ 8 | import numpy as np 9 | from PIL import Image 10 | import tflite_runtime.interpreter as tflite 11 | import platform 12 | 13 | 14 | EDGETPU_SHARED_LIB = { 15 | 'Linux': 'libedgetpu.so.1', 16 | 'Darwin': 'libedgetpu.1.dylib', 17 | 'Windows': 'edgetpu.dll' 18 | }[platform.system()] 19 | 20 | def make_interpreter_0(model_file): 21 | model_file, *device = model_file.split('@') 22 | return tflite.Interpreter(model_path=model_file) 23 | 24 | def make_interpreter_1(model_file): 25 | model_file, *device = model_file.split('@') 26 | return tflite.Interpreter( 27 | model_path=model_file, 28 | experimental_delegates=[ 29 | tflite.load_delegate(EDGETPU_SHARED_LIB, 30 | {'device': device[0]} if device else {}) 31 | ]) 32 | 33 | def set_input(interpreter, image, resample=Image.NEAREST): 34 | """Copies data to input tensor.""" 35 | image = image.resize((input_image_size(interpreter)[0:2]), resample) 36 | input_tensor(interpreter)[:, :] = image 37 | 38 | def input_image_size(interpreter): 39 | """Returns input image size as (width, height, channels) tuple.""" 40 | _, height, width, channels = interpreter.get_input_details()[0]['shape'] 41 | return width, height, channels 42 | 43 | def input_tensor(interpreter): 44 | """Returns input tensor view as numpy array of shape (height, width, 3).""" 45 | tensor_index = interpreter.get_input_details()[0]['index'] 46 | return interpreter.tensor(tensor_index)()[0] 47 | 48 | def output_tensor(interpreter, i): 49 | """Returns dequantized output tensor if quantized before.""" 50 | output_details = interpreter.get_output_details()[i] 51 | output_data = np.squeeze(interpreter.tensor(output_details['index'])()) 52 | if 'quantization' not in output_details: 53 | return output_data 54 | scale, zero_point = output_details['quantization'] 55 | if scale == 0: 56 | return output_data - zero_point 57 | return scale * (output_data - zero_point) 58 | 59 | import time 60 | def time_elapsed(start_time,event): 61 | time_now=time.time() 62 | duration = (time_now - start_time)*1000 63 | duration=round(duration,2) 64 | print (">>> ", duration, " ms (" ,event, ")") 65 | 66 | import os 67 | def load_model(model_dir,model, lbl, edgetpu): 68 | 69 | print('Loading from directory: {} '.format(model_dir)) 70 | print('Loading Model: {} '.format(model)) 71 | print('Loading Labels: {} '.format(lbl)) 72 | 73 | model_path=os.path.join(model_dir,model) 74 | labels_path=os.path.join(model_dir,lbl) 75 | 76 | if(edgetpu==0): 77 | interpreter = make_interpreter_0(model_path) 78 | else: 79 | interpreter = make_interpreter_1(model_path) 80 | 81 | interpreter.allocate_tensors() 82 | 83 | labels = load_labels(labels_path) 84 | 85 | return interpreter, labels 86 | 87 | import re 88 | def load_labels(path): 89 | p = re.compile(r'\s*(\d+)(.+)') 90 | with open(path, 'r', encoding='utf-8') as f: 91 | lines = (p.match(line).groups() for line in f.readlines()) 92 | return {int(num): text.strip() for num, text in lines} 93 | 94 | 
#---------------------------------------------------------------------- 95 | import collections 96 | Object = collections.namedtuple('Object', ['id', 'score', 'bbox']) 97 | 98 | class BBox(collections.namedtuple('BBox', ['xmin', 'ymin', 'xmax', 'ymax'])): 99 | """Bounding box. 100 | Represents a rectangle which sides are either vertical or horizontal, parallel 101 | to the x or y axis. 102 | """ 103 | __slots__ = () 104 | 105 | def get_output(interpreter, score_threshold, top_k, image_scale=1.0): 106 | """Returns list of detected objects.""" 107 | boxes = output_tensor(interpreter, 0) 108 | class_ids = output_tensor(interpreter, 1) 109 | scores = output_tensor(interpreter, 2) 110 | count = int(output_tensor(interpreter, 3)) 111 | 112 | def make(i): 113 | ymin, xmin, ymax, xmax = boxes[i] 114 | return Object( 115 | id=int(class_ids[i]), 116 | score=scores[i], 117 | bbox=BBox(xmin=np.maximum(0.0, xmin), 118 | ymin=np.maximum(0.0, ymin), 119 | xmax=np.minimum(1.0, xmax), 120 | ymax=np.minimum(1.0, ymax))) 121 | 122 | return [make(i) for i in range(top_k) if scores[i] >= score_threshold] 123 | #-------------------------------------------------------------------- 124 | 125 | 126 | -------------------------------------------------------------------------------- /earthrover/object_tracking/master.py: -------------------------------------------------------------------------------- 1 | #Project: Earthrover 2 | #Created by: Jitesh Saini 3 | 4 | import time,os 5 | import sys 6 | 7 | local_path=os.path.dirname(os.path.realpath(__file__)) 8 | 9 | print ("local_path: ", local_path) 10 | 11 | status = sys.argv[1] 12 | 13 | file_name="object_tracking.py" 14 | 15 | if (status=="1"): 16 | print "starting Object Detection script" 17 | cmd= "sudo python3 " + local_path + "/" + file_name + " &" 18 | print ("cmd: ", cmd) 19 | os.system(cmd) 20 | time.sleep(1) 21 | 22 | 23 | if (status=="0"): 24 | cmd= "sudo pkill -f " + file_name 25 | os.system(cmd) 26 | -------------------------------------------------------------------------------- /earthrover/object_tracking/object_tracking.py: -------------------------------------------------------------------------------- 1 | """ 2 | Project: AI Robot - Object Tracking 3 | Author: Jitesh Saini 4 | Github: https://github.com/jiteshsaini 5 | website: https://helloworld.co.in 6 | 7 | - The robot uses PiCamera to capture frames. 8 | - An object within the frame is detected using Machine Learning moldel & TensorFlow Lite interpreter. 9 | - Using OpenCV, the frame is overlayed with information such as bounding boxes, center coordinates of the object, deviation of the object from center of the frame etc. 10 | - The frame with overlays is streamed over LAN using FLASK, which can be accessed using a browser by typing IP address of the RPi followed by the port (2204 as per this code) 11 | - Google Coral USB Accelerator should be used to accelerate the inferencing process. 12 | 13 | When Coral USB Accelerator is connected, amend line 14 of util.py as:- 14 | edgetpu = 1 15 | 16 | When Coral USB Accelerator is not connected, amend line 14 of util.py as:- 17 | edgetpu = 0 18 | 19 | The code moves the robot in order to bring center of the object closer to center of the frame. 
20 | """ 21 | 22 | import common as cm 23 | import cv2 24 | import numpy as np 25 | from PIL import Image 26 | import time 27 | from threading import Thread 28 | 29 | import sys 30 | sys.path.insert(0, '/var/www/html/earthrover') 31 | import util as ut 32 | ut.init_gpio() 33 | 34 | cap = cv2.VideoCapture(0) 35 | threshold=0.2 36 | top_k=5 #number of objects to be shown as detected 37 | 38 | model_dir = '/var/www/html/all_models' 39 | model = 'mobilenet_ssd_v2_coco_quant_postprocess.tflite' 40 | model_edgetpu = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite' 41 | lbl = 'coco_labels.txt' 42 | 43 | tolerance=0.1 44 | x_deviation=0 45 | y_deviation=0 46 | arr_track_data=[0,0,0,0,0,0] 47 | 48 | arr_valid_objects=['apple', 'sports ball', 'frisbee', 'orange', 'mouse', 'vase', 'banana' ] 49 | 50 | #---------Flask---------------------------------------- 51 | from flask import Flask, Response 52 | from flask import render_template 53 | 54 | app = Flask(__name__) 55 | 56 | @app.route('/') 57 | def index(): 58 | #return "Default Message" 59 | return render_template("index.html") 60 | 61 | @app.route('/video_feed') 62 | def video_feed(): 63 | #global cap 64 | return Response(main(), 65 | mimetype='multipart/x-mixed-replace; boundary=frame') 66 | 67 | #----------------------------------------------------------- 68 | 69 | 70 | #-----initialise motor speed----------------------------------- 71 | 72 | import RPi.GPIO as GPIO 73 | GPIO.setmode(GPIO.BCM) # choose BCM numbering scheme 74 | 75 | GPIO.setup(20, GPIO.OUT)# set GPIO 20 as output pin 76 | GPIO.setup(21, GPIO.OUT)# set GPIO 21 as output pin 77 | 78 | pin20 = GPIO.PWM(20, 100) # create object pin20 for PWM on port 20 at 100 Hertz 79 | pin21 = GPIO.PWM(21, 100) # create object pin21 for PWM on port 21 at 100 Hertz 80 | 81 | #set speed to maximum value 82 | val=100 83 | pin20.start(val) # start pin20 on 0 percent duty cycle (off) 84 | pin21.start(val) # start pin21 on 0 percent duty cycle (off) 85 | 86 | print("speed set to: ", val) 87 | #--------------------------------------------------------------- 88 | 89 | 90 | def track_object(objs,labels): 91 | 92 | #global delay 93 | global x_deviation, y_deviation, tolerance, arr_track_data 94 | 95 | 96 | if(len(objs)==0): 97 | print("no objects to track") 98 | ut.stop() 99 | ut.red_light("OFF") 100 | arr_track_data=[0,0,0,0,0,0] 101 | return 102 | 103 | 104 | #ut.head_lights("OFF") 105 | k=0 106 | flag=0 107 | for obj in objs: 108 | lbl=labels.get(obj.id, obj.id) 109 | k = arr_valid_objects.count(lbl) 110 | if (k>0): 111 | x_min, y_min, x_max, y_max = list(obj.bbox) 112 | flag=1 113 | break 114 | 115 | #print(x_min, y_min, x_max, y_max) 116 | if(flag==0): 117 | print("selected object no present") 118 | return 119 | 120 | x_diff=x_max-x_min 121 | y_diff=y_max-y_min 122 | print("x_diff: ",round(x_diff,5)) 123 | print("y_diff: ",round(y_diff,5)) 124 | 125 | 126 | obj_x_center=x_min+(x_diff/2) 127 | obj_x_center=round(obj_x_center,3) 128 | 129 | obj_y_center=y_min+(y_diff/2) 130 | obj_y_center=round(obj_y_center,3) 131 | 132 | 133 | #print("[",obj_x_center, obj_y_center,"]") 134 | 135 | x_deviation=round(0.5-obj_x_center,3) 136 | y_deviation=round(0.5-obj_y_center,3) 137 | 138 | print("{",x_deviation,y_deviation,"}") 139 | 140 | #move_robot() 141 | thread = Thread(target = move_robot) 142 | thread.start() 143 | #thread.join() 144 | 145 | #print(cmd) 146 | 147 | arr_track_data[0]=obj_x_center 148 | arr_track_data[1]=obj_y_center 149 | arr_track_data[2]=x_deviation 150 | arr_track_data[3]=y_deviation 151 
| 152 | 153 | #this function is executed within a thread 154 | def move_robot(): 155 | global x_deviation, y_deviation, tolerance, arr_track_data 156 | 157 | print("moving robot .............!!!!!!!!!!!!!!") 158 | print(x_deviation, y_deviation, tolerance, arr_track_data) 159 | 160 | if(abs(x_deviation)abs(y_deviation)): 169 | if(x_deviation>=tolerance): 170 | cmd="Move Left" 171 | delay1=get_delay(x_deviation,'l') 172 | 173 | ut.left() 174 | time.sleep(delay1) 175 | ut.stop() 176 | 177 | if(x_deviation<=-1*tolerance): 178 | cmd="Move Right" 179 | delay1=get_delay(x_deviation,'r') 180 | 181 | ut.right() 182 | time.sleep(delay1) 183 | ut.stop() 184 | else: 185 | 186 | if(y_deviation>=tolerance): 187 | cmd="Move Forward" 188 | delay1=get_delay(y_deviation,'f') 189 | 190 | ut.forward() 191 | time.sleep(delay1) 192 | ut.stop() 193 | 194 | if(y_deviation<=-1*tolerance): 195 | cmd="Move Backward" 196 | delay1=get_delay(y_deviation,'b') 197 | 198 | ut.back() 199 | time.sleep(delay1) 200 | ut.stop() 201 | 202 | 203 | arr_track_data[4]=cmd 204 | arr_track_data[5]=delay1 205 | 206 | #based on the deviation of the object from the center of the frame, a delay value is returned by this function 207 | #which decides how long the motion command is to be given to the motors. 208 | def get_delay(deviation,direction): 209 | deviation=abs(deviation) 210 | if (direction=='f' or direction=='b'): 211 | if(deviation>=0.3): 212 | d=0.1 213 | elif(deviation>=0.2 and deviation<0.30): 214 | d=0.075 215 | elif(deviation>=0.15 and deviation<0.2): 216 | d=0.045 217 | else: 218 | d=0.035 219 | else: 220 | if(deviation>=0.4): 221 | d=0.080 222 | elif(deviation>=0.35 and deviation<0.40): 223 | d=0.070 224 | elif(deviation>=0.30 and deviation<0.35): 225 | d=0.060 226 | elif(deviation>=0.25 and deviation<0.30): 227 | d=0.050 228 | elif(deviation>=0.20 and deviation<0.25): 229 | d=0.040 230 | else: 231 | d=0.030 232 | 233 | return d 234 | 235 | 236 | def main(): 237 | 238 | from util import edgetpu 239 | 240 | if (edgetpu==1): 241 | mdl = model_edgetpu 242 | else: 243 | mdl = model 244 | 245 | interpreter, labels =cm.load_model(model_dir,mdl,lbl,edgetpu) 246 | 247 | fps=1 248 | arr_dur=[0,0,0] 249 | #while cap.isOpened(): 250 | while True: 251 | start_time=time.time() 252 | 253 | #----------------Capture Camera Frame----------------- 254 | start_t0=time.time() 255 | ret, frame = cap.read() 256 | if not ret: 257 | break 258 | 259 | cv2_im = frame 260 | cv2_im = cv2.flip(cv2_im, 0) 261 | cv2_im = cv2.flip(cv2_im, 1) 262 | 263 | cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB) 264 | pil_im = Image.fromarray(cv2_im_rgb) 265 | 266 | arr_dur[0]=time.time() - start_t0 267 | #cm.time_elapsed(start_t0,"camera capture") 268 | #---------------------------------------------------- 269 | 270 | #-------------------Inference--------------------------------- 271 | start_t1=time.time() 272 | cm.set_input(interpreter, pil_im) 273 | interpreter.invoke() 274 | objs = cm.get_output(interpreter, score_threshold=threshold, top_k=top_k) 275 | 276 | arr_dur[1]=time.time() - start_t1 277 | #cm.time_elapsed(start_t1,"inference") 278 | #---------------------------------------------------- 279 | 280 | #-----------------other------------------------------------ 281 | start_t2=time.time() 282 | track_object(objs,labels)#tracking <<<<<<< 283 | 284 | if cv2.waitKey(1) & 0xFF == ord('q'): 285 | break 286 | 287 | 288 | cv2_im = draw_overlays(cv2_im, objs, labels, arr_dur, arr_track_data) 289 | # cv2.imshow('Object Tracking - TensorFlow Lite', cv2_im) 290 
| 291 | ret, jpeg = cv2.imencode('.jpg', cv2_im) 292 | pic = jpeg.tobytes() 293 | 294 | #Flask streaming 295 | yield (b'--frame\r\n' 296 | b'Content-Type: image/jpeg\r\n\r\n' + pic + b'\r\n\r\n') 297 | 298 | arr_dur[2]=time.time() - start_t2 299 | #cm.time_elapsed(start_t2,"other") 300 | #cm.time_elapsed(start_time,"overall") 301 | 302 | #print("arr_dur:",arr_dur) 303 | fps = round(1.0 / (time.time() - start_time),1) 304 | print("*********FPS: ",fps,"************") 305 | 306 | cap.release() 307 | cv2.destroyAllWindows() 308 | 309 | def draw_overlays(cv2_im, objs, labels, arr_dur, arr_track_data): 310 | height, width, channels = cv2_im.shape 311 | font=cv2.FONT_HERSHEY_SIMPLEX 312 | 313 | global tolerance 314 | 315 | #draw black rectangle on top 316 | cv2_im = cv2.rectangle(cv2_im, (0,0), (width, 24), (0,0,0), -1) 317 | 318 | 319 | #write processing durations 320 | cam=round(arr_dur[0]*1000,0) 321 | inference=round(arr_dur[1]*1000,0) 322 | other=round(arr_dur[2]*1000,0) 323 | text_dur = 'Camera: {}ms Inference: {}ms other: {}ms'.format(cam,inference,other) 324 | cv2_im = cv2.putText(cv2_im, text_dur, (int(width/4)-30, 16),font, 0.4, (255, 255, 255), 1) 325 | 326 | #write FPS 327 | total_duration=cam+inference+other 328 | fps=round(1000/total_duration,1) 329 | text1 = 'FPS: {}'.format(fps) 330 | cv2_im = cv2.putText(cv2_im, text1, (10, 20),font, 0.7, (150, 150, 255), 2) 331 | 332 | 333 | #draw black rectangle at bottom 334 | cv2_im = cv2.rectangle(cv2_im, (0,height-24), (width, height), (0,0,0), -1) 335 | 336 | #write deviations and tolerance 337 | str_tol='Tol : {}'.format(tolerance) 338 | cv2_im = cv2.putText(cv2_im, str_tol, (10, height-8),font, 0.55, (150, 150, 255), 2) 339 | 340 | 341 | x_dev=arr_track_data[2] 342 | str_x='X: {}'.format(x_dev) 343 | if(abs(x_dev) 2 | 3 |
4 | 5 |
6 | 7 | 8 | -------------------------------------------------------------------------------- /earthrover/object_tracking/web/ajax_master.php: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /earthrover/range_sensor/avoid_collision.py: -------------------------------------------------------------------------------- 1 | #Project: Earthrover 2 | #Created by: Jitesh Saini 3 | 4 | import time,sys,os 5 | 6 | sys.path.insert(0, '/var/www/html/earthrover') 7 | import util as ut 8 | 9 | #dynamically obtain path of current file 10 | local_path=os.path.dirname(os.path.realpath(__file__)) 11 | 12 | ut.init_gpio() 13 | 14 | while 1: 15 | 16 | f1 = open(local_path+"/web/range.txt", "r+") 17 | distance = f1.read(20); 18 | f1.close() 19 | 20 | print ("distance:", distance) 21 | 22 | #if due to some reason file is empty, ignore such occurance 23 | if (distance=="" or distance=="--"): 24 | print("blank file") 25 | continue 26 | 27 | if(float(distance)<30): #to be removed from here, move to seperate python file 28 | ut.back() 29 | ut.speak_tts("obstacle detected","f") 30 | time.sleep(1) 31 | ut.stop() 32 | 33 | time.sleep(0.2) 34 | -------------------------------------------------------------------------------- /earthrover/range_sensor/master.py: -------------------------------------------------------------------------------- 1 | #Project: Earthrover 2 | #Created by: Jitesh Saini 3 | 4 | import time,os 5 | import sys 6 | 7 | sys.path.insert(0, '/var/www/html/earthrover') 8 | import util as ut 9 | 10 | local_path=os.path.dirname(os.path.realpath(__file__)) 11 | 12 | status = sys.argv[1] 13 | #ut.speak_tts(status,"f") 14 | 15 | if (status=="1"): 16 | #print "starting 2 python scripts" 17 | os.system("sudo python /var/www/html/earthrover/range_sensor/range_sensor.py &") 18 | time.sleep(1) #should be equal to settling time of range sensor 19 | os.system("sudo python /var/www/html/earthrover/range_sensor/monitorSensor.py &") 20 | time.sleep(0.1) 21 | os.system("sudo python /var/www/html/earthrover/range_sensor/avoid_collision.py &") 22 | 23 | 24 | if (status=="0"): 25 | 26 | ut.speak_tts("sensor off ","f") 27 | os.system("sudo pkill -f monitorSensor.py") 28 | time.sleep(0.1) 29 | os.system("sudo pkill -f range_sensor.py") 30 | time.sleep(0.1) 31 | os.system("sudo pkill -f avoid_collision.py") 32 | 33 | f1 = open(local_path+"/web/range.txt", "wb") 34 | f1.write("--") 35 | f1.close() 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /earthrover/range_sensor/monitorSensor.py: -------------------------------------------------------------------------------- 1 | #Project: Earthrover 2 | #Created by: Jitesh Saini 3 | 4 | import time,os 5 | 6 | import sys 7 | sys.path.insert(0, '/var/www/html/earthrover') 8 | import util as ut 9 | 10 | local_path=os.path.dirname(os.path.realpath(__file__)) 11 | 12 | prev_distance=0 13 | counter=0 14 | ut.speak_tts("monitoring","m") 15 | while 1: 16 | f0 = open(local_path+"/web/range.txt", "r+") 17 | distance = f0.read(15); 18 | f0.close() 19 | 20 | if(distance==prev_distance): 21 | counter=counter+1 22 | else: 23 | counter=0 24 | 25 | print (counter), distance 26 | 27 | 28 | 29 | if(counter==3): 30 | ut.speak_tts("restarting","m") 31 | os.system("sudo pkill -f range_sensor.py") 32 | print("stopped range_sensor.py !!!") 33 | time.sleep(0.1) 34 | os.system("python /var/www/html/earthrover/range_sensor/range_sensor.py 
&") 35 | print("started range_sensor.py !!!") 36 | 37 | 38 | prev_distance = distance 39 | time.sleep(0.7) #this should be more than file writing rate in range_sensor.py 40 | 41 | 42 | -------------------------------------------------------------------------------- /earthrover/range_sensor/range_sensor.py: -------------------------------------------------------------------------------- 1 | #Project: Earthrover 2 | #Created by: Jitesh Saini 3 | 4 | import RPi.GPIO as GPIO 5 | import time,os 6 | 7 | import sys 8 | sys.path.insert(0, '/var/www/html/earthrover') 9 | import util as ut 10 | 11 | local_path=os.path.dirname(os.path.realpath(__file__)) 12 | 13 | 14 | TRIG = 23 15 | ECHO = 24 16 | 17 | GPIO.setmode(GPIO.BCM) 18 | 19 | GPIO.setup(TRIG,GPIO.OUT) 20 | GPIO.setup(ECHO,GPIO.IN) 21 | 22 | GPIO.output(TRIG, False) 23 | 24 | ut.speak_tts("sensor ON","f") 25 | 26 | print "Waiting For Sensor To Settle" 27 | time.sleep(1) #settling time 28 | 29 | prev_distance=0 30 | 31 | while 1: 32 | try: 33 | GPIO.output(TRIG, True) 34 | time.sleep(0.00001) 35 | GPIO.output(TRIG, False) 36 | 37 | while GPIO.input(ECHO)==0: 38 | pulse_start = time.time() 39 | 40 | while GPIO.input(ECHO)==1: 41 | pulse_end = time.time() 42 | 43 | pulse_duration = pulse_end - pulse_start 44 | 45 | distance = pulse_duration * 17150 46 | 47 | distance = round(distance, 5) 48 | 49 | diff = abs(distance - prev_distance) 50 | print "diff: ", diff 51 | 52 | if (diff < 10): 53 | 54 | print "Distance:",distance,"cm |||| Prev_Distance:",prev_distance,"cm" 55 | #print distance,"cm" 56 | 57 | f1 = open(local_path+"/web/range.txt", "wb") 58 | f1.write(str(distance)) 59 | f1.close() 60 | 61 | else: 62 | print "Error in rangeSensor calculation:", distance - prev_distance 63 | 64 | 65 | 66 | prev_distance = distance 67 | time.sleep(0.25) 68 | 69 | except Exception as e: 70 | ut.speak_tts(e,"f") 71 | pass 72 | 73 | ut.speak_tts("closed range sensor","f") 74 | 75 | -------------------------------------------------------------------------------- /earthrover/range_sensor/web/ajax_getRange.php: -------------------------------------------------------------------------------- 1 | 15 | -------------------------------------------------------------------------------- /earthrover/range_sensor/web/ajax_rangeSensor.php: -------------------------------------------------------------------------------- 1 | "; 6 | 7 | $xx=exec("sudo python /var/www/html/earthrover/range_sensor/master.py $state"); 8 | 9 | ?> 10 | -------------------------------------------------------------------------------- /earthrover/range_sensor/web/range.txt: -------------------------------------------------------------------------------- 1 | -- -------------------------------------------------------------------------------- /earthrover/range_sensor/web/rangesensor.js: -------------------------------------------------------------------------------- 1 | var interval; 2 | function toggle_rangeSensor(id) 3 | { 4 | 5 | //alert(id); 6 | console.log("toggle_rangeSensor button clicked"); 7 | button_caption=document.getElementById(id).value; 8 | //alert(button_caption); 9 | if(button_caption=="OFF"){ 10 | rangeSensor(1); 11 | 12 | document.getElementById(id).value="ON"; 13 | document.getElementById(id).style.backgroundColor="#66ff66"; 14 | //alert("hi"); 15 | interval=window.setInterval(get_range, 500); //timer for initiating ajax request 16 | 17 | 18 | 19 | } 20 | if(button_caption=="ON"){ 21 | rangeSensor(0); 22 | document.getElementById(id).value="OFF"; 23 | 
document.getElementById(id).style.backgroundColor="white"; 24 | 25 | clearInterval(interval); 26 | document.getElementById("range").innerHTML=""; 27 | 28 | 29 | } 30 | 31 | } 32 | function rangeSensor(state) 33 | { console.log("state: ", state); 34 | 35 | $.post("/earthrover/range_sensor/web/ajax_rangeSensor.php", 36 | { 37 | state: state 38 | } 39 | ); 40 | 41 | } 42 | function get_range() 43 | { 44 | $.post("/earthrover/range_sensor/web/ajax_getRange.php", 45 | { 46 | //direction: dir 47 | //speed:sp 48 | }, 49 | function(data){ 50 | //document.getElementById("range").innerHTML = data; 51 | 52 | if (data<=30) 53 | document.getElementById("range").style.color="red"; 54 | else if(data > 30 && data <= 60) 55 | document.getElementById("range").style.color="orange"; 56 | else 57 | document.getElementById("range").style.color="blue"; 58 | 59 | if (data>400) 60 | document.getElementById("range").innerHTML = "-"; 61 | else 62 | document.getElementById("range").innerHTML = data; 63 | 64 | }); 65 | } 66 | -------------------------------------------------------------------------------- /earthrover/range_sensor/web/test.php: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Range Sensor 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /earthrover/speaker/sounds/horn.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/speaker/sounds/horn.mp3 -------------------------------------------------------------------------------- /earthrover/speaker/sounds/siren.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/speaker/sounds/siren.mp3 -------------------------------------------------------------------------------- /earthrover/speaker/speaker_tts.py: -------------------------------------------------------------------------------- 1 | #Project: Earthrover 2 | #Created by: Jitesh Saini 3 | 4 | import RPi.GPIO as GPIO 5 | GPIO.setwarnings(False) 6 | 7 | import os, sys 8 | 9 | text = sys.argv[1] 10 | gender = sys.argv[2] 11 | 12 | print "text to speech: ", text 13 | #print "gender: ", gender 14 | 15 | speaker_light = 9 #17 16 | 17 | GPIO.setmode(GPIO.BCM) 18 | GPIO.setup(speaker_light, GPIO.OUT) 19 | blink = GPIO.PWM(speaker_light, 3) #frequency 20 | blink.start(0) 21 | blink.ChangeDutyCycle(50) #start blinking the mouthpiece LED as the text is converted to speech 22 | 23 | #str1="testing speaker" 24 | 25 | cmd_speak="sudo espeak -ven-us+" + gender + "5 -s120 " + "'" + text + "'" + " --stdout |aplay" 26 | 27 | print cmd_speak 28 | os.system(cmd_speak) 29 | 30 | blink.ChangeDutyCycle(0) #stop blinking the mouthpiece LED 31 | 32 | GPIO.output(speaker_light, False) 33 | -------------------------------------------------------------------------------- /earthrover/speaker/web/ajax_omx.php: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /earthrover/speaker/web/ajax_tts.php: -------------------------------------------------------------------------------- 1 | 29 | -------------------------------------------------------------------------------- /earthrover/speaker/web/speaker.js: -------------------------------------------------------------------------------- 1 | function button_tts() 2 | { 3 | var str = document.getElementById("txt_tts").value; 4 | //var str2 = document.getElementById("radio1").value; 5 | var state = document.getElementById('radio1').checked; 6 | 7 | console.log(str); 8 | 9 | var gender; 10 | 11 | if (state)gender='m'; 12 | else gender='f'; 13 | 14 | 15 | console.log(gender); 16 | //alert(str); 17 | $.post("/earthrover/speaker/web/ajax_tts.php", 18 | { 19 | str:str.toLowerCase(), 20 | gen:gender 21 | } 22 | ); 23 | } 24 | 25 | function button_recording(no) 26 | { 27 | var path; 28 | switch(no) { 29 | case 1:path="earthrover/speaker/sounds/horn.mp3";break; 30 | case 2:path="earthrover/speaker/sounds/siren.mp3";break; 31 | } 32 | 33 | console.log(path); 34 | //alert(str); 35 | $.post("/earthrover/speaker/web/ajax_omx.php", 36 | { 37 | rec_path:path 38 | } 39 | ); 40 | } 41 | -------------------------------------------------------------------------------- /earthrover/tm/ajax_action.php: -------------------------------------------------------------------------------- 1 | 24 | -------------------------------------------------------------------------------- /earthrover/tm/index.html: 
-------------------------------------------------------------------------------- 1 | 7 | 8 | 9 | 10 | Teachable Machine Model 11 | 22 | 23 | 24 |
25 |

Gesture Controlled Robot

26 |

Teachable Machine's Image Model on Raspberry Pi

27 |
28 | 29 |
30 |

31 |

32 |
33 | 34 |
35 | 36 |
37 |
38 | 39 |
40 | 41 |
42 |

- Press the 'Start' button and wait for the webcam to start.

43 |

- The model is generated through the Teachable Machine online tool by Google. The following gestures are recognised by the model:-

44 |

45 |

- Using your hand, make any of the 3 gestures shown above. Ensure that the background is blank while you show hand gestures to the webcam.

46 |

When a gesture is recognised, a command is sent to the server, i.e. the Raspberry Pi from which this page is fetched. Based on the command received, the GPIO pins of the RPi are actuated, as sketched below.
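As an illustration of that last step, here is a minimal Python sketch of how a received gesture label could drive the motion helpers defined in util.py. The page actually posts the gesture to ajax_action.php; the label-to-action pairs shown here are an assumption for illustration only, not the project's real mapping.

```python
# Hypothetical sketch: map a Teachable Machine class label to a robot action.
# The gesture labels come from metadata.json; the action assigned to each
# gesture below is an assumed mapping, not the repo's actual one.
import sys
sys.path.insert(0, '/var/www/html/earthrover')
import util as ut

ut.init_gpio()

GESTURE_ACTIONS = {
    "Gesture-1": ut.forward,  # assumed mapping
    "Gesture-2": ut.left,     # assumed mapping
    "Gesture-3": ut.right,    # assumed mapping
    "none": ut.stop,
}

def act_on_gesture(label):
    # Unknown or 'none' labels stop the robot
    GESTURE_ACTIONS.get(label, ut.stop)()

act_on_gesture("Gesture-1")
```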

47 | 48 |
49 | 50 | 54 | 55 | 56 | 57 | 58 | 59 | 187 | 188 | 189 | -------------------------------------------------------------------------------- /earthrover/tm/metadata.json: -------------------------------------------------------------------------------- 1 | {"tfjsVersion":"1.3.1","tmVersion":"2.3.1","packageVersion":"0.8.4","packageName":"@teachablemachine/image","timeStamp":"2021-04-17T06:52:22.855Z","userMetadata":{},"modelName":"tm-my-image-model","labels":["none","Gesture-1","Gesture-2","Gesture-3"]} -------------------------------------------------------------------------------- /earthrover/tm/weights.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/tm/weights.bin -------------------------------------------------------------------------------- /earthrover/util.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Project: Earthrover Robot 3 | Author: Jitesh Saini 4 | Github: https://github.com/jiteshsaini 5 | website: https://helloworld.co.in 6 | 7 | ''' 8 | 9 | import RPi.GPIO as GPIO 10 | GPIO.setwarnings(False) 11 | 12 | import os, time 13 | 14 | edgetpu=0 # If Coral USB Accelerator connected, then make it '1' otherwise '0' 15 | 16 | m1_1 = 8 17 | m1_2 = 11 18 | m2_1 = 14 19 | m2_2 = 15 20 | cam_light = 17 21 | headlight_right = 18 22 | headlight_left = 27 23 | sp_light=9 24 | 25 | 26 | def init_gpio(): 27 | GPIO.setmode(GPIO.BCM) 28 | GPIO.setup(m1_1,GPIO.OUT) 29 | GPIO.setup(m1_2,GPIO.OUT) 30 | GPIO.setup(m2_1,GPIO.OUT) 31 | GPIO.setup(m2_2,GPIO.OUT) 32 | GPIO.setup(cam_light,GPIO.OUT) 33 | GPIO.setup(headlight_right,GPIO.OUT) 34 | GPIO.setup(headlight_left,GPIO.OUT) 35 | GPIO.setup(sp_light,GPIO.OUT) 36 | 37 | 38 | def back(): 39 | print("moving back!!!!!!") 40 | GPIO.output(m1_1, False) 41 | GPIO.output(m1_2, True) 42 | GPIO.output(m2_1, True) 43 | GPIO.output(m2_2, False) 44 | 45 | def right(): 46 | GPIO.output(m1_1, True) 47 | GPIO.output(m1_2, False) 48 | GPIO.output(m2_1, True) 49 | GPIO.output(m2_2, False) 50 | 51 | def left(): 52 | GPIO.output(m1_1, False) 53 | GPIO.output(m1_2, True) 54 | GPIO.output(m2_1, False) 55 | GPIO.output(m2_2, True) 56 | 57 | def forward(): 58 | GPIO.output(m1_1, True) 59 | GPIO.output(m1_2, False) 60 | GPIO.output(m2_1, False) 61 | GPIO.output(m2_2, True) 62 | 63 | def stop(): 64 | GPIO.output(m1_1, False) 65 | GPIO.output(m1_2, False) 66 | GPIO.output(m2_1, False) 67 | GPIO.output(m2_2, False) 68 | 69 | def speak_tts(text,gender): 70 | cmd="python /var/www/html/earthrover/speaker/speaker_tts.py '" + text + "' " + gender + " &" 71 | os.system(cmd) 72 | 73 | def camera_light(state): 74 | if(state=="ON"): 75 | GPIO.output(cam_light, True) 76 | #print("light on") 77 | else: 78 | GPIO.output(cam_light, False) 79 | #print("light off") 80 | 81 | def head_lights(state): 82 | if(state=="ON"): 83 | GPIO.output(headlight_left, True) 84 | GPIO.output(headlight_right, True) 85 | #print("light on") 86 | else: 87 | GPIO.output(headlight_left, False) 88 | GPIO.output(headlight_right, False) 89 | #print("light off") 90 | 91 | def red_light(state): 92 | if(state=="ON"): 93 | GPIO.output(sp_light, True) 94 | #print("light on") 95 | else: 96 | GPIO.output(sp_light, False) 97 | #print("light off") 98 | 99 | -------------------------------------------------------------------------------- /earthrover/vars.php: 
-------------------------------------------------------------------------------- 1 | "; 19 | global $m1_1,$m1_2,$m2_1,$m2_2; 20 | 21 | //====motors================= 22 | 23 | set_gpio($m1_1,'output'); 24 | set_gpio($m1_2,'output'); 25 | set_gpio($m2_1,'output'); 26 | set_gpio($m2_2,'output'); 27 | 28 | set_gpio($m1_1,'0'); 29 | set_gpio($m1_2,'0'); 30 | set_gpio($m2_1,'0'); 31 | set_gpio($m2_2,'0'); 32 | 33 | global $cameralight,$headlight_left,$headlight_right; 34 | 35 | //====Lights============ 36 | set_gpio($headlight_right,'output'); 37 | set_gpio($headlight_left,'output'); 38 | set_gpio($cameralight,'output'); 39 | 40 | set_gpio($headlight_right,'0'); 41 | set_gpio($headlight_left,'0'); 42 | set_gpio($cameralight,'0'); 43 | } 44 | 45 | function set_speed($pwm_val){ 46 | $myFile = "/var/www/html/earthrover/control_panel/pwm/pwm1.txt"; 47 | $fh = fopen($myFile, 'w') or die("can't open file"); 48 | fwrite($fh, $pwm_val); 49 | fclose($fh); 50 | 51 | /* append following lines in /etc/sudoers file for launching python script from PHP:- 52 | pi ALL=(ALL) NOPASSWD: ALL 53 | www-data ALL=(ALL) NOPASSWD: ALL 54 | */ 55 | exec("sudo python /var/www/html/earthrover/control_panel/pwm/pwm_control.py");# launch Python script 56 | 57 | } 58 | 59 | function move($dir){ 60 | switch ($dir) { 61 | case 'f': forward(); break; 62 | case 'b': back(); break; 63 | case 'r': right(); break; 64 | case 'l': left(); break; 65 | case 's': stop(); break; 66 | } 67 | } 68 | 69 | function right(){ 70 | global $m1_1,$m1_2,$m2_1,$m2_2; 71 | set_gpio($m1_1,'1'); 72 | set_gpio($m1_2,'0'); 73 | set_gpio($m2_1,'1'); 74 | set_gpio($m2_2,'0'); 75 | 76 | } 77 | function left(){ 78 | global $m1_1,$m1_2,$m2_1,$m2_2; 79 | set_gpio($m1_1,'0'); 80 | set_gpio($m1_2,'1'); 81 | set_gpio($m2_1,'0'); 82 | set_gpio($m2_2,'1'); 83 | } 84 | function forward(){ 85 | global $m1_1,$m1_2,$m2_1,$m2_2; 86 | set_gpio($m1_1,'1'); 87 | set_gpio($m1_2,'0'); 88 | set_gpio($m2_1,'0'); 89 | set_gpio($m2_2,'1'); 90 | //echo"fwd
"; 91 | } 92 | function back(){ 93 | global $m1_1,$m1_2,$m2_1,$m2_2; 94 | set_gpio($m1_1,'0'); 95 | set_gpio($m1_2,'1'); 96 | set_gpio($m2_1,'1'); 97 | set_gpio($m2_2,'0'); 98 | } 99 | function stop(){ 100 | global $m1_1,$m1_2,$m2_1,$m2_2; 101 | set_gpio($m1_1,'0'); 102 | set_gpio($m1_2,'0'); 103 | set_gpio($m2_1,'0'); 104 | set_gpio($m2_2,'0'); 105 | } 106 | 107 | function set_gpio($pin,$x){ 108 | switch($x){ 109 | case '1': $z='dh';break; 110 | case '0': $z='dl';break; 111 | case 'output': $z='op';break; 112 | } 113 | $cmd="sudo raspi-gpio set $pin $z"; 114 | system($cmd); 115 | //echo"$x: $cmd
"; 116 | } 117 | 118 | ?> 119 | 120 | -------------------------------------------------------------------------------- /earthrover/voice_control/action.php: -------------------------------------------------------------------------------- 1 | 1 and $sz<=3){ 25 | 26 | if($word[1] == "forward"){ 27 | forward(); 28 | echo"[$time]: moving forward
"; 29 | } 30 | elseif($word[1] == "backward"){ 31 | back(); 32 | echo"[$time]: moving backwards
"; 33 | } 34 | elseif($word[1] == "left"){ 35 | left(); 36 | echo"[$time]: turning left
"; 37 | } 38 | elseif($word[1] == "right"){ 39 | right(); 40 | echo"[$time]: turning right
"; 41 | } 42 | elseif($word[1] == "stop"){ 43 | stop(); 44 | echo"[$time]: movement stopped
"; 45 | } 46 | elseif($word[1] == "lights"){ 47 | 48 | if($word[2] == "on"){ 49 | system("gpio -g write $cameralight 1"); 50 | system("gpio -g write $headlight_right 1"); 51 | system("gpio -g write $headlight_left 1"); 52 | 53 | echo"[$time]: Lights switched ON
"; 54 | } 55 | elseif ($word[2] == "off") { 56 | system("gpio -g write $cameralight 0"); 57 | system("gpio -g write $headlight_right 0"); 58 | system("gpio -g write $headlight_left 0"); 59 | 60 | echo"[$time]: Lights switched OFF
"; 61 | } 62 | else{ 63 | echo"Invalid command
"; 64 | } 65 | 66 | } 67 | else{ 68 | echo"Invalid command
"; 69 | } 70 | 71 | } 72 | else{ 73 | 74 | echo"Invalid command
"; 75 | 76 | } 77 | 78 | 79 | 80 | ?> 81 | -------------------------------------------------------------------------------- /earthrover/voice_control/images/rover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/voice_control/images/rover.png -------------------------------------------------------------------------------- /earthrover/voice_control/images/speak.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiteshsaini/robotics-level-4/34e70e32c6733c3500619d5d73bd2f6ceb77abe8/earthrover/voice_control/images/speak.png -------------------------------------------------------------------------------- /earthrover/voice_control/index.html: -------------------------------------------------------------------------------- 1 | 5 | 6 | 7 | 8 | 9 | 10 | Voice Control 11 | 12 | 13 | 14 | 47 | 48 | 49 |

Earth Rover Voice Control

50 |

Your Browser Doesn't Support the Web Speech API

51 | 52 |
53 | Voice commands to Rover 54 |

55 | 56 |
57 | 58 |
59 | 60 |
61 | 62 |
63 | 64 |
65 | 68 | 69 | 72 | 73 |

74 |
75 | 76 |
77 | Response from Rover 78 |

79 | 80 | 81 | 82 | 83 |
84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /earthrover/voice_control/script.js: -------------------------------------------------------------------------------- 1 | try { 2 | var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; 3 | var recognition = new SpeechRecognition(); 4 | $('.no-browser-support').hide(); 5 | } 6 | catch(e) { 7 | console.error(e); 8 | $('.no-browser-support').show(); 9 | } 10 | 11 | 12 | var noteTextarea = $('#note-textarea'); 13 | var instructions = $('#recording-instructions'); 14 | 15 | var btn_start = $('#start-record-btn'); 16 | var btn_stop = $('#stop-record-btn'); 17 | var noteContent = ''; 18 | 19 | 20 | /*----------------------------- 21 | Voice Recognition 22 | ------------------------------*/ 23 | 24 | // If false, the recording will stop after a few seconds of silence. 25 | // When true, the silence period is longer (about 15 seconds), 26 | recognition.continuous = true; 27 | 28 | // This block is called every time the Speech APi captures a line. 29 | recognition.onresult = function(event) { 30 | 31 | // event is a SpeechRecognitionEvent object. 32 | // It holds all the lines we have captured so far. 33 | // We only need the current one. 34 | var current = event.resultIndex; 35 | 36 | // Get a transcript of what was said. 37 | var transcript = event.results[current][0].transcript; 38 | 39 | // There is a weird bug on mobile, where everything is repeated twice. 40 | // There is no official solution so far so we have to handle an edge case. 41 | var mobileRepeatBug = (current == 1 && transcript == event.results[0][0].transcript); 42 | console.log("mobile bug: " + mobileRepeatBug); 43 | 44 | if(!mobileRepeatBug) { 45 | 46 | noteTextarea.html(transcript); 47 | 48 | action(transcript); 49 | 50 | } 51 | 52 | 53 | }; 54 | 55 | function action(text){ 56 | 57 | console.log("text:" + text); 58 | 59 | $.post("action.php", 60 | { 61 | txt: text 62 | }, 63 | function(data,status){ 64 | document.getElementById("response").innerHTML = data; 65 | }); 66 | } 67 | 68 | 69 | recognition.onstart = function() { 70 | instructions.text('Voice recognition activated'); 71 | 72 | } 73 | 74 | recognition.onspeechend = function() { 75 | instructions.text('Voice recognition turned off'); 76 | btn_start.css("background-color", "white"); 77 | } 78 | 79 | recognition.onerror = function(event) { 80 | if(event.error == 'no-speech') { 81 | instructions.text('No speech was detected. Try again.'); 82 | }; 83 | } 84 | 85 | /*----------------------------- 86 | buttons 87 | ------------------------------*/ 88 | 89 | $('#start-record-btn').on('click', function(e) { 90 | console.log("start recog"); 91 | 92 | recognition.start(); 93 | btn_start.css("background-color", "green"); 94 | }); 95 | 96 | 97 | $('#stop-record-btn').on('click', function(e) { 98 | console.log("stop recog"); 99 | recognition.stop(); 100 | instructions.text('Voice recognition stopped.'); 101 | btn_start.css("background-color", "white"); 102 | 103 | }); 104 | 105 | 106 | --------------------------------------------------------------------------------