# (directory listing from the source dump, kept for reference)
# ├── Adafruit_I2C.py          ├── nginx
# ├── Adafruit_PWM_Servo_Driver.py  │   └── nginx.conf
# ├── README.md                ├── requirements.txt
# ├── arm.py                   ├── robot.py
# ├── autonomous.py            ├── run_and_parse_inception.py
# ├── camera_pi.py             ├── run_and_parse_inception.sh
# ├── classify_image.py        ├── server.sh
# ├── configure.py             ├── services
# ├── drive_server.py          │   ├── inception.service
# ├── face.py                  │   └── web.service
# ├── image.py                 ├── sonar.py
# ├── inception_server.py      ├── speaker.py
# ├── static (arm.js, robot.js, style.css)
# ├── templates (arm.html, index.html, index.html~)
# ├── wheels.py                └── wsgi.py

# ---------------------------------------------------------------------------
# /Adafruit_I2C.py
# ---------------------------------------------------------------------------
#!/usr/bin/python
import re

# smbus only exists on the Pi; guard the import so this module can still be
# imported elsewhere (e.g. for the static helpers or for tests).  Opening the
# bus in __init__ still fails loudly when smbus is unavailable.
try:
    import smbus
except ImportError:
    smbus = None


# ===========================================================================
# Adafruit_I2C Class
# ===========================================================================

class Adafruit_I2C(object):
    """Thin convenience wrapper around an smbus I2C device at one address."""

    @staticmethod
    def getPiRevision():
        "Gets the version number of the Raspberry Pi board"
        # Revision list available at:
        # http://elinux.org/RPi_HardwareHistory#Board_Revision_History
        try:
            with open('/proc/cpuinfo', 'r') as infile:
                for line in infile:
                    # Match a line of the form "Revision : 0002" while
                    # ignoring extra info in front of the revision (like 1000
                    # when the Pi was over-volted).
                    match = re.match(r'Revision\s+:\s+.*(\w{4})$', line)
                    if match and match.group(1) in ['0000', '0002', '0003']:
                        # Revision 1 boards end with 0000, 0002 or 0003.
                        return 1
                    elif match:
                        # Assume revision 2 for any other trailing 4 chars.
                        return 2
            # Couldn't find the revision; assume revision 0 like older code
            # did, for compatibility.
            return 0
        except Exception:
            # /proc/cpuinfo missing or unreadable -- keep the historical
            # fallback of revision 0 rather than crashing.
            return 0

    @staticmethod
    def getPiI2CBusNumber():
        # Gets the I2C bus number /dev/i2c#: revision 2+ boards use bus 1.
        return 1 if Adafruit_I2C.getPiRevision() > 1 else 0

    def __init__(self, address, busnum=-1, debug=False):
        """Open the device at `address` on bus `busnum` (-1 = auto-detect)."""
        self.address = address
        # By default the correct I2C bus is auto-detected using /proc/cpuinfo.
        # Alternatively you can hard-code the bus:
        #   self.bus = smbus.SMBus(0)  # Force I2C0 (early 256MB Pi's)
        #   self.bus = smbus.SMBus(1)  # Force I2C1 (512MB Pi's)
        if smbus is None:
            raise ImportError("python-smbus is required to open the I2C bus")
        self.bus = smbus.SMBus(
            busnum if busnum >= 0 else Adafruit_I2C.getPiI2CBusNumber())
        self.debug = debug

    def reverseByteOrder(self, data):
        "Reverses the byte order of an int (16-bit) or long (32-bit) value"
        # Courtesy Vishal Sapre.  hex()[2:] strips '0x'; the 'L' strip is a
        # Python 2 long-literal leftover and is harmless on Python 3.
        byteCount = len(hex(data)[2:].replace('L', '')[::2])
        val = 0
        for i in range(byteCount):
            val = (val << 8) | (data & 0xff)
            data >>= 8
        return val

    def errMsg(self):
        # Uniform error path for every bus operation below: report and
        # return -1 (callers must treat -1 as failure).
        print("Error accessing 0x%02X: Check your I2C address" % self.address)
        return -1

    def write8(self, reg, value):
        "Writes an 8-bit value to the specified register/address"
        try:
            self.bus.write_byte_data(self.address, reg, value)
            if self.debug:
                print("I2C: Wrote 0x%02X to register 0x%02X" % (value, reg))
        except IOError:
            return self.errMsg()

    def write16(self, reg, value):
        "Writes a 16-bit value to the specified register/address pair"
        try:
            self.bus.write_word_data(self.address, reg, value)
            if self.debug:
                print("I2C: Wrote 0x%02X to register pair 0x%02X,0x%02X" %
                      (value, reg, reg + 1))
        except IOError:
            return self.errMsg()

    def writeRaw8(self, value):
        "Writes an 8-bit value on the bus"
        try:
            self.bus.write_byte(self.address, value)
            if self.debug:
                print("I2C: Wrote 0x%02X" % value)
        except IOError:
            return self.errMsg()

    def writeList(self, reg, list):
        "Writes an array of bytes using I2C format"
        try:
            if self.debug:
                print("I2C: Writing list to register 0x%02X:" % reg)
                print(list)
            self.bus.write_i2c_block_data(self.address, reg, list)
        except IOError:
            return self.errMsg()

    def readList(self, reg, length):
        "Read a list of bytes from the I2C device"
        try:
            results = self.bus.read_i2c_block_data(self.address, reg, length)
            if self.debug:
                print("I2C: Device 0x%02X returned the following from reg 0x%02X" %
                      (self.address, reg))
                print(results)
            return results
        except IOError:
            return self.errMsg()

    def readU8(self, reg):
        "Read an unsigned byte from the I2C device"
        try:
            result = self.bus.read_byte_data(self.address, reg)
            if self.debug:
                print("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
                      (self.address, result & 0xFF, reg))
            return result
        except IOError:
            return self.errMsg()

    def readS8(self, reg):
        "Reads a signed byte from the I2C device"
        try:
            result = self.bus.read_byte_data(self.address, reg)
            if result > 127:
                result -= 256  # two's-complement sign extension
            if self.debug:
                print("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
                      (self.address, result & 0xFF, reg))
            return result
        except IOError:
            return self.errMsg()

    def readU16(self, reg, little_endian=True):
        "Reads an unsigned 16-bit value from the I2C device"
        try:
            result = self.bus.read_word_data(self.address, reg)
            # Swap bytes if using big endian because read_word_data assumes
            # little endian on ARM (little endian) systems.
            if not little_endian:
                result = ((result << 8) & 0xFF00) + (result >> 8)
            if self.debug:
                print("I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" %
                      (self.address, result & 0xFFFF, reg))
            return result
        except IOError:
            return self.errMsg()

    def readS16(self, reg, little_endian=True):
        "Reads a signed 16-bit value from the I2C device"
        try:
            result = self.readU16(reg, little_endian)
            if result > 32767:
                result -= 65536
            return result
        except IOError:
            return self.errMsg()


if __name__ == '__main__':
    try:
        bus = Adafruit_I2C(address=0)
        print("Default I2C bus is accessible")
    except Exception:
        print("Error accessing default I2C bus")

# ---------------------------------------------------------------------------
# /Adafruit_PWM_Servo_Driver.py
# ---------------------------------------------------------------------------
#!/usr/bin/python

import time
import math

# In the original repository this class lives in its own file; in this
# concatenated dump Adafruit_I2C is already defined above.
try:
    from Adafruit_I2C import Adafruit_I2C
except ImportError:
    pass

# ============================================================================
# Adafruit PCA9685 16-Channel PWM Servo Driver
# ============================================================================


class PWM:
    # Registers/etc.
    __MODE1 = 0x00
    __MODE2 = 0x01
    __SUBADR1 = 0x02
    __SUBADR2 = 0x03
    __SUBADR3 = 0x04
    __PRESCALE = 0xFE
    __LED0_ON_L = 0x06
    __LED0_ON_H = 0x07
    __LED0_OFF_L = 0x08
    __LED0_OFF_H = 0x09
    __ALL_LED_ON_L = 0xFA
    __ALL_LED_ON_H = 0xFB
    __ALL_LED_OFF_L = 0xFC
    __ALL_LED_OFF_H = 0xFD

    # Bits
    __RESTART = 0x80
    __SLEEP = 0x10
    __ALLCALL = 0x01
    __INVRT = 0x10
    __OUTDRV = 0x04

    # The general-call address (0x00) reaches every PCA9685 on the bus.  It
    # is created lazily so that merely importing this module does not open
    # the I2C bus (the original opened it at class-definition time, an
    # import side effect that also broke importing off the Pi).
    general_call_i2c = None

    @classmethod
    def softwareReset(cls):
        "Sends a software reset (SWRST) command to all the servo drivers on the bus"
        if cls.general_call_i2c is None:
            cls.general_call_i2c = Adafruit_I2C(0x00)
        cls.general_call_i2c.writeRaw8(0x06)  # SWRST

    def __init__(self, address=0x40, debug=False):
        """Open the PCA9685 at `address` and wake it with outputs cleared."""
        self.i2c = Adafruit_I2C(address)
        self.i2c.debug = debug
        self.address = address
        self.debug = debug
        if self.debug:
            print("Reseting PCA9685 MODE1 (without SLEEP) and MODE2")
        self.setAllPWM(0, 0)
        self.i2c.write8(self.__MODE2, self.__OUTDRV)
        self.i2c.write8(self.__MODE1, self.__ALLCALL)
        time.sleep(0.005)  # wait for oscillator

        mode1 = self.i2c.readU8(self.__MODE1)
        mode1 = mode1 & ~self.__SLEEP  # wake up (reset sleep)
        self.i2c.write8(self.__MODE1, mode1)
        time.sleep(0.005)  # wait for oscillator

    def setPWMFreq(self, freq):
        "Sets the PWM frequency"
        # prescale = round(25MHz / (4096 * freq)) - 1, per the PCA9685 datasheet.
        prescaleval = 25000000.0  # 25MHz
        prescaleval /= 4096.0     # 12-bit
        prescaleval /= float(freq)
        prescaleval -= 1.0
        if self.debug:
            print("Setting PWM frequency to %d Hz" % freq)
            print("Estimated pre-scale: %d" % prescaleval)
        prescale = math.floor(prescaleval + 0.5)
        if self.debug:
            print("Final pre-scale: %d" % prescale)

        # The prescaler can only be written while the chip is asleep.
        oldmode = self.i2c.readU8(self.__MODE1)
        newmode = (oldmode & 0x7F) | 0x10  # sleep
        self.i2c.write8(self.__MODE1, newmode)  # go to sleep
        self.i2c.write8(self.__PRESCALE, int(math.floor(prescale)))
        self.i2c.write8(self.__MODE1, oldmode)
        time.sleep(0.005)
        self.i2c.write8(self.__MODE1, oldmode | 0x80)  # RESTART

    def setPWM(self, channel, on, off):
        "Sets a single PWM channel"
        self.i2c.write8(self.__LED0_ON_L + 4 * channel, on & 0xFF)
        self.i2c.write8(self.__LED0_ON_H + 4 * channel, on >> 8)
        self.i2c.write8(self.__LED0_OFF_L + 4 * channel, off & 0xFF)
        self.i2c.write8(self.__LED0_OFF_H + 4 * channel, off >> 8)

    def setAllPWM(self, on, off):
        "Sets all PWM channels at once via the ALL_LED registers"
        self.i2c.write8(self.__ALL_LED_ON_L, on & 0xFF)
        self.i2c.write8(self.__ALL_LED_ON_H, on >> 8)
        self.i2c.write8(self.__ALL_LED_OFF_L, off & 0xFF)
        self.i2c.write8(self.__ALL_LED_OFF_H, off >> 8)

# ---------------------------------------------------------------------------
# /README.md  (beginning; continues below)
# ---------------------------------------------------------------------------
# # robot
#
# This will run a simple robot with a webserver on a raspberry PI with the
# Adafruit Motor Hat. I wrote this up for myself for fun and to help me
# remember how I set things up.
4 | 5 | High level overview can be found in this article: https://www.oreilly.com/learning/how-to-build-a-robot-that-sees-with-100-and-tensorflow 6 | 7 | ## Hardware 8 | 9 | - Raspberry PI 3 10 | - 16GB (or larger) SD Card 11 | - Adafruit Motor Hat (for wheels) 12 | - Any chassis with DC motors - for example: https://www.amazon.com/Emgreat-Chassis-Encoder-wheels-Battery/dp/B00GLO5SMY/ref=sr_1_2?ie=UTF8&qid=1486959207&sr=8-2&keywords=robot+chassis 13 | - Adafruit Servo Hat (for arms) 14 | - HC-SR04 sonars 15 | - Any stepper motor arm - for example: SainSmart DIY Control Palletizing Robot Arm for the arm (https://www.amazon.com/dp/B0179BTLZ2/ref=twister_B00YTW763Y?_encoding=UTF8&psc=1) 16 | - Raspberry PI compatible camera - for example: https://www.amazon.com/Raspberry-Pi-Camera-Module-Megapixel/dp/B01ER2SKFS/ref=sr_1_1?s=electronics&ie=UTF8&qid=1486960149&sr=1-1&keywords=raspberry+pi+camera 17 | 18 | To get started, you should be able to make the robot work without the arm, sonar and servo hat. 19 | 20 | ## Programs 21 | 22 | - robot.py program will run commands from the commandline 23 | - sonar.py tests sonar wired into GPIO ports 24 | - wheels.py tests simple DC motor wheels 25 | - arm.py tests a servo controlled robot arm 26 | - autonomous.py implements a simple driving algorithm using the wheels and sonar 27 | - inception_server.py runs an image classifying microservice 28 | 29 | ## Example Robots 30 | 31 | Here are two robots I made that use this software 32 | 33 | ![Robots](https://joyfulgrit.files.wordpress.com/2013/10/img_0183.jpg?w=700) 34 | 35 | ## Wiring The Robot 36 | ### Sonar 37 | 38 | If you want to use the default sonar configuration, wire like this: 39 | 40 | - Left sonar trigger GPIO pin 23 echo 24 41 | - Center sonar trigger GPIO pin 17 echo 18 42 | - Right sonar trigger GPIO pin 22 echo 27 43 | 44 | You can modify the pins by making a robot.conf file. 
45 | 46 | ### Wheels 47 | 48 | You can easily change this but this is what wheels.py expects 49 | 50 | - M1 - Front Left 51 | - M2 - Back Left (optional - leave unwired for 2wd chassis) 52 | - M3 - Back Right (optional - leave unwired for 2wd chassis) 53 | - M4 - Front Right 54 | 55 | 56 | ## Installation 57 | 58 | ### basic setup 59 | 60 | There are a ton of articles on how to do basic setup of a Raspberry PI - one good one is here https://www.howtoforge.com/tutorial/howto-install-raspbian-on-raspberry-pi/ 61 | 62 | You will need to turn on i2c and optionally the camera 63 | 64 | ``` 65 | raspi-config 66 | ``` 67 | 68 | Next you will need to download i2c tools and smbus 69 | 70 | ``` 71 | sudo apt-get install i2c-tools python-smbus python3-smbus 72 | ``` 73 | 74 | Test that your hat is attached and visible with 75 | 76 | ``` 77 | i2cdetect -y 1 78 | ``` 79 | 80 | Install this code 81 | 82 | ``` 83 | sudo apt-get install git 84 | git clone https://github.com/lukas/robot.git 85 | cd robot 86 | ``` 87 | 88 | Install dependencies 89 | 90 | ``` 91 | pip install -r requirements.txt 92 | ``` 93 | 94 | At this point you should be able to drive your robot locally, try: 95 | 96 | ``` 97 | ./robot.py forward 98 | ``` 99 | 100 | ### server 101 | 102 | To run a webserver in the background with a camera you need to set up gunicorn and nginx 103 | 104 | #### nginx 105 | 106 | Nginx is a lightweight fast reverse proxy - we store the camera image in RAM and serve it up directly. This was the only way I was able to get any kind of decent fps from the raspberry pi camera. We also need to proxy to gunicorn so that the user can control the robot from a webpage. 
107 | 108 | copy the configuration file from nginx/nginx.conf to /etc/nginx/nginx.conf 109 | 110 | ``` 111 | sudo apt-get install nginx 112 | sudo cp nginx/nginx.conf /etc/nginx/nginx.conf 113 | ``` 114 | 115 | restart nginx 116 | 117 | ``` 118 | sudo nginx -s reload 119 | ``` 120 | 121 | #### gunicorn 122 | 123 | install gunicorn 124 | 125 | 126 | copy configuration file from services/web.service /etc/systemd/system/web.service 127 | 128 | ``` 129 | sudo cp services/web.service /etc/systemd/system/web.service 130 | ``` 131 | 132 | start gunicorn web app service 133 | 134 | ``` 135 | sudo systemctl daemon-reload 136 | sudo systemctl enable web 137 | sudo systemctl start web 138 | ``` 139 | 140 | Your webservice should be started now. You can try driving your robot with buttons or arrow keys 141 | 142 | #### camera 143 | 144 | In order to stream from the camera you can use RPi-cam. It's documented at http://elinux.org/RPi-Cam-Web-Interface but you can also just run the following 145 | 146 | ``` 147 | git clone https://github.com/silvanmelchior/RPi_Cam_Web_Interface.git 148 | cd RPi_Cam_Web_Interface 149 | chmod u+x *.sh 150 | ./install.sh 151 | ``` 152 | 153 | Now a stream of images from the camera should be constantly updating the file at /dev/shm/mjpeg. Nginx will serve up the image directly if you request localhost/cam.jpg. 154 | 155 | #### tensorflow 156 | 157 | There is a great project at https://github.com/samjabrahams/tensorflow-on-raspberry-pi that gives instructions on installing tensorflow on the Raspberry PI. 
# /README.md (continued):
# Recently it's gotten much easier, just do
#
#     wget https://github.com/samjabrahams/tensorflow-on-raspberry-pi/releases/download/v0.11.0/tensorflow-0.11.0-cp27-none-linux_armv7l.whl
#     sudo pip install tensorflow-0.11.0-cp27-none-linux_armv7l.whl
#
# Next start a tensorflow service that loads up an inception model and does
# object recognition with the inception model
#
#     sudo cp services/inception.service /etc/systemd/system/inception.service
#     sudo systemctl daemon-reload
#     sudo systemctl enable inception
#     sudo systemctl start inception

# ---------------------------------------------------------------------------
# /arm.py -- Flask micro-app that drives the servo arm through the PCA9685.
# ---------------------------------------------------------------------------
#!/usr/bin/python

from flask import Flask, render_template, request, Response, send_file

import os
import atexit

from Adafruit_PWM_Servo_Driver import PWM
import time

# NOTE(review): constructing the driver opens the I2C bus at import time;
# this module is meant to run only on the robot itself.
pwm = PWM(0x40, debug=True)

servoMin = 150  # Min pulse length out of 4096
servoMax = 600  # Max pulse length out of 4096

pwm.setPWMFreq(60)  # Set frequency to 60 Hz

app = Flask(__name__)


def setServoPulse(channel, pulse):
    """Set `channel` to a pulse of `pulse` milliseconds.

    BUG FIX: the original Python 2 code did this arithmetic with integer
    floor division (1000000/60/4096 collapsed to 4 us/bit instead of ~4.07),
    badly quantizing the pulse.  Float math is used throughout and the
    result is only truncated at the final setPWM call.
    """
    pulseLength = 1000000.0  # 1,000,000 us per second
    pulseLength /= 60        # 60 Hz
    print("%d us per period" % pulseLength)
    pulseLength /= 4096      # 12 bits of resolution
    print("%d us per bit" % pulseLength)
    pulse *= 1000.0
    pulse /= pulseLength
    pwm.setPWM(channel, 0, int(pulse))


@app.route("/")
def main():
    return render_template('arm.html')


@app.route('/slider')
def slider():
    num = int(request.args.get('num'))
    value = int(request.args.get('value'))
    print("%i %i" % (num, value))
    # Map the 0-100 slider value onto the servo's usable pulse range.
    fraction = int(servoMin + (value / 100.0) * (servoMax - servoMin))
    print("Setting servo to %i" % fraction)
    pwm.setPWM(num, 0, fraction)
    return ""


if __name__ == "__main__":
    app.run()

# ---------------------------------------------------------------------------
# /autonomous.py -- simple obstacle-avoiding drive loop (wheels + sonar).
# ---------------------------------------------------------------------------

import wheels
import sonar
import time

# Drive states.
FORWARD = 1
LEFT = 2
RIGHT = 3
BACKWARD = 4


def autodrive(dur):
    """Drive autonomously for `dur` seconds, steering away from obstacles.

    Distances are read from the three sonars each tick; negative wheel
    speeds match the motor wiring used by wheels.py.
    """
    end_time = time.time() + dur

    mode = FORWARD
    wheels.forward(-150)

    while time.time() < end_time:
        time.sleep(0.1)
        cdist = sonar.cdist()
        ldist = sonar.ldist()
        rdist = sonar.rdist()

        print("%d %d %d" % (ldist, cdist, rdist))

        if mode == FORWARD:
            # Obstacle ahead, or nearly scraping a wall on either side:
            # stop, back off, and turn away from the nearer side.
            if cdist < 35 or ldist < 6 or rdist < 6:
                print("turning")
                wheels.stop()
                if ldist < rdist:
                    mode = RIGHT
                    wheels.backward(-100)
                    time.sleep(1)
                    wheels.right(-250)
                    time.sleep(1)
                else:
                    mode = LEFT
                    wheels.backward(-100)
                    time.sleep(1)
                    wheels.left(-250)
                    time.sleep(1)

        if mode == LEFT or mode == RIGHT:
            # Keep turning until the way ahead is clear again.
            if cdist > 50:
                mode = FORWARD
                wheels.forward(-150)

    wheels.stop()


if __name__ == '__main__':
    autodrive(10)

# ---------------------------------------------------------------------------
# /camera_pi.py -- shared Pi-camera frame grabber with a background thread.
# ---------------------------------------------------------------------------
import time
import io
import threading
import picamera


class Camera(object):
    thread = None     # background thread that reads frames from camera
    frame = None      # current frame is stored here by background thread
    last_access = 0   # time of last client access to the camera

    def initialize(self):
        """Start the background capture thread if it is not running yet."""
        if Camera.thread is None:
            # start background frame thread
            Camera.thread = threading.Thread(target=self._thread)
            Camera.thread.start()

            # wait until frames start to be available
            while self.frame is None:
                time.sleep(0)

    def get_frame(self):
        """Return the latest JPEG frame, starting the camera on first use."""
        Camera.last_access = time.time()
        self.initialize()
        return self.frame

    @classmethod
    def _thread(cls):
        """Capture loop: keeps cls.frame fresh until clients stop asking."""
        with picamera.PiCamera() as camera:
            # camera setup
            camera.resolution = (320, 240)
            camera.hflip = True
            camera.vflip = True

            # let camera warm up
            camera.start_preview()
            time.sleep(2)

            stream = io.BytesIO()
            for _ in camera.capture_continuous(stream, 'jpeg',
                                               use_video_port=True):
                # store frame
                stream.seek(0)
                cls.frame = stream.read()

                # reset stream for next frame
                stream.seek(0)
                stream.truncate()

                # if there hasn't been any clients asking for frames in
                # the last 10 seconds stop the thread
                if time.time() - cls.last_access > 10:
                    break
        cls.thread = None

# ---------------------------------------------------------------------------
# /classify_image.py -- stock TensorFlow (0.x API) Inception classifier.
# ---------------------------------------------------------------------------
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Simple image classification with Inception.

Run image classification with Inception trained on ImageNet 2012 Challenge data
set.

This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.

Change the --image_file argument to any jpg image to compute a
classification of that image.

Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.

https://tensorflow.org/tutorials/image_recognition/
"""

# (These __future__ imports sit at the top of the real classify_image.py
# module; they are commented out here only because this dump concatenates
# several files and __future__ imports must come first in a file.)
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function

import os.path
import re
import sys
import tarfile

import numpy as np
from six.moves import urllib
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

# classify_image_graph_def.pb:
#   Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
#   Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
#   Text representation of a protocol buffer mapping a label to synset ID.
tf.app.flags.DEFINE_string(
    'model_dir', '/tmp/imagenet',
    """Path to classify_image_graph_def.pb, """
    """imagenet_synset_to_human_label_map.txt, and """
    """imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string('image_file', '',
                           """Absolute path to image file.""")
tf.app.flags.DEFINE_integer('num_top_predictions', 5,
                            """Display this many predictions.""")

# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long


class NodeLookup(object):
  """Converts integer node ID's to human readable labels."""

  def __init__(self,
               label_lookup_path=None,
               uid_lookup_path=None):
    if not label_lookup_path:
      label_lookup_path = os.path.join(
          FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
    if not uid_lookup_path:
      uid_lookup_path = os.path.join(
          FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
    self.node_lookup = self.load(label_lookup_path, uid_lookup_path)

  def load(self, label_lookup_path, uid_lookup_path):
    """Loads a human readable English name for each softmax node.

    Args:
      label_lookup_path: string UID to integer node ID.
      uid_lookup_path: string UID to human-readable string.

    Returns:
      dict from integer node ID to human-readable string.
    """
    if not tf.gfile.Exists(uid_lookup_path):
      tf.logging.fatal('File does not exist %s', uid_lookup_path)
    if not tf.gfile.Exists(label_lookup_path):
      tf.logging.fatal('File does not exist %s', label_lookup_path)

    # Loads mapping from string UID to human-readable string
    proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
    uid_to_human = {}
    p = re.compile(r'[n\d]*[ \S,]*')
    for line in proto_as_ascii_lines:
      parsed_items = p.findall(line)
      uid = parsed_items[0]
      human_string = parsed_items[2]
      uid_to_human[uid] = human_string

    # Loads mapping from string UID to integer node ID.
    node_id_to_uid = {}
    proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
    for line in proto_as_ascii:
      if line.startswith('  target_class:'):
        target_class = int(line.split(': ')[1])
      if line.startswith('  target_class_string:'):
        target_class_string = line.split(': ')[1]
        # Strip the surrounding quotes and trailing newline.
        node_id_to_uid[target_class] = target_class_string[1:-2]

    # Loads the final mapping of integer node ID to human-readable string
    node_id_to_name = {}
    for key, val in node_id_to_uid.items():
      if val not in uid_to_human:
        tf.logging.fatal('Failed to locate: %s', val)
      name = uid_to_human[val]
      node_id_to_name[key] = name

    return node_id_to_name

  def id_to_string(self, node_id):
    if node_id not in self.node_lookup:
      return ''
    return self.node_lookup[node_id]


def create_graph():
  """Creates a graph from saved GraphDef file and returns a saver."""
  # Creates graph from saved graph_def.pb.
  with tf.gfile.FastGFile(os.path.join(
      FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    _ = tf.import_graph_def(graph_def, name='')


def run_inference_on_image(image):
  """Runs inference on an image.

  Args:
    image: Image file name.

  Returns:
    Nothing
  """
  if not tf.gfile.Exists(image):
    tf.logging.fatal('File does not exist %s', image)
  image_data = tf.gfile.FastGFile(image, 'rb').read()

  # Creates graph from saved GraphDef.
  create_graph()

  with tf.Session() as sess:
    # Some useful tensors:
    # 'softmax:0': A tensor containing the normalized prediction across
    #   1000 labels.
    # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
    #   float description of the image.
    # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
    #   encoding of the image.
    # Runs the softmax tensor by feeding the image_data as input to the graph.
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    predictions = sess.run(softmax_tensor,
                           {'DecodeJpeg/contents:0': image_data})
    predictions = np.squeeze(predictions)

    # Creates node ID --> English string lookup.
    node_lookup = NodeLookup()

    top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
    for node_id in top_k:
      human_string = node_lookup.id_to_string(node_id)
      score = predictions[node_id]
      print('%s (score = %.5f)' % (human_string, score))


def maybe_download_and_extract():
  """Download and extract model tar file."""
  dest_directory = FLAGS.model_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    def _progress(count, block_size, total_size):
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (
          filename, float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
  tarfile.open(filepath, 'r:gz').extractall(dest_directory)


def main(_):
  maybe_download_and_extract()
  image = (FLAGS.image_file if FLAGS.image_file else
           os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))
  run_inference_on_image(image)


if __name__ == '__main__':
  tf.app.run()

# ---------------------------------------------------------------------------
# /configure.py  (beginning; continues in the next chunk of this dump)
# ---------------------------------------------------------------------------
import json
import os.path

data = {}

# Default configuration
# If you want to change make a json file that looks like
# {
#   "LTRIG":"17",
#   "CTRIG":"23",
#   "RTRIG":"22",
#   "LECHO":"18",
#   "CECHO":"24",
#   "RECHO":"27"
# }

data["LTRIG"] = 23  # default pin for left sonar trigger
data["CTRIG"] = 17  # default pin for center sonar trigger
data["RTRIG"] = 22  # default pin for right sonar trigger
data["LECHO"] = 24  # default pin for left sonar echo
data["CECHO"] = 18  # default pin for center sonar echo
data["RECHO"] = 27  # default pin for right sonar echo

if os.path.isfile('robot.conf'):
    with open('robot.conf') as data_file:
        # BUG FIX: merge the user's settings over the defaults instead of
        # replacing the whole dict -- a partial robot.conf no longer drops
        # the unspecified pins (a complete robot.conf behaves exactly as
        # before).
        data.update(json.load(data_file))
else:
    print("Couldn't find robot.conf file, using default configuration")

# ---------------------------------------------------------------------------
# /drive_server.py -- Flask web app for driving the robot and streaming video.
# ---------------------------------------------------------------------------
#!/usr/bin/python

from flask import Flask, render_template, request, Response, send_file
from camera_pi import Camera
import wheels
import speaker
import autonomous
import os
# BUG FIX: gen() below calls time.sleep(), but `time` was never imported,
# so the video generator raised NameError after the first frame.
import time

app = Flask(__name__)


@app.route("/")
def main():
    return render_template('index.html')


@app.route('/forward')
def forward():
    # (speed, seconds) -- timed moves stop on their own.
    wheels.forward(200, 3)
    return ''


@app.route('/backward')
def backward():
    wheels.backward(200, 3)
    return ''


@app.route('/shake')
def shake():
    wheels.left(200, 0.2)
    wheels.right(200, 0.2)
    return ''


@app.route('/left')
def left():
    wheels.left(200, 3)
    return ''


@app.route('/right')
def right():
    wheels.right(200, 3)
    return ''


# Single-letter routes run until /stop is requested (used by key-down/up).
@app.route('/f')
def f():
    wheels.forward(200)
    return ''


@app.route('/b')
def b():
    wheels.backward(200)
    return ''


@app.route('/r')
def r():
    wheels.right(200)
    return ''


@app.route('/l')
def l():
    wheels.left(200)
    return ''


@app.route('/stop')
def stop():
    wheels.stop()
    return ''


@app.route('/latest.jpg')
def latest():
    filename = 'images/latest_img.jpg'
    return send_file(filename, mimetype='image/jpg')


@app.route('/drive')
def drive():
    # Optional ?time=N query parameter; default 10 seconds.
    time_limit = 10
    if 'time' in request.args:
        time_limit = int(request.args.get('time'))

    autonomous.autodrive(time_limit)

    return ''


@app.route('/say')
def say():
    text = request.args.get('text')
    speaker.say(text)
    return ''


@app.route('/data')
def data():
    img_num = request.args.get('i')
    if img_num is None:
        filename = 'images/latest_data'
    else:
        filename = 'images/data' + img_num

    # Use a context manager so the file handle is always closed
    # (the original leaked one descriptor per request).
    with open(filename, 'r') as f:
        return f.read()


@app.route('/img_rec')
def img_rec():
    wheels.stop()
    # os.system('python image.py')
    return ''


def gen(camera):
    """Video streaming generator function."""
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
        time.sleep(0.5)


@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == "__main__":
    app.run()

# ---------------------------------------------------------------------------
# /face.py  (beginning; continues in the next chunk of this dump)
# ---------------------------------------------------------------------------
import time
import requests
import cv2
import operator
import numpy as np

_url = 'https://api.projectoxford.ai/face/v1.0/detect'
# NOTE(review): hardcoded API key committed to the repository -- rotate this
# key and load it from an environment variable or config file instead.
_key = 'daaf903b4d55405793705c280f26bf34'
_maxNumRetries = 10


def processRequest(json, data, headers, params):
    """
    Helper function to process the request to Project Oxford

    Parameters:
    json: Used when processing images from its URL. See API Documentation
    data: Used when processing image read from disk.
    (definition truncated in this chunk -- the body continues below)
    """
See API Documentation 19 | headers: Used to pass the key information and the data type request 20 | """ 21 | 22 | retries = 0 23 | result = None 24 | 25 | while True: 26 | 27 | response = requests.request( 'post', _url, json = json, data = data, headers = headers, params = params ) 28 | 29 | if response.status_code == 429: 30 | 31 | print( "Message: %s" % ( response.json()['error']['message'] ) ) 32 | 33 | if retries <= _maxNumRetries: 34 | time.sleep(1) 35 | retries += 1 36 | continue 37 | else: 38 | print( 'Error: failed after retrying!' ) 39 | break 40 | 41 | elif response.status_code == 200 or response.status_code == 201: 42 | 43 | if 'content-length' in response.headers and int(response.headers['content-length']) == 0: 44 | result = None 45 | elif 'content-type' in response.headers and isinstance(response.headers['content-type'], str): 46 | if 'application/json' in response.headers['content-type'].lower(): 47 | result = response.json() if response.content else None 48 | elif 'image' in response.headers['content-type'].lower(): 49 | result = response.content 50 | else: 51 | print( "Error code: %d" % ( response.status_code ) ) 52 | print( "Message: %s" % ( response.json()['error']['message'] ) ) 53 | 54 | break 55 | 56 | return result 57 | 58 | 59 | def renderResultOnImage( result, img ): 60 | 61 | """Display the obtained results onto the input image""" 62 | 63 | for currFace in result: 64 | faceRectangle = currFace['faceRectangle'] 65 | cv2.rectangle( img,(faceRectangle['left'],faceRectangle['top']), 66 | (faceRectangle['left']+faceRectangle['width'], faceRectangle['top'] + faceRectangle['height']), 67 | color = (255,0,0), thickness = 1 ) 68 | 69 | faceLandmarks = currFace['faceLandmarks'] 70 | 71 | for _, currLandmark in faceLandmarks.items(): 72 | cv2.circle( img, (int(currLandmark['x']),int(currLandmark['y'])), color = (0,255,0), thickness= -1, radius = 1 ) 73 | 74 | for currFace in result: 75 | faceRectangle = currFace['faceRectangle'] 76 | faceAttributes = 
currFace['faceAttributes'] 77 | 78 | textToWrite = "%c (%d)" % ( 'M' if faceAttributes['gender']=='male' else 'F', faceAttributes['age'] ) 79 | cv2.putText( img, textToWrite, (faceRectangle['left'],faceRectangle['top']-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1 ) 80 | 81 | 82 | # URL direction to image 83 | urlImage = 'https://raw.githubusercontent.com/Microsoft/ProjectOxford-ClientSDK/master/Face/Windows/Data/identification1.jpg' 84 | 85 | # Face detection parameters 86 | params = { 'returnFaceAttributes': 'age,gender', 87 | 'returnFaceLandmarks': 'true'} 88 | 89 | headers = dict() 90 | headers['Ocp-Apim-Subscription-Key'] = _key 91 | headers['Content-Type'] = 'application/json' 92 | 93 | json = { 'url': urlImage } 94 | data = None 95 | 96 | result = processRequest( json, data, headers, params ) 97 | 98 | if result is not None: 99 | # Load the original image, fetched from the URL 100 | arr = np.asarray( bytearray( requests.get( urlImage ).content ), dtype=np.uint8 ) 101 | img = cv2.cvtColor( cv2.imdecode( arr, -1 ), cv2.COLOR_BGR2RGB ) 102 | 103 | renderResultOnImage( result, img ) 104 | cv2.imwrite("face.jpg", img) 105 | 106 | 107 | -------------------------------------------------------------------------------- /image.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import subprocess 4 | 5 | def do(cmd): 6 | print(cmd) 7 | p =subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) 8 | for line in p.stdout.readlines(): 9 | print line 10 | 11 | retval = p.wait() 12 | 13 | 14 | def classify(n): 15 | suffix = str(n) 16 | imageFile = 'images/img'+suffix+'.jpg' 17 | dataFile = 'images/data'+suffix 18 | latestImage = 'images/latest_img.jpg' 19 | latestData = 'images/latest_data' 20 | do('echo "I\'m thinking." 
| flite -voice slt') 21 | do('cp /dev/shm/mjpeg/cam.jpg '+imageFile); 22 | do('ln -f '+imageFile+' '+latestImage); 23 | do('echo "thinking" > ' + dataFile); 24 | do('ln -f '+dataFile+' '+latestData); 25 | do('bash run_and_parse_inception.sh '+imageFile+ " " +dataFile) 26 | 27 | 28 | 29 | 30 | do('{ echo "I think I see ah "; head -1 '+dataFile+' | sed -e \'$ ! s/$/. or maybe a/\'; } | flite -voice slt') 31 | 32 | do('echo '+suffix+' > images/INDEX') 33 | 34 | 35 | i=0 36 | read_data = 0 37 | 38 | with open('images/INDEX', 'r') as f: 39 | read_data = int(f.read()) 40 | 41 | i += read_data + 1; 42 | 43 | 44 | classify(i) 45 | -------------------------------------------------------------------------------- /inception_server.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | """Simple image classification server that runs the inception model Inception. 17 | 18 | Run image classification with Inception trained on ImageNet 2012 Challenge data 19 | set. 
20 | 21 | This is modified from the classify_image_timed.py script in the tensorflow-on-rasberry-pi 22 | github repository 23 | """ 24 | 25 | from __future__ import absolute_import 26 | from __future__ import division 27 | from __future__ import print_function 28 | 29 | import os.path 30 | import re 31 | import sys 32 | import tarfile 33 | 34 | import numpy as np 35 | from six.moves import urllib 36 | import tensorflow as tf 37 | 38 | FLAGS = tf.app.flags.FLAGS 39 | 40 | tf.app.flags.DEFINE_string( 41 | 'model_dir', '/tmp/imagenet', 42 | """Path to classify_image_graph_def.pb, """ 43 | """imagenet_synset_to_human_label_map.txt, and """ 44 | """imagenet_2012_challenge_label_map_proto.pbtxt.""") 45 | tf.app.flags.DEFINE_string('image_file', '', 46 | """Absolute path to image file.""") 47 | tf.app.flags.DEFINE_integer('num_top_predictions', 5, 48 | """Display this many predictions.""") 49 | 50 | DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz' 51 | IMAGE_PATH = '/dev/shm/mjpeg/cam.jpg' 52 | 53 | 54 | class NodeLookup(object): 55 | """Converts integer node ID's to human readable labels.""" 56 | 57 | def __init__(self, 58 | label_lookup_path=None, 59 | uid_lookup_path=None): 60 | if not label_lookup_path: 61 | label_lookup_path = os.path.join( 62 | FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt') 63 | if not uid_lookup_path: 64 | uid_lookup_path = os.path.join( 65 | FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt') 66 | self.node_lookup = self.load(label_lookup_path, uid_lookup_path) 67 | 68 | def load(self, label_lookup_path, uid_lookup_path): 69 | """Loads a human readable English name for each softmax node. 70 | 71 | Args: 72 | label_lookup_path: string UID to integer node ID. 73 | uid_lookup_path: string UID to human-readable string. 74 | 75 | Returns: 76 | dict from integer node ID to human-readable string. 
77 | """ 78 | if not tf.gfile.Exists(uid_lookup_path): 79 | tf.logging.fatal('File does not exist %s', uid_lookup_path) 80 | if not tf.gfile.Exists(label_lookup_path): 81 | tf.logging.fatal('File does not exist %s', label_lookup_path) 82 | 83 | # Loads mapping from string UID to human-readable string 84 | proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines() 85 | uid_to_human = {} 86 | p = re.compile(r'[n\d]*[ \S,]*') 87 | for line in proto_as_ascii_lines: 88 | parsed_items = p.findall(line) 89 | uid = parsed_items[0] 90 | human_string = parsed_items[2] 91 | uid_to_human[uid] = human_string 92 | 93 | # Loads mapping from string UID to integer node ID. 94 | node_id_to_uid = {} 95 | proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines() 96 | for line in proto_as_ascii: 97 | if line.startswith(' target_class:'): 98 | target_class = int(line.split(': ')[1]) 99 | if line.startswith(' target_class_string:'): 100 | target_class_string = line.split(': ')[1] 101 | node_id_to_uid[target_class] = target_class_string[1:-2] 102 | 103 | # Loads the final mapping of integer node ID to human-readable string 104 | node_id_to_name = {} 105 | for key, val in node_id_to_uid.items(): 106 | if val not in uid_to_human: 107 | tf.logging.fatal('Failed to locate: %s', val) 108 | name = uid_to_human[val] 109 | node_id_to_name[key] = name 110 | 111 | return node_id_to_name 112 | 113 | def id_to_string(self, node_id): 114 | if node_id not in self.node_lookup: 115 | return '' 116 | return self.node_lookup[node_id] 117 | 118 | 119 | def create_graph(): 120 | """Creates a graph from saved GraphDef file and returns a saver.""" 121 | # Creates graph from saved graph_def.pb. 
122 | with tf.gfile.FastGFile(os.path.join( 123 | FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f: 124 | graph_def = tf.GraphDef() 125 | graph_def.ParseFromString(f.read()) 126 | _ = tf.import_graph_def(graph_def, name='') 127 | 128 | 129 | def run_inference_on_image(image): 130 | """Runs inference on an image. 131 | 132 | Args: 133 | image: Image file name. 134 | 135 | Returns: 136 | results of inference 137 | """ 138 | print("Running inference") 139 | if not tf.gfile.Exists(image): 140 | tf.logging.fatal('File does not exist %s', image) 141 | image_data = tf.gfile.FastGFile(image, 'rb').read() 142 | 143 | # Creates graph from saved GraphDef. 144 | create_graph() 145 | 146 | with tf.Session() as sess: 147 | # Some useful tensors: 148 | # 'softmax:0': A tensor containing the normalized prediction across 149 | # 1000 labels. 150 | # 'pool_3:0': A tensor containing the next-to-last layer containing 2048 151 | # float description of the image. 152 | # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG 153 | # encoding of the image. 154 | # Runs the softmax tensor by feeding the image_data as input to the graph. 
155 | softmax_tensor = sess.graph.get_tensor_by_name('softmax:0') 156 | 157 | predictions = sess.run(softmax_tensor, 158 | {'DecodeJpeg/contents:0': image_data}) 159 | 160 | predictions = np.squeeze(predictions) 161 | 162 | node_lookup = NodeLookup() 163 | 164 | top_k = predictions.argsort()[-1:][::-1] 165 | results = [] 166 | for node_id in top_k: 167 | print(node_id) 168 | human_string = node_lookup.id_to_string(node_id) 169 | score = predictions[node_id] 170 | print('%s (score = %.5f)' % (human_string, score)) 171 | results.append(human_string) 172 | 173 | return results 174 | 175 | 176 | 177 | 178 | def maybe_download_and_extract(): 179 | """Download and extract model tar file.""" 180 | dest_directory = FLAGS.model_dir 181 | if not os.path.exists(dest_directory): 182 | os.makedirs(dest_directory) 183 | filename = DATA_URL.split('/')[-1] 184 | filepath = os.path.join(dest_directory, filename) 185 | if not os.path.exists(filepath): 186 | def _progress(count, block_size, total_size): 187 | sys.stdout.write('\r>> Downloading %s %.1f%%' % ( 188 | filename, float(count * block_size) / float(total_size) * 100.0)) 189 | sys.stdout.flush() 190 | filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, 191 | reporthook=_progress) 192 | print() 193 | statinfo = os.stat(filepath) 194 | print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.') 195 | tarfile.open(filepath, 'r:gz').extractall(dest_directory) 196 | 197 | 198 | def main(_): 199 | maybe_download_and_extract() 200 | image = (FLAGS.image_file if FLAGS.image_file else 201 | os.path.join(FLAGS.model_dir, 'cropped_panda.jpg')) 202 | run_inference_on_image(image) 203 | 204 | 205 | 206 | 207 | 208 | from flask import Flask 209 | app = Flask(__name__) 210 | 211 | @app.route('/') 212 | def hello_world(): 213 | result = run_inference_on_image(IMAGE_PATH) 214 | print("found") 215 | print(result) 216 | return(result[0]) 217 | 218 | 219 | if __name__ == '__main__': 220 | print("Starting up") 221 | 
maybe_download_and_extract() 222 | print("Warming up by running an inference") 223 | # "warm up" by running the classifer on an image 224 | run_inference_on_image(IMAGE_PATH) 225 | print("Ready to label!") 226 | app.run(host='0.0.0.0', port=9999) 227 | -------------------------------------------------------------------------------- /nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | 2 | http { 3 | server { 4 | location / { 5 | proxy_pass http://unix:/home/pi/drive.sock; 6 | } 7 | location /cam.jpg { 8 | root /dev/shm/mjpeg; 9 | } 10 | } 11 | 12 | } 13 | 14 | events { 15 | } -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ez_setup 2 | Adafruit-MotorHAT 3 | picamera 4 | flask 5 | RPi.GPIO 6 | smbus 7 | 8 | -------------------------------------------------------------------------------- /robot.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python 2 | 3 | import wheels 4 | import sys 5 | import sonar 6 | import configure 7 | 8 | usage = ''' 9 | robot 10 | 11 | Tell the robot what to do. 
12 | 13 | Commands 14 | 15 | Actions 16 | ------- 17 | forward 18 | backward 19 | left 20 | right 21 | stop 22 | shake 23 | 24 | Test Wheels 25 | ----------- 26 | wheel1 27 | wheel2 28 | wheel3 29 | wheel4 30 | 31 | Test Sonar 32 | ---------- 33 | leftdist 34 | rightdist 35 | centerdist 36 | ''' 37 | 38 | print sys.argv 39 | 40 | if (len(sys.argv) == 1): 41 | print usage 42 | exit(1) 43 | 44 | cmd = sys.argv[1] 45 | 46 | if (cmd == 'forward'): 47 | wheels.forward(200, 1) 48 | 49 | elif (cmd == 'backward'): 50 | wheels.backward(200, 1) 51 | 52 | elif (cmd == 'left'): 53 | wheels.left(200, 1) 54 | 55 | elif (cmd == 'right'): 56 | wheels.right(200, 1) 57 | 58 | elif (cmd == 'wheel1'): 59 | wheels.spinMotor(1, 200, 1) 60 | 61 | elif (cmd == 'wheel2'): 62 | wheels.spinMotor(2, 200, 1) 63 | 64 | elif (cmd == 'wheel3'): 65 | wheels.spinMotor(3, 200, 1) 66 | 67 | elif (cmd == 'wheel4'): 68 | wheels.spinMotor(4, 200, 1) 69 | 70 | elif (cmd == 'shake'): 71 | wheels.left(200, 0.2) 72 | wheels.right(200, 0.2) 73 | 74 | elif (cmd == 'leftdist'): 75 | print sonar.ldist() 76 | elif (cmd == 'rightdist'): 77 | print sonar.rdist() 78 | elif (cmd == 'centerdist'): 79 | print sonar.cdist() 80 | 81 | else: 82 | print usage 83 | 84 | 85 | 86 | 87 | -------------------------------------------------------------------------------- /run_and_parse_inception.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import subprocess 4 | 5 | imageFile = sys.argv[1] 6 | dataFile = sys.argv[2] 7 | 8 | 9 | 10 | def do(cmd): 11 | print(cmd) 12 | p =subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 13 | for line in p.stdout.readlines(): 14 | print line 15 | 16 | retval = p.wait() 17 | 18 | do('( cd /home/pi/tensorflow && ./label_image --image=../robot/'+imageFile+' ) &> ../robot/raw.output ' ) 19 | 20 | do('tail -5 raw.output | cut -d"]" -f2 | cut -d"(" -f1 > ' + dataFile) 21 | 
-------------------------------------------------------------------------------- /run_and_parse_inception.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | ( cd /home/pi/tensorflow && ./label_image --image=../robot/$1) &> ../robot/raw.output 4 | tail -5 raw.output | cut -d"]" -f2 | cut -d"(" -f1 > $2 5 | -------------------------------------------------------------------------------- /server.sh: -------------------------------------------------------------------------------- 1 | gunicorn -w 4 -b0.0.0.0:8000 --timeout 1000 drive_server:app 2 | -------------------------------------------------------------------------------- /services/inception.service: -------------------------------------------------------------------------------- 1 | #goes in /etc/systemd/system/inception.service 2 | 3 | [Unit] 4 | Description=inception daemon 5 | After=network.target 6 | 7 | [Service] 8 | User=pi 9 | Group=www-data 10 | WorkingDirectory=/home/pi/robot 11 | ExecStart=/usr/local/bin/gunicorn --workers 1 -b0.0.0.0:9999 --timeout 10000 inception_server:app 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /services/web.service: -------------------------------------------------------------------------------- 1 | #goes in /etc/systemd/system/web.service 2 | 3 | [Unit] 4 | Description=gunicorn web daemon 5 | After=network.target 6 | 7 | [Service] 8 | User=pi 9 | Group=www-data 10 | WorkingDirectory=/home/pi/robot 11 | ExecStart=/usr/bin/gunicorn --workers 3 --bind unix:/home/pi/drive.sock drive_server:app 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /sonar.py: -------------------------------------------------------------------------------- 1 | import RPi.GPIO as GPIO 2 | import os 3 | import time 4 | import atexit 5 | import sys 6 | from subprocess 
import call 7 | import configure 8 | 9 | 10 | def setup(): 11 | for i in range(3): 12 | GPIO.setup(TRIG[i],GPIO.OUT) 13 | GPIO.setup(ECHO[i],GPIO.IN) 14 | GPIO.output(TRIG[i], False) 15 | 16 | print "Waiting For Sensor To Settle" 17 | 18 | #if ('LTRIG' in os.environ): 19 | # TRIG = [int(os.environ['LTRIG']), 20 | # int(os.environ['CTRIG']), 21 | # int(os.environ['RTRIG'])] 22 | # ECHO = [int(os.environ['LECHO']), 23 | # int(os.environ['CECHO']), 24 | # int(os.environ['RECHO'])] 25 | 26 | 27 | if ('LTRIG' in configure.data): 28 | TRIG = [int(configure.data['LTRIG']), 29 | int(configure.data['CTRIG']), 30 | int(configure.data['RTRIG'])] 31 | ECHO = [int(configure.data['LECHO']), 32 | int(configure.data['CECHO']), 33 | int(configure.data['RECHO'])] 34 | 35 | GPIO.setmode(GPIO.BCM) 36 | setup() 37 | 38 | def turnOffGPIO(): 39 | GPIO.cleanup() 40 | 41 | atexit.register(turnOffGPIO) 42 | 43 | 44 | def raw_distance(TRIG, ECHO): 45 | #check a sonar with trigger argv1 and echo argv2 46 | #example usage 47 | #python sonar.py 22 27 48 | 49 | # recommended for auto-disabling motors on shutdown! 
50 | GPIO.setmode(GPIO.BCM) 51 | 52 | 53 | print "Distance Measurement In Progress" 54 | 55 | GPIO.setup(TRIG,GPIO.OUT) 56 | 57 | GPIO.setup(ECHO,GPIO.IN) 58 | 59 | GPIO.output(TRIG, False) 60 | 61 | print "Waiting For Sensor To Settle" 62 | 63 | time.sleep(2) 64 | 65 | GPIO.output(TRIG, True) 66 | 67 | time.sleep(0.00001) 68 | 69 | GPIO.output(TRIG, False) 70 | 71 | while GPIO.input(ECHO)==0: 72 | pulse_start = time.time() 73 | 74 | while GPIO.input(ECHO)==1: 75 | pulse_end = time.time() 76 | 77 | pulse_duration = pulse_end - pulse_start 78 | 79 | distance = pulse_duration * 17150 80 | 81 | distance = round(distance, 2) 82 | 83 | print "Distance:",distance,"cm" 84 | 85 | 86 | def distance(i): 87 | # print "Distance Measurement In Progress" 88 | 89 | GPIO.output(TRIG[i], True) 90 | 91 | time.sleep(0.00001) 92 | 93 | GPIO.output(TRIG[i], False) 94 | 95 | pulse_end = 0; 96 | pulse_start = 0; 97 | 98 | while GPIO.input(ECHO[i])==0: 99 | pulse_start = time.time() 100 | 101 | while GPIO.input(ECHO[i])==1: 102 | pulse_end = time.time() 103 | 104 | if (pulse_end == 0 or pulse_start==0): 105 | return 1000 106 | 107 | pulse_duration = pulse_end - pulse_start 108 | 109 | distance = pulse_duration * 17150 110 | 111 | distance = round(distance, 2) 112 | 113 | # print "Distance:",distance,"cm" 114 | 115 | return distance 116 | 117 | def ldist(): 118 | return distance(0) 119 | 120 | def rdist(): 121 | return distance(2) 122 | 123 | def cdist(): 124 | return distance(1) 125 | 126 | 127 | if __name__ == "__main__": 128 | TRIG = int(sys.argv[1]) 129 | 130 | ECHO = int(sys.argv[2]) 131 | raw_distance(TRIG, ECHO) 132 | -------------------------------------------------------------------------------- /speaker.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import os 3 | 4 | def say(text): 5 | if 'VOICE' in os.environ: 6 | voice = os.environ['VOICE'] 7 | subprocess.Popen(['flite', '-voice', voice,'-t', text]) 8 | else: 9 | 
subprocess.Popen(['flite', '-t', text]) 10 | 11 | 12 | -------------------------------------------------------------------------------- /static/arm.js: -------------------------------------------------------------------------------- 1 | function call(name) { 2 | console.log(name); 3 | var xhr = new XMLHttpRequest(); 4 | xhr.open('GET', name, true); 5 | xhr.send(); 6 | } 7 | 8 | function saveFields() { 9 | var data = {}; 10 | data.max = []; 11 | data.min = []; 12 | for (i=0; i<5; i++) { 13 | data.min[i] = document.getElementById("tmin" + i).value; 14 | data.max[i] = document.getElementById("tmax" + i).value; 15 | } 16 | localStorage.setItem("myData", JSON.stringify(data)); 17 | } 18 | 19 | function loadFields() { 20 | var data = localStorage.getItem("myData"); 21 | var dataObject; 22 | if (data != null) { 23 | dataObject = JSON.parse(data); 24 | for (i=0; i<5; i++) { 25 | document.getElementById("tmin" + i).value = dataObject.min[i] 26 | document.getElementById("tmax" + i).value = dataObject.max[i] 27 | } 28 | 29 | } else { 30 | for (i=0; i<5; i++) { 31 | document.getElementById("tmin" + i).value = 0 32 | document.getElementById("tmax" + i).value = 100 33 | } 34 | } 35 | } 36 | 37 | function slider(num, value) { 38 | console.log(num); 39 | tid = "t" + num; 40 | document.getElementById(tid).value = value; 41 | var xhr = new XMLHttpRequest(); 42 | xhr.open('GET', "slider?num="+num+"&value="+value); 43 | xhr.send(); 44 | } 45 | 46 | function sliderInc(num) { 47 | sid = "s" + num; 48 | v = parseInt(document.getElementById(sid).value); 49 | document.getElementById(sid).value = v+1; 50 | slider(num,document.getElementById(sid).value); 51 | } 52 | 53 | function sliderDec(num) { 54 | sid = "s" + num; 55 | v = parseInt(document.getElementById(sid).value); 56 | document.getElementById(sid).value = v-1; 57 | slider(num,document.getElementById(sid).value); 58 | } 59 | 60 | function sliderReset(num) { 61 | sid = "s" + num; 62 | v = parseInt(document.getElementById(sid).value); 
63 | document.getElementById(sid).value = 50; 64 | slider(num,document.getElementById(sid).value); 65 | } 66 | 67 | function randomizeTargets() { 68 | for (i=0; i<5; i++) { 69 | min = parseInt(document.getElementById("tmin" + i).value); 70 | max = parseInt(document.getElementById("tmax" + i).value); 71 | r = Math.floor(Math.random() * (max-min) + min) 72 | document.getElementById("target"+i).value = r 73 | } 74 | } 75 | 76 | 77 | 78 | document.onkeyup = function(e) { 79 | 80 | } 81 | 82 | document.onkeydown = function(e) { 83 | var key = e.keyCode ? e.keyCode : e.which; 84 | console.log(key); 85 | 86 | if (key == 81) { //q 87 | sliderDec(0); 88 | } else if (key == 69) { 89 | sliderInc(0); 90 | } else if (key == 87) { 91 | sliderReset(0); 92 | } 93 | else if (key == 65) { 94 | sliderDec(1); 95 | } else if (key == 68) { 96 | sliderInc(1); 97 | } else if (key == 83) { 98 | sliderReset(1); 99 | } 100 | else if (key == 90) { 101 | sliderDec(2); 102 | } else if (key == 67) { 103 | sliderInc(2); 104 | } else if (key == 88) { 105 | sliderReset(2); 106 | } 107 | else if (key == 84) { 108 | sliderDec(3); 109 | } else if (key == 85) { 110 | sliderInc(3); 111 | } else if (key == 89) { 112 | sliderReset(3); 113 | } 114 | else if (key == 71) { 115 | sliderDec(4); 116 | } else if (key == 74) { 117 | sliderInc(4); 118 | } else if (key == 72) { 119 | sliderReset(4); 120 | } 121 | else if (key == 80) { 122 | randomizeTargets(); 123 | } else if (key == 77) { 124 | moveToTarget(); 125 | } 126 | } 127 | 128 | function stepToTarget() { 129 | stepSize = 1; 130 | for (i=0; i<5; i++) { 131 | stopFlag = true; 132 | pos = parseInt(document.getElementById("s" + i).value); 133 | target = parseInt(document.getElementById("target" + i).value); 134 | if ( pos == target ) { 135 | newPos = target; 136 | } else if ( Math.abs(pos-target) < stepSize) { 137 | newPos = target; 138 | } else if ( pos < target ) { 139 | stopFlag = false; 140 | newPos = pos + stepSize; 141 | } else if ( pos > target ) { 142 
| stopFlag = false; 143 | newPos = pos - stepSize; 144 | } 145 | document.getElementById("s" + i).value = newPos; 146 | slider(i,newPos); 147 | if (stopFlag) { 148 | clearFlag(timer); 149 | } 150 | } 151 | } 152 | 153 | 154 | 155 | var timer; 156 | function moveToTarget() { 157 | timer=setInterval(function() { stepToTarget(); }, 100); 158 | 159 | } 160 | 161 | 162 | window.onload = function() { 163 | loadFields(); 164 | } 165 | -------------------------------------------------------------------------------- /static/robot.js: -------------------------------------------------------------------------------- 1 | function refreshCam() { 2 | var camImg = document.getElementById("camera"); 3 | camImg.src="cam.jpg?t="+new Date().getTime(); 4 | } 5 | function refreshLatest() { 6 | var latestImg = document.getElementById("latest"); 7 | latestImg.src="latest.jpg?t="+new Date().getTime(); 8 | 9 | var xhr = new XMLHttpRequest(); 10 | xhr.open('GET', '/data', true); 11 | xhr.onload = function() { 12 | console.log('HERE'); 13 | var status = xhr.status; 14 | if (status == 200) { 15 | var guesses = document.getElementById("guesses"); 16 | guesses.innerHTML = xhr.response 17 | } 18 | }; 19 | xhr.send(); 20 | 21 | } 22 | 23 | setInterval(refreshCam, 200); 24 | setInterval(refreshLatest, 1001); 25 | function call(name) { 26 | console.log(name); 27 | var xhr = new XMLHttpRequest(); 28 | xhr.open('GET', name, true); 29 | xhr.send(); 30 | } 31 | 32 | function setSpeed(left, right) { 33 | var xhr = new XMLHttpRequest(); 34 | xhr.open('GET', 'setSpeed?left='+left+'&right='+right, true); 35 | xhr.send(); 36 | } 37 | 38 | document.onkeyup = function(e) { 39 | call('stop'); 40 | } 41 | 42 | document.onkeydown = function(e) { 43 | var key = e.keyCode ? 
e.keyCode : e.which; 44 | console.log(key); 45 | 46 | if (key == 37) { //Left 47 | call('l'); 48 | } else if (key == 38) { //up 49 | call('b'); 50 | } else if (key == 39) {//right 51 | call('r'); 52 | } else if (key == 40) {// down 53 | call('f'); 54 | } else if (key == 90) {//z 55 | call('stop'); 56 | } else if (key == 32) { //space 57 | call('img_rec'); 58 | } 59 | } 60 | 61 | if (window.DeviceMotionEvent != undefined) { 62 | window.ondevicemotion = function(e) { 63 | ax = event.accelerationIncludingGravity.x * 5; 64 | ay = event.accelerationIncludingGravity.y * 5; 65 | 66 | left = ax + ay; 67 | right = -ax + ay; 68 | 69 | 70 | setSpeed(left, right); 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /static/style.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: sans-serif; background: #eee; 3 | } 4 | input[type="number"] { 5 | width: 50px; 6 | } 7 | 8 | .min { 9 | readonly: "readonly" 10 | } 11 | 12 | .max { 13 | readonly: "readonly" 14 | } 15 | 16 | .target { 17 | readonly: "readonly" 18 | } -------------------------------------------------------------------------------- /templates/arm.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 |
Claw
Wrist
Elbow
Shoulder
Rotate
51 | 52 | 53 | 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /templates/index.html~: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /wheels.py: -------------------------------------------------------------------------------- 1 | 2 | from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor 3 | 4 | import os 5 | import time 6 | import atexit 7 | 8 | # create a default object, no changes to I2C address or frequency 9 | mh = Adafruit_MotorHAT(addr=0x60) 10 | 11 | # recommended for auto-disabling motors on shutdown! 12 | def turnOffMotors(): 13 | mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE) 14 | mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE) 15 | mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE) 16 | mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE) 17 | 18 | atexit.register(turnOffMotors) 19 | 20 | ################################# DC motor test! 
21 | mFL = mh.getMotor(1) 22 | mBL = mh.getMotor(2) 23 | mBR = mh.getMotor(3) 24 | mFR = mh.getMotor(4) 25 | 26 | 27 | 28 | def wakeup(m): 29 | # set the speed to start, from 0 (off) to 255 (max speed) 30 | m.setSpeed(150) 31 | m.run(Adafruit_MotorHAT.FORWARD); 32 | # turn on motor 33 | m.run(Adafruit_MotorHAT.RELEASE); 34 | 35 | 36 | wakeup(mFL) 37 | wakeup(mBL) 38 | wakeup(mFR) 39 | wakeup(mBL) 40 | 41 | def spin(wheel, speed): 42 | if (speed > 0): 43 | wheel.run(Adafruit_MotorHAT.FORWARD) 44 | wheel.setSpeed(speed) 45 | elif (speed < 0): 46 | wheel.run(Adafruit_MotorHAT.BACKWARD) 47 | wheel.setSpeed(-speed) 48 | else: 49 | wheel.run(Adafruit_MotorHAT.RELEASE) 50 | 51 | def spinMotor(motorId=1, speed=200, dur=-1): 52 | m = mh.getMotor(motorId) 53 | spin(m, speed) 54 | if (dur >= 0): 55 | time.sleep(dur) 56 | stop() 57 | 58 | 59 | def stop(): 60 | mFL.run(Adafruit_MotorHAT.RELEASE) 61 | mFR.run(Adafruit_MotorHAT.RELEASE) 62 | mBL.run(Adafruit_MotorHAT.RELEASE) 63 | mBR.run(Adafruit_MotorHAT.RELEASE) 64 | 65 | 66 | def forward(speed=200, dur=-1): 67 | spin(mFR, speed) 68 | spin(mFL, speed) 69 | spin(mBR, speed) 70 | spin(mBL, speed) 71 | 72 | if (dur >= 0): 73 | time.sleep(dur) 74 | stop() 75 | 76 | 77 | def backward(speed, dur=-1): 78 | spin(mFR, -speed) 79 | spin(mFL, -speed) 80 | spin(mBR, -speed) 81 | spin(mBL, -speed) 82 | 83 | if (dur >- 0): 84 | time.sleep(dur) 85 | stop() 86 | 87 | 88 | def left(speed, dur=-1): 89 | spin(mFR, -speed) 90 | spin(mFL, speed) 91 | spin(mBR, -speed) 92 | spin(mBL, speed) 93 | 94 | if (dur >- 0): 95 | time.sleep(dur) 96 | stop() 97 | 98 | def right(speed, dur=-1): 99 | spin(mFR, speed) 100 | spin(mFL, -speed) 101 | spin(mBR, speed) 102 | spin(mBL, -speed) 103 | 104 | if (dur >- 0): 105 | time.sleep(dur) 106 | stop() 107 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /wsgi.py: -------------------------------------------------------------------------------- 1 | from drive import 
app 2 | 3 | if __name__ == "__main__": 4 | app.run() 5 | --------------------------------------------------------------------------------