├── config.py ├── docs ├── sketch.png ├── img00455.jpg ├── img06721.jpg ├── Autonomia.postman_environment.json ├── dependencies.md ├── f.sh ├── remote-api.md └── Autonomia.postman_collection.json ├── config.json ├── Arduino ├── README.md └── firmware │ └── firmware.ino ├── ConvNet ├── config.py ├── test.py ├── utils.py ├── README.md ├── predict.py ├── train.py ├── cnnModels.py └── train_data_augmentation.py ├── .gitignore ├── gpslib.py ├── application.py ├── utils.py ├── README.md ├── api.py ├── runtime.py ├── cometalib.py ├── LICENSE ├── streamer.py └── controller.py /config.py: -------------------------------------------------------------------------------- 1 | ConvNet/config.py -------------------------------------------------------------------------------- /docs/sketch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cometa/Autonomia/HEAD/docs/sketch.png -------------------------------------------------------------------------------- /docs/img00455.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cometa/Autonomia/HEAD/docs/img00455.jpg -------------------------------------------------------------------------------- /docs/img06721.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cometa/Autonomia/HEAD/docs/img06721.jpg -------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | { 2 | "config":{ 3 | "cometa":{"ssl":true, "server":"autonomia.cometa.io", "port": 443, "app_key":"80d25d08e5fa6e13fb0a"}, 4 | "app_params":{"debug":false, "telemetry_period":15, "neutral_delta":2, "verbose": false}, 5 | "arduino":{"serial":"/dev/ttyUSB0", "speed":"57700"}, 6 | "gps":{"serial":"/dev/ttyACM0", "speed":"19200"}, 7 | "video":{"server":"stream.cometa.io", "port":12345, "auto_start": false, "key":"74DA388EAC6"} 8 | }, 9 | "service":{"provider":"VE"}, 10 | "hardware":{"model": "Autonomia", "type":"O", "version":"001", "batch":"", "firmware":"001-dev"} 11 | } 12 | -------------------------------------------------------------------------------- /docs/Autonomia.postman_environment.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "7ab2dc82-52bd-35b2-b6d5-a7dec8e301ff", 3 | "name": "Autonomia", 4 | "values": [ 5 | { 6 | "key": "DEVICE_ID", 7 | "value": "B827EA51680", 8 | "type": "text", 9 | "enabled": true 10 | }, 11 | { 12 | "key": "APPLICATION_ID", 13 | "value": "a94660d971eca2879", 14 | "type": "text", 15 | "enabled": true 16 | }, 17 | { 18 | "key": "COMETA_SECRET", 19 | "value": "a724dc4811d507688", 20 | "type": "text", 21 | "enabled": true 22 | }, 23 | ], 24 | "timestamp": 1483580052972, 25 | "_postman_variable_scope": "environment", 26 | "_postman_exported_at": "2017-01-05T01:38:57.428Z", 27 | "_postman_exported_using": "Postman/4.9.2" 28 | } -------------------------------------------------------------------------------- /Arduino/README.md: -------------------------------------------------------------------------------- 1 | The Arduino board controls the steering and motor servos and it allows drive-by-wire by the main on-board computer. It also intercepts and provides the computer board with input data from the RC receiver. 
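The serial protocol detailed below can be exercised from the main on-board computer with a few lines of Python. A minimal host-side sketch, assuming the `pyserial` package, the `/dev/ttyUSB0` port named in `config.json`, and the 38400 baud rate from the Settings section:

```
# Host-side sketch of the drive-by-wire serial link (see the protocol below).
import serial

port = serial.Serial('/dev/ttyUSB0', 38400, timeout=1)

port.write('S 90\n')   # center the steering servo
port.write('M 90\n')   # set the motor servo to neutral
port.write('V\n')      # receiver output scaled to the [0, 180] range

# read back '[steering] [throttle]\n' lines reported from the RC receiver
for _ in range(10):
    line = port.readline().strip()
    if line:
        steering, throttle = map(int, line.split())
        print "steering=%d throttle=%d" % (steering, throttle)
```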
2 | 
3 | 
4 | ## USB Serial Interface
5 | ### Input
6 | Input commands are strings received from the USB port.
7 | 
8 | Set the motor servo to a value in the [0, 180] range:
9 | 
10 | ```
11 | 'M [value]\n'
12 | ```
13 | Set the steering servo to a value in the [0, 180] range:
14 | ```
15 | 'S [value]\n'
16 | ```
17 | Set output to raw receiver values:
18 | ```
19 | 'R\n'
20 | ```
21 | Set receiver output to values in the [0, 180] range:
22 | ```
23 | 'V\n'
24 | ```
25 | Receive heartbeat from the computer board:
26 | ```
27 | 'H\n'
28 | ```
29 | ### Output
30 | Output to the USB port is a string containing the input from the RC remote control for steering and throttle:
31 | ```
32 | '[steering] [throttle]\n'
33 | ```
34 | ### Settings
35 | **USB speed is set to 38400 baud.**
36 | 
37 | > Requirements:
38 | > `EnableInterrupt` library
39 | 
40 | 
41 | ## Schematics
42 | ![alt text](https://github.com/cometa/Autonomia/raw/master/docs/sketch.png)
43 | 
--------------------------------------------------------------------------------
/ConvNet/config.py:
--------------------------------------------------------------------------------
1 | class DataConfig(object):
2 |     # img_height = 150
3 |     # img_height = 90
4 |     # img_width = 320
5 | 
6 |     # 1 channel Y or 3 channels YCrCb
7 |     num_channels = 1
8 |     num_buckets = 1
9 |     # number of frames to skip ahead in matching telemetry
10 |     skip_ahead = 1
11 | 
12 |     # image y-axis cropping
13 |     img_yaxis_start = 140
14 |     img_yaxis_end = 227
15 |     img_height = img_yaxis_end - img_yaxis_start + 1
16 |     #ycrop_range = [140, -20] Gina
17 |     ycrop_range = [90, -20] #lola
18 |     # image x-axis cropping
19 |     img_xaxis_start = 0
20 |     img_xaxis_end = 319
21 |     img_width = img_xaxis_end - img_xaxis_start + 1
22 |     # image resampling dimensions
23 |     img_resample_dim = (128,128)
24 |     cspace = 'YCR_CB' #image color space to be fed to model
25 |     keep_rate = 0.5 #Dropout
26 |     reg_fc = 0.05 #regularizer FC layers
27 |     reg_conv = 0.00001 #regularizer Conv layers
28 | 
29 | 
30 | class TrainConfig(DataConfig):
31 |     model_name = "relu"
32 |     batch_size = 128
33 |     num_epoch = 50
34 |     validation_split = 0.2
35 |     model = 'model_wroscoe_mod'
36 |     model_type = 'regression'
37 |     data_augmentation = 5
38 |     seed = 42
39 | 
40 | class TestConfig(TrainConfig):
41 |     model_path = ""
42 | 
--------------------------------------------------------------------------------
/docs/dependencies.md:
--------------------------------------------------------------------------------
1 | ### Dependencies
2 | 
3 | Python:
4 | ```
5 | $ sudo apt-get install libpython2.7-dev
6 | ```
7 | ```
8 | $ pip install http_parser
9 | $ pip install pynmea2
10 | ```
11 | FFmpeg:
12 | Add `OpenMAX Integration Layer` hardware acceleration support:
13 | ```
14 | $ sudo apt-get install libomxil-bellagio-dev
15 | ```
16 | Build `FFmpeg` from the repo `https://github.com/FFmpeg/FFmpeg.git`
17 | 
18 | Enable the `OMX h264` encoder that uses the GPU and add the libraries to draw text:
19 | ```
20 | $ ./configure --arch=armel --target-os=linux --enable-gpl --enable-nonfree \
21 |     --enable-libx264 --enable-omx --enable-omx-rpi \
22 |     --enable-libfreetype --enable-libfontconfig --enable-libfribidi
23 | 
24 | $ make
25 | $ sudo make install
26 | ```
27 | For data preparation `OpenCV2` is needed.
28 | 
29 | For training, and for model evaluation in the application, `Keras` and `Tensorflow` are needed. On the Raspberry PI, `OpenCV2` is not needed.
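A quick way to verify that the Python dependencies resolve with compatible versions (a sketch; on the RPI the `cv2` import can be skipped):

```
# Sanity check for the Python dependencies listed above.
import cv2              # data preparation only, not needed on the RPI
import pynmea2          # GPS NMEA parsing
import tensorflow as tf # training and model evaluation back-end
import keras            # CNN model definition

print "OpenCV:", cv2.__version__
print "Tensorflow:", tf.__version__
print "Keras:", keras.__version__
```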
30 | 31 | ### Cloud Connection 32 | The Autonomia application has a dependency on Vederly, a video and device management cloud platform for mobility applications, including a two-way message broker for device-to-cloud and cloud-to-device secure communication. 33 | 34 | The main application manages the connection to the Vederly server as defined in the `config.json` parameters file, streams video using RTMP to the server, and exposes methods for JSON-RPC remote procedure calls to the vehicle. 35 | 36 | If you are interested in receiving beta tester credentials and access to a Vederly cloud server for testing the Autonomia software or the cloud API send an email to cometa@visiblenergy.com 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | 91 | Arduino/libraries/ 92 | 93 | #images datafile (JM machine) 94 | ConvNet/dataWhiteTrack/*.flv 95 | ConvNet/dataWhiteTrack/vid01/*.* 96 | ConvNet/data 97 | ConvNet/__pycache__ 98 | ConvNet/.ipynb_checkpoints 99 | 100 | 101 | -------------------------------------------------------------------------------- /ConvNet/test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import math 4 | import hashlib 5 | import numpy as np 6 | import cv2 7 | from config import DataConfig 8 | 9 | # show an image in a proper scale 10 | def show_img(img): 11 | screen_res = 320. * 1 , 240. 
* 1
12 |     scale_width = screen_res[0] / img.shape[1]
13 |     scale_height = screen_res[1] / img.shape[0]
14 |     scale = min(scale_width, scale_height)
15 |     window_width = int(img.shape[1] * scale)
16 |     window_height = int(img.shape[0] * scale)
17 |     cv2.namedWindow('dst1_rt', cv2.WINDOW_NORMAL)
18 |     cv2.resizeWindow('dst1_rt', window_width, window_height)
19 |     cv2.imshow('dst1_rt', img)
20 |     return
21 | 
22 | # images are acquired by ffmpeg -s 320x240 -pix_fmt yuyv422
23 | def read_uyvy(filename, config, rows=240, cols=320):
24 |     # input image size
25 |     image_size = rows * cols * 2
26 |     # read a YUYV raw image and extract the Y plane - YUV 4:2:2 - (Y0,U0,Y1,V0),(Y2,U2,Y3,V2)
27 |     # this is equivalent to YUY2 pixel format http://www.fourcc.org/pixel-format/yuv-yuy2
28 |     fd = open(filename,'rb')
29 |     f = np.fromfile(fd, dtype=np.uint8, count=image_size)
30 |     print "read %d bytes from %s" % (len(f), filename)
31 |     if len(f) != image_size: #rows*cols*2
32 |         print "error in reading"
33 |         return None, None
34 | 
35 |     # TODO: support for three channels YUV
36 |     f = f.reshape((rows * cols / 2), 4)
37 |     Y = np.empty((rows * cols), dtype=np.uint8)
38 |     Y[0::2] = f[:,0]
39 |     Y[1::2] = f[:,2]
40 |     Y = Y.reshape(rows, cols)
41 |     # TODO: Y is only the Y plane
42 | 
43 |     # crop image
44 |     Y = Y[config.ycrop_range[0]:config.ycrop_range[1], config.img_xaxis_start:config.img_xaxis_end + 1]
45 | 
46 |     # resample image
47 |     Y = cv2.resize(Y, config.img_resample_dim) #, cv2.INTER_LINEAR)
48 | 
49 |     # convert to float
50 |     X = np.empty((rows * cols), dtype=np.float32)
51 |     X = Y / 255. - 0.5
52 | 
53 |     return Y, X
54 | import sys
55 | if __name__ == "__main__":
56 |     cnn_config = DataConfig()
57 | 
58 |     while True:
59 |         Y, X = read_uyvy('/tmpfs/frame.yuv', cnn_config)
60 |         if Y is None:
61 |             continue
62 |         print "\r\n"
63 |         show_img(Y)
64 |         print X
65 | 
66 |         key = cv2.waitKey(0)
67 | 
68 |         if key == 27:
69 |             sys.exit(0)
--------------------------------------------------------------------------------
/docs/f.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #v4l2-ctl --device=/dev/video0 --set-fmt-video=width=320,height=180,pixelformat=1
3 | 
4 | # SD 240p resolution
5 | v4l2-ctl --device=/dev/video0 --set-fmt-video=width=352,height=240,pixelformat=1
6 | 
7 | # **** use RPI GPU and write text
8 | ffmpeg -r 30 -use_wallclock_as_timestamps 1 -thread_queue_size 512 -f v4l2 -i /dev/video0 -c:v h264_omx -maxrate 768k -vf "format=yuv444p,drawbox=y=ih-h:color=black@0.9:width=40:height=12:t=max,drawtext=fontfile=OpenSans-Regular.ttf:textfile=/tmpfs/meta.txt:reload=1:fontsize=10:fontcolor=white:x=0:y=(h-th-2),format=yuv420p" -threads 4 -r 30 -g 60 -f flv rtmp://newstaging.cometa.io:12345/src/74DA388EAC61
9 | 
10 | #ffmpeg -r 30 -use_wallclock_as_timestamps 1 -thread_queue_size 512 -f v4l2 -i /dev/video0 -preset veryfast -tune zerolatency -vprofile baseline -c:v libx264 -vf "format=yuv444p,drawbox=y=ih-h:color=black@0.9:width=40:height=12:t=max,drawtext=fontfile=OpenSans-Regular.ttf:textfile=/tmp/meta.txt:reload=1:fontsize=10:fontcolor=white:x=0:y=(h-th-2),format=yuv420p" -threads 4 -r 30 -g 60 -f flv rtmp://newstaging.cometa.io:12345/src/74DA388EAC61
11 | 
12 | # ** stream plain video to server
13 | ffmpeg -r 30 -use_wallclock_as_timestamps 1 -thread_queue_size 512 -f v4l2 -i /dev/video0 -c:v h264_omx -maxrate 1024k -threads 4 -r 30 -g 60 -f flv rtmp://newstaging.cometa.io:12345/src/74DA388EAC61
14 | 
15 | # stream plain video to server
16 | #ffmpeg -r 30
-use_wallclock_as_timestamps 1 -thread_queue_size 512 -copytb 0 -f v4l2 -vcodec h264 -i /dev/video0 -threads 4 -r 30 -g 60 -f flv rtmp://newstaging.cometa.io:12345/src/74DA388EAC61 17 | 18 | # stream plain video to server and save 20 YUV frames per second in memory filesystem /tmpfs 19 | #ffmpeg -r 30 -use_wallclock_as_timestamps 1 -thread_queue_size 512 -copytb 0 -f v4l2 -vcodec h264 -i /dev/video0 -threads 4 -r 30 -g 60 -f flv rtmp://newstaging.cometa.io:12345/src/74DA388EAC61 -vcodec rawvideo -vf fps=20 -f image2 "/tmpfs/out-%2d".rgb 20 | 21 | 22 | # **** stream plain video to server and save last YUV frame in /tmpfs/thumb.yuv 23 | # ffmpeg -r 30 -use_wallclock_as_timestamps 1 -thread_queue_size 512 -f v4l2 -i /dev/video0 -c:v h264_omx -maxrate 768k -threads 4 -r 30 -g 60 -f flv rtmp://newstaging.cometa.io:12345/src/74DA388EAC61 -vcodec rawvideo -an -updatefirst 1 -y -f image2 /tmpfs/thumb.jpg 24 | 25 | /usr/local/bin/ffmpeg -r 30 -use_wallclock_as_timestamps 1 -thread_queue_size 512 -f v4l2 -i /dev/video0 -c:v h264 -maxrate 1024k -threads 4 -r 30 -g 60 -f flv rtmp://newstaging.cometa.io:12345/src/74DA388EAC61 26 | -------------------------------------------------------------------------------- /gpslib.py: -------------------------------------------------------------------------------- 1 | """ 2 | Cloud connected autonomous RC car. 3 | 4 | Copyright 2016 Visible Energy Inc. All Rights Reserved. 5 | """ 6 | __license__ = """ 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 
16 | """ 17 | 18 | import serial 19 | import threading 20 | import pynmea2 21 | # https://github.com/Knio/pynmea2 22 | # 23 | # Protocol documentation: 24 | # http://fort21.ru/download/NMEAdescription.pdf 25 | 26 | class GPS(object): 27 | """Receive data from a NMEA compatible GPS """ 28 | 29 | def __init__(self, logger): 30 | self.port = None 31 | self.reader = None 32 | self.log = logger 33 | self.readings = {} 34 | """ Current readings accessible from the `readings` instance attribute """ 35 | 36 | def connect(self, device, speed): 37 | """Connect to a serial NMEA GPS """ 38 | 39 | try: 40 | self.port = serial.Serial(device, speed) 41 | except Exception as e: 42 | self.log("GPS: %s" % e) 43 | return False 44 | 45 | self.reader = pynmea2.NMEAStreamReader() 46 | # start the reading thread 47 | self.threader = threading.Thread(target=self.loop) 48 | self.threader.daemon = True 49 | self.threader.start() 50 | return True 51 | 52 | def loop(self): 53 | """ Reader thread """ 54 | 55 | while True: 56 | data = self.port.read(16) 57 | try: 58 | for msg in self.reader.next(data): 59 | #print(msg) 60 | try: 61 | # p = pynmea2.parse(msg) 62 | #print msg.sentence_type 63 | if msg.sentence_type == 'GGA': 64 | #print msg.timestamp, msg.lat, msg.lat_dir, msg.lon, msg.lon_dir 65 | # convert degrees,decimal minutes to decimal degrees 66 | lats = msg.lat 67 | longs = msg.lon 68 | lat1 = (float(lats[2]+lats[3]+lats[4]+lats[5]+lats[6]+lats[7]+lats[8]))/60 69 | lat = (float(lats[0]+lats[1])+lat1) 70 | lon1 = (float(longs[3]+longs[4]+longs[5]+longs[6]+longs[7]+longs[8]+longs[9]))/60 71 | lon = (float(longs[0]+longs[1]+longs[2])+lon1) 72 | #print lat, lon 73 | self.readings['lat'] = lat 74 | self.readings['lat_dir'] = msg.lat_dir 75 | self.readings['lon'] = lon 76 | self.readings['lon_dir'] = msg.lon_dir 77 | self.readings['time'] = msg.timestamp 78 | except Exception, e: 79 | self.log("GPS: %s" % e) 80 | except Exception, e: 81 | self.log("GPS: %s" % e) 82 | -------------------------------------------------------------------------------- /application.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Cloud connected autonomous RC car. 4 | 5 | Copyright 2016 Visible Energy Inc. All Rights Reserved. 6 | """ 7 | __license__ = """ 8 | Licensed under the Apache License, Version 2.0 (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 
17 | """
18 | 
19 | import time
20 | import json
21 | import serial
22 | import string
23 | import sys
24 | import copy
25 | import signal
26 | import subprocess
27 | import pdb
28 | 
29 | # ---------------------
30 | import utils
31 | from cometalib import CometaClient
32 | from runtime import Runtime
33 | from gpslib import GPS
34 | import api
35 | from controller import RCVehicle
36 | import streamer
37 | 
38 | TELEMFNAME = '/tmpfs/meta.txt'
39 | 
40 | def signal_handler(signum, frame):
41 |     sys.exit(0)
42 | 
43 | def main(argv):
44 |     signal.signal(signal.SIGINT, signal_handler)
45 | 
46 |     Runtime.init_runtime()
47 |     syslog = Runtime.syslog
48 | 
49 |     # Read configuration
50 |     config = Runtime.read_config()
51 |     if config == None:
52 |         # error reading configuration file
53 |         syslog("(FATAL) Error reading configuration file. Exiting.")
54 |         return
55 |     syslog("Configuration: %s" % json.dumps(config))
56 | 
57 |     # Connect to GPS
58 |     gps = None
59 |     if 'gps' in config:
60 |         gps = GPS(syslog)
61 |         if gps.connect(config['gps']['serial'], config['gps']['speed']):
62 |             syslog("Connected to GPS.")
63 |         else:
64 |             gps = None
65 |             syslog("Error connecting to GPS on %s. Disabling." % config['gps']['serial'])
66 | 
67 |     # Connect the device to Cometa
68 |     cometa_server = config['cometa']['server']
69 |     cometa_port = config['cometa']['port']
70 |     application_id = config['cometa']['app_key']
71 |     # use the machine's MAC address as Cometa device ID
72 |     device_id = Runtime.get_serial()
73 |     config['serial'] = device_id
74 |     # override camera key with new format
75 |     config['video']['key'] = utils.buildKey(device_id, str(application_id)) + ':' + '1'
76 | 
77 |     # Instantiate a Cometa object
78 |     com = CometaClient(cometa_server, cometa_port, application_id, config['cometa']['ssl'], syslog)
79 |     com.debug = config['app_params']['debug']
80 |     # bind the message_handler() callback
81 |     com.bind_cb(api.message_handler)
82 | 
83 |     # Attach the device to Cometa
84 |     connected = False
85 | 
86 |     while not connected:
87 |         ret = com.attach(device_id, "ROV")
88 |         if com.error != 0:
89 |             print "Error in attaching to Cometa. Retrying ...", com.perror()
90 |             time.sleep(1)
91 |             continue
92 |         # Get the timestamp from the server
93 |         try:
94 |             ret_obj = json.loads(ret)
95 |         except Exception, e:
96 |             print "Error in parsing the message returned after attaching to Cometa. Message:", ret
97 |             time.sleep(1)
98 |             continue
99 |         connected = True
100 | 
101 |     # The server returns an object like: {"msg":"200 OK","heartbeat":60,"timestamp":1441405206}
102 |     syslog("Device \"%s\" attached to Cometa. Server timestamp: %d" % (device_id, ret_obj['timestamp']))
103 |     if com.debug:
104 |         print "Server returned:", ret
105 | 
106 |     # Create a car controller object
107 |     car = RCVehicle(config, syslog)
108 | 
109 |     # Initialize camera streamer
110 |     streamer.init(config, syslog)
111 | 
112 |     # Start the vehicle with default training mode
113 |     car.start()
114 |     car.com = com
115 | 
116 |     # Export the vehicle object to the API module
117 |     api.car = car
118 |     streamer.car = car
119 | 
120 | 
121 |     last_second, last_telemetry = 0., 0.
122 |     while car.state:
123 |         now = time.time()
124 | 
125 |         # Per second loop
126 |         if 1 < now - last_second:
127 |             if car.verbose and gps: print "GPS readings", gps.readings
128 |             # update GPS readings
129 |             try:
130 |                 if gps: car.readings = gps.readings
131 |             except:
132 |                 pass
133 |             last_second = now
134 | 
135 |         # Send telemetry data
136 |         # if car.telemetry_period < now - last_telemetry:
137 |         #     msg = car.telemetry()
138 |         #     if com.send_data(json.dumps(msg)) < 0:
139 |         #         syslog("Error in sending telemetry data.")
140 |         #     else:
141 |         #         if car.verbose:
142 |         #             syslog("Sending telemetry data %s " % msg)
143 |         #     last_telemetry = now
144 | 
145 |         time.sleep(1)
146 | 
147 | if __name__ == '__main__':
148 |     main(sys.argv[1:])
149 | 
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Utility functions for the Cometa agent.
3 | 
4 | """
5 | __license__ = """
6 | Copyright 2016 Visible Energy Inc. All Rights Reserved.
7 | Licensed under the Apache License, Version 2.0 (the "License");
8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at
10 | http://www.apache.org/licenses/LICENSE-2.0
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 | """
17 | 
18 | import math
19 | import hashlib, hmac
20 | import numpy as np
21 | import cv2
22 | 
23 | def check_rpc_msg(req):
24 |     ret = False
25 |     id = None
26 |     k = req.keys()
27 |     # check presence of required id attribute
28 |     if 'id' in k:
29 |         id = req['id']
30 |     else:
31 |         return ret, id
32 |     # check object length
33 |     if (len(k) != 4):
34 |         return ret, id
35 |     # check presence of required attributes
36 |     if (not 'jsonrpc' in k) or (not 'method' in k) or (not 'params' in k):
37 |         return ret, id
38 |     # check for version
39 |     if req['jsonrpc'] != "2.0":
40 |         return ret, id
41 |     # valid request
42 |     return True,id
43 | 
44 | def isanumber(x):
45 |     try:
46 |         int(x)
47 |     except ValueError:
48 |         try:
49 |             float(x)
50 |         except ValueError:
51 |             return False
52 |     return True
53 | 
54 | def buildKey(mac, secret):
55 |     """Return the camera streaming key."""
56 |     h = hmac.new(secret, mac, digestmod=hashlib.sha256).hexdigest()
57 |     return mac + ':' + h[0:32]
58 | 
59 | # images are acquired by ffmpeg -s 320x240 -pix_fmt yuyv422
60 | def read_uyvy(filename, config, rows=240, cols=320):
61 |     # input image size
62 |     image_size = rows * cols * 2
63 |     # read a YUYV raw image and extract the Y plane - YUV 4:2:2 - (Y0,U0,Y1,V0),(Y2,U2,Y3,V2)
64 |     # this is equivalent to YUY2 pixel format http://www.fourcc.org/pixel-format/yuv-yuy2
65 |     fd = open(filename,'rb')
66 |     f = np.fromfile(fd, dtype=np.uint8, count=image_size)
67 |     if len(f) != image_size: #rows*cols*2
68 |         # error in reading
69 |         return None
70 | 
71 |     # TODO: support for three channels YUV
72 |     f = f.reshape((rows * cols / 2), 4)
73 |     Y = np.empty((rows * cols), dtype=np.uint8)
74 |     Y[0::2] = f[:,0]
75 |     Y[1::2] = f[:,2]
76 |     Y = Y.reshape(rows, cols)
77 |     # TODO: Y is only the Y plane
78 | 
79 |     # crop image
80 |     Y = Y[config.img_yaxis_start:config.img_yaxis_end + 1, config.img_xaxis_start:config.img_xaxis_end + 1]
81 | 
82 |     # resample image
83 |     Y = cv2.resize(Y, config.img_resample_dim) #, cv2.INTER_LINEAR)
84 | 
85 |     # Y is of shape (1,:,:,:)
86 |     Y = Y.reshape(1, config.img_resample_dim[0], config.img_resample_dim[1], config.num_channels)
87 | 
88 |     # cast to float and normalize the image values
89 |     Y_f = np.empty((rows * cols), dtype=np.float64)
90 |     Y_f = Y / 127.5 - 1
91 | 
92 |     # reshape as a tensor for model prediction
93 |     return Y_f
94 | 
95 | def steering2bucket(s):
96 |     """ Convert from [0,180] range to a bucket number in the [0,14] range with log distribution to stretch the range of the buckets around 0 """
97 |     s -= 90
98 |     return int(round(math.copysign(math.log(abs(s) + 1, 2.0), s))) + 7
99 | 
100 | def bucket2steering(a):
101 |     """ Reverse the function that buckets the steering for neural net output """
102 |     steer = a - 7
103 |     original = steer
104 |     steer = abs(steer)
105 |     steer = math.pow(2.0, steer)
106 |     steer -= 1.0
107 |     steer = math.copysign(steer, original)
108 |     steer += 90.0
109 |     steer = max(0, min(179, steer))
110 |     return steer
111 | 
112 | # throttle bucket conversion map -- from [0,180] range to a bucket number in the [0,14] range
113 | throttle_map = [
114 |     [80,0],   # if t <= 80 -> o=0     # Braking:
115 |     [82,1],   # elif t <= 82 -> o=1
116 |     [84,2],   # elif t <= 84 -> o=2
117 |     [86,3],   # elif t <= 86 -> o=3
118 |     [87,4],   # elif t <= 87 -> o=4   # Braking ^
119 | 
120 |     [96,5],   # elif t <= 96 -> o=5   # Neutral
121 | 
122 |     [97,6],   # elif t <= 97 -> o=6   # Forward:
123 |     [98,7],   # elif t <= 98 -> o=7
124 |     [99,8],   # elif t <= 99 -> o=8
125 |     [100,9],  # elif t <= 100 -> o=9
126 | 
127 |     [101,10], # elif t <= 101 -> o=10
128 |     [102,11], # elif t <= 102 -> o=11
129 |     [105,12], # elif t <= 105 -> o=12
130 |     [107,13], # elif t <= 107 -> o=13
131 |     [110,14]  # elif t <= 110 -> o=14
132 | ]
133 | 
134 | def throttle2bucket(t):
135 |     """ Convert throttle from [0,180] range to a bucket number in the [0,14] range using a map. """
136 | 
137 |     for max_in_bucket,bucket in throttle_map:
138 |         if t <= max_in_bucket:
139 |             return bucket
140 |     return 14
141 | 
142 | def bucket2throttle(t):
143 |     """ Reverse the function that buckets the throttle for neural net output """
144 |     map_back = {5:90}
145 |     t = int(float(t)+0.5)
146 |     for ibucket,(max_in_bucket,bucket) in enumerate(throttle_map):
147 |         if t == bucket:
148 |             if map_back.has_key(bucket):
149 |                 return map_back[bucket]
150 | 
151 |             return max_in_bucket
152 |     return 100 # Never happens, defensively select a mild acceleration
153 | 
--------------------------------------------------------------------------------
/ConvNet/utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Utility functions for the Cometa agent.
3 | 
4 | """
5 | __license__ = """
6 | Copyright 2016 Visible Energy Inc. All Rights Reserved.
7 | Licensed under the Apache License, Version 2.0 (the "License");
8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at
10 | http://www.apache.org/licenses/LICENSE-2.0
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 | """
17 | 
18 | import math
19 | import hashlib, hmac
20 | import numpy as np
21 | import cv2
22 | 
23 | def check_rpc_msg(req):
24 |     ret = False
25 |     id = None
26 |     k = req.keys()
27 |     # check presence of required id attribute
28 |     if 'id' in k:
29 |         id = req['id']
30 |     else:
31 |         return ret, id
32 |     # check object length
33 |     if (len(k) != 4):
34 |         return ret, id
35 |     # check presence of required attributes
36 |     if (not 'jsonrpc' in k) or (not 'method' in k) or (not 'params' in k):
37 |         return ret, id
38 |     # check for version
39 |     if req['jsonrpc'] != "2.0":
40 |         return ret, id
41 |     # valid request
42 |     return True,id
43 | 
44 | def isanumber(x):
45 |     try:
46 |         int(x)
47 |     except ValueError:
48 |         try:
49 |             float(x)
50 |         except ValueError:
51 |             return False
52 |     return True
53 | 
54 | def buildKey(mac, secret):
55 |     """Return the camera streaming key."""
56 |     h = hmac.new(secret, mac, digestmod=hashlib.sha256).hexdigest()
57 |     return mac + '-' + h[0:32]
58 | 
59 | # images are acquired by ffmpeg -s 320x240 -pix_fmt yuyv422
60 | def read_uyvy(filename, config, rows=240, cols=320):
61 |     # input image size
62 |     image_size = rows * cols * 2
63 |     # read a YUYV raw image and extract the Y plane - YUV 4:2:2 - (Y0,U0,Y1,V0),(Y2,U2,Y3,V2)
64 |     # this is equivalent to YUY2 pixel format http://www.fourcc.org/pixel-format/yuv-yuy2
65 |     fd = open(filename,'rb')
66 |     f = np.fromfile(fd, dtype=np.uint8, count=image_size)
67 |     if len(f) != image_size: #rows*cols*2
68 |         # error in reading
69 |         return None
70 | 
71 |     # TODO: support for three channels YUV
72 |     f = f.reshape((rows * cols / 2), 4)
73 |     Y = np.empty((rows * cols), dtype=np.uint8)
74 |     Y[0::2] = f[:,0]
75 |     Y[1::2] = f[:,2]
76 |     Y = Y.reshape(rows, cols)
77 |     # TODO: Y is only the Y plane
78 | 
79 |     # crop image
80 |     Y = Y[config.img_yaxis_start:config.img_yaxis_end + 1, config.img_xaxis_start:config.img_xaxis_end + 1]
81 | 
82 |     # resample image
83 |     Y = cv2.resize(Y, config.img_resample_dim) #, cv2.INTER_LINEAR)
84 | 
85 |     # Y is of shape (1,:,:,:)
86 |     Y = Y.reshape(1, config.img_resample_dim[0], config.img_resample_dim[1], config.num_channels)
87 | 
88 |     # cast to float and normalize the image values
89 |     Y_f = np.empty((rows * cols), dtype=np.float64)
90 |     Y_f = Y / 127.5 - 1
91 | 
92 |     # reshape as a tensor for model prediction
93 |     return Y_f
94 | 
95 | def steering2bucket(s):
96 |     """ Convert from [0,180] range to a bucket number in the [0,14] range with log distribution to stretch the range of the buckets around 0 """
97 |     s -= 90
98 |     return int(round(math.copysign(math.log(abs(s) + 1, 2.0), s))) + 7
99 | 
100 | def bucket2steering(a):
101 |     """ Reverse the function that buckets the steering for neural net output """
102 |     steer = a - 7
103 |     original = steer
104 |     steer = abs(steer)
105 |     steer = math.pow(2.0, steer)
106 |     steer -= 1.0
107 |     steer = math.copysign(steer, original)
108 |     steer += 90.0
109 |     steer = max(0, min(179, steer))
110 |     return steer
111 | 
112 | # throttle bucket conversion map -- from [0,180] range to a bucket number in the [0,14] range
113 | throttle_map = [
114 |     [80,0],   # if t <= 80 -> o=0     # Braking:
115 |     [82,1],   # elif t <= 82 -> o=1
116 |     [84,2],   # elif t <= 84 -> o=2
117 |     [86,3],   # elif t <= 86 -> o=3
118 |     [87,4],   # elif t <= 87 -> o=4   # Braking ^
119 | 
120 |     [96,5],   # elif t <= 96 -> o=5   # Neutral
121 | 
122 |     [97,6],   # elif t <= 97 -> o=6   # Forward:
123 |     [98,7],   # elif t <= 98 -> o=7
124 |     [99,8],   # elif t <= 99 -> o=8
125 |     [100,9],  # elif t <= 100 -> o=9
126 | 
127 |     [101,10], # elif t <= 101 -> o=10
128 |     [102,11], # elif t <= 102 -> o=11
129 |     [105,12], # elif t <= 105 -> o=12
130 |     [107,13], # elif t <= 107 -> o=13
131 |     [110,14]  # elif t <= 110 -> o=14
132 | ]
133 | 
134 | def throttle2bucket(t):
135 |     """ Convert throttle from [0,180] range to a bucket number in the [0,14] range using a map. """
136 | 
137 |     for max_in_bucket,bucket in throttle_map:
138 |         if t <= max_in_bucket:
139 |             return bucket
140 |     return 14
141 | 
142 | def bucket2throttle(t):
143 |     """ Reverse the function that buckets the throttle for neural net output """
144 |     map_back = {5:90}
145 |     t = int(float(t)+0.5)
146 |     for ibucket,(max_in_bucket,bucket) in enumerate(throttle_map):
147 |         if t == bucket:
148 |             if map_back.has_key(bucket):
149 |                 return map_back[bucket]
150 | 
151 |             return max_in_bucket
152 |     return 100 # Never happens, defensively select a mild acceleration
153 | 
--------------------------------------------------------------------------------
/ConvNet/README.md:
--------------------------------------------------------------------------------
1 | ## Video Acquisition
2 | 
3 | Video is streamed from the car to the Autonomia Cloud server using the RTMP protocol at 30 frames per second, with a 240x320 resolution and H.264 encoding. The streaming is controlled by the `video_start` and `video_stop` JSON/RPC methods.
4 | 
5 | To acquire video for CNN training, the `video_start` method must be called with the `{'telem':true}` parameter. With this option, the video generated embeds the steering and throttle servo values in the bottom left of each frame.
6 | 
7 | >Key to keeping the car's additional hardware minimal is performing all offline image processing and CNN training not on the car computer but on a cloud server or a desktop.
8 | 
9 | **Example of image with telemetry:**
10 | 
11 | ![Image with telemetry](../docs/img06721.jpg "image with telemetry")
12 | 
13 | **Corresponding telemetry record:**
14 | ```
15 | {"c":6721,"s":107,"time":"1492552494401","t":102,"device_id":"B827EB0431DA"}
16 | ```
17 | Both `time` in Epoch msec and frame count `c` are used for synchronizing video and telemetry. In most cases, following the image extraction procedure indicated below, the image filename contains the frame number, as expected in the model training.
18 | 
19 | > `steering` and `throttle` values are in the `[0,180]` range with `90` as neutral. These are the values received from the RC radio receiver and captured by the Arduino controller.
20 | 
21 | Streaming video is automatically ingested and stored in the Autonomia cluster in `flv` files of 5 minutes in length, with the filename including the vehicle identifier and an Epoch timestamp in seconds. A video file stored in the Vederly cloud has a name that includes a `camera_key` unique to the vehicle, and an Epoch `timestamp`. For instance: `74DA388EAC61-1482024251.flv`
22 | 
23 | Video and telemetry files are transferred automatically from the Autonomia servers to an S3 bucket.
24 | 
25 | ## Preparation Steps
26 | Once a telemetry video file has been downloaded, the preparation steps are:
27 | 
28 | 1. Extract frames from the video
29 | 2. Train the model
30 | 3. Deploy the model into the car
31 | 
32 | ### Extract Frames from Video
33 | A video file is split into individual frames using the `ffmpeg` command:
34 | ```
35 | ffmpeg -i <video_file.flv> -qscale:v 2 img%05d.jpg
36 | ```
37 | > the `qscale:v 2` option is to obtain JPEG images with the best possible quality
38 | 
39 | Each frame is a JPEG image file with a name containing sequential numbers in order of time.
40 | 
41 | Example:
42 | ```
43 | # create a directory to contain the frames using the timestamp in the filename
44 | mkdir images-1482024251
45 | # change directory and run the ffmpeg command
46 | cd images-1482024251
47 | ffmpeg -i ../74DA388EAC61-1482024251.flv -qscale:v 2 img%05d.jpg
48 | ```
49 | > At 30 fps acquisition rate, every minute of video results in 1800 JPEG images.
50 | 
51 | ### Train the Model
52 | 
53 | The CNN model in `Keras` is defined in the `train.py` Python script. The hyperparameters are in `config.py`, with some exceptions that are hard-coded (learning rate, optimization function).
54 | 
55 | ```
56 | train.py <image_directory>
57 | ```
58 | Example:
59 | ```
60 | cd Autonomia/ConvNet
61 | train.py /home/oem/images-1482024251
62 | ```
63 | Once the training is completed, the model is saved as a JSON model with the weights in HDF5 binary format, in the files `autonomia_cnn.json` and `autonomia_cnn.h5`.
64 | 
65 | >The main application loads the model and weights from the name indicated in the `config.json` file.
66 | 
67 | ### Docker image
68 | For training the CNN we have built a Linux Ubuntu Docker image, available in Docker Hub. Since there are
69 | several cross-dependencies, it is recommended to run the image in a Docker container for training.
70 | 
71 | Docker pull command:
72 | ```
73 | $ docker pull marcoeg/python-ml
74 | ```
75 | To run the container and mount a local directory with the images:
76 | ```
77 | $ docker run -it -p 8888:8888 -v [LOCAL_PATH]:/mnt marcoeg/python-ml:latest
78 | ```
79 | For instance:
80 | ```
81 | $ docker run -it -p 8888:8888 -v /home/marco/Videos:/mnt marcoeg/python-ml:latest
82 | ```
83 | A Jupyter notebook is also started; it is accessible with a browser using the token provided after the container starts.
84 | 
85 | At startup, check for this message on the console:
86 | 
87 | ```
88 | Copy/paste this URL into your browser when you connect for the first time,
89 | to login with a token:
90 | http://0.0.0.0:8888/?token=96acae164b1ea0ff1f2b5d79eee04d73e160df135fb5136b
91 | ```
92 | 
93 | To use the pre-configured container, in another terminal log into the container using the ID:
94 | ```
95 | $ docker ps
96 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
97 | 6f395a47bb83 marcoeg/python-ml:latest "jupyter notebook ..." 6 seconds ago Up 5 seconds 0.0.0.0:8888->8888/tcp jovial_engelbart
98 | 
99 | $ docker exec -it 6f395a47bb83 bash
100 | ```
101 | 
102 | In the container shell, the directory from the host filesystem is mounted at /mnt:
103 | ```
104 | root@5eca27f4b434:~# ls /mnt
105 | Oakland-Apr20
106 | ```
107 | 
108 | The Autonomia repo (train2softmax branch) is available in /root/Autonomia. Perform a repo update every time the container starts to ensure the head of the tree is used:
109 | ```
110 | root@5eca27f4b434:~# cd /root/Autonomia/
111 | root@5eca27f4b434:~# git pull
112 | ```
113 | 
114 | To train the model:
115 | ```
116 | root@5eca27f4b434:~/Autonomia# cd ConvNet/
117 | root@5eca27f4b434:~/Autonomia/ConvNet# ./train.py [PATH_TO_VIDEO_DIRECTORY]
118 | ```
119 | For instance:
120 | ```
121 | root@5eca27f4b434:~/Autonomia/ConvNet# ./train.py /mnt/Oakland-Apr20/
122 | ```
123 | When the training is completed, the trained model is in the files `autonomia_cnn.h5` and `autonomia_cnn.json`.
124 | The file `test_on_trainingset.png` contains a comparison plot between the predicted and actual steering values
125 | of the model.
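Loading the trained model back for a quick prediction takes only a few lines. A minimal sketch along the lines of `predict.py`, assuming the current regression model, the two files produced by `train.py` in the working directory, and an illustrative frame name:

```
# Sketch: load the trained model and predict the steering for a single frame.
import cv2
from keras.models import model_from_json
from config import DataConfig

config = DataConfig()
model = model_from_json(open("autonomia_cnn.json").read())
model.load_weights("autonomia_cnn.h5")

img = cv2.imread("img00455.jpg")                        # illustrative frame name
y = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)[:, :, 0]    # Y plane only
y = y[config.ycrop_range[0]:config.ycrop_range[1], :]   # crop
y = cv2.resize(y, config.img_resample_dim)              # resample
x = y.reshape(1, config.img_resample_dim[0], config.img_resample_dim[1], 1)
x = x / 255.0 - 0.5                                     # normalize

steering = model.predict(x) + 90    # model output is relative to neutral
print(steering)
```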
126 | 
127 | There is a working Jupyter notebook for dataset analysis in /Autonomia/Convnet/data_analysis_lola.ipynb.
128 | Change the `vid1_dir` variable in the first cell to the path of the video directory:
129 | ```
130 | vid1_dir = '/mnt/Oakland-Apr20/'
131 | ```
132 | 
133 | Important:
134 | There is no backward compatibility of Keras 2.x models with 1.x. It is necessary to ensure that the same
135 | versions of Tensorflow and Keras are used for training in the container and for predictions on the RPI.
136 | 
137 | To check versions:
138 | ```
139 | $ python
140 | >>> import tensorflow as tf
141 | >>> import keras as K
142 | >>> tf.__version__
143 | '1.0.1'
144 | >>> K.__version__
145 | '2.0.4'
146 | >>>
147 | ```
148 | If either Tensorflow or Keras need to be updated:
149 | ```
150 | $ pip install tensorflow==1.0.1 --upgrade
151 | $ pip install keras==2.0.4 --upgrade
152 | 
153 | ```
154 | Keep in mind that changes in the container are not persistent.
155 | 
--------------------------------------------------------------------------------
/ConvNet/predict.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Cloud connected autonomous RC car.
4 | 
5 | Copyright 2016 Visible Energy Inc. All Rights Reserved.
6 | """
7 | __license__ = """
8 | Licensed under the Apache License, Version 2.0 (the "License");
9 | you may not use this file except in compliance with the License.
10 | You may obtain a copy of the License at
11 | http://www.apache.org/licenses/LICENSE-2.0
12 | Unless required by applicable law or agreed to in writing, software
13 | distributed under the License is distributed on an "AS IS" BASIS,
14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 | """
18 | 
19 | """
20 | Simple steering and throttle prediction from a trained model, compared with a telemetry image.
21 | 
22 | Usage:
23 |     ./predict.py <image_directory>
24 | 
25 | Arguments:
26 |     image directory
27 | """
28 | 
29 | import cv2
30 | import numpy as np
31 | import sys
32 | import os
33 | import time
34 | import math
35 | from keras.models import model_from_json
36 | from config import DataConfig
37 | import utils
38 | import ntpath
39 | 
40 | interactive = True
41 | 
42 | # show an image in a proper scale
43 | def show_img(img):
44 |     screen_res = 320. * 2 , 240. * 2
45 |     scale_width = screen_res[0] / img.shape[1]
46 |     scale_height = screen_res[1] / img.shape[0]
47 |     scale = min(scale_width, scale_height)
48 |     window_width = int(img.shape[1] * scale)
49 |     window_height = int(img.shape[0] * scale)
50 |     cv2.namedWindow('dst1_rt', cv2.WINDOW_NORMAL)
51 |     cv2.resizeWindow('dst1_rt', window_width, window_height)
52 |     cv2.imshow('dst1_rt', img)
53 |     return
54 | 
55 | if __name__ == "__main__":
56 |     config = DataConfig()
57 | 
58 |     try:
59 |         data_path = os.path.expanduser(sys.argv[1])
60 |     except Exception as e:
61 |         print(e, "Usage: ./predict.py <image_directory>")
62 |         sys.exit(-1)
63 | 
64 |     if not os.path.exists(data_path):
65 |         print("Directory %s not found."
% data_path) 66 | sys.exit(-1) 67 | 68 | log = np.load('log.npy') 69 | model = model_from_json(open("{}/autonomia_cnn.json".format(data_path)).read()) 70 | # Load model weights 71 | model.load_weights("{}/autonomia_cnn.h5".format(data_path)) 72 | model.summary() 73 | 74 | img_height, img_width, num_channels = config.img_resample_dim[0], config.img_resample_dim[1], config.num_channels 75 | skip = config.skip_ahead 76 | 77 | 78 | # open labels csv file (frame filename, steering, throttle) 79 | #with open("{}/labels.csv".format(data_path)) as f: 80 | #labels = f.readlines() 81 | #nlabels = len(labels) 82 | #print("found %d labels" % nlabels) 83 | 84 | #out_file = open("{}/labels_pred.csv".format(data_path), 'w') 85 | 86 | # Load model structure 87 | 88 | model = model_from_json(open("{}/autonomia_cnn.json".format(data_path)).read()) 89 | 90 | # Load model weights 91 | model.load_weights("{}/autonomia_cnn.h5".format(data_path)) 92 | model.summary() 93 | 94 | img_height, img_width, num_channels = config.img_resample_dim[0], config.img_resample_dim[1], config.num_channels 95 | skip = config.skip_ahead 96 | 97 | for i in range(len(log)): 98 | if i < skip: 99 | continue 100 | filename, steering, throttle= log[i][0], log[i][1], log[i][2] 101 | print('***************** {} | {} | {}'.format(filename, steering, throttle)) 102 | steering = int(steering) 103 | # throttle 104 | throttle = int(throttle) 105 | print(filename, steering, throttle) 106 | # load image 107 | img = cv2.imread(filename) 108 | # convert to YCrCb 109 | gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB) 110 | 111 | if num_channels == 1: 112 | # extract and use Y plane only 113 | X_img, _, _ = cv2.split(gray_img) 114 | else: 115 | # use YCrCb 116 | X_img = gray_img 117 | 118 | if interactive: show_img(X_img) 119 | 120 | # crop image 121 | X_img = X_img[config.ycrop_range[0]:config.ycrop_range[1], :] 122 | 123 | # resample image 124 | X_img = cv2.resize(X_img, config.img_resample_dim[::-1] , cv2.INTER_LINEAR) 125 | 126 | # X_img is of shape (1,:,:,:) 127 | X_img = X_img.reshape(1, img_height, img_width, num_channels) 128 | 129 | # normalize the image values 130 | X_img = X_img / 255.0 - 0.5 131 | 132 | now = time.time() 133 | # predict steering and throttle 134 | steering = model.predict(X_img) 135 | t = time.time() - now 136 | print("execution time:", t) 137 | # steering = np.argmax(p[:, :15], 1) 138 | # throttle = np.argmax(p[:, 15:], 1) 139 | # print p[0, :15] 140 | # print p[0, 15:] 141 | 142 | steering = steering + 90 143 | 144 | print(steering) 145 | #out_file.write("%s,%d\n" % (ntpath.basename(filename), steering)) 146 | 147 | if interactive: 148 | key = cv2.waitKey(0) 149 | if key == 27: 150 | sys.exit(0) 151 | ''' 152 | 153 | 154 | for i,line in enumerate(labels): 155 | if i < skip: 156 | continue 157 | filename, steering, throttle= line.split(',') 158 | # image filename 159 | filename = data_path + '/' + filename 160 | # steering 161 | steering = int(steering) 162 | # throttle 163 | throttle = int(throttle) 164 | print filename, steering, throttle, utils.steering2bucket(steering), utils.throttle2bucket(throttle) 165 | # load image 166 | img = cv2.imread(filename) 167 | 168 | # convert to YCrCb 169 | gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB) 170 | 171 | if num_channels == 1: 172 | # extract and use Y plane only 173 | X_img, _, _ = cv2.split(gray_img) 174 | else: 175 | # use YCrCb 176 | X_img = gray_img 177 | 178 | if interactive: show_img(X_img) 179 | 180 | # crop image 181 | X_img = 
X_img[config.img_yaxis_start:config.img_yaxis_end + 1, config.img_xaxis_start:config.img_xaxis_end + 1]
182 | 
183 |     # resample image
184 |     X_img = cv2.resize(X_img, config.img_resample_dim, cv2.INTER_LINEAR)
185 | 
186 |     # X_img is of shape (1,:,:,:)
187 |     X_img = X_img.reshape(1, img_height, img_width, num_channels)
188 | 
189 |     # normalize the image values
190 |     X_img = X_img / 127.5 - 1
191 | 
192 |     now = time.time()
193 |     # predict steering and throttle
194 |     steering, throttle = model.predict(X_img[0:1])
195 |     t = time.time() - now
196 |     print "execution time:", t
197 |     # steering = np.argmax(p[:, :15], 1)
198 |     # throttle = np.argmax(p[:, 15:], 1)
199 |     # print p[0, :15]
200 |     # print p[0, 15:]
201 | 
202 |     steering = np.argmax(steering[0])
203 |     throttle = np.argmax(throttle[0])
204 | 
205 |     print steering, throttle
206 |     steering = utils.bucket2steering(steering)
207 |     throttle = utils.bucket2throttle(throttle)
208 |     print steering, throttle
209 | 
210 |     out_file.write("%s,%d,%d\n" % (ntpath.basename(filename), steering, throttle))
211 | 
212 |     if interactive:
213 |         key = cv2.waitKey(0)
214 |         if key == 27:
215 |             sys.exit(0)
216 | '''
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ![alt tag](https://img.shields.io/badge/python-2.7-blue.svg)
2 | 
3 | # Autonomia
4 | A project for a cloud connected, autonomous RC 1/10 scale electric car to participate in the [DIY Robocars self-racing-cars events.](https://www.meetup.com/Self-Racing-Cars/) For video and telemetry data collection and the remote control API, the project relies on the [Autonomia](http://www.autonomia.io) cloud API, a video and device management cloud platform for mobility applications developed by `Visible Energy Inc. dba Autonomia`.
5 | 
6 | The autopilot software, running on the Raspberry PI on board the car, is based on a convolutional neural network trained end-to-end using video from the camera; it predicts steering and throttle values from a live image about 20 times per second on the RPI.
7 | 
8 | **Video from the first test run (Oakland warehouse):**
9 | 
10 | [![First Test Run](https://img.youtube.com/vi/3SsrNfRHWoU/0.jpg)](https://youtu.be/3SsrNfRHWoU)
11 | ## Running Modes
12 | The Autonomia vehicle has three running modes:
13 | 
14 | 1. Controlled by the radio remote control (RC)
15 | 2. Controlled remotely through the Remote JSON/RPC API and the Autonomia cloud servers
16 | 3. Autonomously, driven by the predictions of a convolutional neural network trained end-to-end
17 | 
18 | In any running mode, the vehicle is connected to the Autonomia cloud servers using the [Cometa API](http://www.cometa.io/cometa-api.html). It is also remotely managed and responds to the commands in the JSON/RPC Remote API, as well as streaming telemetry and live video from the on-board camera to the Autonomia cloud management platform.
19 | 
20 | ## Hardware
21 | The hardware added to a commercial 1/10 scale RC car, such as our first car "Gina", a [Traxxas Stampede](https://traxxas.com/products/models/electric/36054-1stampede?t=details), consists of:
22 | 
23 | 1. [Raspberry PI 3 model B](https://www.raspberrypi.org/products/raspberry-pi-3-model-b/) with an 8GB SD card
24 | 2. [Arduino Nano](https://www.arduino.cc/en/Main/arduinoBoardNano)
25 | 3. [Logitech C920 camera](http://www.logitech.com/en-us/product/hd-pro-webcam-c920)
26 | 4. Automotive DC-DC step-down converter
27 | 
28 | Our second car "Lola" is based on the 1/8 scale [Thunder Tiger MT4 G3 4WD Monster Truck](http://www.thundertiger.com/products-detail.php?id=10&lang=en)
29 | 
30 | Optional equipment:
31 | 
32 | 1. [Ublox GPS with USB interface](http://www.hardkernel.com/main/products/prdt_info.php?g_code=G142502154078) (needed if used outdoors)
33 | 2. [Adafruit 9-DOF Inertial Measurement Unit RPI shield](https://www.adafruit.com/products/2472)
34 | 3. [Maxbotix Ultrasonic Rangefinder](https://www.adafruit.com/products/172)
35 | 
36 | No changes have been made to the car chassis. The Arduino Nano is mounted on a protoboard and anchored with one of the screws used for the RC receiver. The camera is fastened to the roof with a strap tie-down. The RPI is inside an enclosure and attached to the bottom of the chassis with a strap tie-down.
37 | 
38 | Power is supplied by the standard NiMH 3000 mAh battery, or by a LiPo 5000 mAh battery with a Y cable to power the RPI through an automotive DC-DC step-down power supply. The Arduino, camera and GPS are powered from their USB connections to the RPI.
39 | 
40 | The Arduino Nano receives the throttle and steering inputs from the radio receiver, and controls the inputs to the car motor ESC and steering servo. It also interfaces with the RPI to receive steering and throttle values, as well as to communicate to the RPI the readings from the radio controller. There is no direct connection between the radio receiver and the vehicle's servos.
41 | 
42 | ## Software
43 | 
44 | The main application in `Python` consists of:
45 | 
46 | 1. an implementation of a `JSON/RPC` remote API to control the vehicle from the Autonomia cloud
47 | 2. a main car controller loop to operate the car motor and steering servos (through the Arduino interface)
48 | 3. a neural network model in `Keras`, trained end-to-end to predict steering and throttle from images
49 | 
50 | In parallel to the main application, an `ffmpeg` streamer sends video to the Autonomia cloud for live viewing inside a ground control station (GCS) application, and for storage for CNN training or driving evaluation purposes.
51 | 
52 | The CNN training video and telemetry data are acquired with the car controlled manually with the radio RC and the `ffmpeg` streamer running in training mode, which allows for embedding the current steering and throttle values in the bottom left corner of the video itself. Steering and throttle values are then extracted frame by frame, as part of the data preparation and model training pipeline.
53 | 
54 | At any time, together with live video, telemetry data is also sent at a selectable rate to the Autonomia cloud, for live use in the GCS and for storage for offline viewing.
55 | 
56 | The trained `Keras` model (`Tensorflow` back-end) is loaded at runtime and is evaluated in about 40 milliseconds or less, depending on the number of nodes in the network. The model evaluates steering and throttle values from a `YUV 4:2:2` encoded frame, acquired by the streamer at 30 fps. The evaluated steering and throttle are passed to the Arduino controller to set the proper values for the motor and steering servos. In the current implementation, no feedback control mechanism is in place.
57 | 
58 | ## Performance
59 | 
60 | The car runs very smoothly in autonomous mode, with the main application in Python running on the Raspberry PI making steering and throttle predictions at the video acquisition rate of 30 frames per second. The main application does not perform any video processing: the raw video is acquired and resized by the streamer, which runs in parallel and shares data through a video pipeline.
61 | 
62 | Also, the `ffmpeg` streamer has been built to take advantage of the RPI GPU, which leaves most of the CPUs available to perform the model evaluation.
63 | 
64 | Since the CNN training happens in the cloud, an inexpensive Raspberry PI and a small Arduino Nano are all the computing power needed on board the vehicle. The camera used also encodes H.264 video in its own hardware, requiring re-encoding by the RPI only during training, to embed steering and throttle data in the video.
65 | 
66 | ## Documentation
67 | 
68 | * [Remote cloud API](../master/docs/remote-api.md)
69 | * [CNN training pipeline](../master/ConvNet/README.md)
70 | * [Arduino controller](../master/Arduino/README.md)
71 | * [Dependencies](../master/docs/dependencies.md)
72 | 
73 | ## Cloud Server and API
74 | The application uses Autonomia.io, a video and device management cloud platform for mobility applications, including a two-way message broker for device-to-cloud and cloud-to-device secure communication. It runs on a server in the cloud and uses HTTPS and secure WebSockets for efficient remote interaction of applications and vehicles.
75 | 
76 | The main application manages the connection to the Autonomia server as defined in the `config.json` parameters file, streams video using RTMP to the server, and exposes methods for JSON-RPC remote procedure calls to the vehicle.
77 | 
78 | If you are interested in receiving beta tester credentials and access to an Autonomia cloud server for testing the Autonomia software or the cloud API, send an email to info@autonomia.io.
79 | >Teams participating in the `DIY Robocars` events can obtain free use of the Autonomia server and storage (within limits).
80 | 
81 | To use with Postman:
82 | * [Swagger cloud API definition file](../master/docs/Autonomia.postman_collection.json)
83 | * [Swagger cloud API environment file](../master/docs/Autonomia.postman_environment.json)
84 | 
85 | ## Credits
86 | 
87 | While we are using a radically different approach and minimalistic hardware, credit is given to Otavio Good and the [Carputer](https://github.com/otaviogood/carputer) team for showing the feasibility of using a CNN for autonomous driving of 1/10 scale cars, and for providing very useful insights into relevant design choices.
88 | 
89 | We also credit DIY Robocars' Chris Anderson for organizing and driving the self-driving cars open source movement.
90 | 
--------------------------------------------------------------------------------
/api.py:
--------------------------------------------------------------------------------
1 | """
2 | Cloud connected autonomous RC car.
3 | 
4 | Copyright 2016 Visible Energy Inc. All Rights Reserved.
5 | """
6 | __license__ = """
7 | Licensed under the Apache License, Version 2.0 (the "License");
8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at
10 | http://www.apache.org/licenses/LICENSE-2.0
11 | Unless required by applicable law or agreed to in writing, software
12 | distributed under the License is distributed on an "AS IS" BASIS,
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | See the License for the specific language governing permissions and
15 | limitations under the License.
16 | """
17 | 
18 | import json
19 | import subprocess
20 | import pdb
21 | import utils
22 | import streamer
23 | from runtime import Runtime
24 | 
25 | # JSON-RPC errors
26 | JSON_RPC_PARSE_ERROR = '{"jsonrpc": "2.0","error":{"code":-32700,"message":"Parse error"},"id": null}'
27 | JSON_RPC_INVALID_REQUEST = '{"jsonrpc": "2.0","error":{"code":-32600,"message":"Invalid Request"},"id":null}'
28 | 
29 | JSON_RPC_METHOD_NOTFOUND_FMT_STR = '{"jsonrpc":"2.0","error":{"code": -32601,"message":"Method not found"},"id": %s}'
30 | JSON_RPC_METHOD_NOTFOUND_FMT_NUM = '{"jsonrpc":"2.0","error":{"code": -32601,"message":"Method not found"},"id": %d}'
31 | JSON_RPC_INVALID_PARAMS_FMT_STR = '{"jsonrpc":"2.0","error":{"code": -32602,"message":"Invalid params"},"id": %s}'
32 | JSON_RPC_INVALID_PARAMS_FMT_NUM = '{"jsonrpc":"2.0","error":{"code": -32602,"message":"Invalid params"},"id": %d}'
33 | JSON_RPC_INTERNAL_ERROR_FMT_STR = '{"jsonrpc":"2.0","error":{"code": -32603,"message":"Internal error"},"id": %s}'
34 | JSON_RPC_INTERNAL_ERROR_FMT_NUM = '{"jsonrpc":"2.0","error":{"code": -32603,"message":"Internal error"},"id": %d}'
35 | 
36 | # Vehicle object instantiated in the application module
37 | car=None
38 | 
39 | def message_handler(msg, msg_len):
40 |     """
41 |     The generic message handler for the Cometa receive callback.
42 |     Invoked every time the Cometa object receives a JSON-RPC message for this device.
43 |     It returns the JSON-RPC result object to send back to the application that sent the request.
44 |     The rpc_methods tuple contains the mapping of names into functions.
45 |     """
46 |     # pdb.set_trace()
47 |     try:
48 |         req = json.loads(msg)
49 |     except:
50 |         # the message is not a json object
51 |         car.log("Received JSON-RPC invalid message (parse error): %s" % msg, escape=True)
52 |         return JSON_RPC_PARSE_ERROR
53 | 
54 |     # check the message is a proper JSON-RPC message
55 |     ret,id = utils.check_rpc_msg(req)
56 |     if not ret:
57 |         if id and utils.isanumber(id):
58 |             return JSON_RPC_INVALID_PARAMS_FMT_NUM % id
59 |         if id and isinstance(id, str):
60 |             return JSON_RPC_INVALID_PARAMS_FMT_STR % id
61 |         else:
62 |             return JSON_RPC_PARSE_ERROR
63 | 
64 |     car.log("JSON-RPC: %s" % msg, escape=True)
65 | 
66 |     method = req['method']
67 |     func = None
68 |     # check if the method is in the registered list
69 |     for m in rpc_methods:
70 |         if m['name'] == method:
71 |             func = m['function']
72 |             break
73 | 
74 |     if func == None:
75 |         return JSON_RPC_INVALID_REQUEST
76 | 
77 |     # call the method
78 |     try:
79 |         result = func(req['params'])
80 |     except Exception as e:
81 |         print e
82 |         return JSON_RPC_INTERNAL_ERROR_FMT_STR % str(id)
83 | 
84 |     # build the response object
85 |     reply = {}
86 |     reply['jsonrpc'] = "2.0"
87 |     reply['result'] = result
88 |     reply['id'] = req['id']
89 | 
90 |     return json.dumps(reply)
91 | 
92 | # --------------------
93 | #
94 | # RPC Methods
95 | 
96 | def _rexec(params):
97 |     """Start a subprocess shell to execute the specified command and return its output.
98 | 
99 |     params - a one element list ["/bin/cat /etc/hosts"]
100 |     """
101 |     # check that params is a list
102 |     if not isinstance(params, list) or len(params) == 0:
103 |         return "Parameter must be a non-empty list"
104 |     command = params[0]
105 |     try:
106 |         # run the command once and capture its output
107 |         out = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read()
108 |         return '\n' + out.decode()
109 |     except Exception, e:
110 |         print e
111 |         return "{\"msg\":\"Invalid command.\"}"
112 | 
113 | def _video_devices(params):
114 |     """List available video devices (v4l)."""
115 |     vdevices = Runtime.list_camera_devices()
116 |     ret = {}
117 |     ret['devices'] = vdevices[0]
118 |     ret['names'] = vdevices[1]
119 |     return ret
120 | 
121 | def _set_telemetry_period(params):
122 |     """Set telemetry period in seconds.
123 | 
124 |     params - JSON object {'period':5}
125 |     """
126 |     if type(params) is not dict or 'period' not in params.keys():
127 |         return {"success": False}
128 |     if params['period'] <= 0:
129 |         return {"success": False}
130 | 
131 |     car.telemetry_period=params['period']
132 |     return {"success": True}
133 | 
134 | def _get_config(params):
135 |     """ Get configuration object """
136 |     config = Runtime.read_config()
137 |     config['app_params']['verbose']=car.verbose
138 |     config['app_params']['telemetry_period']=car.telemetry_period
139 |     return config
140 | 
141 | def _get_status(params):
142 |     ret = {}
143 |     ret['state'] = car.state
144 |     ret['mode'] = car.mode
145 |     ret['steering'] = car.steering
146 |     ret['throttle'] = car.throttle
147 |     ret['GPS'] = {}
148 |     try:
149 |         ret['GPS']['lat'] = car.readings['lat']
150 |         ret['GPS']['lon'] = car.readings['lon']
151 |     except:
152 |         pass
153 |     return ret
154 | 
155 | def _set_throttle(params):
156 |     """ Set the throttle """
157 |     if type(params) is not dict or 'value' not in params.keys():
158 |         return {"success": False}
159 | 
160 |     val = params['value']
161 |     # only values in the [0, 180] range
162 |     if val < 0 or 180 < val:
163 |         return {"success": False}
164 |     # TODO: call autonomia.set_throttle(val)
165 | 
166 |     car.throttle=val
167 |     return {"success": True}
168 | 
169 | def _set_steering(params):
170 |     """ Set the steering """
171 |     if type(params) is not dict or 'value' not in params.keys():
172 |         return {"success": False}
173 | 
174 |     val = params['value']
175 |     # only values in the [0, 180] range
176 |     if val < 0 or 180 < val:
177 |         return {"success": False}
178 |     #car.cur_steering = val
179 |     car.steering=val
180 |     return {"success": True}
181 | 
182 | def _set_mode(params):
183 |     """ Set the vehicle running mode """
184 |     if type(params) is not dict or 'value' not in params.keys():
185 |         return {"success": False}
186 | 
187 |     val = params['value']
188 |     if val not in ("AUTO","TRAINING","REMOTE"):
189 |         return {"success": False}
190 | 
191 |     if val == "AUTO":
192 |         car.mode2auto()
193 |     elif val == "TRAINING":
194 |         car.mode2training()
195 |     elif val == "REMOTE":
196 |         car.mode2remote()
197 |     return {"success": True}
198 | 
199 | def _stop(params):
200 |     car.state2idle()
201 |     return {"success": True}
202 | 
203 | def _start(params):
204 |     car.state2run()
205 |     return {"success": True}
206 | 
207 | def _video_stop(params):
208 |     streamer.video_stop()
209 |     return {"success": True}
210 | 
211 | def _video_start(params):
212 |     if type(params) is not dict or 'telem' not in params.keys():
213 |         return {"success": False}
214 |     #pdb.set_trace()
215 | 
216 |     telem = params['telem']
217 |     streamer.video_start(telem)
218 |     return {"success": True}
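# ----------------------------------------------------------------------
# Illustrative example (added comment, not in the original source): a
# JSON-RPC exchange as dispatched by message_handler() above, using the
# set_steering method registered in the rpc_methods tuple below.
#
#   request:  {"jsonrpc":"2.0","method":"set_steering","params":{"value":95},"id":3}
#   reply:    {"jsonrpc":"2.0","result":{"success":true},"id":3}
# ----------------------------------------------------------------------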
219 | 
220 | def _load_model(params):
221 |     if type(params) is not dict or 'path' not in params.keys():
222 |         return {"success": False}
223 |     ret = car.load_model(params['path'])
224 |     if ret:
225 |         return {"success": True}
226 |     else:
227 |         return {"success": False}
228 | 
229 | 
230 | global rpc_methods
231 | rpc_methods = ({'name':'rexec','function':_rexec},
232 |                {'name':'video_devices','function':_video_devices},
233 |                {'name':'set_telemetry_period','function':_set_telemetry_period},
234 |                {'name':'get_config','function':_get_config},
235 |                {'name':'get_status','function':_get_status},
236 |                {'name':'set_throttle','function':_set_throttle},
237 |                {'name':'set_steering','function':_set_steering},
238 |                {'name':'set_mode','function':_set_mode},
239 |                {'name':'stop','function':_stop},
240 |                {'name':'start','function':_start},
241 |                {'name':'video_start','function':_video_start},
242 |                {'name':'video_stop','function':_video_stop},
243 |                {'name':'load_model','function':_load_model},
244 |               )
245 | 
--------------------------------------------------------------------------------
/Arduino/firmware/firmware.ino:
--------------------------------------------------------------------------------
1 | /*
2 |  * Firmware for Autonomia low-level Arduino controller.
3 |  *
4 |  * The low-level controller intercepts the throttle and servo values from the RC receiver and sends them to the host.
5 |  * From the host it receives the PWM settings for throttle and servo.
6 |  *
7 |  * Copyright (C) 2016 Visible Energy, Inc.
8 |  *
9 |  * Licensed under the Apache License, Version 2.0 (the "License");
10 |  * you may not use this file except in compliance with the License.
11 |  * You may obtain a copy of the License at
12 |  *
13 |  *    http://www.apache.org/licenses/LICENSE-2.0
14 |  *
15 |  * Unless required by applicable law or agreed to in writing, software
16 |  * distributed under the License is distributed on an "AS IS" BASIS,
17 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 |  * See the License for the specific language governing permissions and
19 |  * limitations under the License.
20 |  */
21 | 
22 | #include <Servo.h>
23 | #include <EnableInterrupt.h>
24 | 
25 | // Pins mapping
26 | const int THROTTLE_PIN = 4;
27 | const int STEERING_PIN = 5;
28 | const int MOTOR_PIN = 10;
29 | const int SERVO_PIN = 11;
30 | const int LED_PIN = 13; // the on-board L LED
31 | 
32 | // Globals
33 | bool debug = false;
34 | volatile uint8_t updateFlagsShared;
35 | uint8_t updateFlags;
36 | const int THROTTLE_FLAG = 1;
37 | const int STEERING_FLAG = 2;
38 | 
39 | uint32_t throttleStart;
40 | uint32_t steeringStart;
41 | volatile uint16_t throttleInShared;
42 | volatile uint16_t steeringInShared;
43 | 
44 | // Actual throttle range is [980, 1980] === [0, 176]
45 | uint16_t throttleIn = 1500;
46 | // Actual steering range is [1000, 1984] === [0, 177] -- TODO: do we need calibration?
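// ----------------------------------------------------------------------
// Illustrative note (added comment, not in the original source): the
// scaled ranges above follow from microseconds2PWM() below, e.g. a
// neutral 1500 us pulse maps to int((1500 - 1000) * 0.18 + 0.5) = 90,
// the servo mid-point, and a 1980 us pulse to int(980 * 0.18 + 0.5) = 176.
// ----------------------------------------------------------------------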
47 | uint16_t steeringIn = 1500;
48 | 
49 | // Motor limits -- TODO: not used
50 | const int MOTOR_MAX = 120;
51 | const int MOTOR_MIN = 40;
52 | const int MOTOR_NEUTRAL = 90;
53 | 
54 | // Steering limits -- TODO: not used
55 | const int D_THETA_MAX = 30;
56 | const int THETA_CENTER = 90;
57 | const int THETA_MAX = THETA_CENTER + D_THETA_MAX;
58 | const int THETA_MIN = THETA_CENTER - D_THETA_MAX;
59 | 
60 | // Interfaces to motor and steering actuators
61 | Servo motor;
62 | Servo steering;
63 | 
64 | String inputLine = "";
65 | bool rawOutput = false;
66 | bool isConnected = true;
67 | unsigned long lastHeartbeat = 0;
68 | 
69 | void initActuators() {
70 |   motor.attach(MOTOR_PIN);
71 |   steering.attach(SERVO_PIN);
72 | }
73 | 
74 | void armActuators() {
75 |   motor.write(MOTOR_NEUTRAL);
76 |   steering.write(THETA_CENTER);
77 |   delay(1000);
78 | }
79 | 
80 | // RC steering input interrupt service routine
81 | void steeringISR() {
82 |   if(digitalRead(STEERING_PIN) == HIGH) {
83 |     steeringStart = micros();
84 |   } else {
85 |     steeringInShared = (uint16_t)(micros() - steeringStart);
86 |     updateFlagsShared |= STEERING_FLAG;
87 |   }
88 | }
89 | 
90 | // RC throttle input interrupt service routine
91 | void throttleISR() {
92 |   if(digitalRead(THROTTLE_PIN) == HIGH) {
93 |     // rising edge of the signal pulse, start timing
94 |     throttleStart = micros();
95 |   } else {
96 |     // falling edge, calculate duration of throttle pulse
97 |     throttleInShared = (uint16_t)(micros() - throttleStart);
98 |     // set the throttle flag to indicate that a new signal has been received
99 |     updateFlagsShared |= THROTTLE_FLAG;
100 |   }
101 | }
102 | 
103 | void initRCInput() {
104 |   pinMode(THROTTLE_PIN, INPUT_PULLUP);
105 |   pinMode(STEERING_PIN, INPUT_PULLUP);
106 |   pinMode(LED_PIN, OUTPUT);
107 |   digitalWrite(LED_PIN, LOW);
108 |   enableInterrupt(THROTTLE_PIN, throttleISR, CHANGE);
109 |   enableInterrupt(STEERING_PIN, steeringISR, CHANGE);
110 | }
111 | 
112 | // Handle inputs from RC
113 | void readAndCopyInputs() {
114 |   // check global update flags to see if any channels have a new signal
115 |   if (updateFlagsShared) {
116 |     noInterrupts();
117 |     // make local copies
118 |     updateFlags = updateFlagsShared;
119 |     if(updateFlags & THROTTLE_FLAG) {
120 |       throttleIn = throttleInShared;
121 |     }
122 |     if(updateFlags & STEERING_FLAG) {
123 |       steeringIn = steeringInShared;
124 |     }
125 |     // clear shared update flags and enable interrupts
126 |     updateFlagsShared = 0;
127 |     interrupts();
128 |   }
129 | }
130 | 
131 | // Scale RC pulses from 1000 - 2000 microseconds to 0 - 180 PWM angles
132 | uint8_t microseconds2PWM(uint16_t microseconds) {
133 |   if (microseconds < 1000)
134 |     microseconds = 1000;
135 | 
136 |   microseconds -= 1000;
137 |   uint16_t pwm = int(microseconds * .180 + .5);
138 | 
139 |   if (pwm < 0)
140 |     pwm = 0;
141 |   if (pwm > 180)
142 |     pwm = 180;
143 |   return static_cast<uint8_t>(pwm);
144 | }
145 | 
146 | /*
147 |   Parse inputLine received from host
148 | 
149 |   M [val] -- set motor to val
150 |   S [val] -- set servo to val
151 |   H -- heartbeat
152 |   R -- serial output raw
153 |   V -- serial output in range [0, 180] (default)
154 | 
155 |   Change parameters of servo and motor values accordingly
156 |   return true if steering or throttle values have changed
157 | */
158 | bool cmdParse(uint8_t *rc_outputs_steering, uint8_t *rc_outputs_throttle) {
159 |   String val;
160 |   if (inputLine.length() < 1)
161 |     return false;
162 | 
163 |   int commandCode = inputLine[0];
164 |   bool ret = false;
165 | 
166 |   Serial.println(inputLine);
167 |   switch (commandCode) {
168 | case 'R': 169 | rawOutput = true; 170 | break; 171 | case 'V': 172 | rawOutput = false; 173 | break; 174 | case 'M': 175 | val = inputLine.substring(1); 176 | *rc_outputs_throttle = constrain(val.toInt(), 0, 180); 177 | ret = true; 178 | break; 179 | case 'S': 180 | val = inputLine.substring(1); 181 | *rc_outputs_steering = constrain(val.toInt(), 0, 180); 182 | ret = true; 183 | break; 184 | } 185 | 186 | // treat each command as a heartbeat 187 | lastHeartbeat = millis(); 188 | return ret; 189 | } 190 | 191 | void setup() { 192 | inputLine.reserve(128); 193 | 194 | initRCInput(); 195 | initActuators(); 196 | 197 | armActuators(); 198 | Serial.begin(38400); //57600); // 38400); 199 | } 200 | 201 | void loop() { 202 | static uint8_t rc_outputs_steering = THETA_CENTER; 203 | static uint8_t rc_outputs_throttle = MOTOR_NEUTRAL; 204 | static unsigned long dt; 205 | static unsigned long t0; 206 | static uint8_t last_steeringIn; 207 | static uint8_t last_throttleIn; 208 | unsigned long now; 209 | uint8_t rc_inputs_steering; 210 | uint8_t rc_inputs_throttle; 211 | 212 | // check for connected flag 213 | if (!isConnected) 214 | return; 215 | 216 | // compute time elapsed from last loop 217 | now = millis(); 218 | dt = now - t0; 219 | 220 | // handle inputs from radio receiver every 50 msec 221 | if (dt > 50) { 222 | readAndCopyInputs(); 223 | 224 | // RC inputs scaled to [0, 180] range 225 | rc_inputs_throttle = microseconds2PWM(throttleIn); 226 | rc_inputs_steering = microseconds2PWM(steeringIn); 227 | 228 | // send readings to the host only when changed 229 | if ((0 < abs(last_throttleIn - throttleIn)) || (0< abs(last_steeringIn - steeringIn))) { 230 | // send readings to the host 231 | if (rawOutput) { 232 | Serial.print(throttleIn); 233 | Serial.print(" "); 234 | Serial.println(steeringIn); 235 | } else { 236 | Serial.print(rc_inputs_throttle); 237 | Serial.print(" "); 238 | Serial.println(rc_inputs_steering); 239 | } 240 | } 241 | t0 = millis(); 242 | } 243 | 244 | // handle input from host 245 | while (Serial.available()) { 246 | char ch = (char)Serial.read(); 247 | if (ch == '\n') { 248 | if (cmdParse(&rc_outputs_steering, &rc_outputs_throttle)) { 249 | if (debug) { 250 | Serial.print("****"); 251 | Serial.print(rc_outputs_steering); 252 | Serial.println(rc_outputs_throttle); 253 | } 254 | // output values have changed 255 | motor.write(rc_outputs_throttle); 256 | delay(15); 257 | steering.write(rc_outputs_steering); 258 | delay(15); 259 | } 260 | inputLine = ""; 261 | } else 262 | inputLine += ch; 263 | } 264 | 265 | /* DEBUG 266 | // check connection to host and stop the car if the heartbeat has not been received for a second 267 | if (lastHeartbeat + 1000 < now) { 268 | 269 | rc_outputs_steering = THETA_CENTER; 270 | rc_outputs_throttle = MOTOR_NEUTRAL; 271 | motor.write(rc_outputs_throttle); 272 | steering.write(rc_outputs_steering); 273 | isConnected = false; 274 | } 275 | */ 276 | } // loop 277 | 278 | -------------------------------------------------------------------------------- /runtime.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Low-level Board run-time support for Cometa IoT devices. 4 | 5 | Author: Marco Graziano (marco@visiblenergy.com) 6 | """ 7 | __license__ = """ 8 | Copyright 2016 Visible Energy Inc. All Rights Reserved. 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 
11 | You may obtain a copy of the License at 12 | http://www.apache.org/licenses/LICENSE-2.0 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | """ 19 | 20 | __all__ = ["Runtime"] 21 | 22 | import time 23 | import threading 24 | import json 25 | import subprocess 26 | import os 27 | #from uuid import getnode as get_mac 28 | 29 | # Default values 30 | DCT_FILENAME = 'config.json' 31 | 32 | import socket, fcntl, struct 33 | def get_mac(): 34 | ifname = 'eth0' 35 | s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 36 | info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15])) 37 | return ''.join(['%02X' % ord(char) for char in info[18:24]]) 38 | 39 | # This module uses the following enviromental variables: 40 | # COMETA_APIKEY 41 | # COMETA_SERVER 42 | # COMETA_PORT 43 | 44 | #@runtimeclass 45 | class Runtime(object): 46 | """ 47 | Run-time support for Cometa IoT Devices. 48 | Note: the class decorator method runs after the class is created, 49 | """ 50 | 51 | # Internal system time - resolution 1Hz 52 | __systime = 0 53 | __has_systime = True 54 | 55 | # System status 56 | __status = "unknown" 57 | 58 | # Thread IDs 59 | __thtime = None # Update __systime thread 60 | 61 | def __init__(self): 62 | """ 63 | The Runtime object instance constructor. 64 | """ 65 | pass 66 | 67 | # Complete class initialization after loading. 68 | @classmethod 69 | def init_runtime(klass, systime=True): 70 | if not systime: 71 | # start the update systime thread for runtimes that don't have built in timer 72 | if klass.__thtime == None: 73 | klass.__thtime = threading.Thread(target=klass._update_systime) 74 | klass.__thtime.daemon = True # force to exit on SIGINT 75 | klass.__thtime.start() 76 | klass.__has_systime = systime 77 | klass.__status = "OK" 78 | # 79 | #----------------------------------------------- 80 | # 81 | # Update systime every second (thread) 82 | @classmethod 83 | def _update_systime(klass): 84 | """ 85 | Thread to update the system time every second. (private) 86 | """ 87 | while True: 88 | time.sleep(1) 89 | klass.__systime += 1 # no need for lock 90 | 91 | # Force systime to a new value 92 | @classmethod 93 | def set_systime(klass, time): 94 | """ 95 | Set the system time to a new value. 
96 | """ 97 | klass.__systime = time 98 | 99 | # Get current systime 100 | @classmethod 101 | def get_systime(klass): 102 | if klass.__has_systime: 103 | return int(time.time()) 104 | else: 105 | return klass.__systime 106 | 107 | # Get host systime 108 | @classmethod 109 | def get_hostsystime(klass): 110 | return int(time.time()) 111 | 112 | # Set current status 113 | @classmethod 114 | def set_status(klass, new_status): 115 | klass.__status = new_status 116 | 117 | # Get current status 118 | @classmethod 119 | def get_status(klass): 120 | return klass.__status 121 | # 122 | #----------------------------------------------- 123 | # 124 | 125 | # 126 | #----------------------------------------------- 127 | # 128 | # Read config from DCT (Device Configuration Table simulated in a file) 129 | @classmethod 130 | def read_config(klass): 131 | # read the configuration from the DCT 132 | try: 133 | f = open(DCT_FILENAME) 134 | content = json.loads(f.read())['config'] 135 | 136 | # get Cometa API Key and Server name from the environment 137 | if not 'app_key' in content['cometa']: 138 | content['cometa']['app_key'] = os.environ['COMETA_APIKEY'] 139 | if not 'server' in content['cometa']: 140 | content['cometa']['server'] = os.environ['COMETA_SERVER'] 141 | if not 'port' in content['cometa']: 142 | content['cometa']['port'] = int(os.environ['COMETA_PORT']) 143 | return content 144 | except Exception as e: 145 | print "Error in reading file %s" % DCT_FILENAME 146 | print e 147 | return None 148 | 149 | # Read all DCT 150 | @classmethod 151 | def read_dct(klass): 152 | # read the DCT 153 | try: 154 | f = open(DCT_FILENAME) 155 | #content = f.read().replace(u'\n', u'').replace(u'\r', u'') 156 | content = json.loads(f.read().replace('\n', '').replace('\r', '')) 157 | # get Cometa API Key and Server name from the environment 158 | if not 'app_key' in content['cometa']: 159 | content['cometa']['app_key'] = os.environ['COMETA_APIKEY'] 160 | if not 'server' in content['cometa']: 161 | content['cometa']['server'] = os.environ['COMETA_SERVER'] 162 | if not 'port' in content['cometa']: 163 | content['cometa']['port'] = int(os.environ['COMETA_PORT']) 164 | return content 165 | except Exception as e: 166 | return "" 167 | 168 | # Get device serial number 169 | # in Linux hosts is the least six digits of the MAC address 170 | @classmethod 171 | def get_serial(klass): 172 | """ 173 | Return an hex string with the current network interface MAC address. 174 | """ 175 | # TODO: testing only 176 | #return 'A7A7A8' 177 | 178 | mac = '' 179 | m = get_mac() 180 | return m 181 | # used with uuid 182 | # for i in range(0, 12, 2): 183 | # #for i in range(6, 12, 2): 184 | # mac += ("%012X" % m)[i:i+2] 185 | # return mac 186 | 187 | # Get MAC address of current network interface 188 | @classmethod 189 | def get_mac_address(klass): 190 | """ 191 | Return an hex string with the current network interface MAC address. 
192 | """ 193 | # TODO: testing only 194 | #return 'A7A7A8' 195 | 196 | return get_mac() 197 | 198 | # Get info of the current network interface 199 | @classmethod 200 | def get_network_info(klass): 201 | # TODO: testing only 202 | #return {'gateway': 'N/A', 'ip_address': 'N/A'} 203 | 204 | # get network info 205 | s = subprocess.Popen('ip route', shell=True, stdout=subprocess.PIPE).stdout.read() 206 | reply = {} 207 | reply['gateway'] = s.split('default via')[-1].split()[0] 208 | reply['ip_address'] = s.split('src')[-1].split()[0] 209 | return reply 210 | 211 | # Simple system logger 212 | @classmethod 213 | def syslog(klass, msg, escape=False): 214 | """ 215 | Simple system logger. 216 | """ 217 | if escape: 218 | #print ("[%d] %s" % (klass.__systime, msg.replace(u'\n', u'#015').replace(u'\r', u'#012'))) 219 | print ("[%d] %s" % (klass.get_systime(), msg.replace('\n', '#015').replace('\r', '#012'))) 220 | else: 221 | print ("[%d] %s" % (klass.get_systime(), msg)) 222 | 223 | # Get list of camera devices 224 | @classmethod 225 | def list_camera_devices(klass): 226 | """ 227 | List all video devices and their names 228 | """ 229 | videodevs = ["/dev/" + x for x in os.listdir("/dev/") if x.startswith("video") ] 230 | 231 | # Do ioctl dance to extract the name of the device 232 | import fcntl 233 | _IOC_NRBITS = 8 234 | _IOC_TYPEBITS = 8 235 | _IOC_SIZEBITS = 14 236 | _IOC_DIRBITS = 2 237 | 238 | _IOC_NRSHIFT = 0 239 | _IOC_TYPESHIFT =(_IOC_NRSHIFT+_IOC_NRBITS) 240 | _IOC_SIZESHIFT =(_IOC_TYPESHIFT+_IOC_TYPEBITS) 241 | _IOC_DIRSHIFT =(_IOC_SIZESHIFT+_IOC_SIZEBITS) 242 | 243 | _IOC_NONE = 0 244 | _IOC_WRITE = 1 245 | _IOC_READ = 2 246 | def _IOC(direction,type,nr,size): 247 | return (((direction) << _IOC_DIRSHIFT) | 248 | ((type) << _IOC_TYPESHIFT) | 249 | ((nr) << _IOC_NRSHIFT) | 250 | ((size) << _IOC_SIZESHIFT)) 251 | def _IOR(type, number, size): 252 | return _IOC(_IOC_READ, type, number, size) 253 | def _IOW(type, number, size): 254 | return _IOC(_IOC_WRITE, type, number, size) 255 | 256 | sizeof_struct_v4l2_capability = (16 + 32 + 32 + 4 + 4 + 16) 257 | VIDIOC_QUERYCAP = _IOR(ord('V'), 0, sizeof_struct_v4l2_capability) 258 | 259 | import array 260 | import struct 261 | emptybuf = " " * (16 + 32 + 32 + 4 + 4 + 16) # sizeof(struct v4l2_capability) 262 | buf = array.array('c', emptybuf) 263 | cameranames = [] 264 | for dev in videodevs: 265 | camera_dev = open(dev, "rw") 266 | camera_fd = camera_dev.fileno() 267 | fcntl.ioctl(camera_fd, VIDIOC_QUERYCAP, buf, 1) 268 | cameranames.append(buf[16:48].tostring()) 269 | # bus_info = buf[48:80].tostring() 270 | camera_dev.close() 271 | 272 | return [videodevs, cameranames] 273 | -------------------------------------------------------------------------------- /cometalib.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Emile Camus 3 | """ 4 | __license__ = """ 5 | Copyright 2015 Visible Energy Inc. All Rights Reserved. 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | """ 16 | __all__ = ["CometaClient"] 17 | 18 | import socket 19 | import select 20 | import time 21 | import threading 22 | import ssl 23 | # From http-parser (0.8.3) 24 | # pip install http-parser 25 | from http_parser.parser import HttpParser 26 | import pdb 27 | 28 | class CometaClient(object): 29 | """Connect a device to the Cometa infrastructure""" 30 | errors = {0:'ok', 1:'timeout', 2:'network error', 3:'protocol error', 4:'authorization error', 5:'wrong parameters', 9:'internal error'} 31 | 32 | def __init__(self,server, port, application_id, use_ssl, logger): 33 | """ 34 | The Cometa instance constructor. 35 | 36 | server: the Cometa server FQDN 37 | port: the Cometa server port 38 | application_id: the Cometa application ID 39 | """ 40 | self.error = 9 41 | self.debug = False 42 | 43 | self._server = server 44 | self._port = port 45 | self._app_id = application_id 46 | self._use_ssl = use_ssl 47 | self._message_cb = None 48 | 49 | self._device_id = "" 50 | self._platform = "" 51 | self._hparser = None 52 | self._sock = None #socket.socket(socket.AF_INET, socket.SOCK_STREAM) 53 | self._heartbeat_rate = 60 54 | self._trecv = None 55 | self._thbeat = None 56 | self._hb_lock = threading.Lock() 57 | self._reconnecting = False 58 | self.log = logger 59 | return 60 | 61 | def attach(self, device_id, device_info): 62 | """ 63 | Attach the specified device to a Cometa registered application. 64 | Authentication is done using only the application_id (one-way authentication). 65 | 66 | device_id: the device unique identifier 67 | device_info: a description of the platform or the device (used only as a comment) 68 | """ 69 | self._device_id = device_id 70 | self._platform = device_info 71 | self._hparser = HttpParser() 72 | tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 73 | if self._use_ssl: 74 | self._sock = ssl.wrap_socket(tsock, ssl_version=ssl.PROTOCOL_SSLv23, ciphers="AES256-GCM-SHA384") 75 | else: 76 | self._sock = tsock 77 | try: 78 | self._sock.connect((self._server, self._port)) 79 | sendBuf="POST /v1/applications/%s/devices/%s HTTP/1.1\r\nHost: api.cometa.io\r\nContent-Length:%d\r\n\r\n%s" % (self._app_id,device_id,len(device_info),device_info) 80 | self._sock.send(sendBuf) 81 | recvBuf = "" 82 | while True: 83 | data = self._sock.recv(1024) 84 | if not data: 85 | break 86 | 87 | dataLen = len(data) 88 | nparsed = self._hparser.execute(data, dataLen) 89 | assert nparsed == dataLen 90 | 91 | if self._hparser.is_headers_complete(): 92 | if self.debug: 93 | print "connection for device %s headers received" % (device_id) 94 | print self._hparser.get_headers() 95 | 96 | if self._hparser.is_partial_body(): 97 | recvBuf = self._hparser.recv_body() 98 | if self.debug: 99 | print "connection for device %s body received" % (device_id) 100 | print recvBuf 101 | #TODO: check for error in connecting, i.e. 403 already connected 102 | 103 | # reading the attach complete message from the server 104 | # i.e. 
{"msg":"200 OK","heartbeat":60,"timestamp":1441382935} 105 | if len(recvBuf) < 16 or recvBuf[1:12] != '"msg":"200"': 106 | self.error = 5 107 | print "Error in string from server; %s" % recvBuf 108 | return recvBuf 109 | 110 | # reset error 111 | self.error = 0 112 | 113 | # set the socket non blocking 114 | self._sock.setblocking(0) 115 | 116 | # do not (re)start the threads during a reconnection 117 | if self._reconnecting: 118 | self._reconnecting = False 119 | return recvBuf 120 | 121 | if self.debug: 122 | print "connection for device %s completed" % (device_id) 123 | # start the hearbeat thread 124 | self._thbeat = threading.Thread(target=self._heartbeat) 125 | self._thbeat.daemon = True 126 | self._thbeat.start() 127 | 128 | # start the receive thread 129 | #time.sleep(2) 130 | self._trecv = threading.Thread(target=self._receive) 131 | self._trecv.daemon = True # force to exit on SIGINT 132 | self._trecv.start() 133 | 134 | 135 | return recvBuf 136 | except Exception, e: 137 | print e 138 | self.error = 2 139 | return 140 | 141 | def send_data(self, msg): 142 | """ 143 | Send a data event message upstream to the Cometa server. 144 | If a Webhook is specified for the Application in the Cometa configuration file /etc/cometa.conf on the server, 145 | the message is relayed to the Webhook. Also, the Cometa server propagates the message to all open devices Websockets. 146 | """ 147 | sendBuf = "%x\r\n%c%s\r\n" % (len(msg) + 1,'\07',msg) 148 | if self._reconnecting: 149 | if self.debug: 150 | print "Error in Cometa.send_data(): device is reconnecting." 151 | return -1 152 | try: 153 | self._hb_lock.acquire() 154 | self._sock.send(sendBuf) 155 | self._hb_lock.release() 156 | except Exception, e: 157 | if self.debug: 158 | print "Error in Cometa.send_data(): socket write failed." 159 | return -1 160 | return 0 161 | 162 | def bind_cb(self, message_cb): 163 | """ 164 | Binds the specified user callback to the Cometa instance. 165 | """ 166 | self._message_cb = message_cb 167 | return 168 | 169 | def perror(self): 170 | """ 171 | Return a string for the current error. 172 | """ 173 | return CometaClient.errors[self.error] 174 | 175 | def _heartbeat(self): 176 | """ 177 | The heartbeat thread. 178 | The hearbeat message is a chunk of length 3 with the MSG_HEARBEAT byte and closed with CRLF. 179 | This thread detects a server disconnection and attempts to reconnect to the Cometa server. 180 | """ 181 | if self.debug: 182 | print "Hearbeat thread started.\r" 183 | 184 | while True: 185 | time.sleep(self._heartbeat_rate) 186 | if self._reconnecting: 187 | print "--- heartbeat while reconnecting" 188 | continue 189 | sendBuf = "1\r\n%c\r\n" % '\06' 190 | self.log("sending heartbeat") 191 | try: 192 | self._hb_lock.acquire() 193 | self._sock.send(sendBuf) 194 | self._hb_lock.release() 195 | except Exception, e: 196 | print "--- error sending heartbeat" 197 | return 198 | 199 | def _receive(self): 200 | """ 201 | The receive and user callback dispatch loop thread. 202 | """ 203 | if self.debug: 204 | print "Receive thread started.\r" 205 | while True: 206 | ready_to_read, ready_to_write, in_error = select.select([self._sock.fileno()],[],[self._sock.fileno()], 15) 207 | 208 | # check for timeout 209 | if not (ready_to_read or ready_to_write or in_error): 210 | continue 211 | 212 | for i in in_error: 213 | # handle errors as disconnections and try to reconnect to the server 214 | print "Network error in receive loop (error). Reconnecting..." 
215 |                 self._sock.close()
216 |                 # self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
217 |                 self._reconnecting = True
218 |                 ret = self.attach(self._device_id, self._platform)
219 |                 if self.error != 0:
220 |                     print "Error in attaching to Cometa.", self.perror()
221 |                     time.sleep(15)
222 |                     continue
223 |                 else:
224 |                     print "Device attached to Cometa.", ret
225 |                     continue
226 | 
227 |             data = None
228 |             for i in ready_to_read:
229 |                 try:
230 |                     data = self._sock.recv(1024)
231 |                 except Exception, e:
232 |                     print e
233 |                     pass
234 | 
235 |             if not data:
236 |                 if self._use_ssl:
237 |                     # ssl read may return no data
238 |                     continue
239 | 
240 |                 # handle errors as disconnections and try to reconnect to the server
241 |                 print "Network error in receive loop (no data). Reconnecting..."
242 |                 try:
243 |                     self._sock.close()
244 |                 except Exception, e:
245 |                     print "--- exception in close socket."
246 |                     pass
247 | 
248 |                 # self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
249 |                 self._reconnecting = True
250 |                 ret = self.attach(self._device_id, self._platform)
251 |                 if self.error != 0:
252 |                     print "Error in attaching to Cometa.", self.perror()
253 |                     time.sleep(15)
254 |                     continue
255 |                 else:
256 |                     print "Device attached to Cometa.", ret
257 |                     continue
258 | 
259 |             if self.debug:
260 |                 print "** received: %s (%d)" % (data, len(data))
261 |             self._hparser.execute(data, len(data))
262 |             if self._hparser.is_partial_body():
263 |                 to_send = self._hparser.recv_body()
264 |                 # pdb.set_trace()
265 |                 # the payload contains a HTTP chunk
266 |                 if self._message_cb:
267 |                     # invoke the user callback
268 |                     reply = self._message_cb(to_send, len(to_send))
269 |                 else:
270 |                     reply = ""
271 |                 if self.debug:
272 |                     print "After callback."
273 |             else:
274 |                 continue
275 | 
276 |             if self.debug:
277 |                 print "Returning result."
278 |             sendBuf = "%x\r\n%s\r\n" % (len(reply),reply)
279 |             try:
280 |                 self._hb_lock.acquire()
281 |                 self._sock.send(sendBuf)
282 |                 self._hb_lock.release()
283 |             except Exception, e:
284 |                 print "--- error sending reply"
285 |                 pass
286 | 
287 |         msg = ""
288 | 
--------------------------------------------------------------------------------
/docs/remote-api.md:
--------------------------------------------------------------------------------
1 | # Cloud API
2 | 
3 | Once the Autonomia (application.py) application is started on the RPI in the vehicle, it connects to the Cometa middleware server, the message broker component of the Autonomia Cloud API. The application uses the `app_key` application key specified in the `config.json` file as its credentials. The connection is kept open permanently and used for full-duplex communication with the vehicle through the Cometa server, that is, both for telemetry (device2cloud) and remote control (cloud2device).
4 | 
5 | Since the connection is initiated from within a NAT or a firewall on standard port 443, which is typically open for outgoing traffic, the vehicle becomes accessible from an application using the `Autonomia` Cloud API without exposing or even knowing its public IP address. We regularly use a 4G/LTE hotspot for the connection from the Oakland warehouse.
6 | 
7 | An application that intends to communicate with an Autonomia vehicle sends `JSON-RPC` requests through the Cometa cloud server `send` method.
8 | 
9 | >To use the Cometa API and the `send` method, an `APPLICATION_KEY` and a `COMETA_SECRET` are needed as credentials for authentication.
Users of the Cometa Robotics cloud service can create applications and manage their vehicles in the cloud, as well as develop applications using the API provided by `cometa-dronekit`.
10 | 
11 | >Public availability of the Autonomia Cloud API service is planned for 3Q 2017. Send an email to info@autonomia.io to request early access.
12 | 
13 | #### Cometa Authentication
14 | An application is authenticated by including an Authorization HTTPS request header in every request.
15 | 
16 | | HTTP HEADER | DESCRIPTION | VALUE |
17 | -------------------------------|-------------------------|-------------
18 | | `Authorization` | authorization token | OAuth {`COMETA_SECRET`}
19 | 
20 | Example:
21 | 
22 | `Authorization: OAuth b4f51e1e6125dca873e5`
23 | 
24 | ### Send RPC Message
25 | 
26 | Send a JSON-RPC message to a vehicle.
27 | 
28 | ```
29 | POST /v1/applications/{APPLICATION_KEY}/devices/{DEVICE_ID}/send
30 | ```
31 | The message containing the JSON-RPC request is in the POST `body`.
32 | 
33 | | URL PARAMETER | DESCRIPTION | TYPE |
34 | ------------------|----------------------------------|-------------------------------------
35 | | `APPLICATION_KEY` | Cometa Application Key | String |
36 | | `DEVICE_ID` | Device Cometa Id | String |
37 | 
38 | The `DEVICE_ID` is the vehicle ID provided by `cometa-dronekit` when a vehicle is connected to a Cometa server. **The default value is the vehicle's MAC address.**
39 | 
40 | The `Autonomia` vehicle agent always replies to a JSON-RPC message with a JSON response.
41 | 
42 | Example:
43 | ```
44 | $ curl -X POST -H 'Authorization: OAuth a724dc4811d507688' -H 'Content-type: application/json' \
45 |     -d '{"jsonrpc":"2.0", "method":"video_start","params":{"telem":true},"id":7}' \
46 |     https://vederly.cometa.io/v1/applications/a94660d971eca2879/devices/e984060007/send
47 | 
48 | {"jsonrpc": "2.0", "result": {"success": true}, "id": 7}
49 | ```
50 | ### WebSockets Endpoint
51 | 
52 | The Cometa server exposes a `WebSockets` endpoint for each vehicle connected to it. A vehicle WebSocket can be opened only once. To obtain an endpoint, an application must request a new WebSockets `DEVICE_KEY` every time one is needed, using the following HTTPS method:
53 | 
54 | ```
55 | GET /applications/{APPLICATION_KEY}/devices/{DEVICE_ID}/websocket
56 | ```
57 | 
58 | | URL PARAMETER | DESCRIPTION | TYPE |
59 | ------------------|----------------------------------|-------------------------------------
60 | | `APPLICATION_KEY` | Cometa Application Key | String |
61 | | `DEVICE_ID` | Device Cometa Id | String |
62 | 
63 | The method returns a `DEVICE_KEY` that is used to obtain the vehicle's WebSocket endpoint as follows:
64 | 
65 | `wss://{COMETA_HOST}:{PORT}/websockets/{DEVICE_ID}/{DEVICE_KEY}`
66 | 
67 | 
68 | Example:
69 | ```
70 | $ curl -H 'Authorization: OAuth a724dc4811d507688' -H 'Content-type: application/json' \
71 |     https://vederly.cometa.io/v1/applications/a94660d971eca2879/devices/e984060007/websocket
72 | 
73 | {
74 |    "device_id":"e984060007",
75 |    "device_key":"dc670dae876ee4f919de9e777c9bd98a5e182cd8"
76 | }
77 | ```
78 | WebSocket endpoint (one-time use only):
79 | 
80 |     wss://vederly.cometa.io/v1/websockets/e984060007/dc670dae876ee4f919de9e777c9bd98a5e182cd8
81 | 
82 | Opening a device `WebSocket` would fail if the vehicle is not connected. Upon vehicle disconnection, the `WebSocket` is closed by the server after the inactivity timeout period. A minimal client sketch is shown below.
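As an illustration (not part of the original docs), telemetry can be read from a vehicle endpoint with the third-party `websocket-client` Python package; the URL is the example one-time endpoint obtained above:
```
# Minimal sketch, assuming the third-party websocket-client package
# is installed (pip install websocket-client).
import json
import websocket

url = "wss://vederly.cometa.io/v1/websockets/e984060007/dc670dae876ee4f919de9e777c9bd98a5e182cd8"
ws = websocket.create_connection(url)
try:
    while True:
        # telemetry messages and JSON-RPC responses arrive on the same socket
        print json.loads(ws.recv())
finally:
    ws.close()
```
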
**Immediately after opening a WebSocket, and without any other request, an application starts receiving telemetry messages at the period indicated in the `config.json` file.**
83 | 
84 | >On an open WebSocket an application receives both telemetry messages, without requesting them, and responses to JSON-RPC requests.
85 | 
86 | `WebSockets` are asynchronous, full-duplex channels to exchange messages directly between an application and a remote vehicle running the `Autonomia` application. A WebSocket `send()` relays the message to the vehicle the same way as an HTTPS `send`. From the vehicle's standpoint, messages are received the same way regardless of the method used by an application, that is, the `WebSockets` method `send()` or a Cometa HTTPS `send`.
87 | 
88 | > Before sending a message to a WebSocket an application should always check its `readyState` attribute to verify the connection is open and ready to communicate (`readyState === WS_OPEN`).
89 | 
90 | ### Connected Vehicles
91 | 
92 | Get a list of vehicles connected to the Cometa server.
93 | ```
94 | GET /v1/applications/{APPLICATION_KEY}/devices
95 | ```
96 | 
97 | 
98 | 
99 | | URL PARAMETER | DESCRIPTION | TYPE |
100 | ------------------|----------------------------------|-------------------------------------
101 | | `APPLICATION_KEY` | Cometa Application Key | String |
102 | 
103 | Example:
104 | ```
105 | $ curl -H 'Authorization: OAuth b4f51e1e6125dcc873e9' -H 'Content-type: application/json' \
106 |     http://vederly.cometa.io/v1/applications/a0353b75b8fa61889d19/devices
107 | 
108 | {
109 |   "num_devices": 11,
110 |   "devices": [
111 |     "cc79cf45f1b4",
112 |     "cc79cf45f1d4",
113 |     "cc79cf45f421",
114 |     "cc79cf45f1ba",
115 |     "cc79cf45f400",
116 |     "cc79cf45f401",
117 |     "cc79cf45f2ab",
118 |     "cc79cf45f307",
119 |     "cc79cf45f221",
120 |     "cc79cf45f314",
121 |     "cc79cf45f2d8"
122 |   ]
123 | }
124 | ```
125 | 
126 | ### Cometa Vehicle Presence
127 | 
128 | Get vehicle connection state and statistics information from the Cometa server.
129 | ```
130 | GET /v1/applications/{APPLICATION_KEY}/devices/{DEVICE_ID}
131 | ```
132 | 
133 | 
134 | 
135 | | URL PARAMETER | DESCRIPTION | TYPE |
136 | ------------------|----------------------------------|-------------------------------------
137 | | `APPLICATION_KEY` | Cometa Application Key | String |
138 | | `DEVICE_ID` | Device Cometa Id | String |
139 | 
140 | Example:
141 | ```
142 | $ curl -H 'Authorization: OAuth b4f51e1e6125dcc873e9' -H 'Content-type: application/json' \
143 |     http://vederly.cometa.io/v1/applications/a0353b75b8fa61889d19/devices/e984060007
144 | 
145 | {
146 |   "device_id": "e984060007",
147 |   "ip_address": "73.202.12.128:64471",
148 |   "heartbeat": "1478378638",
149 |   "info": "Autonomia",
150 |   "connected_at": "1478373655",
151 |   "latency": "45",
152 |   "websockets": "1",
153 |   "net": {
154 |     "received": {
155 |       "bytes": "4237",
156 |       "messages": "12"
157 |     },
158 |     "sent": {
159 |       "bytes": "34789",
160 |       "messages": "781"
161 |     }
162 |   }
163 | }
164 | ```
165 | **Latency is in milliseconds and indicates the average time of a round-trip from the server to the vehicle (message/response).
Latencies of less than 100 msec for a round-trip are typical for vehicles connected in the US.**
166 | >Vehicle connections are maintained by a regular heartbeat message sent by `Autonomia` to the server (60 seconds period). The Cometa server times out and disconnects a vehicle 90 seconds after receiving the last heartbeat message. A vehicle may therefore appear to be connected for up to an additional 90 seconds if it disconnects abruptly without cleanly closing its socket connection.
167 | 
168 | ## Methods
169 | 
170 | ### Video Devices
171 | `video_devices`
172 | 
173 | List available video devices (v4l).
174 | 
175 | Example:
176 | ```
177 | $ curl -X POST -H 'Authorization: OAuth a724dc4811d507688' -H 'Content-type: application/json' \
178 |     -d '{"jsonrpc":"2.0","method":"video_devices","params":{},"id":7}' \
179 |     https://vederly.cometa.io/v1/applications/a94660d971eca2879/devices/e984060007/send
180 | ```
181 | 
182 | ### Set Telemetry Period
183 | `set_telemetry_period`
184 | 
185 | Set the telemetry period in seconds.
186 | 
187 | ### Get Configuration
188 | `get_config`
189 | 
190 | Get the configuration object in the `config.json` file.
191 | 
192 | ### Get Status
193 | `get_status`
194 | 
195 | Get the vehicle's current status.
196 | 
197 | ### Set Mode
198 | `set_mode`
199 | 
200 | Set the vehicle running mode.
201 | 
202 | | MODE | DESCRIPTION | NOTES |
203 | ------------------------|-------------------------|-------------
204 | | `TRAINING` | control from RC radio controller | embedded telemetry in video for CNN training
205 | | `REMOTE` | remote control with Cloud API methods | use `set_throttle` and `set_steering` for control
206 | | `AUTO` | autonomous mode | predicting steering and throttle from CNN
207 | 
208 | ### Set Steering
209 | `set_steering`
210 | 
211 | Set the steering value for a vehicle running in `REMOTE` mode.
212 | 
213 | >Values are in the [0,180] range, with 90 for neutral.
214 | 
215 | ### Set Throttle
216 | `set_throttle`
217 | 
218 | Set the throttle value for a vehicle running in `REMOTE` mode.
219 | 
220 | >Values are in the [0,180] range, with 90 for neutral.
221 | 
222 | ### Start
223 | `start`
224 | 
225 | Start the vehicle in `TRAINING` mode.
226 | 
227 | ### Stop
228 | `stop`
229 | 
230 | Stop the vehicle and change state to `IDLE`.
231 | 
232 | ### Video Start
233 | `video_start`
234 | 
235 | Start video streaming to the Vederly server using the `streamer` and `camera key` set in `config.json`. Set `"telem":true` to embed steering and throttle values in the image.
236 | 
237 | ### Video Stop
238 | `video_stop`
239 | 
240 | Stop video streaming.
241 | 
242 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 | 
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 | 1. Definitions.
8 | 
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 | 
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity.
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /streamer.py: -------------------------------------------------------------------------------- 1 | """ 2 | Cloud connected autonomous RC car. 3 | 4 | Copyright 2016 Visible Energy Inc. All Rights Reserved. 5 | """ 6 | __license__ = """ 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | """ 17 | 18 | import os 19 | import signal 20 | import time 21 | import subprocess as sp 22 | import numpy as np 23 | import cv2 24 | import json 25 | import time 26 | import threading 27 | 28 | from config import DataConfig 29 | 30 | # filename to fetch telemetry from -- updated atomically by the controller loop at 30 Hz 31 | TELEMFNAME = '/tmpfs/meta.txt' 32 | 33 | # filename where to store the last frame -- used by the application loop and as parameter for the CNN prediction 34 | FRAMEFNAME = '/tmpfs/frame.yuv' 35 | 36 | log=None 37 | config=None 38 | camera=None 39 | 40 | cnn_config = DataConfig() 41 | 42 | # Vehicle object instantiated in the application module 43 | car=None 44 | 45 | streaming=False 46 | video_thread=None 47 | 48 | def init(conf, logger): 49 | global config, log, camera 50 | 51 | config=conf 52 | log=logger 53 | 54 | videodevs = ["/dev/" + x for x in os.listdir("/dev/") if x.startswith("video") ] 55 | if len(videodevs) == 0: 56 | log("Fatal error. Cannot proceed without cameras connected.") 57 | return None 58 | 59 | print videodevs 60 | # create an empty telemetry file 61 | s = 'echo > ' + TELEMFNAME 62 | sp.check_call(s, shell=True) 63 | 64 | # use the first camera 65 | camera=videodevs[0] 66 | 67 | # set resolution and encoding (Logitech C920) 68 | s = 'v4l2-ctl --device=' + camera + ' --set-fmt-video=width=320,height=240,pixelformat=1' 69 | log("Setting camera: %s" % s) 70 | # execute and wait for completion 71 | sp.check_call(s, shell=True) 72 | 73 | return camera 74 | 75 | def video_start(telem): 76 | """ 77 | The video is streamed with a pipeline: 78 | camera -> ffmpeg to stdout -> each frame is read and processed -> stdin into ffmpeg for RTMP push to server 79 | the streaming pipeline runs in a separate thread 80 | """ 81 | global video_thread 82 | 83 | video_thread = threading.Thread(target=video_pipe,args=(telem,)) 84 | # video_thread.streaming = True 85 | video_thread.start() 86 | return 87 | 88 | def video_pipe(telem): 89 | global video_thread 90 | 91 | try: 92 | pname = config['video']['streamer'] 93 | except: 94 | log("Error cannot start the video streamer. 
Streamer not defined in config.json") 95 | return None 96 | 97 | # input ffmpeg command 98 | i_command = [ pname, 99 | '-r', '30', 100 | '-use_wallclock_as_timestamps', '1', 101 | '-f', 'v4l2', 102 | '-i', '/dev/video0', 103 | '-vb','1000k', 104 | '-f', 'image2pipe', 105 | '-pix_fmt', 'yuyv422', 106 | '-vcodec', 'rawvideo', '-'] 107 | # ffmpeg stdout into a pipe 108 | i_pipe = sp.Popen(i_command, stdout = sp.PIPE, bufsize=10**5) 109 | 110 | # output ffmpeg push to the RTMP server 111 | url = 'rtmp://' + config['video']['server'] + ':' + config['video']['port'] + '/src/' + config['video']['key'] 112 | o_command = [ pname, 113 | '-f', 'rawvideo', 114 | '-vcodec','rawvideo', 115 | '-s', '320x240', # size of one frame 116 | '-pix_fmt', 'rgb24', #'yuyv422', rgb24 117 | '-r', '30', # frames per second 118 | '-i', 'pipe:0', # The imput comes from a pipe 119 | # to use with standard h264 codec 120 | # '-an', # Tells FFMPEG not to expect any audio 121 | # '-c:v','libx264', 122 | # '-profile:v','main', 123 | # '-preset','ultrafast', 124 | # '-pix_fmt', 'yuv420p', 125 | # '-b:v','1000k', 126 | # RPI GPU codec 127 | '-c:v', 'h264_omx', 128 | '-maxrate','768k', 129 | '-bufsize','2000k', 130 | '-r', '30', # frames per second 131 | '-g','60', 132 | '-f','flv', 133 | url ] 134 | # ffmpeg stdin from a pipe 135 | try: 136 | o_pipe = sp.Popen(o_command, stdin=sp.PIPE, stderr=sp.PIPE) 137 | except Exception, e: 138 | print e 139 | 140 | # frame size 141 | rows = car.rows 142 | cols = car.cols 143 | image_size = rows * cols * 2 # *3 144 | 145 | # frame counter -- image frame filenames will have this number once the stored video file is split with ffmpeg 146 | count = 1 147 | #video_thread = threading.currentThread() 148 | 149 | # telemetry object sent for every frame 150 | ret = {} 151 | ret['device_id'] = car.serial # constant for every frame 152 | 153 | # streaming loop 154 | while getattr(video_thread, "streaming", True): 155 | # read frame bytes from ffmpeg input 156 | raw_image = i_pipe.stdout.read(image_size) 157 | 158 | # send telemetry in the capture loop for optimal synchronization 159 | if telem: 160 | now = int(time.time() * 1000) 161 | ret['time'] = str(now) 162 | ret['s'] = car.steering 163 | ret['t'] = car.throttle 164 | ret['c'] = count 165 | car.com.send_data(json.dumps(ret)) 166 | 167 | # prepare the frame for any processing 168 | f = np.fromstring(raw_image, dtype=np.uint8) 169 | img = f.reshape(rows, cols, 2) # 2) 170 | 171 | # lock the frame for use in the controller loop 172 | car.glock.acquire() 173 | 174 | # convert to RGB and assign to car object attribute 175 | car.frame = cv2.cvtColor(img, cv2.COLOR_YUV2RGB_YUY2) # working w camera format yuyv422 176 | 177 | # here car.frame is RGB 178 | 179 | # TODO: the car.frame is an RGB image of shape (rows,cols.3) 180 | # it needs to be pre-processed before the lock is released to use by the prediction 181 | # crop, resample, normalize, reshape 182 | # 183 | 184 | # --- TEST only 185 | # draw a center rectangle 186 | # cv2.rectangle(car.frame,(130,100),(190,140),(255,0,0),2) 187 | # M = cv2.getRotationMatrix2D((width/2,height/2),180,1) 188 | # rotate the image 90 degrees twice and bring back to normal 189 | #dst = cv2.warpAffine(car.frame,M,(width,height)) 190 | 191 | if telem: 192 | # print steering and throttle value into the image (for telemetry checking only) 193 | s = "%04d: %03d %03d" % (count, car.steering, car.throttle) 194 | cv2.putText(car.frame, s,(5,10), cv2.FONT_HERSHEY_SIMPLEX, .4, (0,255,0), 1) 195 | 196 | # output the frame to 
the ffmpeg output process 197 | o_pipe.stdin.write(car.frame.tostring()) 198 | 199 | # convert to YUV 200 | car.frame = cv2.cvtColor(car.frame, cv2.COLOR_BGR2YCR_CB) # working w camera format yuyv422 201 | # crop image 202 | Y = car.frame[cnn_config.ycrop_range[0]:cnn_config.ycrop_range[1], :,0] 203 | 204 | # resample image 205 | Y = cv2.resize(Y, cnn_config.img_resample_dim) #, cv2.INTER_LINEAR) 206 | """ 207 | # crop image 208 | Y = car.frame[cnn_config.img_yaxis_start:cnn_config.img_yaxis_end + 1, cnn_config.img_xaxis_start:cnn_config.img_xaxis_end + 1] 209 | 210 | # resample image 211 | Y = cv2.resize(Y, cnn_config.img_resample_dim) #, cv2.INTER_LINEAR) 212 | """ 213 | 214 | 215 | # Y is of shape (1,:,:,:) 216 | # reduce planes to 1 then proceed 217 | #print('*************', Y.shape, cnn_config) 218 | Y = Y.reshape(1, cnn_config.img_resample_dim[0], cnn_config.img_resample_dim[1], cnn_config.num_channels) 219 | 220 | # cast to float and normalize the image values 221 | car.frame = np.empty((rows * cols), dtype=np.float64) 222 | car.frame = Y / 255.0 - 0.5 223 | 224 | 225 | # TODO: insure car.frame is the proper format and shape to use it as input to model prediction 226 | # 227 | # release the frame lock 228 | car.glock.release() 229 | 230 | # frame counter 231 | count += 1 232 | # flush the input buffer 233 | i_pipe.stdout.flush() 234 | # short delay to allow other threads to run 235 | time.sleep(0.001) 236 | 237 | log('exiting streaming loop') 238 | # terminate child processes 239 | i_pipe.kill() 240 | o_pipe.kill() 241 | return 242 | 243 | 244 | def video_stop(): 245 | """ Stop running video capture streamer """ 246 | global video_thread 247 | video_thread.streaming = False 248 | # wait for the thread to finish 249 | video_thread.join(5) 250 | return 251 | 252 | #------------------------------------------------------------------ 253 | 254 | #old version video_start 255 | def Xvideo_start(telem): 256 | """ Start a video streamer """ 257 | global config, log, camera 258 | 259 | # insure no streamer is running 260 | video_stop() 261 | time.sleep(1) 262 | 263 | try: 264 | pname = config['video']['streamer'] 265 | except: 266 | log("Error cannot start the video streamer. 
Streamer not defined in config.json") 267 | return None 268 | 269 | # set video codec depending on platform 270 | vcodec = 'h264_omx' if 'raspberrypi' in os.uname() else 'h264' 271 | # to suppress output when running the streamer 272 | FNULL = open(os.devnull, 'w') 273 | if telem: 274 | # streaming video with embedded telemetry 275 | params = [pname, '-r','30', '-use_wallclock_as_timestamps', '1', '-thread_queue_size', '512', '-f', 'v4l2', '-i', camera, '-c:v ', vcodec, '-maxrate', '768k', '-bufsize', '960k'] 276 | format = 'format=yuv444p,drawbox=y=ih-h:color=black@0.9:width=40:height=12:t=max,drawtext=fontfile=OpenSans-Regular.ttf:textfile=' + TELEMFNAME + ':reload=1:fontsize=10:fontcolor=white:x=0:y=(h-th-2),format=yuv420p' 277 | url = 'rtmp://' + config['video']['server'] + ':' + config['video']['port'] + '/src/' + config['video']['key'] 278 | params = params + ['-vf', format, '-threads', '4', '-r', '30', '-g', '60', '-f', 'flv', url] 279 | # spawn a process and do not wait 280 | pid = sp.Popen(params, stderr=FNULL) 281 | else: 282 | # streaming video and saving the last frame for CNN prediction 283 | params = [pname, '-r','30', '-use_wallclock_as_timestamps', '1', '-thread_queue_size', '512', '-f', 'v4l2', '-i', camera, '-c:v ', vcodec, '-maxrate', '768k', '-bufsize', '960k'] 284 | url = 'rtmp://' + config['video']['server'] + ':' + config['video']['port'] + '/src/' + config['video']['key'] 285 | params = params + ['-threads', '4', '-r', '30', '-g', '60', '-f', 'flv', url] 286 | params = params + ['-vcodec', 'rawvideo', '-an', '-updatefirst', '1', '-y', '-f', 'image2', FRAMEFNAME] 287 | # to transcode use format YUYV422: 288 | # $ ffmpeg -vcodec rawvideo -s 320x240 -r 1 -pix_fmt yuyv422 -i frame.yuv rawframe.jpg 289 | # spawn a process and do not wait 290 | pid = sp.Popen(params, stderr=FNULL) 291 | return pid 292 | 293 | # old video stop 294 | def Xvideo_stop(): 295 | """ Stop running video capture streamer """ 296 | try: 297 | pname = config['video']['streamer'] 298 | except: 299 | log("Error cannot stop the video streamer. Streamer not defined in config.json") 300 | return None 301 | 302 | s = 'killall ' + pname 303 | FNULL = open(os.devnull, 'w') 304 | try: 305 | # execute and wait for completion 306 | sp.check_call(s, shell=True, stderr=FNULL) 307 | except Exception, e: 308 | # fails when no ffmpeg is running 309 | if config['app_params']['verbose']: 310 | log("Error stopping streamer. 
%s" % e) 311 | else: 312 | pass 313 | return 314 | 315 | """ 316 | ffmpeg -i ../1488768815.flv -vcodec rawvideo -pix_fmt yuyv422 -f image2 %03d.raw 317 | 318 | f=open('021.raw','rb') 319 | i=f.read(240 * 320 * 2) 320 | ff = np.fromstring(i,dtype=np.uint8) 321 | img = ff.reshape(240, 320, 2) 322 | img[0,0,0] 323 | img[0,0,1] 324 | 325 | pass through 326 | ./ffmpeg -r 30 -use_wallclock_as_timestamps 1 -thread_queue_size 512 -f v4l2 -vcodec h264 -i /dev/video0 -vcodec copy -f flv rtmp://stream.cometa.io:12345/src/74DA388EAC61 327 | 328 | ffmpeg -r 30 -use_wallclock_as_timestamps 1 -thread_queue_size 512 -f v4l2 -i /dev/video0 -c:v h264_omx -r 30 -g 60 -f flv rtmp://stream.cometa.io:12345/src/74DA388EAC61 329 | 330 | i_command = [ pname, 331 | '-r', '30', 332 | '-use_wallclock_as_timestamps', '1', 333 | '-f', 'v4l2', 334 | '-i', '/dev/video0', 335 | '-vb','1000k', 336 | '-f', 'image2pipe', 337 | '-'] 338 | 339 | o_command = [ pname, 340 | '-f', 'image2pipe', 341 | '-i', 'pipe:0', # The imput comes from a pipe 342 | '-f','flv', 343 | url ] 344 | f = np.fromstring(raw_image, dtype=np.uint8) 345 | 346 | 347 | o_pipe.stdin.write(f.tostring()) 348 | i_pipe.stdout.flush() 349 | continue 350 | """ 351 | -------------------------------------------------------------------------------- /docs/Autonomia.postman_collection.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "709ad316-59c9-b2e8-3549-b0f97521d372", 3 | "owner": "306237", 4 | "name": "Autonomia Cometa API", 5 | "description": "Remote API for Autonomia vehicles connected to Cometa Robotics cloud.", 6 | "order": [ 7 | "1fea178c-5b30-9c92-b5f7-b59e88f8ce2b", 8 | "c64ef3a6-6a90-e465-003c-2bdd29eba03b", 9 | "81717215-8c56-4576-90ab-d83d94f134a2", 10 | "d5ca344b-16a9-a3d9-0f8a-d3a444a8bef4", 11 | "b55e1480-6c62-d523-c40f-16a7dddc991d", 12 | "42537893-18a7-3f26-91b8-8f1b6c877ab5", 13 | "c8c2fac9-79b8-bbe2-a085-fa3b93e597b7", 14 | "ab097d23-43af-ea9a-d8ec-ce680639e262", 15 | "2c1031ba-6387-77e7-0e86-84a9ca5b6e1c", 16 | "64d0e909-7548-866f-1516-bdc28fb11047", 17 | "e9e41f7f-7789-d4c9-9634-c38a181d2d33", 18 | "6ab83ee6-7909-0459-f502-08476509a65f", 19 | "d8daae78-8662-0dc6-4715-50635f21a916", 20 | "a9d018ac-56aa-6394-9592-026e0c84004a", 21 | "4c734a44-703c-38fa-a3bb-650947857fe7" 22 | ], 23 | "folders": [], 24 | "timestamp": 1480265177930, 25 | "public": false, 26 | "favoriters": [], 27 | "lastUpdatedBy": "306237", 28 | "lastRevision": 896719451, 29 | "team": null, 30 | "requests": [ 31 | { 32 | "id": "1fea178c-5b30-9c92-b5f7-b59e88f8ce2b", 33 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 34 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices", 35 | "preRequestScript": null, 36 | "pathVariables": {}, 37 | "method": "GET", 38 | "data": null, 39 | "dataMode": "params", 40 | "version": 2, 41 | "tests": null, 42 | "currentHelper": "normal", 43 | "helperAttributes": {}, 44 | "time": 1480265289317, 45 | "name": "list connected vehicles", 46 | "description": "List vehicles connected to Cometa Robotics cloud", 47 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 48 | "responses": [] 49 | }, 50 | { 51 | "id": "2c1031ba-6387-77e7-0e86-84a9ca5b6e1c", 52 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 53 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 54 | "preRequestScript": null, 55 | "pathVariables": {}, 56 | "method": "POST", 57 | "data": 
[], 58 | "dataMode": "raw", 59 | "version": 2, 60 | "tests": null, 61 | "currentHelper": "normal", 62 | "helperAttributes": {}, 63 | "time": 1481065917604, 64 | "name": "stop", 65 | "description": "Stop the vehicle.", 66 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 67 | "responses": [], 68 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"stop\",\"params\":{},\"id\":7}" 69 | }, 70 | { 71 | "id": "42537893-18a7-3f26-91b8-8f1b6c877ab5", 72 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 73 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 74 | "preRequestScript": null, 75 | "pathVariables": {}, 76 | "method": "POST", 77 | "data": [], 78 | "dataMode": "raw", 79 | "version": 2, 80 | "tests": null, 81 | "currentHelper": "normal", 82 | "helperAttributes": "{}", 83 | "time": 1480980631959, 84 | "name": "set_steering", 85 | "description": "Set Vehicle Steering PWM. Value range is [0, 180].", 86 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 87 | "responses": [], 88 | "folder": null, 89 | "descriptionFormat": "html", 90 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"set_steering\",\"params\":{\"value\":100},\"id\":7}" 91 | }, 92 | { 93 | "id": "4c734a44-703c-38fa-a3bb-650947857fe7", 94 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 95 | "url": "http://api.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/websocket", 96 | "preRequestScript": "", 97 | "pathVariables": {}, 98 | "method": "GET", 99 | "data": [], 100 | "dataMode": "params", 101 | "version": 2, 102 | "tests": "", 103 | "currentHelper": "normal", 104 | "helperAttributes": {}, 105 | "time": 1481125660232, 106 | "name": "WebSocket key", 107 | "description": "Get WebSocket session key", 108 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 109 | "responses": [] 110 | }, 111 | { 112 | "id": "64d0e909-7548-866f-1516-bdc28fb11047", 113 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 114 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 115 | "preRequestScript": null, 116 | "pathVariables": {}, 117 | "method": "POST", 118 | "data": [], 119 | "dataMode": "raw", 120 | "version": 2, 121 | "tests": null, 122 | "currentHelper": "normal", 123 | "helperAttributes": {}, 124 | "time": 1480265794711, 125 | "name": "video_devices", 126 | "description": "List connected V4L video devices.", 127 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 128 | "responses": [], 129 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"video_devices\",\"params\":{},\"id\":7}" 130 | }, 131 | { 132 | "id": "6ab83ee6-7909-0459-f502-08476509a65f", 133 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 134 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 135 | "preRequestScript": null, 136 | "pathVariables": {}, 137 | "method": "POST", 138 | "data": [], 139 | "dataMode": "raw", 140 | "version": 2, 141 | "tests": null, 142 | "currentHelper": "normal", 143 | "helperAttributes": {}, 144 | "time": 1480286079401, 145 | "name": "video_stop", 146 | "description": "Stop streaming video.", 147 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 148 | "responses": [], 149 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"video_stop\",\"params\":{},\"id\":7}" 150 | }, 151 | { 152 | "id": 
"81717215-8c56-4576-90ab-d83d94f134a2", 153 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 154 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 155 | "preRequestScript": null, 156 | "pathVariables": {}, 157 | "method": "POST", 158 | "data": [], 159 | "dataMode": "raw", 160 | "version": 2, 161 | "tests": null, 162 | "currentHelper": "normal", 163 | "helperAttributes": {}, 164 | "time": 1480973668760, 165 | "name": "set_config", 166 | "description": "Set configuration object.", 167 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 168 | "responses": [], 169 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"set_config\",\"params\":{\"app_params\":{\"debug\":false,\"telemetry_period\":1, \"neutral_delta\":2},\"arduino\":{\"serial\":\"/dev/ttyUSB1\",\"speed\":38400}},\"id\":7}" 170 | }, 171 | { 172 | "id": "a9d018ac-56aa-6394-9592-026e0c84004a", 173 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 174 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 175 | "pathVariables": {}, 176 | "preRequestScript": null, 177 | "method": "POST", 178 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 179 | "data": [], 180 | "dataMode": "raw", 181 | "name": "start", 182 | "description": "Start the vehicle.", 183 | "descriptionFormat": "html", 184 | "time": 1481065969146, 185 | "version": 2, 186 | "responses": [], 187 | "tests": null, 188 | "currentHelper": "normal", 189 | "helperAttributes": {}, 190 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"start\",\"params\":{},\"id\":7}" 191 | }, 192 | { 193 | "id": "ab097d23-43af-ea9a-d8ec-ce680639e262", 194 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 195 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 196 | "preRequestScript": "", 197 | "pathVariables": {}, 198 | "method": "POST", 199 | "data": [], 200 | "dataMode": "raw", 201 | "version": 2, 202 | "tests": "", 203 | "currentHelper": "normal", 204 | "helperAttributes": {}, 205 | "time": 1480285477938, 206 | "name": "rexec", 207 | "description": "Run a remote shell command", 208 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 209 | "responses": [], 210 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"rexec\",\"params\":[\"/sbin/iwconfig\"],\"id\":17}" 211 | }, 212 | { 213 | "id": "b55e1480-6c62-d523-c40f-16a7dddc991d", 214 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 215 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 216 | "pathVariables": {}, 217 | "preRequestScript": null, 218 | "method": "POST", 219 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 220 | "data": [], 221 | "dataMode": "raw", 222 | "name": "set_throttle", 223 | "description": "Set Vehicle Throttle PWM. 
Value range is [0, 180].", 224 | "descriptionFormat": "html", 225 | "time": 1480265940070, 226 | "version": 2, 227 | "responses": [], 228 | "tests": null, 229 | "currentHelper": "normal", 230 | "helperAttributes": {}, 231 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"set_throttle\",\"params\":{\"value\":90},\"id\":7}" 232 | }, 233 | { 234 | "id": "c64ef3a6-6a90-e465-003c-2bdd29eba03b", 235 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 236 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 237 | "preRequestScript": null, 238 | "pathVariables": {}, 239 | "method": "POST", 240 | "data": [], 241 | "dataMode": "raw", 242 | "version": 2, 243 | "tests": null, 244 | "currentHelper": "normal", 245 | "helperAttributes": {}, 246 | "time": 1480265824105, 247 | "name": "get_config", 248 | "description": "Get configuration object.", 249 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 250 | "responses": [], 251 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"get_config\",\"params\":{},\"id\":7}" 252 | }, 253 | { 254 | "id": "c8c2fac9-79b8-bbe2-a085-fa3b93e597b7", 255 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 256 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 257 | "preRequestScript": null, 258 | "pathVariables": {}, 259 | "method": "POST", 260 | "data": [], 261 | "dataMode": "raw", 262 | "version": 2, 263 | "tests": null, 264 | "currentHelper": "normal", 265 | "helperAttributes": "{}", 266 | "time": 1480273034531, 267 | "name": "set_mode", 268 | "description": "Set vehicle mode.", 269 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 270 | "responses": [], 271 | "folder": null, 272 | "descriptionFormat": "html", 273 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"set_mode\",\"params\":{\"value\":\"REMOTE\"},\"id\":7}" 274 | }, 275 | { 276 | "id": "d5ca344b-16a9-a3d9-0f8a-d3a444a8bef4", 277 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 278 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 279 | "preRequestScript": null, 280 | "pathVariables": {}, 281 | "method": "POST", 282 | "data": [], 283 | "dataMode": "raw", 284 | "version": 2, 285 | "tests": null, 286 | "currentHelper": "normal", 287 | "helperAttributes": {}, 288 | "time": 1480266186164, 289 | "name": "get_status", 290 | "description": "Get Vehicle Status", 291 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 292 | "responses": [], 293 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"get_status\",\"params\":{},\"id\":10}" 294 | }, 295 | { 296 | "id": "d8daae78-8662-0dc6-4715-50635f21a916", 297 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 298 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 299 | "pathVariables": {}, 300 | "preRequestScript": null, 301 | "method": "POST", 302 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 303 | "data": [], 304 | "dataMode": "raw", 305 | "name": "set_telemetry", 306 | "description": "Set telemetry period in seconds", 307 | "descriptionFormat": "html", 308 | "time": 1480972165254, 309 | "version": 2, 310 | "responses": [], 311 | "tests": null, 312 | "currentHelper": "normal", 313 | "helperAttributes": {}, 314 | "rawModeData": 
"{\"jsonrpc\":\"2.0\",\"method\":\"set_telemetry_period\",\"params\":{\"period\":5},\"id\":7}" 315 | }, 316 | { 317 | "id": "e9e41f7f-7789-d4c9-9634-c38a181d2d33", 318 | "headers": "Authorization: OAuth {{COMETA_SECRET}}\nContent-Type: application/json\n", 319 | "url": "https://autonomia.cometa.io/v1/applications/{{APPLICATION_ID}}/devices/{{DEVICE_ID}}/send", 320 | "preRequestScript": null, 321 | "pathVariables": {}, 322 | "method": "POST", 323 | "data": [], 324 | "dataMode": "raw", 325 | "version": 2, 326 | "tests": null, 327 | "currentHelper": "normal", 328 | "helperAttributes": "{}", 329 | "time": 1481139046552, 330 | "name": "video_start", 331 | "description": "Start streaming video.", 332 | "collectionId": "709ad316-59c9-b2e8-3549-b0f97521d372", 333 | "responses": [], 334 | "descriptionFormat": "html", 335 | "folder": null, 336 | "rawModeData": "{\"jsonrpc\":\"2.0\",\"method\":\"video_start\",\"params\":{\"telem\":true},\"id\":7}" 337 | } 338 | ] 339 | } -------------------------------------------------------------------------------- /ConvNet/train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Cloud connected autonomous RC car. 4 | 5 | Copyright 2016 Visible Energy Inc. All Rights Reserved. 6 | """ 7 | __license__ = """ 8 | Licensed under the Apache License, Version 2.0 (the "License"); 9 | you may not use this file except in compliance with the License. 10 | You may obtain a copy of the License at 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 
17 | """
18 |
19 | import numpy as np
20 | import sys
21 | import os
22 | import cv2
23 | import glob
24 | import csv
25 | import math   # used by steering2bucket() and bucket2steering() below
26 | from keras.optimizers import SGD, RMSprop, Adagrad, Adam
27 | from keras.layers.core import Dense, Dropout, Activation
28 | from keras.layers import Input, Convolution2D, MaxPooling2D, AveragePooling2D, Flatten, PReLU
29 | from keras.models import Sequential, Model
30 | from config import TrainConfig
31 | from keras import backend as K
32 | from sklearn.utils import shuffle
33 | from keras import callbacks
34 | from keras.regularizers import l2
35 | import cnnModels as nnet
36 | from sklearn.model_selection import train_test_split
37 | import matplotlib
38 | matplotlib.use('Agg')
39 | import matplotlib.pyplot as plt
40 |
41 | class DriveLog():
42 |
43 |     def __init__(self, folder_imgs):
44 |         self.folder_imgs = folder_imgs
45 |
46 |     def exists(self, fname='log.npy'):
47 |         file_ls = glob.glob('*.npy')
48 |         if fname in file_ls:
49 |             return True
50 |         else:
51 |             print('Unable to find the log file: {}'.format(fname))
52 |             return False
53 |
54 |
55 |
56 |     def make_imgName(self, id):
57 |         '''
58 |         generate the image filename from the index in the 1st column of the summary txt file
59 |         '''
60 |         idx = '{:05d}'.format(id)
61 |         img_name = 'img' + str(idx) + '.jpg'
62 |         return img_name
63 |
64 |
65 |     def make_log(self):
66 |         '''
67 |         row {"c":1,"s":94,"time":"1489255712171","t":99,"device_id":"B827EB4879D6"}
68 |         '''
69 |         # get the summary txt file
70 |         summary_fileList = glob.glob(self.folder_imgs+'/*.txt')
71 |         if len(summary_fileList) == 1:
72 |             summary_txt = summary_fileList[0]
73 |             print(summary_txt)
74 |             with open(summary_txt, 'r') as csvfile:
75 |                 data = csv.reader(csvfile, delimiter=',')
76 |                 log = []
77 |                 for row in data:
78 |                     img_idx, steering, throttle = row[0][5::], row[1][2::], row[3][2::]
79 |                     img_fname = self.folder_imgs +'/'+ self.make_imgName( int(img_idx) )
80 |                     log.append([img_fname, steering, throttle])
81 |             np.save('log.npy', log)
82 |             return log
83 |         else:
84 |             print('Unable to find the summary file {}'.format('*'+self.folder_imgs+'*.txt'))
85 |
86 |
87 | def steering2bucket(s, bucket_sz):
88 |     """
89 |     Convert from the [0,180] range to a bucket number in the [0,14] range,
90 |     with a log distribution to stretch the range of the buckets around 0
91 |     """
92 |     s -= 90
93 |     return int(round(math.copysign(math.log(abs(s) + 1, 2.0), s))) + bucket_sz/2
94 |
95 | def bucket2steering(a, bucket_sz):
96 |     """ Reverse the function that buckets the steering for neural net output """
97 |     steer = a - bucket_sz/2
98 |     original = steer
99 |     steer = abs(steer)
100 |     steer = math.pow(2.0, steer)
101 |     steer -= 1.0
102 |     steer = math.copysign(steer, original)
103 |     steer += 90.0
104 |     steer = max(0, min(179, steer))
105 |     return steer
106 |
107 |
108 | def image_flip(img, steering):
109 |     coin = np.random.choice([0, 1])
110 |     if coin == 1:
111 |         new_img = cv2.flip(img, 1)
112 |         new_steering = - steering
113 |         return (new_img, new_steering)
114 |     else:
115 |         return (img, steering)
116 |
117 |
118 | def batch_generator(x, y, batch_size, model_img_sz, n_outputs, ycrop_range = [120, -20], cspace='YCR_CB', model_type='classification', run='train'):
119 |     '''
120 |     Generate training batch: Yield X and Y data when the batch is filled.
121 | x: list of the adress of all images to be used for training 122 | y: steering angles 123 | batch_size: size of the batch (X, Y) 124 | model_img_sz: size of the image (height, width, channel) to generate 125 | monitor: save X of the last batch generated 'X_batch_sample.npy 126 | save angles of all batches generated 'y_bag.npy 127 | True as long as total number of examples generated is lower than the number of 'samples_per_epoch' set by user. 128 | ''' 129 | 130 | # import pdb; pdb.set_trace() 131 | 132 | offset = 0 133 | while True: 134 | # Initialize X and Y array 135 | X = np.zeros((batch_size, model_img_sz[0], model_img_sz[1], model_img_sz[2]), dtype='float32') 136 | Y = np.zeros((batch_size, n_outputs), dtype='float32') 137 | #Generate a batch 138 | for example in range(batch_size): 139 | fname = x[example + offset] 140 | img = cv2.imread(fname) 141 | img = cv2.cvtColor(img, eval('cv2.COLOR_BGR2'+cspace) ) 142 | if model_img_sz[2] == 1: 143 | img = img[:,:,0] 144 | #cv2 resize (x_size, y_size) 145 | img_resize = cv2.resize(img[ycrop_range[0]:ycrop_range[1], :], 146 | (model_img_sz[1], model_img_sz[0]), cv2.INTER_LINEAR) 147 | if run=='train': 148 | img_feed, steering = image_flip(img_resize, y[example+offset]) 149 | else: 150 | img_feed, steering = img_resize, y[example + offset] 151 | if model_img_sz[2] == 1: 152 | X[example,:,:,0] = img_feed/255.0 - 0.5 153 | else: 154 | X[example,:,:,:] = img_feed/255.0 - 0.5 155 | if model_type == 'classification': 156 | steering_class = steering2bucket( steering, n_outputs ) 157 | Y[example, int(steering_class)] = 1 158 | else: 159 | Y[example] = steering 160 | 161 | yield (X, Y) 162 | 163 | if (offset+batch_size >= len(y)-len(y)%batch_size): 164 | offset = 0 165 | x, y = shuffle(x, y) 166 | else: 167 | offset = offset + batch_size 168 | np.save('x_val.npy', X ) 169 | np.save('y_val.npy', Y) #save last batch of images 170 | 171 | 172 | models = { 173 | 'model_wroscoe_mod': nnet.model_wroscoe_mod, 174 | } 175 | 176 | 177 | if __name__ == "__main__": 178 | config = TrainConfig() 179 | 180 | try: 181 | data_path = os.path.expanduser(sys.argv[1]) 182 | except Exception as e: 183 | print(e, "Usage: ./prepare_data.py ") 184 | sys.exit(-1) 185 | 186 | if not os.path.exists(data_path): 187 | print("Directory %s not found." % data_path) 188 | sys.exit(-1) 189 | 190 | ################ 191 | # PARAMETERS 192 | img_resample_dim = config.img_resample_dim 193 | ch = config.num_channels 194 | num_epoch = config.num_epoch 195 | batch_size = config.batch_size 196 | validation_split = config.validation_split 197 | num_outputs = config.num_buckets * 1 198 | model_type = config.model_type 199 | data_augmentation = config.data_augmentation 200 | ycrop_range = config.ycrop_range 201 | cspace = config.cspace 202 | SEED = config.seed 203 | 204 | # set of callbacks to save model weights during training when loss of validation set decreases 205 | #model_path = os.path.expanduser('model.h5') 206 | #Save the model after each epoch if the validation loss improved. 207 | save_best = callbacks.ModelCheckpoint("{}/autonomia_cnn_step.h5".format(data_path), monitor='val_loss', verbose=1, 208 | save_best_only=True, mode='min') 209 | #stop training if the validation loss doesn't improve for 5 consecutive epochs. 
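# A quick sanity check of batch_generator (a sketch under the assumption that
# log.npy already exists; left commented out because X_train and yst_train are
# only defined further below, after the train/validation split):
#
#   gen = batch_generator(X_train, yst_train, batch_size=batch_size,
#                         model_img_sz=(img_resample_dim[0], img_resample_dim[1], ch),
#                         n_outputs=num_outputs, ycrop_range=ycrop_range,
#                         cspace=cspace, model_type=model_type, run='valid')
#   X0, Y0 = next(gen)
#   assert X0.shape == (batch_size, img_resample_dim[0], img_resample_dim[1], ch)
#   assert -0.5 <= X0.min() and X0.max() <= 0.5   # frames are normalized to [-0.5, 0.5]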
210 | early_stop = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, 211 | verbose=0, mode='auto') 212 | #callbacks_list = [save_best, early_stop] 213 | callbacks_list = [] 214 | 215 | model = models[config.model]((img_resample_dim[0],img_resample_dim[1],ch), config.num_buckets, keep_rate=config.keep_rate, reg_fc=config.reg_fc, reg_conv=config.reg_conv, model_type=config.model_type) 216 | print("---------------------------") 217 | print("model %s is created and compiled\r\n" % config.model) 218 | print(model.summary()) 219 | 220 | 221 | ######## 222 | # Generate log array 223 | print("Create log book") 224 | logBook = DriveLog(data_path) 225 | 226 | #if logBook.exists(): 227 | # log = np.load('log.npy') 228 | #else: 229 | log = np.array( logBook.make_log() ) 230 | print(log) 231 | x_original = log[:, 0] 232 | yst_original = (log[:, 1].astype('float32')) 233 | # y is corrected steering angle 234 | x, yst = shuffle(x_original, yst_original-90) 235 | # split train/validation set with ratio: 5:1 236 | X_train, X_val, yst_train, yst_val = train_test_split(x, yst, test_size=validation_split, random_state=SEED) 237 | print('--> Train set size: {} | Validation set size: {}'.format(len(X_train), len(X_val))) 238 | 239 | #samples_per_epoch = len(y_train) * data_augmentation 240 | samples_per_epoch = ((len(yst_train) - len(yst_train)%batch_size)*data_augmentation)/128 241 | # make validation set size to be a multiple of batch_size 242 | nb_val_samples = len(yst_val) - len(yst_val)%batch_size 243 | nb_val_samples = 256 244 | # and trained it via: 245 | #history = model.fit(X, {'o_st': y1_steering, 'o_thr': y2_throttle}, batch_size=batch_size, nb_epoch=num_epoch, verbose=1, validation_split=validation_split, callbacks=callbacks_list ) 246 | #history = model.fit_generator(batch_generator(X_train, yst_train, batch_size=batch_size, 247 | # model_img_sz=(*img_resample_dim,ch), n_outputs=num_outputs, 248 | # ycrop_range= ycrop_range, cspace=cspace, model_type=model_type, run='train'), 249 | # steps_per_epoch=int(samples_per_epoch), nb_val_samples=nb_val_samples, 250 | # validation_data=batch_generator(X_val, yst_val, batch_size=batch_size, 251 | # model_img_sz = (*img_resample_dim,ch), 252 | # n_outputs=num_outputs, ycrop_range= ycrop_range, cspace=cspace, model_type=model_type, run='valid'), 253 | # nb_epoch=num_epoch, verbose=1, callbacks=callbacks_list) 254 | 255 | history = model.fit_generator(batch_generator(X_train, yst_train, batch_size=batch_size, 256 | model_img_sz=(img_resample_dim[0], img_resample_dim[1], ch), n_outputs=num_outputs, 257 | ycrop_range= ycrop_range, cspace=cspace, model_type=model_type, run='train'), 258 | steps_per_epoch=int(samples_per_epoch), 259 | # samples_per_epoch=int(samples_per_epoch), 260 | nb_epoch=num_epoch, verbose=1, callbacks=callbacks_list) 261 | 262 | print("saving model and weights") 263 | with open("{}/autonomia_cnn.json".format(data_path), 'w') as f: 264 | f.write(model.to_json()) 265 | 266 | model.save_weights("{}/autonomia_cnn.h5".format(data_path)) 267 | 268 | test_sz = 600 269 | start = 656 270 | x_test = x_original[start:start + test_sz] 271 | # print(x_original.shape) 272 | y_test = yst_original[start:start + test_sz]-90 273 | X_test = np.zeros((test_sz, img_resample_dim[0], img_resample_dim[1], ch), dtype='float32') 274 | Y_test = np.zeros((test_sz, num_outputs), dtype='float32') 275 | for index, fname in enumerate(x_test): 276 | img = cv2.imread(fname) 277 | img = cv2.cvtColor(img, eval('cv2.COLOR_BGR2'+cspace) ) 278 | if ch == 1: 279 
| img = img[:,:,0]
280 |         img = img[ycrop_range[0]:ycrop_range[1], :]
281 |         img_resize = cv2.resize(img, (img_resample_dim[1], img_resample_dim[0]) )
282 |         if model_type == 'classification':
283 |             Y_test[index] = steering2bucket( y_test[index], num_outputs )
284 |         else:
285 |             Y_test[index] = y_test[index]
286 |
287 |         if ch == 1:
288 |             X_test[index, :,:,0] = img_resize/255.0-0.5
289 |
290 |         else:
291 |             X_test[index, :,:,:] = img_resize/255.0-0.5
292 |     pred = model.predict(X_test, batch_size=test_sz, verbose=1)
293 |     if model_type == 'classification':
294 |         pred_class = np.argmax(pred, axis=1)
295 |     else:
296 |         pred_class = pred
297 |     plt.plot(np.arange(0,test_sz, 1), Y_test, 'b-')
298 |     # print(pred_class)
299 |     plt.plot(np.arange(0,test_sz, 1), pred_class, 'r-')
300 |     plt.savefig('test_on_trainingset.png')
301 |     print("plot of testing on trainingset in test_on_trainingset.png")
302 |     # plt.show()
303 |
304 | """
305 | logfile_test = 'log_test.npy' #MEG: where is this coming from?
306 | test_dir = 'data/oakland170418' #MEG: is the test_dir different from data_path?
307 | # summary_file = 'data/oakland170418/1492543329.txt'
308 | files_log = glob.glob('*.npy')
309 | if logfile_test in files_log:
310 |     try:
311 |         log_test = np.load(logfile_test)
312 |     except:
313 |         print('file {} not found'.format(logfile_test))
314 | else:
315 |     logBookTest = DriveLog(test_dir)
316 |     log_test = np.array( logBookTest.make_log() )
317 |
318 |
319 | xt_original = log_test[:, 0]
320 | yt_original = (log_test[:, 1].astype('float32')-90)
321 | start2 = 200
322 | x_t = xt_original[start2:start2 + test_sz]
323 | y_t = yt_original[start2:start2 + test_sz]
324 | X_t = np.zeros((test_sz, img_resample_dim[0],img_resample_dim[1], ch), dtype='float32')
325 | Y_t = np.zeros((test_sz, num_outputs), dtype='float32')
326 | for index, fname in enumerate(x_t):
327 |     img = cv2.imread(fname)
328 |     img = cv2.cvtColor(img, eval('cv2.COLOR_BGR2'+cspace) )
329 |     if ch == 1:
330 |         img = img[:,:,0]
331 |     img = img[ycrop_range[0]:ycrop_range[1], :]
332 |     img_resize = cv2.resize(img, img_resample_dim[::-1] ) #reverse tuple dimension
333 |     if model_type == 'classification':
334 |         Y_t[index] = steering2bucket( y_t[index], num_outputs )
335 |     else:
336 |         Y_t[index] = y_t[index]
337 |
338 |     if ch == 1:
339 |         X_t[index, :,:,0] = img_resize/255.0-0.5
340 |     else:
341 |         X_t[index, :,:,:] = img_resize/255.0-0.5
342 | pred = model.predict(X_t, batch_size=test_sz, verbose=1)
343 | if model_type == 'classification':
344 |     pred_class = np.argmax(pred, axis=1)
345 | else:
346 |     pred_class = pred
347 | plt.plot(np.arange(0,test_sz, 1), Y_t, 'b-')
348 | plt.plot(np.arange(0,test_sz, 1), pred_class, 'r-')
349 | plt.savefig('test_on_testset.png')
350 | plt.show()
351 |
352 | """
353 |
354 |
355 | # clear the session to avoid an error at the end of the program: "AttributeError: 'NoneType' object has no attribute 'TF_DeleteStatus'"
356 | # The alternative does not work: import gc; gc.collect()
357 | # https://github.com/tensorflow/tensorflow/issues/3388
358 | K.clear_session()
--------------------------------------------------------------------------------
/controller.py:
--------------------------------------------------------------------------------
1 | """
2 | Cloud connected autonomous RC car.
3 |
4 | Copyright 2016 Visible Energy Inc. All Rights Reserved.
5 | """
6 | __license__ = """
7 | Licensed under the Apache License, Version 2.0 (the "License");
8 | you may not use this file except in compliance with the License.
9 | You may obtain a copy of the License at 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | """ 17 | import time 18 | import json 19 | import serial 20 | import string 21 | import sys 22 | import Queue 23 | import threading 24 | import subprocess 25 | 26 | import streamer 27 | import utils 28 | import numpy as np 29 | from keras.models import model_from_json 30 | 31 | from config import DataConfig 32 | 33 | class States: 34 | """ Vehicle states """ 35 | IDLE=1 36 | RUNNING=2 37 | PAUSE=3 38 | STOPPED=0 39 | 40 | class Modes: 41 | """ Vehicle running modes """ 42 | AUTO=1 # fully autonomous 43 | TRAINING=2 # RC controlled to capture training video 44 | REMOTE=3 # controlled remotely through Cometa JSON/RPC 45 | 46 | # --- Module constansts 47 | THETA_CENTER = 90 48 | MOTOR_NEUTRAL = 90 49 | # filename to fetch telemetry from -- updated atomically by the controller loop at 30 Hz 50 | TELEMFNAME = '/tmpfs/meta.txt' 51 | 52 | # filename where to store the last frame -- used by the application loop and as parameter for the CNN prediction 53 | FRAMEFNAME = '/tmpfs/frame.yuv' 54 | 55 | def setup_arduino(config, logger): 56 | """ Arduino radio receiver and servos controller setup. """ 57 | try: 58 | # set serial non-blocking 59 | port = serial.Serial(config['arduino']['serial'], config['arduino']['speed'], timeout=0.0, xonxoff=False, rtscts=False, dsrdtr=False) 60 | port.flushInput() 61 | port.flushOutput() 62 | except Exception as e: 63 | logger("Arduino setup: %s" % e) 64 | return None 65 | # wait the board to start 66 | while port.inWaiting() == 0: 67 | time.sleep(0.1) 68 | return port 69 | 70 | class RCVehicle(object): 71 | """ 72 | Vehicle controller class 73 | config - configuration object from config.json 74 | log - system logger 75 | """ 76 | # current state and mode 77 | state = None 78 | mode = None 79 | 80 | # running options flags 81 | capture = False # capturing video and telemetry for CNN training 82 | streaming = False # streaming video to cloud server 83 | 84 | # current servo and motor values 85 | steering = None 86 | throttle = None 87 | 88 | # Arduino serial port 89 | arport = None 90 | 91 | # Mailbox for asynchronous commands 92 | mbox = Queue.Queue() 93 | 94 | # GPS readings 95 | readings={} 96 | 97 | # System logger 98 | log = None 99 | 100 | def __init__(self, config, logger): 101 | self.state=States.IDLE 102 | self.steering=THETA_CENTER 103 | self.throttle=MOTOR_NEUTRAL 104 | self.serial=config['serial'] 105 | self.config = config 106 | 107 | # Set the system log 108 | self.log=logger 109 | self.verbose=config['app_params']['verbose'] 110 | 111 | # CNN predicting model 112 | self.cnn_model = None 113 | self.p_steering=THETA_CENTER 114 | self.p_throttle=MOTOR_NEUTRAL 115 | 116 | self.arport=setup_arduino(config, self.log) 117 | while self.arport == None: 118 | self.log("Fatal error setting up Arduino board. 
Cannot proceed without properly connecting to the control board.") 119 | time.sleep(5) 120 | self.arport=setup_arduino(config, self.log) 121 | # TODO: remember to create a board heartbeat thread 122 | 123 | # Image frame 124 | self.glock = threading.Lock() 125 | self.rows = 240 126 | self.cols = 320 127 | self.frame = np.zeros(shape=(self.rows, self.cols, 3), dtype=np.uint8) 128 | 129 | # Start the main loop 130 | self.loop_t = threading.Thread(target=self.control_loop) 131 | self.loop_t.daemon = True # force to exit on SIGINT 132 | 133 | self.telemetry_period=config['app_params']['telemetry_period'] 134 | # cometa handle 135 | self.com = None 136 | return 137 | 138 | def load_model(self, modelpath): 139 | """ Load a CNN model """ 140 | if self.mode == Modes.AUTO: 141 | return False 142 | try: 143 | self.model = model_from_json(open(modelpath + ".json").read()) 144 | self.model.load_weights(modelpath + ".h5") 145 | except Exception, e: 146 | self.log("Failed to load CNN model %s. (%s)" % (modelpath,str(e)) ) 147 | return False 148 | 149 | self.log("Loaded CNN model %s." % modelpath) 150 | return True 151 | 152 | def state2run(self): 153 | """ State transition to RUNNING """ 154 | if self.state == States.RUNNING: 155 | return 156 | self.steering=THETA_CENTER 157 | self.throttle=MOTOR_NEUTRAL 158 | self.state=States.RUNNING 159 | self.log("State RUNNING") 160 | return 161 | 162 | def state2stopped(self): 163 | """ State transition to PAUSE """ 164 | if self.state == States.STOPPED: 165 | return 166 | self.steering=THETA_CENTER 167 | self.throttle=MOTOR_NEUTRAL 168 | self.state=States.STOPPED 169 | self.output_arduino(self.steering, self.throttle) 170 | self.log("State STOPPED") 171 | return 172 | 173 | def state2idle(self): 174 | """ State transition to IDLE """ 175 | if self.state == States.IDLE: 176 | return 177 | self.steering=THETA_CENTER 178 | self.throttle=MOTOR_NEUTRAL 179 | self.state=States.IDLE 180 | self.output_arduino(self.steering, self.throttle) 181 | self.log("State IDLE") 182 | return 183 | 184 | def mode2auto(self): 185 | """ Mode transition to AUTO """ 186 | if self.mode == Modes.AUTO: 187 | return 188 | # TODO: start the video fast video streamer 189 | 190 | self.mode=Modes.AUTO 191 | self.arport.flushInput() 192 | self.arport.flushOutput() 193 | self.log("Mode AUTO") 194 | return 195 | 196 | def mode2training(self): 197 | """ Mode transition to TRAINING """ 198 | if self.mode == Modes.TRAINING: 199 | return 200 | # TODO: start the video streamer with telemetry annotations 201 | self.mode=Modes.TRAINING 202 | self.arport.flushInput() 203 | self.arport.flushOutput() 204 | self.log("Mode TRAINING") 205 | return 206 | 207 | def mode2remote(self): 208 | """ Mode transition to REMOTE """ 209 | if self.mode == Modes.REMOTE: 210 | return 211 | 212 | self.arport.flushInput() 213 | self.arport.flushOutput() 214 | self.steering=THETA_CENTER 215 | self.throttle=MOTOR_NEUTRAL 216 | self.mode=Modes.REMOTE 217 | self.log("Mode REMOTE") 218 | return 219 | 220 | def start(self): 221 | """ Initial start """ 222 | self.mode2training() 223 | self.state2run() 224 | self.loop_t.start() 225 | return 226 | 227 | def telemetry(self): 228 | ret = {} 229 | ret['type'] = 1 230 | ret['time'] = str(int(time.time() * 1000)) 231 | ret['device_id'] = self.serial 232 | ret['state'] = self.state 233 | ret['mode'] = self.mode 234 | ret['steering'] = self.steering 235 | ret['throttle'] = self.throttle 236 | ret['p_steering'] = self.p_steering 237 | ret['p_throttle'] = self.p_throttle 238 | ret['GPS'] = {} 
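        # NOTE: 10E4 is the float 1e5, so the int()-then-divide below truncates
        # lat/lon to five decimal places (roughly 1 m of resolution), keeping
        # the telemetry message compact.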
239 | try: 240 | ret['GPS']['lat'] = int(self.readings['lat'] * 10E4) / 10E4 241 | ret['GPS']['lon'] = int(self.readings['lon'] * 10E4) / 10E4 242 | except: 243 | # leave GPS empty 244 | pass 245 | return ret 246 | 247 | def input_arduino(self): 248 | """ Read a line composed of throttle and steering values received from the RC. """ 249 | inputLine = '' 250 | if self.arport.inWaiting(): 251 | ch = self.arport.read(1) 252 | while ch != b'\x0A': 253 | inputLine += ch 254 | ch = self.arport.read(1) 255 | try: 256 | # print inputLine.decode('ISO-8859-1') 257 | t_in, s_in = inputLine.split() 258 | # return the steering and throttle values from the receiver 259 | return int(s_in), int(t_in) 260 | except: 261 | pass 262 | # return current values after a reading error 263 | return self.steering, self.throttle 264 | 265 | def output_arduino(self, steering, throttle): 266 | """ Write steering and throttle PWM values in the [0,180] range to the controller. """ 267 | # set steering to neutral if within an interval around 90 268 | steering = 90 if 88 < steering < 92 else steering 269 | # send a new steering PWM setting to the controller 270 | if self.mode == Modes.TRAINING: 271 | if steering != self.steering: 272 | self.steering = steering # update global 273 | self.arport.write(('S %d\n' % self.steering).encode('ascii')) 274 | else: 275 | self.arport.write(('S %d\n' % self.steering).encode('ascii')) 276 | 277 | # send a new throttle PWM setting to the controller 278 | if self.mode == Modes.TRAINING: 279 | if throttle != self.throttle: 280 | self.throttle = throttle # update global 281 | self.arport.write(('M %d\n' % self.throttle).encode('ascii')) 282 | else: 283 | self.arport.write(('M %d\n' % self.throttle).encode('ascii')) 284 | return 285 | 286 | # --------------------------------------- 287 | # 288 | def control_loop(self): 289 | """ Controller main loop """ 290 | last_update=0 291 | steering_in=self.steering 292 | throttle_in=self.throttle 293 | mv = '/bin/mv /tmpfs/meta.tmp ' + TELEMFNAME 294 | 295 | # Load a CNN model -- must be done in the same thread of the prediction 296 | self.load_model(self.config['app_params']['model']) 297 | 298 | cnn_config = DataConfig() 299 | 300 | last_telemetry = 0. 
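        # Loop outline (this method runs in its own daemon thread):
        #   1. every telemetry_period seconds, send a telemetry JSON message
        #   2. read steering/throttle inputs from the Arduino/RC receiver
        #   3. dispatch on (state, mode):
        #        RUNNING + TRAINING -> relay RC inputs to the servos
        #        RUNNING + AUTO     -> predict steering from the shared camera frame
        #        RUNNING + REMOTE   -> apply remotely set values; predict only for telemetry
        #        anything else      -> sleep briefly and poll again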
301 |         while True:
302 |             now = time.time()
303 |
304 |             # Send telemetry data
305 |             if self.telemetry_period < now - last_telemetry:
306 |                 msg = self.telemetry()
307 |                 if self.com.send_data(json.dumps(msg)) < 0:
308 |                     self.log("Error in sending telemetry data.")
309 |                 else:
310 |                     if self.verbose:
311 |                         self.log("Sending telemetry data %s " % msg)
312 |                 last_telemetry = now
313 |
314 |             # get inputs from the RC receiver in the [0,180] range
315 |             try:
316 |                 if self.arport.inWaiting():
317 |                     steering_in, throttle_in = self.input_arduino()
318 |             except Exception, e:
319 |                 self.log("input_arduino: %s" % str(e))
320 |                 continue
321 |             #
322 |             # ------------------------------------------------------------
323 |             #
324 |             if self.state == States.RUNNING and self.mode == Modes.TRAINING:
325 |                 # get inputs from the RC receiver in the [0,180] range
326 |                 # try:
327 |                 #     if self.arport.inWaiting():
328 |                 #         steering_in, throttle_in = self.input_arduino()
329 |                 # except Exception, e:
330 |                 #     self.log("input_arduino: %s" % str(e))
331 |                 #     continue
332 |                 # set steering to neutral if within an interval around 90
333 |                 steering_in = 90 if 87 <= steering_in < 92 else steering_in
334 |                 if self.verbose: print steering_in, throttle_in
335 |
336 |                 if self.steering == steering_in and self.throttle == throttle_in:
337 |                     # sleep to avoid hogging the CPU in a spin loop
338 |                     time.sleep(0.01)
339 |                     continue
340 |
341 |                 # update the telemetry file 30 times per second
342 |                 if 0.03337 < now - last_update:
343 |                     """ old version
344 |                     s = "%03d %03d" % (steering_in, throttle_in)
345 |                     # create metadata file for embedding steering and throttle values in the video stream
346 |                     try:
347 |                         f = open('/tmpfs/meta.tmp', 'w', 0)
348 |                         f.write(s)
349 |                         f.close()
350 |                         # use mv that is a system call and not preempted (atomic)
351 |                         subprocess.check_call(mv, shell=True)
352 |                     except Exception, e:
353 |                         self.log("writing file: %s" % str(e))
354 |                         pass
355 |                     """
356 |
357 |                     last_update = now
358 |                 # set new values for the throttle and steering servos
359 |                 self.output_arduino(steering_in, throttle_in)
360 |             #
361 |             # ------------------------------------------------------------
362 |             #
363 |             elif self.state == States.RUNNING and self.mode == Modes.AUTO:
364 |                 # predict steering and throttle, and set the values at a rate that depends on prediction speed
365 |                 start_t = time.time()
366 |
367 |                 # Y = utils.read_uyvy(FRAMEFNAME, cnn_config) # Y is of shape (1, :, :, 1) or (1, :, :, 3)
368 |                 # if Y is None:
369 |                 #     print "image not acquired"
370 |                 #     continue
371 |                 # Y is of shape (1, :, :, 1) or (1, :, :, 3)
372 |
373 |                 self.glock.acquire()
374 |                 Y = self.frame
375 |                 self.glock.release()
376 |
377 |                 # predict steering and throttle
378 |                 s = self.model.predict(Y[0:1])
379 |
380 |                 #self.glock.release()
381 |
382 |                 self.steering = int(s[0][0] + 90 + 0.5)
383 |                 self.throttle = 100 #99 #98
384 |
385 |                 #self.steering = utils.bucket2steering(self.steering)
386 |                 #self.throttle = utils.bucket2throttle(self.throttle)
387 |
388 |                 # clip the prediction for testing
389 |                 if self.throttle > 110:
390 |                     self.throttle = 110
391 |                 # ensure a minimum throttle value
392 |                 if self.throttle < 97:
393 |                     self.throttle = 97
394 |
395 |                 self.output_arduino(self.steering, self.throttle)
396 |                 print self.steering, self.throttle, s[0][0]
397 |                 dt = time.time() - start_t
398 |                 time.sleep(0.075) #0.075 #0.01 #0.034 - max(0.033, dt))
399 |                 print "execution time:", dt
400 |             #
401 |             # ------------------------------------------------------------
402 |             #
403 |             elif self.state == States.RUNNING and self.mode == Modes.REMOTE:
404 |                 self.output_arduino(self.steering, self.throttle)
405 |
406 |                 start_t = time.time()
407 |                 Y = utils.read_uyvy(FRAMEFNAME, cnn_config) # Y is of shape (1, :, :, 1) or (1, :, :, 3)
408 |                 if Y is None:
409 |                     print "image not acquired"
410 |                     continue
411 |
412 |                 # predict steering and throttle
413 |                 s, t = self.model.predict(Y[0:1])
414 |                 self.p_steering = utils.bucket2steering(np.argmax(s[0]))
415 |                 self.p_throttle = utils.bucket2throttle(np.argmax(t[0]))
416 |
417 |                 dt = time.time() - start_t
418 |                 print self.p_steering, self.p_throttle, dt
419 |
420 |                 time.sleep(0.01)
421 |                 # time.sleep(0.05)
422 |             #
423 |             # ------------------------------------------------------------
424 |             #
425 |             elif self.state == States.IDLE:
426 |                 time.sleep(0.1)
427 |             #
428 |             # ------------------------------------------------------------
429 |             #
430 |             else:
431 |                 time.sleep(0.1)
432 |
--------------------------------------------------------------------------------
/ConvNet/cnnModels.py:
--------------------------------------------------------------------------------
1 | import keras
2 | import numpy as np
3 | from keras.optimizers import Adam
4 | from keras.layers.core import Dense, Activation
5 | from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, PReLU, Dropout, ELU
6 | from keras.models import Sequential, Model
7 | from keras import backend as K
8 | from keras.regularizers import l2
9 |
10 |
11 | def coeff_determination(y_true, y_pred):
12 |     '''
13 |     R^2 gives the percentage of the variability between the two variables that is accounted for. The remaining (1 - R^2) is the variability that is not accounted for.
14 |     '''
15 |     SS_res = K.sum(K.square(y_true - y_pred))            # residual sum of squares
16 |     SS_tot = K.sum( K.square(y_true - K.mean(y_true)) )  # total sum of squares
17 |     coeff = (1 - SS_res/(SS_tot + K.epsilon() ))
18 |     # Return the score
19 |     return coeff
20 |
21 |
22 | def create_model_2softmax(img_size, num_outputs):
23 |     keep_rate = 0.3
24 |     pool_size = (2, 2)
25 |
26 |     img_input = Input(shape = img_size)
27 |     x = Convolution2D(16, 5, 5, subsample=(2, 2), W_regularizer=l2(0.001), border_mode="same", activation='relu')(img_input)
28 |     x = MaxPooling2D(pool_size=pool_size)(x)
29 |     x = Dropout(keep_rate)(x)
30 |     x = Convolution2D(32, 2, 2, subsample=(1, 1), W_regularizer=l2(0.001), border_mode="valid", activation='relu')(x)
31 |     x = MaxPooling2D(pool_size=pool_size)(x)
32 |
33 |     x = Flatten()(x)
34 |     x = Dropout(keep_rate)(x)
35 |     x = Dense(128, activation='relu')(x)
36 |     x = Dropout(keep_rate)(x)
37 |
38 |     o_st = Dense(num_outputs, activation='softmax', name='o_st')(x)
39 |     o_thr = Dense(num_outputs, activation='softmax', name='o_thr')(x)
40 |     model = Model(input=img_input, output=[o_st, o_thr])
41 |     model.compile(optimizer='adam', loss={'o_st': 'categorical_crossentropy', 'o_thr': 'categorical_crossentropy'}, metrics=['accuracy'])
42 |
43 |     return model
44 |
45 | def model_wroscoe(img_size, bucket_sz, keep_rate=0.2, reg_fc=0.01, reg_conv=0.0001, model_type='classification'):
46 |     pool_size = (2, 2)
47 |     num_outputs = bucket_sz
48 |     img_input = Input(shape = img_size)
49 |     x = Convolution2D(img_size[2], 1, 1, subsample=(1, 1), border_mode="same", activation='relu')(img_input)
50 |     x = Convolution2D(8, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu')(x)
51 |     x = MaxPooling2D(pool_size=pool_size)(x)
52 |     x = Convolution2D(16, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu')(x)
53 |     x =
MaxPooling2D(pool_size=pool_size)(x) 54 | x = Convolution2D(32, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu')(x) 55 | x = MaxPooling2D(pool_size=pool_size)(x) 56 | x = Flatten()(x) 57 | x = Dropout(keep_rate)(x) 58 | x = Dense(256, activation='relu')(x) 59 | x = Dropout(keep_rate)(x) 60 | 61 | if model_type == 'classification': 62 | o_st = Dense(num_outputs, activation='softmax', name='o_st', W_regularizer=l2(reg_fc))(x) 63 | model = Model(input=img_input, output=o_st) 64 | model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) 65 | print('Model is loaded: classification domain ') 66 | else: 67 | o_st = Dense(num_outputs, activation='linear', name='o_st', W_regularizer=l2(reg_fc))(x) 68 | model = Model(input=[img_input], output=[o_st]) 69 | model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy']) 70 | print('Model is loaded: regression domain') 71 | return model 72 | 73 | 74 | def model_wroscoe_mod(img_size, bucket_sz, keep_rate=1, reg_fc=0.01, reg_conv=0.0001, model_type='classification'): 75 | pool_size = (2, 2) 76 | num_outputs = bucket_sz 77 | img_input = Input(shape = img_size) 78 | x = Convolution2D(img_size[2], 1, 1, subsample=(1, 1), border_mode="same", activation='relu', W_regularizer=l2(reg_conv))(img_input) 79 | #x = Dropout(keep_rate)(x) 80 | x = Convolution2D(8, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu', W_regularizer=l2(reg_conv))(x) 81 | x = MaxPooling2D(pool_size=pool_size)(x) 82 | #x = Dropout(keep_rate)(x) 83 | x = Convolution2D(16, 4, 4, subsample=(1, 1), border_mode="valid", activation='relu', W_regularizer=l2(reg_conv))(x) 84 | x = MaxPooling2D(pool_size=pool_size)(x) 85 | #x = Dropout(keep_rate)(x) 86 | x = Convolution2D(32, 3, 3, subsample=(1, 1), border_mode="valid", activation='relu', W_regularizer=l2(reg_conv))(x) 87 | x = MaxPooling2D(pool_size=pool_size)(x) 88 | #x = Dropout(keep_rate)(x) 89 | x = Convolution2D(64, 4, 4, subsample=(1, 1), border_mode="valid", activation='relu', W_regularizer=l2(reg_conv))(x) 90 | x = MaxPooling2D(pool_size=pool_size)(x) 91 | x = Flatten()(x) 92 | x = Dropout(keep_rate)(x) 93 | x = Dense(128, activation='relu', W_regularizer=l2(reg_fc))(x) 94 | x = Dropout(keep_rate)(x) 95 | 96 | if model_type == 'classification': 97 | o_st = Dense(num_outputs, activation='softmax', name='o_st', W_regularizer=l2(reg_fc))(x) 98 | model = Model(input=img_input, output=o_st) 99 | model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) 100 | print('Model is loaded: classification domain ') 101 | else: 102 | o_st = Dense(num_outputs, activation='linear', name='o_st', W_regularizer=l2(reg_fc))(x) 103 | model = Model(input=[img_input], output=[o_st]) 104 | model.compile(optimizer='adam', loss='mean_squared_error', metrics=[coeff_determination]) 105 | print('Model is loaded: regression domain') 106 | return model 107 | 108 | 109 | 110 | def model_jmlbP3(img_sz, bucket_sz, keep_rate=1, reg_fc=0.01, reg_conv=0.0001, activation_fn = 'relu', model_type='class'): 111 | ''' 112 | steering angle predictor: takes an image and predict the steerign angle value 113 | img_sz: size of the image that the model accepts (128, 128, 3) 114 | activation_fn: non-linear function - relu, prelu or elu 115 | l2_reg - L2 regularization coefficient for fully connected layers 116 | ??? 
NEED TO INCLUDE REG_CONV 117 | ''' 118 | 119 | # size of pooling area for max pooling 120 | pool_size = (2, 2) 121 | num_outputs = bucket_sz 122 | model = Sequential() 123 | 124 | model.add(Convolution2D(8, 5, 5, subsample=(1, 1), border_mode="valid", name='conv1', input_shape=img_sz)) 125 | if activation_fn == 'elu': 126 | model.add(Activation('elu')) 127 | elif activation_fn == 'prelu': 128 | model.add(PReLU()) 129 | else: 130 | model.add(Activation('relu')) 131 | 132 | model.add(MaxPooling2D(pool_size=pool_size)) 133 | 134 | model.add(Convolution2D(8, 5, 5, subsample=(1, 1), border_mode="valid") ) 135 | if activation_fn == 'elu': 136 | model.add(Activation('elu')) 137 | elif activation_fn == 'prelu': 138 | model.add(PReLU()) 139 | else: 140 | model.add(Activation('relu')) 141 | model.add(MaxPooling2D(pool_size=pool_size)) 142 | 143 | model.add(Convolution2D(16, 4, 4, subsample=(1, 1), border_mode="valid") ) 144 | if activation_fn == 'elu': 145 | model.add(Activation('elu')) 146 | elif activation_fn == 'prelu': 147 | model.add(PReLU()) 148 | else: 149 | model.add(Activation('relu')) 150 | model.add(MaxPooling2D(pool_size=pool_size)) 151 | 152 | model.add(Convolution2D(16, 5, 5, subsample=(1, 1), border_mode="valid")) 153 | if activation_fn == 'elu': 154 | model.add(Activation('elu')) 155 | elif activation_fn == 'prelu': 156 | model.add(PReLU()) 157 | else: 158 | model.add(Activation('relu')) 159 | 160 | model.add(Flatten()) 161 | 162 | model.add(Dense(128, W_regularizer=l2(reg_fc))) 163 | if activation_fn == 'elu': 164 | model.add(Activation('elu')) 165 | elif activation_fn == 'prelu': 166 | model.add(PReLU()) 167 | else: 168 | model.add(Activation('relu')) 169 | 170 | model.add(Dense(50, W_regularizer=l2(reg_fc))) 171 | if activation_fn == 'elu': 172 | model.add(Activation('elu')) 173 | elif activation_fn == 'prelu': 174 | model.add(PReLU()) 175 | else: 176 | model.add(Activation('relu')) 177 | 178 | model.add(Dense(10, W_regularizer=l2(reg_fc))) 179 | if activation_fn == 'elu': 180 | model.add(Activation('elu')) 181 | elif activation_fn == 'prelu': 182 | model.add(PReLU()) 183 | else: 184 | model.add(Activation('relu')) 185 | 186 | if model_type == 'class': 187 | model.add(Dense(num_outputs, activation='linear', W_regularizer=l2(reg_fc), init='he_normal')) 188 | model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) 189 | print('Model is loaded: classification domain ') 190 | else: 191 | model.add(Dense(num_outputs, activation='linear', W_regularizer=l2(reg_fc), init='he_normal')) 192 | adam = Adam(lr=0.001) #optimizer 193 | model.compile(optimizer=adam, loss='mean_squared_error', metrics=['accuracy']) 194 | print('Model is loaded: regression domain') 195 | 196 | return model 197 | 198 | 199 | def model_vivekP3(img_size, bucket_sz, keep_rate=1, reg_fc=0.01, reg_conv=0.0001, activation_fn = 'elu', model_type='class'): 200 | pool_size = (2,2) 201 | num_outputs = bucket_sz 202 | filter_size = 3 203 | model = Sequential() 204 | model.add(Convolution2D(img_size[2],1,1, border_mode='valid', name='conv0', init='he_normal', input_shape=img_size)) 205 | model.add(Convolution2D(32,filter_size,filter_size, border_mode='valid',name='conv1', init='he_normal', W_regularizer=l2(reg_conv))) 206 | model.add(ELU()) 207 | model.add(Convolution2D(32,filter_size,filter_size, border_mode='valid', name='conv2', init='he_normal', W_regularizer=l2(reg_conv))) 208 | model.add(ELU()) 209 | model.add(MaxPooling2D(pool_size=pool_size)) 210 | model.add(Dropout(keep_rate)) 211 | 
model.add(Convolution2D(64,filter_size,filter_size, border_mode='valid', name='conv3', init='he_normal', W_regularizer=l2(reg_conv))) 212 | model.add(ELU()) 213 | model.add(Convolution2D(64,filter_size,filter_size, border_mode='valid', name='conv4', init='he_normal', W_regularizer=l2(reg_conv))) 214 | model.add(ELU()) 215 | model.add(MaxPooling2D(pool_size=pool_size)) 216 | model.add(Dropout(keep_rate)) 217 | model.add(Convolution2D(128,filter_size,filter_size, border_mode='valid',name='conv5', init='he_normal', W_regularizer=l2(reg_conv))) 218 | model.add(ELU()) 219 | model.add(Convolution2D(128,filter_size,filter_size, border_mode='valid',name='conv6', init='he_normal', W_regularizer=l2(reg_conv))) 220 | model.add(ELU()) 221 | model.add(MaxPooling2D(pool_size=pool_size)) 222 | model.add(Dropout(keep_rate)) 223 | model.add(Flatten()) 224 | model.add(Dense(512,name='hidden1', init='he_normal', W_regularizer=l2(reg_fc))) 225 | model.add(ELU()) 226 | model.add(Dropout(keep_rate)) 227 | model.add(Dense(64,name='hidden2', init='he_normal', W_regularizer=l2(reg_fc))) 228 | model.add(ELU()) 229 | model.add(Dropout(keep_rate)) 230 | model.add(Dense(16,name='hidden3',init='he_normal', W_regularizer=l2(reg_fc))) 231 | model.add(ELU()) 232 | model.add(Dropout(keep_rate)) 233 | if model_type == 'classification': 234 | model.add(Dense(num_outputs, activation='softmax', name='o_st', W_regularizer=l2(reg_fc))) 235 | model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) 236 | print('Model is loaded: classification domain ') 237 | else: 238 | model.add(Dense(num_outputs, activation='linear', name='o_st', W_regularizer=l2(reg_fc))) 239 | model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy']) 240 | print('Model is loaded: regression domain') 241 | return model 242 | 243 | 244 | ''' 245 | def combined_crossentropy(y_true, y_pred): 246 | y_true_steering = y_true[:, :num_outputs] 247 | y_true_throttle = y_true[:, num_outputs:] 248 | y_pred_steering = y_pred[:, :num_outputs] 249 | y_pred_throttle = y_pred[:, num_outputs:] 250 | 251 | steering_crossentropy = K.categorical_crossentropy(y_pred_steering, y_true_steering) 252 | throttle_crossentropy = K.categorical_crossentropy(y_pred_throttle, y_true_throttle) 253 | return (steering_crossentropy + throttle_crossentropy) / 2. 
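# combined_crossentropy above averages the per-head losses so that steering and
# throttle errors contribute equally to the gradient. Note the argument order:
# the prediction is passed first, then the target, which appears to follow the
# legacy Keras 1.x backend signature this file is written against.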
254 | 
255 | def create_model_relu2():
256 |     # size of pooling area for max pooling
257 |     pool_size = (2, 2)
258 | 
259 |     model = Sequential()
260 | 
261 |     model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same", input_shape=(row, col, ch)))
262 |     model.add(Activation('relu'))
263 |     model.add(MaxPooling2D(pool_size=pool_size))
264 | 
265 |     model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
266 |     model.add(Activation('relu'))
267 |     model.add(MaxPooling2D(pool_size=pool_size))
268 | 
269 |     model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
270 |     model.add(Activation('relu'))
271 |     model.add(MaxPooling2D(pool_size=pool_size))
272 | 
273 |     model.add(Flatten())
274 |     model.add(Dense(256, init='he_normal'))
275 |     model.add(Activation('relu'))
276 |     model.add(Dropout(.25))
277 | 
278 |     model.add(Dense(num_outputs, init='he_normal'))
279 |     model.add(Activation('softmax'))
280 | 
281 |     sgd = RMSprop(lr=0.001)
282 |     model.compile(optimizer=sgd, loss=combined_crossentropy, metrics=['accuracy'])
283 | 
284 |     return model
285 | 
286 | def create_model_relu():
287 |     pool_size = (2, 2)
288 |     model = Sequential()
289 | 
290 |     model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same", input_shape=(row, col, ch)))
291 |     model.add(Activation('relu'))
292 |     # model.add(MaxPooling2D(pool_size=pool_size)) #added
293 | 
294 |     model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
295 |     model.add(Activation('relu'))
296 |     model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
297 |     model.add(Flatten())
298 |     model.add(Dropout(.5))
299 |     model.add(Activation('relu'))
300 |     # model.add(Dense(512, init='he_normal'))
301 |     model.add(Dense(256, init='he_normal')) #mod
302 |     model.add(Dropout(.5))
303 |     model.add(Activation('relu'))
304 | 
305 |     model.add(Dense(num_outputs, init='he_normal'))
306 |     model.add(Activation('softmax'))
307 | 
308 |     sgd = RMSprop(lr=0.001)
309 |     # sgd = Adam(lr=0.001) #mod
310 |     model.compile(optimizer=sgd, loss=combined_crossentropy, metrics=['accuracy'])
311 | 
312 |     return model
313 | 
314 | def create_model_2softmax(img_size):
315 |     keep_rate = 0.3
316 |     pool_size = (2, 2)
317 |     img_input = Input(shape=img_size)
318 |     x = Convolution2D(16, 5, 5, subsample=(2, 2), W_regularizer=l2(0.001), border_mode="same", activation='relu')(img_input)
319 |     x = MaxPooling2D(pool_size=pool_size)(x)
320 |     x = Dropout(keep_rate)(x)
321 |     x = Convolution2D(32, 2, 2, subsample=(1, 1), W_regularizer=l2(0.001), border_mode="valid", activation='relu')(x)
322 |     x = MaxPooling2D(pool_size=pool_size)(x)
323 | 
324 |     x = Flatten()(x)
325 |     x = Dropout(keep_rate)(x)
326 |     x = Dense(128, activation='relu')(x)
327 |     x = Dropout(keep_rate)(x)
328 | 
329 |     o_st = Dense(num_outputs, activation='softmax', name='o_st')(x)
330 |     o_thr = Dense(num_outputs, activation='softmax', name='o_thr')(x)
331 |     model = Model(input=img_input, output=[o_st, o_thr])
332 |     model.compile(optimizer='adam', loss={'o_st': 'categorical_crossentropy', 'o_thr': 'categorical_crossentropy'}, metrics=['accuracy'])
333 | 
334 |     return model
335 | 
336 | def create_modelB_2softmax(img_size):
337 |     keep_rate = 0.5
338 |     pool_size = (2, 2)
339 |     img_input = Input(shape=img_size)
340 |     x = Convolution2D(16, 5, 5, subsample=(1, 1), border_mode="same", activation='relu')(img_input)
341 |     x = MaxPooling2D(pool_size=pool_size)(x)
342 |     x = Dropout(keep_rate)(x)
343 |     x = Convolution2D(32, 5, 5, subsample=(1, 1), border_mode="same", activation='relu')(x)
344 |     x = MaxPooling2D(pool_size=pool_size)(x)
345 |     x = Dropout(keep_rate)(x)
346 |     x = Convolution2D(64, 4, 4, subsample=(1, 1), border_mode="valid", activation='relu')(x)
347 |     x = MaxPooling2D(pool_size=pool_size)(x)
348 |     x = Flatten()(x)
349 |     x = Dropout(keep_rate)(x)
350 | 
351 |     x1 = Dense(900, activation='relu', W_regularizer=l2(0.001))(x)
352 |     x1 = Dropout(keep_rate)(x1)
353 |     x1 = Dense(110, activation='relu', W_regularizer=l2(0.001))(x1)
354 |     x1 = Dropout(keep_rate)(x1)
355 |     o_st = Dense(num_outputs, activation='softmax', name='o_st')(x1)
356 | 
357 |     x2 = Dense(800, activation='relu', W_regularizer=l2(0.001))(x)
358 |     x2 = Dropout(keep_rate)(x2)
359 |     x2 = Dense(128, activation='relu', W_regularizer=l2(0.001))(x2)
360 |     x2 = Dropout(keep_rate)(x2)
361 |     o_thr = Dense(num_outputs, activation='softmax', name='o_thr')(x2)
362 |     model = Model(input=img_input, output=[o_st, o_thr])
363 |     model.compile(optimizer='adam', loss={'o_st': 'categorical_crossentropy', 'o_thr': 'categorical_crossentropy'}, metrics=['accuracy'])
364 | 
365 |     return model
366 | '''
--------------------------------------------------------------------------------
/ConvNet/train_data_augmentation.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Cloud connected autonomous RC car.
4 | 
5 | Copyright 2016 Visible Energy Inc. All Rights Reserved.
6 | """
7 | __license__ = """
8 | Licensed under the Apache License, Version 2.0 (the "License");
9 | you may not use this file except in compliance with the License.
10 | You may obtain a copy of the License at
11 | http://www.apache.org/licenses/LICENSE-2.0
12 | Unless required by applicable law or agreed to in writing, software
13 | distributed under the License is distributed on an "AS IS" BASIS,
14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 | """
18 | 
19 | import numpy as np
20 | import sys
21 | import os
22 | import csv
23 | #from keras.models import Sequential, Graph
24 | from keras.optimizers import SGD, RMSprop, Adagrad, Adam
25 | from keras.layers.core import Dense, Dropout, Activation
26 | from keras.layers import Input, Convolution2D, MaxPooling2D, AveragePooling2D, Flatten, PReLU
27 | from keras.models import Sequential, Model
28 | from config import TrainConfig
29 | from keras import backend as K
30 | from sklearn.utils import shuffle
31 | from keras import callbacks
32 | from keras.callbacks import ModelCheckpoint
33 | from keras.regularizers import l2
34 | import cv2
35 | # utils.py is a link to ../utils.py
36 | import utils
37 | from sklearn.model_selection import train_test_split
38 | 
39 | SEED = 42
40 | 
41 | # load an image and convert it from BGR to YCrCb
42 | def img_array(img_adress, label):
43 |     img = cv2.imread(img_adress)
44 |     img = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
45 |     return (img, label)
46 | 
47 | 
48 | def image_hflip(img, label):
49 |     '''
50 |     Randomly flip the image along the horizontal axis: 1/2 chance that the image will be flipped.
51 |     img: original image in array type
52 |     label: steering angle value of the original image, mirrored around the neutral value 90 when the image is flipped
53 |     '''
54 |     choice = np.random.choice([0, 1])
55 |     if choice == 1:
56 |         img = cv2.flip(img, 1)
57 |         delta_label = label - 90
58 |         label = 90 - delta_label  # i.e. label -> 180 - label
59 | 
60 |     return (img, label)
61 | 
62 | 
63 | 
64 | def transformation_brightness(img):
65 |     '''
66 |     Adjust the brightness of the image by a randomly generated factor between 0.5 (darker) and 1.0 (unchanged).
67 |     img: original image in array type
68 |     returns: the RGB image with adjusted brightness
69 |     '''
70 |     hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
71 |     # change Value/Brightness/Luminance: alpha * V
72 |     alpha = np.random.uniform(low=0.5, high=1.0, size=None)
73 |     v = hsv[:, :, 2]
74 |     v = v * alpha
75 |     hsv[:, :, 2] = v
76 |     rgb = cv2.cvtColor(hsv.astype('uint8'), cv2.COLOR_HSV2RGB)
77 | 
78 |     return rgb
79 | 
80 | 
81 | def gamma_transform(img, label):
82 |     '''
83 |     Adjust the brightness of the image with a random gamma correction
84 |     img: original image in array type
85 |     label: steering angle value of the original image
86 |     '''
87 |     gamma = np.random.uniform(low=0.2, high=1.0, size=None)
88 |     inv_gamma = 1.0 / gamma
89 |     img = 255 * (img / 255.) ** inv_gamma
90 | 
91 |     return (img, label)
92 | 
93 | 
94 | def pixel_scaling(img, label):
95 |     return ((img / 127.5 - 1), label)
96 | 
97 | 
98 | def image_transform(img_adress, label, target_sz):
99 |     # open the image as an array from its address
100 |     img, label = img_array(img_adress, label)
101 |     # change luminance with a random gamma correction
102 |     img, label = gamma_transform(img, label)
103 |     # resize image
104 |     # here we take only the Y component
105 |     img = cv2.resize(img[:, :, 0], target_sz, interpolation=cv2.INTER_LINEAR)
106 |     # horizontal flip
107 |     img, label = image_hflip(img, label)
108 |     # pixel scaling
109 |     img, label = pixel_scaling(img, label)
110 |     return (img, label)
111 | 
112 | 
113 | 
114 | def batch_generator(x, y, num_buckets, dir_data, batch_size, img_sz, training=True, monitor=True, yieldXY=True):
115 |     """
116 |     Generate a training batch: yield X and Y data when the batch is filled.
117 |     Data augmentation scheme: horizontal flip and random gamma (brightness) change.
118 |     When the end of the original dataset is reached, x and y are reshuffled
119 |     and the generator loops from the start again.
120 |     x: list of the addresses of all images to be used for training
121 |     y: labels, one row per image: [steering angle, throttle]
122 |     training: use True to generate a training batch with the data augmentation scheme,
123 |     use False to generate a validation batch
124 |     batch_size: size of the batch (X, Y)
125 |     img_sz: size of the image (height, width, channel) to generate
126 |     num_buckets: number of classes for the one-hot steering/throttle labels
127 |     dir_data: directory holding the image files listed in x
128 |     monitor: unused in this version
129 |     yieldXY: if True, the generator yields (X, {'o_st': Y_st, 'o_thr': Y_th});
130 |     otherwise it yields X only (useful for predict_generator())
131 |     """
132 | 
133 |     if training:
134 |         x, y = shuffle(x, y)
135 |     offset = 0
136 |     '''
137 |     The while loop runs for as long as fit_generator keeps pulling batches, i.e. until 'samples_per_epoch' examples have been generated for the epoch.
138 |     '''
139 |     while True:
140 |         # Initialize the X and Y arrays
141 |         X = np.zeros((batch_size, *img_sz), dtype=np.float64)
142 |         Y_st = np.zeros((batch_size, num_buckets), dtype=np.float64)
143 |         Y_th = np.zeros((batch_size, num_buckets), dtype=np.float64)
144 |         # arrays of labels - steering and throttle as one-hot arrays
145 |         # Generate a batch
146 |         for example in range(batch_size):
147 |             img_adress, label_st, label_th = dir_data + '/' + x[example + offset], y[example + offset, 0], y[example + offset, 1]
148 |             assert os.path.exists(img_adress), 'Image file [' + img_adress + '] not found'
149 |             if training:
150 |                 #img, label = image_transformation(img_adress, label, (img_sz[0], img_sz[1]))
151 |                 img, label_st = image_transform(img_adress, label_st, target_sz=(img_sz[0], img_sz[1]))
152 |             else:
153 |                 img, label_st = image_transform(img_adress, label_st, target_sz=(img_sz[0], img_sz[1]))  # NB: validation currently goes through the same transform as training
154 | 
155 |             # update the batch X and Y arrays with the new example
156 |             X[example, :, :, 0] = img
157 |             Y_st[example, utils.steering2bucket(label_st)] = 1
158 |             Y_th[example, utils.steering2bucket(label_th)] = 1  # NB: throttle reuses the steering bucketing
159 |             # when reaching the end of the original dataset x, loop from the start again
160 |             # after reshuffling the original dataset
161 |             if (example + 1) + offset > len(y) - 1:
162 |                 x, y = shuffle(x, y)
163 |                 offset = 0
164 |         if yieldXY:
165 |             yield (X, {'o_st': Y_st, 'o_thr': Y_th})
166 |         else:
167 |             yield X
168 | 
169 |         offset = offset + batch_size
170 | 
171 | 
172 | def combined_crossentropy(y_true, y_pred):
173 |     y_true_steering = y_true[:, :num_outputs]
174 |     y_true_throttle = y_true[:, num_outputs:]
175 |     y_pred_steering = y_pred[:, :num_outputs]
176 |     y_pred_throttle = y_pred[:, num_outputs:]
177 | 
178 |     steering_crossentropy = K.categorical_crossentropy(y_pred_steering, y_true_steering)
179 |     throttle_crossentropy = K.categorical_crossentropy(y_pred_throttle, y_true_throttle)
180 |     return (steering_crossentropy + throttle_crossentropy) / 2.
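# ----------------------------------------------------------------------------
# A minimal sketch (not from the original source) of the two label layouts
# used above. combined_crossentropy assumes y_true/y_pred are single vectors
# laid out as [steering one-hot | throttle one-hot] (length 2*num_outputs)
# and averages the two categorical cross-entropies; for num_outputs = 3, a
# target row encoding steering bucket 1 and throttle bucket 2 would be:
#
#     y_true = np.array([[0., 1., 0.,   0., 0., 1.]])
#
# batch_generator instead yields the two heads as a dict keyed by layer name,
# which is what the two-softmax models below expect. A smoke test, assuming a
# hypothetical data_dir containing labels.csv and the listed frames:
#
#     gen = batch_generator(x_, y_, num_buckets=num_outputs, dir_data=data_dir,
#                           batch_size=4, img_sz=(128, 128, 1), training=True)
#     X, targets = next(gen)
#     assert X.shape == (4, 128, 128, 1)
#     assert targets['o_st'].shape == (4, num_outputs)
# ----------------------------------------------------------------------------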
181 | 
182 | # NB: the create_model_relu* variants below reference row, col and ch, which are not defined in this file; they are kept for reference and not registered in `models`
183 | def create_model_relu2():
184 |     # size of pooling area for max pooling
185 |     pool_size = (2, 2)
186 | 
187 |     model = Sequential()
188 | 
189 |     model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same", input_shape=(row, col, ch)))
190 |     model.add(Activation('relu'))
191 |     model.add(MaxPooling2D(pool_size=pool_size))
192 | 
193 |     model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
194 |     model.add(Activation('relu'))
195 |     model.add(MaxPooling2D(pool_size=pool_size))
196 | 
197 |     model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
198 |     model.add(Activation('relu'))
199 |     model.add(MaxPooling2D(pool_size=pool_size))
200 | 
201 |     model.add(Flatten())
202 |     model.add(Dense(256, init='he_normal'))
203 |     model.add(Activation('relu'))
204 |     model.add(Dropout(.25))
205 | 
206 |     model.add(Dense(num_outputs, init='he_normal'))
207 |     model.add(Activation('softmax'))
208 | 
209 |     sgd = RMSprop(lr=0.001)
210 |     model.compile(optimizer=sgd, loss=combined_crossentropy, metrics=['accuracy'])
211 | 
212 |     return model
213 | 
214 | 
215 | 
216 | def create_model_relu():
217 |     pool_size = (2, 2)
218 |     model = Sequential()
219 | 
220 |     model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same", input_shape=(row, col, ch)))
221 |     model.add(Activation('relu'))
222 |     # model.add(MaxPooling2D(pool_size=pool_size)) #added
223 |     model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
224 |     model.add(Activation('relu'))
225 |     model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
226 |     model.add(Flatten())
227 |     model.add(Dropout(.5))
228 |     model.add(Activation('relu'))
229 |     # model.add(Dense(512, init='he_normal'))
230 |     model.add(Dense(256, init='he_normal')) #mod
231 |     model.add(Dropout(.5))
232 |     model.add(Activation('relu'))
233 |     model.add(Dense(num_outputs, init='he_normal'))
234 |     model.add(Activation('softmax'))
235 | 
236 |     sgd = RMSprop(lr=0.001)
237 |     # sgd = Adam(lr=0.001) #mod
238 |     model.compile(optimizer=sgd, loss=combined_crossentropy, metrics=['accuracy'])
239 | 
240 |     return model
241 | 
242 | 
243 | def create_model_2softmax(img_size):
244 |     keep_rate = 0.5
245 |     pool_size = (2, 2)
246 |     img_input = Input(shape=img_size)
247 |     x = Convolution2D(16, 5, 5, subsample=(2, 2), W_regularizer=l2(0.001), border_mode="same", activation='relu')(img_input)
248 |     x = MaxPooling2D(pool_size=pool_size)(x)
249 |     x = Dropout(keep_rate)(x)
250 |     x = Convolution2D(32, 2, 2, subsample=(1, 1), W_regularizer=l2(0.001), border_mode="valid", activation='relu')(x)
251 |     x = MaxPooling2D(pool_size=pool_size)(x)
252 |     # end of feature detector
253 |     x = Flatten()(x)
254 |     x = Dropout(keep_rate)(x)
255 |     x = Dense(128, activation='relu')(x)
256 |     x = Dropout(keep_rate)(x)
257 |     o_st = Dense(num_outputs, activation='softmax', name='o_st')(x)
258 |     o_thr = Dense(num_outputs, activation='softmax', name='o_thr')(x)
259 |     model = Model(input=img_input, output=[o_st, o_thr])
260 |     model.compile(optimizer='adam', loss={'o_st': 'categorical_crossentropy', 'o_thr': 'categorical_crossentropy'}, metrics=['accuracy'])
261 | 
262 |     return model
263 | 
264 | 
265 | def create_modelB_2softmax(img_size):
266 |     keep_rate = 0.5
267 |     pool_size = (2, 2)
268 |     img_input = Input(shape=img_size)
269 |     x = Convolution2D(16, 5, 5, subsample=(1, 1), border_mode="same", activation='relu')(img_input)
270 |     x = MaxPooling2D(pool_size=pool_size)(x)
271 |     x = Dropout(keep_rate)(x)
272 |     x = Convolution2D(32, 5, 5, subsample=(1, 1), border_mode="same", activation='relu')(x)
273 |     x = MaxPooling2D(pool_size=pool_size)(x)
274 |     x = Dropout(keep_rate)(x)
275 |     x = Convolution2D(64, 4, 4, subsample=(1, 1), border_mode="valid", activation='relu')(x)
276 |     x = MaxPooling2D(pool_size=pool_size)(x)
277 |     x = Flatten()(x)
278 |     x = Dropout(keep_rate)(x)
279 | 
280 |     x1 = Dense(900, activation='relu', W_regularizer=l2(0.001))(x)
281 |     x1 = Dropout(keep_rate)(x1)
282 |     x1 = Dense(110, activation='relu', W_regularizer=l2(0.001))(x1)
283 |     x1 = Dropout(keep_rate)(x1)
284 |     o_st = Dense(num_outputs, activation='softmax', name='o_st')(x1)
285 | 
286 |     x2 = Dense(800, activation='relu', W_regularizer=l2(0.001))(x)
287 |     x2 = Dropout(keep_rate)(x2)
288 |     x2 = Dense(128, activation='relu', W_regularizer=l2(0.001))(x2)
289 |     x2 = Dropout(keep_rate)(x2)
290 |     o_thr = Dense(num_outputs, activation='softmax', name='o_thr')(x2)
291 |     model = Model(input=img_input, output=[o_st, o_thr])
292 |     model.compile(optimizer='adam', loss={'o_st': 'categorical_crossentropy', 'o_thr': 'categorical_crossentropy'}, metrics=['accuracy'])
293 | 
294 |     return model
295 | 
296 | 
297 | 
298 | def create_model_light(img_size):
299 |     keep_rate = 0.5
300 |     pool_size = (2, 2)
301 |     img_input = Input(shape=img_size)
302 |     x = Convolution2D(8, 5, 5, subsample=(1, 1), border_mode="same", activation='relu')(img_input)
303 |     x = MaxPooling2D(pool_size=pool_size)(x)
304 |     #x = Dropout(keep_rate)(x)
305 |     x = Convolution2D(16, 5, 5, subsample=(1, 1), border_mode="same", activation='relu')(x)
306 |     x = MaxPooling2D(pool_size=pool_size)(x)
307 |     x = Convolution2D(32, 4, 4, subsample=(1, 1), border_mode="valid", activation='relu')(x)
308 |     x = MaxPooling2D(pool_size=pool_size)(x)
309 |     x = Convolution2D(32, 4, 4, subsample=(1, 1), border_mode="valid", activation='relu')(x)
310 |     x = MaxPooling2D(pool_size=pool_size)(x)
311 |     # end of feature detector
312 |     x = Flatten()(x)
313 |     x = Dropout(keep_rate)(x)
314 |     x = Dense(256, activation='relu')(x)
315 |     x = Dropout(keep_rate)(x)
316 |     x = Dense(50, activation='relu')(x)
317 |     x = Dropout(keep_rate)(x)
318 |     o_st = Dense(num_outputs, activation='softmax', name='o_st')(x)
319 |     o_thr = Dense(num_outputs, activation='softmax', name='o_thr')(x)
320 |     model = Model(input=img_input, output=[o_st, o_thr])
321 |     model.compile(optimizer='adam', loss={'o_st': 'categorical_crossentropy', 'o_thr': 'categorical_crossentropy'}, metrics=['accuracy'])
322 | 
323 |     return model
324 | 
325 | 
326 | from cnnModels import model_wroscoe_mod  # not defined in this file; assumed to live in ConvNet/cnnModels.py
327 | models = {
328 |     'model_wroscoe_mod': model_wroscoe_mod,
329 | }
330 | 
331 | if __name__ == "__main__":
332 |     config = TrainConfig()
333 | 
334 |     try:
335 |         data_path = os.path.expanduser(sys.argv[1])
336 |     except Exception as e:
337 |         print(e, "Usage: ./train_data_augmentation.py <data_dir>")
338 |         sys.exit(-1)
339 | 
340 |     if not os.path.exists(data_path):
341 |         print("Directory %s not found." % data_path)
342 |         sys.exit(-1)
343 | 
344 | 
345 |     #############
346 |     # Parameters
347 |     ##########
348 |     skip = config.skip_ahead
349 |     train_img_sz = config.img_resample_dim
350 |     train_img_ch = config.num_channels
351 |     num_epoch = config.num_epoch
352 |     batch_size = config.batch_size
353 |     validation_split = config.validation_split
354 |     num_outputs = config.num_buckets * 1
355 |     data_augmentation = 100
356 |     samples_per_epoch = batch_size * data_augmentation
357 | 
358 |     ##########
359 |     # Data Preparation
360 |     ###########
361 |     print("loading train data csv")
362 |     log_data = []
363 |     with open(data_path + '/labels.csv', 'r') as csvfile:
364 |         data_vid = csv.reader(csvfile, delimiter=',')
365 |         for i, row in enumerate(data_vid):
366 |             if i >= skip:
367 |                 log_data.append(row)
368 | 
369 |     # log = [img_adress, steering val, throttle val]
370 |     log_data = np.array(log_data)
371 |     x_ = log_data[:, 0]
372 |     y_ = log_data[:, 1:].astype(float)
373 | 
374 |     # clip the steering and throttle labels to [60, 120] (check histogram in notebook)
375 |     y_[:, 0] = np.clip(y_[:, 0], 60, 120)
376 |     y_[:, 1] = np.clip(y_[:, 1], 60, 120)
377 | 
378 | 
379 |     # shuffle data
380 |     x_, y_ = shuffle(x_, y_)
381 |     # split the train/validation set with the ratio given by validation_split (0.2 -> 80/20)
382 |     X_train, X_val, y_train, y_val = train_test_split(x_, y_, test_size=validation_split, random_state=SEED)
383 | 
384 |     print('Train set size: {} | Validation set size: {}'.format(len(X_train), len(X_val)))
385 |     nb_val_samples = len(y_val) - len(y_val) % batch_size  # make the validation set size a multiple of batch_size
386 | 
387 |     ####################
388 |     # MODEL
389 |     ######
390 |     # set of callbacks to save model weights during training when the loss on the validation set decreases
391 |     #model_path = os.path.expanduser('model.h5')
392 |     # Save the model after each epoch if the validation loss improved.
393 |     save_best = callbacks.ModelCheckpoint("{}/autonomia_cnn_step.h5".format(data_path), monitor='val_loss', verbose=1,
394 |                                           save_best_only=True, mode='min')
395 |     # stop training if the validation loss doesn't improve for 5 consecutive epochs
396 |     early_stop = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5,
397 |                                          verbose=0, mode='auto')
398 |     ## checkpoint: save weights every time val_o_st_acc improves
399 |     weight_fname = data_path + "/weights-improvement-{epoch:02d}-{val_o_st_acc:.2f}.h5"
400 |     checkpoint = ModelCheckpoint(weight_fname, monitor='val_o_st_acc', verbose=1, save_best_only=True, mode='max')
401 | 
402 |     #callbacks_list = [save_best, early_stop]
403 |     callbacks_list = [checkpoint]
404 | 
405 |     model = models[config.model]((*train_img_sz, train_img_ch))
406 |     print("---------------------------")
407 |     print("model %s is created and compiled\r\n" % config.model)
408 |     print(model.summary())
409 | 
410 |     # and trained it via:
411 |     #history = model.fit(X, , batch_size=batch_size, nb_epoch=num_epoch, verbose=1, validation_split=validation_split, callbacks=callbacks_list)
412 |     history = model.fit_generator(batch_generator(X_train, y_train, num_buckets=num_outputs, dir_data=data_path, batch_size=batch_size, img_sz=(*train_img_sz, train_img_ch), training=True),
413 |                                   samples_per_epoch=samples_per_epoch, nb_val_samples=nb_val_samples,
414 |                                   validation_data=batch_generator(X_val, y_val, num_buckets=num_outputs, dir_data=data_path, batch_size=batch_size, img_sz=(*train_img_sz, train_img_ch),
415 |                                                                   training=False, monitor=False),
416 |                                   nb_epoch=num_epoch, verbose=1, callbacks=callbacks_list)
417 | 
418 | 
419 |     print("saving model and weights")
420 |     with open("{}/autonomia_cnn.json".format(data_path), 'w') as f:
421 |         f.write(model.to_json())
422 | 
423 |     model.save_weights("{}/autonomia_cnn.h5".format(data_path))
424 | 
425 |     # clear the session to avoid an error at the end of the program: "AttributeError: 'NoneType' object has no attribute 'TF_DeleteStatus'"
426 |     # The alternative does not work: import gc; gc.collect()
427 |     # https://github.com/tensorflow/tensorflow/issues/3388
428 |     K.clear_session()
429 | 
--------------------------------------------------------------------------------
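A note on how the pieces above fit together: the checkpoint monitors `val_o_st_acc` and the generator yields `{'o_st': ..., 'o_thr': ...}` targets, which presumes a model with named `o_st`/`o_thr` softmax heads (e.g. `create_model_light`). Assuming Keras 1.x as used throughout, the JSON/weights pair written at the end of training could be reloaded for inference roughly as in this sketch; `data_dir` and `frame.jpg` are hypothetical, and the preprocessing mirrors `img_array`, `image_transform` and `pixel_scaling`:

```python
# Minimal inference sketch (assumptions noted above): reload the saved model
# and predict the steering/throttle buckets for a single frame.
import cv2
import numpy as np
from keras.models import model_from_json

with open('data_dir/autonomia_cnn.json') as f:   # hypothetical path
    model = model_from_json(f.read())
model.load_weights('data_dir/autonomia_cnn.h5')

img = cv2.imread('frame.jpg')                    # hypothetical frame
img = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)    # same color space as training
img = cv2.resize(img[:, :, 0], (128, 128), interpolation=cv2.INTER_LINEAR)  # Y channel, img_resample_dim
X = (img / 127.5 - 1.0).reshape(1, 128, 128, 1)  # same scaling as pixel_scaling()
o_st, o_thr = model.predict(X)                   # one softmax per head
print('steering bucket:', int(np.argmax(o_st)), '| throttle bucket:', int(np.argmax(o_thr)))
```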