├── README.md
└── CozmoAutoDriveCNN
├── Tensorflow
├── cozmo_cnn_models.py
├── TensorflowUtil.py
└── cozmo_cnn.py
└── DriveTool
├── drive_by_cnn.py
└── capture_imgs_remote_driving_cozmo.py
/README.md:
--------------------------------------------------------------------------------
1 | # CozmoSelfDriveToyUsingCNN
2 | ## Note
3 | This toy project requires TensorFlow (1.x API) and the Cozmo SDK provided by Anki ([example source code](https://github.com/anki/cozmo-python-sdk/tree/master/examples/apps)).
4 |
5 | ### Using CNN
6 | 1. The base model (five conv layers plus fully-connected layers; see cozmo_cnn_models.py) is provided in the Tensorflow dir.
7 |
8 | ### Training and Driving
9 | 1. Use capture_imgs_remote_driving_cozmo.py in the DriveTool dir to collect training pics while remote-driving Cozmo.
10 | 2. Train the CNN model with cozmo_cnn.py in the Tensorflow dir to produce a checkpoint.
11 | 3. Use drive_by_cnn.py in the DriveTool dir to let the trained model drive Cozmo (see the sketch below).
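   |
   | As a rough sketch, step 3 boils down to something like this (run from the Tensorflow dir so cozmo_cnn_models.py is importable; `example_frame.jpg` is a placeholder path, and the checkpoint name is the one cozmo_cnn.py saves):
   |
   | ```python
   | import cv2
   | import numpy as np
   | import tensorflow as tf
   | from cozmo_cnn_models import cnn_cccccfffff
   |
   | sess = tf.InteractiveSession()
   | model = cnn_cccccfffff()
   | saver = tf.train.Saver()
   | saver.restore(sess, './cozmo_run_modelv2.ckpt')  # checkpoint written by cozmo_cnn.py
   |
   | # Same preprocessing as training: resize to 200x150, drop the top 35 rows, scale to [0, 1]
   | img = cv2.imread('example_frame.jpg')  # placeholder image path
   | img = cv2.resize(img, (200, 150), interpolation=cv2.INTER_CUBIC)[35:, :, :]
   | img = img.astype(np.float32) / 255.0
   |
   | lwheel, rwheel = model.y_out.eval(feed_dict={
   |     model.x: [img],
   |     model.keep_prob_fc1: 1.0, model.keep_prob_fc2: 1.0,
   |     model.keep_prob_fc3: 1.0, model.keep_prob_fc4: 1.0})[0]
   | print('predicted (lwheel, rwheel) labels:', lwheel, rwheel)  # labels are wheel speeds / 100
   | ```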
12 |
13 | ### Result Video on YouTube
14 | 1. The model was trained on this simple toy track drawn in chalk, so the result is demonstrated on the same track.
15 | 2. After capturing 5034 pics and training the version 1 model, the final test accuracy (computed in cozmo_cnn.py as 1 minus the mean absolute error) was about 60%.
16 | 3. [Result Video](https://www.youtube.com/watch?v=klVTxu8CQI4)
17 |
--------------------------------------------------------------------------------
/CozmoAutoDriveCNN/Tensorflow/cozmo_cnn_models.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Copyright (c) 2017, benjamin wu
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 | * Redistributions of source code must retain the above copyright
9 | notice, this list of conditions and the following disclaimer.
10 | * Redistributions in binary form must reproduce the above copyright
11 | notice, this list of conditions and the following disclaimer in the
12 | documentation and/or other materials provided with the distribution.
13 | * Neither the name of Ryan Dellana nor the
14 | names of its contributors may be used to endorse or promote products
15 | derived from this software without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED. IN NO EVENT SHALL Ryan Dellana BE LIABLE FOR ANY
21 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 | """
28 | """
29 | The original net from the paper had 1164 neurons on the first fully-connected layer instead of 512.
30 | Shrinking that first FC layer roughly halves the size of the trained model and speeds up training.
31 | """
32 |
33 | import tensorflow as tf
34 | import numpy as np
35 |
36 | from TensorflowUtil import weight_variable, bias_variable, conv2d, conv_layer
37 | from TensorflowUtil import conv_layer_, fc_layer, fc_layer_, identity_in, flattened
38 | from TensorflowUtil import normal_log, negative_log_likelihood, max_pool_2x2
39 |
40 |
41 | class cnn_cccccfffff(object):
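   |     # Architecture: five conv layers (24/36/48/64/64 filters) followed by fully-connected
   |     # layers of 512/100/50/10 units with dropout, regressing a 2-vector (left/right wheel
   |     # labels); the loss below is the mean absolute error between prediction and target.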
42 |
43 | def __init__(self):
45 | self.x = tf.placeholder(tf.float32, [None, 115, 200, 3])
46 | self.y_ = tf.placeholder(tf.float32, [None, 2])
47 | (self.h_conv1, _) = conv_layer(self.x, conv=(5, 5), stride=2, n_filters=24, use_bias=True)
48 | (self.h_conv2, _) = conv_layer(self.h_conv1, conv=(5, 5), stride=2, n_filters=36, use_bias=True)
49 | (self.h_conv3, _) = conv_layer(self.h_conv2, conv=(5, 5), stride=2, n_filters=48, use_bias=True)
50 | (self.h_conv4, _) = conv_layer(self.h_conv3, conv=(3, 3), stride=1, n_filters=64, use_bias=True)
51 | (self.h_conv5, _) = conv_layer(self.h_conv4, conv=(3, 3), stride=1, n_filters=64, use_bias=True)
52 | self.h_conv5_flat = flattened(self.h_conv5)
53 | (self.h_fc1_drop, _, _, self.keep_prob_fc1) = fc_layer(x=self.h_conv5_flat, n_neurons=512, activation=tf.nn.relu, use_bias=True, dropout=True)
54 | (self.h_fc2_drop, _, _, self.keep_prob_fc2) = fc_layer(self.h_fc1_drop, 100, tf.nn.relu, True, True)
55 | (self.h_fc3_drop, _, _, self.keep_prob_fc3) = fc_layer(self.h_fc2_drop, 50, tf.nn.relu, True, True)
56 | (self.h_fc4_drop, _, _, self.keep_prob_fc4) = fc_layer(self.h_fc3_drop, 10, tf.nn.relu, True, True)
57 | W_fc5 = weight_variable([10, 2])
58 | b_fc5 = bias_variable([2])
59 | self.y_out = tf.matmul(self.h_fc4_drop, W_fc5) + b_fc5
60 | self.loss = tf.reduce_mean(tf.abs(tf.subtract(self.y_, self.y_out)))
61 |
62 |
--------------------------------------------------------------------------------
/CozmoAutoDriveCNN/Tensorflow/TensorflowUtil.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Copyright (c) 2017, benjamin wu
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 | * Redistributions of source code must retain the above copyright
9 | notice, this list of conditions and the following disclaimer.
10 | * Redistributions in binary form must reproduce the above copyright
11 | notice, this list of conditions and the following disclaimer in the
12 | documentation and/or other materials provided with the distribution.
13 | * Neither the name of Ryan Dellana nor the
14 | names of its contributors may be used to endorse or promote products
15 | derived from this software without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED. IN NO EVENT SHALL Ryan Dellana BE LIABLE FOR ANY
21 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 | """
28 |
29 |
30 | import tensorflow as tf
31 | import numpy as np
32 |
33 | def weight_variable(shape):
34 | initial = tf.truncated_normal(shape, stddev=0.1)
35 | return tf.Variable(initial)
36 |
37 | def bias_variable(shape):
38 | initial = tf.constant(0.0, shape=shape)
39 | return tf.Variable(initial)
40 |
41 | def conv2d(x, W, stride):
42 | return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')
43 |
44 | def conv_layer(x, conv=(3, 3), stride=1, n_filters=32, use_bias=False):
45 | W = weight_variable([conv[0], conv[1], x.get_shape()[-1].value, n_filters])
46 | if use_bias:
47 | b = bias_variable([n_filters])
48 | return (tf.nn.relu(conv2d(x, W, stride=stride) + b), W)
49 | else:
50 | return (tf.nn.relu(conv2d(x, W, stride=stride)), W)
51 |
52 | def conv_layer_(x, conv=(3, 3), stride=1, n_filters=32, use_bias=False, weights=None):
53 | return tf.nn.relu(conv2d(x, weights, stride=stride))
54 |
55 | def fc_layer(x, n_neurons, activation=tf.tanh, use_bias=True, dropout=False):
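   |     # Returns (activations, weights, bias, keep_prob); keep_prob is a dropout placeholder
   |     # only when dropout=True, otherwise None (and bias is None when use_bias=False).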
56 | W = weight_variable([x.get_shape()[-1].value, n_neurons])
57 | h, b = None, None
58 | if use_bias:
59 | b = bias_variable([n_neurons])
60 | h = activation(tf.matmul(x, W) + b)
61 | else:
62 | h = activation(tf.matmul(x, W))
63 | if dropout:
64 | keep_prob = tf.placeholder(tf.float32)
65 | h_drop = tf.nn.dropout(h, keep_prob)
66 | return (h_drop, W, b, keep_prob)
67 | else:
68 | return (h, W, b, None)
69 |
70 | def fc_layer_(x, n_neurons, activation=tf.tanh, use_bias=True, dropout=False, weights=None, bias=None):
71 | h = None
72 |     if use_bias and bias is not None:
73 | h = activation(tf.matmul(x, weights) + bias)
74 | else:
75 | h = activation(tf.matmul(x, weights))
76 | return h
77 |
78 | # h_identity_in, W_in = identity_in(x)
79 | def identity_in(x):
80 | shp = x.get_shape()
81 | W = tf.ones([shp[1].value, shp[2].value, shp[3].value])
82 |     return tf.multiply(x, W), W  # tf.mul was renamed to tf.multiply in TF 1.0
83 |
84 | def flattened(x):
85 | product = 1
86 | for d in x.get_shape():
87 | if d.value is not None:
88 | product *= d.value
89 | return tf.reshape(x, [-1, product])
90 |
91 | # Define negative log-likelihood and gradient
92 | def normal_log(X, mu=np.float32(1), sigma=np.float32(1), left=-np.inf, right=np.inf):
93 | val = -tf.log(tf.constant(np.sqrt(2 * np.pi), dtype=tf.float32) * sigma) - \
94 | tf.pow(X - mu, 2) / (tf.constant(2, dtype=tf.float32) * tf.pow(sigma, 2))
95 | return val
96 |
97 | def negative_log_likelihood(X):
98 | return -tf.reduce_sum(normal_log(X))
99 |
100 | def max_pool_2x2(x):
101 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
102 |
--------------------------------------------------------------------------------
/CozmoAutoDriveCNN/DriveTool/drive_by_cnn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright (c) 2016 Anki, Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License in the file LICENSE.txt or at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | '''Drive Cozmo autonomously using a trained CNN.
18 |
19 | This example loads the checkpoint produced by cozmo_cnn.py and continuously turns camera frames into driving commands.
20 | '''
21 |
22 | import json
23 | import sys
24 | import cv2
25 |
26 | sys.path.append('../lib/')
   | sys.path.append('../Tensorflow/')  # cozmo_cnn_models.py lives in the Tensorflow dir (adjust if you copied it elsewhere)
27 | import flask_helpers
28 | import numpy as np
29 | import cozmo
30 | import tensorflow as tf
31 | from cozmo_cnn_models import cnn_cccccfffff
32 | import time
33 | from cozmo.util import degrees, distance_mm, speed_mmps
34 |
35 |
36 |
37 | DEBUG_ANNOTATIONS_DISABLED = 0
38 | DEBUG_ANNOTATIONS_ENABLED_VISION = 1
39 | DEBUG_ANNOTATIONS_ENABLED_ALL = 2
40 |
41 |
42 |
43 | class RemoteControlCozmo:
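   |     # Despite the name (kept from the Anki remote-control example), this class drives
   |     # Cozmo autonomously: it restores the trained checkpoint and, in drive(), feeds the
   |     # latest camera frame through the CNN and turns the prediction into wheel commands.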
44 |
45 | def __init__(self, coz):
46 | self.cozmo = coz
47 | self.sess = tf.InteractiveSession()
48 | self.drive_forwards = 0
49 | self.drive_back = 0
50 | self.turn_left = 0
51 | self.turn_right = 0
52 | self.model = cnn_cccccfffff()
53 | saver = tf.train.Saver()
54 | #saver.restore(self.sess, "/Users/benja/code/Cherry-Autonomous-Racecar-master/Tensorflow/cozmo_run_model.ckpt")
55 |         saver.restore(self.sess, "")  # fill in the checkpoint path from training, e.g. './cozmo_run_modelv2.ckpt'
56 |
57 |     def go_driving(self, key_code):
58 |         '''Turn the CNN prediction into a driving intent and update the wheels.'''
61 |
   |         # Pick the index of the largest network output. Note: with the two-output
   |         # (lwheel, rwheel) model in this repo, argmax can only return 0 or 1, so only
   |         # the left/right branches below are reachable.
65 |         drive = np.argmax(key_code, 1)
66 |
67 | self.drive_forwards = 0
68 | self.drive_back = 0
69 | self.turn_left = 0
70 | self.turn_right = 0
71 |
72 | if drive[0] == 3 :
73 | self.drive_forwards = True
74 | print("command forwards")
75 | elif drive[0] == 2:
76 | self.drive_back= True
77 | print("command back")
78 | elif drive[0] == 0:
79 | self.turn_left = True
80 | print("command left")
81 | elif drive[0] == 1:
82 | self.turn_right = True
83 | print("command right")
84 | else:
85 |             print("unknown prediction index", drive, "- defaulting to forwards")
86 | self.drive_forwards = True
87 |
88 | #print("drive forwards back left right", drive[0], self.drive_forwards, self.drive_back, self.turn_left, self.turn_right)
89 | self.update_driving()
90 |
91 |
92 |
93 |     def drive(self):
94 |         '''Grab the latest camera frame, run it through the CNN, and act on the prediction.'''
96 |         latest_image = self.cozmo.world.latest_image
97 |         screen = np.array(latest_image.raw_image)
   |         # Match the training preprocessing in cozmo_cnn.py: resize to 200x150 and drop
   |         # the top 35 rows, giving the 115x200x3 input the model expects.
98 |         screen = cv2.resize(screen, (200, 150), interpolation=cv2.INTER_CUBIC)[35:, :, :]
99 |         cv2.imshow('window1', screen)
   |         cv2.waitKey(1)  # let the OpenCV window refresh
100 |         cv2.imwrite("./a.jpg", screen)
101 |
102 |         image = screen.astype(dtype=np.float32)/255.0
103 |
104 | key_code = self.model.y_out.eval(session=self.sess, feed_dict={self.model.x: [image],
105 | self.model.keep_prob_fc1:1.0, self.model.keep_prob_fc2:1.0,
106 | self.model.keep_prob_fc3:1.0, self.model.keep_prob_fc4:1.0})
107 |
108 |         print("CNN prediction:", key_code)
109 |
110 | self.go_driving(key_code)
111 |
112 |
113 | def update_driving(self):
114 | drive_dir = (self.drive_forwards - self.drive_back)
115 |
116 |
117 | turn_dir = (self.turn_right - self.turn_left)
118 |
119 | if drive_dir < 0:
120 | # It feels more natural to turn the opposite way when reversing
121 | turn_dir = -turn_dir
122 |
123 | #forward_speed = self.pick_speed(150, 75, 50)
124 | forward_speed = 50
125 | turn_speed = 30
126 |
127 | l_wheel_speed = (drive_dir * forward_speed) + (turn_speed * turn_dir)
128 | r_wheel_speed = (drive_dir * forward_speed) - (turn_speed * turn_dir)
129 |
130 |         print("cozmo drive_dir drive_forwards drive_back turn_speed turn_dir:", drive_dir, self.drive_forwards, self.drive_back, turn_speed, turn_dir)
131 |
132 | #if self.turn_left == True :
133 | # self.cozmo.turn_in_place(degrees(90)).wait_for_completed()
134 | #elif self.turn_right == True :
135 | # self.cozmo.turn_in_place(degrees(-90)).wait_for_completed()
136 | #else:
137 | self.cozmo.drive_wheels(l_wheel_speed, r_wheel_speed, l_wheel_speed*4, r_wheel_speed*4)
138 | #input()
139 |
140 |
141 | def run(sdk_conn):
142 | robot = sdk_conn.wait_for_robot()
143 |
144 |     global remote_control_cozmo
   |     # Turn on image receiving by the camera
145 |     robot.camera.image_stream_enabled = True
146 |     remote_control_cozmo = RemoteControlCozmo(robot)
147 |
   |     # Tilt the head slightly down so the camera looks at the track ahead
150 |     angle = cozmo.util.Angle(radians=-0.10)
151 |
152 |     robot.set_head_angle(angle).wait_for_completed()
153 |
154 | # 87 83 65 68
155 | print(ord('W'),ord('S'),ord('A'), ord('D'))
156 |
157 | while True:
158 | remote_control_cozmo.drive()
159 | time.sleep(0.1)
160 |
161 | if __name__ == '__main__':
162 | cozmo.setup_basic_logging()
163 | cozmo.robot.Robot.drive_off_charger_on_connect = False # RC can drive off charger if required
164 | try:
165 | cozmo.connect(run)
166 | except cozmo.ConnectionError as e:
167 | sys.exit("A connection error occurred: %s" % e)
168 |
--------------------------------------------------------------------------------
/CozmoAutoDriveCNN/Tensorflow/cozmo_cnn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Copyright (c) 2017, benjamin wu
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 | * Redistributions of source code must retain the above copyright
9 | notice, this list of conditions and the following disclaimer.
10 | * Redistributions in binary form must reproduce the above copyright
11 | notice, this list of conditions and the following disclaimer in the
12 | documentation and/or other materials provided with the distribution.
13 | * Neither the name of Ryan Dellana nor the
14 | names of its contributors may be used to endorse or promote products
15 | derived from this software without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED. IN NO EVENT SHALL Ryan Dellana BE LIABLE FOR ANY
21 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 | """
28 |
29 |
30 | from __future__ import absolute_import
31 | from __future__ import division
32 | from __future__ import print_function
33 |
34 | import argparse
35 | import numpy as np
36 | import cv2
37 | import tensorflow as tf
38 | import pickle
39 |
40 | from cozmo_cnn_models import cnn_cccccfffff
41 |
42 | import os
45 | import time
46 |
47 | def load_dataset(path, percent_testing=None):
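   |     # Expects images named "<timestamp>_<lwheel>_<rwheel>.jpg", as written by
   |     # capture_imgs_remote_driving_cozmo.py; wheel speeds are divided by 100.0 to form the labels.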
48 | assert percent_testing is None or (percent_testing >= 0.0 and percent_testing <= 1.0)
49 | x, y, fnames = [], [], []
50 | for i in os.walk(path):
51 | (d, sub_dirs, files_) = i
52 | fnames.extend(files_)
53 | seq_fname = []
54 | for fname in fnames:
55 | seq = float(fname.split('_')[0])
56 | seq_fname.append((seq, fname))
57 | seq_fname.sort()
58 | for (seq, fname) in seq_fname:
59 |         # img = cv2.imread(path+'/'+fname, 0)  # use flag 0 to load grayscale instead of color
60 | img = cv2.imread(path+'/'+fname)
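   |         # The 200x150 resize followed by dropping the top 35 rows gives the 115x200x3 input expected by cnn_cccccfffff.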
61 | img = cv2.resize(img, (200, 150), interpolation=cv2.INTER_CUBIC)
62 | img = img[35:,:,:]
63 | x.append(img)
64 | timestamp, lwheel, rwheel = fname.split('_')
65 | timestamp, lwheel, rwheel = float(timestamp), float(lwheel)/100.0,float(rwheel.split('.jpg')[0])/100.0
66 |
67 | #y.append([[lwheel], [rwheel]])
68 | y.append(np.array([lwheel, rwheel]))
69 | print('( timestamp, lwheel, rwheel):', timestamp,lwheel,rwheel)
70 |
71 | train_x, train_y, test_x, test_y = [], [], [], []
72 | if percent_testing is not None:
73 | tst_strt = int(len(x)*(1.0-percent_testing))
74 | train_x, train_y, test_x, test_y = x[:tst_strt], y[:tst_strt], x[tst_strt:], y[tst_strt:]
75 | else:
76 | train_x, train_y = x, y
77 | return train_x, train_y, test_x, test_y
78 |
79 |
80 | #path = '/Users/benja/code/cozmo_sdk_examples_0.15.0/apps/TestImgv2/'
81 | path = ''  # fill in the directory of captured training images (e.g. the TestImgv2 dir written by the capture tool)
82 |
83 | train_x, train_y, test_x, test_y = load_dataset(path=path, percent_testing=0.20)
84 |
85 |
86 |
87 | num_epochs = 100
88 | batch_size = 100
89 |
90 | # Drop trailing items from each split so its length is divisible by batch_size.
   | # (Using len - remainder also handles the case where the length is already an exact
   | # multiple of batch_size, which the original "[0:-remainder]" slice reduced to an empty list.)
91 |
92 | train_x = train_x[:len(train_x) - (len(train_x) % batch_size)]
93 | train_y = train_y[:len(train_y) - (len(train_y) % batch_size)]
94 | test_x = test_x[:len(test_x) - (len(test_x) % batch_size)]
95 | test_y = test_y[:len(test_y) - (len(test_y) % batch_size)]
96 |
97 | print('len(test_x) =', len(test_x))
98 |
99 | batches_per_epoch = int(len(train_x)/batch_size)
100 |
101 | sess = tf.InteractiveSession()
102 | model = cnn_cccccfffff()
103 | train_step = tf.train.AdamOptimizer(1e-4).minimize(model.loss)
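   | # Note: the argmax-based accuracy below is built but never evaluated; the reported
   | # test accuracy further down is derived from the regression loss instead.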
104 | correct_prediction = tf.equal(tf.argmax(model.y_out,1), tf.argmax(model.y_,1))
105 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float32"))
106 | saver = tf.train.Saver()
107 | sess.run(tf.global_variables_initializer())
108 |
109 | for i in range(num_epochs):
110 | for b in range(0, batches_per_epoch):
111 | batch = [train_x[b*batch_size:b*batch_size+batch_size], train_y[b*batch_size:b*batch_size+batch_size]]
112 | # --- normalize batch ---
113 | batch_ = [[],[]]
114 | for j in range(len(batch[0])):
115 | batch_[0].append(batch[0][j].astype(dtype=np.float32)/255.0)
116 | batch_[1].append(batch[1][j].astype(dtype=np.float32))
117 | batch = batch_
118 | # ------------------------
119 | train_step.run(feed_dict={model.x:batch[0], model.y_:batch[1], model.keep_prob_fc1:0.8, model.keep_prob_fc2:0.8, model.keep_prob_fc3:0.8, model.keep_prob_fc4:0.8})
120 |
121 | print('epoch', i, 'complete')
122 | if i % 5 == 0:
123 | test_error = 0.0
124 | for b in range(0, len(test_x), batch_size):
125 | batch = [test_x[b:b+batch_size], test_y[b:b+batch_size]]
126 | # --- normalize batch ---
127 | batch_ = [[],[]]
128 | for j in range(len(batch[0])):
129 | batch_[0].append(batch[0][j].astype(dtype=np.float32)/255.0)
130 | batch_[1].append(batch[1][j].astype(dtype=np.float32))
131 | batch = batch_
132 |
133 | test_error_ = model.loss.eval(feed_dict={model.x:batch[0], model.y_:batch[1],
134 | model.keep_prob_fc1:1.0, model.keep_prob_fc2:1.0,
135 | model.keep_prob_fc3:1.0, model.keep_prob_fc4:1.0})
136 |
137 | # y_out = model.y_out.eval(session=sess, feed_dict={model.x:batch[0] ,
138 | # model.keep_prob_fc1:1.0, model.keep_prob_fc2:1.0,
139 | # model.keep_prob_fc3:1.0, model.keep_prob_fc4:1.0})
140 | # print("y out is ", y_out)
141 |
142 | # -----------------------
143 | test_error += test_error_
144 | test_error /= len(test_x)/batch_size
   |         # "Accuracy" here is just 1 minus the mean absolute error of the (lwheel, rwheel)
   |         # regression, not a classification accuracy.
145 |         test_accuracy = 1.0 - test_error
146 |         print("test accuracy %g" % test_accuracy)
147 |
148 |
149 | filename = saver.save(sess, './cozmo_run_modelv2.ckpt')
150 |
151 |
--------------------------------------------------------------------------------
/CozmoAutoDriveCNN/DriveTool/capture_imgs_remote_driving_cozmo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright (c) 2016 Anki, Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License in the file LICENSE.txt or at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | '''Control Cozmo using a webpage on your computer and capture training images.
18 |
19 | This example lets you drive Cozmo by remote control through a webpage served by Flask, and saves a camera frame (named with the current wheel speeds) whenever a drive command is active, producing the training pics.
20 | '''
21 |
22 | import json
23 | import sys
24 | import cv2
25 |
26 | sys.path.append('../lib/')
27 | import flask_helpers
28 | import cozmo
29 | import numpy as np
30 | import time
31 |
32 | try:
33 | from flask import Flask, request
34 | except ImportError:
35 | sys.exit("Cannot import from flask: Do `pip3 install --user flask` to install")
36 |
37 | try:
38 | from PIL import Image, ImageDraw
39 | except ImportError:
40 | sys.exit("Cannot import from PIL: Do `pip3 install --user Pillow` to install")
41 |
42 |
43 | DEBUG_ANNOTATIONS_DISABLED = 0
44 | DEBUG_ANNOTATIONS_ENABLED_VISION = 1
45 | DEBUG_ANNOTATIONS_ENABLED_ALL = 2
46 |
47 |
48 | # Annotator for displaying RobotState (position, etc.) on top of the camera feed
49 | class RobotStateDisplay(cozmo.annotate.Annotator):
50 | def apply(self, image, scale):
51 | d = ImageDraw.Draw(image)
52 |
53 | bounds = [3, 0, image.width, image.height]
54 |
55 | def print_line(text_line):
56 | text = cozmo.annotate.ImageText(text_line, position=cozmo.annotate.TOP_LEFT, color='lightblue')
57 | text.render(d, bounds)
58 | TEXT_HEIGHT = 11
59 | bounds[1] += TEXT_HEIGHT
60 |
61 | robot = self.world.robot
62 |
63 | # Display the Pose info for the robot
64 |
65 | pose = robot.pose
66 | print_line('Pose: Pos = <%.1f, %.1f, %.1f>' % pose.position.x_y_z)
67 | print_line('Pose: Rot quat = <%.1f, %.1f, %.1f, %.1f>' % pose.rotation.q0_q1_q2_q3)
68 | print_line('Pose: angle_z = %.1f' % pose.rotation.angle_z.degrees)
69 | print_line('Pose: origin_id: %s' % pose.origin_id)
70 |
71 | # Display the Accelerometer and Gyro data for the robot
72 |
73 | print_line('Accelmtr: <%.1f, %.1f, %.1f>' % robot.accelerometer.x_y_z)
74 | print_line('Gyro: <%.1f, %.1f, %.1f>' % robot.gyro.x_y_z)
75 |
76 |
77 | def create_default_image(image_width, image_height, do_gradient=False):
78 | '''Create a place-holder PIL image to use until we have a live feed from Cozmo'''
79 | image_bytes = bytearray([0x70, 0x70, 0x70]) * image_width * image_height
80 |
81 | if do_gradient:
82 | i = 0
83 | for y in range(image_height):
84 | for x in range(image_width):
85 | image_bytes[i] = int(255.0 * (x / image_width)) # R
86 | image_bytes[i+1] = int(255.0 * (y / image_height)) # G
87 | image_bytes[i+2] = 0 # B
88 | i += 3
89 |
90 | image = Image.frombytes('RGB', (image_width, image_height), bytes(image_bytes))
91 | return image
92 |
93 |
94 | flask_app = Flask(__name__)
95 | remote_control_cozmo = None
96 | _default_camera_image = create_default_image(320, 240)
97 | _is_mouse_look_enabled_by_default = False
98 |
99 | _display_debug_annotations = DEBUG_ANNOTATIONS_ENABLED_ALL
100 |
101 |
102 | def remap_to_range(x, x_min, x_max, out_min, out_max):
103 | '''convert x (in x_min..x_max range) to out_min..out_max range'''
104 | if x < x_min:
105 | return out_min
106 | elif x > x_max:
107 | return out_max
108 | else:
109 | ratio = (x - x_min) / (x_max - x_min)
110 | return out_min + ratio * (out_max - out_min)
111 |
112 |
113 | class RemoteControlCozmo:
114 |
115 | def __init__(self, coz):
116 | self.cozmo = coz
117 |
118 | self.drive_forwards = 0
119 | self.drive_back = 0
120 | self.turn_left = 0
121 | self.turn_right = 0
122 | self.lift_up = 0
123 | self.lift_down = 0
124 | self.head_up = 0
125 | self.head_down = 0
126 |
127 | self.go_fast = 0
128 | self.go_slow = 0
129 |
130 | self.is_mouse_look_enabled = _is_mouse_look_enabled_by_default
131 | self.mouse_dir = 0
132 |
133 | all_anim_names = list(self.cozmo.anim_names)
134 | all_anim_names.sort()
135 | self.anim_names = []
136 |
137 | # Hide a few specific test animations that don't behave well
138 | bad_anim_names = [
139 | "ANIMATION_TEST",
140 | "soundTestAnim"]
141 |
142 | for anim_name in all_anim_names:
143 | if anim_name not in bad_anim_names:
144 | self.anim_names.append(anim_name)
145 |
146 | default_anims_for_keys = ["anim_bored_01", # 0
147 | "id_poked_giggle", # 1
148 | "anim_pounce_success_02", # 2
149 | "anim_bored_event_02", # 3
150 | "anim_bored_event_03", # 4
151 | "anim_petdetection_cat_01", # 5
152 | "anim_petdetection_dog_03", # 6
153 | "anim_reacttoface_unidentified_02", # 7
154 | "anim_upgrade_reaction_lift_01", # 8
155 | "anim_speedtap_wingame_intensity02_01" # 9
156 | ]
157 |
158 | self.anim_index_for_key = [0] * 10
159 | kI = 0
160 | for default_key in default_anims_for_keys:
161 | try:
162 | anim_idx = self.anim_names.index(default_key)
163 | except ValueError:
164 | print("Error: default_anim %s is not in the list of animations" % default_key)
165 | anim_idx = kI
166 | self.anim_index_for_key[kI] = anim_idx
167 | kI += 1
168 |
169 |
170 | self.action_queue = []
171 | self.text_to_say = "Hi I'm Cozmo"
172 |
173 |
174 | def set_anim(self, key_index, anim_index):
175 | self.anim_index_for_key[key_index] = anim_index
176 |
177 |
178 | def handle_mouse(self, mouse_x, mouse_y, delta_x, delta_y, is_button_down):
179 | '''Called whenever mouse moves
180 | mouse_x, mouse_y are in in 0..1 range (0,0 = top left, 1,1 = bottom right of window)
181 | delta_x, delta_y are the change in mouse_x/y since the last update
182 | '''
183 | if self.is_mouse_look_enabled:
184 | mouse_sensitivity = 1.5 # higher = more twitchy
185 | self.mouse_dir = remap_to_range(mouse_x, 0.0, 1.0, -mouse_sensitivity, mouse_sensitivity)
186 | self.update_driving()
187 |
188 | desired_head_angle = remap_to_range(mouse_y, 0.0, 1.0, 45, -25)
189 | head_angle_delta = desired_head_angle - self.cozmo.head_angle.degrees
190 | head_vel = head_angle_delta * 0.03
191 | self.cozmo.move_head(head_vel)
192 |
193 |
194 | def set_mouse_look_enabled(self, is_mouse_look_enabled):
195 | was_mouse_look_enabled = self.is_mouse_look_enabled
196 | self.is_mouse_look_enabled = is_mouse_look_enabled
197 | if not is_mouse_look_enabled:
198 | # cancel any current mouse-look turning
199 | self.mouse_dir = 0
200 | if was_mouse_look_enabled:
201 | self.update_driving()
202 | self.update_head()
203 |
204 |
205 | def handle_key(self, key_code, is_shift_down, is_ctrl_down, is_alt_down, is_key_down):
206 | '''Called on any key press or release
207 | Holding a key down may result in repeated handle_key calls with is_key_down==True
208 | '''
209 |
210 | # Update desired speed / fidelity of actions based on shift/alt being held
211 | was_go_fast = self.go_fast
212 | was_go_slow = self.go_slow
213 |
214 | self.go_fast = is_shift_down
215 | self.go_slow = is_alt_down
216 |
217 | speed_changed = (was_go_fast != self.go_fast) or (was_go_slow != self.go_slow)
218 |
219 | # Update state of driving intent from keyboard, and if anything changed then call update_driving
220 | update_driving = True
221 | if key_code == ord('W'):
222 | self.drive_forwards = is_key_down
223 | elif key_code == ord('S'):
224 | self.drive_back = is_key_down
225 | elif key_code == ord('A'):
226 | self.turn_left = is_key_down
227 | elif key_code == ord('D'):
228 | self.turn_right = is_key_down
229 | else:
230 | if not speed_changed:
231 | update_driving = False
232 |
233 |
234 |         ## Capture a pic from Cozmo to disk while a driving command key
235 |         ## is being pressed (the actual frame write happens in update_driving below).
236 |         if self.drive_forwards or self.drive_back or self.turn_right or self.turn_left:
237 |             print("drive key pressed")
238 |
239 |
240 | # Update state of lift move intent from keyboard, and if anything changed then call update_lift
241 | update_lift = True
242 | if key_code == ord('R'):
243 | self.lift_up = is_key_down
244 | elif key_code == ord('F'):
245 | self.lift_down = is_key_down
246 | else:
247 | if not speed_changed:
248 | update_lift = False
249 |
250 | # Update state of head move intent from keyboard, and if anything changed then call update_head
251 | update_head = True
252 | if key_code == ord('T'):
253 | self.head_up = is_key_down
254 | elif key_code == ord('G'):
255 | self.head_down = is_key_down
256 | else:
257 | if not speed_changed:
258 | update_head = False
259 |
260 | # Update driving, head and lift as appropriate
261 | if update_driving:
262 | self.update_driving()
263 | if update_head:
264 | self.update_head()
265 | if update_lift:
266 | self.update_lift()
267 |
268 | # Handle any keys being released (e.g. the end of a key-click)
269 | if not is_key_down:
270 | if (key_code >= ord('0')) and (key_code <= ord('9')):
271 | anim_name = self.key_code_to_anim_name(key_code)
272 | self.play_animation(anim_name)
273 | elif key_code == ord(' '):
274 | self.say_text(self.text_to_say)
275 |
276 |
277 | def key_code_to_anim_name(self, key_code):
278 | key_num = key_code - ord('0')
279 | anim_num = self.anim_index_for_key[key_num]
280 | anim_name = self.anim_names[anim_num]
281 | return anim_name
282 |
283 |
284 | def func_to_name(self, func):
285 | if func == self.try_say_text:
286 | return "say_text"
287 | elif func == self.try_play_anim:
288 | return "play_anim"
289 | else:
290 | return "UNKNOWN"
291 |
292 |
293 | def action_to_text(self, action):
294 | func, args = action
295 | return self.func_to_name(func) + "( " + str(args) + " )"
296 |
297 |
298 | def action_queue_to_text(self, action_queue):
299 | out_text = ""
300 | i = 0
301 | for action in action_queue:
302 | out_text += "[" + str(i) + "] " + self.action_to_text(action)
303 | i += 1
304 | return out_text
305 |
306 |
307 | def queue_action(self, new_action):
308 | if len(self.action_queue) > 10:
309 | self.action_queue.pop(0)
310 | self.action_queue.append(new_action)
311 |
312 |
313 | def try_say_text(self, text_to_say):
314 | try:
315 | self.cozmo.say_text(text_to_say)
316 | return True
317 | except cozmo.exceptions.RobotBusy:
318 | return False
319 |
320 |
321 | def try_play_anim(self, anim_name):
322 | try:
323 | self.cozmo.play_anim(name=anim_name)
324 | return True
325 | except cozmo.exceptions.RobotBusy:
326 | return False
327 |
328 |
329 | def say_text(self, text_to_say):
330 | self.queue_action((self.try_say_text, text_to_say))
331 | self.update()
332 |
333 |
334 | def play_animation(self, anim_name):
335 | self.queue_action((self.try_play_anim, anim_name))
336 | self.update()
337 |
338 |
339 | def update(self):
340 | '''Try and execute the next queued action'''
341 | if len(self.action_queue) > 0:
342 | queued_action, action_args = self.action_queue[0]
343 | if queued_action(action_args):
344 | self.action_queue.pop(0)
345 |
346 |
347 | def pick_speed(self, fast_speed, mid_speed, slow_speed):
348 | if self.go_fast:
349 | if not self.go_slow:
350 | return fast_speed
351 | elif self.go_slow:
352 | return slow_speed
353 | return mid_speed
354 |
355 |
356 | def update_lift(self):
357 | lift_speed = self.pick_speed(8, 4, 2)
358 | lift_vel = (self.lift_up - self.lift_down) * lift_speed
359 | self.cozmo.move_lift(lift_vel)
360 |
361 |
362 | def update_head(self):
363 | if not self.is_mouse_look_enabled:
364 | head_speed = self.pick_speed(2, 1, 0.5)
365 | head_vel = (self.head_up - self.head_down) * head_speed
366 | self.cozmo.move_head(head_vel)
367 |
368 |
369 | def update_driving(self):
370 | drive_dir = (self.drive_forwards - self.drive_back)
371 |
372 | if (drive_dir > 0.1) and self.cozmo.is_on_charger:
373 | # cozmo is stuck on the charger, and user is trying to drive off - issue an explicit drive off action
374 | try:
375 | # don't wait for action to complete - we don't want to block the other updates (camera etc.)
376 | self.cozmo.drive_off_charger_contacts()
377 | except cozmo.exceptions.RobotBusy:
378 | # Robot is busy doing another action - try again next time we get a drive impulse
379 | pass
380 |
381 | turn_dir = (self.turn_right - self.turn_left) + self.mouse_dir
382 |
383 | if drive_dir < 0:
384 | # It feels more natural to turn the opposite way when reversing
385 | turn_dir = -turn_dir
386 |
387 | forward_speed = self.pick_speed(150, 75, 50)
388 | turn_speed = self.pick_speed(100, 50, 30)
389 |
390 | l_wheel_speed = (drive_dir * forward_speed) + (turn_speed * turn_dir)
391 | r_wheel_speed = (drive_dir * forward_speed) - (turn_speed * turn_dir)
392 |
393 | self.cozmo.drive_wheels(l_wheel_speed, r_wheel_speed, l_wheel_speed*4, r_wheel_speed*4)
394 | if l_wheel_speed != 0 or r_wheel_speed != 0 :
395 | screen = np.array(self.cozmo.world.latest_image.raw_image)
396 |
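   |             # Encode the wheel speeds in the filename ("<timestamp>_<lwheel>_<rwheel>.jpg");
   |             # cozmo_cnn.py's load_dataset() later parses them as training labels.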
397 | file_name = str(time.time()) + "_"+ str(l_wheel_speed)+"_"+str(r_wheel_speed)+".jpg"
398 | cv2.imwrite('./TestImgv2/'+file_name, screen)
399 |
400 | def get_anim_sel_drop_down(selectorIndex):
401 |     html_text = ''''''  # the <select> drop-down markup built here is truncated in this listing
409 | return html_text
410 |
411 |
412 | def get_anim_sel_drop_downs():
413 | html_text = ""
414 | for i in range(10):
415 | # list keys 1..9,0 as that's the layout on the keyboard
416 | key = i+1 if (i<9) else 0
417 | html_text += str(key) + ''': ''' + get_anim_sel_drop_down(key) + '''
'''
418 | return html_text
419 |
420 |
421 | def to_js_bool_string(bool_value):
422 | return "true" if bool_value else "false"
423 |
424 |
425 | @flask_app.route("/")
426 | def handle_index_page():
427 |     return '''
   |     ... (the HTML page body is garbled/cut off in this listing; in the original Anki example it
   |     shows the camera feed and a controls help table -- W/A/S/D: drive, Q: toggle mouse look,
   |     mouse: aim steering and head angle, T/G: head up/down, R/F: lift up/down, Shift/Alt: move
   |     faster/slower, L: toggle IR headlight, O: toggle debug annotations, P: toggle free play,
   |     0-9: play the mapped animation, Space: say text -- followed by the animation key mappings
   |     rendered by get_anim_sel_drop_downs()) ...