├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── assets ├── kinect_gopigo.scad ├── kinect_gopigo.stl └── pibot.jpg ├── requirements.txt └── src ├── learner ├── __init__.py └── games.py ├── robot ├── __init__.py ├── actuators.py ├── base.py └── sensors.py ├── run_robot.py ├── test_kinect.py ├── test_motors.py ├── tools ├── __init__.py ├── benchmark.py └── interactive.py └── util ├── __init__.py └── frame_convert.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Misc 2 | .DS_Store 3 | pip-selfcheck.json 4 | .idea/ 5 | 6 | # Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | bin/ 17 | env/ 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | include/ 25 | lib/ 26 | lib64/ 27 | local/ 28 | man/ 29 | parts/ 30 | sdist/ 31 | var/ 32 | *.egg-info/ 33 | .installed.cfg 34 | *.egg 35 | 36 | # PyInstaller 37 | # Usually these files are written by a python script from a template 38 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
39 | *.manifest 40 | *.spec 41 | 42 | # Installer logs 43 | pip-log.txt 44 | pip-delete-this-directory.txt 45 | 46 | # Unit test / coverage reports 47 | htmlcov/ 48 | .tox/ 49 | .coverage 50 | .coverage.* 51 | .cache 52 | nosetests.xml 53 | coverage.xml 54 | *,cover 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "vendor/keras"] 2 | path = vendor/keras 3 | url = https://github.com/fchollet/keras.git 4 | [submodule "vendor/dqn"] 5 | path = vendor/dqn 6 | url = https://github.com/soumith/deepmind-atari.git 7 | [submodule "vendor/gopigo"] 8 | path = vendor/gopigo 9 | url = https://github.com/DexterInd/GoPiGo.git 10 | [submodule "vendor/libfreenect"] 11 | path = vendor/libfreenect 12 | url = git@github.com:matthiasplappert/libfreenect.git 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Matthias Plappert 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PiBot 2 | 3 | ![pibot](/assets/pibot.jpg?raw=true) 4 | A simple robot platform with extensible sensors written in Python and controllable over Wifi. 5 | 6 | ## Hardware 7 | - [Raspberry Pi 2](https://www.raspberrypi.org/products/raspberry-pi-2-model-b/). You can probably use the newer [Raspberry Pi 3](https://www.raspberrypi.org/products/raspberry-pi-3-model-b/) as well, but I haven't tried it. 8 | - [GoPiGo](http://www.dexterindustries.com/gopigo/), which turns your Raspberry Pi into a basic robot platform. 9 | - [Xbox 360 Kinect](http://www.xbox.com/en-US/xbox-360/accessories/kinect), if you want depth information. Alternatively, you can use any OpenCV-compatible (= most) USB web cam for vision. 10 | - [A decent USB Wifi dongle](https://www.amazon.de/gp/product/B007K871ES/ref=oh_aui_search_detailpage?ie=UTF8&psc=1). I wouldn't get one of the tiny ones since I had bandwidth trouble, presumably due to the tiny antenna. 11 | - A 3D printed holder for the Xbox Kinect. You can use my model (under `assets/kinect_gopigo.stl`), if you want. It contains holes so that you can easily attach it to your GoPiGo body. Alternatively, you can just attach it using tape or whatever. 12 | - A battery pack of your choice. 
I use 10 AA rechargeable batteries since the Kinect requires at least 12V or it will just randomly shut down during operation, especially if the motors are also running. I use eneloop batteries, which are awesome. I get approx. 2 hours of a set of fully charged batteries. 13 | - Some soldering equipment and patience to put it all together. 14 | 15 | ## Installation on Client 16 | 17 | Simply run the following command to install all dependencies: 18 | ``` 19 | pip install -r requirements.txt 20 | ``` 21 | That's it. 22 | 23 | ## Installation on Raspberry Pi 24 | 25 | ### Basics 26 | ``` 27 | sudo apt-get update 28 | sudo apt-get upgrade 29 | sudo apt-get install build-essential git 30 | ``` 31 | Next, generate keys for Github: https://help.github.com/articles/generating-ssh-keys/ 32 | 33 | ### PiBot and Dependencies 34 | ``` 35 | sudo apt-get install python python-dev python-numpy python-scipy python-setuptools libopencv-dev python-opencv 36 | git clone git@github.com:matthiasplappert/pibot.git ~/pibot 37 | cd ~/pibot 38 | git submodule update --init --recursive 39 | sudo pip install pip --upgrade 40 | sudo pip install -r requirements.txt 41 | ``` 42 | 43 | ### GoPiGo 44 | ``` 45 | cd vendor/gopigo/Setup 46 | chmod +x install.sh 47 | sudo ./install.sh 48 | ``` 49 | 50 | ### Xbox Kinect 51 | ``` 52 | cd /tmp 53 | sudo apt-get install libudev-dev 54 | sudo wget http://sourceforge.net/projects/libusb/files/libusb-1.0/libusb-1.0.19/libusb-1.0.19.tar.bz2/download 55 | tar xvjf download 56 | cd libusb-1.0.19 57 | ./configure 58 | make 59 | sudo make install 60 | cd ~/pibot/vendor/libfreenect 61 | sudo apt-get install cmake freeglut3-dev libxmu-dev libxi-dev 62 | mkdir build 63 | cmake -DBUILD_EXAMPLES=OFF -DLIBUSB_1_INCLUDE_DIR=/usr/local/include/libusb-1.0 -DLIBUSB_1_LIBRARY=/usr/local/lib/libusb-1.0.so -L .. 
// Kinect holder for the GoPiGo: the stock bracket model, widened with two
// side extensions and pierced by six mounting screw holes so it can be
// bolted onto the GoPiGo body. (The original source also carried a set of
// commented-out measurement cubes used while calibrating dimensions.)
difference() {
    union() {
        import("kinect3C.stl");

        // The original bracket arm is 40mm; we want 55mm. The end of the
        // original piece is rounded, so add a 6mm overlap to compensate.
        overlap = 6;
        extension_length = 55 - 40 + overlap;
        for (side = [-1, 1]) {
            x_offset = side * (40 - overlap) - (side < 0 ? extension_length : 0);
            translate([x_offset, -15, 0])
                cube(size=[extension_length, 30, 5], center=false);
        }
    }

    // Screw holes: a mirrored 2x3 grid around the center line.
    screw_radius = 2.5;
    screw_fn = 40;
    screw_x = 55 - 5 - screw_radius;
    screw_y = screw_radius + 3 + screw_radius;
    for (x = [-screw_x, screw_x], y = [-screw_y, 0, screw_y])
        translate([x, y, 0])
            cylinder(h=20, r=screw_radius, center=true, $fn=screw_fn);
}
"""Game environments that frame the physical robot as an RL task.

Each environment follows the Atari-style interface used by DQN-type
learners: step(action) -> (frame, reward, terminal, lives).
"""
import logging

import numpy as np

from robot.sensors import KinectDepthCamera, KINECT_INVALID_DEPTH, VoltageSensor
from robot.actuators import Motors, SimulatedMotors, MotorAction, KinectTiltMotor, KinectLED, KinectLEDAction


class GameEnvironment(object):
    """Abstract base that exposes a robot as a game-like environment.

    Subclasses install the robot's sensors/actuators and map actions and
    sensor readings onto (frame, reward, terminal, lives) tuples.
    """

    def __init__(self, r):
        # The robot is configured immediately but only opened in __enter__.
        self.robot = r
        self._configure_robot()

    def __enter__(self):
        # NOTE(review): returning None on failure means `with ... as game`
        # binds None; callers must check for that (see tools/benchmark.py).
        if not self.robot.open():
            return None
        self._prepare_robot()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.robot.close()

    def step(self, action):
        """Perform `action` and return (frame, reward, terminal, lives)."""
        frame, reward, terminal, lives, _ = self.debug_step(action)
        return frame, reward, terminal, lives

    def debug_step(self, action):
        """Like step(), but additionally returns the raw sensor data."""
        self._act(action)
        sensor_data = self.robot.perceive()
        frame, reward, terminal, lives = self._compute_new_state(action, sensor_data)
        return frame, reward, terminal, lives, sensor_data

    def reset(self):
        # Optional hook; the base implementation does nothing.
        pass

    @property
    def actions(self):
        # List of valid actions accepted by step(); subclass responsibility.
        raise NotImplementedError()

    def _act(self, action):
        # Translate a game action into robot actuator commands.
        raise NotImplementedError()

    def _configure_robot(self):
        # Install sensors/actuators on self.robot; called from __init__.
        raise NotImplementedError()

    def _prepare_robot(self):
        # Optional hook run right after the robot has been opened.
        pass

    def _compute_new_state(self, action, sensor_data):
        # Map (action, sensor_data) -> (frame, reward, terminal, lives).
        raise NotImplementedError()


class ObstacleAvoidanceGameEnvironment(GameEnvironment):
    """Reward driving forward while staying away from obstacles.

    Observation is the (84, 84) Kinect depth image, normalized to [0, 1].
    """

    @property
    def actions(self):
        # The drive motors (actuator index 0) define the action space.
        return self.robot.actuators[0].actions

    def _act(self, action):
        # Only the motors receive the action; tilt motor and LED stay idle.
        actions = [action, None, None]
        self.robot.act(actions)

    def _configure_robot(self):
        # Configure Kinect.
        kinect = KinectDepthCamera()
        kinect.resize = (84, 84)
        kinect.crop = True
        kinect.convert_color = None

        # Configure voltage sensor. We only need this information for debug reasons, so only query very infrequently.
        voltage = VoltageSensor()
        voltage.step_interval = 1000

        # Configure motors
        motors = Motors()
        motors.duration = 0.05
        motors.speed = 150

        # Pass the configuration to the robot.
        self.robot.sensors = [kinect, voltage]
        self.robot.actuators = [motors, KinectTiltMotor(), KinectLED()]

    def _prepare_robot(self):
        # Tilt Kinect to 20 degrees and disable LED to avoid reflections.
        self.robot.act([None, 20, KinectLEDAction.OFF])

    def _compute_new_state(self, action, sensor_data):
        depth_data = sensor_data[0]
        voltage = sensor_data[1]  # None on steps where the sensor was skipped
        if voltage is not None:
            logging.info('system voltage is %fV' % voltage)

        # We reward as follows:
        # - if the robot is too close to an obstacle, every action is punished
        # - if the robot is not too close to an obstacle AND performs a forward action, reward
        # - if the robot is not too close to an obstacle AND performs a non-forward action, do neither punish nor reward
        mean_depth = np.mean(depth_data[depth_data < KINECT_INVALID_DEPTH])
        # mean over an empty selection yields NaN -> treated as "too close".
        is_too_close = np.isnan(mean_depth) or mean_depth < 500.
        reward = 0
        if is_too_close:
            reward = -1
        elif action == MotorAction.FORWARD:
            reward = 1

        # Prepare remaining values.
        terminal = self._compute_error_rate(depth_data) > .8
        depth_data[depth_data == KINECT_INVALID_DEPTH] = 0  # replace errors with 0 -> assume very close proximity for errors
        frame = depth_data.astype('float32') / KINECT_INVALID_DEPTH
        lives = 1 if not terminal else 0

        return frame, reward, terminal, lives

    def _compute_error_rate(self, depth_data):
        # Fraction of pixels for which the Kinect reported an invalid depth.
        n_depth_errors = np.sum(depth_data == KINECT_INVALID_DEPTH)
        return float(n_depth_errors) / float(np.size(depth_data))

    def reset(self):
        """Back up (LED red) until the view is mostly valid and far enough away."""
        self.robot.act([None, None, KinectLEDAction.RED])
        # Bounded retries: gives up silently after 50 backward steps.
        for _ in xrange(50):
            depth_data = self.robot.perceive()[0]
            error_rate = self._compute_error_rate(depth_data)
            mean_depth = np.mean(depth_data[depth_data < KINECT_INVALID_DEPTH])
            if error_rate < 0.2 and not np.isnan(mean_depth) and mean_depth > 800:
                break
            self._act(MotorAction.BACKWARD)
        self.robot.act([None, None, KinectLEDAction.OFF])


# class LightSeekingGameEnvironment(GameEnvironment):
#     def _compute_new_state(self, sensor_data):
#         assert len(sensor_data) >= 2
#         light = sensor_data[1]
#         if light > 80:
#             return 1
#         return 0
# Attempt to load gopigo, which is not available when simulating the robot.
try:
    import gopigo
    gopigo_available = True
except ImportError:
    gopigo_available = False


class Actuator(object):
    """Base class for all actuators: open() -> act(action)* -> close().

    Subclasses override _open/_close/_act and the `actions` property.
    All public methods return True on success and False on failure.
    """

    def __init__(self):
        self.is_open = False

    @property
    def actions(self):
        # The list of valid actions accepted by act(); subclass responsibility.
        raise NotImplementedError()

    def open(self):
        """Open the actuator; fails if it is already open."""
        if self.is_open or not self._open():
            # Fixed copy-pasted wording: this is an actuator, not a sensor.
            logging.warning('cannot open actuator %s' % self)
            return False
        self.is_open = True
        return True

    def close(self):
        """Close the actuator; fails if it is not open."""
        if not self.is_open or not self._close():
            logging.warning('cannot close actuator %s' % self)
            return False
        self.is_open = False
        return True

    def act(self, action):
        """Validate and perform `action`. `None` is an accepted no-op."""
        if not self.is_open:
            logging.warning('cannot act with closed actuator %s' % self)
            return False
        if action is None:
            # Do nothing, which is valid.
            return True
        if action not in self.actions:
            logging.warning('unknown action %s for actuator %s' % (action, self))
            return False
        return self._act(action)

    def _open(self):
        return True

    def _close(self):
        return True

    def _act(self, action):
        raise NotImplementedError()


class MotorAction(object):
    # Integer action codes for the GoPiGo drive motors.
    IDLE, FORWARD, BACKWARD, TURN_LEFT, TURN_RIGHT = range(5)


class Motors(Actuator):
    """GoPiGo drive motors. `speed` is applied once when the actuator opens."""

    def __init__(self):
        super(Motors, self).__init__()
        self.speed = 150

    def _open(self):
        if not gopigo_available:
            return False
        gopigo.set_speed(self.speed)
        return True

    @property
    def actions(self):
        return [MotorAction.FORWARD, MotorAction.BACKWARD, MotorAction.IDLE, MotorAction.TURN_LEFT,
                MotorAction.TURN_RIGHT]

    def _act(self, action):
        if not gopigo_available:
            return False

        # Translate to the matching gopigo call.
        if action == MotorAction.TURN_LEFT:
            action_call = gopigo.left
        elif action == MotorAction.TURN_RIGHT:
            action_call = gopigo.right
        elif action == MotorAction.FORWARD:
            action_call = gopigo.fwd
        elif action == MotorAction.BACKWARD:
            action_call = gopigo.bwd
        elif action == MotorAction.IDLE:
            action_call = gopigo.stop
        else:
            action_call = None
        assert action_call is not None

        # The motors keep running until a different action (e.g. IDLE) is
        # sent. (An earlier stale comment mentioned a `duration` that this
        # class does not implement.)
        logging.info('performing motor action %d' % action)
        action_call()
        return True


class SimulatedMotors(Motors):
    """Drop-in replacement for Motors that only logs the requested action.

    NOTE(review): _open() is inherited from Motors and therefore still
    requires gopigo; open the simulated motors accordingly — confirm intent.
    """

    def _act(self, action):
        logging.info('simulating motor action %d' % action)
        return True


class KinectTiltMotor(Actuator):
    """Controls the Kinect tilt angle, in whole degrees."""

    def _open(self):
        # Probing the tilt state doubles as a device availability check.
        return freenect.sync_get_tilt_state() is not None

    @property
    def actions(self):
        return range(-20, 21)  # [-20, 20]

    def _act(self, action):
        return freenect.sync_set_tilt_degs(action) == 0


class KinectLEDAction(object):
    # Action codes for the Kinect status LED.
    OFF = 0
    GREEN = 1
    RED = 2
    YELLOW = 3
    BLINK_GREEN = 4
    BLINK_RED_YELLOW = 5


class KinectLED(Actuator):
    """Controls the Kinect status LED."""

    def _open(self):
        return freenect.sync_get_tilt_state() is not None

    @property
    def actions(self):
        return [KinectLEDAction.OFF, KinectLEDAction.GREEN, KinectLEDAction.RED, KinectLEDAction.YELLOW,
                KinectLEDAction.BLINK_GREEN, KinectLEDAction.BLINK_RED_YELLOW]

    def _act(self, action):
        # Map our LED action codes onto the freenect LED constants.
        if action == KinectLEDAction.OFF:
            kinect_action = freenect.LED_OFF
        elif action == KinectLEDAction.GREEN:
            kinect_action = freenect.LED_GREEN
        elif action == KinectLEDAction.RED:
            kinect_action = freenect.LED_RED
        elif action == KinectLEDAction.YELLOW:
            kinect_action = freenect.LED_YELLOW
        elif action == KinectLEDAction.BLINK_GREEN:
            kinect_action = freenect.LED_BLINK_GREEN
        elif action == KinectLEDAction.BLINK_RED_YELLOW:
            kinect_action = freenect.LED_BLINK_RED_YELLOW
        else:
            raise RuntimeError('Unknown action {}.'.format(action))
        # BUG FIX: previously passed the raw `action` code instead of the
        # translated freenect constant, so the mapping above had no effect
        # and codes that differ between the two enums set the wrong LED mode.
        return freenect.sync_set_led(kinect_action) == 0
    def act(self, actions):
        """Dispatch one action per actuator; `actions[i]` may be None (no-op).

        Also advances the step counter that drives the per-sensor
        `step_interval` throttling in perceive().

        Raises:
            ValueError: if `actions` does not have one entry per actuator.
        """
        if len(actions) != len(self.actuators):
            raise ValueError('must specify an action for each actuator')

        self.n_steps += 1
        for idx, actuator in enumerate(self.actuators):
            if not actuator.act(actions[idx]):
                logging.error('could not perform action on actuator %s' % actuator)


def run_robot(name, host, port):
    """Create a Robot and serve it over Pyro until the event loop exits."""
    # Allow clients to send pickled arguments (we serialize numpy arrays).
    Pyro4.config.SERIALIZERS_ACCEPTED.add('pickle')

    print('initializing robot "%s" ...' % name)
    robot = Robot()
    print('starting event loop ...')
    pyro_event_loop(name, robot, host=host, port=port)
    print('shutting down robot ...')


def get_remote_robot(name, host, port):
    """Return a Pyro proxy for the robot registered under `name`.

    `host`/`port` are optional name-server overrides; pass None to rely on
    broadcast lookup on the local network.
    """
    # Use pickle for serialization (because we serialize numpy arrays).
    Pyro4.config.SERIALIZER = 'pickle'

    # Connect to robot and configure it.
    uri = 'PYRONAME:' + name
    if host is not None:
        uri += '@' + str(host)
    if port is not None:
        uri += ':' + str(port)
    return Pyro4.Proxy(uri)


def pyro_event_loop(name, obj, timeout=3.0, host=None, port=9090, callback=None):
    """Serve `obj` and an embedded Pyro name server in one select() loop.

    Blocks forever. `callback`, if given, is invoked after every loop
    iteration (i.e. at most every `timeout` seconds when idle).
    """
    Pyro4.config.SERVERTYPE = 'thread'
    if host is None:
        host = Pyro4.socketutil.getIpAddress(None, workaround127=True)

    # Embedded name server so clients can resolve PYRONAME: URIs.
    nameserverUri, nameserverDaemon, broadcastServer = Pyro4.naming.startNS(host=host, port=port)
    logging.info('name server uri: %s', nameserverUri)
    assert broadcastServer is not None
    daemon = Pyro4.core.Daemon(host=host)
    serverUri = daemon.register(obj)
    nameserverDaemon.nameserver.register(name, serverUri)

    # Event loop
    while True:
        # Create sets of the socket objects we will be waiting on
        nameserverSockets = set(nameserverDaemon.sockets)
        pyroSockets = set(daemon.sockets)
        rs = [broadcastServer]  # only the broadcast server is directly usable as a select() object
        rs.extend(nameserverSockets)
        rs.extend(pyroSockets)
        rs, _, _ = select.select(rs, [], [], timeout)
        eventsForNameserver = []
        eventsForDaemon = []
        for s in rs:
            if s is broadcastServer:
                broadcastServer.processRequest()
            elif s in nameserverSockets:
                eventsForNameserver.append(s)
            elif s in pyroSockets:
                eventsForDaemon.append(s)
        if eventsForNameserver:
            nameserverDaemon.events(eventsForNameserver)
        if eventsForDaemon:
            daemon.events(eventsForDaemon)

        if callback is not None and callable(callback):
            callback()

    # NOTE(review): unreachable — the while-loop above has no break/return,
    # so these daemons are never closed cleanly (Ctrl-C skips them too).
    nameserverDaemon.close()
    broadcastServer.close()
    daemon.close()
import logging

import numpy as np
try:
    # Only available on the robot itself; client-side tools import this
    # module too, so the failure is tolerated.
    import freenect
    import cv2
except ImportError:
    pass
try:
    import gopigo
    gopigo_available = True
except ImportError:
    gopigo_available = False


# Raw depth value the Kinect reports for pixels it could not measure.
KINECT_INVALID_DEPTH = 2047


class Sensor(object):
    """Base class for all sensors: open() -> perceive()* -> close().

    `step_interval` lets Robot.perceive() skip this sensor on most steps
    and only query it every N-th step (see robot.base.Robot.perceive).
    """

    def __init__(self):
        self.step_interval = 1
        self.is_open = False

    def perceive(self):
        """Read one sample, run it through _process_data(); None on failure."""
        if not self.is_open:
            logging.warning('cannot perceive with closed sensor %s' % self)
            return None
        data = self._perceive()
        if data is None:
            return None
        return self._process_data(data)

    def open(self):
        if self.is_open or not self._open():
            logging.warning('cannot open sensor %s' % self)
            return False
        self.is_open = True
        return True

    def close(self):
        if not self.is_open or not self._close():
            logging.warning('cannot close sensor %s' % self)
            return False
        self.is_open = False
        return True

    def _perceive(self):
        # Read raw data from the device; subclass responsibility.
        raise NotImplementedError()

    def _process_data(self, data):
        # Optional post-processing hook; the default is a pass-through.
        assert data is not None
        return data

    def _open(self):
        return True

    def _close(self):
        return True


class Camera(Sensor):
    """Shared post-processing (color conversion, center crop, resize)."""

    def __init__(self):
        super(Camera, self).__init__()
        self.crop = True
        self.resize = (100, 100)
        # NOTE(review): 4 looks like a cv2.COLOR_* conversion code
        # (4 == cv2.COLOR_BGR2RGB in OpenCV) — confirm before changing.
        self.convert_color = 4L

    def _process_data(self, data):
        assert data is not None

        if self.convert_color is not None:
            data = cv2.cvtColor(data, self.convert_color)

        # Ensure that we have exactly 3 dimensions: (height, width, channel). This makes cropping easier.
        assert data.ndim in (2, 3)
        if data.ndim == 2:
            data = data.reshape(data.shape + (1,))
        assert data.ndim == 3
        if self.crop:
            # Center-crop to a square with side length min(height, width).
            image_shape = data.shape[:2]  # = (height, width)
            min_dim = np.min(image_shape)
            y_start, x_start = np.floor((image_shape - min_dim) / 2)
            y_end, x_end = y_start + min_dim, x_start + min_dim
            data = data[y_start:y_end, x_start:x_end, :]
            assert data.shape[0] == data.shape[1]
        if data.shape[2] == 1:
            # Convert back to two dimensions. We do this because cv2.resize() will return a 2-dim array if an image has
            # only one channel. As a result, the return type would be different depending on the set options.
            data = data.reshape(data.shape[:2])
        if self.resize is not None:
            data = cv2.resize(data, self.resize)
        return data

    def _perceive(self):
        raise NotImplementedError()


class KinectDepthCamera(Camera):
    """Kinect depth stream; raw values with KINECT_INVALID_DEPTH as error."""

    def __init__(self):
        super(KinectDepthCamera, self).__init__()
        # Depth data has no color channels, so never color-convert it.
        self.convert_color = None

    def _open(self):
        # Probing the tilt state doubles as a device availability check.
        return freenect.sync_get_tilt_state() is not None

    def _perceive(self):
        result = freenect.sync_get_depth()
        if result is None:
            return None
        return result[0]  # result = (data, timestamp)


class KinectVideoCamera(Camera):
    """Kinect RGB video stream."""

    def _open(self):
        return freenect.sync_get_tilt_state() is not None

    def _perceive(self):
        result = freenect.sync_get_video()
        if result is None:
            return None
        return result[0]  # result = (data, timestamp)


class KinectTiltSensor(Sensor):
    """Reports (tilt_angle, tilt_status) plus the MKS accelerometer triple."""

    def _open(self):
        return freenect.sync_get_tilt_state() is not None

    def _perceive(self):
        state = freenect.sync_get_tilt_state()
        acceleration = freenect.get_mks_accel(state)
        return (state.tilt_angle, state.tilt_status) + acceleration


class OpenCVCamera(Camera):
    """Any OpenCV-compatible USB camera (cv2.VideoCapture)."""

    def __init__(self):
        super(OpenCVCamera, self).__init__()
        self.video_capture = None
        # Number of grab() calls per perceive; >1 drains stale buffered frames.
        self.n_grabs = 1
        self.camera_index = 0

    def _perceive(self):
        if self.video_capture is None or not self.video_capture.isOpened():
            return None

        # Read potentially multiple times to avoid reading stalled data from the internal buffer.
        for _ in xrange(self.n_grabs):
            self.video_capture.grab()
        success, frame = self.video_capture.retrieve()
        if not success:
            return None
        return frame

    def _open(self):
        assert self.video_capture is None
        try:
            vc = cv2.VideoCapture()
            vc.open(self.camera_index)
        except:  # NOTE(review): bare except hides real errors; narrow if possible
            return False
        if not vc.isOpened():
            return False
        # cv2.cv is the OpenCV 2.x API; this line needs updating for OpenCV 3+.
        vc.set(cv2.cv.CV_CAP_PROP_CONVERT_RGB, True)
        self.video_capture = vc
        return True

    def _close(self):
        assert self.video_capture is not None
        self.video_capture.release()
        self.video_capture = None
        return True


class AnalogSensor(Sensor):
    """Generic analog input on a GoPiGo pin."""

    def __init__(self):
        super(AnalogSensor, self).__init__()
        self.pin = 1

    def _open(self):
        if not gopigo_available:
            return False
        gopigo.pinMode(self.pin, 'INPUT')
        return True

    def _perceive(self):
        if not gopigo_available:
            return None
        value = gopigo.analogRead(self.pin)
        if value < 0:
            # gopigo reports read errors as negative values.
            return None
        return value


class VoltageSensor(Sensor):
    """Battery voltage (in volts) as reported by the GoPiGo board."""

    def _perceive(self):
        if not gopigo_available:
            return None
        return gopigo.volt()
"""Manual test script that exercises the Kinect sensors/actuators remotely."""
import argparse
import logging
import time  # BUG FIX: was only in scope via the wildcard imports below

import numpy as np

from robot.base import get_remote_robot
from robot.sensors import *
from robot.actuators import *


def main(args):
    """Connect to the remote robot and read the Kinect tilt status.

    The commented-out sections exercise the LED, tilt motor and the
    depth/video streams; enable them selectively as needed.
    """
    print('preparing ...')
    robot = get_remote_robot(args.agent, args.host, args.port)
    kinect_depth_cam = KinectDepthCamera()
    kinect_video_cam = KinectVideoCamera()
    kinect_tilt_sensor = KinectTiltSensor()
    kinect_tilt_motor = KinectTiltMotor()
    kinect_led = KinectLED()
    robot.sensors = [kinect_tilt_sensor, kinect_video_cam]
    robot.actuators = [kinect_tilt_motor, kinect_led]
    robot.open()

    # print('switching LEDs ...')
    # for action in kinect_led.actions:
    #     print('\t' + str(action))
    #     robot.act([None, action])
    #     time.sleep(1)

    # print('\nmoving tilt motor ...')
    # print('\t+20 degrees')
    # robot.act([20, None])
    # time.sleep(3)
    # print('\t-20 degrees')
    # robot.act([-20, None])
    # time.sleep(3)
    # print('\t 0 degrees')
    # robot.act([0, None])
    # time.sleep(3)

    print('\nreading tilt status ...')
    for _ in xrange(10):
        print('\t' + str(robot.perceive()[0]))
        time.sleep(1)

    # print('\nreading depth frames ...')
    # for _ in xrange(10):
    #     print('\t' + str(np.mean(robot.perceive()[0])))
    #     time.sleep(1)

    # print('\nreading video frames ...')
    # for _ in xrange(10):
    #     print('\t' + str(np.mean(robot.perceive()[1])))
    #     time.sleep(1)

    robot.close()


def get_parser():
    """Build the CLI parser.

    The description was fixed from the copy-pasted 'Benchmark for PiBot.';
    --n-iter is currently unused but kept for command-line compatibility.
    """
    parser = argparse.ArgumentParser(description='Kinect test for PiBot.')
    parser.add_argument('--host', help='host of the robot, e.g. 192.168.1.2', type=str, default=None)
    parser.add_argument('--port', help='port of the robot, e.g. 9090', type=int, default=9090)
    parser.add_argument('--n-iter', help='number of iterations to perform', type=int, default=100)
    parser.add_argument('agent', help='name of the robot')
    return parser


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    main(get_parser().parse_args())
9090', type=int, default=9090) 40 | parser.add_argument('agent', help='name of the robot') 41 | return parser 42 | 43 | 44 | if __name__ == '__main__': 45 | logging.basicConfig(level=logging.INFO) 46 | main(get_parser().parse_args()) 47 | -------------------------------------------------------------------------------- /src/tools/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/matthiasplappert/pibot/e097c8bfbfd8901eb15e19087c3ca3d7251684a1/src/tools/__init__.py -------------------------------------------------------------------------------- /src/tools/benchmark.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import timeit 4 | import random 5 | 6 | import cv2 7 | 8 | from util.frame_convert import pretty_depth_cv 9 | from robot.base import get_remote_robot 10 | from learner.games import ObstacleAvoidanceGameEnvironment 11 | 12 | 13 | def main(args): 14 | print('preparing ...') 15 | robot = get_remote_robot(args.agent, args.host, args.port) 16 | with ObstacleAvoidanceGameEnvironment(robot) as game: 17 | if not game: 18 | print('could not create game') 19 | return 20 | actions = game.actions 21 | game.step(actions[0]) # perform a single step to ensure everything is set up before measuring 22 | 23 | n_iter = args.n_iter 24 | print('performing %d iterations ...' % n_iter) 25 | start = timeit.default_timer() 26 | for i in xrange(n_iter): 27 | frame, reward, terminal, lives = game.step(random.choice(actions)) 28 | if terminal: 29 | print('resetting game ...') 30 | game.reset() 31 | continue 32 | # frame *= 2047. 
33 | # cv2.cv.SaveImage('/Users/matze/Desktop/test/out_%.3d.png' % i, pretty_depth_cv(frame)) 34 | # print reward, terminal 35 | 36 | duration = timeit.default_timer() - start 37 | fps = int(float(n_iter) / duration) 38 | print('total time: %fs (%dfps)' % (duration, fps)) 39 | 40 | 41 | def get_parser(): 42 | parser = argparse.ArgumentParser(description='Benchmark for PiBot.') 43 | parser.add_argument('--host', help='host of the robot, e.g. 192.168.1.2', type=str, default=None) 44 | parser.add_argument('--port', help='port of the robot, e.g. 9090', type=int, default=9090) 45 | parser.add_argument('--n-iter', help='number of iterations to perform', type=int, default=100) 46 | parser.add_argument('agent', help='name of the robot') 47 | return parser 48 | 49 | 50 | if __name__ == '__main__': 51 | logging.basicConfig(level=logging.INFO) 52 | main(get_parser().parse_args()) 53 | -------------------------------------------------------------------------------- /src/tools/interactive.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import timeit 4 | import time 5 | import curses 6 | 7 | import cv2 8 | 9 | from game import GameEnvironment 10 | 11 | 12 | def main(args): 13 | game = GameEnvironment(args.agent, host=args.host, port=args.port) 14 | 15 | # Window for frame 16 | cv2.namedWindow('frame') 17 | cv2.namedWindow('processed-frame') 18 | 19 | # Interactive terminal 20 | stdscr = curses.initscr() 21 | stdscr.keypad(True) 22 | curses.halfdelay(1) 23 | while True: 24 | try: 25 | key = stdscr.getkey() 26 | except: 27 | key = '' 28 | 29 | if key == 'KEY_UP': 30 | action = Action.FORWARD 31 | elif key == 'KEY_DOWN': 32 | action = Action.BACKWARD 33 | elif key == 'KEY_LEFT': 34 | action = Action.TURN_LEFT 35 | elif key == 'KEY_RIGHT': 36 | action = Action.TURN_RIGHT 37 | elif key.upper() == 'X': 38 | break 39 | else: 40 | action = Action.IDLE 41 | 42 | start = timeit.default_timer() 43 | frame, 
reward, terminal, lives, processed_frame, light = game.debug_step(action) 44 | duration = timeit.default_timer() - start 45 | 46 | stdscr.clear() 47 | stdscr.addstr(0, 0, 'action: %d' % action) 48 | stdscr.addstr(1, 0, 'frame: %s' % str(frame.shape)) 49 | stdscr.addstr(2, 0, 'reward: %d' % reward) 50 | stdscr.addstr(3, 0, 'terminal: %s' % terminal) 51 | stdscr.addstr(4, 0, 'lives: %d' % lives) 52 | stdscr.addstr(5, 0, 'light: %d' % light) 53 | stdscr.addstr(6, 0, 'perceive-action cycle duration: %fs' % duration) 54 | stdscr.refresh() 55 | 56 | cv2.imshow('frame', frame) 57 | if processed_frame is not None: 58 | cv2.imshow('processed-frame', processed_frame) 59 | cv2.waitKey(1) 60 | if args.interval > 0: time.sleep(args.interval) 61 | 62 | # Tear down frame 63 | cv2.destroyWindow('frame') 64 | cv2.destroyWindow('processed-frame') 65 | 66 | 67 | def get_parser(): 68 | parser = argparse.ArgumentParser(description='Server for PiBot.') 69 | parser.add_argument('--host', help='host of the robot, e.g. 192.168.1.2', type=str, default=None) 70 | parser.add_argument('--port', help='port of the robot, e.g. 
9090', type=int, default=9090) 71 | parser.add_argument('--interval', help='update interval in seconds', type=int, default=0) 72 | parser.add_argument('agent', help='name of the robot') 73 | return parser 74 | 75 | 76 | if __name__ == '__main__': 77 | logging.basicConfig(level=logging.INFO) 78 | main(get_parser().parse_args()) 79 | -------------------------------------------------------------------------------- /src/util/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'matze' 2 | -------------------------------------------------------------------------------- /src/util/frame_convert.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def pretty_depth(depth): 5 | """Converts depth into a 'nicer' format for display 6 | 7 | This is abstracted to allow for experimentation with normalization 8 | 9 | Args: 10 | depth: A numpy array with 2 bytes per pixel 11 | 12 | Returns: 13 | A numpy array that has been processed whose datatype is unspecified 14 | """ 15 | np.clip(depth, 0, 2**10 - 1, depth) 16 | depth = depth.astype(int) 17 | depth >>= 2 18 | depth = depth.astype(np.uint8) 19 | return depth 20 | 21 | 22 | def pretty_depth_cv(depth): 23 | """Converts depth into a 'nicer' format for display 24 | 25 | This is abstracted to allow for experimentation with normalization 26 | 27 | Args: 28 | depth: A numpy array with 2 bytes per pixel 29 | 30 | Returns: 31 | An opencv image whose datatype is unspecified 32 | """ 33 | import cv2.cv as cv 34 | depth = pretty_depth(depth) 35 | image = cv.CreateImageHeader((depth.shape[1], depth.shape[0]), 36 | cv.IPL_DEPTH_8U, 37 | 1) 38 | cv.SetData(image, depth.tostring(), 39 | depth.dtype.itemsize * depth.shape[1]) 40 | return image 41 | 42 | 43 | def video_cv(video): 44 | """Converts video into a BGR format for opencv 45 | 46 | This is abstracted out to allow for experimentation 47 | 48 | Args: 49 | video: A 
numpy array with 1 byte per pixel, 3 channels RGB 50 | 51 | Returns: 52 | An opencv image whose datatype is 1 byte, 3 channel BGR 53 | """ 54 | import cv2.cv as cv 55 | video = video[:, :, ::-1] # RGB -> BGR 56 | image = cv.CreateImageHeader((video.shape[1], video.shape[0]), 57 | cv.IPL_DEPTH_8U, 58 | 3) 59 | cv.SetData(image, video.tostring(), 60 | video.dtype.itemsize * 3 * video.shape[1]) 61 | return image 62 | --------------------------------------------------------------------------------