├── manu_sawyer
│   ├── src
│   │   ├── __init__.py
│   │   ├── grasp_cnn
│   │   │   ├── __init__.py
│   │   │   ├── start
│   │   │   ├── start_emacs
│   │   │   ├── aolib
│   │   │   │   ├── demo_pyxmod.py
│   │   │   │   ├── __init__.py
│   │   │   │   ├── vlfeat.py
│   │   │   │   ├── areload.py
│   │   │   │   ├── pyxmod.py
│   │   │   │   └── parutil.py
│   │   │   ├── run.py
│   │   │   ├── run2.sh
│   │   │   ├── start.ipy
│   │   │   ├── run_manu.sh
│   │   │   ├── upload_model.sh
│   │   │   ├── old.py
│   │   │   ├── demo_press.py
│   │   │   ├── convert_db.py
│   │   │   ├── params_v2.py
│   │   │   ├── run1.sh
│   │   │   ├── new_params.py
│   │   │   ├── train_grasp.py
│   │   │   ├── label_video.py
│   │   │   ├── manu_params.py
│   │   │   ├── final_params_full.py
│   │   │   ├── parula.py
│   │   │   ├── planefit.py
│   │   │   └── segment.py
│   │   ├── grip_and_record
│   │   │   ├── __init__.py
│   │   │   ├── aolib
│   │   │   │   ├── demo_pyxmod.py
│   │   │   │   ├── __init__.py
│   │   │   │   ├── vlfeat.py
│   │   │   │   ├── areload.py
│   │   │   │   ├── pyxmod.py
│   │   │   │   └── parutil.py
│   │   │   ├── robot_utils.py
│   │   │   ├── test_kinect.py
│   │   │   ├── getch.py
│   │   │   ├── robot_camera.py
│   │   │   ├── inverse_kin.py
│   │   │   ├── transform_test.py
│   │   │   └── planefit.py
│   │   ├── .gitignore
│   │   ├── tensorflow_model_is_gripping
│   │   │   ├── __init__.py
│   │   │   ├── install.sh
│   │   │   ├── aolib
│   │   │   │   ├── demo_pyxmod.py
│   │   │   │   ├── __init__.py
│   │   │   │   ├── vlfeat.py
│   │   │   │   ├── areload.py
│   │   │   │   └── pyxmod.py
│   │   │   ├── download_model.sh
│   │   │   ├── cam_test.py
│   │   │   ├── test_tf.py
│   │   │   ├── grasp_example.py
│   │   │   ├── demo_press.py
│   │   │   ├── train_grasp.py
│   │   │   └── label_video.py
│   │   ├── H.npy
│   │   ├── t.npy
│   │   ├── launch
│   │   │   ├── start_KinectB.sh
│   │   │   ├── gelsightA_driver.launch
│   │   │   └── gelsightB_driver.launch
│   │   ├── transform.py
│   │   ├── WSG50_manu_test.py
│   │   ├── GelSightB.py
│   │   ├── GelSightA.py
│   │   ├── calibration_info_sudri.txt
│   │   ├── calibration_info_nordri.txt
│   │   ├── visualizer_at_at.py
│   │   ├── visualizer_at_post2.py
│   │   ├── video_conv
│   │   │   ├── RGB2video.py
│   │   │   └── convert_to_video.py
│   │   ├── print_cartesian_positions.py
│   │   ├── correct_name_of_object.py
│   │   ├── calibrate.py
│   │   ├── KinectB_hd.py
│   │   ├── compress_data.py
│   │   ├── KinectB.py
│   │   ├── find_cool_GelSight_images.py
│   │   ├── KinectA.py
│   │   ├── set_true_label_of_data.py
│   │   ├── extract_images_for_paper.py
│   │   ├── demo_pick_object.py
│   │   ├── WSG50_manu.py
│   │   ├── demo_torque_control_2_short.py
│   │   └── process_data.py
│   ├── KinectCalibrationInfo.png
│   ├── RosInfo.txt
│   ├── CoordBoundsSawyer.txt
│   ├── README.md
│   ├── package.xml
│   └── CMakeLists.txt
├── requirements.txt
├── LICENSE
├── .gitignore
└── README.md
/manu_sawyer/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/manu_sawyer/src/.gitignore:
--------------------------------------------------------------------------------
1 | *.tar
2 | *.hdf5
3 | *.gz
4 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | dotmap
3 | matplotlib
4 | scipy
5 | h5py
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/start:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ipython start.ipy -i -- "$@"
3 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/install.sh:
--------------------------------------------------------------------------------
1 | pip install tensorflow-gpu
2 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/start_emacs:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ipython start.ipy --simple-prompt -i -- "$@"
3 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/aolib/demo_pyxmod.py:
--------------------------------------------------------------------------------
1 | import pyxmod
2 |
3 | foo = pyxmod.load('foo.pyx')
4 |
5 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/aolib/demo_pyxmod.py:
--------------------------------------------------------------------------------
1 | import pyxmod
2 |
3 | foo = pyxmod.load('foo.pyx')
4 |
5 |
--------------------------------------------------------------------------------
/manu_sawyer/src/H.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robertocalandra/the-feeling-of-success/HEAD/manu_sawyer/src/H.npy
--------------------------------------------------------------------------------
/manu_sawyer/src/t.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robertocalandra/the-feeling-of-success/HEAD/manu_sawyer/src/t.npy
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/run.py:
--------------------------------------------------------------------------------
1 |
2 | import grasp_net, manu_params; grasp_net.train(manu_params.gel_im_v1(0), [0])
3 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/aolib/demo_pyxmod.py:
--------------------------------------------------------------------------------
1 | import pyxmod
2 |
3 | foo = pyxmod.load('foo.pyx')
4 |
5 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/download_model.sh:
--------------------------------------------------------------------------------
1 | wget https://people.eecs.berkeley.edu/~owens/press-model/model.zip
2 |
3 |
--------------------------------------------------------------------------------
/manu_sawyer/KinectCalibrationInfo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/robertocalandra/the-feeling-of-success/HEAD/manu_sawyer/KinectCalibrationInfo.png
--------------------------------------------------------------------------------
/manu_sawyer/RosInfo.txt:
--------------------------------------------------------------------------------
1 | To source the devel workspace: . ~/catkin_ws/devel/setup.bash
2 | then build with: catkin_make
3 | 
4 | To view the graph of ROS nodes and topics: rosrun rqt_graph rqt_graph
5 |
--------------------------------------------------------------------------------
/manu_sawyer/src/launch/start_KinectB.sh:
--------------------------------------------------------------------------------
1 | rosrun kinect2_bridge kinect2_bridge _depth_method:=opengl _reg_method:=cpu /kinect2/sd/image_depth_rect:=/aux/kinect2/sd/image_depth_rect /kinect2/hd/image_color:=/aux/kinect2/hd/image_color
2 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/run2.sh:
--------------------------------------------------------------------------------
1 | #ipython -c "import grasp_net, grasp_params as ps; grasp_net.train(ps.gel_fulldata_v5(), [2,3])"
2 | ipython -c "import grasp_net, grasp_params as ps; grasp_net.train(ps.gel_im_same_v6(), [0, 1, 2,3])"
3 |
--------------------------------------------------------------------------------
/manu_sawyer/CoordBoundsSawyer.txt:
--------------------------------------------------------------------------------
1 | Z:
2 | Lowest: 0.09
3 | Fairly high: 0.42
4 |
5 | X:
6 | Center: 0.6
7 | Towards computer: 0.86
8 | Towards robot: 0.37
9 |
10 | Y:
11 | Center: -0.05
12 | Towards outside: -0.30
13 | Towards computers: 0.19
14 |
15 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/aolib/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | #import util, iputil, img
3 |
4 |
5 | #import img as ig, iputil, util as ut, pyxmod, vlfeat, imtable, areload
6 | __all__ = ['util', 'iputil', 'iplocal', 'pyxmod', 'vlfeat', 'imtable', 'areload', 'img']
7 |
8 |
9 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/aolib/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | #import util, iputil, img
3 |
4 |
5 | #import img as ig, iputil, util as ut, pyxmod, vlfeat, imtable, areload
6 | __all__ = ['util', 'iputil', 'iplocal', 'pyxmod', 'vlfeat', 'imtable', 'areload', 'img']
7 |
8 |
9 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/aolib/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | #import util, iputil, img
3 |
4 |
5 | #import img as ig, iputil, util as ut, pyxmod, vlfeat, imtable, areload
6 | __all__ = ['util', 'iputil', 'iplocal', 'pyxmod', 'vlfeat', 'imtable', 'areload', 'img']
7 |
8 |
9 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/start.ipy:
--------------------------------------------------------------------------------
1 |
2 | import matplotlib as mpl
3 | mpl.use('Agg')
4 |
5 | import aolib.areload; aolib.areload.enable()
6 | %load_ext autoreload
7 | %autoreload 2
8 | # For caffe
9 | import sys
10 | # hide caffe output
11 | if '-h' in sys.argv[1:]:
12 | import os
13 | os.environ['GLOG_minloglevel'] = '2'
14 |
15 | import numpy as np, pylab, aolib.util as ut, aolib.img as ig
16 |
17 |
--------------------------------------------------------------------------------
/manu_sawyer/src/transform.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | """
4 | This file defines the transformation function that takes Kinect coordinates to Sawyer coordinates.
5 | """
6 |
7 | # Loading data from calibration
8 | #H = np.load("H.npy")
9 | #t = np.load("t.npy")
10 | H = np.load("/home/manu/ros_ws/src/manu_research/manu_sawyer/src/H.npy")
11 | t = np.load("/home/manu/ros_ws/src/manu_research/manu_sawyer/src/t.npy")
12 |
13 | # Transforms (x,y,z) in Kinect coordinate system to
14 | # Sawyer coordinate system
15 | def transform(x,y,z):
16 | vec = np.array([[x], [y], [z]])
17 | return H.dot(vec) + t
18 |
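19 | if __name__ == '__main__':
20 |     # Usage sketch with a hypothetical Kinect-frame point (in meters); the
21 |     # result is the corresponding 3x1 vector in the Sawyer base frame.
22 |     print(transform(0.24, 0.08, 0.62))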
--------------------------------------------------------------------------------
/manu_sawyer/src/WSG50_manu_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from WSG50_manu import WSG50
3 | import time
4 | import os
5 | import rospy
6 |
7 | # run "roslaunch wsg_50_driver wsg_50_tcp_script.launch" in new terminal first
8 |
9 | # os.system("for pid in $(ps -ef | grep 'wsg_50_tcp_script' | awk '{print $2}'); do kill -9 $pid; done")
10 | # time.sleep(0.2)
11 | # os.system(
12 | # 'roslaunch wsg_50_driver wsg_50_tcp_script.launch > /home/rcalandra/ros_ws/src/manu_sawyer/temp/wsg_50_driver.txt 2>&1&')
13 | # time.sleep(7)
14 | rospy.init_node('qwe', anonymous=True)
15 |
16 | gripper = WSG50()
17 |
18 | print("force:" + str(gripper.get_force()))
19 | gripper.set_force(50)
20 | print("force:" + str(gripper.get_force()))
21 | gripper.grasp(pos=5)
22 | print("force:" + str(gripper.get_force()))
23 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/cam_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import cv2, time
3 |
4 | def main():
5 | print 'Initializing'
6 | cap = cv2.VideoCapture(0)
7 | fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
8 | print 'camera fps =', fps
9 | start = time.time()
10 | num_frames = 0
11 | while True:
12 | #print 'Reading frame'
13 | ret, frame = cap.read()
14 | if not ret:
15 | print 'Could not read from camera'
16 | break
17 | if cv2.waitKey(1) & 0xFF == ord('q'):
18 | break
19 | if num_frames % 30 == 0:
20 | fps = num_frames / float(time.time() - start)
21 | print 'FPS: %.2f' % fps
22 | print frame.shape
23 | cv2.imshow('frame', frame)
24 | num_frames += 1
25 |
26 | cap.release()
27 | cv2.destroyAllWindows()
28 |
29 | if __name__ == '__main__':
30 | main()
31 |
32 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/run_manu.sh:
--------------------------------------------------------------------------------
1 | S=0
2 | ipython -c "import grasp_net as net, manu_params; net.train(manu_params.gel_im_v1($S), 0)" &
3 | ipython -c "import grasp_net as net, manu_params; net.train(manu_params.im_v1($S), 1)" &
4 | ipython -c "import grasp_net as net, manu_params; net.train(manu_params.gel_v1($S), 2)" &
5 | ipython -c "import grasp_net as net, manu_params; net.train(manu_params.depth_v1($S), 3)" &
6 | sleep 20000
7 | killall python ipython
8 | ipython -c "import grasp_net as net, manu_params; net.train(manu_params.gel0_v1($S), 0)" &
9 | ipython -c "import grasp_net as net, manu_params; net.train(manu_params.gel1_v1($S), 1)" &
10 | ipython -c "import grasp_net as net, manu_params; net.train(manu_params.im_ee_v1($S), 2)" &
11 | ipython -c "import grasp_net as net, manu_params; net.train(manu_params.gel_im_depth_v1($S), 3)" &
12 | sleep 20000
13 | killall python ipython
14 | ipython -c "import grasp_net as net, manu_params; net.train(manu_params.press_v1($S), 0)"
15 |
16 |
17 |
--------------------------------------------------------------------------------
/manu_sawyer/src/GelSightB.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import rospy
3 | from cv_bridge import CvBridge, CvBridgeError
4 | from sensor_msgs.msg import Image
5 | import multiprocessing
6 |
7 | class GelSightB:
8 | def __init__(self, topic='/gelsightB/image_raw'): # /gelsightB/image_raw /image_view_B/output
9 | # Variable
10 | self.img = None
11 |
12 | # Used to convert image from ROS to cv2
13 | self.bridge = CvBridge()
14 |
15 | # The subscriber
16 | self.subscriber = rospy.Subscriber(topic, Image, self.update_image)
17 |
18 | def spin_thread():
19 | rospy.spin()
20 |
21 | self.gelsight_process = multiprocessing.Process(target=spin_thread)
22 | self.gelsight_process.start()
23 |
24 | def get_image(self):
25 | return self.img
26 |
27 | def update_image(self, data):
28 | self.img = self.bridge.imgmsg_to_cv2(data, "bgr8")
29 |
30 | def end_process(self):
31 | self.gelsight_process.terminate()
32 | self.gelsight_process.join()
33 |
--------------------------------------------------------------------------------
/manu_sawyer/src/GelSightA.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import rospy
3 | from cv_bridge import CvBridge, CvBridgeError
4 | from sensor_msgs.msg import Image
5 | import multiprocessing
6 |
7 |
8 | class GelSightA:
9 | def __init__(self, topic='/gelsightA/image_raw'): # '/gelsightA/image_raw' /image_view_A/output
10 | # Variable
11 | self.img = None
12 |
13 | # Used to convert image from ROS to cv2
14 | self.bridge = CvBridge()
15 |
16 | # The subscriber
17 | self.subscriber = rospy.Subscriber(topic, Image, self.update_image)
18 |
19 | def spin_thread():
20 | rospy.spin()
21 |
22 | self.gelsight_process = multiprocessing.Process(target=spin_thread)
23 | self.gelsight_process.start()
24 |
25 | def get_image(self):
26 | return self.img
27 |
28 | def update_image(self, data):
29 | self.img = self.bridge.imgmsg_to_cv2(data, "bgr8")
30 |
31 | def end_process(self):
32 | self.gelsight_process.terminate()
33 | self.gelsight_process.join()
34 |
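35 | if __name__ == '__main__':
36 |     # Minimal usage sketch; assumes a /gelsightA/image_raw publisher is running.
37 |     rospy.init_node('gelsightA_test', anonymous=True)
38 |     cam = GelSightA()
39 |     rospy.sleep(1.0)  # give the subscriber time to receive a frame
40 |     img = cam.get_image()  # None until the first message arrives
41 |     print(img.shape if img is not None else 'no image received yet')
42 |     cam.end_process()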
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/upload_model.sh:
--------------------------------------------------------------------------------
1 | # cd /home/ao/Dropbox/gelsight-grasp/results/press-data-v6
2 | # rm model.zip
3 | # zip model.zip training/net.tf-2600*
4 | # scp model.zip owens@knodel:public_html/press-model/model.zip
5 |
6 | # cd /home/ao/Dropbox/gelsight-grasp/results/press-data-v8
7 | # rm model.zip
8 | # #zip model.zip training/net.tf-2600*
9 | # zip model.zip training/net.tf-2200*
10 | # scp model.zip owens@knodel:public_html/press-model/model.zip
11 |
12 |
13 | # cd /home/ao/Dropbox/gelsight-grasp/results/press-data-v11
14 | # rm model.zip
15 | # zip model.zip training/net.tf-4600*
16 | # scp model.zip owens@knodel:public_html/press-model/model.zip
17 |
18 |
19 |
20 | # cd ../results/grasp-params/gel-v1/
21 | # rm grasp_model.zip
22 | # zip grasp_model.zip training/net.tf-2000*
23 | # scp grasp_model.zip owens@knodel:public_html/press-model/grasp_model.zip
24 |
25 |
26 |
27 | cd ../results/grasp-params/gel-v4/
28 | rm grasp_model.zip
29 | zip grasp_model.zip training/net.tf-3999*
30 | scp grasp_model.zip owens@knodel:public_html/press-model/grasp_model.zip
31 |
32 |
33 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Roberto Calandra
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/test_tf.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf, numpy as np
2 |
3 | n = 1000
4 | d = 32
5 |
6 | def train():
7 | # w*x
8 | np.random.seed(0)
9 | w_gt = np.float32(np.random.randn(d))
10 | x = np.float32(np.random.randn(n, d))
11 | y_gt = np.dot(x, w_gt)
12 |
13 |     gpu = '/cpu:0'  # despite the name, this runs on the CPU; use '/gpu:0' for GPU
14 | print 'w_gt =', w_gt
15 | with tf.Graph().as_default(), tf.device(gpu), tf.Session() as sess:
16 | w = tf.get_variable('w', d, dtype = tf.float32,
17 | initializer = tf.truncated_normal_initializer())
18 | x_tf = tf.constant(x)
19 |
20 | y_hat = x_tf * tf.expand_dims(w, 0)
21 | y_hat = tf.reduce_sum(y_hat, 1)
22 | r = y_hat - tf.constant(y_gt)
23 | loss = tf.reduce_mean(r**2)
24 |
25 | opt = tf.train.MomentumOptimizer(0.01, 0.9)
26 | grad_step = opt.minimize(loss)
27 |
28 | sess.run(tf.global_variables_initializer())
29 |
30 | for i in xrange(10000):
31 | _, loss_val, w_val = sess.run([grad_step, loss, w])
32 | print 'Iteration:', i, 'Loss:', loss_val
33 |
34 | if __name__ == '__main__':
35 | train()
--------------------------------------------------------------------------------
/manu_sawyer/src/launch/gelsightA_driver.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/manu_sawyer/src/launch/gelsightB_driver.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/robot_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from geometry_msgs.msg import (
4 | PoseStamped,
5 | Pose,
6 | Point,
7 | Quaternion,
8 | )
9 |
10 | class Orientations:
11 |     # Unit quaternion: (x, y, z) is the rotation axis scaled by sin(theta/2); w is cos(theta/2)
12 | FORWARD_POINT = Quaternion(
13 | x=0,
14 | y=0.707,
15 | z=0,
16 | w=0.707,
17 | )
18 |
19 | DOWNWARD_POINT = Quaternion(
20 | x=1,
21 | y=0,
22 | z=0,
23 | w=0,
24 | )
25 |
26 | LEFTWARD_POINT = Quaternion(
27 | x=0.707,
28 | y=0,
29 | z=0,
30 | w=-0.707,
31 | )
32 |
33 | LEFTWARD_DIAG = Quaternion(
34 | x=0.5,
35 | y=-0.5,
36 | z=0,
37 | w=-0.707,
38 | )
39 |
40 | POSITION_1 = Quaternion(
41 | x=0.204954637077,
42 | y=0.978093304167,
43 | z=-0.0350698408324,
44 | w=0.00985856726981,
45 | )
46 |
47 | DOWNWARD_ROTATED = Quaternion(
48 | x=0.707,
49 | y=0.707,
50 | z=0,
51 | w=0,
52 | )
53 |
54 | SIDEWAYS_ORIENTED = Quaternion(
55 | x = 0.5,
56 | y = -0.5,
57 | z = 0.5,
58 | w = 0.5
59 | )
60 |
61 |
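62 | if __name__ == '__main__':
63 |     # Illustrative helper (not used elsewhere): build a quaternion for a
64 |     # rotation of `theta` radians about the unit axis (ax, ay, az).
65 |     import math
66 | 
67 |     def quaternion_about_axis(theta, ax, ay, az):
68 |         s = math.sin(theta / 2.0)
69 |         return Quaternion(x=ax * s, y=ay * s, z=az * s, w=math.cos(theta / 2.0))
70 | 
71 |     # A 180-degree rotation about x reproduces DOWNWARD_POINT above.
72 |     print(quaternion_about_axis(math.pi, 1.0, 0.0, 0.0))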
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/test_kinect.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from robot_camera import RobotCam
3 | import rospy
4 | import time
5 | import matplotlib.pyplot as plt
6 | import cv2
7 | import numpy as np
8 |
9 | rospy.init_node('Testing')
10 | kinect = RobotCam(True)
11 | print "ini done"
12 |
13 | rate = rospy.Rate(30)
14 | # counter = 0
15 | try:
16 | while (True):
17 | # Capture frame-by-frame
18 | init_time = time.time()
19 | frame = kinect.get_latest_image()
20 | # point = kinect.get_point_cloud()
21 | # print(point)
22 | #
23 | print ("TIME ELAPSED", time.time() - init_time)
24 | #
25 | # Our operations on the frame come here
26 | # imgRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
27 |
28 | # depth_img = depth_img.astype(np.float32) / np.max(depth_img) * 512
29 | # depth_img = depth_img.astype(np.uint8)
30 | # depth_img = np.squeeze(depth_img)
31 | #
32 | # # Display the resulting frame
33 | cv2.imshow('frame', frame)
34 | #
35 | #
36 | # # plt.imshow(frame)
37 | # # plt.show()
38 | if cv2.waitKey(1) & 0xFF == ord('q'):
39 | break
40 | # counter += 1
41 | rate.sleep()
42 | except KeyboardInterrupt:
43 | pass
44 | kinect.end_process()
45 |
--------------------------------------------------------------------------------
/manu_sawyer/src/calibration_info_sudri.txt:
--------------------------------------------------------------------------------
1 |
2 | # The five calibration points in the Kinect coordinate system
3 | p_1_kinect = np.array([0.2430977, 0.08286079, 0.62421233]).reshape((3, 1))
4 | p_2_kinect = np.array([-0.21222512, 0.08257368, 0.61546346]).reshape((3, 1))
5 | p_3_kinect = np.array([0.22151346, -0.22452401, 0.93467556]).reshape((3, 1))
6 | p_4_kinect = np.array([-0.22365152, -0.21754953, 0.92477081]).reshape((3, 1))
7 | p_5_kinect = np.array([-0.00227722, -0.09801589, 0.80091135]).reshape((3, 1))
8 |
9 | # The five corresponding calibration points in the Sawyer coordinate system
10 | # p_1 is in the bottom left corner from the computer facing the robot
11 | p_1_sawyer = np.array([0.843183726766, -0.261815126937, 0.0455590419168]).reshape((3, 1))
12 | # p_2 is in the bottom right corner from the computer facing the robot
13 | p_2_sawyer = np.array([0.839926577688, 0.170350066241, 0.0455282716249]).reshape((3, 1))
14 | # p_3 is in the top left corner from the computer facing the robot
15 | p_3_sawyer = np.array([0.40931661688, -0.265741867857, 0.045578159045]).reshape((3, 1))
16 | # p_4 is in the top right corner from the computer facing the robot
17 | p_4_sawyer = np.array([0.408132053271, 0.167204225895, 0.0455695328147]).reshape((3, 1))
18 | # p_5 is the mid point
19 | p_5_sawyer = np.array([0.589429323552, -0.0405393060661, 0.0466802171036]).reshape((3, 1))
20 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/old.py:
--------------------------------------------------------------------------------
1 | def write_tf():
2 | data = ut.load('../results/grasp-dset/v5/train.pk')
3 | tf_file = '../results/grasp-dset/v5/train.tf'
4 | assert not os.path.exists(tf_file)
5 | writer = tf.python_io.TFRecordWriter(tf_file)
6 | for d in ut.time_est(data):
7 | fbl = lambda x : tf.train.Feature(bytes_list = tf.train.BytesList(value = [x]))
8 | fl = lambda x : tf.train.Feature(float_list = tf.train.FloatList(value = map(float, x.flatten())))
9 | il = lambda x : tf.train.Feature(int64_list = tf.train.Int64List(value = x))
10 |
11 | feat = {'gel0_pre': fbl(d['gel0_pre']),
12 | 'gel1_pre': fbl(d['gel1_pre']),
13 | 'gel0_post': fbl(d['gel0_post']),
14 | 'gel1_post': fbl(d['gel1_post']),
15 | 'im0_pre': fbl(d['im0_pre']),
16 | 'im0_post': fbl(d['im0_post']),
17 | 'im1_pre': fbl(d['im1_pre']),
18 | 'im1_post': fbl(d['im1_post']),
19 | 'depth0_pre': fl(d['depth0_pre']),
20 | 'depth0_post': fl(d['depth0_post']),
21 | 'end_effector' : fl(d['end_effector']),
22 | 'initial_press_prob' : fl(d['initial_press_prob']),
23 | 'is_gripping' : il([d['is_gripping']])}
24 | ex = tf.train.Example(features = tf.train.Features(feature = feat))
25 | writer.write(ex.SerializeToString())
26 | writer.close()
27 |
28 |
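29 | # A minimal sketch of reading back the records written above (TF 1.x API;
30 | # assumes this module's usual imports: os, tensorflow as tf, aolib.util as ut).
31 | # The bytes features hold compressed image strings.
32 | def read_tf(tf_file):
33 |     for rec in tf.python_io.tf_record_iterator(tf_file):
34 |         ex = tf.train.Example()
35 |         ex.ParseFromString(rec)
36 |         f = ex.features.feature
37 |         label = f['is_gripping'].int64_list.value[0]
38 |         gel0 = f['gel0_pre'].bytes_list.value[0]  # compressed image bytes
39 |         yield label, gel0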
--------------------------------------------------------------------------------
/manu_sawyer/README.md:
--------------------------------------------------------------------------------
1 | # Sawyer
2 |
3 | To init a console use:
4 | ```
5 | cd /home/guser/catkin_ws
6 | ./intera.sh
7 | ```
8 |
9 | To run a piece of code use:
10 | ```
11 | rosrun manu_sawyer <file_name>.py
12 | ```
13 |
14 | To build after a change in the code use:
15 | ```
16 | catkin_make
17 | ```
18 |
19 | To mark a file as executable, go to the directory of the file and run:
20 | 
21 | ```
22 | chmod +x <file_name>.py
23 | ```
24 |
25 | To reset after an emergency stop use:
26 | ```
27 | rosrun intera_interface enable_robot.py -e
28 |
29 | ```
30 |
31 | To install libuvc_camera in ros (for using gelsight):
32 | ```
33 | sudo apt-get install ros-indigo-libuvc-camera
34 | rosdep install libuvc_camera --os=ubuntu:trusty
35 | ```
36 |
37 | To launch the necessary ROS processes (the gelsight driver requires running `sudo ./intera.sh` first):
38 | ```
39 | roslaunch manu_sawyer gelsight_driver.launch
40 | roslaunch kinect2_bridge kinect2_bridge.launch
41 | ```
42 |
43 |
44 | # Calibration of KinectA
45 |
46 | Find the coordinates needed for calibration in "src/calibration_info_*.txt"
47 |
48 | To obtain the coordinates in the Sawyer frame:
49 | Put the end effector right above the calibration points marked on the table and run "src/print_cartesian_positions.py"
50 |
51 | To obtain the coordinates in the KinectA frame:
52 | Put a small cylindrical object at the center of the calibration points marked on the table and run "src/grip_and_record/transform_test.py"
53 |
54 | Enter all of these coordinates into "src/calibrate.py".
55 |
56 | Use "src/calibrate.py" to find an affine transformation based on these points and the "transform.py" to actually use the transfrom.
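57 | 
58 | "calibrate.py" is not reproduced here; a minimal sketch of the least-squares
59 | fit it performs (assuming the calibration points above are stacked as
60 | 3 x N arrays `K` for Kinect and `S` for Sawyer) could look like:
61 | 
62 | ```python
63 | import numpy as np
64 | 
65 | def fit_affine(K, S):
66 |     # Solve S ~= H K + t for the 3x3 matrix H and 3x1 offset t by stacking
67 |     # the unknowns [H | t] into one linear least-squares problem.
68 |     N = K.shape[1]
69 |     A = np.hstack([K.T, np.ones((N, 1))])   # N x 4
70 |     X = np.linalg.lstsq(A, S.T)[0]          # 4 x 3
71 |     H, t = X[:3].T, X[3].reshape(3, 1)
72 |     np.save('H.npy', H)
73 |     np.save('t.npy', t)
74 |     return H, t
75 | ```
76 | 
77 | "transform.py" then loads `H.npy` and `t.npy` and computes `H.dot(p) + t`.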
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/grasp_example.py:
--------------------------------------------------------------------------------
1 | import grasp_net, grasp_params, h5py, aolib.img as ig, os, numpy as np, aolib.util as ut
2 |
3 | # net_pr = grasp_params.im_fulldata_v5()
4 | net_pr = grasp_params.gel_im_fulldata_v5()
5 | checkpoint_file = '/home/manu/ros_ws/src/manu_research/manu_sawyer/src/tensorflow_model_is_gripping/training/net.tf-6499'
6 |
7 | gpu = '/gpu:0'
8 | db_file = '/media/backup_disk/dataset_manu/ver2/2017-06-22/2017-06-22_212702.hdf5'
9 |
10 | with h5py.File(db_file, 'r') as db:
11 | pre, mid, _ = grasp_net.milestone_frames(db)
12 |
13 | # sc = lambda x : ig.scale(x, (224, 224))
14 | def sc(x):
15 | """ do a center crop (helps with gelsight) """
16 | x = ig.scale(x, (256, 256))
17 | return ut.crop_center(x, 224)
18 |
19 | u = ig.uncompress
20 | crop = grasp_net.crop_kinect
21 | inputs = dict(
22 | gel0_pre=sc(u(db['GelSightA_image'].value[pre])),
23 | gel1_pre=sc(u(db['GelSightB_image'].value[pre])),
24 | gel0_post=sc(u(db['GelSightA_image'].value[mid])),
25 | gel1_post=sc(u(db['GelSightB_image'].value[mid])),
26 | im0_pre=sc(crop(u(db['color_image_KinectA'].value[pre]))),
27 | im0_post=sc(crop(u(db['color_image_KinectA'].value[mid]))),
28 | # these are probably unnecessary
29 | depth0_pre=sc(crop(db['depth_image_KinectA'].value[pre].astype('float32'))),
30 | depth0_post=sc(crop(db['depth_image_KinectA'].value[mid].astype('float32'))))
31 |
32 | net = grasp_net.NetClf(net_pr, checkpoint_file, gpu)
33 | prob = net.predict(**inputs)
34 |
35 | print 'prob = ', prob
36 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # SageMath parsed files
80 | *.sage.py
81 |
82 | # dotenv
83 | .env
84 |
85 | # virtualenv
86 | .venv
87 | venv/
88 | ENV/
89 |
90 | # Spyder project settings
91 | .spyderproject
92 | .spyproject
93 |
94 | # Rope project settings
95 | .ropeproject
96 |
97 | # mkdocs documentation
98 | /site
99 |
100 | # mypy
101 | .mypy_cache/
102 |
103 | .idea/
--------------------------------------------------------------------------------
/manu_sawyer/src/calibration_info_nordri.txt:
--------------------------------------------------------------------------------
1 |
2 | # The nine calibration points in the Kinect coordinate system
3 | p_1_kinect = np.array([0.31798916, 0.00325601, 0.76754364]).reshape((3, 1))
4 | p_2_kinect = np.array([-0.06495789, -0.00613411, 0.78532697]).reshape((3, 1))
5 | p_3_kinect = np.array([-0.42200078, 0.01948896, 0.75328325]).reshape((3, 1))
6 | p_4_kinect = np.array([0.32931394, -0.12782843, 0.95595643]).reshape((3, 1))
7 | p_5_kinect = np.array([-0.0298477 , -0.13292019, 0.95944226]).reshape((3, 1))
8 | p_6_kinect = np.array([-0.40676888, -0.12317579, 0.95006945]).reshape((3, 1))
9 | p_7_kinect = np.array([0.33622021, -0.22669912, 1.09418477]).reshape((3, 1))
10 | p_8_kinect = np.array([-0.02905334, -0.23861358, 1.11272965]).reshape((3, 1))
11 | p_9_kinect = np.array([-0.40161079, -0.2231759 , 1.09171911]).reshape((3, 1))
12 |
13 | # The nine corresponding calibration points in the Sawyer coordinate system
14 | p_1_sawyer = np.array([0.643367191677, -0.350339791193, 0.00328037006498]).reshape((3, 1))
15 | p_2_sawyer = np.array([0.6332503943, 0.0292459324126, 0.00330423965907]).reshape((3, 1))
16 | p_3_sawyer = np.array([0.680385933273, 0.37847117307, 0.000283238475192]).reshape((3, 1))
17 | p_4_sawyer = np.array([0.416538421332, -0.35834409086, 0.00317884107226]).reshape((3, 1))
18 | p_5_sawyer = np.array([0.423452822173, -0.00949250782561, 0.0037321160006]).reshape((3, 1))
19 | p_6_sawyer = np.array([0.442115979626, 0.35916693997, 0.00201009508294]).reshape((3, 1))
20 | p_7_sawyer = np.array([0.248284622296, -0.36458168206, 0.00275356155022]).reshape((3, 1))
21 | p_8_sawyer = np.array([0.24454903839, -0.00529264843613, 0.00249021303604]).reshape((3, 1))
22 | p_9_sawyer = np.array([0.268852273011, 0.351922785944, 0.00141867116927]).reshape((3, 1))
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/demo_press.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import press, cv2, aolib.util as ut, pylab, numpy as np, sys
3 |
4 | #drop_rate = 50
5 | drop_rate = None
6 |
7 | def main():
8 | #train_path = sys.argv[1]
9 | model_path = sys.argv[1]
10 | #vid_file = '/home/ao/Videos/Webcam/train/2_2017-06-01-170024.mp4')
11 | if len(sys.argv) > 2:
12 | vid_file = sys.argv[2]
13 | else:
14 | vid_file = 0
15 | #net = press.NetClf(train_path)
16 | #net = press.NetClf(train_path)
17 | net = press.NetClf(model_path)
18 | #cap = cv2.VideoCapture(0)
19 | cap = cv2.VideoCapture(vid_file)
20 | #cap = cv2.VideoCapture('/home/ao/Videos/Webcam/train/1_2017-05-31-232750.mp4')
21 | #cap = cv2.VideoCapture('/home/ao/Videos/Webcam/train/2017-05-31-232750.mp4')
22 | num_frames = 0
23 | cmap = pylab.cm.RdYlGn
24 | i = 0
25 | while True:
26 |         ret, frame = cap.read()
27 |         if not ret:
28 |             print 'Could not read from camera'
29 |             break
30 |         if i == 0:
31 |             im0 = frame
32 |         i += 1
33 |         if drop_rate is not None and (i != 1 and i % drop_rate != 0):
34 |             continue
35 | if cv2.waitKey(1) & 0xFF == ord('q'):
36 | break
37 | num_frames += 1
38 | #frame = cv2.imread('/home/ao/Dropbox/gelsight-grasp/results/press-data-v1/ims/00000_00073_00011_0.png')
39 | #frame = cv2.imread('/home/ao/Dropbox/gelsight-grasp/results/press-data-v1/ims/00000_00007_00002_0.png')
40 | prob = net.predict(frame, im0)
41 | color = map(int, 255*np.array(cmap(prob))[:3])
42 | cv2.putText(frame, ut.f2(prob), (0, 50),
43 | cv2.FONT_HERSHEY_SIMPLEX, 1, color)
44 | cv2.imshow('frame', frame)
45 |
46 | cap.release()
47 | cv2.destroyAllWindows()
48 |
49 | if __name__ == '__main__':
50 | main()
51 |
52 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/demo_press.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import press, cv2, aolib.util as ut, pylab, numpy as np, sys
3 |
4 | #drop_rate = 50
5 | drop_rate = None
6 |
7 | def main():
8 | #train_path = sys.argv[1]
9 | model_path = sys.argv[1]
10 | #vid_file = '/home/ao/Videos/Webcam/train/2_2017-06-01-170024.mp4')
11 | if len(sys.argv) > 2:
12 | vid_file = sys.argv[2]
13 | else:
14 | vid_file = 0
15 | #net = press.NetClf(train_path)
16 | #net = press.NetClf(train_path)
17 | net = press.NetClf(model_path)
18 | #cap = cv2.VideoCapture(0)
19 | cap = cv2.VideoCapture(vid_file)
20 | #cap = cv2.VideoCapture('/home/ao/Videos/Webcam/train/1_2017-05-31-232750.mp4')
21 | #cap = cv2.VideoCapture('/home/ao/Videos/Webcam/train/2017-05-31-232750.mp4')
22 | num_frames = 0
23 | cmap = pylab.cm.RdYlGn
24 | i = 0
25 | while True:
26 |         ret, frame = cap.read()
27 |         if not ret:
28 |             print 'Could not read from camera'
29 |             break
30 |         if i == 0:
31 |             im0 = frame
32 |         i += 1
33 |         if drop_rate is not None and (i != 1 and i % drop_rate != 0):
34 |             continue
35 | if cv2.waitKey(1) & 0xFF == ord('q'):
36 | break
37 | num_frames += 1
38 | #frame = cv2.imread('/home/ao/Dropbox/gelsight-grasp/results/press-data-v1/ims/00000_00073_00011_0.png')
39 | #frame = cv2.imread('/home/ao/Dropbox/gelsight-grasp/results/press-data-v1/ims/00000_00007_00002_0.png')
40 | prob = net.predict(frame, im0)
41 | color = map(int, 255*np.array(cmap(prob))[:3])
42 | cv2.putText(frame, ut.f2(prob), (0, 50),
43 | cv2.FONT_HERSHEY_SIMPLEX, 1, color)
44 | cv2.imshow('frame', frame)
45 |
46 | cap.release()
47 | cv2.destroyAllWindows()
48 |
49 | if __name__ == '__main__':
50 | main()
51 |
52 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/convert_db.py:
--------------------------------------------------------------------------------
1 | import os, sys, aolib.util as ut, aolib.img as ig, h5py
2 |
3 | pj = ut.pjoin
4 |
5 | in_file = sys.argv[1]
6 | out_file = sys.argv[2]
7 |
8 | convert_im = lambda x : ig.compress(x[:, :, ::-1], 'jpeg')
9 |
10 | with h5py.File(in_file, 'r') as in_db:
11 | # os.system('cp %s %s' % (in_file, out_file))
12 | # print in_db.keys()
13 |
14 | # with h5py.File(out_file, 'r+') as out_db:
15 | # im_names = ['GelSightA_image',
16 | # 'GelSightB_image',
17 | # 'color_image_KinectA',
18 | # 'color_image_KinectB']
19 | # for name in im_names:
20 | # out_db[name] = map(convert_im, in_db[name].value)
21 |
22 |
23 | with h5py.File(out_file, 'w') as out_db:
24 | vals = {}
25 | for k in sorted(in_db.keys()):
26 | if k.startswith('step_'):
27 | im_names = ['GelSightA_image',
28 | 'GelSightB_image',
29 | 'color_image_KinectA',
30 | 'color_image_KinectB']
31 | value_names = ['depth_image_KinectA',
32 | 'depth_image_KinectB',
33 | 'timestamp']
34 | for name in im_names:
35 | ut.add_dict_list(vals, name, convert_im(in_db[k][name].value))
36 | for name in value_names:
37 | ut.add_dict_list(vals, name, in_db[k][name].value)
38 |
39 | else:
40 | #out_db.create_dataset(k, data = in_db[k].value if hasattr(in_db[k], 'value') else in_db[k])
41 | if hasattr(in_db[k], 'value'):
42 | out_db.create_dataset(k, data = in_db[k].value)
43 | else:
44 | print 'skipping:', k
45 |
46 | for name in vals:
47 | out_db.create_dataset(name, data=vals[name])
48 |
49 | print 'Size before:'
50 | os.system('du -ch %s' % in_file)
51 | print 'Size after:'
52 | os.system('du -ch %s' % out_file)
53 |
--------------------------------------------------------------------------------
/manu_sawyer/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package>
3 |   <name>manu_sawyer</name>
4 |   <version>0.0.1</version>
5 |   <description>Manu's Sawyer package</description>
6 | 
7 |   <maintainer>Manu Upadhyaya</maintainer>
8 |   <maintainer>Justin Lin</maintainer>
9 |   <license>MIT</license>
10 | 
11 |   <buildtool_depend>catkin</buildtool_depend>
12 | 
13 |   <build_depend>rospy</build_depend>
14 |   <build_depend>std_msgs</build_depend>
15 |   <build_depend>geometry_msgs</build_depend>
16 |   <build_depend>sensor_msgs</build_depend>
17 |   <build_depend>intera_core_msgs</build_depend>
18 |   <build_depend>wsg_50_common</build_depend>
19 |   <build_depend>wsg_50_python</build_depend>
20 | 
21 |   <run_depend>rospy</run_depend>
22 |   <run_depend>std_msgs</run_depend>
23 |   <run_depend>geometry_msgs</run_depend>
24 |   <run_depend>sensor_msgs</run_depend>
25 |   <run_depend>intera_core_msgs</run_depend>
26 |   <run_depend>wsg_50_common</run_depend>
27 |   <run_depend>wsg_50_python</run_depend>
28 | </package>
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/params_v2.py:
--------------------------------------------------------------------------------
1 | import numpy as np, tensorflow as tf, aolib.util as ut, aolib.img as ig, os
2 | import tensorflow.contrib.slim as slim
3 | import tensorflow.contrib.slim.nets as nets
4 | import vgg, h5py
5 | import sklearn.metrics
6 |
7 | pj = ut.pjoin
8 |
9 | class Params(ut.Struct):
10 | def __init__(self, **kwargs):
11 | self.__dict__.update(kwargs)
12 |
13 | @property
14 | def train_dir(self):
15 | return ut.mkdir(pj(self.resdir, 'training'))
16 |
17 | @property
18 | def summary_dir(self):
19 | return ut.mkdir(pj(self.resdir, 'summary'))
20 |
21 | def base_v2():
22 | return Params(
23 | dsdir = '/media/4tb/owens/grasp-results/dset/v7-full',
24 | base_lr = 1e-4,
25 | lr_gamma = 0.5,
26 | step_size = 2500,
27 | batch_size = 16,
28 | opt_method = 'adam',
29 | cls_style = 'mlp-1',
30 | model_iter = 7000,
31 | train_iters = 7001)
32 |
33 | def gel_im_v2():
34 | pr = base_v2().updated(
35 | description = 'GelSight + images',
36 | resdir = ut.mkdir('../results/grasp-params/v2/gel-im-v2'),
37 | inputs = ['gel', 'im'])
38 | return pr
39 |
40 | def im_v2():
41 | pr = base_v2().updated(
42 | description = 'images',
43 | resdir = '../results/grasp-params/v2/im-v2',
44 | inputs = ['im'])
45 | return pr
46 |
47 | def gel_im_depth_v2():
48 | pr = base_v2().updated(
49 | description = 'GelSight + images + depth',
50 | resdir = ut.mkdir('../results/grasp-params/v2/gel-im-depth-v2'),
51 | inputs = ['gel', 'im', 'depth'])
52 | return pr
53 |
54 | def depth_v2():
55 | pr = base_v2().updated(
56 | description = 'Depth only',
57 | resdir = ut.mkdir('../results/grasp-params/v2/depth-v2'),
58 | inputs = ['depth'])
59 | return pr
60 |
61 | def im_vgg_legacy_v2():
62 | pr = base_v2().updated(
63 | description = 'images',
64 | cls_style = 'linear',
65 | resdir = '../results/grasp-params/v2/im-vgg-legacy-v2',
66 | inputs = ['im'])
67 | return pr
68 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/run1.sh:
--------------------------------------------------------------------------------
1 | ipython -c 'import grasp_net as net, new_params; net.train(new_params.gel_im_v1(1), 0)' &
2 | ipython -c 'import grasp_net as net, new_params; net.train(new_params.gel_im_v1(2), 1)' &
3 | ipython -c 'import grasp_net as net, new_params; net.train(new_params.gel_im_v1(3), 2)' &
4 | ipython -c 'import grasp_net as net, new_params; net.train(new_params.gel_im_v1(4), 3)' &
5 | sleep 18000;
6 | ipython -c 'import grasp_net as net, new_params; net.train(new_params.gel_im_v1(0), 0)' &
7 |
8 | # sleep 18000;
9 |
10 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.press_v1(1), 0)' &
11 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.press_v1(1), 0)' &
12 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.press_v1(2), 0)' &
13 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.press_v1(3), 0)' &
14 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.press_v1(4), 0)' &
15 |
16 | # sleep 18000;
17 |
18 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.im_v1(1), 0)' &
19 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.im_v1(2), 1)' &
20 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.im_v1(3), 2)' &
21 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.im_v1(4), 3)' &
22 | # sleep 18000;
23 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.im_v1(0), 0)'
24 | # sleep 18000;
25 |
26 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.gel_v1(1), 0)' &
27 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.gel_v1(2), 1)' &
28 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.gel_v1(3), 2)' &
29 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.gel_v1(4), 3)' &
30 | # sleep 18000;
31 | # ipython -c 'import grasp_net as net, new_params; net.train(new_params.gel_v1(0), 0)'
32 | # sleep 18000;
33 |
34 |
--------------------------------------------------------------------------------
/manu_sawyer/src/visualizer_at_at.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 | import h5py
5 | from tqdm import tqdm
6 | import cv2
7 | import grip_and_record.getch
8 | import matplotlib.pyplot as plt
9 | import tensorflow_model_is_gripping.aolib.img as ig
10 | import time
11 |
12 | def visualize(filename):
13 |     """
14 |     Show the two Kinect and two GelSight frames recorded at grasp time.
15 | 
16 |     :param filename: path to one HDF5 recording
17 |     """
18 |
19 | print('Opening file: %s' % filename)
20 | f = h5py.File(filename, "r")
21 |
22 | times = np.array(f['/timestamp'].value)
23 |
24 | time_at_grasping = float(f["time_at_grasping"].value)
25 | index_time_at_grasping = np.argmin(
26 | np.abs(times - time_at_grasping)) # Find index corresponding to timestamp
27 |
28 | compressed_kinectA = f['/color_image_KinectA'].value[index_time_at_grasping]
29 | KinectA_color_time_at_grasping = np.array(ig.uncompress(compressed_kinectA))
30 |
31 | compressed_kinectB = f['/color_image_KinectB'].value[index_time_at_grasping]
32 | KinectB_color_time_at_grasping = np.array(ig.uncompress(compressed_kinectB))
33 |
34 | compressed_GelSightA = f['/GelSightA_image'].value[index_time_at_grasping]
35 | GelSightA_image_time_at_grasping = np.array(ig.uncompress(compressed_GelSightA))
36 |
37 | compressed_GelSightB = f['/GelSightB_image'].value[index_time_at_grasping]
38 | GelSightB_image_time_at_grasping = np.array(ig.uncompress(compressed_GelSightB))
39 |
40 | fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
41 | ax1.imshow(KinectA_color_time_at_grasping)
42 | ax2.imshow(KinectB_color_time_at_grasping)
43 | ax3.imshow(GelSightA_image_time_at_grasping)
44 | ax4.imshow(GelSightB_image_time_at_grasping)
45 | ax1.axis('off')
46 | ax2.axis('off')
47 | ax3.axis('off')
48 | ax4.axis('off')
49 | plt.draw()
50 | plt.show()
51 |
52 |
53 | if __name__ == '__main__':
54 | namefile = '/home/manu/ros_ws/src/manu_research/data/2017-06-26_210256.hdf5'
55 | namefile = '/media/backup_disk/dataset_manu/ver2/2017-06-22/2017-06-22_235321.hdf5'
56 | visualize(namefile)
57 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/new_params.py:
--------------------------------------------------------------------------------
1 | import numpy as np, tensorflow as tf, aolib.util as ut, aolib.img as ig, os
2 | import tensorflow.contrib.slim as slim
3 | import tensorflow.contrib.slim.nets as nets
4 | import vgg, h5py
5 | import sklearn.metrics
6 |
7 | pj = ut.pjoin
8 |
9 | class Params(ut.Struct):
10 | def __init__(self, **kwargs):
11 | self.__dict__.update(kwargs)
12 |
13 | @property
14 | def train_dir(self):
15 | return ut.mkdir(pj(self.resdir, 'training'))
16 |
17 | @property
18 | def summary_dir(self):
19 | return ut.mkdir(pj(self.resdir, 'summary'))
20 |
21 | def base_v1(split_num):
22 | return Params(
23 | dsdir = '../results/dset/v1-split%d' % split_num,
24 | base_lr = 1e-4,
25 | #base_lr = 1e-2,
26 | #base_lr = 0.01,
27 | #base_lr = 1e-3,
28 | lr_gamma = 0.5,
29 | #step_size = 1500,
30 | step_size = 2000,
31 | batch_size = 16,
32 | opt_method = 'adam',
33 | #opt_method = 'momentum',
34 | cls_style = 'mlp-1',
35 | #model_iter = 5000,
36 | model_iter = 7000,
37 | train_iters = 5001)
38 |
39 | def gel_im_v1(n):
40 | pr = base_v1(n).updated(
41 | description = 'GelSight + images',
42 | resdir = '../results/grasp-params/gel-im-v1-split-%d' % n,
43 | inputs = ['gel', 'im'])
44 | return pr
45 |
46 | def gel_v1(n):
47 | pr = base_v1(n).updated(
48 | description = 'GelSight',
49 | resdir = '../results/grasp-params/gel-v1-split-%d' % n,
50 | inputs = ['gel'])
51 | return pr
52 |
53 | def im_v1(n):
54 | pr = base_v1(n).updated(
55 | description = 'images',
56 | resdir = '../results/grasp-params/im-v1-split-%d' % n,
57 | inputs = ['im'])
58 | return pr
59 |
60 | def depth_v1(n):
61 | pr = base_v1(n).updated(
62 | description = 'images',
63 | resdir = '../results/grasp-params/depth-v1-split-%d' % n,
64 | inputs = ['depth'])
65 | return pr
66 |
67 | def press_v1(n):
68 | pr = base_v1(n).updated(
69 | description = 'GelSight + images',
70 | resdir = '../results/grasp-params/press-split-%d' % n,
71 | inputs = ['press'])
72 | return pr
73 |
--------------------------------------------------------------------------------
/manu_sawyer/src/visualizer_at_post2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 | import h5py
5 | from tqdm import tqdm
6 | import cv2
7 | import grip_and_record.getch
8 | import matplotlib.pyplot as plt
9 | import tensorflow_model_is_gripping.aolib.img as ig
10 | import time
11 |
12 | def find_cool_images(filename):
13 |     """
14 |     Show the two Kinect and two GelSight frames recorded at time_post2_grasping.
15 | 
16 |     :param filename: path to one HDF5 recording
17 |     """
18 | print('Opening file: %s' % filename)
19 | f = h5py.File(filename, "r")
20 |
21 | times = np.array(f['/timestamp'].value)
22 |
23 | time_post2_grasping = float(f["time_post2_grasping"].value)
24 | index_time_post2_grasping = np.argmin(
25 | np.abs(times - time_post2_grasping)) # Find index corresponding to timestamp
26 |
27 | compressed_kinectA = f['/color_image_KinectA'].value[index_time_post2_grasping]
28 | KinectA_color_time_post2_grasping = np.array(ig.uncompress(compressed_kinectA))
29 |
30 | compressed_kinectB = f['/color_image_KinectB'].value[index_time_post2_grasping]
31 | KinectB_color_time_post2_grasping = np.array(ig.uncompress(compressed_kinectB))
32 |
33 | compressed_GelSightA = f['/GelSightA_image'].value[index_time_post2_grasping]
34 | GelSightA_image_time_post2_grasping = np.array(ig.uncompress(compressed_GelSightA))
35 |
36 | compressed_GelSightB = f['/GelSightB_image'].value[index_time_post2_grasping]
37 | GelSightB_image_time_post2_grasping = np.array(ig.uncompress(compressed_GelSightB))
38 |
39 | fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
40 | ax1.imshow(KinectA_color_time_post2_grasping)
41 | ax2.imshow(KinectB_color_time_post2_grasping)
42 | ax3.imshow(GelSightA_image_time_post2_grasping)
43 | ax4.imshow(GelSightB_image_time_post2_grasping)
44 | ax1.axis('off')
45 | ax2.axis('off')
46 | ax3.axis('off')
47 | ax4.axis('off')
48 | plt.draw()
49 | # plt.ion()
50 | plt.show()
51 |
52 |
53 | if __name__ == '__main__':
54 | namefile = '/media/backup_disk/dataset_manu/ver2/2017-06-22/2017-06-22_212702.hdf5'
55 | find_cool_images(namefile)
56 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/aolib/vlfeat.py:
--------------------------------------------------------------------------------
1 | import os, sys, img as ig, util as ut, scipy.io, numpy as np
2 |
3 | #SiftPath = '../lib/vlfeat_mac/mmmikael-vlfeat-1323904/bin/maci/sift'
4 | SiftPath = '/csail/vision-billf5/aho/mono/lib/vlfeat-0.9.14/bin/glnxa64/sift'
5 |
6 | #from _vlfeat import *
7 | def vl_sift(im, frames = None, orientations = False, peak_thresh = None, edge_thresh = None):
8 | """ Compute SIFT keypoints and descriptors using VLFeat binary.
9 | Should be thread-safe. """
10 | ut.check(frames is None or frames.shape[1] == 4)
11 | # frame_fname = '../tmp/vl_frames.frame'
12 | # im_fname1 = '../tmp/vl_im.png'
13 | # im_fname2 = '../tmp/vl_im.pgm'
14 | # out_fname = '../tmp/vl_out.sift'
15 | frame_fname = ut.make_temp('.frame')
16 | im_fname1 = ut.make_temp('.png')
17 | im_fname2 = ut.make_temp('.pgm')
18 | out_fname = ut.make_temp('.sift')
19 | #ut.write_lines(frame_fname, ('%f %f %f 0 0 %f' % (pt[0], pt[1], s, s) for pt in pts for s in scales))
20 | ig.save(im_fname1, im)
21 | os.system('convert %s %s' % (im_fname1, im_fname2))
22 | frame_opt = ''
23 | if frames is not None:
24 | ut.write_lines(frame_fname, ('%f %f %f %f' % tuple(f) for f in frames))
25 | frame_opt = '--read-frames %s' % frame_fname
26 | orientation_opt = '--orientations' if orientations else ''
27 | peak_opt = '--peak-thresh %f' % peak_thresh if peak_thresh is not None else ''
28 | edge_opt = '--edge-thresh %f' % edge_thresh if edge_thresh is not None else ''
29 | ut.sys_check("%s %s %s %s -o %s %s %s" % (SiftPath, im_fname2, frame_opt, orientation_opt, out_fname, peak_opt, edge_opt))
30 | sift = read_sift(out_fname)
31 | os.system('rm %s %s %s' % (im_fname1, im_fname2, out_fname))
32 | return sift
33 |
34 | def read_sift(sift_fname):
35 | """ Feature format: [[x, y, scale, orientation], ...] """
36 | lines = ut.lines(sift_fname)
37 | if len(lines):
38 | fd = np.array([map(float, line.split()) for line in lines])
39 | f = fd[:,:4]
40 | d = np.uint8(fd[:,4:])
41 | return f, d
42 | else:
43 | return np.zeros((4, 0)), np.uint8(np.zeros((128, 0)))
44 |
45 |
46 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/aolib/vlfeat.py:
--------------------------------------------------------------------------------
1 | import os, sys, img as ig, util as ut, scipy.io, numpy as np
2 |
3 | #SiftPath = '../lib/vlfeat_mac/mmmikael-vlfeat-1323904/bin/maci/sift'
4 | SiftPath = '/csail/vision-billf5/aho/mono/lib/vlfeat-0.9.14/bin/glnxa64/sift'
5 |
6 | #from _vlfeat import *
7 | def vl_sift(im, frames = None, orientations = False, peak_thresh = None, edge_thresh = None):
8 | """ Compute SIFT keypoints and descriptors using VLFeat binary.
9 | Should be thread-safe. """
10 | ut.check(frames is None or frames.shape[1] == 4)
11 | # frame_fname = '../tmp/vl_frames.frame'
12 | # im_fname1 = '../tmp/vl_im.png'
13 | # im_fname2 = '../tmp/vl_im.pgm'
14 | # out_fname = '../tmp/vl_out.sift'
15 | frame_fname = ut.make_temp('.frame')
16 | im_fname1 = ut.make_temp('.png')
17 | im_fname2 = ut.make_temp('.pgm')
18 | out_fname = ut.make_temp('.sift')
19 | #ut.write_lines(frame_fname, ('%f %f %f 0 0 %f' % (pt[0], pt[1], s, s) for pt in pts for s in scales))
20 | ig.save(im_fname1, im)
21 | os.system('convert %s %s' % (im_fname1, im_fname2))
22 | frame_opt = ''
23 | if frames is not None:
24 | ut.write_lines(frame_fname, ('%f %f %f %f' % tuple(f) for f in frames))
25 | frame_opt = '--read-frames %s' % frame_fname
26 | orientation_opt = '--orientations' if orientations else ''
27 | peak_opt = '--peak-thresh %f' % peak_thresh if peak_thresh is not None else ''
28 | edge_opt = '--edge-thresh %f' % edge_thresh if edge_thresh is not None else ''
29 | ut.sys_check("%s %s %s %s -o %s %s %s" % (SiftPath, im_fname2, frame_opt, orientation_opt, out_fname, peak_opt, edge_opt))
30 | sift = read_sift(out_fname)
31 | os.system('rm %s %s %s' % (im_fname1, im_fname2, out_fname))
32 | return sift
33 |
34 | def read_sift(sift_fname):
35 | """ Feature format: [[x, y, scale, orientation], ...] """
36 | lines = ut.lines(sift_fname)
37 | if len(lines):
38 | fd = np.array([map(float, line.split()) for line in lines])
39 | f = fd[:,:4]
40 | d = np.uint8(fd[:,4:])
41 | return f, d
42 | else:
43 | return np.zeros((4, 0)), np.uint8(np.zeros((128, 0)))
44 |
45 |
46 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/aolib/vlfeat.py:
--------------------------------------------------------------------------------
1 | import os, sys, img as ig, util as ut, scipy.io, numpy as np
2 |
3 | #SiftPath = '../lib/vlfeat_mac/mmmikael-vlfeat-1323904/bin/maci/sift'
4 | SiftPath = '/csail/vision-billf5/aho/mono/lib/vlfeat-0.9.14/bin/glnxa64/sift'
5 |
6 | #from _vlfeat import *
7 | def vl_sift(im, frames = None, orientations = False, peak_thresh = None, edge_thresh = None):
8 | """ Compute SIFT keypoints and descriptors using VLFeat binary.
9 | Should be thread-safe. """
10 | ut.check(frames is None or frames.shape[1] == 4)
11 | # frame_fname = '../tmp/vl_frames.frame'
12 | # im_fname1 = '../tmp/vl_im.png'
13 | # im_fname2 = '../tmp/vl_im.pgm'
14 | # out_fname = '../tmp/vl_out.sift'
15 | frame_fname = ut.make_temp('.frame')
16 | im_fname1 = ut.make_temp('.png')
17 | im_fname2 = ut.make_temp('.pgm')
18 | out_fname = ut.make_temp('.sift')
19 | #ut.write_lines(frame_fname, ('%f %f %f 0 0 %f' % (pt[0], pt[1], s, s) for pt in pts for s in scales))
20 | ig.save(im_fname1, im)
21 | os.system('convert %s %s' % (im_fname1, im_fname2))
22 | frame_opt = ''
23 | if frames is not None:
24 | ut.write_lines(frame_fname, ('%f %f %f %f' % tuple(f) for f in frames))
25 | frame_opt = '--read-frames %s' % frame_fname
26 | orientation_opt = '--orientations' if orientations else ''
27 | peak_opt = '--peak-thresh %f' % peak_thresh if peak_thresh is not None else ''
28 | edge_opt = '--edge-thresh %f' % edge_thresh if edge_thresh is not None else ''
29 | ut.sys_check("%s %s %s %s -o %s %s %s" % (SiftPath, im_fname2, frame_opt, orientation_opt, out_fname, peak_opt, edge_opt))
30 | sift = read_sift(out_fname)
31 | os.system('rm %s %s %s' % (im_fname1, im_fname2, out_fname))
32 | return sift
33 |
34 | def read_sift(sift_fname):
35 | """ Feature format: [[x, y, scale, orientation], ...] """
36 | lines = ut.lines(sift_fname)
37 | if len(lines):
38 | fd = np.array([map(float, line.split()) for line in lines])
39 | f = fd[:,:4]
40 | d = np.uint8(fd[:,4:])
41 | return f, d
42 | else:
43 |         return np.zeros((0, 4)), np.uint8(np.zeros((0, 128)))  # match the non-empty (N, 4), (N, 128) shapes
44 |
45 |
46 |
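A minimal usage sketch for `vl_sift` (hedged: the input file and the `ig.load` helper are assumptions; the VLFeat binary at `SiftPath` and ImageMagick's `convert` must be available, since the function shells out to both):

```python
import img as ig        # aolib image helpers, as imported above
import vlfeat

im = ig.load('example.png')  # hypothetical input image; ig.load is assumed here
frames, descriptors = vlfeat.vl_sift(im, peak_thresh=5.0)
print frames.shape, descriptors.shape  # (N, 4) keypoints and (N, 128) uint8 descriptors
```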
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Research
2 |
3 | This is the code used in the paper:
4 | ```
5 |
6 | ```
7 |
8 | # Hardware Setup
9 | - Sawyer 7-DOF arm (from now on called Nordri)
10 | - 2 Kinects
11 | - 2 GelSight sensors
12 |
13 | # Software Dependencies
14 |
15 | The following packages need to be installed:
16 | - Python
17 | - TensorFlow
18 | - ROS
19 | - The Intera SDK
20 | - https://github.com/OpenKinect/libfreenect2
21 | - https://github.com/code-iai/iai_kinect2
22 |
23 | # Run code
24 | Run each of the following blocks in a separate terminal:
25 | ```
26 | cd ~/ros_ws
27 | ./nordri.sh
28 | roslaunch wsg_50_driver wsg_50_tcp_script.launch
29 | ```
30 |
31 | ```
32 | cd ~/ros_ws
33 | ./nordri.sh
34 | rosrun kinect2_bridge kinect2_bridge
35 | ```
36 |
37 | ```
38 | cd ~/ros_ws
39 | ./nordri.sh
40 | roslaunch manu_sawyer gelsightA_driver.launch
41 | ```
42 |
43 | ```
44 | cd ~/ros_ws
45 | ./nordri.sh
46 | roslaunch manu_sawyer gelsightB_driver.launch
47 | ```
48 |
49 |
50 | # HOW TO COLLECT DATA:
51 |
52 | - Turn on the robot, gripper, and GelSights (always A first, then B)
53 | - Open `run_experiment_nordri.py` and set the `name` variable (approx. line 62) so that it corresponds to the object that you want to use.
54 | - In the same file, set the `lower_bound_table_dist` variable (approx. line 57) to an appropriate value: taller objects require about 0.05, while smaller ones can be set as low as 0.015.
55 | - Run `run_experiment_nordri.py` in intera mode
56 | - Follow the printed instructions
57 |
58 | ```
59 | cd ~/ros_ws
60 | ./nordri.sh
61 | rosrun manu_sawyer run_experiment_nordri.py
62 | ```
63 |
64 | ### Common problems/errors and fixes:
65 |
66 | Problem: How do I end the script?
67 | Solution: Run `killall -9 python` in a new terminal, and delete the last two files in the `Data` directory, if applicable.
68 |
69 | Problem: The GelSightA topic stops, i.e. the terminal where the GelSightA launch file was run shows an error.
70 | Solution: End the script `run_experiment_nordri.py` and restart GelSightA. Delete the affected files in the `Data` directory, then rerun `run_experiment_nordri.py`.
71 |
72 |
--------------------------------------------------------------------------------
/manu_sawyer/src/video_conv/RGB2video.py:
--------------------------------------------------------------------------------
1 | # import R.log as rlog
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 |
5 | # TODO: use multiprocessing
6 | # from multiprocessing import Pool
7 |
8 |
9 | def RGB2video(data, nameFile='video', framerate=24, codec='mpeg4', threads=4, verbosity=1, indent=0):
10 | """
11 | Encode and save RGB data as a video file.
12 | Requirements: moviepy
13 | :param data: np.array N x H x W x 3, where H=height, W=width, and N is the number of frames
14 | :param nameFile: name of the file to be saved
15 | :param framerate: scalar>0.
16 | :param codec: string.
17 | :param threads: scalar >0.
18 | :param verbosity: scalar.
19 | :param indent: scalar.
20 | :return:
21 | """
22 | from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter as fwv
23 |
24 | assert framerate > 0, 'framerate must be >0'
25 | assert threads > 0, 'threads must be >0'
26 | assert data.ndim == 4, 'Incorrect shape of data'
27 |     assert data.shape[3] == 3, 'data must be N x H x W x 3'
28 |
29 | try:
30 | # Write to FFMPEG
31 | extension = '.mp4' # '.avi'
32 | fullNameVideo = nameFile + extension
33 | n_frame = data.shape[0]
34 | resolution = (data.shape[2], data.shape[1]) # (W, H)
35 | # TODO: use rlog.cnd_msg(), after fixing import rlog
36 | print('Resolution: %d x %d fps: %d n_frames: %d' % (resolution[0], resolution[1], framerate, n_frame))
37 | print('Saving to file: ' + fullNameVideo)
38 | a = fwv(filename=fullNameVideo, codec=codec, size=resolution, fps=framerate, preset="slower", threads=threads)
39 | for i in range(n_frame):
40 | # frame = np.swapaxes(data[i, :], 1, 2)
41 | frame = data[i, :].astype('uint8') # Convert to uint8
42 | assert np.all(0 <= frame) and np.all(frame <= 255), 'Value of the pixels is not in [0-255]' # Check data
43 | a.write_frame(frame) # Write to file
44 | # plt.figure()
45 | # plt.imshow(frame/255)
46 | # plt.show
47 | a.close() # Close file
48 | status = 0
49 | # rlog.cnd_status(current_verbosity=verbosity, necessary_verbosity=1, f=0)
50 | # TODO: fix circular: import rlog
51 |     except Exception as e:
52 |         print('Video encoding failed: %s' % e)
53 |         status = 1
54 |
55 | return status
56 |
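A minimal sketch of calling `RGB2video` on synthetic frames (assumes moviepy and ffmpeg are installed; this writes `demo.mp4` next to the script):

```python
import numpy as np
import RGB2video

# 60 frames of 120x160 RGB noise, shaped N x H x W x 3 with values in [0, 255]
frames = np.random.randint(0, 256, size=(60, 120, 160, 3))
status = RGB2video.RGB2video(data=frames, nameFile='demo', framerate=24)
print(status)  # 0 on success, 1 on failure
```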
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/getch.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2013-2016, Rethink Robotics
2 | # All rights reserved.
3 | #
4 | # Redistribution and use in source and binary forms, with or without
5 | # modification, are permitted provided that the following conditions are met:
6 | #
7 | # 1. Redistributions of source code must retain the above copyright notice,
8 | # this list of conditions and the following disclaimer.
9 | # 2. Redistributions in binary form must reproduce the above copyright
10 | # notice, this list of conditions and the following disclaimer in the
11 | # documentation and/or other materials provided with the distribution.
12 | # 3. Neither the name of the Rethink Robotics nor the names of its
13 | # contributors may be used to endorse or promote products derived from
14 | # this software without specific prior written permission.
15 | #
16 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 | # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 | # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 | # POSSIBILITY OF SUCH DAMAGE.
27 |
28 | import sys
29 | import termios
30 | import tty
31 | from select import select
32 |
33 |
34 | def getch(timeout=0.01):
35 | """
36 | Retrieves a character from stdin.
37 | Returns None if no character is available within the timeout.
38 | Blocks if timeout < 0.
39 | """
40 | # If this is being piped to, ignore non-blocking functionality
41 | if not sys.stdin.isatty():
42 | return sys.stdin.read(1)
43 | fileno = sys.stdin.fileno()
44 | old_settings = termios.tcgetattr(fileno)
45 | ch = None
46 | try:
47 | tty.setraw(fileno)
48 | rlist = [fileno]
49 | if timeout >= 0:
50 | [rlist, _, _] = select(rlist, [], [], timeout)
51 | if fileno in rlist:
52 | ch = sys.stdin.read(1)
53 | except Exception as ex:
54 | print "getch", ex
55 | raise OSError
56 | finally:
57 | termios.tcsetattr(fileno, termios.TCSADRAIN, old_settings)
58 | return ch
59 |
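A minimal sketch of polling the keyboard with `getch` (requires a real TTY; the `q` key here is just an example):

```python
from grip_and_record.getch import getch

while True:
    c = getch(timeout=0.1)  # returns None if no key arrives within 0.1 s
    if c == 'q':
        break
```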
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/train_grasp.py:
--------------------------------------------------------------------------------
1 | import grasp_net, grasp_params as ps, aolib.util as ut, os
2 | pj = ut.pjoin
3 |
4 | #gpus = [1,2]
5 | gpus = [0,1,2,3]
6 | #gpus = [1]
7 |
8 | # def all_params():
9 | # return [ps.gel_v2(),
10 | # ps.im_v2(),
11 | # #ps.depth_v2(),
12 | # #ps.press_v2(),
13 | # #ps.ee_v2(),
14 | # #ps.im_ee_v2(),
15 | # ps.gel0_only_v2(),
16 | # ps.gel1_only_v2(),
17 | # ps.gel_im_v2(),]
18 |
19 | # def all_params():
20 | # return [ps.gel_v3(),
21 | # ps.im_v3(),
22 | # ps.depth_v3(),
23 | # ps.press_v3(),
24 | # ps.ee_v3(),
25 | # ps.im_ee_v3(),
26 | # ps.gel0_only_v3(),
27 | # ps.gel1_only_v3(),
28 | # ps.gel_im_v3(),]
29 |
30 |
31 | # def all_params():
32 | # return [# ps.gel_v4(),
33 | # # ps.im_v4(),
34 | # # ps.depth_v4(),
35 | # # #ps.press_v4(),
36 | # # ps.ee_v4(),
37 | # # ps.im_ee_v4(),
38 | # # ps.gel0_only_v4(),
39 | # # ps.gel1_only_v4(),
40 | # #ps.gel_im_v4(),]
41 |
42 | def all_params():
43 | #return [ps.gel_v5(),]
44 | # ps.im_v5(),
45 | # ps.depth_v5(),
46 | # ps.ee_v5(),
47 | # ps.im_ee_v5(),
48 | # ps.gel0_only_v5(),
49 | # ps.gel1_only_v5(),
50 | # ps.gel_im_v5(),]
51 |
52 | return [#ps.gel_v5(),
53 | #ps.im_v5(),
54 | #ps.depth_v5(),
55 | #ps.ee_v5(),
56 | # ps.im_ee_v5(),
57 | # ps.gel0_only_v5(),
58 | ps.gel1_only_v5(),]
59 | #ps.gel_im_same_v5(),]
60 | #]
61 | #ps.gel_v5_single()]
62 |
63 | def train_all(make_data = False):
64 | if make_data:
65 | #grasp_net.write_data(all_params()[0].dsdir)
66 | grasp_net.write_data(ps.base_v5().dsdir)
67 | #return
68 | #return
69 |
70 | prs = all_params()
71 | for pr in prs:
72 | print 'Running:', pr.resdir.split('/')[-1]
73 | grasp_net.train(pr, gpus)
74 |
75 | def eval_all(run=True, gpu_num=None,test_on_train=False):
76 | print """\\begin{tabular}"""
77 | for pr in all_params():
78 | if run:
79 | if gpu_num is not None:
80 | grasp_net.test(pr, gpu_num, test_on_train = test_on_train)
81 | else:
82 | grasp_net.test(pr, None, test_on_train = test_on_train)
83 | out_file = pj(pr.resdir, 'eval_results.pk')
84 | if os.path.exists(out_file):
85 | r = ut.load(out_file)
86 | print '%s & %s%% & %s%% \\\\' % (pr.description, ut.f2(100*r['acc']), ut.f2(100*r['ap']))
87 | print """\\end{tabular}"""
88 |
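The module appears to be driven from an interactive Python session; a minimal sketch of the assumed call pattern:

```python
import train_grasp

train_grasp.train_all(make_data=True)  # write the dataset, then train every config in all_params()
train_grasp.eval_all(run=True)         # test each model and print a LaTeX results table
```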
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/train_grasp.py:
--------------------------------------------------------------------------------
1 | import grasp_net, grasp_params as ps, aolib.util as ut, os
2 | pj = ut.pjoin
3 |
4 | #gpus = None
5 | gpus = [1,2]
6 | #gpus = [1]
7 |
8 | # def all_params():
9 | # return [ps.gel_v2(),
10 | # ps.im_v2(),
11 | # #ps.depth_v2(),
12 | # #ps.press_v2(),
13 | # #ps.ee_v2(),
14 | # #ps.im_ee_v2(),
15 | # ps.gel0_only_v2(),
16 | # ps.gel1_only_v2(),
17 | # ps.gel_im_v2(),]
18 |
19 | # def all_params():
20 | # return [ps.gel_v3(),
21 | # ps.im_v3(),
22 | # ps.depth_v3(),
23 | # ps.press_v3(),
24 | # ps.ee_v3(),
25 | # ps.im_ee_v3(),
26 | # ps.gel0_only_v3(),
27 | # ps.gel1_only_v3(),
28 | # ps.gel_im_v3(),]
29 |
30 |
31 | # def all_params():
32 | # return [# ps.gel_v4(),
33 | # # ps.im_v4(),
34 | # # ps.depth_v4(),
35 | # # #ps.press_v4(),
36 | # # ps.ee_v4(),
37 | # # ps.im_ee_v4(),
38 | # # ps.gel0_only_v4(),
39 | # # ps.gel1_only_v4(),
40 | # #ps.gel_im_v4(),]
41 |
42 | def all_params():
43 | #return [ps.gel_v5(),]
44 | # ps.im_v5(),
45 | # ps.depth_v5(),
46 | # ps.ee_v5(),
47 | # ps.im_ee_v5(),
48 | # ps.gel0_only_v5(),
49 | # ps.gel1_only_v5(),
50 | # ps.gel_im_v5(),]
51 | #return [ps.depth_v5()]
52 | #return [ps.gel_v5()]
53 | #return [ps.mean_diff_v5()]
54 | #return [ps.gel_im_fulldata_v5()]
55 | #return [ps.im_fulldata_v5()]
56 | return [ps.im_v5(),
57 | ps.gel_v5(),
58 | ps.gel_im_v5(),
59 | ps.depth_v5(),
60 | ps.ee_v5(),
61 | ps.im_ee_v5(),
62 | ps.gel0_only_v5(),
63 | ps.mean_diff_v5()]
64 |
65 | def train_all(make_data = False):
66 | if make_data:
67 | #grasp_net.write_data(all_params()[0].dsdir)
68 | grasp_net.write_data(ps.base_v5().dsdir)
69 | #return
70 | #return
71 |
72 | prs = all_params()
73 | for pr in prs:
74 | print 'Running:', pr.resdir.split('/')[-1]
75 | grasp_net.train(pr, gpus)
76 |
77 | def eval_all(run=True, use_gpu=False,test_on_train=False):
78 | print """\\begin{tabular}"""
79 | for pr in all_params():
80 | if run:
81 | if use_gpu:
82 | grasp_net.test(pr, gpus[0], test_on_train = test_on_train)
83 | else:
84 | grasp_net.test(pr, None, test_on_train = test_on_train)
85 | out_file = pj(pr.resdir, 'eval_results.pk')
86 | if os.path.exists(out_file):
87 | r = ut.load(out_file)
88 | print '%s & %s%% & %s%% \\\\' % (pr.description, ut.f2(100*r['acc']), ut.f2(100*r['ap']))
89 | print """\\end{tabular}"""
90 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/label_video.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # don't use anaconda python, since its opencv version
3 | # doesn't work with this video
4 |
5 | import sys, os, cv2, time
6 |
7 | #fps = 29.97
8 | #fps = 250.
9 | buf = 0.25
10 |
11 | def main():
12 | print "Mark positives with a space and negatives with 'n'. z = undo"
13 | in_vid_file = sys.argv[1]
14 | print 'Loading:', in_vid_file
15 | cap = cv2.VideoCapture(in_vid_file)
16 | fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
17 | print 'fps =', fps
18 |
19 | start_time = 0. if len(sys.argv) <= 2 else float(sys.argv[2])
20 |
21 | press_time = None
22 | neg_time = None
23 |
24 | out_fname = '.'.join(in_vid_file.split('.')[:-1]) + '.txt'
25 | with open(out_fname, 'a') as out_file:
26 | idx = 0
27 | while True:
28 | ret, frame = cap.read()
29 | if not ret:
30 |                 print 'Could not read frame (end of video?)'
31 | print 'ret =', ret
32 | break
33 | k = cv2.waitKey(1)
34 | t = idx / float(fps)
35 | idx += 1
36 |
37 | if t < start_time:
38 | if idx % 100 == 0:
39 | print 'Skipping', t, 'start =', start_time
40 | continue
41 |
42 | is_pressing = press_time is not None and press_time < t < press_time + buf
43 | is_neg = neg_time is not None and neg_time < t < neg_time + buf
44 |
45 | sleep_dur = 0. if is_neg else 3*1./fps
46 | time.sleep(sleep_dur)
47 |
48 | k = k & 0xFF
49 | if k == ord('q'):
50 | break
51 | elif k == ord(' ') or k == ord('p'):
52 | if press_time is None or t > press_time + buf:
53 | if press_time is not None:
54 | out_file.write('p %.4f\n' % press_time)
55 | out_file.flush()
56 | press_time = t
57 | print 'Press:', press_time
58 | elif k == ord('n'):
59 | if neg_time is None or t > neg_time + buf:
60 | if neg_time is not None:
61 | out_file.write('n %.4f\n' % neg_time)
62 | out_file.flush()
63 | neg_time = t
64 | print 'Neg:', neg_time
65 | elif k == ord('z'):
66 | print 'Undoing last press and neg'
67 | press_time = None
68 | neg_time = None
69 |
70 | if is_pressing:
71 | color = (0, 255, 0)
72 | elif is_neg:
73 | color = (0, 0, 255)
74 | else:
75 | color = (255, 0, 0)
76 |
77 | frame = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2))
78 | secs = t
79 | mins = int(secs / 60.)
80 | secs = secs - mins*60
81 | text = '%02d:%02d' % (mins, secs)
82 | cv2.putText(frame, text, (0, 50),
83 | cv2.FONT_HERSHEY_SIMPLEX, 1, color)
84 |
85 | cv2.imshow('frame', frame)
86 |
87 | cap.release()
88 | cv2.destroyAllWindows()
89 |
90 | if __name__ == '__main__':
91 | main()
92 |
93 |
94 |
95 |
96 |
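Usage, inferred from the `sys.argv` handling above: `python label_video.py <video_file> [start_time_seconds]`. Space or `p` marks a positive, `n` marks a negative, `z` discards the pending marks, and `q` quits; labels are appended to a `.txt` file with the same base name as the video.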
--------------------------------------------------------------------------------
/manu_sawyer/src/print_cartesian_positions.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import rospy
4 |
5 | from sensor_msgs.msg import JointState
6 |
7 | from intera_core_msgs.srv import (
8 | SolvePositionFK,
9 | SolvePositionFKRequest,
10 | )
11 |
12 | import intera_interface
13 |
14 | class Print_fwd_kin(object):
15 | def __init__(self, rate):
16 |
17 | print("Initializing node... ")
18 | rospy.init_node("cartestian_joint_printer")
19 |
20 | self.limb = intera_interface.Limb("right")
21 |
22 | limb = 'right'
23 | self.name_of_service = "ExternalTools/" + limb + "/PositionKinematicsNode/FKService"
24 | self.fksvc = rospy.ServiceProxy(self.name_of_service, SolvePositionFK)
25 |
26 |
27 | self.timer = rospy.Timer(rospy.Duration.from_sec(1.0 / rate), self.run_fwd_kin)
28 |
29 |
30 |
31 | def run_fwd_kin(self, event):
32 |
33 | fkreq = SolvePositionFKRequest()
34 | joints = JointState()
35 | joints.name = self.limb.joint_names()
36 |
37 | joints.position = [self.limb.joint_angle(j)
38 | for j in joints.name]
39 |
40 | # Add desired pose for forward kinematics
41 | fkreq.configuration.append(joints)
42 | fkreq.tip_names.append('right_hand')
43 |
44 |
45 | try:
46 | rospy.wait_for_service(self.name_of_service, 5)
47 | resp = self.fksvc(fkreq)
48 | except (rospy.ServiceException, rospy.ROSException), e:
49 | rospy.logerr("Service call failed: %s" % (e,))
50 | return False
51 |
52 | # Check if result valid
53 | if (resp.isValid[0]):
54 | rospy.loginfo("\nFK Joint Angles:\n")
55 | for i, name in enumerate(joints.name):
56 | print name + " %f"%(joints.position[i])
57 |
58 | # ADDED CODE
59 | pose_dict = self.limb.endpoint_pose()
60 | pose_pos = pose_dict['position']
61 | pose_orientation = pose_dict['orientation']
62 | print ("POSITION INFO:")
63 | for i in pose_pos:
64 | print (i)
65 | print ("ORIENTATION INFO:")
66 |             for i in pose_orientation:
67 | print (i)
68 | # END OF ADDED CODE
69 |
70 | rospy.loginfo("\nFK Cartesian Solution:\n")
71 | rospy.loginfo("------------------")
72 | rospy.loginfo("Response Message:\n%s", resp)
73 | else:
74 | rospy.loginfo("INVALID JOINTS - No Cartesian Solution Found.")
75 |
76 |
77 | def main():
78 | """
79 | printing the
80 | response of whether a valid Cartesian solution was found,
81 | and if so, the corresponding Cartesian pose.
82 | """
83 |
84 | joint_printer = Print_fwd_kin(rate= 1)
85 | rospy.spin()
86 |
87 |
88 | if __name__ == '__main__':
89 | main()
90 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/label_video.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # don't use anaconda python, since its opencv version
3 | # doesn't work with this video
4 |
5 | import sys, os, cv2, time
6 |
7 | #fps = 29.97
8 | #fps = 250.
9 | buf = 0.25
10 |
11 | def main():
12 | print "Mark positives with a space and negatives with 'n'. z = undo"
13 | in_vid_file = sys.argv[1]
14 | print 'Loading:', in_vid_file
15 | cap = cv2.VideoCapture(in_vid_file)
16 | fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
17 | print 'fps =', fps
18 |
19 | start_time = 0. if len(sys.argv) <= 2 else float(sys.argv[2])
20 |
21 | press_time = None
22 | neg_time = None
23 |
24 | out_fname = '.'.join(in_vid_file.split('.')[:-1]) + '.txt'
25 | with open(out_fname, 'a') as out_file:
26 | idx = 0
27 | while True:
28 | ret, frame = cap.read()
29 | if not ret:
30 |                 print 'Could not read frame (end of video?)'
31 | print 'ret =', ret
32 | break
33 | k = cv2.waitKey(1)
34 | t = idx / float(fps)
35 | idx += 1
36 |
37 | if t < start_time:
38 | if idx % 100 == 0:
39 | print 'Skipping', t, 'start =', start_time
40 | continue
41 |
42 | is_pressing = press_time is not None and press_time < t < press_time + buf
43 | is_neg = neg_time is not None and neg_time < t < neg_time + buf
44 |
45 | sleep_dur = 0. if is_neg else 3*1./fps
46 | time.sleep(sleep_dur)
47 |
48 | k = k & 0xFF
49 | if k == ord('q'):
50 | break
51 | elif k == ord(' ') or k == ord('p'):
52 | if press_time is None or t > press_time + buf:
53 | if press_time is not None:
54 | out_file.write('p %.4f\n' % press_time)
55 | out_file.flush()
56 | press_time = t
57 | print 'Press:', press_time
58 | elif k == ord('n'):
59 | if neg_time is None or t > neg_time + buf:
60 | if neg_time is not None:
61 | out_file.write('n %.4f\n' % neg_time)
62 | out_file.flush()
63 | neg_time = t
64 | print 'Neg:', neg_time
65 | elif k == ord('z'):
66 | print 'Undoing last press and neg'
67 | press_time = None
68 | neg_time = None
69 |
70 | if is_pressing:
71 | color = (0, 255, 0)
72 | elif is_neg:
73 | color = (0, 0, 255)
74 | else:
75 | color = (255, 0, 0)
76 |
77 | frame = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2))
78 | secs = t
79 | mins = int(secs / 60.)
80 | secs = secs - mins*60
81 | text = '%02d:%02d' % (mins, secs)
82 | cv2.putText(frame, text, (0, 50),
83 | cv2.FONT_HERSHEY_SIMPLEX, 1, color)
84 |
85 | cv2.imshow('frame', frame)
86 |
87 | cap.release()
88 | cv2.destroyAllWindows()
89 |
90 | if __name__ == '__main__':
91 | main()
92 |
93 |
94 |
95 |
96 |
--------------------------------------------------------------------------------
/manu_sawyer/src/correct_name_of_object.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 | import h5py
5 | from tqdm import tqdm
6 | import cv2
7 | import grip_and_record.getch
8 | import matplotlib.pyplot as plt
9 | import tensorflow_model_is_gripping.aolib.img as ig
10 |
11 |
12 | def set_label(path, filenames):
13 | """
14 |
15 | :param filenames: one string or a list of strings
16 | :return:
17 | """
18 |     if isinstance(filenames, basestring):
19 |         filenames = [filenames]  # convert a single string to a list
20 |
21 | for filename in filenames:
22 | path_to_file = path + filename
23 |
24 | print('Opening file: %s' % path_to_file)
25 | f = h5py.File(path_to_file, "r+")
26 |
27 |
28 | times = np.array(f['/timestamp'].value)
29 |
30 | time_pre_grasping = float(f["time_pre_grasping"].value)
31 | index_time_pre_grasping = np.argmin(
32 | np.abs(times - time_pre_grasping)) # Find index corresponding to timestamp
33 |
34 | GelSightA_image_time_pre_grasping = np.array(
35 | ig.uncompress(f['/GelSightA_image'].value[index_time_pre_grasping]))
36 | GelSightB_image_time_pre_grasping = np.array(
37 | ig.uncompress(f['/GelSightB_image'].value[index_time_pre_grasping]))
38 |
39 | KinectA_color_time_pre_grasping = np.array(
40 | ig.uncompress(f['/color_image_KinectA'].value[index_time_pre_grasping]))
41 | KinectB_color_time_pre_grasping = np.array(
42 | ig.uncompress(f['/color_image_KinectB'].value[index_time_pre_grasping]))
43 |
44 | plt.imshow(KinectA_color_time_pre_grasping)
45 | plt.axis('off')
46 | plt.draw()
47 | plt.ion()
48 | plt.show()
49 |
50 |
51 |
52 | print("Does the name of the object correspond to the one printed below? [y/n]")
53 | print(str(f['object_name'].value))
54 | done = False
55 | while not done:
56 | c = grip_and_record.getch.getch()
57 | if c:
58 | if c in ['n']:
59 |                     name = raw_input("Enter the correct name: ")  # raw_input under Python 2 (input() would eval)
60 | data = f['object_name']
61 | data[...] = np.asarray([name])
62 | done = True
63 | elif c in ['y']:
64 | done = True
65 |
66 | f.flush()
67 | f.close()
68 | plt.close()
69 |
70 |
71 | if __name__ == '__main__':
72 | directory = "/home/manu/ros_ws/src/manu_research/data/"
73 | import os
74 |
75 | list_filenames = []
76 | for file in os.listdir(directory):
77 | if file.endswith(".hdf5"):
78 | list_filenames.append(file)
79 | list_filenames = sorted(list_filenames)
80 | print(list_filenames)
81 |
82 | path = directory
83 | set_label(path, list_filenames[-30:])
84 |
--------------------------------------------------------------------------------
/manu_sawyer/src/calibrate.py:
--------------------------------------------------------------------------------
1 |
2 | import numpy as np
3 | from cvxpy import *
4 |
5 | """
6 | Script used to find H and t for the transformation
7 | x_Sawyer = H x_Kinect + t
8 | """
9 |
10 | # The nine calibration points in the Kinect coordinate system
11 | p_1_kinect = np.array([0.31798916, 0.00325601, 0.76754364]).reshape((3, 1))
12 | p_2_kinect = np.array([-0.06495789, -0.00613411, 0.78532697]).reshape((3, 1))
13 | p_3_kinect = np.array([-0.42200078, 0.01948896, 0.75328325]).reshape((3, 1))
14 | p_4_kinect = np.array([0.32931394, -0.12782843, 0.95595643]).reshape((3, 1))
15 | p_5_kinect = np.array([-0.0298477 , -0.13292019, 0.95944226]).reshape((3, 1))
16 | p_6_kinect = np.array([-0.40676888, -0.12317579, 0.95006945]).reshape((3, 1))
17 | p_7_kinect = np.array([0.33622021, -0.22669912, 1.09418477]).reshape((3, 1))
18 | p_8_kinect = np.array([-0.02905334, -0.23861358, 1.11272965]).reshape((3, 1))
19 | p_9_kinect = np.array([-0.40161079, -0.2231759, 1.09171911]).reshape((3, 1))
20 |
21 |
22 | p_kinect = [p_1_kinect, p_2_kinect, p_3_kinect, p_4_kinect, p_5_kinect, p_6_kinect, p_7_kinect, p_8_kinect, p_9_kinect]
23 | #p_kinect = [p_1_kinect, p_2_kinect, p_3_kinect, p_4_kinect, p_5_kinect, p_6_kinect]
24 |
25 | # The nine corresponding calibration points in the Sawyer coordinate system
26 | p_1_sawyer = np.array([0.643367191677, -0.350339791193, 0.00328037006498]).reshape((3, 1))
27 | p_2_sawyer = np.array([0.6332503943, 0.0292459324126, 0.00330423965907]).reshape((3, 1))
28 | p_3_sawyer = np.array([0.680385933273, 0.37847117307, 0.000283238475192]).reshape((3, 1))
29 | p_4_sawyer = np.array([0.416538421332, -0.35834409086, 0.00317884107226]).reshape((3, 1))
30 | p_5_sawyer = np.array([0.423452822173, -0.00949250782561, 0.0037321160006]).reshape((3, 1))
31 | p_6_sawyer = np.array([0.442115979626, 0.35916693997, 0.00201009508294]).reshape((3, 1))
32 | p_7_sawyer = np.array([0.248284622296, -0.36458168206, 0.00275356155022]).reshape((3, 1))
33 | p_8_sawyer = np.array([0.24454903839, -0.00529264843613, 0.00249021303604]).reshape((3, 1))
34 | p_9_sawyer = np.array([0.268852273011, 0.351922785944, 0.00141867116927]).reshape((3, 1))
35 |
36 |
37 | p_sawyer = [p_1_sawyer, p_2_sawyer, p_3_sawyer, p_4_sawyer, p_5_sawyer, p_6_sawyer, p_7_sawyer, p_8_sawyer, p_9_sawyer]
38 | #p_sawyer = [p_1_sawyer, p_2_sawyer, p_3_sawyer, p_4_sawyer, p_5_sawyer, p_6_sawyer]
39 |
40 | # Optimization variables
41 | H = Variable(3, 3)
42 | t = Variable(3)
43 |
44 | # Optimization constraints
45 | constraints = []
46 |
47 | # Optimization objective
48 | temp = []
49 | for i in range(len(p_sawyer)):
50 | temp.append(norm(H * p_kinect[i] + t - p_sawyer[i]))
51 | objective = Minimize(sum(temp))
52 |
53 | # Solve optimization problem
54 | prob = Problem(objective, constraints)
55 | prob.solve()
56 |
57 | print("H:\n", H.value)
58 | print("t:\n", t.value)
59 |
60 | np.save("H", H.value)
61 | np.save("t", t.value)
--------------------------------------------------------------------------------
/manu_sawyer/src/video_conv/convert_to_video.py:
--------------------------------------------------------------------------------
1 | # Compatibility Python 2/3
2 | from __future__ import division, print_function, absolute_import
3 |
4 | # ----------------------------------------------------------------------------------------------------------------------
5 |
6 | from dotmap import DotMap
7 | import numpy as np
8 | import h5py
9 | import sys
10 | sys.path.insert(0, '/home/manu/ros_ws/src/Research/manu_sawyer/src/tensorflow_model_is_gripping')
11 | import aolib.img as ig
12 | import RGB2video as RGB2video
13 |
14 |
15 | def convert_to_video(path, list_filenames):
16 |
17 | if isinstance(list_filenames, basestring):
18 |         list_filenames = [list_filenames]  # convert a single string to a list
19 |
20 | for file in list_filenames:
21 |
22 | namefile = path + file
23 |
24 | data = DotMap()
25 | data.n_steps = []
26 | data.frequency = []
27 | data.kinect.A.image = []
28 | data.gelsight.A.image = []
29 | data.gelsight.B.image = []
30 |
31 | print('Opening file: %s' % namefile)
32 | f = h5py.File(namefile, "r")
33 | data.n_steps = f['n_steps'].value
34 | print('Number of steps: %d' % data.n_steps)
35 | data.frequency = int(f['frequency'].value)
36 | print('FPS: %d' % data.frequency)
37 |
38 | data.kinect.A.image = map(ig.uncompress, f['/color_image_KinectA'].value)
39 | print("color_image_KinectA done")
40 |
41 | data.gelsight.A.image = map(ig.uncompress, f['/GelSightA_image'].value)
42 | print("GelSightA_image done")
43 |
44 | data.gelsight.B.image = map(ig.uncompress, f['/GelSightB_image'].value)
45 | print("GelSightB_image done")
46 |
47 | # Convert to np arrays
48 | kinect_A = np.asarray(data.kinect.A.image)
49 | print('kinect.A To array done')
50 | gelsight_A = np.asarray(data.gelsight.A.image)
51 | print('gelsight.A To array done')
52 | gelsight_B = np.asarray(data.gelsight.B.image)
53 | print('gelsight.B To array done')
54 |
55 | print(kinect_A.shape)
56 | print(gelsight_A.shape)
57 | print(gelsight_B.shape)
58 |
59 | print(RGB2video.RGB2video(data=kinect_A, nameFile=file + '_kinect_A', framerate=data.frequency))
60 | print(RGB2video.RGB2video(data=gelsight_A, nameFile=file + '_gelsight_A', framerate=data.frequency))
61 | print(RGB2video.RGB2video(data=gelsight_B, nameFile=file + '_gelsight_B', framerate=data.frequency))
62 |
63 | if __name__ == '__main__':
64 | path = '/home/manu/ros_ws/src/Research/manu_sawyer/src/video_conv/'
65 | import os
66 |
67 | list_filenames = []
68 | for file in os.listdir(path):
69 | if file.endswith(".hdf5"):
70 | list_filenames.append(file)
71 | list_filenames = sorted(list_filenames)
72 |
73 | convert_to_video(path=path, list_filenames=list_filenames)
74 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/manu_params.py:
--------------------------------------------------------------------------------
1 | import numpy as np, tensorflow as tf, aolib.util as ut, aolib.img as ig, os
2 | import tensorflow.contrib.slim as slim
3 | import tensorflow.contrib.slim.nets as nets
4 | import vgg, h5py
5 | import sklearn.metrics
6 |
7 | pj = ut.pjoin
8 |
9 | class Params(ut.Struct):
10 | def __init__(self, **kwargs):
11 | self.__dict__.update(kwargs)
12 |
13 | @property
14 | def train_dir(self):
15 | return ut.mkdir(pj(self.resdir, 'training'))
16 |
17 | @property
18 | def summary_dir(self):
19 | return ut.mkdir(pj(self.resdir, 'summary'))
20 |
21 | def base_v1(split_num):
22 | return Params(
23 | dsdir = '/media/4tb/owens/grasp-results/dset/v1-split%d' % split_num,
24 | base_lr = 1e-4,
25 | lr_gamma = 0.5,
26 | step_size = 2000,
27 | batch_size = 16,
28 | opt_method = 'adam',
29 | cls_style = 'mlp-1',
30 | model_iter = 7000,
31 | train_iters = 7001)
32 |
33 | def gel_im_v1(n):
34 | pr = base_v1(n).updated(
35 | description = 'GelSight + images',
36 | resdir = '/home/rail/manu/grasp-results/models/gel-im-v1-%d' % n,
37 | inputs = ['gel', 'im'])
38 | return pr
39 |
40 | def im_v1(n):
41 | pr = base_v1(n).updated(
42 | description = 'Images',
43 | resdir = '/home/rail/manu/grasp-results/models/im-v1-%d' % n,
44 | inputs = ['im'])
45 | return pr
46 |
47 | def im_ee_v1(n):
48 | pr = base_v1(n).updated(
49 | description = 'Images + End effector',
50 | resdir = '/home/rail/manu/grasp-results/models/im-ee-v1-%d' % n,
51 | inputs = ['im', 'ee'])
52 | return pr
53 |
54 | def gel_v1(n):
55 | pr = base_v1(n).updated(
56 | description = 'Gel only',
57 | resdir = '/home/rail/manu/grasp-results/models/gel-v1-%d' % n,
58 | inputs = ['gel'])
59 | return pr
60 |
61 | def gel0_v1(n):
62 | pr = base_v1(n).updated(
63 | description = 'Only gel 0',
64 | resdir = '/home/rail/manu/grasp-results/models/gel0-v1-%d' % n,
65 | inputs = ['gel'],
66 | gels = [0])
67 | return pr
68 |
69 | def gel1_v1(n):
70 | pr = base_v1(n).updated(
71 | description = 'Only gel 1',
72 | resdir = '/home/rail/manu/grasp-results/models/gel1-v1-%d' % n,
73 | inputs = ['gel'],
74 | gels = [1])
75 | return pr
76 |
77 | def press_v1(n):
78 | pr = base_v1(n).updated(
79 | description = 'Hand-coded features',
80 | resdir = '/home/rail/manu/grasp-results/models/press-v1-%d' % n,
81 | inputs = ['press'])
82 | return pr
83 |
84 | def depth_v1(n):
85 | pr = base_v1(n).updated(
86 | description = 'Depth only',
87 | resdir = '/home/rail/manu/grasp-results/models/depth-v1-%d' % n,
88 | inputs = ['depth'])
89 | return pr
90 |
91 | def gel_im_depth_v1(n):
92 | pr = base_v1(n).updated(
93 | description = 'Gel + image + depth',
94 | resdir = '/home/rail/manu/grasp-results/models/gel-im-depth-v1-%d' % n,
95 | inputs = ['gel', 'im', 'depth'])
96 | return pr
97 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/final_params_full.py:
--------------------------------------------------------------------------------
1 | import numpy as np, tensorflow as tf, aolib.util as ut, aolib.img as ig, os
2 | import tensorflow.contrib.slim as slim
3 | import tensorflow.contrib.slim.nets as nets
4 | import vgg, h5py
5 | import sklearn.metrics
6 |
7 | pj = ut.pjoin
8 |
9 | class Params(ut.Struct):
10 | def __init__(self, **kwargs):
11 | self.__dict__.update(kwargs)
12 |
13 | @property
14 | def train_dir(self):
15 | return ut.mkdir(pj(self.resdir, 'training'))
16 |
17 | @property
18 | def summary_dir(self):
19 | return ut.mkdir(pj(self.resdir, 'summary'))
20 |
21 | # def base_v1():
22 | # return Params(
23 | # #dsdir = '/media/4tb/owens/grasp-results/dset-v2/v2-full',
24 | # dsdir = '/z/owens/v2-full',
25 | # base_lr = 1e-4,
26 | # lr_gamma = 0.5,
27 | # step_size = 2000,
28 | # batch_size = 16,
29 | # opt_method = 'adam',
30 | # cls_style = 'mlp-1',
31 | # model_iter = 7000,
32 | # train_iters = 7001)
33 |
34 |
35 |
36 | # def base_v1():
37 | # return Params(
38 | # #dsdir = '/media/4tb/owens/grasp-results/dset-v2/v2-full',
39 | # dsdir = '/z/owens/v2-full',
40 | # base_lr = 1e-4,
41 | # lr_gamma = 0.1,
42 | # step_size = 4000,
43 | # batch_size = 16,
44 | # opt_method = 'adam',
45 | # cls_style = 'mlp-1',
46 | # model_iter = 9000,
47 | # train_iters = 9001)
48 |
49 |
50 | def base_v1():
51 | epochs = 20
52 | dset_batches = 9296/16
53 | return Params(
54 | #dsdir = '/media/4tb/owens/grasp-results/dset-v2/v2-full',
55 | dsdir = '/z/owens/v2-full',
56 | base_lr = 1e-4,
57 | lr_gamma = 0.1,
58 | #step_size = 4000,
59 | step_size = 10*dset_batches,
60 | batch_size = 16,
61 | opt_method = 'adam',
62 | cls_style = 'mlp-1',
63 | model_iter = epochs*dset_batches,
64 | train_iters = epochs*dset_batches+1)
65 |
66 | # def gel_im_v1():
67 | # pr = base_v1().updated(
68 | # description = 'GelSight + images',
69 | # resdir = '/media/4tb/owens/grasp-results/final/gel-im-v1',
70 | # inputs = ['gel', 'im'])
71 | # return pr
72 |
73 |
74 | # def im_v1():
75 | # pr = base_v1().updated(
76 | # description = 'Images only',
77 | # resdir = '/z/owens/grasp-results/final/im-v1',
78 | # inputs = ['im'])
79 | # return pr
80 |
81 | # def gel_im_v1():
82 | # pr = base_v1().updated(
83 | # description = 'GelSight + images',
84 | # resdir = '/media/4tb/owens/grasp-results/final/gel-im-v1',
85 | # inputs = ['gel', 'im'])
86 | # return pr
87 |
88 |
89 | def gel_im_v1():
90 |     pr = base_v1().updated(
91 |         description = 'GelSight + images',
92 |         resdir = '/z/owens/grasp-results/final/gel-im-v1',
93 |         inputs = ['im', 'gel'])
94 |     return pr
95 |
96 |
97 | def im_v1():
98 | pr = base_v1().updated(
99 | description = 'Images only',
100 | resdir = '/z/owens/grasp-results/final/im-v1',
101 | inputs = ['im'])
102 | return pr
103 |
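Each variant above overrides a few fields of `base_v1()` via `updated` (an `aolib` `Struct` helper). A hypothetical additional variant, following the same pattern:

```python
def depth_v1():
    # hypothetical config for illustration; not part of the original file
    pr = base_v1().updated(
        description = 'Depth only',
        resdir = '/z/owens/grasp-results/final/depth-v1',
        inputs = ['depth'])
    return pr
```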
--------------------------------------------------------------------------------
/manu_sawyer/src/KinectB_hd.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 | from cv_bridge import CvBridge, CvBridgeError
5 | import rospy
6 | import multiprocessing
7 | from sensor_msgs.msg import Image
8 | import grip_and_record.locate_cylinder as locate_cylinder
9 | import cv2
10 |
11 |
12 | class KinectB_hd:
13 | def __init__(self):
14 | """
15 | Class for recording data from the KinectB
16 | """
17 | # Set up the subscribers
18 | self.image_topic = "/aux/kinect2/hd/image_color"
19 | # self.image_topic = "/aux/kinect2/qhd/image_color"
20 | self.depth_topic = "/aux/kinect2/sd/image_depth_rect"
21 | rospy.Subscriber(self.image_topic, Image, self.store_latest_color_image)
22 | rospy.Subscriber(self.depth_topic, Image, self.store_latest_depth_image)
23 |
24 | # Defining variables
25 | self.init_depth_image = None
26 | self.depth_image = None
27 | self.color_image = None
28 |
29 | # Needed to transform images
30 | self.bridge = CvBridge()
31 |
32 | # Saving initial images
33 | self.save_initial_color_image()
34 | self.save_initial_depth_image()
35 |
36 | # Starting multiprocessing
37 | def spin_thread():
38 | rospy.spin()
39 |
40 | self.cam_process = multiprocessing.Process(target=spin_thread)
41 | self.cam_process.start()
42 |
43 | def calc_object_loc(self):
44 |         # Uses Andrew's code to fit a cylinder and returns the center, height, radius, and an image of the cylinder.
45 | ini_arr = np.array(self.init_depth_image)
46 | ini_depth = np.array(ini_arr[:, :, 0], 'float32')
47 | curr_arr = np.array(self.depth_image)
48 | curr_depth = np.array(curr_arr[:, :, 0], 'float32')
49 | return locate_cylinder.fit_cylinder(ini_depth, curr_depth)
50 |
51 | def save_initial_color_image(self):
52 | img = rospy.wait_for_message(self.image_topic, Image)
53 | self.color_image = self.bridge.imgmsg_to_cv2(img, "bgr8")
54 | self.color_image = np.flipud(self.color_image)
55 | self.color_image = np.fliplr(self.color_image)
56 |
57 | def save_initial_depth_image(self):
58 | img = rospy.wait_for_message(self.depth_topic, Image)
59 | self.depth_image = self.bridge.imgmsg_to_cv2(img, '16UC1')
60 | self.init_depth_image = self.depth_image
61 |
62 | def store_latest_color_image(self, data):
63 | try:
64 | cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
65 |         except CvBridgeError as e:
66 |             print(e)
67 |             return  # keep the previous image if conversion fails
68 |         self.color_image = cv_image
69 | def store_latest_depth_image(self, data):
70 | img = self.bridge.imgmsg_to_cv2(data, '16UC1')
71 | self.depth_image = img
72 |
73 | def get_color_image(self):
74 | return self.color_image
75 |
76 | def get_depth_image(self):
77 | return self.depth_image
78 |
79 | def end_process(self):
80 | self.cam_process.terminate()
81 | self.cam_process.join()
82 |
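A minimal sketch of using the class (assumes a running ROS master with the `kinect2_bridge` topics up; the node name is arbitrary):

```python
import rospy
from KinectB_hd import KinectB_hd

rospy.init_node('kinect_b_hd_test')  # hypothetical node name
cam = KinectB_hd()                   # blocks until the first color and depth frames arrive
color = cam.get_color_image()
depth = cam.get_depth_image()
cam.end_process()
```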
--------------------------------------------------------------------------------
/manu_sawyer/src/compress_data.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 |
4 | # Compatibility Python 2/3
5 | from __future__ import division, print_function, absolute_import
6 | from builtins import range
7 | # ----------------------------------------------------------------------------------------------------------------------
8 |
9 | from dotmap import DotMap
10 | import matplotlib.pyplot as plt
11 | import numpy as np
12 | import h5py
13 | import cv2
14 | import os
15 | from tqdm import tqdm
16 |
17 | import tensorflow_model_is_gripping.aolib.img as ig, tensorflow_model_is_gripping.aolib.util as ut
18 |
19 |
20 | def compress_data(namefile):
21 |
22 | def compress_im(im):
23 | s = ig.compress(cv2.cvtColor(im, cv2.COLOR_BGR2RGB), "jpeg")
24 | # assert(np.all(ig.uncompress(im) == im))
25 | return np.asarray(s)
26 |
27 | path, file = os.path.split(namefile)
28 | newnamefile = os.path.join(path, os.path.splitext(file)[0] + '_compressed.hdf5')
29 | newfile = h5py.File(newnamefile, "w")
30 |
31 | print('New file %s' % newnamefile)
32 |
33 | f = h5py.File(namefile, "r")
34 |
35 | n_steps = f['n_steps'].value
36 | print('Number of steps: %d' % n_steps)
37 |
38 | # newfile.create_dataset("frequency", data=f['frequency'].value)
39 | newfile.create_dataset("n_steps", data=f['n_steps'].value)
40 |
41 | for i in tqdm(range(1, n_steps)):
42 | id = '/step_%012d' % i
43 |
44 | # data.time.append(f[id + '/timestamp'].value)
45 | # data.robot.limb.append(f[id + '/limb'].value.flatten())
46 | # data.robot.gripper.append(f[id + '/gripper'].value)
47 | # data.kinect.A.image.append(cv2.cvtColor(f[id + '/color_image_KinectA'].value, cv2.COLOR_BGR2RGB))
48 | # data.kinect.A.depth.append(f[id + '/depth_image_KinectA'].value)
49 | # data.kinect.B.image.append(cv2.cvtColor(f[id + '/color_image_KinectB'].value, cv2.COLOR_BGR2RGB))
50 | # data.kinect.B.depth.append(f[id + '/depth_image_KinectB'].value)
51 | # data.gelsight.A.image.append(f[id + '/GelSightA_image'].value)
52 | # data.gelsight.B.image.append(f[id + '/GelSightB_image'].value)
53 |
54 | id = '/step_%012d' % i
55 | ut.tic('compress')
56 | # depth_image_KinectA = convert_depth(depth_image_KinectA)
57 | color_image_KinectA = compress_im(f[id + '/color_image_KinectA'].value)
58 | # depth_image_KinectB = convert_depth(depth_image_KinectB)
59 | color_image_KinectB = compress_im(f[id + '/color_image_KinectB'].value)
60 | GelSightA_image = compress_im(f[id + '/GelSightA_image'].value)
61 | GelSightB_image = compress_im(f[id + '/GelSightB_image'].value)
62 | ut.toc()
63 |         # Write the compressed images to the new file. (The original loop
64 |         # computed these but never stored them; this is assumed to be the
65 |         # intended behavior, mirroring the read keys above.)
66 |         newfile.create_dataset(id + '/color_image_KinectA', data=color_image_KinectA)
67 |         newfile.create_dataset(id + '/color_image_KinectB', data=color_image_KinectB)
68 |         newfile.create_dataset(id + '/GelSightA_image', data=GelSightA_image)
69 |         newfile.create_dataset(id + '/GelSightB_image', data=GelSightB_image)
70 |
71 |     # Close files
72 |     newfile.close()
73 |     f.close()
74 |
75 |
76 | if __name__ == '__main__':
77 |     nameFile = '/media/data_disk/dataset_manu/2017-06-18/2017-06-18_000134.hdf5'
78 |     # nameFile = '/home/guser/catkin_ws/src/manu_research/data/data_trial_00000001.hdf5'
79 |     compress_data(namefile=nameFile)
--------------------------------------------------------------------------------
/manu_sawyer/src/KinectB.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 | from cv_bridge import CvBridge, CvBridgeError
5 | import rospy
6 | import multiprocessing
7 | from sensor_msgs.msg import Image
8 | import grip_and_record.locate_cylinder as locate_cylinder
9 | import cv2
10 |
11 |
12 | class KinectB:
13 | def __init__(self):
14 | """
15 | Class for recording data from the KinectB
16 | """
17 | print("Initializing KinectB")
18 | # Set up the subscribers
19 | # self.image_topic = "/aux/kinect2/hd/image_color"
20 | self.image_topic = "/aux/kinect2/qhd/image_color"
21 | self.depth_topic = "/aux/kinect2/sd/image_depth_rect"
22 |
23 | rospy.Subscriber(self.image_topic, Image, self.store_latest_color_image)
24 | rospy.Subscriber(self.depth_topic, Image, self.store_latest_depth_image)
25 |
26 | # Defining variables
27 | self.init_depth_image = None
28 | self.depth_image = None
29 | self.color_image = None
30 |
31 | # Needed to transform images
32 | self.bridge = CvBridge()
33 |
34 | # Saving initial images
35 | self.save_initial_color_image()
36 | self.save_initial_depth_image()
37 |
38 | # Starting multiprocessing
39 | def spin_thread():
40 | rospy.spin()
41 |
42 | self.cam_process = multiprocessing.Process(target=spin_thread)
43 | self.cam_process.start()
44 | print("Done")
45 |
46 | def calc_object_loc(self):
47 |         # Uses Andrew's code to fit a cylinder and returns the center, height, radius, and an image of the cylinder.
48 | ini_arr = np.array(self.init_depth_image)
49 | ini_depth = np.array(ini_arr[:, :, 0], 'float32')
50 | curr_arr = np.array(self.depth_image)
51 | curr_depth = np.array(curr_arr[:, :, 0], 'float32')
52 | return locate_cylinder.fit_cylinder(ini_depth, curr_depth)
53 |
54 | def save_initial_color_image(self):
55 | img = rospy.wait_for_message(self.image_topic, Image)
56 | self.color_image = self.bridge.imgmsg_to_cv2(img, "bgr8")
57 | self.color_image = np.flipud(self.color_image)
58 | self.color_image = np.fliplr(self.color_image)
59 |
60 | def save_initial_depth_image(self):
61 | img = rospy.wait_for_message(self.depth_topic, Image)
62 | self.depth_image = self.bridge.imgmsg_to_cv2(img, '16UC1')
63 | self.init_depth_image = self.depth_image
64 |
65 | def store_latest_color_image(self, data):
66 | try:
67 | cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
68 |         except CvBridgeError as e:
69 |             print(e)
70 |             return  # keep the previous image if conversion fails
71 |         self.color_image = cv_image
72 | def store_latest_depth_image(self, data):
73 | img = self.bridge.imgmsg_to_cv2(data, '16UC1')
74 | self.depth_image = img
75 |
76 | def get_color_image(self):
77 | return self.color_image
78 |
79 | def get_depth_image(self):
80 | return self.depth_image
81 |
82 | def end_process(self):
83 | self.cam_process.terminate()
84 | self.cam_process.join()
85 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/aolib/areload.py:
--------------------------------------------------------------------------------
1 | # Automatically reload all of the modules in the current directory
2 | # or its subdirectories every time you enter a command in IPython.
3 | # Usage: from the IPython toplevel, do
4 | # In [1]: import autoreload
5 | # autoreload enabled
6 | # Author: Andrew Owens
7 | import os, sys, traceback
8 |
9 | def relative_module(m):
10 | return hasattr(m, '__file__') \
11 | and ((not m.__file__.startswith('/')) \
12 | or m.__file__.startswith(os.getcwd()))
13 |
14 | def reload_all():
15 | # Reloading __main__ is supposed to throw an error
16 | # For some reason in ipython I did not get an error and lost the
17 | # ability to send reload_all() to my ipython shell after making
18 | # changes.
19 | excludes = set(['__main__', 'autoreload'])
20 | for name, m in list(sys.modules.iteritems()):
21 | # todo: Check source modification time (use os.path.getmtime) to see if it has changed.
22 | if m and relative_module(m) and (name not in excludes) and (not hasattr(m, '__no_autoreload__')):
23 | reload(m)
24 | #superreload.superreload(m)
25 |
26 | def ipython_autoreload_hook(self):
27 | try:
28 | reload_all()
29 | except:
30 | traceback.print_exc()
31 | print 'Reload error. Modules not reloaded.'
32 |
33 | def enable():
34 | try:
35 | import IPython
36 | ip = IPython.get_ipython()
37 | prerun_hook_name = 'pre_run_code_hook'
38 | except:
39 | try:
40 | # IPython 0.11
41 | import IPython.core.ipapi as ipapi
42 | prerun_hook_name = 'pre_run_code_hook'
43 | ip = ipapi.get()
44 | except:
45 | # IPython 0.10.1
46 | import IPython.ipapi as ipapi
47 | prerun_hook_name = 'pre_runcode_hook'
48 | ip = ipapi.get()
49 | ip.set_hook(prerun_hook_name, ipython_autoreload_hook)
50 | print 'autoreload enabled'
51 |
52 | # # taken from ipy_autoreload.py; should probably just use that instead
53 | # def superreload(module, reload=reload):
54 | # """Enhanced version of the builtin reload function.
55 | # superreload replaces the class dictionary of every top-level
56 | # class in the module with the new one automatically,
57 | # as well as every function's code object.
58 | # """
59 | # module = reload(module)
60 | # # iterate over all objects and update them
61 | # count = 0
62 | # for name, new_obj in module.__dict__.items():
63 | # key = (module.__name__, name)
64 | # if _old_objects.has_key(key):
65 | # for old_obj in _old_objects[key]:
66 | # if type(new_obj) == types.ClassType:
67 | # old_obj.__dict__.update(new_obj.__dict__)
68 | # count += 1
69 | # elif type(new_obj) == types.FunctionType:
70 | # update_function(old_obj,
71 | # new_obj,
72 | # "func_code func_defaults func_doc".split())
73 | # count += 1
74 | # elif type(new_obj) == types.MethodType:
75 | # update_function(old_obj.im_func,
76 | # new_obj.im_func,
77 | # "func_code func_defaults func_doc".split())
78 | # count += 1
79 | # return module
80 |
81 |
82 | # automatically enable when imported
83 | __no_autoreload__ = True
84 |
85 | #enable()
86 |
87 |
88 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/aolib/areload.py:
--------------------------------------------------------------------------------
1 | # Automatically reload all of the modules in the current directory
2 | # or its subdirectories every time you enter a command in IPython.
3 | # Usage: from the IPython toplevel, do
4 | # In [1]: import autoreload
5 | # autoreload enabled
6 | # Author: Andrew Owens
7 | import os, sys, traceback
8 |
9 | def relative_module(m):
10 | return hasattr(m, '__file__') \
11 | and ((not m.__file__.startswith('/')) \
12 | or m.__file__.startswith(os.getcwd()))
13 |
14 | def reload_all():
15 | # Reloading __main__ is supposed to throw an error
16 | # For some reason in ipython I did not get an error and lost the
17 | # ability to send reload_all() to my ipython shell after making
18 | # changes.
19 | excludes = set(['__main__', 'autoreload'])
20 | for name, m in list(sys.modules.iteritems()):
21 | # todo: Check source modification time (use os.path.getmtime) to see if it has changed.
22 | if m and relative_module(m) and (name not in excludes) and (not hasattr(m, '__no_autoreload__')):
23 | reload(m)
24 | #superreload.superreload(m)
25 |
26 | def ipython_autoreload_hook(self):
27 | try:
28 | reload_all()
29 | except:
30 | traceback.print_exc()
31 | print 'Reload error. Modules not reloaded.'
32 |
33 | def enable():
34 | try:
35 | import IPython
36 | ip = IPython.get_ipython()
37 | prerun_hook_name = 'pre_run_code_hook'
38 | except:
39 | try:
40 | # IPython 0.11
41 | import IPython.core.ipapi as ipapi
42 | prerun_hook_name = 'pre_run_code_hook'
43 | ip = ipapi.get()
44 | except:
45 | # IPython 0.10.1
46 | import IPython.ipapi as ipapi
47 | prerun_hook_name = 'pre_runcode_hook'
48 | ip = ipapi.get()
49 | ip.set_hook(prerun_hook_name, ipython_autoreload_hook)
50 | print 'autoreload enabled'
51 |
52 | # # taken from ipy_autoreload.py; should probably just use that instead
53 | # def superreload(module, reload=reload):
54 | # """Enhanced version of the builtin reload function.
55 | # superreload replaces the class dictionary of every top-level
56 | # class in the module with the new one automatically,
57 | # as well as every function's code object.
58 | # """
59 | # module = reload(module)
60 | # # iterate over all objects and update them
61 | # count = 0
62 | # for name, new_obj in module.__dict__.items():
63 | # key = (module.__name__, name)
64 | # if _old_objects.has_key(key):
65 | # for old_obj in _old_objects[key]:
66 | # if type(new_obj) == types.ClassType:
67 | # old_obj.__dict__.update(new_obj.__dict__)
68 | # count += 1
69 | # elif type(new_obj) == types.FunctionType:
70 | # update_function(old_obj,
71 | # new_obj,
72 | # "func_code func_defaults func_doc".split())
73 | # count += 1
74 | # elif type(new_obj) == types.MethodType:
75 | # update_function(old_obj.im_func,
76 | # new_obj.im_func,
77 | # "func_code func_defaults func_doc".split())
78 | # count += 1
79 | # return module
80 |
81 |
82 | # automatically enable when imported
83 | __no_autoreload__ = True
84 |
85 | #enable()
86 |
87 |
88 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/aolib/areload.py:
--------------------------------------------------------------------------------
1 | # Automatically reload all of the modules in the current directory
2 | # or its subdirectories every time you enter a command in IPython.
3 | # Usage: from the IPython toplevel, do
4 | # In [1]: import autoreload
5 | # autoreload enabled
6 | # Author: Andrew Owens
7 | import os, sys, traceback
8 |
9 | def relative_module(m):
10 | return hasattr(m, '__file__') \
11 | and ((not m.__file__.startswith('/')) \
12 | or m.__file__.startswith(os.getcwd()))
13 |
14 | def reload_all():
15 | # Reloading __main__ is supposed to throw an error
16 | # For some reason in ipython I did not get an error and lost the
17 | # ability to send reload_all() to my ipython shell after making
18 | # changes.
19 | excludes = set(['__main__', 'autoreload'])
20 | for name, m in list(sys.modules.iteritems()):
21 | # todo: Check source modification time (use os.path.getmtime) to see if it has changed.
22 | if m and relative_module(m) and (name not in excludes) and (not hasattr(m, '__no_autoreload__')):
23 | reload(m)
24 | #superreload.superreload(m)
25 |
26 | def ipython_autoreload_hook(self):
27 | try:
28 | reload_all()
29 | except:
30 | traceback.print_exc()
31 | print 'Reload error. Modules not reloaded.'
32 |
33 | def enable():
34 | try:
35 | import IPython
36 | ip = IPython.get_ipython()
37 | prerun_hook_name = 'pre_run_code_hook'
38 | except:
39 | try:
40 | # IPython 0.11
41 | import IPython.core.ipapi as ipapi
42 | prerun_hook_name = 'pre_run_code_hook'
43 | ip = ipapi.get()
44 | except:
45 | # IPython 0.10.1
46 | import IPython.ipapi as ipapi
47 | prerun_hook_name = 'pre_runcode_hook'
48 | ip = ipapi.get()
49 | ip.set_hook(prerun_hook_name, ipython_autoreload_hook)
50 | print 'autoreload enabled'
51 |
52 | # # taken from ipy_autoreload.py; should probably just use that instead
53 | # def superreload(module, reload=reload):
54 | # """Enhanced version of the builtin reload function.
55 | # superreload replaces the class dictionary of every top-level
56 | # class in the module with the new one automatically,
57 | # as well as every function's code object.
58 | # """
59 | # module = reload(module)
60 | # # iterate over all objects and update them
61 | # count = 0
62 | # for name, new_obj in module.__dict__.items():
63 | # key = (module.__name__, name)
64 | # if _old_objects.has_key(key):
65 | # for old_obj in _old_objects[key]:
66 | # if type(new_obj) == types.ClassType:
67 | # old_obj.__dict__.update(new_obj.__dict__)
68 | # count += 1
69 | # elif type(new_obj) == types.FunctionType:
70 | # update_function(old_obj,
71 | # new_obj,
72 | # "func_code func_defaults func_doc".split())
73 | # count += 1
74 | # elif type(new_obj) == types.MethodType:
75 | # update_function(old_obj.im_func,
76 | # new_obj.im_func,
77 | # "func_code func_defaults func_doc".split())
78 | # count += 1
79 | # return module
80 |
81 |
82 | # automatically enable when imported
83 | __no_autoreload__ = True
84 |
85 | #enable()
86 |
87 |
88 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/robot_camera.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 |
5 | import cv2
6 | from cv_bridge import CvBridge, CvBridgeError
7 |
8 | import rospy
9 | import intera_interface
10 | import thread
11 | import multiprocessing
12 | from sensor_msgs.msg import Image
13 | from sensor_msgs.msg import CameraInfo
14 | import locate_cylinder
15 |
16 |
17 | class RobotCam:
18 | def __init__(self, flipped=False):
19 | """
20 | Class for recording data from the Kinect2
21 | :param flipped:
22 | """
23 | self.flipped = flipped
24 | self.image_topic = "/kinect2/hd/image_color"
25 | self.depth_topic = "/kinect2/sd/image_depth_rect"
26 | rospy.Subscriber(self.image_topic, Image, self.store_latest_im)
27 | rospy.Subscriber(self.depth_topic, Image, self.store_latest_d_im)
28 |
29 | self.bridge = CvBridge()
30 | self.save_initial_image()
31 |
32 | self.init_depth = None
33 | self.save_initial_depth()
34 |
35 | def spin_thread():
36 | print "Started spin thread"
37 | rospy.spin()
38 | # thread.start_new(spin_thread, ())
39 | self.cam_process = multiprocessing.Process(target=spin_thread)
40 | self.cam_process.start()
41 |
42 | def calc_object_loc(self):
43 | ini_arr = np.array(self.init_depth)
44 | ini_depth = np.array(ini_arr[:, :, 0], 'float32')
45 | curr_arr = np.array(self.depth_image)
46 | curr_depth = np.array(curr_arr[:, :, 0], 'float32')
47 | return locate_cylinder.fit_cylinder(ini_depth, curr_depth)
48 |
49 | def save_initial_image(self):
50 | print('Saving initial Image')
51 | img = rospy.wait_for_message(self.image_topic, Image)
52 | self.latest_image = self.bridge.imgmsg_to_cv2(img, "bgr8")
53 | if self.flipped:
54 | self.latest_image = cv2.flip(self.latest_image , 0)
55 |
56 | def save_initial_depth(self):
57 | print('Saving initial Depth')
58 | img = rospy.wait_for_message(self.depth_topic, Image)
59 | self.depth_image = self.bridge.imgmsg_to_cv2(img, '16UC1')
60 | self.init_depth = self.depth_image
61 |
62 | def store_latest_im(self, data):
63 | # print ("CAMERA UPDATE")
64 | try:
65 | cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
66 |         except CvBridgeError as e:
67 |             print(e)
68 |             return  # keep the previous image if conversion fails
69 | if self.flipped:
70 | self.latest_image = cv2.flip(cv_image, 0)
71 | else:
72 | self.latest_image = cv_image
73 | self.time_stamp_im = self._time_stamp()
74 |
75 | def store_latest_d_im(self, data):
76 | img = self.bridge.imgmsg_to_cv2(data, '16UC1')
77 | # img = img.astype(np.float32) / np.max(img) * 256
78 | # img = img.astype(np.uint8)
79 | # img = np.squeeze(img)
80 | self.depth_image = img
81 |
82 | def _time_stamp(self):
83 | return rospy.get_time()
84 |
85 | def get_latest_image(self):
86 | return self.latest_image
87 |
88 | def get_depth_image(self):
89 | return self.depth_image
90 |
91 | def end_process(self):
92 | self.cam_process.terminate()
93 | self.cam_process.join()
94 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/parula.py:
--------------------------------------------------------------------------------
1 | from matplotlib.colors import LinearSegmentedColormap
2 |
3 | cm_data = [[0.2081, 0.1663, 0.5292], [0.2116238095, 0.1897809524, 0.5776761905],
4 | [0.212252381, 0.2137714286, 0.6269714286], [0.2081, 0.2386, 0.6770857143],
5 | [0.1959047619, 0.2644571429, 0.7279], [0.1707285714, 0.2919380952,
6 | 0.779247619], [0.1252714286, 0.3242428571, 0.8302714286],
7 | [0.0591333333, 0.3598333333, 0.8683333333], [0.0116952381, 0.3875095238,
8 | 0.8819571429], [0.0059571429, 0.4086142857, 0.8828428571],
9 | [0.0165142857, 0.4266, 0.8786333333], [0.032852381, 0.4430428571,
10 | 0.8719571429], [0.0498142857, 0.4585714286, 0.8640571429],
11 | [0.0629333333, 0.4736904762, 0.8554380952], [0.0722666667, 0.4886666667,
12 | 0.8467], [0.0779428571, 0.5039857143, 0.8383714286],
13 | [0.079347619, 0.5200238095, 0.8311809524], [0.0749428571, 0.5375428571,
14 | 0.8262714286], [0.0640571429, 0.5569857143, 0.8239571429],
15 | [0.0487714286, 0.5772238095, 0.8228285714], [0.0343428571, 0.5965809524,
16 | 0.819852381], [0.0265, 0.6137, 0.8135], [0.0238904762, 0.6286619048,
17 | 0.8037619048], [0.0230904762, 0.6417857143, 0.7912666667],
18 | [0.0227714286, 0.6534857143, 0.7767571429], [0.0266619048, 0.6641952381,
19 | 0.7607190476], [0.0383714286, 0.6742714286, 0.743552381],
20 | [0.0589714286, 0.6837571429, 0.7253857143],
21 | [0.0843, 0.6928333333, 0.7061666667], [0.1132952381, 0.7015, 0.6858571429],
22 | [0.1452714286, 0.7097571429, 0.6646285714], [0.1801333333, 0.7176571429,
23 | 0.6424333333], [0.2178285714, 0.7250428571, 0.6192619048],
24 | [0.2586428571, 0.7317142857, 0.5954285714], [0.3021714286, 0.7376047619,
25 | 0.5711857143], [0.3481666667, 0.7424333333, 0.5472666667],
26 | [0.3952571429, 0.7459, 0.5244428571], [0.4420095238, 0.7480809524,
27 | 0.5033142857], [0.4871238095, 0.7490619048, 0.4839761905],
28 | [0.5300285714, 0.7491142857, 0.4661142857], [0.5708571429, 0.7485190476,
29 | 0.4493904762], [0.609852381, 0.7473142857, 0.4336857143],
30 | [0.6473, 0.7456, 0.4188], [0.6834190476, 0.7434761905, 0.4044333333],
31 | [0.7184095238, 0.7411333333, 0.3904761905],
32 | [0.7524857143, 0.7384, 0.3768142857], [0.7858428571, 0.7355666667,
33 | 0.3632714286], [0.8185047619, 0.7327333333, 0.3497904762],
34 | [0.8506571429, 0.7299, 0.3360285714], [0.8824333333, 0.7274333333, 0.3217],
35 | [0.9139333333, 0.7257857143, 0.3062761905], [0.9449571429, 0.7261142857,
36 | 0.2886428571], [0.9738952381, 0.7313952381, 0.266647619],
37 | [0.9937714286, 0.7454571429, 0.240347619], [0.9990428571, 0.7653142857,
38 | 0.2164142857], [0.9955333333, 0.7860571429, 0.196652381],
39 | [0.988, 0.8066, 0.1793666667], [0.9788571429, 0.8271428571, 0.1633142857],
40 | [0.9697, 0.8481380952, 0.147452381], [0.9625857143, 0.8705142857, 0.1309],
41 | [0.9588714286, 0.8949, 0.1132428571], [0.9598238095, 0.9218333333,
42 | 0.0948380952], [0.9661, 0.9514428571, 0.0755333333],
43 | [0.9763, 0.9831, 0.0538]]
44 |
45 | parula_map = LinearSegmentedColormap.from_list('parula', cm_data)
46 | # For use of "viscm view"
47 | test_cm = parula_map
48 |
49 | if __name__ == "__main__":
50 | import matplotlib.pyplot as plt
51 | import numpy as np
52 |
53 | try:
54 | from viscm import viscm
55 | viscm(parula_map)
56 | except ImportError:
57 | print("viscm not found, falling back on simple display")
58 | plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
59 | cmap=parula_map)
60 | plt.show()
61 |
--------------------------------------------------------------------------------
/manu_sawyer/src/find_cool_GelSight_images.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 | import h5py
5 | from tqdm import tqdm
6 | import cv2
7 | import grip_and_record.getch
8 | import matplotlib.pyplot as plt
9 | import tensorflow_model_is_gripping.aolib.img as ig
10 | import time
11 |
12 | def find_cool_images(path, filenames):
13 |     """
14 |     Display the images captured at grasping time for each file and save the
15 |     GelSight images that the user marks as "cool".
16 |     :param path: directory containing the .hdf5 files
17 |     :param filenames: one filename or a list of filenames
18 |     """
19 |     if isinstance(filenames, basestring):
20 |         filenames = [filenames]  # wrap a single string in a list
21 | for filename in filenames:
22 | path_to_file = path + filename
23 |
24 | print('Opening file: %s' % path_to_file)
25 | f = h5py.File(path_to_file, "r")
26 |
27 | times = np.array(f['/timestamp'].value)
28 |
29 | time_at_grasping = float(f["time_at_grasping"].value)
30 | index_time_at_grasping = np.argmin(
31 | np.abs(times - time_at_grasping)) # Find index corresponding to timestamp
32 |
33 | compressed_kinectA = f['/color_image_KinectA'].value[index_time_at_grasping]
34 | KinectA_color_time_at_grasping = np.array(ig.uncompress(compressed_kinectA))
35 |
36 | compressed_kinectB = f['/color_image_KinectB'].value[index_time_at_grasping]
37 | KinectB_color_time_at_grasping = np.array(ig.uncompress(compressed_kinectB))
38 |
39 | compressed_GelSightA = f['/GelSightA_image'].value[index_time_at_grasping]
40 | GelSightA_image_time_at_grasping = np.array(ig.uncompress(compressed_GelSightA))
41 |
42 | compressed_GelSightB = f['/GelSightB_image'].value[index_time_at_grasping]
43 | GelSightB_image_time_at_grasping = np.array(ig.uncompress(compressed_GelSightB))
44 |
45 | fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
46 | ax1.imshow(KinectA_color_time_at_grasping)
47 | ax2.imshow(KinectB_color_time_at_grasping)
48 | ax3.imshow(GelSightA_image_time_at_grasping)
49 | ax4.imshow(GelSightB_image_time_at_grasping)
50 | ax1.axis('off')
51 | ax2.axis('off')
52 | ax3.axis('off')
53 | ax4.axis('off')
54 | plt.draw()
55 | plt.ion()
56 | plt.show()
57 |
58 | print("Is it a cool image? [y/n]")
59 | done = False
60 | cool = False
61 | while not done:
62 | c = grip_and_record.getch.getch()
63 | if c:
64 | if c in ['n']:
65 | cool = False
66 | done = True
67 | elif c in ['y']:
68 | cool = True
69 | done = True
70 |
71 | if cool:
72 | path_cool = "/home/manu/ros_ws/src/manu_research/data/cool_images/"
73 | file = open(path_cool + filename[:-5] + '_GelSightA.jpeg', 'w')
74 | file.write(str(compressed_GelSightA))
75 | file.close()
76 | time.sleep(0.5)
77 | file = open(path_cool + filename[:-5] + '_GelSightB.jpeg', 'w')
78 | file.write(str(compressed_GelSightB))
79 | file.close()
80 |
81 | plt.close()
82 |
83 |
84 | if __name__ == '__main__':
85 | directory = "/media/data_disk/dataset_manu/ver2/2017-06-22/"
86 | import os
87 |
88 | list_filenames = []
89 | for file in os.listdir(directory):
90 | if file.endswith(".hdf5"):
91 | list_filenames.append(file)
92 | list_filenames = sorted(list_filenames)
93 | print(list_filenames)
94 |
95 | path = "/media/data_disk/dataset_manu/ver2/2017-06-22/"
96 | find_cool_images(path, list_filenames[4:])
97 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/inverse_kin.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from geometry_msgs.msg import (
3 | PoseStamped,
4 | PointStamped,
5 | Pose,
6 | Point,
7 | Quaternion,
8 | )
9 | import rospy
10 |
11 | from sensor_msgs.msg import JointState
12 |
13 | from intera_core_msgs.srv import (
14 | SolvePositionIK,
15 | SolvePositionIKRequest,
16 | )
17 |
18 | from std_msgs.msg import Header
19 |
20 |
21 | def get_joint_angles(pose, limb="right", seed_cmd=None, use_advanced_options=False, verbosity=0):
22 | ns = "ExternalTools/" + limb + "/PositionKinematicsNode/IKService"
23 | iksvc = rospy.ServiceProxy(ns, SolvePositionIK)
24 | ikreq = SolvePositionIKRequest()
25 |
26 | # Add desired pose for inverse kinematics
27 | stamped_pose = stamp_pose(pose)
28 | ikreq.pose_stamp.append(stamped_pose)
29 | # Request inverse kinematics from base to "right_hand" link
30 | ikreq.tip_names.append('right_hand')
31 |
32 | # rospy.loginfo("Running IK Service Client.")
33 |
34 | # seed_joints = None
35 | if use_advanced_options:
36 | # Optional Advanced IK parameters
37 | # The joint seed is where the IK position solver starts its optimization
38 | ikreq.seed_mode = ikreq.SEED_USER
39 |
40 | # if not seed_joints:
41 | # seed = JointState()
42 | # seed.name = ['right_j0', 'right_j1', 'right_j2', 'right_j3',
43 | # 'right_j4', 'right_j5', 'right_j6']
44 | # seed.position = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
45 | # else:
46 | seed = joint_state_from_cmd(seed_cmd)
47 | ikreq.seed_angles.append(seed)
48 |
49 |
50 | try:
51 | rospy.wait_for_service(ns, 5.0)
52 | resp = iksvc(ikreq)
53 | except (rospy.ServiceException, rospy.ROSException), e:
54 | rospy.logerr("Service call failed: %s" % (e,))
55 | return False
56 |
57 | # Check if result valid, and type of seed ultimately used to get solution
58 | if (resp.result_type[0] > 0):
59 | seed_str = {
60 | ikreq.SEED_USER: 'User Provided Seed',
61 | ikreq.SEED_CURRENT: 'Current Joint Angles',
62 | ikreq.SEED_NS_MAP: 'Nullspace Setpoints',
63 | }.get(resp.result_type[0], 'None')
64 | if verbosity > 0:
65 | rospy.loginfo("SUCCESS - Valid Joint Solution Found from Seed Type: %s" % (seed_str,))
66 | # Format solution into Limb API-compatible dictionary
67 | limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))
68 | if verbosity > 0:
69 | rospy.loginfo("\nIK Joint Solution:\n%s", limb_joints)
70 | rospy.loginfo("------------------")
71 | rospy.loginfo("Response Message:\n%s", resp)
72 |     else:
73 |         rospy.loginfo("INVALID POSE - No Valid Joint Solution Found.")
74 |         return False  # avoid returning an undefined limb_joints
75 |     return limb_joints
76 |
77 |
78 | def get_pose(x, y, z, o):  # o should be a Quaternion
79 |     p = Pose(
80 | position=Point(
81 | x=x,
82 | y=y,
83 | z=z,
84 | ),
85 | orientation=o
86 | )
87 |
88 | return p
89 |
90 |
91 | def stamp_pose(pose):
92 | hdr = Header(stamp=rospy.Time.now(), frame_id='base')
93 | p = PoseStamped(
94 | header=hdr,
95 | pose=pose
96 | )
97 | return p
98 |
99 |
100 | def joint_state_from_cmd(cmd):
101 | js = JointState()
102 | js.name = cmd.keys()
103 | js.position = cmd.values()
104 | return js
105 |
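106 | # Minimal usage sketch, assuming a running Sawyer/intera stack; the node name
107 | # and the downward-pointing quaternion are illustrative values only.
108 | if __name__ == '__main__':
109 |     rospy.init_node('ik_demo')
110 |     pose = get_pose(0.55, 0.0, 0.3, Quaternion(x=0.0, y=1.0, z=0.0, w=0.0))
111 |     joints = get_joint_angles(pose, limb='right', verbosity=1)
112 |     print(joints)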
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/aolib/pyxmod.py:
--------------------------------------------------------------------------------
1 | import os, imp, sys, traceback, tempfile
2 |
3 | # usage:
4 | # import pyxmod
5 | # foo = pyxmod.load('foo', ['bar.o'])
6 | #
7 | # Compiles foo.pyx and links it with the (optional) object file bar.o.
8 | # Returns the resulting module as foo.
9 |
10 | # if rebuild is false, then load() becomes a normal import
11 |
12 | def load(name, deps = [], rebuild = True):
13 | if rebuild:
14 | return loader.load(name, deps)
15 | else:
16 | return __import__(name)
17 |
18 | def print_error():
19 | """ IPython does not seem to print runtime errors from load()ed
20 | modules correctly, as line numbers from .pyx files are missing.
21 | This function should print them correctly. """
22 | traceback.print_tb(sys.last_traceback)
23 |
24 | class Loader:
25 | def __init__(self):
26 | self.modules = {}
27 |
28 | def load(self, name, deps = []):
29 | reload = True
30 | # only reload if the timestamp has changed
31 | if name in self.modules:
32 | edit_time = max([os.stat('%s.pyx' % name).st_mtime] + [os.stat(dep).st_mtime for dep in deps])
33 | if not hasattr(self.modules[name], '__file__') or os.stat(self.modules[name].__file__).st_mtime > edit_time:
34 | reload = False
35 | if reload:
36 | self.modules[name] = compile_and_load(name, deps)
37 | return self.modules[name]
38 |
39 | # a signal to areload.py to not reload this file
40 | __no_autoreload__ = True
41 |
42 | def compile_and_load(name, deps = []):
43 | """ Build and load the module corresponding to [name].pyx. deps is
44 | the list of object files to be linked against. """
45 | # leaks a directory in /tmp; should fix
46 | work_dir = tempfile.mkdtemp()
47 | new_name = make_temp('.so', dir = work_dir).replace('.so', '')
48 | os.system('cp %s.pyx %s.pyx' % (name, new_name))
49 | if len(deps) > 0:
50 | os.system('cp %s %s' % (' '.join(deps), work_dir))
51 | obj_files = ' '.join(os.path.join(work_dir, d) for d in deps if d.endswith('o'))
52 |
53 | # cmd = ('/afs/csail/group/vision/roger/epd/bin/cython %(new_name)s.pyx;'
54 | # ' g++ -shared -pthread -fPIC -fwrapv -O2 -Wall -fno-strict-aliasing -L'
55 | # ' /afs/csail/group/vision/roger/epd/lib -lpython2.7'
56 | # ' -I /afs/csail/group/vision/roger/epd/include/python2.7'
57 | # ' -I /afs/csail/group/vision/roger/epd/lib/python2.7/site-packages/numpy/core/include'
58 | # ' -o %(new_name)s.so %(new_name)s.c %(obj_files)s') % locals()
59 | py_dir = '/data/vision/billf/aho-billf/conda/'
60 | py_inc = os.path.join(py_dir, 'include/python2.7')
61 | py_lib = os.path.join(py_dir, 'lib')
62 | np_inc = os.path.join(py_dir, 'lib/python2.7/site-packages/numpy/core/include')
63 | cython_cmd = 'cython'
64 |
65 | cmd = ('%(cython_cmd)s %(new_name)s.pyx;'
66 | ' g++ -fopenmp -shared -pthread -fPIC -fwrapv -O2 -Wall '
67 | '`python-config --libs` -L`python-config --prefix`/lib `python-config --ldflags` `python-config --includes` -I `python -c "import numpy; print numpy.get_include()"`'
68 | # ' %(py_lib)s -lpython2.7'
69 | # ' -I %(py_inc)s'
70 | # ' -I /afs/csail/group/vision/roger/epd/lib/python2.7/site-packages/numpy/core/include'
71 | ' -o %(new_name)s.so %(new_name)s.c %(obj_files)s') % locals()
72 | print >>sys.stderr, cmd
73 | os.system(cmd)
74 |
75 | print >>sys.stderr, 'loading cython module', new_name + '.so'
76 | m = imp.load_dynamic(os.path.split(new_name)[1], new_name + '.so')
77 | return m
78 |
79 | def make_temp(ext, dir):
80 | fd, fname = tempfile.mkstemp(ext, dir = dir)
81 | os.close(fd)
82 | return fname
83 |
84 | # module-level to avoid reloading already-loaded modules
85 | loader = Loader()
86 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/aolib/pyxmod.py:
--------------------------------------------------------------------------------
1 | import os, imp, sys, traceback, tempfile
2 |
3 | # usage:
4 | # import pyxmod
5 | # foo = pyxmod.load('foo', ['bar.o'])
6 | #
7 | # Compiles foo.pyx and links it with the (optional) object file bar.o.
8 | # Returns the resulting module as foo.
9 |
10 | # if rebuild is false, then load() becomes a normal import
11 |
12 | def load(name, deps = [], rebuild = True):
13 | if rebuild:
14 | return loader.load(name, deps)
15 | else:
16 | return __import__(name)
17 |
18 | def print_error():
19 | """ IPython does not seem to print runtime errors from load()ed
20 | modules correctly, as line numbers from .pyx files are missing.
21 | This function should print them correctly. """
22 | traceback.print_tb(sys.last_traceback)
23 |
24 | class Loader:
25 | def __init__(self):
26 | self.modules = {}
27 |
28 | def load(self, name, deps = []):
29 | reload = True
30 | # only reload if the timestamp has changed
31 | if name in self.modules:
32 | edit_time = max([os.stat('%s.pyx' % name).st_mtime] + [os.stat(dep).st_mtime for dep in deps])
33 | if not hasattr(self.modules[name], '__file__') or os.stat(self.modules[name].__file__).st_mtime > edit_time:
34 | reload = False
35 | if reload:
36 | self.modules[name] = compile_and_load(name, deps)
37 | return self.modules[name]
38 |
39 | # a signal to areload.py to not reload this file
40 | __no_autoreload__ = True
41 |
42 | def compile_and_load(name, deps = []):
43 | """ Build and load the module corresponding to [name].pyx. deps is
44 | the list of object files to be linked against. """
45 | # leaks a directory in /tmp; should fix
46 | work_dir = tempfile.mkdtemp()
47 | new_name = make_temp('.so', dir = work_dir).replace('.so', '')
48 | os.system('cp %s.pyx %s.pyx' % (name, new_name))
49 | if len(deps) > 0:
50 | os.system('cp %s %s' % (' '.join(deps), work_dir))
51 | obj_files = ' '.join(os.path.join(work_dir, d) for d in deps if d.endswith('o'))
52 |
53 | # cmd = ('/afs/csail/group/vision/roger/epd/bin/cython %(new_name)s.pyx;'
54 | # ' g++ -shared -pthread -fPIC -fwrapv -O2 -Wall -fno-strict-aliasing -L'
55 | # ' /afs/csail/group/vision/roger/epd/lib -lpython2.7'
56 | # ' -I /afs/csail/group/vision/roger/epd/include/python2.7'
57 | # ' -I /afs/csail/group/vision/roger/epd/lib/python2.7/site-packages/numpy/core/include'
58 | # ' -o %(new_name)s.so %(new_name)s.c %(obj_files)s') % locals()
59 | py_dir = '/data/vision/billf/aho-billf/conda/'
60 | py_inc = os.path.join(py_dir, 'include/python2.7')
61 | py_lib = os.path.join(py_dir, 'lib')
62 | np_inc = os.path.join(py_dir, 'lib/python2.7/site-packages/numpy/core/include')
63 | cython_cmd = 'cython'
64 |
65 | cmd = ('%(cython_cmd)s %(new_name)s.pyx;'
66 | ' g++ -fopenmp -shared -pthread -fPIC -fwrapv -O2 -Wall '
67 | '`python-config --libs` -L`python-config --prefix`/lib `python-config --ldflags` `python-config --includes` -I `python -c "import numpy; print numpy.get_include()"`'
68 | # ' %(py_lib)s -lpython2.7'
69 | # ' -I %(py_inc)s'
70 | # ' -I /afs/csail/group/vision/roger/epd/lib/python2.7/site-packages/numpy/core/include'
71 | ' -o %(new_name)s.so %(new_name)s.c %(obj_files)s') % locals()
72 | print >>sys.stderr, cmd
73 | os.system(cmd)
74 |
75 | print >>sys.stderr, 'loading cython module', new_name + '.so'
76 | m = imp.load_dynamic(os.path.split(new_name)[1], new_name + '.so')
77 | return m
78 |
79 | def make_temp(ext, dir):
80 | fd, fname = tempfile.mkstemp(ext, dir = dir)
81 | os.close(fd)
82 | return fname
83 |
84 | # module-level to avoid reloading already-loaded modules
85 | loader = Loader()
86 |
--------------------------------------------------------------------------------
/manu_sawyer/src/tensorflow_model_is_gripping/aolib/pyxmod.py:
--------------------------------------------------------------------------------
1 | import os, imp, sys, traceback, tempfile
2 |
3 | # usage:
4 | # import pyxmod
5 | # foo = pyxmod.load('foo', ['bar.o'])
6 | #
7 | # Compiles foo.pyx and links it with the (optional) object file bar.o.
8 | # Returns the resulting module as foo.
9 |
10 | # if rebuild is false, then load() becomes a normal import
11 |
12 | def load(name, deps = [], rebuild = True):
13 | if rebuild:
14 | return loader.load(name, deps)
15 | else:
16 | return __import__(name)
17 |
18 | def print_error():
19 | """ IPython does not seem to print runtime errors from load()ed
20 | modules correctly, as line numbers from .pyx files are missing.
21 | This function should print them correctly. """
22 | traceback.print_tb(sys.last_traceback)
23 |
24 | class Loader:
25 | def __init__(self):
26 | self.modules = {}
27 |
28 | def load(self, name, deps = []):
29 | reload = True
30 | # only reload if the timestamp has changed
31 | if name in self.modules:
32 | edit_time = max([os.stat('%s.pyx' % name).st_mtime] + [os.stat(dep).st_mtime for dep in deps])
33 | if not hasattr(self.modules[name], '__file__') or os.stat(self.modules[name].__file__).st_mtime > edit_time:
34 | reload = False
35 | if reload:
36 | self.modules[name] = compile_and_load(name, deps)
37 | return self.modules[name]
38 |
39 | # a signal to areload.py to not reload this file
40 | __no_autoreload__ = True
41 |
42 | def compile_and_load(name, deps = []):
43 | """ Build and load the module corresponding to [name].pyx. deps is
44 | the list of object files to be linked against. """
45 | # leaks a directory in /tmp; should fix
46 | work_dir = tempfile.mkdtemp()
47 | new_name = make_temp('.so', dir = work_dir).replace('.so', '')
48 | os.system('cp %s.pyx %s.pyx' % (name, new_name))
49 | if len(deps) > 0:
50 | os.system('cp %s %s' % (' '.join(deps), work_dir))
51 | obj_files = ' '.join(os.path.join(work_dir, d) for d in deps if d.endswith('o'))
52 |
53 | # cmd = ('/afs/csail/group/vision/roger/epd/bin/cython %(new_name)s.pyx;'
54 | # ' g++ -shared -pthread -fPIC -fwrapv -O2 -Wall -fno-strict-aliasing -L'
55 | # ' /afs/csail/group/vision/roger/epd/lib -lpython2.7'
56 | # ' -I /afs/csail/group/vision/roger/epd/include/python2.7'
57 | # ' -I /afs/csail/group/vision/roger/epd/lib/python2.7/site-packages/numpy/core/include'
58 | # ' -o %(new_name)s.so %(new_name)s.c %(obj_files)s') % locals()
59 | py_dir = '/data/vision/billf/aho-billf/conda/'
60 | py_inc = os.path.join(py_dir, 'include/python2.7')
61 | py_lib = os.path.join(py_dir, 'lib')
62 | np_inc = os.path.join(py_dir, 'lib/python2.7/site-packages/numpy/core/include')
63 | cython_cmd = 'cython'
64 |
65 | cmd = ('%(cython_cmd)s %(new_name)s.pyx;'
66 | ' g++ -fopenmp -shared -pthread -fPIC -fwrapv -O2 -Wall '
67 | '`python-config --libs` -L`python-config --prefix`/lib `python-config --ldflags` `python-config --includes` -I `python -c "import numpy; print numpy.get_include()"`'
68 | # ' %(py_lib)s -lpython2.7'
69 | # ' -I %(py_inc)s'
70 | # ' -I /afs/csail/group/vision/roger/epd/lib/python2.7/site-packages/numpy/core/include'
71 | ' -o %(new_name)s.so %(new_name)s.c %(obj_files)s') % locals()
72 | print >>sys.stderr, cmd
73 | os.system(cmd)
74 |
75 | print >>sys.stderr, 'loading cython module', new_name + '.so'
76 | m = imp.load_dynamic(os.path.split(new_name)[1], new_name + '.so')
77 | return m
78 |
79 | def make_temp(ext, dir):
80 | fd, fname = tempfile.mkstemp(ext, dir = dir)
81 | os.close(fd)
82 | return fname
83 |
84 | # module-level to avoid reloading already-loaded modules
85 | loader = Loader()
86 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/transform_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from robot_camera import RobotCam
3 | import rospy
4 | import time
5 | import cv2
6 | import numpy as np
7 | import os
8 | from itertools import product  # needed by on_scan below
9 | import sensor_msgs.point_cloud2 as pc2  # needed by on_scan below
10 | # TODO: run kinect script if not already running
11 | # ps aux | grep kinect
12 | # os.system('killall -9 /home/rcalandra/ros_ws/devel/lib/kinect2_bridge/kinect2_bridge')
13 | # os.system('rosrun kinect2_bridge kinect2_bridge > /home/rcalandra/ros_ws/src/manu_sawyer/temp/kinect_bridge_out.txt 2>&1 &')
14 | # time.sleep(10)
15 |
16 | # TODO: Ask to clean table, and take first image...
17 | # TODO: watch out for gripper
18 |
19 | rospy.init_node('Testing')
20 | print('Init camera')
21 | kinect = RobotCam()
22 | print("ini done")
23 |
24 | rate = rospy.Rate(30)
25 |
26 |
27 | try:
28 | # Capture frame-by-frame
29 | i = 0
30 | while True:
31 | init_time = time.time()
32 | print('Getting kinect data')
33 | frame = kinect.get_depth_image()
34 | # camera_depth = np.asarray(self.robot_cam.get_depth_image())
35 | # depth = dset['camera_depth'][:, :, 0]
36 | # depths.append(np.array(depth, 'float32'))
37 |
38 | #depth_img = frame.astype(np.float32) / np.max(frame) * 512
39 | #depth_img = depth_img.astype(np.uint8)
40 | #depth_img = np.squeeze(depth_img)
41 | # # Display the resulting frame
42 | if i == 0:
43 | time.sleep(1)
44 | #
45 | # # plt.imshow(frame)
46 | # # plt.show()
47 | if cv2.waitKey(10) & 0xFF == ord('q'):
48 | # os.system('killall -9 /home/rcalandra/ros_ws/devel/lib/kinect2_bridge/kinect2_bridge')
49 | break
50 |
51 | center, height, d, obj_vis = kinect.calc_object_loc()
52 | print('center =', center)
53 | print('radius =', d)
54 | print('height =', height)
55 | cv2.imshow('frame', obj_vis)
56 | rate.sleep()
57 | i += 1
58 | except KeyboardInterrupt:
59 |     print('Interrupted by user')
60 | pass
61 | kinect.end_process()
62 |
63 |
64 | def on_scan(self, data):
65 |     '''
66 |     Callback invoked on each point cloud message; points are read as (x, y, z, d).
67 |     NOTE: vestigial method copied from a class; expects `self` and a module-level Z_LIMIT.
68 |     '''
69 | print("=== Picking points ===")
70 | # side setup
71 | X_MIN = 0
72 | X_MAX = 600
73 | Y_MIN = 50
74 | Y_MAX = 300
75 |
76 | # x_min = 0.32
77 | # x_max = 0.85
78 | # y_min = -0.80
79 | # y_max = -0.30
80 | # z_min = Z_LIMIT
81 | # z_max = 0.05
82 | x_min = 0.32
83 | x_max = 0.85
84 | y_min = -0.82
85 | y_max = -0.27
86 | z_min = Z_LIMIT
87 | z_max = 0.05
88 |
89 | valid = []
90 |
91 | scan_range = product(xrange(X_MIN, X_MAX), xrange(Y_MIN, Y_MAX))
92 |
93 | valid_points = 0
94 | p = pc2.read_points(data, skip_nans=False, uvs=scan_range)
95 |
96 | scan_range = product(xrange(X_MIN, X_MAX), xrange(Y_MIN, Y_MAX))
97 |
98 | valid_points_img = self.cur_img.copy()
99 |
100 | XY = []
101 | Z = []
102 |
103 | for X, Y in scan_range:
104 | point = p.next()
105 | x, y, z, d = np.array(point)
106 | if all([x < x_max, x > x_min, y > y_min, y < y_max]):
107 | valid.append((x, y, z, X, Y))
108 | valid_points_img[:, Y, X] = np.array([0, 0, 0])
109 | if all([x < x_max - 0.05, x > x_min + 0.05]): # extra constraints
110 | XY.append((1.0, x, y))
111 | Z.append(z)
112 |
113 | self.valid_points_publisher.send(valid_points_img)
114 |
115 | A = np.array(XY)
116 | b = np.array(Z)
117 | print A.shape, b.shape
118 | coeffs, _, _, _ = np.linalg.lstsq(A, b)
119 |
120 | print "saving", list(coeffs)
121 | np.save("plane_coeffs", coeffs)
122 |
123 | self.depth_subscriber.unregister()
124 |
125 | print("Done.")
126 |
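127 | # Sketch, assuming the coefficients saved by on_scan above: rows of A are
128 | # (1, x, y) with targets z, so the fitted table plane is z = c0 + c1*x + c2*y.
129 | def plane_height(coeffs, x, y):
130 |     # evaluate the fitted plane at table coordinates (x, y)
131 |     return coeffs[0] + coeffs[1] * x + coeffs[2] * y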
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/planefit.py:
--------------------------------------------------------------------------------
1 | # from https://github.com/andrewowens/camo/blob/master/src/planefit.py
2 | import numpy as np, aolib.util as ut, mvg
3 |
4 | def plane_repok(plane):
5 |     # planes are stored as [w0, w1, w2, b] with unit normal w: w . x + b = 0
6 |     assert np.allclose(np.linalg.norm(plane[:3]), 1)
7 |     return plane
8 | def fit_plane(pts):
9 | # A = ut.homog(pts.T).T
10 | # b = mvg.solve_0_system(A)
11 | # b /= np.linalg.norm(b[:3])
12 | # return b
13 | axes, _, mean_pt = ut.pca(pts)
14 | w = axes[2]
15 | b = -np.dot(w, mean_pt)
16 | assert np.allclose(np.linalg.norm(w), 1)
17 | return np.array([w[0], w[1], w[2], b], 'd')
18 |
19 | def sample_plane(plane, n, width, noise = 0):
20 | plane_repok(plane)
21 | e1, e2, _ = np.eye(3)
22 | v1 = e1 if (plane[0] == 0 and plane[1] == 0) else ut.normalized(np.array([-plane[1], plane[0], 0], 'd'))
23 | v2 = ut.normalized(np.cross(plane[:3], v1))
24 | #print 'dot', np.dot(v1, plane[:3]), np.dot(v2, plane[:3])
25 | #print 'sample', np.sum(np.dot(np.array([v1, v2]).T, np.random.uniform(-width/2, width/2, (2, n))).T * plane[:3], 1)
26 | center = -plane[3]*plane[:3]
27 | #print 'dot2', np.dot(center, plane[:3]) + plane[3], plane
28 | pts = np.dot(np.array([v1, v2]).T, np.random.uniform(-width/2, width/2, (2, n))).T + center
29 | #print 'ins', len(plane_inliers(plane, pts, 0.05))
30 | pts += np.random.randn(*pts.shape)*noise
31 | return pts
32 |
33 | def plane_from_3(pts):
34 | assert pts.shape == (3, 3)
35 | w = ut.normalized(np.cross(pts[1] - pts[0], pts[2] - pts[0]))
36 | return np.array(list(w) + [-np.dot(w, pts[0])])
37 |
38 | def dist_to_plane(plane, pts, signed = False):
39 | d = plane[3] + np.dot(pts, plane[:3])
40 | if signed:
41 | return d
42 | else:
43 | return np.abs(d)
44 |
45 | def dist_from_plane_ptm(ptm, plane):
46 | dist = np.abs(plane[3] + (ptm[:, :, :3] * plane[:3]).sum(axis = 2))
47 | dist[ptm[:, :, 3] == 0] = np.inf
48 | return dist
49 |
50 | def project_onto_plane(plane, pts):
51 | d = dist_to_plane(plane, pts, signed = True)
52 | proj = pts - d[:, None]*plane[:3]
53 | #print 'should be 0:', planefit.dist_to_plane(plane, proj)
54 | return proj
55 |
56 | def projection_transformation(plane):
57 | w, b = plane[:3], plane[3]
58 | A = (np.outer(w, w) + np.eye(3))
59 | b = -w*b
60 | return A, b
61 |
62 | def test_transform():
63 | projection_transformation
64 |
65 | def plane_inliers(plane, pts, inlier_max_dist):
66 | err = plane[3] + np.dot(pts, plane[:3])
67 | #print 'err', err
68 | return np.nonzero(np.abs(err) <= inlier_max_dist)[0]
69 |
70 | def transform_plane(R, t, plane):
71 | return np.array(list(R.T.dot(plane[:3])) + [np.dot(plane[:3], t) + plane[3]])
72 |
73 | def fit_plane_ransac(pts, dist_thresh, ransac_iters = 200, filter_func = lambda plane : True, seed = 0):
74 | with ut.constant_seed(seed):
75 | if len(pts) < 3:
76 | return np.zeros(4), []
77 |
78 | best_inliers = range(3)
79 | for inds in mvg.ransac_sample(len(pts), 3, ransac_iters):
80 | plane = plane_from_3(pts[inds])
81 | if not np.all(plane[:3] == 0) and filter_func(plane):
82 | err = plane[3] + np.dot(pts, plane[:3])
83 | ins = np.nonzero(np.abs(err) <= dist_thresh)[0]
84 | if len(ins) > len(best_inliers):
85 | best_inliers = ins
86 | #print 'plane before', plane, 'after', fit_plane(pts[best_inliers])
87 |
88 | #print len(best_inliers)
89 | return fit_plane(pts[best_inliers]), best_inliers
90 |
91 | def test_plane_fitting():
92 | noise = 0.0
93 | plane1 = np.array([0., 1./2**0.5, 1/2**0.5, 1.])
94 | plane2 = np.array([1., 0., 0., 5.])
95 | pts1 = sample_plane(plane1, 100, 1, noise)
96 | pts2 = sample_plane(plane2, 20, 1, noise)
97 | pts = np.vstack([pts1, pts2])
98 | plane, _ = fit_plane_ransac(pts, 0.05)
99 | #plane = fit_plane(pts1)
100 | print plane, 'should be', np.array([0., 1., 1., 1.])
101 | true_ins = plane_inliers(plane, pts, 0.05)
102 | print 'ninliers', len(true_ins), 'should be', len(plane_inliers(plane1, pts, 0.05)), 'other hypothesis', len(plane_inliers(plane2, pts, 0.05))
103 | ut.toplevel_locals()
104 |
105 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grip_and_record/planefit.py:
--------------------------------------------------------------------------------
1 | # from https://github.com/andrewowens/camo/blob/master/src/planefit.py
2 | import aolib.util as ut
3 | import mvg
4 | import numpy as np
5 |
6 |
7 | def plane_repok(plane):
8 |     # planes are stored as [w0, w1, w2, b] with unit normal w: w . x + b = 0
9 |     assert np.allclose(np.linalg.norm(plane[:3]), 1)
10 |     return plane
11 | def fit_plane(pts):
12 | # A = ut.homog(pts.T).T
13 | # b = mvg.solve_0_system(A)
14 | # b /= np.linalg.norm(b[:3])
15 | # return b
16 | axes, _, mean_pt = ut.pca(pts)
17 | w = axes[2]
18 | b = -np.dot(w, mean_pt)
19 | assert np.allclose(np.linalg.norm(w), 1)
20 | return np.array([w[0], w[1], w[2], b], 'd')
21 |
22 | def sample_plane(plane, n, width, noise = 0):
23 | plane_repok(plane)
24 | e1, e2, _ = np.eye(3)
25 | v1 = e1 if (plane[0] == 0 and plane[1] == 0) else ut.normalized(np.array([-plane[1], plane[0], 0], 'd'))
26 | v2 = ut.normalized(np.cross(plane[:3], v1))
27 | #print 'dot', np.dot(v1, plane[:3]), np.dot(v2, plane[:3])
28 | #print 'sample', np.sum(np.dot(np.array([v1, v2]).T, np.random.uniform(-width/2, width/2, (2, n))).T * plane[:3], 1)
29 | center = -plane[3]*plane[:3]
30 | #print 'dot2', np.dot(center, plane[:3]) + plane[3], plane
31 | pts = np.dot(np.array([v1, v2]).T, np.random.uniform(-width/2, width/2, (2, n))).T + center
32 | #print 'ins', len(plane_inliers(plane, pts, 0.05))
33 | pts += np.random.randn(*pts.shape)*noise
34 | return pts
35 |
36 | def plane_from_3(pts):
37 | assert pts.shape == (3, 3)
38 | w = ut.normalized(np.cross(pts[1] - pts[0], pts[2] - pts[0]))
39 | return np.array(list(w) + [-np.dot(w, pts[0])])
40 |
41 | def dist_to_plane(plane, pts, signed = False):
42 | d = plane[3] + np.dot(pts, plane[:3])
43 | if signed:
44 | return d
45 | else:
46 | return np.abs(d)
47 |
48 | def dist_from_plane_ptm(ptm, plane):
49 | dist = np.abs(plane[3] + (ptm[:, :, :3] * plane[:3]).sum(axis = 2))
50 | dist[ptm[:, :, 3] == 0] = np.inf
51 | return dist
52 |
53 | def project_onto_plane(plane, pts):
54 | d = dist_to_plane(plane, pts, signed = True)
55 | proj = pts - d[:, None]*plane[:3]
56 | #print 'should be 0:', planefit.dist_to_plane(plane, proj)
57 | return proj
58 |
59 | def projection_transformation(plane):
60 | w, b = plane[:3], plane[3]
61 | A = (np.outer(w, w) + np.eye(3))
62 | b = -w*b
63 | return A, b
64 |
65 | def test_transform():
66 | projection_transformation
67 |
68 | def plane_inliers(plane, pts, inlier_max_dist):
69 | err = plane[3] + np.dot(pts, plane[:3])
70 | #print 'err', err
71 | return np.nonzero(np.abs(err) <= inlier_max_dist)[0]
72 |
73 | def transform_plane(R, t, plane):
74 | return np.array(list(R.T.dot(plane[:3])) + [np.dot(plane[:3], t) + plane[3]])
75 |
76 | def fit_plane_ransac(pts, dist_thresh, ransac_iters = 200, filter_func = lambda plane : True, seed = 0):
77 | with ut.constant_seed(seed):
78 | if len(pts) < 3:
79 | return np.zeros(4), []
80 |
81 | best_inliers = range(3)
82 | for inds in mvg.ransac_sample(len(pts), 3, ransac_iters):
83 | plane = plane_from_3(pts[inds])
84 | if not np.all(plane[:3] == 0) and filter_func(plane):
85 | err = plane[3] + np.dot(pts, plane[:3])
86 | ins = np.nonzero(np.abs(err) <= dist_thresh)[0]
87 | if len(ins) > len(best_inliers):
88 | best_inliers = ins
89 | #print 'plane before', plane, 'after', fit_plane(pts[best_inliers])
90 |
91 | #print len(best_inliers)
92 | return fit_plane(pts[best_inliers]), best_inliers
93 |
94 | def test_plane_fitting():
95 | noise = 0.0
96 | plane1 = np.array([0., 1./2**0.5, 1/2**0.5, 1.])
97 | plane2 = np.array([1., 0., 0., 5.])
98 | pts1 = sample_plane(plane1, 100, 1, noise)
99 | pts2 = sample_plane(plane2, 20, 1, noise)
100 | pts = np.vstack([pts1, pts2])
101 | plane, _ = fit_plane_ransac(pts, 0.05)
102 | #plane = fit_plane(pts1)
103 | print plane, 'should be', np.array([0., 1., 1., 1.])
104 | true_ins = plane_inliers(plane, pts, 0.05)
105 | print 'ninliers', len(true_ins), 'should be', len(plane_inliers(plane1, pts, 0.05)), 'other hypothesis', len(plane_inliers(plane2, pts, 0.05))
106 | ut.toplevel_locals()
107 |
108 |
--------------------------------------------------------------------------------
/manu_sawyer/src/KinectA.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 | from cv_bridge import CvBridge, CvBridgeError
5 | import rospy
6 | import multiprocessing
7 | from sensor_msgs.msg import Image
8 | import grip_and_record.locate_cylinder as locate_cylinder
9 | import tensorflow_model_is_gripping.aolib.util as ut
10 | import grip_and_record.getch
11 |
12 | cache_path = '/home/manu/ros_ws/src/manu_research/frame_cache'
13 |
14 |
15 | class KinectA:
16 | def __init__(self, save_init=True):
17 | """
18 | Class for recording data from the KinectA
19 | """
20 | print("Initializing KinectA")
21 | # Set up the subscribers
22 | # self.image_topic = "/kinect2/hd/image_color"
23 | self.image_topic = "/kinect2/qhd/image_color"
24 | self.depth_topic = "/kinect2/sd/image_depth_rect"
25 | rospy.Subscriber(self.image_topic, Image, self.store_latest_color_image)
26 | rospy.Subscriber(self.depth_topic, Image, self.store_latest_depth_image)
27 |
28 | # Defining variables
29 | self.init_depth_image = None
30 | self.depth_image = None
31 | self.color_image = None
32 |
33 | # Needed to transform images
34 | self.bridge = CvBridge()
35 |
36 | # Saving initial images
37 | cache_file = ut.pjoin(cache_path, 'kinect_a_init.pk')
38 | if save_init:
39 | # Requests the user to clear the table of all objects.
40 | # Takes initial picture with KinectA.
41 | # The initial picture is needed to localize the object on the table later.
42 | print('Please remove all objects from the table and then press ESC.')
43 | done = False
44 | while not done and not rospy.is_shutdown():
45 | c = grip_and_record.getch.getch()
46 | if c:
47 | if c in ['\x1b', '\x03']:
48 | done = True
49 | ut.mkdir(cache_path)
50 | self.save_initial_color_image()
51 | self.save_initial_depth_image()
52 | ut.save(cache_file, (self.color_image, self.init_depth_image, self.depth_image))
53 | else:
54 | (self.color_image, self.init_depth_image, self.depth_image) = ut.load(cache_file)
55 |
56 | # Starting multiprocessing
57 | def spin_thread():
58 | rospy.spin()
59 |
60 | self.cam_process = multiprocessing.Process(target=spin_thread)
61 | self.cam_process.start()
62 | print("Done")
63 |
64 | def calc_object_loc(self):
65 |         # Uses Andrew's code to fit a cylinder; returns the center, height, radius, and a visualization image.
66 | ini_arr = np.array(self.init_depth_image)
67 | ini_depth = np.array(ini_arr[:, :, 0], 'float32')
68 | curr_arr = np.array(self.depth_image)
69 | curr_depth = np.array(curr_arr[:, :, 0], 'float32')
70 | return locate_cylinder.fit_cylinder(ini_depth, curr_depth)
71 |
72 | def save_initial_color_image(self):
73 | img = rospy.wait_for_message(self.image_topic, Image)
74 | self.color_image = self.bridge.imgmsg_to_cv2(img, "bgr8")
75 | self.color_image = np.flipud(self.color_image)
76 | self.color_image = np.fliplr(self.color_image)
77 |
78 | def save_initial_depth_image(self):
79 | img = rospy.wait_for_message(self.depth_topic, Image)
80 | self.depth_image = self.bridge.imgmsg_to_cv2(img, '16UC1')
81 | self.init_depth_image = self.depth_image
82 |
83 | def store_latest_color_image(self, data):
84 | try:
85 | cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
86 |         except CvBridgeError as e:
87 |             print(e)
88 |             return  # keep the previous frame if conversion fails
89 |         self.color_image = cv_image
90 | def store_latest_depth_image(self, data):
91 | img = self.bridge.imgmsg_to_cv2(data, '16UC1')
92 | self.depth_image = img
93 |
94 | def get_color_image(self):
95 | return np.flipud(np.fliplr(self.color_image))
96 |
97 | def get_depth_image(self):
98 | return np.flipud(np.fliplr(self.depth_image))
99 |
100 | def end_process(self):
101 | self.cam_process.terminate()
102 | self.cam_process.join()
103 |
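104 | # Minimal usage sketch, assuming a running ROS master and kinect2_bridge;
105 | # 'kinect_a_demo' is an arbitrary node name.
106 | if __name__ == '__main__':
107 |     rospy.init_node('kinect_a_demo')
108 |     kinect = KinectA(save_init=False)  # reuse cached frames (needs an earlier run with save_init=True)
109 |     img = kinect.get_color_image()
110 |     print('color image shape: %s' % (img.shape,))
111 |     kinect.end_process()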
--------------------------------------------------------------------------------
/manu_sawyer/src/set_true_label_of_data.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 | import h5py
5 | from tqdm import tqdm
6 | import cv2
7 | import grip_and_record.getch
8 | import matplotlib.pyplot as plt
9 | import tensorflow_model_is_gripping.aolib.img as ig
10 |
11 |
12 | def set_label(path, filenames):
13 |     """
14 |     Show the grasp images for each file and record whether the robot is
15 |     actually holding the object (the true label).
16 |     :param path: directory containing the .hdf5 files
17 |     :param filenames: one filename or a list of filenames
18 |     """
19 |     if isinstance(filenames, basestring):
20 |         filenames = [filenames]  # wrap a single string in a list
21 | for filename in filenames:
22 | path_to_file = path + filename
23 |
24 | print('Opening file: %s' % path_to_file)
25 | f = h5py.File(path_to_file, "r")
26 |
27 | n_steps = int(f['n_steps'].value)
28 |
29 | # times = []
30 | # for i in tqdm(range(1, n_steps)):
31 | # id = '/step_%012d' % i
32 | # times.append(f[id + '/timestamp'].value)
33 | # times = np.array(times)
34 |
35 | times = np.array(f['/timestamp'].value)
36 |
37 | time_pre_grasping = float(f["time_pre_grasping"].value)
38 | index_time_pre_grasping = np.argmin(
39 | np.abs(times - time_pre_grasping)) # Find index corresponding to timestamp
40 |
41 | GelSightA_image_time_pre_grasping = np.array(
42 | ig.uncompress(f['/GelSightA_image'].value[index_time_pre_grasping]))
43 | GelSightB_image_time_pre_grasping = np.array(
44 | ig.uncompress(f['/GelSightB_image'].value[index_time_pre_grasping]))
45 |
46 | time_post2_grasping = float(f["time_post2_grasping"].value)
47 | index_time_post2_grasping = np.argmin(
48 | np.abs(times - time_post2_grasping)) # Find index corresponding to timestamp
49 | KinectA_color_time_post2_grasping = np.array(
50 | ig.uncompress(f['/color_image_KinectA'].value[index_time_post2_grasping]))
51 | KinectB_color_time_post2_grasping = np.array(
52 | ig.uncompress(f['/color_image_KinectB'].value[index_time_post2_grasping]))
53 | GelSightA_image_time_post2_grasping = np.array(
54 | ig.uncompress(f['/GelSightA_image'].value[index_time_post2_grasping]))
55 | GelSightB_image_time_post2_grasping = np.array(
56 | ig.uncompress(f['/GelSightB_image'].value[index_time_post2_grasping]))
57 |
58 | fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
59 | ax1.imshow(KinectA_color_time_post2_grasping)
60 | ax2.imshow(KinectB_color_time_post2_grasping)
61 | ax3.imshow(GelSightA_image_time_post2_grasping)
62 | ax4.imshow(GelSightB_image_time_post2_grasping)
63 | ax1.axis('off')
64 | ax2.axis('off')
65 | ax3.axis('off')
66 | ax4.axis('off')
67 | plt.draw()
68 | plt.ion()
69 | plt.show()
70 |
71 | print("Is the robot holding the object in its gripper? [y/n]")
72 | done = False
73 | while not done:
74 | c = grip_and_record.getch.getch()
75 | if c:
76 | if c in ['n']:
77 | is_gripping = False
78 | done = True
79 | elif c in ['y']:
80 | is_gripping = True
81 | done = True
82 |
83 | # TODO: check if folder exists, if not create
84 | path_0 = "/home/manu/ros_ws/src/manu_research/data/"
85 | file = open(path_0 + 'labels/' + filename[:-4] + 'txt', 'w')
86 | file.write(str(is_gripping))
87 | file.close()
88 |
89 | file = h5py.File(path_0 + "for_Andrew/" + filename, "w")
90 | file.create_dataset("GelSightA_image_pre_gripping", data=GelSightA_image_time_pre_grasping)
91 | file.create_dataset("GelSightB_image_pre_gripping", data=GelSightB_image_time_pre_grasping)
92 | file.create_dataset("GelSightA_image_post_gripping", data=GelSightA_image_time_post2_grasping)
93 | file.create_dataset("GelSightB_image_post_gripping", data=GelSightB_image_time_post2_grasping)
94 | file.create_dataset("is_gripping", data=np.asarray([is_gripping]))
95 | file.close()
96 |
97 | f.close()
98 |
99 | plt.close()
100 |
101 |
102 | if __name__ == '__main__':
103 | directory = "/media/data_disk/dataset_manu/ver2/2017-06-22/"
104 | import os
105 |
106 | list_filenames = []
107 | for file in os.listdir(directory):
108 | if file.endswith(".hdf5"):
109 | list_filenames.append(file)
110 | list_filenames = sorted(list_filenames)
111 | print(list_filenames)
112 |
113 | path = directory
114 | set_label(path, list_filenames)
115 |
--------------------------------------------------------------------------------
/manu_sawyer/src/extract_images_for_paper.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import numpy as np
4 | import h5py
5 | from tqdm import tqdm
6 | import cv2
7 | import grip_and_record.getch
8 | import matplotlib.pyplot as plt
9 | import tensorflow_model_is_gripping.aolib.img as ig
10 | import time
11 |
12 |
13 | def find_cool_images(path, filenames):
14 |     """
15 |     Extract the at-grasping Kinect and GelSight images from each file and
16 |     save them to disk for the paper figures.
17 |     :param path: directory containing the .hdf5 files
18 |     :param filenames: one filename or a list of filenames
19 |     """
20 |     if isinstance(filenames, basestring):
21 |         filenames = [filenames]  # wrap a single string in a list
22 | for filename in filenames:
23 | path_to_file = path + filename
24 |
25 | print('Opening file: %s' % path_to_file)
26 | f = h5py.File(path_to_file, "r")
27 |
28 | times = np.array(f['/timestamp'].value)
29 |
30 |         time_post2_grasping = float(f["time_at_grasping"].value)  # NOTE: reads time_at_grasping despite the post2 variable names below
31 | index_time_post2_grasping = np.argmin(
32 | np.abs(times - time_post2_grasping)) # Find index corresponding to timestamp
33 |
34 | compressed_kinectA = f['/color_image_KinectA'].value[index_time_post2_grasping]
35 | KinectA_color_time_post2_grasping = np.array(ig.uncompress(compressed_kinectA))
36 |
37 | compressed_kinectB = f['/color_image_KinectB'].value[index_time_post2_grasping]
38 | KinectB_color_time_post2_grasping = np.array(ig.uncompress(compressed_kinectB))
39 |
40 | kinectA_depth = f['/depth_image_KinectA'].value[index_time_post2_grasping]
41 | KinectA_depth_time_post2_grasping = np.array(kinectA_depth).squeeze()
42 |
43 | kinectB_depth = f['/depth_image_KinectB'].value[index_time_post2_grasping]
44 | KinectB_depth_time_post2_grasping = np.array(kinectB_depth).squeeze()
45 |
46 | compressed_GelSightA = f['/GelSightA_image'].value[index_time_post2_grasping]
47 | GelSightA_image_time_post2_grasping = np.array(ig.uncompress(compressed_GelSightA))
48 |
49 | compressed_GelSightB = f['/GelSightB_image'].value[index_time_post2_grasping]
50 | GelSightB_image_time_post2_grasping = np.array(ig.uncompress(compressed_GelSightB))
51 |
52 | # fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
53 | # ax1.imshow(KinectA_color_time_at_grasping)
54 | # ax2.imshow(KinectB_color_time_at_grasping)
55 | # ax3.imshow(GelSightA_image_time_at_grasping)
56 | # ax4.imshow(GelSightB_image_time_at_grasping)
57 | # ax1.axis('off')
58 | # ax2.axis('off')
59 | # ax3.axis('off')
60 | # ax4.axis('off')
61 | # plt.draw()
62 | # plt.ion()
63 | # plt.show()
64 | #
65 | # print("Is it a cool image? [y/n]")
66 | # done = False
67 | # cool = False
68 | # while not done:
69 | # c = grip_and_record.getch.getch()
70 | # if c:
71 | # if c in ['n']:
72 | # cool = False
73 | # done = True
74 | # elif c in ['y']:
75 | # cool = True
76 | # done = True
77 |
78 |
79 | path_cool = "/home/manu/ros_ws/src/manu_research/data/test_figure/"
80 | file = open(path_cool + filename[:-5] + '_GelSightA_at.jpeg', 'w')
81 | file.write(str(compressed_GelSightA))
82 | file.close()
83 | time.sleep(0.5)
84 | file = open(path_cool + filename[:-5] + '_GelSightB_at.jpeg', 'w')
85 | file.write(str(compressed_GelSightB))
86 | file.close()
87 | time.sleep(0.5)
88 | file = open(path_cool + filename[:-5] + '_kinectA_at.jpeg', 'w')
89 | file.write(str(compressed_kinectA))
90 | file.close()
91 | time.sleep(0.5)
92 | file = open(path_cool + filename[:-5] + '_kinectB_at.jpeg', 'w')
93 | file.write(str(compressed_kinectB))
94 | file.close()
95 |
96 | fig = plt.figure()
97 | print(KinectA_depth_time_post2_grasping.shape)
98 |     KinectA_depth_time_post2_grasping = np.minimum(KinectA_depth_time_post2_grasping, 1024)  # clip depths beyond 1024 for display
99 | plt.imshow(KinectA_depth_time_post2_grasping, cmap='gray')
100 | plt.show()
101 | import scipyplot as spp
102 | spp.save2file(fig=fig, nameFile='depth_image_KinectA_at')
103 |
104 | fig = plt.figure()
105 |     KinectB_depth_time_post2_grasping = np.minimum(KinectB_depth_time_post2_grasping, 1024)  # clip depths beyond 1024 for display
106 | plt.imshow(KinectB_depth_time_post2_grasping, cmap='gray')
107 | plt.show()
108 | spp.save2file(fig=fig, nameFile='depth_image_KinectB_at')
109 |
110 | if __name__ == '__main__':
111 | directory = "/home/manu/ros_ws/src/manu_research/data/test_figure/"
112 | import os
113 |
114 | list_filenames = []
115 | for file in os.listdir(directory):
116 | if file.endswith(".hdf5"):
117 | list_filenames.append(file)
118 | list_filenames = sorted(list_filenames)
119 | print(list_filenames)
120 |
121 | path = "/home/manu/ros_ws/src/manu_research/data/test_figure/"
122 | find_cool_images(path, list_filenames)
123 |
--------------------------------------------------------------------------------
/manu_sawyer/src/demo_pick_object.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | #inverse kinematics stuff
4 | import grip_and_record.inverse_kin
5 | from geometry_msgs.msg import (
6 | PoseStamped,
7 | Pose,
8 | Point,
9 | Quaternion,
10 | )
11 | from grip_and_record.robot_utils import Orientations
12 | import rospy
13 | import intera_interface
14 | from intera_interface import CHECK_VERSION
15 | from intera_interface import (
16 | Lights,
17 | Cuff,
18 | RobotParams,
19 | )
20 | # import wsg_50_python
21 | import numpy as np
22 | import time
23 | import grip_and_record.getch
24 | import logging
25 | from WSG50_manu import WSG50  # gripper driver used when GRIPPER == 'WSG50'
26 | logger = logging.getLogger()
27 | logger.setLevel(logging.DEBUG)
28 |
29 | # To log file
30 | fh = logging.FileHandler('demo_run_experiment.log')
31 | fh.setLevel(logging.DEBUG)
32 | logger.addHandler(fh)
33 |
34 |
35 | GRIPPER = 'WSG50'
36 |
37 |
38 | def init_robot(limb_name):
39 | epilog = """
40 | See help inside the example with the '?' key for key bindings.
41 | """
42 | rp = intera_interface.RobotParams()
43 | valid_limbs = rp.get_limb_names()
44 | if not valid_limbs:
45 | rp.log_message(("Cannot detect any limb parameters on this robot. "
46 | "Exiting."), "ERROR")
47 | return
48 |
49 | rp.log_message('Initializing node... ')
50 | rospy.init_node("move_and_grip")
51 |
52 | rp.log_message('Getting robot state... ')
53 | rs = intera_interface.RobotEnable(CHECK_VERSION)
54 | init_state = rs.state().enabled
55 |
56 | def clean_shutdown():
57 | print("\nExiting example.")
58 | if not init_state:
59 | print("Disabling robot...")
60 | rs.disable()
61 | rospy.on_shutdown(clean_shutdown)
62 |
63 | rospy.loginfo("Enabling robot...")
64 | rs.enable()
65 |     if limb_name not in valid_limbs:
66 |         rp.log_message(("%s is not a valid limb on this robot. "
67 |                         "Exiting." % limb_name), "ERROR")
68 | return
69 | limb = intera_interface.Limb(limb_name)
70 | # Move to a safe position
71 | goto_rest_pos(limb=limb, blocking=True)
72 | return limb
73 |
74 |
75 | def init_gripper(gripper='Sawyer'):
76 |     if gripper == 'Sawyer':
77 |         pass  # built-in electric gripper: no extra setup here
78 |     elif gripper == 'WSG50':
79 |         gripper = WSG50()
80 | return gripper
81 |
82 |
83 | def goto_rest_pos(limb, blocking=False):
84 | """
85 | Move the arm to a safe rest position
86 | :param limb:
87 | :param blocking: is it a blocking operation?
88 | :return:
89 | """
90 | goto_EE_xyz(limb=limb, xyz=[0.55, 0, 0.55], orientation=Orientations.DOWNWARD_ROTATED, blocking=blocking)
91 |
92 |
93 | def goto_EE_xyz(limb, xyz, orientation=Orientations.DOWNWARD_ROTATED, blocking=False):
94 | """
95 | Move the End-effector to the desired XYZ position and orientation, using inverse kinematic
96 | :param limb: link to the limb being used
97 | :param xyz: list or array [x,y,z] with the coordinates in XYZ positions in limb reference frame
98 | :param orientation:
99 | :return:
100 | """
101 | # TODO: assert x,y,z
102 | des_pose = grip_and_record.inverse_kin.get_pose(xyz[0], xyz[1], xyz[2], orientation)
103 | curr_pos = limb.joint_angles() # Measure current position
104 | joint_positions = grip_and_record.inverse_kin.get_joint_angles(des_pose, limb.name, curr_pos, use_advanced_options=False) # gets joint positions
105 | limb.move_to_joint_positions(joint_positions) # Send the command to the arm
106 |     # TODO: implement proper blocking; for now we just wait to allow movement
107 | if blocking:
108 | time.sleep(2)
109 |
110 |
111 | def main():
112 | limb_name = "right"
113 | limb = init_robot(limb_name=limb_name)
114 |     gripper = init_gripper(GRIPPER)  # so gripper.close()/open() below work with the WSG50
115 | # init_cuff(limb_name=limb_name)
116 | rp = intera_interface.RobotParams() # For logging
117 | time.sleep(1)
118 | limb.set_joint_position_speed(0.12) # Let' s move slowly...
119 |
120 |
121 | # Move arm to set position
122 | des_EE_xyz = np.array([0.55, 0, 0.3])
123 | des_orientation_EE = Orientations.DOWNWARD_ROTATED
124 | rp.log_message('Moving to x=%f y=%f' % (des_EE_xyz[0], des_EE_xyz[1]))
125 | goto_EE_xyz(limb=limb, xyz=des_EE_xyz, orientation=des_orientation_EE)
126 |
127 |
128 |     # NOTE: requires grip_and_record.getch (imported above)
129 |     print("Place the object between the fingers and press Esc to close the gripper.")
130 | done = False
131 | while not done and not rospy.is_shutdown():
132 | c = grip_and_record.getch.getch()
133 | if c:
134 | if c in ['\x1b', '\x03']:
135 | done = True
136 |
137 |
138 | gripper.close() # TODO: make sure that the closure is properly done!
139 | time.sleep(1) # give time to move
140 |
141 | # Raise the object
142 | goto_EE_xyz(limb=limb, xyz=des_EE_xyz + np.array([0, 0, 0.2]), orientation=des_orientation_EE)
143 |
144 |
145 | # Return object to the ground (a little bit higher)
146 | print("If the object is no longer grasped, clean the table and press Esc. Otherwise, Press Esc to return the object to the table.")
147 | done = False
148 | while not done and not rospy.is_shutdown():
149 | c = grip_and_record.getch.getch()
150 | if c:
151 | if c in ['\x1b', '\x03']:
152 | done = True
153 |
154 | # Return object to the ground (a little bit higher)
155 | # TODO: return the object to a new random position ?
156 | goto_EE_xyz(limb=limb, xyz=des_EE_xyz + np.array([0, 0, 0.02]), orientation=Orientations.DOWNWARD_ROTATED)
157 | time.sleep(0.5)
158 | gripper.open() # Open gripper
159 |
160 | # data_recorder.end_processes()
161 | goto_rest_pos(limb=limb, blocking=True) # Go to a safe place before shutting down
162 |
163 | rospy.signal_shutdown("Example finished.")
164 |
165 |
166 | if __name__ == '__main__':
167 | main()
168 |
--------------------------------------------------------------------------------
/manu_sawyer/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(manu_sawyer)
3 |
4 | ## Find catkin macros and libraries
5 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
6 | ## is used, also find other catkin packages
7 | find_package(catkin REQUIRED COMPONENTS
8 | rospy
9 | std_msgs
10 | geometry_msgs
11 | sensor_msgs
12 | intera_core_msgs
13 | wsg_50_common
14 | wsg_50_python
15 | )
16 |
17 | ## System dependencies are found with CMake's conventions
18 | # find_package(Boost REQUIRED COMPONENTS system)
19 |
20 |
21 | ## Uncomment this if the package has a setup.py. This macro ensures
22 | ## modules and global scripts declared therein get installed
23 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
24 | # catkin_python_setup()
25 |
26 | ################################################
27 | ## Declare ROS messages, services and actions ##
28 | ################################################
29 |
30 | ## To declare and build messages, services or actions from within this
31 | ## package, follow these steps:
32 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
33 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
34 | ## * In the file package.xml:
35 | ## * add a build_depend tag for "message_generation"
36 | ## * add a build_depend and a run_depend tag for each package in MSG_DEP_SET
37 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
38 | ## but can be declared for certainty nonetheless:
39 | ## * add a run_depend tag for "message_runtime"
40 | ## * In this file (CMakeLists.txt):
41 | ## * add "message_generation" and every package in MSG_DEP_SET to
42 | ## find_package(catkin REQUIRED COMPONENTS ...)
43 | ## * add "message_runtime" and every package in MSG_DEP_SET to
44 | ## catkin_package(CATKIN_DEPENDS ...)
45 | ## * uncomment the add_*_files sections below as needed
46 | ## and list every .msg/.srv/.action file to be processed
47 | ## * uncomment the generate_messages entry below
48 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
49 |
50 | ## Generate messages in the 'msg' folder
51 | # add_message_files(
52 | # FILES
53 | # Message1.msg
54 | # Message2.msg
55 | # )
56 |
57 | ## Generate services in the 'srv' folder
58 | # add_service_files(
59 | # FILES
60 | # Service1.srv
61 | # Service2.srv
62 | # )
63 |
64 | ## Generate actions in the 'action' folder
65 | # add_action_files(
66 | # FILES
67 | # Action1.action
68 | # Action2.action
69 | # )
70 |
71 | ## Generate added messages and services with any dependencies listed here
72 | # generate_messages(
73 | # DEPENDENCIES
74 | # std_msgs
75 | # )
76 |
77 | ################################################
78 | ## Declare ROS dynamic reconfigure parameters ##
79 | ################################################
80 |
81 | ## To declare and build dynamic reconfigure parameters within this
82 | ## package, follow these steps:
83 | ## * In the file package.xml:
84 | ## * add a build_depend and a run_depend tag for "dynamic_reconfigure"
85 | ## * In this file (CMakeLists.txt):
86 | ## * add "dynamic_reconfigure" to
87 | ## find_package(catkin REQUIRED COMPONENTS ...)
88 | ## * uncomment the "generate_dynamic_reconfigure_options" section below
89 | ## and list every .cfg file to be processed
90 |
91 | ## Generate dynamic reconfigure parameters in the 'cfg' folder
92 | # generate_dynamic_reconfigure_options(
93 | # cfg/DynReconf1.cfg
94 | # cfg/DynReconf2.cfg
95 | # )
96 |
97 | ###################################
98 | ## catkin specific configuration ##
99 | ###################################
100 | ## The catkin_package macro generates cmake config files for your package
101 | ## Declare things to be passed to dependent projects
102 | ## INCLUDE_DIRS: uncomment this if your package contains header files
103 | ## LIBRARIES: libraries you create in this project that dependent projects also need
104 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need
105 | ## DEPENDS: system dependencies of this project that dependent projects also need
106 | catkin_package(
107 | # INCLUDE_DIRS include
108 | # LIBRARIES berkeley_sawyer
109 | # CATKIN_DEPENDS rospy std_msgs
110 | # DEPENDS system_lib
111 | )
112 |
113 | ###########
114 | ## Build ##
115 | ###########
116 |
117 | ## Specify additional locations of header files
118 | ## Your package locations should be listed before other locations
119 | # include_directories(include)
120 | include_directories(
121 | ${catkin_INCLUDE_DIRS}
122 | )
123 |
124 | ## Declare a C++ library
125 | # add_library(berkeley_sawyer
126 | # src/${PROJECT_NAME}/berkeley_sawyer.cpp
127 | # )
128 |
129 | ## Add cmake target dependencies of the library
130 | ## as an example, code may need to be generated before libraries
131 | ## either from message generation or dynamic reconfigure
132 | # add_dependencies(berkeley_sawyer ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
133 |
134 | ## Declare a C++ executable
135 | # add_executable(berkeley_sawyer_node src/berkeley_sawyer_node.cpp)
136 |
137 | ## Add cmake target dependencies of the executable
138 | ## same as for the library above
139 | # add_dependencies(berkeley_sawyer_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
140 |
141 | ## Specify libraries to link a library or executable target against
142 | # target_link_libraries(berkeley_sawyer_node
143 | # ${catkin_LIBRARIES}
144 | # )
145 |
146 | #############
147 | ## Install ##
148 | #############
149 |
150 | # all install targets should use catkin DESTINATION variables
151 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
152 |
153 | ## Mark executable scripts (Python etc.) for installation
154 | ## in contrast to setup.py, you can choose the destination
155 | # install(PROGRAMS
156 | # scripts/my_python_script
157 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
158 | # )
159 |
160 | ## Mark executables and/or libraries for installation
161 | # install(TARGETS berkeley_sawyer berkeley_sawyer_node
162 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
163 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
164 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
165 | # )
166 |
167 | ## Mark cpp header files for installation
168 | # install(DIRECTORY include/${PROJECT_NAME}/
169 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
170 | # FILES_MATCHING PATTERN "*.h"
171 | # PATTERN ".svn" EXCLUDE
172 | # )
173 |
174 | ## Mark other files for installation (e.g. launch and bag files, etc.)
175 | # install(FILES
176 | # # myfile1
177 | # # myfile2
178 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
179 | # )
180 |
181 | #############
182 | ## Testing ##
183 | #############
184 |
185 | ## Add gtest based cpp test target and link libraries
186 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_berkeley_sawyer.cpp)
187 | # if(TARGET ${PROJECT_NAME}-test)
188 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
189 | # endif()
190 |
191 | ## Add folders to be run by python nosetests
192 | # catkin_add_nosetests(test)
193 |
--------------------------------------------------------------------------------
/manu_sawyer/src/WSG50_manu.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import rospy
4 | from wsg_50_common.srv import *
5 | from std_srvs.srv import *
6 | from wsg_50_common.msg import Status, Cmd
7 | import time
8 | from rospy_message_converter import message_converter
9 | import numpy as np
10 |
11 | ErrorMessage = 'Gripper not connected, skipping gripper command:'
12 |
13 | toRetry = False  # keep retrying when a motion command fails
14 |
15 |
16 | class WSG50(object):
17 | def __init__(self, speed=50):
18 | self.speed = speed
19 | self._bounds = np.array([1, 109]) # Limits of the gripper position [mm]
20 | self.subscribe_rostopic()
21 | self.homing()
22 | self.stateini()
23 |
24 |
25 | def stateini(self):
26 |         '''Workaround: issuing a few near-open position commands at startup makes the driver more stable (root cause unknown).'''
27 | self.graspmove_nopending(105, speed=0)
28 | time.sleep(0.2)
29 | self.graspmove_nopending(106, speed=0)
30 | time.sleep(0.2)
31 | self.graspmove_nopending(107, speed=0)
32 | time.sleep(0.2)
33 | self.graspmove_nopending(108, speed=0)
34 |
35 | def subscribe_rostopic(self):
36 | # rospy.init_node('qwe', anonymous=True)
37 | rospy.Subscriber("wsg_50_driver/status", Status)
38 |
39 | def move(self, pos=110, speed=None, toRetry=False):
40 | # move pos range: 0 - 110 mm
41 | # move speed range: 0- 420 mm/s
42 |
43 | if speed is None:
44 | speed = self.speed
45 | self.ack()
46 | command = 'move'
47 | srv = rospy.ServiceProxy('/wsg_50_driver/%s' % command, Move)
48 |
49 | while True:
50 | try:
51 | error = srv(pos, speed)
52 | # print '[Gripper] move, return:', error
53 | break
54 |             except rospy.ServiceException:
55 | pass
56 | # print '[Gripper] move,', ErrorMessage, command
57 |
58 | if not toRetry: break
59 | time.sleep(0.5)
60 |
61 | def set_position(self, position, velocity=None):
62 |         """
63 |         Move to the target position 'position', stopping early if the force limit is reached.
64 |         Blocks until the position is reached.
65 |         :param position: target opening [mm], clipped to self._bounds
66 |         :param velocity: speed [mm/s]; defaults to self.speed
67 |         :return: None
68 |         """
69 | if velocity is None:
70 | velocity = self.speed # if velocity is not specified, use the default one.
71 | position = np.array(position)
72 | if np.isinf(position) or np.isnan(position):
73 | print('Invalid position')
74 | else:
75 | position = np.minimum(np.maximum(position, self._bounds[0]), self._bounds[1])
76 | # rospy.loginfo('Moving gripper to %f' % position)
77 | self.graspmove_nopending(position, velocity)
78 | Moving = True
79 | while Moving:
80 | # moving_state=rospy.wait_for_message("/wsg_50_driver/moving", std_msg.Bool)
81 | # Moving= moving_state.data
82 | gripper_state = rospy.wait_for_message("/wsg_50_driver/status", Status)
83 | if 'Stopped' in gripper_state.status or 'Target Pos reached' in gripper_state.status:
84 | return
85 |
86 | def open(self, speed=None):
87 | if speed is None:
88 | speed = self.speed
89 | self.ack()
90 | # self.move(109, speed)
91 | self.set_position(position=self._bounds[1], velocity=speed)
92 | time.sleep(0.1)
93 | self.set_position(position=self._bounds[1], velocity=speed)
94 | time.sleep(0.1)
95 | self.set_position(position=self._bounds[1], velocity=speed)
96 |
97 | def close(self, speed=None):
98 | if speed is None:
99 | speed = self.speed
100 | self.ack()
101 | self.move(0, speed)
102 |
103 | def grasp(self, pos=5, speed=None):
104 | if speed is None:
105 | speed = self.speed
106 | self.ack()
107 | command = 'grasp'
108 | srv = rospy.ServiceProxy('/wsg_50_driver/%s' % command, Move)
109 |
110 | while True:
111 | try:
112 | error = srv(pos, speed)
113 | # print '[Gripper] grasp, return:', error
114 | break
115 |             except rospy.ServiceException:
116 | pass
117 | # print '[Gripper] grasp,', ErrorMessage, command
118 | if not toRetry: break
119 | time.sleep(0.5)
120 |
121 | def graspmove_nopending(self, width, speed=20):
122 |         '''Move to the target position 'width', stopping early if the force limit is reached.
123 |         Returns immediately, before the position is reached.'''
124 | cmd = Cmd()
125 | cmd.mode = 'Script'
126 | cmd.pos = width
127 | cmd.speed = speed
128 | pub = rospy.Publisher('/wsg_50_driver/goal_position', Cmd, queue_size=10)
129 | pub.publish(cmd)
130 |
131 | def release(self, pos=110, speed=80):
132 | if speed is None:
133 | speed = self.speed
134 | self.ack()
135 | command = 'release'
136 | srv = rospy.ServiceProxy('/wsg_50_driver/%s' % command, Move)
137 |
138 | while True:
139 | try:
140 | error = srv(pos, speed)
141 | # print '[Gripper] release, return:', error
142 | break
143 |             except rospy.ServiceException:
144 | pass
145 | # print '[Gripper] release,', ErrorMessage, command
146 | if not toRetry: break
147 | time.sleep(0.5)
148 |
149 | def homing(self):
150 | self.ack()
151 | command = 'homing'
152 | srv = rospy.ServiceProxy('/wsg_50_driver/%s' % command, Empty)
153 | try:
154 | error = srv()
155 |         except rospy.ServiceException:
156 | pass
157 | # print '[Gripper] homing,', ErrorMessage, command
158 |
159 | def ack(self):
160 | command = 'ack'
161 | srv = rospy.ServiceProxy('/wsg_50_driver/%s' % command, Empty)
162 | try:
163 | error = srv()
164 | except:
165 | pass
166 | # print '[Gripper] ack,', ErrorMessage, command
167 |
168 | def set_force(self, val=5):
169 | # Range: 1 to 80 (N)
170 | command = 'set_force'
171 | srv = rospy.ServiceProxy('/wsg_50_driver/%s' % command, Conf)
172 | try:
173 | error = srv(val)
174 |         except rospy.ServiceException:
175 | pass
176 | # print '[Gripper] set_force,', ErrorMessage, command
177 |
178 | def get_force(self):
179 | message = rospy.wait_for_message("/wsg_50_driver/status", Status)
180 | state = message_converter.convert_ros_message_to_dictionary(message)
181 | force = state['force']
182 | return force
183 |
184 | def get_pos(self):
185 | message = rospy.wait_for_message("/wsg_50_driver/status", Status)
186 | state = message_converter.convert_ros_message_to_dictionary(message)
187 |         pos = state['width']
188 |         return pos
189 |
190 | def get_state(self):
191 | message = rospy.wait_for_message("/wsg_50_driver/status", Status)
192 | msg = message_converter.convert_ros_message_to_dictionary(message)
193 | state = np.array([msg['width'], msg['speed'], msg['acc'], msg['force']])
194 | return state
195 |
--------------------------------------------------------------------------------
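A minimal usage sketch for the WSG50 wrapper above (a sketch under the assumption that a wsg_50_driver node is running; the node name below is arbitrary):

    import rospy
    from WSG50_manu import WSG50

    rospy.init_node('wsg50_example')
    gripper = WSG50(speed=50)    # homes the gripper on construction
    gripper.set_force(10)        # force limit in N (valid range 1-80)
    gripper.grasp(pos=5)         # close to 5 mm, stopping at the force limit
    print(gripper.get_force())   # latest force reading from the status topic
    gripper.open()               # reopen to the upper position bound (109 mm)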
/manu_sawyer/src/demo_torque_control_2_short.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | from __future__ import print_function
3 | from __future__ import absolute_import
4 |
5 | import numpy as np
6 |
7 | import rospy
8 | from std_msgs.msg import Empty
9 |
10 | import intera_interface
11 | from intera_interface import CHECK_VERSION
12 |
13 | from geometry_msgs.msg import Quaternion
14 | from inverse_kin import get_joint_angles, get_pose
15 |
16 |
17 | class SawyerEnv:
18 | def __init__(self, params):
19 |
20 | self._dyn = params.reconfig_server
21 | self._freq, self._missed_cmds = 20.0, 5.0
22 |
23 | # Control parameters
24 | self.bounds = params.bound * np.ones([7])
25 |
26 | # Create our limb instance
27 | self._limb = intera_interface.Limb(params.get('limb', 'right'))
28 |
29 | # Create cuff disable publisher
30 |         cuff_ns = "robot/limb/%s/suppress_cuff_interaction" % params.get('limb', 'right')
31 | self._pub_cuff_disable = rospy.Publisher(cuff_ns, Empty, queue_size=1)
32 |
33 | # Verify robot is enabled
34 | print("Getting robot state... ")
35 | self._rs = intera_interface.RobotEnable(CHECK_VERSION)
36 | self._init_state = self._rs.state().enabled
37 | print("Enabling robot... ")
38 | self._rs.enable()
39 | print("Running. Ctrl-c to quit")
40 |
41 | # PD controller gains for resets
42 | self.P = np.array([5, 5, 10, 15, 30, 20, 25]) # P controller gains
43 | self.K = np.array([0.7, 0.2, 1.63, 0.5, 3.5, 4.7, 6.0]) # D controller gains
44 |
45 | self._smoother_params = params.smoother_params
46 | self._smoother_history = np.zeros(shape=[self._smoother_params.get("history_length", 1), 7])
47 |
48 | self.sent_torques = []
49 |
50 | def set_command_timeout(self, t):
51 | self._limb.set_command_timeout(t)
52 |
53 | def step(self, a):
54 | self._pub_cuff_disable.publish()
55 |
56 | if not self._rs.state().enabled:
57 | rospy.logerr("Joint torque example failed to meet specified control rate timeout.")
58 | return self._get_obs(), 0, True, dict()
59 | self._set_torque(a)
60 |
61 | ob = self._get_obs()
62 | reward = 0 # Ignore reward
63 | done = False # Never done except for errors
64 | return ob, reward, done, dict()
65 |
66 | def reset(self):
67 | self._move_to_rand_pos()
68 | return self._get_obs()
69 |
70 | def get_torque_log(self):
71 | return self.sent_torques
72 |
73 | def reset_torque_log(self):
74 | self.sent_torques = []
75 |
76 | def clean_shutdown(self):
77 | self._limb.exit_control_mode()
78 |
79 | def _get_obs(self):
80 | """Returns the current observation
81 |
82 | :return:
83 | """
84 | angles = self._joint_dict_to_arr(self._limb.joint_angles())
85 | velocities = self._joint_dict_to_arr(self._limb.joint_velocities())
86 | return np.concatenate([angles, velocities], axis=0)
87 |
88 | def _set_torque(self, torques, log_torque=True, smooth_torque=True):
89 | """Preprocess and send control signal to the Sawyer
90 |
91 | :param torques: Torques to be sent to the Sawyer
92 |         :param log_torque: bool indicating whether or not to log the sent torques.
93 | :return: None
94 | """
95 | assert len(torques) == 7, "Torque must have 7 elements"
96 | torques = np.array(torques)
97 | torques[np.isinf(torques)], torques[np.isnan(torques)] = 0, 0
98 | torques = np.maximum(-self.bounds, np.minimum(self.bounds, torques)) # Bounds torques
99 |
100 | if smooth_torque and self._smoother_params.smooth_torques:
101 | torques = self._smooth_torques(torques)
102 | if log_torque:
103 | self.sent_torques.append(torques)
104 |
105 | self._limb.set_joint_torques(self._arr_to_joint_dict(torques))
106 |
107 | def _move_to_rand_pos(self):
108 | """Moves the end-effector to a random position.
109 |
110 | :return: None
111 | """
113 |
114 | angle_target = None
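        # Axis-angle to quaternion: q = (axis * sin(theta/2), cos(theta/2)).
        # The 180-degree rotation about +y below fixes the end-effector
        # orientation (presumably gripper-down) while the position target
        # is randomized.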
115 | unit_vector = np.array([0, 1, 0]) # Axis of rotation
116 | angle = 180 * np.pi / 180 # Angle of rotation
117 | mod_vec = unit_vector * np.sin(angle/2)
118 | quaternion = Quaternion(
119 | x=mod_vec[0], y=mod_vec[1], z=mod_vec[2],
120 | w=np.cos(angle/2)
121 | )
122 |
123 | control_rate = rospy.Rate(self._freq)
124 | self.set_command_timeout((1.0 / self._freq) * self._missed_cmds)
125 |
126 | # Setting the target
127 |
128 | while angle_target is None:
129 | ee_target = [np.random.uniform(0.6, 0.7), np.random.uniform(-0.15, 0.15), np.random.uniform(0.25, 0.45)]
130 | res_dict = get_joint_angles(
131 | get_pose(x=ee_target[0], y=ee_target[1], z=ee_target[2], o=quaternion),
132 | use_advanced_options=False
133 | )
134 | if res_dict is not None:
135 | angle_target = self._joint_dict_to_arr(res_dict)
136 | else:
137 | print("Unreachable target. Sending zero torques and recomputing.")
138 | self._limb.set_joint_torques(self._arr_to_joint_dict(np.zeros(shape=[7])))
139 | control_rate.sleep()
140 |
141 | curr_bound = np.copy(self.bounds)
142 | self.bounds = 9 * np.ones([7])
143 |
144 | # PD Controller
145 |
146 | while np.linalg.norm(angle_target - self._joint_dict_to_arr(self._limb.joint_angles())) > 0.3 or \
147 | np.linalg.norm(self._joint_dict_to_arr(self._limb.joint_velocities())) > 0.01:
148 | if not self._rs.state().enabled:
149 | rospy.logerr("Joint torque example failed to meet "
150 | "specified control rate timeout.")
151 | break
152 |
153 | self._pub_cuff_disable.publish()
154 |
155 | if not self._rs.state().enabled:
156 | return
157 |
158 | cmd = np.zeros(7)
159 | cur_pos = self._joint_dict_to_arr(self._limb.joint_angles())
160 | cur_vel = self._joint_dict_to_arr(self._limb.joint_velocities())
161 | for i in range(7):
162 | e = angle_target[i] - cur_pos[i]
163 | cmd[i] = self.P[i] * e - self.K[i] * cur_vel[i]
164 |
165 | self._set_torque(cmd, log_torque=False, smooth_torque=False)
166 | control_rate.sleep()
167 |
168 | self.bounds = curr_bound
169 |
170 | def _smooth_torques(self, torque):
171 | self._smoother_history = np.concatenate([np.array([torque]), self._smoother_history], axis=0)[:-1]
172 | return np.mean(self._smoother_history, axis=0)
173 |
174 | @staticmethod
175 | def _joint_dict_to_arr(joint_dict):
176 | """Converts from a dictionary representation of the joints
177 | to an array.
178 |
179 | :param joint_dict: The state of the limb as a dictionary.
180 | :return: The converted state.
181 | """
182 | return np.array([
183 | joint_dict['right_j6'],
184 | joint_dict['right_j5'],
185 | joint_dict['right_j4'],
186 | joint_dict['right_j3'],
187 | joint_dict['right_j2'],
188 | joint_dict['right_j1'],
189 | joint_dict['right_j0'],
190 | ])
191 |
192 | @staticmethod
193 | def _arr_to_joint_dict(arr):
194 | """Converts from an array representation of the joints
195 | to a dict.
196 |
197 | :param arr: The state of the limb as an array.
198 | :return: The converted state.
199 | """
200 | return {
201 | 'right_j6': arr[0],
202 | 'right_j5': arr[1],
203 | 'right_j4': arr[2],
204 | 'right_j3': arr[3],
205 | 'right_j2': arr[4],
206 | 'right_j1': arr[5],
207 | 'right_j0': arr[6],
208 | }
209 |
--------------------------------------------------------------------------------
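A minimal driver sketch for SawyerEnv above. The `params` DotMap mirrors the attributes the constructor reads; all values here are illustrative assumptions, and `reconfig_server` is only stored, never used in this file:

    import rospy
    import numpy as np
    from dotmap import DotMap
    from demo_torque_control_2_short import SawyerEnv

    rospy.init_node('sawyer_env_example')
    params = DotMap()
    params.reconfig_server = None                   # placeholder, unused here
    params.bound = 3.0                              # per-joint torque bound [Nm]
    params.limb = 'right'
    params.smoother_params = DotMap(smooth_torques=True, history_length=5)

    env = SawyerEnv(params)
    env.set_command_timeout(0.5)
    ob = env.reset()                                # PD-drive to a random pose
    rate = rospy.Rate(20)
    for _ in range(100):
        ob, reward, done, info = env.step(np.zeros(7))  # hold with zero torque
        rate.sleep()
    env.clean_shutdown()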
/manu_sawyer/src/process_data.py:
--------------------------------------------------------------------------------
1 | # Compatibility Python 2/3
2 | from __future__ import division, print_function, absolute_import
3 | from builtins import range
4 | # ----------------------------------------------------------------------------------------------------------------------
5 |
6 | from dotmap import DotMap
7 | import matplotlib.pyplot as plt
8 | import numpy as np
9 | import h5py
10 | import cv2
11 | import tensorflow_model_is_gripping.aolib.img as ig, tensorflow_model_is_gripping.aolib.util as ut
12 |
13 | __version__ = '0.2'
14 | __author__ = 'Roberto Calandra'
15 |
16 |
17 | def align_time(times):
18 | return times - times[0], times[0]
19 |
20 |
21 | def delta_time(times):
22 |     return times[1:] - times[:-1]  # pairwise differences between consecutive timestamps
23 |
24 |
25 | def import_data(namefile):
26 | """
27 | Import a file into a formatted structure
28 | :param namefile:
29 | :return: data structure of type DotMap with the following fields:
30 | data.n_steps
31 | data.time
32 | data.kinect.A.image
33 | data.kinect.A.depth
34 | data.kinect.B.image
35 | data.kinect.B.depth
36 | data.gelsight.A.image
37 | data.gelsight.B.image
38 |
39 | """
40 | data = DotMap()
41 | data.n_steps = []
42 | data.frequency = []
43 | data.time = []
44 | data.robot.limb = []
45 | data.robot.gripper = []
46 | data.robot.EE = [] # TODO: not implemented yet
47 | data.kinect.A.image = []
48 | data.kinect.A.depth = []
49 | data.kinect.B.image = []
50 | data.kinect.B.depth = []
51 | data.gelsight.A.image = []
52 | data.gelsight.B.image = []
53 | data.kinect.B.hd.image = []
54 |
55 | print('Opening file: %s' % namefile)
56 | f = h5py.File(namefile, "r")
57 | data.n_steps = f['n_steps'].value
58 | print('Number of steps: %d' % data.n_steps)
59 | data.time = f['/timestamp'].value
60 |     data.kinect.A.image = list(map(ig.uncompress, f['/color_image_KinectA'].value))  # list() so this also works on Python 3, where map is lazy
61 | data.kinect.A.depth = f['/depth_image_KinectA'].value
62 |     data.kinect.B.image = list(map(ig.uncompress, f['/color_image_KinectB'].value))
63 | #decompress = lambda x : cv2.imdecode(np.asarray(bytearray(x), dtype = np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
64 | #data.kinect.B.image = map(decompress, f['/color_image_KinectB'].value)
65 | data.kinect.B.depth = f['/depth_image_KinectB'].value
66 | #cv2.imencode('.png', data.kinect.B.depth)
67 |
68 | # import lz4
69 | # ut.tic()
70 | # s = lz4.dumps(data.kinect.B.depth.tostring())
71 | # ut.toc('compress')
72 | # g = np.fromstring(lz4.loads(s), dtype = np.uint16)
73 | # g = g.reshape(data.kinect.B.depth.shape)
74 | # #print(g.dtype, g.shape)
75 | # #print(g)
76 | # assert((g == data.kinect.B.depth).all())
77 | # print('size before', ut.guess_bytes(data.kinect.B.depth))
78 | # print('size after', ut.guess_bytes(s))
79 |
80 | # import PIL.Image
81 | # import png
82 | # from StringIO import StringIO
83 | # im = data.kinect.B.depth[0, :, :, 0]
84 | # writer = png.Writer(width = im.shape[1], height = im.shape[0], bitdepth=16, compression = 3, greyscale = True)
85 | # io = StringIO()
86 | # ut.tic()
87 | # print(im.shape)
88 | # #writer.write(open('/tmp/test.png', 'w'), im)
89 | # writer.write(io, im)
90 | # ut.toc()
91 | # #u = io.getvalue()
92 | # import itertools
93 | # #pngdata = png.Reader(StringIO(io.getvalue())).read()[2]
94 | # #pngdata = png.Reader(StringIO(io.getvalue())).asDirect()[2]
95 | # #u = np.vstack(itertools.imap(np.uint16, pngdata))
96 | # #u = np.array(PIL.Image.open('/tmp/test.png'))
97 | # u = np.array(PIL.Image.open(StringIO(io.getvalue())))
98 | # #u = np.array(PIL.Image.open(StringIO(io.getvalue())), dtype = np.uint16)
99 | # #rint(u)
100 | # #print (u.shape, u.dtype, u[0])
101 | # assert((u == im).all())
102 | # print((u == im).all())
103 | # print('size before', ut.guess_bytes(im))
104 | # print('size after', ut.guess_bytes(io.getvalue()))
105 |
106 | # import PIL.Image
107 | # import png
108 | # from StringIO import StringIO
109 | # #im = data.kinect.B.depth[0, :, :, 0]
110 | # im = data.kinect.B.depth[..., 0]
111 | # im = im.reshape((im.shape[0]*im.shape[1], im.shape[2]))
112 | # writer = png.Writer(width=im.shape[1], height=im.shape[0], bitdepth=16, compression=3, greyscale=True)
113 | # io = StringIO()
114 | # ut.tic()
115 | # writer.write(io, im)
116 | # ut.toc()
117 | # #
118 | # import itertools
119 | # u = np.array(PIL.Image.open(StringIO(io.getvalue())))
120 | # assert ((u == im).all())
121 | # print((u == im).all())
122 | # print('size before', ut.guess_bytes(im))
123 | # print('size after', ut.guess_bytes(io.getvalue()))
124 | #
125 | # return
126 |
127 |     data.gelsight.A.image = list(map(ig.uncompress, f['/GelSightA_image'].value))
128 |     data.gelsight.B.image = list(map(ig.uncompress, f['/GelSightB_image'].value))
129 | data.kinect.B.hd.image = f['/initial_hd_color_image_KinectB'].value
130 |
131 | plt.imshow(data.kinect.B.hd.image)
132 |
133 |
134 | # Convert to np arrays
135 | data.time = np.array(data.time)
136 | data.time, data.start_time = align_time(data.time) # align time to first frame
137 | data.robot.limb = np.array(data.robot.limb)
138 | data.robot.gripper = np.array(data.robot.gripper)
139 | data.kinect.A.image = np.array(data.kinect.A.image)
140 | data.kinect.A.depth = np.array(data.kinect.A.depth).squeeze()
141 | data.kinect.B.image = np.array(data.kinect.B.image)
142 | data.kinect.B.depth = np.array(data.kinect.B.depth).squeeze()
143 | data.gelsight.A.image = np.array(data.gelsight.A.image)
144 | data.gelsight.B.image = np.array(data.gelsight.B.image)
145 |     data.events = np.array([f['/time_pre_grasping'].value, f['/time_at_grasping'].value, f['/time_post1_grasping'].value,
146 |                             f['/time_post2_grasping'].value]) - data.start_time
147 | print('Done')
148 | return data
149 |
150 |
151 | def plot_state_limb(data):
152 | plt.figure()
153 | plt.plot(data.robot.limb)
154 |
155 |
156 | def plot_gelsight_image(data, gelsight='A', id_frame=0):
157 | plt.figure()
158 | if gelsight == 'A':
159 | plt.imshow(data.gelsight.A.image[id_frame])
160 | plt.title('GelSight A (t=%d)' % id_frame)
161 | if gelsight == 'B':
162 | plt.imshow(data.gelsight.B.image[id_frame])
163 | plt.title('GelSight B (t=%d)' % id_frame)
164 | plt.axis('off')
165 |
166 |
167 | def plot_kinect_depth(data, kinect='A', id_frame=0):
168 | """
169 | Plot image from the kinect depth
170 | :param data:
171 | :param kinect:
172 | :param id_frame:
173 | :return:
174 | """
175 | plt.figure()
176 | if kinect == 'A':
177 | plt.imshow(data.kinect.A.depth[id_frame], cmap='gray')
178 | plt.title('Kinect A (t=%d)' % id_frame)
179 | if kinect == 'B':
180 | plt.imshow(data.kinect.B.depth[id_frame], cmap='gray')
181 | plt.title('Kinect B (t=%d)' % id_frame)
182 | plt.axis('off')
183 | # plt.show()
184 |
185 |
186 | def plot_kinect_image(data, kinect='A', id_frame=0):
187 | plt.figure()
188 | if kinect == 'A':
189 | plt.imshow(data.kinect.A.image[id_frame])
190 | plt.title('Kinect A (t=%d)' % id_frame)
191 | if kinect == 'B':
192 | plt.imshow(data.kinect.B.image[id_frame])
193 | plt.title('Kinect B (t=%d)' % id_frame)
194 | plt.axis('off')
195 |
196 |
197 | def plot_frequency(data):
198 | plt.figure()
199 | # TODO: plot Groundtruth
200 | plt.hist(delta_time(data.time), 200)
201 | plt.xlabel('Delta time')
202 |
203 |
204 | def plot_delta_time(data):
205 | plt.figure()
206 | plt.plot(delta_time(data.time))
207 | plt.ylabel('Delta Time')
208 |
209 |
210 | def plot_events_time(data):
211 | plt.figure()
212 |     plt.plot(data.time[1:], delta_time(data.time))
213 | plt.scatter(data.events, np.array([0, 0, 0, 0]))
214 | plt.ylabel('Time')
215 |
216 |
217 | if __name__ == '__main__':
218 | nameFile = '/media/rcalandra/Data/datasets/gelsight/2017-06-06_1056PM_soft_red_cube_00000008.hdf5'
219 | nameFile = '/home/manu/ros_ws/src/manu_research/data/2017-06-20_162020.hdf5'
220 | nameFile = '/home/manu/ros_ws/src/manu_research/data/2017-06-20_162049.hdf5'
221 | nameFile = '/home/manu/ros_ws/src/manu_research/data/2017-06-20_195022.hdf5'
222 | nameFile = '/home/manu/ros_ws/src/manu_research/data/2017-07-06_164134.hdf5'
223 | data = import_data(namefile=nameFile)
224 | plot_state_limb(data=data)
225 | plot_kinect_depth(data=data, kinect='B', id_frame=5)
226 | plot_kinect_image(data=data, kinect='B', id_frame=5)
227 | plot_gelsight_image(data=data, gelsight='A', id_frame=5)
228 | plot_frequency(data)
229 | plot_delta_time(data)
230 | plot_events_time(data)
231 | plt.show()
232 |
--------------------------------------------------------------------------------
/manu_sawyer/src/grasp_cnn/segment.py:
--------------------------------------------------------------------------------
1 | import cv2 as cv, numpy as np, planefit, aolib.util as ut, aolib.img as ig, h5py
2 | import scipy.ndimage
3 |
4 | dilate = scipy.ndimage.morphology.binary_dilation
5 |
6 | # todo: support multiple kinects
7 | fx = 364.506103515625
8 | fy = 364.506103515625
9 | cx = 258.99951171875
10 | cy = 202.4136962890625
11 | camera_up_dir = np.array([0., -1., 0])
12 |
13 | def camera_matrix():
14 | return np.array([[fx, 0., cx],
15 | [0., -fy, cy],
16 | [0., 0., 1.]])
17 |
18 | def ptm_from_depth(depth):
19 | """ Returns a "point map" ptm that maps pixel coordinates to 3D
20 | coordinates, and a flag that indicates whether the pixel contains a
21 | valid depth estimate; specifically, ptm[i, j, :] = [X, Y, Z, ok].
22 | """
23 | # adapted from https://github.com/OpenKinect/libfreenect2/issues/41#issuecomment-59919674
24 | depth = np.float32(depth)
25 | yi, xi = map(np.float32, np.mgrid[:depth.shape[0], :depth.shape[1]])
26 | z = depth/1000.
27 | x = (xi - cx)*z / fx
28 | y = -(yi - cy)*z / fy
29 | ok = depth > 0
30 | return np.concatenate([v[:, :, None] for v in [x, y, z, ok]], axis = 2)
31 |
32 | def nrm_from_ptm(ptm, step = 1):
33 | p1 = np.roll(ptm, step, axis = 0)
34 | p2 = np.roll(ptm, step, axis = 1)
35 | v1 = ptm - p1
36 | v2 = ptm - p2
37 | nrm = ut.normalize_im(np.cross(v1[:, :, :3], v2[:, :, :3]))
38 |
39 | nrm[:step, :] = 0
40 | nrm[:, :step] = 0
41 | nrm[ptm[:, :, 3] < 1] = 0
42 | nrm[p1[:, :, 3] < 1] = 0
43 | nrm[p2[:, :, 3] < 1] = 0
44 |
45 | return np.concatenate([nrm, np.any(nrm != 0, axis = 2)[:, :, np.newaxis]], axis = 2)
46 |
47 | def pts_from_ptm(ptm, inds = False):
48 | if inds:
49 | i, j = np.nonzero(ptm[:, :, 3] == 1)
50 | return ptm[i, j, :3], (i, j)
51 | else:
52 | return ptm[ptm[:, :, 3] == 1, :3]
53 |
54 | def color_normals(ptm):
55 | nrm = nrm_from_ptm(ptm, 1)
56 | im = np.uint8(np.abs(nrm[:, :, :3])*255)
57 | im[nrm[:, :, 3] == 0] = 0
58 | return im
59 |
60 | def fit_ground_plane(depth, num_inputs = 4):
61 | # fit plane
62 | ptm = ptm_from_depth(depth)
63 | thresh = 0.02
64 | h, w = ptm.shape[:2]
65 | rect = (int(0.25*w), int(0.4*h), int(0.5*w), int(0.25*h))
66 | ptm_sub = ig.sub_img(ptm, rect)
67 |
68 | plane, _ = planefit.fit_plane_ransac(pts_from_ptm(ptm_sub), thresh, seed = 0)
69 | # choose plane such that the normal points up
70 | if np.dot(plane[:3], camera_up_dir) < 0:
71 | #print 'Flipping plane, so that the normal points up'
72 | plane = -plane
73 | #print 'Plane:', plane
74 |
75 | # show points that fit on plane
76 | nrm_vis = color_normals(ptm)
77 | dist = planefit.dist_from_plane_ptm(ptm, plane)
78 | on_plane = (dist <= thresh) & (ptm[:, :, 3] > 0)
79 | plane_vis = nrm_vis.copy()
80 | plane_vis[on_plane] = 255
81 | #ig.show(['Plane inliers:', plane_vis, 'Normals:', nrm_vis, 'Flipped:', np.flipud(nrm_vis)])
82 | return plane
83 |
84 | def in_largest_cc(mask, dilation = 0):
85 | if dilation:
86 | mask = dilate(mask, iterations = dilation).astype('bool')
87 | labels, num_labels = scipy.ndimage.label(mask)
88 | counts = np.bincount(labels.flatten())
89 | counts[0] = -1
90 | if len(counts) < 2:
91 | raise RuntimeError('Could not find cc!')
92 | in_cc = (labels == counts.argmax())
93 | return in_cc & mask
94 |
95 | def parallel_axes(plane):
96 | x, y, z = plane[:3]
97 | a = np.array
98 | if x == 0:
99 | u = a([0., z, -y])
100 | elif y == 0:
101 | u = a([z, 0., -x])
102 | else:
103 | u = a([-y, x, 0.])
104 | v = ut.normalized(np.cross(plane[:3], u))
105 | return a([u, v])
106 |
107 | def ok_pts(pts):
108 | return pts[pts[:, 3] > 0, :3]
109 |
110 | def fit_cylinder(depth0, depth, plane_thresh = 0.02, new_thresh = 0.02, inlier_frac = 0.9925, show = True):
111 | # compute occupancy map for the first frame (should precompute when using many frames...)
112 | plane = fit_ground_plane(depth0)
113 | ptm0 = ptm_from_depth(depth0)
114 | pts0 = pts_from_ptm(ptm0)
115 | on_plane0 = planefit.dist_from_plane_ptm(ptm0, plane) < plane_thresh
116 | on_plane0 = in_largest_cc(on_plane0)
117 | proj_plane0 = planefit.project_onto_plane(plane, ok_pts(ptm0[on_plane0]))
118 |
119 | ptm = ptm_from_depth(depth)
120 | has_pt = ptm[:, :, 3] != 0
121 |
122 | # find points that are very close to the table's surface
123 | pts, inds = pts_from_ptm(ptm, inds = True)
124 | proj_plane = planefit.project_onto_plane(plane, pts)
125 | ok = ut.knnsearch(proj_plane0, proj_plane, method = 'kd')[0].flatten() <= 0.005
126 | near_surface = np.zeros(ptm.shape[:2], 'bool')
127 | near_surface[inds[0][ok], inds[1][ok]] = True
128 |
129 | # find points that are not in the original point cloud
130 | dist = ut.knnsearch(pts0, pts, method = 'kd')[0].flatten()
131 | new_pt = np.zeros_like(near_surface)
132 | ok = dist > new_thresh
133 | new_pt[inds[0][ok], inds[1][ok]] = True
134 |
135 | # todo: probably want to filter out the robot's gripper, e.g. with a height check
136 | occ_before_cc = new_pt & near_surface & has_pt
137 | occ = in_largest_cc(occ_before_cc)
138 |
139 | # segment object and find height
140 | pts = ptm[occ]
141 | pts = pts[pts[:, 3] > 0, :3]
142 | height = np.percentile(np.abs(planefit.dist_to_plane(plane, pts)), 99.7)
143 | print 'Height: %.3fm' % height
144 |
145 | # find an ellipse that covers the occupied points
146 | # todo: want to deal with self-occlusion here (kinect won't see the backside of the object)
147 | pts = ok_pts(ptm[occ])
148 | proj = planefit.project_onto_plane(plane, pts)
149 | # add a slight bias toward choosing farther z points, since these will often be self-occluded
150 | #center = np.array(list(np.median(proj[:, :2], axis = 0)) + [np.percentile(proj[:, 2], 70.)])
151 | center = np.array(list(np.median(proj[:, :2], axis = 0))
152 | + [np.percentile(proj[:, 2], 75.)])
153 | d = np.sqrt(np.percentile(np.sum((proj - center)**2, 1), 95.)) + 0.01
154 | scales = np.array([d, d])
155 |
156 | # show ellipse
157 | if show:
158 | # show points in/out of cylinder
159 | #nrm_vis = color_normals(ptm)
160 | ptm_vis = ut.clip_rescale_im(ptm[:, :, 2], 0., 2.)
161 | oval_vis1 = ptm_vis.copy()
162 | oval_missing_vis = ptm_vis.copy()
163 | for y in xrange(ptm.shape[0]):
164 | for x in xrange(ptm.shape[1]):
165 | if ptm[y, x, 3]:
166 | pt = ptm[y, x, :3]
167 | proj = (planefit.project_onto_plane(plane, pt[None]) - center)[0]
168 | ok = ((proj[:2]/scales[:2])**2).sum() <= 1.
169 | ok = ok and planefit.dist_to_plane(plane, pt[None], signed = True)[0] <= height
170 | if ok and occ[y, x]:
171 | oval_vis1[y, x] = 255
172 | elif occ[y, x]:
173 | # not covered
174 | oval_missing_vis[y, x] = 255
175 |
176 | # show cylinder
177 | axes = parallel_axes(plane)
178 | pts_lo, pts_hi = [], []
179 | for theta in np.linspace(0., 2*np.pi, 100):
180 | x = np.array([np.cos(theta), np.sin(theta)])
181 | pt = axes.T.dot(x * scales[:2]) + center
182 | pts_lo.append(pt.flatten())
183 | pts_hi.append(pt + height*plane[:3])
184 | print plane[:3]
185 | pts_lo, pts_hi = map(np.array, [pts_lo, pts_hi])
186 | proj_lo = ut.inhomog(camera_matrix().dot(pts_lo.T)).T
187 | proj_hi = ut.inhomog(camera_matrix().dot(pts_hi.T)).T
188 |
189 | oval_vis = ptm_vis.copy()
190 | c1 = (128, 0, 0)
191 | c2 = (0, 0, 128)
192 | c3 = (0, 128, 0)
193 | oval_vis = ig.draw_lines(oval_vis, proj_lo[:-1], proj_lo[1:], colors = c1, width = 2)
194 | oval_vis = ig.draw_lines(oval_vis, proj_hi[:-1], proj_hi[1:], colors = c2, width = 2)
195 | oval_vis = ig.draw_lines(oval_vis, [proj_hi[0]], [proj_lo[0]], colors = c3, width = 2)
196 | oval_vis = ig.draw_lines(oval_vis, [proj_hi[len(proj_hi)/2]],
197 | [proj_lo[len(proj_hi)/2]], colors = c3, width = 2)
198 |
199 | def make_vis(x):
200 | v = ptm_vis.copy()
201 | v[x] = 255
202 | return np.flipud(v)
203 |
204 | ig.show(['parametric ellipse:', np.flipud(oval_vis),
205 | 'ellipse:', np.flipud(oval_vis1),
206 | 'missing:', np.flipud(oval_missing_vis),
207 | 'occ:', make_vis(occ),
208 | 'occ_before_cc:', make_vis(occ_before_cc),
209 | 'near_surface', make_vis(near_surface),
210 | 'new_pt', make_vis(new_pt),
211 | 'has_pt', make_vis(has_pt),
212 | 'on_plane0', make_vis(on_plane0),
213 | 'input:', np.flipud(ptm_vis),
214 | ])
215 |
216 | ut.toplevel_locals()
217 |
218 | #def test(data_file = '/media/ao/hd2/gelsight-data/entire_data_success_test_2_data_0.hdf5'):
219 | def test(data_file = '/media/ao/hd2/gelsight-data/kinect_data.hdf5'):
220 | depths = []
221 | with h5py.File(data_file, 'r') as f:
222 | for frame in [0, 100, 500, 750, 2000]:
223 | dset = f['step_%d' % frame]
224 | depth = dset['camera_depth'][:, :, 0]
225 | depths.append(np.array(depth, 'float32'))
226 | for i in xrange(1, len(depths)):
227 | fit_cylinder(depths[0], depths[i])
228 | ut.toplevel_locals()
229 |
230 |
--------------------------------------------------------------------------------
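A minimal usage sketch for the segmentation pipeline above (Python 2, like the module; the file path and frame indices are illustrative, following test()):

    import h5py, numpy as np
    import segment

    with h5py.File('kinect_data.hdf5', 'r') as f:
        d0 = np.float32(f['step_0']['camera_depth'][:, :, 0])    # empty table
        d1 = np.float32(f['step_100']['camera_depth'][:, :, 0])  # object added
    plane = segment.fit_ground_plane(d0)      # RANSAC table plane [a, b, c, d]
    segment.fit_cylinder(d0, d1, show=False)  # segment + bound the new object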
/manu_sawyer/src/grasp_cnn/aolib/parutil.py:
--------------------------------------------------------------------------------
1 | # todo: rewrite
2 | #from IPython import parallel
3 | import ipyparallel as parallel
4 | import util as ut, os, sys, time
5 | try:
    from ipyparallel.util import interactive  # needed by targets_by_hostname; assumes ipyparallel kept this IPython.parallel helper
except ImportError:
    interactive = lambda f: f  # fallback: run the function unmodified
6 | import subprocess, numpy as np
7 |
8 | ip_parallel = parallel
9 |
10 | p = None
11 | part_info = None
12 | builtin_map = __builtins__['map']
13 |
14 | def relative_module(m):
15 | return hasattr(m, '__file__') \
16 | and ((not m.__file__.startswith('/')) \
17 | or m.__file__.startswith(os.getcwd()))
18 |
19 | def safe_reload(m):
20 | if relative_module(m) and (not hasattr(m, '__no_autoreload__')):
21 | print 'reloading'
22 | reload(m)
23 |
24 | def run_reload():
25 | excludes = set(['__main__', 'autoreload'])
26 | for name, m in list(sys.modules.iteritems()):
27 | if m and relative_module(m) and (name not in excludes) and (not hasattr(m, '__no_autoreload__')):
28 | reload(m)
29 |
30 | class Parallel:
31 | def __init__(self, config = None, rc = None, view = None, bview = None):
32 | self.config = config
33 | self._rc = rc
34 | self._view = view
35 | self._bview = bview
36 |
37 | def init_rc(self, reset = False):
38 | if reset or (self._rc is None):
39 | if self.config is None:
40 | self._rc = ip_parallel.Client()
41 | else:
42 | self._rc = ip_parallel.Client(self.config)
43 | self._view = self._rc.direct_view()
44 | #self._view = self._rc.load_balanced_view()
45 | self._bview = self._rc.load_balanced_view()
46 |
47 | def view(self):
48 | self.init_rc()
49 | return self._view
50 |
51 | def bview(self):
52 | self.init_rc()
53 | return self._bview
54 |
55 |
56 | def rc(self):
57 | self.init_rc()
58 | return self._rc
59 |
60 | def per_machine_subset(self, n_per_machine):
61 | targets = self.targets_by_hostname()
62 | target_subset = ut.flatten([ut.sample_at_most(ts, n_per_machine) \
63 | for ts in targets.values()])
64 | return Parallel(self.config, self.rc(), self.rc()[target_subset])
65 |
66 | def partition(self, n, i):
67 | target_subset = [j for j in self.rc()[:].targets if j % n == i]
68 | return Parallel(self.config, self.rc(), self.rc()[target_subset])
69 |
70 | def targets_by_hostname(self):
71 | # initialize parallel_rc
72 | self.import_modules(['subprocess'], False)
73 | targets_by_hostname = {}
74 | for t in self.rc()[:].targets:
75 | v = self.rc()[[t]]
76 | [hostname] = v.map_sync(interactive(lambda x: subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE).stdout.read().rstrip()), [1])
77 | if hostname not in targets_by_hostname:
78 | targets_by_hostname[hostname] = []
79 | targets_by_hostname[hostname].append(t)
80 | return targets_by_hostname
81 |
82 | def execute(self, code):
83 | r = self.view().execute(code)
84 | r.wait()
85 | if not r.successful():
86 |             print 'parutil.execute error:'
87 | print r.result()
88 | assert r.successful()
89 |
90 | # def list_machines(self):
91 | # self.view().execute('import subprocess, os').wait()
92 | # res = self.view().map_sync(interactive(lambda x : subprocess.Popen('hostname', shell=True,
93 | # stdout=subprocess.PIPE).stdout.read().rstrip()), range(200))
94 | # print 'Running on:'
95 | # ut.prn_lines(set(res))
96 |
97 | def list_machines(self):
98 | self.view().execute('import subprocess, os').wait()
99 | # res = self.view().map_sync(interactive(lambda x : (subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE).stdout.read().rstrip(),
100 | # os.getpid())), range(2000))
101 | res = self.view().map_sync(lambda x : (subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE).stdout.read().rstrip(),
102 | os.getpid()), range(2000))
103 | print 'Running on:'
104 | machines = {}
105 | for x, p in res:
106 | if x not in machines:
107 | machines[x] = set()
108 | machines[x].add(p)
109 | for x, ps in machines.items():
110 | print x, len(ps)
111 |
112 | def send_var(self, var_name, val):
113 | res = self.view().push({var_name : val})
114 | res.wait()
115 | assert res.successful()
116 |
117 | def import_modules(self, packages, reload = False):
118 | packages = [('aolib.parutil', 'parutil')] + list(packages)
119 | import_cmd = ''
120 | reload_cmd = ''
121 | for p in packages:
122 | if len(import_cmd) != 0:
123 | import_cmd += ', '
124 | reload_cmd += '; '
125 | if type(p) == type((1,)):
126 | import_cmd += '%s as %s' % p
127 | reload_cmd += 'parutil.safe_reload(%s)' % p[1]
128 | else:
129 | import_cmd += p
130 | reload_cmd += 'parutil.safe_reload(%s)' % p
131 |
132 | if reload:
133 | cmd = 'import %s; %s' % (import_cmd, reload_cmd)
134 | else:
135 | cmd = 'import %s' % import_cmd
136 | self.execute(cmd)
137 |
138 | def reload_modules(self):
139 | self.execute('from aolib import parutil as parutil, areload; parutil.run_reload()')
140 | #self.execute('import aolib.parutil as parutil, areload; parutil.run_reload()')
141 |
142 | def send_var_fs(self, var_name, val):
143 | fname = ut.make_temp_nfs('.pk')
144 | ut.save(fname, val)
145 | res = self.view().execute('import util as ut; ut.wait_for_file("%s", 30); %s = ut.load("%s")' % (fname, var_name, fname))
146 | res.wait()
147 | assert res.successful()
148 | os.remove(fname)
149 |
150 | def clear(self):
151 | self.view().results.clear()
152 | self.rc().results.clear()
153 | self.rc().metadata.clear()
154 |
155 | # def maybe_map_sync(self, parallel, f, xs, imports = []):
156 | # if parallel:
157 | # return self.map_sync(f, xs, imports)
158 | # else:
159 | # # assume imports have already been loaded if not parallel
160 | # if type(xs) == type((1,)):
161 | # return map(f, *xs)
162 | # else:
163 | # return map(f, xs)
164 |
165 | # def maybe_map_sync(self, parallel, f, xs, imports = [], vars = {}):
166 | # if parallel:
167 | # return self.map_sync(f, xs, imports)
168 | # else:
169 | # # todo: is it better to save these temporarily? or would it only make it more confusing?
170 | # f.func_globals.update(vars)
171 | # # assume imports have already been loaded if not parallel
172 | # if type(xs) == type((1,)):
173 | # return map(f, *xs)
174 | # else:
175 | # return map(f, xs)
176 |
177 | def maybe_map_sync(self, parallel, f, xs, vars = {}, imports = [], do_reload = True):
178 | if parallel:
179 | return self.map_sync(f, xs, vars, imports, do_reload = do_reload)
180 | else:
181 | # todo: is it better to save these temporarily? or would it only make it more confusing?
182 | if len(vars):
183 | f.func_globals.update(vars)
184 | # assume imports have already been loaded if not parallel
185 | if type(xs) == type((1,)):
186 | return builtin_map(f, *xs)
187 | else:
188 | return builtin_map(f, xs)
189 |
190 | def map(self, parallel, f, xs, vars = {}, imports = [], do_reload = True):
191 | return self.maybe_map_sync(parallel, f, xs, vars, imports, do_reload = do_reload)
192 |
193 | def amap(self, *args, **kwargs):
194 | return np.array(self.map(*args, **kwargs))
195 |
196 | def map_sync(self, f, xs, vars = {}, imports = [], do_reload = True, do_reset = False, use_bview = False):
197 | # if do_reset:
198 | # self.init_rc(do_reset)
199 |
200 | if len(imports):
201 | self.import_modules(imports, True)
202 | if do_reload:
203 | self.reload_modules()
204 |
205 | if len(xs) == 0:
206 | return []
207 |
208 | if use_bview:
209 | view = self.view() if do_reload else self.bview()
210 | else:
211 | view = self.view()
212 | # interpret tuple as multiple args
213 | if type(xs) == type((1,)):
214 | res = view.map_sync(f, *xs)
215 | else:
216 | res = view.map_sync(f, xs)
217 |
218 | self.clear()
219 |
220 | return res
221 |
222 | def abort(self):
223 | self.view().abort()
224 |
225 | # def test(self, do = True, max_wait = 30):
226 | def test(self, do = True, max_wait = 60):
227 | if do:
228 | a = self.view().map_async(lambda x: x, range(100))
229 | start = time.time()
230 | while time.time() - start <= max_wait:
231 | if a.ready():
232 | return True
233 | else:
234 | time.sleep(1)
235 | raise RuntimeError('test() timed out after %s seconds!' % max_wait)
236 |
237 | __no_autoreload__ = True
238 |
239 | def init(do=True, local = True):
240 | if do:
241 | global p, part_info
242 | if local:
243 | config_file = None
244 | else:
245 | config_file = '/data/vision/billf/aho-billf/ipcluster_profile/security/ipcontroller-client.json'
246 | if p is not None and p._rc is not None:
247 | p._rc.close()
248 | p = Parallel(config_file)
249 | if part_info is not None:
250 | print 'parutil partition:', part_info
251 | p = p.partition(*part_info)
252 | for f in dir(p):
253 | if not f.startswith('_') and hasattr(getattr(p, f), '__call__'):
254 | globals()[f] = getattr(p, f)
255 |
256 | init()
257 |
258 | # def restart():
259 | # os.system('./cluster.sh')
260 | # init()
261 |
262 |
263 | def global_partition(n=None, i=None):
264 | global p, part_info
265 | if n is None:
266 | part_info = None
267 | else:
268 | part_info = (n, i)
269 | init()
270 |
271 | def test_partition():
272 | def f(x):
273 | import parutil
274 | return sum(parutil.partition(2, 1).map_sync(lambda x: x, range(10)))
275 | import parutil
276 | parutil.map_sync(lambda x:x, range(10000))
277 | assert sum(parutil.partition(2, 0).map_sync(f, range(1000))) == 1000*sum(range(10))
278 |
279 | def reset(parallel=True):
280 | init(parallel)
281 | test(parallel)
282 |
283 |
284 |
--------------------------------------------------------------------------------
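A minimal usage sketch for the Parallel helper above (assumes an IPython cluster is already running, e.g. started with `ipcluster start -n 4`; init() at import time exposes the methods of the global Parallel instance as module-level functions):

    import parutil

    parutil.test()                                  # sanity-check the engines
    squares = parutil.map_sync(lambda x: x * x, range(16))
    print squares
    parutil.global_partition(2, 0)                  # keep engines with even ids
    parutil.reset()                                 # re-init and re-test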
/manu_sawyer/src/grip_and_record/aolib/parutil.py:
--------------------------------------------------------------------------------
1 | # todo: rewrite
2 | try:
    from ipyparallel.util import interactive  # needed by targets_by_hostname; assumes ipyparallel kept this IPython.parallel helper
except ImportError:
    interactive = lambda f: f  # fallback: run the function unmodified
3 | import numpy as np
4 | import subprocess
5 |
6 | import ipyparallel as parallel
7 |
8 | import os
9 | import sys
10 | import time
11 | import util as ut
12 |
13 | ip_parallel = parallel
14 |
15 | p = None
16 | part_info = None
17 | builtin_map = __builtins__['map']
18 |
19 | def relative_module(m):
20 | return hasattr(m, '__file__') \
21 | and ((not m.__file__.startswith('/')) \
22 | or m.__file__.startswith(os.getcwd()))
23 |
24 | def safe_reload(m):
25 | if relative_module(m) and (not hasattr(m, '__no_autoreload__')):
26 | print 'reloading'
27 | reload(m)
28 |
29 | def run_reload():
30 | excludes = set(['__main__', 'autoreload'])
31 | for name, m in list(sys.modules.iteritems()):
32 | if m and relative_module(m) and (name not in excludes) and (not hasattr(m, '__no_autoreload__')):
33 | reload(m)
34 |
35 | class Parallel:
36 | def __init__(self, config = None, rc = None, view = None, bview = None):
37 | self.config = config
38 | self._rc = rc
39 | self._view = view
40 | self._bview = bview
41 |
42 | def init_rc(self, reset = False):
43 | if reset or (self._rc is None):
44 | if self.config is None:
45 | self._rc = ip_parallel.Client()
46 | else:
47 | self._rc = ip_parallel.Client(self.config)
48 | self._view = self._rc.direct_view()
49 | #self._view = self._rc.load_balanced_view()
50 | self._bview = self._rc.load_balanced_view()
51 |
52 | def view(self):
53 | self.init_rc()
54 | return self._view
55 |
56 | def bview(self):
57 | self.init_rc()
58 | return self._bview
59 |
60 |
61 | def rc(self):
62 | self.init_rc()
63 | return self._rc
64 |
65 | def per_machine_subset(self, n_per_machine):
66 | targets = self.targets_by_hostname()
67 | target_subset = ut.flatten([ut.sample_at_most(ts, n_per_machine) \
68 | for ts in targets.values()])
69 | return Parallel(self.config, self.rc(), self.rc()[target_subset])
70 |
71 | def partition(self, n, i):
72 | target_subset = [j for j in self.rc()[:].targets if j % n == i]
73 | return Parallel(self.config, self.rc(), self.rc()[target_subset])
74 |
75 | def targets_by_hostname(self):
76 | # initialize parallel_rc
77 | self.import_modules(['subprocess'], False)
78 | targets_by_hostname = {}
79 | for t in self.rc()[:].targets:
80 | v = self.rc()[[t]]
81 | [hostname] = v.map_sync(interactive(lambda x: subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE).stdout.read().rstrip()), [1])
82 | if hostname not in targets_by_hostname:
83 | targets_by_hostname[hostname] = []
84 | targets_by_hostname[hostname].append(t)
85 | return targets_by_hostname
86 |
87 | def execute(self, code):
88 | r = self.view().execute(code)
89 | r.wait()
90 | if not r.successful():
91 |             print 'parutil.execute error:'
92 | print r.result()
93 | assert r.successful()
94 |
95 | # def list_machines(self):
96 | # self.view().execute('import subprocess, os').wait()
97 | # res = self.view().map_sync(interactive(lambda x : subprocess.Popen('hostname', shell=True,
98 | # stdout=subprocess.PIPE).stdout.read().rstrip()), range(200))
99 | # print 'Running on:'
100 | # ut.prn_lines(set(res))
101 |
102 | def list_machines(self):
103 | self.view().execute('import subprocess, os').wait()
104 | # res = self.view().map_sync(interactive(lambda x : (subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE).stdout.read().rstrip(),
105 | # os.getpid())), range(2000))
106 | res = self.view().map_sync(lambda x : (subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE).stdout.read().rstrip(),
107 | os.getpid()), range(2000))
108 | print 'Running on:'
109 | machines = {}
110 | for x, p in res:
111 | if x not in machines:
112 | machines[x] = set()
113 | machines[x].add(p)
114 | for x, ps in machines.items():
115 | print x, len(ps)
116 |
117 | def send_var(self, var_name, val):
118 | res = self.view().push({var_name : val})
119 | res.wait()
120 | assert res.successful()
121 |
122 | def import_modules(self, packages, reload = False):
123 | packages = [('aolib.parutil', 'parutil')] + list(packages)
124 | import_cmd = ''
125 | reload_cmd = ''
126 | for p in packages:
127 | if len(import_cmd) != 0:
128 | import_cmd += ', '
129 | reload_cmd += '; '
130 | if type(p) == type((1,)):
131 | import_cmd += '%s as %s' % p
132 | reload_cmd += 'parutil.safe_reload(%s)' % p[1]
133 | else:
134 | import_cmd += p
135 | reload_cmd += 'parutil.safe_reload(%s)' % p
136 |
137 | if reload:
138 | cmd = 'import %s; %s' % (import_cmd, reload_cmd)
139 | else:
140 | cmd = 'import %s' % import_cmd
141 | self.execute(cmd)
142 |
143 | def reload_modules(self):
144 | self.execute('from aolib import parutil as parutil, areload; parutil.run_reload()')
145 | #self.execute('import aolib.parutil as parutil, areload; parutil.run_reload()')
146 |
147 | def send_var_fs(self, var_name, val):
148 | fname = ut.make_temp_nfs('.pk')
149 | ut.save(fname, val)
150 | res = self.view().execute('import util as ut; ut.wait_for_file("%s", 30); %s = ut.load("%s")' % (fname, var_name, fname))
151 | res.wait()
152 | assert res.successful()
153 | os.remove(fname)
154 |
155 | def clear(self):
156 | self.view().results.clear()
157 | self.rc().results.clear()
158 | self.rc().metadata.clear()
159 |
160 | # def maybe_map_sync(self, parallel, f, xs, imports = []):
161 | # if parallel:
162 | # return self.map_sync(f, xs, imports)
163 | # else:
164 | # # assume imports have already been loaded if not parallel
165 | # if type(xs) == type((1,)):
166 | # return map(f, *xs)
167 | # else:
168 | # return map(f, xs)
169 |
170 | # def maybe_map_sync(self, parallel, f, xs, imports = [], vars = {}):
171 | # if parallel:
172 | # return self.map_sync(f, xs, imports)
173 | # else:
174 | # # todo: is it better to save these temporarily? or would it only make it more confusing?
175 | # f.func_globals.update(vars)
176 | # # assume imports have already been loaded if not parallel
177 | # if type(xs) == type((1,)):
178 | # return map(f, *xs)
179 | # else:
180 | # return map(f, xs)
181 |
182 | def maybe_map_sync(self, parallel, f, xs, vars = {}, imports = [], do_reload = True):
183 | if parallel:
184 | return self.map_sync(f, xs, vars, imports, do_reload = do_reload)
185 | else:
186 | # todo: is it better to save these temporarily? or would it only make it more confusing?
187 | if len(vars):
188 | f.func_globals.update(vars)
189 | # assume imports have already been loaded if not parallel
190 | if type(xs) == type((1,)):
191 | return builtin_map(f, *xs)
192 | else:
193 | return builtin_map(f, xs)
194 |
195 | def map(self, parallel, f, xs, vars = {}, imports = [], do_reload = True):
196 | return self.maybe_map_sync(parallel, f, xs, vars, imports, do_reload = do_reload)
197 |
198 | def amap(self, *args, **kwargs):
199 | return np.array(self.map(*args, **kwargs))
200 |
201 | def map_sync(self, f, xs, vars = {}, imports = [], do_reload = True, do_reset = False, use_bview = False):
202 | # if do_reset:
203 | # self.init_rc(do_reset)
204 |
205 | if len(imports):
206 | self.import_modules(imports, True)
207 | if do_reload:
208 | self.reload_modules()
209 |
210 | if len(xs) == 0:
211 | return []
212 |
213 | if use_bview:
214 | view = self.view() if do_reload else self.bview()
215 | else:
216 | view = self.view()
217 | # interpret tuple as multiple args
218 | if type(xs) == type((1,)):
219 | res = view.map_sync(f, *xs)
220 | else:
221 | res = view.map_sync(f, xs)
222 |
223 | self.clear()
224 |
225 | return res
226 |
227 | def abort(self):
228 | self.view().abort()
229 |
230 | # def test(self, do = True, max_wait = 30):
231 | def test(self, do = True, max_wait = 60):
232 | if do:
233 | a = self.view().map_async(lambda x: x, range(100))
234 | start = time.time()
235 | while time.time() - start <= max_wait:
236 | if a.ready():
237 | return True
238 | else:
239 | time.sleep(1)
240 | raise RuntimeError('test() timed out after %s seconds!' % max_wait)
241 |
242 | __no_autoreload__ = True
243 |
244 | def init(do=True, local = True):
245 | if do:
246 | global p, part_info
247 | if local:
248 | config_file = None
249 | else:
250 | config_file = '/data/vision/billf/aho-billf/ipcluster_profile/security/ipcontroller-client.json'
251 | if p is not None and p._rc is not None:
252 | p._rc.close()
253 | p = Parallel(config_file)
254 | if part_info is not None:
255 | print 'parutil partition:', part_info
256 | p = p.partition(*part_info)
257 | for f in dir(p):
258 | if not f.startswith('_') and hasattr(getattr(p, f), '__call__'):
259 | globals()[f] = getattr(p, f)
260 |
261 | init()
262 |
263 | # def restart():
264 | # os.system('./cluster.sh')
265 | # init()
266 |
267 |
268 | def global_partition(n=None, i=None):
269 | global p, part_info
270 | if n is None:
271 | part_info = None
272 | else:
273 | part_info = (n, i)
274 | init()
275 |
276 | def test_partition():
277 | def f(x):
278 | import parutil
279 | return sum(parutil.partition(2, 1).map_sync(lambda x: x, range(10)))
280 | import parutil
281 | parutil.map_sync(lambda x:x, range(10000))
282 | assert sum(parutil.partition(2, 0).map_sync(f, range(1000))) == 1000 * sum(range(10))
283 |
284 | def reset(parallel=True):
285 | init(parallel)
286 | test(parallel)
287 |
288 |
289 |
--------------------------------------------------------------------------------