├── .gitignore
├── CMakeLists.txt
├── README.md
├── msg
│   └── animations_list.msg
├── package.xml
└── src
    ├── .gitignore
    ├── BlenderUtils.py
    ├── ShapekeyStore.py
    ├── ShapekeyStore2.py
    ├── Utils.py
    ├── animate-test.blend
    ├── bone_array.blend
    ├── config
    │   ├── config.yaml
    │   ├── inputs.yaml
    │   └── outputs.yaml
    ├── inputs
    │   ├── Faceshift.py
    │   ├── NRegionsOfInterest.py
    │   ├── RegionOfInterest.py
    │   ├── __init__.py
    │   └── bases
    │       ├── ObjectInBlender.py
    │       └── __init__.py
    ├── main.py
    ├── modes
    │   ├── Animations.py
    │   ├── Dummy.py
    │   ├── LookAround.py
    │   ├── ManualHead.py
    │   ├── SmartTrack.py
    │   ├── SmartTrack2.py
    │   ├── TrackDev.py
    │   ├── __init__.py
    │   ├── controllers
    │   │   ├── __init__.py
    │   │   ├── animate.py
    │   │   ├── demo_loops.py
    │   │   ├── primary.py
    │   │   └── tracking.py
    │   └── smart_track.yaml
    ├── outputs
    │   ├── TimelineAnimation.py
    │   ├── __init__.py
    │   └── state
    │       ├── CompositeOut.py
    │       ├── ParametersOut.py
    │       └── __init__.py
    ├── robo.blend
    └── startup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | *.blend1
2 | *.blend2
3 | *.blend3
4 | *.blend4
5 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(robo_blender)
3 |
4 | ## Find catkin macros and libraries
5 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
6 | ## is used, also find other catkin packages
7 | find_package(catkin REQUIRED COMPONENTS
8 | rospy
9 | std_msgs
10 | message_generation
11 | )
12 |
13 | ## System dependencies are found with CMake's conventions
14 | find_package(Boost REQUIRED COMPONENTS system)
15 |
16 |
17 | ## Uncomment this if the package has a setup.py. This macro ensures
18 | ## modules and global scripts declared therein get installed
19 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
20 | # catkin_python_setup()
21 |
22 | ################################################
23 | ## Declare ROS messages, services and actions ##
24 | ################################################
25 |
26 | ## To declare and build messages, services or actions from within this
27 | ## package, follow these steps:
28 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
29 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
30 | ## * In the file package.xml:
31 | ## * add a build_depend and a run_depend tag for each package in MSG_DEP_SET
32 | ## * If MSG_DEP_SET isn't empty the following dependencies might have been
33 | ## pulled in transitively but can be declared for certainty nonetheless:
34 | ## * add a build_depend tag for "message_generation"
35 | ## * add a run_depend tag for "message_runtime"
36 | ## * In this file (CMakeLists.txt):
37 | ## * add "message_generation" and every package in MSG_DEP_SET to
38 | ## find_package(catkin REQUIRED COMPONENTS ...)
39 | ## * add "message_runtime" and every package in MSG_DEP_SET to
40 | ## catkin_package(CATKIN_DEPENDS ...)
41 | ## * uncomment the add_*_files sections below as needed
42 | ## and list every .msg/.srv/.action file to be processed
43 | ## * uncomment the generate_messages entry below
44 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
45 |
46 | ## Generate messages in the 'msg' folder
47 | add_message_files(
48 | FILES
49 | animations_list.msg
50 | )
51 |
52 | ## Generate services in the 'srv' folder
53 | # add_service_files(
54 | # FILES
55 | # Service1.srv
56 | # Service2.srv
57 | # )
58 |
59 | ## Generate actions in the 'action' folder
60 | # add_action_files(
61 | # FILES
62 | # Action1.action
63 | # Action2.action
64 | # )
65 |
66 | ## Generate added messages and services with any dependencies listed here
67 | generate_messages(
68 | DEPENDENCIES
69 | #std_msgs # Or other packages containing msgs
70 | )
71 |
72 | ###################################
73 | ## catkin specific configuration ##
74 | ###################################
75 | ## The catkin_package macro generates cmake config files for your package
76 | ## Declare things to be passed to dependent projects
77 | ## INCLUDE_DIRS: uncomment this if your package contains header files
78 | ## LIBRARIES: libraries you create in this project that dependent projects also need
79 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need
80 | ## DEPENDS: system dependencies of this project that dependent projects also need
81 | catkin_package(
82 | # INCLUDE_DIRS include
83 | # LIBRARIES robo_blender
84 | # CATKIN_DEPENDS other_catkin_pkg
85 | # DEPENDS system_lib
86 | )
87 |
88 | ###########
89 | ## Build ##
90 | ###########
91 |
92 | ## Specify additional locations of header files
93 | ## Your package locations should be listed before other locations
94 | # include_directories(include)
95 |
96 | ## Declare a cpp library
97 | # add_library(robo_blender
98 | # src/${PROJECT_NAME}/robo_blender.cpp
99 | # )
100 |
101 | ## Declare a cpp executable
102 | # add_executable(robo_blender_node src/robo_blender_node.cpp)
103 |
104 | ## Add cmake target dependencies of the executable/library
105 | ## as an example, message headers may need to be generated before nodes
106 | # add_dependencies(robo_blender_node robo_blender_generate_messages_cpp)
107 |
108 | ## Specify libraries to link a library or executable target against
109 | # target_link_libraries(robo_blender_node
110 | # ${catkin_LIBRARIES}
111 | # )
112 |
113 | #############
114 | ## Install ##
115 | #############
116 |
117 | # all install targets should use catkin DESTINATION variables
118 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
119 |
120 | ## Mark executable scripts (Python etc.) for installation
121 | ## in contrast to setup.py, you can choose the destination
122 | # install(PROGRAMS
123 | # scripts/my_python_script
124 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
125 | # )
126 |
127 | ## Mark executables and/or libraries for installation
128 | # install(TARGETS robo_blender robo_blender_node
129 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
130 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
131 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
132 | # )
133 |
134 | ## Mark cpp header files for installation
135 | # install(DIRECTORY include/${PROJECT_NAME}/
136 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
137 | # FILES_MATCHING PATTERN "*.h"
138 | # PATTERN ".svn" EXCLUDE
139 | # )
140 |
141 | ## Mark other files for installation (e.g. launch and bag files, etc.)
142 | # install(FILES
143 | # # myfile1
144 | # # myfile2
145 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
146 | # )
147 |
148 | #############
149 | ## Testing ##
150 | #############
151 |
152 | ## Add gtest based cpp test target and link libraries
153 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_robo_blender.cpp)
154 | # if(TARGET ${PROJECT_NAME}-test)
155 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
156 | # endif()
157 |
158 | ## Add folders to be run by python nosetests
159 | # catkin_add_nosetests(test)
160 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Robo Blender
2 |
3 | CAUTION
4 | -------
5 | Deprecated/Obsolete. As of December 2014, the ROS-plus-blender
6 | architecture implemented here has been superseded by the "Eva"
7 | ROS-plus-blender architecture that can be found in
8 | [blender_api](http://github.com/hansonrobotics/blender_api) and
9 | [blender_api_msgs](http://github.com/hansonrobotics/blender_api_msgs).
10 | The newer architecture provides a simpler, easier-to-use interface,
11 | and provides a greater number of functions. The below provides
12 | support for the older Arthur and Einstein head designs.
13 |
14 | This provides a ROS node which may be used to control Blender rigs,
15 | and to publish the resulting rig position information (for the neck,
16 | face, eyes). The position information can then be used to drive motors
17 | (for example). The
18 | [pau2motors](http://github.com/hansonrobotics/pau2motors) ROS node
19 | will listen to the published messages to control the Einstein and
20 | Arthur robot heads.
21 |
22 | The node is started within Blender, and has direct access to the
23 | Blender context. It uses different modes to control the rig, with
24 | different modes used for different rigs.
25 |
26 | Currently two rigs are included:
27 | * robo.blend -- Arthur rig, can look around, track objects.
28 | * animate-test.blend -- Arthur rig, can animate four basic expressions.
29 |
30 | Although both of these rigs are "Arthur", they are not compatible
31 | with each other.
32 |
33 | The last working version of the Einstein rig is in the branch named
34 | "einstein-dev". It has not been kept up to date.
35 |
36 | # Running
37 | The easiest way to run this node is to use it with a Docker container.
38 | Docker containers specify all of the pre-requisite software components
39 | that are needed in order to run a demo. An all-in-one
40 | container for this node has been defined at
41 | [opencog/ros](http://github.com/opencog/ros), in the ros-indigo-animation
42 | image. The demo process is then:
43 |
44 | * Install docker
45 | * git clone http://github.com/opencog/ros
46 | * cd opencog/ros/indigo
47 | * ./build-all.sh
48 | * cd animation and follow instructions in the README file there.
49 |
50 | If you do the above, you can skip the Manual Install steps below.
51 |
52 | ## Manual Install and Operation
53 | ### Pre-requisites
54 | The following packages need to be installed:
55 | ```
56 | apt-get install python3-yaml python3-rospkg
57 | ```
58 | Caution: python3+ros has issues; see this bug:
59 | https://github.com/ros-infrastructure/rospkg/issues/71
60 |
61 | This can be worked-around as follows:
62 | ```
63 | git clone git://github.com/ros/rospkg.git
64 | cd rospkg
65 | python3 setup.py install
66 | cd ..
67 | git clone git://github.com/ros-infrastructure/catkin_pkg.git
68 | cd catkin_pkg
69 | python3 setup.py install
70 | ```
71 | ### Co-requisites
72 | Building this package requires:
73 | * [blender_api_msgs](http://github.com/hansonrobotics/blender_api_msgs)
74 | for API message definitions.
75 |
76 | The tracking mode requires that
77 | [eva_behavior](http://github.com/hansonrobotics/eva_behavior) ROS node
78 | be set up and running. Additional instructions in the 'cookbook'
79 | section, below.
80 |
81 | ### Manual start
82 | To run, start blender, and load either the `robo.blend` or the
83 | `animate-test.blend` rig file. The ROS node is not started until
84 | one of the following is performed:
85 |
86 | * Hover mouse over the text editor and press Alt+P
87 | * Select the text-editor menu "Edit->Run Script" item.
88 | (the text-editor menu is at bottom-left).
89 | * Start blender as `blender robo.blend --enable-autoexec -P startup.py`
90 |
91 | Verify that the ROS node has started: `rostopic list -v` should show
92 | `/cmd_blendermode` as a subscribed topic.
93 |
94 | Don't forget to start `roscore` first.
95 |
96 | # Modes
97 |
98 | The rigs have several different modes in which they can operate. These
99 | are:
100 |
101 | * LookAround -- Random looking about.
102 | * TrackDev -- Head/eyes track a region of interest.
103 | * Animations -- Menu of scripted animations.
104 | * Manual Head -- Manual control from blender, debugging
105 | * Dummy -- Do nothing. Neutral expression.
106 |
107 | The mode is set by sending one of the above modes, as a string,
108 | to the `/cmd_blendermode` topic. For example:
109 |
110 | `rostopic pub --once /cmd_blendermode std_msgs/String LookAround`
111 |
112 | Both rigs listen on `/cmd_blendermode` to switch between the
113 | different modes.
114 |
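For scripted control, the same mode switch can be issued from Python. A
minimal sketch using rospy (the `mode_switcher` node name is arbitrary):

```
#!/usr/bin/env python3
# Minimal sketch: switch the rig mode from Python, equivalent to the
# rostopic call above.
import rospy
from std_msgs.msg import String

rospy.init_node("mode_switcher", anonymous=True)
pub = rospy.Publisher("/cmd_blendermode", String, queue_size=1)
rospy.sleep(0.5)  # give the subscriber time to connect
pub.publish(String("LookAround"))
```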
115 | Currently, only the `robo.blend` rig can respond to the LookAround
116 | and TrackDev modes. In contrast, only the `animate-test.blend` rig can
117 | respond to the Animations mode. Sorry!
118 |
119 | ### LookAround
120 | Move the head and eyes in an endless cycle. Movements are defined via
121 | a python script in the rig. The movements are randomly generated, with
122 | the eyes moving faster than the head.
123 |
124 | For example:
125 | `rostopic pub --once /cmd_blendermode std_msgs/String LookAround`
126 |
127 | ##### Inputs:
128 | * (none)
129 |
130 | ##### Outputs
131 | * neck_euler - publishes neck angle
132 | * eyes - publishes eyes movements
133 |
134 |
135 | ### TrackDev
136 | Eyes and head track a target, defined by a region of interest (ROI).
137 | The system maintains multiple targets visible in the scene. The target
138 | that will be tracked is selected with the /tracking_action topic.
139 |
140 | For example:
141 | `rostopic pub --once /cmd_blendermode std_msgs/String TrackDev`
142 |
143 | Each ROI is denoted with a cube.
144 |
145 | Currently, the ROIs are human faces, extracted by pi_vision from a USB
146 | video camera feed. Multiple cameras can be supported by adding them to
147 | src/config/inputs.yaml. Each pi_vision node should run in its own ROS
148 | namespace. The default (root node) camera is bound to chest_pivision
149 | (configurable in inputs.yaml). The eye namespace (export ROS_NAMESPACE=eye)
150 | is currently bound to eye_pivision.
151 |
152 | ##### Topics subscribed:
153 | * /tracking_action (eva_behavior/tracking_action) - Listens for
154 | information on objects to track.
155 |
156 |
157 | #### Inputs
158 | * chest_pivision (NRegionsOfInterest) - populates the scene with ROI
159 | from chest camera.
160 | * eye_pivision (NRegionsOfInterest) - populates the scene with ROI
161 | from eye camera.
162 |
163 | ##### Outputs
164 | * neck_euler - publishes neck angle.
165 | * eyes - publishes eyes movements.
166 |
167 |
168 | ### Animations
169 | Plays pre-defined animations. These are defined as object actions
170 | within the blender rig. The list of valid animations can be obtained
171 | in two ways: using the old `/animations_list` topic or the new
172 | `/blender_api/available_emotion_states` topic. Neither topic list is
173 | published until the rig is placed in animation mode.
174 |
175 | Only the `animate-test.blend` rig supports Animations mode.
176 |
177 | For example:
178 | `rostopic pub --once /cmd_blendermode std_msgs/String Animations`
179 |
180 | ##### Topics subscribed:
181 | * cmd_animations (std_msgs/String) - colon-separated string which
182 | sends a command (play, stop, pause) optionally followed by the animation name.
183 |
184 | For example:
185 | `rostopic pub --once /cmd_animations std_msgs/String play:yawn-1`
186 |
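The same commands can be sent from Python. A sketch of a
play/pause/resume sequence, using the animation name from the example
above:

```
#!/usr/bin/env python3
# Sketch: drive the old-style animation interface from Python.
import rospy
from std_msgs.msg import String

rospy.init_node("anim_commander", anonymous=True)
pub = rospy.Publisher("/cmd_animations", String, queue_size=1)
rospy.sleep(0.5)                    # give the subscriber time to connect
pub.publish(String("play:yawn-1"))  # command, optionally ":<animation>"
rospy.sleep(3.0)
pub.publish(String("pause"))        # pause immediately
pub.publish(String("play"))         # resume the current animation
```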
187 | ##### Outputs:
188 | * full_head - publishes expression, neck, and eye information.
189 |
190 |
191 | ### Manual Head
192 | Used for animation development and debugging. Allows the designer
193 | to control and move the rig, thus controlling the physical robot head.
194 |
195 | ##### Outputs:
196 | * full_head - publishes expression, neck, and eye positions.
197 |
198 |
199 | ### Dummy
200 | Idle.
201 |
202 | For example:
203 | `rostopic pub --once /cmd_blendermode std_msgs/String Dummy`
204 |
205 | # Inputs
206 | The following input classes are available, with NRegionsOfInterest the main one in current use:
207 | * NRegionsOfInterest - Listens to the specified *eventtopic* for
208 | eva_behavior/tracking_event messages, which inform about new and
209 | removed sensor_msgs/RegionOfInterest topics of the format specified
210 | in *sourcetopic*. Converts them to 3D space in blender and updates
211 | their position.
212 | * RegionOfInterest - Listens to the specified topic for
213 | sensor_msgs/RegionOfInterest messages. Converts them to 3D space
214 | in blender and updates the corresponding blender object's position.
215 | * Faceshift - allows input from faceshift, changes shapekeys
216 | of the face mesh.
217 |
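To exercise the RegionOfInterest input path without a camera, a
hand-made ROI message can be fed in. A sketch; the `faces/1` topic name
assumes the `sourcetopic: faces/%d` pattern from src/config/inputs.yaml:

```
#!/usr/bin/env python3
# Sketch: feed one hand-made ROI to the tracker input.
import rospy
from sensor_msgs.msg import RegionOfInterest

rospy.init_node("roi_feeder", anonymous=True)
pub = rospy.Publisher("faces/1", RegionOfInterest, queue_size=1)
rospy.sleep(0.5)
# A 60x60-pixel region; the node normalizes it against camera_info.
pub.publish(RegionOfInterest(x_offset=100, y_offset=80, width=60, height=60))
```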
218 | # Outputs
219 | Outputs are used to publish rig data back to ROS. ROS topics are
220 | defined in config.
221 |
222 | The following outputs are currently used:
223 | * neck_euler - gets neck's Euler rotation, publishes as
224 | basic_head_api.msg/PointHead
225 | * face - gets mesh shapekeys and publishes them as pau2motors/pau.
226 | * eyes - gets eyes angles and publishes them as pau2motors/pau
227 | * eyes_beorn - gets Beorn's rig eyes angles and publishes in
228 | pau2motors/pau message on specified topic.
229 | * neck_euler_beorn - gets Beorn's rig neck rotation in Euler angles
230 | and publishes in basic_head_api.msg/PointHead
231 | * full_head - combines face, neck_euler_beorn, eyes_beorn outputs to one.
232 |
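To inspect what an output publishes, subscribe to its topic. A sketch
for the eyes output; the topic and field names come from
src/config/outputs.yaml and require the pau2motors package:

```
#!/usr/bin/env python3
# Sketch: watch the eye angles the rig publishes.
import rospy
from pau2motors.msg import pau

def on_pau(msg):
    rospy.loginfo("left eye yaw: %.3f", msg.m_eyeGazeLeftYaw)

rospy.init_node("pau_watcher", anonymous=True)
rospy.Subscriber("arthur/cmd_eyes_pau", pau, on_pau)
rospy.spin()
```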
233 |
234 | # Cookbook recipes
235 |
236 | ## Example LookAround Mode
237 | The default rig demonstrates the LookAround mode.
238 |
239 | ```
240 | # Start blender, load head, start scripts
241 | blender ./robo_blender/src/robo.blend --enable-autoexec -P ./robo_blender/src/startup.py
242 |
243 | # Start the look-around mode
244 | rostopic pub --once /cmd_blendermode std_msgs/String LookAround
245 |
246 | # Verify that tracking output is sent to the PAU motors
247 | rostopic echo /arthur/cmd_eyes_pau
248 | ```
249 |
250 | ## Example TrackDev Mode
251 | The TrackDev mode requires that the [eva_behavior](http://github.com/hansonrobotics/eva_behavior)
252 | ROS node be set up and running.
253 | ```
254 | # Make sure all packages can be found.
255 | export ROS_PACKAGE_PATH=${ROS_PACKAGE_PATH}:... uvc_cam, pi_vision, etc.
256 |
257 | # Start up the video camera. Use uvc_cam from hansonrobotics github.
258 | roslaunch uvc_cam uvc_cam.launch device:=/dev/video0
259 |
260 | # Verify camera is working
261 | rosrun image_view image_view image:=/camera/image_raw
262 |
263 | # Start pi_vision face tracker
264 | roslaunch pi_face_tracker face_tracker_uvc_cam.launch
265 |
266 | # Start blender, load head, start scripts
267 | blender ./robo_blender/src/robo.blend --enable-autoexec -P ./robo_blender/src/startup.py
268 |
269 | # Start the eva_behavior node
270 | rosrun eva_behavior general_behavior.py
271 |
272 | # Start the tracker
273 | rostopic pub /cmd_blendermode std_msgs/String TrackDev
274 |
275 | # XXX How to list the valid values for tracking_action?
276 |
277 | # XXX Set the roi to track.
278 | rostopic pub /tracking_action eva_behavior/tracking_action XXX ???
279 |
280 | # Verify that tracking output is sent to the PAU motors
281 | rostopic echo /arthur/cmd_eyes_pau
282 | ```
283 | ## Example Multi-camera TrackDev Mode
284 | Same as above, except that new cameras are now added:
285 | ```
286 | # Specify the 'eye' namespace
287 | export ROS_NAMESPACE=eye
288 |
289 | # Start up the video camera, but on a different video device.
290 | roslaunch uvc_cam uvc_cam.launch device:=/dev/video1
291 |
292 | # Verify camera is working
293 | rosrun image_view image_view image:=/eye/camera/image_raw
294 |
295 | # Start pi_vision face tracker with the alternate camera
296 | # XXX this doesn't work ...
297 | roslaunch pi_face_tracker face_tracker_uvc_cam.launch input_rgb_image:=/eye/camera/image_raw
298 |
299 | ```
300 | ## Example New-style Animations Mode
301 |
302 | This requires a blender file that contains animations. The animate-test
303 | (another Arthur rig) will do. The blender file should be kept in the
304 | `./robo_blender/src/` directory, else the relative path names for
305 | loading `main.py` will not work.
306 |
307 | ```
308 | # Start blender, load head, start scripts.
309 | blender ./robo_blender/src/animate-test.blend --enable-autoexec -P ./robo_blender/src/startup.py
310 |
311 | # Start the animations mode
312 | rostopic pub --once /cmd_blendermode std_msgs/String Animations
313 |
314 | # Obtain the list of supported animations. The result should show:
315 | # actions: ['angry-1', 'sad-1', 'happy-1', 'surprised-1']
316 | rostopic echo -n 1 /blender_api/available_emotion_states
317 |
318 | # Perform one of the supported animations
319 | rostopic pub --once /blender_api/set_emotion_state blender_api_msgs/EmotionState '{name: sad-1, magnitude: 1.0, duration: [6, 500000000]}'
320 |
321 | # Verify that tracking output is sent to the PAU motors
322 | rostopic echo /arthur/cmd_eyes_pau
323 | ```
324 |
325 | ## Example Old-style Animations Mode
326 |
327 | This requires a blender file that contains animations. The animate-test
328 | (another Arthur rig) will do. The blender file should be kept in the
329 | `./robo_blender/src/` directory, else the relative path names for
330 | loading `main.py` will not work.
331 |
332 | ```
333 | # Start blender, load head, start scripts.
334 | blender ./robo_blender/src/animate-test.blend --enable-autoexec -P ./robo_blender/src/startup.py
335 |
336 | # Start the animations mode
337 | rostopic pub --once /cmd_blendermode std_msgs/String Animations
338 |
339 | # Obtain the list of supported animations. The result should show:
340 | # actions: ['angry-1', 'sad-1', 'happy-1', 'surprised-1']
341 | rostopic echo -n 1 /blender_api/available_emotion_states
342 |
343 | # Perform one of the supported animations
344 | rostopic pub --once /cmd_animations std_msgs/String play:sad-1
345 |
346 | # Verify that tracking output is sent to the PAU motors
347 | rostopic echo /arthur/cmd_eyes_pau
348 |
349 | # Pause the animation
350 | rostopic pub --once /cmd_animations std_msgs/String pause
351 |
352 | # Restart the animation
353 | rostopic pub --once /cmd_animations std_msgs/String play
354 |
355 | # Halt the animation
356 | rostopic pub --once /cmd_animations std_msgs/String stop
357 | ```
358 | ##### Copyright (c) 2013-2018 Hanson Robotics, Ltd.
359 |
--------------------------------------------------------------------------------
/msg/animations_list.msg:
--------------------------------------------------------------------------------
1 | string[] actions
--------------------------------------------------------------------------------
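Once the package is built with catkin, the generated Python class is
importable; a usage sketch (the latched topic name mirrors the old-style
`/animations_list` topic described in the README):

```
#!/usr/bin/env python3
# Sketch: publish the generated animations_list message.
import rospy
from robo_blender.msg import animations_list

rospy.init_node("anim_list_demo", anonymous=True)
pub = rospy.Publisher("animations_list", animations_list,
                      queue_size=1, latch=True)
pub.publish(animations_list(actions=["angry-1", "sad-1", "happy-1"]))
rospy.spin()
```
--------------------------------------------------------------------------------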
/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package>
3 |   <name>robo_blender</name>
4 |   <version>0.0.0</version>
5 |   <description>The robo_blender package</description>
6 |   <maintainer email="ean@todo.todo">ean</maintainer>
7 |   <license>TODO</license>
8 |   <buildtool_depend>catkin</buildtool_depend>
9 | </package>
--------------------------------------------------------------------------------
/src/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | __pycache__
--------------------------------------------------------------------------------
/src/BlenderUtils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
3 | import bpy
4 | from mathutils import Matrix
5 |
6 | def get_pose_matrix_in_other_space(mat, pose_bone, pose_refbone=None):
7 | """
8 | Returns the transform matrix relative to pose_bone's current
9 | transform space. In other words, presuming that mat is in
10 | armature space, slapping the returned matrix onto pose_bone
11 | should give it the armature-space transforms of mat.
12 |
13 | If pose_refbone (reference bone) is set, it is used instead of pose_bone's
14 | parent.
15 |
16 | TODO: try to handle cases with axis-scaled parents better.
17 | """
18 | rest = pose_bone.bone.matrix_local.copy()
19 | rest_inv = rest.inverted()
20 | if pose_refbone == None and pose_bone.parent:
21 | pose_refbone = pose_bone.parent
22 | if pose_refbone:
23 | par_mat = pose_refbone.matrix.copy()
24 | par_inv = par_mat.inverted()
25 | par_rest = pose_refbone.bone.matrix_local.copy()
26 | else:
27 | par_mat = Matrix()
28 | par_inv = Matrix()
29 | par_rest = Matrix()
30 |
31 | # Get matrix in bone's current transform space
32 | smat = rest_inv * (par_rest * (par_inv * mat))
33 |
34 | return smat
35 |
36 | def get_local_pose_matrix(pose_bone):
37 | """Returns the local transform matrix of the given pose bone."""
38 | return get_pose_matrix_in_other_space(pose_bone.matrix, pose_bone)
39 |
40 | def get_pose_matrix(pose_bone, pose_refbone):
41 | """
42 | Returns the local transform matrix of the given pose bone relative to the
43 | pose_refbone
44 | """
45 | return get_pose_matrix_in_other_space(pose_bone.matrix, pose_bone, pose_refbone)
46 |
47 | def get_bones_rotation_rad(armature, bone, axis, refbone=None):
48 | if refbone:
49 | mat = get_pose_matrix(
50 | bpy.data.objects[armature].pose.bones[bone],
51 | bpy.data.objects[armature].pose.bones[refbone]
52 | )
53 | else:
54 | mat = get_local_pose_matrix(bpy.data.objects[armature].pose.bones[bone])
55 | return getattr(mat.to_euler(), axis)
56 |
--------------------------------------------------------------------------------
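A usage sketch for the helpers above, run from Blender's Python console;
the armature and bone names are the ones used in src/config/outputs.yaml:

```
# Read bone rotations the same way the outputs do.
import BlenderUtils

# Yaw of the "base" bone of the "Armature" object, in radians.
yaw = BlenderUtils.get_bones_rotation_rad("Armature", "base", "y")

# Pitch of "bracket3.001" measured relative to the "neck_base" bone.
pitch = BlenderUtils.get_bones_rotation_rad(
    "Armature", "bracket3.001", "x", "neck_base")
```
--------------------------------------------------------------------------------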
/src/ShapekeyStore.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 |
3 | _shkey_list = [
4 | "EyeBlink_L",
5 | "EyeBlink_R",
6 | "EyeSquint_L",
7 | "EyeSquint_R",
8 | "EyeDown_L",
9 | "EyeDown_R",
10 | "EyeIn_L",
11 | "EyeIn_R",
12 | "EyeOpen_L",
13 | "EyeOpen_R",
14 | "EyeOut_L",
15 | "EyeOut_R",
16 | "EyeUp_L",
17 | "EyeUp_R",
18 | "BrowsD_L",
19 | "BrowsD_R",
20 | "BrowsU_C",
21 | "BrowsU_L",
22 | "BrowsU_R",
23 | "JawFwd",
24 | "JawLeft",
25 | "JawOpen",
26 | "JawChew",
27 | "JawRight",
28 | "MouthLeft",
29 | "MouthRight",
30 | "MouthFrown_L",
31 | "MouthFrown_R",
32 | "MouthSmile_L",
33 | "MouthSmile_R",
34 | "MouthDimple_L",
35 | "MouthDimple_R",
36 | "LipsStretch_L",
37 | "LipsStretch_R",
38 | "LipsUpperClose",
39 | "LipsLowerClose",
40 | "LipsUpperUp",
41 | "LipsLowerDown",
42 | "LipsUpperOpen",
43 | "LipsLowerOpen",
44 | "LipsFunnel",
45 | "LipsPucker",
46 | "ChinLowerRaise",
47 | "ChinUpperRaise",
48 | "Sneer",
49 | "Puff",
50 | "CheekSquint_L",
51 | "CheekSquint_R"
52 | ]
53 |
54 | # Create a dictionary mapping shapekeys to their indices
55 | _shkey2Index = {}
56 | for i in range(len(_shkey_list)):
57 | _shkey2Index[_shkey_list[i]] = i
58 |
59 | def getIndex(shapekey):
60 | """Gets the index of the given shapekey string."""
61 | return _shkey2Index[shapekey]
62 |
63 | def getString(index):
64 | return _shkey_list[index]
65 |
66 | def getIter():
67 | # Returns iterator instead of the list to prevent outside users from
68 | # modifying it.
69 | return iter(_shkey_list)
70 |
--------------------------------------------------------------------------------
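A short usage sketch for the lookup functions above:

```
import ShapekeyStore

ShapekeyStore.getIndex("JawOpen")     # -> 21
ShapekeyStore.getString(0)            # -> "EyeBlink_L"
for name in ShapekeyStore.getIter():  # iterate without exposing the list
    print(name)
```
--------------------------------------------------------------------------------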
/src/ShapekeyStore2.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 |
3 | _shkey_list = [
4 | "00_EyeBlink_L",
5 | "01_EyeBlink_R",
6 | "02_EyeSquint_L",
7 | "03_EyeSquint_R",
8 | "04_EyeDown_L",
9 | "05_EyeDown_R",
10 | "08_EyeOpen_L",
11 | "09_EyeOpen_R",
12 | "12_EyeUp_L",
13 | "13_EyeUp_R",
14 | "14_BrowsD_I_L",
15 | "14_BrowsD_O_L",
16 | "15_BrowsD_O_R",
17 | "15_BrowsD_I_R",
18 | "16_BrowsU_C",
19 | "17_BrowsU_O_L",
20 | "17_BrowsU_I_L",
21 | "18_BrowsU_I_R",
22 | "18_BrowsU_O_R",
23 | "21_JawOpen",
24 | "26_MouthFrown_L",
25 | "27_MouthFrown_R",
26 | "28_MouthSmile_L",
27 | "29_MouthSmile_R",
28 | "32_LipsStretch_L",
29 | "33_LipsStretch_R",
30 | "36_LipsUpperUp_L",
31 | "36_LipsUpperUp_R",
32 | "37_LipsLowerDown_L",
33 | "37_LipsLowerDown_R",
34 | "38_LipsUpperOpen",
35 | "39_LipsLowerOpen",
36 | "40_LipsFunnel",
37 | "41_LipsPucker",
38 | "42_ChinLowerRaise",
39 | "43_ChinUpperRaise",
40 | "44_Sneer_C",
41 | "44_Sneer_L",
42 | "44_Sneer_R",
43 | "46_CheekSquint_L",
44 | "47_CheekSquint_R"
45 | ]
46 |
47 | # Create a dictionary mapping shapekeys to their indices
48 | _shkey2Index = {}
49 | for i in range(len(_shkey_list)):
50 | _shkey2Index[_shkey_list[i]] = i
51 |
52 | def getIndex(shapekey):
53 | """Gets the index of the given shapekey string."""
54 | return _shkey2Index[shapekey]
55 |
56 | def getString(index):
57 | return _shkey_list[index]
58 |
59 | def getIter():
60 | # Returns iterator instead of the list to prevent outside users from
61 | # modifying it.
62 | return iter(_shkey_list)
63 |
--------------------------------------------------------------------------------
/src/Utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
3 | import yaml
4 | import os
5 | import importlib
6 | import genpy
7 | import roslib.message
8 | import copy
9 | import re
10 |
11 | class DictKeyChain:
12 | """
13 | Allows to build a chain of dictionary keys (or array indices or object
14 | attributes), which can be later applied to an obj.
15 |
16 | I.e. a DictKeyChain instance constructed with key_list ['foo', 'bar'], can
17 | be applied on a dictionary {'foo': {'bar': 'myStr'}} to get the 'myStr'
18 | string.
19 | """
20 |
21 | @staticmethod
22 | def _get_from(host, key_list):
23 | for key in key_list:
24 | if hasattr(host, "__getitem__"):
25 | host = host[key]
26 | else:
27 | host = getattr(host, key)
28 | return host
29 |
30 | def get_from(self, host):
31 | return self._get_from(host, self.key_list)
32 |
33 | def set_on(self, host, val):
34 | host = self._get_from(host, self.key_list[:-1])
35 | key = self.key_list[-1]
36 | if hasattr(host, "__setitem__"):
37 | host[key] = val
38 | else:
39 | setattr(host, key, val)
40 |
41 | def __init__(self, key_list):
42 | # Parse strings to integers where possible
43 | self.key_list = list(map(
44 | lambda key: int(key)
45 | if isinstance(key, str) and key.isdigit()
46 | else key,
47 | key_list
48 | ))
49 |
50 | class RosMsgKeyChain(DictKeyChain):
51 |
52 | list_brackets = re.compile(r'\[[^\]]*\]$')
53 |
54 | @classmethod
55 | def _get_ros_arr_class(cls, arr_host, key):
56 | #E.g. trajectory_msgs/JointTrajectoryPoint[]
57 | arr_type = dict(zip(arr_host.__slots__, arr_host._slot_types))[key]
58 | #E.g. trajectory_msgs/JointTrajectoryPoint
59 | entry_type = cls.list_brackets.sub('', arr_type)
60 | return roslib.message.get_message_class(entry_type)
61 |
62 | @classmethod
63 | def _hard_get_from(cls, host, key_list):
64 | for key in key_list:
65 | if isinstance(host, list):
66 | key = int(key)
67 | if key > len(host):
68 | raise IndexError("Index %d of %s is out of bounds." % (key, host))
69 | elif key == len(host):
70 | # We can find what class is appropriate to append to this list by
71 | # looking at the parent of this list.
72 | host.append(cls._get_ros_arr_class(*prev_hostkey)())
73 |
74 | prev_hostkey = (host, key)
75 | if isinstance(host, list):
76 | host = host[key]
77 | else:
78 | host = getattr(host, key)
79 | return host
80 |
81 | @staticmethod
82 | def _hard_set_on(host, key, val):
83 | if isinstance(host, list):
84 | key = int(key)
85 | if key > len(host):
86 | raise IndexError("Index %d of %s is out of bounds." % (key, host))
87 | elif key == len(host):
88 | host.append(val)
89 | else:
90 | host[key] = val
91 | else:
92 | setattr(host, key, val)
93 |
94 | def hard_set_on(self, host, val):
95 | """
96 | Behaves like set_on(self, host, val), except this method creates necessary
97 | nested ROS messages on its way to the end of the keychain.
98 |
99 | E.g. Key chain ["points", "0", "positions"] applied with this method to an
100 | empty JointTrajectory instance would append the empty "points" list with a
101 | JointTrajectoryPoint instance and set its "positions" attribute to the
102 | given val.
103 | """
104 | host = self._hard_get_from(host, self.key_list[:-1])
105 | key = self.key_list[-1]
106 | self._hard_set_on(host, key, val)
107 |
108 |
109 | def read_yaml(yamlname, loader_filepath=None):
110 | """
111 | Pass __file__ as loader_filepath argument to load file relative to the
112 | caller file location.
113 | """
114 | if not loader_filepath:
115 | loader_filepath = __file__
116 | dirname, filename = os.path.split(os.path.abspath(loader_filepath))
117 |
118 | # safe_load avoids executing arbitrary YAML tags in config files.
119 | with open(os.path.join(dirname, yamlname), 'r') as stream:
120 | result = yaml.safe_load(stream)
121 | return result
122 |
123 | def import_member(name):
124 | """
125 | Behaves like importlib.import_module except that this takes a path
126 | to a member (a class or a function) inside a module, instead of a
127 | path to a module.
128 | """
129 | path = name.split(".")
130 | module = importlib.import_module((".".join(path[:-1])))
131 | return getattr(module, path[-1])
132 |
--------------------------------------------------------------------------------
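A usage sketch for the keychain classes above (the host values are
hypothetical):

```
import Utils
from trajectory_msgs.msg import JointTrajectory

# Plain keychain on nested dicts/lists; "0" is parsed into an int index.
chain = Utils.DictKeyChain(["foo", "0", "bar"])
host = {"foo": [{"bar": "myStr"}]}
assert chain.get_from(host) == "myStr"
chain.set_on(host, "newStr")   # writes host["foo"][0]["bar"]

# RosMsgKeyChain creates intermediate ROS messages along the way:
# appends an empty JointTrajectoryPoint, then sets its "positions".
ros_chain = Utils.RosMsgKeyChain(["points", "0", "positions"])
traj = JointTrajectory()
ros_chain.hard_set_on(traj, [0.1, 0.2])
```
--------------------------------------------------------------------------------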
/src/animate-test.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hansonrobotics/robo_blender/b444f984e5151bdcc041dc6cd5f74712fcea20e4/src/animate-test.blend
--------------------------------------------------------------------------------
/src/bone_array.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hansonrobotics/robo_blender/b444f984e5151bdcc041dc6cd5f74712fcea20e4/src/bone_array.blend
--------------------------------------------------------------------------------
/src/config/config.yaml:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | fps: 30
3 |
--------------------------------------------------------------------------------
/src/config/inputs.yaml:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | - name: faceshift
3 | class: Faceshift
4 | enabled: false
5 | sourcetopic: faceshift_track
6 | binding:
7 | meshname: Head
8 | singles: []
9 | #- varpath: m_headRotation:x
10 | # bonerot: Armature:bracket3:x
11 | #- varpath: m_headRotation:y
12 | # bonerot: Armature:base:y
13 | #- varpath: m_headRotation:z
14 | # bonerot: Armature:bracket2:z
15 |
16 | - name: chest_pivision
17 | class: NRegionsOfInterest
18 | enabled: true
19 | rosbind:
20 | #namespace: chest
21 | eventtopic: tracking_event
22 | sourcetopic: faces/%d
23 | blenderbind:
24 | objectpos:
25 | # coordinates of the center of the webcam screen in blender space.
26 | offset: [0.008,-1,0.458737]
27 | scale: 1.5
28 |
29 | - name: eye_pivision
30 | class: NRegionsOfInterest
31 | enabled: true
32 | rosbind:
33 | namespace: eye
34 | eventtopic: tracking_event
35 | sourcetopic: faces/%d
36 | blenderbind:
37 | objectpos:
38 | offset: RightEye
39 | direction: eyefocus
40 | distance: 1
41 | scale: 1
42 |
--------------------------------------------------------------------------------
/src/config/outputs.yaml:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | #--Original head rig outputs--
3 | - name: neck_euler
4 | class: state.ParametersOut
5 | enabled: true
6 | pubtopic: arthur/point_head
7 | msg: basic_head_api.msg.PointHead
8 | binding: #bonerot format: armature_obj:bone:axis
9 | - varpath: yaw
10 | bonerot: Armature:base:y
11 | - varpath: pitch
12 | bonerot: Armature:bracket3:x
13 | - varpath: roll
14 | bonerot: Armature:bracket2:z
15 |
16 | - name: face
17 | class: state.ParametersOut
18 | enabled: true
19 | pubtopic: arthur/cmd_face_pau
20 | msg: pau2motors.msg.pau
21 | binding:
22 | - varpath: m_coeffs
23 | shkey_mesh: Head
24 |
25 | - name: eyes
26 | class: state.ParametersOut
27 | enabled: true
28 | pubtopic: arthur/cmd_eyes_pau
29 | msg: pau2motors.msg.pau
30 | binding:
31 | - varpath: m_eyeGazeLeftPitch
32 | bonerot: leye:leyebone:x
33 | - varpath: m_eyeGazeLeftYaw
34 | bonerot: leye:leyebone:z
35 | - varpath: m_eyeGazeRightPitch
36 | bonerot: reye:reyebone:x
37 | - varpath: m_eyeGazeRightYaw
38 | bonerot: reye:reyebone:z
39 |
40 |
41 | #- name: full_head
42 | # class: state.CompositeOut
43 | # enabled: true
44 | # outputs:
45 | # - neck_euler
46 | # - face
47 | # - eyes
48 |
49 | #--Beorn rig outputs--
50 | - name: full_head
51 | class: state.CompositeOut
52 | enabled: true
53 | outputs:
54 | - face
55 | - eyes_beorn
56 | - neck_euler_beorn
57 |
58 | - name: eyes_beorn
59 | class: state.ParametersOut
60 | enabled: true
61 | pubtopic: /arthur/cmd_eyes_pau
62 | msg: pau2motors.msg.pau
63 | binding:
64 | - varpath: m_eyeGazeLeftPitch
65 | bonerot: Armature:leyebone:x
66 | - varpath: m_eyeGazeLeftYaw
67 | bonerot: Armature:leyebone:z
68 | - varpath: m_eyeGazeRightPitch
69 | bonerot: Armature:reyebone:x
70 | - varpath: m_eyeGazeRightYaw
71 | bonerot: Armature:reyebone:z
72 |
73 | - name: neck_euler_beorn
74 | class: state.ParametersOut
75 | enabled: true
76 | pubtopic: /arthur/point_head
77 | msg: basic_head_api.msg.PointHead
78 | binding: #bonerot format: armature_obj:bone:axis[:parent_bone]
79 | - varpath: yaw
80 | bonerot: Armature:neck_base:y
81 | scale: 2
82 | - varpath: pitch
83 | bonerot: Armature:bracket3.001:x:neck_base #Sums up both pitch motors
84 | scale: 2
85 | - varpath: roll
86 | bonerot: Armature:bracket3.001:z:neck_base
87 | scale: -1.5
88 |
89 | #--Motor-Coupled outputs
90 | - name: bone_array
91 | class: state.ParametersOut
92 | enabled: false
93 | pubtopic: cmd_joints
94 | msg: trajectory_msgs.msg.JointTrajectory
95 | binding:
96 | - varpath: joint_names
97 | childbones_name: Armature
98 | - varpath: points:0:positions
99 | childbones_rot: Armature:z
100 |
101 | #--Zeno-Specific outputs--
102 | - name: animation
103 | class: TimelineAnimation
104 | enabled: false
105 |
--------------------------------------------------------------------------------
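The `bonerot` bindings above resolve through BlenderUtils. A hedged
sketch of the mapping; the real implementation lives in
src/outputs/state/ParametersOut.py, which is not shown in this section:

```
# Illustrative only: how "armature_obj:bone:axis[:parent_bone]" resolves.
import BlenderUtils

def read_bonerot(bonerot, scale=1.0):
    parts = bonerot.split(":")
    refbone = parts[3] if len(parts) > 3 else None
    return scale * BlenderUtils.get_bones_rotation_rad(
        parts[0], parts[1], parts[2], refbone)

pitch = read_bonerot("Armature:bracket3.001:x:neck_base", scale=2)
```
--------------------------------------------------------------------------------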
/src/inputs/Faceshift.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import rospy
3 | import bpy
4 | from .bases import InputBase
5 | from pau2motors.msg import pau
6 | import ShapekeyStore
7 | import Utils
8 | class Faceshift(InputBase):
9 | """
10 | Shapekey application on mesh - tested and working. Singlebinds - not tested.
11 |
12 | TODO: Figure out another approach instead of setting the shapekeys
13 | directly to the primary mesh (this way no chance is given for the
14 | controller to select and combine the faceshift input). Maybe we should map
15 | faceshift input to another mesh placed next to the primary one.
16 | """
17 |
18 | def build_set_bone_position(self, singlebind_dict):
19 | keychain = Utils.DictKeyChain(singlebind_dict["varpath"].split(":"))
20 | bone_parent, bone, axis = singlebind_dict["bonerot"].split(":")
21 | rot_index = {"x": 0, "y": 1, "z": 2}[axis]
22 |
23 | def processor(msg):
24 | bpy.data.objects[bone_parent].pose.bones[bone].rotation_mode = 'XYZ'
25 | rot = bpy.data.objects[bone_parent].pose.bones[bone].rotation_euler
26 | rot[rot_index] = keychain.get_from(msg)
27 | bpy.data.objects[bone_parent].pose.bones[bone].rotation_euler = rot
28 |
29 | return processor
30 |
31 | def handle_source(self, msg):
32 | if not self.confentry["enabled"]:
33 | return
34 |
35 | # Apply shapekeys
36 | meshname = self.confentry["binding"]["meshname"]
37 | for i in range(len(msg.m_coeffs)):
38 | shapename = ShapekeyStore.getString(i)
39 | bpy.data.meshes[meshname].shape_keys.key_blocks[shapename].value = msg.m_coeffs[i]
40 |
41 | # Process single binds
42 | for processor in self.singlebind_processors:
43 | processor(msg)
44 |
45 |
46 | def __init__(self, confentry):
47 | self.confentry = confentry
48 |
49 | # Build processors for entries in confentry["binding"]["singles"]
50 | self.singlebind_processors = []
51 | for singlebind in self.confentry["binding"]["singles"]:
52 | self.singlebind_processors.append(
53 | self.build_set_bone_position(singlebind)
54 | )
55 |
56 | rospy.Subscriber(confentry["sourcetopic"], pau, self._pend_msg(self.handle_source))
--------------------------------------------------------------------------------
/src/inputs/NRegionsOfInterest.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import rospy
3 | from .bases import InputBase
4 | from .bases.ObjectInBlender import ObjectInBlender
5 | import sensor_msgs.msg, eva_behavior.msg
6 | from mathutils import Vector
7 | import copy
8 | import posixpath
9 |
10 | class Face:
11 | """
12 | Keep track of a single ROI (i.e. a single face in the field of view).
13 | Subscribes to roi topic, updates its position in blender space.
14 | """
15 | def __init__(self, face_id, confentry, get_camerainfo):
16 | face_id = int(face_id)
17 |
18 | #Generate name in binding config
19 | source_topic = posixpath.join(
20 | confentry["rosbind"]["namespace"],
21 | confentry["rosbind"]["sourcetopic"] % face_id
22 | )
23 | blenderbind = copy.deepcopy(confentry["blenderbind"])
24 | blenderbind["objectpos"]["name"] = source_topic
25 | # The Blender file with animations sometimes throws an exception here.
26 | # Since that rig does not need this input, we can ignore it.
27 | try:
28 | #Nest an ObjectInBlender instance
29 | self.obj_in_blender = ObjectInBlender.from_binding(blenderbind)
30 |
31 | self.id = face_id
32 | self.get_camerainfo = get_camerainfo
33 |
34 | #Subscribe to generated topic
35 | self.topic = rospy.Subscriber(
36 | source_topic,
37 | sensor_msgs.msg.RegionOfInterest,
38 | self.obj_in_blender._pend_msg(self._handle_source)
39 | )
40 | except RuntimeError:
41 | pass
42 |
43 | def destroy(self):
44 | self.topic.unregister()
45 | self.obj_in_blender.destroy()
46 |
47 | def _roi2point(self, msg):
48 | """
49 | Returns a normalized point at the center of the given RegionOfInterest
50 | message.
51 | """
52 | camerainfo = self.get_camerainfo()
53 | p = Vector([0,0,0])
54 | if camerainfo.width > 0:
55 | p.x = 0.5 - (msg.x_offset+(msg.width/2.0))/camerainfo.width
56 | if camerainfo.height > 0:
57 | p.z = 0.5 - (msg.y_offset+(msg.height/2.0))/camerainfo.height
58 | return p
59 |
60 | def _handle_source(self, roi):
61 | point = self._roi2point(roi)
62 | self.obj_in_blender.set_object_location(point)
63 |
64 | class NRegionsOfInterest(InputBase):
65 | """
66 | Dynamically subscribe and unregister from topics according to events sent to
67 | the 'eventtopic' (specified in config).
68 | """
69 | def __init__(self, confentry):
70 | #Make sure namespace parameter is there, even if it's empty
71 | if not "namespace" in confentry["rosbind"]:
72 | confentry["rosbind"]["namespace"] = ""
73 |
74 | self.confentry = confentry
75 | self.topic = rospy.Subscriber(
76 | posixpath.join(
77 | confentry["rosbind"]["namespace"],
78 | confentry["rosbind"]["eventtopic"]
79 | ),
80 | eva_behavior.msg.event,
81 | self._pend_msg(self._handle_event)
82 | )
83 |
84 | #A dictionary {face id: Face instance} of the faces pi_vision is currently tracking
85 | self.faces = {}
86 |
87 | def handle_camerainfo(msg):
88 | self.camerainfo = msg
89 | ci_sub.unregister()
90 | ci_sub = rospy.Subscriber(
91 | posixpath.join(confentry["rosbind"]["namespace"], "camera/camera_info"),
92 | sensor_msgs.msg.CameraInfo,
93 | handle_camerainfo
94 | )
95 |
96 | def get_camerainfo(self):
97 | return self.camerainfo
98 |
99 | def _handle_event(self, msg):
100 | if msg.event == "new_face":
101 | self._add_face(int(msg.param))
102 | elif msg.event == "exit":
103 | self._remove_face(int(msg.param))
104 |
105 | def _add_face(self, face_id):
106 | self.faces[face_id] = Face(
107 | face_id,
108 | self.confentry,
109 | self.get_camerainfo
110 | )
111 |
112 | def _remove_face(self, face_id):
113 | if face_id in self.faces:
114 | self.faces[face_id].destroy()
115 | del self.faces[face_id]
116 |
--------------------------------------------------------------------------------
/src/inputs/RegionOfInterest.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import rospy
3 | from .bases.ObjectInBlender import ObjectInBlender
4 | import sensor_msgs.msg
5 | from mathutils import Vector
6 |
7 | class RegionOfInterest(ObjectInBlender):
8 |
9 | def roi2point(self, msg):
10 | """
11 | Returns a normalized point at the center of the given RegionOfInterest
12 | message.
13 | """
14 | p = Vector([0,0,0])
15 | if self.camerainfo.width > 0:
16 | p.x = 0.5 - (msg.x_offset+(msg.width/2.0))/self.camerainfo.width
17 | if self.camerainfo.height > 0:
18 | p.z = 0.5 - (msg.y_offset+(msg.height/2.0))/self.camerainfo.height
19 | return p
20 |
21 | def handle_source(self, msg):
22 | point = self.roi2point(msg)
23 | self.set_object_location(point)
24 |
25 | def change_topic(self,topic):
26 | if self.topic_name != topic:
27 | self.topic.unregister()
28 | self.topic = rospy.Subscriber(
29 | topic,
30 | sensor_msgs.msg.RegionOfInterest,
31 | self._pend_msg(self.handle_source)
32 | )
33 | self.topic_name = topic
34 |
35 |
36 | def __init__(self, confentry):
37 | self.topic = rospy.Subscriber(
38 | confentry["sourcetopic"],
39 | sensor_msgs.msg.RegionOfInterest,
40 | self._pend_msg(self.handle_source)
41 | )
42 | self.topic_name = confentry["sourcetopic"]
43 |
44 | def handle_camerainfo(msg):
45 | self.camerainfo = msg
46 | ci_sub.unregister()
47 | ci_sub = rospy.Subscriber(
48 | '/camera/camera_info',
49 | sensor_msgs.msg.CameraInfo,
50 | handle_camerainfo
51 | )
52 |
53 |
54 | super(RegionOfInterest, self).__init__(confentry)
55 |
--------------------------------------------------------------------------------
/src/inputs/__init__.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import sys, Utils, inspect, traceback, rospy
3 |
4 | __doc__ = """
5 | inputs.py is a module that holds classes and their instances that are
6 | responsible for transferring input data (like from sensors) to Blender space.
7 |
8 | E.g. 'pivision' and 'nmpt_saliency' inputs are instances of RegionOfInterest, which
9 | inherits from ObjectInBlender and control small cubes in Blender space
10 | according to received ROS messages.
11 |
12 | Input instances can be reached by either:
13 | inputs.store.<name> or inputs.get_instance("<name>"),
14 | where <name> can be pivision, nmpt_saliency, etc.
15 |
16 | See config/inputs.yaml for available input instances.
17 |
18 | All input classes should inherit from inputs.bases.InputBase
19 | """
20 |
21 | class PendingMsg:
22 | """
23 | A massage and its handler stored to be executed later in a Blender callback
24 | for thread safety.
25 |
26 | Takes the role of what in a previous implementation was called an "op" in
27 | the list "ops".
28 | """
29 |
30 | def execute(self):
31 | """ Should be called from within a Blender callback only. """
32 | self.handler(self.msg)
33 |
34 | def pend(self):
35 | """Call this to enqueue the message for the next Blender callback. """
36 | pend_msg(self)
37 |
38 | def __init__(self, msg, handler):
39 | self.msg = msg
40 | self.handler = handler
41 |
42 | ops = []
43 | def pend_msg(msg):
44 | global ops
45 | ops.append(msg)
46 |
47 | def execute_pending():
48 | """ Should be called from within a Blender callback only. """
49 | global ops
50 | for op in ops:
51 | op.execute()
52 | ops = []
53 |
54 |
55 |
56 | class InstanceStore:
57 | """ Here is where instances of outputs are stored. """
58 |
59 | @staticmethod
60 | def _get_class(name):
61 | module_name = ".".join([__name__, name])
62 | member_name = ".".join([__name__] + [name]*2)
63 | try:
64 | return Utils.import_member(member_name)
65 | except ImportError as err:
66 | if err.name == module_name:
67 | raise NameError("Couldn't find module %s" % module_name)
68 | else:
69 | raise
70 | except AttributeError:
71 | raise NameError("Couldn't load class %s" % member_name)
72 |
73 | @classmethod
74 | def _build_single(cls, confentry):
75 | return cls._get_class(confentry["class"])(confentry)
76 |
77 | def _store_instance(self, name, instance):
78 | self._instances[name] = instance
79 | setattr(self, name, instance)
80 |
81 | def __init__(self, fullconfig):
82 | # Reserve member names
83 | self.orig_members = [member[0] for member in inspect.getmembers(self)]
84 |
85 | # Build and store outputs out of the config
86 | self._instances = {}
87 | for confentry in fullconfig:
88 | try:
89 | if confentry["name"][0] == "_":
90 | raise NameError("Input name %s can't start with _" % confentry["name"])
91 | if confentry["name"] in self.orig_members:
92 | raise NameError("Input name %s is reserved" % confentry["name"])
93 | self._store_instance(confentry["name"], self._build_single(confentry))
94 | except:
95 | print("Exception during input '%s' load:" % confentry["name"])
96 | traceback.print_exc()
97 | rospy.logwarn("Couldn't load input '%s'" % confentry["name"])
98 |
99 | def initialize(fullconfig):
100 | global store
101 | store = InstanceStore(fullconfig)
102 |
103 | def get_instance(name):
104 | return store._instances[name]
105 |
--------------------------------------------------------------------------------
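The PendingMsg queue is the thread-safety bridge: ROS callback threads
only enqueue, and the scene-update handler in main.py drains the queue
with execute_pending(). A toy sketch of the flow, with a plain string
standing in for a ROS message:

```
import inputs

def handler(msg):
    print("runs later, inside a Blender callback:", msg)

inputs.PendingMsg("hello", handler).pend()  # safe from any ROS thread
inputs.execute_pending()                    # called from scene_update_pre
```
--------------------------------------------------------------------------------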
/src/inputs/bases/ObjectInBlender.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import bpy
3 | from . import InputBase
4 | from mathutils import Vector
5 |
6 | class ObjectSelection:
7 | """
8 | Saves and restores selection (including the active object) to allow bpy
9 | operations without disturbing user's perceived selection.
10 | """
11 |
12 | def __init__(self):
13 | active = bpy.context.scene.objects.active
14 | self.active_name = None if active == None else active.name
15 | self.selected_names = [obj.name for obj in bpy.context.selected_objects]
16 |
17 | def restore(self):
18 | bpy.ops.object.select_all(action="DESELECT")
19 | bpy.context.scene.objects.active = (
20 | bpy.data.objects[self.active_name]
21 | if self.active_name != None and self.active_name in bpy.context.scene.objects
22 | else None
23 | )
24 | for name in self.selected_names:
25 | bpy.ops.object.select_pattern(pattern=name)
26 |
27 | class ObjectInBlender(InputBase):
28 | """
29 | A base class that represents a sensory input as an object in Blender space.
30 | """
31 |
32 | #The name of the desired parent object
33 | GROUP_NAME = "inputs"
34 |
35 | def __init__(self, confentry):
36 | self.confentry = confentry
37 | self.binding = confentry["binding"]["objectpos"]
38 | self._location = Vector()
39 | self._update_from_object()
40 |
41 | @classmethod
42 | def from_binding(cls, binding):
43 | return cls({"enabled": True, "binding": binding})
44 |
45 | def destroy(self):
46 | if not self.binding["name"] in bpy.data.objects:
47 | return
48 | #Backup current selection
49 | selection = ObjectSelection()
50 | #Delete the bound object
51 | bpy.ops.object.select_all(action="DESELECT")
52 | bpy.data.objects[self.binding["name"]].select = True
53 | bpy.ops.object.delete()
54 | #Restore selection
55 | selection.restore()
56 |
57 | def set_object_location(self, point):
58 | self._confirm_object(self.binding["name"])
59 | # Go straight to _set_object_location() next time.
60 | self.set_object_location = self._set_object_location
61 | self.set_object_location(point)
62 |
63 | @property
64 | def location(self):
65 | self._update_from_object()
66 | return self._location
67 |
68 | def _set_object_location(self, point):
69 | if self.confentry["enabled"]:
70 | offset = None
71 | if isinstance(self.binding["offset"], list):
72 | offset = Vector(self.binding["offset"])
73 | else:
74 | offset = bpy.data.objects[self.binding["offset"]].matrix_world.to_translation()
75 | # Check if vector and distance is set
76 | if 'direction' in self.binding:
77 | distance = 1
78 | if 'distance' in self.binding:
79 | distance = self.binding['distance']
80 | d = bpy.data.objects[self.binding["direction"]].location-offset
81 | d.normalize()
82 | offset = offset + d * distance
83 | self._location = offset + point * self.binding["scale"]
84 | bpy.data.objects[self.binding["name"]].location = self._location
85 |
86 | @classmethod
87 | def _confirm_group(cls):
88 | """Creates an empty object to put the input objects in, if it's not there."""
89 | if cls.GROUP_NAME in bpy.data.objects:
90 | return
91 | #Backup current selection
92 | selection = ObjectSelection()
93 | #Create empty object
94 | bpy.ops.object.empty_add()
95 | new_group = bpy.context.selected_objects[0]
96 | new_group.name = cls.GROUP_NAME
97 | new_group.hide = True
98 | #Restore selection
99 | selection.restore()
100 |
101 | @classmethod
102 | def _confirm_object(cls, name):
103 | """Create blender object with the specified name if it's not there."""
104 | if name in bpy.data.objects:
105 | return
106 | cls._confirm_group()
107 | #Backup current selection
108 | selection = ObjectSelection()
109 | #Create cube
110 | bpy.ops.mesh.primitive_cube_add(radius=0.024)
111 | new_cube = bpy.context.selected_objects[0]
112 | new_cube.parent = bpy.data.objects[cls.GROUP_NAME]
113 | new_cube.name = name
114 | cls._hash_color(new_cube)
115 | #Restore selection
116 | selection.restore()
117 |
118 | @staticmethod
119 | def _hash_color(obj):
120 | """ Color object by its first two letters from its name. """
121 | name_hash = hash(obj.name[:2])
122 | color = (
123 | (name_hash >> 16) % 256,
124 | (name_hash >> 8) % 256,
125 | name_hash % 256
126 | )
127 | mat_name = "#%02X%02X%02X" % color
128 | mat = (
129 | bpy.data.materials[mat_name] if mat_name in bpy.data.materials
130 | else bpy.data.materials.new(mat_name)
131 | )
132 | mat.diffuse_color = tuple([i / 256 for i in color])
133 | obj.data.materials.append(mat)
134 |
135 | def _update_from_object(self):
136 | """Sync the cached location from the bound blender object, if present."""
137 | if self.binding["name"] in bpy.data.objects:
138 | self._location = bpy.data.objects[self.binding["name"]].location
139 |
--------------------------------------------------------------------------------
/src/inputs/bases/__init__.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | from .. import PendingMsg
3 |
4 | class InputBase:
5 | """ All input classes should implement this base class. """
6 |
7 | def _pend_msg(self, handler):
8 | """
9 | Wrap your incoming ROS message handlers with this method for thread
10 | safety. Handler execution will be postponed until the next Blender
11 | callback.
12 | """
13 | return lambda msg: PendingMsg(msg, handler).pend()
14 |
15 | def __init__(self, confentry):
16 | """
17 | On construction input classes are given an entry from inputs.yaml config.
18 | """
19 | raise NotImplementedError
--------------------------------------------------------------------------------
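A minimal input honoring this contract might look like the hypothetical
sketch below (the class name, topic type, and placement in src/inputs/
are invented for illustration):

```
import rospy
from std_msgs.msg import String
from .bases import InputBase

class EchoInput(InputBase):
    """Prints every message, deferred to Blender's thread via _pend_msg."""

    def handle(self, msg):
        print("EchoInput:", msg.data)

    def __init__(self, confentry):
        self.confentry = confentry  # entry from config/inputs.yaml
        rospy.Subscriber(
            confentry["sourcetopic"], String, self._pend_msg(self.handle))
```
--------------------------------------------------------------------------------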
/src/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
3 | import rospy
4 | import bpy
5 | import math
6 | import yaml
7 | import time
8 | import os
9 | import Utils
10 | from std_msgs.msg import String
11 | from bpy.app.handlers import persistent
12 | import modes, inputs, outputs
13 |
14 | # TODO: Cannot use set keys as they can be saved to blender file
15 | class PersistentParams:
16 |
17 | dictname = "robo_blender"
18 |
19 | def get(self, key):
20 | return bpy.data.scenes[0][self.dictname].get(key)
21 |
22 | def set(self, key, val):
23 | bpy.data.scenes[0][self.dictname][key] = val
24 |
25 | def __init__(self):
26 | #If bpy.data.scenes[0] is stored locally, referencing it often causes Blender
27 | #to crash after Ctrl+Z (undo) is pressed.
28 | #Referencing the scene only from within bpy seems to work fine though.
29 | if not self.dictname in bpy.data.scenes[0].keys():
30 | bpy.data.scenes[0][self.dictname] = {}
31 |
32 |
33 | class RoboBlender:
34 |
35 | config_dir = "config"
36 | step_in_process = False
37 | modes = []
38 |
39 |
40 | def handle_blendermode(self, msg):
41 | # Disable if mode not allowed
42 | msg = msg.data
43 | if not msg in self.modes:
44 | rospy.loginfo("Unsupported mode %s" % msg)
45 | msg = 'Dummy'
46 |
47 | modes.enable(msg)
48 |
49 | def step(self, dt):
50 | inputs.execute_pending()
51 | modes.step(dt)
52 |
53 | def execute(self):
54 | # Try to shut down if the script is already running.
55 | blparams = PersistentParams()
56 | if blparams.get("running"):
57 | blparams.set("running", False)
58 | return
59 | blparams.set("running", True)
60 |
61 | rospy.init_node('robo_blender', anonymous=True)
62 |
63 | inputs_config = []
64 | # The animations blender file does not require inputs.
65 | if not 'Animations' in self.modes:
66 | inputs_config = Utils.read_yaml(os.path.join(self.config_dir, "inputs.yaml"))
67 | inputs.initialize(inputs_config)
68 | outputs.initialize(
69 | Utils.read_yaml(os.path.join(self.config_dir, "outputs.yaml"))
70 | )
71 | rospy.Subscriber('cmd_blendermode', String, self.handle_blendermode)
72 |
73 | @persistent
74 | def handle_scene_update(dummy):
75 | # Check whether to shut down
76 | if not blparams.get("running"):
77 | bpy.app.handlers.scene_update_pre.remove(handle_scene_update)
78 | rospy.loginfo("ROBO: Ended")
79 | return
80 |
81 | # Limit execution to intervals of self.frame_interval
82 | t = time.time()
83 | if t - self.lastframe < self.frame_interval or self.step_in_process:
84 | return
85 | self.lastframe = self.lastframe + self.frame_interval
86 | # Limited execution starts here.
87 | self.step_in_process = True
88 | self.step(self.frame_interval)
89 | self.step_in_process = False
90 | # The step_in_process flag above prevents infinite recursion in case step()
91 | # causes Blender to invoke handle_scene_update again
92 |
93 |
94 | self.lastframe = time.time()
95 | bpy.app.handlers.scene_update_pre.append(handle_scene_update)
96 |
97 | rospy.loginfo("Available modes: " + str(self.modes).strip('[]'))
98 |
99 | # Enable default mode
100 | modes.enable("Dummy")
101 | rospy.loginfo("ROBO: Started")
102 |
103 | def __init__(self):
104 | self.config = Utils.read_yaml(os.path.join(self.config_dir, "config.yaml"))
105 | self.frame_interval = 1.0/self.config["fps"]
106 | if 'targets' in bpy.data.objects:
107 | self.modes = ['Dummy', 'TrackDev', 'LookAround']
108 | else:
109 | self.modes = ['Dummy', 'Animations']
110 |
111 | print("ROBO: Loading")
112 | robo = RoboBlender()
113 | robo.execute()
114 |
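A note on the rate limiting in handle_scene_update above: Blender fires scene_update_pre far more often than the configured fps, so step() is gated by wall-clock time and guarded against re-entry. The same pattern in isolation (a sketch; the names here are illustrative, not part of the module):

    import time

    frame_interval = 1.0 / 30          # as if config.yaml said fps: 30
    lastframe = time.time()
    step_in_process = False

    def step(dt):
        pass                           # stand-in for RoboBlender.step

    def on_scene_update(dummy):
        global lastframe, step_in_process
        t = time.time()
        # Drop callbacks that arrive before the next frame slot, and
        # re-entrant calls triggered by step() itself.
        if t - lastframe < frame_interval or step_in_process:
            return
        lastframe += frame_interval    # advance by a fixed slot, not to t
        step_in_process = True
        try:
            step(frame_interval)
        finally:
            step_in_process = False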
--------------------------------------------------------------------------------
/src/modes/Animations.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | from .controllers import primary
3 | from .controllers import animate
4 | import inputs, outputs
5 | import bpy
6 | import rospy
7 | from std_msgs.msg import String
8 | from robo_blender.msg import animations_list
9 |
10 | from blender_api_msgs.msg import AvailableEmotionStates
11 | from blender_api_msgs.msg import AvailableGestures
12 | from blender_api_msgs.msg import EmotionState
13 |
14 |
15 | class Animations:
16 | """
17 | Subscribes to the /cmd_animations topic, and listens for play,
18 | pause and stop messages. Queues up and starts/runs the animations
19 | appropriately. Also publishes the list of supported animations
20 | at /animations_list.
21 | """
22 | def __init__(self):
23 | # Next animation to play. If not set, playback stops
24 | self.init = False
25 | self.next = None
26 | self.command = None
27 | self.isPlaying = False
28 |
29 | self.current = None
30 |
31 | # Parse the old-style command string. We are expecting it to be
32 | # either play:animation_name, stop or pause
33 | def parseCommand(self, msg):
34 | msg = msg.data
35 | data = msg.split(":", 2)
36 | command = data[0]
37 |
38 | # Start playing the animation, or resume the current animation.
39 | # If an animation is playing, then this sets the next animation.
40 | if command == 'play':
41 | self.command = 'play'
42 |
43 | # The animation name could be empty; that's OK.
44 | # Do not clobber the current animation, if a bogus
45 | # animation name was provided.
46 | if len(data) > 1:
47 | if data[1] not in self.animationsList:
48 | rospy.logerr("Unknown animation: " + data[1])
49 | else:
50 | self.next = data[1]
51 | rospy.loginfo("Next animation: " + self.next)
52 | elif self.current:
53 | rospy.loginfo("Resume animation: " + self.current)
54 |
55 | # Halt the animation and stop playing
56 | elif command == 'stop':
57 | self.command = 'stop'
58 | self.next = None
59 | rospy.loginfo("Stop animation")
60 | if not self.current:
61 | rospy.logwarn("Stop: no animation playing")
62 |
63 | # Pause animation immediately
64 | elif command == 'pause':
65 | self.command = 'pause'
66 | rospy.loginfo("Pause animation")
67 |
68 | else:
69 | rospy.logerr("Unsupported command: " + command)
70 |
71 | # Sets next animation
72 | def _setNext(self):
73 | self.current = self.next
74 |
75 | # Don't crash if next animation is 'None'
76 | if self.current:
77 | self.anim.setAnimation(self.current)
78 |
79 | # The new-style blender_api_msgs interface
80 | def setEmotionState(self, msg):
81 | self.command = 'play'
82 | if msg.name not in self.animationsList:
83 | rospy.logerr("Unknown animation: " + msg.name)
84 | else:
85 | self.next = msg.name
86 | rospy.loginfo("Next animation: " + self.next)
87 |
88 |
89 | # This is called every frame
90 | def step(self, dt):
91 | # Make sure we access bpy data and do other tasks in the blender thread
92 | if not self.init:
93 | self.anim = animate.Animate('Armature')
94 | self.animationsList = self.anim.getAnimationList()
95 |
96 | # Initialize the old-style ROS node
97 | self.animationsPub = rospy.Publisher('animations_list',
98 | animations_list, latch=True, queue_size=10)
99 | self.animationsPub.publish(list(self.animationsList.keys()))
100 |
101 | rospy.Subscriber('cmd_animations', String, self.parseCommand)
102 |
103 | # Initialize the new blender_api_msgs ROS node
104 | self.emoPub = rospy.Publisher(
105 | '/blender_api/available_emotion_states',
106 | AvailableEmotionStates, latch=True, queue_size=10)
107 | self.emoPub.publish(list(self.animationsList.keys()))
108 |
109 | # XXX FIXME No gestures!?!
110 | self.gestPub = rospy.Publisher(
111 | '/blender_api/available_gestures',
112 | AvailableGestures, latch=True, queue_size=10)
113 | self.gestPub.publish(list())
114 |
115 | rospy.Subscriber('/blender_api/set_emotion_state',
116 | EmotionState, self.setEmotionState)
117 |
118 | # Other initializations
119 | self.init = True
120 | self.anim.resetAnimation()
121 |
122 | # Parse any pending commands
123 | if self.command:
124 | if self.command == 'play':
125 | if not self.isPlaying:
126 | if not self.current:
127 | self._setNext()
128 | # If next was null, then, after above, current will
129 | # become null, too.
130 | if self.current:
131 | self.isPlaying = True
132 | self.anim.playAnimation()
133 | else:
134 | rospy.logwarn("Play: no pending animation to restart")
135 |
136 | elif self.command == 'pause':
137 | if self.isPlaying:
138 | self.anim.stopAnimation()
139 | self.isPlaying = False
140 | else:
141 | rospy.logwarn("Pause: no animation playing, can't pause")
142 | self.command = None
143 |
144 | if self.isPlaying:
145 |
146 | rospy.logdebug(self.animationsList[self.current]['length'])
147 | rospy.logdebug(bpy.context.scene.frame_current)
148 | # self.anim.playAnimation()
149 |
150 | # Check to see if animation is done
151 | if bpy.context.scene.frame_current > self.animationsList[self.current]['length']:
152 | if self.next:
153 | self._setNext()
154 | self.anim.playAnimation()
155 | else:
156 | self.isPlaying = False
157 | self.current = None
158 | self.anim.resetAnimation()
159 | outputs.store.full_head.transmit()
160 |
161 |
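The /cmd_animations string protocol parsed above can be driven from any ROS node. A hedged example driver ('smile' is a placeholder; real names are whatever actions the blend file holds, as published on /animations_list):

    import rospy
    from std_msgs.msg import String

    rospy.init_node('anim_demo')
    pub = rospy.Publisher('cmd_animations', String, queue_size=1)
    rospy.sleep(1.0)             # give the connection time to establish
    pub.publish('play:smile')    # set and start the next animation
    rospy.sleep(5.0)
    pub.publish('pause')         # pause immediately
    rospy.sleep(2.0)
    pub.publish('play')          # resume the current animation
    rospy.sleep(2.0)
    pub.publish('stop')          # halt and clear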
--------------------------------------------------------------------------------
/src/modes/Dummy.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | from .controllers import primary
3 | from .controllers import demo_loops
4 | import inputs, outputs
5 | import rospy
6 | from .controllers import animate
7 |
8 | class Dummy:
9 | """
10 | Does nothing. Stops other blender modes.
11 | """
12 |
13 | def __init__(self):
14 | self.init = False
15 |
16 | def step(self, dt):
17 | if not self.init:
18 | self.anim = animate.Animate('Armature')
19 | self.anim.stopAnimation()
20 | self.init = True
21 | return True
22 |
--------------------------------------------------------------------------------
/src/modes/LookAround.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | from .controllers import primary
3 | from .controllers import tracking
4 | import inputs, outputs
5 | import rospy
6 | import cmath
7 | from mathutils import Vector
8 | import random
9 | import math
10 |
11 | class ImaginaryObject:
12 |
13 | location = Vector((0.0, 0.0, 0.0))
14 |
15 | @staticmethod
16 | def random_vector(vector_mu, radius_sig):
17 | offset2D = cmath.rect(
18 | random.gauss(0, radius_sig),
19 | random.uniform(0, 2*math.pi)
20 | )
21 |
22 | #Scaled so the points are stretched less along the vertical axis
23 | offset = Vector((offset2D.real, 0.0, 0.5 * offset2D.imag))
24 | return vector_mu + offset
25 |
26 | def step(self, dt):
27 | if self.timer.step(dt):
28 | self.location = self.random_vector(*self.space_mu_sig)
29 |
30 | def __init__(self, space_mu_sig, time_mu_sig):
31 | """
32 | Arguments:
33 | * space_mu_sig: tuple (Vector around which to generate random location, standard deviation of the radius)
34 | * time_mu_sig: tuple (Average time in which to generate new location, standard deviation of that time)
35 | """
36 | self.space_mu_sig = space_mu_sig
37 | self.timer = tracking.RandomTimer(time_mu_sig, True)
38 |
39 | class LookAround:
40 | """ Implemeent the LookAround mode."""
41 | def step(self, dt):
42 | self.interest.step(dt)
43 | self.tracking_ctrl.step(dt)
44 | outputs.store.eyes.transmit()
45 | outputs.store.neck_euler.transmit()
46 |
47 | def __init__(self):
48 | self.interest = ImaginaryObject(
49 | (Vector((0.0, -1.0, 0.45)), 0.4),
50 | (3.0, 1.5)
51 | )
52 | self.tracking_ctrl = tracking.TrackSaccadeCtrl(self.interest)
53 |
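The sampling scheme in random_vector above (Gaussian-distributed radius via cmath.rect, uniform angle, vertical component halved) can be tried outside Blender; a small sketch using plain tuples instead of mathutils Vectors:

    import cmath, math, random

    def random_offset(radius_sig):
        # Same scheme as ImaginaryObject.random_vector.
        p = cmath.rect(random.gauss(0, radius_sig),
                       random.uniform(0, 2 * math.pi))
        return (p.real, 0.0, 0.5 * p.imag)

    center = (0.0, -1.0, 0.45)        # the gaze center used by LookAround
    for _ in range(3):
        off = random_offset(0.4)
        print(tuple(c + o for c, o in zip(center, off)))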
--------------------------------------------------------------------------------
/src/modes/ManualHead.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | from .controllers import primary
3 | from .controllers import demo_loops
4 | import inputs, outputs
5 | import rospy
6 |
7 | class ManualHead:
8 | """
9 | Transmits the current neck, face and eye position without actually
10 | controlling anything.
11 | """
12 |
13 | def step(self, dt):
14 | outputs.store.full_head.transmit()
15 |
--------------------------------------------------------------------------------
/src/modes/SmartTrack.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import sys
3 | import bpy, rospy
4 | import Utils
5 | from basic_head_api.msg import MakeFaceExpr
6 | import inputs, outputs
7 | import queue, math, random, time
8 |
9 | class SmartTrack:
10 |
11 | class MovingTarget:
12 | target = None
13 | dest = None
14 | # Point relative to the head around which we move objects
15 | offset = None
16 | #Speed is in radians per frame
17 | speed = 0.01
18 | # variables for operation
19 | startLocation = None
20 | destLocation = None
21 | steps = 0
22 | currentStep = 0
23 | totalSteps = 0
24 |
25 | def __init__(self, target,destination,offset,speed):
26 | self.target = target
27 | self.dest = destination
28 | self.offset = offset
29 | if speed > 0:
30 | self.speed = speed
31 |
32 |
33 | def move(self):
34 | diff = self.dest.location - self.target.location
35 | #ignore very small differences
36 | if (diff.length < 0.0001):
37 | return
38 | #start new movement if destination location is changed
39 | if (self.destLocation != self.dest.location) :
40 | self.destLocation = self.dest.location.copy()
41 | self.currentStep = 0
42 | #calculate total steps
43 | v1 = self.target.location - self.offset
44 | v2 = self.dest.location - self.offset
45 | ang = v1.angle(v2,0)
46 | self.totalSteps = max(int(ang/self.speed),1)
47 | self.startLocation = self.target.location.copy()
48 | self.currentStep = self.currentStep +1
49 | # Check in case
50 | if self.currentStep > self.totalSteps:
51 | return
52 |
53 | v1 = self.dest.location - self.offset
54 | v2 = self.startLocation - self.offset
55 | v3 = v2.lerp(v1,self.currentStep/self.totalSteps)
56 | self.target.location = v3 + self.offset
57 |
58 | class Behavior:
59 |
60 | def __init__(self,fps,pub):
61 | #ticks synced with robo blender
62 | self.idle = 0
63 | self.current_person = 0
64 | self.current_emotion = None
65 | self.current_intensity = 0
66 | self.emotions = queue.Queue()
67 | #headmovement
68 | self.positions = queue.Queue()
69 | self.distance = 0.
70 | self.fps = fps
71 | self.currentTimer = 0
72 | self.randomEm = ["eureka","innocent","annoyed", "evil","horrified"]
73 | self.pub = pub
74 |
75 | def tick(self):
76 | facepos = inputs.get_instance("pivision").location
77 | self.positions.put(facepos)
78 |
79 | old = None
80 | while self.positions.qsize() >= self.fps:
81 | old = self.positions.get()
82 | if old:
83 | diff = facepos - old
84 | self.distance = math.fabs(diff[0]) + math.fabs(diff[1]) + math.fabs(diff[2])
85 |
86 | if self.distance > 0.05:
87 | self.current_person += 1
88 | self.idle -= 1
89 | if self.current_person > 3:
90 | self.idle = 0
91 | else:
92 | self.idle += 1
93 | if self.idle > 10:
94 | self.current_person = 0
95 | # Top priority tasks:
96 | if self.current_person == 10:
97 | self.meet()
98 | if self.idle == 25:
99 | self.bye()
100 | self.process()
101 |
102 |
103 | def _em(self, emotion,intensity):
104 | msg = MakeFaceExpr()
105 | msg.exprname = emotion
106 | msg.intensity = intensity
107 | self.current_emotion = emotion
108 | self.current_intensity = intensity/11.
109 | self.pub.publish(msg)
110 |
111 | # Put all emotions to queue
112 | def _queue(self,emotion,intensity, timer):
113 | em = {'e': emotion, 'i':intensity, 't':timer}
114 | self.emotions.put(em)
115 |
116 | #Meet a new face, do a big smile
117 | def meet(self):
118 | self.emotions.queue.clear()
119 | self.currentTimer = 0
120 | self._queue('happy',11,50)
121 | self._queue('happy',10,10)
122 | self._queue('happy',9,10)
123 | self._queue('happy',8,10)
124 |
125 |
126 | def bye(self):
127 | self.emotions.queue.clear()
128 | self.currentTimer = 0
129 | self._queue('sad',7,50)
130 | self._queue('sad',8,10)
131 | self._queue('sad',9,10)
132 | self._queue('sad',10,60)
133 |
134 | def rand(self):
135 | em = random.choice(self.randomEm)
136 | i = random.randrange(7,11)
137 | timer = random.randrange(50,150)
138 | self._queue(em,i,timer)
139 |
140 | def next(self):
141 | # A face has been met and is currently detected
142 | if self.current_person > 10:
143 | if self.distance < 0.2 and self.current_intensity > 4:
144 | self._queue(self.current_emotion,self.current_intensity-1,25)
145 | return
146 | elif self.distance < 0.4:
147 | self._queue('surprised',8,25)
148 | return
149 | else:
150 | self._queue('surprised',11,25)
151 | return
152 | if self.idle > 10:
153 | if self.current_intensity > 4:
154 | self._queue(self.current_emotion,self.current_intensity-1,25)
155 | self.rand()
156 |
157 | def process(self):
158 | #no change
159 | if self.currentTimer > 0:
160 | self.currentTimer -= 1
161 | return
162 | # need to do something
163 | if self.emotions.empty():
164 | self.next()
165 | # shouldn't be empty
166 | if self.emotions.empty():
167 | return
168 | em = self.emotions.get()
169 | self.currentTimer = em['t']
170 | self._em(em['e'],em['i'])
171 |
172 | def __init__(self):
173 | self.leftover_time = 0.0
174 |
175 | self.config = Utils.read_yaml("smart_track.yaml", __file__)
176 | self.fps = self.config["fps"]
177 |
178 | self.behavior = self.Behavior(
179 | self.fps,
180 | rospy.Publisher(self.config['emo_topic'], MakeFaceExpr, queue_size=2)
181 | )
182 | self.mTarget = self.MovingTarget(bpy.data.objects['headtarget'],bpy.data.objects['pivision'],bpy.data.objects['nose'].location,0.06)
183 | self.mEyes = self.MovingTarget(bpy.data.objects['eyefocus'],bpy.data.objects['pivision'],bpy.data.objects['nose'].location,0.09)
184 |
185 | def step(self, dt):
186 | # Execute tick() at the rate of self.fps
187 | dt = dt + self.leftover_time
188 | n = int(dt*self.fps)
189 | self.leftover_time = dt - float(n)/self.fps
190 |
191 | for i in range(n):
192 | self.tick()
193 |
194 | def tick(self):
195 | self.behavior.tick()
196 | self.mTarget.move()
197 | self.mEyes.move()
198 | outputs.store.neck_euler.transmit()
199 |
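step() above turns a variable frame time dt into a whole number of fixed-rate ticks, carrying the remainder forward so no time is lost between frames. The accumulator in isolation (a sketch):

    fps = 25
    leftover = 0.0

    def on_frame(dt):
        global leftover
        total = dt + leftover
        n = int(total * fps)               # whole ticks that fit in total
        leftover = total - float(n) / fps  # carry the fraction forward
        for _ in range(n):
            print("tick")                  # stands in for self.tick()

    for dt in (0.03, 0.05, 0.02, 0.08):    # irregular frame times
        on_frame(dt)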
--------------------------------------------------------------------------------
/src/modes/SmartTrack2.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | from .controllers import primary
3 | from .controllers import demo_loops
4 | from .controllers import tracking
5 | import inputs, outputs
6 | import rospy
7 | from basic_head_api.msg import MakeFaceExpr
8 |
9 | class SmartTrack2:
10 | """
11 | Tracks the pivision input and produces random, but smooth expressions using
12 | basic_head_api.
13 | """
14 |
15 | def step(self, dt):
16 | self.tracking_ctrl.step(dt)
17 | self.exprs.step(dt)
18 | #outputs.store.neck_euler.transmit()
19 |
20 | def __init__(self):
21 | self.exprs = demo_loops.SmoothExpressions(
22 | rospy.Publisher("make_face_expr", MakeFaceExpr, queue_size=2)
23 | )
24 | self.tracking_ctrl = tracking.TrackSaccadeCtrl(inputs.store.pivision)
25 |
--------------------------------------------------------------------------------
/src/modes/TrackDev.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | from .controllers import tracking
3 | import outputs
4 | import rospy
5 | from eva_behavior.msg import tracking_action
6 |
7 | class TrackDev:
8 |
9 | def step(self, dt):
10 | self.ctrl.step(dt)
11 | outputs.store.neck_euler.transmit()
12 | outputs.store.eyes.transmit()
13 |
14 | def __init__(self):
15 | self.ctrl = tracking.TrackSaccadeCtrl()
16 |
17 | # Allow only a single subscriber in the class-wide parameter action_topic
18 | cls = type(self)
19 | old_action_topic = getattr(cls, 'action_topic', None)
20 | cls.action_topic = rospy.Subscriber('tracking_action', tracking_action, self.action_cb)
21 | if old_action_topic:
22 | old_action_topic.unregister()
23 |
24 | def action_cb(self, action):
25 | if action.action == 'track':
26 | self.ctrl.track(action.target)
27 | elif action.action == 'glance':
28 | self.ctrl.glance(action.target)
29 |
--------------------------------------------------------------------------------
/src/modes/__init__.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import sys
3 | import importlib
4 | import inspect
5 | import rospy
6 | import Utils
7 | import traceback
8 |
9 | __doc__ = """
10 | --Description--
11 | Package 'modes' holds modules that control the main flow of robot behavior.
12 | When a module is set active, its step() method is called at the fps set in
13 | config/config.yaml.
14 |
15 | A different module can be enabled at any time by calling
16 | the modes.enable() method or sending a ROS message to cmd_blendermode with a
17 | different module name. Currently only one module can be active at a time.
18 |
19 | Note: Not sure if enabling the modes repeatedly will cause memory leaks.
20 | Keep an eye out.
21 |
22 | --Creating new modes--
23 | Just create a new python file inside the 'modes' directory. It will be found
24 | automatically.
25 |
26 | The new mode file (module) needs to have a class with the same name as the
27 | module with a step(self, dt) method.
28 |
29 | Look at the example mode SmartTrack2.py, or the minimal sketch after this module, for a reference.
30 | """
31 |
32 | active = None
33 |
34 | def enable(mode_name):
35 | module_name = ".".join([__name__, mode_name])
36 | member_name = ".".join([__name__] + [mode_name]*2)
37 | try:
38 | mode = Utils.import_member(member_name)
39 | rospy.loginfo("Activating mode %s" % mode_name)
40 | except ImportError as e:
41 | traceback.print_exc()
42 | rospy.logwarn("Couldn't load module %s" % module_name)
43 | return
44 | except AttributeError as e:
45 | traceback.print_exc()
46 | rospy.logwarn("Couldn't load class %s" % member_name)
47 | return
48 |
49 | # Taking this out of the try-except block to not catch AttributeErrors
50 | # during initialization.
51 | global active
52 | active = mode()
53 |
54 | def disable():
55 | global active
56 | active = None
57 | rospy.loginfo("Active mode disabled.")
58 |
59 | def step(dt):
60 | """
61 | 'dt' holds the duration of one frame (the time since the last call) in
62 | seconds.
63 | """
64 | if active is not None:
65 | active.step(dt)
66 |
--------------------------------------------------------------------------------
/src/modes/controllers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hansonrobotics/robo_blender/b444f984e5151bdcc041dc6cd5f74712fcea20e4/src/modes/controllers/__init__.py
--------------------------------------------------------------------------------
/src/modes/controllers/animate.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import bpy, rospy
3 |
4 | class Animate:
5 | """Low-level interface to blender. Extracts the list of supported
6 | animations from blender. Starts and stops the indicated animation."""
7 | def __init__(self, obj):
8 | #Object for which actions is applied
9 | self.actionObj = bpy.data.objects[obj]
10 |
11 | # Animation list from blender
12 | def getAnimationList(self):
13 | anims = {}
14 | for s in bpy.data.actions.keys():
15 | anims[s] = {}
16 | anims[s]['length'] = bpy.data.actions[s].frame_range[1]-bpy.data.actions[s].frame_range[0]+1
17 | return anims
18 |
19 | # Sets animation
20 | def setAnimation(self,act):
21 | # Check if correct object selected
22 | if self.actionObj.animation_data is not None:
23 | try:
24 | self.actionObj.animation_data.action = bpy.data.actions[act]
25 | bpy.ops.screen.frame_jump()
26 | except KeyError:
27 | rospy.logerr("Unsupported animation name: " + act)
28 | self.actionObj.animation_data.action = None
29 |
30 | def playAnimation(self):
31 | if not bpy.context.screen.is_animation_playing:
32 | bpy.ops.screen.animation_play()
33 |
34 | def stopAnimation(self):
35 | if bpy.context.screen.is_animation_playing:
36 | bpy.ops.screen.animation_play()
37 |
38 | def resetAnimation(self):
39 | bpy.ops.screen.frame_jump()
40 | self.stopAnimation()
41 |
42 |
--------------------------------------------------------------------------------
/src/modes/controllers/demo_loops.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import random
3 | import math
4 |
5 | class SmoothExpressions:
6 | """
7 | Cycles through random face expressions, varying the intensity as a
8 | combination of two half-sine waves, one slow and one fast.
9 | """
10 |
11 | class RandomHalfSine:
12 |
13 | timepos = 0.0
14 | length = 0.0
15 |
16 | def step(self, dt):
17 | result = math.sin(self.timepos/self.length*math.pi)
18 | self.timepos += dt
19 | return result
20 |
21 | def is_over(self):
22 | return self.timepos >= self.length
23 |
24 | def start_new(self):
25 | self.length = random.uniform(*self.length_range)
26 | self.timepos = 0.0
27 |
28 | def __init__(self, length_range):
29 | self.length_range = length_range
30 | self.start_new()
31 |
32 | exprstr = None
33 | amplitude2 = 0.0
34 |
35 | def step(self, dt):
36 | intensity = self.sine1.step(dt)*1.3 - self.sine2.step(dt)*self.amplitude2
37 | self.pub.publish(self.exprstr, intensity)
38 |
39 | if self.sine1.is_over():
40 | self.start_new()
41 | if self.sine2.is_over():
42 | self.sine2.start_new()
43 | self.amplitude2 = random.uniform(0.0, 0.3)
44 |
45 | def start_new(self):
46 | self.sine1.start_new()
47 | self.exprstr = random.choice(self.random_em)
48 |
49 | def __init__(self, pub):
50 | self.pub = pub
51 | self.random_em = [
52 | "eureka","innocent","annoyed", "evil","horrified",
53 | "happy", "sad", "surprised"
54 | ]
55 | self.sine1 = self.RandomHalfSine((2.0, 10.0))
56 | self.sine2 = self.RandomHalfSine((0.5, 2.0))
57 |
58 | self.start_new()
59 |
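The published intensity is the slow half-sine scaled by 1.3 minus the fast half-sine scaled by a random amplitude up to 0.3. A stub publisher makes the curve visible without ROS (run with src/ on the path; the import assumes a ROS-enabled environment, since the modes package pulls in rospy):

    from modes.controllers.demo_loops import SmoothExpressions

    class PrintPub:
        def publish(self, exprname, intensity):
            print("%-10s %.3f" % (exprname, intensity))

    se = SmoothExpressions(PrintPub())
    for _ in range(10):
        se.step(0.2)          # ten 200 ms steps of an expression cycle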
--------------------------------------------------------------------------------
/src/modes/controllers/primary.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import bpy
3 |
4 | def get_head_target():
5 | return bpy.data.objects["headtarget"]
6 |
7 | def get_eyes_target():
8 | return bpy.data.objects["eyefocus"]
--------------------------------------------------------------------------------
/src/modes/controllers/tracking.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | from . import primary
3 | from mathutils import Vector
4 | import random
5 | import bpy
6 |
7 | class ExpectObject:
8 | """
9 | The object requested for tracking can take a few iterations to appear and
10 | may later be destroyed. This wrapper falls back gracefully to another
11 | object in those cases.
12 | """
13 | @property
14 | def location(self):
15 | obj = self.get()
16 | if obj:
17 | return obj.location
18 | elif self.fallback_obj:
19 | return self.fallback_obj.location
20 |
21 | def get(self):
22 | """ The slash prefix is optional. """
23 | if self.obj_name in bpy.data.objects:
24 | return bpy.data.objects[self.obj_name]
25 | elif '/'+self.obj_name in bpy.data.objects:
26 | return bpy.data.objects['/'+self.obj_name]
27 |
28 | def __init__(self, obj_name, fallback_obj=None):
29 | self.obj_name = obj_name.lstrip('/')
30 | self.fallback_obj = fallback_obj
31 |
32 | class EmaPoint:
33 | """Exponential moving average point"""
34 |
35 | def update(self, sample, dt):
36 | """Update the average with a new sample point."""
37 | #'alpha' is the standard EMA new sample weight over the previous ones
38 | alpha = 2.0/(self.N/dt+1)
39 | self.point = alpha*sample + (1-alpha)*self.point
40 | return self.point
41 |
42 | def __init__(self, N, init_point=Vector()):
43 | """Alpha coefficient is caluclated from N (the samples to average over)."""
44 | #N is the number of seconds to average over. More precisely the number
45 | #of seconds over which added samples gain ~86% of the total weight.
46 | self.N = float(N)
47 | self.point = init_point
48 |
49 | class RandomTimer:
50 |
51 | def step(self, dt):
52 | """ Returns true every time it passes the randomly generated time interval. """
53 | self.time_idle += dt
54 | if self.time_idle > self.interval:
55 | self.clear_time()
56 | return True
57 | return False
58 |
59 | def clear_time(self):
60 | self.time_idle = 0.0
61 | self.interval = random.gauss(*self.mu_sig)
62 |
63 | def __init__(self, mu_sig, initial_trigger=False):
64 | """
65 | Arguments:
66 | * mu_sig: tuple (Average time in which to generate new location, standard deviation of that time)
67 | * initial_trigger: boolean - whether or not to return True in the first call of step()
68 | """
69 | self.mu_sig = mu_sig
70 | self.clear_time()
71 | if initial_trigger:
72 | self.time_idle = self.interval + 1.0
73 |
74 | class SaccadePipe:
75 |
76 | offset = Vector((0, 0, 0))
77 |
78 | @staticmethod
79 | def random_vector(radius):
80 | return Vector(
81 | (random.uniform(-1, 1) for i in range(3))
82 | ).normalized()*random.uniform(0, radius)
83 |
84 | def pipe(self, eyetarget_pos, dt):
85 | if self.timer.step(dt):
86 | self.offset = self.random_vector(self.radius)
87 | return eyetarget_pos + self.offset
88 |
89 | def __init__(self, radius=0.2, interval_mu_sig=(1.5, 0.8)):
90 | self.radius = radius
91 | self.timer = RandomTimer(interval_mu_sig, True)
92 |
93 | class GlancePipe:
94 |
95 | @staticmethod
96 | def weighted_avg(home, dest, weight):
97 | return dest * weight + home * (1 - weight)
98 |
99 | def is_ready(self):
100 | return self.target_obj.get() is not None
101 |
102 | def eyes(self, primary_loc):
103 | # Towards the glance target
104 | return self.target_obj.location
105 |
106 | def head(self, primary_loc):
107 | # Towards a middle point between the primary and glance targets
108 | return self.weighted_avg(primary_loc, self.target_obj.location, 0.5)
109 |
110 | def step(self, dt):
111 | return self.timer.step(dt)
112 |
113 | def __init__(self, target_obj, timer):
114 | self.target_obj = target_obj
115 | self.timer = timer
116 |
117 | class TrackSaccadeCtrl:
118 | """
119 | Initialize with the object to track; as long as the step(dt) method is
120 | called, the head and the eyes will smoothly track the object of interest.
121 |
122 | The eyes will saccade if the object is moving slowly enough for them to
123 | catch up with it.
124 | """
125 |
126 | @staticmethod
127 | def distance(v1, v2):
128 | return (v1 - v2).magnitude
129 |
130 | def step(self, dt):
131 | if self.interest_obj is None:
132 | return
133 |
134 | head_target = primary.get_head_target()
135 | eyes_target = primary.get_eyes_target()
136 |
137 | # Update EMAs
138 | if self.glance_pipe and self.glance_pipe.is_ready():
139 | # Towards the glance-piped target
140 | eyes_target.location = self.eyes_ema.update(self.glance_pipe.eyes(self.interest_obj.location), dt)
141 | head_target.location = self.head_ema.update(self.glance_pipe.head(self.interest_obj.location), dt)
142 |
143 | if self.glance_pipe.step(dt):
144 | self.glance_pipe = None
145 | else:
146 | # Towards the primary target
147 | eyes_target.location = self.eyes_ema.update(self.interest_obj.location, dt)
148 | head_target.location = self.head_ema.update(self.interest_obj.location, dt)
149 |
150 | # Saccade if eyes caught up with the interest_obj
151 | if self.distance(
152 | self.eyes_ema.point, self.interest_obj.location
153 | ) < self.saccade.radius*0.5:
154 | eyes_target.location = self.saccade.pipe(eyes_target.location, dt)
155 | else:
156 | self.saccade.timer.clear_time()
157 |
158 | def track(self, target_name):
159 | self.interest_obj = ExpectObject(target_name, primary.get_head_target())
160 |
161 | def glance(self, target_name, mu_sig=(1.25, 0.4)):
162 | self.glance_pipe = GlancePipe(
163 | ExpectObject(target_name, primary.get_head_target()),
164 | RandomTimer(mu_sig)
165 | )
166 |
167 | def __init__(self, interest_obj=None, **kwargs):
168 | """
169 | Argument 'interest_obj' must have attribute "location".
170 |
171 | Takes the following optional keyword arguments:
172 | head_seconds=1.0 #Time for the head to catch up with interest_obj
173 | eyes_seconds=0.1 #Time for the eyes to catch up with interest_obj
174 | radius=0.1 #Maximum distance for the eyes to saccade around interest_obj
175 |
176 | #Arguments for the Gaussian probability distribution for how often to saccade.
177 | interval_mu_sig=(1.5, 0.8)
178 | """
179 | self.interest_obj = interest_obj
180 | self.head_ema = EmaPoint(
181 | kwargs.get("head_seconds", 1.0),
182 | primary.get_head_target().location
183 | )
184 | self.eyes_ema = EmaPoint(
185 | kwargs.get("eyes_seconds", 0.1),
186 | primary.get_eyes_target().location
187 | )
188 | self.saccade = SaccadePipe(
189 | **{k:v for k,v in kwargs.items()
190 | if k in ["radius", "interval_mu_sig"]}
191 | )
192 | self.glance_pipe = None
193 |
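EmaPoint's weight alpha = 2/(N/dt + 1) makes N act as a time constant: after about N seconds of samples, new data carries roughly 86% of the total weight, independent of frame rate. The same update rule on plain floats (EmaPoint itself averages mathutils Vectors):

    def ema_update(point, sample, N, dt):
        alpha = 2.0 / (N / dt + 1)
        return alpha * sample + (1 - alpha) * point

    point = 0.0
    for _ in range(25):                  # 25 frames of 40 ms = 1 second
        point = ema_update(point, 1.0, N=1.0, dt=0.04)
    print(round(point, 3))               # ~0.86 of the way to the target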
--------------------------------------------------------------------------------
/src/modes/smart_track.yaml:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | fps: 25
3 | emo_topic: make_face_expr
--------------------------------------------------------------------------------
/src/outputs/TimelineAnimation.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import rospy
3 | import Utils
4 |
5 | class FrameMap:
6 | """ Represents the position of an animation in the timeline. """
7 |
8 | def __iter__(self):
9 | return iter(range(self.frame_start, self.frame_end))
10 |
11 | def set_duration(self, val):
12 | self.duration = val
13 |
14 | def get_frame_duration(self):
15 | return self.duration/float(self.frame_end - self.frame_start)
16 |
17 | @classmethod
18 | def from_string(cls, s):
19 | """ Alternative constructor method. """
20 | # 'cls' holds the FrameMap class
21 | # The asterisk below means the list s.split(":") will be expanded into
22 | # the arguments frame_start, frame_end, min_duration
23 | return cls(*s.split(":"))
24 |
25 | def __init__(self, frame_start, frame_end, min_duration):
26 | self.frame_start = int(frame_start)   # from_string passes strings in
27 | self.frame_end = int(frame_end)
28 | self.min_duration = float(min_duration)
29 |
30 | class TimelineAnimation:
31 | """
32 | This output can build and send an animation out, if you give the
33 | animation's location (FrameMap instance).
34 | """
35 |
36 | def send(self, frame_map):
37 | """
38 | Call this from your controller.
39 | Will iterate through frames, build trajectory messages and send them.
40 | """
41 | # __iter__ method in FrameMap will allow you to iterate like this:
42 | #
43 | #for frame in frame_map:
44 | # print("Current frame: %s" % frame)
45 | raise NotImplementedError
46 |
47 | def __init__(self, confentry):
48 | # Motor config and categories can be saved here (outputs/anim_zeno.yaml)
49 | # until we find a better location for it.
50 | self.config = Utils.read_yaml("anim_zeno.yaml", __file__)
51 | raise NotImplementedError
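
FrameMap.from_string expects a 'frame_start:frame_end:min_duration' triple. An illustrative use (the numbers are made up):

    fm = FrameMap.from_string("10:50:2.0")
    fm.set_duration(4.0)
    for frame in fm:                     # frames 10..49, via __iter__
        pass
    print(fm.get_frame_duration())       # 4.0 s / 40 frames = 0.1 s per frame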
--------------------------------------------------------------------------------
/src/outputs/__init__.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import Utils
3 | import inspect
4 | import traceback
5 | import rospy
6 |
7 | __doc__ = """
8 | 'outputs' is a package that holds classes and their instances that can, on
9 | command, send parts of the current (or stored?) Blender state to the ROS network.
10 |
11 | Output instances can be reached by either:
12 | outputs.store.<name> or outputs.get_instance(<name>),
13 | where <name> can be neck_euler, eyes, etc.
14 |
15 | E.g. The instance outputs.store.neck_euler of class ParametersOut will, when
16 | transmit() method is called, send a message containing the current neck joint
17 | positions in Blender space to basic_head_api as yaw, pitch and roll rotations.
18 |
19 | See default config: config/outputs.yaml for available instances.
20 | """
21 |
22 | class DummyOutput:
23 |
24 | def __getattr__(self, attrname):
25 | """ Return a dummy function that doesn't do anything. """
26 | rospy.logwarn("Output %s is disabled." % self.name)
27 | return lambda *args: None
28 |
29 | def __init__(self, name):
30 | self.name = name
31 |
32 |
33 | class InstanceStore:
34 | """ Here is where instances of outputs are stored. """
35 |
36 | @staticmethod
37 | def _get_class(relative_path):
38 | #E.g. ParametersOut
39 | module_name = relative_path.split(".")[-1]
40 | #E.g. outputs.state.ParametersOut
41 | module_path = ".".join([__name__, relative_path])
42 | #E.g. outputs.state.ParametersOut.ParametersOut
43 | member_path = ".".join([__name__, relative_path, module_name])
44 | try:
45 | return Utils.import_member(member_path)
46 | except ImportError as err:
47 | if err.name == module_name:
48 | raise NameError("Couldn't find module %s" % module_path)
49 | else:
50 | raise
51 | except AttributeError:
52 | raise NameError("Couldn't load class %s" % member_path)
53 |
54 | @classmethod
55 | def _build_single(cls, confentry):
56 | if confentry["enabled"]:
57 | return cls._get_class(confentry["class"])(confentry)
58 | else:
59 | return DummyOutput(confentry["name"])
60 |
61 | def _store_instance(self, name, instance):
62 | self._instances[name] = instance
63 | setattr(self, name, instance)
64 |
65 | def __init__(self, fullconfig):
66 | # Reserve member names
67 | self.orig_members = [member[0] for member in inspect.getmembers(self)]
68 |
69 | # Build and store outputs out of the config
70 | self._instances = {}
71 | for confentry in fullconfig:
72 | try:
73 | if confentry["name"][0] == "_":
74 | raise NameError("Output name %s can't start with _" % confentry["name"])
75 | if confentry["name"] in self.orig_members:
76 | raise NameError("Output name %s is reserved" % confentry["name"])
77 | self._store_instance(confentry["name"], self._build_single(confentry))
78 | except Exception:
79 | print("Exception during output '%s' load:" % confentry["name"])
80 | traceback.print_exc()
81 | rospy.logwarn("Couldn't load output '%s'" % confentry["name"])
82 |
83 | store = None
84 |
85 | def initialize(fullconfig):
86 | global store
87 | store = InstanceStore(fullconfig)
88 |
89 | def get_instance(name):
90 | return store._instances[name]
91 |
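Each confentry consumed above comes from config/outputs.yaml and needs at least 'name', 'enabled' and 'class' (a module path relative to this package). A hedged sketch of what an entry might look like; the class-specific keys are left out and the real file is not reproduced here:

    - name: neck_euler
      enabled: true
      class: state.ParametersOut
      # ... class-specific keys (msg, pubtopic, binding) follow here
    - name: eyes
      enabled: false          # disabled entries become DummyOutput
      class: state.ParametersOut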
--------------------------------------------------------------------------------
/src/outputs/state/CompositeOut.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | from .. import get_instance
3 | from . import StateOutputBase
4 |
5 | class CompositeOut(StateOutputBase):
6 |
7 | # The inside of this function will only be executed once.
8 | # self.outputs cannot be built inside __init__, because other outputs may
9 | # not have been built at that point.
10 | def transmit(self):
11 | self.outputs = [
12 | get_instance(output_name)
13 | for output_name in self.confentry["outputs"]
14 | ]
15 |
16 | # Set _transmit() to be executed when transmit() is called from this
17 | # point on
18 | self.transmit = self._transmit
19 | self.transmit()
20 |
21 | def _transmit(self):
22 | for output in self.outputs:
23 | output.transmit()
24 |
25 | def __init__(self, confentry):
26 | self.confentry = confentry
--------------------------------------------------------------------------------
/src/outputs/state/ParametersOut.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import inspect
3 | import Utils, BlenderUtils, ShapekeyStore, ShapekeyStore2
4 | import bpy
5 | import rospy
6 | from . import StateOutputBase
7 |
8 | class BlenderGetterFactory:
9 |
10 | @staticmethod
11 | def bonerot(bindentry):
12 | args = bindentry["bonerot"].split(":")
13 | scale = 1
14 | if ("scale" in bindentry):
15 | scale = bindentry["scale"]
16 | return lambda: BlenderUtils.get_bones_rotation_rad(*args)*scale
17 |
18 | @staticmethod
19 | def shkey_mesh(bindentry):
20 | mesh = bindentry["shkey_mesh"]
21 |
22 | def get_coeffs(shkeystore, key_blocks):
23 | iterator = shkeystore.getIter()
24 | try:
25 | return [key_blocks[shapekey].value for shapekey in iterator]
26 | except KeyError as err:
27 | rospy.loginfo("%s not compatible: shapekey %s in mesh %s",
28 | shkeystore.__name__, err, mesh
29 | )
30 | return None
31 |
32 | # Pick which ShapekeyStore to use
33 | _shkeystore = [
34 | store for store in [ShapekeyStore, ShapekeyStore2]
35 | if get_coeffs(store, bpy.data.meshes[mesh].shape_keys.key_blocks)
36 | ][0]
37 | rospy.loginfo("Using %s for %s output", _shkeystore.__name__, mesh)
38 |
39 | def func():
40 | # Dictionary {shapekey: key_block object}.
41 | key_blocks = (
42 | bpy.data.meshes[mesh].shape_keys.key_blocks
43 | )
44 | # List of shapekey coefficients.
45 | return get_coeffs(_shkeystore, key_blocks)
46 | return func
47 |
48 | @staticmethod
49 | def childbones_name(bindentry):
50 | armature = bindentry["childbones_name"]
51 | return lambda: [bone for bone in bpy.data.objects[armature].pose.bones.keys()]
52 |
53 | @staticmethod
54 | def childbones_rot(bindentry):
55 | armature, axis = bindentry["childbones_rot"].split(":")
56 | return lambda: [
57 | BlenderUtils.get_bones_rotation_rad(armature, bone, axis)
58 | for bone in bpy.data.objects[armature].pose.bones.keys()
59 | ]
60 |
61 | class ParametersOut(StateOutputBase):
62 |
63 | def build_processor(self, bindentry):
64 | """
65 | Returns a processor function, which will, on the given msg, set the
66 | parameter specified by 'keychain' to the value from blender data
67 | returned by 'blendergetter'.
68 | """
69 |
70 | # Find a function in BlenderGetterFactory whose name is in the bindentry.
71 | # I.e. if "bonerot" in bindentry:
72 | # blendergetter_builder = BlenderGetterFactory.bonerot
73 | # etc.
74 | try:
75 | blendergetter_builder = [
76 | getattr(BlenderGetterFactory, key) for key in bindentry
77 | if hasattr(BlenderGetterFactory, key) and key[0] != "_"
78 | ][0]
79 | except Exception:
80 | raise NameError("Unable to bind %s" % bindentry)
81 |
82 | keychain = Utils.RosMsgKeyChain(bindentry["varpath"].split(":"))
83 | blendergetter = blendergetter_builder(bindentry)
84 | def processor(msg):
85 | keychain.hard_set_on(msg, blendergetter())
86 | return processor
87 |
88 | def transmit(self):
89 | msg = self.build_msg()
90 | try:
91 | self.pub.publish(msg)
92 | except Exception:
93 | pass
94 |
95 | def build_msg(self):
96 | msg = self.msgtype()
97 | for processor in self.processors:
98 | processor(msg)
99 | return msg
100 |
101 | def __init__(self, confentry):
102 | self.msgtype = Utils.import_member(confentry["msg"])
103 | self.pub = rospy.Publisher(confentry["pubtopic"], self.msgtype, queue_size=2)
104 |
105 | self.processors = []
106 | for singlebind in confentry["binding"]:
107 | self.processors.append(self.build_processor(singlebind))
108 |
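Putting BlenderGetterFactory and build_processor together: a binding entry pairs one getter key with a 'varpath' naming the message field to fill. A hedged sketch of a binding list (the key syntax follows the code above; the armature, bone, mesh and field names are placeholders):

    binding:
      - varpath: yaw               # ':'-separated chain of msg fields
        bonerot: Armature:neck:z   # armature:bone:axis
        scale: 1.5                 # optional multiplier
      - varpath: coeffs
        shkey_mesh: FaceMesh       # mesh whose shapekey values are read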
--------------------------------------------------------------------------------
/src/outputs/state/__init__.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 |
3 | class StateOutputBase:
4 |
5 | def transmit(self):
6 | """
7 | Implement to send out the appropriate part of Blender rig state at current
8 | moment.
9 | """
10 | raise NotImplementedError
11 |
12 | def __init__(self, confentry):
13 | """ Confentry is the yaml object associated with this output. """
14 | raise NotImplementedError
--------------------------------------------------------------------------------
/src/robo.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hansonrobotics/robo_blender/b444f984e5151bdcc041dc6cd5f74712fcea20e4/src/robo.blend
--------------------------------------------------------------------------------
/src/startup.py:
--------------------------------------------------------------------------------
1 | #Copyright (c) 2013-2018 Hanson Robotics, Ltd.
2 | import bpy
3 | import os
4 | import sys
5 |
6 | dirname = os.path.dirname(bpy.data.filepath)
7 | filename = os.path.join(dirname, "main.py")
8 |
9 | sys.path.insert(0, dirname)
10 | exec(compile(open(filename).read(), filename, 'exec'))
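
startup.py relies on bpy.data.filepath, so it has to run inside Blender with the rig file open; presumably something along the lines of `blender src/robo.blend --python src/startup.py` (a hedged example, not a documented command), after which main.py is loaded from the blend file's directory.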
--------------------------------------------------------------------------------