├── .gitignore
├── .guile
├── CMakeLists.txt
├── LICENSE
├── README.md
├── movement
│   ├── CMakeLists.txt
│   ├── README.md
│   ├── atomic-dbg.py
│   ├── atomic.py
│   ├── do-movement.scm
│   ├── movement.scm
│   └── ros_commo.py
├── notes
│   ├── README.md
│   ├── behavior.cfg
│   ├── btree-demo.scm
│   ├── eva-fsm.scm
│   ├── general_behavior.py
│   ├── notes
│   └── universal-fsm.scm
├── package.xml
├── scripts
│   ├── config.scm
│   ├── eva.sh
│   ├── install.sh
│   ├── opencog.conf
│   ├── purge-head-packages.sh
│   ├── run.sh
│   └── setup.sh
├── sensors
│   ├── README.md
│   ├── affect.py
│   ├── atomic_msgs.py
│   ├── audio_power.py
│   ├── chat_track.py
│   ├── control.py
│   ├── control_psi.py
│   ├── face_recog.py
│   ├── face_track.py
│   ├── main.py
│   ├── netcat.py
│   ├── room_brightness.py
│   ├── saliency_track.py
│   ├── sound_track.py
│   └── tts_feedback.py
└── webui
    ├── CMakeLists.txt
    ├── README.md
    ├── atomic_psi.py
    ├── psi_ctrl.py
    └── webui.scm
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.log
3 | *.tags*
4 | *build*/
5 | *.swp
6 |
--------------------------------------------------------------------------------
/.guile:
--------------------------------------------------------------------------------
1 |
2 | ; Make the arrow-keys work!
3 | (use-modules (ice-9 readline))
4 | (activate-readline)
5 |
6 | ; Handy but dangerous ...
7 | ; (add-to-load-path ".")
8 |
9 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | #
2 | # Installation CMake file.
3 | # Currently used for one reason only: to install part of the
4 | # OpenCog behavior infrastructure as a guile module, so that other
5 | # dependent code can use it.
6 |
7 | CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
8 |
9 | # Only list install files that have actually changed
10 | set( CMAKE_INSTALL_MESSAGE "LAZY" )
11 |
12 | # The package name MUST match that in the package.xml file
13 | # and, because it's python, must use underscores, not dashes.
14 | PROJECT(ros_behavior_scripting)
15 |
16 | # Set datadir
17 | IF (NOT DEFINED DATADIR)
18 |     SET (DATADIR "${CMAKE_INSTALL_PREFIX}/share/opencog")
19 | ENDIF (NOT DEFINED DATADIR)
20 |
21 | # ===================================================================
22 | # ROS stuff
23 |
24 | # execute_process(COMMAND "bash /opt/ros/indigo/setup.bash")
25 | 
26 | # The find_package below fails unless the setup above is sourced
27 | # first. Oh well.
28 | find_package(catkin REQUIRED COMPONENTS
29 |     dynamic_reconfigure
30 |     geometry_msgs
31 |     hr_msgs
32 |     pi_face_tracker
33 |     rospy
34 |     std_msgs
35 | )
36 |
37 | # ===================================================================
38 | # global includes
39 |
40 | ADD_SUBDIRECTORY(movement)
41 | ADD_SUBDIRECTORY(webui)
42 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU AFFERO GENERAL PUBLIC LICENSE
2 | Version 3, 19 November 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU Affero General Public License is a free, copyleft license for
11 | software and other kinds of works, specifically designed to ensure
12 | cooperation with the community in the case of network server software.
13 |
14 | The licenses for most software and other practical works are designed
15 | to take away your freedom to share and change the works. By contrast,
16 | our General Public Licenses are intended to guarantee your freedom to
17 | share and change all versions of a program--to make sure it remains free
18 | software for all its users.
19 |
20 | When we speak of free software, we are referring to freedom, not
21 | price. Our General Public Licenses are designed to make sure that you
22 | have the freedom to distribute copies of free software (and charge for
23 | them if you wish), that you receive source code or can get it if you
24 | want it, that you can change the software or use pieces of it in new
25 | free programs, and that you know you can do these things.
26 |
27 | Developers that use our General Public Licenses protect your rights
28 | with two steps: (1) assert copyright on the software, and (2) offer
29 | you this License which gives you legal permission to copy, distribute
30 | and/or modify the software.
31 |
32 | A secondary benefit of defending all users' freedom is that
33 | improvements made in alternate versions of the program, if they
34 | receive widespread use, become available for other developers to
35 | incorporate. Many developers of free software are heartened and
36 | encouraged by the resulting cooperation. However, in the case of
37 | software used on network servers, this result may fail to come about.
38 | The GNU General Public License permits making a modified version and
39 | letting the public access it on a server without ever releasing its
40 | source code to the public.
41 |
42 | The GNU Affero General Public License is designed specifically to
43 | ensure that, in such cases, the modified source code becomes available
44 | to the community. It requires the operator of a network server to
45 | provide the source code of the modified version running there to the
46 | users of that server. Therefore, public use of a modified version, on
47 | a publicly accessible server, gives the public access to the source
48 | code of the modified version.
49 |
50 | An older license, called the Affero General Public License and
51 | published by Affero, was designed to accomplish similar goals. This is
52 | a different license, not a version of the Affero GPL, but Affero has
53 | released a new version of the Affero GPL which permits relicensing under
54 | this license.
55 |
56 | The precise terms and conditions for copying, distribution and
57 | modification follow.
58 |
59 | TERMS AND CONDITIONS
60 |
61 | 0. Definitions.
62 |
63 | "This License" refers to version 3 of the GNU Affero General Public License.
64 |
65 | "Copyright" also means copyright-like laws that apply to other kinds of
66 | works, such as semiconductor masks.
67 |
68 | "The Program" refers to any copyrightable work licensed under this
69 | License. Each licensee is addressed as "you". "Licensees" and
70 | "recipients" may be individuals or organizations.
71 |
72 | To "modify" a work means to copy from or adapt all or part of the work
73 | in a fashion requiring copyright permission, other than the making of an
74 | exact copy. The resulting work is called a "modified version" of the
75 | earlier work or a work "based on" the earlier work.
76 |
77 | A "covered work" means either the unmodified Program or a work based
78 | on the Program.
79 |
80 | To "propagate" a work means to do anything with it that, without
81 | permission, would make you directly or secondarily liable for
82 | infringement under applicable copyright law, except executing it on a
83 | computer or modifying a private copy. Propagation includes copying,
84 | distribution (with or without modification), making available to the
85 | public, and in some countries other activities as well.
86 |
87 | To "convey" a work means any kind of propagation that enables other
88 | parties to make or receive copies. Mere interaction with a user through
89 | a computer network, with no transfer of a copy, is not conveying.
90 |
91 | An interactive user interface displays "Appropriate Legal Notices"
92 | to the extent that it includes a convenient and prominently visible
93 | feature that (1) displays an appropriate copyright notice, and (2)
94 | tells the user that there is no warranty for the work (except to the
95 | extent that warranties are provided), that licensees may convey the
96 | work under this License, and how to view a copy of this License. If
97 | the interface presents a list of user commands or options, such as a
98 | menu, a prominent item in the list meets this criterion.
99 |
100 | 1. Source Code.
101 |
102 | The "source code" for a work means the preferred form of the work
103 | for making modifications to it. "Object code" means any non-source
104 | form of a work.
105 |
106 | A "Standard Interface" means an interface that either is an official
107 | standard defined by a recognized standards body, or, in the case of
108 | interfaces specified for a particular programming language, one that
109 | is widely used among developers working in that language.
110 |
111 | The "System Libraries" of an executable work include anything, other
112 | than the work as a whole, that (a) is included in the normal form of
113 | packaging a Major Component, but which is not part of that Major
114 | Component, and (b) serves only to enable use of the work with that
115 | Major Component, or to implement a Standard Interface for which an
116 | implementation is available to the public in source code form. A
117 | "Major Component", in this context, means a major essential component
118 | (kernel, window system, and so on) of the specific operating system
119 | (if any) on which the executable work runs, or a compiler used to
120 | produce the work, or an object code interpreter used to run it.
121 |
122 | The "Corresponding Source" for a work in object code form means all
123 | the source code needed to generate, install, and (for an executable
124 | work) run the object code and to modify the work, including scripts to
125 | control those activities. However, it does not include the work's
126 | System Libraries, or general-purpose tools or generally available free
127 | programs which are used unmodified in performing those activities but
128 | which are not part of the work. For example, Corresponding Source
129 | includes interface definition files associated with source files for
130 | the work, and the source code for shared libraries and dynamically
131 | linked subprograms that the work is specifically designed to require,
132 | such as by intimate data communication or control flow between those
133 | subprograms and other parts of the work.
134 |
135 | The Corresponding Source need not include anything that users
136 | can regenerate automatically from other parts of the Corresponding
137 | Source.
138 |
139 | The Corresponding Source for a work in source code form is that
140 | same work.
141 |
142 | 2. Basic Permissions.
143 |
144 | All rights granted under this License are granted for the term of
145 | copyright on the Program, and are irrevocable provided the stated
146 | conditions are met. This License explicitly affirms your unlimited
147 | permission to run the unmodified Program. The output from running a
148 | covered work is covered by this License only if the output, given its
149 | content, constitutes a covered work. This License acknowledges your
150 | rights of fair use or other equivalent, as provided by copyright law.
151 |
152 | You may make, run and propagate covered works that you do not
153 | convey, without conditions so long as your license otherwise remains
154 | in force. You may convey covered works to others for the sole purpose
155 | of having them make modifications exclusively for you, or provide you
156 | with facilities for running those works, provided that you comply with
157 | the terms of this License in conveying all material for which you do
158 | not control copyright. Those thus making or running the covered works
159 | for you must do so exclusively on your behalf, under your direction
160 | and control, on terms that prohibit them from making any copies of
161 | your copyrighted material outside their relationship with you.
162 |
163 | Conveying under any other circumstances is permitted solely under
164 | the conditions stated below. Sublicensing is not allowed; section 10
165 | makes it unnecessary.
166 |
167 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
168 |
169 | No covered work shall be deemed part of an effective technological
170 | measure under any applicable law fulfilling obligations under article
171 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
172 | similar laws prohibiting or restricting circumvention of such
173 | measures.
174 |
175 | When you convey a covered work, you waive any legal power to forbid
176 | circumvention of technological measures to the extent such circumvention
177 | is effected by exercising rights under this License with respect to
178 | the covered work, and you disclaim any intention to limit operation or
179 | modification of the work as a means of enforcing, against the work's
180 | users, your or third parties' legal rights to forbid circumvention of
181 | technological measures.
182 |
183 | 4. Conveying Verbatim Copies.
184 |
185 | You may convey verbatim copies of the Program's source code as you
186 | receive it, in any medium, provided that you conspicuously and
187 | appropriately publish on each copy an appropriate copyright notice;
188 | keep intact all notices stating that this License and any
189 | non-permissive terms added in accord with section 7 apply to the code;
190 | keep intact all notices of the absence of any warranty; and give all
191 | recipients a copy of this License along with the Program.
192 |
193 | You may charge any price or no price for each copy that you convey,
194 | and you may offer support or warranty protection for a fee.
195 |
196 | 5. Conveying Modified Source Versions.
197 |
198 | You may convey a work based on the Program, or the modifications to
199 | produce it from the Program, in the form of source code under the
200 | terms of section 4, provided that you also meet all of these conditions:
201 |
202 | a) The work must carry prominent notices stating that you modified
203 | it, and giving a relevant date.
204 |
205 | b) The work must carry prominent notices stating that it is
206 | released under this License and any conditions added under section
207 | 7. This requirement modifies the requirement in section 4 to
208 | "keep intact all notices".
209 |
210 | c) You must license the entire work, as a whole, under this
211 | License to anyone who comes into possession of a copy. This
212 | License will therefore apply, along with any applicable section 7
213 | additional terms, to the whole of the work, and all its parts,
214 | regardless of how they are packaged. This License gives no
215 | permission to license the work in any other way, but it does not
216 | invalidate such permission if you have separately received it.
217 |
218 | d) If the work has interactive user interfaces, each must display
219 | Appropriate Legal Notices; however, if the Program has interactive
220 | interfaces that do not display Appropriate Legal Notices, your
221 | work need not make them do so.
222 |
223 | A compilation of a covered work with other separate and independent
224 | works, which are not by their nature extensions of the covered work,
225 | and which are not combined with it such as to form a larger program,
226 | in or on a volume of a storage or distribution medium, is called an
227 | "aggregate" if the compilation and its resulting copyright are not
228 | used to limit the access or legal rights of the compilation's users
229 | beyond what the individual works permit. Inclusion of a covered work
230 | in an aggregate does not cause this License to apply to the other
231 | parts of the aggregate.
232 |
233 | 6. Conveying Non-Source Forms.
234 |
235 | You may convey a covered work in object code form under the terms
236 | of sections 4 and 5, provided that you also convey the
237 | machine-readable Corresponding Source under the terms of this License,
238 | in one of these ways:
239 |
240 | a) Convey the object code in, or embodied in, a physical product
241 | (including a physical distribution medium), accompanied by the
242 | Corresponding Source fixed on a durable physical medium
243 | customarily used for software interchange.
244 |
245 | b) Convey the object code in, or embodied in, a physical product
246 | (including a physical distribution medium), accompanied by a
247 | written offer, valid for at least three years and valid for as
248 | long as you offer spare parts or customer support for that product
249 | model, to give anyone who possesses the object code either (1) a
250 | copy of the Corresponding Source for all the software in the
251 | product that is covered by this License, on a durable physical
252 | medium customarily used for software interchange, for a price no
253 | more than your reasonable cost of physically performing this
254 | conveying of source, or (2) access to copy the
255 | Corresponding Source from a network server at no charge.
256 |
257 | c) Convey individual copies of the object code with a copy of the
258 | written offer to provide the Corresponding Source. This
259 | alternative is allowed only occasionally and noncommercially, and
260 | only if you received the object code with such an offer, in accord
261 | with subsection 6b.
262 |
263 | d) Convey the object code by offering access from a designated
264 | place (gratis or for a charge), and offer equivalent access to the
265 | Corresponding Source in the same way through the same place at no
266 | further charge. You need not require recipients to copy the
267 | Corresponding Source along with the object code. If the place to
268 | copy the object code is a network server, the Corresponding Source
269 | may be on a different server (operated by you or a third party)
270 | that supports equivalent copying facilities, provided you maintain
271 | clear directions next to the object code saying where to find the
272 | Corresponding Source. Regardless of what server hosts the
273 | Corresponding Source, you remain obligated to ensure that it is
274 | available for as long as needed to satisfy these requirements.
275 |
276 | e) Convey the object code using peer-to-peer transmission, provided
277 | you inform other peers where the object code and Corresponding
278 | Source of the work are being offered to the general public at no
279 | charge under subsection 6d.
280 |
281 | A separable portion of the object code, whose source code is excluded
282 | from the Corresponding Source as a System Library, need not be
283 | included in conveying the object code work.
284 |
285 | A "User Product" is either (1) a "consumer product", which means any
286 | tangible personal property which is normally used for personal, family,
287 | or household purposes, or (2) anything designed or sold for incorporation
288 | into a dwelling. In determining whether a product is a consumer product,
289 | doubtful cases shall be resolved in favor of coverage. For a particular
290 | product received by a particular user, "normally used" refers to a
291 | typical or common use of that class of product, regardless of the status
292 | of the particular user or of the way in which the particular user
293 | actually uses, or expects or is expected to use, the product. A product
294 | is a consumer product regardless of whether the product has substantial
295 | commercial, industrial or non-consumer uses, unless such uses represent
296 | the only significant mode of use of the product.
297 |
298 | "Installation Information" for a User Product means any methods,
299 | procedures, authorization keys, or other information required to install
300 | and execute modified versions of a covered work in that User Product from
301 | a modified version of its Corresponding Source. The information must
302 | suffice to ensure that the continued functioning of the modified object
303 | code is in no case prevented or interfered with solely because
304 | modification has been made.
305 |
306 | If you convey an object code work under this section in, or with, or
307 | specifically for use in, a User Product, and the conveying occurs as
308 | part of a transaction in which the right of possession and use of the
309 | User Product is transferred to the recipient in perpetuity or for a
310 | fixed term (regardless of how the transaction is characterized), the
311 | Corresponding Source conveyed under this section must be accompanied
312 | by the Installation Information. But this requirement does not apply
313 | if neither you nor any third party retains the ability to install
314 | modified object code on the User Product (for example, the work has
315 | been installed in ROM).
316 |
317 | The requirement to provide Installation Information does not include a
318 | requirement to continue to provide support service, warranty, or updates
319 | for a work that has been modified or installed by the recipient, or for
320 | the User Product in which it has been modified or installed. Access to a
321 | network may be denied when the modification itself materially and
322 | adversely affects the operation of the network or violates the rules and
323 | protocols for communication across the network.
324 |
325 | Corresponding Source conveyed, and Installation Information provided,
326 | in accord with this section must be in a format that is publicly
327 | documented (and with an implementation available to the public in
328 | source code form), and must require no special password or key for
329 | unpacking, reading or copying.
330 |
331 | 7. Additional Terms.
332 |
333 | "Additional permissions" are terms that supplement the terms of this
334 | License by making exceptions from one or more of its conditions.
335 | Additional permissions that are applicable to the entire Program shall
336 | be treated as though they were included in this License, to the extent
337 | that they are valid under applicable law. If additional permissions
338 | apply only to part of the Program, that part may be used separately
339 | under those permissions, but the entire Program remains governed by
340 | this License without regard to the additional permissions.
341 |
342 | When you convey a copy of a covered work, you may at your option
343 | remove any additional permissions from that copy, or from any part of
344 | it. (Additional permissions may be written to require their own
345 | removal in certain cases when you modify the work.) You may place
346 | additional permissions on material, added by you to a covered work,
347 | for which you have or can give appropriate copyright permission.
348 |
349 | Notwithstanding any other provision of this License, for material you
350 | add to a covered work, you may (if authorized by the copyright holders of
351 | that material) supplement the terms of this License with terms:
352 |
353 | a) Disclaiming warranty or limiting liability differently from the
354 | terms of sections 15 and 16 of this License; or
355 |
356 | b) Requiring preservation of specified reasonable legal notices or
357 | author attributions in that material or in the Appropriate Legal
358 | Notices displayed by works containing it; or
359 |
360 | c) Prohibiting misrepresentation of the origin of that material, or
361 | requiring that modified versions of such material be marked in
362 | reasonable ways as different from the original version; or
363 |
364 | d) Limiting the use for publicity purposes of names of licensors or
365 | authors of the material; or
366 |
367 | e) Declining to grant rights under trademark law for use of some
368 | trade names, trademarks, or service marks; or
369 |
370 | f) Requiring indemnification of licensors and authors of that
371 | material by anyone who conveys the material (or modified versions of
372 | it) with contractual assumptions of liability to the recipient, for
373 | any liability that these contractual assumptions directly impose on
374 | those licensors and authors.
375 |
376 | All other non-permissive additional terms are considered "further
377 | restrictions" within the meaning of section 10. If the Program as you
378 | received it, or any part of it, contains a notice stating that it is
379 | governed by this License along with a term that is a further
380 | restriction, you may remove that term. If a license document contains
381 | a further restriction but permits relicensing or conveying under this
382 | License, you may add to a covered work material governed by the terms
383 | of that license document, provided that the further restriction does
384 | not survive such relicensing or conveying.
385 |
386 | If you add terms to a covered work in accord with this section, you
387 | must place, in the relevant source files, a statement of the
388 | additional terms that apply to those files, or a notice indicating
389 | where to find the applicable terms.
390 |
391 | Additional terms, permissive or non-permissive, may be stated in the
392 | form of a separately written license, or stated as exceptions;
393 | the above requirements apply either way.
394 |
395 | 8. Termination.
396 |
397 | You may not propagate or modify a covered work except as expressly
398 | provided under this License. Any attempt otherwise to propagate or
399 | modify it is void, and will automatically terminate your rights under
400 | this License (including any patent licenses granted under the third
401 | paragraph of section 11).
402 |
403 | However, if you cease all violation of this License, then your
404 | license from a particular copyright holder is reinstated (a)
405 | provisionally, unless and until the copyright holder explicitly and
406 | finally terminates your license, and (b) permanently, if the copyright
407 | holder fails to notify you of the violation by some reasonable means
408 | prior to 60 days after the cessation.
409 |
410 | Moreover, your license from a particular copyright holder is
411 | reinstated permanently if the copyright holder notifies you of the
412 | violation by some reasonable means, this is the first time you have
413 | received notice of violation of this License (for any work) from that
414 | copyright holder, and you cure the violation prior to 30 days after
415 | your receipt of the notice.
416 |
417 | Termination of your rights under this section does not terminate the
418 | licenses of parties who have received copies or rights from you under
419 | this License. If your rights have been terminated and not permanently
420 | reinstated, you do not qualify to receive new licenses for the same
421 | material under section 10.
422 |
423 | 9. Acceptance Not Required for Having Copies.
424 |
425 | You are not required to accept this License in order to receive or
426 | run a copy of the Program. Ancillary propagation of a covered work
427 | occurring solely as a consequence of using peer-to-peer transmission
428 | to receive a copy likewise does not require acceptance. However,
429 | nothing other than this License grants you permission to propagate or
430 | modify any covered work. These actions infringe copyright if you do
431 | not accept this License. Therefore, by modifying or propagating a
432 | covered work, you indicate your acceptance of this License to do so.
433 |
434 | 10. Automatic Licensing of Downstream Recipients.
435 |
436 | Each time you convey a covered work, the recipient automatically
437 | receives a license from the original licensors, to run, modify and
438 | propagate that work, subject to this License. You are not responsible
439 | for enforcing compliance by third parties with this License.
440 |
441 | An "entity transaction" is a transaction transferring control of an
442 | organization, or substantially all assets of one, or subdividing an
443 | organization, or merging organizations. If propagation of a covered
444 | work results from an entity transaction, each party to that
445 | transaction who receives a copy of the work also receives whatever
446 | licenses to the work the party's predecessor in interest had or could
447 | give under the previous paragraph, plus a right to possession of the
448 | Corresponding Source of the work from the predecessor in interest, if
449 | the predecessor has it or can get it with reasonable efforts.
450 |
451 | You may not impose any further restrictions on the exercise of the
452 | rights granted or affirmed under this License. For example, you may
453 | not impose a license fee, royalty, or other charge for exercise of
454 | rights granted under this License, and you may not initiate litigation
455 | (including a cross-claim or counterclaim in a lawsuit) alleging that
456 | any patent claim is infringed by making, using, selling, offering for
457 | sale, or importing the Program or any portion of it.
458 |
459 | 11. Patents.
460 |
461 | A "contributor" is a copyright holder who authorizes use under this
462 | License of the Program or a work on which the Program is based. The
463 | work thus licensed is called the contributor's "contributor version".
464 |
465 | A contributor's "essential patent claims" are all patent claims
466 | owned or controlled by the contributor, whether already acquired or
467 | hereafter acquired, that would be infringed by some manner, permitted
468 | by this License, of making, using, or selling its contributor version,
469 | but do not include claims that would be infringed only as a
470 | consequence of further modification of the contributor version. For
471 | purposes of this definition, "control" includes the right to grant
472 | patent sublicenses in a manner consistent with the requirements of
473 | this License.
474 |
475 | Each contributor grants you a non-exclusive, worldwide, royalty-free
476 | patent license under the contributor's essential patent claims, to
477 | make, use, sell, offer for sale, import and otherwise run, modify and
478 | propagate the contents of its contributor version.
479 |
480 | In the following three paragraphs, a "patent license" is any express
481 | agreement or commitment, however denominated, not to enforce a patent
482 | (such as an express permission to practice a patent or covenant not to
483 | sue for patent infringement). To "grant" such a patent license to a
484 | party means to make such an agreement or commitment not to enforce a
485 | patent against the party.
486 |
487 | If you convey a covered work, knowingly relying on a patent license,
488 | and the Corresponding Source of the work is not available for anyone
489 | to copy, free of charge and under the terms of this License, through a
490 | publicly available network server or other readily accessible means,
491 | then you must either (1) cause the Corresponding Source to be so
492 | available, or (2) arrange to deprive yourself of the benefit of the
493 | patent license for this particular work, or (3) arrange, in a manner
494 | consistent with the requirements of this License, to extend the patent
495 | license to downstream recipients. "Knowingly relying" means you have
496 | actual knowledge that, but for the patent license, your conveying the
497 | covered work in a country, or your recipient's use of the covered work
498 | in a country, would infringe one or more identifiable patents in that
499 | country that you have reason to believe are valid.
500 |
501 | If, pursuant to or in connection with a single transaction or
502 | arrangement, you convey, or propagate by procuring conveyance of, a
503 | covered work, and grant a patent license to some of the parties
504 | receiving the covered work authorizing them to use, propagate, modify
505 | or convey a specific copy of the covered work, then the patent license
506 | you grant is automatically extended to all recipients of the covered
507 | work and works based on it.
508 |
509 | A patent license is "discriminatory" if it does not include within
510 | the scope of its coverage, prohibits the exercise of, or is
511 | conditioned on the non-exercise of one or more of the rights that are
512 | specifically granted under this License. You may not convey a covered
513 | work if you are a party to an arrangement with a third party that is
514 | in the business of distributing software, under which you make payment
515 | to the third party based on the extent of your activity of conveying
516 | the work, and under which the third party grants, to any of the
517 | parties who would receive the covered work from you, a discriminatory
518 | patent license (a) in connection with copies of the covered work
519 | conveyed by you (or copies made from those copies), or (b) primarily
520 | for and in connection with specific products or compilations that
521 | contain the covered work, unless you entered into that arrangement,
522 | or that patent license was granted, prior to 28 March 2007.
523 |
524 | Nothing in this License shall be construed as excluding or limiting
525 | any implied license or other defenses to infringement that may
526 | otherwise be available to you under applicable patent law.
527 |
528 | 12. No Surrender of Others' Freedom.
529 |
530 | If conditions are imposed on you (whether by court order, agreement or
531 | otherwise) that contradict the conditions of this License, they do not
532 | excuse you from the conditions of this License. If you cannot convey a
533 | covered work so as to satisfy simultaneously your obligations under this
534 | License and any other pertinent obligations, then as a consequence you may
535 | not convey it at all. For example, if you agree to terms that obligate you
536 | to collect a royalty for further conveying from those to whom you convey
537 | the Program, the only way you could satisfy both those terms and this
538 | License would be to refrain entirely from conveying the Program.
539 |
540 | 13. Remote Network Interaction; Use with the GNU General Public License.
541 |
542 | Notwithstanding any other provision of this License, if you modify the
543 | Program, your modified version must prominently offer all users
544 | interacting with it remotely through a computer network (if your version
545 | supports such interaction) an opportunity to receive the Corresponding
546 | Source of your version by providing access to the Corresponding Source
547 | from a network server at no charge, through some standard or customary
548 | means of facilitating copying of software. This Corresponding Source
549 | shall include the Corresponding Source for any work covered by version 3
550 | of the GNU General Public License that is incorporated pursuant to the
551 | following paragraph.
552 |
553 | Notwithstanding any other provision of this License, you have
554 | permission to link or combine any covered work with a work licensed
555 | under version 3 of the GNU General Public License into a single
556 | combined work, and to convey the resulting work. The terms of this
557 | License will continue to apply to the part which is the covered work,
558 | but the work with which it is combined will remain governed by version
559 | 3 of the GNU General Public License.
560 |
561 | 14. Revised Versions of this License.
562 |
563 | The Free Software Foundation may publish revised and/or new versions of
564 | the GNU Affero General Public License from time to time. Such new versions
565 | will be similar in spirit to the present version, but may differ in detail to
566 | address new problems or concerns.
567 |
568 | Each version is given a distinguishing version number. If the
569 | Program specifies that a certain numbered version of the GNU Affero General
570 | Public License "or any later version" applies to it, you have the
571 | option of following the terms and conditions either of that numbered
572 | version or of any later version published by the Free Software
573 | Foundation. If the Program does not specify a version number of the
574 | GNU Affero General Public License, you may choose any version ever published
575 | by the Free Software Foundation.
576 |
577 | If the Program specifies that a proxy can decide which future
578 | versions of the GNU Affero General Public License can be used, that proxy's
579 | public statement of acceptance of a version permanently authorizes you
580 | to choose that version for the Program.
581 |
582 | Later license versions may give you additional or different
583 | permissions. However, no additional obligations are imposed on any
584 | author or copyright holder as a result of your choosing to follow a
585 | later version.
586 |
587 | 15. Disclaimer of Warranty.
588 |
589 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
590 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
591 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
592 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
593 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
594 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
595 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
596 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
597 |
598 | 16. Limitation of Liability.
599 |
600 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
601 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
602 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
603 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
604 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
605 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
606 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
607 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
608 | SUCH DAMAGES.
609 |
610 | 17. Interpretation of Sections 15 and 16.
611 |
612 | If the disclaimer of warranty and limitation of liability provided
613 | above cannot be given local legal effect according to their terms,
614 | reviewing courts shall apply local law that most closely approximates
615 | an absolute waiver of all civil liability in connection with the
616 | Program, unless a warranty or assumption of liability accompanies a
617 | copy of the Program in return for a fee.
618 |
619 | END OF TERMS AND CONDITIONS
620 |
621 | How to Apply These Terms to Your New Programs
622 |
623 | If you develop a new program, and you want it to be of the greatest
624 | possible use to the public, the best way to achieve this is to make it
625 | free software which everyone can redistribute and change under these terms.
626 |
627 | To do so, attach the following notices to the program. It is safest
628 | to attach them to the start of each source file to most effectively
629 | state the exclusion of warranty; and each file should have at least
630 | the "copyright" line and a pointer to where the full notice is found.
631 |
632 | <one line to give the program's name and a brief idea of what it does.>
633 | Copyright (C) <year>  <name of author>
634 |
635 | This program is free software: you can redistribute it and/or modify
636 | it under the terms of the GNU Affero General Public License as published
637 | by the Free Software Foundation, either version 3 of the License, or
638 | (at your option) any later version.
639 |
640 | This program is distributed in the hope that it will be useful,
641 | but WITHOUT ANY WARRANTY; without even the implied warranty of
642 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
643 | GNU Affero General Public License for more details.
644 |
645 | You should have received a copy of the GNU Affero General Public License
646 | along with this program. If not, see <https://www.gnu.org/licenses/>.
647 |
648 | Also add information on how to contact you by electronic and paper mail.
649 |
650 | If your software can interact with users remotely through a computer
651 | network, you should also make sure that it provides a way for users to
652 | get its source. For example, if your program is a web application, its
653 | interface could display a "Source" link that leads users to an archive
654 | of the code. There are many ways you could offer source, and different
655 | solutions will be better for different programs; see section 13 for the
656 | specific requirements.
657 |
658 | You should also get your employer (if you work as a programmer) or school,
659 | if any, to sign a "copyright disclaimer" for the program, if necessary.
660 | For more information on this, and how to apply and follow the GNU AGPL, see
661 | <https://www.gnu.org/licenses/why-affero-gpl.html>.
662 |
663 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Eva Robot ROS Sensory+Motor API
2 | ===============================
3 | This repo contains ROS nodes for sensory input (vision, audio) and
4 | motor movements for the Hanson Robotics Eva robot. This repo used to
5 | contain more of the subsystem, but all of that code has been moved to
6 | the main OpenCog repo:
7 |
8 | https://github.com/opencog/opencog/tree/master/opencog/eva
9 |
10 | What is left here is an assortment of ROS nodes that subscribe
11 | to ROS visual and audio sensory inputs, and forward these to the
12 | OpenCog spacetime server (performing the needed format conversion).
13 |
14 | Design goals
15 | ------------
16 | Provide a convenient, flexible interface between ROS and the OpenCog
17 | servers.
18 |
19 | Current Architecture and Design
20 | -------------------------------
21 | At this time, the code here provides a perception subsystem:
22 |
23 | * Several ROS nodes that forward visual and sound data to the
24 | OpenCog spacetime server. This includes 3D locations of visible
25 | faces, the names of any recognized faces (as identified by some
26 | external software), the direction from which sounds are coming,
27 | and audio-power samples. (See the sketch below.)
28 |
29 | (This needs to be replaced by a (much) better visual system.)
30 |
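Conceptually, each forwarding node is just a ROS subscriber that turns a
message into a scheme expression and hands it to the cogserver. The sketch
below is hypothetical (the topic name, message type, shell handshake and
port are assumptions); the real forwarding code lives in `sensors/`, e.g.
`sensors/atomic_msgs.py` and `sensors/netcat.py`.

```python
#! /usr/bin/env python
# Hypothetical minimal forwarder (Python 2, as used elsewhere in this repo).
# The topic name, message type and cogserver handshake are assumptions here.
import socket
import rospy
from std_msgs.msg import Int32

def send_to_cogserver(scheme_str):
    # Hand one scheme expression to the cogserver shell (default port 17020).
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(("localhost", 17020))
    sock.sendall("scm hush\n" + scheme_str + "\n.\n")
    sock.close()

def face_callback(msg):
    # Forward the id of a newly visible face to the spacetime server.
    send_to_cogserver('(ConceptNode "visible-face-%d")' % msg.data)

if __name__ == "__main__":
    rospy.init_node("example_forwarder")
    rospy.Subscriber("/example/face_id", Int32, face_callback)
    rospy.spin()
```
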
31 | Some things it currently doesn't do, but should:
32 |
33 | * Integrate superior face-tracking and face recognition tools.
34 | Right now, the face tracker recognizes known faces only with
35 | difficulty.
36 |
37 | * Additional sensory systems and sensory inputs. A perception
38 | synthesizer to coordinate all sensory input. High priority:
39 |
40 | ++ Audio power envelope (half-done, see `sensors/audio_power.py`),
41 | fundamental frequency (of voice), rising/falling tone.
42 | Background audio power. Length of silent pauses. Detection
43 | of applause, laughter, loud voices in the background, loud
44 | bangs.
45 |
46 | ++ Video-chaos: Is it light or dark? Is there lots of random
47 | motion in the visual field, or are things visually settled?
48 |
49 | Status
50 | ------
51 | It works and is in regular (daily) use (2015, 2016, 2017...).
52 |
53 | Setup
54 | -------
55 | Initial setup; it only needs to be run once.
56 | ```
57 | bash scripts/setup.sh
58 | ```
59 |
60 | Install
61 | -------
62 | Run the following command.
63 | ```
64 | bash scripts/install.sh
65 | ```
66 |
67 | Running
68 | -------
69 | Run the following command.
70 | ```
71 | bash scripts/run.sh <robot-name>
72 | ```
73 | `<robot-name>` could be `han` or `sophia10`.
74 |
75 | TODO
76 | ----
77 | * Need major overhaul of the time-space server APIs. Need to be able
78 | to query them with the pattern matcher.
79 | * Need to create time-query atoms.
80 | * Need to place sound-source direction into the space-server (i.e. currently
81 | in time-map.scm, map-sound).
82 |
--------------------------------------------------------------------------------
/movement/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | INSTALL (FILES
2 |     movement.scm
3 |     DESTINATION "${DATADIR}/scm/opencog/"
4 | )
5 | 
6 | INSTALL (FILES
7 |     do-movement.scm
8 |     DESTINATION "${DATADIR}/scm/opencog/movement"
9 | )
10 | 
11 | INSTALL (FILES
12 |     atomic.py
13 |     atomic-dbg.py
14 |     ros_commo.py
15 |     DESTINATION "${DATADIR}/python/"
16 | )
17 |
--------------------------------------------------------------------------------
/movement/README.md:
--------------------------------------------------------------------------------
1 |
2 | Movement Interfaces
3 | ===================
4 |
5 | This directory defines an API for controlling the robot's physical
6 | movements, including making facial expressions, controlling the
7 | eye blink rates, looking at locations, and saying things with
8 | some sort of vocal inflection.
9 |
10 | The API publishes various ROS messages to drive the robot.
11 | The current API assumes that there is some animation rig that
12 | converts animations to actual motor-drive commands. This is
13 | currently `blender`; the blender animation API is defined in
14 | [blender_api](https://github.com/hansonrobotics/blender_api).
15 |
16 | The OpenCog behaviors interface to ROS by calling the functions in
17 | `atomic.py`. This file is just a "thin" wrapper around the actual ROS
18 | code, which is in `ros_commo.py`. A non-ROS, debugging-only interface
19 | is in `atomic-dbg.py`; it does not import ROS, and only prints to
20 | stdout. It can be used for a text-only chatbot.
21 |
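For a quick dry run, the stub can be exercised from a plain Python 2 shell,
with no ROS and no cogserver running. The snippet below is hypothetical; it
fakes the atom argument, since the stub only ever reads the `.name` field
(the OpenCog Python bindings still need to be installed, because the stub
imports `TruthValue`):

```python
# Hypothetical dry run of the debug stub (Python 2); no ROS required.
execfile("movement/atomic-dbg.py")

class FakeNode(object):
    """Minimal stand-in for an Atom: the stub only reads .name."""
    def __init__(self, name):
        self.name = name

do_wake_up()                   # prints "(Eva wakes up)"
look_at_face(FakeNode("29"))   # prints "(Eva looks at face id 29 )"
say_text(FakeNode("hello"))    # prints "(Eva says:  hello )"
```

The same function signatures are used by `atomic.py`, so the same calls work
against the real robot once ROS and the blender rig are up.
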
22 | Debugging notes
23 | ===============
24 | Cython modules are installed here:
25 | ```
26 | /usr/local/share/opencog/python/opencog
27 | ```
28 |
29 | You can get a python command-line from the cogserver, like so:
30 | ```
31 | rlwrap telnet localhost 17020
32 | ```
33 | and then enter the python interpreter by saying `py`. You can get
34 | a scheme interpreter by saying `scm`. You can telnet multiple times.
35 | You can also call python from scheme, and scheme from python.
36 |
37 | From the python prompt, the following should list the python
38 | opencog modules:
39 | ```
40 | help('opencog')
41 | ```
42 |
43 | Verifying movements
44 | ===================
45 |
46 | Assuming that `face_id` 29 is in the atomspace, the robot
47 | can turn to look at that face:
48 |
49 | ```
50 | rostopic pub --once /opencog/glance_at std_msgs/Int32 29
51 | rostopic pub --once /opencog/look_at std_msgs/Int32 29
52 | rostopic pub --once /opencog/gaze_at std_msgs/Int32 29
53 | ```
54 |
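The same `look_at` request can also be published from a short Python script;
this is a hypothetical sketch that reuses the topic name shown above:

```python
#! /usr/bin/env python
# Hypothetical sketch: publish the same look-at request from Python.
import rospy
from std_msgs.msg import Int32

rospy.init_node("look_at_test")
pub = rospy.Publisher("/opencog/look_at", Int32, queue_size=1)
rospy.sleep(1.0)        # give the publisher a moment to connect
pub.publish(Int32(29))  # face_id 29, as in the rostopic example above
```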
--------------------------------------------------------------------------------
/movement/atomic-dbg.py:
--------------------------------------------------------------------------------
1 | #
2 | # atomic-dbg.py - OpenCog python schema debug wrapper.
3 | #
4 | # This is a wrapper for debugging the OpenCog code that controls the Eva
5 | # blender model. It provides exactly the same GroundedPredicateNode functions
6 | # as the normal API, but, instead of sending messages out on ROS, it simply
7 | # prints to stdout. Thus, it's a stub, and can be used without starting up
8 | # all of ROS and blender. Handy for behavior debugging, as well as for
9 | # vision-deprived, disembodied chatbot debugging.
10 | #
11 | # Copyright (C) 2015, 2016 Linas Vepstas
12 | #
13 | # This program is free software; you can redistribute it and/or modify
14 | # it under the terms of the GNU Affero General Public License v3 as
15 | # published by the Free Software Foundation and including the exceptions
16 | # at http://opencog.org/wiki/Licenses
17 | #
18 | # This program is distributed in the hope that it will be useful,
19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 | # GNU General Public License for more details.
22 | #
23 | # You should have received a copy of the GNU Affero General Public License
24 | # along with this program; if not, write to:
25 | # Free Software Foundation, Inc.,
26 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 |
28 | import time
29 | from opencog.atomspace import TruthValue
30 |
31 | # Global functions, because that's what PythonEval expects.
32 | # Would be great if PythonEval was fixed to work smarter, not harder.
33 | #
34 | # Must return TruthValue, since EvaluationLinks expect TruthValues.
35 | #
36 | # The print messages are all parenthetical, third-person; that's because
37 | # they will typically be going out to some IRC channel, and should resemble
38 | # captioning-for-the-blind, in this situation (or stage directions).
39 | #
40 | def prt_msg(face_id_node):
41 |     face_id = int(face_id_node.name)
42 |     print "Python face id", face_id
43 |     return TruthValue(1, 1)
44 | 
45 | def do_wake_up():
46 |     # evl.wake_up()
47 |     print "(Eva wakes up)"
48 |     return TruthValue(1, 1)
49 | 
50 | def do_go_sleep():
51 |     # evl.go_sleep()
52 |     print "(Eva falls asleep)"
53 |     return TruthValue(1, 1)
54 | 
55 | def glance_at_face(face_id_node):
56 |     face_id = int(float(face_id_node.name))
57 |     print "(Eva glances at face id", face_id, ")"
58 |     # evl.glance_at(face_id)
59 |     return TruthValue(1, 1)
60 | 
61 | def look_at_face(face_id_node):
62 |     now = time.time()
63 |     if now - look_at_face.prev_time > 3 :
64 |         look_at_face.prev_time = now
65 |         face_id = int(float(face_id_node.name))
66 |         print "(Eva looks at face id", face_id, ")"
67 | 
68 |         # evl.look_at(face_id)
69 |     return TruthValue(1, 1)
70 | 
71 | look_at_face.prev_time = 0
72 | 
73 | def gaze_at_face(face_id_node):
74 |     face_id = int(float(face_id_node.name))
75 |     print "(Eva gazes at face id", face_id, ")"
76 |     # evl.gaze_at(face_id)
77 |     return TruthValue(1, 1)
78 | 
79 | 
80 | # Moves eyes only, not entire head.
81 | def gaze_at_point(x_node, y_node, z_node):
82 |     x = float(x_node.name)
83 |     y = float(y_node.name)
84 |     z = float(z_node.name)
85 | 
86 |     # Plain-English description of the actions.
87 |     if (y < -0.1):
88 |         print "(Eva looks to the right)"
89 |     elif (y > 0.1):
90 |         print "(Eva looks to the left)"
91 | 
92 |     elif (-0.1 < y and y < 0.1 and -0.1 < z and z < 0.1):
93 |         print "(Eva looks straight ahead)"
94 | 
95 |     if (z < -0.1):
96 |         print "(Eva looks down)"
97 |     elif (z > 0.1):
98 |         print "(Eva looks up)"
99 | 
100 |     # print "(Eva gazes at point", x, y, z, ")"
101 |     # evl.gaze_at_point(x, y, z)
102 |     return TruthValue(1, 1)
103 | 
104 | # Turns entire head.
105 | def look_at_point(x_node, y_node, z_node):
106 |     x = float(x_node.name)
107 |     y = float(y_node.name)
108 |     z = float(z_node.name)
109 | 
110 |     # Plain-English description of the actions.
111 |     if (y < -0.1):
112 |         print "(Eva turns to the right)"
113 |     elif (y > 0.1):
114 |         print "(Eva turns to the left)"
115 | 
116 |     elif (-0.1 < y and y < 0.1 and -0.1 < z and z < 0.1):
117 |         print "(Eva turns straight ahead)"
118 | 
119 |     if (z < -0.1):
120 |         print "(Eva turns her face downwards)"
121 |     elif (z > 0.1):
122 |         print "(Eva turns her face upwards)"
123 | 
124 | 
125 |     # print "(Eva looks at point", x, y, z, ")"
126 |     # evl.look_at_point(x, y, z)
127 |     return TruthValue(1, 1)
128 | 
129 | def do_face_expression(face_expression_node, duration_node, intensity_node):
130 |     face_expression = face_expression_node.name
131 |     duration = float(duration_node.name)
132 |     intensity = float(intensity_node.name)
133 |     print "(Eva expresses", face_expression, "facial expression for", duration, \
134 |         "seconds, with intensity", intensity, ")"
135 | 
136 |     # print "Python facial expression: ", face_expression, " for ", duration, " int ", intensity
137 |     # evl.expression(face_expression, intensity, duration)
138 |     return TruthValue(1, 1)
139 | 
140 | def do_gesture(gesture_node, intensity_node, repeat_node, speed_node):
141 |     gesture = gesture_node.name
142 |     intensity = float(intensity_node.name)
143 |     repeat = float(repeat_node.name)
144 |     speed = float(speed_node.name)
145 |     # evl.gesture(gesture, intensity, repeat, speed)
146 |     print "(Eva performs gesture:", gesture, ", intensity: ", intensity, \
147 |         ", repeat: ", repeat, ", speed: ", speed, ")"
148 |     return TruthValue(1, 1)
149 | 
150 | def publish_behavior(event_node):
151 |     print "(Behavior event:", event_node.name, ")"
152 |     return TruthValue(1, 1)
153 | 
154 | def explore_saccade():
155 |     print "(Eva switches to explore saccade)"
156 |     # evl.explore_saccade()
157 |     return TruthValue(1, 1)
158 | 
159 | def conversational_saccade():
160 |     print "(Eva switches to conversational saccade)"
161 |     # evl.conversational_saccade()
162 |     return TruthValue(1, 1)
163 | 
164 | def listening_saccade():
165 |     print "(Eva switches to listening saccade)"
166 |     # evl.listening_saccade()
167 |     return TruthValue(1, 1)
168 | 
169 | def blink_rate(mean_node, var_node):
170 |     mean = float(mean_node.name)
171 |     var = float(var_node.name)
172 |     print "(Eva blink-rate: ", mean, " variation ", var, ")"
173 |     # evl.blink_rate(mean, var)
174 |     return TruthValue(1, 1)
175 | 
176 | def say_text(text_node):
177 |     text = text_node.name
178 |     print "(Eva says: ", text, ")"
179 |     # evl.say_text(text)
180 |     return TruthValue(1, 1)
181 | 
182 | # Return true as long as ROS is running.
183 | def ros_is_running():
184 |     # if (rospy.is_shutdown())
185 |     #     return TruthValue(0, 1)
186 |     return TruthValue(1, 1)
187 |
--------------------------------------------------------------------------------
/movement/atomic.py:
--------------------------------------------------------------------------------
1 | #
2 | # atomic.py - OpenCog python schema to control Eva.
3 | #
4 | # This is a wrapper around a ROS node that is able to control the Eva
5 | # blender model. This wrapper is designed so that each function can be
6 | # called by OpenCog, from a GroundedPredicateNode. The functions are
7 | # simple, and correspond more-or-less directly to the Eva API: the
8 | # blender API can be told to play the smile animation, or the blink
9 | # animation, or frown, fall asleep, turn, look, and so on.
10 | #
11 | # Copyright (C) 2015 Linas Vepstas
12 | #
13 | # This program is free software; you can redistribute it and/or modify
14 | # it under the terms of the GNU Affero General Public License v3 as
15 | # published by the Free Software Foundation and including the exceptions
16 | # at http://opencog.org/wiki/Licenses
17 | #
18 | # This program is distributed in the hope that it will be useful,
19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 | # GNU General Public License for more details.
22 | #
23 | # You should have received a copy of the GNU Affero General Public License
24 | # along with this program; if not, write to:
25 | # Free Software Foundation, Inc.,
26 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 |
28 | # XXX To be removed when https://github.com/hansonrobotics/HEAD/issues/618
29 | # is resolved nicely
30 | import sys
31 | sys.path.append("/opt/hansonrobotics/ros/lib/python2.7/dist-packages/")
32 |
33 | import rospy
34 | from ros_commo import EvaControl
35 | from opencog.atomspace import TruthValue
36 |
37 | # The ROS layer.
38 | evl = EvaControl()
39 |
40 | # Global functions, because that's what PythonEval expects.
41 | # Would be great if PythonEval was fixed to work smarter, not harder.
42 | #
43 | # Must return TruthValue, since EvaluationLinks expect TruthValues.
44 |
45 | def do_wake_up():
46 |     evl.wake_up()
47 |     return TruthValue(1, 1)
48 | 
49 | def do_go_sleep():
50 |     evl.go_sleep()
51 |     return TruthValue(1, 1)
52 | 
53 | def glance_at_face(face_id_node):
54 |     face_id = int(float(face_id_node.name))
55 |     print "Python glance at face id", face_id
56 |     evl.glance_at(face_id)
57 |     return TruthValue(1, 1)
58 | 
59 | def look_at_face(face_id_node):
60 |     face_id = int(float(face_id_node.name))
61 |     print "Python look at face id", face_id
62 |     evl.look_at(face_id)
63 |     return TruthValue(1, 1)
64 | 
65 | def gaze_at_face(face_id_node):
66 |     face_id = int(float(face_id_node.name))
67 |     print "Python gaze at face id", face_id
68 |     evl.gaze_at(face_id)
69 |     return TruthValue(1, 1)
70 | 
71 | def gaze_at_point(x_node, y_node, z_node):
72 |     x = float(x_node.name)
73 |     y = float(y_node.name)
74 |     z = float(z_node.name)
75 |     evl.gaze_at_point(x, y, z)
76 |     return TruthValue(1, 1)
77 | 
78 | def look_at_point(x_node, y_node, z_node):
79 |     x = float(x_node.name)
80 |     y = float(y_node.name)
81 |     z = float(z_node.name)
82 |     evl.look_at_point(x, y, z)
83 |     return TruthValue(1, 1)
84 | 
85 | def do_face_expression(face_expression_node, duration_node, intensity_node):
86 |     face_expression = face_expression_node.name
87 |     intensity = float(intensity_node.name)
88 |     duration = float(duration_node.name)
89 |     print("Python facial expression: ", face_expression, " for ",
90 |         duration, " int ", intensity)
91 |     evl.expression(face_expression, intensity, duration)
92 |     return TruthValue(1, 1)
93 | 
94 | def do_gesture(gesture_node, intensity_node, repeat_node, speed_node):
95 |     gesture = gesture_node.name
96 |     intensity = float(intensity_node.name)
97 |     repeat = float(repeat_node.name)
98 |     speed = float(speed_node.name)
99 |     print("Python gesture: ", gesture, ", int: ", intensity,
100 |         ", rep: ", repeat, ", speed: ", speed)
101 |     evl.gesture(gesture, intensity, repeat, speed)
102 |     return TruthValue(1, 1)
103 | 
104 | def publish_behavior(event_node):
105 |     print ("(Behavior event:", event_node.name, ")")
106 |     evl.publish_behavior(event_node.name)
107 |     return TruthValue(1, 1)
108 | 
109 | def explore_saccade():
110 |     print "Python: Explore Saccade"
111 |     evl.explore_saccade()
112 |     return TruthValue(1, 1)
113 | 
114 | def conversational_saccade():
115 |     print "Python: Conversational Saccade"
116 |     evl.conversational_saccade()
117 |     return TruthValue(1, 1)
118 | 
119 | def listening_saccade():
120 |     print "Python: Listening Saccade"
121 |     evl.listening_saccade()
122 |     return TruthValue(1, 1)
123 | 
124 | def blink_rate(mean_node, var_node):
125 |     mean = float(mean_node.name)
126 |     var = float(var_node.name)
127 |     print "Python: blink-rate: ", mean, " variation ", var
128 |     evl.blink_rate(mean, var)
129 |     return TruthValue(1, 1)
130 | 
131 | def say_text(text_node):
132 |     text = text_node.name
133 |     evl.say_text(text)
134 |     return TruthValue(1, 1)
135 | 
136 | # Return true as long as ROS is running.
137 | def ros_is_running():
138 |     if (rospy.is_shutdown()):
139 |         return TruthValue(0, 1)
140 |     return TruthValue(1, 1)
141 |
--------------------------------------------------------------------------------
/movement/do-movement.scm:
--------------------------------------------------------------------------------
1 | ;
2 | ; do-movement.scm
3 | ;
4 | ; Implement the movement API for ROS/blender animations.
5 | ;
6 |
7 | ; Delete the current definition, if any.
8 | (define (delete-definition STR)
9 | (define dfn
10 | (cog-get-link 'DefineLink 'DefinedPredicateNode
11 | (DefinedPredicate STR)))
12 |
13 | (if (not (null? dfn)) (cog-delete (car dfn)) #f))
14 |
15 | ; -------------------------------------------------------------
16 | ; Request a display of a facial expression (smile, frown, etc.)
17 | ; The expression name should be one of the supported blender animations.
18 | ;
19 | ; Example usage:
20 | ; (cog-evaluate! (Put (DefinedPredicate "Show facial expression")
21 | ; (ListLink (Concept "happy") (Number 6) (Number 0.6))))
22 | ;
23 | (delete-definition "Do show facial expression")
24 | (DefineLink
25 | (DefinedPredicate "Do show facial expression")
26 | (LambdaLink
27 | (VariableList
28 | (Variable "$expr")
29 | (Variable "$duration")
30 | (Variable "$intensity"))
31 | (SequentialAndLink
32 | (EvaluationLink (GroundedPredicate "py:do_face_expression")
33 | (ListLink
34 | (Variable "$expr")
35 | (Variable "$duration")
36 | (Variable "$intensity")))
37 | )))
38 |
39 | ; -------------------------------------------------------------
40 | ; Request a display of a facial gesture (blink, nod, etc.)
41 | ; The gesture name should be one of the supported blender animations.
42 | ;
43 | ; Example usage:
44 | ; (cog-evaluate! (Put (DefinedPredicate "Show gesture")
45 | ; (ListLink (Concept "blink") (Number 0.8) (Number 3) (Number 1))))
46 | ;
47 | (delete-definition "Do show gesture")
48 | (DefineLink
49 | (DefinedPredicate "Do show gesture")
50 | (LambdaLink
51 | (VariableList
52 | (Variable "$gest")
53 | 			(Variable "$intensity")
54 | (Variable "$repeat")
55 | (Variable "$speed"))
56 | (SequentialAndLink
57 | ;; Send it off to ROS to actually do it.
58 | (EvaluationLink (GroundedPredicate "py:do_gesture")
59 | (ListLink
60 | (Variable "$gest")
61 | 				(Variable "$intensity")
62 | (Variable "$repeat")
63 | (Variable "$speed")))
64 | )))
65 |
66 | ; -------------------------------------------------------------
67 | ; Eye-saccade control.
68 | ; Example: (cog-evaluate! (DefinedPredicate "Conversational Saccade"))
69 |
70 | (delete-definition "Conversational Saccade")
71 | (delete-definition "Listening Saccade")
72 | (delete-definition "Explore Saccade")
73 |
74 | (DefineLink
75 | (DefinedPredicate "Conversational Saccade")
76 | (LambdaLink
77 | (Evaluation
78 | (GroundedPredicate "py: conversational_saccade")
79 | (List))
80 | ))
81 |
82 | (DefineLink
83 | (DefinedPredicate "Listening Saccade")
84 | (LambdaLink
85 | (Evaluation
86 | (GroundedPredicate "py: listening_saccade")
87 | (List))
88 | ))
89 |
90 | (DefineLink
91 | (DefinedPredicate "Explore Saccade")
92 | (LambdaLink
93 | (Evaluation
94 | (GroundedPredicate "py: explore_saccade")
95 | (List))
96 | ))
97 |
98 | ; -------------------------------------------------------------
99 | ; Control the blink rate of the robot.
100 |
101 | (delete-definition "Blink rate")
102 | (DefineLink
103 | (DefinedPredicate "Blink rate")
104 | (LambdaLink
105 | (VariableList (Variable "$mean") (Variable "$var"))
106 | (SequentialAndLink
107 | ;; Send it off to ROS to actually do it.
108 | (EvaluationLink (GroundedPredicate "py: blink_rate")
109 | (ListLink (Variable "$mean") (Variable "$var")))
110 | )))
111 |
112 | ; -------------------------------------------------------------
113 | ; Request robot to look at a specific coordinate point.
114 | ; Currently, a very thin wrapper around py:look_at_point
115 |
116 | (delete-definition "Do look at point")
117 | (DefineLink
118 | (DefinedPredicate "Do look at point")
119 | (LambdaLink
120 | (VariableList (Variable "$x") (Variable "$y") (Variable "$z"))
121 | (SequentialAndLink
122 | ;; Send it off to ROS to actually do it.
123 | (EvaluationLink (GroundedPredicate "py:look_at_point")
124 | (ListLink (Variable "$x") (Variable "$y") (Variable "$z")))
125 | )))
126 |
127 | ;---------------------------------------------------------------
128 |
129 | ; Request robot to turn eyes at a specific coordinate point.
130 | ; Currently, a very thin wrapper around py:gaze_at_point
131 |
132 | (delete-definition "Do gaze at point")
133 | (DefineLink
134 | (DefinedPredicate "Do gaze at point")
135 | (LambdaLink
136 | (VariableList (Variable "$x") (Variable "$y") (Variable "$z"))
137 | (SequentialAndLink
138 | ;; Log the time.
139 | ; (True (DefinedSchema "set gesture timestamp"))
140 | ;; Send it off to ROS to actually do it.
141 | (EvaluationLink (GroundedPredicate "py:gaze_at_point")
142 | (ListLink (Variable "$x") (Variable "$y") (Variable "$z")))
143 | )))
144 |
145 | ; -------------------------------------------------------------
146 | ; Publish the current behavior.
147 | ; Cheap hack to allow external ROS nodes to know what we are doing.
148 | ; The string name of the node is sent directly as a ROS String message
149 | ; to the "robot_behavior" topic.
150 | ;
151 | ; Example usage:
152 | ; (cog-evaluate! (Put (DefinedPredicate "Publish behavior")
153 | ; (ListLink (Concept "foobar joke"))))
154 | ;
155 | (delete-definition "Publish behavior")
156 | (DefineLink
157 | (DefinedPredicate "Publish behavior")
158 | (LambdaLink
159 | (VariableList (Variable "$bhv"))
160 | ;; Send it off to ROS to actually do it.
161 | (EvaluationLink (GroundedPredicate "py: publish_behavior")
162 | (ListLink (Variable "$bhv")))
163 | ))
164 |
165 | ; -------------------------------------------------------------
166 |
167 | ; Call once, to fall asleep.
168 | (delete-definition "Do go to sleep")
169 | (DefineLink
170 | (DefinedPredicate "Do go to sleep")
171 | ; Play the go-to-sleep animation.
172 | (Evaluation (GroundedPredicate "py:do_go_sleep") (ListLink))
173 | )
174 |
175 | ; Wake-up sequence
176 | (delete-definition "Do wake up")
177 | (DefineLink
178 | (DefinedPredicate "Do wake up")
179 |
180 | ; Run the wake animation.
181 | (Evaluation (GroundedPredicate "py:do_wake_up") (ListLink))
182 | )
183 |
184 | ; -------------------------------------------------------------
185 | ; Say something. To test, run:
186 | ; (cog-evaluate! (Put (DefinedPredicate "Say") (Node "this is a test")))
187 | (delete-definition "Say")
188 | (DefineLink
189 | (DefinedPredicate "Say")
190 | (LambdaLink (Variable "sentence")
191 | (Evaluation
192 | (GroundedPredicate "py: say_text")
193 | (List (Variable "sentence")))
194 | ))
195 |
196 | ; -------------------------------------------------------------
197 | ; Return true if ROS is still running.
198 | (delete-definition "ROS is running?")
199 | (DefineLink
200 | (DefinedPredicate "ROS is running?")
201 | (Evaluation
202 | (GroundedPredicate "py:ros_is_running") (ListLink)))
203 |
204 | ; -------------------------------------------------------------
205 | *unspecified* ; Make the load be silent
206 |
--------------------------------------------------------------------------------
/movement/movement.scm:
--------------------------------------------------------------------------------
1 | ;
2 | ; ROS robot movement module.
3 | ;
4 | ; Provides interfaces for making physical movements, and
5 | ; for saying things.
6 |
7 | (define-module (opencog movement))
8 |
9 | (use-modules (opencog) (opencog atom-types) (opencog python))
10 |
11 | (load "movement/do-movement.scm")
12 |
13 | ; Try loading the python code from this directory; else go for the
14 | ; install directory. This assumes that the current directory
15 | ; is in the python sys.path.
16 | ;
17 | ; If roscore is not running, then the load will hang. Thus, to avoid the
18 | ; hang, we test to see if we can talk to roscore. If we cannot, then load
19 | ; only the debug interfaces.
20 | ;
21 | ;
22 | (define-public (start-ros-movement-node)
23 | (python-eval "
24 | import os.path
25 | import sys
26 | import rosgraph
27 | # This is the hard-coded install path from the CMake file.
28 | sys.path.append('/usr/local/share/opencog/python')
29 | try:
30 | # Throw an exception if roscore is not running.
31 | rosgraph.Master('/rostopic').getPid()
32 | if (os.path.isfile('atomic.py')):
33 | # Python3 does not support execfile any longer
34 | # execfile('atomic.py'))
35 | exec(open('atomic.py').read())
36 | else:
37 | # execfile('/usr/local/share/opencog/python/atomic.py')
38 | exec(open('/usr/local/share/opencog/python/atomic.py').read())
39 | ros_is_running()
40 |     print('Loaded the OpenCog ROS Movement API')
41 | except:
42 | # execfile('/usr/local/share/opencog/python/atomic-dbg.py')
43 | exec(open('/usr/local/share/opencog/python/atomic-dbg.py').read())
44 |     print('Loaded the OpenCog Movement Debug API')
45 | "))
46 |
--------------------------------------------------------------------------------
/movement/ros_commo.py:
--------------------------------------------------------------------------------
1 | #
2 | # ros_commo.py - ROS messaging module for OpenCog behaviors.
3 | # Copyright (C) 2015 Hanson Robotics
4 | #
5 | # This program is free software; you can redistribute it and/or modify
6 | # it under the terms of the GNU Affero General Public License v3 as
7 | # published by the Free Software Foundation and including the exceptions
8 | # at http://opencog.org/wiki/Licenses
9 | #
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Affero General Public License
16 | # along with this program; if not, write to:
17 | # Free Software Foundation, Inc.,
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 |
20 | import rosmsg
21 | import rospy
22 | import roslib
23 | import time
24 | import logging
25 | import random
26 | import tf
27 | import numpy
28 | # Eva ROS message imports
29 | from std_msgs.msg import String, Int32
30 | from blender_api_msgs.msg import AvailableEmotionStates, AvailableGestures
31 | from blender_api_msgs.msg import EmotionState
32 | from blender_api_msgs.msg import SetGesture
33 | from blender_api_msgs.msg import Target
34 | from blender_api_msgs.msg import BlinkCycle
35 | from blender_api_msgs.msg import SaccadeCycle
36 | from blender_api_msgs.msg import SomaState
37 | # from chatbot.msg import ChatMessage
38 | # from msg import ChatMessage
39 | from hr_msgs.msg import ChatMessage
40 |
41 | logger = logging.getLogger('hr.OpenCog_Eva')
42 |
43 | # ROS interfaces for the Atomese (OpenCog) Behavior Tree. Publishes
44 | # ROS messages for animation control (smiling, frowning), and subscribes
45 | # to STT/TTS and chatbot messages.
46 | #
47 | # This is meant to be a convenience wrapper, allowing Eva to be
48 | # controlled from OpenCog Atomese. Although it probably works as
49 | # a stand-alone ROS node, it was not designed to be used that way.
50 | # In particular, the python interpreter built into the atomspace
51 | # will be running this code.
52 | #
53 | # It currently handles both control messages (publishing of expression
54 | # and gesture animations), as well as some sensory input (mostly
55 | # STT, TTS and chatbot interactions). Visual servoing for face tracking
56 | # is done by a stand-alone ROS node, in the face_tracker directory.
57 | #
58 | # This does listen to several topics that are used to turn behaviors on
59 | # and off:
60 | #
61 | # `/behavior_switch`, which is used to start and stop the behavior tree.
62 | #
63 | # `/behavior_control`, which is used to enable/disable the publication
64 | # of classes of expression/gesture messages.
65 | #
66 | class EvaControl():
67 |
68 | # Control bitflags. Bit-wise anded with control_mode. If the bit
69 | # is set, then the corresponding ROS message is emitted, else it
70 | # is not.
71 | C_EXPRESSION = 1
72 | C_GESTURE = 2
73 | C_SOMA = 4
74 | C_SACCADE = 8
75 | C_EYES = 16
76 | C_FACE = 32
77 |
78 | def step(self):
79 | print "step once"
80 | return not rospy.is_shutdown()
81 | 	# Temporarily disable sleeping.
82 | def go_sleep(self):
83 | # Vytas altered this in commit
84 | # 67ba02f75c5f82f4abb3e600711c97f65f007534
85 | # presumably because of conflicts with the current blender model!?
86 | # Or perhaps the behavior tree is sleeping too often?
87 | # self.soma_state('sleep', 1, 1, 3)
88 | # self.soma_state('normal', 0, 1, 0)
89 | self.soma_state('normal', 0.1, 1, 3)
90 |
91 | def wake_up(self):
92 | # self.soma_state('sleep', 0, 1, 0)
93 | self.soma_state('normal', 0.1, 1, 3)
94 |
95 | # ----------------------------------------------------------
96 | # Wrapper for facial expressions
97 | def expression(self, name, intensity, duration):
98 | if 'noop' == name or (not self.control_mode & self.C_EXPRESSION):
99 | return
100 | # Create the message
101 | exp = EmotionState()
102 | exp.name = name
103 | exp.magnitude = intensity
104 | exp.duration.secs = int(duration)
105 | exp.duration.nsecs = 1000000000 * (duration - int(duration))
106 | self.expression_pub.publish(exp)
107 | print "Publish facial expression:", exp.name
108 |
109 | # Wrapper for Soma state expressions
110 | def soma_state(self, name, intensity, rate, ease_in=0.0):
111 | if 'noop' == name or (not self.control_mode & self.C_SOMA):
112 | return
113 | # Create the message
114 | soma = SomaState()
115 | soma.name = name
116 | soma.magnitude = intensity
117 | soma.rate = rate
118 | soma.ease_in.secs = int(ease_in)
119 | soma.ease_in.nsecs = 1000000000 * (ease_in - int(ease_in))
120 | self.soma_pub.publish(soma)
121 | print "Publish soma state:", soma.name, "intensity:", intensity
122 |
123 | # Wrapper for gestures
124 | def gesture(self, name, intensity, repeat, speed):
125 | if 'noop' == name or (not self.control_mode & self.C_GESTURE):
126 | return
127 | # Create the message
128 | ges = SetGesture()
129 | ges.name = name
130 | ges.magnitude = intensity
131 | ges.repeat = repeat
132 | ges.speed = speed
133 | self.gesture_pub.publish(ges)
134 | print "Published gesture: ", ges.name
135 |
136 | # ----------------------------------------------------------
137 | # Look at, gaze at, glance at face id's
138 | # Look_at turns entire head in that direction, once.
139 | # Gaze_at has the eyes track the face location (servoing)
140 | # Glance_at is a momentary eye movement towards the face target.
141 |
142 | def look_at(self, face_id):
143 | # Can get called 10x/second, don't print.
144 | # print "----- Looking at face: " + str(face_id)
145 | if not self.control_mode & self.C_EYES:
146 | return
147 | self.look_at_pub.publish(face_id)
148 |
149 | def gaze_at(self, face_id):
150 | print "----- Gazing at face: " + str(face_id)
151 | self.gaze_at_pub.publish(face_id)
152 |
153 | def glance_at(self, face_id):
154 | print "----- Glancing at face: " + str(face_id)
155 | self.glance_at_pub.publish(face_id)
156 |
157 | # ----------------------------------------------------------
158 | # Explicit directional look-at, gaze-at locations
159 |
160 | # Turn only the eyes towards the given target point.
161 | # Coordinates: meters; x==forward, y==to Eva's left.
162 | def gaze_at_point(self, x, y, z):
163 | xyz1 = numpy.array([x,y,z,1.0])
164 | xyz = numpy.dot(self.conv_mat, xyz1)
165 | trg = Target()
166 | trg.x = xyz[0]
167 | trg.y = xyz[1]
168 | trg.z = xyz[2]
169 | # print "gaze at point: ", trg.x, trg.y, trg.z
170 | self.gaze_pub.publish(trg)
171 |
172 | # Turn head towards the given target point.
173 | # Coordinates: meters; x==forward, y==to Eva's left.
174 | def look_at_point(self, x, y, z):
175 | xyz1 = numpy.array([x, y, z, 1.0])
176 | xyz = numpy.dot(self.conv_mat, xyz1)
177 | trg = Target()
178 | trg.x = xyz[0]
179 | trg.y = xyz[1]
180 | trg.z = xyz[2]
181 | # print "look at point: ", trg.x, trg.y, trg.z
182 | self.turn_pub.publish(trg)
183 |
184 | # ----------------------------------------------------------
185 |
186 | # Tell the world what we are up to. This is so that other
187 | # subsystems can listen in on what we are doing.
188 | # XXX FIXME ... remove this?? Kino wanted this for his stuff,
189 | # but I don't think it's used anywhere.
190 | def publish_behavior(self, event):
191 | print "----- Behavior pub: " + event
192 | self.behavior_pub.publish(event)
193 |
194 | # ----------------------------------------------------------
195 |
196 | # Tell the TTS subsystem to vocalize a plain text-string
197 | def say_text(self, text_to_say):
198 | rospy.logwarn('publishing text to TTS ' + text_to_say)
199 | self.tts_pub.publish(text_to_say)
200 |
201 | # ----------------------------------------------------------
202 | # Wrapper for saccade generator.
203 |
204 | # Explore-the-room saccade when not conversing.
205 | # ??? Is this exploring the room, or someone's face? I'm confused.
206 | def explore_saccade(self):
207 | if not self.control_mode & self.C_SACCADE:
208 | return
209 | 		# Switch to explore-the-room saccade parameters
210 | msg = SaccadeCycle()
211 | msg.mean = 0.8 # saccade_explore_interval_mean
212 | msg.variation = 0.3 # saccade_explore_interval_var
213 | msg.paint_scale = 0.3 # saccade_explore_paint_scale
214 | 		# These values are taken from the study-face settings; better
215 | 		# defaults should probably be defined for explore.
216 | msg.eye_size = 15 # saccade_study_face_eye_size
217 | msg.eye_distance = 100 # saccade_study_face_eye_distance
218 | msg.mouth_width = 90 # saccade_study_face_mouth_width
219 | msg.mouth_height = 27 # saccade_study_face_mouth_height
220 | msg.weight_eyes = 0.8 # saccade_study_face_weight_eyes
221 | msg.weight_mouth = 0.2 # saccade_study_face_weight_mouth
222 | self.saccade_pub.publish(msg)
223 |
224 | # Used during conversation to study face being looked at.
225 | def conversational_saccade(self):
226 | if not self.control_mode & self.C_SACCADE:
227 | return
228 | # Switch to conversational (micro) saccade parameters
229 | msg = SaccadeCycle()
230 | msg.mean = 0.8 # saccade_micro_interval_mean
231 | msg.variation = 0.5 # saccade_micro_interval_var
232 | msg.paint_scale = 0.3 # saccade_micro_paint_scale
233 | #
234 | msg.eye_size = 11.5 # saccade_study_face_eye_size
235 | msg.eye_distance = 100 # saccade_study_face_eye_distance
236 | msg.mouth_width = 90 # saccade_study_face_mouth_width
237 | msg.mouth_height = 5 # saccade_study_face_mouth_height
238 | msg.weight_eyes = 0.8 # saccade_study_face_weight_eyes
239 | msg.weight_mouth = 0.2 # saccade_study_face_weight_mouth
240 | self.saccade_pub.publish(msg)
241 |
242 | 	# Used while listening, to study the face being looked at.
243 | def listening_saccade(self):
244 | if not self.control_mode & self.C_SACCADE:
245 | return
246 | # Switch to conversational (micro) saccade parameters
247 | msg = SaccadeCycle()
248 | msg.mean = 1 # saccade_micro_interval_mean
249 | msg.variation = 0.6 # saccade_micro_interval_var
250 | msg.paint_scale = 0.3 # saccade_micro_paint_scale
251 | #
252 | msg.eye_size = 11 # saccade_study_face_eye_size
253 | msg.eye_distance = 80 # saccade_study_face_eye_distance
254 | msg.mouth_width = 50 # saccade_study_face_mouth_width
255 | msg.mouth_height = 13.0 # saccade_study_face_mouth_height
256 | msg.weight_eyes = 0.8 # saccade_study_face_weight_eyes
257 | msg.weight_mouth = 0.2 # saccade_study_face_weight_mouth
258 | self.saccade_pub.publish(msg)
259 |
260 |
261 | # ----------------------------------------------------------
262 | # Wrapper for controlling the blink rate.
263 | def blink_rate(self, mean, variation):
264 | msg = BlinkCycle()
265 | msg.mean = mean
266 | msg.variation = variation
267 | self.blink_pub.publish(msg)
268 |
269 | # ----------------------------------------------------------
270 | # Autonomous behaviors. These by-pass opencog completely
271 | #
272 | # The chat_heard message is of type chatbot/ChatMessage
273 | # from chatbot.msg import ChatMessage
274 | #
275 | # Stops the robot from droning on and on.
276 | # XXX surely there is a better solution than this...
277 | def chat_perceived_text_cb(self, chat_heard):
278 | if 'shut up' in chat_heard.utterance.lower():
279 | self.tts_control_pub.publish("shutup")
280 | return
281 |
282 | # Chatbot requests blink.
283 | # XXX FIXME ... which chatbot? Who published this message?
284 | # How does this work? Where is the chatbot AIML/ChatScript
285 | # markup for this, exactly? This needs to be documented
286 | # or removed as being unused.
287 | def chatbot_blink_cb(self, blink):
288 |
289 | rospy.loginfo(blink.data + ' says blink')
290 | blink_probabilities = {
291 | 'chat_heard' : 0.4,
292 | 'chat_saying' : 0.7,
293 | 'tts_end' : 0.7 }
294 | 		# If we get a string not in the dictionary, default to 1.0.
295 | 		blink_probability = blink_probabilities.get(blink.data, 1.0)
296 | if random.random() < blink_probability:
297 | self.gesture('blink', 1.0, 1, 1.0)
298 |
299 | # ----------------------------------------------------------
300 | # Subscription callbacks
301 |
302 | # Get the list of available gestures.
303 | def get_gestures_cb(self, msg):
304 | print("Available Gestures:" + str(msg.data))
305 |
306 | # Get the list of available facial expressions.
307 | def get_expressions_cb(self, msg):
308 | print("Available Facial Expressions:" + str(msg.data))
309 |
310 | # Turn behaviors on and off.
311 | #
312 | # 'btree_on' and 'btree_off' data-strings shouldn't be used, as they are
313 | # meant for switching on and off non-opencog demos.
314 | def behavior_switch_callback(self, data):
315 | if data.data == "opencog_on":
316 | if not self.running:
317 | self.running = True
318 | if data.data == "opencog_off":
319 | if self.running:
320 | self.look_at(0)
321 | self.gaze_at(0)
322 | self.running = False
323 |
324 | # Data is a bit-flag that enables/disables publication of messages.
325 | def behavior_control_callback(self, data):
326 | self.control_mode = data.data
327 |
328 | def __init__(self):
329 | # Full control by default
330 | self.control_mode = 255
331 | self.running = True
332 |
333 | # The below will hang until roscore is started!
334 | rospy.init_node("OpenCog_Eva")
335 | print("Starting OpenCog Behavior Node")
336 |
337 | # ----------------
338 | # Obtain the blender-to-camera coordinate-frame conversion
339 | # matrix. XXX FIXME This is some insane hack that does not
340 | # make sense. All 3D coordinates are supposed to be in
341 | 		# head-centered coordinates, both for the sensory subsystem,
342 | # and also for the look-at subsystem. However, someone
343 | # screwed something up somewhere, and now we hack around
344 | # it here. XXX This is really bad spaghetti-code programming.
345 | # Fuck.
346 | lstn = tf.TransformListener()
347 | try:
348 | print("Waiting for the camera-blender transform\n")
349 | lstn.waitForTransform('camera', 'blender',
350 | rospy.Time(0), rospy.Duration(10.0)) # world
351 | except Exception:
352 | print("No camera transforms!\n")
353 | exit(1)
354 |
355 | print("Got the camera-blender transform\n")
356 | (trans,rot) = lstn.lookupTransform(
357 | 'blender' , 'camera', rospy.Time(0))
358 | a = tf.listener.TransformerROS()
359 | self.conv_mat = a.fromTranslationRotation(trans,rot)
360 |
361 | # ----------------
362 | # Get the available facial animations
363 | rospy.Subscriber("/blender_api/available_emotion_states",
364 | AvailableEmotionStates, self.get_expressions_cb)
365 |
366 | rospy.Subscriber("/blender_api/available_gestures",
367 | AvailableGestures, self.get_gestures_cb)
368 |
369 | # Send out facial expressions and gestures.
370 | self.expression_pub = rospy.Publisher("/blender_api/set_emotion_state",
371 | EmotionState, queue_size=1)
372 | self.gesture_pub = rospy.Publisher("/blender_api/set_gesture",
373 | SetGesture, queue_size=1)
374 | self.soma_pub = rospy.Publisher("/blender_api/set_soma_state",
375 | SomaState, queue_size=2)
376 | self.blink_pub = rospy.Publisher("/blender_api/set_blink_randomly",
377 | BlinkCycle, queue_size=1)
378 | self.saccade_pub = rospy.Publisher("/blender_api/set_saccade",
379 | SaccadeCycle, queue_size=1)
380 |
381 | # ----------------
382 | # XYZ coordinates of where to turn and look.
383 | self.turn_pub = rospy.Publisher("/blender_api/set_face_target",
384 | Target, queue_size=1)
385 |
386 | self.gaze_pub = rospy.Publisher("/blender_api/set_gaze_target",
387 | Target, queue_size=1)
388 |
389 | 		# Int32 faceid of the face to glance at or turn and face.
390 | self.glance_at_pub = rospy.Publisher("/opencog/glance_at",
391 | Int32, queue_size=1)
392 |
393 | self.look_at_pub = rospy.Publisher("/opencog/look_at",
394 | Int32, queue_size=1)
395 |
396 | self.gaze_at_pub = rospy.Publisher("/opencog/gaze_at",
397 | Int32, queue_size=1)
398 |
399 | # ----------------
400 | rospy.logwarn("setting up chatbot affect perceive and express links")
401 |
402 | # Publish cues to the chatbot, letting it know what we are doing.
403 | self.behavior_pub = rospy.Publisher("robot_behavior",
404 | String, queue_size=1)
405 |
406 | # Tell the TTS subsystem what to vocalize
407 | # self.tts_pub = rospy.Publisher("tts", String, queue_size=1)
408 | # self.tts_pub = rospy.Publisher("/robot/chatbot_responses", String, queue_size=1)
409 | self.tts_pub = rospy.Publisher("chatbot_responses", String, queue_size=1)
410 |
411 | # Tell the chatbot what sort of affect to apply during
412 | # TTS vocalization. (Huhh???) XXX this needs documentation.
413 | self.affect_pub = rospy.Publisher("chatbot_affect_express",
414 | String, queue_size=1)
415 |
416 | # Used to stop the vocalization.
417 | self.tts_control_pub = rospy.Publisher("tts_control",
418 | String, queue_size=1)
419 |
420 | # ----------------
421 | # Subscriptions needed for autonomous behaviors.
422 | # XXX FIXME both of these should probably be removed.
423 | # String text of what the robot heard (from TTS)
424 | rospy.Subscriber("chatbot_speech", ChatMessage,
425 | self.chat_perceived_text_cb)
426 |
427 | # Chatbot can request blinks correlated with hearing and speaking.
428 | rospy.Subscriber("chatbot_blink", String, self.chatbot_blink_cb)
429 |
430 | # ----------------
431 | # Boolean flag, turn the behavior tree on and off (set it running,
432 | # or stop it)
433 | rospy.Subscriber("/behavior_switch", String, \
434 | self.behavior_switch_callback)
435 |
436 | # Bit-flag to enable/disable publication of various classes of
437 | # expressions and gestures.
438 | rospy.Subscriber("/behavior_control", Int32, \
439 | self.behavior_control_callback)
440 |
441 | # ----------------------------------------------------------------
442 |
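The `/behavior_switch` and `/behavior_control` topics documented above can be driven from any other ROS node. Below is a minimal sketch (not part of ros_commo.py), assuming roscore and the behavior node are running; the topic names, the "opencog_on"/"opencog_off" strings and the bit-flag values are taken from `EvaControl` above, while the node name and timing are illustrative only.

```python
#!/usr/bin/env python
# Sketch: start/stop the OpenCog behavior tree and mask message classes.
import rospy
from std_msgs.msg import String, Int32

rospy.init_node("behavior_toggle_demo")   # illustrative node name
switch_pub = rospy.Publisher("/behavior_switch", String, queue_size=1)
control_pub = rospy.Publisher("/behavior_control", Int32, queue_size=1)
rospy.sleep(1.0)   # give the publishers a moment to connect

# Start the behavior tree (see EvaControl.behavior_switch_callback).
switch_pub.publish("opencog_on")

# Allow only expressions and gestures; mask soma, saccades, eyes, face.
# C_EXPRESSION == 1 and C_GESTURE == 2, per the bitflags in EvaControl.
control_pub.publish(1 | 2)

# ... later, stop the behavior tree.
switch_pub.publish("opencog_off")
```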
--------------------------------------------------------------------------------
/notes/README.md:
--------------------------------------------------------------------------------
1 |
2 | Notes
3 | =====
4 |
5 | The files in this directory are non-essential to the system; they're
6 | just some random sketches, ideas and notes.
7 |
8 | * `btree-demo.scm`: a super-simplified example of a behavior tree,
9 |   implemented in the AtomSpace.
10 | * `general_behavior.py`: a copy of the original Owyl behavior tree.
11 | * `behavior.cfg`: a copy of the original Owyl config file.
12 | * `universal-fsm.scm`: a finite state machine
13 |
14 |
15 | Some random notes about OpenPsi
16 | -------------------------------
17 |
18 | Demands
19 | * Entertain people
20 | * Make people smile
21 | * Crack jokes
22 | * Answer questions
23 | * Recognize people
24 | * Greet people
25 | * Inquire about their well-being
26 |
27 | See Psi Theory http://en.wikipedia.org/wiki/Psi-Theory
28 | See also Amen's implementation.
29 |
30 | GUI Requirements
31 | ----------------
32 | A GUI is needed to edit the behavior tree. Some notes about the
33 | requirements immediately below.
34 |
35 | Goal: Create a Web UI that will allow theatrical performance artists
36 | to script theatrical performances for the Eva head. An example of a
37 | simple theatrical performance might be ''When someone enters the room,
38 | briefly glance their way, give a faint smile, then resume with the
39 | previous activity.''
40 |
41 | The output of the script should be sequences of ROS messages that
42 | control the Eva head (turn, look, smile), as well as the timing, pitch
43 | and modulation of speech.
44 |
45 | Creating the above might be a relatively straight-forward programming
46 | task, were it not for one additional requirement: the scripts should be
47 | executable within the OpenCog AtomSpace framework. This is because we
48 | want, as a long-range goal, for OpenCog to be able to control the robot
49 | behavior itself, starting with more basic Psi-Theory urges and desires,
50 | rather than having Eva be an animated puppet.
51 |
52 | This means that the authoring tool needs to be able to work with not
53 | just some convenient scripting language, but it should work very
54 | explicitly with a certain OpenCog-based scripting language.
55 |
56 | Visual Programming interfaces
57 | -----------------------------
58 | Visual programming Language
59 | http://en.wikipedia.org/wiki/Visual_programming_language
60 |
61 | Some possible visual programming tools:
62 |
63 | ===Yes
64 | * JS bootstrap + JS Angular + stuff Vytas recommended
65 |
66 | ===Maybe
67 | * statecharts.org (Yakindu)
68 | * Snap!
69 | -- Pluses: web-based, AGPL
70 | -- Minuses: too focused on programming theory
71 | * Scratch
72 | * EToys -- based on squeak/smalltalk
73 | * Python Notebooks?
74 |
75 | ===No
76 | * Alice (for teaching programming)
77 | * AgentSheets (proprietary)
78 | * Flowgarithm (too low level)
79 | * Hopscotch (proprietary)
80 | * LARP (flowcharts, too low-level)
81 | * Raptor (flowchart-based, too low-level, non-web)
82 | * Visual Logic (proprietary, flowchart, non-web)
83 | * ToonTalk (aimed at children, no web UI)
84 | * StarLog (programming language, not visual)
85 | * Cameleon (programming language, not visual)
86 |
87 |
88 | Chat interfaces:
89 | ----------------
90 | -- gtalk uses XMPP
91 | gtalk is going away
92 |
93 | -- gplus hangouts is the new thing.
94 | * protocol is proprietary.
95 | * API is here: https://developers.google.com/+/hangouts/api/
96 | The API sucks. It assumes ... all the wrong things. WTF, google.
97 | * https://github.com/tdryer/hangups/ but it seems to be chat only?
98 | * hangouts seems, maybe, to use webRTC
99 |
100 | -- blender: preferences->system
101 | frame server port 8080
102 | I can't figure out how to start the frame server.
103 |
104 | -- screen-grab:
105 | * VLC is supposed to be able to do this, but it's
106 |    slow/compute-intensive
107 |
108 | * http://www.maartenbaert.be/simplescreenrecorder/
109 | * ffmpeg/libav can do live-streaming
110 | http://www.maartenbaert.be/simplescreenrecorder/live-streaming/
111 | * istanbul has a pipeline: istximagesrc name=videosource display-name=:0.0 screen-num=0 use-damage=false show-pointer=false ! video/x-raw-rgb,framerate=10/1 ! videorate ! ffmpegcolorspace ! videoscale method=1 ! video/x-raw-yuv,width=1020,height=480,framerate=10/1 ! theoraenc ! oggmux name=mux ! filesink location=/tmp/tmprSI2tO
112 | * Just need to replace the filesink in last step with a pipe or
113 | char device ...
114 |
115 | -- WebRTC
116 | * Complete demo here:
117 | https://bitbucket.org/webrtc/codelab
118 | http://www.html5rocks.com/en/tutorials/webrtc/basics/
119 | * screensharing w/ webrtc
120 |
121 |
122 | Resources:
123 | ----------
124 | * The current Hanson robotics Web motors user interface:
125 | https://github.com/hansonrobotics/ros_motors_webui
126 |
--------------------------------------------------------------------------------
/notes/behavior.cfg:
--------------------------------------------------------------------------------
1 | [emotion]
2 | default_emotion = happy
3 | default_emotion_intensity = 0.5
4 | default_emotion_duration = 1.0
5 |
6 | emotion_scale_stage = 2.0
7 | emotion_scale_closeup = 1
8 |
9 | new_arrival_emotions = surprised
10 | new_arrival_emotions_probabilities = 1.0
11 | new_arrival_emotions_intensity_min = 0.2
12 | new_arrival_emotions_intensity_max = 0.4
13 | new_arrival_emotions_duration_min = 10
14 | new_arrival_emotions_duration_max = 15
15 |
16 | frustrated_emotions = sad, confused, recoil, surprised
17 | frustrated_emotions_probabilities = 0.4, 0.4, 0.1, 0.1
18 | frustrated_emotions_intensity_min = 0.6, 0.6, 0.1, 0.1
19 | frustrated_emotions_intensity_max = 0.8, 0.8, 0.2, 0.2
20 | frustrated_emotions_duration_min = 5, 5, 5, 5
21 | frustrated_emotions_duration_max = 15, 15, 15, 15
22 |
23 | positive_emotions = happy, comprehending, engaged
24 | positive_emotions_probabilities = 0.4, 0.2, 0.2
25 | positive_emotions_intensity_min = 0.6, 0.5, 0.5
26 | positive_emotions_intensity_max = 0.8, 0.8, 0.8
27 | positive_emotions_duration_min = 10, 10, 10
28 | positive_emotions_duration_max = 15, 15, 15
29 |
30 | non_positive_emotion = surprised
31 | non_positive_emotion_probabilities = 0.1
32 | non_positive_emotion_intensity_min = 0.6
33 | non_positive_emotion_intensity_max = 0.9
34 | non_positive_emotion_duration_min = 0.2
35 | non_positive_emotion_duration_max = 0.4
36 |
37 | bored_emotions = bored, sad, happy
38 | bored_emotions_probabilities = 0.7, 0.1, 0.2
39 | bored_emotions_intensity_min = 0.4, 0.1, 0.1
40 | bored_emotions_intensity_max = 0.7, 0.3, 0.3
41 | bored_emotions_duration_min = 10, 10, 10
42 | bored_emotions_duration_max = 15, 15, 15
43 |
44 | non_bored_emotion = surprised
45 | non_bored_emotion_probabilities = 0.2
46 | non_bored_emotion_intensity_min = 0.4
47 | non_bored_emotion_intensity_max = 0.7
48 | non_bored_emotion_duration_min = 0.2
49 | non_bored_emotion_duration_max = 0.4
50 |
51 | sleep_emotions = happy
52 | sleep_emotions_probabilities = 1.0
53 | sleep_emotions_intensity_min = 0.0
54 | sleep_emotions_intensity_max = 0.1
55 | sleep_emotions_duration_min = 5
56 | sleep_emotions_duration_max = 15
57 |
58 | non_sleep_emotion = non_sleep
59 | non_sleep_emotion_probabilities = 0.2
60 | non_sleep_emotion_intensity_min = 0.6
61 | non_sleep_emotion_intensity_max = 0.9
62 | non_sleep_emotion_duration_min = 0.2
63 | non_sleep_emotion_duration_max = 0.3
64 |
65 | wake_up_emotions = surprised, happy, irritated
66 | wake_up_emotions_probabilities = 0.45, 0.45, 0.1
67 | wake_up_emotions_intensity_min = 0.2, 0.5, 0.1
68 | wake_up_emotions_intensity_max = 0.6, 0.7, 0.4
69 | wake_up_emotions_duration_min = 5, 5, 1
70 | wake_up_emotions_duration_max = 15, 15, 4
71 |
72 | [gesture]
73 | gesture_scale_stage = 2.0
74 | gesture_scale_closeup = 1
75 |
76 | positive_gestures = nod-1, nod-2
77 | positive_gestures_probabilities = 0.1, 0.1
78 | positive_gestures_intensity_min = 0.6, 0.2
79 | positive_gestures_intensity_max = 0.9, 0.4
80 | positive_gestures_repeat_min = 1.0, 1.0
81 | positive_gestures_repeat_max = 1.0, 1.0
82 | positive_gestures_speed_min = 0.5, 0.8
83 | positive_gestures_speed_max = 0.8, 0.9
84 |
85 | bored_gestures = yawn-1
86 | bored_gestures_probabilities = 0.001
87 | bored_gestures_intensity_min = 0.6
88 | bored_gestures_intensity_max = 0.9
89 | bored_gestures_repeat_min = 1.0
90 | bored_gestures_repeat_max = 1.0
91 | bored_gestures_speed_min = 1
92 | bored_gestures_speed_max = 1
93 |
94 | sleep_gestures = blink-sleepy
95 | sleep_gestures_probabilities = 1.0
96 | sleep_gestures_intensity_min = 0.7
97 | sleep_gestures_intensity_max = 1.0
98 | sleep_gestures_repeat_min = 1.0
99 | sleep_gestures_repeat_max = 1.0
100 | sleep_gestures_speed_min = 1.0
101 | sleep_gestures_speed_max = 1.0
102 |
103 | wake_up_gestures = shake-2, shake-3, blink
104 | wake_up_gestures_probabilities = 0.4, 0.3, 0.3
105 | wake_up_gestures_intensity_min = 0.7, 0.6, 0.8
106 | wake_up_gestures_intensity_max = 1.0, 1.0, 1.0
107 | wake_up_gestures_repeat_min = 1.0, 1.0, 2.0
108 | wake_up_gestures_repeat_max = 1.0, 1.0, 4.0
109 | wake_up_gestures_speed_min = 0.7, 0.7, 0.9
110 | wake_up_gestures_speed_max = 0.8, 0.8, 1.0
111 |
112 | [interaction]
113 | duration_min = 1
114 | duration_max = 3
115 | time_to_change_face_target_min = 8
116 | time_to_change_face_target_max = 10
117 | glance_probability = 0.7
118 | glance_probability_for_new_faces = 0.5
119 | glance_probability_for_lost_faces = 0.5
120 | max_glance_distance = 1.0
121 | z_pitch_eyes = 0.0
122 | face_study_probabilities = 0.2
123 | face_study_duration_min = 1
124 | face_study_duration_max = 2
125 | face_study_z_pitch_nose = -0.05
126 | face_study_z_pitch_mouth = -0.1
127 | face_study_y_pitch_left_ear = -0.1
128 | face_study_y_pitch_right_ear = 0.1
129 |
130 | [boredom]
131 | sleep_probability = 0.01
132 | sleep_duration_min = 2
133 | sleep_duration_max = 4
134 | search_for_attention_duration_min = 1
135 | search_for_attention_duration_max = 4
136 | search_for_attention_x = 1, 1, 1, 1, 1, 1, 1, 1
137 | search_for_attention_y = 0.1, -0.3, 0.2, -0.1, -0.4, -0.2, 0.05, 0.35
138 | search_for_attention_z = 0, 0, 0, 0, 0, 0, 0, 0
139 | wake_up_probability = 0.5
140 | time_to_wake_up = 5
141 |
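Each emotion and gesture class above is a set of parallel comma-separated lists: the class key names the animations, and the companion `*_probabilities`, `*_intensity_min`/`*_intensity_max` and `*_duration_min`/`*_duration_max` keys supply one value per name, matched by position. Below is a minimal sketch of how one class lines up, read with ConfigParser as notes/general_behavior.py does; the section and key names come from the file above, and the relative file path is an assumption.

```python
# Sketch: pair up the parallel lists of one emotion class from behavior.cfg.
import ConfigParser   # 'configparser' on Python 3

config = ConfigParser.ConfigParser()
config.read("behavior.cfg")   # assumed to be in the working directory

names = [x.strip() for x in config.get("emotion", "frustrated_emotions").split(",")]
probs = [float(x) for x in
         config.get("emotion", "frustrated_emotions_probabilities").split(",")]

# Each name is paired index-by-index with its probability; the intensity
# and duration min/max keys follow the same convention.
for name, prob in zip(names, probs):
    print("%s -> probability %.2f" % (name, prob))
# sad -> probability 0.40
# confused -> probability 0.40
# recoil -> probability 0.10
# surprised -> probability 0.10
```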
--------------------------------------------------------------------------------
/notes/btree-demo.scm:
--------------------------------------------------------------------------------
1 | ;
2 | ; btree-demo.scm
3 | ;
4 | ; A super-simplified example of a behavior tree, implemented
5 | ; in the atomspace. This is built with the same general conceptual
6 | ; design as the "real" behavior tree.
7 | ;
8 |
9 | (add-to-load-path "/usr/local/share/opencog/scm")
10 |
11 | (use-modules (opencog))
12 | (use-modules (opencog query))
13 | (use-modules (opencog exec))
14 |
15 | (load-from-path "utilities.scm")
16 |
17 | ; ------------------------------------------------------
18 | ; Same as in eva-fsm.scm
19 |
20 | ; Is the room empty, or is someone in it?
21 | (define room-state (AnchorNode "Room State"))
22 | (define room-empty (ConceptNode "room empty"))
23 | (define room-nonempty (ConceptNode "room nonempty"))
24 |
25 | (define soma-state (AnchorNode "Soma State"))
26 | (define soma-sleeping (ConceptNode "Sleeping"))
27 |
28 | ;; Assume room empty at first
29 | (ListLink room-state room-empty)
30 | (ListLink soma-state soma-sleeping)
31 |
32 |
33 | (define (print-msg) (display "Triggered\n") (stv 1 1))
34 | (define (print-atom atom) (format #t "Triggered: ~a \n" atom) (stv 1 1))
35 | ; ------------------------------------------------------
36 | ;;
37 | ;; Determine if the atomspace contains the link
38 | ;; (ListLink (AnchorNode "Room State") (ConceptNode "room empty"))
39 | ;; If the atomspace does contain the above, then the print-msg
40 | ;; function is run.
41 | ; ------------------------------------------------------
42 | #|
43 | (define empty-seq
44 | (SatisfactionLink
45 | (SequentialAndLink
46 | (PresentLink (ListLink room-state (VariableNode "$x")))
47 | (EqualLink (VariableNode "$x") room-empty)
48 | (EvaluationLink
49 | (GroundedPredicateNode "scm: print-msg")
50 | (ListLink))
51 | )))
52 | |#
53 |
54 |
55 | (DefineLink
56 | (DefinedPredicateNode "is-empty")
57 | (EqualLink
58 | (SetLink room-empty)
59 | (GetLink (ListLink room-state (VariableNode "$x")))
60 | ))
61 |
62 | (DefineLink
63 | (DefinedPredicateNode "is-sleeping")
64 | (EqualLink
65 | (SetLink soma-sleeping)
66 | (GetLink (ListLink soma-state (VariableNode "$y")))
67 | ))
68 |
69 | (DefineLink
70 | (DefinedPredicateNode "Print Msg")
71 | (EvaluationLink
72 | (GroundedPredicateNode "scm: print-msg")
73 | (ListLink))
74 | )
75 |
76 | (define empty-seq
77 | (SatisfactionLink
78 | (SequentialAndLink
79 | (DefinedPredicateNode "is-empty")
80 | (DefinedPredicateNode "Print Msg")
81 | (DefinedPredicateNode "is-sleeping")
82 | (DefinedPredicateNode "Print Msg")
83 | )))
84 |
85 | (cog-satisfy empty-seq)
86 |
--------------------------------------------------------------------------------
/notes/eva-fsm.scm:
--------------------------------------------------------------------------------
1 | ;
2 | ; Experimental Eva Behavior Finite State Machine (FSM).
3 | ;
4 | ; A simple finite state machine for controlling Eva's behaviors
5 | ;
6 | (add-to-load-path "/usr/local/share/opencog/scm")
7 |
8 | (use-modules (opencog))
9 | (use-modules (opencog exec))
10 | (use-modules (opencog query))
11 |
12 | (load-from-path "utilities.scm")
13 | (load-from-path "faces.scm")
14 |
15 | (define eva-trans (ConceptNode "Eva Transition Rule"))
16 |
17 | ; What emotional state is eva currently displaying?
18 | (define eva-state (AnchorNode "Eva Current State"))
19 | (define eva-bored (ConceptNode "Eva bored"))
20 | (define eva-surprised (ConceptNode "Eva surprised"))
21 |
22 | ;; Eva's initial state of the FSM
23 | (ListLink eva-state eva-bored)
24 |
25 | ;; Misc state transitions.
26 | ;; if room empty and someone entered -> room non-empty
27 | ;; if eva-bored and someone entered -> eva surprised
28 |
29 | (define (ola) (display "ola\n"))
30 | (define (bye) (display "bye\n"))
31 |
32 | (ContextLink
33 | (AndLink eva-bored room-nonempty)
34 | (ListLink eva-trans eva-surprised)
35 | (ExecutionOutputLink
36 | (GroundedSchemaNode "scm: ola")
37 | (ListLink)
38 | )
39 | )
40 |
41 | (ContextLink
42 | (AndLink eva-surprised room-empty)
43 | (ListLink eva-trans eva-bored)
44 | (ExecutionOutputLink
45 | (GroundedSchemaNode "scm: bye")
46 | (ListLink)
47 | )
48 | )
49 |
50 | ;; Hack job
51 | (define wtf
52 | (let ((var-eva-state (VariableNode "$eva-state"))
53 | (var-eva-next-state (VariableNode "$eva-next-state"))
54 | (var-room-state (VariableNode "$eva-room-state"))
55 | (var-action (VariableNode "$eva-action")))
56 | (BindLink
57 | (VariableList
58 | var-eva-state var-eva-next-state var-room-state var-action
59 | )
60 | (AndLink
61 | ;; If Eva is in the current state ...
62 | (ListLink eva-state var-eva-state)
63 |
64 | ;; ...and the room is in state ...
65 | (ListLink room-state var-room-state)
66 |
67 | ;; ... and there is a transition ...
68 | (ContextLink
69 | (AndLink var-eva-state var-room-state)
70 | (ListLink eva-trans var-eva-next-state)
71 | var-action
72 | )
73 | )
74 | (AndLink
75 | ;; ... Then, leave the current state ...
76 | (DeleteLink (ListLink eva-state var-eva-state))
77 |
78 | ;; ... And transition to the new state ...
79 | (ListLink eva-state var-eva-next-state)
80 |
81 | ;; and perform the action
82 | var-action
83 | )
84 | )
85 | )
86 | )
87 |
88 |
89 | #|
90 | ;; Example usage ...
91 |
92 | (cog-bind chk-room-empty)
93 | (cog-bind chk-room-non-empty)
94 | (show-room-state)
95 |
96 | (cog-incoming-set (PredicateNode "visible face"))
97 |
98 |
99 | (cog-bind wtf)
100 |
101 | (define look-left
102 | (EvaluationLink
103 | (GroundedPredicateNode "py: do_look_left")
104 | (ListLink)))
105 | (define look-right
106 | (EvaluationLink
107 | (GroundedPredicateNode "py: do_look_right")
108 | (ListLink)))
109 | (cog-evaluate! look-left)
110 | (cog-evaluate! look-right)
111 |
112 |
113 | |#
114 |
115 | ;; ----
116 |
--------------------------------------------------------------------------------
/notes/general_behavior.py:
--------------------------------------------------------------------------------
1 | #
2 | # general_behavior.py - The primary Owyl behavior tree
3 | # Copyright (C) 2014 Hanson Robotics
4 | #
5 | # This library is free software; you can redistribute it and/or
6 | # modify it under the terms of the GNU Lesser General Public
7 | # License as published by the Free Software Foundation; either
8 | # version 2.1 of the License, or (at your option) any later version.
9 | #
10 | # This library is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | # Lesser General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Lesser General Public
16 | # License along with this library; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 |
19 | # system imports
20 | import copy
21 | import os
22 | import random
23 | import time
24 |
25 | # tool imports
26 | import owyl
27 | from owyl import blackboard
28 | import rospy
29 | import roslib
30 | import ConfigParser
31 | import csv
32 |
33 | # message imports
34 | from std_msgs.msg import String
35 | from blender_api_msgs.msg import AvailableEmotionStates, AvailableGestures
36 | from blender_api_msgs.msg import EmotionState
37 | from blender_api_msgs.msg import SetGesture
38 | from blender_api_msgs.msg import Target
39 |
40 | # local stuff.
41 | from face_track import FaceTrack
42 |
43 | # Basic holder for emotion-expression properties and probabilities
44 | class Emotion:
45 | def __init__(self, name) :
46 | self.name = name
47 | self.probability = 0.0
48 | self.min_intensity = 0.0
49 | self.max_intensity = 1.0
50 | self.min_duration = 5.0
51 | self.max_duration = 15.0
52 |
53 | # Basic holder for gesture properties and probabilities
54 | class Gesture:
55 | def __init__(self, name):
56 | self.name = name
57 | self.probability = 0.0
58 | self.min_intensity = 0.0
59 | self.max_intensity = 1.0
60 | self.min_repeat = 0.0
61 | self.max_repeat = 1.0
62 | self.min_speed = 0.0
63 | self.max_speed = 1.0
64 |
65 | class Tree():
66 | # ---------
67 | # Config File utilities
68 | def unpack_config_emotions(self, config, emo_class) :
69 |
70 | def get_values(from_config, num_values):
71 | rtn_values = [float(z.strip()) for z in from_config.split(",")]
72 | if len(rtn_values) != num_values:
73 | raise Exception("List lengths don't match!")
74 | return rtn_values
75 |
76 | names = [x.strip() for x in config.get("emotion", emo_class).split(",")]
77 | numb = len(names)
78 |
79 | probs = get_values(config.get("emotion", \
80 | emo_class + "_probabilities"), numb)
81 | mins = get_values(config.get("emotion", \
82 | emo_class + "_intensity_min"), numb)
83 | maxs = get_values(config.get("emotion", \
84 | emo_class + "_intensity_max"), numb)
85 |
86 | dins = get_values(config.get("emotion", \
87 | emo_class + "_duration_min"), numb)
88 | daxs = get_values(config.get("emotion", \
89 | emo_class + "_duration_max"), numb)
90 |
91 | self.blackboard["emotion_classes"].append(emo_class)
92 |
93 | emos = []
94 | for (n,p,mi,mx,di,dx) in zip (names, probs, mins, maxs, dins, daxs):
95 | emo = Emotion(n)
96 | emo.probability = p
97 | emo.min_intensity = mi
98 | emo.max_intensity = mx
99 | emo.min_duration = di
100 | emo.max_duration = dx
101 | emos.append(emo)
102 |
103 | self.blackboard[emo_class] = emos
104 |
105 | def unpack_config_gestures(self, config, ges_class) :
106 |
107 | def get_values(from_config, num_values):
108 | rtn_values = [float(z.strip()) for z in from_config.split(",")]
109 | if len(rtn_values) != num_values:
110 | raise Exception("List lengths don't match!")
111 | return rtn_values
112 |
113 | names = [x.strip() for x in config.get("gesture", ges_class).split(",")]
114 | numb = len(names)
115 |
116 | probs = get_values(config.get("gesture", \
117 | ges_class + "_probabilities"), numb)
118 | mins = get_values(config.get("gesture", \
119 | ges_class + "_intensity_min"), numb)
120 | maxs = get_values(config.get("gesture", \
121 | ges_class + "_intensity_max"), numb)
122 |
123 | rins = get_values(config.get("gesture", \
124 | ges_class + "_repeat_min"), numb)
125 | raxs = get_values(config.get("gesture", \
126 | ges_class + "_repeat_max"), numb)
127 |
128 | sins = get_values(config.get("gesture", \
129 | ges_class + "_speed_min"), numb)
130 | saxs = get_values(config.get("gesture", \
131 | ges_class + "_speed_max"), numb)
132 |
133 | self.blackboard["gesture_classes"].append(ges_class)
134 |
135 | gestures = []
136 | for (n,p,mi,mx,ri,rx,si,sa) in zip (names, probs, mins, maxs, rins, raxs, sins, saxs):
137 | ges = Gesture(n)
138 | ges.probability = p
139 | ges.min_intensity = mi
140 | ges.max_intensity = mx
141 | ges.min_repeat = ri
142 | ges.max_repeat = rx
143 | ges.min_speed = si
144 | ges.max_speed = sa
145 | gestures.append(ges)
146 |
147 | self.blackboard[ges_class] = gestures
148 |
149 | def unpack_config_look_around(self, config):
150 | def get_values(from_config, num_values):
151 | rtn_values = [float(z.strip()) for z in from_config.split(",")]
152 | if len(rtn_values) != num_values:
153 | raise Exception("List lengths don't match!")
154 | return rtn_values
155 |
156 | x_coordinates = [float(x.strip()) for x in config.get("boredom", "search_for_attention_x").split(",")]
157 | numb = len(x_coordinates)
158 |
159 | y_coordinates = get_values(config.get("boredom", "search_for_attention_y"), numb)
160 | z_coordinates = get_values(config.get("boredom", "search_for_attention_z"), numb)
161 |
162 | for (x, y, z) in zip (x_coordinates, y_coordinates, z_coordinates):
163 | trg = Target()
164 | trg.x = x
165 | trg.y = y
166 | trg.z = z
167 | self.blackboard["search_for_attention_targets"].append(trg)
168 |
169 |
170 | def __init__(self):
171 |
172 | self.blackboard = blackboard.Blackboard("rig expressions")
173 |
174 | config = ConfigParser.ConfigParser()
175 | config.readfp(open(os.path.join(os.path.dirname(__file__), "../behavior.cfg")))
176 | self.blackboard["current_emotion"] = config.get("emotion", "default_emotion")
177 | self.blackboard["current_emotion_intensity"] = config.getfloat("emotion", "default_emotion_intensity")
178 | self.blackboard["current_emotion_duration"] = config.getfloat("emotion", "default_emotion_duration")
179 | self.blackboard["emotion_classes"] = []
180 | self.blackboard["gesture_classes"] = []
181 | self.blackboard["emotion_scale_stage"] = config.getfloat("emotion", "emotion_scale_stage")
182 | self.blackboard["emotion_scale_closeup"] = config.getfloat("emotion", "emotion_scale_closeup")
183 | self.blackboard["gesture_scale_stage"] = config.getfloat("gesture", "gesture_scale_stage")
184 | self.blackboard["gesture_scale_closeup"] = config.getfloat("gesture", "gesture_scale_closeup")
185 |
186 | self.unpack_config_emotions(config, "frustrated_emotions")
187 |
188 | self.unpack_config_emotions(config, "positive_emotions")
189 | self.unpack_config_emotions(config, "non_positive_emotion")
190 |
191 | self.unpack_config_emotions(config, "bored_emotions")
192 | self.unpack_config_emotions(config, "non_bored_emotion")
193 |
194 | self.unpack_config_emotions(config, "sleep_emotions")
195 | self.unpack_config_emotions(config, "non_sleep_emotion")
196 |
197 | self.unpack_config_emotions(config, "wake_up_emotions")
198 |
199 | self.unpack_config_emotions(config, "new_arrival_emotions")
200 |
201 | self.unpack_config_gestures(config, "positive_gestures")
202 |
203 | self.unpack_config_gestures(config, "bored_gestures")
204 |
205 | self.unpack_config_gestures(config, "sleep_gestures")
206 |
207 | self.unpack_config_gestures(config, "wake_up_gestures")
208 |
209 | self.blackboard["min_duration_for_interaction"] = config.getfloat("interaction", "duration_min")
210 | self.blackboard["max_duration_for_interaction"] = config.getfloat("interaction", "duration_max")
211 | self.blackboard["time_to_change_face_target_min"] = config.getfloat("interaction", "time_to_change_face_target_min")
212 | self.blackboard["time_to_change_face_target_max"] = config.getfloat("interaction", "time_to_change_face_target_max")
213 | self.blackboard["glance_probability"] = config.getfloat("interaction", "glance_probability")
214 | self.blackboard["glance_probability_for_new_faces"] = config.getfloat("interaction", "glance_probability_for_new_faces")
215 | self.blackboard["glance_probability_for_lost_faces"] = config.getfloat("interaction", "glance_probability_for_lost_faces")
216 | self.blackboard["z_pitch_eyes"] = config.getfloat("interaction", "z_pitch_eyes")
217 | self.blackboard["max_glance_distance"] = config.getfloat("interaction", "max_glance_distance")
218 | self.blackboard["face_study_probabilities"] = config.getfloat("interaction", "face_study_probabilities")
219 | self.blackboard["face_study_duration_min"] = config.getfloat("interaction", "face_study_duration_min")
220 | self.blackboard["face_study_duration_max"] = config.getfloat("interaction", "face_study_duration_max")
221 | self.blackboard["face_study_z_pitch_nose"] = config.getfloat("interaction", "face_study_z_pitch_nose")
222 | self.blackboard["face_study_z_pitch_mouth"] = config.getfloat("interaction", "face_study_z_pitch_mouth")
223 | self.blackboard["face_study_y_pitch_left_ear"] = config.getfloat("interaction", "face_study_y_pitch_left_ear")
224 | self.blackboard["face_study_y_pitch_right_ear"] = config.getfloat("interaction", "face_study_y_pitch_right_ear")
225 | self.blackboard["sleep_probability"] = config.getfloat("boredom", "sleep_probability")
226 | self.blackboard["sleep_duration_min"] = config.getfloat("boredom", "sleep_duration_min")
227 | self.blackboard["sleep_duration_max"] = config.getfloat("boredom", "sleep_duration_max")
228 | self.blackboard["search_for_attention_index"] = 0
229 | self.blackboard["search_for_attention_duration_min"] = config.getfloat("boredom", "search_for_attention_duration_min")
230 | self.blackboard["search_for_attention_duration_max"] = config.getfloat("boredom", "search_for_attention_duration_max")
231 | self.blackboard["search_for_attention_targets"] = []
232 | self.unpack_config_look_around(config)
233 | self.blackboard["wake_up_probability"] = config.getfloat("boredom", "wake_up_probability")
234 | self.blackboard["time_to_wake_up"] = config.getfloat("boredom", "time_to_wake_up")
235 |
236 | ##### Other System Variables #####
237 | self.blackboard["show_expression_since"] = None
238 |
239 | # ID's of faces newly seen, or lost. Integer ID.
240 | self.blackboard["new_face"] = 0
241 | self.blackboard["lost_face"] = 0
242 | # IDs of faces in the scene, updated once per cycle
243 | self.blackboard["face_targets"] = []
244 | # IDs of faces in the scene, updated immediately
245 | self.blackboard["background_face_targets"] = []
246 | self.blackboard["current_glance_target"] = 0
247 | self.blackboard["current_face_target"] = 0
248 | self.blackboard["interact_with_face_target_since"] = 0.0
249 | self.blackboard["sleep_since"] = 0.0
250 | self.blackboard["bored_since"] = 0.0
251 | self.blackboard["is_interruption"] = False
252 | self.blackboard["is_sleeping"] = False
253 | self.blackboard["behavior_tree_on"] = False
254 | self.blackboard["stage_mode"] = False
255 | # Flags to indicate which part of the face will be studied
256 | self.blackboard["face_study_nose"] = False
257 | self.blackboard["face_study_mouth"] = False
258 | self.blackboard["face_study_left_ear"] = False
259 | self.blackboard["face_study_right_ear"] = False
260 |
261 |
262 | ##### ROS Connections #####
263 | self.facetrack = FaceTrack(self.blackboard)
264 |
265 | rospy.Subscriber("behavior_switch", String, self.behavior_switch_callback)
266 | rospy.Subscriber("/blender_api/available_emotion_states",
267 | AvailableEmotionStates, self.get_emotion_states_cb)
268 |
269 | rospy.Subscriber("/blender_api/available_gestures",
270 | AvailableGestures, self.get_gestures_cb)
271 |
272 | # Emotional content that the chatbot perceived i.e. did it hear
273 | # (or reply with) angry words, polite words, etc?
274 | # currently supplying string rather than specific EmotionState with timing,
275 | # allowing that to be handled here where timings have been tuned
276 | print ' setting up chatbot_affect link'
277 | rospy.Subscriber("/eva/chatbot_affect_perceive", String,
278 | self.chatbot_affect_perceive_callback)
279 |
280 | self.do_pub_gestures = True
281 | self.do_pub_emotions = True
282 | self.emotion_pub = rospy.Publisher("/blender_api/set_emotion_state",
283 | EmotionState, queue_size=1)
284 | self.gesture_pub = rospy.Publisher("/blender_api/set_gesture",
285 | SetGesture, queue_size=1)
286 | self.affect_pub = rospy.Publisher("/eva/chatbot_affect_express",
287 | EmotionState, queue_size=1)
288 | self.tree = self.build_tree()
289 | time.sleep(0.1)
290 |
291 | log_filename = os.path.join(os.path.abspath(
292 | os.path.dirname(__file__)), "../bhlog.csv")
293 | self.log_file = open(log_filename, 'wb')
294 | self.log_file.write('Action,Timestamp,Event\n')
295 | try:
296 | while not rospy.is_shutdown():
297 | self.tree.next()
298 | finally:
299 | self.log_file.close()
300 |
301 |
302 | # Pick a random expression out of the class of expressions,
303 | # and display it. Return the displayed emotion, or None if none
304 | # was picked. The optional argument `force` guarantees that one is shown.
305 | def pick_random_expression(self, emo_class_name, trigger, force=False):
306 | random_number = random.random()
307 | tot = 0
308 | emo = None
309 | emos = self.blackboard[emo_class_name]
310 | # print emos
311 | for emotion in emos:
312 | tot += emotion.probability
313 | if random_number <= tot:
314 | emo = emotion
315 | break
316 | if emo:
317 | if force==True:
318 | intensity = (emo.min_intensity + emo.max_intensity)/2.0
319 | duration = emo.max_duration
320 | else:
321 | intensity = random.uniform(emo.min_intensity, emo.max_intensity)
322 | duration = random.uniform(emo.min_duration, emo.max_duration)
323 | self.show_emotion(emo.name, intensity, duration, trigger)
324 | elif force==True:
325 | # force said show something but nothing picked, so choose first
326 | print 'force branch chosen'
327 | emo=emos[0]
328 | intensity = 0.6 * emo.max_intensity
329 | duration = emo.max_duration
330 | self.show_emotion(emo.name, intensity, duration, trigger)
331 |
332 | return emo
333 |
334 | def pick_random_gesture(self, ges_class_name, trigger):
335 | random_number = random.random()
336 | tot = 0
337 | ges = None
338 | gestures = self.blackboard[ges_class_name]
339 | for gesture in gestures:
340 | tot += gesture.probability
341 | if random_number <= tot:
342 | ges = gesture
343 | break
344 |
345 | if ges:
346 | intensity = random.uniform(ges.min_intensity, ges.max_intensity)
347 | repeat = random.uniform(ges.min_repeat, ges.max_repeat)
348 | speed = random.uniform(ges.min_speed, ges.max_speed)
349 | self.show_gesture(ges.name, intensity, repeat, speed, trigger)
350 |
351 | return ges
352 |
353 |
354 | # Pick the name of a random emotion, excluding those from
355 | # the exclude list
356 | def pick_random_emotion_name(self, exclude) :
357 | ixnay = [ex.name for ex in exclude]
358 | emos = self.blackboard["emotions"]
359 | if None == emos:
360 | return None
361 | emo_name = random.choice([other for other in emos if other not in ixnay])
362 | return emo_name
363 |
364 | # Pick a so-called "instant" or "flash" expression
365 | def pick_instant(self, emo_class, exclude_class, trigger) :
366 | emo = self.pick_random_expression(exclude_class, trigger)
367 | if emo :
368 | exclude = self.blackboard[emo_class]
369 | emo_name = self.pick_random_emotion_name(exclude)
370 | tense = random.uniform(emo.min_intensity, emo.max_intensity)
371 | durat = random.uniform(emo.min_duration, emo.max_duration)
372 | self.show_emotion(emo_name, tense, durat, trigger)
373 | print "----- Instant expression: " + emo_name + " (" + \
374 | str(tense) + ") for " + str(durat) + " seconds"
375 | return emo_name
376 |
377 | # ------------------------------------------------------------------
378 | # The various behavior trees
379 |
380 | # Actions that are taken when a face becomes visible.
381 | # If there were previously no people in the scene, she always interacts with the new person.
382 | # If she is already interacting with someone else in the scene,
383 | # she will either glance at the new face or ignore it, depending on the dice roll.
384 | # If she has been interacting with another person for a while,
385 | # the probability of glancing at a new face is higher.
386 | def someone_arrived(self) :
387 | tree = owyl.sequence(
388 | self.is_someone_arrived(),
389 | owyl.selector(
390 | ##### There previously were no people in the scene #####
391 | owyl.sequence(
392 | self.were_no_people_in_the_scene(),
393 | self.assign_face_target(variable="current_face_target", value="new_face"),
394 | self.record_start_time(variable="interact_with_face_target_since"),
395 | self.show_expression(emo_class="new_arrival_emotions", trigger="someone_arrived"),
396 | self.interact_with_face_target(id="current_face_target", new_face=True, trigger="someone_arrived")
397 | ),
398 |
399 | ##### Currently interacting with someone #####
400 | owyl.sequence(
401 | self.is_interacting_with_someone(),
402 | self.dice_roll(event="glance_new_face"),
403 | self.glance_at_new_face(trigger="someone_arrived")
404 | ),
405 |
406 | ##### Does Nothing #####
407 | owyl.sequence(
408 | self.print_status(str="----- Ignoring the new face!"),
409 | self.log(behavior="ignore_face", trigger="someone_arrived"),
410 | owyl.succeed()
411 | )
412 | ),
413 | self.clear_new_face_target()
414 | )
415 | return tree
416 |
417 | # ---------------------------
418 | # Actions that are taken when a face leaves.
419 | # If she was interacting with that person, she will be frustrated.
420 | # If she was interacting with someone else,
421 | # she will either glance at the lost face or ignore it, depending on the dice roll.
422 | def someone_left(self) :
423 | tree = owyl.sequence(
424 | self.is_someone_left(),
425 | owyl.selector(
426 | ##### Was Interacting With That Person #####
427 | owyl.sequence(
428 | self.was_interacting_with_that_person(),
429 | self.return_to_neutral_position(trigger="someone_left"),
430 | self.show_frustrated_expression(trigger="someone_left")
431 | ),
432 |
433 | ##### Is Interacting With Someone Else #####
434 | owyl.sequence(
435 | self.is_interacting_with_someone(),
436 | self.dice_roll(event="glance_lost_face"),
437 | self.glance_at_lost_face(trigger="someone_left")
438 | ),
439 |
440 | ##### Does Nothing #####
441 | owyl.sequence(
442 | self.print_status(str="----- Ignoring the lost face!"),
443 | self.log(behavior="ignore_face", trigger="someone_left"),
444 | owyl.succeed()
445 | )
446 | ),
447 | self.clear_lost_face_target()
448 | )
449 | return tree
450 |
451 | # -----------------------------
452 | # Interact with people.
453 | # If she is not currently interacting with anyone, or it's time to switch targets,
454 | # she will start interacting with someone else.
455 | # Otherwise she will continue with the current interaction.
456 | # She may also glance at other people if there is more than one person in the scene.
457 | def interact_with_people(self) :
458 | tree = owyl.sequence(
459 | self.is_face_target(),
460 | owyl.selector(
461 | ##### Start A New Interaction #####
462 | owyl.sequence(
463 | owyl.selector(
464 | self.is_not_interacting_with_someone(),
465 | owyl.sequence(
466 | self.is_more_than_one_face_target(),
467 | self.is_time_to_change_face_target()
468 | )
469 | ),
470 | self.select_a_face_target(),
471 | self.record_start_time(variable="interact_with_face_target_since"),
472 | self.interact_with_face_target(id="current_face_target", new_face=False, trigger="people_in_scene")
473 | ),
474 |
475 | ##### Glance At Other Faces & Continue With The Last Interaction #####
476 | owyl.sequence(
477 | self.print_status(str="----- Continue interaction"),
478 | owyl.selector(
479 | owyl.sequence(
480 | self.is_more_than_one_face_target(),
481 | self.dice_roll(event="group_interaction"),
482 | self.select_a_glance_target(),
483 | self.glance_at(id="current_glance_target", trigger="people_in_scene")
484 | ),
485 | owyl.succeed()
486 | ),
487 | self.interact_with_face_target(id="current_face_target", new_face=False, trigger="people_in_scene"),
488 | owyl.selector(
489 | owyl.sequence(
490 | self.dice_roll(event="face_study_saccade"),
491 | self.face_study_saccade(id="current_face_target", trigger="people_in_scene")
492 | ),
493 | owyl.succeed()
494 | )
495 | )
496 | )
497 | )
498 | return tree
499 |
500 |
501 | # -------------------
502 | # Nothing interesting is happening.
503 | # She will look around and search for attention. She may go to sleep;
504 | # this is more likely to happen if she has been bored for a while.
505 | # She wakes up whenever there is an interruption (e.g. someone arrives),
506 | # or after a timeout.
507 | def nothing_is_happening(self) :
508 | tree = owyl.sequence(
509 | owyl.selector(
510 | ##### Is Not Sleeping #####
511 | owyl.sequence(
512 | self.is_not_sleeping(),
513 | owyl.selector(
514 | ##### Go To Sleep #####
515 | owyl.sequence(
516 | self.dice_roll(event="go_to_sleep"),
517 | self.record_start_time(variable="sleep_since"),
518 | self.print_status(str="----- Go to sleep!"),
519 | self.go_to_sleep(trigger="nothing_is_happening")
520 | ),
521 |
522 | ##### Search For Attention #####
523 | self.search_for_attention(trigger="nothing_is_happening")
524 | )
525 | ),
526 |
527 | ##### Is Sleeping #####
528 | owyl.selector(
529 | ##### Wake Up #####
530 | owyl.sequence(
531 | self.dice_roll(event="wake_up"),
532 | self.is_time_to_wake_up(),
533 | self.wake_up(trigger="time_to_wake_up"),
534 | ),
535 |
536 | ##### Continue To Sleep #####
537 | owyl.sequence(
538 | self.print_status(str="----- Continue to sleep."),
539 | self.go_to_sleep(trigger="nothing_is_happening")
540 | )
541 | )
542 | ),
543 |
544 | ##### If Interruption && Sleeping -> Wake Up #####
545 | owyl.sequence(
546 | self.is_interruption(),
547 | self.is_sleeping(),
548 | self.wake_up(trigger="interruption"),
549 | self.print_status(str="----- Interruption: Wake up!"),
550 | )
551 | )
552 | return tree
553 |
554 | # ------------------------------------------------------------------
555 | # Build the main tree
556 | def build_tree(self):
557 | eva_behavior_tree = \
558 | owyl.repeatAlways(
559 | owyl.selector(
560 | owyl.sequence(
561 | self.is_behavior_tree_on(),
562 | self.sync_variables(),
563 | ########## Main Events ##########
564 | owyl.selector(
565 | self.someone_arrived(),
566 | self.someone_left(),
567 | self.interact_with_people(),
568 | self.nothing_is_happening()
569 | )
570 | ),
571 | self.idle_spin()
572 | )
573 | )
574 | return owyl.visit(eva_behavior_tree, blackboard=self.blackboard)
575 |
576 | # Print a single status message
577 | @owyl.taskmethod
578 | def print_status(self, **kwargs):
579 | print kwargs["str"]
580 | yield True
581 |
582 | # Sync the face-target list, once per behavior-tree cycle
583 | @owyl.taskmethod
584 | def sync_variables(self, **kwargs):
585 | self.blackboard["face_targets"] = self.blackboard["background_face_targets"]
586 | yield True
587 |
588 | @owyl.taskmethod
589 | def dice_roll(self, **kwargs):
590 | if kwargs["event"] == "glance_new_face":
591 | if self.blackboard["glance_probability_for_new_faces"] > 0 and self.blackboard["interact_with_face_target_since"] > 0:
592 | skew = (time.time() - self.blackboard["interact_with_face_target_since"]) / self.blackboard["time_to_change_face_target_max"]
593 | if random.random() < self.blackboard["glance_probability_for_new_faces"] + skew:
594 | yield True
595 | else:
596 | yield False
597 | else:
598 | yield False
599 | elif kwargs["event"] == "group_interaction":
600 | if random.random() < self.blackboard["glance_probability"]:
601 | yield True
602 | else:
603 | yield False
604 | elif kwargs["event"] == "face_study_saccade":
605 | if random.random() < self.blackboard["face_study_probabilities"]:
606 | yield True
607 | else:
608 | yield False
609 | elif kwargs["event"] == "go_to_sleep":
610 | if self.blackboard["sleep_probability"] > 0 and self.blackboard["bored_since"] > 0:
611 | skew = (time.time() - self.blackboard["bored_since"]) / \
612 | (self.blackboard["search_for_attention_duration_max"] / self.blackboard["sleep_probability"])
613 | if random.random() < self.blackboard["sleep_probability"] + skew:
614 | yield True
615 | else:
616 | yield False
617 | else:
618 | yield False
619 | elif kwargs["event"] == "wake_up":
620 | if random.random() < self.blackboard["wake_up_probability"]:
621 | yield True
622 | else:
623 | yield False
624 | else:
625 | if random.random() > 0.5:
626 | yield True
627 | else:
628 | yield False
629 |
630 | @owyl.taskmethod
631 | def is_someone_arrived(self, **kwargs):
632 | self.blackboard["is_interruption"] = False
633 | if self.blackboard["new_face"] > 0:
634 | self.blackboard["bored_since"] = 0
635 | print("----- Someone arrived! id: " + str(self.blackboard["new_face"]))
636 | yield True
637 | else:
638 | yield False
639 |
640 | @owyl.taskmethod
641 | def is_someone_left(self, **kwargs):
642 | self.blackboard["is_interruption"] = False
643 | if self.blackboard["lost_face"] > 0:
644 | print("----- Someone left! id: " + str(self.blackboard["lost_face"]))
645 | yield True
646 | else:
647 | yield False
648 |
649 | @owyl.taskmethod
650 | def is_interacting_with_someone(self, **kwargs):
651 | if self.blackboard["current_face_target"]:
652 | print "----- Is Interacting With Someone!"
653 | yield True
654 | else:
655 | yield False
656 |
657 | @owyl.taskmethod
658 | def is_not_interacting_with_someone(self, **kwargs):
659 | if not self.blackboard["current_face_target"]:
660 | yield True
661 | else:
662 | yield False
663 |
664 | @owyl.taskmethod
665 | def were_no_people_in_the_scene(self, **kwargs):
666 | if len(self.blackboard["face_targets"]) == 1:
667 | print("----- Previously, no one in the scene!")
668 | yield True
669 | else:
670 | yield False
671 |
672 | @owyl.taskmethod
673 | def was_interacting_with_that_person(self, **kwargs):
674 | if self.blackboard["current_face_target"] == self.blackboard["lost_face"]:
675 | self.blackboard["current_face_target"] = 0
676 | print("----- Lost face " + str(self.blackboard["lost_face"]) +
677 | ", but was interacting with them!")
678 | yield True
679 | else:
680 | yield False
681 |
682 | @owyl.taskmethod
683 | def is_face_target(self, **kwargs):
684 | if len(self.blackboard["face_targets"]) > 0:
685 | yield True
686 | else:
687 | yield False
688 |
689 | @owyl.taskmethod
690 | def is_more_than_one_face_target(self, **kwargs):
691 | if len(self.blackboard["face_targets"]) > 1:
692 | yield True
693 | else:
694 | yield False
695 |
696 | @owyl.taskmethod
697 | def is_time_to_change_face_target(self, **kwargs):
698 | if self.blackboard["interact_with_face_target_since"] > 0 and \
699 | (time.time() - self.blackboard["interact_with_face_target_since"]) >= \
700 | random.uniform(self.blackboard["time_to_change_face_target_min"], self.blackboard["time_to_change_face_target_max"]):
701 | print "----- Time to start a new interaction!"
702 | yield True
703 | else:
704 | yield False
705 |
706 | @owyl.taskmethod
707 | def is_time_to_wake_up(self, **kwargs):
708 | if self.blackboard["sleep_since"] > 0 and (time.time() - self.blackboard["sleep_since"]) >= self.blackboard["time_to_wake_up"]:
709 | yield True
710 | else:
711 | yield False
712 |
713 | @owyl.taskmethod
714 | def is_sleeping(self, **kwargs):
715 | if self.blackboard["is_sleeping"]:
716 | yield True
717 | else:
718 | yield False
719 |
720 | @owyl.taskmethod
721 | def is_not_sleeping(self, **kwargs):
722 | if not self.blackboard["is_sleeping"]:
723 | yield True
724 | else:
725 | yield False
726 |
727 | @owyl.taskmethod
728 | def is_interruption(self, **kwargs):
729 | if self.blackboard["is_interruption"]:
730 | yield True
731 | else:
732 | yield False
733 |
734 | @owyl.taskmethod
735 | def is_behavior_tree_on(self, **kwargs):
736 | if self.blackboard["behavior_tree_on"]:
737 | yield True
738 | else:
739 | yield False
740 |
741 | @owyl.taskmethod
742 | def assign_face_target(self, **kwargs):
743 | self.blackboard[kwargs["variable"]] = self.blackboard[kwargs["value"]]
744 | yield True
745 |
746 | @owyl.taskmethod
747 | def select_a_face_target(self, **kwargs):
748 | self.blackboard["current_face_target"] = FaceTrack.random_face_target(self.blackboard["face_targets"])
749 | yield True
750 |
751 | @owyl.taskmethod
752 | def select_a_glance_target(self, **kwargs):
753 | self.blackboard["current_glance_target"] = FaceTrack.random_face_target(self.blackboard["face_targets"], self.blackboard["current_face_target"])
754 | yield True
755 |
756 | @owyl.taskmethod
757 | def record_start_time(self, **kwargs):
758 | self.blackboard[kwargs["variable"]] = time.time()
759 | yield True
760 |
761 | @owyl.taskmethod
762 | def interact_with_face_target(self, **kwargs):
763 | face_id = self.blackboard[kwargs["id"]]
764 | trigger = kwargs["trigger"]
765 | self.facetrack.look_at_face(face_id)
766 | self.write_log("look_at_" + str(face_id), time.time(), trigger)
767 |
768 | if self.should_show_expression("positive_emotions") or kwargs["new_face"]:
769 | # Show a positive expression, either with or without an instant expression in advance
770 | if random.random() < self.blackboard["non_positive_emotion_probabilities"]:
771 | self.pick_instant("positive_emotions", "non_positive_emotion", trigger)
772 | else:
773 | self.pick_random_expression("positive_emotions", trigger)
774 |
775 | ##### Show A Positive Gesture #####
776 | self.pick_random_gesture("positive_gestures", trigger)
777 |
778 | interval = 0.01
779 | duration = random.uniform(self.blackboard["min_duration_for_interaction"], self.blackboard["max_duration_for_interaction"])
780 | print "----- Interacting w/face id:" + str(face_id) + " for " + str(duration)[:5] + " seconds"
781 | self.break_if_interruptions(interval, duration)
782 | yield True
783 |
784 | @owyl.taskmethod
785 | def face_study_saccade(self, **kwargs):
786 | face_id = self.blackboard[kwargs["id"]]
787 | duration = random.uniform(self.blackboard["face_study_duration_min"], self.blackboard["face_study_duration_max"])
788 |
789 | # Randomly pick which part of the face to study
790 | which_part = random.randint(1, 4)
791 | if which_part == 1:
792 | self.blackboard["face_study_nose"] = True
793 | print "----- Studying face:" + str(face_id) + " (nose)"
794 | elif which_part == 2:
795 | self.blackboard["face_study_mouth"] = True
796 | print "----- Studying face:" + str(face_id) + " (mouth)"
797 | elif which_part == 3:
798 | self.blackboard["face_study_left_ear"] = True
799 | print "----- Studying face:" + str(face_id) + " (left ear)"
800 | elif which_part == 4:
801 | self.blackboard["face_study_right_ear"] = True
802 | print "----- Studying face:" + str(face_id) + " (right ear)"
803 |
804 | self.facetrack.study_face(face_id, duration)
805 | self.write_log("face_study", time.time(), kwargs["trigger"])
806 | yield True
807 |
808 | @owyl.taskmethod
809 | def glance_at(self, **kwargs):
810 | face_id = self.blackboard[kwargs["id"]]
811 | print "----- Glancing at face:" + str(face_id)
812 | glance_seconds = 1
813 | self.facetrack.glance_at_face(face_id, glance_seconds)
814 | self.write_log("glance_at_" + str(face_id), time.time(), kwargs["trigger"])
815 | yield True
816 |
817 | @owyl.taskmethod
818 | def glance_at_new_face(self, **kwargs):
819 | face_id = self.blackboard["new_face"]
820 | print "----- Glancing at new face:" + str(face_id)
821 | glance_seconds = 1
822 | self.facetrack.glance_at_face(face_id, glance_seconds)
823 | self.write_log("glance_at_" + str(face_id), time.time(), kwargs["trigger"])
824 | yield True
825 |
826 | @owyl.taskmethod
827 | def glance_at_lost_face(self, **kwargs):
828 | print "----- Glancing at lost face:" + str(self.blackboard["lost_face"])
829 | face_id = self.blackboard["lost_face"]
830 | self.facetrack.glance_at_face(face_id, 1)
831 | self.write_log("glance_at_" + str(face_id), time.time(), kwargs["trigger"])
832 | yield True
833 |
834 | @owyl.taskmethod
835 | def show_expression(self, **kwargs):
836 | self.pick_random_expression(kwargs["emo_class"], kwargs["trigger"])
837 | yield True
838 |
839 | @owyl.taskmethod
840 | def show_frustrated_expression(self, **kwargs):
841 | self.pick_random_expression("frustrated_emotions", kwargs["trigger"])
842 | yield True
843 |
844 | @owyl.taskmethod
845 | def return_to_neutral_position(self, **kwargs):
846 | self.facetrack.look_at_face(0)
847 | self.write_log("look_at_neutral", time.time(), kwargs["trigger"])
848 | yield True
849 |
850 | # Accept an expression name, intensity and duration, and publish it
851 | # as a ROS message both to blender, and to the chatbot. Currently,
852 | # exactly the same message format is used for both blender and the
853 | # chatbot. This may change in the future(?)
854 | def show_emotion(self, expression, intensity, duration, trigger):
855 |
856 | # Try to avoid showing more than one expression at once
857 | now = time.time()
858 | since = self.blackboard["show_expression_since"]
859 | durat = self.blackboard["current_emotion_duration"]
860 | if since is not None and (now - since < 0.7 * durat) :
861 | return
862 |
863 | # Update the blackboard
864 | self.blackboard["current_emotion"] = expression
865 | self.blackboard["current_emotion_intensity"] = intensity
866 | self.blackboard["current_emotion_duration"] = duration
867 |
868 | # Create the message
869 | exp = EmotionState()
870 | exp.name = self.blackboard["current_emotion"]
871 | exp.magnitude = self.blackboard["current_emotion_intensity"]
872 | intsecs = int(duration)
873 | exp.duration.secs = intsecs
874 | exp.duration.nsecs = 1000000000 * (duration - intsecs)
875 | # emotion_pub goes to blender and tts;
876 | if (self.do_pub_emotions) :
877 | self.emotion_pub.publish(exp)
878 | self.write_log(exp.name, time.time(), trigger)
879 |
880 | print "----- Show expression: " + expression + " (" + str(intensity)[:5] + ") for " + str(duration)[:4] + " seconds"
881 | self.blackboard["show_expression_since"] = time.time()
882 |
883 | # Accept a gesture name, intensity, repeat count (how many times to
884 | # perform it) and speed, and then publish it as a ROS message.
885 | def show_gesture(self, gesture, intensity, repeat, speed, trigger):
886 | ges = SetGesture()
887 | ges.name = gesture
888 | ges.magnitude = intensity
889 | ges.repeat = repeat
890 | ges.speed = speed
891 | if (self.do_pub_gestures) :
892 | self.gesture_pub.publish(ges)
893 | self.write_log(ges.name, time.time(), trigger)
894 |
895 | print "----- Show gesture: " + gesture + " (" + str(intensity)[:5] + ")"
896 |
897 | @owyl.taskmethod
898 | def search_for_attention(self, **kwargs):
899 | print("----- Search for attention")
900 | trigger = kwargs["trigger"]
901 | if self.blackboard["bored_since"] == 0:
902 | self.blackboard["bored_since"] = time.time()
903 |
904 | # Send out the look around msg
905 | current_idx = self.blackboard["search_for_attention_index"]
906 | look_around_trg = self.blackboard["search_for_attention_targets"][current_idx]
907 | self.facetrack.look_pub.publish(look_around_trg)
908 | self.write_log("look_around", time.time(), trigger)
909 |
910 | # Update / reset the index
911 | if self.blackboard["search_for_attention_index"] + 1 < len(self.blackboard["search_for_attention_targets"]):
912 | self.blackboard["search_for_attention_index"] += 1
913 | else:
914 | self.blackboard["search_for_attention_index"] = 0
915 |
916 | if self.should_show_expression("bored_emotions"):
917 | # Show a bored expression, either with or without an instant expression in advance
918 | if random.random() < self.blackboard["non_bored_emotion_probabilities"]:
919 | self.pick_instant("bored_emotions", "non_bored_emotion", trigger)
920 | else:
921 | self.pick_random_expression("bored_emotions", trigger)
922 |
923 | ##### Show A Bored Gesture #####
924 | self.pick_random_gesture("bored_gestures", trigger)
925 |
926 | interval = 0.01
927 | duration = random.uniform(self.blackboard["search_for_attention_duration_min"], self.blackboard["search_for_attention_duration_max"])
928 | self.break_if_interruptions(interval, duration)
929 | yield True
930 |
931 | # Determine whether it is a good time to show another expression.
932 | # Used to avoid making expressions too frequently.
933 | def should_show_expression(self, emo_class):
934 | since = self.blackboard["show_expression_since"]
935 | if since is not None and (time.time() - since) >= (self.blackboard["current_emotion_duration"] / 4):
936 | return True
937 | else:
938 | return False
939 |
940 | @owyl.taskmethod
941 | def go_to_sleep(self, **kwargs):
942 | self.blackboard["is_sleeping"] = True
943 | self.blackboard["bored_since"] = 0.0
944 |
945 | ##### Show A Sleep Expression #####
946 | self.pick_random_expression("sleep_emotions", "nothing_is_happening")
947 |
948 | ##### Show A Sleep Gesture #####
949 | self.pick_random_gesture("sleep_gestures", "nothing_is_happening")
950 |
951 | interval = 0.01
952 | duration = random.uniform(self.blackboard["sleep_duration_min"], self.blackboard["sleep_duration_max"])
953 | self.break_if_interruptions(interval, duration)
954 | yield True
955 |
956 | @owyl.taskmethod
957 | def wake_up(self, **kwargs):
958 | print "----- Wake up!"
959 | trigger = kwargs["trigger"]
960 | self.blackboard["is_sleeping"] = False
961 | self.blackboard["sleep_since"] = 0.0
962 | self.blackboard["bored_since"] = 0.0
963 |
964 | ##### Show A Wake Up Expression #####
965 | self.pick_random_expression("wake_up_emotions", trigger)
966 |
967 | ##### Show A Wake Up Gesture #####
968 | self.pick_random_gesture("wake_up_gestures", trigger)
969 |
970 | yield True
971 |
972 | @owyl.taskmethod
973 | def clear_new_face_target(self, **kwargs):
974 | #if not self.blackboard["is_interruption"]:
975 | print "----- Cleared new face: " + str(self.blackboard["new_face"])
976 | self.blackboard["new_face"] = 0
977 | yield True
978 |
979 | @owyl.taskmethod
980 | def clear_lost_face_target(self, **kwargs):
981 | print "----- Cleared lost face: " + str(self.blackboard["lost_face"])
982 | self.blackboard["lost_face"] = 0
983 | yield True
984 |
985 | # This avoids burning CPU time when the behavior system is off.
986 | # Mostly it sleeps, and periodically checks for interrupt messages.
987 | @owyl.taskmethod
988 | def idle_spin(self, **kwargs):
989 | if self.blackboard["behavior_tree_on"]:
990 | yield True
991 |
992 | # Sleep for 1 second.
993 | time.sleep(1)
994 | yield True
995 |
996 | def break_if_interruptions(self, interval, duration):
997 | while duration > 0:
998 | time.sleep(interval)
999 | duration -= interval
1000 | if self.blackboard["is_interruption"]:
1001 | break
1002 |
1003 | # Return the subset of 'core' strings that are in 'avail' strings.
1004 | # Note that 'avail' strings might contain longer names,
1005 | # e.g. "happy-3", whereas core just contains "happy". We want to
1006 | # return "happy-3" in that case, as well as happy-2 and happy-1
1007 | # if they are there.
1008 | def set_intersect(self, emo_class, avail) :
1009 | emos = self.blackboard[emo_class]
1010 | rev = []
1011 | for emo in emos:
1012 | for a in avail:
1013 | if emo.name in a:
1014 | # Copy the emotion, but give it the new name!
1015 | nemo = copy.deepcopy(emo)
1016 | nemo.name = a
1017 | rev.append(nemo)
1018 |
1019 | # Now, renormalize the probabilities
1020 | tot = 0.0
1021 | for emo in rev:
1022 | tot += emo.probability
1023 | for emo in rev:
1024 | emo.probability /= tot
1025 |
1026 | self.blackboard[emo_class] = rev
1027 |
1028 | @owyl.taskmethod
1029 | def log(self, **kwargs):
1030 | self.write_log(kwargs["behavior"], time.time(), kwargs["trigger"])
1031 |
1032 | def write_log(self, behavior, log_time, trigger):
1033 | logger = csv.writer(
1034 | self.log_file, delimiter=",", lineterminator='\n')
1035 | logger.writerow([behavior, log_time, trigger])
1036 | self.log_file.flush()
1037 |
1038 | # Get the list of available emotions. Update our master list,
1039 | # and cull the various subclasses appropriately.
1040 | def get_emotion_states_cb(self, msg) :
1041 | print("Available Emotion States:" + str(msg.data))
1042 | # Update the complete list of emotions.
1043 | self.blackboard["emotions"] = msg.data
1044 |
1045 | # Reconcile the other classes
1046 | self.set_intersect("frustrated_emotions", msg.data)
1047 | self.set_intersect("positive_emotions", msg.data)
1048 | self.set_intersect("bored_emotions", msg.data)
1049 | self.set_intersect("sleep_emotions", msg.data)
1050 | self.set_intersect("wake_up_emotions", msg.data)
1051 | self.set_intersect("new_arrival_emotions", msg.data)
1052 |
1053 |
1054 | def get_gestures_cb(self, msg) :
1055 | print("Available Gestures:" + str(msg.data))
1056 |
1057 | # Rescale the intensity of the expressions.
1058 | def rescale_intensity(self, emo_scale, gest_scale) :
1059 | for emo_class in self.blackboard["emotion_classes"]:
1060 | for emo in self.blackboard[emo_class]:
1061 | emo.min_intensity *= emo_scale
1062 | emo.max_intensity *= emo_scale
1063 |
1064 | for ges_class in self.blackboard["gesture_classes"]:
1065 | for ges in self.blackboard[ges_class]:
1066 | ges.min_intensity *= gest_scale
1067 | ges.max_intensity *= gest_scale
1068 |
1069 | # Turn behaviors on and off.
1070 | def behavior_switch_callback(self, data):
1071 | if data.data == "btree_on":
1072 | self.do_pub_gestures = True
1073 | self.do_pub_emotions = True
1074 | self.blackboard["is_interruption"] = False
1075 |
1076 | emo_scale = self.blackboard["emotion_scale_closeup"]
1077 | ges_scale = self.blackboard["gesture_scale_closeup"]
1078 |
1079 | # If the current mode is stage mode, then tone things down.
1080 | if self.blackboard["stage_mode"]:
1081 | print("----- Switch to close-up mode")
1082 | emo_scale /= self.blackboard["emotion_scale_stage"]
1083 | ges_scale /= self.blackboard["gesture_scale_stage"]
1084 |
1085 | else:
1086 | print("----- Behavior tree enabled, closeup mode.")
1087 |
1088 | self.rescale_intensity(emo_scale, ges_scale)
1089 | self.blackboard["stage_mode"] = False
1090 | self.blackboard["behavior_tree_on"] = True
1091 |
1092 | elif data.data == "btree_on_stage":
1093 | self.do_pub_gestures = True
1094 | self.do_pub_emotions = True
1095 | self.blackboard["is_interruption"] = False
1096 |
1097 | emo_scale = self.blackboard["emotion_scale_stage"]
1098 | ges_scale = self.blackboard["gesture_scale_stage"]
1099 |
1100 | # If previously in close-up mode, exaggerate the emotions
1101 | # for the stage setting.
1102 | if self.blackboard["behavior_tree_on"] and not self.blackboard["stage_mode"]:
1103 | print("----- Switch to stage mode")
1104 | emo_scale /= self.blackboard["emotion_scale_closeup"]
1105 | ges_scale /= self.blackboard["gesture_scale_closeup"]
1106 | else:
1107 | print("----- Behavior tree enabled, stage mode.")
1108 |
1109 | self.rescale_intensity(emo_scale, ges_scale)
1110 | self.blackboard["stage_mode"] = True
1111 | self.blackboard["behavior_tree_on"] = True
1112 |
1113 | elif data.data == "emotion_off":
1114 | rospy.logwarn("emotion expression disabled")
1115 | self.do_pub_emotions = False
1116 |
1117 | elif data.data == "gesture_off":
1118 | self.do_pub_gestures = False
1119 |
1120 | elif data.data == "btree_off":
1121 | self.blackboard["is_interruption"] = True
1122 | self.blackboard["behavior_tree_on"] = False
1123 | self.blackboard["stage_mode"] = False
1124 | print("---- Behavior tree disabled")
1125 |
1126 | # The perceived emotional content in the message.
1127 | # emo is of type std_msgs/String
1128 | def chatbot_affect_perceive_callback(self, emo):
1129 |
1130 | rospy.loginfo('chatbot perceived emo class ='+emo.data)
1131 | # For now, pass it through to blender using a random positive or frustrated class;
1132 | # in the future we want something more cognitive / behavioral.
1133 | # pick_random_expression may not show anything, so force it to pick something.
1134 | force = True
1135 |
1136 | if emo.data == 'happy':
1137 | chosen_emo = self.pick_random_expression("positive_emotions", "chatbot_affect_perceive", force)
1138 | else:
1139 | chosen_emo = self.pick_random_expression("frustrated_emotions", "chatbot_affect_perceive", force)
1140 | # publish this message to cause chatbot to emit response if it's waiting
1141 | #
1142 |
1143 | exp = EmotionState()
1144 | # getting from blackboard seems to be inconsistent with expected state
1145 | exp.name = self.blackboard["current_emotion"]
1146 | exp.magnitude = 0.5
1147 | # Use a short fixed duration; tts can recompute it if needed.
1148 | exp.duration.secs = 3
1149 | exp.duration.nsecs = 0
1150 | self.affect_pub.publish(exp)
1151 | rospy.loginfo('picked and expressed '+chosen_emo.name)
1152 |
--------------------------------------------------------------------------------
/notes/notes:
--------------------------------------------------------------------------------
1 | =======================================================================
2 | (get-total-atoms-in-atomspace)
3 |
4 | (print-toplevel-counts)
5 | type: NumberNode total 137 top 75
6 | type: ListLink total 313 top 1
7 | type: StateLink total 382 top 309
8 |
9 | guile> (get-total-atoms-in-atomspace)
10 | 1563
11 |
12 | (run-face-test)
13 |
14 | (get-total-atoms-in-atomspace)
15 | 1565
16 |
17 | (print-toplevel-counts)
18 | type: ListLink total 310 top 0
19 | type: StateLink total 402 top 329
20 |
21 | =======================================================================
22 |
--------------------------------------------------------------------------------
/notes/universal-fsm.scm:
--------------------------------------------------------------------------------
1 | ;
2 | ; Universal Finite State Machine (FSM) constructor.
3 | ;
4 | ; This illustrates the general (universal) FSM constructor.
5 | ; It allows multiple FSMs to be simultaneously defined and operated
6 | ; asynchronously from each other.
7 | ;
8 | (use-modules (opencog))
9 | (use-modules (opencog query))
10 |
11 | ;;; A Universal Deterministic Finite State Machine Constructor.
12 | ;;;
13 | ;;; This will create a deterministic FSM; that is, a rule that will
14 | ;;; transition any arbitrary deterministic FSM from state to state,
15 | ;;; given only its name, and the name given to the transition rules.
16 | ;;;
17 | ;;; Create a BindLink that takes an FSM with the name `fsm-name`
18 | ;;; and stores its state in `fsm-state`. After the BindLink is
19 | ;;; created, each invocation of it will advance the FSM by one step.
20 | ;;;
21 | (define (create-fsm fsm-name fsm-state)
22 | (BindLink
23 | ;; We will need to find the current and the next state
24 | (VariableList
25 | (VariableNode "$curr-state")
26 | (VariableNode "$next-state")
27 | )
28 | (ImplicationLink
29 | (AndLink
30 | ;; If we are in the current state ...
31 | (ListLink
32 | fsm-state
33 | (VariableNode "$curr-state")
34 | )
35 | ;; ... and there is a transition to another state...
36 | (ContextLink
37 | (VariableNode "$curr-state")
38 | (ListLink
39 | fsm-name
40 | (VariableNode "$next-state")
41 | )
42 | )
43 | )
44 | (AndLink
45 | ;; ... then transition to the next state ...
46 | (ListLink
47 | fsm-state
48 | (VariableNode "$next-state")
49 | )
50 | ;; ... and leave the current state.
51 | (DeleteLink
52 | (ListLink
53 | fsm-state
54 | (VariableNode "$curr-state")
55 | )
56 | )
57 | )
58 | )
59 | )
60 | )
61 |
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package>
3 |   <name>ros_behavior_scripting</name>
4 |   <version>0.4.0</version>
5 |   <description>The ROS interfaces to sensors and animation</description>
6 |
7 |   <maintainer>linas</maintainer>
8 |
9 |   <license>Affero GPL</license>
10 |
11 |   <url>http://opencog.org/</url>
12 |
13 |   <author>linas</author>
14 |
15 |   <buildtool_depend>catkin</buildtool_depend>
16 |
17 |   <build_depend>dynamic_reconfigure</build_depend>
18 |   <build_depend>geometry_msgs</build_depend>
19 |   <build_depend>hr_msgs</build_depend>
20 |   <build_depend>pi_face_tracker</build_depend>
21 |   <build_depend>rospy</build_depend>
22 |   <build_depend>std_msgs</build_depend>
23 |
24 |   <run_depend>dynamic_reconfigure</run_depend>
25 |   <run_depend>geometry_msgs</run_depend>
26 |   <run_depend>hr_msgs</run_depend>
27 |   <run_depend>pi_face_tracker</run_depend>
28 |   <run_depend>rospy</run_depend>
29 |   <run_depend>std_msgs</run_depend>
30 | </package>
--------------------------------------------------------------------------------
/scripts/config.scm:
--------------------------------------------------------------------------------
1 | (use-modules (opencog)
2 | (opencog nlp relex2logic)
3 | (opencog cogserver)
4 | (opencog logger)
5 | (opencog openpsi)
6 | (opencog ghost)
7 | (opencog ghost predicates))
8 |
9 | (define (single-rule)
10 | (use-modules (opencog eva-behavior))
11 | ; Load rules: The following is just a placeholder, a module of the rulebase
12 | ; should be loaded instead.
13 | (ghost-parse "s: (hi robot) Hello human")
14 |
15 | )
16 | (define (multiple-rules)
17 | (use-modules (opencog eva-behavior))
18 | (ghost-parse-file "test.ghost")
19 | )
20 |
21 | (define (ros-multiple-rules)
22 | (use-modules (opencog movement))
23 | (ghost-parse-file "test.ghost")
24 | )
25 |
26 | ; TODO: change to command-line argument.
27 | (define ghost-rule-base 1)
28 | (case ghost-rule-base
29 | ((1) (single-rule))
30 | ((2) (multiple-rules))
31 | ((3) (ros-multiple-rules))
32 | )
33 |
34 | (start-cogserver "opencog.conf")
35 | (cog-logger-set-stdout! #f)
36 |
37 | ; Start the ghost loop
38 | (ghost-run)
39 |
--------------------------------------------------------------------------------
/scripts/eva.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | #
3 | # eva.sh
4 | #
5 | # ROS + blender launch script for the Hanson Robotics Eva blender head.
6 | # This shell script will start up the various bits and pieces needed to
7 | # get things to work.
8 | #
9 | # It needs to run in the catkin_ws directory where the various ROS nodes
10 | # and blender models were installed. It assumes that catkin_make was
11 | # already run.
12 | #
13 | # If you have not yet installed the pre-requisites, then do this:
14 | # cd ~ ; mkdir src ; cd src
15 | # echo "source /opt/ros/indigo/setup.bash" >> ~/.bashrc
16 | # mkdir catkin ; cd catkin ; mkdir src ;
17 | # cd src
18 | # git clone git@github.com:hansonrobotics/blender_api
19 | # git clone git@github.com:hansonrobotics/blender_api_msgs
20 | # git clone git@github.com:hansonrobotics/pau2motors
21 | # cd ..
22 | # catkin build
23 |
24 | # Change this for your setup!
25 | export CATKINDIR="."
26 | export BLENDIR="$CATKINDIR/../hr/blender_api"
27 | export OCBHAVE="$CATKINDIR/../opencog/ros-behavior-scripting/"
28 | export PYTHONPATH=$PYTHONPATH:`pwd`/$OCBHAVE/src
29 |
30 | # Without this, some ros messages seem to go astray.
31 | export ROS_IP=127.0.0.1
32 |
33 | source devel/setup.sh
34 | echo "Starting... this will take 15-20 seconds..."
35 |
36 | # Use byobu so that the scroll bars actually work.
37 | byobu new-session -d -n 'ros' 'roscore; $SHELL'
38 | sleep 4;
39 |
40 | # Single Video (body) camera and face tracker.
41 | tmux new-window -n 'trk' 'roslaunch robots_config tracker-single-cam.launch; $SHELL'
42 |
43 | # Publish the geometry messages. This includes tf2 which holds
44 | # the face locations.
45 | tmux new-window -n 'geo' 'roslaunch robots_config geometry.launch gui:=false; $SHELL'
46 |
47 | ### Start the blender GUI.
48 | tmux new-window -n 'eva' 'cd $BLENDIR && blender -y Eva.blend -P autostart.py; $SHELL'
49 |
50 | # Start the cogserver.
51 | # It seems to take more than 5 seconds to load all scripts!?
52 | cd $OCBHAVE/src
53 | tmux new-window -n 'cog' 'guile -l btree-eva.scm; $SHELL'
54 | sleep 10
55 |
56 | # Run the various sensory-input modules, including the face-tracker.
57 | # tmux new-window -n 'face' '$OCBHAVE/face_track/main.py; $SHELL'
58 | tmux new-window -n 'sen' '../sensors/main.py; $SHELL'
59 |
60 | # Telnet shell
61 | tmux new-window -n 'tel' 'rlwrap telnet localhost 17020; $SHELL'
62 |
63 | # Spare-usage shell
64 | tmux new-window -n 'bash' '$SHELL'
65 |
66 | # Fix the annoying byobu display.
67 | echo "tmux_left=\"session\"" > $HOME/.byobu/status
68 | echo "tmux_right=\"load_average disk_io\"" >> $HOME/.byobu/status
69 | tmux attach
70 |
71 | echo "Started"
72 |
--------------------------------------------------------------------------------
/scripts/install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Script for building and installing.
4 |
5 | GIT_REPO="$(dirname $(readlink -f ${BASH_SOURCE[0]}))/.."
6 |
7 | if [ ! -d "$GIT_REPO/build" ]; then
8 | mkdir "$GIT_REPO/build"
9 | fi
10 |
11 | cd $GIT_REPO/build
12 | source /opt/hansonrobotics/ros/setup.bash
13 | cmake ..
14 | sudo make install
15 |
--------------------------------------------------------------------------------
/scripts/opencog.conf:
--------------------------------------------------------------------------------
1 | #
2 | # This file provides an example configuration for a production OpenCog
3 | # server. Particularly noteworthy is the list of automatically loaded
4 | # modules.
5 | #
6 | SERVER_PORT = 17020
7 | LOG_FILE = /tmp/ob-behave.log
8 |
9 | # Other possible log levels are "error", "warn", "info", "debug" and "fine"
10 | # LOG_LEVEL = debug
11 | LOG_LEVEL = info
12 | LOG_TO_STDOUT = true
13 | SERVER_CYCLE_DURATION = 100
14 | IDLE_CYCLES_PER_TICK = 3
15 |
16 | # Economic Attention Allocation parameters
17 | STARTING_STI_FUNDS = 100000
18 | STARTING_LTI_FUNDS = 100000
19 | TARGET_STI_FUNDS = 100000
20 | TARGET_LTI_FUNDS = 100000
21 | STI_FUNDS_BUFFER = 50000
22 | LTI_FUNDS_BUFFER = 10000
23 | MIN_STI = 0
24 |
25 | ECAN_MAX_ATOM_STI_RENT = 30
26 |
27 | ECAN_STARTING_ATOM_STI_RENT = 25
28 | ECAN_STARTING_ATOM_STI_WAGE = 5
29 |
30 | ECAN_STARTING_ATOM_LTI_RENT = 1
31 | ECAN_STARTING_ATOM_LTI_WAGE = 10
32 |
33 | ECAN_AF_RENT_FREQUENCY = 10
34 | ECAN_AFB_DECAY = 0.1
35 | ECAN_AFB_BOTTOM = 150
36 |
37 | #For ForgettingAgent
38 | ECAN_ATOMSPACE_MAXSIZE = 100000
39 | ECAN_ATOMSPACE_ACCEPTABLE_SIZE_SPREAD = 100
40 |
41 | #For HebbianLinkCreationAgent
42 | ECAN_MAXLINKS = 300
43 | ECAN_LOCAL_FAR_LINK_RATIO = 10
44 |
45 | #For WARent/DiffusionAgent
46 | ECAN_DIFFUSION_TOURNAMENT_SIZE = 5
47 | ECAN_RENT_TOURNAMENT_SIZE = 5
48 |
49 | #Used by ImportanceDiffusionAgent class
50 | #0 => flat rent, 1 => exp rent, 2 => log rent, 3 => linear rent
51 | ECAN_RENT_TYPE = 0
52 | ECAN_RENT_AMNESTY = 5
53 | ECAN_RENT_EQUATION_PARAMETER_0 = 0.05
54 | ECAN_RENT_EQUATION_PARAMETER_1 = 0.0
55 |
56 | #End of ImportanceDiffusionAgent class
57 |
58 | #Used by SimpleImportanceDiffusionAgent class
59 | #Maximum percentage of STI that is spread from an atom
60 | ECAN_MAX_SPREAD_PERCENTAGE = 0.6
61 |
62 | # If false, will diffuse along hebbian links only. If true,
63 | # will also diffuse to all non-hebbian incident atoms in the
64 | # incoming and outgoing sets
65 | ECAN_SPREAD_HEBBIAN_ONLY = false
66 |
67 | # Maximum percentage that will be available for diffusion to hebbian links
68 | HEBBIAN_MAX_ALLOCATION_PERCENTAGE = 0.5
69 | ECAN_CONVERT_LINKS = false
70 | ECAN_CONVERSION_THRESHOLD = 15
71 |
72 | # spread deciding function type (HYPERBOLIC = 0 and STEP = 1)
73 | SPREAD_DECIDER_TYPE = 1
74 | #END of SimpleImportanceDiffusionAgent params
75 |
76 | #ForgettingAgent params
77 | ECAN_FORGET_PERCENTAGE = 0.001
78 |
79 | #END of Economic Attention Allocation parameters
80 |
81 |
82 | # Use this command PROMPT when telnet/terminal doesn't support ANSI
83 | PROMPT = "opencog> "
84 | # Prompt with ANSI color codes
85 | ANSI_PROMPT = "[0;32mopencog[1;32m> [0m"
86 | # Use this guile PROMPT when telnet/terminal doesn't support ANSI
87 | SCM_PROMPT = "guile> "
88 | # Prompt with ANSI color codes
89 | ANSI_SCM_PROMPT = "[0;34mguile[1;34m> [0m"
90 | # Global option so that modules know whether they should output ANSI color
91 | # codes
92 | ANSI_ENABLED = true
93 |
94 | # Cogserver in OSX will automatically change .so extension to .dylib
95 | # if .so exists.
96 | MODULES = libbuiltinreqs.so,
97 | libscheme-shell.so,
98 | libpy-shell.so
99 |
100 | # IMPORTANT!
101 | # Database login credentials. Change these to reflect your actual setup!
102 | # For information on how to set up atomspace persistance, see the README
103 | # file in opencog/persist/sql/README.
104 | #
105 | # STORAGE = "opencog-atomspace"
106 | # STORAGE_USERNAME = "opencog_user"
107 | # STORAGE_PASSWD = "asdf"
108 | #
109 | # Parameters for ZeroMQ AtomSpace Event Publisher
110 | ZMQ_EVENT_USE_PUBLIC_IP = TRUE
111 | ZMQ_EVENT_PORT = 5563
112 |
113 | # Parameters for RuleEngine
114 | # RULE_ENGINE_TRIGGERED_ON = [1 ,2 ,3]
115 | # 1-when atom added 2-when atom enters to AF 3-both on 1 and 2
116 | RULE_ENGINE_TRIGGERED_ON = 1
117 |
--------------------------------------------------------------------------------
/scripts/purge-head-packages.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | packages="
4 | head-asr
5 | head-basic-head-api
6 | head-blender-api
7 | head-blender-api-msgs
8 | head-cereproc
9 | head-chatbot
10 | head-chatscript
11 | head-chatscript-han
12 | head-chatscript-sophia
13 | head-data
14 | head-deps-all
15 | head-dynamixel-motor
16 | head-festival-api
17 | head-hr
18 | head-hr-ext
19 | head-hr-msgs
20 | head-libmongoc
21 | head-marky-markov
22 | head-motors-safety
23 | head-opencv
24 | head-pau2motors
25 | head-perception
26 | head-performances
27 | head-pi-vision
28 | head-pocketsphinx
29 | head-python-emopy
30 | head-python-hr-characters
31 | head-python-iflytek
32 | head-python-pololu-motors
33 | head-python-ttsserver
34 | head-realsense-ros
35 | head-ros-audio-stream
36 | head-ros-emotion
37 | head-ros-misc
38 | head-ros-pololu
39 | head-ros-tts
40 | head-saliency-tracker
41 | head-sophia-blender-api
42 | head-sound
43 | head-sphinxbase
44 | head-ttsserver-voice-api
45 | head-webui
46 | "
47 |
48 | # A loop is used, because packages installed by hrtool may differ from version
49 | # to version, or installation for some might have failed, and so trying to
50 | # purge all packages in one go might fail.
51 | for i in $packages;do
52 | sudo apt-get -y purge $i
53 | done
54 |
--------------------------------------------------------------------------------
/scripts/run.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | #
3 | # Script for running opencog with HEAD.
4 |
5 | set -e
6 | _session_name="opencog"
7 |
8 | source /opt/hansonrobotics/ros/setup.bash
9 |
10 | # hrtool workspace
11 | HR_WS="$(hr env | grep HR_WORKSPACE | cut -d = -f 2)"
12 |
13 | # The HEAD setup has its own python virtual environment, hence the need to
14 | # update the PYTHONPATH.
15 | PYTHON_PATH="${PYTHONPATH}:/usr/local/lib/python2.7/dist-packages"
16 |
17 | # Run a robot/simulation.
18 | if [ $# -eq 0 ]; then
19 | echo "Pass the robot name; it will be passed on to 'hr run --dev'"
20 | exit 1
21 | fi
22 |
23 | # NOTE: Since the HEAD services may fail randomly, start opencog
24 | # in separate tmux session.
25 | start_opencog_tmux_session()
26 | {
27 | echo "Start opencog services in a new background tmux session"
28 | # Start relex
29 | tmux new-session -d -s "$_session_name" -n "relex" \
30 | "cd $HR_WS/OpenCog/relex &&
31 | bash opencog-server.sh;
32 | $SHELL"
33 |
34 | # Start the cogserver
35 | tmux new-window -t "$_session_name:" -n "ghost" \
36 | "cd $HR_WS/OpenCog/ros-behavior-scripting/scripts &&
37 | guile -l config.scm;
38 | $SHELL"
39 |
40 | # Start passing sensory inputs to the cogserver
41 | tmux new-window -t "$_session_name:" -n "rbs" \
42 | "export PYTHONPATH=$PYTHON_PATH &&
43 | cd $HR_WS/OpenCog/ros-behavior-scripting/sensors &&
44 | python main.py ;
45 | $SHELL"
46 |
47 | echo "Finished starting opencog services in a new background tmux session"
48 | }
49 |
50 | # Start opencog tmux session
51 | tmux has-session -t "$_session_name" || start_opencog_tmux_session
52 |
53 | # Start HEAD tmux session
54 | tmux has-session -t "$1" || hr run --dev $1
55 |
56 |
--------------------------------------------------------------------------------
/scripts/setup.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | #
3 | # Script for setting up HEAD for building rbs
4 | set -e
5 |
6 | # TODO move to octool.
7 | GIT_REPO="$(dirname $(readlink -f ${BASH_SOURCE[0]}))/.."
8 |
9 | # Install hrtool.
10 | curl https://raw.githubusercontent.com/hansonrobotics/hrtool/master/get_hr.bash|bash
11 | hr install head-hr head-hr-ext
12 | hash -r
13 |
14 | # Configure the hansonrobotics workspace.
15 | echo yes | hr init
16 | hr cmd use_git_ssh # use ssh to minimize interactions
17 | hr install head
18 |
19 | # Clone opencog repos.
20 | hr update opencog
21 |
22 | ## Configure ros-behavior-scripting for building using `hr build opencog`
23 | ln -s "$GIT_REPO" "$HR_WS/HEAD/src/ros-behavior-scripting"
24 |
25 |
26 | printf "Finished configuring setup for running opencog with HEAD \n"
27 |
--------------------------------------------------------------------------------
/sensors/README.md:
--------------------------------------------------------------------------------
1 |
2 | ROS Messaging Shims
3 | ===================
4 |
5 | The code in this directory translates sensory input (vision, sound)
6 | between ROS and OpenCog. It is written in python, because python
7 | is a very natural API for ROS. Each module subscribes to some ROS topic,
8 | and when ROS messages are received, these are converted into atomese
9 | and sent on to the cogserver. The interface defined here, and the
10 | translation to atomese, are rather informal and ad hoc. Change them as
11 | required, to modernize/update.
12 |
13 | The `face_track.py` file implements a ROS node to pass information
14 | about the visible human faces into the opencog `spacetime` server.
15 | It receives 3D face locations from the webcam + `pi_vision` subsystem,
16 | and auditory information from the `many_ears` subsystem.
17 |
18 | As an example, face-detected events are converted to atoms of the form
19 | ```
20 | (EvaluationLink
21 | (PredicateNode "visible face")
22 | (ListLink
23 | (NumberNode "12"))) ;; the face id is currently an integer.
24 | ```
25 |
26 | Similarly, we have:
27 | * `audio_power.py` - general loudness and sudden sounds (bangs, shouts)
28 | * `chat_track.py` - speech-to-text messages
29 | * `sound_track.py` - sound source localization (3D direction)
30 |
31 | General Intent
32 | --------------
33 | The general intention here is that there are many special-purpose
34 | sensory systems (microphones, cameras) and sensory-processing systems
35 | (deep neural nets, speech-to-text) that are not part of the core
36 | OpenCog framework. The code here is responsible for getting that data
37 | to the Cogserver. Some specifics:
38 |
39 | ## Audio:
40 | * Sudden changes in loudness, e.g. bangs crashes, shouts, claps.
41 | * Audio power:
42 | * loud noises: clapping hands, loud background noises (construction
43 | machinery, convention-hall chaos, street traffic), laughing,
44 | cheering, whistling, booing.
45 | * normal volume: speech, background music, background muttering,
46 | * quiet sounds: squeaking chairs, footsteps, whispering, wind noise,
47 | rustling leaves, distant traffic, distant children playing, etc.
48 | * Audio frequency:
49 | * High pitches: whistling, motorcycle whines.
50 | * Low frequency: rumbling trucks, construction.
51 | * Fundamental frequency of voice - this helps distinguish male and
52 | female speakers.
53 | * Voice rising and falling pitch - is the speaker excited?
54 | Distracted? Bored? Sad? Shy?
55 | * Audio chaos:
56 | * Are there lots of rapid sound changes (typical of voices)?
57 | * Length of pauses in speech: is speech rapid and clipped, or slow?
58 | * Is speech being interrupted by other noises?
59 | * Is there drumming, tapping?
60 | * Is there fairly quiet but chaotic noise in the background
61 | (e.g. people talking in another room)?
62 | * Sound localization:
63 | * What direction is the sound coming from?
64 | * loud-bang-noise -- what direction was it from?
65 | * References:
66 | - [Root Mean Square (RMS)](http://www.gaussianwaves.com/2015/07/significance-of-rms-root-mean-square-value/)
67 | - [Frequency Detection](https://gist.github.com/endolith/255291)
68 |
69 |
70 | Running
71 | -------
72 | Just start `main.py` in a terminal. This does not have any of the
73 | pretty ROS rosrun, config, setup.py stuff in it yet. It's a quick hack.
74 |
75 |
76 | TODO
77 | ----
78 | Maybe the `pi_vision` subsystem should be replaced by this:
79 | * http://wiki.ros.org/face_detection_tracking
80 | * https://github.com/phil333/face_detection
81 | * http://www.phil.lu/?page_id=328
82 |
83 |
84 | Design discussion
85 | -----------------
86 | There are two design choices for having OpenCog interact with ROS:
87 |
88 | A) Write a cogserver agent that subscribes to ROS messages, and then
89 | pokes the resulting data directly into the atomspace and/or the
90 | spacetime server.
91 |
92 | B) Write a stand-alone ROS node that subscribes to ROS messages, and
93 | then converts these into (scheme) strings, which are then sent to
94 | the cogserver.
95 |
96 | Let's review the pros and cons of each. Choice A seems direct; however,
97 | it would require putting a significant amount of ROS code running
98 | within the cogserver. For each received message, the ROS message would
99 | need to be converted into Atoms. However, Python is single-threaded;
100 | running python in the cogserver requires grabbing the Python GIL. Thus,
101 | ultimately, this is not scalable: there is a bottleneck. Python does
102 | not work well in such scenarios.
103 |
104 | Design A could work if the ROS code was written in C++ instead of python,
105 | but that would be yucky: C++ is hard to write. The ROS build system
106 | conflicts with the opencog build system. Converting ROS messages into
107 | atoms, in C++, is icky. The rosrun main loop can conflict with the
108 | cogserver main loop.
109 |
110 | Design choice B is scalable, because we can run as many guile threads
111 | as we want. It's more CPU intensive, though: on the cogserver side, for
112 | each utf8-string message, we have to create a thread, grab an unused
113 | guile interpreter, interpret the string, poke the atoms into the
114 | atomspace, and shut it all down again.
115 |
116 | For Eva, the number of messages that we anticipate sending to the
117 | cogserver is low: currently, no more than a few hundred per second,
118 | and so either solution A or B should work fine. Solution B was
119 | implemented because it was easier.
120 |
121 | Sending messages from OpenCog
122 | -----------------------------
123 | Sending ROS messages is simpler than subscribing to them, because
124 | there is no need for a ROS main loop. These can be sent directly
125 | from OpenCog. Using python is the simplest, and the file
126 | `../src/ros_commo.py` implements a normal python class. This has a
127 | silly wrapper around it, because the current OpenCog python API does
128 | not play nice with python classes. The wrapper is in `../src/atomic.py`.
129 |
--------------------------------------------------------------------------------
/sensors/affect.py:
--------------------------------------------------------------------------------
1 | #
2 | # affect.py - Emotional affect detected in spoken speech.
3 | # Copyright (C) 2015, 2017 Hanson Robotics
4 | #
5 | # This library is free software; you can redistribute it and/or
6 | # modify it under the terms of the GNU Lesser General Public
7 | # License as published by the Free Software Foundation; either
8 | # version 2.1 of the License, or (at your option) any later version.
9 | #
10 | # This library is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | # Lesser General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Lesser General Public
16 | # License along with this library; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 | # 02110-1301 USA
19 |
20 | import rospy
21 | from std_msgs.msg import String
22 | from atomic_msgs import AtomicMsgs
23 |
24 | '''
25 | This implements a ROS node that subscribes to the
26 | `chatbot_affect_perceive` topic, and passes the perception
27 | to the cogserver. Currently, the perception uses Dave DeMaris'
28 | adaptation of advertizing data to extract whether the speaker
29 | is in a good mood, or is negative.
30 | '''
31 |
32 | class Affect:
33 | def __init__(self):
34 | self.atomo = AtomicMsgs()
35 | rospy.Subscriber("chatbot_affect_perceive", String,
36 | self.language_affect_perceive_cb)
37 |
38 | # The perceived emotional content of words spoken to the robot.
39 | # That is, were people being rude to the robot? Polite to it? Angry
40 | # with it? We subscribe; there may be multiple publishers of this
41 | # message: it might be supplied by some linguistic-processing module,
42 | # or it might be supplied by a chatbot.
43 | #
44 | # emo is of type std_msgs/String
45 | def language_affect_perceive_cb(self, emo):
46 | print 'chatbot perceived affect class =' + emo.data
47 | rospy.loginfo('chatbot perceived affect class =' + emo.data)
48 | if emo.data == "happy":
49 | # behavior tree will use these predicates
50 | self.atomo.affect_happy()
51 |
52 | else:
53 | self.atomo.affect_negative()
54 |
--------------------------------------------------------------------------------
/sensors/atomic_msgs.py:
--------------------------------------------------------------------------------
1 | #
2 | # atomic_msgs.py - Send data to the cogserver/atomspace.
3 | #
4 | # Copyright (C) 2015,2016,2017 Linas Vepstas
5 | # Copyright (C) 2016,2017 Hanson Robotics
6 | #
7 | # This program is free software; you can redistribute it and/or modify
8 | # it under the terms of the GNU Affero General Public License v3 as
9 | # published by the Free Software Foundation and including the exceptions
10 | # at http://opencog.org/wiki/Licenses
11 | #
12 | # This program is distributed in the hope that it will be useful,
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | # GNU General Public License for more details.
16 | #
17 | # You should have received a copy of the GNU Affero General Public License
18 | # along with this program; if not, write to:
19 | # Free Software Foundation, Inc.,
20 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 |
22 | from netcat import netcat
23 |
24 | # The code here is a quick, cheap hack to place information into the
25 | # cogserver atomspace. It opens a socket to the cogserver, and sends
26 | # scheme snippets across. These are usually snippets of Atomese.
27 | #
28 | class AtomicMsgs:
29 |
30 | def __init__(self):
31 | self.hostname = "localhost"
32 | self.port = 17001
33 | self.OCTOMAP_NAME = "faces"
34 | self.SPATIAL_RESOLUTION = "0.1"
35 | self.TIME_RESOLUTION = "10"
36 | self.TIME_UNITS = "100"
37 |
38 | # --------------------------------------------------------
39 | # Wholeshow control -- Start and stop openpsi
40 | def wholeshow_stop(self):
41 | netcat(self.hostname, self.port, "(disable-all-demos)")
42 | netcat(self.hostname, self.port, "(halt)")
43 |
44 | def wholeshow_start(self):
45 | netcat(self.hostname, self.port, "(enable-all-demos)")
46 | netcat(self.hostname, self.port, '(run)')
47 |
48 | # --------------------------------------------------------
49 | # Set the facetracking state in atomspace
50 | def update_ft_state_to_atomspace(self, enabled):
51 | if enabled:
52 | state = 'on'
53 | else:
54 | state = 'off'
55 | face = '(StateLink face-tracking-state face-tracking-%s)\n' % state
56 | netcat(self.hostname, self.port, face)
57 |
58 | # --------------------------------------------------------
59 | # Face-tracking stuff
60 |
61 | # Add a newly visible face to the atomspace.
62 | def add_face_to_atomspace(self, faceid):
63 | face = "(EvaluationLink (PredicateNode \"visible face\") " + \
64 | "(ListLink (NumberNode \"" + str(faceid) + "\")))\n"
65 | netcat(self.hostname, self.port, face)
66 | print "New visible face in atomspace: ", faceid
67 |
68 | # Focus attention on specific face.
69 | # Build string to force attention to focus on the requested face.
70 | # This bypasses the normal "new face is visible" sequence, and
71 | # immediately shifts Eva's attention to this face.
72 | def add_tracked_face_to_atomspace(self, faceid):
73 | face = '(StateLink request-eye-contact-state (NumberNode "' + \
74 | str(faceid) + '"))\n'
75 | netcat(self.hostname, self.port, face)
76 | print "Force focus of attention on face: ", faceid
77 |
78 | # Remove a face (make it no longer visible).
79 | def remove_face_from_atomspace(self, faceid):
80 |
81 | # AtomSpace cog-delete takes handle as an argument.
82 | msg = self.delete_face(faceid)
83 | netcat(self.hostname, self.port, msg)
84 | print "Removed face from atomspace: ", faceid
85 |
86 | # Build string to delete the face, and also to garbage-collect
87 | # the ListLink and NumberNode. In the long run, explicit deletes
88 | # should not be needed, because the attention-allocation code
89 | # should do this. However, attention-alloc does not yet work.
90 | def delete_face(self, faceid):
91 |
92 | # Delete the association between the recognized and tracked face.
93 | pattern = "(EvaluationLink (Predicate \"name\") " + \
94 | "(ListLink (ConceptNode \"" + str(faceid) + "\") " + \
95 | "(VariableNode \"reco-id\")))"
96 |
97 | # XXX FIXME -- need to also delete the ListLink above.
98 | del_reco = "(cog-execute! (PutLink (DeleteLink " + pattern + \
99 | ") (GetLink " + pattern + ")))\n"
100 | face = del_reco + \
101 | "(cog-delete " + \
102 | " (EvaluationLink (PredicateNode \"visible face\") " + \
103 | " (ListLink (NumberNode \"" + str(faceid) + "\"))))\n" + \
104 | "(cog-delete " + \
105 | " (ListLink (NumberNode \"" + str(faceid) + "\")))\n" + \
106 | "(cog-delete (NumberNode \"" + str(faceid) + "\"))\n"
107 | return face
108 |
109 | def create_face_octomap(self):
110 | ldcmd = '(use-modules (opencog pointmem))'
111 | cmd = '(cog-pointmem-create-map (ConceptNode "'+self.OCTOMAP_NAME+'") \
112 | (ListLink (NumberNode "'+ self.SPATIAL_RESOLUTION+'") \
113 | (NumberNode "'+ self.TIME_RESOLUTION+'") \
114 | (NumberNode "'+ self.TIME_UNITS+'")))'
115 | cmd = ldcmd + "\n" + cmd + "\n"
116 | print("Sending message %s " % cmd)
117 | netcat(self.hostname, self.port, cmd)
118 |
119 | # Face positions in the space-server
120 | def update_face_octomap(self, faceid, xx, yy, zz):
121 | face = '(cog-pointmem-map-atom (ConceptNode "'+self.OCTOMAP_NAME+'") \
122 | (NumberNode "'+ str(faceid)+'" (av 5 0 0)) \
123 | (ListLink (NumberNode "'+ str(xx)+'") \
124 | (NumberNode "'+ str(yy)+'") \
125 | (NumberNode "'+ str(zz)+'")))'
126 | face = face + "\n"
127 | print("Sending message %s " % face)
128 | netcat(self.hostname, self.port, face)
129 |
130 | # --------------------------------------------------------
131 |
132 | def face_recognition(self, tracker_id, name):
133 | '''
134 | Associate a face-recognition ID with a face-tracker ID.
135 |
136 | `tracker_id` is the ID that the 3D face-location tracker is using.
137 | Currently, the tracker-ID is an integer, stored as a NumberNode
138 | in the atomspace.
139 |
140 | `name` is "0" for an unrecognized face, and some other string
141 | for a recognized face. It is currently stored as a ConceptNode.
142 | '''
143 | fc = '(make-recognized-face ' + str(tracker_id) + ' "' + name + '")\n'
144 | netcat(self.hostname, self.port, fc)
145 |
146 | # --------------------------------------------------------
147 | # Speech-to-text stuff
148 | def who_said(self, stt):
149 | spoke = "(who-said? \"" + stt + "\")\n"
150 | netcat(self.hostname, self.port, spoke)
151 |
152 | # Pass the text that STT heard into opencog.
153 | # Rather than setting state, we're going to trigger a script, here.
154 | def perceived_text(self, text):
155 | netcat(self.hostname, self.port,
156 | '(cog-evaluate! (PutLink (DefinedPredicate "heard text")' +
157 | ' (SentenceNode "' + text + '")))')
158 |
159 | # Affect in speech
160 | # Indicate that the robot heard friendly speech
161 | def affect_happy(self):
162 | netcat(self.hostname, self.port, "(State chat-affect chat-happy)")
163 |
164 | # Indicate that the robot heard negative speech
165 | def affect_negative(self):
166 | netcat(self.hostname, self.port, "(State chat-affect chat-negative)")
167 |
168 | # --------------------------------------------------------
169 | # Text-to-speech stuff
170 | # Let atomspace know that vocalization has started or ended.
171 | def vocalization_started(self):
172 | netcat(self.hostname, self.port, "(State chat-state chat-start)")
173 |
174 | def vocalization_ended(self):
175 | netcat(self.hostname, self.port, "(State chat-state chat-stop)")
176 |
177 | # --------------------------------------------------------
178 | # Sound localization -- send 3D xyz coordinate of sound source
179 | def update_sound(self, x, y, z):
180 | snd = "(map-sound " + str(x) + " " + str(y) + " " + str(z) + ")\n"
181 | netcat(self.hostname, self.port, snd)
182 |
183 | def audio_energy(self, decibel):
184 | # A StateLink is used because evaluation of psi-rules should
185 | # only depend on the most recent value.
186 | deci = '(StateLink (AnchorNode "Decibel value") ' + \
187 | ' (NumberNode ' + str(decibel) + '))\n'
188 | netcat(self.hostname, self.port, deci)
189 |
190 | # Loud bangs, explosions, hand-claps, shouts.
191 | def audio_bang(self, decibel):
192 | loud = '(StateLink (AnchorNode "Sudden sound change value")' + \
193 | ' (NumberNode ' + str(decibel) + '))\n'
194 | netcat(self.hostname, self.port, loud)
195 |
196 | # Saliency location
197 | # Degree of the salient point
198 | def saliency(self, x, y, z, deg):
199 | sal = '(StateLink (AnchorNode "Salient location")' + \
200 | '(List (NumberNode '+ str(x)+ ')' + \
201 | ' (NumberNode '+ str(y) + ')' + \
202 | ' (NumberNode '+ str(z) + ')))\n' + \
203 | '(StateLink (AnchorNode "Salient degree")' + \
204 | ' (NumberNode '+ str(deg) + '))\n'
205 | netcat(self.hostname, self.port, sal)
206 |
207 | # Room luminance: <=25 dark, <=40 normal, >40 bright
208 | def room_brightness(self, bright):
209 | room = '(StateLink (AnchorNode "luminance")' +\
210 | ' (NumberNode ' + str(bright) +'))\n'
211 | netcat(self.hostname, self.port, room)
212 |
213 | # --------------------------------------------------------
214 | # Generic
215 | def evaluate_scm(self, scm_string):
216 | netcat(self.hostname, self.port, scm_string)
217 |
--------------------------------------------------------------------------------
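For reference, a minimal usage sketch of the class above (assuming a cogserver is already listening on localhost:17001, as hard-coded in the constructor). The sensor nodes below all follow this same pattern: instantiate `AtomicMsgs` once, then call its methods, each of which netcats a scheme snippet to the cogserver.

```python
# Minimal sketch: push a "visible face" into the atomspace by hand.
# Assumes a cogserver is listening on localhost:17001; the face-id 42
# is an arbitrary example value.
from atomic_msgs import AtomicMsgs

atomo = AtomicMsgs()
atomo.create_face_octomap()                    # set up the "faces" point-memory map
atomo.add_face_to_atomspace(42)                # face-id 42 is now "visible"
atomo.update_face_octomap(42, 1.0, 0.2, 0.0)   # ... and has a 3D location
atomo.remove_face_from_atomspace(42)           # face-id 42 is gone again
```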
/sensors/audio_power.py:
--------------------------------------------------------------------------------
1 | #
2 | # audio_power.py - Sound energy and power.
3 | # Copyright (C) 2016 Hanson Robotics
4 | #
5 | # This library is free software; you can redistribute it and/or
6 | # modify it under the terms of the GNU Lesser General Public
7 | # License as published by the Free Software Foundation; either
8 | # version 2.1 of the License, or (at your option) any later version.
9 | #
10 | # This library is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | # Lesser General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Lesser General Public
16 | # License along with this library; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 | # 02110-1301 USA
19 |
20 | import rospy
21 | from atomic_msgs import AtomicMsgs
22 |
23 | from hr_msgs.msg import audiodata
24 |
25 | '''
26 | This implements a ROS node that subscribes to the `audio_sensors`
27 | topic, and passes the audio power data to the cogserver. This is
28 | used by OpenCog to react to loud sounds, sudden changes, and
29 | general background noise levels.
30 |
31 | An enhancement would be a a neural net that responded to clapping,
32 | cheering, or other common sound events, identified them, labelled
33 | them, and passed them on into the atomspace.
34 | '''
35 |
36 | class AudioPower:
37 | def __init__(self):
38 | self.atomo = AtomicMsgs()
39 | rospy.Subscriber("audio_sensors", audiodata, self.audio_cb)
40 |
41 | def audio_cb(self, data):
42 | #print "SuddenChange {}".format(data.SuddenChange)
43 | if data.SuddenChange:
44 | print "Heard a loud bang!"
45 | self.atomo.audio_bang(1.0)
46 | else:
47 | self.atomo.audio_bang(0.0)
48 |
49 | self.atomo.audio_energy(data.Decibel)
50 |
--------------------------------------------------------------------------------
/sensors/chat_track.py:
--------------------------------------------------------------------------------
1 | #
2 | # chat_track.py - Misc chatbot message handling.
3 | # Copyright (C) 2014,2015,2016 Hanson Robotics
4 | # Copyright (C) 2015,2016 Linas Vepstas
5 | #
6 | # This library is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License as published by the Free Software Foundation; either
9 | # version 2.1 of the License, or (at your option) any later version.
10 | #
11 | # This library is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 | # Lesser General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Lesser General Public
17 | # License along with this library; if not, write to the Free Software
18 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 |
20 | import rospy
21 | from hr_msgs.msg import ChatMessage
22 | from atomic_msgs import AtomicMsgs
23 |
24 | '''
25 | Subscribe to text ROS messages, typically from the speech-to-text
26 | subsystem, and pass these onwards into the cogserver.
27 |
28 | Unit test by saying:
29 | rostopic pub --once chatbot_speech hr_msgs/ChatMessage '{utterance: "Hello Sophia!", confidence: 90}'
30 | '''
31 |
32 | class ChatTrack:
33 |
34 | def __init__(self):
35 | self.atomo = AtomicMsgs()
36 | rospy.Subscriber("chatbot_speech", ChatMessage,
37 | self.chat_perceived_text_cb)
38 |
39 | # ---------------------------------------------------------------
40 | # Speech-to-text callback
41 | def chat_perceived_text_cb(self, msg):
42 | if msg.confidence >= 50:
43 | # XXX FIXME WTF Why are there two of these????
44 | # Surely one of these is enough to do the trick!
45 | self.atomo.who_said(msg.utterance)
46 | self.atomo.perceived_text(msg.utterance)
47 |
--------------------------------------------------------------------------------
/sensors/control.py:
--------------------------------------------------------------------------------
1 | #
2 | # control.py - Control messages issued by the operator or puppeteer
3 | # Copyright (C) 2016, 2017 Hanson Robotics
4 | #
5 | # This library is free software; you can redistribute it and/or
6 | # modify it under the terms of the GNU Lesser General Public
7 | # License as published by the Free Software Foundation; either
8 | # version 2.1 of the License, or (at your option) any later version.
9 | #
10 | # This library is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | # Lesser General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Lesser General Public
16 | # License along with this library; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 | # 02110-1301 USA
19 |
20 | import rospy
21 | from std_msgs.msg import String
22 | from atomic_msgs import AtomicMsgs
23 |
24 | '''
25 | This implements a ROS node that subscribes to a mish-mash of
26 | control and puppeteering topics. Most of these are generated by
27 | the GUI control panel. Some of these are fairly fundamental
28 | (turning the robot on and off) and some are hacky (fine-tuning
29 | misc openpsi parameters).
30 | '''
31 |
32 | class Control:
33 | def __init__(self):
34 | self.atomo = AtomicMsgs()
35 | rospy.Subscriber("/behavior_switch", String,
36 | self.behavior_switch_cb)
37 |
38 | # The 'btree_on' and 'btree_off' data-strings shouldn't be used,
39 | # as they are meant for switching on and off non-opencog demos.
40 | def behavior_switch_cb(self, data):
41 | print "Received /behavior_switch " + data.data
42 | if data.data == "opencog_on":
43 | self.atomo.wholeshow_start()
44 |
45 | if data.data == "opencog_off":
46 | self.atomo.wholeshow_stop()
47 |
--------------------------------------------------------------------------------
/sensors/control_psi.py:
--------------------------------------------------------------------------------
1 | #
2 | # control_psi.py - Control messages issued by the operator or puppeteer
3 | # Copyright (C) 2016, 2017 Hanson Robotics
4 | #
5 | # This library is free software; you can redistribute it and/or
6 | # modify it under the terms of the GNU Lesser General Public
7 | # License as published by the Free Software Foundation; either
8 | # version 2.1 of the License, or (at your option) any later version.
9 | #
10 | # This library is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | # Lesser General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Lesser General Public
16 | # License along with this library; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 | # 02110-1301 USA
19 |
20 | import rospy
21 | import rosmsg
22 | import yaml
23 | from atomic_msgs import AtomicMsgs
24 | from dynamic_reconfigure.msg import Config
25 |
26 |
27 | '''
28 | This implements a ROS node that subscribes to a mish-mash of
29 | control and puppeteering topics. Most of these are generated by
30 | the GUI control panel. Some of these are fairly fundamental
31 | (turning the robot on and off) and some are hacky (fine-tuning
32 | misc openpsi parameters).
33 | '''
34 |
35 | class ControlPsi:
36 | def __init__(self):
37 | # A list of parameter names that are mirrored in opencog
38 | # for controlling psi-rules
39 | self.param_list = []
40 | # Parameter dictionary that is used for updating states
41 | # recorded in the atomspace. It is used to cache the
42 | # atomspace values.
43 | self.param_dict = {}
44 |
45 | self.atomo = AtomicMsgs()
46 | rospy.Subscriber("/opencog_control/parameter_updates", Config,
47 | self.openpsi_control_cb)
48 |
49 | # For web-ui interface
50 | def openpsi_control_cb(self, data):
51 | """
52 | This function is used for interactively modifying the weight of
53 | openpsi rules.
54 | """
55 | param_yaml = rosmsg.get_yaml_for_msg(data.doubles + data.ints)
56 | self.param_list = yaml.load(param_yaml)
57 |
58 | for i in self.param_list:
59 | # Populate the parameter dictionary
60 | if i["name"] not in self.param_dict:
61 | self.param_dict[i["name"]] = i["value"]
62 |
63 | if i["name"] == "max_waiting_time":
64 | scm_str = '''(StateLink
65 | (AnchorNode "Chatbot: MaxWaitingTime")
66 | (TimeNode %f))''' % (i["value"])
67 | else:
68 | scm_str = '''(StateLink
69 | (ListLink
70 | (ConceptNode "OpenPsi: %s")
71 | (ConceptNode "OpenPsi: weight"))
72 | (NumberNode %f))''' % (i["name"], i["value"])
73 |
74 | self.atomo.evaluate_scm(scm_str)
75 |
--------------------------------------------------------------------------------
/sensors/face_recog.py:
--------------------------------------------------------------------------------
1 | #
2 | # face_recog.py - Face Recognition
3 | # Copyright (C) 2016 Hanson Robotics
4 | #
5 | # This library is free software; you can redistribute it and/or
6 | # modify it under the terms of the GNU Lesser General Public
7 | # License as published by the Free Software Foundation; either
8 | # version 2.1 of the License, or (at your option) any later version.
9 | #
10 | # This library is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | # Lesser General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Lesser General Public
16 | # License along with this library; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 | # 02110-1301 USA
19 |
20 | import rospy
21 | from atomic_msgs import AtomicMsgs
22 |
23 | from hr_msgs.msg import f_id
24 | from hr_msgs.msg import faces_ids
25 |
26 | # Push information about recognized faces into the atomspace.
27 | #
28 | # This listens to the `/camera/face_recognition` ROS topic. Note that
29 | # some other face-id subsystem publishes face-recognition messages
30 | # to the `/camera/face_locations` topic, using a different message
31 | # format. (See the `face_track.py` file.) I am not sure which subsystem
32 | # publishes where, or why. XXX FIXME Figure out why there are two
33 | # different face-recognition subsystems in use, document them, and
34 | # standardize on the message formats used.
35 | class FaceRecog:
36 | def __init__(self):
37 | self.atomo = AtomicMsgs()
38 | rospy.Subscriber('/camera/face_recognition', faces_ids, self.face_cb)
39 |
40 | def face_cb(self, data):
41 | for fc in data.faces:
42 | self.atomo.face_recognition(fc.id, fc.name);
43 |
--------------------------------------------------------------------------------
/sensors/face_track.py:
--------------------------------------------------------------------------------
1 | #
2 | # face_track.py - Registry and tracking of visible human faces
3 | # Copyright (C) 2014,2015,2016 Hanson Robotics
4 | # Copyright (C) 2015,2016 Linas Vepstas
5 | #
6 | # This library is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License as published by the Free Software Foundation; either
9 | # version 2.1 of the License, or (at your option) any later version.
10 | #
11 | # This library is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 | # Lesser General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Lesser General Public
17 | # License along with this library; if not, write to the Free Software
18 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 |
20 | import rospy
21 | import logging
22 |
23 | from std_msgs.msg import Int32
24 | # FIXME: In developer role of hrtool hr_msgs and pi_face_tracker conflict, why?
25 | # from hr_msgs.msg import FaceEvent, Faces
26 | from pi_face_tracker.msg import FaceEvent, Faces
27 |
28 | from atomic_msgs import AtomicMsgs
29 |
30 | logger = logging.getLogger('hr.eva_behavior.face_track')
31 |
32 | # Thin python wrapper, to subscribe to face-tracking ROS messages,
33 | # (face ID's, 3D face locations) and then re-wrap these as OpenCog
34 | # atoms, via AtomicMsgs, and forward them on into the OpenCog
35 | # space-time server.
36 | #
37 | class FaceTrack:
38 |
39 | # Control flags. Ideally, FaceTrack should publish targets using
40 | # ros_commo EvaControl class.
41 | C_EYES = 16
42 | C_FACE = 32
43 | # Face tracking will be disabled if neither of these flags is set.
44 | # (this allows for a manual over-ride of face-tracking by other
45 | # control processes.)
46 | C_FACE_TRACKING = C_FACE | C_EYES
47 |
48 | def __init__(self):
49 |
50 | # The OpenCog API. This is used to send face data to OpenCog.
51 | self.atomo = AtomicMsgs()
52 | self.atomo.create_face_octomap()
53 |
54 | # List of currently visible faces
55 | self.visible_faces = []
56 |
57 | # Subscribed pi_vision topics and events
58 | self.TOPIC_FACE_EVENT = "/camera/face_event"
59 | self.EVENT_NEW_FACE = "new_face"
60 | self.EVENT_LOST_FACE = "lost_face"
61 | self.EVENT_RECOGNIZED_FACE = "recognized_face"
62 | # Overrides current face being tracked by WebUI
63 | self.EVENT_TRACK_FACE = "track_face"
64 |
65 | self.TOPIC_FACE_LOCATIONS = "/camera/face_locations"
66 |
67 | # Face appearance/disappearance from pi_vision
68 | rospy.Subscriber(self.TOPIC_FACE_EVENT, FaceEvent, self.face_event_cb)
69 |
70 | # Face location information from pi_vision
71 | rospy.Subscriber(self.TOPIC_FACE_LOCATIONS, Faces, self.face_loc_cb)
72 |
73 | rospy.Subscriber("/behavior_control", Int32, self.behavior_control_cb)
74 |
75 | # Control Eyes and face by default
76 | self.control_mode = 255
77 |
78 | # ----------------------------------------------------------
79 | # Start tracking a face
80 | def add_face(self, faceid):
81 | if faceid in self.visible_faces:
82 | return
83 |
84 | self.visible_faces.append(faceid)
85 |
86 | logger.info("New face added to visible faces: " +
87 | str(self.visible_faces))
88 | self.atomo.add_face_to_atomspace(faceid)
89 |
90 |
91 | # Stop tracking a face
92 | def remove_face(self, faceid):
93 | self.atomo.remove_face_from_atomspace(faceid)
94 |
95 | if faceid in self.visible_faces:
96 | self.visible_faces.remove(faceid)
97 |
98 | logger.info("Lost face; visible faces now: " + str(self.visible_faces))
99 |
100 | # Force the robot to turn its attention to the given
101 | # face, i.e. to interact with, and talk with, that face.
102 | def track_face(self, faceid):
103 | if faceid in self.visible_faces:
104 | logger.info("Face requested interaction: " + str(faceid))
105 | self.atomo.add_tracked_face_to_atomspace(faceid)
106 |
107 | # ----------------------------------------------------------
108 | # pi_vision ROS callbacks
109 |
110 | # pi_vision ROS callback, called when a new face is detected,
111 | # or a face is lost. Also called for recognized faces.
112 | #
113 | # This callback handles recognized faces using a special message
114 | # format, published on the `/camera/face_event` topic. Note that
115 | # there is also a different topic for recognized faces, called
116 | # `/camera/face_recognition`. See the `face_recog.py` file for
117 | # details. I am not sure which subsystem publishes which message
118 | # type. XXX FIXME - figure out why there are two different
119 | # face-recognition subsystems, and standardize on which one we
120 | # should use.
121 | def face_event_cb(self, data):
122 | if not self.control_mode & self.C_FACE_TRACKING:
123 | return
124 |
125 | if data.face_event == self.EVENT_NEW_FACE:
126 | self.add_face(data.face_id)
127 |
128 | elif data.face_event == self.EVENT_LOST_FACE:
129 | self.remove_face(data.face_id)
130 |
131 | elif data.face_event == self.EVENT_TRACK_FACE:
132 | self.track_face(data.face_id)
133 |
134 | elif data.face_event == self.EVENT_RECOGNIZED_FACE:
135 | self.atomo.face_recognition(data.face_id, data.recognized_id)
136 |
137 | # pi_vision ROS callback, called when pi_vision has new face
138 | # location data for us. This happens frequently (about 10x/second)
139 | def face_loc_cb(self, data):
140 | if not self.control_mode & self.C_FACE_TRACKING:
141 | return
142 |
143 | for face in data.faces:
144 | # Update location of a face. The location is stored in the
145 | # OpenCog space server (octomap).
146 | if face.id in self.visible_faces:
147 | self.atomo.update_face_octomap(face.id,
148 | face.point.x, face.point.y, face.point.z)
149 |
150 |
151 | # Enable/disable Opencog face-tracking. This is driven by the
152 | # master control GUI. XXX FIXME -- why should this ever be disabled?
153 | # OpenCog should always know about faces; perhaps it is control of
154 | # head and eye movements that should be disabled?
155 | def behavior_control_cb(self, data):
156 | # Is facetracking currently enabled?
157 | facetracking = self.control_mode & self.C_FACE_TRACKING
158 | self.control_mode = data.data
159 | print("New Control mode %i" % self.control_mode )
160 |
161 | # If face-tracking was enabled, and is now disabled ...
162 | if facetracking > 0 and self.control_mode & self.C_FACE_TRACKING == 0:
163 | self.atomo.update_ft_state_to_atomspace(False)
164 | # Need to clear faces:
165 | for face in self.visible_faces[:]:
166 | self.remove_face(face)
167 |
168 | elif self.control_mode & self.C_FACE_TRACKING > 0:
169 | self.atomo.update_ft_state_to_atomspace(True)
170 |
--------------------------------------------------------------------------------
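As a usage sketch (hypothetical node name; it assumes a running roscore and the flag values defined in `FaceTrack` above), another ROS node can switch OpenCog face-tracking on and off by publishing an `Int32` bitmask to `/behavior_control`:

```python
# Sketch: toggle OpenCog face-tracking from some other node.
# The flag values (16 = eyes, 32 = face) mirror FaceTrack.C_EYES and
# FaceTrack.C_FACE; a bitmask of 0 disables tracking entirely.
import rospy
from std_msgs.msg import Int32

rospy.init_node("behavior_control_example")    # hypothetical node name
pub = rospy.Publisher("/behavior_control", Int32, queue_size=1)
rospy.sleep(1.0)               # give the publisher time to connect

pub.publish(Int32(16 | 32))    # enable both eye and face tracking
# pub.publish(Int32(0))        # disable; currently-visible faces get cleared
```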
/sensors/main.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | #
3 | # main.py - Main entry point for the ROS-to-OpenCog converter
4 | # Copyright (C) 2015 Hanson Robotics
5 | #
6 | # This program is free software; you can redistribute it and/or modify
7 | # it under the terms of the GNU Affero General Public License v3 as
8 | # published by the Free Software Foundation and including the exceptions
9 | # at http://opencog.org/wiki/Licenses
10 | #
11 | # This program is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | # GNU General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Affero General Public
17 | # License
18 | # along with this program; if not, write to:
19 | # Free Software Foundation, Inc.,
20 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 |
22 | # XXX To be removed when https://github.com/hansonrobotics/HEAD/issues/618
23 | # is resolved nicely
24 | import sys
25 | sys.path.append("/opt/hansonrobotics/ros/lib/python2.7/dist-packages/")
26 |
27 | import logging
28 | import rospy
29 | from affect import Affect
30 | from audio_power import AudioPower
31 | from chat_track import ChatTrack
32 | from control import Control
33 | from control_psi import ControlPsi
34 | from face_recog import FaceRecog
35 | from face_track import FaceTrack
36 | from sound_track import SoundTrack
37 | from room_brightness import RoomBrightness
38 | from saliency_track import SaliencyTrack
39 | from tts_feedback import TTSFeedback
40 |
41 | rospy.init_node("OpenCog_ROS_bridge")
42 | logging.info("Starting the OpenCog ROS Bridge")
43 | print "Starting the OpenCog ROS Bridge"
44 |
45 | co = Control()
46 | cp = ControlPsi()
47 | af = Affect()
48 | ap = AudioPower()
49 | ct = ChatTrack()
50 | fc = FaceRecog()
51 | ft = FaceTrack()
52 | st = SoundTrack()
53 | br = RoomBrightness()
54 | sl = SaliencyTrack()
55 | tf = TTSFeedback()
56 |
57 | try:
58 | rospy.spin()
59 | except rospy.ROSInterruptException as e:
60 | print(e)
61 |
62 | print "Exit OpenCog ROS bridge"
63 |
--------------------------------------------------------------------------------
/sensors/netcat.py:
--------------------------------------------------------------------------------
1 | #
2 | # netcat.py - Quick-n-dirty network interface
3 | # Copyright (C) 2015 Linas Vepstas
4 | #
5 | # This program is free software; you can redistribute it and/or modify
6 | # it under the terms of the GNU Affero General Public License v3 as
7 | # published by the Free Software Foundation and including the exceptions
8 | # at http://opencog.org/wiki/Licenses
9 | #
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Affero General Public License
16 | # along with this program; if not, write to:
17 | # Free Software Foundation, Inc.,
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 |
20 |
21 | import socket
22 |
23 | # This implements netcat in python.
24 | #
25 | # If you don't know what netcat is, then you should google it.
26 | # It's important and not complicated.
27 | #
28 | def netcat(hostname, port, content) :
29 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
30 |
31 | # If the cogserver is down, the connection will fail.
32 | try:
33 | s.connect((hostname, port))
34 | except socket.error as msg:
35 | print "Connect failed: ", msg
36 | s.close()
37 | return 1 # non-zero means failure
38 |
39 | s.sendall(content)
40 | s.shutdown(socket.SHUT_WR)
41 | while True:
42 | data = s.recv(1024)
43 | if not data or data == "":
44 | break
45 | # print "Received:", repr(data)
46 | # print "Connection closed."
47 | s.close()
48 | return 0 # zero means success
49 |
--------------------------------------------------------------------------------
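A quick usage sketch (assuming a cogserver listening on the localhost:17001 port used elsewhere in this directory): any well-formed scheme snippet can be injected this way, which is also handy for poking at the atomspace by hand while debugging.

```python
# Sketch: send a raw scheme snippet straight to the cogserver shell.
from netcat import netcat

rc = netcat("localhost", 17001, "(StateLink chat-state chat-stop)\n")
if rc != 0:
    print("Is the cogserver running on port 17001?")
```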
/sensors/room_brightness.py:
--------------------------------------------------------------------------------
1 | #
2 | # room_brightness.py - Room brightness (luminance).
3 | # Copyright (C) 2016 Hanson Robotics
4 | #
5 | # This library is free software; you can redistribute it and/or
6 | # modify it under the terms of the GNU Lesser General Public
7 | # License as published by the Free Software Foundation; either
8 | # version 2.1 of the License, or (at your option) any later version.
9 | #
10 | # This library is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | # Lesser General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Lesser General Public
16 | # License along with this library; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 | # 02110-1301 USA
19 |
20 | import rospy
21 | from atomic_msgs import AtomicMsgs
22 |
23 | from hr_msgs.msg import Luminance
24 |
25 | '''
26 | This implements a ROS node that subscribes to the
27 | `/opencog/room_luminance` topic and relays it to
28 | the cogserver.
29 | '''
30 |
31 | class RoomBrightness:
32 | def __init__(self):
33 | self.atomo = AtomicMsgs()
34 | rospy.Subscriber('/opencog/room_luminance', Luminance, self.bright_cb)
35 |
36 | def bright_cb(self, data):
37 | self.atomo.room_brightness(data.value)
38 |
--------------------------------------------------------------------------------
/sensors/saliency_track.py:
--------------------------------------------------------------------------------
1 | #
2 | # saliency_track.py - Visual saliency tracking.
3 | # Copyright (C) 2016 Hanson Robotics
4 | #
5 | # This library is free software; you can redistribute it and/or
6 | # modify it under the terms of the GNU Lesser General Public
7 | # License as published by the Free Software Foundation; either
8 | # version 2.1 of the License, or (at your option) any later version.
9 | #
10 | # This library is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | # Lesser General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Lesser General Public
16 | # License along with this library; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 | # 02110-1301 USA
19 |
20 | import rospy
21 | from atomic_msgs import AtomicMsgs
22 |
23 | from hr_msgs.msg import targets
24 |
25 | '''
26 | This implements a ROS node that subscribes to the `/nmpt_saliency_point`
27 | topic, and passes the salient-point location and degree to the cogserver.
28 | '''
29 |
30 | class SaliencyTrack:
31 | def __init__(self):
32 | self.atomo = AtomicMsgs()
33 | rospy.Subscriber('/nmpt_saliency_point', targets, self.sal_cb)
34 |
35 | def sal_cb(self, data):
36 | loc = data.positions[0]
37 | z=-(loc.y*2.0-1.0)
38 | x=1.0
39 | y=-1.0*(loc.x*2.0-1.0)
40 | #print "locations x="+str(x)+" y="+str(y)+" z="+str(z)+"\n"
41 | self.atomo.saliency(x,y,z,data.degree)
42 |
--------------------------------------------------------------------------------
/sensors/sound_track.py:
--------------------------------------------------------------------------------
1 | #
2 | # sound_track.py - Tracking of sound sources
3 | # Copyright (C) 2014,2015,2016 Hanson Robotics
4 | # Copyright (C) 2015,2016 Linas Vepstas
5 | #
6 | # This library is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License as published by the Free Software Foundation; either
9 | # version 2.1 of the License, or (at your option) any later version.
10 | #
11 | # This library is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 | # Lesser General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Lesser General Public
17 | # License along with this library; if not, write to the Free Software
18 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 |
20 | import rospy
21 | from atomic_msgs import AtomicMsgs
22 | from geometry_msgs.msg import PoseStamped # for sound localization
23 |
24 | # Thin python wrapper, to subscribe to ManyEars sound-source ROS
25 | # messages, and then re-wrap these as opencog atoms, via AtomicMsgs,
26 | # and forward them on into the OpenCog space-time server.
27 | #
28 | class SoundTrack:
29 |
30 | def __init__(self):
31 |
32 | # The OpenCog API. This is used to send sound localization
33 | # data to OpenCog.
34 | self.atomo = AtomicMsgs()
35 |
36 | # Sound localization
37 | parameter_name = "sound_localization/mapping_matrix"
38 | if rospy.has_param(parameter_name):
39 | self.sl_matrix = rospy.get_param(parameter_name)
40 | rospy.Subscriber("/manyears/source_pose", PoseStamped, \
41 | self.sound_cb)
42 | print "Sound localization is enabled"
43 | else :
44 | print "Sound localization is disabled"
45 |
46 | # ---------------------------------------------------------------
47 | # Store the location of the strongest sound-source in the
48 | # OpenCog space server. This data arrives at a rate of about
49 | # 30 Hz, currently, from ManyEars.
50 | def sound_cb(self, msg):
51 | # Convert to camera coordinates, using an affine matrix
52 | # (which combines a rotation and translation).
53 | #
54 | # A typical sl_matrix looks like this:
55 | #
56 | # 0.943789 0.129327 0.304204 0.00736024
57 | # -0.131484 0.991228 -0.0134787 0.00895614
58 | # -0.303278 -0.0272767 0.952513 0.0272001
59 | # 0 0 0 1
60 | #
61 | vs = [msg.pose.position.x, \
62 | msg.pose.position.y, \
63 | msg.pose.position.z, \
64 | 1]
65 |
66 | r = [0, 0, 0, 0]
67 | for i in range(0,3):
68 | for j in range(0,4):   # use all four columns, so the translation is applied
69 | r[i] += self.sl_matrix[i][j] * vs[j]
70 |
71 | self.atomo.update_sound(r[0], r[1], r[2])
72 |
73 | # ----------------------------------------------------------
74 |
--------------------------------------------------------------------------------
/sensors/tts_feedback.py:
--------------------------------------------------------------------------------
1 | #
2 | # tts_feedback.py - TTS (text to speech) feedback.
3 | # Copyright (C) 2016,2017 Hanson Robotics
4 | #
5 | # This library is free software; you can redistribute it and/or
6 | # modify it under the terms of the GNU Lesser General Public
7 | # License as published by the Free Software Foundation; either
8 | # version 2.1 of the License, or (at your option) any later version.
9 | #
10 | # This library is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 | # Lesser General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Lesser General Public
16 | # License along with this library; if not, write to the Free Software
17 | # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 | # 02110-1301 USA
19 |
20 | import rospy
21 | from std_msgs.msg import String
22 | from atomic_msgs import AtomicMsgs
23 |
24 | '''
25 | This implements a ROS node that subscribes to the `speech_events`
26 | topic, and passes these perceptions back to the cogserver.
27 | Currently, this is used to tell the cogserver when the TTS module
28 | has started, and finished vocalizing. That is, we send it a
29 | sentence; we just want to know when it is actually saying it.
30 | '''
31 |
32 | class TTSFeedback:
33 | # Receive messages that indicate that TTS (or chatbot) has started
34 | # or finished vocalizing.
35 | def __init__(self):
36 | self.atomo = AtomicMsgs()
37 | rospy.Subscriber("speech_events", String, self.speech_event_cb)
38 |
39 | # Notification from text-to-speech (TTS) module, that it has
40 | # started, or stopped vocalizing. This message might be published
41 | # by either the TTS module itself, or by some external chatbot.
42 | #
43 | # rostopic pub --once speech_events std_msgs/String start
44 | # rostopic pub --once speech_events std_msgs/String stop
45 | def speech_event_cb(self, speech_event):
46 | print('speech_event, type ' + speech_event.data)
47 | if speech_event.data == "start":
48 | rospy.loginfo("starting speech")
49 | self.atomo.vocalization_started()
50 | elif speech_event.data == "stop":
51 | rospy.loginfo("ending speech")
52 | self.atomo.vocalization_ended()
53 | elif speech_event.data.startswith("duration"):
54 | rospy.loginfo("speech_event.data {}".format(speech_event.data))
55 | else:
56 | rospy.logerr("unknown speech_events message: " + speech_event.data)
57 |
--------------------------------------------------------------------------------
/webui/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | INSTALL (FILES
2 | webui.scm
3 | DESTINATION "${DATADIR}/scm/opencog/"
4 | )
5 |
6 | INSTALL (FILES
7 | atomic_psi.py
8 | DESTINATION "${DATADIR}/python/"
9 | )
10 |
--------------------------------------------------------------------------------
/webui/README.md:
--------------------------------------------------------------------------------
1 |
2 | WebUI Control panel
3 | ===================
4 |
5 | Stuff for controlling the OpenPsi parameters via a web user interface.
6 |
--------------------------------------------------------------------------------
/webui/atomic_psi.py:
--------------------------------------------------------------------------------
1 | #
2 | # atomic_psi.py - OpenCog python schema to control OpenPsi parameters
3 | #
4 | # Copyright (C) 2016 Hanson Robotics
5 | #
6 | # This program is free software; you can redistribute it and/or modify
7 | # it under the terms of the GNU Affero General Public License v3 as
8 | # published by the Free Software Foundation and including the exceptions
9 | # at http://opencog.org/wiki/Licenses
10 | #
11 | # This program is distributed in the hope that it will be useful,
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 | # GNU General Public License for more details.
15 | #
16 | # You should have received a copy of the GNU Affero General Public License
17 | # along with this program; if not, write to:
18 | # Free Software Foundation, Inc.,
19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 |
21 | from psi_ctrl import PsiControl
22 | from opencog.atomspace import TruthValue
23 |
24 | # The ROS layer.
25 | psi = PsiControl()
26 |
27 | # Global functions, because that's what PythonEval expects.
28 | # Would be great if PythonEval was fixed to work smarter, not harder.
29 | #
30 | # Must return TruthValue, since EvaluationLinks expect TruthValues.
31 |
32 | # Update dynamic parameter cache
33 | def update_opencog_control_parameter(name_node, value_node):
34 | try:
35 | name = name_node.name
36 | value = float(value_node.name)
37 | psi.update_opencog_control_parameter(name, value)
38 | return TruthValue(1, 1)
39 | except:
40 | return TruthValue(0, 1)
41 |
42 | # Update dynamic parameters
43 | def push_parameter_update():
44 | psi.push_parameter_update()
45 | return TruthValue(1, 1)
46 |
--------------------------------------------------------------------------------
/webui/psi_ctrl.py:
--------------------------------------------------------------------------------
1 | #
2 | # psi_ctrl.py - ROS messaging module for OpenPsi control.
3 | # Copyright (C) 2016 Hanson Robotics
4 | #
5 | # This program is free software; you can redistribute it and/or modify
6 | # it under the terms of the GNU Affero General Public License v3 as
7 | # published by the Free Software Foundation and including the exceptions
8 | # at http://opencog.org/wiki/Licenses
9 | #
10 | # This program is distributed in the hope that it will be useful,
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | # GNU General Public License for more details.
14 | #
15 | # You should have received a copy of the GNU Affero General Public License
16 | # along with this program; if not, write to:
17 | # Free Software Foundation, Inc.,
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 |
20 | import rospy
21 | import logging
22 | import dynamic_reconfigure.client
23 |
24 | logger = logging.getLogger('hr.OpenPsi')
25 |
26 | class PsiControl():
27 |
28 | def update_opencog_control_parameter(self, name, value):
29 | """
30 | This function is used for updating the ros parameters that are used to
31 | modify the weight of openpsi rules, when the changes in weight occur
32 | independently of changes in HEAD's web-ui.
33 | """
34 | update = False
35 | param_name = name[len(self.psi_prefix) - 1:]
36 |
37 | # Update parameter
38 | if (param_name in self.param_dict) and \
39 | (self.param_dict[param_name] != value):
40 | self.param_dict[param_name] = value
41 | self.update_parameters = True
42 |
43 |
44 | def push_parameter_update(self):
45 | if self.update_parameters and not rospy.is_shutdown():
46 | if self.client is None:
47 | return
48 | self.client.update_configuration(self.param_dict)
49 | self.update_parameters = False
50 |
51 | def __init__(self):
52 |
53 | # The below will hang until roscore is started!
54 | rospy.init_node("OpenPsi_control")
55 | print("Starting OpenCog OpenPsi Control Node")
56 |
57 | # ----------------
58 | # Parameter dictionary that is used for updating states
59 | # recorded in the atomspace. It is used to cache the
60 | # atomspace values, thus updating of the dictionary is
61 | # only made from opencog side (openpsi updating rule).
62 | self.param_dict = {}
63 |
64 | # For controlling when to push updates, for saving bandwidth.
65 | self.update_parameters = False
66 | self.psi_prefix = "OpenPsi: "
67 |
68 | # For web-ui based control of openpsi controlled psi-rules
69 | try:
70 | self.client = dynamic_reconfigure.client.Client("/opencog_control", timeout=2)
71 | except Exception:
72 | self.client = None
73 |
74 | # ----------------------------------------------------------------
75 |
--------------------------------------------------------------------------------
/webui/webui.scm:
--------------------------------------------------------------------------------
1 | ;
2 | ; webui.scm
3 | ;
4 | ; User Interface for OpenPsi control
5 |
6 | (define-module (opencog webui))
7 |
8 | (use-modules (opencog) (opencog atom-types) (opencog openpsi))
9 |
10 | ;
11 | ; NOTE: updating of parameters is divided into two steps: updating the
12 | ; parameter cache, and then pushing the update. This is done to simplify
13 | ; syncing the values: pushing a partially-updated cache results in the
14 | ; change being published to the /opencog_control/parameter_updates topic,
15 | ; thus resulting in an undesirable state in the atomspace.
16 |
17 | ; Update dynamic parameter cache
18 | (DefineLink
19 | (DefinedPredicate "update-opencog-control-parameter")
20 | (LambdaLink
21 | (VariableList
22 | (TypedVariableLink
23 | (VariableNode "psi-rule-alias")
24 | (TypeNode "ConceptNode"))
25 | (TypedVariableLink
26 | (VariableNode "psi-rule-weight")
27 | (TypeNode "NumberNode")))
28 | (Evaluation
29 | (GroundedPredicate "py: update_opencog_control_parameter")
30 | (List
31 | (VariableNode "psi-rule-alias")
32 | (VariableNode "psi-rule-weight")))
33 | ))
34 |
35 | ; Push dynamic parameter cache values
36 | (Define
37 | (DefinedPredicate "push-parameter-update")
38 | (Evaluation
39 | (GroundedPredicate "py: push_parameter_update")
40 | (List))
41 | )
42 |
43 | ; This is needed because the parameters (stored in ros_commo/param_dict)
44 | ; may be updated by a separate thread. The check below makes sure that
45 | ; the full set of parameters is pushed only after that thread has finished
46 | ; the update, so as to avoid pushing a half-updated set of parameters
47 | ; (which would then be re-applied to opencog via the msg published on the
48 | ; /opencog_control/parameter_updates topic).
49 | (Define
50 | (DefinedPredicate "parameter-update-is-done")
51 | (Equal
52 | (Set psi-controller-idle)
53 | (Get (State psi-controller (Variable "$x"))))
54 | )
55 |
56 | (Define
57 | (DefinedPredicate "update-web-ui")
58 | (SequentialAnd
59 | (True (PutLink
60 | (DefinedPredicate "update-opencog-control-parameter")
61 | (DefinedSchema "psi-controlled-rule-state")))
62 | (DefinedPredicate "parameter-update-is-done")
63 | (DefinedPredicate "push-parameter-update")
64 | ))
65 |
66 | ; -------------------------------------------------------------
67 | ; Now hook it up.
68 |
69 | ; Any changes to the weight for controlled-psi-rules are pushed to
70 | ; ros dynamic-parameters. Thus the web-ui mirrors the opencog
71 | ; wholeshow state.
72 | (psi-rule (list (DefinedPredicate "ROS is running?"))
73 | (DefinedPredicate "update-web-ui")
74 | update-demand-satisfied (stv 1 1) update-demand)
75 |
76 | ; -------------------------------------------------------------
77 | *unspecified* ; Make the load be silent
78 |
--------------------------------------------------------------------------------