├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── SECURITY.md ├── azure-pipelines.yml ├── docker └── ubuntu-xenial-kinetic │ ├── Dockerfile │ ├── build.sh │ ├── run_docker_container.sh │ ├── runtime_test.py │ └── runtime_test.sh ├── docs ├── azure_resources.md ├── images │ ├── AzurePortalLanguageUnderstanding-Create-Succed-4.JPG │ ├── AzurePortalLanguageUnderstanding-Create.JPG │ ├── AzurePortalLanguageUnderstanding-CreateResource.JPG │ ├── AzurePortalLanguageUnderstanding-GetKey-5.JPG │ ├── AzurePortalLanguageUnderstandingSearch.JPG │ ├── ComputerVisionSetup-1.JPG │ ├── ComputerVisionSetup-2.JPG │ ├── ComputerVisionSetup-3.JPG │ ├── ComputerVisionSetup-4.JPG │ ├── LUIS-AppKey-12.JPG │ ├── LUIS-Import-8.JPG │ ├── LUIS-ImportApp-7.JPG │ ├── LUIS-Publish-14.JPG │ ├── LUIS-Resource-Assign-12.JPG │ ├── LUIS-Resource-Assign-Prediction-13.JPG │ ├── LUIS-Singup-6.JPG │ ├── LUIS-SuccessfulImport-11-type-move_arm.JPG │ ├── LUIS-SuccessfulImport-9.JPG │ ├── LUIS-TrainingResults-11.JPG │ ├── SawyerExecuteCommand.JPG │ ├── SawyerRun-BotFramework.JPG │ ├── SawyerRunCube.JPG │ ├── SawyerRunOpenBot.JPG │ ├── SawyerRunPython.JPG │ ├── SawyerRunShowStatistics.JPG │ ├── sawyer-gazebo-1.png │ ├── sawyer-gazebo-2.png │ └── sawyer-rviz.png └── robotics_technologies.md ├── intera.sh ├── resources ├── Images │ ├── cube-blue.png │ ├── cube-green.png │ └── cube-red.png ├── azure_cv_resources.json ├── luis_azure_template.json └── robotics-demo-luis-app.json ├── setup ├── robot-challenge-setup.sh └── sawyer_urdf.xml └── src ├── chatbot ├── SawyerBot.bot ├── bot-move-grippers.py ├── bot-stats-node.py ├── bot-wave-arm-node.py ├── talk-to-my-robot-completed.py └── talk-to-my-robot.py ├── sawyer_ik_5d ├── CMakeLists.txt ├── include │ └── sawyer_ik_5d │ │ └── sawyer_ik_5d.h ├── package.xml └── src │ └── sawyer_ik_5d_node.cpp └── sorting_demo ├── CMakeLists.txt ├── config └── rviz.rviz ├── launch ├── robot_sorting_demo_control.launch ├── sawyer_moveit.launch └── sorting_demo.launch ├── models ├── block │ └── block.urdf.xacro ├── table │ ├── materials │ │ └── textures │ │ │ ├── Maple.jpg │ │ │ ├── Maple_dark.jpg │ │ │ └── Wood_Floor_Dark.jpg │ ├── meshes │ │ └── cafe_table.dae │ ├── model.config │ └── model.sdf └── tray │ └── tray.urdf.xacro ├── package.xml ├── scripts └── test_moveit.py ├── setup.py ├── share ├── head_mask.png ├── test_head_simulator │ ├── 1_color.png │ ├── 2_colors.png │ ├── base_1.png │ ├── base_2.png │ ├── grouping.png │ ├── joined_blobs.png │ └── no_cubes.png ├── test_right_hand_simulator │ ├── angles1.png │ ├── angles2.png │ ├── border.png │ ├── debug0.png │ ├── debug1.png │ ├── debug2.png │ ├── debug3.png │ ├── debug4.png │ ├── debug5.png │ ├── debug6.png │ ├── debug7.png │ ├── debug8.png │ ├── joined_cubes_1.png │ ├── joined_cubes_2.png │ └── many.png ├── testobstacle.txt └── web_view.html └── src ├── __init__.py └── sorting_demo ├── __init__.py ├── concepts ├── __init__.py ├── block.py ├── gripper_state.py ├── table.py └── tray.py ├── cv_detection_camera_helper.py ├── cv_detection_head.py ├── cv_detection_right_hand.py ├── demo_constants.py ├── environment_estimation.py ├── gazebo_models.py ├── goto_angles_in_contact.py ├── gripper_action_server.py ├── program.py ├── real_gripper.py ├── reinforcement_learning └── task_space_simulation.py ├── robot_control.py ├── robot_funcs.py ├── robot_tasks_facade.py ├── task_planner.py ├── tasks └── __init__.py ├── tasync.py ├── trajectory_action_server.py ├── trajectory_planner.py └── utils ├── __init__.py └── mathutils.py /.gitignore: 
-------------------------------------------------------------------------------- 1 | ## Ignore Visual Studio temporary files, build results, and 2 | ## files generated by popular Visual Studio add-ons. 3 | ## 4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore 5 | 6 | # User-specific files 7 | *.suo 8 | *.user 9 | *.userosscache 10 | *.sln.docstates 11 | *.vscode 12 | 13 | # User-specific files (MonoDevelop/Xamarin Studio) 14 | *.userprefs 15 | 16 | # Build results 17 | [Dd]ebug/ 18 | [Dd]ebugPublic/ 19 | [Rr]elease/ 20 | [Rr]eleases/ 21 | x64/ 22 | x86/ 23 | bld/ 24 | [Bb]in/ 25 | [Oo]bj/ 26 | [Ll]og/ 27 | 28 | # Visual Studio 2015/2017 cache/options directory 29 | .vs/ 30 | # Uncomment if you have tasks that create the project's static files in wwwroot 31 | #wwwroot/ 32 | 33 | # Visual Studio 2017 auto generated files 34 | Generated\ Files/ 35 | 36 | # MSTest test Results 37 | [Tt]est[Rr]esult*/ 38 | [Bb]uild[Ll]og.* 39 | 40 | # NUNIT 41 | *.VisualState.xml 42 | TestResult.xml 43 | 44 | # Build Results of an ATL Project 45 | [Dd]ebugPS/ 46 | [Rr]eleasePS/ 47 | dlldata.c 48 | 49 | # Benchmark Results 50 | BenchmarkDotNet.Artifacts/ 51 | 52 | # .NET Core 53 | project.lock.json 54 | project.fragment.lock.json 55 | artifacts/ 56 | **/Properties/launchSettings.json 57 | 58 | # StyleCop 59 | StyleCopReport.xml 60 | 61 | # Files built by Visual Studio 62 | *_i.c 63 | *_p.c 64 | *_i.h 65 | *.ilk 66 | *.meta 67 | *.obj 68 | *.iobj 69 | *.pch 70 | *.pdb 71 | *.ipdb 72 | *.pgc 73 | *.pgd 74 | *.rsp 75 | *.sbr 76 | *.tlb 77 | *.tli 78 | *.tlh 79 | *.tmp 80 | *.tmp_proj 81 | *.log 82 | *.vspscc 83 | *.vssscc 84 | .builds 85 | *.pidb 86 | *.svclog 87 | *.scc 88 | 89 | # Chutzpah Test files 90 | _Chutzpah* 91 | 92 | # Visual C++ cache files 93 | ipch/ 94 | *.aps 95 | *.ncb 96 | *.opendb 97 | *.opensdf 98 | *.sdf 99 | *.cachefile 100 | *.VC.db 101 | *.VC.VC.opendb 102 | 103 | # Visual Studio profiler 104 | *.psess 105 | *.vsp 106 | *.vspx 107 | *.sap 108 | 109 | # Visual Studio Trace Files 110 | *.e2e 111 | 112 | # TFS 2012 Local Workspace 113 | $tf/ 114 | 115 | # Guidance Automation Toolkit 116 | *.gpState 117 | 118 | # ReSharper is a .NET coding add-in 119 | _ReSharper*/ 120 | *.[Rr]e[Ss]harper 121 | *.DotSettings.user 122 | 123 | # JustCode is a .NET coding add-in 124 | .JustCode 125 | 126 | # TeamCity is a build add-in 127 | _TeamCity* 128 | 129 | # DotCover is a Code Coverage Tool 130 | *.dotCover 131 | 132 | # AxoCover is a Code Coverage Tool 133 | .axoCover/* 134 | !.axoCover/settings.json 135 | 136 | # Visual Studio code coverage results 137 | *.coverage 138 | *.coveragexml 139 | 140 | # NCrunch 141 | _NCrunch_* 142 | .*crunch*.local.xml 143 | nCrunchTemp_* 144 | 145 | # MightyMoose 146 | *.mm.* 147 | AutoTest.Net/ 148 | 149 | # Web workbench (sass) 150 | .sass-cache/ 151 | 152 | # Installshield output folder 153 | [Ee]xpress/ 154 | 155 | # DocProject is a documentation generator add-in 156 | DocProject/buildhelp/ 157 | DocProject/Help/*.HxT 158 | DocProject/Help/*.HxC 159 | DocProject/Help/*.hhc 160 | DocProject/Help/*.hhk 161 | DocProject/Help/*.hhp 162 | DocProject/Help/Html2 163 | DocProject/Help/html 164 | 165 | # Click-Once directory 166 | publish/ 167 | 168 | # Publish Web Output 169 | *.[Pp]ublish.xml 170 | *.azurePubxml 171 | # Note: Comment the next line if you want to checkin your web deploy settings, 172 | # but database connection strings (with potential passwords) will be unencrypted 173 | *.pubxml 174 | *.publishproj 175 | 176 | # 
Microsoft Azure Web App publish settings. Comment the next line if you want to 177 | # checkin your Azure Web App publish settings, but sensitive information contained 178 | # in these scripts will be unencrypted 179 | PublishScripts/ 180 | 181 | # NuGet Packages 182 | *.nupkg 183 | # The packages folder can be ignored because of Package Restore 184 | **/[Pp]ackages/* 185 | # except build/, which is used as an MSBuild target. 186 | !**/[Pp]ackages/build/ 187 | # Uncomment if necessary however generally it will be regenerated when needed 188 | #!**/[Pp]ackages/repositories.config 189 | # NuGet v3's project.json files produces more ignorable files 190 | *.nuget.props 191 | *.nuget.targets 192 | 193 | # Microsoft Azure Build Output 194 | csx/ 195 | *.build.csdef 196 | 197 | # Microsoft Azure Emulator 198 | ecf/ 199 | rcf/ 200 | 201 | # Windows Store app package directories and files 202 | AppPackages/ 203 | BundleArtifacts/ 204 | Package.StoreAssociation.xml 205 | _pkginfo.txt 206 | *.appx 207 | 208 | # Visual Studio cache files 209 | # files ending in .cache can be ignored 210 | *.[Cc]ache 211 | # but keep track of directories ending in .cache 212 | !*.[Cc]ache/ 213 | 214 | # Others 215 | ClientBin/ 216 | ~$* 217 | *~ 218 | *.dbmdl 219 | *.dbproj.schemaview 220 | *.jfm 221 | *.pfx 222 | *.publishsettings 223 | orleans.codegen.cs 224 | 225 | # Including strong name files can present a security risk 226 | # (https://github.com/github/gitignore/pull/2483#issue-259490424) 227 | #*.snk 228 | 229 | # Since there are multiple workflows, uncomment next line to ignore bower_components 230 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) 231 | #bower_components/ 232 | 233 | # RIA/Silverlight projects 234 | Generated_Code/ 235 | 236 | # Backup & report files from converting an old project file 237 | # to a newer Visual Studio version. Backup files are not needed, 238 | # because we have git ;-) 239 | _UpgradeReport_Files/ 240 | Backup*/ 241 | UpgradeLog*.XML 242 | UpgradeLog*.htm 243 | ServiceFabricBackup/ 244 | *.rptproj.bak 245 | 246 | # SQL Server files 247 | *.mdf 248 | *.ldf 249 | *.ndf 250 | 251 | # Business Intelligence projects 252 | *.rdl.data 253 | *.bim.layout 254 | *.bim_*.settings 255 | *.rptproj.rsuser 256 | 257 | # Microsoft Fakes 258 | FakesAssemblies/ 259 | 260 | # GhostDoc plugin setting file 261 | *.GhostDoc.xml 262 | 263 | # Node.js Tools for Visual Studio 264 | .ntvs_analysis.dat 265 | node_modules/ 266 | 267 | # Visual Studio 6 build log 268 | *.plg 269 | 270 | # Visual Studio 6 workspace options file 271 | *.opt 272 | 273 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
274 | *.vbw
275 | 
276 | # Visual Studio LightSwitch build output
277 | **/*.HTMLClient/GeneratedArtifacts
278 | **/*.DesktopClient/GeneratedArtifacts
279 | **/*.DesktopClient/ModelManifest.xml
280 | **/*.Server/GeneratedArtifacts
281 | **/*.Server/ModelManifest.xml
282 | _Pvt_Extensions
283 | 
284 | # Paket dependency manager
285 | .paket/paket.exe
286 | paket-files/
287 | 
288 | # FAKE - F# Make
289 | .fake/
290 | 
291 | # JetBrains Rider
292 | .idea/
293 | *.sln.iml
294 | 
295 | # CodeRush
296 | .cr/
297 | 
298 | # Python Tools for Visual Studio (PTVS)
299 | __pycache__/
300 | *.pyc
301 | 
302 | # Cake - Uncomment if you are using it
303 | # tools/**
304 | # !tools/packages.config
305 | 
306 | # Tabs Studio
307 | *.tss
308 | 
309 | # Telerik's JustMock configuration file
310 | *.jmconfig
311 | 
312 | # BizTalk build output
313 | *.btp.cs
314 | *.btm.cs
315 | *.odx.cs
316 | *.xsd.cs
317 | 
318 | # OpenCover UI analysis results
319 | OpenCover/
320 | 
321 | # Azure Stream Analytics local run output
322 | ASALocalRun/
323 | 
324 | # MSBuild Binary and Structured Log
325 | *.binlog
326 | 
327 | # NVidia Nsight GPU debugger configuration file
328 | *.nvuser
329 | 
330 | # MFractors (Xamarin productivity tool) working folder
331 | .mfractor/
332 | .vscode/c_cpp_properties.json
333 | 
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "submodules/intera_sdk"]
2 | 	path = src/submodules/intera_sdk
3 | 	url = https://github.com/RethinkRobotics/intera_sdk
4 | [submodule "submodules/sawyer_simulator"]
5 | 	path = src/submodules/sawyer_simulator
6 | 	url = https://github.com/RethinkRobotics/sawyer_simulator.git
7 | [submodule "submodules/intera_common"]
8 | 	path = src/submodules/intera_common
9 | 	url = https://github.com/RethinkRobotics/intera_common
10 | [submodule "submodules/sawyer_moveit"]
11 | 	path = src/submodules/sawyer_moveit
12 | 	url = https://github.com/pabloinigoblasco/sawyer_moveit
13 | [submodule "submodules/sawyer_robot"]
14 | 	path = src/submodules/sawyer_robot
15 | 	url = https://github.com/pabloinigoblasco/sawyer_robot.git
16 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) Microsoft Corporation. All rights reserved.
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
40 | 
41 | 
42 | 
--------------------------------------------------------------------------------
/azure-pipelines.yml:
--------------------------------------------------------------------------------
1 | trigger:
2 | - master
3 | 
4 | jobs:
5 | - job: Sawyer
6 |   timeoutInMinutes: 120
7 |   pool:
8 |     name: 'nvidia-docker-pool'
9 | 
10 |   steps:
11 |   - task: Bash@3
12 |     inputs:
13 |       workingDirectory: 'docker/ubuntu-xenial-kinetic'
14 |       targetType: 'inline'
15 |       script: './build.sh'
16 |   - task: Bash@3
17 |     timeoutInMinutes: 10
18 |     inputs:
19 |       workingDirectory: 'docker/ubuntu-xenial-kinetic'
20 |       targetType: 'inline'
21 |       script: './run_docker_container.sh'
22 |     env:
23 |       DISPLAY: :0.0
24 |   - task: Bash@3
25 |     inputs:
26 |       targetType: 'inline'
27 |       script: docker rmi ubuntu1604_sawyer
28 | 
--------------------------------------------------------------------------------
/docker/ubuntu-xenial-kinetic/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:16.04
2 | 
3 | MAINTAINER Pablo Inigo
4 | 
5 | ENV DEBIAN_FRONTEND noninteractive
6 | 
7 | RUN apt-get update && apt-mark hold iptables && \
8 |     apt-get -y dist-upgrade && apt-get autoremove -y && apt-get clean
9 | RUN apt-get install -y dbus-x11 procps psmisc apt-utils x11-utils x11-xserver-utils kmod xz-utils && \
10 |     apt-get -y install dialog git vim software-properties-common debconf-utils wget curl apt-transport-https bzip2 zip iputils-ping telnet less
11 | 
12 | # OpenGL / MESA
13 | RUN apt-get install -y mesa-utils mesa-utils-extra libxv1
14 | 
15 | # Language/locale settings
16 | # replace en_US by your desired locale setting,
17 | # for example de_DE for German.
18 | ENV LANG en_US.UTF-8
19 | RUN echo $LANG UTF-8 > /etc/locale.gen
20 | RUN apt-get install -y locales && update-locale --reset LANG=$LANG
21 | 
22 | # some utils to have proper menus, mime file types etc.
23 | RUN apt-get install -y --no-install-recommends xdg-utils xdg-user-dirs \
24 |     menu menu-xdg mime-support desktop-file-utils
25 | 
26 | # Install fonts
27 | # We just need to drop these into the shared fonts folder, the font cache will be rebuilt when we add the desktop environments
28 | #RUN wget https://dl.1001fonts.com/mclaren.zip && \
29 | #    unzip -d /usr/share/fonts/truetype/mclaren mclaren.zip && \
30 | #    rm mclaren.zip && \
31 | #    mkdir -p /usr/share/fonts/truetype/monofur && \
32 | #    cd /usr/share/fonts/truetype/monofur && \
33 | #    wget https://github.com/ryanoasis/nerd-fonts/blob/master/patched-fonts/Monofur/Regular/complete/monofur%20Nerd%20Font%20Complete%20Mono.ttf?raw=true -O '/usr/share/fonts/truetype/monofur/monofur Nerd Font Complete Mono.ttf' \
34 | #    https://github.com/ryanoasis/nerd-fonts/blob/master/patched-fonts/Monofur/Regular/complete/monofur%20Nerd%20Font%20Complete.ttf?raw=true -O '/usr/share/fonts/truetype/monofur/monofur Nerd Font Complete.ttf' \
35 | #    https://github.com/ryanoasis/nerd-fonts/blob/master/patched-fonts/Monofur/Italic/complete/monofur%20italic%20Nerd%20Font%20Complete%20Mono.ttf?raw=true -O '/usr/share/fonts/truetype/monofur/monofur italic Nerd Font Complete Mono.ttf' \
36 | #    https://github.com/ryanoasis/nerd-fonts/blob/master/patched-fonts/Monofur/Italic/complete/monofur%20italic%20Nerd%20Font%20Complete.ttf?raw=true -O '/usr/share/fonts/truetype/monofur/monofur italic Nerd Font Complete.ttf' \
37 | #    https://github.com/ryanoasis/nerd-fonts/blob/master/patched-fonts/Monofur/Bold/complete/monofur%20bold%20Nerd%20Font%20Complete%20Mono.ttf?raw=true -O '/usr/share/fonts/truetype/monofur/monofur bold Nerd Font Complete Mono.ttf' \
38 | #    https://github.com/ryanoasis/nerd-fonts/blob/master/patched-fonts/Monofur/Bold/complete/monofur%20bold%20Nerd%20Font%20Complete.ttf?raw=true -O '/usr/share/fonts/truetype/monofur/monofur bold Nerd Font Complete.ttf'
39 | 
40 | RUN apt-get install -y sudo
41 | RUN sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
42 | RUN sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654
43 | RUN curl -sSL 'http://keyserver.ubuntu.com/pks/lookup?op=get&search=0xC1CF6E31E6BADE8868B172B4F42ED6FBAB17C654' | sudo apt-key add -
44 | RUN sudo apt-get update
45 | RUN ln -fs /usr/share/zoneinfo/US/Pacific-New /etc/localtime && apt-get install -y tzdata
46 | 
47 | 
48 | RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y git python sudo gconf-service gconf2 libxss1 libnss3 libnotify4 software-properties-common apt-utils
49 | 
50 | RUN apt-get install -y chromium-browser sudo mesa-utils
51 | 
52 | # create user
53 | RUN bash -c "useradd -ms /bin/bash nvagent"
54 | RUN usermod -aG sudo nvagent
55 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
56 | 
57 | USER nvagent
58 | WORKDIR /home/nvagent
59 | RUN echo "source /opt/ros/kinetic/setup.bash" >> .bashrc
60 | 
61 | # downloading repo and configuring environment
62 | RUN bash -c 'cd $HOME; git clone https://github.com/pabloinigoblasco/AI-Robot-Challenge-Lab.git --recurse-submodules'
63 | 
64 | RUN bash -c 'export DEBIAN_FRONTEND=noninteractive; $HOME/AI-Robot-Challenge-Lab/setup/robot-challenge-setup.sh'
65 | WORKDIR /home/nvagent/AI-Robot-Challenge-Lab/
66 | 
67 | RUN git pull
68 | RUN git submodule init; git submodule update --recursive
69 | RUN bash -c 'catkin build'
70 | 
71 | RUN sudo apt-get install -y x11-apps python-pip
72 | RUN python -m pip install requests --user
73 | 
74 | COPY runtime_test.sh runtime_test.sh
75 | COPY runtime_test.py runtime_test.py
76 | RUN echo "export SVGA_VGPU10=0" >> ~/.bashrc
77 | RUN sudo wget http://packages.osrfoundation.org/gazebo.key -O - | sudo apt-key add -
78 | RUN sudo sh -c 'echo "deb http://packages.osrfoundation.org/gazebo/ubuntu-stable `lsb_release -cs` main" > /etc/apt/sources.list.d/gazebo-stable.list'
79 | RUN sudo apt-get update
80 | RUN sudo apt-get install -y gazebo7
81 | 
82 | 
83 | #ENTRYPOINT /bin/bash
84 | ENTRYPOINT /home/nvagent/AI-Robot-Challenge-Lab/runtime_test.sh sim
85 | 
86 | 
--------------------------------------------------------------------------------
/docker/ubuntu-xenial-kinetic/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export DEBIAN_FRONTEND="noninteractive"
3 | sudo docker build -t ubuntu1604_sawyer . --no-cache
4 | 
--------------------------------------------------------------------------------
/docker/ubuntu-xenial-kinetic/run_docker_container.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | sudo startxfce4 &
3 | sleep 2;
4 | export DISPLAY=:0
5 | sudo docker run --rm --gpus device=0 -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:rw --env QT_X11_NO_MITSHM=1 --privileged -t ubuntu1604_sawyer
6 | RES=$?
7 | 
8 | echo "RUNTIME TEST RESULT: $RES"
9 | 
10 | 
--------------------------------------------------------------------------------
/docker/ubuntu-xenial-kinetic/runtime_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | 
3 | import sys
4 | import os
5 | import rospy
6 | from threading import Thread
7 | import time
8 | import signal
9 | import requests
10 | 
11 | #---------------------------------------------
12 | def exit_properly_runtime_test():
13 |     f = open("result.txt", "w")
14 |     f.write("0")  # "0" is the success code read back by runtime_test.sh
15 |     f.close()
16 |     rospy.signal_shutdown("SUCCESS TEST")
17 |     os.kill(os.getpid(), signal.SIGUSR1)
18 |     sys.exit(0)
19 | 
20 | class ExitCommand(Exception):
21 |     pass
22 | 
23 | def signal_handler(signal, frame):
24 |     sys.exit(1)
25 | 
26 | # timeout countdown
27 | def start_countdown(seconds):
28 |     def timeout_countdown(seconds):
29 |         time.sleep(seconds)
30 |         rospy.logfatal("COUNTDOWN ERROR RUNTIME TEST")
31 |         os.kill(os.getpid(), signal.SIGUSR1)
32 |         sys.exit(1)
33 | 
34 |     t = Thread(target=lambda: timeout_countdown(seconds))
35 |     t.start()
36 | #---------------------------------------------
37 | # launch countdown
38 | 
39 | signal.signal(signal.SIGUSR1, signal_handler)
40 | 
41 | import gazebo_msgs.msg
42 | import intera_core_msgs.msg
43 | rospy.init_node("test_node")
44 | start_countdown(320)
45 | rospy.logwarn("[Test Node] waiting for a gazebo message; the first run can be slow (gazebo has to download models)...")
46 | rospy.wait_for_message("/gazebo/link_states", gazebo_msgs.msg.LinkStates, timeout=200)
47 | 
48 | # wait until a box is picked up
49 | def on_boxes_state_message(msg):
50 |     #rospy.loginfo("received from test node: " + str(msg))
51 | 
52 |     str_msg = ""
53 |     exit_flag = False
54 |     for i, pose in enumerate(msg.pose):
55 |         name = msg.name[i]
56 |         if "block" in name:
57 |             str_msg += str(name) + ".y: " + str(pose.position.y) + "\n"
58 |             if pose.position.y > 0.75:  # a block past y = 0.75 is considered delivered to the tray
59 |                 exit_flag = True
60 |                 rospy.logwarn("success, cube properly sent to tray. Ending!")
61 |                 break
62 | 
63 |     rospy.loginfo_throttle(1.0, "[Test Node] " + str_msg)
64 | 
65 |     if exit_flag:
66 |         exit_properly_runtime_test()
67 | 
68 | sub = rospy.Subscriber("/gazebo/link_states", gazebo_msgs.msg.LinkStates, on_boxes_state_message)
69 | 
70 | rospy.logwarn("[Test Node] waiting for the sorting demo...")
71 | rospy.wait_for_message("/robot/limb/right/joint_command", intera_core_msgs.msg.JointCommand, timeout=60)
72 | 
73 | rospy.logwarn("[Test Node] Calling start sorting...")
74 | rospy.sleep(10)
75 | res = requests.get('http://localhost:5000/start')
76 | rospy.logwarn(str(res))
77 | 
78 | rospy.spin()
79 | 
80 | 
81 | 
--------------------------------------------------------------------------------
/docker/ubuntu-xenial-kinetic/runtime_test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) 2013-2016, Rethink Robotics
3 | # All rights reserved.
4 | 
5 | # This file is to be used in the *root* of your Catkin workspace.
6 | 
7 | # This is a convenient script which will set up your ROS environment and
8 | # should be executed with every new instance of a shell in which you plan on
9 | # working with Intera.
10 | 
11 | # Clear any previously set your_ip/your_hostname
12 | unset your_ip
13 | unset your_hostname
14 | #-----------------------------------------------------------------------------#
15 | #                 USER CONFIGURABLE ROS ENVIRONMENT VARIABLES                  #
16 | #-----------------------------------------------------------------------------#
17 | # Note: If ROS_MASTER_URI, ROS_IP, or ROS_HOSTNAME environment variables were
18 | # previously set (typically in your .bashrc or .bash_profile), those settings
19 | # will be overwritten by any variables set here.
20 | 
21 | # Specify Robot's hostname
22 | robot_hostname="paule.local"
23 | 
24 | # Set *Either* your computer's ip address or hostname. Please note if using
25 | # your_hostname that this must be resolvable to the Robot.
26 | # your_ip="127.0.XXX.XXX"
27 | your_hostname=`hostname`
28 | 
29 | # Specify ROS distribution (e.g. indigo, hydro, etc.)
30 | ros_version="kinetic"
31 | #-----------------------------------------------------------------------------#
32 | 
33 | tf=$(mktemp)
34 | trap "rm -f -- '${tf}'" EXIT
35 | 
36 | # If this file specifies an ip address or hostname - unset any previously set
37 | # ROS_IP and/or ROS_HOSTNAME.
38 | # If this file does not specify an ip address or hostname - use the
39 | # previously specified ROS_IP/ROS_HOSTNAME environment variables.
40 | if [ -n "${your_ip}" ] || [ -n "${your_hostname}" ]; then 41 | unset ROS_IP && unset ROS_HOSTNAME 42 | else 43 | your_ip="${ROS_IP}" && your_hostname="${ROS_HOSTNAME}" 44 | fi 45 | 46 | # If argument provided, set robot_hostname to argument 47 | # If argument is sim or local, set robot_hostname to localhost 48 | if [ -n "${1}" ]; then 49 | if [[ "${1}" == "sim" ]] || [[ "${1}" == "local" ]]; then 50 | robot_hostname="localhost" 51 | if [[ -z ${your_ip} || "${your_ip}" == "192.168.XXX.XXX" ]] && \ 52 | [[ -z ${your_hostname} || "${your_hostname}" == "my_computer.local" ]]; then 53 | your_hostname="localhost" 54 | your_ip="" 55 | fi 56 | else 57 | robot_hostname="${1}" 58 | fi 59 | fi 60 | 61 | topdir=$(basename $(readlink -f $(dirname ${BASH_SOURCE[0]}))) 62 | 63 | cat <<-EOF > ${tf} 64 | [ -s "\${HOME}"/.bashrc ] && source "\${HOME}"/.bashrc 65 | [ -s "\${HOME}"/.bash_profile ] && source "\${HOME}"/.bash_profile 66 | 67 | # verify this script is moved out of intera_sdk folder 68 | if [[ -e "${topdir}/intera_sdk/package.xml" ]]; then 69 | echo -ne "EXITING - This script must be moved from the intera_sdk folder \ 70 | to the root of your catkin workspace.\n" 71 | exit 1 72 | fi 73 | 74 | # verify ros_version lowercase 75 | ros_version="$(tr [A-Z] [a-z] <<< "${ros_version}")" 76 | 77 | # check for ros installation 78 | if [ ! -d "/opt/ros" ] || [ ! "$(ls -A /opt/ros)" ]; then 79 | echo "EXITING - No ROS installation found in /opt/ros." 80 | echo "Is ROS installed?" 81 | exit 1 82 | fi 83 | 84 | # if set, verify user has modified the robot_hostname 85 | if [ -n ${robot_hostname} ] && \ 86 | [[ "${robot_hostname}" == "robot_hostname.local" ]]; then 87 | echo -ne "EXITING - Please edit this file, modifying the \ 88 | 'robot_hostname' variable to reflect your Robot's current hostname.\n" 89 | exit 1 90 | fi 91 | 92 | # if set, verify user has modified their ip address (your_ip) 93 | if [ -n ${your_ip} ] && [[ "${your_ip}" == "192.168.XXX.XXX" ]]; then 94 | echo -ne "EXITING - Please edit this file, modifying the 'your_ip' \ 95 | variable to reflect your current IP address.\n" 96 | exit 1 97 | fi 98 | 99 | # if set, verify user has modified their computer hostname (your_hostname) 100 | if [ -n ${your_hostname} ] && \ 101 | [[ "${your_hostname}" == "my_computer.local" ]]; then 102 | echo -ne "EXITING - Please edit this file, modifying the \ 103 | 'your_hostname' variable to reflect your current PC hostname.\n" 104 | exit 1 105 | fi 106 | 107 | # verify user does not have both their ip *and* hostname set 108 | if [ -n "${your_ip}" ] && [ -n "${your_hostname}" ]; then 109 | echo -ne "EXITING - Please edit this file, modifying to specify \ 110 | *EITHER* your_ip or your_hostname.\n" 111 | exit 1 112 | fi 113 | 114 | # verify that one of your_ip, your_hostname, ROS_IP, or ROS_HOSTNAME is set 115 | if [ -z "${your_ip}" ] && [ -z "${your_hostname}" ]; then 116 | echo -ne "EXITING - Please edit this file, modifying to specify \ 117 | your_ip or your_hostname.\n" 118 | exit 1 119 | fi 120 | 121 | # verify specified ros version is installed 122 | ros_setup="/opt/ros/\${ros_version}" 123 | if [ ! -d "\${ros_setup}" ]; then 124 | echo -ne "EXITING - Failed to find ROS \${ros_version} installation \ 125 | in \${ros_setup}.\n" 126 | exit 1 127 | fi 128 | 129 | # verify the ros setup.sh file exists 130 | if [ ! 
-s "\${ros_setup}"/setup.sh ]; then 131 | echo -ne "EXITING - Failed to find the ROS environment script: \ 132 | "\${ros_setup}"/setup.sh.\n" 133 | exit 1 134 | fi 135 | 136 | # verify the user is running this script in the root of the catkin 137 | # workspace and that the workspace has been compiled. 138 | if [ ! -s "devel/setup.bash" ]; then 139 | echo -ne "EXITING - \n1) Please verify that this script is being run \ 140 | in the root of your catkin workspace.\n2) Please verify that your workspace \ 141 | has been built (source /opt/ros/\${ros_version}/setup.sh; catkin_make).\n\ 142 | 3) Run this script again upon completion of your workspace build.\n" 143 | exit 1 144 | fi 145 | 146 | [ -n "${your_ip}" ] && export ROS_IP="${your_ip}" 147 | [ -n "${your_hostname}" ] && export ROS_HOSTNAME="${your_hostname}" 148 | [ -n "${robot_hostname}" ] && \ 149 | export ROS_MASTER_URI="http://${robot_hostname}:11311" 150 | 151 | # source the catkin setup bash script 152 | source devel/setup.bash 153 | 154 | # setup the bash prompt 155 | export __ROS_PROMPT=\${__ROS_PROMPT:-0} 156 | [ \${__ROS_PROMPT} -eq 0 -a -n "\${PROMPT_COMMAND}" ] && \ 157 | export __ORIG_PROMPT_COMMAND=\${PROMPT_COMMAND} 158 | 159 | __ros_prompt () { 160 | if [ -n "\${__ORIG_PROMPT_COMMAND}" ]; then 161 | eval \${__ORIG_PROMPT_COMMAND} 162 | fi 163 | if ! echo \${PS1} | grep '\[intera' &>/dev/null; then 164 | export PS1="\[\033[00;33m\][intera - \ 165 | \${ROS_MASTER_URI}]\[\033[00m\] \${PS1}" 166 | fi 167 | } 168 | 169 | if [ "\${TERM}" != "dumb" ]; then 170 | export PROMPT_COMMAND=__ros_prompt 171 | __ROS_PROMPT=1 172 | elif ! echo \${PS1} | grep '\[intera' &>/dev/null; then 173 | export PS1="[intera - \${ROS_MASTER_URI}] \${PS1}" 174 | fi 175 | 176 | export SVGA_VGPU10=0 177 | source /opt/ros/kinetic/setup.bash 178 | source devel/setup.bash 179 | roslaunch sorting_demo sorting_demo.launch & 180 | python ./runtime_test.py 181 | 182 | exit 183 | 184 | EOF 185 | 186 | echo "1" > result.txt 187 | ${SHELL} --rcfile ${tf} 188 | exit `cat result.txt` 189 | 190 | 191 | -------------------------------------------------------------------------------- /docs/azure_resources.md: -------------------------------------------------------------------------------- 1 | ## Setup your Azure subscription 2 | 3 | This lab **requires** an Azure subscription in order to create the Language Understanding LUIS model and the Custom Vision model. 4 | If you need a new Azure subscription, then there are a couple of options to get a free subscription: 5 | 6 | 1. The easiest way to sign up for an Azure subscription is with VS Dev Essentials and a personal Microsoft account (like @outlook.com). This does require a credit card; however, there is a spending limit in the subscription so it won't be charged unless you explicitly remove the limit. 7 | 8 | * Open Microsoft Edge and go to the [Microsoft VS Dev Essentials site](https://visualstudio.microsoft.com/dev-essentials/). 9 | * Click **Join or access now**. 10 | * Sign in using your personal Microsoft account. 11 | * If prompted, click Confirm to agree to the terms and conditions. 12 | * Find the Azure tile and click the **Activate** link. 13 | 14 | 1. Alternatively, if the above isn't suitable, you can sign up for a free Azure trial. 15 | 16 | * Open Microsoft Edge and go to the [free Azure trial page](https://azure.microsoft.com/en-us/free/). 17 | * Click **Start free**. 18 | * Sign in using your personal Microsoft account. 19 | 20 | 1. 
Complete the Azure sign up steps and wait for the subscription to be provisioned. This usually only takes a couple of minutes. 21 | 22 | ## Create the LUIS resource in Azure: 23 | 24 | 1. Go to the [Azure Portal](https://portal.azure.com) and log in with your credentials. 25 | 1. Click **Create Resource [+]** from the left menu and search for **Language Understanding**. 26 | 1. **Select** the first result and then click the **Create** button. 27 |

28 | 29 |

30 | 31 | 4. Provide the required information: 32 | * App name: `robotics-luis`. 33 | * Location: `West US`. 34 | * Pricing tier: `F0 (5 Calls per second, 10K Calls per month)`. 35 | * Create a new resource group with the name: `robotics-lab`. 36 | * **Confirm** that you read and understood the terms by **checking** the box. 37 | 38 |

39 | 40 |

41 | 42 | 5. Click **Create**. This step might take a few seconds. 43 | 44 |

45 | 46 |

47 | 1. Once the deployment is complete, you will see a **Deployment succeeded** notification. 48 | 1. Go to **All Resources** in the left pane and **search** for the new resource (`robotics-luis`). 49 | 1. **Click** on the resource. 50 | 51 | Every published LUIS application needs an assigned runtime key. You can assign your runtime key to your application in the LUIS portal. We will copy ``Key1`` into **Notepad**. We'll need this key later on. 52 | 53 |
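Once the LUIS app has been imported and published (covered later in the lab), you can sanity-check this key with a direct REST call. Below is a minimal sketch using Python's `requests` library; the app ID is a placeholder you will get from the LUIS portal, and the `westus` region prefix and v2.0 endpoint format are assumptions matching the resource created above:

```python
import requests

# Placeholders: the app ID is shown in the LUIS portal after you import and
# publish the app; the key is the Key1 value copied above.
LUIS_APP_ID = "<your-luis-app-id>"
LUIS_KEY = "<your-Key1-value>"

# v2.0 prediction endpoint; the region prefix must match the Azure resource.
url = "https://westus.api.cognitive.microsoft.com/luis/v2.0/apps/%s" % LUIS_APP_ID
response = requests.get(url, params={"subscription-key": LUIS_KEY, "q": "move your arm up"})
print(response.json())  # includes the top scoring intent and any detected entities
```

A `200` response with a `topScoringIntent` field confirms the key and endpoint are wired up correctly.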

54 | 55 |

56 | 57 | ## Create the Custom Vision resource in Azure: 58 | 59 | 1. Return to the [Azure Portal](https://portal.azure.com). 60 | 1. Click **Create Resource [+]** from the left menu and search for **Computer Vision**. 61 | 1. **Select** the first result and then click the **Create** button. 62 |

63 | 64 |

65 | 
66 | 4. Provide the required information:
67 |     * Name: `robotics-computer-vision`.
68 |     * Select your preferred subscription.
69 |     * Select the location: `West US`.
70 |     * Select the Pricing tier: `F0 (20 Calls per minute, 5k Calls per month)`.
71 |     * Select the previously created resource group: `robotics-lab`.
72 | 
73 | 

74 | 75 |

76 | 77 | 5. Click **Create** to create the resource and deploy it. This step might take a few moments. 78 | 1. Once the deployment is complete, you will see a **Deployment succeeded** notification. 79 | 1. Go to **All Resources** in the left pane and **search** for the new resource (`robotics-computer-vision`). 80 | 1. **Click** on the resource. 81 | 1. Go to the **Keys** page. 82 | 1. Copy the **Key 1** value into **Notepad**. 83 | > NOTE: We'll need this key later on. 84 | 85 |
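Before wiring the key into the bot, you can confirm it works with a direct call to the Computer Vision REST API. Here is a minimal sketch with Python's `requests`; the `westus` endpoint and the v2.0 API version are assumptions matching the resource created above, and the image URL is just an example taken from this repository:

```python
import requests

CV_KEY = "<your-Key-1-value>"  # the Key 1 value copied above

# Analyze endpoint; the region prefix must match the Azure resource.
url = "https://westus.api.cognitive.microsoft.com/vision/v2.0/analyze"
headers = {"Ocp-Apim-Subscription-Key": CV_KEY, "Content-Type": "application/json"}
params = {"visualFeatures": "Description,Color"}
body = {"url": "https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/master/resources/Images/cube-red.png"}

response = requests.post(url, headers=headers, params=params, json=body)
print(response.json())  # caption plus dominant and accent colors
```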

86 | 87 |

88 | 89 | -------------------------------------------------------------------------------- /docs/images/AzurePortalLanguageUnderstanding-Create-Succed-4.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/AzurePortalLanguageUnderstanding-Create-Succed-4.JPG -------------------------------------------------------------------------------- /docs/images/AzurePortalLanguageUnderstanding-Create.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/AzurePortalLanguageUnderstanding-Create.JPG -------------------------------------------------------------------------------- /docs/images/AzurePortalLanguageUnderstanding-CreateResource.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/AzurePortalLanguageUnderstanding-CreateResource.JPG -------------------------------------------------------------------------------- /docs/images/AzurePortalLanguageUnderstanding-GetKey-5.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/AzurePortalLanguageUnderstanding-GetKey-5.JPG -------------------------------------------------------------------------------- /docs/images/AzurePortalLanguageUnderstandingSearch.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/AzurePortalLanguageUnderstandingSearch.JPG -------------------------------------------------------------------------------- /docs/images/ComputerVisionSetup-1.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/ComputerVisionSetup-1.JPG -------------------------------------------------------------------------------- /docs/images/ComputerVisionSetup-2.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/ComputerVisionSetup-2.JPG -------------------------------------------------------------------------------- /docs/images/ComputerVisionSetup-3.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/ComputerVisionSetup-3.JPG -------------------------------------------------------------------------------- /docs/images/ComputerVisionSetup-4.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/ComputerVisionSetup-4.JPG -------------------------------------------------------------------------------- /docs/images/LUIS-AppKey-12.JPG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/LUIS-AppKey-12.JPG -------------------------------------------------------------------------------- /docs/images/LUIS-Import-8.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/LUIS-Import-8.JPG -------------------------------------------------------------------------------- /docs/images/LUIS-ImportApp-7.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/LUIS-ImportApp-7.JPG -------------------------------------------------------------------------------- /docs/images/LUIS-Publish-14.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/LUIS-Publish-14.JPG -------------------------------------------------------------------------------- /docs/images/LUIS-Resource-Assign-12.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/LUIS-Resource-Assign-12.JPG -------------------------------------------------------------------------------- /docs/images/LUIS-Resource-Assign-Prediction-13.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/LUIS-Resource-Assign-Prediction-13.JPG -------------------------------------------------------------------------------- /docs/images/LUIS-Singup-6.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/LUIS-Singup-6.JPG -------------------------------------------------------------------------------- /docs/images/LUIS-SuccessfulImport-11-type-move_arm.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/LUIS-SuccessfulImport-11-type-move_arm.JPG -------------------------------------------------------------------------------- /docs/images/LUIS-SuccessfulImport-9.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/LUIS-SuccessfulImport-9.JPG -------------------------------------------------------------------------------- /docs/images/LUIS-TrainingResults-11.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/LUIS-TrainingResults-11.JPG -------------------------------------------------------------------------------- /docs/images/SawyerExecuteCommand.JPG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/SawyerExecuteCommand.JPG
--------------------------------------------------------------------------------
/docs/images/SawyerRun-BotFramework.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/SawyerRun-BotFramework.JPG
--------------------------------------------------------------------------------
/docs/images/SawyerRunCube.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/SawyerRunCube.JPG
--------------------------------------------------------------------------------
/docs/images/SawyerRunOpenBot.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/SawyerRunOpenBot.JPG
--------------------------------------------------------------------------------
/docs/images/SawyerRunPython.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/SawyerRunPython.JPG
--------------------------------------------------------------------------------
/docs/images/SawyerRunShowStatistics.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/SawyerRunShowStatistics.JPG
--------------------------------------------------------------------------------
/docs/images/sawyer-gazebo-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/sawyer-gazebo-1.png
--------------------------------------------------------------------------------
/docs/images/sawyer-gazebo-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/sawyer-gazebo-2.png
--------------------------------------------------------------------------------
/docs/images/sawyer-rviz.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/docs/images/sawyer-rviz.png
--------------------------------------------------------------------------------
/docs/robotics_technologies.md:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | # Introduction to Robotics
4 | 
5 | ## Summary of technologies used
6 | 
7 | ### **ROS**
8 | 
9 | ROS (Robot Operating System) is robotics middleware licensed under an open source, BSD license. Although ROS is not an operating system, it provides libraries, hardware abstraction, device drivers, visualizers, message-passing, package management, and other tools to help software developers create robot applications. It also supports collaboration through repositories.
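To make the node and message-passing model concrete, here is a minimal sketch of a ROS node in Python (the language used throughout this lab); the node and topic names are arbitrary:

```python
#!/usr/bin/env python
import rospy
from std_msgs.msg import String

def on_message(msg):
    # The callback fires whenever any node publishes on the subscribed topic.
    rospy.loginfo("heard: %s", msg.data)

rospy.init_node("chatter_demo")
pub = rospy.Publisher("chatter", String, queue_size=10)
rospy.Subscriber("chatter", String, on_message)

rate = rospy.Rate(1)  # publish once per second
while not rospy.is_shutdown():
    pub.publish(String(data="hello from a ROS node"))
    rate.sleep()
```

With a `roscore` running, `rostopic echo chatter` in another shell shows the same messages flowing between the two endpoints.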
10 | 
11 | Last Release: ROS Melodic Morenia. Supported on Ubuntu Artful and Bionic, along with Debian Stretch.
12 | 
13 | ROS is a distributed framework of processes (aka Nodes) that enables executables to be individually added, which makes the framework very modular. These processes can be grouped into Packages and Stacks, which can be easily shared and distributed.
14 | 
15 | Although there are some Robotics Kits available in the market, ROS is quickly becoming the new standard for both industrial and research robotics as it integrates both hardware and software for industrial applications.
16 | 
17 | **Languages:** Python 2.7
18 | 
19 | **System Requirements:** Ubuntu 16.04
20 | 
21 | **Other Supported Technologies (reference only):** C++ and Lisp
22 | 
23 | ROS supports Unix-like systems, specifically Ubuntu and Mac OS X, but the community has been adding support for Fedora, Gentoo, Arch Linux and other Linux platforms.
24 | 
25 | 
26 | ### **Gazebo**
27 | 
28 | Gazebo is a 3D robot simulation tool that seamlessly integrates with ROS (i.e. the Gazebo server can run in a ROS environment). Gazebo allows you to build 3D scenarios on your computer with robots, using obstacles and other objects. This allows you to test robots in complex or dangerous scenarios without any harm to the real robot. Most of the time it is faster and more cost effective to simply run a simulator instead of running the whole scenario on a real robot. It uses a physics engine called ODE (Open Dynamics Engine) for realistic movements.
29 | 
30 | Gazebo has two main components: the server, which acts as a back-end to compute all the logic for your testing scenarios, and a client, which acts as a graphical front-end. This is very useful as sometimes you might only want to execute the tests without the UI in order to speed up the execution process.
31 | 
32 | Gazebo was used to simulate the Atlas robot for the Virtual Robotics Challenge (the precursor to the DARPA Robotics Challenge), which required participants to build a software simulation of human rescue work.
33 | 
34 | **Languages:** C++ API
35 | 
36 | **System Requirements:** Linux
37 | 
38 | 
39 | ### **RViz**
40 | 
41 | RViz is an open source 3D visualizer for the Robot Operating System (ROS) framework. It uses sensor data and custom visualization markers to develop robot capabilities in a 3D environment.
42 | 
43 | Features:
44 | - Motion planning: the process of breaking down a desired movement task into discrete motions that satisfy movement constraints and possibly optimize some aspects of the movement.
45 | - Object detection: visualization and recognition of objects using the camera, for example recognizing cubes of different colors.
46 | - Calibration: geometric camera calibration is used to estimate parameters internal to the camera that affect the image processing.
47 | - Debugging.
48 | - RViz visualization widget.
49 | - 3D stereo rendering: when using 2 connected cameras to record 3D images, it displays a different view to each eye so that the scene appears to have depth.
50 | 
51 | RViz provides a CLI tool that lets you execute Python or C++ scripts with controls.
52 | 
53 | ### **Sawyer**
54 | 
55 | Sawyer is an integrated collaborative robot (aka cobot) solution designed with embedded vision, smart swappable grippers, and high resolution force control. The robot's purpose is to automate specific repetitive industrial tasks. It comes with an arm that has a gripper which can be easily replaced by one of the available options from the ClickSmart Gripper Kit. A minimal sketch of commanding the arm from Python follows.
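The Intera SDK (vendored in this repository under `src/submodules/intera_sdk`) exposes the arm through a Python API. The joint offset below is purely illustrative, and the sketch assumes a running robot or Gazebo simulation:

```python
import rospy
import intera_interface

rospy.init_node("sawyer_arm_sketch")

limb = intera_interface.Limb("right")  # Sawyer's single arm is named "right"
angles = limb.joint_angles()           # current joint positions, keyed by joint name
angles["right_j0"] += 0.3              # illustrative: nudge the base joint
limb.move_to_joint_positions(angles)   # blocking move to the target configuration
```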
56 | 
57 | Features:
58 | - Sawyer comes with highly sensitive torque sensors embedded into every joint. This allows you to control force where delicate part insertion is critical or use force if needed. It can maneuver into tight spaces and it has a long reach of up to 1260 mm (50 inches).
59 | - It comes with an embedded vision system used for robot positioning. It also allows external vision systems like cameras.
60 | - Fast to deploy, as many pieces are plug & play with integrated sensors.
61 | 
62 | ### **MoveIt!**
63 | 
64 | MoveIt is software for motion and path planning. Users can access its actions and services from C++, Python, or through a GUI.
65 | 
66 | Features:
67 | - Manipulation.
68 | - Motion planning: the process of breaking down a desired movement task into discrete motions that satisfy movement constraints and possibly optimize some aspects of the movement.
69 | - 3D perception: allows the robot to extract 3D information from the world and its own movements so that it can accomplish navigation and manipulation tasks.
70 | - Kinematics: geometry for moving the arms.
71 | - Control and navigation: underneath, it uses OMPL (Open Motion Planning Library) and requires a controller to send messages to the hardware. MoveIt provides a Fake Controller to interact with the hardware using ROS messages, but you can replace it with your own plugin which controls the robot if needed.
72 | 
73 | The planning scene feature allows you to monitor robot state, sensor data, and world geometry information.
--------------------------------------------------------------------------------
/intera.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) 2013-2016, Rethink Robotics
3 | # All rights reserved.
4 | 
5 | # This file is to be used in the *root* of your Catkin workspace.
6 | 
7 | # This is a convenient script which will set up your ROS environment and
8 | # should be executed with every new instance of a shell in which you plan on
9 | # working with Intera.
10 | 
11 | # Clear any previously set your_ip/your_hostname
12 | unset your_ip
13 | unset your_hostname
14 | #-----------------------------------------------------------------------------#
15 | #                 USER CONFIGURABLE ROS ENVIRONMENT VARIABLES                  #
16 | #-----------------------------------------------------------------------------#
17 | # Note: If ROS_MASTER_URI, ROS_IP, or ROS_HOSTNAME environment variables were
18 | # previously set (typically in your .bashrc or .bash_profile), those settings
19 | # will be overwritten by any variables set here.
20 | 
21 | # Specify Robot's hostname
22 | robot_hostname="paule.local"
23 | 
24 | # Set *Either* your computer's ip address or hostname. Please note if using
25 | # your_hostname that this must be resolvable to the Robot.
26 | # your_ip="127.0.XXX.XXX"
27 | your_hostname=`hostname`
28 | 
29 | 
30 | # Specify ROS distribution (e.g. indigo, hydro, etc.)
31 | ros_version="kinetic"
32 | #-----------------------------------------------------------------------------#
33 | 
34 | tf=$(mktemp)
35 | trap "rm -f -- '${tf}'" EXIT
36 | 
37 | # If this file specifies an ip address or hostname - unset any previously set
38 | # ROS_IP and/or ROS_HOSTNAME.
39 | # If this file does not specify an ip address or hostname - use the
40 | # previously specified ROS_IP/ROS_HOSTNAME environment variables.
41 | if [ -n "${your_ip}" ] || [ -n "${your_hostname}" ]; then 42 | unset ROS_IP && unset ROS_HOSTNAME 43 | else 44 | your_ip="${ROS_IP}" && your_hostname="${ROS_HOSTNAME}" 45 | fi 46 | 47 | # If argument provided, set robot_hostname to argument 48 | # If argument is sim or local, set robot_hostname to localhost 49 | if [ -n "${1}" ]; then 50 | if [[ "${1}" == "sim" ]] || [[ "${1}" == "local" ]]; then 51 | robot_hostname="localhost" 52 | if [[ -z ${your_ip} || "${your_ip}" == "192.168.XXX.XXX" ]] && \ 53 | [[ -z ${your_hostname} || "${your_hostname}" == "my_computer.local" ]]; then 54 | your_hostname="localhost" 55 | your_ip="" 56 | fi 57 | else 58 | robot_hostname="${1}" 59 | fi 60 | fi 61 | 62 | topdir=$(basename $(readlink -f $(dirname ${BASH_SOURCE[0]}))) 63 | 64 | cat <<-EOF > ${tf} 65 | [ -s "\${HOME}"/.bashrc ] && source "\${HOME}"/.bashrc 66 | [ -s "\${HOME}"/.bash_profile ] && source "\${HOME}"/.bash_profile 67 | 68 | # verify this script is moved out of intera_sdk folder 69 | if [[ -e "${topdir}/intera_sdk/package.xml" ]]; then 70 | echo -ne "EXITING - This script must be moved from the intera_sdk folder \ 71 | to the root of your catkin workspace.\n" 72 | exit 1 73 | fi 74 | 75 | # verify ros_version lowercase 76 | ros_version="$(tr [A-Z] [a-z] <<< "${ros_version}")" 77 | 78 | # check for ros installation 79 | if [ ! -d "/opt/ros" ] || [ ! "$(ls -A /opt/ros)" ]; then 80 | echo "EXITING - No ROS installation found in /opt/ros." 81 | echo "Is ROS installed?" 82 | exit 1 83 | fi 84 | 85 | # if set, verify user has modified the robot_hostname 86 | if [ -n ${robot_hostname} ] && \ 87 | [[ "${robot_hostname}" == "robot_hostname.local" ]]; then 88 | echo -ne "EXITING - Please edit this file, modifying the \ 89 | 'robot_hostname' variable to reflect your Robot's current hostname.\n" 90 | exit 1 91 | fi 92 | 93 | # if set, verify user has modified their ip address (your_ip) 94 | if [ -n ${your_ip} ] && [[ "${your_ip}" == "192.168.XXX.XXX" ]]; then 95 | echo -ne "EXITING - Please edit this file, modifying the 'your_ip' \ 96 | variable to reflect your current IP address.\n" 97 | exit 1 98 | fi 99 | 100 | # if set, verify user has modified their computer hostname (your_hostname) 101 | if [ -n ${your_hostname} ] && \ 102 | [[ "${your_hostname}" == "my_computer.local" ]]; then 103 | echo -ne "EXITING - Please edit this file, modifying the \ 104 | 'your_hostname' variable to reflect your current PC hostname.\n" 105 | exit 1 106 | fi 107 | 108 | # verify user does not have both their ip *and* hostname set 109 | if [ -n "${your_ip}" ] && [ -n "${your_hostname}" ]; then 110 | echo -ne "EXITING - Please edit this file, modifying to specify \ 111 | *EITHER* your_ip or your_hostname.\n" 112 | exit 1 113 | fi 114 | 115 | # verify that one of your_ip, your_hostname, ROS_IP, or ROS_HOSTNAME is set 116 | if [ -z "${your_ip}" ] && [ -z "${your_hostname}" ]; then 117 | echo -ne "EXITING - Please edit this file, modifying to specify \ 118 | your_ip or your_hostname.\n" 119 | exit 1 120 | fi 121 | 122 | # verify specified ros version is installed 123 | ros_setup="/opt/ros/\${ros_version}" 124 | if [ ! -d "\${ros_setup}" ]; then 125 | echo -ne "EXITING - Failed to find ROS \${ros_version} installation \ 126 | in \${ros_setup}.\n" 127 | exit 1 128 | fi 129 | 130 | # verify the ros setup.sh file exists 131 | if [ ! 
-s "\${ros_setup}"/setup.sh ]; then 132 | echo -ne "EXITING - Failed to find the ROS environment script: \ 133 | "\${ros_setup}"/setup.sh.\n" 134 | exit 1 135 | fi 136 | 137 | # verify the user is running this script in the root of the catkin 138 | # workspace and that the workspace has been compiled. 139 | if [ ! -s "devel/setup.bash" ]; then 140 | echo -ne "EXITING - \n1) Please verify that this script is being run \ 141 | in the root of your catkin workspace.\n2) Please verify that your workspace \ 142 | has been built (source /opt/ros/\${ros_version}/setup.sh; catkin_make).\n\ 143 | 3) Run this script again upon completion of your workspace build.\n" 144 | exit 1 145 | fi 146 | 147 | [ -n "${your_ip}" ] && export ROS_IP="${your_ip}" 148 | [ -n "${your_hostname}" ] && export ROS_HOSTNAME="${your_hostname}" 149 | [ -n "${robot_hostname}" ] && \ 150 | export ROS_MASTER_URI="http://${robot_hostname}:11311" 151 | 152 | # source the catkin setup bash script 153 | source devel/setup.bash 154 | 155 | # setup the bash prompt 156 | export __ROS_PROMPT=\${__ROS_PROMPT:-0} 157 | [ \${__ROS_PROMPT} -eq 0 -a -n "\${PROMPT_COMMAND}" ] && \ 158 | export __ORIG_PROMPT_COMMAND=\${PROMPT_COMMAND} 159 | 160 | __ros_prompt () { 161 | if [ -n "\${__ORIG_PROMPT_COMMAND}" ]; then 162 | eval \${__ORIG_PROMPT_COMMAND} 163 | fi 164 | if ! echo \${PS1} | grep '\[intera' &>/dev/null; then 165 | export PS1="\[\033[00;33m\][intera - \ 166 | \${ROS_MASTER_URI}]\[\033[00m\] \${PS1}" 167 | fi 168 | } 169 | 170 | if [ "\${TERM}" != "dumb" ]; then 171 | export PROMPT_COMMAND=__ros_prompt 172 | __ROS_PROMPT=1 173 | elif ! echo \${PS1} | grep '\[intera' &>/dev/null; then 174 | export PS1="[intera - \${ROS_MASTER_URI}] \${PS1}" 175 | fi 176 | 177 | EOF 178 | 179 | ${SHELL} --rcfile ${tf} 180 | 181 | rm -f -- "${tf}" 182 | trap - EXIT 183 | 184 | # vim: noet 185 | -------------------------------------------------------------------------------- /resources/Images/cube-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/resources/Images/cube-blue.png -------------------------------------------------------------------------------- /resources/Images/cube-green.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/resources/Images/cube-green.png -------------------------------------------------------------------------------- /resources/Images/cube-red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/resources/Images/cube-red.png -------------------------------------------------------------------------------- /resources/azure_cv_resources.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "accounts_robotics_computer_vision_name": { 6 | "defaultValue": "robotics-computer-vision", 7 | "type": "String" 8 | } 9 | }, 10 | "variables": {}, 11 | "resources": [ 12 | { 13 | "type": "Microsoft.CognitiveServices/accounts", 14 | "apiVersion": "2017-04-18", 15 | "name": "[parameters('accounts_robotics_computer_vision_name')]", 
16 | "location": "westus", 17 | "sku": { 18 | "name": "F0" 19 | }, 20 | "kind": "ComputerVision", 21 | "properties": {} 22 | } 23 | ] 24 | } -------------------------------------------------------------------------------- /resources/luis_azure_template.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", 3 | "contentVersion": "1.0.0.0", 4 | "parameters": { 5 | "accounts_robotics_luis_name": { 6 | "defaultValue": "robotics-luis", 7 | "type": "String" 8 | } 9 | }, 10 | "variables": {}, 11 | "resources": [ 12 | { 13 | "type": "Microsoft.CognitiveServices/accounts", 14 | "apiVersion": "2017-04-18", 15 | "name": "[parameters('accounts_robotics_luis_name')]", 16 | "location": "westus", 17 | "sku": { 18 | "name": "F0" 19 | }, 20 | "kind": "LUIS", 21 | "properties": {} 22 | } 23 | ] 24 | } -------------------------------------------------------------------------------- /resources/robotics-demo-luis-app.json: -------------------------------------------------------------------------------- 1 | { 2 | "luis_schema_version": "3.0.0", 3 | "versionId": "0.1", 4 | "name": "robotics-demo", 5 | "desc": "", 6 | "culture": "en-us", 7 | "intents": [ 8 | { 9 | "name": "MoveArm" 10 | }, 11 | { 12 | "name": "MoveGrippers" 13 | }, 14 | { 15 | "name": "None" 16 | }, 17 | { 18 | "name": "ShowStats" 19 | } 20 | ], 21 | "entities": [], 22 | "composites": [], 23 | "closedLists": [ 24 | { 25 | "name": "action", 26 | "subLists": [ 27 | { 28 | "canonicalForm": "open", 29 | "list": [] 30 | }, 31 | { 32 | "canonicalForm": "close", 33 | "list": [] 34 | } 35 | ], 36 | "roles": [] 37 | } 38 | ], 39 | "patternAnyEntities": [], 40 | "regex_entities": [], 41 | "prebuiltEntities": [], 42 | "model_features": [], 43 | "regex_features": [], 44 | "patterns": [], 45 | "utterances": [ 46 | { 47 | "text": "can you move your arm?", 48 | "intent": "MoveArm", 49 | "entities": [] 50 | }, 51 | { 52 | "text": "close the grippers", 53 | "intent": "MoveGrippers", 54 | "entities": [] 55 | }, 56 | { 57 | "text": "move arm", 58 | "intent": "MoveArm", 59 | "entities": [] 60 | }, 61 | { 62 | "text": "move arm robot", 63 | "intent": "MoveArm", 64 | "entities": [] 65 | }, 66 | { 67 | "text": "move your arm in circles", 68 | "intent": "MoveArm", 69 | "entities": [] 70 | }, 71 | { 72 | "text": "open grippers", 73 | "intent": "MoveGrippers", 74 | "entities": [] 75 | }, 76 | { 77 | "text": "please move your arm", 78 | "intent": "MoveArm", 79 | "entities": [] 80 | }, 81 | { 82 | "text": "show me your statistics", 83 | "intent": "ShowStats", 84 | "entities": [] 85 | }, 86 | { 87 | "text": "show stats", 88 | "intent": "ShowStats", 89 | "entities": [] 90 | }, 91 | { 92 | "text": "wave arm", 93 | "intent": "MoveArm", 94 | "entities": [] 95 | } 96 | ] 97 | } -------------------------------------------------------------------------------- /setup/robot-challenge-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Debug - Break on error 4 | set -e 5 | 6 | # Hack: Clear any file locks 7 | sudo rm /var/lib/apt/lists/lock 8 | 9 | # Color it 10 | NC='\033[0m' # No Color 11 | RED='\033[0;31m' 12 | GREEN='\033[0;32m' 13 | YELLOW='\033[1;33m' 14 | BLUE='\033[0;34m' 15 | 16 | # Time it 17 | scriptstart=$(date +%s) 18 | 19 | # 20 | # ROS image for Sawyer development 21 | # 22 | 23 | # author="Paul Stubbs (pstubbs@microsoft.com)" 24 | # description="Microsoft Robot Challenge 
2018" 25 | # version="1.0" 26 | 27 | # 28 | # 29 | # 30 | # Install Bot Framework + Emulator 31 | echo -e ${GREEN} 32 | echo -e "***\n***\n***\n***\nInstall bot framework + Emulator\n***\n***\n***\n***" 33 | echo -e ${NC} 34 | # Time it 35 | start=$(date +%s) 36 | # 37 | # 38 | 39 | sudo apt-get update 40 | # install unmet dependencies 41 | sudo apt-get -f install -y 42 | sudo apt-get -f install libindicator7 -y 43 | sudo apt-get -f install libappindicator1 -y 44 | # install any unmet dependencies 45 | sudo apt-get -f install -y 46 | sudo apt-get install -y wget gconf-service 47 | 48 | TEMP_DEB="$(mktemp)" 49 | wget -O "$TEMP_DEB" 'https://roboticslabstorage.blob.core.windows.net/github-assets/botframework-emulator_4.0.15-alpha_amd64.deb' 50 | sudo dpkg -i "$TEMP_DEB" 51 | rm -f "$TEMP_DEB" 52 | # install any unmet dependencies 53 | sudo apt-get -f install -y 54 | sudo apt autoremove -y 55 | 56 | # Install Python 3.6 57 | echo -e ${GREEN} 58 | echo -e "***\n***\n***\n***\nInstall Python 3.6\n***\n***\n***\n***" 59 | echo -e ${NC} 60 | sudo add-apt-repository -y ppa:deadsnakes/ppa 61 | sudo apt-get update -y 62 | sudo apt-get install -y python3.6 63 | sudo apt-get install -y python3-pip 64 | # install any unmet dependencies 65 | sudo apt-get -f install -y 66 | python3.6 -m pip install --upgrade pip 67 | 68 | # Install Bot Framework Deps 69 | echo -e ${GREEN} 70 | echo -e "***\n***\n***\n***\nInstall bot framework Dependencies\n***\n***\n***\n***" 71 | echo -e ${NC} 72 | 73 | python3.6 -m pip install --user aiohttp==3.5.1 74 | python3.6 -m pip install --user requests 75 | python3.6 -m pip install --user botbuilder.schema 76 | python3.6 -m pip install --user botbuilder.core 77 | 78 | # Time it 79 | end=$(date +%s) 80 | runtime=$(python -c "print '%u:%02u' % ((${end} - ${start})/60, (${end} - ${start})%60)") 81 | echo -e ${BLUE} 82 | echo -e "Elapsed Time: ${runtime}" 83 | echo -e ${NC} 84 | 85 | # 86 | # 87 | # 88 | # Install ROS 89 | echo -e ${GREEN} 90 | echo -e "***\n***\n***\n***\nInstall ROS\n***\n***\n***\n***" 91 | echo -e ${NC} 92 | # Time it 93 | start=$(date +%s) 94 | # 95 | # 96 | # http://sdk.rethinkrobotics.com/intera/Workstation_Setup 97 | 98 | # Update to lateset software lists 99 | echo -e ${GREEN} 100 | echo -e "***\n***\n***\n***\nUpdate to lateset software lists\n***\n***\n***\n***" 101 | echo -e ${NC} 102 | 103 | # Install some common CLI tools 104 | sudo apt-get update -y 105 | sudo apt-get install -y software-properties-common curl 106 | 107 | sudo -E apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654 108 | 109 | curl -sSL 'http://keyserver.ubuntu.com/pks/lookup?op=get&search=0xC1CF6E31E6BADE8868B172B4F42ED6FBAB17C654' | sudo apt-key add - 110 | 111 | # Configure Ubuntu repositories. 
Setup sources.list 112 | sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu xenial main" > /etc/apt/sources.list.d/ros-latest.list' 113 | 114 | # Setup keys 115 | #sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116 116 | 117 | 118 | # Install ROS Kinetic Desktop Full 119 | echo -e ${GREEN} 120 | echo -e "***\n***\n***\n***\n Install ROS Kinetic Desktop Full \n***\n***\n***\n***" 121 | echo -e ${NC} 122 | 123 | sudo apt-get update -y 124 | sudo apt-get install -y ros-kinetic-desktop-full 125 | 126 | # Initialize rosdep 127 | sudo rosdep init || echo -e "${YELLOW}ROSDep Already Exists.${NC}" 128 | rosdep update 129 | 130 | # Install rosinstall 131 | sudo apt-get install -y python-rosinstall 132 | 133 | 134 | # Time it 135 | end=$(date +%s) 136 | runtime=$(python -c "print '%u:%02u' % ((${end} - ${start})/60, (${end} - ${start})%60)") 137 | echo -e ${BLUE} 138 | echo -e "Elapsed Time: ${runtime}" 139 | echo -e ${NC} 140 | 141 | 142 | # 143 | # 144 | # 145 | # Install Intera SDK Dependencies 146 | echo -e ${GREEN} 147 | echo -e "***\n***\n***\n***\nInstall Intera SDK Dependencies\n***\n***\n***\n***" 148 | echo -e ${NC} 149 | # Time it 150 | start=$(date +%s) 151 | # 152 | # 153 | # 154 | 155 | # Install SDK Dependencies 156 | # Update to latest software lists 157 | 158 | sudo apt-get update 159 | sudo apt-get install -y \ 160 | git-core \ 161 | python-argparse \ 162 | python-wstool \ 163 | python-vcstools \ 164 | python-rosdep \ 165 | ros-kinetic-control-msgs \ 166 | ros-kinetic-joystick-drivers \ 167 | ros-kinetic-xacro \ 168 | ros-kinetic-tf2-ros \ 169 | ros-kinetic-rviz \ 170 | ros-kinetic-cv-bridge \ 171 | ros-kinetic-actionlib \ 172 | ros-kinetic-actionlib-msgs \ 173 | ros-kinetic-dynamic-reconfigure \ 174 | ros-kinetic-trajectory-msgs \ 175 | ros-kinetic-rospy-message-converter 176 | 177 | # Time it 178 | end=$(date +%s) 179 | runtime=$(python -c "print '%u:%02u' % ((${end} - ${start})/60, (${end} - ${start})%60)") 180 | echo -e ${BLUE} 181 | echo -e "Elapsed Time: ${runtime}" 182 | echo -e ${NC} 183 | 184 | 185 | cd ~/AI-Robot-Challenge-Lab 186 | source /opt/ros/kinetic/setup.bash 187 | rosdep install --from-paths src --ignore-src -r -y 188 | catkin build 189 | 190 | # http://sdk.rethinkrobotics.com/intera/Gazebo_Tutorial 191 | # 192 | # 193 | # 194 | # Configure Sawyer with Gazebo 195 | # 196 | # 197 | # 198 | 199 | # Install Prerequisites 200 | # Update to latest software lists 201 | echo -e ${GREEN} 202 | echo -e "***\n***\n***\n***\nUpdate to latest software lists\n***\n***\n***\n***" 203 | echo -e ${NC} 204 | # Time it 205 | start=$(date +%s) 206 | # 207 | # 208 | # 209 | sudo apt-get update -y 210 | sudo apt-get install -y \ 211 | gazebo7 \ 212 | ros-kinetic-qt-build \ 213 | ros-kinetic-gazebo-ros-control \ 214 | ros-kinetic-gazebo-ros-pkgs \ 215 | ros-kinetic-ros-control \ 216 | ros-kinetic-control-toolbox \ 217 | ros-kinetic-realtime-tools \ 218 | ros-kinetic-ros-controllers \ 219 | ros-kinetic-xacro \ 220 | python-wstool \ 221 | ros-kinetic-tf-conversions \ 222 | ros-kinetic-kdl-parser \ 223 | ros-kinetic-sns-ik-lib 224 | 225 | # Time it 226 | end=$(date +%s) 227 | runtime=$(python -c "print '%u:%02u' % ((${end} - ${start})/60, (${end} - ${start})%60)") 228 | echo -e ${BLUE} 229 | echo -e "Elapsed Time: ${runtime}" 230 | echo -e ${NC} 231 | 232 | 233 | # 234 | # 235 | # 236 | # Install other tools 237 | # 238 | # 239 | # 240 | 241 | # 242 | # 243 | # 244 | # Install Google Chrome Browser 245 | echo -e
${GREEN} 246 | echo -e "***\n***\n***\n***\nInstall Google Chrome Browser\n***\n***\n***\n***" 247 | echo -e ${NC} 248 | # Time it 249 | start=$(date +%s) 250 | # 251 | # 252 | # 253 | #https://www.linuxbabe.com/ubuntu/install-google-chrome-ubuntu-16-04-lts 254 | sudo su -c "echo 'deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main' >> /etc/apt/sources.list" 255 | TEMP_DEB="$(mktemp)" 256 | wget -O "$TEMP_DEB" 'https://dl.google.com/linux/linux_signing_key.pub' 257 | sudo apt-key add "$TEMP_DEB" 258 | rm -f "$TEMP_DEB" 259 | sudo apt-get update 260 | sudo apt-get install -y google-chrome-stable 261 | 262 | 263 | # Time it 264 | end=$(date +%s) 265 | runtime=$(python -c "print '%u:%02u' % ((${end} - ${start})/60, (${end} - ${start})%60)") 266 | echo -e ${BLUE} 267 | echo -e "Elapsed Time: ${runtime}" 268 | echo -e ${NC} 269 | 270 | 271 | # 272 | # 273 | # 274 | # Install Visual Studio Code 275 | echo -e ${GREEN} 276 | echo -e "***\n***\n***\n***\nInstall Visual Studio Code\n***\n***\n***\n***" 277 | echo -e ${NC} 278 | # Time it 279 | start=$(date +%s) 280 | # 281 | # 282 | # 283 | #https://tecadmin.net/install-visual-studio-code-editor-ubuntu/ 284 | sudo su -c "echo 'deb [arch=amd64] http://packages.microsoft.com/repos/vscode stable main' >> /etc/apt/sources.list.d/vscode.list" 285 | curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > microsoft.gpg 286 | sudo mv microsoft.gpg /etc/apt/trusted.gpg.d/microsoft.gpg 287 | sudo apt-get update 288 | sudo apt-get install -y code 289 | 290 | # Time it 291 | end=$(date +%s) 292 | runtime=$(python -c "print '%u:%02u' % ((${end} - ${start})/60, (${end} - ${start})%60)") 293 | echo -e ${BLUE} 294 | echo -e "Elapsed Time: ${runtime}" 295 | echo -e ${NC} 296 | 297 | # 298 | # 299 | # 300 | # Update the machine 301 | echo -e ${GREEN} 302 | echo -e "***\n***\n***\n***\nUpdate the Machine\n***\n***\n***\n***" 303 | echo -e ${NC} 304 | # 305 | # 306 | # 307 | sudo apt-get update 308 | sudo apt-get upgrade -y 309 | sudo apt-get dist-upgrade -y 310 | sudo apt-get autoremove -y 311 | sudo apt-get autoclean -y 312 | 313 | # Time it 314 | start=${scriptstart} 315 | end=$(date +%s) 316 | runtime=$(python -c "print '%u:%02u' % ((${end} - ${start})/60, (${end} - ${start})%60)") 317 | echo -e ${BLUE} 318 | echo -e "Total Elapsed Time: ${runtime}" 319 | echo -e ${NC} -------------------------------------------------------------------------------- /src/chatbot/SawyerBot.bot: -------------------------------------------------------------------------------- 1 | { 2 | "name": "RoboticsBot", 3 | "description": "", 4 | "secretKey": "", 5 | "services": [ 6 | { 7 | "appId": "", 8 | "id": "http://localhost:9000/api/messages", 9 | "type": "endpoint", 10 | "appPassword": "", 11 | "endpoint": "http://localhost:9000/api/messages", 12 | "name": "RoboticsBot" 13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /src/chatbot/bot-move-grippers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2013-2018, Rethink Robotics Inc. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | """ 18 | SDK Gripper Example: open or close the gripper from the command line 19 | """ 20 | import argparse 21 | 22 | import rospy 23 | import time 24 | 25 | import intera_interface 26 | import intera_external_devices 27 | from intera_interface import CHECK_VERSION 28 | 29 | def move_gripper(limb, action): 30 | # initialize interfaces 31 | print("Getting robot state...") 32 | rs = intera_interface.RobotEnable(CHECK_VERSION) 33 | init_state = rs.state() 34 | gripper = None 35 | original_deadzone = None 36 | 37 | def clean_shutdown(): 38 | if gripper and original_deadzone: 39 | gripper.set_dead_zone(original_deadzone) 40 | print("Finished.") 41 | 42 | try: 43 | gripper = intera_interface.Gripper(limb + '_gripper') 44 | except (ValueError, OSError) as e: 45 | rospy.logerr("Could not detect an electric gripper attached to the robot.") 46 | clean_shutdown() 47 | return 48 | rospy.on_shutdown(clean_shutdown) 49 | 50 | original_deadzone = gripper.get_dead_zone() 51 | # WARNING: setting the deadzone below this can cause oscillations in 52 | # the gripper position. However, setting the deadzone to this 53 | # value was required for the incremental commands in the original keyboard example 54 | gripper.set_dead_zone(0.001) 55 | rospy.loginfo("Gripper deadzone set to {}".format(gripper.get_dead_zone())) 56 | rospy.loginfo("Enabling robot...") 57 | rs.enable() 58 | print("Controlling grippers.") 59 | if (action == "open"): 60 | gripper.open() 61 | rospy.sleep(1.0) 62 | print("Opened grippers") 63 | elif (action == "close"): 64 | gripper.close() 65 | rospy.sleep(1.0) 66 | print("Closed grippers") 67 | 68 | # force shutdown once the action completes 69 | rospy.signal_shutdown("Example finished.") 70 | 71 | 72 | def main(): 73 | """SDK Gripper Example: send a command to control the grippers. 74 | 75 | Run this example to open or close the gripper from the 76 | command line via the --action argument. 77 | Uses the intera_interface.Gripper class to send the 78 | open/close command to the robot. 79 | """ 80 | epilog = """ 81 | Pass '--action open' or '--action close' to control the gripper. 82 | """ 83 | rp = intera_interface.RobotParams() 84 | valid_limbs = rp.get_limb_names() 85 | if not valid_limbs: 86 | rp.log_message(("Cannot detect any limb parameters on this robot. " 87 | "Exiting."), "ERROR") 88 | return 89 | limb = valid_limbs[0] 90 | print("Using limb: {}.".format(limb)) 91 | arg_fmt = argparse.RawDescriptionHelpFormatter 92 | parser = argparse.ArgumentParser(formatter_class=arg_fmt, 93 | description=main.__doc__, 94 | epilog=epilog) 95 | parser.add_argument( 96 | "-a", "--action", dest="action", default="open", 97 | choices=["open", "close"], 98 | help="Action to perform with the gripper. Options: close or open" 99 | ) 100 | args = parser.parse_args(rospy.myargv()[1:]) 101 | 102 | print("Initializing node...
") 103 | rospy.init_node("sdk_gripper_keyboard") 104 | 105 | move_gripper(limb, args.action) 106 | 107 | 108 | if __name__ == '__main__': 109 | main() 110 | -------------------------------------------------------------------------------- /src/chatbot/bot-stats-node.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import rospy 5 | import intera_interface 6 | 7 | from std_msgs.msg import String 8 | from intera_interface import CHECK_VERSION 9 | 10 | 11 | def main(): 12 | rospy.init_node('Demo_Stats') 13 | rs = intera_interface.RobotEnable(CHECK_VERSION) 14 | state = rs.state() 15 | print state 16 | rospy.signal_shutdown("Demo_Stats finished.") 17 | 18 | 19 | if __name__ == '__main__': 20 | sys.exit(main()) 21 | 22 | -------------------------------------------------------------------------------- /src/chatbot/bot-wave-arm-node.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import json 4 | 5 | # rospy - ROS Python API 6 | import rospy 7 | 8 | # intera_interface - Sawyer Python API 9 | import intera_interface 10 | 11 | # initialize our ROS node, registering it with the Master 12 | rospy.init_node('Hello_Sawyer') 13 | 14 | # create an instance of intera_interface's Limb class 15 | limb = intera_interface.Limb('right') 16 | 17 | # get the right limb's current joint angles 18 | angles = limb.joint_angles() 19 | 20 | # print the current joint angles as valid json 21 | print json.dumps(angles) 22 | 23 | # move to neutral pose 24 | limb.move_to_neutral() 25 | 26 | # get the right limb's current joint angles now that it is in neutral 27 | angles = limb.joint_angles() 28 | 29 | # print the current joint angles again as valid json 30 | print json.dumps(angles) 31 | 32 | 33 | # reassign new joint angles (all zeros) which we will later command to the limb 34 | angles['right_j0']=0.0 35 | angles['right_j1']=0.0 36 | angles['right_j2']=0.0 37 | angles['right_j3']=0.0 38 | angles['right_j4']=0.0 39 | angles['right_j5']=0.0 40 | angles['right_j6']=0.0 41 | 42 | # print the joint angle command as valid json 43 | print json.dumps(angles) 44 | 45 | # move the right arm to those joint angles 46 | limb.move_to_joint_positions(angles) 47 | 48 | # Sawyer wants to say hello, let's wave the arm 49 | 50 | # store the first wave position 51 | wave_1 = {'right_j6': -1.5126, 'right_j5': -0.3438, 'right_j4': 1.5126, 'right_j3': -1.3833, 'right_j2': 0.03726, 'right_j1': 0.3526, 'right_j0': -0.4259} 52 | 53 | # store the second wave position 54 | wave_2 = {'right_j6': -1.5101, 'right_j5': -0.3806, 'right_j4': 1.5103, 'right_j3': -1.4038, 'right_j2': -0.2609, 'right_j1': 0.3940, 'right_j0': -0.4281} 55 | 56 | 57 | # wave three times 58 | for _move in range(3): 59 | limb.move_to_joint_positions(wave_1) 60 | rospy.sleep(0.5) 61 | limb.move_to_joint_positions(wave_2) 62 | rospy.sleep(0.5) 63 | -------------------------------------------------------------------------------- /src/chatbot/talk-to-my-robot-completed.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | import re 4 | from io import BytesIO 5 | 6 | # Used to call Python 2.7 from 3.6 7 | import subprocess 8 | 9 | from aiohttp import web 10 | from botbuilder.core import ( 11 | BotFrameworkAdapter, BotFrameworkAdapterSettings, TurnContext) 12 | from botbuilder.schema import (Activity, ActivityTypes) 13 | 14 | # Settings 15 | LUIS_APP_ID = 
'UPDATE_THIS_KEY' 16 | LUIS_SUBSCRIPTION_KEY = 'UPDATE_THIS_KEY' 17 | COMPUTER_VISION_SUBSCRIPTION_KEY = "UPDATE_THIS_KEY" 18 | 19 | COMPUTER_VISION_ANALYZE_URL = "https://westus.api.cognitive.microsoft.com/vision/v2.0/analyze" 20 | BOT_APP_ID = '' 21 | BOT_APP_PASSWORD = '' 22 | SETTINGS = BotFrameworkAdapterSettings(BOT_APP_ID, BOT_APP_PASSWORD) 23 | ADAPTER = BotFrameworkAdapter(SETTINGS) 24 | 25 | SIM_API_HOST = 'http://localhost:5000' 26 | 27 | import os 28 | SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) 29 | ROOT_DIR = os.path.join(SCRIPT_DIR, "..", "..") 30 | 31 | class ComputerVisionApiService: 32 | @staticmethod 33 | def analyze_image(image_url): 34 | # Analyze Image Request Headers and Parameters (do not uncomment this line) 35 | headers = { 36 | 'Ocp-Apim-Subscription-Key': COMPUTER_VISION_SUBSCRIPTION_KEY, 37 | 'Content-Type': 'application/octet-stream' 38 | } 39 | params = {'visualFeatures': 'Color'} 40 | 41 | # Get Image Bytes Content (do not uncomment this line) 42 | image_data = BytesIO(requests.get(image_url).content) 43 | 44 | try: 45 | # Process Image (do not uncomment this line) 46 | print('Processing image: {}'.format(image_url)) 47 | response = requests.post( 48 | COMPUTER_VISION_ANALYZE_URL, headers=headers, params=params, data=image_data) 49 | response.raise_for_status() 50 | analysis = response.json() 51 | dominant_color = analysis["color"]["dominantColors"][0] 52 | 53 | return dominant_color 54 | 55 | except Exception as e: 56 | print("[Errno {0}] {1}".format(e.errno, e.strerror)) 57 | 58 | 59 | class LuisResponse(): 60 | def __init__(self, intent, entity_value=None, entity_type=None): 61 | self.intent = intent 62 | self.entity_value = entity_value 63 | self.entity_type = entity_type 64 | 65 | 66 | class LuisApiService: 67 | @staticmethod 68 | def post_utterance(message): 69 | # Post Utterance Request Headers and Params (do not uncomment this line) 70 | headers = {'Ocp-Apim-Subscription-Key': LUIS_SUBSCRIPTION_KEY} 71 | params = { 72 | # Query parameter 73 | 'q': message, 74 | # Optional request parameters, set to default values 75 | 'timezoneOffset': '0', 76 | 'verbose': 'false', 77 | 'spellCheck': 'false', 78 | 'staging': 'false', 79 | } 80 | 81 | try: 82 | # LUIS Response (do not uncomment this line) 83 | r = requests.get('https://westus.api.cognitive.microsoft.com/luis/v2.0/apps/%s' % 84 | LUIS_APP_ID, headers=headers, params=params) 85 | topScoreIntent = r.json()['topScoringIntent'] 86 | entities = r.json()['entities'] 87 | intent = topScoreIntent['intent'] if topScoreIntent['score'] > 0.5 else 'None' 88 | entity = entities[0] if len(entities) > 0 else None 89 | 90 | return LuisResponse(intent, entity['entity'], entity['type']) if entity else LuisResponse(intent) 91 | 92 | except Exception as e: 93 | print("[Errno {0}] {1}".format(e.errno, e.strerror)) 94 | 95 | 96 | class BotRequestHandler: 97 | async def create_reply_activity(request_activity, text) -> Activity: 98 | return Activity( 99 | type=ActivityTypes.message, 100 | channel_id=request_activity.channel_id, 101 | conversation=request_activity.conversation, 102 | recipient=request_activity.from_property, 103 | from_property=request_activity.recipient, 104 | text=text, 105 | service_url=request_activity.service_url) 106 | 107 | async def handle_message(context: TurnContext) -> web.Response: 108 | activity = context.activity 109 | if activity.text: 110 | # Get LUIS result (do not uncomment this line) 111 | luis_result = LuisApiService.post_utterance(activity.text) 112 | 113 | response = await 
BotRequestHandler.create_reply_activity(activity, f'Top Intent: {luis_result.intent}.') 114 | await context.send_activity(response) 115 | 116 | if luis_result.intent == 'MoveArm': 117 | BotCommandHandler.move_arm() 118 | 119 | print("MoveArm") 120 | elif luis_result.intent == 'MoveGrippers': 121 | # Set Move Grippers Handler (do not uncomment this line) 122 | BotCommandHandler.move_grippers(luis_result.entity_value) 123 | 124 | print("MoveGrippers") 125 | elif luis_result.intent == 'ShowStats': 126 | # Set Show Stats Handler (do not uncomment this line) 127 | stats = BotCommandHandler.show_stats() 128 | response = await BotRequestHandler.create_reply_activity(activity, stats) 129 | await context.send_activity(response) 130 | 131 | print("ShowStats") 132 | else: 133 | response = await BotRequestHandler.create_reply_activity(activity, 'Please provide a valid instruction') 134 | await context.send_activity(response) 135 | else: 136 | await BotRequestHandler.process_image(activity, context) 137 | 138 | return web.Response(status=202) 139 | 140 | async def handle_conversation_update(context: TurnContext) -> web.Response: 141 | if context.activity.members_added[0].id != context.activity.recipient.id: 142 | response = await BotRequestHandler.create_reply_activity(context.activity, 'Welcome to Sawyer Robot!') 143 | await context.send_activity(response) 144 | return web.Response(status=200) 145 | 146 | async def unhandled_activity() -> web.Response: 147 | return web.Response(status=404) 148 | 149 | async def process_image(activity: Activity, context: TurnContext): 150 | # Implement Process Image Method (do not uncomment this line) 151 | image_url = BotRequestHandler.get_image_url(activity.attachments) 152 | 153 | if image_url: 154 | dominant_color = ComputerVisionApiService.analyze_image(image_url) 155 | response = await BotRequestHandler.create_reply_activity(activity, f'Do you need a {dominant_color} cube? Let me find one for you!') 156 | await context.send_activity(response) 157 | BotCommandHandler.move_cube(dominant_color) 158 | else: 159 | response = await BotRequestHandler.create_reply_activity(activity, 'Please provide a valid instruction or image.') 160 | await context.send_activity(response) 161 | 162 | return None 163 | 164 | def get_image_url(attachments): 165 | p = re.compile('^image/(jpg|jpeg|png|gif)$') 166 | for attachment in attachments: 167 | rs = p.match(attachment.content_type) 168 | if rs: 169 | return attachment.content_url 170 | return None 171 | 172 | @staticmethod 173 | async def request_handler(context: TurnContext) -> web.Response: 174 | if context.activity.type == 'message': 175 | return await BotRequestHandler.handle_message(context) 176 | elif context.activity.type == 'conversationUpdate': 177 | return await BotRequestHandler.handle_conversation_update(context) 178 | else: 179 | return await BotRequestHandler.unhandled_activity() 180 | 181 | async def messages(req: web.web_request) -> web.Response: 182 | body = await req.json() 183 | activity = Activity().deserialize(body) 184 | auth_header = req.headers['Authorization'] if 'Authorization' in req.headers else '' 185 | try: 186 | return await ADAPTER.process_activity(activity, auth_header, BotRequestHandler.request_handler) 187 | except Exception as e: 188 | raise e 189 | 190 | 191 | class BotCommandHandler: 192 | def move_arm(): 193 | print('Moving arm... 
do something cool') 194 | # launch your python2 script using bash 195 | 196 | python2_command = "bash -c 'source "+ ROOT_DIR + "/devel/setup.bash; python2.7 " + ROOT_DIR +"/src/chatbot/bot-wave-arm-node.py'" 197 | process = subprocess.Popen( 198 | python2_command, stdout=subprocess.PIPE, shell=True) 199 | output, error = process.communicate() # receive output from the python2 script 200 | 201 | print('done moving arm . . .') 202 | print('returncode: ' + str(process.returncode)) 203 | print('output: ' + output.decode("utf-8")) 204 | 205 | def move_grippers(action): 206 | # Implement Move Grippers Command (do not uncomment this line) 207 | print(f'{action} grippers... wait a few seconds') 208 | # launch your python2 script using bash 209 | 210 | python2_command = "bash -c 'source "+ ROOT_DIR + "/devel/setup.bash; python2.7 " + ROOT_DIR +"/src/chatbot/bot-move-grippers.py -a {}'".format( 211 | action) 212 | process = subprocess.Popen( 213 | python2_command, stdout=subprocess.PIPE, shell=True) 214 | output, error = process.communicate() # receive output from the python2 script 215 | 216 | print('done moving grippers . . .') 217 | print('returncode: ' + str(process.returncode)) 218 | print('output: ' + output.decode("utf-8")) 219 | 220 | return None 221 | 222 | def show_stats(): 223 | # Implement Show Stats Command (do not uncomment this line) 224 | print('Showing stats... do something') 225 | # launch your python2 script using bash 226 | python2_command = "bash -c 'source "+ ROOT_DIR + "/devel/setup.bash; python2.7 " + ROOT_DIR +"/src/chatbot/bot-stats-node.py'" 227 | 228 | process = subprocess.Popen( 229 | python2_command, stdout=subprocess.PIPE, shell=True) 230 | output, error = process.communicate() # receive output from the python2 script 231 | result = output.decode("utf-8") 232 | 233 | print('done getting state . . .') 234 | print('returncode: ' + str(process.returncode)) 235 | print('output: ' + result + '\n') 236 | return result 237 | 238 | return None # REMOVE this line when you uncomment the line above 239 | 240 | def move_cube(color): 241 | # Implement Move Cube Command (do not uncomment this line) 242 | print(f'Moving {color} cube...') 243 | try: 244 | r = requests.get(f'{SIM_API_HOST}/put_block_into_tray/{color}/1') 245 | r.raise_for_status() 246 | print('done moving cube . . 
.') 247 | except Exception as e: 248 | print("[Errno {0}] {1}".format(e.errno, e.strerror)) 249 | 250 | return None 251 | 252 | 253 | app = web.Application() 254 | app.router.add_post('/api/messages', BotRequestHandler.messages) 255 | 256 | try: 257 | web.run_app(app, host='localhost', port=9000) 258 | print('Started http server on localhost:9000') 259 | except Exception as e: 260 | raise e 261 | -------------------------------------------------------------------------------- /src/chatbot/talk-to-my-robot.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | import re 4 | from io import BytesIO 5 | 6 | # Used to call Python 2.7 from 3.6 7 | import subprocess 8 | 9 | from aiohttp import web 10 | from botbuilder.core import ( 11 | BotFrameworkAdapter, BotFrameworkAdapterSettings, TurnContext) 12 | from botbuilder.schema import (Activity, ActivityTypes) 13 | 14 | 15 | # Settings 16 | LUIS_APP_ID = 'UPDATE_THIS_KEY' 17 | LUIS_SUBSCRIPTION_KEY = 'UPDATE_THIS_KEY' 18 | COMPUTER_VISION_SUBSCRIPTION_KEY = "UPDATE_THIS_KEY" 19 | 20 | COMPUTER_VISION_ANALYZE_URL = "https://westus.api.cognitive.microsoft.com/vision/v2.0/analyze" 21 | BOT_APP_ID = '' 22 | BOT_APP_PASSWORD = '' 23 | SETTINGS = BotFrameworkAdapterSettings(BOT_APP_ID, BOT_APP_PASSWORD) 24 | ADAPTER = BotFrameworkAdapter(SETTINGS) 25 | 26 | SIM_API_HOST = 'http://localhost:5000' 27 | 28 | import os 29 | SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) 30 | ROOT_DIR = os.path.join(SCRIPT_DIR, "..", "..") 31 | 32 | 33 | class ComputerVisionApiService: 34 | @staticmethod 35 | def analyze_image(image_url): 36 | # Analyze Image Request Headers and Parameters (do not uncomment this line) 37 | # headers = { 38 | # 'Ocp-Apim-Subscription-Key': COMPUTER_VISION_SUBSCRIPTION_KEY, 39 | # 'Content-Type': 'application/octet-stream' 40 | # } 41 | # params = {'visualFeatures': 'Color'} 42 | 43 | # Get Image Bytes Content (do not uncomment this line) 44 | #image_data = BytesIO(requests.get(image_url).content) 45 | 46 | try: 47 | # Process Image (do not uncomment this line) 48 | # print('Processing image: {}'.format(image_url)) 49 | # response = requests.post(COMPUTER_VISION_ANALYZE_URL, headers=headers, params=params, data=image_data) 50 | # response.raise_for_status() 51 | # analysis = response.json() 52 | # dominant_color = analysis["color"]["dominantColors"][0] 53 | 54 | # return dominant_color 55 | 56 | return None # REMOVE this line when you uncomment the line above 57 | except Exception as e: 58 | print("[Errno {0}] {1}".format(e.errno, e.strerror)) 59 | 60 | 61 | class LuisResponse(): 62 | def __init__(self, intent, entity_value=None, entity_type=None): 63 | self.intent = intent 64 | self.entity_value = entity_value 65 | self.entity_type = entity_type 66 | 67 | 68 | class LuisApiService: 69 | @staticmethod 70 | def post_utterance(message): 71 | # Post Utterance Request Headers and Params (do not uncomment this line) 72 | # headers = {'Ocp-Apim-Subscription-Key': LUIS_SUBSCRIPTION_KEY} 73 | # params = { 74 | # # Query parameter 75 | # 'q': message, 76 | # # Optional request parameters, set to default values 77 | # 'timezoneOffset': '0', 78 | # 'verbose': 'false', 79 | # 'spellCheck': 'false', 80 | # 'staging': 'false', 81 | # } 82 | 83 | try: 84 | # LUIS Response (do not uncomment this line) 85 | # r = requests.get('https://westus.api.cognitive.microsoft.com/luis/v2.0/apps/%s' % LUIS_APP_ID, headers=headers, params=params) 86 | # topScoreIntent = 
r.json()['topScoringIntent'] 87 | # entities = r.json()['entities'] 88 | # intent = topScoreIntent['intent'] if topScoreIntent['score'] > 0.5 else 'None' 89 | # entity = entities[0] if len(entities) > 0 else None 90 | 91 | # return LuisResponse(intent, entity['entity'], entity['type']) if entity else LuisResponse(intent) 92 | 93 | return None # REMOVE this line when you uncomment the line above 94 | 95 | except Exception as e: 96 | print("[Errno {0}] {1}".format(e.errno, e.strerror)) 97 | 98 | 99 | class BotRequestHandler: 100 | async def create_reply_activity(request_activity, text) -> Activity: 101 | return Activity( 102 | type=ActivityTypes.message, 103 | channel_id=request_activity.channel_id, 104 | conversation=request_activity.conversation, 105 | recipient=request_activity.from_property, 106 | from_property=request_activity.recipient, 107 | text=text, 108 | service_url=request_activity.service_url) 109 | 110 | async def handle_message(context: TurnContext) -> web.Response: 111 | activity = context.activity 112 | if activity.text: 113 | # Get LUIS result (do not uncomment this line) 114 | #luis_result = LuisApiService.post_utterance(activity.text) 115 | 116 | response = await BotRequestHandler.create_reply_activity(activity, f'Top Intent: {luis_result.intent}.') 117 | await context.send_activity(response) 118 | 119 | if luis_result.intent == 'MoveArm': 120 | BotCommandHandler.move_arm() 121 | 122 | print("MoveArm") 123 | elif luis_result.intent == 'MoveGrippers': 124 | # Set Move Grippers Handler (do not uncomment this line) 125 | # BotCommandHandler.move_grippers(luis_result.entity_value) 126 | 127 | print("MoveGrippers") 128 | elif luis_result.intent == 'ShowStats': 129 | # Set Show Stats Handler (do not uncomment this line) 130 | # stats = BotCommandHandler.show_stats() 131 | # response = await BotRequestHandler.create_reply_activity(activity, stats) 132 | # await context.send_activity(response) 133 | 134 | print("ShowStats") 135 | else: 136 | response = await BotRequestHandler.create_reply_activity(activity, 'Please provide a valid instruction') 137 | await context.send_activity(response) 138 | else: 139 | await BotRequestHandler.process_image(activity, context) 140 | 141 | return web.Response(status=202) 142 | 143 | async def handle_conversation_update(context: TurnContext) -> web.Response: 144 | if context.activity.members_added[0].id != context.activity.recipient.id: 145 | response = await BotRequestHandler.create_reply_activity(context.activity, 'Welcome to Sawyer Robot!') 146 | await context.send_activity(response) 147 | return web.Response(status=200) 148 | 149 | async def unhandled_activity() -> web.Response: 150 | return web.Response(status=404) 151 | 152 | async def process_image(activity: Activity, context: TurnContext): 153 | # Implement Process Image Method (do not uncomment this line) 154 | # image_url = BotRequestHandler.get_image_url(activity.attachments) 155 | 156 | # if image_url: 157 | # dominant_color = ComputerVisionApiService.analyze_image(image_url) 158 | # response = await BotRequestHandler.create_reply_activity(activity, f'Do you need a {dominant_color} cube? 
Let me find one for you!') 159 | # await context.send_activity(response) 160 | # BotCommandHandler.move_cube(dominant_color) 161 | # else: 162 | # response = await BotRequestHandler.create_reply_activity(activity, 'Please provide a valid instruction or image.') 163 | # await context.send_activity(response) 164 | 165 | return None 166 | 167 | def get_image_url(attachments): 168 | p = re.compile('^image/(jpg|jpeg|png|gif)$') 169 | for attachment in attachments: 170 | rs = p.match(attachment.content_type) 171 | if rs: 172 | return attachment.content_url 173 | return None 174 | 175 | @staticmethod 176 | async def request_handler(context: TurnContext) -> web.Response: 177 | if context.activity.type == 'message': 178 | return await BotRequestHandler.handle_message(context) 179 | elif context.activity.type == 'conversationUpdate': 180 | return await BotRequestHandler.handle_conversation_update(context) 181 | else: 182 | return await BotRequestHandler.unhandled_activity() 183 | 184 | async def messages(req: web.web_request) -> web.Response: 185 | body = await req.json() 186 | activity = Activity().deserialize(body) 187 | auth_header = req.headers['Authorization'] if 'Authorization' in req.headers else '' 188 | try: 189 | return await ADAPTER.process_activity(activity, auth_header, BotRequestHandler.request_handler) 190 | except Exception as e: 191 | raise e 192 | 193 | 194 | class BotCommandHandler: 195 | def move_arm(): 196 | print('Moving arm... do something cool') 197 | # launch your python2 script using bash 198 | 199 | python2_command = "bash -c 'source "+ ROOT_DIR + "/devel/setup.bash; python2.7 " + ROOT_DIR +"/src/chatbot/bot-wave-arm-node.py'" 200 | process = subprocess.Popen( 201 | python2_command, stdout=subprocess.PIPE, shell=True) 202 | output, error = process.communicate() # receive output from the python2 script 203 | 204 | print('done moving arm . . .') 205 | print('returncode: ' + str(process.returncode)) 206 | print('output: ' + output.decode("utf-8")) 207 | 208 | def move_grippers(action): 209 | # Implement Move Grippers Command (do not uncomment this line) 210 | # print(f'{action} grippers... wait a few seconds') 211 | # # launch your python2 script using bash 212 | # 213 | # python2_command = "bash -c 'source "+ ROOT_DIR + "/devel/setup.bash; python2.7 " + ROOT_DIR +"/src/chatbot/bot-move-grippers.py -a {}'".format(action) 214 | # process = subprocess.Popen(python2_command, stdout=subprocess.PIPE, shell=True) 215 | # output, error = process.communicate() # receive output from the python2 script 216 | 217 | # print('done moving grippers . . .') 218 | # print('returncode: ' + str(process.returncode)) 219 | # print('output: ' + output.decode("utf-8")) 220 | 221 | return None 222 | 223 | def show_stats(): 224 | # Implement Show Stats Command (do not uncomment this line) 225 | # print('Showing stats... do something') 226 | # # launch your python2 script using bash 227 | # python2_command = "bash -c 'source "+ ROOT_DIR + "/devel/setup.bash; python2.7 " + ROOT_DIR +"/src/chatbot/bot-stats-node.py'" 228 | 229 | # process = subprocess.Popen(python2_command, stdout=subprocess.PIPE, shell=True) 230 | # output, error = process.communicate() # receive output from the python2 script 231 | # result = output.decode("utf-8") 232 | 233 | # print('done getting state . .
.') 234 | # print('returncode: ' + str(process.returncode)) 235 | # print('output: ' + result + '\n') 236 | # return result 237 | 238 | return None # REMOVE this line when you uncomment the line above 239 | 240 | def move_cube(color): 241 | # Implement Move Cube Command (do not uncomment this line) 242 | # print(f'Moving {color} cube...') 243 | # try: 244 | # r = requests.get(f'{SIM_API_HOST}/put_block_into_tray/{color}/1') 245 | # r.raise_for_status() 246 | # print('done moving cube . . .') 247 | # except Exception as e: 248 | # print("[Errno {0}] {1}".format(e.errno, e.strerror)) 249 | 250 | return None 251 | 252 | 253 | app = web.Application() 254 | app.router.add_post('/api/messages', BotRequestHandler.messages) 255 | 256 | try: 257 | web.run_app(app, host='localhost', port=9000) 258 | print('Started http server on localhost:9000') 259 | except Exception as e: 260 | raise e 261 | -------------------------------------------------------------------------------- /src/sawyer_ik_5d/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(sawyer_ik_5d) 3 | 4 | ## Compile as C++11, supported in ROS Kinetic and newer 5 | # add_compile_options(-std=c++11) 6 | 7 | ## Find catkin macros and libraries 8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 9 | ## is used, also find other catkin packages 10 | find_package(catkin REQUIRED COMPONENTS 11 | kdl_parser 12 | moveit_msgs 13 | roscpp 14 | ) 15 | 16 | find_package(orocos_kdl) 17 | 18 | ## System dependencies are found with CMake's conventions 19 | # find_package(Boost REQUIRED COMPONENTS system) 20 | 21 | 22 | ## Uncomment this if the package has a setup.py. This macro ensures 23 | ## modules and global scripts declared therein get installed 24 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 25 | # catkin_python_setup() 26 | 27 | ################################################ 28 | ## Declare ROS messages, services and actions ## 29 | ################################################ 30 | 31 | ## To declare and build messages, services or actions from within this 32 | ## package, follow these steps: 33 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 34 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 35 | ## * In the file package.xml: 36 | ## * add a build_depend tag for "message_generation" 37 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET 38 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in 39 | ## but can be declared for certainty nonetheless: 40 | ## * add a exec_depend tag for "message_runtime" 41 | ## * In this file (CMakeLists.txt): 42 | ## * add "message_generation" and every package in MSG_DEP_SET to 43 | ## find_package(catkin REQUIRED COMPONENTS ...) 44 | ## * add "message_runtime" and every package in MSG_DEP_SET to 45 | ## catkin_package(CATKIN_DEPENDS ...) 46 | ## * uncomment the add_*_files sections below as needed 47 | ## and list every .msg/.srv/.action file to be processed 48 | ## * uncomment the generate_messages entry below 49 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
50 | 51 | ## Generate messages in the 'msg' folder 52 | # add_message_files( 53 | # FILES 54 | # Message1.msg 55 | # Message2.msg 56 | # ) 57 | 58 | ## Generate services in the 'srv' folder 59 | # add_service_files( 60 | # FILES 61 | # Service1.srv 62 | # Service2.srv 63 | # ) 64 | 65 | ## Generate actions in the 'action' folder 66 | # add_action_files( 67 | # FILES 68 | # Action1.action 69 | # Action2.action 70 | # ) 71 | 72 | ## Generate added messages and services with any dependencies listed here 73 | # generate_messages( 74 | # DEPENDENCIES 75 | # std_msgs # Or other packages containing msgs 76 | # ) 77 | 78 | ################################################ 79 | ## Declare ROS dynamic reconfigure parameters ## 80 | ################################################ 81 | 82 | ## To declare and build dynamic reconfigure parameters within this 83 | ## package, follow these steps: 84 | ## * In the file package.xml: 85 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" 86 | ## * In this file (CMakeLists.txt): 87 | ## * add "dynamic_reconfigure" to 88 | ## find_package(catkin REQUIRED COMPONENTS ...) 89 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 90 | ## and list every .cfg file to be processed 91 | 92 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 93 | # generate_dynamic_reconfigure_options( 94 | # cfg/DynReconf1.cfg 95 | # cfg/DynReconf2.cfg 96 | # ) 97 | 98 | ################################### 99 | ## catkin specific configuration ## 100 | ################################### 101 | ## The catkin_package macro generates cmake config files for your package 102 | ## Declare things to be passed to dependent projects 103 | ## INCLUDE_DIRS: uncomment this if your package contains header files 104 | ## LIBRARIES: libraries you create in this project that dependent projects also need 105 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 106 | ## DEPENDS: system dependencies of this project that dependent projects also need 107 | catkin_package( 108 | # INCLUDE_DIRS include 109 | # LIBRARIES sawyer_ik_5d 110 | # CATKIN_DEPENDS orocos_kdl roscpp 111 | # DEPENDS system_lib 112 | ) 113 | 114 | ########### 115 | ## Build ## 116 | ########### 117 | 118 | ## Specify additional locations of header files 119 | ## Your package locations should be listed before other locations 120 | include_directories( 121 | # include 122 | ${catkin_INCLUDE_DIRS} 123 | ) 124 | 125 | ## Declare a C++ library 126 | # add_library(${PROJECT_NAME} 127 | # src/${PROJECT_NAME}/sawyer_ik_5d.cpp 128 | # ) 129 | 130 | ## Add cmake target dependencies of the library 131 | ## as an example, code may need to be generated before libraries 132 | ## either from message generation or dynamic reconfigure 133 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 134 | 135 | ## Declare a C++ executable 136 | ## With catkin_make all packages are built within a single CMake context 137 | ## The recommended prefix ensures that target names across packages don't collide 138 | add_executable(${PROJECT_NAME}_node src/sawyer_ik_5d_node.cpp) 139 | 140 | ## Rename C++ executable without prefix 141 | ## The above recommended prefix causes long target names, the following renames the 142 | ## target back to the shorter version for ease of user use 143 | ## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 144 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 145 | 146 | ## Add cmake target dependencies of the executable 147 | ## same as for the library above 148 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 149 | 150 | ## Specify libraries to link a library or executable target against 151 | target_link_libraries(${PROJECT_NAME}_node 152 | ${catkin_LIBRARIES} 153 | ) 154 | 155 | ############# 156 | ## Install ## 157 | ############# 158 | 159 | # all install targets should use catkin DESTINATION variables 160 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 161 | 162 | ## Mark executable scripts (Python etc.) for installation 163 | ## in contrast to setup.py, you can choose the destination 164 | # install(PROGRAMS 165 | # scripts/my_python_script 166 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 167 | # ) 168 | 169 | ## Mark executables and/or libraries for installation 170 | # install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node 171 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 172 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 173 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 174 | # ) 175 | 176 | ## Mark cpp header files for installation 177 | # install(DIRECTORY include/${PROJECT_NAME}/ 178 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 179 | # FILES_MATCHING PATTERN "*.h" 180 | # PATTERN ".svn" EXCLUDE 181 | # ) 182 | 183 | ## Mark other files for installation (e.g. launch and bag files, etc.) 184 | # install(FILES 185 | # # myfile1 186 | # # myfile2 187 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 188 | # ) 189 | 190 | ############# 191 | ## Testing ## 192 | ############# 193 | 194 | ## Add gtest based cpp test target and link libraries 195 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_sawyer_ik_5d.cpp) 196 | # if(TARGET ${PROJECT_NAME}-test) 197 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 198 | # endif() 199 | 200 | ## Add folders to be run by python nosetests 201 | # catkin_add_nosetests(test) 202 | -------------------------------------------------------------------------------- /src/sawyer_ik_5d/include/sawyer_ik_5d/sawyer_ik_5d.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sawyer_ik_5d/include/sawyer_ik_5d/sawyer_ik_5d.h -------------------------------------------------------------------------------- /src/sawyer_ik_5d/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | sawyer_ik_5d 4 | 0.0.0 5 | The sawyer_ik_5d package 6 | 7 | 8 | 9 | 10 | Pablo Inigo Blasco 11 | 12 | 13 | 14 | 15 | 16 | copyright 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | catkin 52 | orocos_kdl 53 | kdl_parser 54 | moveit_msgs 55 | roscpp 56 | orocos_kdl 57 | kdl_parser 58 | moveit_msgs 59 | roscpp 60 | orocos_kdl 61 | kdl_parser 62 | moveit_msgs 63 | roscpp 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /src/sorting_demo/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 
cmake_minimum_required(VERSION 2.8.3) 2 | project(sorting_demo) 3 | 4 | find_package(catkin REQUIRED COMPONENTS 5 | cv_bridge 6 | rospy 7 | actionlib 8 | sensor_msgs 9 | std_msgs 10 | control_msgs 11 | trajectory_msgs 12 | dynamic_reconfigure 13 | intera_core_msgs 14 | intera_motion_msgs 15 | gazebo_msgs 16 | ) 17 | 18 | catkin_python_setup() 19 | 20 | catkin_package( 21 | CATKIN_DEPENDS 22 | rospy 23 | intera_core_msgs 24 | gazebo_msgs 25 | ) 26 | 27 | 28 | ############# 29 | ## Install ## 30 | ############# 31 | 32 | install(PROGRAMS 33 | src/sorting_demo/program.py 34 | scripts/test_moveit.py 35 | DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 36 | ) 37 | 38 | 39 | foreach(dir launch models) 40 | install(DIRECTORY ${dir}/ 41 | DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/${dir}) 42 | endforeach(dir) 43 | -------------------------------------------------------------------------------- /src/sorting_demo/launch/robot_sorting_demo_control.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /src/sorting_demo/launch/sawyer_moveit.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /src/sorting_demo/launch/sorting_demo.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /src/sorting_demo/models/block/block.urdf.xacro: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | $(arg material) 30 | 10000 31 | 10000 32 | 33 | 34 | 35 | 36 | 37 | 0.456 38 | true 39 | 0.05 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /src/sorting_demo/models/table/materials/textures/Maple.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/models/table/materials/textures/Maple.jpg -------------------------------------------------------------------------------- /src/sorting_demo/models/table/materials/textures/Maple_dark.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/models/table/materials/textures/Maple_dark.jpg -------------------------------------------------------------------------------- /src/sorting_demo/models/table/materials/textures/Wood_Floor_Dark.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/models/table/materials/textures/Wood_Floor_Dark.jpg 
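The block, table, and tray models above are loaded into Gazebo at runtime. As a rough sketch of how an SDF model such as the cafe table gets spawned (the repo's own spawning logic appears to live in `sorting_demo/gazebo_models.py`; the model path and pose below are illustrative assumptions):

```python
# Sketch of spawning an SDF model into Gazebo through the standard
# /gazebo/spawn_sdf_model service. The file path and pose are
# assumptions for illustration; real code should resolve paths with rospkg.
import rospy
from gazebo_msgs.srv import SpawnModel
from geometry_msgs.msg import Point, Pose, Quaternion

rospy.init_node('spawn_sketch')
rospy.wait_for_service('/gazebo/spawn_sdf_model')
spawn_sdf = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)

with open('models/table/model.sdf') as f:
    table_xml = f.read()

spawn_sdf(model_name='cafe_table',
          model_xml=table_xml,
          robot_namespace='/',
          initial_pose=Pose(position=Point(x=0.75, y=0.0, z=0.0),
                            orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0)),
          reference_frame='world')
```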
-------------------------------------------------------------------------------- /src/sorting_demo/models/table/model.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Cafe table 5 | 1.0 6 | model-1_4.sdf 7 | model.sdf 8 | 9 | 10 | Nate Koenig 11 | nate@osrfoundation.org 12 | 13 | 14 | 15 | A model of a cafe table. 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /src/sorting_demo/models/table/model.sdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | true 5 | 6 | 7 | 0 0 0.755 0 0 0 8 | 9 | 10 | 0.913 0.913 0.04 11 | 12 | 13 | 14 | 15 | 16 | 0 0 0.37 0 0 0 17 | 18 | 19 | 0.042 0.042 0.74 20 | 21 | 22 | 23 | 24 | 25 | 0 0 0.02 0 0 0 26 | 27 | 28 | 0.56 0.56 0.04 29 | 30 | 31 | 32 | 33 | 34 | 35 | 1.0 36 | false 37 | 0.05 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | model://sorting_demo/models/table/meshes/cafe_table.dae 47 | 48 | 49 | 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /src/sorting_demo/models/tray/tray.urdf.xacro: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | ${material} 35 | 1000 36 | 1000 37 | 38 | 39 | 40 | 41 | 42 | 0.456 43 | true 44 | 0.05 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | true 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | -------------------------------------------------------------------------------- /src/sorting_demo/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | sorting_demo 4 | 0.0.0 5 | The sorting demo package 6 | 7 | 8 | 9 | 10 | Pablo Iñigo Blasco 11 | Manuel Caballero Sánchez 12 | 13 | 14 | 15 | 16 | Microsoft Copyright 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | catkin 51 | 52 | rospy 53 | intera_core_msgs 54 | gazebo_msgs 55 | joint_state_controller 56 | position_controllers 57 | velocity_controllers 58 | force_torque_sensor_controller 59 | imu_sensor_controller 60 | ros_control 61 | joint_trajectory_controller 62 | gripper_action_controller 63 | gazebo_ros_control 64 | gazebo_plugins 65 | moveit 66 | rospy_message_converter 67 | gazebo_plugins 68 | python-flask 69 | python-catkin-tools 70 | intera_interface 71 | timed_roslaunch 72 | 73 | 74 | rospy 75 | rospack 76 | intera_core_msgs 77 | intera_interface 78 | sawyer_gazebo 79 | gazebo_ros 80 | gazebo_msgs 81 | joint_state_controller 82 | moveit 83 | timed_roslaunch 84 | 85 | position_controllers 86 | velocity_controllers 87 | force_torque_sensor_controller 88 | imu_sensor_controller 89 | ros_control 90 | joint_trajectory_controller 91 | gripper_action_controller 92 | rospy_message_converter 93 | intera_interface 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | -------------------------------------------------------------------------------- /src/sorting_demo/scripts/test_moveit.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | import geometry_msgs.msg 4 | import rospy 5 | 6 | from sorting_demo.trajectory_planner import TrajectoryPlanner 7 | 8 | if __name__ == "__main__": 9 | rospy.init_node('move_group_python_interface_tutorial', 10 | anonymous=True) 11 | 12 | planner = TrajectoryPlanner() 13 | 14 | pose_target = geometry_msgs.msg.Pose() 15 | 16 | pose_target.orientation.w = 1.0 17 | pose_target.position.x = 0.7 18 | pose_target.position.y = 0.0 19 | pose_target.position.z = 0.5 20 | 21 | #planner.move_to_cartesian_target(pose_target) 22 | 23 | planner.move_to_joint_target([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) -------------------------------------------------------------------------------- /src/sorting_demo/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from distutils.core import setup 3 | from catkin_pkg.python_setup import generate_distutils_setup 4 | 5 | """ 6 | setup_args = generate_distutils_setup( 7 | package=['sorting_demo'], 8 | package_dir={'': 'src'}, 9 | ) 10 | 11 | setup(**setup_args) 12 | """ 13 | 14 | d = generate_distutils_setup() 15 | d['packages'] = ['sorting_demo', 16 | 'sorting_demo.utils', 17 | 'sorting_demo.object_detection', 18 | 'intera_interface', 'intera_control', 19 | 'intera_dataflow', 20 | 'intera_io', 21 | 'intera_joint_trajectory_action', 22 | 'intera_motion_interface'] 23 | d['package_dir'] = {'': 'src', 'utils': 'src/utils'} 24 | d['scripts'] = ['scripts/test_moveit.py'] 25 | 26 | setup(**d) 27 | -------------------------------------------------------------------------------- /src/sorting_demo/share/head_mask.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/head_mask.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_head_simulator/1_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_head_simulator/1_color.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_head_simulator/2_colors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_head_simulator/2_colors.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_head_simulator/base_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_head_simulator/base_1.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_head_simulator/base_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_head_simulator/base_2.png
-------------------------------------------------------------------------------- /src/sorting_demo/share/test_head_simulator/grouping.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_head_simulator/grouping.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_head_simulator/joined_blobs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_head_simulator/joined_blobs.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_head_simulator/no_cubes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_head_simulator/no_cubes.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/angles1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/angles1.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/angles2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/angles2.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/border.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/border.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/debug0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/debug0.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/debug1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/debug1.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/debug2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/debug2.png 
-------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/debug3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/debug3.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/debug4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/debug4.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/debug5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/debug5.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/debug6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/debug6.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/debug7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/debug7.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/debug8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/debug8.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/joined_cubes_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/joined_cubes_1.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/joined_cubes_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/joined_cubes_2.png -------------------------------------------------------------------------------- /src/sorting_demo/share/test_right_hand_simulator/many.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/share/test_right_hand_simulator/many.png -------------------------------------------------------------------------------- /src/sorting_demo/share/testobstacle.txt: -------------------------------------------------------------------------------- 1 | box_scene 2 | * box1 3 | 1 4 | box 5 | 0.1 0.2 0.3 6 | 1.0 1.0 1.0 7 | 0 0 0 1 8 | 0 1 0 1 9 | . 10 | 11 | #box_scene # scene name 12 | #* box1 # object name 13 | #1 # number of shapes 14 | #box # type of shape (box, cylinder or sphere) 15 | #0.1 0.2 0.3 # size of shape (x y z) 16 | #1.0 1.0 1.0 # position of shape 17 | #0 0 0 1 # orientation of shape (quaternion) 18 | #0 1 0 1 # RGBA color of shape 19 | #. # final dot terminates the scene and is required 20 | -------------------------------------------------------------------------------- /src/sorting_demo/share/web_view.html: -------------------------------------------------------------------------------- [The HTML/JS markup of this page was stripped when the dump was generated; only its visible labels survive. Recoverable structure: a browser control panel for the demo, with a "REST Endpoints" section grouping Observers, Lifecycle, Operations Primitives and Others buttons, a Params form (p1, p2), and status panes for Planner state, Table, Tray 1, Tray 2, Tray 3 and Grip.]
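The comment block in testobstacle.txt above documents the scene format field by field. Below is a minimal sketch of a reader for that format; it is an illustration only, not part of the package and not MoveIt's own loader, and the function name and dictionary layout are assumptions:

def parse_scene_file(path):
    # Keep non-empty, non-comment lines of a scene file like testobstacle.txt
    with open(path) as f:
        lines = [line.strip() for line in f
                 if line.strip() and not line.strip().startswith('#')]
    scene = {"name": lines[0], "objects": []}
    i = 1
    while i < len(lines) and lines[i] != '.':
        obj = {"name": lines[i].lstrip('* '), "shapes": []}
        num_shapes = int(lines[i + 1])
        i += 2
        for _ in range(num_shapes):
            shape_type, size, position, orientation, rgba = lines[i:i + 5]
            obj["shapes"].append({
                "type": shape_type,  # box, cylinder or sphere
                "size": [float(v) for v in size.split()],
                "position": [float(v) for v in position.split()],
                "orientation": [float(v) for v in orientation.split()],  # quaternion
                "rgba": [float(v) for v in rgba.split()],
            })
            i += 5
        scene["objects"].append(obj)
    return scene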
-------------------------------------------------------------------------------- /src/sorting_demo/src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/src/__init__.py -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/src/sorting_demo/__init__.py -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/concepts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/src/sorting_demo/concepts/__init__.py -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/concepts/block.py: -------------------------------------------------------------------------------- 1 | import re 2 | from rospy_message_converter import message_converter 3 | import demo_constants 4 | 5 | class BlockState: 6 | regex = re.compile(r'block(\d+)\.*') 7 | 8 | def __init__(self, id, pose): 9 | self.perception_id = None 10 | 11 | self.gazebo_id = id 12 | self.pose = pose 13 | self.color = None 14 | self.homogeneous_transform = None 15 | 16 | if not demo_constants.is_real_robot(): 17 | search = BlockState.regex.search(id) 18 | self.num = int(search.group(1)) 19 | else: 20 | self.num = int(id) 21 | 22 | self.gazebo_pose = None 23 | self.grasp_pose = None 24 | self.place_pose = None 25 | 26 | 27 | self.table_grasp_pose = None 28 | self.tray_place_pose = None 29 | self.table_place_pose = None 30 | 31 | # the color (hue) estimation based on the head image processing 32 | self.hue_estimation = None 33 | 34 | 35 | 36 | 37 | # the 3d pose estimation from the head image processing 38 | self.headview_pose_estimation = None 39 | 40 | self.tabletop_arm_view_estimated_pose = None 41 | self.traytop_arm_view_estimated_pose = None 42 | 43 | self.tray = None 44 | 45 | 46 | 47 | def __str__(self): 48 | return "[Block estpos = %s]" % str(self.headview_pose_estimation) 49 | 50 | def get_state(self): 51 | return {"id": self.perception_id, "table_pose": message_converter.convert_ros_message_to_dictionary(self.gazebo_pose), 52 | "color": self.color} 53 | 54 | @staticmethod 55 | def is_block(id): 56 | num = BlockState.regex.search(id) 57 | return num is not None 58 | 59 | def get_color(self): 60 | return self.color 61 | 62 | def is_color(self, color): 63 | return color.upper() in self.color.upper() 64 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/concepts/gripper_state.py: -------------------------------------------------------------------------------- 1 | class GripperState: 2 | def __init__(self): 3 | self.holding_block = None 4 | 5 | def get_state(self): 6 | return {"holding_block": None if self.holding_block is None else self.holding_block.get_state()} --------------------------------------------------------------------------------
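A short usage sketch (assumed, not from the repository; it presumes src/ is on the Python path) showing how the get_state() dictionaries in block.py and gripper_state.py above nest together for the REST/web layer:

from sorting_demo.concepts.gripper_state import GripperState

gripper = GripperState()
print(gripper.get_state())  # {'holding_block': None}

# Once task code assigns a BlockState to gripper.holding_block, that block's
# own get_state() dict is embedded instead, e.g.:
# {'holding_block': {'id': ..., 'table_pose': {...}, 'color': 'RED'}}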
/src/sorting_demo/src/sorting_demo/concepts/table.py: -------------------------------------------------------------------------------- 1 | class Table: 2 | def __init__(self): 3 | self.blocks = [] 4 | 5 | def get_blocks(self): 6 | return self.blocks 7 | 8 | def get_state(self): 9 | return {"blocks": [b.get_state() for b in self.blocks]} 10 | 11 | def notify_gripper_pick(self, b, gripper): 12 | self.blocks.remove(b) 13 | gripper.holding_block = b 14 | 15 | def notify_gripper_place(self, b, gripper): 16 | gripper.holding_block = None 17 | self.blocks.append(b) -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/concepts/tray.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import re 3 | 4 | import math 5 | import rospy 6 | from rospy_message_converter import message_converter 7 | import tf 8 | import tf.transformations 9 | from geometry_msgs.msg import Quaternion 10 | 11 | class TrayState: 12 | regex = re.compile(r'tray(\d+)\.*') 13 | 14 | def __init__(self, gazebo_id, pose, TRAY_SURFACE_THICKNESS=0.04): 15 | self.gazebo_id = gazebo_id 16 | self.pose = pose 17 | self.gazebo_pose = None 18 | self.TRAY_SURFACE_THICKNESS = TRAY_SURFACE_THICKNESS 19 | search = TrayState.regex.search(gazebo_id) 20 | self.num = int(search.group(1)) 21 | self.blocks = [] 22 | 23 | @staticmethod 24 | def is_tray(id): 25 | num = TrayState.regex.search(id) 26 | return num is not None 27 | 28 | def notify_place_block(self, block, gripper_state): 29 | block.tray = self 30 | self.blocks.append(block) 31 | gripper_state.holding_block = None 32 | 33 | 34 | def notify_pick_block(self, block, gripper_state): 35 | block.tray = None 36 | self.blocks.remove(block) 37 | gripper_state.holding_block = block 38 | 39 | def get_tray_pick_location(self): 40 | """ 41 | Provides the grasping pose for the tray 42 | 43 | :return: geometry_msgs/Pose 44 | """ 45 | copyfinalpose = copy.deepcopy(self.gazebo_pose) 46 | copyfinalpose.position.y -= 0.15 47 | copyfinalpose.position.z -= 0.02 48 | return copyfinalpose 49 | 50 | def get_tray_place_block_pose(self): 51 | #return self.gazebo_pose 52 | copygazebopose = copy.deepcopy(self.gazebo_pose) 53 | 54 | yoffset = -0.08 + 0.075 * len(self.blocks) 55 | copygazebopose.position.y -= yoffset 56 | copygazebopose.position.z += self.TRAY_SURFACE_THICKNESS 57 | 58 | zrot = tf.transformations.quaternion_from_euler(0, 0, -math.pi/2.0) 59 | 60 | tray_orientation = [copygazebopose.orientation.x, copygazebopose.orientation.y, copygazebopose.orientation.z, copygazebopose.orientation.w] 61 | # oorient = [overhead_orientation.x,overhead_orientation.y,overhead_orientation.z,overhead_orientation.w] 62 | 63 | # resulting_orientation = tf.transformations.quaternion_multiply(cubeorientation, tf.transformations.quaternion_conjugate(oorient)) 64 | resulting_orientation = tf.transformations.quaternion_multiply(tray_orientation, zrot) 65 | 66 | # resulting_orientation = cubeorientation 67 | 68 | 69 | copygazebopose.orientation = Quaternion(x=resulting_orientation[0], y=resulting_orientation[1], z=resulting_orientation[2], 70 | w=resulting_orientation[3]) 71 | 72 | 73 | rospy.logwarn("Tray Place location (objects %d, %lf): " % (len(self.blocks), yoffset) + str(copygazebopose)) 74 | 75 | return copygazebopose 76 | 77 | 78 | def get_state(self): 79 | return {"pose": message_converter.convert_ros_message_to_dictionary(self.gazebo_pose), "blocks": [b.get_state() for b in self.blocks]} 80 | 81 | def __str__(self): 82 | return "Tray: " + str(self.gazebo_id)
+ ",num: " + str(self.num) + " -> " + str(len(self.blocks)) + " items" 83 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/cv_detection_camera_helper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import Queue 3 | import numpy 4 | 5 | import rospy 6 | import tf 7 | 8 | from sensor_msgs.msg import CameraInfo 9 | from image_geometry import PinholeCameraModel 10 | 11 | from visualization_msgs.msg import * 12 | 13 | import intera_interface 14 | 15 | class CameraHelper: 16 | """ 17 | A helper class to take pictures with the Sawyer camera and unproject points from the images 18 | """ 19 | def __init__(self, camera_name, base_frame, table_height): 20 | """ 21 | Initialize the instance 22 | 23 | :param camera_name: The camera name. One of (head_camera, right_hand_camera) 24 | :param base_frame: The frame for the robot base 25 | :param table_height: The table height with respect to base_frame 26 | """ 27 | self.camera_name = camera_name 28 | self.base_frame = base_frame 29 | self.table_height = table_height 30 | 31 | self.image_queue = Queue.Queue() 32 | self.pinhole_camera_model = PinholeCameraModel() 33 | self.tf_listener = tf.TransformListener() 34 | 35 | camera_info_topic = "/io/internal_camera/{}/camera_info".format(camera_name) 36 | camera_info = rospy.wait_for_message(camera_info_topic, CameraInfo) 37 | 38 | self.pinhole_camera_model.fromCameraInfo(camera_info) 39 | 40 | cameras = intera_interface.Cameras() 41 | cameras.set_callback(camera_name, self.__show_image_callback, rectify_image=True) 42 | 43 | def __show_image_callback(self, img_data): 44 | """ 45 | Image callback for the camera 46 | 47 | :param img_data: The image received from the camera 48 | """ 49 | if self.image_queue.empty(): 50 | self.image_queue.put(img_data) 51 | 52 | def take_single_picture(self): 53 | """ 54 | Take one picture from the specified camera 55 | 56 | :returns: The image 57 | """ 58 | with self.image_queue.mutex: 59 | self.image_queue.queue.clear() 60 | 61 | cameras = intera_interface.Cameras() 62 | 63 | cameras.start_streaming(self.camera_name) 64 | 65 | image_data = self.image_queue.get(block=True, timeout=None) 66 | 67 | cameras.stop_streaming(self.camera_name) 68 | 69 | return image_data 70 | 71 | def project_point_on_table(self, point): 72 | """ 73 | Projects the 2D point from the camera image on the table 74 | 75 | :param point: The 2D point in the form (x, y) 76 | :return: The 3D point in the coordinate space of the frame that was specified when the object was initialized 77 | """ 78 | 79 | # Get camera frame name 80 | camera_frame = self.pinhole_camera_model.tfFrame() 81 | 82 | # Check that both frames exist 83 | if self.tf_listener.frameExists(self.base_frame) and self.tf_listener.frameExists(camera_frame): 84 | # Get transformation 85 | time = self.tf_listener.getLatestCommonTime(self.base_frame, camera_frame) 86 | camera_translation_base, camera_orientation_base = self.tf_listener.lookupTransform(self.base_frame, camera_frame, time) 87 | 88 | # Unproject point 89 | unprojected_ray_camera = self.pinhole_camera_model.projectPixelTo3dRay(point) 90 | 91 | # Rotate ray based on the frame transformation 92 | camera_frame_rotation_matrix = tf.transformations.quaternion_matrix(camera_orientation_base) 93 | unprojected_ray_base = numpy.dot(camera_frame_rotation_matrix[:3,:3], unprojected_ray_camera) 94 | 95 | # Intersect ray with base plane 96 | point_height = 
camera_translation_base[2] - self.table_height 97 | factor = -point_height / unprojected_ray_base[2] 98 | intersection = camera_translation_base + unprojected_ray_base * factor 99 | 100 | return intersection 101 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/cv_detection_head.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys 3 | import numpy 4 | import os 5 | import demo_constants # used by test_head_ros() below 6 | from matplotlib import pyplot 7 | 8 | import rospy 9 | import cv2 10 | import rospkg 11 | 12 | from cv_bridge import CvBridge, CvBridgeError 13 | 14 | from visualization_msgs.msg import * 15 | 16 | from cv_detection_camera_helper import CameraHelper 17 | 18 | def get_blobs_info(cv_image): 19 | """ 20 | Gets information about the colored blobs in the image 21 | 22 | :param cv_image: The OpenCV image to get blobs from 23 | :return: A dictionary containing an array of points for each colored blob found, represented by its hue. 24 | For example, in an image with 2 red blobs and 3 blue blobs, it will return 25 | {0: [(639, 558), (702, 555)], 120: [(567, 550), (698, 515), (648, 515)]} 26 | """ 27 | 28 | # Show original image 29 | #cv2.imshow("Original image", cv_image) 30 | 31 | # Apply blur 32 | BLUR_SIZE = 3 33 | cv_image_blur = cv2.GaussianBlur(cv_image, (BLUR_SIZE, BLUR_SIZE), 0) 34 | #cv2.imshow("Blur", cv_image_blur) 35 | 36 | # Apply ROI mask 37 | mask_path = rospkg.RosPack().get_path('sorting_demo') + "/share/head_mask.png" 38 | cv_mask = cv2.imread(mask_path) 39 | cv_mask = cv2.cvtColor(cv_mask, cv2.COLOR_BGR2GRAY) 40 | 41 | cv_image_masked = cv2.bitwise_and(cv_image_blur, cv_image_blur, mask=cv_mask) 42 | #cv2.imshow("Masked original", cv_image_masked) 43 | 44 | # HSV split 45 | cv_image_hsv = cv2.cvtColor(cv_image_masked, cv2.COLOR_BGR2HSV) 46 | cv_image_h, cv_image_s, cv_image_v = cv2.split(cv_image_hsv) 47 | #cv2.imshow("Image H", cv_image_h) 48 | #cv2.imshow("Image S", cv_image_s) 49 | #cv2.imshow("Image V", cv_image_v) 50 | 51 | # Apply CLAHE to Value channel 52 | CLAHE_SIZE = 16 53 | clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(CLAHE_SIZE, CLAHE_SIZE)) 54 | cv_image_v_clahe = clahe.apply(cv_image_v) 55 | 56 | # Merge channels 57 | cv_image_clahe = cv2.merge((cv_image_h, cv_image_s, cv_image_v_clahe)) 58 | #cv_image_clahe_rgb = cv2.cvtColor(cv_image_clahe, cv2.COLOR_HSV2BGR) 59 | #cv2.imshow("CLAHE", cv_image_clahe_rgb) 60 | 61 | # Multiply Saturation and Value channels to separate the cubes, removing the table 62 | cv_image_sv_multiplied = cv2.multiply(cv_image_s, cv_image_v_clahe, scale=1/255.0) 63 | #cv2.imshow("Image S*V", cv_image_sv_multiplied) 64 | 65 | # Binarize the result 66 | BIN_THRESHOLD = 64 67 | #_, cv_image_binary = cv2.threshold(cv_image_sv_multiplied, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) 68 | _, cv_image_binary = cv2.threshold(cv_image_sv_multiplied, BIN_THRESHOLD, 255, cv2.THRESH_BINARY) 69 | #cv2.imshow("Threshold", cv_image_binary) 70 | 71 | # Erode the image to use as mask 72 | EROSION_SIZE = 3 73 | erosion_kernel = numpy.ones((EROSION_SIZE, EROSION_SIZE), numpy.uint8) 74 | cv_image_binary_eroded = cv2.erode(cv_image_binary, erosion_kernel) 75 | 76 | # Calculate H-channel histogram, applying the binarized image as mask 77 | cv_image_h_histogram = cv2.calcHist([cv_image_h], [0], cv_image_binary_eroded, [180], [0, 179]) 78 | 79 | # Smooth the histogram to find local maxima 80 | HISTOGRAM_BLUR_SIZE = 11 81 | histogram_length = 
len(cv_image_h_histogram) 82 | cv_image_h_histogram_wrap = cv2.repeat(cv_image_h_histogram, 3, 1) 83 | cv_image_h_histogram_smooth = cv2.GaussianBlur(cv_image_h_histogram_wrap, (HISTOGRAM_BLUR_SIZE, HISTOGRAM_BLUR_SIZE), 0) 84 | cv_image_h_histogram_cut = cv_image_h_histogram_smooth[histogram_length : 2 * histogram_length] 85 | 86 | # Collect peaks 87 | histogram_peaks = [i for i in range(len(cv_image_h_histogram_cut)) 88 | if cv_image_h_histogram_cut[(i - 1) % histogram_length] < cv_image_h_histogram_cut[i] > cv_image_h_histogram_cut[(i + 1) % histogram_length]] 89 | 90 | # Filter peaks 91 | PEAK_MINIMUM = 100 # Ideally below the value of a single cube 92 | PEAK_MAXIMUM = 500 # Ideally above the value of all the cubes of a single color 93 | histogram_high_peaks = filter(lambda p : PEAK_MINIMUM < cv_image_h_histogram_cut[p] < PEAK_MAXIMUM, histogram_peaks) 94 | #print(histogram_high_peaks) 95 | #pyplot.clf() 96 | #pyplot.plot(cv_image_h_histogram_cut) 97 | #pyplot.pause(0.001) 98 | 99 | # Process every color found in the histogram 100 | blob_info = {} 101 | cv_image_contours_debug = cv2.cvtColor(cv_image_binary_eroded, cv2.COLOR_GRAY2BGR) 102 | 103 | for current_hue in histogram_high_peaks: 104 | # Perform a Hue rotation that will be used to make detecting the edge colors easier (red in HSV corresponds to both 0 and 180) 105 | HUE_AMPLITUDE = 5 106 | cv_image_h_rotated = cv_image_h.astype(numpy.int16) 107 | cv_image_h_rotated[:] -= current_hue 108 | cv_image_h_rotated[:] += HUE_AMPLITUDE 109 | cv_image_h_rotated = numpy.remainder(cv_image_h_rotated, 180) 110 | cv_image_h_rotated = cv_image_h_rotated.astype(numpy.uint8) 111 | #cv2.imshow("Hue rotation {}".format(histogram_high_peaks.index(current_hue)), cv_image_h_rotated) 112 | 113 | # Binarize using range function 114 | cv_image_h_inrange = cv2.inRange(cv_image_h_rotated, 0, HUE_AMPLITUDE * 2) 115 | 116 | # Apply binary mask (consider that both black and the edge color have hue 0) 117 | cv_image_h_masked = cv2.bitwise_and(cv_image_h_inrange, cv_image_h_inrange, mask=cv_image_binary_eroded) 118 | #cv2.imshow("inRange {}".format(histogram_high_peaks.index(current_hue)), cv_image_h_masked) 119 | 120 | # Find contours 121 | _, contours, _ = cv2.findContours(cv_image_h_masked.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) 122 | 123 | # Filter by area 124 | MINIMUM_AREA_SIZE = 50 125 | contours_filtered_area = filter(lambda cnt: cv2.contourArea(cnt) > MINIMUM_AREA_SIZE, contours) 126 | 127 | # Calculate convex hull 128 | convex_contours = [cv2.convexHull(cnt) for cnt in contours_filtered_area] 129 | 130 | contour_color_hsv = numpy.array([[[current_hue, 255, 255]]], numpy.uint8) 131 | contour_color_rgb = cv2.cvtColor(contour_color_hsv, cv2.COLOR_HSV2BGR)[0][0].tolist() 132 | #cv2.drawContours(cv_image_contours_debug, convex_contours, -1, contour_color_rgb, 1) 133 | 134 | # Find centroids 135 | contour_moments = [cv2.moments(cnt) for cnt in convex_contours] 136 | contour_centroids = [(int(moments["m10"] / moments["m00"]), int(moments["m01"] / moments["m00"])) for moments in contour_moments if moments["m00"] != 0] 137 | for (cx, cy) in contour_centroids: 138 | cv2.circle(cv_image_contours_debug, (cx, cy), 3, contour_color_rgb, cv2.FILLED) 139 | 140 | # Collect data 141 | blob_info[current_hue] = contour_centroids 142 | 143 | cv2.imwrite("/tmp/head_contours.jpg", cv_image_contours_debug) 144 | 145 | return blob_info 146 | 147 | def __publish_marker(publisher, index, point): 148 | """ 149 | Publishes a cube-shaped marker 150 | 151 | :param index: 
The marker index 152 | :param point: The 3D position of the marker 153 | """ 154 | marker = Marker() 155 | marker.header.frame_id = "base" 156 | marker.id = index 157 | marker.type = Marker.CUBE 158 | marker.scale.x = 0.04 159 | marker.scale.y = 0.04 160 | marker.scale.z = 0.04 161 | marker.color.r = 1.0 162 | marker.color.g = 1.0 163 | marker.color.b = 0.0 164 | marker.color.a = 1.0 165 | marker.pose.position.x = point[0] 166 | marker.pose.position.y = point[1] 167 | marker.pose.position.z = point[2] 168 | 169 | publisher.publish(marker) 170 | 171 | def test_head_ros(): 172 | """ 173 | Test the blob detection and CameraHelper class using ROS 174 | """ 175 | rospy.init_node('cv_detection_head_camera') 176 | 177 | camera_name = "head_camera" 178 | 179 | TABLE_HEIGHT = demo_constants.TABLE_HEIGHT 180 | camera_helper = CameraHelper(camera_name, "base", TABLE_HEIGHT) 181 | 182 | bridge = CvBridge() 183 | 184 | publisher = rospy.Publisher("cube_position_estimation", Marker, queue_size=10) 185 | 186 | try: 187 | while not rospy.is_shutdown(): 188 | # Take picture 189 | img_data = camera_helper.take_single_picture() 190 | 191 | # Convert to OpenCV format 192 | cv_image = bridge.imgmsg_to_cv2(img_data, "bgr8") 193 | 194 | # Save for debugging 195 | #cv2.imwrite("/tmp/debug.png", cv_image) 196 | 197 | # Get color blobs info 198 | blob_info = get_blobs_info(cv_image) 199 | 200 | # Project the points on 3D space 201 | points = [y for x in blob_info.values() for y in x] 202 | 203 | for index, point in enumerate(points): 204 | projected = camera_helper.project_point_on_table(point) 205 | __publish_marker(publisher, index, projected) 206 | 207 | # Wait for a key press 208 | cv2.waitKey(1) 209 | 210 | rospy.sleep(0.1) 211 | except CvBridgeError, err: 212 | rospy.logerr(err) 213 | 214 | # Exit 215 | cv2.destroyAllWindows() 216 | 217 | def test_head_cam(): 218 | """ 219 | Test the blob detection using a USB camera 220 | """ 221 | 222 | # Create a video capture object 223 | capture = cv2.VideoCapture(1) 224 | capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280) 225 | capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720) 226 | 227 | while True: 228 | # Capture a frame 229 | ret, cv_image = capture.read() 230 | 231 | if ret: 232 | # Save for debugging 233 | #cv2.imwrite("/tmp/debug.png", cv_image) 234 | 235 | # Get color blobs info 236 | blob_info = get_blobs_info(cv_image) 237 | print(blob_info) 238 | 239 | # Check for a press on the Escape key 240 | if cv2.waitKey(1) & 0xFF == 27: 241 | break 242 | 243 | # Exit 244 | capture.release() 245 | cv2.destroyAllWindows() 246 | 247 | def test_head_debug(): 248 | """ 249 | Test the blob detection using images on disk 250 | """ 251 | 252 | # Get files 253 | path = rospkg.RosPack().get_path('sorting_demo') + "/share/test_head_simulator" 254 | files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))] 255 | #print(files) 256 | 257 | # Process files 258 | for f in files: 259 | # Get image path 260 | image_path = os.path.join(path, f) 261 | print(image_path) 262 | 263 | # Read image 264 | cv_image = cv2.imread(image_path) 265 | 266 | # Get color blobs info 267 | blob_info = get_blobs_info(cv_image) 268 | print(blob_info) 269 | 270 | # Wait for a key press 271 | cv2.waitKey(0) 272 | 273 | # Exit 274 | cv2.destroyAllWindows() 275 | 276 | if __name__ == '__main__': 277 | sys.exit(test_head_debug()) 278 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/demo_constants.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import rospy 4 | 5 | CUBE_EDGE_LENGTH = 0.04 6 | 7 | BLOCK_COLOR_MAPPINGS = [ 8 | {"material": "Gazebo/Green"}, 9 | {"material": "Gazebo/Red"}, 10 | {"material": "Gazebo/Blue"}, 11 | {"material": "Gazebo/Green"}, 12 | {"material": "Gazebo/Red"}, 13 | {"material": "Gazebo/Blue"}, 14 | {"material": "Gazebo/Red"}, 15 | {"material": "Gazebo/Blue"}, 16 | {"material": "Gazebo/Green"} 17 | ] 18 | 19 | TRAY_COLORS = ["Red", "Green", "Blue"] 20 | 21 | TABLE_HEIGHT = -0.15 22 | 23 | TRAY_SURFACE_THICKNESS = 0.04 24 | 25 | ARM_TOP_VIEW_Z_OFFSET = 0.05 # meters 26 | 27 | SIMULATE_TRAY_BLOCK_DETECTION = True 28 | 29 | 30 | def is_real_robot(): 31 | if rospy.has_param("/use_sim_time"): 32 | return not rospy.get_param("/use_sim_time") 33 | else: 34 | return False 35 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/gazebo_models.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import math 3 | import random 4 | import sys 5 | 6 | import rospkg 7 | import rospy 8 | import tf 9 | import xacro 10 | 11 | from geometry_msgs.msg import Pose, Point, Quaternion 12 | from gazebo_msgs.srv import SpawnModel, DeleteModel 13 | 14 | from demo_constants import BLOCK_COLOR_MAPPINGS, CUBE_EDGE_LENGTH 15 | 16 | 17 | def spawn_urdf(name, description_xml, pose, reference_frame): 18 | rospy.wait_for_service('/gazebo/spawn_urdf_model') 19 | try: 20 | spawn_urdf = rospy.ServiceProxy('/gazebo/spawn_urdf_model', SpawnModel) 21 | resp_urdf = spawn_urdf(name, description_xml, "/", pose, reference_frame) 22 | except rospy.ServiceException as e: 23 | rospy.logerr("Spawn URDF service call failed: {0}".format(e)) 24 | 25 | 26 | def load_xacro_file(file_path, mappings): 27 | urdf_doc = xacro.process_file(file_path, mappings=mappings) 28 | urdf_xml = urdf_doc.toprettyxml(indent=' ', encoding='utf-8') 29 | urdf_xml = urdf_xml.replace('\n', '') 30 | return urdf_xml 31 | 32 | 33 | def spawn_xacro_urdf_model(name, path, pose, reference_frame, mappings): 34 | description_xml = load_xacro_file(path, mappings) 35 | spawn_urdf(name, description_xml, pose, reference_frame) 36 | 37 | def spawn_xacro_sdf_model(name, path, pose, reference_frame, mappings): 38 | description_xml = load_xacro_file(path, mappings) 39 | spawn_sdf(name, description_xml, pose, reference_frame) 40 | 41 | def spawn_urdf_model(name, path, pose, reference_frame): 42 | description_xml = '' 43 | with open(path, "r") as model_file: 44 | description_xml = model_file.read().replace('\n', '') 45 | 46 | spawn_urdf(name, description_xml, pose, reference_frame) 47 | 48 | 49 | def spawn_sdf(name, description_xml, pose, reference_frame): 50 | rospy.wait_for_service('/gazebo/spawn_sdf_model') 51 | try: 52 | spawn_sdf = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel) 53 | resp_sdf = spawn_sdf(name, description_xml, "/", pose, reference_frame) 54 | except rospy.ServiceException as e: 55 | rospy.logerr("Spawn SDF service call failed: {0}".format(e)) 56 | 57 | def spawn_sdf_model(name, path, pose, reference_frame): 58 | # Load Model SDF 59 | description_xml = '' 60 | with open(path, "r") as model_file: 61 | description_xml = model_file.read().replace('\n', '') 62 | spawn_sdf(name, description_xml, pose,reference_frame) 63 | 64 | 65 | 66 | def load_gazebo_models(): 67 | model_list = [] 68 | 69 | world_reference_frame = "world" 70 | 71 | # 
sorting_demo model path 72 | sorting_demo_models_path = rospkg.RosPack().get_path('sorting_demo') + "/models/" 73 | 74 | # Spawn Blocks Table 75 | blocks_table_name = "blocks_table" 76 | blocks_table_path = sorting_demo_models_path + "table/model.sdf" 77 | blocks_table_pose = Pose(position=Point(x=0.75, y=0.0, z=0.0)) 78 | 79 | spawn_sdf_model(blocks_table_name, blocks_table_path, blocks_table_pose, world_reference_frame) 80 | model_list.append(blocks_table_name) 81 | 82 | # Spawn Trays Table 83 | trays_table_name = "trays_table" 84 | trays_table_path = sorting_demo_models_path + "table/model.sdf" 85 | trays_table_pose = Pose(position=Point(x=0.0, y=0.95, z=0.0)) 86 | 87 | spawn_sdf_model(trays_table_name, trays_table_path, trays_table_pose, world_reference_frame) 88 | model_list.append(trays_table_name) 89 | 90 | # Spawn trays 91 | tray_path = sorting_demo_models_path + "tray/tray.urdf.xacro" 92 | 93 | tray_poses = [Pose(position=Point(x=-0.3, y=0.7, z=0.7828)), 94 | Pose(position=Point(x=0.0, y=0.7, z=0.7828)), 95 | Pose(position=Point(x=0.3, y=0.7, z=0.7828))] 96 | 97 | for (i, pose) in enumerate(tray_poses): 98 | name = "tray{}".format(i) 99 | spawn_xacro_urdf_model(name, tray_path, pose, world_reference_frame, {}) 100 | model_list.append(name) 101 | 102 | # Spawn blocks 103 | block_path = sorting_demo_models_path + "block/block.urdf.xacro" 104 | 105 | block_poses = [] 106 | k = 3 107 | for i in xrange(k): 108 | for j in xrange(k): 109 | q = tf.transformations.quaternion_from_euler(random.uniform(0, 2 * math.pi), random.uniform(0, 2 * math.pi), 110 | random.uniform(0, 2 * math.pi)) 111 | 112 | 113 | block_poses.append(Pose(position=Point(x=0.45 + j * 0.15 + random.uniform(-1, 1) * 0.03, 114 | y=-0.15 + i * 0.15 + random.uniform(-1, 1) * 0.03, z=0.7725), 115 | orientation=Quaternion(x=q[0], y=q[1], z=q[2], w=q[3]))) 116 | 117 | #block_poses.append(Pose(position=Point(x= 0.45 + j*0.12 +random.uniform(-1, 1)*0.03 , y= -0.15 + i * 0.15 +random.uniform(-1, 1)*0.03, z=0.7725))) 118 | 119 | """ 120 | Pose(position=Point(x=0.60, y=0.1265, z=0.7725)), 121 | Pose(position=Point(x=0.80, y=0.12, z=0.7725)), 122 | Pose(position=Point(x=0.60, y=-0.1, z=0.7725)), 123 | Pose(position=Point(x=0.80, y=-0.1, z=0.7725)), 124 | Pose(position=Point(x=0.4225, y=-0.1, z=0.7725)), 125 | Pose(position=Point(x=0.60, y=-0.35, z=0.7725)), 126 | Pose(position=Point(x=0.80, y=-0.35, z=0.7725)), 127 | Pose(position=Point(x=0.4225, y=-0.35, z=0.7725)) 128 | """ 129 | 130 | for (i, (pose, color_mappings)) in enumerate(zip(block_poses, BLOCK_COLOR_MAPPINGS)): 131 | name = "block{}".format(i) 132 | 133 | mappings = {"edge_length" : str(CUBE_EDGE_LENGTH)} 134 | mappings.update(color_mappings) 135 | 136 | spawn_xacro_urdf_model(name, block_path, pose, world_reference_frame, mappings) 137 | model_list.append(name) 138 | 139 | return model_list, block_poses 140 | 141 | 142 | def delete_gazebo_models(model_list): 143 | # This will be called on ROS Exit, deleting Gazebo models 144 | # Do not wait for the Gazebo Delete Model service, since 145 | # Gazebo should already be running. 
If the service is not 146 | # available since Gazebo has been killed, it is fine to error out 147 | try: 148 | delete_model = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel) 149 | 150 | for model in model_list: 151 | resp_delete = delete_model(model) 152 | except rospy.ServiceException as e: 153 | print("Delete Model service call failed: {0}".format(e)) 154 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/goto_angles_in_contact.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | # Copyright (c) 2016-2018, Rethink Robotics Inc. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | from rospkg.distro import current_distro_codename 17 | 18 | import rospy 19 | import argparse 20 | from geometry_msgs.msg import Pose 21 | from intera_motion_interface import ( 22 | MotionTrajectory, 23 | MotionWaypoint, 24 | MotionWaypointOptions, 25 | InteractionOptions 26 | ) 27 | from intera_motion_msgs.msg import TrajectoryOptions 28 | from intera_interface import Limb 29 | from intera_motion_interface.utility_functions import int2bool 30 | 31 | 32 | def interaction_joint_trajectory(limb, 33 | joint_angles, 34 | trajType, 35 | interaction_active, 36 | interaction_control_mode, 37 | interaction_frame, 38 | speed_ratio, 39 | accel_ratio, 40 | K_impedance, 41 | max_impedance, 42 | in_endpoint_frame, 43 | force_command, 44 | K_nullspace, 45 | endpoint_name, 46 | timeout, 47 | disable_damping_in_force_control, 48 | disable_reference_resetting, 49 | rotations_for_constrained_zeroG): 50 | try: 51 | 52 | traj = MotionTrajectory(limb=limb) 53 | 54 | wpt_opts = MotionWaypointOptions(max_joint_speed_ratio=speed_ratio, 55 | max_joint_accel=accel_ratio) 56 | 57 | waypoint = MotionWaypoint(options = wpt_opts.to_msg(), limb = limb) 58 | 59 | # one single waypoint 60 | current_joint_angles = limb.joint_ordered_angles() 61 | waypoint.set_joint_angles(joint_angles = current_joint_angles ) 62 | traj.append_waypoint(waypoint.to_msg()) 63 | 64 | if len(current_joint_angles ) != len(joint_angles): 65 | rospy.logerr('The number of joint_angles must be %d', len(current_joint_angles )) 66 | return None 67 | 68 | # ----- testing intermediate points with real robot 69 | middle_joint_angles = [ (current_joint_angles[i] + joint_angles[i])/2.0 for i in xrange(len(current_joint_angles))] 70 | waypoint.set_joint_angles(joint_angles=middle_joint_angles) 71 | traj.append_waypoint(waypoint.to_msg()) 72 | 73 | waypoint.set_joint_angles(joint_angles = joint_angles) 74 | traj.append_waypoint(waypoint.to_msg()) 75 | # ----- end testing intermediate points with real robot 76 | 77 | # set the interaction control options in the current configuration 78 | interaction_options = InteractionOptions() 79 | trajectory_options = TrajectoryOptions() 80 | trajectory_options.interaction_control = True 81 | trajectory_options.interpolation_type = trajType 82 | 83 | 
interaction_options.set_interaction_control_active(int2bool(interaction_active)) 84 | interaction_options.set_K_impedance(K_impedance) 85 | interaction_options.set_max_impedance(int2bool(max_impedance)) 86 | interaction_options.set_interaction_control_mode(interaction_control_mode) 87 | interaction_options.set_in_endpoint_frame(int2bool(in_endpoint_frame)) 88 | interaction_options.set_force_command(force_command) 89 | interaction_options.set_K_nullspace(K_nullspace) 90 | interaction_options.set_endpoint_name(endpoint_name) 91 | if len(interaction_frame) < 7: 92 | rospy.logerr('The number of elements must be 7!') 93 | elif len(interaction_frame) == 7: 94 | 95 | quat_sum_square = interaction_frame[3]*interaction_frame[3] + interaction_frame[4]*interaction_frame[4] + interaction_frame[5]*interaction_frame[5] + interaction_frame[6]*interaction_frame[6] 96 | if quat_sum_square < 1.0 + 1e-7 and quat_sum_square > 1.0 - 1e-7: 97 | target_interaction_frame = Pose() 98 | target_interaction_frame.position.x = interaction_frame[0] 99 | target_interaction_frame.position.y = interaction_frame[1] 100 | target_interaction_frame.position.z = interaction_frame[2] 101 | target_interaction_frame.orientation.w = interaction_frame[3] 102 | target_interaction_frame.orientation.x = interaction_frame[4] 103 | target_interaction_frame.orientation.y = interaction_frame[5] 104 | target_interaction_frame.orientation.z = interaction_frame[6] 105 | interaction_options.set_interaction_frame(target_interaction_frame) 106 | else: 107 | rospy.logerr('Invalid input to quaternion! The quaternion must be a unit quaternion!') 108 | else: 109 | rospy.logerr('Invalid input to interaction_frame!') 110 | 111 | interaction_options.set_disable_damping_in_force_control(disable_damping_in_force_control) 112 | interaction_options.set_disable_reference_resetting(disable_reference_resetting) 113 | interaction_options.set_rotations_for_constrained_zeroG(rotations_for_constrained_zeroG) 114 | 115 | trajectory_options.interaction_params = interaction_options.to_msg() 116 | traj.set_trajectory_options(trajectory_options) 117 | 118 | result = traj.send_trajectory(timeout=timeout) 119 | if result is None: 120 | rospy.logerr('Trajectory FAILED to send!') 121 | return 122 | 123 | if result.result: 124 | rospy.loginfo('Motion controller successfully finished the trajectory with interaction options set!') 125 | else: 126 | rospy.logerr('Motion controller failed to complete the trajectory with error %s', 127 | result.errorId) 128 | 129 | # print the resultant interaction options 130 | rospy.loginfo('Interaction Options:\n%s', interaction_options.to_msg()) 131 | 132 | except rospy.ROSInterruptException: 133 | rospy.logerr('Keyboard interrupt detected from the user. %s', 134 | 'Exiting before trajectory completion.') 135 | 136 | 137 | def main(): 138 | """ 139 | Move the robot arm to the specified configuration 140 | with the desired interaction control options. 141 | Call using: 142 | $ rosrun intera_examples go_to_joint_angles_in_contact.py [arguments: see below] 143 | 144 | -q 0.0 0.0 0.0 0.0 0.0 0.0 0.0 145 | --> Go to joint pose: 0.0 0.0 0.0 0.0 0.0 0.0 0.0 using default settings 146 | 147 | -q 0.1 -0.2 0.15 -0.05 -0.08 0.14 -0.04 -s 0.1 148 | --> Go to pose [...] with a speed ratio of 0.1 149 | 150 | -q -0.2 0.1 0.1 0.2 -0.3 0.2 0.4 -s 0.9 -a 0.1 151 | --> Go to pose [...] 
with a speed ratio of 0.9 and an accel ratio of 0.1 152 | 153 | --trajType CARTESIAN 154 | --> Use a Cartesian interpolated endpoint path to reach the goal 155 | 156 | === Interaction Mode options === 157 | 158 | -st 1 159 | --> Set the interaction controller state (1 for True, 0 for False) in the current configuration 160 | 161 | -k 500.0 500.0 500.0 10.0 10.0 10.0 162 | --> Set K_impedance to [500.0 500.0 500.0 10.0 10.0 10.0] in the current configuration 163 | 164 | -m 0 165 | --> Set max_impedance to False for all 6 directions in the current configuration 166 | 167 | -m 1 1 0 1 1 1 168 | --> Set max_impedance to [True True False True True True] in the current configuration 169 | 170 | -kn 5.0 3.0 5.0 4.0 6.0 4.0 6.0 171 | --> Set K_nullspace to [5.0 3.0 5.0 4.0 6.0 4.0 6.0] in the current configuration 172 | 173 | -f 0.0 0.0 30.0 0.0 0.0 0.0 174 | --> Set force_command to [0.0 0.0 30.0 0.0 0.0 0.0] in the current configuration 175 | 176 | -ef 177 | --> Set in_endpoint_frame to True in the current configuration 178 | 179 | -en 'right_hand' 180 | --> Specify the desired endpoint frame where impedance and force control behaviors are defined 181 | 182 | -md 1 183 | --> Set interaction_control_mode to impedance mode for all 6 directions in the current configuration 184 | (1: impedance, 2: force, 3: impedance with force limit, 4: force with motion limit) 185 | 186 | -md 1 1 2 1 1 1 187 | --> Set interaction_control_mode to [impedance, impedance, force, impedance, impedance, impedance] in the current configuration 188 | (1: impedance, 2: force, 3: impedance with force limit, 4: force with motion limit) 189 | """ 190 | 191 | arg_fmt = argparse.RawDescriptionHelpFormatter 192 | parser = argparse.ArgumentParser(formatter_class=arg_fmt, 193 | description=main.__doc__) 194 | parser.add_argument( 195 | "-q", "--joint_angles", type=float, 196 | nargs='+', default=[0.0, -0.9, 0.0, 1.8, 0.0, -0.9, 0.0], 197 | help="A list of joint angles, one for each of the 7 joints, J0...J6") 198 | parser.add_argument( 199 | "-s", "--speed_ratio", type=float, default=0.1, 200 | help="A value between 0.001 (slow) and 1.0 (maximum joint velocity)") 201 | parser.add_argument( 202 | "-a", "--accel_ratio", type=float, default=0.5, 203 | help="A value between 0.001 (slow) and 1.0 (maximum joint accel)") 204 | parser.add_argument( 205 | "-t", "--trajType", type=str, default='JOINT', 206 | choices=['JOINT', 'CARTESIAN'], 207 | help="trajectory interpolation type") 208 | parser.add_argument( 209 | "-st", "--interaction_active", type=int, default=1, choices = [0, 1], 210 | help="Activate (1) or Deactivate (0) interaction controller") 211 | parser.add_argument( 212 | "-k", "--K_impedance", type=float, 213 | nargs='+', default=[1300.0, 1300.0, 1300.0, 30.0, 30.0, 30.0], 214 | help="A list of desired stiffnesses, one for each of the 6 directions -- stiffness units are (N/m) for first 3 and (Nm/rad) for second 3 values") 215 | parser.add_argument( 216 | "-m", "--max_impedance", type=int, 217 | nargs='+', default=[1, 1, 1, 1, 1, 1], choices = [0, 1], 218 | help="A list of impedance modulation states, one for each of the 6 directions (a single value can be provided to apply the same value to all the directions) -- 0 for False, 1 for True") 219 | parser.add_argument( 220 | "-md", "--interaction_control_mode", type=int, 221 | nargs='+', default=[1, 1, 1, 1, 1, 1], choices = [1,2,3,4], 222 | help="A list of desired interaction control modes (1: impedance, 2: force, 3: impedance with force limit, 4: force with motion limit), one for each 
of the 6 directions") 223 | parser.add_argument( 224 | "-fr", "--interaction_frame", type=float, 225 | nargs='+', default=[0, 0, 0, 1, 0, 0, 0], 226 | help="Specify the reference frame for the interaction controller -- first 3 values are positions [m] and last 4 values are orientation in quaternion (w, x, y, z)") 227 | parser.add_argument( 228 | "-ef", "--in_endpoint_frame", action='store_true', default=False, 229 | help="Set the desired reference frame to endpoint frame; otherwise, it is base frame by default") 230 | parser.add_argument( 231 | "-en", "--endpoint_name", type=str, default='right_hand', 232 | help="Set the desired endpoint frame by its name; otherwise, it is right_hand frame by default") 233 | parser.add_argument( 234 | "-f", "--force_command", type=float, 235 | nargs='+', default=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 236 | help="A list of desired force commands, one for each of the 6 directions -- in force control mode this is the vector of desired forces/torques to be regulated in (N) and (Nm), in impedance with force limit mode this vector specifies the magnitude of forces/torques (N and Nm) that the command will not exceed") 237 | parser.add_argument( 238 | "-kn", "--K_nullspace", type=float, 239 | nargs='+', default=[5.0, 10.0, 5.0, 10.0, 5.0, 10.0, 5.0], 240 | help="A list of desired nullspace stiffnesses, one for each of the 7 joints (a single value can be provided to apply the same value to all the directions) -- units are in (Nm/rad)") 241 | parser.add_argument( 242 | "-dd", "--disable_damping_in_force_control", action='store_true', default=False, 243 | help="Disable damping in force control") 244 | parser.add_argument( 245 | "-dr", "--disable_reference_resetting", action='store_true', default=False, 246 | help="The reference signal is reset to actual position to avoid jerks/jumps when interaction parameters are changed. This option allows the user to disable this feature.") 247 | parser.add_argument( 248 | "-rc", "--rotations_for_constrained_zeroG", action='store_true', default=False, 249 | help="Allow arbitrary rotational displacements from the current orientation for constrained zero-G (use only for a stationary reference orientation)") 250 | parser.add_argument( 251 | "--timeout", type=float, default=None, 252 | help="Max time in seconds to complete motion goal before returning. 
None is interpreted as an infinite timeout.") 253 | 254 | args = parser.parse_args(rospy.myargv()[1:]) 255 | 256 | 257 | rospy.init_node('go_to_joint_angles_in_contact_py') 258 | limb = Limb() 259 | 260 | interaction_joint_trajectory(limb, 261 | joint_angles = args.joint_angles, 262 | trajType= args.trajType, 263 | interaction_control_mode= args.interaction_control_mode, 264 | interaction_active= args.interaction_active, 265 | interaction_frame= args.interaction_frame, 266 | speed_ratio = args.speed_ratio, 267 | accel_ratio = args.accel_ratio, 268 | K_impedance= args.K_impedance, 269 | max_impedance = args.max_impedance, 270 | in_endpoint_frame = args.in_endpoint_frame, 271 | force_command = args.force_command, 272 | K_nullspace = args.K_nullspace, 273 | endpoint_name = args.endpoint_name, 274 | timeout = args.timeout, 275 | disable_damping_in_force_control=args.disable_damping_in_force_control, 276 | disable_reference_resetting = args.disable_reference_resetting , 277 | rotations_for_constrained_zeroG = args.rotations_for_constrained_zeroG 278 | ) 279 | 280 | if __name__ == '__main__': 281 | main() 282 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/gripper_action_server.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | from math import fabs 4 | 5 | import rospy 6 | 7 | import actionlib 8 | import demo_constants 9 | 10 | from control_msgs.msg import ( 11 | GripperCommandAction, 12 | GripperCommandFeedback, 13 | GripperCommandResult, 14 | ) 15 | import real_gripper 16 | from real_gripper import PSGGripper 17 | import intera_interface 18 | 19 | 20 | class GripperActionServer(object): 21 | def __init__(self): 22 | self._ns = "/robot/gripper_action_server" 23 | 24 | if demo_constants.is_real_robot(): 25 | self._gripper = PSGGripper() # intera_interface.Gripper() 26 | else: 27 | self._gripper = intera_interface.Gripper() 28 | 29 | # Action Server 30 | self._server = actionlib.SimpleActionServer( 31 | self._ns, 32 | GripperCommandAction, 33 | execute_cb=self._on_gripper_action, 34 | auto_start=False) 35 | self._action_name = rospy.get_name() 36 | self._server.start() 37 | self._gripper.set_dead_zone("0.021") 38 | 39 | # Action Feedback/Result 40 | self.feedback_msg = GripperCommandFeedback() 41 | self.result_msg = GripperCommandResult() 42 | 43 | # Initialize Parameters 44 | self._timeout = 5.0 45 | 46 | def _update_feedback(self, position): 47 | self.feedback_msg.position = self._gripper.get_position() 48 | self.feedback_msg.effort = self._gripper.get_force() 49 | self.feedback_msg.stalled = False 50 | self.feedback_msg.reached_goal = False 51 | 52 | self._server.publish_feedback(self.feedback_msg) 53 | 54 | def _command_gripper_update(self, position, is_close_goal): 55 | if is_close_goal: 56 | self._gripper.close() 57 | rospy.logwarn("cmd: close") 58 | else: 59 | self._gripper.open() 60 | rospy.logwarn("cmd: open") 61 | 62 | # self._gripper.set_position(position) 63 | 64 | def check_success(self, position, close_goal): 65 | 66 | rospy.logwarn("gripping force: " + str(self._gripper.get_force())) 67 | rospy.logwarn("gripper position: " + str(self._gripper.get_position())) 68 | rospy.logwarn("gripper position deadzone: " + str(self._gripper.get_dead_zone())) 69 | 70 | if not self._gripper.is_moving(): 71 | success = True 72 | else: 73 | success = False 74 | 75 | # success = fabs(self._gripper.get_position() - position) < self._gripper.get_dead_zone() 76 | 77 
| 78 | rospy.logwarn("gripping success: " + str(success)) 79 | 80 | return success 81 | 82 | def _on_gripper_action(self, goal): 83 | # Store position and effort from call 84 | # Position to 0:100 == close:open 85 | position = goal.command.position 86 | effort = goal.command.max_effort 87 | 88 | # Check for errors 89 | if self._gripper.has_error(): 90 | rospy.logerr("%s: Gripper error - please restart action server." % 91 | (self._action_name,)) 92 | self._server.set_aborted() 93 | 94 | is_close_goal = position < 0.02 95 | 96 | # Reset feedback/result 97 | self._update_feedback(position) 98 | 99 | # 20 Hz gripper state rate 100 | control_rate = rospy.Rate(20.0) 101 | 102 | # Record start time 103 | start_time = rospy.get_time() 104 | 105 | def now_from_start(start): 106 | return rospy.get_time() - start 107 | 108 | self._command_gripper_update(position, is_close_goal) 109 | rospy.sleep(0.2) 110 | 111 | # Continue commanding goal until success or timeout 112 | while ((now_from_start(start_time) < self._timeout or 113 | self._timeout < 0.0) and not rospy.is_shutdown()): 114 | if self._server.is_preempt_requested(): 115 | self._gripper.stop() 116 | rospy.loginfo("%s: Gripper Action Preempted" % 117 | (self._action_name,)) 118 | self._server.set_preempted(self.result_msg) 119 | return 120 | self._update_feedback(position) 121 | if self.check_success(position, is_close_goal): 122 | self._server.set_succeeded(self.result_msg) 123 | return 124 | self._command_gripper_update(position, is_close_goal) 125 | control_rate.sleep() 126 | 127 | # Gripper failed to achieve goal before timeout/shutdown 128 | self._gripper.stop() 129 | if not rospy.is_shutdown(): 130 | rospy.logerr("%s: Gripper Command Not Achieved in Allotted Time" % 131 | (self._action_name,)) 132 | self._update_feedback(position) 133 | self._server.set_aborted(self.result_msg) 134 | 135 | 136 | import argparse 137 | 138 | import rospy 139 | 140 | from dynamic_reconfigure.server import Server 141 | 142 | 143 | def start_server(gripper): 144 | print("Initializing node... ") 145 | rospy.init_node("rsdk_gripper_action_server%s" % 146 | ("" if gripper == 'both' else "_" + gripper,)) 147 | print("Initializing gripper action server...") 148 | 149 | gas = GripperActionServer() 150 | 151 | print("Running. 
Ctrl-c to quit") 152 | rospy.spin() 153 | 154 | 155 | def main(): 156 | arg_fmt = argparse.ArgumentDefaultsHelpFormatter 157 | parser = argparse.ArgumentParser(formatter_class=arg_fmt) 158 | parser.add_argument("-g", "--gripper", dest="gripper", default="both", 159 | choices=['both', 'left', 'right'], 160 | help="gripper action server limb", ) 161 | args = parser.parse_args(rospy.myargv()[1:]) 162 | start_server(args.gripper) 163 | 164 | 165 | if __name__ == "__main__": 166 | main() 167 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/program.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import sys 3 | import os 4 | 5 | # Assuming that pdvsd is located in the working folder 6 | sys.path.append(os.getcwd()) 7 | 8 | import functools 9 | import sys 10 | import rospy 11 | 12 | import demo_constants 13 | import gazebo_models 14 | from task_planner import TaskPlanner 15 | 16 | def main(): 17 | 18 | rospy.init_node("sorting_demo") 19 | rospy.logwarn("Hello world") 20 | 21 | if not demo_constants.is_real_robot(): 22 | # Load Gazebo Models via Spawning Services 23 | # Note that the models reference is the /world frame 24 | # and the IK operates with respect to the /base frame 25 | model_list, original_block_poses = gazebo_models.load_gazebo_models() 26 | # Remove models from the scene on shutdown 27 | rospy.on_shutdown(functools.partial(gazebo_models.delete_gazebo_models, model_list)) 28 | 29 | rospy.logwarn("Creating task planner") 30 | 31 | task_planner = TaskPlanner() 32 | 33 | rospy.logwarn("Hello world") 34 | 35 | task_planner.create_go_home_task(check_obstacles=False).result() 36 | 37 | task_facade = task_planner.get_task_facade() 38 | 39 | #task_facade.start() 40 | 41 | task_facade.run_rest_server() 42 | 43 | # rospy.sleep(15) 44 | # task_facade.pick_block_by_color("BLUE") 45 | # task_facade.put_block_into_tray("BLUE", "1") 46 | 47 | # task_facade.put_all_contents_on_table() 48 | 49 | # task_facade.pause() 50 | 51 | # rospy.sleep(30) 52 | # task_facade.resume() 53 | 54 | # task_facade.stop("GREEN") 55 | 56 | rospy.logwarn("task planner spin") 57 | task_planner.spin() 58 | 59 | # ask the robot to greet 60 | # task_facade.greet() 61 | 62 | 63 | if __name__ == '__main__': 64 | sys.exit(main()) 65 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/real_gripper.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | class PSGGripper(): 4 | def __init__(self): 5 | pass 6 | 7 | def get_position(self): 8 | return 0 9 | 10 | def get_force(self): 11 | return 0 12 | 13 | def set_dead_zone(self, v): 14 | return 0 15 | 16 | def get_dead_zone(self): 17 | return 0 18 | 19 | def is_moving(self): 20 | return 0 21 | 22 | def has_error(self): 23 | return False 24 | 25 | def stop(self): 26 | pass 27 | 28 | def close(self): 29 | pass 30 | 31 | def open(self): 32 | pass -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/reinforcement_learning/task_space_simulation.py: -------------------------------------------------------------------------------- 1 | import gym 2 | from gym import utils 3 | from gym.envs.toy_text import discrete 4 | import numpy as np 5 | 6 | class PositionEnv(discrete.DiscreteEnv): 7 | """ 8 | Grid 5x5 world: 25 slots 4 posibles values (B,G,R,-) 9 | 25^4 = 390625 states (390K) 10 | 11 | 
Actions: 25 (pick or place) 12 | 13 | The tabular action policy then has ~390K entries. 14 | 15 | """ 16 | 17 | def __init__(self, nS, nA, P, isd): 18 | super(PositionEnv, self).__init__(nS, nA, P, isd) 19 | 20 | self.rows = 5 21 | self.cols = 5 22 | self.values = 3 + 1 23 | self.desc = np.zeros((self.rows, self.cols), dtype=int) # board descriptor grid 24 | 25 | self.states = (self.rows * self.cols) ** self.values 26 | self.actions = self.rows * self.cols 27 | 28 | 29 | # transition model 30 | # {action: [(probability, nextstate, reward, done)]} 31 | self.P = {s: {a: [] for a in range(self.actions)} for s in range(self.states)} 32 | 33 | 34 | def is_holland_flag(self): 35 | pass 36 | 37 | def encode(self): 38 | pass 39 | 40 | 41 | def step(self, action): 42 | pass 43 | 44 | 45 | def render(self): 46 | pass 47 | 48 | 49 | def encode_state_aux(i, values, slots): 50 | if slots == 1: 51 | return [i] 52 | else: 53 | current = i % values 54 | return encode_state_aux(i // values, values, slots - 1) + [current] 55 | 56 | slots = 16 57 | values = 4 58 | 59 | count = values**slots 60 | # NOTE: exhaustive enumeration; only feasible for small slot counts 61 | for i in xrange(count): 62 | print str(i) + ":" + str(encode_state_aux(i, values, slots)) -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/robot_control.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | 4 | 5 | import intera_interface 6 | import rospy 7 | from geometry_msgs.msg import ( 8 | Pose, 9 | ) 10 | 11 | from sorting_demo import demo_constants 12 | from real_gripper import PSGGripper 13 | 14 | class SawyerRobotControl(object): 15 | def __init__(self, trajectory_planner, limb="right", hover_distance=0.08, tip_name="right_gripper_tip"): 16 | """ 17 | :param limb: name of the limb to control ("right" on Sawyer) 18 | :param hover_distance: hover offset above grasp poses, in meters 19 | :param tip_name: end-effector tip frame used for kinematics 20 | """ 21 | self.trajectory_planner = trajectory_planner 22 | self._limb_name = limb # string 23 | self._tip_name = tip_name # string 24 | self._hover_distance = hover_distance # in meters 25 | self._limb = intera_interface.Limb(limb) 26 | 27 | if demo_constants.is_real_robot(): 28 | self._gripper = PSGGripper() 29 | else: 30 | self._gripper = intera_interface.Gripper() 31 | 32 | self._robot_enable = intera_interface.RobotEnable() 33 | 34 | # verify robot is enabled 35 | print("Getting robot state... ") 36 | self._rs = intera_interface.RobotEnable(intera_interface.CHECK_VERSION) 37 | self._init_state = self._rs.state().enabled 38 | print("Enabling robot... 
") 39 | self._rs.enable() 40 | 41 | """ 42 | def move_to_start(self, start_angles=None): 43 | ''' 44 | :param start_angles: 45 | :return: 46 | ''' 47 | print("Moving the {0} arm to start pose...".format(self._limb_name)) 48 | if not start_angles: 49 | start_angles = dict(zip(self._joint_names, [0] * 7)) 50 | self._guarded_move_to_joint_position(start_angles) 51 | self.gripper_open() 52 | """ 53 | 54 | 55 | 56 | def gripper_open(self): 57 | """ 58 | :return: 59 | """ 60 | rospy.logwarn("OPENING GRIPPER") 61 | self._gripper.open() 62 | while self._gripper.is_moving() and not rospy.is_shutdown(): 63 | rospy.sleep(0.4) 64 | 65 | def gripper_close(self): 66 | """ 67 | :return: 68 | """ 69 | rospy.logwarn("CLOSING GRIPPER") 70 | self._gripper.close() 71 | 72 | while self._gripper.is_moving() and not rospy.is_shutdown(): 73 | rospy.sleep(0.1) 74 | 75 | def _guarded_move_to_joint_position(self, joint_angles, timeout=5.0): 76 | """ 77 | :param joint_angles: 78 | :param timeout: 79 | :return: 80 | """ 81 | if rospy.is_shutdown(): 82 | return 83 | if joint_angles: 84 | self._limb.move_to_joint_positions(joint_angles, timeout=timeout) 85 | else: 86 | rospy.logerr("No Joint Angles provided for move_to_joint_positions. Staying put.") 87 | 88 | 89 | 90 | 91 | def disable(self): 92 | """ 93 | 94 | :return: 95 | """ 96 | self._robot_enable.disable() 97 | 98 | def enable(self): 99 | """ 100 | 101 | :return: 102 | """ 103 | self._robot_enable.enable() 104 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/robot_funcs.py: -------------------------------------------------------------------------------- 1 | import math 2 | import rospy 3 | 4 | 5 | def force_joint_limits(jntspos): 6 | joint_limits = [(-3.0503, 3.0503), (-3.8095, 2.2736), (-3.0426, 3.0426), (-3.0439, 3.0439)] 7 | 8 | rospy.logwarn(jntspos) 9 | for i in xrange(len(joint_limits)): 10 | lower = joint_limits[i][0] 11 | upper = joint_limits[i][1] 12 | if jntspos[i] < lower and (jntspos[i] + 2.0 * math.pi) < upper: 13 | jntspos[i] = jntspos[i] + 2 * math.pi 14 | 15 | if jntspos[i] > upper and (jntspos[i] - 2.0 * math.pi) > lower: 16 | jntspos[i] = jntspos[i] - 2 * math.pi 17 | 18 | return jntspos -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/robot_tasks_facade.py: -------------------------------------------------------------------------------- 1 | import flask 2 | import rospy 3 | from flask import Flask 4 | 5 | 6 | class RobotTaskFacade: 7 | """ 8 | class specially designed to control the robot via voice commands. Some examples could be: 9 | 'Hello robot' 10 | 'Hey robot! What task are you doing now?' 11 | 'Hey robot! Put all red pieces on the tray 2' 12 | 'Hey robot! Pause your work' 13 | 'Hey robot! Give me a red brick' 14 | 'Hey robot! Stop sorting green pieces' 15 | 'Hey robot! restart your work' 16 | 'Hey robot! What is the color of the piece you are grabbing' 17 | 'Hey robot! 
Put all tray contents on the table' 18 | """ 19 | 20 | def __init__(self, task_planner): 21 | self.task_planner = task_planner 22 | 23 | self.app = Flask(__name__) 24 | 25 | @self.app.route("/state") 26 | def get_state(): 27 | try: 28 | return flask.json.jsonify(response="ACK", result=self.get_state()) 29 | except Exception as ex: 30 | return flask.json.jsonify(response="ERR", message=ex.message) 31 | 32 | @self.app.route("/current_task") 33 | def get_current_task(): 34 | try: 35 | return flask.json.jsonify(response="ACK", result=self.get_current_task()) 36 | except Exception as ex: 37 | return flask.json.jsonify(response="ERR", message=ex.message) 38 | 39 | @self.app.route("/count_table_pieces/<color>") 40 | def count_pieces_on_table_by_color(color): 41 | try: 42 | return flask.json.jsonify(response="ACK", result=self.count_pieces_on_table_by_color(color)) 43 | except Exception as ex: 44 | return flask.json.jsonify(response="ERR", message=ex.message) 45 | 46 | @self.app.route("/current_piece_color") 47 | def get_current_piece_color(): 48 | try: 49 | return flask.json.jsonify(response="ACK", result=self.get_current_piece_color()) 50 | except Exception as ex: 51 | return flask.json.jsonify(response="ERR", message=ex.message) 52 | 53 | @self.app.route("/pause") 54 | def pause(): 55 | try: 56 | return flask.json.jsonify(response="ACK", result=self.pause()) 57 | except Exception as ex: 58 | return flask.json.jsonify(response="ERR", message=ex.message) 59 | 60 | @self.app.route("/resume") 61 | def resume(): 62 | try: 63 | return flask.json.jsonify(response="ACK", result=self.resume()) 64 | except Exception as ex: 65 | return flask.json.jsonify(response="ERR", message=ex.message) 66 | 67 | @self.app.route("/start") 68 | def start(): 69 | try: 70 | return flask.json.jsonify(response="ACK", result=self.start()) 71 | except Exception as ex: 72 | return flask.json.jsonify(response="ERR", message=ex.message) 73 | 74 | @self.app.route("/stop") 75 | def stop(): 76 | try: 77 | return flask.json.jsonify(response="ACK", result=self.stop()) 78 | except Exception as ex: 79 | return flask.json.jsonify(response="ERR", message=ex.message) 80 | 81 | @self.app.route("/greet") 82 | def greet(): 83 | try: 84 | return flask.json.jsonify(response="ACK", result=self.greet()) 85 | except Exception as ex: 86 | return flask.json.jsonify(response="ERR", message=ex.message) 87 | 88 | @self.app.route("/put_all_contents_on_table") 89 | def put_all_contents_on_table(): 90 | try: 91 | return flask.json.jsonify(response="ACK", result=self.put_all_contents_on_table()) 92 | except Exception as ex: 93 | return flask.json.jsonify(response="ERR", message=ex.message) 94 | 95 | 96 | @self.app.route("/pick_block_by_color/<color>") 97 | def pick_block_by_color(color): 98 | try: 99 | return flask.json.jsonify(response="ACK", result=self.pick_block_by_color(color)) 100 | except Exception as ex: 101 | return flask.json.jsonify(response="ERR", message=ex.message) 102 | 103 | @self.app.route("/place_block_to_tray/<trayid>") 104 | def place_block_to_tray_id(trayid): 105 | try: 106 | return flask.json.jsonify(response="ACK", result=self.place_block_to_tray(int(trayid))) 107 | except Exception as ex: 108 | return flask.json.jsonify(response="ERR", message=ex.message) 109 | 110 | 111 | @self.app.route("/put_block_into_tray/<color>/<trayid>") 112 | def put_block_into_tray(color, trayid): 113 | try: 114 | return flask.json.jsonify(response="ACK", result=self.put_block_into_tray(color, int(trayid))) 115 | except Exception as ex: 116 | return flask.json.jsonify(response="ERR", 
message=ex.message) 117 | 118 | @self.app.route("/view") 119 | def web_interface(): 120 | import rospkg 121 | 122 | webviewpath = rospkg.RosPack().get_path('sorting_demo') + "/share/web_view.html" 123 | with open(webviewpath, 'r') as myfile: 124 | data = myfile.read() 125 | return data 126 | 127 | 128 | def run_rest_server(self): 129 | self.app.run(threaded=True) 130 | 131 | # -------- observer methods ------------------ 132 | 133 | def get_state(self): 134 | """ 135 | 136 | :return: the state of the application 137 | """ 138 | 139 | return self.task_planner.get_state() 140 | 141 | def get_current_task(self): 142 | """ 143 | :return: the stack of tasks the robot is currently executing 144 | """ 145 | # returns the higher level task 146 | 147 | task_stack = self.task_planner.get_task_stack() 148 | 149 | return task_stack 150 | 151 | def count_pieces_on_table_by_color(self, color): 152 | """ 153 | :param color: str - "red", "blue", "green" 154 | :return: number of pieces of that color on the table 155 | """ 156 | count = self.task_planner.count_pieces_on_table_by_color(color) 157 | return count 158 | 159 | def get_current_piece_color(self): 160 | """ 161 | 162 | :return: color of the block currently held, or None if the gripper is empty 163 | """ 164 | return None if self.task_planner.gripper_state.holding_block is None else self.task_planner.gripper_state.holding_block.get_color() 165 | 166 | # ---------- lifecycle --------------------------- 167 | def pause(self): 168 | """ 169 | 'Hey robot! Pause your work' 170 | 171 | It pauses the loop job 172 | :return: 173 | """ 174 | self.task_planner.pause() 175 | return "ACK" 176 | 177 | def resume(self): 178 | """ 179 | resume the loop job 180 | :return: 181 | """ 182 | self.task_planner.resume() 183 | return "ACK" 184 | 185 | def start(self): 186 | """ 187 | Starts the main sorting loop. 188 | :return: 189 | """ 190 | self.task_planner.execute_task(self.task_planner.create_main_loop_task) 191 | return "ACK" 192 | 193 | def stop(self): 194 | """ 195 | Stops the current work. 196 | :return: 197 | """ 198 | self.task_planner.stop() 199 | return "ACK" 200 | 201 | # --------- request tasks methods --------------------- 202 | 203 | def greet(self): 204 | """ 205 | 'Hello robot' 206 | :return: 207 | """ 208 | self.task_planner.execute_task(self.task_planner.create_greet_task) 209 | return "ACK" 210 | 211 | def put_all_contents_on_table(self): 212 | """ 213 | 'Hey robot! Put all tray contents on the table' 214 | :return: 215 | """ 216 | self.task_planner.execute_task(self.task_planner.put_all_contents_on_table) 217 | return "ACK" 218 | 219 | def pick_block_by_color(self, color): 220 | """ 221 | 'Hey robot! Give me a red brick' 222 | 223 | 224 | :param color: str - "red", "blue", "green" 225 | :return: 226 | """ 227 | 228 | self.task_planner.execute_task(self.task_planner.pick_block_from_table_by_color, args=[color]) 229 | return "ACK" 230 | 231 | def place_block_to_tray(self, trayid): 232 | self.task_planner.execute_task(self.task_planner.place_block_to_tray, args=[trayid]) 233 | return "ACK" 234 | 235 | 236 | def put_block_into_tray(self, color, trayid): 237 | """ 238 | 'Hey robot! Put a red piece on the tray 2' 239 | 240 | :param color: str - "red", "blue", "green" 241 | :param trayid: identifier of the target tray 242 | :return: 243 | """ 244 | 245 | self.task_planner.execute_task(self.task_planner.put_block_into_tray_task, args=[color, trayid]) 246 | return "ACK" 247 | 248 | # ------------ configure loop sorting --------------------- 249 | #@route("/disable_sorting_by_color/<color>") 250 | def disable_sorting_by_color(self, color): 251 | """ 252 | 'Hey robot! 
Stop sorting green pieces' 253 | :param color: 254 | :return: 255 | """ 256 | 257 | self.task_planner.execute_task(lambda: self.task_planner.disable_sorting_by_color(color)) 258 | 259 | #@route("/enable_sorting_by_color/<color>") 260 | def enable_sorting_by_color(self, color): 261 | """ 262 | 'Hey robot! Resume sorting green pieces' 263 | :param color: 264 | :return: 265 | """ 266 | self.task_planner.execute_task(lambda: self.task_planner.enable_sorting_by_color(color)) 267 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/tasks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/src/sorting_demo/tasks/__init__.py -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/tasync.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | import rospy 3 | import traceback 4 | 5 | import sys 6 | 7 | 8 | def tasync(taskname): 9 | def wrapper(f): 10 | @wraps(f) 11 | def wrapped(self, *f_args, **f_kwargs): 12 | 13 | if rospy.is_shutdown(): 14 | sys.exit(0) 15 | 16 | if self.has_cancel_signal(): 17 | rospy.logerr("trying to invoke but cancel signal: " + str(taskname)) 18 | self.print_tasks() 19 | return Task("CANCEL", None) 20 | 21 | 22 | if self.pause_flag: 23 | rospy.logerr("PAUSE") 24 | while self.pause_flag and not rospy.is_shutdown(): 25 | rospy.sleep(0.5) 26 | rospy.logwarn("Task %s is paused" % taskname) 27 | 28 | tt = Task(taskname, None) 29 | 30 | def lamb(): 31 | res = None 32 | try: 33 | # f_kwargs["task"] = tt 34 | rospy.logwarn("task body started: " + taskname) 35 | res = f(self, *f_args, **f_kwargs) 36 | except Exception as ex: 37 | rospy.logerr("task wrapping error (%s): %s" % (taskname, str(ex))) 38 | traceback.print_exc() 39 | return res 40 | 41 | self.add_task(tt) 42 | 43 | rospy.logwarn("launching task: " + taskname) 44 | fut = self.executor.submit(lamb) 45 | tt.future = fut 46 | 47 | def got_result(fut): 48 | try: 49 | rospy.logwarn("removing task: " + tt.name) 50 | self.remove_task(tt) 51 | except Exception as ex: 52 | rospy.logwarn("error at done callback: " + tt.name + " " + str(ex)) 53 | 54 | self.print_tasks() 55 | 56 | fut.add_done_callback(got_result) 57 | 58 | return tt 59 | 60 | return wrapped 61 | 62 | return wrapper 63 | 64 | 65 | class Task: 66 | def __init__(self, name, future): 67 | self.name = name 68 | self.future = future 69 | self.marked_cancel = False 70 | self.resultcancel = None 71 | 72 | def cancel(self): 73 | self.marked_cancel = True 74 | self.resultcancel = self.future.cancel() 75 | 76 | def result(self): 77 | if self.future is not None: 78 | return self.future.result() 79 | else: 80 | return None 81 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/trajectory_action_server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2013-2018, Rethink Robotics Inc. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | """ 18 | Intera SDK Joint Trajectory Controller 19 | Unlike other robots running ROS, this is not a Motor Controller plugin, 20 | but a regular node using the SDK interface. 21 | """ 22 | import argparse 23 | import importlib 24 | 25 | import rospy 26 | from dynamic_reconfigure.server import Server 27 | import intera_interface 28 | 29 | from intera_joint_trajectory_action.joint_trajectory_action import ( 30 | JointTrajectoryActionServer, 31 | ) 32 | from trajectory_msgs.msg import ( 33 | JointTrajectoryPoint, 34 | ) 35 | 36 | def start_server(limb, rate, mode, valid_limbs): 37 | rospy.loginfo("Initializing node... ") 38 | rospy.init_node("sdk_{0}_joint_trajectory_action_server{1}".format( 39 | mode, "" if limb == 'all_limbs' else "_" + limb,)) 40 | 41 | rospy.wait_for_service("/ExternalTools/right/PositionKinematicsNode/IKService") 42 | 43 | rospy.loginfo("Initializing joint trajectory action server...") 44 | robot_name = intera_interface.RobotParams().get_robot_name().lower().capitalize() 45 | config_module = "intera_interface.cfg" 46 | if mode == 'velocity': 47 | config_name = ''.join([robot_name,"VelocityJointTrajectoryActionServerConfig"]) 48 | elif mode == 'position': 49 | config_name = ''.join([robot_name,"PositionJointTrajectoryActionServerConfig"]) 50 | else: 51 | config_name = ''.join([robot_name,"PositionFFJointTrajectoryActionServerConfig"]) 52 | cfg = importlib.import_module('.'.join([config_module,config_name])) 53 | dyn_cfg_srv = Server(cfg, lambda config, level: config) 54 | jtas = [] 55 | if limb == 'all_limbs': 56 | for current_limb in valid_limbs: 57 | jtas.append(JointTrajectoryActionServer(current_limb, dyn_cfg_srv, 58 | rate, mode)) 59 | else: 60 | jtas.append(JointTrajectoryActionServer(limb, dyn_cfg_srv, rate, mode)) 61 | 62 | 63 | def cleanup(): 64 | for j in jtas: 65 | j.clean_shutdown() 66 | 67 | rospy.on_shutdown(cleanup) 68 | rospy.loginfo("Joint Trajectory Action Server Running. Ctrl-c to quit") 69 | rospy.spin() 70 | 71 | 72 | def main(): 73 | rp = intera_interface.RobotParams() 74 | valid_limbs = rp.get_limb_names() 75 | if not valid_limbs: 76 | rp.log_message(("Cannot detect any limb parameters on this robot. 
" 77 | "Exiting."), "ERROR") 78 | return 79 | # Add an option for starting a server for all valid limbs 80 | all_limbs = valid_limbs 81 | all_limbs.append("all_limbs") 82 | arg_fmt = argparse.ArgumentDefaultsHelpFormatter 83 | parser = argparse.ArgumentParser(formatter_class=arg_fmt) 84 | parser.add_argument( 85 | "-l", "--limb", dest="limb", default=valid_limbs[0], 86 | choices=all_limbs, 87 | help="joint trajectory action server limb" 88 | ) 89 | parser.add_argument( 90 | "-r", "--rate", dest="rate", default=100.0, 91 | type=float, help="trajectory control rate (Hz)" 92 | ) 93 | parser.add_argument( 94 | "-m", "--mode", default='position_w_id', 95 | choices=['position_w_id', 'position', 'velocity'], 96 | help="control mode for trajectory execution" 97 | ) 98 | args = parser.parse_args(rospy.myargv()[1:]) 99 | 100 | start_server(args.limb, args.rate, args.mode, valid_limbs) 101 | 102 | 103 | if __name__ == "__main__": 104 | main() 105 | -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/AI-Robot-Challenge-Lab/6868c8d3b739b7fe8f1e582316fc46d80ec27ceb/src/sorting_demo/src/sorting_demo/utils/__init__.py -------------------------------------------------------------------------------- /src/sorting_demo/src/sorting_demo/utils/mathutils.py: -------------------------------------------------------------------------------- 1 | import tf 2 | import tf.transformations 3 | import numpy 4 | import geometry_msgs.msg 5 | from geometry_msgs.msg import Pose, Point, Quaternion 6 | 7 | 8 | def inverse_compose(basehomopose, homopose): 9 | """ 10 | :param basehomopose: 4x4 homogeneous transform matrix 11 | :param homopose: 4x4 homogeneous transform matrix 12 | :return: 13 | """ 14 | return numpy.matmul(numpy.linalg.inv(basehomopose), homopose) 15 | 16 | 17 | def get_homo_matrix_from_pose_msg(pose, tag=""): 18 | basetrans = tf.transformations.translation_matrix((pose.position.x, 19 | pose.position.y, 20 | pose.position.z)) 21 | 22 | baserot = tf.transformations.quaternion_matrix((pose.orientation.x, 23 | pose.orientation.y, 24 | pose.orientation.z, 25 | pose.orientation.w)) 26 | 27 | # rospy.loginfo(tag + " basetrans: " + str(basetrans)) 28 | # rospy.loginfo(tag +" baserot: " + str(baserot)) 29 | 30 | combined = numpy.matmul(basetrans, baserot) 31 | 32 | # rospy.loginfo(tag + " combined: " + str(combined)) 33 | trans = tf.transformations.translation_from_matrix(combined) 34 | quat = tf.transformations.quaternion_from_matrix(combined) 35 | 36 | # rospy.loginfo(tag + " back basetrans: " + str(trans)) 37 | # rospy.loginfo(tag +" back baserot: " + str(quat)) 38 | 39 | return combined 40 | 41 | 42 | def homotransform_to_pose_msg(homotransform): 43 | trans = tf.transformations.translation_from_matrix(homotransform) 44 | quat = tf.transformations.quaternion_from_matrix(homotransform) 45 | return Pose( 46 | position=Point(x=trans[0], y=trans[1], z=trans[2]), 47 | orientation=Quaternion(x=quat[0], y=quat[1], z=quat[2], w=quat[3])) 48 | 49 | 50 | def composition(parent, child): 51 | return numpy.matmul(parent, child) 52 | 53 | 54 | def rot_y(pitch): 55 | return [[numpy.cos(pitch), 0, numpy.sin(pitch), 0], [0, 1, 0, 0], [-numpy.sin(pitch), 0, numpy.cos(pitch), 0], 56 | [0, 0, 0, 1]] 57 | 58 | 59 | def rot_z(yaw): 60 | return [[numpy.cos(yaw), -numpy.sin(yaw), 0, 0], [numpy.sin(yaw), numpy.cos(yaw), 0, 0], [0, 0, 1, 
0], [0, 0, 0, 1]] 61 | 62 | 63 | def rot_x(roll): 64 | return [[1, 0, 0, 0], [0, numpy.cos(roll), -numpy.sin(roll), 0], [0, numpy.sin(roll), numpy.cos(roll), 0], 65 | [0, 0, 0, 1]] 66 | --------------------------------------------------------------------------------
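A minimal usage sketch for these helpers (illustrative only, not part of the repository; the import path and the pose values are assumptions): it converts a grasp Pose into a homogeneous matrix, rotates it 90 degrees about the base z-axis, and converts the result back to a Pose message.

    import math
    from geometry_msgs.msg import Pose, Point, Quaternion
    from sorting_demo.utils import mathutils

    grasp = Pose(position=Point(x=0.5, y=0.0, z=0.2),
                 orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0))

    # Pose message -> 4x4 homogeneous transform matrix
    grasp_mat = mathutils.get_homo_matrix_from_pose_msg(grasp)

    # rotate 90 degrees about the base z-axis, then convert back to a Pose
    rotated = mathutils.composition(mathutils.rot_z(math.pi / 2.0), grasp_mat)
    rotated_pose = mathutils.homotransform_to_pose_msg(rotated)

    # inverse_compose recovers the transform of `rotated` relative to `grasp`
    relative = mathutils.inverse_compose(grasp_mat, rotated)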