├── aws ├── .gitignore ├── gzweb5Nvidia │ ├── gazebo_entrypoint.sh │ ├── gzweb_entrypoint.sh │ └── Dockerfile ├── run.sh ├── gzweb5Nvidia.yml ├── setup_local.md ├── setup_remote.md └── README.md └── README.md /aws/.gitignore: -------------------------------------------------------------------------------- 1 | nvidia-driver.run 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # gazebo_docker_demos 2 | A collection of Docker demos for Gazebo 3 | -------------------------------------------------------------------------------- /aws/gzweb5Nvidia/gazebo_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # setup ros environment 5 | source "/usr/share/gazebo/setup.sh" 6 | export QT_X11_NO_MITSHM=1 7 | 8 | exec "$@" 9 | -------------------------------------------------------------------------------- /aws/gzweb5Nvidia/gzweb_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | 5 | # setup ros environment 6 | echo GAZEBO_MASTER_URI=$GAZEBO_MASTER_URI 7 | /root/gzweb/./start_gzweb.sh 8 | 9 | exec "$@" 10 | -------------------------------------------------------------------------------- /aws/run.sh: -------------------------------------------------------------------------------- 1 | docker run -ti \ 2 | -u $(whoami) -w "$HOME" \ 3 | -v /home:/home \ 4 | -v /dev/snd:/dev/snd \ 5 | -v "/etc/passwd:/etc/passwd:ro" \ 6 | -v "/etc/shadow:/etc/shadow:ro" \ 7 | -v "/etc/group:/etc/group:ro" \ 8 | -v "/etc/sudoers.d:/etc/sudoers.d:ro" \ 9 | -v "/etc/localtime:/etc/localtime:ro" \ 10 | -e DISPLAY=unix$DISPLAY \ 11 | -v /tmp/.X11-unix:/tmp/.X11-unix \ 12 | --device /dev/nvidia0:/dev/nvidia0 \ 13 | --device /dev/nvidiactl:/dev/nvidiactl \ 14 | "$@" 15 | -------------------------------------------------------------------------------- /aws/gzweb5Nvidia/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM osrf/gazebo:gzweb5 2 | 3 | # install packages 4 | RUN apt-get update && apt-get install -y \ 5 | binutils \ 6 | mesa-utils \ 7 | module-init-tools \ 8 | x-window-system\ 9 | && rm -rf /var/lib/apt/lists/* 10 | 11 | # install nvidia drivers 12 | ADD nvidia-driver.run /tmp/nvidia-driver.run 13 | RUN sh /tmp/nvidia-driver.run -a -N --ui=none --no-kernel-module \ 14 | && rm /tmp/nvidia-driver.run 15 | 16 | # setup entrypoint 17 | COPY ./gazebo_entrypoint.sh / 18 | COPY ./gzweb_entrypoint.sh / 19 | 20 | ENTRYPOINT ["/gazebo_entrypoint.sh"] 21 | CMD ["bash"] 22 | -------------------------------------------------------------------------------- /aws/gzweb5Nvidia.yml: -------------------------------------------------------------------------------- 1 | gzweb: 2 | image: osrf/gazebo:gzweb5Nvidia 3 | user: root 4 | ports: 5 | - "80:8080" 6 | - "7681:7681" 7 | links: 8 | - "gzserver" 9 | environment: 10 | - "GAZEBO_MASTER_URI=http://gzserver:11345" 11 | entrypoint: "/gzweb_entrypoint.sh" 12 | command: 'ping gzserver' 13 | 14 | 15 | gzserver: 16 | image: osrf/gazebo:gzweb5Nvidia 17 | user: ubuntu 18 | working_dir: /home/ubuntu 19 | volumes: 20 | - "/home:/home" 21 | - "/dev/snd:/dev/snd" 22 | - "/etc/group:/etc/group:ro" 23 | - "/etc/passwd:/etc/passwd:ro" 24 | - "/etc/shadow:/etc/shadow:ro" 25 | - "/etc/sudoers.d:/etc/sudoers.d:ro" 26 | - "/etc/localtime:/etc/localtime:ro" 27 | - "/tmp/.X11-unix:/tmp/.X11-unix:rw" 28 | devices: 29 | - "/dev/nvidia0" 30 | - "/dev/nvidiactl" 31 | environment: 32 | - "DISPLAY=:0" 33 | - "QT_X11_NO_MITSHM=1" 34 | entrypoint: "/gazebo_entrypoint.sh" 35 | command: gzserver --verbose 36 | -------------------------------------------------------------------------------- /aws/setup_local.md: -------------------------------------------------------------------------------- 1 | # Get Docker Experimental 2 | # https://github.com/docker/docker/tree/master/experimental 3 | wget -qO- https://experimental.docker.com/ | sh 4 | 5 | # setup docker group 6 | # https://docs.docker.com/installation/ubuntulinux/ 7 | # log out, log in 8 | 9 | # install docker swarm 10 | # http://docs.docker.com/swarm/ 11 | 12 | # install docker compose 13 | # https://docs.docker.com/compose/install/ 14 | 15 | # install docker machine 16 | # https://docs.docker.com/machine/ 17 | 18 | # add your AWS cradentials to enviroment 19 | export AWS_ACCESS_KEY_ID=#################### 20 | export AWS_SECRET_ACCESS_KEY=######################################## 21 | 22 | 23 | 24 | # what region the VM should be started 25 | export AWS_REGION=us-west-2 26 | # as well as what zone in the region's site (region specific) 27 | export AWS_ZONE=b 28 | # we'll specify what VM image to use (region specific) 29 | # use the aws image to enable graphical hardware accelaration 30 | export AWS_AMI_ID=ami-b7babb87 31 | # we'll specify what hardware to use (region specific) 32 | # use the GPU cluster for rendering images 33 | export AWS_INSTANCE_TYPE=g2.2xlarge 34 | # security group same as default, docker-machine, but with added http=80 + gzweb=7681 inbound. 35 | # default being i.e. ssh=22 + dockerPort=2376 + swarmPort=3376 inbound 36 | export AWS_SECURITY_GROUP=sg-3515d051 37 | # Virtual Private Cloud network 38 | export AWS_VPC_ID=vpc-e2eb6787 39 | 40 | 41 | # Use docker_macine to make aws istance as swarm master 42 | sid=docker-machine -D create \ 43 | --driver amazonec2 \ 44 | --amazonec2-vpc-id vpc-e2eb6787 \ 45 | --amazonec2-security-group sg-3515d051 \ 46 | --amazonec2-region us-west-2 \ 47 | --amazonec2-zone b \ 48 | --amazonec2-ami ami-b7babb87 \ 49 | --amazonec2-instance-type g2.2xlarge \ 50 | --swarm \ 51 | --swarm-master \ 52 | swarm-master 53 | 54 | docker-machine -D create \ 55 | --driver amazonec2 \ 56 | --amazonec2-vpc-id vpc-722ea217 \ 57 | --amazonec2-region us-west-2 \ 58 | --amazonec2-zone b \ 59 | --amazonec2-ami ami-b7babb87 \ 60 | --amazonec2-instance-type g2.2xlarge \ 61 | --swarm \ 62 | --swarm-master \ 63 | swarm-master 64 | 65 | docker-machine --debug create \ 66 | --driver amazonec2 \ 67 | --amazonec2-vpc-id vpc-46251523 \ 68 | --amazonec2-zone b \ 69 | test1 70 | 71 | 72 | # We'll use compose to tell our swarm master how to run gzserver on aws 73 | # and begien gzclient GUI locally 74 | # 75 | 76 | # point docker client to the swarm master node 77 | eval "$(docker-machine env swarm-master)" 78 | 79 | # launch gazebo with gzserver and gzweb services 80 | docker-compose -f gzweb5Nvidia.yml up 81 | -------------------------------------------------------------------------------- /aws/setup_remote.md: -------------------------------------------------------------------------------- 1 | # Setting up our AMI 2 | For this setup we'll start with a stock Ubnutu 14.04 LTS AMI. 3 | 4 | # Installs 5 | 6 | ## Tools 7 | Add some helpful tools 8 | ```shell 9 | sudo apt-get update 10 | sudo apt-get install -y \ 11 | fish \ 12 | glances \ 13 | byobu \ 14 | wget 15 | ``` 16 | 17 | ## Docker 18 | Install experimental docker 19 | ```shell 20 | wget -qO- https://experimental.docker.com/ | sh 21 | ``` 22 | > add user to docker group 23 | 24 | ```shell 25 | sudo usermod -aG docker ubuntu 26 | ``` 27 | 28 | ## Graphics 29 | 30 | ### Nvidia 31 | Install nvidia driver, here we'll just piggyback off the cuda. More details [here](# http://www.r-tutor.com/gpu-computing/cuda-installation/cuda7.0-ubuntu 32 | ). 33 | ```shell 34 | export CUDA_DEB_URL=http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/cuda-repo-ubuntu1404_7.0-28_amd64.deb 35 | export CUDA_DEB=cuda-repo-ubuntu1404_7.0-28_amd64.deb 36 | wget $CUDA_DEB_URL 37 | sudo dpkg -i $CUDA_DEB 38 | sudo apt-get update 39 | sudo apt-get install cuda-7-0 40 | ``` 41 | > reboot and test drivers and test with `nvidia-smi`. Should look somthing like this: 42 | 43 | ```shell 44 | nvidia-smi 45 | Thu Jul 9 06:59:14 2015 46 | +------------------------------------------------------+ 47 | | NVIDIA-SMI 346.46 Driver Version: 346.46 | 48 | |-------------------------------+----------------------+----------------------+ 49 | | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | 50 | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | 51 | |===============================+======================+======================| 52 | | 0 GRID K520 Off | 0000:00:03.0 Off | N/A | 53 | | N/A 29C P8 17W / 125W | 41MiB / 4095MiB | 0% Default | 54 | +-------------------------------+----------------------+----------------------+ 55 | 56 | +-----------------------------------------------------------------------------+ 57 | | Processes: GPU Memory | 58 | | GPU PID Type Process name Usage | 59 | |=============================================================================| 60 | | 0 14999 G /usr/bin/X 22MiB | 61 | | 0 25995 G gzserver 6MiB | 62 | +-----------------------------------------------------------------------------+ 63 | ``` 64 | > Doesn't work? Go down the [rabbit hole](http://tleyden.github.io/blog/2014/10/25/cuda-6-dot-5-on-aws-gpu-instance-running-ubuntu-14-dot-04/) and read the `Disable Nouveau` section, watch the versions though: `sudo apt-get install linux-headers-$(uname -r)` 65 | 66 | ### X server 67 | Install and setup X display. Snippets taken from cloudsim [setup scripts](https://bitbucket.org/osrf/cloudsim/src/f605c94afd95e0078401ec1130b7c206c69380bc/cloudsimd/launchers/launch_utils/startup_scripts.py?at=default). 68 | 69 | ```shell 70 | sudo apt-get update 71 | apt-get install -y \ 72 | linux-headers-`uname -r` \ 73 | pciutils \ 74 | lsof \ 75 | gnome-session \ 76 | gnome-session-fallback \ 77 | xserver-xorg-core \ 78 | xserver-xorg \ 79 | mesa-utils \ 80 | lightdm \ 81 | x11-xserver-utils 82 | ``` 83 | > Have the NVIDIA tools create the xorg configuration file for us, retrieiving the PCI BusID for the current system. The BusID can vary from machine to machine. 84 | 85 | ```shell 86 | nvidia-xconfig --use-display-device=None --virtual=1280x1024 --busid "nvidia-xconfig --query-gpu-info | grep BusID | head -n 1 | sed 's/PCI BusID : PCI:/PCI:/'" 87 | export DISPLAY=:0 88 | ``` 89 | > Configure lightdm 90 | 91 | ```shell 92 | echo " 93 | [SeatDefaults] 94 | greeter-session=unity-greeter 95 | autologin-user=ubuntu 96 | autologin-user-timeout=0 97 | user-session=gnome-fallback 98 | " > /etc/lightdm/lightdm.conf 99 | ``` 100 | > Check that lightdm runs 101 | 102 | ```shell 103 | initctl stop lightdm || true 104 | initctl start lightdm 105 | ``` 106 | -------------------------------------------------------------------------------- /aws/README.md: -------------------------------------------------------------------------------- 1 | # Launching Gazebo + Docker and Amazon AWS 2 | In this tutorial we'll use Docker to deploy Gazebo with gzserver and gzweb running in a GPU instance on AWS connected to our local host running gzclient. 3 | 4 | ## Dependencies 5 | Here is a breakdown of the dependencies across our demo application: 6 | 7 | #### Local 8 | * [Docker](https://www.docker.com/) 9 | * [Docker Compose](https://docs.docker.com/compose/) 10 | * [Docker Machine](https://docs.docker.com/machine/) 11 | * [Docker Swarm](https://docs.docker.com/swarm/) 12 | * [Nvidia](https://developer.nvidia.com/cuda-downloads) 13 | 14 | #### Remote 15 | * [Docker](https://www.docker.com/) 16 | * [Nvidia](https://developer.nvidia.com/cuda-downloads) 17 | * [AWS](http://aws.amazon.com/) 18 | 19 | #### Image 20 | * [Gazebo](http://gazebosim.org/) 21 | * [Nvidia](https://developer.nvidia.com/cuda-downloads) 22 | 23 | ## Setup 24 | 25 | ### Local 26 | For our local setup, we'll need to install Docker along with the other Docker tools so that we can make the necessary API calls to deploy our remote setup. If you wish to run gzclient locally as well, we'll need to have the GPU drivers installed so that we can mount the necessary devices into the container running gzclient for rendering in the GUI. 27 | 28 | ### Remote 29 | For our remote setup, we'll need an appropriate ami or virtual image to use for our GPU instance, so the Docker engine as well as the relevant Nvidia drivers installed. You can use this public AMI `ami-77dbdb47`, however this AMI is region specific to Oregon. If you need to make you own, fallow the remote_setup.md for further instructions. The other thing we'll need to do is make a security group with the appropriate inbound and outbound permissions, i.e. the port openings for gzweb and docker connections. Again see the remote_setup.md for details. 30 | 31 | ### Images 32 | For our image setup, we'll need to have Gazebo installed, meaning both gzserver and gzclient, as well as gzweb. We'll also need the matching Nvidia driver installed in the image to boot if we wish to have any of the sensors rendering properly. For this you can use the public docker image `osrf/gazebo:gzweb5Nvidia` available [here](https://registry.hub.docker.com/u/osrf/gazebo/). The Dockerfile to make this image is also within this tutorial. 33 | 34 | ## Deployment 35 | So get things started we'll use docker-machine to create our aws GPU instance for us with our desired configuration, and designating it as our swam master. Then we'll launch gzserver and gzweb on the remote instance and attach a new network to the running container. Once the server is running, we should be able point your web browser to the remote instance's external address and see our simulation's interface. Finally we'll add our local docker engine to the swarm cluster and we'll start gzclient in a locally running container attached to the same network allowing gzclient to connect to gzserver. 36 | 37 | ### Making a remote machine 38 | We'll need to use our AWS credentials, so add them to your shell session as environmental variables: 39 | ```shell 40 | export AWS_ACCESS_KEY_ID=#################### 41 | export AWS_SECRET_ACCESS_KEY=######################################## 42 | ``` 43 | 44 | > Now create our AWS GPU instance and swarm master 45 | * what region the VM should be started 46 | * `us-west-2` 47 | * as well as what zone in the region's site (region specific) 48 | * `b` 49 | * we'll specify what VM image to use (region specific). Use the aws image to enable graphical hardware acceleration 50 | * `ami-6dd8d85d` 51 | * we'll specify what hardware to use (region specific). Use the GPU cluster for rendering images 52 | * `g2.2xlarge` 53 | * security group same as default, docker-machine, but with added http=80 + gzweb=7681 inbound. Default being i.e. ssh=22 + dockerPort=2376 + swarmPort=3376 inbound 54 | * `sg-3515d051` 55 | * Virtual Private Cloud network corresponding to the used security group 56 | * `vpc-e2eb6787` 57 | 58 | >Use docker_macine to make aws instance as swarm master 59 | 60 | ```shell 61 | docker-machine -D create \ 62 | --driver amazonec2 \ 63 | --amazonec2-vpc-id vpc-722ea217 \ 64 | --amazonec2-region us-west-2 \ 65 | --amazonec2-zone b \ 66 | --amazonec2-ami ami-77dbdb47 \ 67 | --amazonec2-instance-type g2.2xlarge \ 68 | --swarm \ 69 | --swarm-master \ 70 | swarm-master 71 | ``` 72 | 73 | ### Starting gzserver and gzweb 74 | Now we'll point docker client to the swarm master node: 75 | ```shell 76 | eval "$(docker-machine env swarm-master)" 77 | ``` 78 | And then launch the gzserver and gzweb services using the compose file from inside this demo directory 79 | ```shell 80 | docker-compose -f gzweb5Nvidia.yml up 81 | ``` 82 | 83 | ### Loading gzweb 84 | **TODO** For some reason, docker-machine always wants to create a security group, never use the one given to it. So just let it make it's own group named `Docker+Machine`, and then edit that security group from the AWS console to allow for the http=80 + gzweb=7681 inbound rules. 85 | 86 | Then point your browser to the AWS external address. 87 | 88 | ### Creating a network 89 | 90 | ### Connecting our local machine 91 | 92 | ### Starting gzclient 93 | 94 | 95 | ## Tear down 96 | 97 | 98 | ### Stopping the services 99 | To stop the gzserver and gzweb services: 100 | ```shell 101 | docker-compose -f gzweb5Nvidia.yml stop 102 | docker-compose -f gzweb5Nvidia.yml rm 103 | ``` 104 | 105 | ### Removing the machinesTo terminate the AWS instance and remove the remote docker engine, swarm-master: 106 | ```shell 107 | docker-machine rm swarm-master 108 | ``` 109 | 110 | ## Troubleshooting 111 | 112 | * **Q: Do I need to use a GPU instance:** 113 | The GPU instance on EC2 are expensive. Doe this Gazebo demo requare I use them? 114 | 115 | **A: No, not neccesaraly** 116 | The GPUs are only neccesary if you requare any cameras or scene rendering done by the server for computer vision related simulations. If the only visual aspect you need is for the client GUI, then that is graphic dependancy for the host running the client, not the server running the simulation, gzserver. So to modify this demo to use a cheeper instance, just comment out the docker-machine argument specifing the GPU instance. By defult, the AWS driver will use a t2.micro instance. You could also comment out the AMI image and use the defult, as the one specifed is only customly modified for the added Nvidia drivers and enabeled X server setup for gzserver to work with. Lastly, you'll need to comment out the device lines in the compose file before using it. As the Nvidia device will not exsist on a t2.micro, and thus fail to be mounted into the container running gzserver. 117 | 118 | * **Q: I can't create a new machine with docker-machine because of key pairs:** 119 | The error mentions that a key pair with the same name already exists. 120 | 121 | **A: Check your AWS console** 122 | If you are having issues creating the machine, you may see an error about a key pair for `swarm-master` can not be created as as it already exists. This may be due to prior failed attempts in creating an engin and not being removed properly. Simply check your AWS web console and remove the old key pair so that is can be regenerated when you try agian. 123 | 124 | * **Q: I can't create a new machine with docker-machine because it already exists:** 125 | The error mentions that a an engin with the same name already exists. 126 | 127 | **A: Check your list of docker-machines** 128 | This may be due to not having removed the last machine named swarm-master from docker-machine. The name chosen, `swarm-master` in not special, as it only serves to help us identify the master of a paricular swarm. But if you try and make a second master with the same name as a machine that is still listed, it will fail. To list the existing machines use: `docker-machine ls` and to remove a particulare machine use" `docker-machine rm `. 129 | 130 | * **Q: I can't connect to gzweb with my browser:** 131 | The machine was created secsesfully, and the gzserver and gzweb services where started sucsesfully by docker-compose, but I still can't connect to gzweb through my web browser using the machine's public address. 132 | 133 | **A: Check your security group in the AWS console** 134 | If everything was started succsesfuly and you can see the gzweb pinging the gzserver from the logging output, but still can not connect to gzweb, then you may need to check your AWS securaty groups. The default security group the docker-machine creates for the EC2 instance to use only provides inbound accsess for the three neccesary docker ports for remote API and SSH access. Using the AWS console you can edit the securaty group named `docker-machine` and add the additinal inbound rules for the HTTP port: 80 and gzweb port: 7681. 135 | 136 | * **Q: I built my own docker image for gzweb and it doesn't look right:** 137 | I built my own docker image for gzweb from scratch using the Dockerfile provided for gzweb and it doesn't look right becuase many of the preview icons for the interface are missing. 138 | 139 | **A: The build procces for gzweb is a bit weird currently** 140 | If you just use `docker build` to create the image for gzweb, you'll need to do a bit of extra handywork to complete the whole gzweb build prosses. The build requares rendering the collection of preview icons for all the 3D objects using gzserver. To do this gzserver needs desplay access, not somthing that is given in a docker build command. So after the `docker-build` is finished, I run a container from the image with x server and gpu hardware mounted, rerun the comilation with the added rendering argument, and commit the new state of the container to the same image tag. This is why I've done this for up and have shared it through Dcoker Hub regestry. Perhaps as we polish out gzweb, we'll find a simplermeanse of rendering durring the build process. 141 | ## Sources 142 | --------------------------------------------------------------------------------