├── www ├── robots.txt ├── assets │ ├── setup-xterm.js │ ├── button.png │ ├── swarm.png │ ├── xterm │ │ └── addons │ │ │ ├── fullscreen │ │ │ ├── fullscreen.css │ │ │ ├── fullscreen.js.map │ │ │ └── fullscreen.js │ │ │ ├── zmodem │ │ │ ├── zmodem.js │ │ │ └── zmodem.js.map │ │ │ ├── webLinks │ │ │ ├── webLinks.js.map │ │ │ └── webLinks.js │ │ │ ├── fit │ │ │ ├── fit.js.map │ │ │ └── fit.js │ │ │ └── terminado │ │ │ ├── terminado.js │ │ │ └── terminado.js.map │ ├── package-lock.json │ ├── editor.css │ ├── landing.css │ └── style.css ├── 503.html ├── ooc.html └── bypass.html ├── .gitattributes ├── .gitignore ├── dockerfiles ├── k8s │ ├── resolv.conf.override │ ├── tokens.csv │ ├── daemon.json │ ├── kubelet.service │ ├── kubernetes.repo │ ├── kubelet.env │ ├── motd │ ├── docker.service │ └── Dockerfile ├── dind │ ├── ssh_config │ ├── .gitconfig │ ├── ucp-config.toml │ ├── sudo │ ├── .profile │ ├── ee │ │ ├── daemon.json │ │ ├── ucp-key.pem │ │ ├── cert.pem │ │ ├── key.pem │ │ └── ucp-cert.pem │ ├── .editorconfig │ ├── docker-prompt │ ├── daemon.json │ ├── motd │ ├── .vimrc │ ├── update_images.sh │ ├── modprobe.sh │ ├── Dockerfile.middle │ ├── Dockerfile.middle-gpu │ ├── docker-entrypoint.sh │ ├── .inputrc │ ├── Dockerfile.dind-ee │ ├── ucp.sh │ ├── ucp-beta.sh │ ├── Dockerfile.base │ ├── workshop.lic │ ├── Dockerfile.base-gpu │ ├── Dockerfile │ └── copy_certs.ps1 └── pwm │ ├── .gitconfig │ ├── .vimrc │ ├── .profile │ ├── daemon.json │ ├── sudo │ ├── motd │ ├── Dockerfile │ └── .inputrc ├── id ├── generator.go ├── xid.go └── mock.go ├── tools.go ├── pwd ├── types │ ├── client.go │ ├── user.go │ ├── session.go │ ├── instance.go │ ├── playground_test.go │ └── playground.go ├── playground.go ├── user.go └── client.go ├── scheduler ├── task │ ├── types.go │ ├── check_ports_test.go │ ├── check_ports.go │ ├── check_k8s_cluster_status_task.go │ ├── check_swarm_ports.go │ ├── check_swarm_ports_test.go │ ├── check_swarm_status.go │ ├── check_k8s_cluster_exposed_ports.go │ ├── collect_stats_test.go │ └── check_swarm_status_test.go └── scheduler_test.go ├── event ├── mock.go ├── event.go ├── local_broker.go └── local_broker_test.go ├── handlers ├── get_instance_images.go ├── close_session.go ├── delete_instance.go ├── user.go ├── fstree_instance.go ├── get_session.go ├── exec.go ├── file_instance.go ├── cookie_id.go ├── ping.go ├── session_setup.go ├── home.go ├── new_instance.go ├── file_upload.go ├── playground.go ├── exam_run.go └── new_session.go ├── haproxy └── haproxy.cfg ├── docker ├── factory_mock.go ├── factory.go └── local_cached_factory.go ├── provisioner ├── factory.go ├── provisioner.go └── overlay.go ├── k8s ├── factory_mock.go ├── local_cached_factory.go └── factory.go ├── Dockerfile ├── internal └── addgenheader │ └── addgenheader.go ├── .github └── workflows │ ├── docker-rose-develop.yml │ ├── docker-rose-release.yml │ ├── test.yml │ ├── docker-rose-develop-debug.yml │ └── docker-base-middle.yml ├── Dockerfile.l2 ├── prometheus.yml ├── LICENSE ├── storage ├── storage.go └── mock.go ├── router ├── host.go ├── host_test.go └── l2 │ └── l2_test.go ├── docker-compose.yml ├── README.md.orig ├── README.md └── api.go /www/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | Disallow: / 3 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | config/gen_bindata.go linguist-generated 2 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | play-with-docker 2 | node_modules 3 | /vendor 4 | -------------------------------------------------------------------------------- /dockerfiles/k8s/resolv.conf.override: -------------------------------------------------------------------------------- 1 | nameserver 8.8.8.8 2 | -------------------------------------------------------------------------------- /dockerfiles/dind/ssh_config: -------------------------------------------------------------------------------- 1 | Host * 2 | StrictHostKeyChecking no 3 | -------------------------------------------------------------------------------- /dockerfiles/dind/.gitconfig: -------------------------------------------------------------------------------- 1 | [url "https://"] 2 | insteadOf = git:// 3 | -------------------------------------------------------------------------------- /dockerfiles/pwm/.gitconfig: -------------------------------------------------------------------------------- 1 | [url "https://"] 2 | insteadOf = git:// 3 | -------------------------------------------------------------------------------- /id/generator.go: -------------------------------------------------------------------------------- 1 | package id 2 | 3 | type Generator interface { 4 | NewId() string 5 | } 6 | -------------------------------------------------------------------------------- /www/assets/setup-xterm.js: -------------------------------------------------------------------------------- 1 | Terminal.applyAddon(fit); 2 | Terminal.applyAddon(fullscreen); 3 | 4 | -------------------------------------------------------------------------------- /dockerfiles/k8s/tokens.csv: -------------------------------------------------------------------------------- 1 | 31ada4fd-adec-460c-809a-9e56ceb75269,pwd,pwd,"system:admin,system:masters" 2 | -------------------------------------------------------------------------------- /www/assets/button.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/freeCompilerCamp/play-with-compiler/HEAD/www/assets/button.png -------------------------------------------------------------------------------- /www/assets/swarm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/freeCompilerCamp/play-with-compiler/HEAD/www/assets/swarm.png -------------------------------------------------------------------------------- /tools.go: -------------------------------------------------------------------------------- 1 | // +build tools 2 | 3 | package tools 4 | 5 | import ( 6 | _ "github.com/jteeuwen/go-bindata" 7 | ) 8 | -------------------------------------------------------------------------------- /dockerfiles/pwm/.vimrc: -------------------------------------------------------------------------------- 1 | syntax on 2 | set autoindent 3 | set expandtab 4 | set number 5 | set shiftwidth=2 6 | set softtabstop=2 7 | -------------------------------------------------------------------------------- /dockerfiles/dind/ucp-config.toml: -------------------------------------------------------------------------------- 1 | [cluster_config] 2 | custom_kubelet_flags = ["--http-check-frequency=20s", "--containerized=false"] 3 | -------------------------------------------------------------------------------- /id/xid.go: 
-------------------------------------------------------------------------------- 1 | package id 2 | 3 | import "github.com/rs/xid" 4 | 5 | type XIDGenerator struct { 6 | } 7 | 8 | func (x XIDGenerator) NewId() string { 9 | return xid.New().String() 10 | } 11 | -------------------------------------------------------------------------------- /dockerfiles/pwm/.profile: -------------------------------------------------------------------------------- 1 | export PS1='\e[1m\e[31m[\h] \e[32m\e[34m\u@$(hostname -i)\e[35m \w\e[0m\n$ ' 2 | alias vi='vim' 3 | export PATH=$PATH:/root/go/bin 4 | cat /etc/motd 5 | echo $BASHPID > /var/run/cwd 6 | -------------------------------------------------------------------------------- /dockerfiles/pwm/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "experimental": true, 3 | "debug": true, 4 | "log-level": "info", 5 | "insecure-registries": ["127.0.0.1"], 6 | "hosts": ["unix:///var/run/docker.sock", "tcp://0.0.0.0:2375"] 7 | } 8 | -------------------------------------------------------------------------------- /www/assets/xterm/addons/fullscreen/fullscreen.css: -------------------------------------------------------------------------------- 1 | .xterm.fullscreen { 2 | position: fixed; 3 | top: 0; 4 | bottom: 0; 5 | left: 0; 6 | right: 0; 7 | width: auto; 8 | height: auto; 9 | z-index: 255; 10 | } 11 | -------------------------------------------------------------------------------- /id/mock.go: -------------------------------------------------------------------------------- 1 | package id 2 | 3 | import "github.com/stretchr/testify/mock" 4 | 5 | type MockGenerator struct { 6 | mock.Mock 7 | } 8 | 9 | func (m *MockGenerator) NewId() string { 10 | args := m.Called() 11 | return args.String(0) 12 | } 13 | -------------------------------------------------------------------------------- /dockerfiles/k8s/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "experimental": true, 3 | "debug": true, 4 | "log-level": "info", 5 | "storage-driver": "vfs", 6 | "insecure-registries": ["127.0.0.1"], 7 | "hosts": ["unix:///var/run/docker.sock", "tcp://0.0.0.0:2375"] 8 | } 9 | -------------------------------------------------------------------------------- /dockerfiles/dind/sudo: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This is shim to help with the case were pasted commands from a readme assume you are not root. Since this isto be run by root, it should effectively be a dummy command that allows the parameters to pass through. 4 | 5 | exec "$@" 6 | -------------------------------------------------------------------------------- /dockerfiles/pwm/sudo: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This is shim to help with the case were pasted commands from a readme assume you are not root. Since this isto be run by root, it should effectively be a dummy command that allows the parameters to pass through. 
4 | 5 | exec "$@" 6 | -------------------------------------------------------------------------------- /dockerfiles/k8s/kubelet.service: -------------------------------------------------------------------------------- 1 | [Service] 2 | Restart=always 3 | EnvironmentFile=/etc/systemd/system/kubelet.env 4 | ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_CGROUP_ARGS $KUBELET_EXTRA_ARGS 5 | -------------------------------------------------------------------------------- /dockerfiles/dind/.profile: -------------------------------------------------------------------------------- 1 | export PS1='\[\033[01;32m\]\u@\h:\W$ \[\033[0m\]' 2 | 3 | export PATH=$HOME/bin:$HOME/.local/bin:$PATH 4 | clear 5 | cat /etc/motd 6 | #echo $BASHPID > /var/run/cwd 7 | 8 | alias ll='ls -alF' 9 | alias la='ls -A' 10 | alias l='ls -CF' 11 | alias ls='ls --color=auto' 12 | alias vi='vim' 13 | -------------------------------------------------------------------------------- /pwd/types/client.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | type Client struct { 4 | Id string `json:"id" bson:"id"` 5 | SessionId string `json:"session_id" bson:"session_id"` 6 | ViewPort ViewPort `json:"viewport"` 7 | } 8 | 9 | type ViewPort struct { 10 | Rows uint `json:"rows"` 11 | Cols uint `json:"cols"` 12 | } 13 | -------------------------------------------------------------------------------- /dockerfiles/k8s/kubernetes.repo: -------------------------------------------------------------------------------- 1 | [kubernetes] 2 | name=Kubernetes 3 | baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 4 | enabled=1 5 | gpgcheck=1 6 | repo_gpgcheck=1 7 | gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg 8 | https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 9 | -------------------------------------------------------------------------------- /dockerfiles/dind/ee/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "experimental": true, 3 | "debug": true, 4 | "log-level": "info", 5 | "insecure-registries": ["127.0.0.1"], 6 | "hosts": ["unix:///var/run/docker.sock", "tcp://0.0.0.0:2376"], 7 | "tls": true, 8 | "tlscert": "/opt/pwd/certs/cert.pem", 9 | "tlskey": "/opt/pwd/certs/key.pem" 10 | } 11 | -------------------------------------------------------------------------------- /dockerfiles/dind/.editorconfig: -------------------------------------------------------------------------------- 1 | # top-most EditorConfig file 2 | root = true 3 | 4 | # Unix-style newlines with a newline ending every file 5 | [*] 6 | end_of_line = lf 7 | insert_final_newline = true 8 | charset = utf-8 9 | indent_style = space 10 | indent_size = 4 11 | 12 | # Tab indentation (no size specified) 13 | [{Makefile,*.go}] 14 | indent_style = tab 15 | -------------------------------------------------------------------------------- /dockerfiles/dind/docker-prompt: -------------------------------------------------------------------------------- 1 | 2 | #!/bin/sh 3 | case "$DOCKER_HOST" in 4 | *:3376) 5 | echo swarm 6 | ;; 7 | *:2376) 8 | echo $DOCKER_MACHINE_NAME 9 | ;; 10 | *:2375) 11 | echo $DOCKER_MACHINE_NAME 12 | ;; 13 | *:55555) 14 | echo $DOCKER_MACHINE_NAME 15 | ;; 16 | "") 17 | echo local 18 | ;; 19 | *) 20 | echo unknown 21 | ;; 22 | esac 23 | 
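(An illustrative aside, not a file in this repository: a minimal standalone Go sketch of how the Client and ViewPort types from pwd/types/client.go above serialize with their JSON tags. The package name, the locally re-declared structs, and the example values are assumptions made only so the snippet runs on its own.)

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the structs from pwd/types/client.go, for illustration only
// (the bson tags are omitted here).
type ViewPort struct {
	Rows uint `json:"rows"`
	Cols uint `json:"cols"`
}

type Client struct {
	Id        string   `json:"id"`
	SessionId string   `json:"session_id"`
	ViewPort  ViewPort `json:"viewport"`
}

func main() {
	// Hypothetical ids; in the real service they come from the id.Generator shown earlier.
	c := Client{Id: "c1", SessionId: "aaaabbbbcccc", ViewPort: ViewPort{Rows: 24, Cols: 80}}
	b, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"id":"c1","session_id":"aaaabbbbcccc","viewport":{"rows":24,"cols":80}}
}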
-------------------------------------------------------------------------------- /scheduler/task/types.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | type ClusterStatus struct { 4 | IsManager bool `json:"is_manager"` 5 | IsWorker bool `json:"is_worker"` 6 | Instance string `json:"instance"` 7 | } 8 | 9 | type ClusterPorts struct { 10 | Manager string `json:"manager"` 11 | Instances []string `json:"instances"` 12 | Ports []int `json:"ports"` 13 | } 14 | -------------------------------------------------------------------------------- /dockerfiles/dind/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "experimental": true, 3 | "debug": true, 4 | "log-level": "info", 5 | "insecure-registries": ["127.0.0.1"], 6 | "hosts": ["unix:///var/run/docker.sock", "tcp://0.0.0.0:2375"], 7 | "tls": DOCKER_TLSENABLE, 8 | "tlscacert": "DOCKER_TLSCACERT", 9 | "tlscert": "DOCKER_TLSCERT", 10 | "tlskey": "DOCKER_TLSKEY" 11 | } 12 | -------------------------------------------------------------------------------- /www/assets/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "requires": true, 3 | "lockfileVersion": 1, 4 | "dependencies": { 5 | "xterm": { 6 | "version": "3.14.5", 7 | "resolved": "https://registry.npmjs.org/xterm/-/xterm-3.14.5.tgz", 8 | "integrity": "sha512-DVmQ8jlEtL+WbBKUZuMxHMBgK/yeIZwkXB81bH+MGaKKnJGYwA+770hzhXPfwEIokK9On9YIFPRleVp/5G7z9g==" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /www/assets/editor.css: -------------------------------------------------------------------------------- 1 | .alert-top { 2 | position: absolute; 3 | top: 0; 4 | right: 0; 5 | width:100px; 6 | display:none; 7 | text-align: center; 8 | padding: 3px; 9 | height: 30px; 10 | margin-bottom: 0px; 11 | } 12 | 13 | .alert-newfile { 14 | text-align: center; 15 | padding: 3px; 16 | font-size: 15px; 17 | } 18 | 19 | .col-md-3 { 20 | overflow-x: auto; 21 | } 22 | -------------------------------------------------------------------------------- /event/mock.go: -------------------------------------------------------------------------------- 1 | package event 2 | 3 | import "github.com/stretchr/testify/mock" 4 | 5 | type Mock struct { 6 | M mock.Mock 7 | } 8 | 9 | func (m *Mock) Emit(name EventType, sessionId string, args ...interface{}) { 10 | m.M.Called(name, sessionId, args) 11 | } 12 | 13 | func (m *Mock) On(name EventType, handler Handler) { 14 | m.M.Called(name, handler) 15 | } 16 | 17 | func (m *Mock) OnAny(handler AnyHandler) { 18 | m.M.Called(handler) 19 | } 20 | -------------------------------------------------------------------------------- /handlers/get_instance_images.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "net/http" 7 | ) 8 | 9 | func GetInstanceImages(rw http.ResponseWriter, req *http.Request) { 10 | playground := core.PlaygroundFindByDomain(req.Host) 11 | if playground == nil { 12 | log.Printf("Playground for domain %s was not found!", req.Host) 13 | rw.WriteHeader(http.StatusBadRequest) 14 | return 15 | } 16 | json.NewEncoder(rw).Encode(playground.AvailableDinDInstanceImages) 17 | } 18 | -------------------------------------------------------------------------------- /haproxy/haproxy.cfg: 
-------------------------------------------------------------------------------- 1 | defaults 2 | mode http 3 | timeout connect 5000ms 4 | 5 | frontend http-in 6 | bind *:8080 7 | timeout client 120m 8 | 9 | acl host_direct hdr_reg(host) -i ^.*\.direct\..*?:?.*$ 10 | 11 | use_backend l2 if host_direct 12 | 13 | default_backend pwd 14 | 15 | backend pwd 16 | timeout connect 5000ms 17 | timeout server 120m 18 | 19 | server node1 pwd:3000 20 | 21 | backend l2 22 | timeout connect 5000ms 23 | timeout server 120m 24 | 25 | server node2 l2:443 26 | -------------------------------------------------------------------------------- /dockerfiles/dind/motd: -------------------------------------------------------------------------------- 1 | ############################################################### 2 | # WARNING!!!! # 3 | # This is a sandbox environment. Using personal credentials # 4 | # is HIGHLY! discouraged. Any consequences of doing so are # 5 | # completely the user's responsibilites. # 6 | # # 7 | # The FreeCompilerCamp team, based on PWD. # 8 | ############################################################### 9 | -------------------------------------------------------------------------------- /dockerfiles/pwm/motd: -------------------------------------------------------------------------------- 1 | ############################################################### 2 | # WARNING!!!! # 3 | # This is a sandbox environment. Using personal credentials # 4 | # is HIGHLY! discouraged. Any consequences of doing so are # 5 | # completely the user's responsibilites. # 6 | # # 7 | # The PWD team. # 8 | ############################################################### 9 | -------------------------------------------------------------------------------- /docker/factory_mock.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "github.com/play-with-docker/play-with-docker/pwd/types" 5 | "github.com/stretchr/testify/mock" 6 | ) 7 | 8 | type FactoryMock struct { 9 | mock.Mock 10 | } 11 | 12 | func (m *FactoryMock) GetForSession(session *types.Session) (DockerApi, error) { 13 | args := m.Called(session) 14 | return args.Get(0).(DockerApi), args.Error(1) 15 | } 16 | 17 | func (m *FactoryMock) GetForInstance(instance *types.Instance) (DockerApi, error) { 18 | args := m.Called(instance) 19 | return args.Get(0).(DockerApi), args.Error(1) 20 | } 21 | -------------------------------------------------------------------------------- /provisioner/factory.go: -------------------------------------------------------------------------------- 1 | package provisioner 2 | 3 | type instanceProvisionerFactory struct { 4 | windows InstanceProvisionerApi 5 | dind InstanceProvisionerApi 6 | } 7 | 8 | func NewInstanceProvisionerFactory(w InstanceProvisionerApi, d InstanceProvisionerApi) InstanceProvisionerFactoryApi { 9 | return &instanceProvisionerFactory{windows: w, dind: d} 10 | } 11 | 12 | func (p *instanceProvisionerFactory) GetProvisioner(instanceType string) (InstanceProvisionerApi, error) { 13 | if instanceType == "windows" { 14 | return p.windows, nil 15 | } else { 16 | return p.dind, nil 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /pwd/types/user.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | type User struct { 4 | Id string `json:"id" bson:"id"` 5 | Name string `json:"name" bson:"name"` 6 | ProviderUserId string 
`json:"provider_user_id" bson:"provider_user_id"` 7 | Avatar string `json:"avatar" bson:"avatar"` 8 | Provider string `json:"provider" bson:"provider"` 9 | Email string `json:"email" bson:"email"` 10 | IsBanned bool `json:"banned" bson:"banned"` 11 | } 12 | 13 | type LoginRequest struct { 14 | Id string `json:"id" bson:"id"` 15 | Provider string `json:"provider" bson:"provider"` 16 | } 17 | -------------------------------------------------------------------------------- /dockerfiles/dind/.vimrc: -------------------------------------------------------------------------------- 1 | 2 | syntax on 3 | set autoindent 4 | set number 5 | set printoptions=number:y 6 | set encoding=utf-8 7 | set wrap 8 | set shiftwidth=4 9 | set showmode 10 | set warn 11 | set tabstop=4 12 | set expandtab 13 | set stal=1 14 | set wrapscan 15 | set dir=~ 16 | set backupdir=~ 17 | set autochdir 18 | set nospell 19 | set ruler 20 | set paste 21 | set cole=0 22 | if has("autocmd") 23 | au FileType html,css setlocal shiftwidth=2 tabstop=2 24 | au BufRead,BufNewFile *.md set filetype=markdown 25 | au BufReadPost * if line("'\"") > 0 && line("'\"") <= line("$") 26 | \| exe "normal! g'\"" | endif 27 | endif 28 | -------------------------------------------------------------------------------- /k8s/factory_mock.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "github.com/play-with-docker/play-with-docker/pwd/types" 5 | "github.com/stretchr/testify/mock" 6 | "k8s.io/client-go/kubernetes" 7 | ) 8 | 9 | type FactoryMock struct { 10 | mock.Mock 11 | } 12 | 13 | func (m *FactoryMock) GetKubeletForInstance(i *types.Instance) (*KubeletClient, error) { 14 | args := m.Called(i) 15 | return args.Get(0).(*KubeletClient), args.Error(1) 16 | } 17 | 18 | func (m *FactoryMock) GetForInstance(instance *types.Instance) (*kubernetes.Clientset, error) { 19 | args := m.Called(instance) 20 | return args.Get(0).(*kubernetes.Clientset), args.Error(1) 21 | } 22 | -------------------------------------------------------------------------------- /dockerfiles/dind/update_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FREECC_DOCKER_REPO="freecompilercamp/pwc" 4 | 5 | # List all the docker image tags 6 | FREECC_DOCKER_IMAGES=" \ 7 | 16.04 \ 8 | 18.04 \ 9 | full \ 10 | llvm10 \ 11 | llvm10-gpu \ 12 | rose-bug \ 13 | rose-debug \ 14 | rose-develop-debug-weekly \ 15 | rose-develop-weekly \ 16 | rose-exam \ 17 | rose-release-weekly \ 18 | " 19 | 20 | # Iterate the string variable using for loop 21 | for tag in ${FREECC_DOCKER_IMAGES}; do 22 | #echo ${FREECC_DOCKER_REPO}:$tag 23 | docker pull ${FREECC_DOCKER_REPO}:$tag 24 | done 25 | 26 | docker image prune -f 27 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.9 2 | 3 | COPY . /go/src/github.com/play-with-docker/play-with-docker 4 | 5 | WORKDIR /go/src/github.com/play-with-docker/play-with-docker 6 | 7 | RUN ssh-keygen -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key >/dev/null 8 | 9 | RUN CGO_ENABLED=0 go build -a -installsuffix nocgo -o /go/bin/play-with-docker . 
10 | 11 | 12 | FROM alpine 13 | 14 | RUN apk --update add ca-certificates 15 | RUN mkdir -p /app/pwd 16 | 17 | COPY --from=0 /go/bin/play-with-docker /app/play-with-docker 18 | COPY --from=0 /etc/ssh/ssh_host_rsa_key /etc/ssh/ssh_host_rsa_key 19 | COPY ./www /app/www 20 | 21 | WORKDIR /app 22 | CMD ["./play-with-docker"] 23 | 24 | EXPOSE 3000 25 | -------------------------------------------------------------------------------- /dockerfiles/k8s/kubelet.env: -------------------------------------------------------------------------------- 1 | KUBELET_KUBECONFIG_ARGS=" --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" 2 | KUBELET_SYSTEM_PODS_ARGS="--pod-manifest-path=/etc/kubernetes/manifests --pod-infra-container-image=k8s.gcr.io/pause:3.2" 3 | KUBELET_NETWORK_ARGS="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" 4 | KUBELET_DNS_ARGS="--cluster-dns=10.96.0.10 --cluster-domain=cluster.local" 5 | KUBELET_AUTHZ_ARGS="--authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.crt" 6 | KUBELET_CGROUP_ARGS="--cgroup-driver=cgroupfs" 7 | KUBELET_EXTRA_ARGS="--fail-swap-on=false --resolv-conf=/etc/resolv.conf.override" 8 | -------------------------------------------------------------------------------- /dockerfiles/dind/modprobe.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | # "modprobe" without modprobe 5 | # https://twitter.com/lucabruno/status/902934379835662336 6 | 7 | # this isn't 100% fool-proof, but it'll have a much higher success rate than simply using the "real" modprobe 8 | 9 | # Docker often uses "modprobe -va foo bar baz" 10 | # so we ignore modules that start with "-" 11 | for module; do 12 | if [ "${module#-}" = "$module" ]; then 13 | ip link show "$module" || true 14 | lsmod | grep "$module" || true 15 | fi 16 | done 17 | 18 | # remove /usr/local/... from PATH so we can exec the real modprobe as a last resort 19 | export PATH='/usr/sbin:/usr/bin:/sbin:/bin' 20 | exec modprobe "$@" 21 | -------------------------------------------------------------------------------- /internal/addgenheader/addgenheader.go: -------------------------------------------------------------------------------- 1 | // addgenheader is a simple program that adds a DO NOT EDIT style 2 | // comment at the top of a file. Because some generators do not do 3 | // this, e.g. go-bindata 4 | package main 5 | 6 | import ( 7 | "bytes" 8 | "fmt" 9 | "io/ioutil" 10 | "os" 11 | "strings" 12 | ) 13 | 14 | func main() { 15 | var buf bytes.Buffer 16 | fmt.Fprintf(&buf, "// %v DO NOT EDIT\n", strings.TrimSpace(os.Args[2])) 17 | fmt.Fprintf(&buf, "\n") 18 | byts, err := ioutil.ReadFile(os.Args[1]) 19 | if err != nil { 20 | panic(err) 21 | } 22 | fmt.Fprintf(&buf, "%s", byts) 23 | if err := ioutil.WriteFile(os.Args[1], buf.Bytes(), 0666); err != nil { 24 | panic(err) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /.github/workflows/docker-rose-develop.yml: -------------------------------------------------------------------------------- 1 | name: Docker Image ROSE Develop Weekly 2 | 3 | on: 4 | schedule: 5 | - cron: '0 1 * * 0' 6 | 7 | jobs: 8 | 9 | build: 10 | 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v2 15 | 16 | - name: Build the docker image of ROSE develop branch 17 | run: | 18 | cd $GITHUB_WORKSPACE/dockerfiles/dind 19 | docker build . 
--file Dockerfile.rose-develop --tag freecc_rose_develop 20 | echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u ${{ secrets.DOCKERHUB_ID }} --password-stdin 21 | docker tag freecc_rose_develop freecompilercamp/pwc:rose-develop-weekly 22 | docker push freecompilercamp/pwc:rose-develop-weekly 23 | 24 | -------------------------------------------------------------------------------- /.github/workflows/docker-rose-release.yml: -------------------------------------------------------------------------------- 1 | name: Docker Image ROSE Release Weekly 2 | 3 | on: 4 | schedule: 5 | - cron: '0 1 * * 0' 6 | 7 | jobs: 8 | 9 | build: 10 | 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v2 15 | 16 | - name: Build the docker image of ROSE release branch 17 | run: | 18 | cd $GITHUB_WORKSPACE/dockerfiles/dind 19 | docker build . --file Dockerfile.rose-release --tag freecc_rose_release 20 | echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u ${{ secrets.DOCKERHUB_ID }} --password-stdin 21 | docker tag freecc_rose_release freecompilercamp/pwc:rose-release-weekly 22 | docker push freecompilercamp/pwc:rose-release-weekly 23 | 24 | -------------------------------------------------------------------------------- /handlers/close_session.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "log" 5 | "net/http" 6 | 7 | "github.com/gorilla/mux" 8 | "github.com/play-with-docker/play-with-docker/storage" 9 | ) 10 | 11 | func CloseSession(rw http.ResponseWriter, req *http.Request) { 12 | vars := mux.Vars(req) 13 | sessionId := vars["sessionId"] 14 | 15 | session, err := core.SessionGet(sessionId) 16 | if err == storage.NotFoundError { 17 | rw.WriteHeader(http.StatusNotFound) 18 | return 19 | } else if err != nil { 20 | rw.WriteHeader(http.StatusInternalServerError) 21 | return 22 | } 23 | 24 | if err := core.SessionClose(session); err != nil { 25 | log.Println(err) 26 | rw.WriteHeader(http.StatusInternalServerError) 27 | return 28 | } 29 | 30 | } 31 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - master 5 | pull_request: 6 | branches: 7 | - '**' 8 | 9 | name: Go 10 | jobs: 11 | test: 12 | strategy: 13 | fail-fast: false 14 | matrix: 15 | os: [ubuntu-latest] 16 | go_version: ["1.14.9", "1.15.2"] 17 | runs-on: ${{ matrix.os }} 18 | steps: 19 | - name: Checkout code 20 | uses: actions/checkout@v2 21 | - name: Install Go 22 | uses: actions/setup-go@v2 23 | with: 24 | go-version: ${{ matrix.go_version }} 25 | - name: Generate 26 | run: go generate ./... 27 | - name: Test 28 | run: go test ./... 29 | - name: Verify clean commit 30 | run: test -z "$(git status --porcelain)" || (git status; git diff; false) 31 | -------------------------------------------------------------------------------- /.github/workflows/docker-rose-develop-debug.yml: -------------------------------------------------------------------------------- 1 | name: Docker Image ROSE Develop Debug Weekly 2 | 3 | on: 4 | schedule: 5 | - cron: '0 1 * * 0' 6 | 7 | jobs: 8 | 9 | build: 10 | 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v2 15 | 16 | - name: Build the docker image of ROSE develop branch in debug mode 17 | run: | 18 | cd $GITHUB_WORKSPACE/dockerfiles/dind 19 | docker build . 
--file Dockerfile.rose-develop-debug --tag freecc_rose_develop_debug 20 | echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u ${{ secrets.DOCKERHUB_ID }} --password-stdin 21 | docker tag freecc_rose_develop_debug freecompilercamp/pwc:rose-develop-debug-weekly 22 | docker push freecompilercamp/pwc:rose-develop-debug-weekly 23 | 24 | -------------------------------------------------------------------------------- /handlers/delete_instance.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/gorilla/mux" 7 | "github.com/play-with-docker/play-with-docker/storage" 8 | ) 9 | 10 | func DeleteInstance(rw http.ResponseWriter, req *http.Request) { 11 | vars := mux.Vars(req) 12 | sessionId := vars["sessionId"] 13 | instanceName := vars["instanceName"] 14 | 15 | s, err := core.SessionGet(sessionId) 16 | if s != nil { 17 | i := core.InstanceGet(s, instanceName) 18 | err := core.InstanceDelete(s, i) 19 | if err != nil { 20 | rw.WriteHeader(http.StatusInternalServerError) 21 | return 22 | } 23 | } else if err == storage.NotFoundError { 24 | rw.WriteHeader(http.StatusInternalServerError) 25 | return 26 | } else if err != nil { 27 | rw.WriteHeader(http.StatusInternalServerError) 28 | return 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /Dockerfile.l2: -------------------------------------------------------------------------------- 1 | FROM golang:1.9 2 | 3 | # Copy the runtime dockerfile into the context as Dockerfile 4 | COPY . /go/src/github.com/play-with-docker/play-with-docker 5 | 6 | WORKDIR /go/src/github.com/play-with-docker/play-with-docker 7 | 8 | 9 | RUN ssh-keygen -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key >/dev/null 10 | 11 | WORKDIR /go/src/github.com/play-with-docker/play-with-docker/router/l2 12 | 13 | RUN CGO_ENABLED=0 go build -a -installsuffix nocgo -o /go/bin/play-with-docker-l2 . 14 | 15 | 16 | FROM alpine 17 | 18 | RUN apk --update add ca-certificates 19 | RUN mkdir /app 20 | 21 | COPY --from=0 /go/bin/play-with-docker-l2 /app/play-with-docker-l2 22 | COPY --from=0 /etc/ssh/ssh_host_rsa_key /etc/ssh/ssh_host_rsa_key 23 | 24 | WORKDIR /app 25 | CMD ["./play-with-docker-l2", "-ssh_key_path", "/etc/ssh/ssh_host_rsa_key"] 26 | 27 | EXPOSE 22 53 443 8080 28 | -------------------------------------------------------------------------------- /dockerfiles/k8s/motd: -------------------------------------------------------------------------------- 1 | 2 | WARNING!!!! 3 | 4 | This is a sandbox environment. Using personal credentials 5 | is HIGHLY! discouraged. Any consequences of doing so, are 6 | completely the user's responsibilites. 7 | 8 | You can bootstrap a cluster as follows: 9 | 10 | 1. Initializes cluster master node: 11 | 12 | kubeadm init --apiserver-advertise-address $(hostname -i) --pod-network-cidr 10.5.0.0/16 13 | 14 | 15 | 2. Initialize cluster networking: 16 | 17 | kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml 18 | 19 | 20 | 3. (Optional) Create an nginx deployment: 21 | 22 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/application/nginx-app.yaml 23 | 24 | 25 | The PWK team. 
26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /handlers/user.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "net/http" 7 | 8 | "github.com/gorilla/mux" 9 | "github.com/play-with-docker/play-with-docker/storage" 10 | ) 11 | 12 | type PublicUserInfo struct { 13 | Id string `json:"id"` 14 | Avatar string `json:"avatar"` 15 | Name string `json:"name"` 16 | } 17 | 18 | func GetUser(rw http.ResponseWriter, req *http.Request) { 19 | vars := mux.Vars(req) 20 | userId := vars["userId"] 21 | 22 | u, err := core.UserGet(userId) 23 | if err != nil { 24 | if storage.NotFound(err) { 25 | log.Printf("User with id %s was not found\n", userId) 26 | rw.WriteHeader(http.StatusNotFound) 27 | return 28 | } 29 | log.Println(err) 30 | rw.WriteHeader(http.StatusInternalServerError) 31 | return 32 | } 33 | 34 | pui := PublicUserInfo{Id: u.Id, Avatar: u.Avatar, Name: u.Name} 35 | json.NewEncoder(rw).Encode(pui) 36 | } 37 | -------------------------------------------------------------------------------- /handlers/fstree_instance.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "io" 5 | "log" 6 | "net/http" 7 | 8 | "github.com/gorilla/mux" 9 | ) 10 | 11 | func fsTree(rw http.ResponseWriter, req *http.Request) { 12 | vars := mux.Vars(req) 13 | sessionId := vars["sessionId"] 14 | instanceName := vars["instanceName"] 15 | 16 | s, _ := core.SessionGet(sessionId) 17 | if s == nil { 18 | rw.WriteHeader(http.StatusNotFound) 19 | return 20 | } 21 | 22 | i := core.InstanceGet(s, instanceName) 23 | if i == nil { 24 | rw.WriteHeader(http.StatusNotFound) 25 | return 26 | } 27 | 28 | tree, err := core.InstanceFSTree(i) 29 | 30 | if err != nil { 31 | log.Println(err) 32 | rw.WriteHeader(http.StatusInternalServerError) 33 | return 34 | } 35 | 36 | rw.Header().Set("content-type", "application/json") 37 | if _, err = io.Copy(rw, tree); err != nil { 38 | log.Println(err) 39 | rw.WriteHeader(http.StatusInternalServerError) 40 | return 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /pwd/types/session.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type SessionConfig struct { 8 | Playground *Playground 9 | UserId string 10 | Duration time.Duration 11 | Stack string 12 | StackName string 13 | ImageName string 14 | } 15 | 16 | type Session struct { 17 | Id string `json:"id" bson:"id"` 18 | CreatedAt time.Time `json:"created_at" bson:"created_at"` 19 | ExpiresAt time.Time `json:"expires_at" bson:"expires_at"` 20 | PwdIpAddress string `json:"pwd_ip_address" bson:"pwd_ip_address"` 21 | Ready bool `json:"ready" bson:"ready"` 22 | Stack string `json:"stack" bson:"stack"` 23 | StackName string `json:"stack_name" bson:"stack_name"` 24 | ImageName string `json:"image_name" bson:"image_name"` 25 | Host string `json:"host" bson:"host"` 26 | UserId string `json:"user_id" bson:"user_id"` 27 | PlaygroundId string `json:"playground_id" bson:"playground_id"` 28 | } 29 | -------------------------------------------------------------------------------- /event/event.go: -------------------------------------------------------------------------------- 1 | package event 2 | 3 | type EventType string 4 | 5 | func (e EventType) String() string { 6 | return string(e) 7 | } 8 | 9 
| var ( 10 | INSTANCE_VIEWPORT_RESIZE = EventType("instance viewport resize") 11 | INSTANCE_DELETE = EventType("instance delete") 12 | INSTANCE_NEW = EventType("instance new") 13 | INSTANCE_STATS = EventType("instance stats") 14 | SESSION_NEW = EventType("session new") 15 | SESSION_END = EventType("session end") 16 | SESSION_READY = EventType("session ready") 17 | SESSION_BUILDER_OUT = EventType("session builder out") 18 | PLAYGROUND_NEW = EventType("playground_new") 19 | ) 20 | 21 | type Handler func(id string, args ...interface{}) 22 | type AnyHandler func(eventType EventType, id string, args ...interface{}) 23 | 24 | type EventApi interface { 25 | Emit(name EventType, id string, args ...interface{}) 26 | On(name EventType, handler Handler) 27 | OnAny(handler AnyHandler) 28 | } 29 | -------------------------------------------------------------------------------- /prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute. 4 | evaluation_interval: 5s # Evaluate rules every 15 seconds. The default is every 1 minute. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Attach these labels to any time series or alerts when communicating with 8 | # external systems (federation, remote storage, Alertmanager). 9 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 10 | rule_files: 11 | # - "first.rules" 12 | # - "second.rules" 13 | 14 | # A scrape configuration containing exactly one endpoint to scrape: 15 | # Here it's Prometheus itself. 16 | scrape_configs: 17 | # The job name is added as a label `job=` to any timeseries scraped from this config. 18 | - job_name: 'pwd' 19 | 20 | # metrics_path defaults to '/metrics' 21 | # scheme defaults to 'http'. 
22 | 23 | static_configs: 24 | - targets: ['pwd1:3000', 'pwd2:3000'] 25 | -------------------------------------------------------------------------------- /handlers/get_session.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "net/http" 7 | 8 | "github.com/gorilla/mux" 9 | "github.com/play-with-docker/play-with-docker/pwd/types" 10 | "github.com/play-with-docker/play-with-docker/storage" 11 | ) 12 | 13 | type SessionInfo struct { 14 | *types.Session 15 | Instances map[string]*types.Instance `json:"instances"` 16 | } 17 | 18 | func GetSession(rw http.ResponseWriter, req *http.Request) { 19 | vars := mux.Vars(req) 20 | sessionId := vars["sessionId"] 21 | 22 | session, err := core.SessionGet(sessionId) 23 | if err == storage.NotFoundError { 24 | rw.WriteHeader(http.StatusNotFound) 25 | return 26 | } else if err != nil { 27 | rw.WriteHeader(http.StatusNotFound) 28 | return 29 | } 30 | 31 | instances, err := core.InstanceFindBySession(session) 32 | if err != nil { 33 | log.Println(err) 34 | rw.WriteHeader(http.StatusInternalServerError) 35 | return 36 | } 37 | is := map[string]*types.Instance{} 38 | for _, i := range instances { 39 | is[i.Name] = i 40 | } 41 | 42 | json.NewEncoder(rw).Encode(SessionInfo{session, is}) 43 | } 44 | -------------------------------------------------------------------------------- /handlers/exec.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "net/http" 7 | 8 | "github.com/gorilla/mux" 9 | ) 10 | 11 | type execRequest struct { 12 | Cmd []string `json:"command"` 13 | } 14 | 15 | type execResponse struct { 16 | ExitCode int `json:"status_code"` 17 | } 18 | 19 | func Exec(rw http.ResponseWriter, req *http.Request) { 20 | vars := mux.Vars(req) 21 | sessionId := vars["sessionId"] 22 | instanceName := vars["instanceName"] 23 | 24 | var er execRequest 25 | err := json.NewDecoder(req.Body).Decode(&er) 26 | if err != nil { 27 | rw.WriteHeader(http.StatusBadRequest) 28 | return 29 | } 30 | 31 | s, _ := core.SessionGet(sessionId) 32 | if s == nil { 33 | rw.WriteHeader(http.StatusNotFound) 34 | return 35 | } 36 | i := core.InstanceGet(s, instanceName) 37 | if i == nil { 38 | rw.WriteHeader(http.StatusNotFound) 39 | return 40 | } 41 | 42 | code, err := core.InstanceExec(i, er.Cmd) 43 | 44 | if err != nil { 45 | log.Println(err) 46 | rw.WriteHeader(http.StatusInternalServerError) 47 | return 48 | } 49 | 50 | json.NewEncoder(rw).Encode(execResponse{code}) 51 | } 52 | -------------------------------------------------------------------------------- /www/503.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Docker Playground 5 | 6 | 7 | 8 | 16 | 17 | 18 |
19 | An error has occurred. If you have some time, please report it. Thanks! 20 |
21 | 22 | -------------------------------------------------------------------------------- /event/local_broker.go: -------------------------------------------------------------------------------- 1 | package event 2 | 3 | import "sync" 4 | 5 | type localBroker struct { 6 | sync.Mutex 7 | 8 | handlers map[EventType][]Handler 9 | anyHandlers []AnyHandler 10 | } 11 | 12 | func NewLocalBroker() *localBroker { 13 | return &localBroker{handlers: map[EventType][]Handler{}, anyHandlers: []AnyHandler{}} 14 | } 15 | 16 | func (b *localBroker) On(name EventType, handler Handler) { 17 | b.Lock() 18 | defer b.Unlock() 19 | 20 | if b.handlers[name] == nil { 21 | b.handlers[name] = []Handler{} 22 | } 23 | b.handlers[name] = append(b.handlers[name], handler) 24 | } 25 | 26 | func (b *localBroker) OnAny(handler AnyHandler) { 27 | b.Lock() 28 | defer b.Unlock() 29 | 30 | b.anyHandlers = append(b.anyHandlers, handler) 31 | } 32 | 33 | func (b *localBroker) Emit(name EventType, sessionId string, args ...interface{}) { 34 | go func() { 35 | b.Lock() 36 | defer b.Unlock() 37 | 38 | for _, handler := range b.anyHandlers { 39 | handler(name, sessionId, args...) 40 | } 41 | if b.handlers[name] != nil { 42 | for _, handler := range b.handlers[name] { 43 | handler(sessionId, args...) 44 | } 45 | } 46 | }() 47 | } 48 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 Marcos Lilljedhal and Jonathan Leibiusky 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
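(An illustrative aside, not a file in this repository: a small runnable Go sketch of how the local event broker from event/local_broker.go above is typically wired up with the event types from event/event.go. The session id, the extra "node1" argument, and the handler body are assumptions for demonstration; NewLocalBroker, On, and Emit are taken from the source shown above.)

package main

import (
	"fmt"
	"sync"

	"github.com/play-with-docker/play-with-docker/event"
)

func main() {
	broker := event.NewLocalBroker()

	var wg sync.WaitGroup
	wg.Add(1)

	// Handlers receive the session id plus any extra arguments passed to Emit.
	broker.On(event.INSTANCE_NEW, func(sessionId string, args ...interface{}) {
		fmt.Println("new instance in session", sessionId, args)
		wg.Done()
	})

	// Emit dispatches to handlers on a separate goroutine, so wait for delivery.
	broker.Emit(event.INSTANCE_NEW, "aaaabbbbcccc", "node1")
	wg.Wait()
}

Because Emit runs handlers asynchronously, callers that need to observe the result synchronize themselves, which is what the repository's own event/local_broker_test.go does with a sync.WaitGroup.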
22 | -------------------------------------------------------------------------------- /handlers/file_instance.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "encoding/base64" 5 | "io" 6 | "log" 7 | "net/http" 8 | 9 | "github.com/gorilla/mux" 10 | ) 11 | 12 | func file(rw http.ResponseWriter, req *http.Request) { 13 | vars := mux.Vars(req) 14 | sessionId := vars["sessionId"] 15 | instanceName := vars["instanceName"] 16 | 17 | query := req.URL.Query() 18 | 19 | path := query.Get("path") 20 | 21 | if path == "" { 22 | rw.WriteHeader(http.StatusBadRequest) 23 | return 24 | } 25 | 26 | s, _ := core.SessionGet(sessionId) 27 | if s == nil { 28 | rw.WriteHeader(http.StatusNotFound) 29 | return 30 | } 31 | 32 | i := core.InstanceGet(s, instanceName) 33 | if i == nil { 34 | rw.WriteHeader(http.StatusNotFound) 35 | return 36 | } 37 | 38 | instanceFile, err := core.InstanceFile(i, path) 39 | 40 | if err != nil { 41 | log.Println(err) 42 | rw.WriteHeader(http.StatusInternalServerError) 43 | return 44 | } 45 | 46 | encoder := base64.NewEncoder(base64.StdEncoding, rw) 47 | 48 | if _, err = io.Copy(encoder, instanceFile); err != nil { 49 | log.Println(err) 50 | rw.WriteHeader(http.StatusInternalServerError) 51 | return 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /handlers/cookie_id.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/play-with-docker/play-with-docker/config" 7 | ) 8 | 9 | type CookieID struct { 10 | Id string `json:"id"` 11 | UserName string `json:"user_name"` 12 | UserAvatar string `json:"user_avatar"` 13 | ProviderId string `json:"provider_id"` 14 | } 15 | 16 | func (c *CookieID) SetCookie(rw http.ResponseWriter, host string) error { 17 | if encoded, err := config.SecureCookie.Encode("id", c); err == nil { 18 | cookie := &http.Cookie{ 19 | Name: "id", 20 | Value: encoded, 21 | Domain: host, 22 | Path: "/", 23 | SameSite: http.SameSiteNoneMode, 24 | Secure: config.UseLetsEncrypt, 25 | HttpOnly: true, 26 | } 27 | http.SetCookie(rw, cookie) 28 | } else { 29 | return err 30 | } 31 | return nil 32 | } 33 | func ReadCookie(r *http.Request) (*CookieID, error) { 34 | if cookie, err := r.Cookie("id"); err == nil { 35 | value := &CookieID{} 36 | if err = config.SecureCookie.Decode("id", cookie.Value, &value); err == nil { 37 | return value, nil 38 | } else { 39 | return nil, err 40 | } 41 | } else { 42 | return nil, err 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /www/ooc.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Docker Playground 5 | 6 | 7 | 8 | 16 | 17 | 18 |
19 | We are really sorry but we are out of capacity and cannot create your session at the moment. Please try again later. 20 |
21 | 22 | -------------------------------------------------------------------------------- /dockerfiles/k8s/docker.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker Application Container Engine 3 | Documentation=https://docs.docker.com 4 | 5 | [Service] 6 | # the default is not to use systemd for cgroups because the delegate issues still 7 | # exists and systemd currently does not support the cgroup feature set required 8 | # for containers run by docker 9 | ExecStart=/usr/bin/dockerd 10 | ExecReload=/bin/kill -s HUP $MAINPID 11 | # Having non-zero Limit*s causes performance problems due to accounting overhead 12 | # in the kernel. We recommend using cgroups to do container-local accounting. 13 | LimitNOFILE=infinity 14 | LimitNPROC=infinity 15 | LimitCORE=infinity 16 | # Uncomment TasksMax if your systemd version supports it. 17 | # Only systemd 226 and above support this version. 18 | #TasksMax=infinity 19 | TimeoutStartSec=0 20 | # set delegate yes so that systemd does not reset the cgroups of docker containers 21 | Delegate=yes 22 | # kill only the docker process, not all processes in the cgroup 23 | KillMode=process 24 | # restart the docker process if it exits prematurely 25 | Restart=on-failure 26 | StartLimitBurst=3 27 | StartLimitInterval=60s 28 | 29 | [Install] 30 | WantedBy=multi-user.target 31 | -------------------------------------------------------------------------------- /pwd/playground.go: -------------------------------------------------------------------------------- 1 | package pwd 2 | 3 | import ( 4 | "log" 5 | 6 | "github.com/play-with-docker/play-with-docker/event" 7 | "github.com/play-with-docker/play-with-docker/pwd/types" 8 | "github.com/satori/go.uuid" 9 | ) 10 | 11 | func (p *pwd) PlaygroundNew(playground types.Playground) (*types.Playground, error) { 12 | playground.Id = uuid.NewV5(uuid.NamespaceOID, playground.Domain).String() 13 | if err := p.storage.PlaygroundPut(&playground); err != nil { 14 | log.Printf("Error saving playground %s. Got: %v\n", playground.Id, err) 15 | return nil, err 16 | } 17 | 18 | p.event.Emit(event.PLAYGROUND_NEW, playground.Id) 19 | return &playground, nil 20 | } 21 | 22 | func (p *pwd) PlaygroundGet(id string) *types.Playground { 23 | if playground, err := p.storage.PlaygroundGet(id); err != nil { 24 | log.Printf("Error retrieving playground %s. Got: %v\n", id, err) 25 | return nil 26 | } else { 27 | return playground 28 | } 29 | } 30 | 31 | func (p *pwd) PlaygroundFindByDomain(domain string) *types.Playground { 32 | id := uuid.NewV5(uuid.NamespaceOID, domain).String() 33 | return p.PlaygroundGet(id) 34 | } 35 | 36 | func (p *pwd) PlaygroundList() ([]*types.Playground, error) { 37 | return p.storage.PlaygroundGetAll() 38 | } 39 | -------------------------------------------------------------------------------- /handlers/ping.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "net/http" 7 | "time" 8 | 9 | client "github.com/docker/docker/client" 10 | "github.com/play-with-docker/play-with-docker/config" 11 | "github.com/shirou/gopsutil/load" 12 | ) 13 | 14 | func Ping(rw http.ResponseWriter, req *http.Request) { 15 | defer latencyHistogramVec.WithLabelValues("ping").Observe(float64(time.Since(time.Now()).Nanoseconds()) / 1000000) 16 | // Get system load average of the last 5 minutes and compare it against a threashold. 
17 | 18 | c, err := client.NewClientWithOpts() 19 | 20 | if err != nil { 21 | rw.WriteHeader(http.StatusInternalServerError) 22 | return 23 | } 24 | 25 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 26 | defer cancel() 27 | 28 | if _, err := c.Info(ctx); err != nil && err == context.DeadlineExceeded { 29 | log.Printf("Docker info took to long to respond\n") 30 | rw.WriteHeader(http.StatusGatewayTimeout) 31 | return 32 | } 33 | 34 | a, err := load.Avg() 35 | if err != nil { 36 | log.Println("Cannot get system load average!", err) 37 | } else { 38 | if a.Load5 > config.MaxLoadAvg { 39 | log.Printf("System load average is too high [%f]\n", a.Load5) 40 | rw.WriteHeader(http.StatusInsufficientStorage) 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /www/assets/xterm/addons/fullscreen/fullscreen.js.map: -------------------------------------------------------------------------------- 1 | {"version":3,"file":"fullscreen.js","sources":["../../../src/addons/fullscreen/fullscreen.ts","../../../node_modules/browser-pack/_prelude.js"],"sourcesContent":["/**\n * Copyright (c) 2014 The xterm.js authors. All rights reserved.\n * @license MIT\n */\n\nimport { Terminal } from 'xterm';\n\n/**\n * Toggle the given terminal's fullscreen mode.\n * @param term The terminal to toggle full screen mode\n * @param fullscreen Toggle fullscreen on (true) or off (false)\n */\nexport function toggleFullScreen(term: Terminal, fullscreen: boolean): void {\n let fn: (...tokens: string[]) => void;\n\n if (typeof fullscreen === 'undefined') {\n fn = (term.element.classList.contains('fullscreen')) ?\n term.element.classList.remove : term.element.classList.add;\n } else if (!fullscreen) {\n fn = term.element.classList.remove;\n } else {\n fn = term.element.classList.add;\n }\n\n fn = fn.bind(term.element.classList);\n fn('fullscreen');\n}\n\nexport function apply(terminalConstructor: typeof Terminal): void {\n (terminalConstructor.prototype).toggleFullScreen = function (fullscreen: boolean): void {\n toggleFullScreen(this, fullscreen);\n };\n}\n",null],"names":[],"mappings":"ACAA;;;ADYA;AACA;AAEA;AACA;AACA;AACA;AAAA;AACA;AACA;AAAA;AACA;AACA;AAEA;AACA;AACA;AAdA;AAgBA;AACA;AACA;AACA;AACA;AAJA;"} -------------------------------------------------------------------------------- /scheduler/task/check_ports_test.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/play-with-docker/play-with-docker/docker" 8 | "github.com/play-with-docker/play-with-docker/event" 9 | "github.com/play-with-docker/play-with-docker/pwd/types" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestCheckPorts_Name(t *testing.T) { 14 | e := &event.Mock{} 15 | f := &docker.FactoryMock{} 16 | 17 | task := NewCheckPorts(e, f) 18 | 19 | assert.Equal(t, "CheckPorts", task.Name()) 20 | e.M.AssertExpectations(t) 21 | f.AssertExpectations(t) 22 | } 23 | 24 | func TestCheckPorts_Run(t *testing.T) { 25 | d := &docker.Mock{} 26 | e := &event.Mock{} 27 | f := &docker.FactoryMock{} 28 | 29 | i := &types.Instance{ 30 | IP: "10.0.0.1", 31 | Name: "aaaabbbb_node1", 32 | SessionId: "aaaabbbbcccc", 33 | } 34 | 35 | d.On("GetPorts").Return([]uint16{8080, 9090}, nil) 36 | f.On("GetForInstance", i).Return(d, nil) 37 | e.M.On("Emit", CheckPortsEvent, "aaaabbbbcccc", []interface{}{DockerPorts{Instance: "aaaabbbb_node1", Ports: []int{8080, 9090}}}).Return() 38 | 39 | task 
:= NewCheckPorts(e, f) 40 | ctx := context.Background() 41 | 42 | err := task.Run(ctx, i) 43 | 44 | assert.Nil(t, err) 45 | d.AssertExpectations(t) 46 | e.M.AssertExpectations(t) 47 | f.AssertExpectations(t) 48 | } 49 | -------------------------------------------------------------------------------- /handlers/session_setup.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "net/http" 7 | 8 | "github.com/gorilla/mux" 9 | "github.com/play-with-docker/play-with-docker/pwd" 10 | ) 11 | 12 | func SessionSetup(rw http.ResponseWriter, req *http.Request) { 13 | vars := mux.Vars(req) 14 | sessionId := vars["sessionId"] 15 | 16 | body := pwd.SessionSetupConf{PlaygroundFQDN: req.Host, DindVolumeSize: "5G"} 17 | 18 | json.NewDecoder(req.Body).Decode(&body) 19 | 20 | s, err := core.SessionGet(sessionId) 21 | if err != nil { 22 | rw.WriteHeader(http.StatusInternalServerError) 23 | return 24 | } 25 | 26 | playground := core.PlaygroundGet(s.PlaygroundId) 27 | if playground == nil { 28 | log.Printf("Playground with id %s for session %s was not found!", s.PlaygroundId, s.Id) 29 | rw.WriteHeader(http.StatusBadRequest) 30 | return 31 | } 32 | 33 | if len(playground.DindVolumeSize) > 0 { 34 | body.DindVolumeSize = playground.DindVolumeSize 35 | } 36 | 37 | err = core.SessionSetup(s, body) 38 | if err != nil { 39 | if pwd.SessionNotEmpty(err) { 40 | log.Println("Cannot setup a session that contains instances") 41 | rw.WriteHeader(http.StatusConflict) 42 | rw.Write([]byte("Cannot setup a session that contains instances")) 43 | return 44 | } 45 | log.Println(err) 46 | rw.WriteHeader(http.StatusInternalServerError) 47 | return 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /dockerfiles/dind/Dockerfile.middle: -------------------------------------------------------------------------------- 1 | FROM freecompilercamp/pwc:base 2 | 3 | RUN set -eux; \ 4 | apt-get -y install \ 5 | btrfs-tools \ 6 | e2fsprogs \ 7 | iptables \ 8 | openssl \ 9 | uidmap \ 10 | xfsprogs \ 11 | xz-utils \ 12 | # pigz: https://github.com/moby/moby/pull/35697 (faster gzip implementation) 13 | pigz \ 14 | ; \ 15 | # only install zfs if it's available for the current architecture 16 | if zfs="$(apt-cache policy zfs | grep Candidate | grep none)" && [ -z "$zfs" ]; then \ 17 | apt-get -y install zfs; \ 18 | fi 19 | 20 | 21 | # TODO aufs-tools 22 | 23 | # set up subuid/subgid so that "--userns-remap=default" works out-of-the-box 24 | RUN set -x \ 25 | && addgroup --system dockremap \ 26 | && useradd --system -g dockremap dockremap \ 27 | && echo 'dockremap:165536:65536' >> /etc/subuid \ 28 | && echo 'dockremap:165536:65536' >> /etc/subgid 29 | 30 | # https://github.com/docker/docker/tree/master/hack/dind 31 | ENV DIND_COMMIT 37498f009d8bf25fbb6199e8ccd34bed84f2874b 32 | 33 | RUN set -eux; \ 34 | wget -O /usr/local/bin/dind "https://raw.githubusercontent.com/docker/docker/${DIND_COMMIT}/hack/dind"; \ 35 | chmod +x /usr/local/bin/dind 36 | 37 | COPY dockerd-entrypoint.sh /usr/local/bin/ 38 | 39 | VOLUME /var/lib/docker 40 | EXPOSE 2375 2376 41 | 42 | ENTRYPOINT ["dockerd-entrypoint.sh"] 43 | CMD [] 44 | -------------------------------------------------------------------------------- /dockerfiles/dind/Dockerfile.middle-gpu: -------------------------------------------------------------------------------- 1 | FROM freecompilercamp/pwc:base-gpu 2 | 3 | RUN set -eux; \ 4 | apt-get -y install \ 5 | 
btrfs-tools \ 6 | e2fsprogs \ 7 | iptables \ 8 | openssl \ 9 | uidmap \ 10 | xfsprogs \ 11 | xz-utils \ 12 | # pigz: https://github.com/moby/moby/pull/35697 (faster gzip implementation) 13 | pigz \ 14 | ; \ 15 | # only install zfs if it's available for the current architecture 16 | if zfs="$(apt-cache policy zfs | grep Candidate | grep none)" && [ -z "$zfs" ]; then \ 17 | apt-get -y install zfs; \ 18 | fi 19 | 20 | 21 | # TODO aufs-tools 22 | 23 | # set up subuid/subgid so that "--userns-remap=default" works out-of-the-box 24 | RUN set -x \ 25 | && addgroup --system dockremap \ 26 | && useradd --system -g dockremap dockremap \ 27 | && echo 'dockremap:165536:65536' >> /etc/subuid \ 28 | && echo 'dockremap:165536:65536' >> /etc/subgid 29 | 30 | # https://github.com/docker/docker/tree/master/hack/dind 31 | ENV DIND_COMMIT 37498f009d8bf25fbb6199e8ccd34bed84f2874b 32 | 33 | RUN set -eux; \ 34 | wget -O /usr/local/bin/dind "https://raw.githubusercontent.com/docker/docker/${DIND_COMMIT}/hack/dind"; \ 35 | chmod +x /usr/local/bin/dind 36 | 37 | COPY dockerd-entrypoint.sh /usr/local/bin/ 38 | 39 | VOLUME /var/lib/docker 40 | EXPOSE 2375 2376 41 | 42 | ENTRYPOINT ["dockerd-entrypoint.sh"] 43 | CMD [] 44 | -------------------------------------------------------------------------------- /scheduler/task/check_ports.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "log" 6 | 7 | "github.com/play-with-docker/play-with-docker/docker" 8 | "github.com/play-with-docker/play-with-docker/event" 9 | "github.com/play-with-docker/play-with-docker/pwd/types" 10 | ) 11 | 12 | type DockerPorts struct { 13 | Instance string `json:"instance"` 14 | Ports []int `json:"ports"` 15 | } 16 | 17 | type checkPorts struct { 18 | event event.EventApi 19 | factory docker.FactoryApi 20 | } 21 | 22 | var CheckPortsEvent event.EventType 23 | 24 | func init() { 25 | CheckPortsEvent = event.EventType("instance docker ports") 26 | } 27 | 28 | func (t *checkPorts) Name() string { 29 | return "CheckPorts" 30 | } 31 | 32 | func (t *checkPorts) Run(ctx context.Context, instance *types.Instance) error { 33 | dockerClient, err := t.factory.GetForInstance(instance) 34 | if err != nil { 35 | log.Println(err) 36 | return err 37 | } 38 | 39 | ps, err := dockerClient.GetPorts() 40 | if err != nil { 41 | log.Println(err) 42 | return err 43 | } 44 | ports := make([]int, len(ps)) 45 | for i, port := range ps { 46 | ports[i] = int(port) 47 | } 48 | 49 | t.event.Emit(CheckPortsEvent, instance.SessionId, DockerPorts{Instance: instance.Name, Ports: ports}) 50 | return nil 51 | } 52 | 53 | func NewCheckPorts(e event.EventApi, f docker.FactoryApi) *checkPorts { 54 | return &checkPorts{event: e, factory: f} 55 | } 56 | -------------------------------------------------------------------------------- /event/local_broker_test.go: -------------------------------------------------------------------------------- 1 | package event 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestLocalBroker_On(t *testing.T) { 11 | broker := NewLocalBroker() 12 | 13 | called := 0 14 | receivedSessionId := "" 15 | receivedArgs := []interface{}{} 16 | 17 | wg := sync.WaitGroup{} 18 | wg.Add(1) 19 | 20 | broker.On(INSTANCE_NEW, func(sessionId string, args ...interface{}) { 21 | called++ 22 | receivedSessionId = sessionId 23 | receivedArgs = args 24 | wg.Done() 25 | }) 26 | broker.Emit(SESSION_READY, "1") 27 | broker.Emit(INSTANCE_NEW, 
"2", "foo", "bar") 28 | 29 | wg.Wait() 30 | 31 | assert.Equal(t, 1, called) 32 | assert.Equal(t, "2", receivedSessionId) 33 | assert.Equal(t, []interface{}{"foo", "bar"}, receivedArgs) 34 | } 35 | 36 | func TestLocalBroker_OnAny(t *testing.T) { 37 | broker := NewLocalBroker() 38 | 39 | var receivedEvent EventType 40 | receivedSessionId := "" 41 | receivedArgs := []interface{}{} 42 | 43 | wg := sync.WaitGroup{} 44 | wg.Add(1) 45 | 46 | broker.OnAny(func(eventType EventType, sessionId string, args ...interface{}) { 47 | receivedSessionId = sessionId 48 | receivedArgs = args 49 | receivedEvent = eventType 50 | wg.Done() 51 | }) 52 | broker.Emit(SESSION_READY, "1") 53 | 54 | wg.Wait() 55 | 56 | var expectedArgs []interface{} 57 | assert.Equal(t, SESSION_READY, receivedEvent) 58 | assert.Equal(t, "1", receivedSessionId) 59 | assert.Equal(t, expectedArgs, receivedArgs) 60 | } 61 | -------------------------------------------------------------------------------- /provisioner/provisioner.go: -------------------------------------------------------------------------------- 1 | package provisioner 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "io" 7 | "net" 8 | 9 | "github.com/play-with-docker/play-with-docker/pwd/types" 10 | ) 11 | 12 | var OutOfCapacityError = errors.New("OutOfCapacity") 13 | 14 | func OutOfCapacity(e error) bool { 15 | return e == OutOfCapacityError 16 | } 17 | 18 | type InstanceProvisionerApi interface { 19 | InstanceNew(session *types.Session, conf types.InstanceConfig) (*types.Instance, error) 20 | InstanceDelete(session *types.Session, instance *types.Instance) error 21 | InstanceExec(instance *types.Instance, cmd []string) (int, error) 22 | InstanceExecOutput(instance *types.Instance, cmd []string) (io.Reader, error) 23 | InstanceFSTree(instance *types.Instance) (io.Reader, error) 24 | InstanceFile(instance *types.Instance, filePath string) (io.Reader, error) 25 | 26 | InstanceResizeTerminal(instance *types.Instance, cols, rows uint) error 27 | InstanceGetTerminal(instance *types.Instance) (net.Conn, error) 28 | 29 | InstanceUploadFromUrl(instance *types.Instance, fileName, dest, url string) error 30 | InstanceUploadFromReader(instance *types.Instance, fileName, dest string, reader io.Reader) error 31 | } 32 | 33 | type SessionProvisionerApi interface { 34 | SessionNew(ctx context.Context, session *types.Session) error 35 | SessionClose(session *types.Session) error 36 | } 37 | 38 | type InstanceProvisionerFactoryApi interface { 39 | GetProvisioner(instanceType string) (InstanceProvisionerApi, error) 40 | } 41 | -------------------------------------------------------------------------------- /scheduler/task/check_k8s_cluster_status_task.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "log" 6 | 7 | "github.com/play-with-docker/play-with-docker/event" 8 | "github.com/play-with-docker/play-with-docker/k8s" 9 | "github.com/play-with-docker/play-with-docker/pwd/types" 10 | ) 11 | 12 | type checkK8sClusterStatusTask struct { 13 | event event.EventApi 14 | factory k8s.FactoryApi 15 | } 16 | 17 | var CheckK8sStatusEvent event.EventType 18 | 19 | func init() { 20 | CheckK8sStatusEvent = event.EventType("instance k8s status") 21 | } 22 | 23 | func NewCheckK8sClusterStatus(e event.EventApi, f k8s.FactoryApi) *checkK8sClusterStatusTask { 24 | return &checkK8sClusterStatusTask{event: e, factory: f} 25 | } 26 | 27 | func (c *checkK8sClusterStatusTask) Name() string { 28 | return 
"CheckK8sClusterStatus" 29 | } 30 | 31 | func (c checkK8sClusterStatusTask) Run(ctx context.Context, i *types.Instance) error { 32 | status := ClusterStatus{Instance: i.Name} 33 | 34 | kc, err := c.factory.GetKubeletForInstance(i) 35 | if err != nil { 36 | log.Println(err) 37 | c.event.Emit(CheckSwarmStatusEvent, i.SessionId, status) 38 | return err 39 | } 40 | 41 | if isManager, err := kc.IsManager(); err != nil { 42 | c.event.Emit(CheckSwarmStatusEvent, i.SessionId, status) 43 | return err 44 | } else if !isManager { 45 | // Not a manager node, nothing to do for this task 46 | status.IsWorker = true 47 | } else { 48 | status.IsManager = true 49 | } 50 | 51 | c.event.Emit(CheckK8sStatusEvent, i.SessionId, status) 52 | 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /handlers/home.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "log" 5 | "net/http" 6 | "os" 7 | "path/filepath" 8 | 9 | "github.com/gorilla/mux" 10 | "github.com/play-with-docker/play-with-docker/storage" 11 | ) 12 | 13 | func Home(w http.ResponseWriter, r *http.Request) { 14 | vars := mux.Vars(r) 15 | sessionId := vars["sessionId"] 16 | 17 | s, err := core.SessionGet(sessionId) 18 | if err == storage.NotFoundError { 19 | // Session doesn't exist (can happen if closing the sessions an reloading the page, or similar). 20 | w.WriteHeader(http.StatusNotFound) 21 | return 22 | } else if err != nil { 23 | w.WriteHeader(http.StatusInternalServerError) 24 | return 25 | } 26 | if s.Stack != "" { 27 | go core.SessionDeployStack(s) 28 | } 29 | 30 | playground := core.PlaygroundGet(s.PlaygroundId) 31 | if playground == nil { 32 | log.Printf("Playground with id %s for session %s was not found!", s.PlaygroundId, s.Id) 33 | w.WriteHeader(http.StatusBadRequest) 34 | return 35 | } 36 | 37 | index := filepath.Join("./www", playground.AssetsDir, "/index.html") 38 | if _, err := os.Stat(index); os.IsNotExist(err) { 39 | index = "./www/default/index.html" 40 | } 41 | 42 | http.ServeFile(w, r, index) 43 | } 44 | 45 | func Landing(rw http.ResponseWriter, req *http.Request) { 46 | playground := core.PlaygroundFindByDomain(req.Host) 47 | if playground == nil { 48 | log.Printf("Playground for domain %s was not found!", req.Host) 49 | rw.WriteHeader(http.StatusNotFound) 50 | return 51 | } 52 | 53 | rw.Write(landings[playground.Id]) 54 | 55 | } 56 | -------------------------------------------------------------------------------- /scheduler/task/check_swarm_ports.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "log" 6 | 7 | "github.com/play-with-docker/play-with-docker/docker" 8 | "github.com/play-with-docker/play-with-docker/event" 9 | "github.com/play-with-docker/play-with-docker/pwd/types" 10 | ) 11 | 12 | type checkSwarmPorts struct { 13 | event event.EventApi 14 | factory docker.FactoryApi 15 | } 16 | 17 | var CheckSwarmPortsEvent event.EventType 18 | 19 | func init() { 20 | CheckSwarmPortsEvent = event.EventType("instance docker swarm ports") 21 | } 22 | 23 | func (t *checkSwarmPorts) Name() string { 24 | return "CheckSwarmPorts" 25 | } 26 | 27 | func (t *checkSwarmPorts) Run(ctx context.Context, instance *types.Instance) error { 28 | dockerClient, err := t.factory.GetForInstance(instance) 29 | if err != nil { 30 | log.Println(err) 31 | return err 32 | } 33 | 34 | status, err := getDockerSwarmStatus(ctx, dockerClient) 35 | 
if err != nil { 36 | log.Println(err) 37 | return err 38 | } 39 | 40 | if !status.IsManager { 41 | return nil 42 | } 43 | 44 | hosts, ps, err := dockerClient.GetSwarmPorts() 45 | if err != nil { 46 | log.Println(err) 47 | return err 48 | } 49 | ports := make([]int, len(ps)) 50 | for i, port := range ps { 51 | ports[i] = int(port) 52 | } 53 | 54 | t.event.Emit(CheckSwarmPortsEvent, instance.SessionId, ClusterPorts{Manager: instance.Name, Instances: hosts, Ports: ports}) 55 | return nil 56 | } 57 | 58 | func NewCheckSwarmPorts(e event.EventApi, f docker.FactoryApi) *checkSwarmPorts { 59 | return &checkSwarmPorts{event: e, factory: f} 60 | } 61 | -------------------------------------------------------------------------------- /pwd/user.go: -------------------------------------------------------------------------------- 1 | package pwd 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/play-with-docker/play-with-docker/pwd/types" 7 | "github.com/play-with-docker/play-with-docker/storage" 8 | ) 9 | 10 | var userBannedError = errors.New("User is banned") 11 | 12 | func (p *pwd) UserNewLoginRequest(providerName string) (*types.LoginRequest, error) { 13 | req := &types.LoginRequest{Id: p.generator.NewId(), Provider: providerName} 14 | if err := p.storage.LoginRequestPut(req); err != nil { 15 | return nil, err 16 | } 17 | return req, nil 18 | } 19 | 20 | func (p *pwd) UserGetLoginRequest(id string) (*types.LoginRequest, error) { 21 | if req, err := p.storage.LoginRequestGet(id); err != nil { 22 | return nil, err 23 | } else { 24 | return req, nil 25 | } 26 | } 27 | 28 | func (p *pwd) UserLogin(loginRequest *types.LoginRequest, user *types.User) (*types.User, error) { 29 | if err := p.storage.LoginRequestDelete(loginRequest.Id); err != nil { 30 | return nil, err 31 | } 32 | u, err := p.storage.UserFindByProvider(user.Provider, user.ProviderUserId) 33 | 34 | if err != nil { 35 | if storage.NotFound(err) { 36 | user.Id = p.generator.NewId() 37 | if err := p.storage.UserPut(user); err != nil { 38 | return nil, err 39 | } 40 | return user, nil 41 | } 42 | return nil, err 43 | } 44 | return u, nil 45 | } 46 | func (p *pwd) UserGet(id string) (*types.User, error) { 47 | user, err := p.storage.UserGet(id) 48 | if err != nil { 49 | return nil, err 50 | } else if user.IsBanned { 51 | return user, userBannedError 52 | } 53 | return user, nil 54 | } 55 | -------------------------------------------------------------------------------- /storage/storage.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/play-with-docker/play-with-docker/pwd/types" 7 | ) 8 | 9 | var NotFoundError = errors.New("NotFound") 10 | 11 | func NotFound(e error) bool { 12 | return e == NotFoundError 13 | } 14 | 15 | type StorageApi interface { 16 | SessionGet(id string) (*types.Session, error) 17 | SessionGetAll() ([]*types.Session, error) 18 | SessionPut(session *types.Session) error 19 | SessionDelete(id string) error 20 | SessionCount() (int, error) 21 | 22 | InstanceGet(name string) (*types.Instance, error) 23 | InstancePut(instance *types.Instance) error 24 | InstanceDelete(name string) error 25 | InstanceCount() (int, error) 26 | InstanceFindBySessionId(sessionId string) ([]*types.Instance, error) 27 | 28 | WindowsInstanceGetAll() ([]*types.WindowsInstance, error) 29 | WindowsInstancePut(instance *types.WindowsInstance) error 30 | WindowsInstanceDelete(id string) error 31 | 32 | ClientGet(id string)
(*types.Client, error) 33 | ClientPut(client *types.Client) error 34 | ClientDelete(id string) error 35 | ClientCount() (int, error) 36 | ClientFindBySessionId(sessionId string) ([]*types.Client, error) 37 | 38 | LoginRequestPut(loginRequest *types.LoginRequest) error 39 | LoginRequestGet(id string) (*types.LoginRequest, error) 40 | LoginRequestDelete(id string) error 41 | 42 | UserFindByProvider(providerName, providerUserId string) (*types.User, error) 43 | UserPut(user *types.User) error 44 | UserGet(id string) (*types.User, error) 45 | 46 | PlaygroundPut(playground *types.Playground) error 47 | PlaygroundGet(id string) (*types.Playground, error) 48 | PlaygroundGetAll() ([]*types.Playground, error) 49 | } 50 | -------------------------------------------------------------------------------- /pwd/types/instance.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "context" 4 | 5 | type Instance struct { 6 | Name string `json:"name" bson:"name"` 7 | Image string `json:"image" bson:"image"` 8 | Hostname string `json:"hostname" bson:"hostname"` 9 | IP string `json:"ip" bson:"ip"` 10 | RoutableIP string `json:"routable_ip" bson:"routable_id"` 11 | ServerCert []byte `json:"server_cert" bson:"server_cert"` 12 | ServerKey []byte `json:"server_key" bson:"server_key"` 13 | CACert []byte `json:"ca_cert" bson:"ca_cert"` 14 | Cert []byte `json:"cert" bson:"cert"` 15 | Key []byte `json:"key" bson:"key"` 16 | Tls bool `json:"tls" bson:"tls"` 17 | SessionId string `json:"session_id" bson:"session_id"` 18 | ProxyHost string `json:"proxy_host" bson:"proxy_host"` 19 | SessionHost string `json:"session_host" bson:"session_host"` 20 | Type string `json:"type" bson:"type"` 21 | WindowsId string `json:"-" bson:"windows_id"` 22 | ctx context.Context `json:"-" bson:"-"` 23 | } 24 | 25 | type WindowsInstance struct { 26 | Id string `bson:"id"` 27 | SessionId string `bson:"session_id"` 28 | } 29 | 30 | type InstanceConfig struct { 31 | ImageName string 32 | Hostname string 33 | ServerCert []byte 34 | ServerKey []byte 35 | CACert []byte 36 | Cert []byte 37 | Key []byte 38 | Tls bool 39 | PlaygroundFQDN string 40 | Type string 41 | DindVolumeSize string 42 | Envs []string 43 | Networks []string 44 | } 45 | -------------------------------------------------------------------------------- /dockerfiles/k8s/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:7 2 | 3 | COPY ./systemctl /usr/bin/systemctl 4 | COPY ./kubernetes.repo /etc/yum.repos.d/ 5 | 6 | 7 | 8 | RUN yum install -y kubectl-1.18.4 kubeadm-1.18.4 kubelet-1.18.4 \ 9 | #&& mv -f /etc/systemd/system/kubelet.service.d/10-kubeadm.conf /etc/systemd/system/kubelet.service \ 10 | && yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo \ 11 | && yum install -y docker-ce git bash-completion \ 12 | && sed -i -e '4d;5d;8d' /lib/systemd/system/docker.service \ 13 | && yum clean all 14 | 15 | RUN curl -Lf -o /usr/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 \ 16 | && curl -Lf -o /usr/bin/docker-compose https://github.com/docker/compose/releases/download/1.21.0/docker-compose-$(uname -s)-$(uname -m) \ 17 | && chmod +x /usr/bin/jq /usr/bin/docker-compose 18 | 19 | 20 | VOLUME ["/var/lib/kubelet"] 21 | 22 | COPY ./kube* /etc/systemd/system/ 23 | COPY ./wrapkubeadm.sh /usr/local/bin/kubeadm 24 | COPY ./tokens.csv /etc/pki/tokens.csv 25 | COPY ./daemon.json /etc/docker/ 26 | COPY 
./resolv.conf.override /etc/ 27 | COPY ./docker.service /usr/lib/systemd/system/ 28 | 29 | COPY motd /etc/motd 30 | 31 | RUN echo $'cat /etc/motd \n\ 32 | export PS1="[\h \W]$ "' >> /root/.bash_profile 33 | 34 | RUN echo 'source <(kubectl completion bash)' >>~/.bashrc \ 35 | && kubectl completion bash >> /etc/bash_completion.d/kubectl 36 | 37 | RUN mkdir -p /root/.kube && ln -s /etc/kubernetes/admin.conf /root/.kube/config \ 38 | && rm -f /etc/machine-id 39 | 40 | WORKDIR /root 41 | 42 | CMD mount --make-shared / && systemctl start docker && systemctl start kubelet \ 43 | && while true; do bash -l; done 44 | -------------------------------------------------------------------------------- /.github/workflows/docker-base-middle.yml: -------------------------------------------------------------------------------- 1 | name: Docker Image Base-Middle 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | schedule: 7 | - cron: '0 0 * * 0' 8 | 9 | jobs: 10 | 11 | build: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v2 17 | 18 | - name: Build the base image 19 | run: | 20 | cd $GITHUB_WORKSPACE/dockerfiles/dind 21 | docker build . --file Dockerfile.base --tag fcc_base 22 | echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u ${{ secrets.DOCKERHUB_ID }} --password-stdin 23 | docker tag fcc_base freecompilercamp/pwc:base 24 | docker push freecompilercamp/pwc:base 25 | 26 | - name: Build the middle image 27 | run: | 28 | cd $GITHUB_WORKSPACE/dockerfiles/dind 29 | docker build . --file Dockerfile.middle --tag fcc_middle 30 | docker tag fcc_middle freecompilercamp/pwc:middle 31 | docker push freecompilercamp/pwc:middle 32 | 33 | - name: Build the base image with GPU support 34 | run: | 35 | cd $GITHUB_WORKSPACE/dockerfiles/dind 36 | docker build . --file Dockerfile.base-gpu --tag fcc_base_gpu 37 | echo ${{ secrets.DOCKERHUB_TOKEN }} | docker login -u ${{ secrets.DOCKERHUB_ID }} --password-stdin 38 | docker tag fcc_base_gpu freecompilercamp/pwc:base-gpu 39 | docker push freecompilercamp/pwc:base-gpu 40 | 41 | - name: Build the middle image with GPU support 42 | run: | 43 | cd $GITHUB_WORKSPACE/dockerfiles/dind 44 | docker build . 
--file Dockerfile.middle-gpu --tag fcc_middle_gpu 45 | docker tag fcc_middle_gpu freecompilercamp/pwc:middle-gpu 46 | docker push freecompilercamp/pwc:middle-gpu 47 | -------------------------------------------------------------------------------- /router/host.go: -------------------------------------------------------------------------------- 1 | package router 2 | 3 | import ( 4 | "fmt" 5 | "regexp" 6 | "strconv" 7 | "strings" 8 | ) 9 | 10 | const hostPattern = "^.*ip([0-9]{1,3}-[0-9]{1,3}-[0-9]{1,3}-[0-9]{1,3})-([0-9|a-z]+)(?:-?([0-9]{1,5}))?(?:\\.([a-z|A-Z|0-9|_|\\-\\.]+))?(?:\\:([0-9]{1,5}))?$" 11 | 12 | var hostRegex *regexp.Regexp 13 | 14 | func init() { 15 | hostRegex = regexp.MustCompile(hostPattern) 16 | } 17 | 18 | type HostOpts struct { 19 | TLD string 20 | EncodedPort int 21 | Port int 22 | } 23 | 24 | type HostInfo struct { 25 | SessionId string 26 | InstanceIP string 27 | TLD string 28 | EncodedPort int 29 | Port int 30 | } 31 | 32 | func EncodeHost(sessionId, instanceIP string, opts HostOpts) string { 33 | encodedIP := strings.Replace(instanceIP, ".", "-", -1) 34 | 35 | sub := fmt.Sprintf("ip%s-%s", encodedIP, sessionId) 36 | if opts.EncodedPort > 0 { 37 | sub = fmt.Sprintf("%s-%d", sub, opts.EncodedPort) 38 | } 39 | if opts.TLD != "" { 40 | sub = fmt.Sprintf("%s.%s", sub, opts.TLD) 41 | } 42 | if opts.Port > 0 { 43 | sub = fmt.Sprintf("%s:%d", sub, opts.Port) 44 | } 45 | 46 | return sub 47 | } 48 | 49 | func DecodeHost(host string) (HostInfo, error) { 50 | info := HostInfo{} 51 | 52 | matches := hostRegex.FindStringSubmatch(host) 53 | if len(matches) != 6 { 54 | return HostInfo{}, fmt.Errorf("Couldn't find host in string") 55 | } 56 | 57 | info.InstanceIP = strings.Replace(matches[1], "-", ".", -1) 58 | info.SessionId = matches[2] 59 | info.TLD = matches[4] 60 | 61 | if matches[3] != "" { 62 | i, _ := strconv.Atoi(matches[3]) 63 | info.EncodedPort = i 64 | } 65 | if matches[5] != "" { 66 | i, _ := strconv.Atoi(matches[5]) 67 | info.Port = i 68 | } 69 | 70 | return info, nil 71 | } 72 | -------------------------------------------------------------------------------- /router/host_test.go: -------------------------------------------------------------------------------- 1 | package router 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestEncodeHostInfo(t *testing.T) { 10 | host := EncodeHost("aaabbbcccddd", "10.0.0.1", HostOpts{}) 11 | assert.Equal(t, "ip10-0-0-1-aaabbbcccddd", host) 12 | 13 | opts := HostOpts{EncodedPort: 8080} 14 | host = EncodeHost("aaabbbcccddd", "10.0.0.1", opts) 15 | assert.Equal(t, "ip10-0-0-1-aaabbbcccddd-8080", host) 16 | 17 | opts = HostOpts{TLD: "foo.bar"} 18 | host = EncodeHost("aaabbbcccddd", "10.0.0.1", opts) 19 | assert.Equal(t, "ip10-0-0-1-aaabbbcccddd.foo.bar", host) 20 | 21 | opts = HostOpts{TLD: "foo.bar", EncodedPort: 8080, Port: 443} 22 | host = EncodeHost("aaabbbcccddd", "10.0.0.1", opts) 23 | assert.Equal(t, "ip10-0-0-1-aaabbbcccddd-8080.foo.bar:443", host) 24 | } 25 | 26 | func TestDecodeHostInfo(t *testing.T) { 27 | info, err := DecodeHost("ip10-0-0-1-aaabbbcccddd") 28 | assert.Nil(t, err) 29 | assert.Equal(t, HostInfo{InstanceIP: "10.0.0.1", SessionId: "aaabbbcccddd"}, info) 30 | 31 | info, err = DecodeHost("ip10-0-0-1-aaabbbcccddd-8080") 32 | assert.Nil(t, err) 33 | assert.Equal(t, HostInfo{InstanceIP: "10.0.0.1", SessionId: "aaabbbcccddd", EncodedPort: 8080}, info) 34 | 35 | info, err = DecodeHost("ip10-0-0-1-aaabbbcccddd-8080.foo.bar") 36 | assert.Nil(t, err) 37 | assert.Equal(t, 
HostInfo{InstanceIP: "10.0.0.1", SessionId: "aaabbbcccddd", EncodedPort: 8080, TLD: "foo.bar"}, info) 38 | 39 | info, err = DecodeHost("ip10-0-0-1-aaabbbcccddd-8080.foo.bar:443") 40 | assert.Nil(t, err) 41 | assert.Equal(t, HostInfo{InstanceIP: "10.0.0.1", SessionId: "aaabbbcccddd", EncodedPort: 8080, TLD: "foo.bar", Port: 443}, info) 42 | 43 | _, err = DecodeHost("ip10-0-0-1") 44 | assert.NotNil(t, err) 45 | } 46 | -------------------------------------------------------------------------------- /scheduler/scheduler_test.go: -------------------------------------------------------------------------------- 1 | package scheduler 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/play-with-docker/play-with-docker/event" 8 | "github.com/play-with-docker/play-with-docker/pwd" 9 | "github.com/play-with-docker/play-with-docker/pwd/types" 10 | "github.com/play-with-docker/play-with-docker/storage" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | type fakeTask struct { 15 | name string 16 | } 17 | 18 | func (f fakeTask) Name() string { 19 | return f.name 20 | } 21 | func (f fakeTask) Run(ctx context.Context, instance *types.Instance) error { 22 | return nil 23 | } 24 | 25 | func TestScheduler_getMatchedTasks(t *testing.T) { 26 | tasks := []Task{ 27 | fakeTask{name: "docker_task1"}, 28 | fakeTask{name: "docker_task2"}, 29 | fakeTask{name: "k8s_task1"}, 30 | fakeTask{name: "k8s_task2"}, 31 | } 32 | 33 | _s := &storage.Mock{} 34 | _e := &event.Mock{} 35 | _p := &pwd.Mock{} 36 | 37 | s, err := NewScheduler(tasks, _s, _e, _p) 38 | assert.Nil(t, err) 39 | 40 | // No matches 41 | matched := s.getMatchedTasks(&types.Playground{Tasks: []string{}}) 42 | assert.Empty(t, matched) 43 | 44 | // Match everything 45 | matched = s.getMatchedTasks(&types.Playground{Tasks: []string{".*"}}) 46 | assert.Subset(t, tasks, matched) 47 | assert.Len(t, matched, len(tasks)) 48 | 49 | // Match some 50 | matched = s.getMatchedTasks(&types.Playground{Tasks: []string{"docker_.*"}}) 51 | assert.Subset(t, []Task{fakeTask{name: "docker_task1"}, fakeTask{name: "docker_task2"}}, matched) 52 | assert.Len(t, matched, 2) 53 | 54 | // Match exactly 55 | matched = s.getMatchedTasks(&types.Playground{Tasks: []string{"docker_task1", "docker_task3"}}) 56 | assert.Subset(t, []Task{fakeTask{name: "docker_task1"}}, matched) 57 | assert.Len(t, matched, 1) 58 | } 59 | -------------------------------------------------------------------------------- /scheduler/task/check_swarm_ports_test.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | dockerTypes "github.com/docker/docker/api/types" 8 | "github.com/docker/docker/api/types/swarm" 9 | "github.com/play-with-docker/play-with-docker/docker" 10 | "github.com/play-with-docker/play-with-docker/event" 11 | "github.com/play-with-docker/play-with-docker/pwd/types" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func TestCheckSwarmPorts_Name(t *testing.T) { 16 | e := &event.Mock{} 17 | f := &docker.FactoryMock{} 18 | 19 | task := NewCheckSwarmPorts(e, f) 20 | 21 | assert.Equal(t, "CheckSwarmPorts", task.Name()) 22 | e.M.AssertExpectations(t) 23 | f.AssertExpectations(t) 24 | } 25 | 26 | func TestCheckSwarmPorts_RunWhenManager(t *testing.T) { 27 | d := &docker.Mock{} 28 | e := &event.Mock{} 29 | f := &docker.FactoryMock{} 30 | 31 | i := &types.Instance{ 32 | IP: "10.0.0.1", 33 | Name: "aaaabbbb_node1", 34 | SessionId: "aaaabbbbcccc", 35 | Hostname: "node1", 36 | } 37 | 
info := dockerTypes.Info{ 38 | Swarm: swarm.Info{ 39 | LocalNodeState: swarm.LocalNodeStateActive, 40 | ControlAvailable: true, 41 | }, 42 | } 43 | 44 | f.On("GetForInstance", i).Return(d, nil) 45 | d.On("DaemonInfo").Return(info, nil) 46 | d.On("GetSwarmPorts").Return([]string{"aaaabbbb_node1", "aaaabbbb_node2"}, []uint16{8080, 9090}, nil) 47 | e.M.On("Emit", CheckSwarmPortsEvent, "aaaabbbbcccc", []interface{}{ClusterPorts{Manager: i.Name, Instances: []string{i.Name, "aaaabbbb_node2"}, Ports: []int{8080, 9090}}}).Return() 48 | 49 | task := NewCheckSwarmPorts(e, f) 50 | ctx := context.Background() 51 | 52 | err := task.Run(ctx, i) 53 | 54 | assert.Nil(t, err) 55 | d.AssertExpectations(t) 56 | e.M.AssertExpectations(t) 57 | f.AssertExpectations(t) 58 | } 59 | -------------------------------------------------------------------------------- /dockerfiles/dind/ee/ucp-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDo9AKL1GqvsprU 3 | pdIYJ8FBXOA/LH+GH65JPV3Ps8PDZ6mCFjJqo0HJpARUo1He5s+86gBLOF0nV1l3 4 | JTMpShT+ObyGBUrscd1AOkURfrRrP6vhejfZHpaKsbn4Zd2FTczTJHNu7TNnHt5V 5 | miCc/8dxckOG5/XqGIcqYKvoxhdXglj3EszETFk5MHh+kYAuiLK+fY4RkCFESGJJ 6 | f7qfjHsomBryBif/R/z2ju6nUNhBLB6/6BCBGNMaWjXGuC47O16RzfJ29uEa/tov 7 | tSP/Oo9xW9ecN+eKhBXuQQNzPyC6kXJXuBkqiVHUbwhAl2ZNpgeC7MVcXlDeUpVb 8 | HX38JJnzAgMBAAECggEAVqm4bMa4bea3HRcXYu8fQS7JKhdm1cHhd9PBm6yXzpE5 9 | CXEyjmNv7RD8n3Qm2BLsA67WLyWn2iPv35hSQTETQETAcudzKSVvFx7WZRzLB/8m 10 | 9XofXsG3ZZ+avONAlwALjB1KaGEMN3fPZO8y5NVvIDBPGNggr1cyqbxPGAjh1Cav 11 | Laqki0rdPfr3FhxTyPBdmBFDcaMLc77Yl/7rmQJRYWb1qe+g4SEG4xXmEYpcpSUz 12 | zDJZAkY5XAO5cHU5EoKgKJedVBNxqAaRtaisO9yv+CKMqD83hAWhXqeK1bSphghs 13 | 2qIkzNe134ZNUBbmK2FDsAbiPMHNcMKuI4ljfb78iQKBgQD5oZ/uzaYTt6ZQQzKq 14 | rQFA2DxSlBt4Ewae5n6JYzw0hIjRf7LvitZF9zKXcMkHP2QcL+5RiibyJ6ohGypa 15 | jpDP+m5e0B5tS6gEgFzBnrXWbjnrDxUR5Qj0lKg3uuOXw8OdwNxn+MulKkIfGyTW 16 | pCu7G1nh/kltwvN87s4cJycwnwKBgQDu5XUyIcok8nxcBwtxu3zFdtdNn+P4Yq1a 17 | W2sUEUEJUDwcUZqksPIxQhG/SMEEtBqii+EJj3nAlaWItBgTE37mzKGyKv16ZiM1 18 | hr+Rlv5AURxER+Eo4JLFqULZKwMaDlXDrFdV2ulF+6SXWOqKrp4/6sPYxtxHmKfs 19 | oBnXq/4yLQKBgCQFl5+NG2cC/EPevoP0fRbPXT0JVEFqdW0ek6ndoQVvDpM0myyH 20 | 202zUyCZTNj348lRfVFU3zPYV2t5kQ4KPolUePLDk3BwF2m24CusbE7qDv+FaKPx 21 | ae5pOTD5jfgLbsHn36Y9N5240FvOve0fOZRBaSH8YLovBJXFnAZh+/y/AoGALZzQ 22 | CJddAjruNZ/+tmNmykkLiL2riERG9waXZkh5E28nWvzVuvYx9+e2fcBFYkGFCF4O 23 | xIWJaJTp+zTvl8zUIPsXMG524UTZGiI1N3YN63fRHtRekDB4tZbAtbg5qmLsSyT/ 24 | s9vNSFhor6EBfyMiAfAwHpaxflYOUearqHslWK0CgYEAzi/B0azCOaDqzpp6RhAL 25 | rhTRFfu2HR8wN8EJLOSbBbUnlSSJHdnHJBwyyXe3shD/rETLV8dHx+6/k47e1l2d 26 | MUlsad/dOKQyL2pY7UodBzPJkIkmwknDnKzioGety8Tb98oUSTQ8oHfHMuRBOie9 27 | mq1MSTuZyZtsdSXnFhH3qNc= 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /www/assets/xterm/addons/fullscreen/fullscreen.js: -------------------------------------------------------------------------------- 1 | (function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.fullscreen = f()}})(function(){var define,module,exports;return (function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new 
Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i hr { 41 | margin: 2rem 0; 42 | } 43 | 44 | /* Main marketing message and sign up button */ 45 | .jumbotron { 46 | text-align: center; 47 | border-bottom: .05rem solid #e5e5e5; 48 | } 49 | .jumbotron .btn { 50 | padding: .75rem 1.5rem; 51 | font-size: 1.5rem; 52 | } 53 | .btn.dropdown-toggle, .dropdown-menu a { 54 | cursor: pointer; 55 | } 56 | 57 | /* Supporting marketing content */ 58 | .marketing { 59 | margin: 3rem 0; 60 | } 61 | .marketing p + h4 { 62 | margin-top: 1.5rem; 63 | } 64 | 65 | /* Responsive: Portrait tablets and up */ 66 | @media screen and (min-width: 48em) { 67 | /* Remove the padding we set earlier */ 68 | .header, 69 | .marketing, 70 | .footer { 71 | padding-right: 0; 72 | padding-left: 0; 73 | } 74 | /* Space out the masthead */ 75 | .header { 76 | margin-bottom: 2rem; 77 | } 78 | /* Remove the bottom border on the jumbotron for visual effect */ 79 | .jumbotron { 80 | border-bottom: 0; 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /dockerfiles/dind/ee/cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIE9jCCAt6gAwIBAgIQSCiXatddwed3bL9M9bierjANBgkqhkiG9w0BAQsFADAO 3 | MQwwCgYDVQQKEwNVQ1AwHhcNMTcwOTE1MjAzMzAwWhcNMjAwODMwMjAzMzAwWjAO 4 | MQwwCgYDVQQKEwNVQ1AwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCq 5 | prmPRweArtZQ6HHDeYCSC3WxQOy6hakc3VZa6JEldbEoVjOc7MqZNPvTIp8b/W8H 6 | O10ibEGZ03vyeq10UsiFQiYmdhn1SqEilZnFSo892PSpGaN7VO325uUnIccJqc3O 7 | 0YOdvNCdp9roZZ/K7z9nuC37cLy6+Lq2oLr1WYAxncJHedUi3LQCC+2qEBIVL+md 8 | 9yE8amFrYbDhbNqmIcAJ2KmkqBPa0Pa+Qe1FxqQI5zJOT5rOJgF3JbWeqFpm0Zjx 9 | CPTt0cPY4lyQ2U9lyMXJmS4+R0wekkZXywaU1mJi3JJIlMSBMWmWoTrx5mLVWOLv 10 | u44hYerfOmN+ImXRWq4NAPLi4722/OLzCmFn81fdUHOFyxg2Tr23b6I6sMyUfLJ0 11 | lqS+thJ7N/tcQe3nTeQm9dcruDbJpjJQrQkjq9CFFsxNEXBT6EFMRp8oDutOAyHf 12 | guVeqdH5kz6vprNiLfSTqqZSEeQokRkHTyxpZ4grBBCiocsAxm8yLNqhcg3w44CN 13 | 9G/3pylgu7xYSEXHYnnlxsk0MHxDFZ4NTo0UBuyIuozoePIS63GvsyBsBzKzO/RZ 14 | NsnPm3klZ4QnT3dIe0eRtCyu/prRmEMD/zC20fRcAuiG7jyV9NB/9mbLeDjAAngW 15 | 1UhrWpAMiObQZN4h5+ofc0EXFHVvOWaqBmYXlNlEeQIDAQABo1AwTjAOBgNVHQ8B 16 | Af8EBAMCA6gwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMAwGA1UdEwEB 17 | /wQCMAAwDwYDVR0RBAgwBocEfwAAATANBgkqhkiG9w0BAQsFAAOCAgEAFT1PFimD 18 | KGg8fVjuUO9IXf12LA7c+M6Eltyz22Ffxgopl1eHi4xHEfU94ueUAmODrag7Rc4E 19 | VmvrMFIsuFrX/xYjiu2gpHPOP2nQjNRAwKDU0gr2bZ+y98EBtlYO/aFMmYCxJr7B 20 | 6esyA7I/cwLTxaNoTh67VTdPhfDmuEshoQn7Mtop38suevU5YBMTmUl7cp8bVdib 21 | j7UkTq0oRKmAchMAz3W0TgGw9ZKJzU6zEck/3Csz5RWlTI9HV7R7J8aGEIeHGf/i 22 | G+tfg0T8h+rQPkyCic5DIYuQzZ/P9pfJkedZuQU/mu0U/0IsNdkv9NX/4RQazu/Q 23 | OzQ71FOO2HR/S3hcLzS1Iy2zrHbARwji/Sr95gVE1Z4QCK2xSvyy9aqzHwRfc4SX 24 | AzaJhkACCnY7VDK6WJW7jnfkYco+l0tczDkyPjE7h3wP35tCuAZAvGkcrIbBL4oR 25 | 8bnwYAOqiG0cPBmFDBYW7v19qIspw5XDjfMu4YEHon7pYdiKK0Brf0iL+Ep4b1oB 26 | 8uvAysbc2Z/gIj1AsfnwSnrzcvzO6H1oCye277cSn2Z/ebiBaQi+kR3mubX96aPy 27 | bFc9Xb11/y0Y7kYmJ3ifHDJkWerpz5bWEm2KDq1qsFRH9zUMEVfJAXThITawqfuG 28 | 3UBYWv8RePLnRbbnPuSaO9slNCoKl3NLqyk= 29 | -----END CERTIFICATE----- 30 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | services: 3 | haproxy: 4 | 
container_name: haproxy 5 | restart: always 6 | image: haproxy 7 | ports: 8 | - "5010:8080" 9 | volumes: 10 | - ./haproxy:/usr/local/etc/haproxy 11 | 12 | pwd: 13 | # pwd daemon container always needs to be named this way 14 | container_name: pwd 15 | restart: always 16 | # use the latest golang image 17 | image: golang 18 | # go to the right place and starts the app 19 | command: /bin/sh -c 'ssh-keygen -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key >/dev/null; cd /go/src/; if [ -e /runbin/pwd ]; then /runbin/pwd -save /pwd/sessions -name l2; else go run api.go -save /pwd/sessions -name l2; fi' 20 | volumes: 21 | # since this app creates networks and launches containers, we need to talk to docker daemon 22 | - /var/run/docker.sock:/var/run/docker.sock 23 | # mount the box mounted shared folder to the container 24 | - $PWD:/go/src 25 | - sessions:/pwd 26 | l2: 27 | container_name: l2 28 | restart: always 29 | # use the latest golang image 30 | image: golang 31 | # go to the right place and starts the app 32 | command: /bin/sh -c 'ssh-keygen -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key >/dev/null; cd /go/src/router/l2; if [ -e /runbin/l2 ]; then /runbin/l2 -ssh_key_path /etc/ssh/ssh_host_rsa_key -name l2 -save /pwd/networks; else go run l2.go -ssh_key_path /etc/ssh/ssh_host_rsa_key -name l2 -save /pwd/networks; fi' 33 | volumes: 34 | - /var/run/docker.sock:/var/run/docker.sock 35 | - $PWD:/go/src 36 | - networks:/pwd 37 | ports: 38 | - "8022:22" 39 | - "8053:53" 40 | - "443:443" 41 | volumes: 42 | sessions: 43 | networks: 44 | -------------------------------------------------------------------------------- /dockerfiles/pwm/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG VERSION=docker:stable-dind 2 | FROM ${VERSION} 3 | 4 | RUN apk add --no-cache git tmux vim curl bash build-base qemu-img qemu-system-x86_64 5 | 6 | ENV GOPATH /root/go 7 | ENV PATH $PATH:$GOPATH 8 | 9 | # Use specific moby commit due to vendoring mismatch 10 | ENV MOBY_COMMIT="d9d2a91780b34b92e669bbfa099f613bd9fad6bb" 11 | 12 | RUN mkdir /root/go && apk add --no-cache go \ 13 | && go get -u -d github.com/moby/tool/cmd/moby && (cd $GOPATH/src/github.com/moby/tool/cmd/moby && git checkout $MOBY_COMMIT && go install) \ 14 | && go get -u github.com/linuxkit/linuxkit/src/cmd/linuxkit \ 15 | && rm -rf /root/go/pkg && rm -rf /root/go/src && rm -rf /usr/lib/go 16 | 17 | 18 | # Add bash completion and set bash as default shell 19 | RUN mkdir /etc/bash_completion.d \ 20 | && curl https://raw.githubusercontent.com/docker/cli/master/contrib/completion/bash/docker -o /etc/bash_completion.d/docker \ 21 | && sed -i "s/ash/bash/" /etc/passwd 22 | 23 | 24 | # Replace modprobe with a no-op to get rid of spurious warnings 25 | # (note: we can't just symlink to /bin/true because it might be busybox) 26 | RUN rm /sbin/modprobe && echo '#!/bin/true' >/sbin/modprobe && chmod +x /sbin/modprobe 27 | 28 | # Install a nice vimrc file and prompt (by soulshake) 29 | COPY ["sudo", "/usr/local/bin/"] 30 | COPY [".vimrc", ".profile", ".inputrc", ".gitconfig", "./root/"] 31 | COPY ["motd", "/etc/motd"] 32 | COPY ["daemon.json", "/etc/docker/"] 33 | 34 | # Move to our home 35 | WORKDIR /root 36 | 37 | 38 | # Remove IPv6 alias for localhost and start docker in the background ... 39 | CMD cat /etc/hosts >/etc/hosts.bak && \ 40 | sed 's/^::1.*//' /etc/hosts.bak > /etc/hosts && \ 41 | mount -t securityfs none /sys/kernel/security && \ 42 | dockerd &>/docker.log & \ 43 | while true ; do /bin/bash -l; done 44 | # ... 
and then put a shell in the foreground, restarting it if it exits 45 | -------------------------------------------------------------------------------- /handlers/new_instance.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "log" 7 | "net/http" 8 | 9 | "github.com/gorilla/mux" 10 | "github.com/play-with-docker/play-with-docker/provisioner" 11 | "github.com/play-with-docker/play-with-docker/pwd/types" 12 | ) 13 | 14 | func NewInstance(rw http.ResponseWriter, req *http.Request) { 15 | vars := mux.Vars(req) 16 | sessionId := vars["sessionId"] 17 | 18 | body := types.InstanceConfig{PlaygroundFQDN: req.Host, DindVolumeSize: "5G"} 19 | 20 | json.NewDecoder(req.Body).Decode(&body) 21 | 22 | s, err := core.SessionGet(sessionId) 23 | if err != nil { 24 | rw.WriteHeader(http.StatusInternalServerError) 25 | return 26 | } 27 | 28 | playground := core.PlaygroundGet(s.PlaygroundId) 29 | if playground == nil { 30 | log.Printf("Playground with id %s for session %s was not found!", s.PlaygroundId, s.Id) 31 | rw.WriteHeader(http.StatusBadRequest) 32 | return 33 | } 34 | 35 | if body.Type == "windows" && !playground.AllowWindowsInstances { 36 | rw.WriteHeader(http.StatusUnauthorized) 37 | return 38 | } 39 | 40 | instances, err := core.InstanceFindBySession(s) 41 | 42 | if err != nil { 43 | log.Println(err) 44 | rw.WriteHeader(http.StatusInternalServerError) 45 | return 46 | } 47 | 48 | if playground.MaxInstances > 0 && len(instances) >= playground.MaxInstances { 49 | log.Println(err) 50 | rw.WriteHeader(http.StatusConflict) 51 | return 52 | } 53 | 54 | if len(playground.DindVolumeSize) > 0 { 55 | body.DindVolumeSize = playground.DindVolumeSize 56 | } 57 | 58 | i, err := core.InstanceNew(s, body) 59 | if err != nil { 60 | if provisioner.OutOfCapacity(err) { 61 | rw.WriteHeader(http.StatusServiceUnavailable) 62 | fmt.Fprintln(rw, `{"error": "out_of_capacity"}`) 63 | return 64 | } 65 | log.Println(err) 66 | rw.WriteHeader(http.StatusInternalServerError) 67 | return 68 | //TODO: Set a status error 69 | } else { 70 | json.NewEncoder(rw).Encode(i) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /handlers/file_upload.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "io" 5 | "log" 6 | "net/http" 7 | "path/filepath" 8 | 9 | "github.com/gorilla/mux" 10 | "github.com/play-with-docker/play-with-docker/storage" 11 | ) 12 | 13 | func FileUpload(rw http.ResponseWriter, req *http.Request) { 14 | vars := mux.Vars(req) 15 | sessionId := vars["sessionId"] 16 | instanceName := vars["instanceName"] 17 | 18 | s, err := core.SessionGet(sessionId) 19 | if err == storage.NotFoundError { 20 | rw.WriteHeader(http.StatusNotFound) 21 | return 22 | } else if err != nil { 23 | rw.WriteHeader(http.StatusInternalServerError) 24 | return 25 | } 26 | i := core.InstanceGet(s, instanceName) 27 | 28 | // Path to upload the file to 29 | path := req.URL.Query().Get("path") 30 | 31 | // allow up to 32 MB which is the default 32 | 33 | // has a url query parameter, ignore body 34 | if url := req.URL.Query().Get("url"); url != "" { 35 | 36 | _, fileName := filepath.Split(url) 37 | 38 | err := core.InstanceUploadFromUrl(i, fileName, path, req.URL.Query().Get("url")) 39 | if err != nil { 40 | log.Println(err) 41 | rw.WriteHeader(http.StatusInternalServerError) 42 | return 43 | } 44 | rw.WriteHeader(http.StatusOK) 45 | 
return 46 | } else { 47 | red, err := req.MultipartReader() 48 | if err != nil { 49 | log.Println(err) 50 | rw.WriteHeader(http.StatusBadRequest) 51 | return 52 | } 53 | 54 | for { 55 | p, err := red.NextPart() 56 | if err == io.EOF { 57 | break 58 | } 59 | if err != nil { 60 | log.Println(err) 61 | continue 62 | } 63 | 64 | if p.FileName() == "" { 65 | continue 66 | } 67 | err = core.InstanceUploadFromReader(i, p.FileName(), path, p) 68 | if err != nil { 69 | log.Println(err) 70 | rw.WriteHeader(http.StatusInternalServerError) 71 | return 72 | } 73 | 74 | log.Printf("Uploaded [%s] to [%s]\n", p.FileName(), i.Name) 75 | } 76 | rw.WriteHeader(http.StatusOK) 77 | return 78 | } 79 | 80 | } 81 | -------------------------------------------------------------------------------- /dockerfiles/dind/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | # first arg is `-f` or `--some-option` 5 | if [ "${1#-}" != "$1" ]; then 6 | set -- docker "$@" 7 | fi 8 | 9 | # if our command is a valid Docker subcommand, let's invoke it through Docker instead 10 | # (this allows for "docker run docker ps", etc) 11 | if docker help "$1" > /dev/null 2>&1; then 12 | set -- docker "$@" 13 | fi 14 | 15 | _should_tls() { 16 | [ -n "${DOCKER_TLS_CERTDIR:-}" ] \ 17 | && [ -s "$DOCKER_TLS_CERTDIR/client/ca.pem" ] \ 18 | && [ -s "$DOCKER_TLS_CERTDIR/client/cert.pem" ] \ 19 | && [ -s "$DOCKER_TLS_CERTDIR/client/key.pem" ] 20 | } 21 | 22 | # if we have no DOCKER_HOST but we do have the default Unix socket (standard or rootless), use it explicitly 23 | if [ -z "${DOCKER_HOST:-}" ] && [ -S /var/run/docker.sock ]; then 24 | export DOCKER_HOST=unix:///var/run/docker.sock 25 | elif [ -z "${DOCKER_HOST:-}" ] && XDG_RUNTIME_DIR="${XDG_RUNTIME_DIR:-/run/user/$(id -u)}" && [ -S "$XDG_RUNTIME_DIR/docker.sock" ]; then 26 | export DOCKER_HOST="unix://$XDG_RUNTIME_DIR/docker.sock" 27 | fi 28 | 29 | # if DOCKER_HOST isn't set (no custom setting, no default socket), let's set it to a sane remote value 30 | if [ -z "${DOCKER_HOST:-}" ]; then 31 | if _should_tls || [ -n "${DOCKER_TLS_VERIFY:-}" ]; then 32 | export DOCKER_HOST='tcp://docker:2376' 33 | else 34 | export DOCKER_HOST='tcp://docker:2375' 35 | fi 36 | fi 37 | if [ "${DOCKER_HOST#tcp:}" != "$DOCKER_HOST" ] \ 38 | && [ -z "${DOCKER_TLS_VERIFY:-}" ] \ 39 | && [ -z "${DOCKER_CERT_PATH:-}" ] \ 40 | && _should_tls \ 41 | ; then 42 | export DOCKER_TLS_VERIFY=1 43 | export DOCKER_CERT_PATH="$DOCKER_TLS_CERTDIR/client" 44 | fi 45 | 46 | if [ "$1" = 'dockerd' ]; then 47 | cat >&2 <<-'EOW' 48 | 49 | 📎 Hey there! It looks like you're trying to run a Docker daemon. 50 | 51 | You probably should use the "dind" image variant instead, something like: 52 | 53 | docker run --privileged --name some-docker ... docker:dind ... 54 | 55 | See https://hub.docker.com/_/docker/ for more documentation and usage examples. 
56 | 57 | EOW 58 | sleep 3 59 | fi 60 | 61 | exec "$@" 62 | -------------------------------------------------------------------------------- /pwd/client.go: -------------------------------------------------------------------------------- 1 | package pwd 2 | 3 | import ( 4 | "log" 5 | "time" 6 | 7 | "github.com/play-with-docker/play-with-docker/event" 8 | "github.com/play-with-docker/play-with-docker/pwd/types" 9 | ) 10 | 11 | func (p *pwd) ClientNew(id string, session *types.Session) *types.Client { 12 | defer observeAction("ClientNew", time.Now()) 13 | c := &types.Client{Id: id, SessionId: session.Id} 14 | if err := p.storage.ClientPut(c); err != nil { 15 | log.Println("Error saving client", err) 16 | } 17 | return c 18 | } 19 | 20 | func (p *pwd) ClientResizeViewPort(c *types.Client, cols, rows uint) { 21 | defer observeAction("ClientResizeViewPort", time.Now()) 22 | c.ViewPort.Rows = rows 23 | c.ViewPort.Cols = cols 24 | 25 | if err := p.storage.ClientPut(c); err != nil { 26 | log.Println("Error saving client", err) 27 | return 28 | } 29 | p.notifyClientSmallestViewPort(c.SessionId) 30 | } 31 | 32 | func (p *pwd) ClientClose(client *types.Client) { 33 | defer observeAction("ClientClose", time.Now()) 34 | // Client has disconnected. Remove from session and recheck terminal sizes. 35 | if err := p.storage.ClientDelete(client.Id); err != nil { 36 | log.Println("Error deleting client", err) 37 | return 38 | } 39 | p.notifyClientSmallestViewPort(client.SessionId) 40 | } 41 | 42 | func (p *pwd) ClientCount() int { 43 | count, err := p.storage.ClientCount() 44 | if err != nil { 45 | log.Println("Error counting clients", err) 46 | return 0 47 | } 48 | return count 49 | } 50 | 51 | func (p *pwd) notifyClientSmallestViewPort(sessionId string) { 52 | instances, err := p.storage.InstanceFindBySessionId(sessionId) 53 | if err != nil { 54 | log.Printf("Error finding instances for session [%s]. 
Got: %v\n", sessionId, err) 55 | return 56 | } 57 | 58 | vp := p.SessionGetSmallestViewPort(sessionId) 59 | // Resize all terminals in the session 60 | for _, instance := range instances { 61 | err := p.InstanceResizeTerminal(instance, vp.Rows, vp.Cols) 62 | if err != nil { 63 | log.Println("Error resizing terminal", err) 64 | } 65 | } 66 | p.event.Emit(event.INSTANCE_VIEWPORT_RESIZE, sessionId, vp.Cols, vp.Rows) 67 | } 68 | -------------------------------------------------------------------------------- /router/l2/l2_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/play-with-docker/play-with-docker/router" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestDirector(t *testing.T) { 11 | info, err := director(router.ProtocolHTTP, "ip10-0-0-1-aabb-8080.foo.bar") 12 | assert.Nil(t, err) 13 | assert.Equal(t, "10.0.0.1:8080", info.Dst.String()) 14 | 15 | info, err = director(router.ProtocolHTTP, "ip10-0-0-1-aabb.foo.bar") 16 | assert.Nil(t, err) 17 | assert.Equal(t, "10.0.0.1:80", info.Dst.String()) 18 | 19 | info, err = director(router.ProtocolHTTPS, "ip10-0-0-1-aabb.foo.bar") 20 | assert.Nil(t, err) 21 | assert.Equal(t, "10.0.0.1:443", info.Dst.String()) 22 | 23 | info, err = director(router.ProtocolSSH, "ip10-0-0-1-aabb.foo.bar") 24 | assert.Nil(t, err) 25 | assert.Equal(t, "10.0.0.1:22", info.Dst.String()) 26 | assert.Equal(t, "root", info.SSHUser) 27 | 28 | info, err = director(router.ProtocolDNS, "ip10-0-0-1-aabb.foo.bar") 29 | assert.Nil(t, err) 30 | assert.Equal(t, "10.0.0.1:53", info.Dst.String()) 31 | 32 | info, err = director(router.ProtocolHTTP, "ip10-0-0-1-aabb.foo.bar:9090") 33 | assert.Nil(t, err) 34 | assert.Equal(t, "10.0.0.1:9090", info.Dst.String()) 35 | 36 | info, err = director(router.ProtocolHTTP, "ip10-0-0-1-aabb-2222.foo.bar:9090") 37 | assert.Nil(t, err) 38 | assert.Equal(t, "10.0.0.1:2222", info.Dst.String()) 39 | 40 | info, err = director(router.ProtocolHTTP, "lala.ip10-0-0-1-aabb-2222.foo.bar") 41 | assert.Nil(t, err) 42 | assert.Equal(t, "10.0.0.1:2222", info.Dst.String()) 43 | 44 | info, err = director(router.ProtocolHTTP, "lala.ip10-0-0-1-aabb-2222") 45 | assert.Nil(t, err) 46 | assert.Equal(t, "10.0.0.1:2222", info.Dst.String()) 47 | 48 | info, err = director(router.ProtocolHTTP, "ip10-0-0-1-aabb-2222") 49 | assert.Nil(t, err) 50 | assert.Equal(t, "10.0.0.1:2222", info.Dst.String()) 51 | 52 | info, err = director(router.ProtocolHTTP, "ip10-0-0-1-aabb") 53 | assert.Nil(t, err) 54 | assert.Equal(t, "10.0.0.1:80", info.Dst.String()) 55 | 56 | _, err = director(router.ProtocolHTTP, "lala10-0-0-1-aabb.foo.bar") 57 | assert.NotNil(t, err) 58 | } 59 | -------------------------------------------------------------------------------- /scheduler/task/check_k8s_cluster_exposed_ports.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "log" 6 | 7 | "github.com/play-with-docker/play-with-docker/event" 8 | "github.com/play-with-docker/play-with-docker/k8s" 9 | "github.com/play-with-docker/play-with-docker/pwd/types" 10 | meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | ) 12 | 13 | type checkK8sClusterExposedPortsTask struct { 14 | event event.EventApi 15 | factory k8s.FactoryApi 16 | } 17 | 18 | var CheckK8sClusterExpoedPortsEvent event.EventType 19 | 20 | func init() { 21 | CheckK8sClusterExpoedPortsEvent = event.EventType("instance k8s cluster ports") 22 | } 23 | 24 | 
func (t *checkK8sClusterExposedPortsTask) Name() string { 25 | return "CheckK8sClusterPorts" 26 | } 27 | 28 | func NewCheckK8sClusterExposedPorts(e event.EventApi, f k8s.FactoryApi) *checkK8sClusterExposedPortsTask { 29 | return &checkK8sClusterExposedPortsTask{event: e, factory: f} 30 | } 31 | 32 | func (c checkK8sClusterExposedPortsTask) Run(ctx context.Context, i *types.Instance) error { 33 | 34 | kc, err := c.factory.GetKubeletForInstance(i) 35 | if err != nil { 36 | return err 37 | } 38 | 39 | if isManager, err := kc.IsManager(); err != nil { 40 | log.Println(err) 41 | return err 42 | } else if !isManager { 43 | return nil 44 | } 45 | 46 | k8s, err := c.factory.GetForInstance(i) 47 | if err != nil { 48 | log.Println(err) 49 | return err 50 | } 51 | 52 | list, err := k8s.CoreV1().Services("").List(meta_v1.ListOptions{}) 53 | if err != nil { 54 | return err 55 | } 56 | exposedPorts := []int{} 57 | 58 | for _, svc := range list.Items { 59 | for _, p := range svc.Spec.Ports { 60 | if p.NodePort > 0 { 61 | exposedPorts = append(exposedPorts, int(p.NodePort)) 62 | } 63 | } 64 | } 65 | 66 | nodeList, err := k8s.CoreV1().Nodes().List(meta_v1.ListOptions{}) 67 | if err != nil { 68 | return err 69 | } 70 | instances := []string{} 71 | for _, node := range nodeList.Items { 72 | instances = append(instances, node.Name) 73 | } 74 | 75 | c.event.Emit(CheckSwarmPortsEvent, i.SessionId, ClusterPorts{Manager: i.Name, Instances: instances, Ports: exposedPorts}) 76 | return nil 77 | } 78 | -------------------------------------------------------------------------------- /www/assets/style.css: -------------------------------------------------------------------------------- 1 | @import url('https://fonts.googleapis.com/css?family=Rationale'); 2 | 3 | .selected button { 4 | background-color: rgba(158,158,158,0.2); 5 | } 6 | 7 | .terminal-container { 8 | background-color: #000; 9 | padding: 0; 10 | display: flex; 11 | align-items: stretch; 12 | justify-content: stretch; 13 | flex: 1; 14 | } 15 | 16 | .terminal-instance{ 17 | width: 100%; 18 | } 19 | 20 | .clock { 21 | font-family: 'Rationale', sans-serif; 22 | font-size: 3.0em; 23 | color: #1da4eb; 24 | text-align: center; 25 | } 26 | 27 | .welcome { 28 | background-color: #e7e7e7; 29 | } 30 | 31 | .welcome > div { 32 | text-align: center; 33 | } 34 | 35 | .welcome > div > img { 36 | max-width: 100%; 37 | } 38 | 39 | .g-recaptcha div { 40 | margin-left: auto; 41 | margin-right: auto; 42 | margin-bottom: auto; 43 | margin-top: 50px; 44 | } 45 | 46 | .uploadStatus .bottom-block { 47 | display: block; 48 | position: relative; 49 | background-color: rgba(255, 235, 169, 0.25); 50 | height: 30px; 51 | width: 100%; 52 | } 53 | 54 | .uploadStatus .bottom-block > span { 55 | display: inline-block; 56 | padding: 8px; 57 | font-size: 0.9em; 58 | } 59 | 60 | .uploadStatus { 61 | display: block; 62 | position: relative; 63 | width: 100%; 64 | border: 2px solid #aad1f9; 65 | transition: opacity 0.1s linear; 66 | border-top: 0px; 67 | } 68 | 69 | .disconnected { 70 | background-color: #FDF4B6; 71 | } 72 | md-input-container { 73 | margin-bottom: 0; 74 | } 75 | md-input-container .md-errors-spacer { 76 | height: 0; 77 | min-height: 0; 78 | } 79 | 80 | .stats { 81 | min-height: 230px; 82 | } 83 | 84 | ::-webkit-scrollbar { 85 | -webkit-appearance: none; 86 | width: 7px; 87 | } 88 | ::-webkit-scrollbar-thumb { 89 | border-radius: 4px; 90 | background-color: rgba(0,0,0,.5); 91 | -webkit-box-shadow: 0 0 1px rgba(255,255,255,.5); 92 | } 93 | .md-mini { 94 | min-width: 24px; 95 | 
} 96 | 97 | .dragover { 98 | opacity: 0.5; 99 | } 100 | -------------------------------------------------------------------------------- /dockerfiles/dind/.inputrc: -------------------------------------------------------------------------------- 1 | # /etc/inputrc - global inputrc for libreadline 2 | # See readline(3readline) and `info rluserman' for more information. 3 | 4 | # Be 8 bit clean. 5 | set input-meta on 6 | set output-meta on 7 | 8 | # To allow the use of 8bit-characters like the german umlauts, uncomment 9 | # the line below. However this makes the meta key not work as a meta key, 10 | # which is annoying to those which don't need to type in 8-bit characters. 11 | 12 | # set convert-meta off 13 | 14 | # try to enable the application keypad when it is called. Some systems 15 | # need this to enable the arrow keys. 16 | # set enable-keypad on 17 | 18 | # see /usr/share/doc/bash/inputrc.arrows for other codes of arrow keys 19 | 20 | # do not bell on tab-completion 21 | # set bell-style none 22 | # set bell-style visible 23 | 24 | # some defaults / modifications for the emacs mode 25 | $if mode=emacs 26 | 27 | # allow the use of the Home/End keys 28 | "\e[1~": beginning-of-line 29 | "\e[4~": end-of-line 30 | 31 | # allow the use of the Delete/Insert keys 32 | "\e[3~": delete-char 33 | "\e[2~": quoted-insert 34 | 35 | # mappings for "page up" and "page down" to step to the beginning/end 36 | # of the history 37 | # "\e[5~": beginning-of-history 38 | # "\e[6~": end-of-history 39 | 40 | # alternate mappings for "page up" and "page down" to search the history 41 | # "\e[5~": history-search-backward 42 | # "\e[6~": history-search-forward 43 | 44 | # mappings for Ctrl-left-arrow and Ctrl-right-arrow for word moving 45 | "\e[1;5C": forward-word 46 | "\e[1;5D": backward-word 47 | "\e[5C": forward-word 48 | "\e[5D": backward-word 49 | "\e\e[C": forward-word 50 | "\e\e[D": backward-word 51 | 52 | $if term=rxvt 53 | "\e[7~": beginning-of-line 54 | "\e[8~": end-of-line 55 | "\eOc": forward-word 56 | "\eOd": backward-word 57 | $endif 58 | 59 | # for non RH/Debian xterm, can't hurt for RH/Debian xterm 60 | # "\eOH": beginning-of-line 61 | # "\eOF": end-of-line 62 | 63 | # for freebsd console 64 | # "\e[H": beginning-of-line 65 | # "\e[F": end-of-line 66 | 67 | $endif 68 | 69 | # faster completion 70 | set show-all-if-ambiguous on 71 | 72 | "\e[A": history-search-backward 73 | "\e[B": history-search-forward 74 | -------------------------------------------------------------------------------- /dockerfiles/pwm/.inputrc: -------------------------------------------------------------------------------- 1 | # /etc/inputrc - global inputrc for libreadline 2 | # See readline(3readline) and `info rluserman' for more information. 3 | 4 | # Be 8 bit clean. 5 | set input-meta on 6 | set output-meta on 7 | 8 | # To allow the use of 8bit-characters like the german umlauts, uncomment 9 | # the line below. However this makes the meta key not work as a meta key, 10 | # which is annoying to those which don't need to type in 8-bit characters. 11 | 12 | # set convert-meta off 13 | 14 | # try to enable the application keypad when it is called. Some systems 15 | # need this to enable the arrow keys. 
16 | # set enable-keypad on 17 | 18 | # see /usr/share/doc/bash/inputrc.arrows for other codes of arrow keys 19 | 20 | # do not bell on tab-completion 21 | # set bell-style none 22 | # set bell-style visible 23 | 24 | # some defaults / modifications for the emacs mode 25 | $if mode=emacs 26 | 27 | # allow the use of the Home/End keys 28 | "\e[1~": beginning-of-line 29 | "\e[4~": end-of-line 30 | 31 | # allow the use of the Delete/Insert keys 32 | "\e[3~": delete-char 33 | "\e[2~": quoted-insert 34 | 35 | # mappings for "page up" and "page down" to step to the beginning/end 36 | # of the history 37 | # "\e[5~": beginning-of-history 38 | # "\e[6~": end-of-history 39 | 40 | # alternate mappings for "page up" and "page down" to search the history 41 | # "\e[5~": history-search-backward 42 | # "\e[6~": history-search-forward 43 | 44 | # mappings for Ctrl-left-arrow and Ctrl-right-arrow for word moving 45 | "\e[1;5C": forward-word 46 | "\e[1;5D": backward-word 47 | "\e[5C": forward-word 48 | "\e[5D": backward-word 49 | "\e\e[C": forward-word 50 | "\e\e[D": backward-word 51 | 52 | $if term=rxvt 53 | "\e[7~": beginning-of-line 54 | "\e[8~": end-of-line 55 | "\eOc": forward-word 56 | "\eOd": backward-word 57 | $endif 58 | 59 | # for non RH/Debian xterm, can't hurt for RH/Debian xterm 60 | # "\eOH": beginning-of-line 61 | # "\eOF": end-of-line 62 | 63 | # for freebsd console 64 | # "\e[H": beginning-of-line 65 | # "\e[F": end-of-line 66 | 67 | $endif 68 | 69 | # faster completion 70 | set show-all-if-ambiguous on 71 | 72 | "\e[A": history-search-backward 73 | "\e[B": history-search-forward 74 | -------------------------------------------------------------------------------- /scheduler/task/collect_stats_test.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "io" 8 | "testing" 9 | 10 | dockerTypes "github.com/docker/docker/api/types" 11 | "github.com/play-with-docker/play-with-docker/docker" 12 | "github.com/play-with-docker/play-with-docker/event" 13 | "github.com/play-with-docker/play-with-docker/pwd/types" 14 | "github.com/play-with-docker/play-with-docker/storage" 15 | "github.com/stretchr/testify/assert" 16 | "github.com/stretchr/testify/mock" 17 | ) 18 | 19 | type mockSessionProvider struct { 20 | mock.Mock 21 | } 22 | 23 | func (m *mockSessionProvider) GetDocker(session *types.Session) (docker.DockerApi, error) { 24 | args := m.Called(session) 25 | 26 | return args.Get(0).(docker.DockerApi), args.Error(1) 27 | } 28 | 29 | type nopCloser struct { 30 | io.Reader 31 | } 32 | 33 | func (nopCloser) Close() error { return nil } 34 | 35 | func TestCollectStats_Name(t *testing.T) { 36 | e := &event.Mock{} 37 | f := &docker.FactoryMock{} 38 | s := &storage.Mock{} 39 | 40 | task := NewCollectStats(e, f, s) 41 | 42 | assert.Equal(t, "CollectStats", task.Name()) 43 | e.M.AssertExpectations(t) 44 | f.AssertExpectations(t) 45 | } 46 | 47 | func TestCollectStats_Run(t *testing.T) { 48 | d := &docker.Mock{} 49 | e := &event.Mock{} 50 | f := &docker.FactoryMock{} 51 | s := &storage.Mock{} 52 | 53 | stats := dockerTypes.StatsJSON{} 54 | b, _ := json.Marshal(stats) 55 | i := &types.Instance{ 56 | IP: "10.0.0.1", 57 | Name: "aaaabbbb_node1", 58 | SessionId: "aaaabbbbcccc", 59 | Hostname: "node1", 60 | } 61 | 62 | sess := &types.Session{ 63 | Id: "aaaabbbbcccc", 64 | } 65 | 66 | s.On("SessionGet", i.SessionId).Return(sess, nil) 67 | f.On("GetForSession", sess).Return(d, nil) 68 | 
d.On("ContainerStats", i.Name).Return(nopCloser{bytes.NewReader(b)}, nil) 69 | e.M.On("Emit", CollectStatsEvent, "aaaabbbbcccc", []interface{}{InstanceStats{Instance: i.Name, Mem: "0.00% (0B / 0B)", Cpu: "0.00%"}}).Return() 70 | 71 | task := NewCollectStats(e, f, s) 72 | ctx := context.Background() 73 | 74 | err := task.Run(ctx, i) 75 | 76 | assert.Nil(t, err) 77 | d.AssertExpectations(t) 78 | e.M.AssertExpectations(t) 79 | f.AssertExpectations(t) 80 | } 81 | -------------------------------------------------------------------------------- /docker/factory.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "crypto/tls" 5 | "fmt" 6 | "net" 7 | "net/http" 8 | "net/url" 9 | "time" 10 | 11 | client "github.com/docker/docker/client" 12 | "github.com/docker/docker/api" 13 | "github.com/docker/go-connections/tlsconfig" 14 | "github.com/play-with-docker/play-with-docker/pwd/types" 15 | "github.com/play-with-docker/play-with-docker/router" 16 | ) 17 | 18 | type FactoryApi interface { 19 | GetForSession(session *types.Session) (DockerApi, error) 20 | GetForInstance(instance *types.Instance) (DockerApi, error) 21 | } 22 | 23 | func NewClient(instance *types.Instance, proxyHost string) (*client.Client, error) { 24 | var host string 25 | var durl string 26 | 27 | var tlsConfig *tls.Config 28 | if (len(instance.Cert) > 0 && len(instance.Key) > 0) || instance.Tls { 29 | host = router.EncodeHost(instance.SessionId, instance.RoutableIP, router.HostOpts{EncodedPort: 2376}) 30 | tlsConfig = tlsconfig.ClientDefault() 31 | tlsConfig.InsecureSkipVerify = true 32 | tlsConfig.ServerName = host 33 | if len(instance.Cert) > 0 && len(instance.Key) > 0 { 34 | tlsCert, err := tls.X509KeyPair(instance.Cert, instance.Key) 35 | if err != nil { 36 | return nil, fmt.Errorf("Could not load X509 key pair: %v. 
Make sure the key is not encrypted", err) 37 | } 38 | tlsConfig.Certificates = []tls.Certificate{tlsCert} 39 | } 40 | } else { 41 | host = router.EncodeHost(instance.SessionId, instance.RoutableIP, router.HostOpts{EncodedPort: 2375}) 42 | } 43 | 44 | transport := &http.Transport{ 45 | DialContext: (&net.Dialer{ 46 | Timeout: 1 * time.Second, 47 | KeepAlive: 30 * time.Second, 48 | }).DialContext, 49 | MaxIdleConnsPerHost: 5, 50 | } 51 | 52 | if tlsConfig != nil { 53 | transport.TLSClientConfig = tlsConfig 54 | durl = fmt.Sprintf("https://%s", proxyHost) 55 | } else { 56 | transport.Proxy = http.ProxyURL(&url.URL{Host: proxyHost}) 57 | durl = fmt.Sprintf("http://%s", host) 58 | } 59 | 60 | cli := &http.Client{ 61 | Transport: transport, 62 | } 63 | 64 | dc, err := client.NewClient(durl, api.DefaultVersion, cli, nil) 65 | if err != nil { 66 | return nil, fmt.Errorf("Could not connect to DinD docker daemon: %v", err) 67 | } 68 | 69 | return dc, nil 70 | } 71 | -------------------------------------------------------------------------------- /provisioner/overlay.go: -------------------------------------------------------------------------------- 1 | package provisioner 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "net/url" 8 | "strings" 9 | 10 | dtypes "github.com/docker/docker/api/types" 11 | "github.com/play-with-docker/play-with-docker/config" 12 | "github.com/play-with-docker/play-with-docker/docker" 13 | "github.com/play-with-docker/play-with-docker/pwd/types" 14 | ) 15 | 16 | type overlaySessionProvisioner struct { 17 | dockerFactory docker.FactoryApi 18 | } 19 | 20 | func NewOverlaySessionProvisioner(df docker.FactoryApi) SessionProvisionerApi { 21 | return &overlaySessionProvisioner{dockerFactory: df} 22 | } 23 | 24 | func (p *overlaySessionProvisioner) SessionNew(ctx context.Context, s *types.Session) error { 25 | dockerClient, err := p.dockerFactory.GetForSession(s) 26 | if err != nil { 27 | // We assume we are out of capacity 28 | return fmt.Errorf("Out of capacity") 29 | } 30 | u, _ := url.Parse(dockerClient.DaemonHost()) 31 | if u.Host == "" { 32 | s.Host = "localhost" 33 | } else { 34 | chunks := strings.Split(u.Host, ":") 35 | s.Host = chunks[0] 36 | } 37 | 38 | opts := dtypes.NetworkCreate{Driver: "overlay", Attachable: true} 39 | if err := dockerClient.NetworkCreate(s.Id, opts); err != nil { 40 | log.Println("ERROR NETWORKING", err) 41 | return err 42 | } 43 | log.Printf("Network [%s] created for session [%s]\n", s.Id, s.Id) 44 | 45 | ip, err := dockerClient.NetworkConnect(config.L2ContainerName, s.Id, s.PwdIpAddress) 46 | if err != nil { 47 | log.Println(err) 48 | return err 49 | } 50 | s.PwdIpAddress = ip 51 | log.Printf("Connected %s to network [%s]\n", config.PWDContainerName, s.Id) 52 | return nil 53 | } 54 | func (p *overlaySessionProvisioner) SessionClose(s *types.Session) error { 55 | // Disconnect L2 router from the network 56 | dockerClient, err := p.dockerFactory.GetForSession(s) 57 | if err != nil { 58 | log.Println(err) 59 | return err 60 | } 61 | if err := dockerClient.NetworkDisconnect(config.L2ContainerName, s.Id); err != nil { 62 | if !strings.Contains(err.Error(), "is not connected to the network") { 63 | log.Println("ERROR NETWORKING", err) 64 | return err 65 | } 66 | } 67 | log.Printf("Disconnected l2 from network [%s]\n", s.Id) 68 | if err := dockerClient.NetworkDelete(s.Id); err != nil { 69 | if !strings.Contains(err.Error(), "not found") { 70 | log.Println(err) 71 | return err 72 | } 73 | } 74 | 75 | return nil 76 | } 77 | 
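A minimal sketch of how this overlay provisioner can be driven, assuming the repo's local cached docker factory and a storage implementation are wired in; the session values below are placeholders rather than values taken from this tree:

package main

import (
	"context"
	"log"

	"github.com/play-with-docker/play-with-docker/docker"
	"github.com/play-with-docker/play-with-docker/provisioner"
	"github.com/play-with-docker/play-with-docker/pwd/types"
	"github.com/play-with-docker/play-with-docker/storage"
)

func main() {
	// Placeholder: substitute a real storage implementation from this repo.
	var store storage.StorageApi

	factory := docker.NewLocalCachedFactory(store)
	p := provisioner.NewOverlaySessionProvisioner(factory)

	// Placeholder session: SessionNew creates the overlay network named after the
	// session id and connects the L2 router; SessionClose tears both down again.
	s := &types.Session{Id: "aaaabbbbcccc", PwdIpAddress: "10.0.0.2"}

	if err := p.SessionNew(context.Background(), s); err != nil {
		log.Fatal(err)
	}
	defer p.SessionClose(s)
}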
-------------------------------------------------------------------------------- /www/bypass.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Docker Playground 5 | 6 | 7 | 8 | 9 |
10 | Welcome!
11 | We're bypassing the Captcha and redirecting you now..
12 | 13 | 14 | 15 | 16 | 17 | 18 |
19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 51 | 52 | -------------------------------------------------------------------------------- /dockerfiles/dind/Dockerfile.dind-ee: -------------------------------------------------------------------------------- 1 | ARG VERSION=franela/docker:ubuntu-19.03ee 2 | #ARG VERSION=franela/docker:18.09.2-ee-dind 3 | 4 | FROM ${VERSION} 5 | 6 | RUN apt-get update \ 7 | && apt-get install -y git tmux python-pip apache2-utils vim curl jq bash-completion screen tree zip \ 8 | && rm -rf /var/lib/apt/lists/* 9 | 10 | # Add kubectl client 11 | RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.11.7/bin/linux/amd64/kubectl \ 12 | && chmod +x ./kubectl \ 13 | && mv ./kubectl /usr/local/bin/kubectl 14 | 15 | ENV COMPOSE_VERSION=1.22.0 16 | 17 | RUN pip install docker-compose==${COMPOSE_VERSION} 18 | RUN curl -L https://github.com/docker/machine/releases/download/${MACHINE_VERSION}/docker-machine-Linux-x86_64 \ 19 | -o /usr/bin/docker-machine && chmod +x /usr/bin/docker-machine 20 | 21 | 22 | # Install a nice vimrc file and prompt (by soulshake) 23 | COPY ["docker-prompt", "sudo", "ucp-beta.sh", "/usr/local/bin/"] 24 | COPY [".vimrc",".profile", ".inputrc", ".gitconfig", "workshop_beta.lic", "ucp-config.toml", "./root/"] 25 | COPY ["motd", "/etc/motd"] 26 | COPY ["ee/daemon.json", "/etc/docker/"] 27 | COPY ["ee/cert.pem", "ee/key.pem", "/opt/pwd/certs/"] 28 | COPY ["ee/ucp-key.pem", "./root/key.pem"] 29 | COPY ["ee/ucp-cert.pem", "./root/cert.pem"] 30 | 31 | # Move to our home 32 | WORKDIR /root 33 | 34 | # Setup certs and uploads folders 35 | RUN mkdir -p /opt/pwd/certs /opt/pwd/uploads 36 | 37 | VOLUME ["/var/lib/kubelet"] 38 | 39 | # Remove IPv6 alias for localhost and start docker in the background ... 40 | CMD cat /etc/hosts >/etc/hosts.bak && \ 41 | sed 's/^::1.*//' /etc/hosts.bak > /etc/hosts && \ 42 | sed -i "s/\PWD_IP_ADDRESS/$PWD_IP_ADDRESS/" /etc/docker/daemon.json && \ 43 | sed -i "s/\DOCKER_TLSENABLE/$DOCKER_TLSENABLE/" /etc/docker/daemon.json && \ 44 | sed -i "s/\DOCKER_TLSCACERT/$DOCKER_TLSCACERT/" /etc/docker/daemon.json && \ 45 | sed -i "s/\DOCKER_TLSCERT/$DOCKER_TLSCERT/" /etc/docker/daemon.json && \ 46 | sed -i "s/\DOCKER_TLSKEY/$DOCKER_TLSKEY/" /etc/docker/daemon.json && \ 47 | mount -t securityfs none /sys/kernel/security && \ 48 | mount --make-rshared / && \ 49 | #mount --make-rshared -t tmpfs tmpfs /run && \ 50 | #mount --make-rshared /var/lib/kubelet && \ 51 | #mount --make-rshared /var/lib/docker && \ 52 | dockerd > /docker.log 2>&1 & \ 53 | while true ; do script -q -c "/bin/bash -l" /dev/null ; done 54 | # ... 
and then put a shell in the foreground, restarting it if it exits 55 | -------------------------------------------------------------------------------- /www/assets/xterm/addons/zmodem/zmodem.js: -------------------------------------------------------------------------------- 1 | (function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.zmodem = f()}})(function(){var define,module,exports;return (function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i void = handleLink, options: ILinkMatcherOptions = {}): void {\n options.matchIndex = 1;\n term.registerLinkMatcher(strictUrlRegex, handler, options);\n}\n\nexport function apply(terminalConstructor: typeof Terminal): void {\n (terminalConstructor.prototype).webLinksInit = function (handler?: (event: MouseEvent, uri: string) => void, options?: ILinkMatcherOptions): void {\n webLinksInit(this, handler, options);\n };\n}\n",null],"names":[],"mappings":"ACAA;;;ADOA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AAEA;AACA;AACA;AAQA;AAAA;AAAA;AACA;AACA;AACA;AAHA;AAKA;AACA;AACA;AACA;AACA;AAJA;"} -------------------------------------------------------------------------------- /dockerfiles/dind/ucp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | function wait_for_url { 6 | # Wait for docker daemon to be ready 7 | while ! 
curl -k -sS $1 > /dev/null; do 8 | sleep 1; 9 | done 10 | } 11 | 12 | function deploy_ucp { 13 | wait_for_url "https://localhost:2376" 14 | docker run --rm -i --name ucp \ 15 | -v /var/run/docker.sock:/var/run/docker.sock \ 16 | docker/ucp:3.2.3 install --debug --force-insecure-tcp --skip-cloud-provider-check \ 17 | --san *.direct.${PWD_HOST_FQDN} \ 18 | --license $(cat $HOME/workshop_beta.lic) \ 19 | --swarm-port 2375 \ 20 | --admin-username admin \ 21 | --admin-password admin1234 22 | 23 | rm $HOME/workshop_beta.lic 24 | echo "Finished deploying UCP" 25 | } 26 | 27 | function get_instance_ip { 28 | ip -o -4 a s eth1 | awk '{print $4}' | cut -d '/' -f1 29 | } 30 | 31 | function get_node_routable_ip { 32 | curl -sS https://${PWD_HOST_FQDN}/sessions/${SESSION_ID} | jq -r '.instances[] | select(.hostname == "'$1'") | .routable_ip' 33 | } 34 | 35 | function get_direct_url_from_ip { 36 | local ip_dash="${1//./-}" 37 | local url="https://ip${ip_dash}-${SESSION_ID}.direct.${PWD_HOST_FQDN}" 38 | echo $url 39 | } 40 | 41 | function deploy_dtr { 42 | if [ $# -lt 1 ]; then 43 | echo "DTR node hostname" 44 | return 45 | fi 46 | 47 | 48 | local dtr_ip=$(get_node_routable_ip $1) 49 | local ucp_ip=$(get_instance_ip) 50 | 51 | local dtr_url=$(get_direct_url_from_ip $dtr_ip) 52 | local ucp_url=$(get_direct_url_from_ip $ucp_ip) 53 | 54 | docker run -i --rm docker/dtr:2.7.3 install \ 55 | --dtr-external-url $dtr_url \ 56 | --ucp-node $1 \ 57 | --ucp-username admin \ 58 | --ucp-password admin1234 \ 59 | --ucp-insecure-tls \ 60 | --ucp-url $ucp_url 61 | } 62 | 63 | function setup_dtr_certs { 64 | if [ $# -lt 1 ]; then 65 | echo "DTR node hostname is missing" 66 | return 67 | fi 68 | 69 | 70 | local dtr_ip=$(get_node_routable_ip $1) 71 | local dtr_url=$(get_direct_url_from_ip $dtr_ip) 72 | local dtr_hostname="${dtr_url/https:\/\/}" 73 | 74 | wait_for_url "$dtr_url/ca" 75 | 76 | curl -kfsSL $dtr_url/ca -o /usr/local/share/ca-certificates/$dtr_hostname.crt 77 | update-ca-certificates 78 | } 79 | 80 | 81 | case "$1" in 82 | deploy) 83 | deploy_ucp 84 | deploy_dtr $2 85 | setup_dtr_certs $2 86 | ;; 87 | setup-certs) 88 | setup_dtr_certs $2 89 | ;; 90 | *) 91 | echo "Illegal option $1" 92 | ;; 93 | esac 94 | 95 | -------------------------------------------------------------------------------- /dockerfiles/dind/ucp-beta.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | function wait_for_url { 6 | # Wait for docker daemon to be ready 7 | while ! 
curl -k -sS $1 > /dev/null; do 8 | sleep 1; 9 | done 10 | } 11 | 12 | function deploy_ucp { 13 | wait_for_url "https://localhost:2376" 14 | 15 | docker config create com.docker.ucp.config $HOME/ucp-config.toml 16 | 17 | docker run --rm -i --name ucp \ 18 | -v /var/run/docker.sock:/var/run/docker.sock \ 19 | docker/ucp:3.2.3 install --debug --force-insecure-tcp --skip-cloud-provider-check \ 20 | --san *.direct.${PWD_HOST_FQDN} \ 21 | --license $(cat $HOME/workshop_beta.lic) \ 22 | --swarm-port 2375 \ 23 | --existing-config \ 24 | --admin-username admin \ 25 | --admin-password admin1234 26 | 27 | rm $HOME/workshop_beta.lic $HOME/ucp-config.toml 28 | echo "Finished deploying UCP" 29 | } 30 | 31 | function get_instance_ip { 32 | ip -o -4 a s eth1 | awk '{print $4}' | cut -d '/' -f1 33 | } 34 | 35 | function get_node_routable_ip { 36 | curl -sS https://${PWD_HOST_FQDN}/sessions/${SESSION_ID} | jq -r '.instances[] | select(.hostname == "'$1'") | .routable_ip' 37 | } 38 | 39 | function get_direct_url_from_ip { 40 | local ip_dash="${1//./-}" 41 | local url="https://ip${ip_dash}-${SESSION_ID}.direct.${PWD_HOST_FQDN}" 42 | echo $url 43 | } 44 | 45 | function deploy_dtr { 46 | if [ $# -lt 1 ]; then 47 | echo "DTR node hostname" 48 | return 49 | fi 50 | 51 | 52 | local dtr_ip=$(get_node_routable_ip $1) 53 | local ucp_ip=$(get_instance_ip) 54 | 55 | local dtr_url=$(get_direct_url_from_ip $dtr_ip) 56 | local ucp_url=$(get_direct_url_from_ip $ucp_ip) 57 | 58 | docker run -i --rm docker/dtr:2.7.3 install \ 59 | --dtr-external-url $dtr_url \ 60 | --ucp-node $1 \ 61 | --ucp-username admin \ 62 | --ucp-password admin1234 \ 63 | --ucp-insecure-tls \ 64 | --ucp-url $ucp_url 65 | } 66 | 67 | function setup_dtr_certs { 68 | if [ $# -lt 1 ]; then 69 | echo "DTR node hostname is missing" 70 | return 71 | fi 72 | 73 | 74 | local dtr_ip=$(get_node_routable_ip $1) 75 | local dtr_url=$(get_direct_url_from_ip $dtr_ip) 76 | local dtr_hostname="${dtr_url/https:\/\/}" 77 | 78 | wait_for_url "$dtr_url/ca" 79 | 80 | curl -kfsSL $dtr_url/ca -o /usr/local/share/ca-certificates/$dtr_hostname.crt 81 | update-ca-certificates 82 | } 83 | 84 | 85 | case "$1" in 86 | deploy) 87 | deploy_ucp 88 | deploy_dtr $2 89 | setup_dtr_certs $2 90 | ;; 91 | setup-certs) 92 | setup_dtr_certs $2 93 | ;; 94 | *) 95 | echo "Illegal option $1" 96 | ;; 97 | esac 98 | 99 | -------------------------------------------------------------------------------- /www/assets/xterm/addons/webLinks/webLinks.js: -------------------------------------------------------------------------------- 1 | (function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.webLinks = f()}})(function(){var define,module,exports;return (function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i /etc/nsswitch.conf 11 | 12 | ENV DOCKER_CHANNEL stable 13 | ENV DOCKER_VERSION 19.03.7 14 | # TODO ENV DOCKER_SHA256 15 | # 
https://github.com/docker/docker-ce/blob/5b073ee2cf564edee5adca05eee574142f7627bb/components/packaging/static/hash_files !! 16 | # (no SHA file artifacts on download.docker.com yet as of 2017-06-07 though) 17 | 18 | RUN set -eux; \ 19 | \ 20 | # this "case" statement is generated via "update.sh" 21 | dpkgArch=$(dpkg --print-architecture); \ 22 | case "$dpkgArch" in \ 23 | # amd64 24 | amd64) dockerArch='x86_64' ;; \ 25 | # arm32v6 26 | armhf) dockerArch='armel' ;; \ 27 | # arm32v7 28 | armv7) dockerArch='armhf' ;; \ 29 | # arm64v8 30 | aarch64) dockerArch='aarch64' ;; \ 31 | *) echo >&2 "error: unsupported architecture ($dpkgArch)"; exit 1 ;;\ 32 | esac; \ 33 | \ 34 | if ! wget -O docker.tgz "https://download.docker.com/linux/static/${DOCKER_CHANNEL}/${dockerArch}/docker-${DOCKER_VERSION}.tgz"; then \ 35 | echo >&2 "error: failed to download 'docker-${DOCKER_VERSION}' from '${DOCKER_CHANNEL}' for '${dockerArch}'"; \ 36 | exit 1; \ 37 | fi; \ 38 | \ 39 | tar --extract \ 40 | --file docker.tgz \ 41 | --strip-components 1 \ 42 | --directory /usr/local/bin/ \ 43 | ; \ 44 | rm docker.tgz; \ 45 | \ 46 | dockerd --version; \ 47 | docker --version 48 | 49 | COPY modprobe.sh /usr/local/bin/modprobe 50 | COPY docker-entrypoint.sh /usr/local/bin/ 51 | 52 | # https://github.com/docker-library/docker/pull/166 53 | # dockerd-entrypoint.sh uses DOCKER_TLS_CERTDIR for auto-generating TLS certificates 54 | # docker-entrypoint.sh uses DOCKER_TLS_CERTDIR for auto-setting DOCKER_TLS_VERIFY and DOCKER_CERT_PATH 55 | # (For this to work, at least the "client" subdirectory of this path needs to be shared between the client and server containers via a volume, "docker cp", or other means of data sharing.) 56 | ENV DOCKER_TLS_CERTDIR=/certs 57 | # also, ensure the directory pre-exists and has wide enough permissions for "dockerd-entrypoint.sh" to create subdirectories, even when run in "rootless" mode 58 | RUN mkdir /certs /certs/client && chmod 1777 /certs /certs/client 59 | # (doing both /certs and /certs/client so that if Docker does a "copy-up" into a volume defined on /certs/client, it will "do the right thing" by default in a way that still works for rootless users) 60 | 61 | ENTRYPOINT ["docker-entrypoint.sh"] 62 | CMD ["sh"] 63 | -------------------------------------------------------------------------------- /dockerfiles/dind/workshop.lic: -------------------------------------------------------------------------------- 1 | 
{"key_id":"B3T_Uirjs-tpcGd4Tql8HL--kDo1iTOUaVUFNMhEXM1Z","private_key":"RbtCEoNZ4OBu-yIHNM1mGCJ6R_4SxF-ThghAd-I3b6_N","authorization":"ewogICAicGF5bG9hZCI6ICJleUpsZUhCcGNtRjBhVzl1SWpvaU1qQXhPUzB3TkMweU5GUXhPRG93TkRvek5Gb2lMQ0owYjJ0bGJpSTZJbU16U1VnMllWSjFWak00WjBWSVIwWXRVV1l0ZGxGM2MwMHdlR05vYnpoWE4xSklPRzFLYVRaT1VUUTlJaXdpYldGNFJXNW5hVzVsY3lJNk1UQXNJbk5qWVc1dWFXNW5SVzVoWW14bFpDSTZkSEoxWlN3aWJHbGpaVzV6WlZSNWNHVWlPaUpQWm1ac2FXNWxJaXdpZEdsbGNpSTZJbEJ5YjJSMVkzUnBiMjRpZlEiLAogICAic2lnbmF0dXJlcyI6IFsKICAgICAgewogICAgICAgICAiaGVhZGVyIjogewogICAgICAgICAgICAiandrIjogewogICAgICAgICAgICAgICAiZSI6ICJBUUFCIiwKICAgICAgICAgICAgICAgImtleUlEIjogIko3TEQ6NjdWUjpMNUhaOlU3QkE6Mk80Rzo0QUwzOk9GMk46SkhHQjpFRlRIOjVDVlE6TUZFTzpBRUlUIiwKICAgICAgICAgICAgICAgImtpZCI6ICJKN0xEOjY3VlI6TDVIWjpVN0JBOjJPNEc6NEFMMzpPRjJOOkpIR0I6RUZUSDo1Q1ZROk1GRU86QUVJVCIsCiAgICAgICAgICAgICAgICJrdHkiOiAiUlNBIiwKICAgICAgICAgICAgICAgIm4iOiAieWRJeS1sVTdvN1BjZVktNC1zLUNRNU9FZ0N5RjhDeEljUUlXdUs4NHBJaVpjaVk2NzMweUNZbndMU0tUbHctVTZVQ19RUmVXUmlvTU5ORTVEczVUWUVYYkdHNm9sbTJxZFdiQndjQ2ctMlVVSF9PY0I5V3VQNmdSUEhwTUZNc3hEeld3dmF5OEpVdUhnWVVMVXBtMUl2LW1xN2xwNW5RX1J4clQwS1pSQVFUWUxFTUVmR3dtM2hNT19nZUxQUy1oZ0tQdElIbGtnNl9XY294VEdvS1A3OWRfd2FIWXhHTmw3V2hTbmVpQlN4YnBiUUFLazIxbGc3OThYYjd2WnlFQVRETXJSUjlNZUU2QWRqNUhKcFkzQ295UkFQQ21hS0dSQ0s0dW9aU29JdTBoRlZsS1VQeWJidzAwMEdPLXdhMktOOFV3Z0lJbTBpNUkxdVc5R2txNHpqQnk1emhncXVVWGJHOWJXUEFPWXJxNVFhODFEeEdjQmxKeUhZQXAtRERQRTlUR2c0elltWGpKbnhacUhFZHVHcWRldlo4WE1JMHVrZmtHSUkxNHdVT2lNSUlJclhsRWNCZl80Nkk4Z1FXRHp4eWNaZV9KR1gtTEF1YXlYcnlyVUZlaFZOVWRaVWw5d1hOYUpCLWthQ3F6NVF3YVI5M3NHdy1RU2Z0RDBOdkxlN0N5T0gtRTZ2ZzZTdF9OZVR2Z3Y4WW5oQ2lYSWxaOEhPZkl3TmU3dEVGX1VjejVPYlB5a20zdHlsck5VanQwVnlBbXR0YWNWSTJpR2loY1VQcm1rNGxWSVo3VkRfTFNXLWk3eW9TdXJ0cHNQWGNlMnBLRElvMzBsSkdoT18zS1VtbDJTVVpDcXpKMXlFbUtweXNINUhEVzljc0lGQ0EzZGVBamZaVXZON1UiCiAgICAgICAgICAgIH0sCiAgICAgICAgICAgICJhbGciOiAiUlMyNTYiCiAgICAgICAgIH0sCiAgICAgICAgICJzaWduYXR1cmUiOiAid2xrQUhLd0l1TUs5Y0N3YUdINVB1MW50dGNkLVk0SkNsRnpLeTZtcmJlTzR3eXFOenpwUi16TG4tMlhsYnJGZTdlczYtSklSREhmNzBGR3JRZl9MZEc3QVQ4bC1HRXVoUk1SaF8xVTlXd1BkOGdsWnNFem44VFUyeGtzU3lkWEw2WER3TUlqMHJUMFdpQm43T29YcEc0ZGJrUGgwLWxfY1VKQnphQzlwbEZ3ZXdmdF9Ocl80a0FpcUlNa3FJZHdQaU5XOVc4NERPUFdpZ2FrcTZTTnRtMVpqT2E1UG1ldHUydk1iUGpnTzFZM19tUVFsUldpakRwRUR1Rzl1dl9yNDFsN1I2LTdKQTB6SGpvdVVqdkxDREdIMUQ3eUxnR1RFMTlXN2FMRHI4ZE5FeXBjdi1vQzVmb3pqM19ISjUyWXVDS0RnazJzb3Y5YVFYOHhNTW5DWGU4Y3JIYjlEVG05eVcyd09FN0kxYVZLYXRKbjZrSGprM1FSWGVNbnRNQnJ6TGlzanBjZnlBYzdGNlc1YnBTSUtXaUQtd2o5QTRlY0FPbFNxc0NBS3lkaWxnR2lqQTNPY1dOZHhvV0NhV1MzaXFvakFBTE1JNHlsOFlpdG50ckVMVFNuUDFFS08wTGFaaTJxVURfU0lBSmFOUlRPTVIzblRqQUNwd1ZwYXAyU3lkOEZwc1pFVllTZFJVLWJVZDJybmN1ZHZfcC1XdFZpYWVsQ3BvTWstdURzWGhud2JyWFB6Y3dkVHVobmg0V2kxMmRTcjRUQ3ZMRktSMklCaklwam1VZWt4MFBTazlKUkNXc2R2bjY0dElCZnV6dVZSRkVkSVBidnBZd2pWOUZZc19VQWJvVE85a2E1OWZmNm1zOThHYXVTbE9sYnkwSWE0TlBxTTRKY2ZvSFUiLAogICAgICAgICAicHJvdGVjdGVkIjogImV5Sm1iM0p0WVhSTVpXNW5kR2dpT2pFM05Dd2labTl5YldGMFZHRnBiQ0k2SW1aUklpd2lkR2x0WlNJNklqSXdNVGd0TURVdE1UWlVNREU2TURNNk1qTmFJbjAiCiAgICAgIH0KICAgXQp9"} -------------------------------------------------------------------------------- /www/assets/xterm/addons/fit/fit.js.map: -------------------------------------------------------------------------------- 1 | {"version":3,"file":"fit.js","sources":["../../../src/addons/fit/fit.ts","../../../node_modules/browser-pack/_prelude.js"],"sourcesContent":["/**\n * Copyright (c) 2014 The xterm.js authors. 
All rights reserved.\n * @license MIT\n *\n * Fit terminal columns and rows to the dimensions of its DOM element.\n *\n * ## Approach\n *\n * Rows: Truncate the division of the terminal parent element height by the\n * terminal row height.\n * Columns: Truncate the division of the terminal parent element width by the\n * terminal character width (apply display: inline at the terminal\n * row and truncate its width with the current number of columns).\n */\n\nimport { Terminal } from 'xterm';\n\nexport interface IGeometry {\n rows: number;\n cols: number;\n}\n\nexport function proposeGeometry(term: Terminal): IGeometry {\n if (!term.element.parentElement) {\n return null;\n }\n const parentElementStyle = window.getComputedStyle(term.element.parentElement);\n const parentElementHeight = parseInt(parentElementStyle.getPropertyValue('height'));\n const parentElementWidth = Math.max(0, parseInt(parentElementStyle.getPropertyValue('width')));\n const elementStyle = window.getComputedStyle(term.element);\n const elementPadding = {\n top: parseInt(elementStyle.getPropertyValue('padding-top')),\n bottom: parseInt(elementStyle.getPropertyValue('padding-bottom')),\n right: parseInt(elementStyle.getPropertyValue('padding-right')),\n left: parseInt(elementStyle.getPropertyValue('padding-left'))\n };\n const elementPaddingVer = elementPadding.top + elementPadding.bottom;\n const elementPaddingHor = elementPadding.right + elementPadding.left;\n const availableHeight = parentElementHeight - elementPaddingVer;\n const availableWidth = parentElementWidth - elementPaddingHor - (term)._core.viewport.scrollBarWidth;\n const geometry = {\n cols: Math.floor(availableWidth / (term)._core._renderCoordinator.dimensions.actualCellWidth),\n rows: Math.floor(availableHeight / (term)._core._renderCoordinator.dimensions.actualCellHeight)\n };\n return geometry;\n}\n\nexport function fit(term: Terminal): void {\n const geometry = proposeGeometry(term);\n if (geometry) {\n // Force a full render\n if (term.rows !== geometry.rows || term.cols !== geometry.cols) {\n (term)._core._renderCoordinator.clear();\n term.resize(geometry.cols, geometry.rows);\n }\n }\n}\n\nexport function apply(terminalConstructor: typeof Terminal): void {\n (terminalConstructor.prototype).proposeGeometry = function (): IGeometry {\n return proposeGeometry(this);\n };\n\n (terminalConstructor.prototype).fit = function (): void {\n fit(this);\n };\n}\n",null],"names":[],"mappings":"ACAA;;;ADsBA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AAvBA;AAyBA;AACA;AACA;AAEA;AACA;AACA;AACA;AACA;AACA;AATA;AAWA;AACA;AACA;AACA;AAEA;AACA;AACA;AACA;AARA;"} -------------------------------------------------------------------------------- /docker/local_cached_factory.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "sync" 8 | "time" 9 | 10 | client "github.com/docker/docker/client" 11 | "github.com/play-with-docker/play-with-docker/pwd/types" 12 | "github.com/play-with-docker/play-with-docker/storage" 13 | ) 14 | 15 | type localCachedFactory struct { 16 | rw sync.Mutex 17 | irw sync.Mutex 18 | sessionClient DockerApi 19 | instanceClients map[string]*instanceEntry 20 | storage storage.StorageApi 21 | } 22 | 23 | type instanceEntry struct { 24 | rw sync.Mutex 25 | client DockerApi 26 | } 27 | 28 | func (f *localCachedFactory) GetForSession(session *types.Session) (DockerApi, error) { 29 | 
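	// The session-level client is cached: under the lock, reuse it if it still
	// answers a ping, otherwise close the stale client and rebuild it below.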
f.rw.Lock() 30 | defer f.rw.Unlock() 31 | 32 | if f.sessionClient != nil { 33 | if err := f.check(f.sessionClient.GetClient()); err == nil { 34 | return f.sessionClient, nil 35 | } else { 36 | f.sessionClient.GetClient().Close() 37 | } 38 | } 39 | 40 | c, err := client.NewClientWithOpts() 41 | if err != nil { 42 | return nil, err 43 | } 44 | err = f.check(c) 45 | if err != nil { 46 | return nil, err 47 | } 48 | d := NewDocker(c) 49 | f.sessionClient = d 50 | return f.sessionClient, nil 51 | } 52 | 53 | func (f *localCachedFactory) GetForInstance(instance *types.Instance) (DockerApi, error) { 54 | key := instance.Name 55 | 56 | f.irw.Lock() 57 | c, found := f.instanceClients[key] 58 | if !found { 59 | c := &instanceEntry{} 60 | f.instanceClients[key] = c 61 | } 62 | c = f.instanceClients[key] 63 | f.irw.Unlock() 64 | 65 | c.rw.Lock() 66 | defer c.rw.Unlock() 67 | 68 | if c.client != nil { 69 | if err := f.check(c.client.GetClient()); err == nil { 70 | return c.client, nil 71 | } else { 72 | c.client.GetClient().Close() 73 | } 74 | } 75 | 76 | dc, err := NewClient(instance, "l2:443") 77 | if err != nil { 78 | return nil, err 79 | } 80 | err = f.check(dc) 81 | if err != nil { 82 | return nil, err 83 | } 84 | dockerClient := NewDocker(dc) 85 | c.client = dockerClient 86 | 87 | return dockerClient, nil 88 | } 89 | 90 | func (f *localCachedFactory) check(c *client.Client) error { 91 | ok := false 92 | for i := 0; i < 5; i++ { 93 | _, err := c.Ping(context.Background()) 94 | if err != nil { 95 | log.Printf("Connection to [%s] has failed, maybe instance is not ready yet, sleeping and retrying in 1 second. Try #%d. Got: %v\n", c.DaemonHost(), i+1, err) 96 | time.Sleep(time.Second) 97 | continue 98 | } 99 | ok = true 100 | break 101 | } 102 | if !ok { 103 | return fmt.Errorf("Connection to docker daemon was not established.") 104 | } 105 | return nil 106 | } 107 | 108 | func NewLocalCachedFactory(s storage.StorageApi) *localCachedFactory { 109 | return &localCachedFactory{ 110 | instanceClients: make(map[string]*instanceEntry), 111 | storage: s, 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /dockerfiles/dind/Dockerfile.base-gpu: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 2 | 3 | RUN apt-get update && \ 4 | apt-get -y install ca-certificates wget 5 | 6 | # set up nsswitch.conf for Go's "netgo" implementation (which Docker explicitly uses) 7 | # - https://github.com/docker/docker-ce/blob/v17.09.0-ce/components/engine/hack/make.sh#L149 8 | # - https://github.com/golang/go/blob/go1.9.1/src/net/conf.go#L194-L275 9 | # - docker run --rm debian:stretch grep '^hosts:' /etc/nsswitch.conf 10 | RUN echo 'hosts: files dns' > /etc/nsswitch.conf 11 | 12 | ENV DOCKER_CHANNEL stable 13 | ENV DOCKER_VERSION 19.03.7 14 | # TODO ENV DOCKER_SHA256 15 | # https://github.com/docker/docker-ce/blob/5b073ee2cf564edee5adca05eee574142f7627bb/components/packaging/static/hash_files !! 
16 | # (no SHA file artifacts on download.docker.com yet as of 2017-06-07 though) 17 | 18 | RUN set -eux; \ 19 | \ 20 | # this "case" statement is generated via "update.sh" 21 | dpkgArch=$(dpkg --print-architecture); \ 22 | case "$dpkgArch" in \ 23 | # amd64 24 | amd64) dockerArch='x86_64' ;; \ 25 | # arm32v6 26 | armhf) dockerArch='armel' ;; \ 27 | # arm32v7 28 | armv7) dockerArch='armhf' ;; \ 29 | # arm64v8 30 | aarch64) dockerArch='aarch64' ;; \ 31 | *) echo >&2 "error: unsupported architecture ($dpkgArch)"; exit 1 ;;\ 32 | esac; \ 33 | \ 34 | if ! wget -O docker.tgz "https://download.docker.com/linux/static/${DOCKER_CHANNEL}/${dockerArch}/docker-${DOCKER_VERSION}.tgz"; then \ 35 | echo >&2 "error: failed to download 'docker-${DOCKER_VERSION}' from '${DOCKER_CHANNEL}' for '${dockerArch}'"; \ 36 | exit 1; \ 37 | fi; \ 38 | \ 39 | tar --extract \ 40 | --file docker.tgz \ 41 | --strip-components 1 \ 42 | --directory /usr/local/bin/ \ 43 | ; \ 44 | rm docker.tgz; \ 45 | \ 46 | dockerd --version; \ 47 | docker --version 48 | 49 | COPY modprobe.sh /usr/local/bin/modprobe 50 | COPY docker-entrypoint.sh /usr/local/bin/ 51 | 52 | # https://github.com/docker-library/docker/pull/166 53 | # dockerd-entrypoint.sh uses DOCKER_TLS_CERTDIR for auto-generating TLS certificates 54 | # docker-entrypoint.sh uses DOCKER_TLS_CERTDIR for auto-setting DOCKER_TLS_VERIFY and DOCKER_CERT_PATH 55 | # (For this to work, at least the "client" subdirectory of this path needs to be shared between the client and server containers via a volume, "docker cp", or other means of data sharing.) 56 | ENV DOCKER_TLS_CERTDIR=/certs 57 | # also, ensure the directory pre-exists and has wide enough permissions for "dockerd-entrypoint.sh" to create subdirectories, even when run in "rootless" mode 58 | RUN mkdir /certs /certs/client && chmod 1777 /certs /certs/client 59 | # (doing both /certs and /certs/client so that if Docker does a "copy-up" into a volume defined on /certs/client, it will "do the right thing" by default in a way that still works for rootless users) 60 | 61 | ENTRYPOINT ["docker-entrypoint.sh"] 62 | CMD ["sh"] 63 | -------------------------------------------------------------------------------- /README.md.orig: -------------------------------------------------------------------------------- 1 | # Overview 2 | [FreeCompilerCamp](http://freecompilercamp.org) (currently alive) is a free and open online training platform aimed to automate the training of compiler developers. Our platform allows anyone who is interested in developing compilers to learn the necessary skills for free. The platform is built on top of Play-With-Docker, a docker playground for users to conduct experiments in a sandbox. We welcome anyone to try out our system, give us feedback, contribute new training courses, or enhance the training platform to make it an effective learning resource for the compiler community. 3 | 4 | While this platform can be used to host any compiler tutorials, we specially collect some tutorials for OpenMP compilers. We have created some initial tutorials to train users to learn how to use the ROSE or Clang/LLVM compiler to support OpenMP. 5 | 6 | # Project Layout 7 | Goal: an open, extensive online platform to automatically train and certify compiler researchers and developers. 
8 | * compiler-classroom: the website's text content, hosted using GitHub Pages at https://github.com/freeCompilerCamp/freecompilercamp.github.io 9 | * play-with-compiler: the online sandbox based on play-with-docker, the content of this repo 10 | * dockerfiles/dind/ : a directory containing the docker files for our online sandboxed terminal 11 | * Dockerfile.base - This is the base docker file, which loads the Ubuntu environment. 12 | * Dockerfile.middle - In this docker file we add the user and user group required for the docker environment. This file depends on fcc_docker:0.1, an image built from Dockerfile.base. 13 | * Dockerfile.dind - This is the main docker file, where we set up our environment and add the required tools, such as LLVM and ROSE. This file depends on fcc_dind:0.1, an image built from Dockerfile.middle. 14 | 15 | # Installation 16 | You can install your own instance of this website. Please follow the instructions at 17 | * https://github.com/chunhualiao/freeCompilerCamp/wiki/Deploy-FreeCC-to-AWS 18 | 19 | # Contact Us 20 | This work was performed under the auspices of the U.S. Department of Energy by Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344, and partially supported by the U.S. Dept. of Energy, Office of Science, ASCR (SC-21), under contract DE-AC02-06CH11357. 21 | 22 | This website is still under development (LLNL-WEB-789932). For questions and comments, please file issue tickets against this git repo. Alternatively, you can contact liao6@llnl.gov. 23 | 24 | # Publication List 25 | 26 | * Anjia Wang, Alok Mishra, Chunhua Liao, Yonghong Yan, Barbara Chapman, FreeCompilerCamp.org: Training for OpenMP Compiler Development from Cloud, Sixth SC Workshop on Best Practices for HPC Training and Education: BPHTE19, 2019 27 | * Alok Mishra, Anjia Wang, Chunhua Liao, Yonghong Yan, Barbara Chapman, FreeCompilerCamp: Online Training for Extending Compilers, SC'19 Research Poster submission, accepted (also selected as a Best Poster nominee).
28 | -------------------------------------------------------------------------------- /dockerfiles/dind/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG VERSION=docker:stable-dind 2 | FROM ${VERSION} 3 | 4 | RUN apk add --no-cache py-pip python3-dev libffi-dev openssl-dev git tmux apache2-utils vim build-base gettext-dev curl bash-completion bash util-linux jq openssh openssl tree \ 5 | && ln -s /usr/local/bin/docker /usr/bin/docker 6 | 7 | ENV GOPATH /root/go 8 | ENV PATH $PATH:$GOPATH 9 | 10 | 11 | ENV DOCKER_TLS_CERTDIR="" 12 | ENV DOCKER_CLI_EXPERIMENTAL=enabled 13 | ENV DOCKERAPP_VERSION=v0.9.1-beta3 14 | ENV COMPOSE_VERSION=1.26.0 15 | 16 | RUN pip install docker-compose==${COMPOSE_VERSION} 17 | 18 | RUN curl -fsSL --output /tmp/docker-app-linux.tar.gz https://github.com/docker/app/releases/download/${DOCKERAPP_VERSION}/docker-app-linux.tar.gz \ 19 | && tar xf "/tmp/docker-app-linux.tar.gz" -C /tmp/ && mkdir -p /root/.docker/cli-plugins && mv /tmp/docker-app-plugin-linux /root/.docker/cli-plugins/docker-app && rm /tmp/docker-app* 20 | 21 | # Add bash completion and set bash as default shell 22 | RUN mkdir /etc/bash_completion.d \ 23 | && curl https://raw.githubusercontent.com/docker/cli/master/contrib/completion/bash/docker -o /etc/bash_completion.d/docker \ 24 | && sed -i "s/ash/bash/" /etc/passwd 25 | 26 | # Replace modprobe with a no-op to get rid of spurious warnings 27 | # (note: we can't just symlink to /bin/true because it might be busybox) 28 | RUN rm /sbin/modprobe && echo '#!/bin/true' >/sbin/modprobe && chmod +x /sbin/modprobe 29 | 30 | # Install a nice vimrc file and prompt (by soulshake) 31 | COPY ["docker-prompt", "sudo", "/usr/local/bin/"] 32 | COPY [".vimrc", ".profile", ".inputrc", ".gitconfig", "./root/"] 33 | COPY ["motd", "/etc/motd"] 34 | COPY ["daemon.json", "/etc/docker/"] 35 | 36 | 37 | # Move to our home 38 | WORKDIR /root 39 | 40 | # Setup certs and ssh keys 41 | RUN mkdir -p /var/run/pwd/certs && mkdir -p /var/run/pwd/uploads \ 42 | && ssh-keygen -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key >/dev/null \ 43 | && mkdir ~/.ssh && ssh-keygen -N "" -t rsa -f ~/.ssh/id_rsa \ 44 | && cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys 45 | 46 | # Remove IPv6 alias for localhost and start docker in the background ... 47 | CMD cat /etc/hosts >/etc/hosts.bak && \ 48 | sed 's/^::1.*//' /etc/hosts.bak > /etc/hosts && \ 49 | sed -i "s/\PWD_IP_ADDRESS/$PWD_IP_ADDRESS/" /etc/docker/daemon.json && \ 50 | sed -i "s/\DOCKER_TLSENABLE/$DOCKER_TLSENABLE/" /etc/docker/daemon.json && \ 51 | sed -i "s/\DOCKER_TLSCACERT/$DOCKER_TLSCACERT/" /etc/docker/daemon.json && \ 52 | sed -i "s/\DOCKER_TLSCERT/$DOCKER_TLSCERT/" /etc/docker/daemon.json && \ 53 | sed -i "s/\DOCKER_TLSKEY/$DOCKER_TLSKEY/" /etc/docker/daemon.json && \ 54 | mount -t securityfs none /sys/kernel/security && \ 55 | echo "root:root" | chpasswd &> /dev/null && \ 56 | /usr/sbin/sshd -o PermitRootLogin=yes -o PrintMotd=no 2>/dev/null && \ 57 | dockerd &>/docker.log & \ 58 | while true ; do script -q -c "/bin/bash -l" /dev/null ; done 59 | # ... 
and then put a shell in the foreground, restarting it if it exits 60 | -------------------------------------------------------------------------------- /handlers/playground.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "log" 7 | "net/http" 8 | "time" 9 | 10 | "github.com/play-with-docker/play-with-docker/config" 11 | "github.com/play-with-docker/play-with-docker/pwd/types" 12 | ) 13 | 14 | func NewPlayground(rw http.ResponseWriter, req *http.Request) { 15 | if !ValidateToken(req) { 16 | rw.WriteHeader(http.StatusForbidden) 17 | return 18 | } 19 | 20 | var playground types.Playground 21 | 22 | err := json.NewDecoder(req.Body).Decode(&playground) 23 | if err != nil { 24 | rw.WriteHeader(http.StatusBadRequest) 25 | fmt.Fprintf(rw, "Error creating playground. Got: %v", err) 26 | return 27 | } 28 | 29 | newPlayground, err := core.PlaygroundNew(playground) 30 | if err != nil { 31 | rw.WriteHeader(http.StatusBadRequest) 32 | fmt.Fprintf(rw, "Error creating playground. Got: %v", err) 33 | return 34 | } 35 | 36 | json.NewEncoder(rw).Encode(newPlayground) 37 | } 38 | 39 | func ListPlaygrounds(rw http.ResponseWriter, req *http.Request) { 40 | if !ValidateToken(req) { 41 | rw.WriteHeader(http.StatusForbidden) 42 | return 43 | } 44 | 45 | playgrounds, err := core.PlaygroundList() 46 | if err != nil { 47 | log.Printf("Error listing playgrounds. Got: %v\n", err) 48 | rw.WriteHeader(http.StatusInternalServerError) 49 | return 50 | } 51 | 52 | json.NewEncoder(rw).Encode(playgrounds) 53 | } 54 | 55 | type PlaygroundConfigurationResponse struct { 56 | Id string `json:"id"` 57 | Domain string `json:"domain"` 58 | DefaultDinDInstanceImage string `json:"default_dind_instance_image"` 59 | AvailableDinDInstanceImages []string `json:"available_dind_instance_images"` 60 | AllowWindowsInstances bool `json:"allow_windows_instances"` 61 | DefaultSessionDuration time.Duration `json:"default_session_duration"` 62 | DindVolumeSize string `json:"dind_volume_size"` 63 | } 64 | 65 | func GetCurrentPlayground(rw http.ResponseWriter, req *http.Request) { 66 | playground := core.PlaygroundFindByDomain(req.Host) 67 | if playground == nil { 68 | log.Printf("Playground for domain %s was not found!", req.Host) 69 | rw.WriteHeader(http.StatusBadRequest) 70 | return 71 | } 72 | json.NewEncoder(rw).Encode(PlaygroundConfigurationResponse{ 73 | Id: playground.Id, 74 | Domain: playground.Domain, 75 | DefaultDinDInstanceImage: playground.DefaultDinDInstanceImage, 76 | AvailableDinDInstanceImages: playground.AvailableDinDInstanceImages, 77 | AllowWindowsInstances: playground.AllowWindowsInstances, 78 | DefaultSessionDuration: playground.DefaultSessionDuration, 79 | DindVolumeSize: playground.DindVolumeSize, 80 | }) 81 | } 82 | 83 | func ValidateToken(req *http.Request) bool { 84 | _, password, ok := req.BasicAuth() 85 | if !ok { 86 | return false 87 | } 88 | 89 | if password != config.AdminToken { 90 | return false 91 | } 92 | 93 | return true 94 | } 95 | -------------------------------------------------------------------------------- /www/assets/xterm/addons/fit/fit.js: -------------------------------------------------------------------------------- 1 | (function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof 
self!=="undefined"){g=self}else{g=this}g.fit = f()}})(function(){var define,module,exports;return (function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i, )` - creates a Zmodem.Sentry\n * on the passed WebSocket object. The Object passed is optional and\n * can contain:\n * - noTerminalWriteOutsideSession: Suppress writes from the Sentry\n * object to the Terminal while there is no active Session. This\n * is necessary for compatibility with, for example, the\n * `attach.js` addon.\n *\n * - event `zmodemDetect` - fired on Zmodem.Sentry’s `on_detect` callback.\n * Passes the zmodem.js Detection object.\n *\n * - event `zmodemRetract` - fired on Zmodem.Sentry’s `on_retract` callback.\n *\n * You’ll need to provide logic to handle uploads and downloads.\n * See zmodem.js’s documentation for more details.\n *\n * **IMPORTANT:** After you confirm() a zmodem.js Detection, if you have\n * used the `attach` or `terminado` addons, you’ll need to suspend their\n * operation for the duration of the ZMODEM session. (The demo does this\n * via `detach()` and a re-`attach()`.)\n */\n\nlet zmodem: any;\n\nexport interface IZmodemOptions {\n noTerminalWriteOutsideSession?: boolean;\n}\n\nfunction zmodemAttach(ws: WebSocket, opts: IZmodemOptions = {}): void {\n const term = this;\n const senderFunc = (octets: ArrayLike) => ws.send(new Uint8Array(octets));\n\n let zsentry: any;\n\n function shouldWrite(): boolean {\n return !!zsentry.get_confirmed_session() || !opts.noTerminalWriteOutsideSession;\n }\n\n zsentry = new zmodem.Sentry({\n to_terminal: (octets: ArrayLike) => {\n if (shouldWrite()) {\n term.write(\n String.fromCharCode.apply(String, octets)\n );\n }\n },\n sender: senderFunc,\n on_retract: () => (term).emit('zmodemRetract'),\n on_detect: (detection: any) => (term).emit('zmodemDetect', detection)\n });\n\n function handleWSMessage(evt: MessageEvent): void {\n\n // In testing with xterm.js’s demo the first message was\n // always text even if the rest were binary. While that\n // may be specific to xterm.js’s demo, ultimately we\n // should reject anything that isn’t binary.\n if (typeof evt.data === 'string') {\n if (shouldWrite()) {\n term.write(evt.data);\n }\n }\n else {\n zsentry.consume(evt.data);\n }\n }\n\n ws.binaryType = 'arraybuffer';\n ws.addEventListener('message', handleWSMessage);\n}\n\nexport function apply(terminalConstructor: typeof Terminal): void {\n zmodem = (typeof window === 'object') ? (window).Zmodem : {Browser: null}; // Nullify browser for tests\n\n (terminalConstructor.prototype).zmodemAttach = zmodemAttach;\n (terminalConstructor.prototype).zmodemBrowser = zmodem.Browser;\n}\n",null],"names":[],"mappings":"ACAA;;;ADoCA;AAMA;AAAA;AACA;AACA;AAEA;AAEA;AACA;AACA;AAEA;AACA;AACA;AACA;AAGA;AACA;AACA;AACA;AACA;AACA;AAEA;AAMA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AAEA;AACA;AACA;AAEA;AACA;AAEA;AACA;AACA;AALA;"} -------------------------------------------------------------------------------- /handlers/exam_run.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Specialized version of exec that is intended to run uploaded code. 
3 | * It does the following: 4 | * - Checks to make sure that the exam has already been uploaded AND compiled. 5 | * - Runs the make check target in the corresponding Makefile. 6 | */ 7 | 8 | package handlers 9 | 10 | import ( 11 | "strings" 12 | "io" 13 | "log" 14 | "net/http" 15 | "encoding/json" 16 | "fmt" 17 | 18 | "github.com/gorilla/mux" 19 | "github.com/play-with-docker/play-with-docker/storage" 20 | ) 21 | 22 | func ExamRun(rw http.ResponseWriter, req *http.Request) { 23 | vars := mux.Vars(req) 24 | sessionId := vars["sessionId"] 25 | instanceName := vars["instanceName"] 26 | 27 | s, err := core.SessionGet(sessionId) 28 | if err == storage.NotFoundError { 29 | rw.WriteHeader(http.StatusNotFound) 30 | return 31 | } else if err != nil { 32 | rw.WriteHeader(http.StatusInternalServerError) 33 | return 34 | } 35 | i := core.InstanceGet(s, instanceName) 36 | 37 | examName := req.URL.Query().Get("examname") 38 | 39 | // Step 1: Make sure the uploaded code has been compiled. 40 | var lsCmd = fmt.Sprintf( 41 | `{ "command": ["ls", "exams/%s/%s"] }`, 42 | examName, examName + "_submission") 43 | 44 | var er1 execRequest // from exec.go handler 45 | 46 | err = json.NewDecoder(strings.NewReader(lsCmd)).Decode(&er1) 47 | if err != nil { 48 | log.Fatal(err) 49 | rw.WriteHeader(http.StatusBadRequest) 50 | return 51 | } 52 | 53 | cmdout, err := core.InstanceExecOutput(i, er1.Cmd) 54 | 55 | if err != nil { 56 | log.Printf("Error executing command; error: %s, cmdout: %s", err, cmdout) 57 | rw.WriteHeader(http.StatusInternalServerError) 58 | return 59 | } 60 | 61 | buf := new(strings.Builder) 62 | io.Copy(buf, cmdout) 63 | 64 | // If ls returns "No such file or directory" for the test name executable, 65 | // then it has not been successfully compiled. Respond with 404 NOT FOUND. 66 | // Otherwise, we can continue to step 2. 67 | if strings.Contains(buf.String(), "No such file or directory") { 68 | rw.WriteHeader(http.StatusNotFound) 69 | return 70 | } 71 | 72 | // Step 2: Run the make check target 73 | // The -s flag could be used instead of --no-print-directory, but -s strips a lot more. 
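	// For a hypothetical exam named "omp-exam" this expands to the exec command:
	//   make check --no-print-directory -C exams/omp-exam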
74 | var makeCheckCmd = fmt.Sprintf(`{ "command": ["make", "check", "--no-print-directory", "-C", "%s"] }`, "exams/" + examName) 75 | 76 | var er2 execRequest // from exec.go handler 77 | 78 | err = json.NewDecoder(strings.NewReader(makeCheckCmd)).Decode(&er2) 79 | if err != nil { 80 | log.Fatal(err) 81 | rw.WriteHeader(http.StatusInternalServerError) 82 | return 83 | } 84 | 85 | // Step 2.5: Check if make compilation was successful 86 | 87 | cmdout2, err := core.InstanceExecOutput(i, er2.Cmd) 88 | 89 | if err != nil { 90 | log.Printf("Error executing command; error: %s, cmdout: %s", err, cmdout) 91 | rw.WriteHeader(http.StatusInternalServerError) 92 | return 93 | } 94 | 95 | rw.Header().Set("content-type", "text/html") 96 | 97 | if _,err = io.Copy(rw, cmdout2); err != nil { 98 | log.Println(err) 99 | rw.WriteHeader(http.StatusInternalServerError) 100 | return 101 | } 102 | 103 | rw.WriteHeader(http.StatusOK) 104 | return 105 | 106 | } 107 | -------------------------------------------------------------------------------- /pwd/types/playground_test.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | "time" 7 | 8 | "github.com/satori/go.uuid" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestPlayground_Extras_GetInt(t *testing.T) { 13 | p := Playground{ 14 | Id: uuid.NewV4().String(), 15 | Domain: "localhost", 16 | DefaultDinDInstanceImage: "franel/dind", 17 | AllowWindowsInstances: false, 18 | DefaultSessionDuration: time.Hour * 4, 19 | Extras: PlaygroundExtras{ 20 | "intFromInt": 10, 21 | "intFromFloat": 32.0, 22 | "intFromString": "15", 23 | }, 24 | } 25 | 26 | b, err := json.Marshal(p) 27 | assert.Nil(t, err) 28 | 29 | var p2 Playground 30 | json.Unmarshal(b, &p2) 31 | 32 | v, found := p2.Extras.GetInt("intFromInt") 33 | assert.True(t, found) 34 | assert.Equal(t, 10, v) 35 | 36 | v, found = p2.Extras.GetInt("intFromFloat") 37 | assert.True(t, found) 38 | assert.Equal(t, 32, v) 39 | 40 | v, found = p2.Extras.GetInt("intFromString") 41 | assert.True(t, found) 42 | assert.Equal(t, 15, v) 43 | } 44 | 45 | func TestPlayground_Extras_GetString(t *testing.T) { 46 | p := Playground{ 47 | Id: uuid.NewV4().String(), 48 | Domain: "localhost", 49 | DefaultDinDInstanceImage: "franel/dind", 50 | AllowWindowsInstances: false, 51 | DefaultSessionDuration: time.Hour * 4, 52 | Extras: PlaygroundExtras{ 53 | "stringFromInt": 10, 54 | "stringFromFloat": 32.3, 55 | "stringFromString": "15", 56 | "stringFromBool": false, 57 | }, 58 | } 59 | 60 | b, err := json.Marshal(p) 61 | assert.Nil(t, err) 62 | 63 | var p2 Playground 64 | json.Unmarshal(b, &p2) 65 | 66 | v, found := p2.Extras.GetString("stringFromInt") 67 | assert.True(t, found) 68 | assert.Equal(t, "10", v) 69 | 70 | v, found = p2.Extras.GetString("stringFromFloat") 71 | assert.True(t, found) 72 | assert.Equal(t, "32.3", v) 73 | 74 | v, found = p2.Extras.GetString("stringFromString") 75 | assert.True(t, found) 76 | assert.Equal(t, "15", v) 77 | 78 | v, found = p2.Extras.GetString("stringFromBool") 79 | assert.True(t, found) 80 | assert.Equal(t, "false", v) 81 | } 82 | 83 | func TestPlayground_Extras_GetDuration(t *testing.T) { 84 | p := Playground{ 85 | Id: uuid.NewV4().String(), 86 | Domain: "localhost", 87 | DefaultDinDInstanceImage: "franel/dind", 88 | AllowWindowsInstances: false, 89 | DefaultSessionDuration: time.Hour * 4, 90 | Extras: PlaygroundExtras{ 91 | "durationFromInt": 10, 92 | "durationFromFloat": 32.3, 93 | 
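			// The "4h" string exercises the time.ParseDuration branch of GetDuration.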
"durationFromString": "4h", 94 | "durationFromDuration": time.Hour * 3, 95 | }, 96 | } 97 | 98 | b, err := json.Marshal(p) 99 | assert.Nil(t, err) 100 | 101 | var p2 Playground 102 | json.Unmarshal(b, &p2) 103 | 104 | v, found := p2.Extras.GetDuration("durationFromInt") 105 | assert.True(t, found) 106 | assert.Equal(t, time.Duration(10), v) 107 | 108 | v, found = p2.Extras.GetDuration("durationFromFloat") 109 | assert.True(t, found) 110 | assert.Equal(t, time.Duration(32), v) 111 | 112 | v, found = p2.Extras.GetDuration("durationFromString") 113 | assert.True(t, found) 114 | assert.Equal(t, time.Hour*4, v) 115 | 116 | v, found = p2.Extras.GetDuration("durationFromDuration") 117 | assert.True(t, found) 118 | assert.Equal(t, time.Hour*3, v) 119 | } 120 | -------------------------------------------------------------------------------- /dockerfiles/dind/ee/key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJJwIBAAKCAgEAqqa5j0cHgK7WUOhxw3mAkgt1sUDsuoWpHN1WWuiRJXWxKFYz 3 | nOzKmTT70yKfG/1vBztdImxBmdN78nqtdFLIhUImJnYZ9UqhIpWZxUqPPdj0qRmj 4 | e1Tt9ublJyHHCanNztGDnbzQnafa6GWfyu8/Z7gt+3C8uvi6tqC69VmAMZ3CR3nV 5 | Ity0AgvtqhASFS/pnfchPGpha2Gw4WzapiHACdippKgT2tD2vkHtRcakCOcyTk+a 6 | ziYBdyW1nqhaZtGY8Qj07dHD2OJckNlPZcjFyZkuPkdMHpJGV8sGlNZiYtySSJTE 7 | gTFplqE68eZi1Vji77uOIWHq3zpjfiJl0VquDQDy4uO9tvzi8wphZ/NX3VBzhcsY 8 | Nk69t2+iOrDMlHyydJakvrYSezf7XEHt503kJvXXK7g2yaYyUK0JI6vQhRbMTRFw 9 | U+hBTEafKA7rTgMh34LlXqnR+ZM+r6azYi30k6qmUhHkKJEZB08saWeIKwQQoqHL 10 | AMZvMizaoXIN8OOAjfRv96cpYLu8WEhFx2J55cbJNDB8QxWeDU6NFAbsiLqM6Hjy 11 | Eutxr7MgbAcyszv0WTbJz5t5JWeEJ093SHtHkbQsrv6a0ZhDA/8wttH0XALohu48 12 | lfTQf/Zmy3g4wAJ4FtVIa1qQDIjm0GTeIefqH3NBFxR1bzlmqgZmF5TZRHkCAwEA 13 | AQKCAgBDSNmBFJBwvH7kB8JTQGThMIOHEAJGyMyVBPA3h9sy2eSv8s0G4pY/MhTY 14 | ep4hext7znw6RlTXQfts79HUO4+0exBvucEiZfqCmFm44Fz6FcDhq6o5xpLM9t0D 15 | QN4pgToUgadTWk8m2jgFyYvnh82IJ6Z5rUm8rrVvrJAKjO9uoLUpWXAf/sU6yVk7 16 | 5Ho8wFdsYTRJjeg7XplPSIwtVMFTIIpC0cKCVEH1YikbiebDW+UJ23k+Lt4FDGk/ 17 | 1UFPqPSUlON9oWeG7DlzIzua9j6F7k+9Xn80zpfNpc9CgATq1e0XkRCpn8HyEkAb 18 | gKsXU6SmwVyY7PKecXcpFIbwtMBK2zTG4VrmgsjwptK1S8lbqYftQeTxvNYdhjxA 19 | gdkBG5qIBkLcr8m796V2fDtJ6wvsVi+yDh+H7T8/vZuB9iaHJ3L1v36WiTODLTFW 20 | /OlgfimiBXuK8Z1EiB6+w522TdmhKOiWfjHdl7JSzsOla5i5cbcdeaD4AUzlmvGZ 21 | RCBE9Cd7RWGmDxnWz4NWFepwSfnOOQI9W95QkcRgwH61Y2axcdio0xJpQnUXiKHH 22 | rHhPTW0eDD7yoIqqKKK3evCOxpbJy6M/+fVqNZYWEfJ0cb7+Ska6aW3rUv8aeYFj 23 | xzitqKuL/0nFKpeppAkvXvoZf/mM0QtG+lgUHgOngwweYrrkwQKCAQEA3JtXFZDQ 24 | mIfkv0mAiwV5QbzQ63OxkO0MtPqSq50I8F6S+fIz+ILhxbMjcGq5dCbnJCFGJqn3 25 | 7PXrT6nFXZ8j2/dcXmtxala2VAAq+GyA0TY/DQ6seTaKhsLq50vnzMXHT0pU8s/2 26 | 4n7euf66lzQ1ByKrqXZCAyNajUXPoL37HFgFtCrEJlvi//K8x+tHr3QgF4Si8l31 27 | A1HLq2+KbppWXzc//knanstsCIxPvEelV0GZn3r5opiOczS30rYo87wKI9aCRgLZ 28 | GEKrMwlNVwwhScJd4msEYMsXUUxzDcNr5oi+iQmEDJpBKd98+3/Sp9XWVXUbik9a 29 | QfOvUcQMfDc1pwKCAQEAxgeiaYBb369Z6CW7rC3b3YnwOBJVK23PYcpN2DtnhRRI 30 | ARZgZBhwYKxDQ3djXZCiPEVtwO4WO8fCcY0GUFP2aVWuaokGjk1gNFwN6F046OdY 31 | WGETEe7AUCLuuwAv7Aqqug3Y6bxCtPGN3MNHT8qjTH99EMHx8L2+0UiIXnQreGmH 32 | VL/HEnpfDDZK4nfrwxdJOSueGdyOlflUIpDgmScIbKvIsyKhB2UstFBsCuDzhfE/ 33 | a0VWDnZHgZPA/JhyhRy5eL9QGOqsdnzSxgvEbOyCR5p2jtO9otFw9fxpxF7uA0Yq 34 | EBye0gidmnF/FKDNK0iggtk34LTrDv2fz4tclXM43wKCAQBY79NC4XgHFjoVGBfX 35 | dCR3aRy8346Fg9uslpxMzWlOJoKQe6GSHYnRvy+ZYZ1Ug16KBVQXwEwwXtA39JSZ 36 | 8s9tHaNCeYRmv4CQCuVH885XCcyPggvsbh2YyLoU91gDCPUaNThcD5VTqJw4VcZ5 37 | sNV0A/k6v29LfpRCAhP7lLvIqH/cK6WaZU71qrGK04K57FIHyTQ8C778UJyQh85C 38 | 
WrxZdJe696FIhXAPXinDGQtCSzMYxWYgs+ox7d3x9/g4kuVvn0oz2XAWRMJqN+TT 39 | JBPDfbWF02kXcKj84Jo9wTwd26Ec9BYlUobUz8G+TsDpYt8e4rBwqR8VGZ3jk+sI 40 | pOVfAoIBAA78xO33KPzk6IkJUgrV7a32opeby5Zd2TQte3bCCDOqNUjfyKvKrbaj 41 | UvPoNTz/lUe6eXQAkO41UCIH6lJqCFwwf+LQPA7JDF7qGKNdatE1sRn/PtI8n5Fx 42 | E2BTw0y6AfHS2nfWJ7ZKEdKDdQI08+b2PyDljMoLkkWEl82OPTv/wJ5JZWegm1Dx 43 | SvmY2d8KBCCvjGeoqaHwHM4A6P6uVZTj62yjUkyc+6Up8QNhwwyAFayosrqleQP1 44 | isWTRBeO9PqOgCFioWrWR511hog33iRNLGvi2pdYApSbZeXWyWy2Arj1cY+z1zm5 45 | HUUSZnTAKmW8yt3W03Nu/olWossszUECggEAD+dqDccmWF30yg82mxIMPb8pMV27 46 | +ciQssiibGmhFvPcIfzish9FunXqLG7q+4M4M+O4WQ9unuaTH+z9TU7w3Foo4Xdf 47 | GePuwmZdpuYxClHAsNALuKWEJcjfFOdETLkAbk81+ghtyFblkPPI82wofs4K8OII 48 | 1KPPDKoxeXmKXVF1UmOJX1KFyMnEjv0+Z1GrHnNV4703cNTMpDybaGpHsE77Vqd0 49 | ToZY9VG9eDLzaB6n5emSyFGBG73WQFU4EbLKjEBxtthgu8J9b17x96eF1NGZsEl1 50 | wEJvZpg7v6wyHK5XcYpwLY19+0khtvXwA7KKEr+sHqzF6arIqhl5hDLDAQ== 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /pwd/types/playground.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "strconv" 5 | "time" 6 | ) 7 | 8 | type PlaygroundExtras map[string]interface{} 9 | 10 | func (e PlaygroundExtras) Get(name string) (interface{}, bool) { 11 | v, f := e[name] 12 | return v, f 13 | } 14 | func (e PlaygroundExtras) GetInt(name string) (int, bool) { 15 | v, f := e[name] 16 | if f { 17 | if r, ok := v.(int); ok { 18 | return r, ok 19 | } else if r, ok := v.(float64); ok { 20 | return int(r), ok 21 | } else if r, ok := v.(string); ok { 22 | if v, err := strconv.Atoi(r); err != nil { 23 | return 0, false 24 | } else { 25 | return v, true 26 | } 27 | } 28 | return v.(int), f 29 | } else { 30 | return 0, f 31 | } 32 | } 33 | 34 | func (e PlaygroundExtras) GetString(name string) (string, bool) { 35 | v, f := e[name] 36 | if f { 37 | if r, ok := v.(int); ok { 38 | return strconv.Itoa(r), ok 39 | } else if r, ok := v.(float64); ok { 40 | return strconv.FormatFloat(r, 'g', -1, 64), ok 41 | } else if r, ok := v.(bool); ok { 42 | return strconv.FormatBool(r), ok 43 | } else if r, ok := v.(string); ok { 44 | return r, ok 45 | } else { 46 | return "", false 47 | } 48 | } else { 49 | return "", f 50 | } 51 | } 52 | 53 | func (e PlaygroundExtras) GetDuration(name string) (time.Duration, bool) { 54 | v, f := e[name] 55 | if f { 56 | if r, ok := v.(int); ok { 57 | return time.Duration(r), ok 58 | } else if r, ok := v.(float64); ok { 59 | return time.Duration(r), ok 60 | } else if r, ok := v.(string); ok { 61 | if d, err := time.ParseDuration(r); err != nil { 62 | return time.Duration(0), false 63 | } else { 64 | return d, true 65 | } 66 | } else { 67 | return time.Duration(0), false 68 | } 69 | } else { 70 | return time.Duration(0), f 71 | } 72 | } 73 | 74 | type Playground struct { 75 | Id string `json:"id" bson:"id"` 76 | Domain string `json:"domain" bson:"domain"` 77 | DefaultDinDInstanceImage string `json:"default_dind_instance_image" bson:"default_dind_instance_image"` 78 | AvailableDinDInstanceImages []string `json:"available_dind_instance_images" bson:"available_dind_instance_images"` 79 | AllowWindowsInstances bool `json:"allow_windows_instances" bson:"allow_windows_instances"` 80 | DefaultSessionDuration time.Duration `json:"default_session_duration" bson:"default_session_duration"` 81 | DindVolumeSize string `json:"dind_volume_size" bson:"dind_volume_size"` 82 | Extras PlaygroundExtras `json:"extras" bson:"extras"` 83 | AssetsDir string 
`json:"assets_dir" bson:"assets_dir"` 84 | Tasks []string `json:"tasks" bson:"tasks"` 85 | GithubClientID string `json:"github_client_id" bson:"github_client_id"` 86 | GithubClientSecret string `json:"github_client_secret" bson:"github_client_secret"` 87 | GoogleClientID string `json:"google_client_id" bson:"google_client_id"` 88 | GoogleClientSecret string `json:"google_client_secret" bson:"google_client_secret"` 89 | DockerClientID string `json:"docker_client_id" bson:"docker_client_id"` 90 | DockerClientSecret string `json:"docker_client_secret" bson:"docker_client_secret"` 91 | DockerHost string `json:"docker_host" bson:"docker_host"` 92 | MaxInstances int `json:"max_instances" bson:"max_instances"` 93 | } 94 | -------------------------------------------------------------------------------- /dockerfiles/dind/copy_certs.ps1: -------------------------------------------------------------------------------- 1 | param ( 2 | [Parameter(Mandatory = $true)] 3 | [string] $Node, 4 | [Parameter(Mandatory = $true)] 5 | [string] $SessionId, 6 | [Parameter(Mandatory = $true)] 7 | [string] $FQDN 8 | ) 9 | 10 | 11 | function GetDirectUrlFromIp ($ip) { 12 | $ip_dash=$ip -replace "\.","-" 13 | $url="https://ip${ip_dash}-${SessionId}.direct.${FQDN}" 14 | return $url 15 | } 16 | 17 | function WaitForUrl ($url) { 18 | write-host $url 19 | do { 20 | try{ 21 | invoke-webrequest -UseBasicParsing -uri $url | Out-Null 22 | } catch {} 23 | $status = $? 24 | sleep 1 25 | } until($status) 26 | } 27 | 28 | function GetNodeRoutableIp ($nodeName) { 29 | $JQFilter='.instances[] | select (.hostname == \"{0}\") | .routable_ip' -f $nodeName 30 | $rip = (invoke-webrequest -UseBasicParsing -uri "https://$FQDN/sessions/$SessionId").Content | jq -r $JQFilter 31 | 32 | IF([string]::IsNullOrEmpty($rip)) { 33 | Write-Host "Could not fetch IP for node $nodeName" 34 | exit 1 35 | } 36 | return $rip 37 | } 38 | 39 | function Set-UseUnsafeHeaderParsing 40 | { 41 | param( 42 | [Parameter(Mandatory,ParameterSetName='Enable')] 43 | [switch]$Enable, 44 | 45 | [Parameter(Mandatory,ParameterSetName='Disable')] 46 | [switch]$Disable 47 | ) 48 | 49 | $ShouldEnable = $PSCmdlet.ParameterSetName -eq 'Enable' 50 | 51 | $netAssembly = [Reflection.Assembly]::GetAssembly([System.Net.Configuration.SettingsSection]) 52 | 53 | if($netAssembly) 54 | { 55 | $bindingFlags = [Reflection.BindingFlags] 'Static,GetProperty,NonPublic' 56 | $settingsType = $netAssembly.GetType('System.Net.Configuration.SettingsSectionInternal') 57 | 58 | $instance = $settingsType.InvokeMember('Section', $bindingFlags, $null, $null, @()) 59 | 60 | if($instance) 61 | { 62 | $bindingFlags = 'NonPublic','Instance' 63 | $useUnsafeHeaderParsingField = $settingsType.GetField('useUnsafeHeaderParsing', $bindingFlags) 64 | 65 | if($useUnsafeHeaderParsingField) 66 | { 67 | $useUnsafeHeaderParsingField.SetValue($instance, $ShouldEnable) 68 | } 69 | } 70 | } 71 | } 72 | 73 | 74 | $ProgressPreference = 'SilentlyContinue' 75 | $ErrorActionPreference = 'Stop' 76 | 77 | Set-UseUnsafeHeaderParsing -Enable 78 | 79 | Start-Transcript -path ("C:\{0}.log" -f $MyInvocation.MyCommand.Name) -append 80 | 81 | add-type @" 82 | using System.Net; 83 | using System.Security.Cryptography.X509Certificates; 84 | 85 | public class IDontCarePolicy : ICertificatePolicy { 86 | public IDontCarePolicy() {} 87 | public bool CheckValidationResult( 88 | ServicePoint sPoint, X509Certificate cert, 89 | WebRequest wRequest, int certProb) { 90 | return true; 91 | } 92 | } 93 | "@ 94 | 95 | 
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 96 | 97 | [System.Net.ServicePointManager]::CertificatePolicy = new-object IDontCarePolicy 98 | 99 | 100 | $dtr_ip = GetNodeRoutableIp $Node 101 | $dtr_url = GetDirectUrlFromIp $dtr_ip 102 | $dtr_hostname = $dtr_url -replace "https://","" 103 | 104 | WaitForUrl "${dtr_url}/ca" 105 | 106 | invoke-webrequest -UseBasicParsing -uri "$dtr_url/ca" -o c:\ca.crt 107 | 108 | $cert = new-object System.Security.Cryptography.X509Certificates.X509Certificate2 c:\ca.crt 109 | $store = new-object System.Security.Cryptography.X509Certificates.X509Store('Root','localmachine') 110 | $store.Open('ReadWrite') 111 | $store.Add($cert) 112 | $store.Close() 113 | 114 | Stop-Transcript 115 | -------------------------------------------------------------------------------- /www/assets/xterm/addons/terminado/terminado.js: -------------------------------------------------------------------------------- 1 | (function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.terminado = f()}})(function(){var define,module,exports;return (function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i 0 { 35 | cookie, err := ReadCookie(req) 36 | if err != nil { 37 | // User it not a human 38 | rw.WriteHeader(http.StatusForbidden) 39 | return 40 | } 41 | userId = cookie.Id 42 | } 43 | 44 | reqDur := req.Form.Get("session-duration") 45 | stack := req.Form.Get("stack") 46 | stackName := req.Form.Get("stack_name") 47 | imageName := req.Form.Get("image_name") 48 | 49 | if stack != "" { 50 | stack = formatStack(stack) 51 | if ok, err := stackExists(stack); err != nil { 52 | log.Printf("Error retrieving stack: %s", err) 53 | rw.WriteHeader(http.StatusInternalServerError) 54 | return 55 | } else if !ok { 56 | log.Printf("Stack [%s] could not be found", stack) 57 | rw.WriteHeader(http.StatusBadRequest) 58 | return 59 | } 60 | 61 | } 62 | 63 | var duration time.Duration 64 | if reqDur != "" { 65 | d, err := time.ParseDuration(reqDur) 66 | if err != nil { 67 | rw.WriteHeader(http.StatusBadRequest) 68 | return 69 | } 70 | if d > playground.DefaultSessionDuration { 71 | log.Printf("Specified session duration was %s but maximum allowed by this playground is %s\n", d.String(), playground.DefaultSessionDuration.String()) 72 | rw.WriteHeader(http.StatusBadRequest) 73 | return 74 | } 75 | duration = d 76 | } else { 77 | duration = playground.DefaultSessionDuration 78 | } 79 | 80 | sConfig := types.SessionConfig{Playground: playground, UserId: userId, Duration: duration, Stack: stack, StackName: stackName, ImageName: imageName} 81 | s, err := core.SessionNew(context.Background(), sConfig) 82 | if err != nil { 83 | if provisioner.OutOfCapacity(err) { 84 | http.Redirect(rw, req, "/ooc", http.StatusFound) 85 | return 86 | } 87 | log.Printf("%#v \n", err) 88 | http.Redirect(rw, req, "/500", http.StatusInternalServerError) 89 | return 90 | //TODO: Return some error code 91 | } else { 92 
| hostname := req.Host 93 | // If the request is an AJAX request rather than a form post, return the sessionId in the body 94 | if req.Header.Get("X-Requested-With") == "XMLHttpRequest" { 95 | resp := NewSessionResponse{SessionId: s.Id, Hostname: hostname} 96 | rw.Header().Set("Content-Type", "application/json") 97 | json.NewEncoder(rw).Encode(resp) 98 | return 99 | } 100 | 101 | http.Redirect(rw, req, fmt.Sprintf("/p/%s", s.Id), http.StatusFound) 102 | } 103 | } 104 | 105 | func formatStack(stack string) string { 106 | if !strings.HasSuffix(stack, ".yml") { 107 | // If it doesn't end with ".yml", assume no file was specified and default to "stack.yml" 108 | stack = path.Join(stack, "stack.yml") 109 | } 110 | if strings.HasPrefix(stack, "/") { 111 | // The stack path has no host, so fall back to our own stacks repo. 112 | stack = fmt.Sprintf("%s%s", "https://raw.githubusercontent.com/play-with-docker/stacks/master", stack) 113 | } 114 | return stack 115 | } 116 | 117 | func stackExists(stack string) (bool, error) { 118 | resp, err := http.Head(stack) 119 | if err != nil { 120 | return false, err 121 | } 122 | defer resp.Body.Close() 123 | 124 | return resp.StatusCode == 200, nil 125 | } 126 | -------------------------------------------------------------------------------- /k8s/factory.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "crypto/tls" 5 | "encoding/json" 6 | "fmt" 7 | "net" 8 | "net/http" 9 | "net/url" 10 | "time" 11 | 12 | "github.com/docker/go-connections/tlsconfig" 13 | "github.com/play-with-docker/play-with-docker/pwd/types" 14 | "github.com/play-with-docker/play-with-docker/router" 15 | "k8s.io/apimachinery/pkg/runtime/schema" 16 | "k8s.io/apimachinery/pkg/runtime/serializer" 17 | "k8s.io/client-go/kubernetes" 18 | "k8s.io/client-go/kubernetes/scheme" 19 | "k8s.io/client-go/rest" 20 | ) 21 | 22 | type FactoryApi interface { 23 | GetForInstance(instance *types.Instance) (*kubernetes.Clientset, error) 24 | GetKubeletForInstance(instance *types.Instance) (*KubeletClient, error) 25 | } 26 | 27 | func NewClient(instance *types.Instance, proxyHost string) (*kubernetes.Clientset, error) { 28 | var durl string 29 | 30 | host := router.EncodeHost(instance.SessionId, instance.RoutableIP, router.HostOpts{EncodedPort: 6443}) 31 | 32 | var tlsConfig *tls.Config 33 | tlsConfig = tlsconfig.ClientDefault() 34 | tlsConfig.InsecureSkipVerify = true 35 | tlsConfig.ServerName = host 36 | 37 | var transport http.RoundTripper 38 | transport = &http.Transport{ 39 | DialContext: (&net.Dialer{ 40 | Timeout: 1 * time.Second, 41 | KeepAlive: 30 * time.Second, 42 | }).DialContext, 43 | TLSClientConfig: tlsConfig, 44 | MaxIdleConnsPerHost: 5, 45 | } 46 | 47 | durl = fmt.Sprintf("https://%s", proxyHost) 48 | 49 | cc := rest.ContentConfig{ 50 | ContentType: "application/json", 51 | GroupVersion: &schema.GroupVersion{Version: "v1"}, 52 | NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}, 53 | } 54 | restConfig := &rest.Config{ 55 | Host: durl, 56 | APIPath: "/api/", 57 | BearerToken: "31ada4fd-adec-460c-809a-9e56ceb75269", 58 | ContentConfig: cc, 59 | } 60 | 61 | transport, err := rest.HTTPWrappersForConfig(restConfig, transport) 62 | if err != nil { 63 | return nil, fmt.Errorf("Error wrapping transport %v", err) 64 | } 65 | cli := &http.Client{ 66 | Transport: transport, 67 | } 68 | 69 | rc, err := rest.RESTClientFor(restConfig) 70 | if err != nil { 71 | return nil, fmt.Errorf("Error creating K8s client %v", err) 72 | } 73 | rc.Client = cli 74 | 
75 | return kubernetes.New(rc), nil 76 | } 77 | 78 | func NewKubeletClient(instance *types.Instance, proxyHost string) (*KubeletClient, error) { 79 | var durl string 80 | 81 | host := router.EncodeHost(instance.SessionId, instance.RoutableIP, router.HostOpts{EncodedPort: 10255}) 82 | 83 | transport := &http.Transport{ 84 | DialContext: (&net.Dialer{ 85 | Timeout: 1 * time.Second, 86 | KeepAlive: 30 * time.Second, 87 | }).DialContext, 88 | MaxIdleConnsPerHost: 5, 89 | } 90 | 91 | durl = fmt.Sprintf("http://%s", host) 92 | transport.Proxy = http.ProxyURL(&url.URL{Host: proxyHost}) 93 | 94 | cli := &http.Client{ 95 | Transport: transport, 96 | } 97 | kc := &KubeletClient{client: cli, baseURL: durl} 98 | return kc, nil 99 | } 100 | 101 | type KubeletClient struct { 102 | client *http.Client 103 | baseURL string 104 | } 105 | 106 | func (c *KubeletClient) Get(path string) (*http.Response, error) { 107 | return c.client.Get(c.baseURL + path) 108 | } 109 | 110 | type metadata struct { 111 | Labels map[string]string 112 | } 113 | 114 | type item struct { 115 | Metadata metadata 116 | } 117 | 118 | type kubeletPodsResponse struct { 119 | Items []item 120 | } 121 | 122 | func (c *KubeletClient) IsManager() (bool, error) { 123 | res, err := c.client.Get(c.baseURL + "/pods") 124 | if err != nil { 125 | return false, err 126 | } 127 | podsData := &kubeletPodsResponse{} 128 | 129 | json.NewDecoder(res.Body).Decode(podsData) 130 | 131 | for _, i := range podsData.Items { 132 | for _, v := range i.Metadata.Labels { 133 | if v == "kube-apiserver" { 134 | return true, nil 135 | } 136 | } 137 | } 138 | 139 | return false, nil 140 | } 141 | -------------------------------------------------------------------------------- /dockerfiles/dind/ee/ucp-cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIGPDCCBSSgAwIBAgISA4MIK4JV9npV+QdQS7wVa48rMA0GCSqGSIb3DQEBCwUA 3 | MEoxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MSMwIQYDVQQD 4 | ExpMZXQncyBFbmNyeXB0IEF1dGhvcml0eSBYMzAeFw0xODAzMzEyMTQ3MjZaFw0x 5 | ODA2MjkyMTQ3MjZaMDQxMjAwBgNVBAMMKSouZGlyZWN0LmJldGEtaHlicmlkLnBs 6 | YXktd2l0aC1kb2NrZXIuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC 7 | AQEA6PQCi9Rqr7Ka1KXSGCfBQVzgPyx/hh+uST1dz7PDw2epghYyaqNByaQEVKNR 8 | 3ubPvOoASzhdJ1dZdyUzKUoU/jm8hgVK7HHdQDpFEX60az+r4Xo32R6WirG5+GXd 9 | hU3M0yRzbu0zZx7eVZognP/HcXJDhuf16hiHKmCr6MYXV4JY9xLMxExZOTB4fpGA 10 | Loiyvn2OEZAhREhiSX+6n4x7KJga8gYn/0f89o7up1DYQSwev+gQgRjTGlo1xrgu 11 | Oztekc3ydvbhGv7aL7Uj/zqPcVvXnDfnioQV7kEDcz8gupFyV7gZKolR1G8IQJdm 12 | TaYHguzFXF5Q3lKVWx19/CSZ8wIDAQABo4IDMDCCAywwDgYDVR0PAQH/BAQDAgWg 13 | MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0G 14 | A1UdDgQWBBTVloZoUI5vKAN+D1PTgtYBgU184zAfBgNVHSMEGDAWgBSoSmpjBH3d 15 | uubRObemRWXv86jsoTBvBggrBgEFBQcBAQRjMGEwLgYIKwYBBQUHMAGGImh0dHA6 16 | Ly9vY3NwLmludC14My5sZXRzZW5jcnlwdC5vcmcwLwYIKwYBBQUHMAKGI2h0dHA6 17 | Ly9jZXJ0LmludC14My5sZXRzZW5jcnlwdC5vcmcvMDQGA1UdEQQtMCuCKSouZGly 18 | ZWN0LmJldGEtaHlicmlkLnBsYXktd2l0aC1kb2NrZXIuY29tMIH+BgNVHSAEgfYw 19 | gfMwCAYGZ4EMAQIBMIHmBgsrBgEEAYLfEwEBATCB1jAmBggrBgEFBQcCARYaaHR0 20 | cDovL2Nwcy5sZXRzZW5jcnlwdC5vcmcwgasGCCsGAQUFBwICMIGeDIGbVGhpcyBD 21 | ZXJ0aWZpY2F0ZSBtYXkgb25seSBiZSByZWxpZWQgdXBvbiBieSBSZWx5aW5nIFBh 22 | cnRpZXMgYW5kIG9ubHkgaW4gYWNjb3JkYW5jZSB3aXRoIHRoZSBDZXJ0aWZpY2F0 23 | ZSBQb2xpY3kgZm91bmQgYXQgaHR0cHM6Ly9sZXRzZW5jcnlwdC5vcmcvcmVwb3Np 24 | dG9yeS8wggEDBgorBgEEAdZ5AgQCBIH0BIHxAO8AdQDbdK/uyynssf7KPnFtLOW5 25 | qrs294Rxg8ddnU83th+/ZAAAAWJ+PniYAAAEAwBGMEQCIDngZdWcYWY0fPfUGTqX 26 | 
/Vt2qx+PRN5DN+m13TnA37e2AiBHIi5kMSxlvKNc3xzuJrvt/RKaj9xsBLmc8+uW 27 | ckaEdAB2ACk8UZZUyDlluqpQ/FgH1Ldvv1h6KXLcpMMM9OVFR/R4AAABYn4+eLUA 28 | AAQDAEcwRQIhAMkf8SYdt1egjzBE6nzOrY+f4WMS/N6XWN+gFl0mQIkhAiBn9+GG 29 | 0XbLw33+WNJLUkau2ZdTo5kTw2qdUXdYpWJwrDANBgkqhkiG9w0BAQsFAAOCAQEA 30 | TAl62gFi+2l/yLItjNIrXeWh2ICH/epjeWlmF+rAb7Sb4iz9U8fsNBdDBQh25xJo 31 | 6nLOlS2NG0hdUScylCYyGJZe6PeQvGO+qSLDamXf1DvXWvzbmQOCUkejgD7Uwbol 32 | 5huuCAKoW4SsiaMku0J3545MEQx4Q5cPetsPawaByY5sgr2GZJzgM7lvtzr4hKWg 33 | x5QAns/bmcqe9LCJ2NLcgArliYu6dOHtS62kB7/Dz2DQRtCvpV553RaBe4k9Ruwl 34 | 0ndHvjEC5OWa5sW1hwow5W3PC7Db7s0zqpt63EITkhrUOqtqtkwOMYBAkFIIe1eR 35 | T5fSFAdirKUOt5GnRJ40qw== 36 | -----END CERTIFICATE----- 37 | -----BEGIN CERTIFICATE----- 38 | MIIEkjCCA3qgAwIBAgIQCgFBQgAAAVOFc2oLheynCDANBgkqhkiG9w0BAQsFADA/ 39 | MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT 40 | DkRTVCBSb290IENBIFgzMB4XDTE2MDMxNzE2NDA0NloXDTIxMDMxNzE2NDA0Nlow 41 | SjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUxldCdzIEVuY3J5cHQxIzAhBgNVBAMT 42 | GkxldCdzIEVuY3J5cHQgQXV0aG9yaXR5IFgzMIIBIjANBgkqhkiG9w0BAQEFAAOC 43 | AQ8AMIIBCgKCAQEAnNMM8FrlLke3cl03g7NoYzDq1zUmGSXhvb418XCSL7e4S0EF 44 | q6meNQhY7LEqxGiHC6PjdeTm86dicbp5gWAf15Gan/PQeGdxyGkOlZHP/uaZ6WA8 45 | SMx+yk13EiSdRxta67nsHjcAHJyse6cF6s5K671B5TaYucv9bTyWaN8jKkKQDIZ0 46 | Z8h/pZq4UmEUEz9l6YKHy9v6Dlb2honzhT+Xhq+w3Brvaw2VFn3EK6BlspkENnWA 47 | a6xK8xuQSXgvopZPKiAlKQTGdMDQMc2PMTiVFrqoM7hD8bEfwzB/onkxEz0tNvjj 48 | /PIzark5McWvxI0NHWQWM6r6hCm21AvA2H3DkwIDAQABo4IBfTCCAXkwEgYDVR0T 49 | AQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwfwYIKwYBBQUHAQEEczBxMDIG 50 | CCsGAQUFBzABhiZodHRwOi8vaXNyZy50cnVzdGlkLm9jc3AuaWRlbnRydXN0LmNv 51 | bTA7BggrBgEFBQcwAoYvaHR0cDovL2FwcHMuaWRlbnRydXN0LmNvbS9yb290cy9k 52 | c3Ryb290Y2F4My5wN2MwHwYDVR0jBBgwFoAUxKexpHsscfrb4UuQdf/EFWCFiRAw 53 | VAYDVR0gBE0wSzAIBgZngQwBAgEwPwYLKwYBBAGC3xMBAQEwMDAuBggrBgEFBQcC 54 | ARYiaHR0cDovL2Nwcy5yb290LXgxLmxldHNlbmNyeXB0Lm9yZzA8BgNVHR8ENTAz 55 | MDGgL6AthitodHRwOi8vY3JsLmlkZW50cnVzdC5jb20vRFNUUk9PVENBWDNDUkwu 56 | Y3JsMB0GA1UdDgQWBBSoSmpjBH3duubRObemRWXv86jsoTANBgkqhkiG9w0BAQsF 57 | AAOCAQEA3TPXEfNjWDjdGBX7CVW+dla5cEilaUcne8IkCJLxWh9KEik3JHRRHGJo 58 | uM2VcGfl96S8TihRzZvoroed6ti6WqEBmtzw3Wodatg+VyOeph4EYpr/1wXKtx8/ 59 | wApIvJSwtmVi4MFU5aMqrSDE6ea73Mj2tcMyo5jMd6jmeWUHK8so/joWUoHOUgwu 60 | X4Po1QYz+3dszkDqMp4fklxBwXRsW10KXzPMTZ+sOPAveyxindmjkW8lGy+QsRlG 61 | PfZ+G6Z6h7mjem0Y+iWlkYcV4PIWL1iwBi8saCbGS5jN2p8M+X+Q7UNKEkROb3N6 62 | KOqkqm57TH2H3eDJAkSnh6/DNFu0Qg== 63 | -----END CERTIFICATE----- 64 | -------------------------------------------------------------------------------- /storage/mock.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "github.com/play-with-docker/play-with-docker/pwd/types" 5 | "github.com/stretchr/testify/mock" 6 | ) 7 | 8 | type Mock struct { 9 | mock.Mock 10 | } 11 | 12 | func (m *Mock) SessionGet(id string) (*types.Session, error) { 13 | args := m.Called(id) 14 | return args.Get(0).(*types.Session), args.Error(1) 15 | } 16 | func (m *Mock) SessionGetAll() ([]*types.Session, error) { 17 | args := m.Called() 18 | return args.Get(0).([]*types.Session), args.Error(1) 19 | } 20 | func (m *Mock) SessionPut(session *types.Session) error { 21 | args := m.Called(session) 22 | return args.Error(0) 23 | } 24 | func (m *Mock) SessionDelete(id string) error { 25 | args := m.Called(id) 26 | return args.Error(0) 27 | } 28 | func (m *Mock) SessionCount() (int, error) { 29 | args := m.Called() 30 | return args.Int(0), args.Error(1) 31 | } 32 | func (m *Mock) InstanceGet(name 
string) (*types.Instance, error) { 33 | args := m.Called(name) 34 | return args.Get(0).(*types.Instance), args.Error(1) 35 | } 36 | func (m *Mock) InstancePut(instance *types.Instance) error { 37 | args := m.Called(instance) 38 | return args.Error(0) 39 | } 40 | func (m *Mock) InstanceDelete(name string) error { 41 | args := m.Called(name) 42 | return args.Error(0) 43 | } 44 | func (m *Mock) InstanceCount() (int, error) { 45 | args := m.Called() 46 | return args.Int(0), args.Error(1) 47 | } 48 | func (m *Mock) InstanceFindBySessionId(sessionId string) ([]*types.Instance, error) { 49 | args := m.Called(sessionId) 50 | return args.Get(0).([]*types.Instance), args.Error(1) 51 | } 52 | 53 | func (m *Mock) WindowsInstanceGetAll() ([]*types.WindowsInstance, error) { 54 | args := m.Called() 55 | return args.Get(0).([]*types.WindowsInstance), args.Error(1) 56 | } 57 | func (m *Mock) WindowsInstancePut(instance *types.WindowsInstance) error { 58 | args := m.Called(instance) 59 | return args.Error(0) 60 | } 61 | func (m *Mock) WindowsInstanceDelete(id string) error { 62 | args := m.Called(id) 63 | return args.Error(0) 64 | } 65 | func (m *Mock) ClientGet(id string) (*types.Client, error) { 66 | args := m.Called(id) 67 | return args.Get(0).(*types.Client), args.Error(1) 68 | } 69 | func (m *Mock) ClientPut(client *types.Client) error { 70 | args := m.Called(client) 71 | return args.Error(0) 72 | } 73 | func (m *Mock) ClientDelete(id string) error { 74 | args := m.Called(id) 75 | return args.Error(0) 76 | } 77 | func (m *Mock) ClientCount() (int, error) { 78 | args := m.Called() 79 | return args.Int(0), args.Error(1) 80 | } 81 | func (m *Mock) ClientFindBySessionId(sessionId string) ([]*types.Client, error) { 82 | args := m.Called(sessionId) 83 | return args.Get(0).([]*types.Client), args.Error(1) 84 | } 85 | func (m *Mock) LoginRequestPut(loginRequest *types.LoginRequest) error { 86 | args := m.Called(loginRequest) 87 | return args.Error(0) 88 | } 89 | func (m *Mock) LoginRequestGet(id string) (*types.LoginRequest, error) { 90 | args := m.Called(id) 91 | return args.Get(0).(*types.LoginRequest), args.Error(1) 92 | } 93 | func (m *Mock) LoginRequestDelete(id string) error { 94 | args := m.Called(id) 95 | return args.Error(0) 96 | } 97 | func (m *Mock) UserFindByProvider(providerName, providerUserId string) (*types.User, error) { 98 | args := m.Called(providerName, providerUserId) 99 | return args.Get(0).(*types.User), args.Error(1) 100 | } 101 | func (m *Mock) UserPut(user *types.User) error { 102 | args := m.Called(user) 103 | return args.Error(0) 104 | } 105 | func (m *Mock) UserGet(id string) (*types.User, error) { 106 | args := m.Called(id) 107 | return args.Get(0).(*types.User), args.Error(1) 108 | } 109 | func (m *Mock) PlaygroundPut(playground *types.Playground) error { 110 | args := m.Called(playground) 111 | return args.Error(0) 112 | } 113 | func (m *Mock) PlaygroundGet(id string) (*types.Playground, error) { 114 | args := m.Called(id) 115 | return args.Get(0).(*types.Playground), args.Error(1) 116 | } 117 | func (m *Mock) PlaygroundGetAll() ([]*types.Playground, error) { 118 | args := m.Called() 119 | return args.Get(0).([]*types.Playground), args.Error(1) 120 | } 121 | -------------------------------------------------------------------------------- /www/assets/xterm/addons/terminado/terminado.js.map: -------------------------------------------------------------------------------- 1 | 
{"version":3,"file":"terminado.js","sources":["../../../src/addons/terminado/terminado.ts","../../../node_modules/browser-pack/_prelude.js"],"sourcesContent":["/**\n * Copyright (c) 2016 The xterm.js authors. All rights reserved.\n * @license MIT\n *\n * This module provides methods for attaching a terminal to a terminado\n * WebSocket stream.\n */\n\nimport { Terminal } from 'xterm';\nimport { ITerminadoAddonTerminal } from './Interfaces';\n\n/**\n * Attaches the given terminal to the given socket.\n *\n * @param term The terminal to be attached to the given socket.\n * @param socket The socket to attach the current terminal.\n * @param bidirectional Whether the terminal should send data to the socket as well.\n * @param buffered Whether the rendering of incoming data should happen instantly or at a maximum\n * frequency of 1 rendering per 10ms.\n */\nexport function terminadoAttach(term: Terminal, socket: WebSocket, bidirectional: boolean, buffered: boolean): void {\n const addonTerminal = term;\n bidirectional = (typeof bidirectional === 'undefined') ? true : bidirectional;\n addonTerminal.__socket = socket;\n\n addonTerminal.__flushBuffer = () => {\n addonTerminal.write(addonTerminal.__attachSocketBuffer);\n addonTerminal.__attachSocketBuffer = null;\n };\n\n addonTerminal.__pushToBuffer = (data: string) => {\n if (addonTerminal.__attachSocketBuffer) {\n addonTerminal.__attachSocketBuffer += data;\n } else {\n addonTerminal.__attachSocketBuffer = data;\n setTimeout(addonTerminal.__flushBuffer, 10);\n }\n };\n\n addonTerminal.__getMessage = (ev: MessageEvent) => {\n const data = JSON.parse(ev.data);\n if (data[0] === 'stdout') {\n if (buffered) {\n addonTerminal.__pushToBuffer(data[1]);\n } else {\n addonTerminal.write(data[1]);\n }\n }\n };\n\n addonTerminal.__sendData = (data: string) => {\n socket.send(JSON.stringify(['stdin', data]));\n };\n\n addonTerminal.__setSize = (size: {rows: number, cols: number}) => {\n socket.send(JSON.stringify(['set_size', size.rows, size.cols]));\n };\n\n socket.addEventListener('message', addonTerminal.__getMessage);\n\n if (bidirectional) {\n addonTerminal._core.register(addonTerminal.onData(addonTerminal.__sendData));\n }\n addonTerminal._core.register(addonTerminal.onResize(addonTerminal.__setSize));\n\n socket.addEventListener('close', () => terminadoDetach(addonTerminal, socket));\n socket.addEventListener('error', () => terminadoDetach(addonTerminal, socket));\n}\n\n/**\n * Detaches the given terminal from the given socket\n *\n * @param term The terminal to be detached from the given socket.\n * @param socket The socket from which to detach the current terminal.\n */\nexport function terminadoDetach(term: Terminal, socket: WebSocket): void {\n const addonTerminal = term;\n addonTerminal.__dataListener.dispose();\n addonTerminal.__dataListener = undefined;\n\n socket = (typeof socket === 'undefined') ? 
addonTerminal.__socket : socket;\n\n if (socket) {\n socket.removeEventListener('message', addonTerminal.__getMessage);\n }\n\n delete addonTerminal.__socket;\n}\n\nexport function apply(terminalConstructor: typeof Terminal): void {\n /**\n * Attaches the current terminal to the given socket\n *\n * @param socket - The socket to attach the current terminal.\n * @param bidirectional - Whether the terminal should send data to the socket as well.\n * @param buffered - Whether the rendering of incoming data should happen instantly or at a\n * maximum frequency of 1 rendering per 10ms.\n */\n (terminalConstructor.prototype).terminadoAttach = function (socket: WebSocket, bidirectional: boolean, buffered: boolean): void {\n return terminadoAttach(this, socket, bidirectional, buffered);\n };\n\n /**\n * Detaches the current terminal from the given socket.\n *\n * @param socket The socket from which to detach the current terminal.\n */\n (terminalConstructor.prototype).terminadoDetach = function (socket: WebSocket): void {\n return terminadoDetach(this, socket);\n };\n}\n",null],"names":[],"mappings":"ACAA;;;ADoBA;AACA;AACA;AACA;AAEA;AACA;AACA;AACA;AAEA;AACA;AACA;AACA;AAAA;AACA;AACA;AACA;AACA;AAEA;AACA;AACA;AACA;AACA;AACA;AAAA;AACA;AACA;AACA;AACA;AAEA;AACA;AACA;AAEA;AACA;AACA;AAEA;AAEA;AACA;AACA;AACA;AAEA;AACA;AACA;AA/CA;AAuDA;AACA;AACA;AACA;AAEA;AAEA;AACA;AACA;AAEA;AACA;AAZA;AAcA;AASA;AACA;AACA;AAOA;AACA;AACA;AACA;AArBA;"} -------------------------------------------------------------------------------- /scheduler/task/check_swarm_status_test.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | dockerTypes "github.com/docker/docker/api/types" 8 | "github.com/docker/docker/api/types/swarm" 9 | "github.com/play-with-docker/play-with-docker/docker" 10 | "github.com/play-with-docker/play-with-docker/event" 11 | "github.com/play-with-docker/play-with-docker/pwd/types" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func TestCheckSwarmStatus_Name(t *testing.T) { 16 | e := &event.Mock{} 17 | f := &docker.FactoryMock{} 18 | 19 | task := NewCheckSwarmStatus(e, f) 20 | 21 | assert.Equal(t, "CheckSwarmStatus", task.Name()) 22 | e.M.AssertExpectations(t) 23 | f.AssertExpectations(t) 24 | } 25 | 26 | func TestCheckSwarmStatus_RunWhenInactive(t *testing.T) { 27 | d := &docker.Mock{} 28 | e := &event.Mock{} 29 | f := &docker.FactoryMock{} 30 | 31 | i := &types.Instance{ 32 | IP: "10.0.0.1", 33 | Name: "node1", 34 | SessionId: "aaabbbccc", 35 | } 36 | infoInactive := dockerTypes.Info{ 37 | Swarm: swarm.Info{ 38 | LocalNodeState: swarm.LocalNodeStateInactive, 39 | }, 40 | } 41 | 42 | f.On("GetForInstance", i).Return(d, nil) 43 | d.On("DaemonInfo").Return(infoInactive, nil) 44 | e.M.On("Emit", CheckSwarmStatusEvent, "aaabbbccc", []interface{}{ClusterStatus{IsManager: false, IsWorker: false, Instance: "node1"}}).Return() 45 | 46 | task := NewCheckSwarmStatus(e, f) 47 | ctx := context.Background() 48 | 49 | err := task.Run(ctx, i) 50 | 51 | assert.Nil(t, err) 52 | d.AssertExpectations(t) 53 | e.M.AssertExpectations(t) 54 | f.AssertExpectations(t) 55 | } 56 | 57 | func TestCheckSwarmStatus_RunWhenLocked(t *testing.T) { 58 | d := &docker.Mock{} 59 | e := &event.Mock{} 60 | f := &docker.FactoryMock{} 61 | 62 | i := &types.Instance{ 63 | IP: "10.0.0.1", 64 | Name: "node1", 65 | SessionId: "aaabbbccc", 66 | } 67 | infoLocked := dockerTypes.Info{ 68 | Swarm: swarm.Info{ 69 | LocalNodeState: 
swarm.LocalNodeStateLocked, 70 | }, 71 | } 72 | 73 | f.On("GetForInstance", i).Return(d, nil) 74 | d.On("DaemonInfo").Return(infoLocked, nil) 75 | e.M.On("Emit", CheckSwarmStatusEvent, "aaabbbccc", []interface{}{ClusterStatus{IsManager: false, IsWorker: false, Instance: "node1"}}).Return() 76 | 77 | task := NewCheckSwarmStatus(e, f) 78 | ctx := context.Background() 79 | 80 | err := task.Run(ctx, i) 81 | 82 | assert.Nil(t, err) 83 | d.AssertExpectations(t) 84 | e.M.AssertExpectations(t) 85 | f.AssertExpectations(t) 86 | } 87 | 88 | func TestCheckSwarmStatus_RunWhenManager(t *testing.T) { 89 | d := &docker.Mock{} 90 | e := &event.Mock{} 91 | f := &docker.FactoryMock{} 92 | 93 | i := &types.Instance{ 94 | IP: "10.0.0.1", 95 | Name: "node1", 96 | SessionId: "aaabbbccc", 97 | } 98 | infoLocked := dockerTypes.Info{ 99 | Swarm: swarm.Info{ 100 | LocalNodeState: swarm.LocalNodeStateActive, 101 | ControlAvailable: true, 102 | }, 103 | } 104 | 105 | f.On("GetForInstance", i).Return(d, nil) 106 | d.On("DaemonInfo").Return(infoLocked, nil) 107 | e.M.On("Emit", CheckSwarmStatusEvent, "aaabbbccc", []interface{}{ClusterStatus{IsManager: true, IsWorker: false, Instance: "node1"}}).Return() 108 | 109 | task := NewCheckSwarmStatus(e, f) 110 | ctx := context.Background() 111 | 112 | err := task.Run(ctx, i) 113 | 114 | assert.Nil(t, err) 115 | d.AssertExpectations(t) 116 | e.M.AssertExpectations(t) 117 | f.AssertExpectations(t) 118 | } 119 | 120 | func TestCheckSwarmStatus_RunWhenWorker(t *testing.T) { 121 | d := &docker.Mock{} 122 | e := &event.Mock{} 123 | f := &docker.FactoryMock{} 124 | 125 | i := &types.Instance{ 126 | IP: "10.0.0.1", 127 | Name: "node1", 128 | SessionId: "aaabbbccc", 129 | } 130 | infoLocked := dockerTypes.Info{ 131 | Swarm: swarm.Info{ 132 | LocalNodeState: swarm.LocalNodeStateActive, 133 | ControlAvailable: false, 134 | }, 135 | } 136 | 137 | f.On("GetForInstance", i).Return(d, nil) 138 | d.On("DaemonInfo").Return(infoLocked, nil) 139 | e.M.On("Emit", CheckSwarmStatusEvent, "aaabbbccc", []interface{}{ClusterStatus{IsManager: false, IsWorker: true, Instance: "node1"}}).Return() 140 | 141 | task := NewCheckSwarmStatus(e, f) 142 | ctx := context.Background() 143 | 144 | err := task.Run(ctx, i) 145 | 146 | assert.Nil(t, err) 147 | d.AssertExpectations(t) 148 | e.M.AssertExpectations(t) 149 | f.AssertExpectations(t) 150 | } 151 | --------------------------------------------------------------------------------