├── .dockerignore
├── .gitignore
├── .lintignore
├── .semaphore
│   └── semaphore.yml
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── bpf_helpers.h
├── circle.yml
├── ebpf.mk
├── pkg
│   └── tracer
│       ├── byteorder.go
│       ├── event.go
│       ├── event_common.go
│       ├── offsetguess.go
│       ├── offsetguess_unsupported.go
│       ├── tcptracer-ebpf.go
│       ├── tracer.go
│       ├── tracer_cb.go
│       └── tracer_unsupported.go
├── smoketest.sh
├── tcptracer-bpf.c
├── tcptracer-bpf.h
├── tests
│   ├── .gitignore
│   ├── Dockerfile
│   ├── Makefile
│   ├── multiple_connections_refused.sh
│   ├── run
│   ├── test.sh
│   └── tracer.go
├── tools
│   ├── .gitignore
│   ├── README.md
│   ├── build
│   │   ├── Makefile
│   │   ├── golang
│   │   │   ├── Dockerfile
│   │   │   └── build.sh
│   │   └── haskell
│   │       ├── Dockerfile
│   │       ├── build.sh
│   │       └── copy-libraries
│   ├── circle.yml
│   ├── config_management
│   │   ├── README.md
│   │   ├── group_vars
│   │   │   └── all
│   │   ├── library
│   │   │   └── setup_ansible_dependencies.yml
│   │   ├── roles
│   │   │   ├── dev-tools
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── docker-configuration
│   │   │   │   ├── files
│   │   │   │   │   └── docker.conf
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── docker-from-docker-ce-repo
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── docker-from-docker-repo
│   │   │   │   └── tasks
│   │   │   │       ├── debian.yml
│   │   │   │       ├── main.yml
│   │   │   │       └── redhat.yml
│   │   │   ├── docker-from-get.docker.com
│   │   │   │   └── tasks
│   │   │   │       ├── debian.yml
│   │   │   │       ├── main.yml
│   │   │   │       └── redhat.yml
│   │   │   ├── docker-from-tarball
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── vars
│   │   │   │       └── main.yml
│   │   │   ├── docker-install
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── docker-prerequisites
│   │   │   │   └── tasks
│   │   │   │       ├── debian.yml
│   │   │   │       └── main.yml
│   │   │   ├── golang-from-tarball
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── kubelet-stop
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── kubernetes-docker-images
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── kubernetes-install
│   │   │   │   └── tasks
│   │   │   │       ├── debian.yml
│   │   │   │       ├── main.yml
│   │   │   │       └── redhat.yml
│   │   │   ├── kubernetes-start
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── setup-ansible
│   │   │   │   └── pre_tasks
│   │   │   │       └── main.yml
│   │   │   ├── sock-shop
│   │   │   │   └── tasks
│   │   │   │       └── tasks.yml
│   │   │   ├── weave-kube
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── weave-net-sources
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── weave-net-utilities
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   └── weave-net
│   │   │       └── tasks
│   │   │           └── main.yml
│   │   ├── setup_weave-kube.yml
│   │   ├── setup_weave-net_debug.yml
│   │   ├── setup_weave-net_dev.yml
│   │   └── setup_weave-net_test.yml
│   ├── cover
│   │   ├── Makefile
│   │   ├── cover.go
│   │   └── gather_coverage.sh
│   ├── dependencies
│   │   ├── cross_versions.py
│   │   ├── list_os_images.sh
│   │   └── list_versions.py
│   ├── files-with-type
│   ├── image-tag
│   ├── integration
│   │   ├── assert.sh
│   │   ├── config.sh
│   │   ├── gce.sh
│   │   ├── run_all.sh
│   │   └── sanity_check.sh
│   ├── lint
│   ├── provisioning
│   │   ├── README.md
│   │   ├── aws
│   │   │   ├── README.md
│   │   │   ├── main.tf
│   │   │   ├── outputs.tf
│   │   │   └── variables.tf
│   │   ├── do
│   │   │   ├── README.md
│   │   │   ├── main.tf
│   │   │   ├── outputs.tf
│   │   │   └── variables.tf
│   │   ├── gcp
│   │   │   ├── README.md
│   │   │   ├── main.tf
│   │   │   ├── outputs.tf
│   │   │   └── variables.tf
│   │   └── setup.sh
│   ├── publish-site
│   ├── push-images
│   ├── rebuild-image
│   ├── runner
│   │   ├── Makefile
│   │   └── runner.go
│   ├── sched
│   ├── scheduler
│   │   ├── .gitignore
│   │   ├── README.md
│   │   ├── app.yaml
│   │   ├── appengine_config.py
│   │   ├── cron.yaml
│   │   ├── main.py
│   │   └── requirements.txt
│   ├── shell-lint
│   ├── socks
│   │   ├── Dockerfile
│   │   ├── Makefile
│   │   ├── README.md
│   │   ├── connect.sh
│   │   └── main.go
│   └── test
└── vendor
    ├── github.com
    │   └── iovisor
    │       └── gobpf
    │           ├── elf
    │           │   ├── COPYRIGHT.txt
    │           │   ├── LICENSE.txt
    │           │   ├── elf.go
    │           │   ├── elf_unsupported.go
    │           │   ├── include
    │           │   │   ├── bpf.h
    │           │   │   └── bpf_map.h
    │           │   ├── kernel_version.go
    │           │   ├── module.go
    │           │   ├── module_unsupported.go
    │           │   ├── perf.go
    │           │   ├── perf_unsupported.go
    │           │   ├── pinning.go
    │           │   ├── table.go
    │           │   ├── utsname_int8.go
    │           │   └── utsname_uint8.go
    │           └── pkg
    │               ├── bpffs
    │               │   ├── COPYRIGHT.txt
    │               │   ├── LICENSE.txt
    │               │   └── fs.go
    │               └── cpuonline
    │                   ├── COPYRIGHT.txt
    │                   ├── LICENSE.txt
    │                   └── cpu_range.go
    └── manifest
/.dockerignore:
--------------------------------------------------------------------------------
1 | weaveworks-tcptracer-bpf-ci-latest.aci
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .*.swp
2 | ebpf/
3 | weaveworks-tcptracer-bpf-ci-latest.aci
4 |
--------------------------------------------------------------------------------
/.lintignore:
--------------------------------------------------------------------------------
1 | ./pkg/tracer/tcptracer-ebpf.go
2 | ./ebpf/tcptracer-ebpf.go
3 | ./.git/*
4 |
--------------------------------------------------------------------------------
/.semaphore/semaphore.yml:
--------------------------------------------------------------------------------
1 | version: v1.0
2 | name: CI Build
3 |
4 | agent:
5 | machine:
6 | type: e1-standard-2
7 | os_image: ubuntu1804
8 |
9 | blocks:
10 | - name: Run tests
11 | task:
12 | jobs:
13 | - name: Smoke Tests
14 | commands:
15 | - checkout
16 | - go get github.com/appc/docker2aci
17 | - PATH=$HOME/go/bin:$PATH ./smoketest.sh
18 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM fedora:26
2 |
3 | ENV GOPATH /go
4 |
5 | # vim-common is needed for xxd
6 | # vim-minimal needs to be updated first to avoid an RPM conflict on man1/vim.1.gz
7 | RUN dnf update -y vim-minimal && \
8 | dnf install -y llvm clang kernel-devel make binutils vim-common golang go-bindata ShellCheck git file
9 |
10 | RUN curl -fsSLo shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \
11 | echo "b1925c2c405458811f0c227266402cf1868b4de529f114722c2e3a5af4ac7bb2 shfmt" | sha256sum -c && \
12 | chmod +x shfmt && \
13 | mv shfmt /usr/bin
14 | RUN go get -u github.com/fatih/hclfmt
15 |
16 | RUN mkdir -p /src /go
17 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | DEBUG=1
2 | UID=$(shell id -u)
3 | PWD=$(shell pwd)
4 |
5 | DOCKER_FILE?=Dockerfile
6 | DOCKER_IMAGE?=weaveworks/tcptracer-bpf-builder
7 |
8 | # If you can use docker without being root, you can do "make SUDO="
9 | SUDO=$(shell docker info >/dev/null 2>&1 || echo "sudo -E")
10 |
11 | all: build-docker-image build-ebpf-object install-generated-go
12 |
13 | build-docker-image:
14 | $(SUDO) docker build -t $(DOCKER_IMAGE) -f $(DOCKER_FILE) .
15 |
16 | build-ebpf-object:
17 | $(SUDO) docker run --rm -e DEBUG=$(DEBUG) \
18 | -e CIRCLE_BUILD_URL=$(CIRCLE_BUILD_URL) \
19 | -v $(PWD):/src:ro \
20 | -v $(PWD)/ebpf:/dist/ \
21 | --workdir=/src \
22 | $(DOCKER_IMAGE) \
23 | make -f ebpf.mk build
24 | sudo chown -R $(UID):$(UID) ebpf
25 |
26 | install-generated-go:
27 | cp ebpf/tcptracer-ebpf.go pkg/tracer/tcptracer-ebpf.go
28 |
29 | delete-docker-image:
30 | $(SUDO) docker rmi -f $(DOCKER_IMAGE)
31 |
32 | lint:
33 | ./tools/lint -ignorespelling "agre " -ignorespelling "AGRE " .
34 | ./tools/shell-lint .
35 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DEPRECATED: tcptracer-bpf
2 |
3 | tcptracer-bpf is an eBPF program using kprobes to trace TCP events (connect,
4 | accept, close). The eBPF program is compiled to an ELF object file.
5 |
6 | tcptracer-bpf also ships a Go library that provides a simple API for loading
7 | the ELF object file. Internally, it uses the [gobpf elf
8 | package](https://github.com/iovisor/gobpf).
9 |
10 | tcptracer-bpf does not have any run-time dependencies on kernel headers and is
11 | not tied to a specific kernel version or kernel configuration. This is quite
12 | unusual for eBPF programs using kprobes: for example, eBPF programs using
13 | kprobes with [bcc](https://github.com/iovisor/bcc) are compiled on the fly and
14 | depend on kernel headers. And [perf tools](https://perf.wiki.kernel.org)
15 | compiled for one kernel version cannot be used on another kernel version.
16 |
17 | To adapt to the currently running kernel at run-time, tcptracer-bpf creates a
18 | series of TCP connections with known parameters (such as known IP addresses and
19 | ports) and discovers where those parameters are stored in the [kernel struct
20 | sock](https://github.com/torvalds/linux/blob/v4.4/include/net/sock.h#L248). The
21 | offsets of the struct sock fields vary depending on the kernel version and
22 | kernel configuration. Since an eBPF program cannot loop, tcptracer-bpf does
23 | not directly iterate over the possible offsets. It is instead controlled from
24 | userspace by the Go library using a state machine.
25 |
26 | See `tests/tracer.go` for an example of how to use tcptracer-bpf.
27 |
28 | ## Build the ELF object
29 |
30 | ```
31 | make
32 | ```
33 |
34 | The object file can be found in `ebpf/tcptracer-ebpf.o`.
35 |
36 | ## Test
37 |
38 | ```
39 | cd tests
40 | make
41 | sudo ./run
42 | ```
43 |
44 | ## Vendoring
45 |
46 | We use [gvt](https://github.com/FiloSottile/gvt).
47 |
48 | ## Getting Help
49 |
50 | If you have any questions about, feedback for, or problems with `tcptracer-bpf`:
51 |
52 | - Invite yourself to the Weave Users Slack.
53 | - Ask a question on the [#general](https://weave-community.slack.com/messages/general/) slack channel.
54 | - [File an issue](https://github.com/weaveworks/tcptracer-bpf/issues/new).
55 |
56 | Weaveworks follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Weaveworks project maintainer, or Alexis Richardson (alexis@weave.works).
57 |
58 | Your feedback is always welcome!
59 |
--------------------------------------------------------------------------------
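
A minimal usage sketch, condensed from `tests/tracer.go` and the `pkg/tracer` API shown later in this dump (`NewTracer`, `Start`, `Stop`, and the `Callback` interface from `tracer_cb.go`). It needs root and a Linux kernel with eBPF support; the `printer` type and its output format are illustrative, not part of the library:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"

	"github.com/weaveworks/tcptracer-bpf/pkg/tracer"
)

// printer implements tracer.Callback (see pkg/tracer/tracer_cb.go).
type printer struct{}

func (printer) TCPEventV4(e tracer.TcpV4) {
	fmt.Printf("v4 %s pid=%d %v:%d -> %v:%d\n",
		e.Type, e.Pid, e.SAddr, e.SPort, e.DAddr, e.DPort)
}

func (printer) TCPEventV6(e tracer.TcpV6) {
	fmt.Printf("v6 %s pid=%d %v:%d -> %v:%d\n",
		e.Type, e.Pid, e.SAddr, e.SPort, e.DAddr, e.DPort)
}

func (printer) LostV4(n uint64) { fmt.Printf("lost %d v4 events\n", n) }
func (printer) LostV6(n uint64) { fmt.Printf("lost %d v6 events\n", n) }

func main() {
	// NewTracer loads the embedded ELF object, guesses the struct sock
	// offsets, and wires the perf maps to the callback.
	t, err := tracer.NewTracer(printer{})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	t.Start()
	defer t.Stop()

	// Trace until interrupted.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	<-sig
}
```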
/bpf_helpers.h:
--------------------------------------------------------------------------------
1 | #ifndef __BPF_HELPERS_H
2 | #define __BPF_HELPERS_H
3 |
4 | /* helper macro to place programs, maps, license in
5 | * different sections in elf_bpf file. Section names
6 | * are interpreted by elf_bpf loader
7 | */
8 | #define SEC(NAME) __attribute__((section(NAME), used))
9 |
10 | /* helper functions called from eBPF programs written in C */
11 | static void *(*bpf_map_lookup_elem)(void *map, void *key) =
12 | (void *) BPF_FUNC_map_lookup_elem;
13 | static int (*bpf_map_update_elem)(void *map, void *key, void *value,
14 | unsigned long long flags) =
15 | (void *) BPF_FUNC_map_update_elem;
16 | static int (*bpf_map_delete_elem)(void *map, void *key) =
17 | (void *) BPF_FUNC_map_delete_elem;
18 | static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) =
19 | (void *) BPF_FUNC_probe_read;
20 | static unsigned long long (*bpf_ktime_get_ns)(void) =
21 | (void *) BPF_FUNC_ktime_get_ns;
22 | static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
23 | (void *) BPF_FUNC_trace_printk;
24 | static unsigned long long (*bpf_get_smp_processor_id)(void) =
25 | (void *) BPF_FUNC_get_smp_processor_id;
26 | static unsigned long long (*bpf_get_current_pid_tgid)(void) =
27 | (void *) BPF_FUNC_get_current_pid_tgid;
28 | static unsigned long long (*bpf_get_current_uid_gid)(void) =
29 | (void *) BPF_FUNC_get_current_uid_gid;
30 | static int (*bpf_get_current_comm)(void *buf, int buf_size) =
31 | (void *) BPF_FUNC_get_current_comm;
32 | static int (*bpf_perf_event_read)(void *map, int index) =
33 | (void *) BPF_FUNC_perf_event_read;
34 | static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
35 | (void *) BPF_FUNC_clone_redirect;
36 | static int (*bpf_redirect)(int ifindex, int flags) =
37 | (void *) BPF_FUNC_redirect;
38 | static int (*bpf_perf_event_output)(void *ctx, void *map,
39 | unsigned long long flags, void *data,
40 | int size) =
41 | (void *) BPF_FUNC_perf_event_output;
42 | static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) =
43 | (void *) BPF_FUNC_skb_get_tunnel_key;
44 | static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) =
45 | (void *) BPF_FUNC_skb_set_tunnel_key;
46 | static unsigned long long (*bpf_get_prandom_u32)(void) =
47 | (void *) BPF_FUNC_get_prandom_u32;
48 |
49 | /* llvm builtin functions that eBPF C program may use to
50 | * emit BPF_LD_ABS and BPF_LD_IND instructions
51 | */
52 | struct sk_buff;
53 | unsigned long long load_byte(void *skb,
54 | unsigned long long off) asm("llvm.bpf.load.byte");
55 | unsigned long long load_half(void *skb,
56 | unsigned long long off) asm("llvm.bpf.load.half");
57 | unsigned long long load_word(void *skb,
58 | unsigned long long off) asm("llvm.bpf.load.word");
59 |
60 | /* a helper structure used by eBPF C program
61 | * to describe map attributes to elf_bpf loader
62 | */
63 | #define BUF_SIZE_MAP_NS 256
64 |
65 | struct bpf_map_def {
66 | unsigned int type;
67 | unsigned int key_size;
68 | unsigned int value_size;
69 | unsigned int max_entries;
70 | unsigned int map_flags;
71 | unsigned int pinning;
72 | char namespace[BUF_SIZE_MAP_NS];
73 | };
74 |
75 | static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
76 | (void *) BPF_FUNC_skb_store_bytes;
77 | static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
78 | (void *) BPF_FUNC_l3_csum_replace;
79 | static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
80 | (void *) BPF_FUNC_l4_csum_replace;
81 |
82 | #if defined(__x86_64__)
83 |
84 | #define PT_REGS_PARM1(x) ((x)->di)
85 | #define PT_REGS_PARM2(x) ((x)->si)
86 | #define PT_REGS_PARM3(x) ((x)->dx)
87 | #define PT_REGS_PARM4(x) ((x)->cx)
88 | #define PT_REGS_PARM5(x) ((x)->r8)
89 | #define PT_REGS_RET(x) ((x)->sp)
90 | #define PT_REGS_FP(x) ((x)->bp)
91 | #define PT_REGS_RC(x) ((x)->ax)
92 | #define PT_REGS_SP(x) ((x)->sp)
93 | #define PT_REGS_IP(x) ((x)->ip)
94 |
95 | #elif defined(__s390x__)
96 |
97 | #define PT_REGS_PARM1(x) ((x)->gprs[2])
98 | #define PT_REGS_PARM2(x) ((x)->gprs[3])
99 | #define PT_REGS_PARM3(x) ((x)->gprs[4])
100 | #define PT_REGS_PARM4(x) ((x)->gprs[5])
101 | #define PT_REGS_PARM5(x) ((x)->gprs[6])
102 | #define PT_REGS_RET(x) ((x)->gprs[14])
103 | #define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
104 | #define PT_REGS_RC(x) ((x)->gprs[2])
105 | #define PT_REGS_SP(x) ((x)->gprs[15])
106 | #define PT_REGS_IP(x) ((x)->ip)
107 |
108 | #elif defined(__aarch64__)
109 |
110 | #define PT_REGS_PARM1(x) ((x)->regs[0])
111 | #define PT_REGS_PARM2(x) ((x)->regs[1])
112 | #define PT_REGS_PARM3(x) ((x)->regs[2])
113 | #define PT_REGS_PARM4(x) ((x)->regs[3])
114 | #define PT_REGS_PARM5(x) ((x)->regs[4])
115 | #define PT_REGS_RET(x) ((x)->regs[30])
116 | #define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
117 | #define PT_REGS_RC(x) ((x)->regs[0])
118 | #define PT_REGS_SP(x) ((x)->sp)
119 | #define PT_REGS_IP(x) ((x)->pc)
120 |
121 | #elif defined(__powerpc__)
122 |
123 | #define PT_REGS_PARM1(x) ((x)->gpr[3])
124 | #define PT_REGS_PARM2(x) ((x)->gpr[4])
125 | #define PT_REGS_PARM3(x) ((x)->gpr[5])
126 | #define PT_REGS_PARM4(x) ((x)->gpr[6])
127 | #define PT_REGS_PARM5(x) ((x)->gpr[7])
128 | #define PT_REGS_RC(x) ((x)->gpr[3])
129 | #define PT_REGS_SP(x) ((x)->sp)
130 | #define PT_REGS_IP(x) ((x)->nip)
131 |
132 | #endif
133 |
134 | #ifdef __powerpc__
135 | #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
136 | #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
137 | #else
138 | #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \
139 | bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
140 | #define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \
141 | bpf_probe_read(&(ip), sizeof(ip), \
142 | (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
143 | #endif
144 |
145 | #endif
146 |
--------------------------------------------------------------------------------
/circle.yml:
--------------------------------------------------------------------------------
1 | general:
2 | branches:
3 | ignore:
4 | - gh-pages
5 |
6 | machine:
7 | services:
8 | - docker
9 | environment:
10 | GOPATH: $HOME
11 | SRCDIR: $HOME/src/github.com/weaveworks/tcptracer-bpf
12 | SRCDIR2: /go/src/github.com/weaveworks/tcptracer-bpf
13 | PATH: $PATH:$HOME/.local/bin
14 |
15 | dependencies:
16 | override:
17 | - |
18 | mkdir -p $(dirname $SRCDIR) &&
19 | cp -r --preserve=timestamps $(pwd)/ $SRCDIR
20 | - |
21 | cd $SRCDIR &&
22 | make build-docker-image &&
23 | make build-ebpf-object &&
24 | cmp ebpf/tcptracer-ebpf.go pkg/tracer/tcptracer-ebpf.go
25 |
26 | test:
27 | override:
28 | - |
29 | cd $SRCDIR && sudo docker run --rm \
30 | -e GOPATH=/go \
31 | -v $SRCDIR:$SRCDIR2 \
32 | --workdir=$SRCDIR2 \
33 | weaveworks/tcptracer-bpf-builder \
34 | /bin/sh -c \
35 | 'export PATH=$GOPATH/bin:$PATH &&
36 | make lint &&
37 | cd tests &&
38 | make &&
39 | GOOS=darwin make'
40 | - |
41 | cd $SRCDIR && sha512sum ebpf/tcptracer-ebpf.*
42 |
43 | deployment:
44 | hub:
45 | branch: /.*/
46 | commands:
47 | - |
48 | cd $SRCDIR && cp ebpf/tcptracer-ebpf.* $CIRCLE_ARTIFACTS/
49 |
--------------------------------------------------------------------------------
/ebpf.mk:
--------------------------------------------------------------------------------
1 | SHELL=/bin/bash -o pipefail
2 | DEST_DIR?=/dist
3 | LINUX_HEADERS=$(shell rpm -q kernel-devel --last | head -n 1 | awk -F'kernel-devel-' '{print "/usr/src/kernels/"$$2}' | cut -d " " -f 1)
4 |
5 | build:
6 | @mkdir -p "$(DEST_DIR)"
7 | clang -D__KERNEL__ -D__ASM_SYSREG_H \
8 | -DCIRCLE_BUILD_URL=\"$(CIRCLE_BUILD_URL)\" \
9 | -Wno-unused-value \
10 | -Wno-pointer-sign \
11 | -Wno-compare-distinct-pointer-types \
12 | -Wunused \
13 | -Wall \
14 | -Werror \
15 | -O2 -emit-llvm -c tcptracer-bpf.c \
16 | $(foreach path,$(LINUX_HEADERS), -I $(path)/arch/x86/include -I $(path)/arch/x86/include/generated -I $(path)/include -I $(path)/include/generated/uapi -I $(path)/arch/x86/include/uapi -I $(path)/include/uapi) \
17 | -o - | llc -march=bpf -filetype=obj -o "${DEST_DIR}/tcptracer-ebpf.o"
18 | go-bindata -pkg tracer -prefix "${DEST_DIR}/" -modtime 1 -o "${DEST_DIR}/tcptracer-ebpf.go" "${DEST_DIR}/tcptracer-ebpf.o"
19 |
--------------------------------------------------------------------------------
/pkg/tracer/byteorder.go:
--------------------------------------------------------------------------------
1 | package tracer
2 |
3 | import (
4 | "encoding/binary"
5 | "unsafe"
6 | )
7 |
8 | var nativeEndian binary.ByteOrder
9 |
10 | // In the absence of binary.NativeEndian ...
11 | func init() {
12 | var i int32 = 0x01020304
13 | u := unsafe.Pointer(&i)
14 | pb := (*byte)(u)
15 | b := *pb
16 | if b == 0x04 {
17 | nativeEndian = binary.LittleEndian
18 | } else {
19 | nativeEndian = binary.BigEndian
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
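
The pointer trick in `init()` above can be lifted into a standalone function and checked in isolation. This sketch is illustrative, not part of the package (note that since Go 1.21 the standard library provides `binary.NativeEndian` directly):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// detectEndian reimplements the init() above: write a known 32-bit
// pattern and inspect the first byte in memory.
func detectEndian() binary.ByteOrder {
	var i int32 = 0x01020304
	if *(*byte)(unsafe.Pointer(&i)) == 0x04 {
		return binary.LittleEndian // least-significant byte comes first
	}
	return binary.BigEndian
}

func main() {
	fmt.Println(detectEndian()) // prints "LittleEndian" on x86-64
}
```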
/pkg/tracer/event.go:
--------------------------------------------------------------------------------
1 | // +build linux
2 |
3 | package tracer
4 |
5 | import (
6 | "encoding/binary"
7 | "net"
8 | "unsafe"
9 | )
10 |
11 | /*
12 | #include "../../tcptracer-bpf.h"
13 | */
14 | import "C"
15 |
16 | func tcpV4ToGo(data []byte) (ret TcpV4) {
17 | eventC := (*C.struct_tcp_ipv4_event_t)(unsafe.Pointer(&data[0]))
18 |
19 | ret.Timestamp = uint64(eventC.timestamp)
20 | ret.CPU = uint64(eventC.cpu)
21 | ret.Type = EventType(eventC._type)
22 | ret.Pid = uint32(eventC.pid & 0xffffffff)
23 | ret.Comm = C.GoString(&eventC.comm[0])
24 |
25 | saddrbuf := make([]byte, 4)
26 | daddrbuf := make([]byte, 4)
27 |
28 | binary.LittleEndian.PutUint32(saddrbuf, uint32(eventC.saddr))
29 | binary.LittleEndian.PutUint32(daddrbuf, uint32(eventC.daddr))
30 |
31 | ret.SAddr = net.IPv4(saddrbuf[0], saddrbuf[1], saddrbuf[2], saddrbuf[3])
32 | ret.DAddr = net.IPv4(daddrbuf[0], daddrbuf[1], daddrbuf[2], daddrbuf[3])
33 |
34 | ret.SPort = uint16(eventC.sport)
35 | ret.DPort = uint16(eventC.dport)
36 | ret.NetNS = uint32(eventC.netns)
37 | ret.Fd = uint32(eventC.fd)
38 |
39 | return
40 | }
41 |
42 | // Offset added to all timestamps, to hold back events so they are less
43 | // likely to be reported out of order. Value is in nanoseconds.
44 | var (
45 | TimestampOffset uint64 = 100000
46 | )
47 |
48 | func tcpV4Timestamp(data *[]byte) uint64 {
49 | eventC := (*C.struct_tcp_ipv4_event_t)(unsafe.Pointer(&(*data)[0]))
50 | return uint64(eventC.timestamp) + TimestampOffset
51 | }
52 |
53 | func tcpV6ToGo(data []byte) (ret TcpV6) {
54 | eventC := (*C.struct_tcp_ipv6_event_t)(unsafe.Pointer(&data[0]))
55 |
56 | ret.Timestamp = uint64(eventC.timestamp)
57 | ret.CPU = uint64(eventC.cpu)
58 | ret.Type = EventType(eventC._type)
59 | ret.Pid = uint32(eventC.pid & 0xffffffff)
60 | ret.Comm = C.GoString(&eventC.comm[0])
61 |
62 | saddrbuf := make([]byte, 16)
63 | daddrbuf := make([]byte, 16)
64 |
65 | binary.LittleEndian.PutUint64(saddrbuf, uint64(eventC.saddr_h))
66 | binary.LittleEndian.PutUint64(saddrbuf[8:], uint64(eventC.saddr_l))
67 | binary.LittleEndian.PutUint64(daddrbuf, uint64(eventC.daddr_h))
68 | binary.LittleEndian.PutUint64(daddrbuf[8:], uint64(eventC.daddr_l))
69 |
70 | ret.SAddr = net.IP(saddrbuf)
71 | ret.DAddr = net.IP(daddrbuf)
72 |
73 | ret.SPort = uint16(eventC.sport)
74 | ret.DPort = uint16(eventC.dport)
75 | ret.NetNS = uint32(eventC.netns)
76 | ret.Fd = uint32(eventC.fd)
77 |
78 | return
79 | }
80 |
81 | func tcpV6Timestamp(data *[]byte) uint64 {
82 | eventC := (*C.struct_tcp_ipv6_event_t)(unsafe.Pointer(&(*data)[0]))
83 | return uint64(eventC.timestamp) + TimestampOffset
84 | }
85 |
--------------------------------------------------------------------------------
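
For illustration, the same decoding can be done without cgo by mirroring `struct tcp_ipv4_event_t` (defined in `tcptracer-bpf.h`, shown later in this dump) as a Go struct. This sketch assumes a little-endian host, as the code above does, and that the C struct has no implicit padding (which holds on x86-64, where the layout is 64 bytes). The round-trip through `bytes.Buffer` stands in for the raw bytes delivered by the perf ring buffer:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"net"
)

// tcpV4Event mirrors struct tcp_ipv4_event_t from tcptracer-bpf.h.
type tcpV4Event struct {
	Timestamp uint64
	CPU       uint64
	Type      uint32
	Pid       uint32
	Comm      [16]byte
	SAddr     uint32
	DAddr     uint32
	SPort     uint16
	DPort     uint16
	NetNS     uint32
	Fd        uint32
	Dummy     uint32
}

func main() {
	// Build fake event bytes as the kernel side would emit them.
	in := tcpV4Event{Timestamp: 42, Type: 1 /* connect */, SPort: 61111}
	copy(in.Comm[:], "curl")
	in.SAddr = 0x0100007f // 127.0.0.1, stored in network byte order
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, in); err != nil {
		panic(err)
	}

	// Decode them back, as tcpV4ToGo does with cgo.
	var out tcpV4Event
	if err := binary.Read(&buf, binary.LittleEndian, &out); err != nil {
		panic(err)
	}
	comm := string(bytes.TrimRight(out.Comm[:], "\x00"))
	saddr := make(net.IP, 4)
	binary.LittleEndian.PutUint32(saddr, out.SAddr)
	fmt.Printf("ts=%d type=%d comm=%q saddr=%v sport=%d\n",
		out.Timestamp, out.Type, comm, saddr, out.SPort)
}
```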
/pkg/tracer/event_common.go:
--------------------------------------------------------------------------------
1 | package tracer
2 |
3 | import (
4 | "net"
5 | )
6 |
7 | type EventType uint32
8 |
9 | // These constants should be in sync with the equivalent definitions in the ebpf program.
10 | const (
11 | EventConnect EventType = 1
12 | EventAccept = 2
13 | EventClose = 3
14 | EventFdInstall = 4
15 | )
16 |
17 | func (e EventType) String() string {
18 | switch e {
19 | case EventConnect:
20 | return "connect"
21 | case EventAccept:
22 | return "accept"
23 | case EventClose:
24 | return "close"
25 | case EventFdInstall:
26 | return "fdinstall"
27 | default:
28 | return "unknown"
29 | }
30 | }
31 |
32 | // TcpV4 represents a TCP event (connect, accept or close) on IPv4
33 | type TcpV4 struct {
34 | Timestamp uint64 // Monotonic timestamp
35 | CPU uint64 // CPU index
36 | Type EventType // connect, accept or close
37 | Pid uint32 // ID of the process that triggered the event
38 | Comm string // The process command (as in /proc/$pid/comm)
39 | SAddr net.IP // Local IP address
40 | DAddr net.IP // Remote IP address
41 | SPort uint16 // Local TCP port
42 | DPort uint16 // Remote TCP port
43 | NetNS uint32 // Network namespace ID (as in /proc/$pid/ns/net)
44 | Fd uint32 // File descriptor for fd_install events
45 | }
46 |
47 | // TcpV6 represents a TCP event (connect, accept or close) on IPv6
48 | type TcpV6 struct {
49 | Timestamp uint64 // Monotonic timestamp
50 | CPU uint64 // CPU index
51 | Type EventType // connect, accept or close
52 | Pid uint32 // ID of the process that triggered the event
53 | Comm string // The process command (as in /proc/$pid/comm)
54 | SAddr net.IP // Local IP address
55 | DAddr net.IP // Remote IP address
56 | SPort uint16 // Local TCP port
57 | DPort uint16 // Remote TCP port
58 | NetNS uint32 // Network namespace ID (as in /proc/$pid/ns/net)
59 | Fd uint32 // File descriptor for fd_install events
60 | }
61 |
--------------------------------------------------------------------------------
/pkg/tracer/offsetguess_unsupported.go:
--------------------------------------------------------------------------------
1 | // +build !linux
2 |
3 | package tracer
4 |
5 | import (
6 | "fmt"
7 |
8 | "github.com/iovisor/gobpf/elf"
9 | )
10 |
11 | func guess(b *elf.Module) error {
12 | return fmt.Errorf("not supported on non-Linux systems")
13 | }
14 |
--------------------------------------------------------------------------------
/pkg/tracer/tracer.go:
--------------------------------------------------------------------------------
1 | // +build linux
2 |
3 | package tracer
4 |
5 | import (
6 | "bytes"
7 | "fmt"
8 | "unsafe"
9 |
10 | bpflib "github.com/iovisor/gobpf/elf"
11 | )
12 |
13 | type Tracer struct {
14 | m *bpflib.Module
15 | perfMapIPV4 *bpflib.PerfMap
16 | perfMapIPV6 *bpflib.PerfMap
17 | stopChan chan struct{}
18 | }
19 |
20 | // maxActive configures the maximum number of instances of the probed functions
21 | // that can be handled simultaneously.
22 | // This value should be enough for typical workloads (for example, a number
23 | // of processes blocked in the accept syscall).
24 | const maxActive = 128
25 |
26 | func TracerAsset() ([]byte, error) {
27 | buf, err := Asset("tcptracer-ebpf.o")
28 | if err != nil {
29 | return nil, fmt.Errorf("couldn't find asset: %s", err)
30 | }
31 | return buf, nil
32 | }
33 |
34 | func NewTracer(cb Callback) (*Tracer, error) {
35 | buf, err := Asset("tcptracer-ebpf.o")
36 | if err != nil {
37 | return nil, fmt.Errorf("couldn't find asset: %s", err)
38 | }
39 | reader := bytes.NewReader(buf)
40 |
41 | m := bpflib.NewModuleFromReader(reader)
42 | if m == nil {
43 | return nil, fmt.Errorf("BPF not supported")
44 | }
45 |
46 | sectionParams := make(map[string]bpflib.SectionParams)
47 | sectionParams["maps/tcp_event_ipv4"] = bpflib.SectionParams{PerfRingBufferPageCount: 256}
48 | err = m.Load(sectionParams)
49 | if err != nil {
50 | return nil, err
51 | }
52 |
53 | err = m.EnableKprobes(maxActive)
54 | if err != nil {
55 | return nil, err
56 | }
57 |
58 | channelV4 := make(chan []byte)
59 | channelV6 := make(chan []byte)
60 | lostChanV4 := make(chan uint64)
61 | lostChanV6 := make(chan uint64)
62 |
63 | perfMapIPV4, err := initializeIPv4(m, channelV4, lostChanV4)
64 | if err != nil {
65 | return nil, fmt.Errorf("failed to init perf map for IPv4 events: %s", err)
66 | }
67 |
68 | perfMapIPV6, err := initializeIPv6(m, channelV6, lostChanV6)
69 | if err != nil {
70 | return nil, fmt.Errorf("failed to init perf map for IPv6 events: %s", err)
71 | }
72 |
73 | perfMapIPV4.SetTimestampFunc(tcpV4Timestamp)
74 | perfMapIPV6.SetTimestampFunc(tcpV6Timestamp)
75 |
76 | stopChan := make(chan struct{})
77 |
78 | go func() {
79 | for {
80 | select {
81 | case <-stopChan:
82 | // On stop, stopChan will be closed but the other channels will
83 | // also be closed shortly after. The select{} has no priorities,
84 | // therefore, the "ok" value must be checked below.
85 | return
86 | case data, ok := <-channelV4:
87 | if !ok {
88 | return // see explanation above
89 | }
90 | cb.TCPEventV4(tcpV4ToGo(data))
91 | case lost, ok := <-lostChanV4:
92 | if !ok {
93 | return // see explanation above
94 | }
95 | cb.LostV4(lost)
96 | }
97 | }
98 | }()
99 |
100 | go func() {
101 | for {
102 | select {
103 | case <-stopChan:
104 | return
105 | case data, ok := <-channelV6:
106 | if !ok {
107 | return // see explanation above
108 | }
109 | cb.TCPEventV6(tcpV6ToGo(data))
110 | case lost, ok := <-lostChanV6:
111 | if !ok {
112 | return // see explanation above
113 | }
114 | cb.LostV6(lost)
115 | }
116 | }
117 | }()
118 |
119 | return &Tracer{
120 | m: m,
121 | perfMapIPV4: perfMapIPV4,
122 | perfMapIPV6: perfMapIPV6,
123 | stopChan: stopChan,
124 | }, nil
125 | }
126 |
127 | func (t *Tracer) Start() {
128 | t.perfMapIPV4.PollStart()
129 | t.perfMapIPV6.PollStart()
130 | }
131 |
132 | func (t *Tracer) AddFdInstallWatcher(pid uint32) (err error) {
133 | var one uint32 = 1
134 | mapFdInstall := t.m.Map("fdinstall_pids")
135 | err = t.m.UpdateElement(mapFdInstall, unsafe.Pointer(&pid), unsafe.Pointer(&one), 0)
136 | return err
137 | }
138 |
139 | func (t *Tracer) RemoveFdInstallWatcher(pid uint32) (err error) {
140 | mapFdInstall := t.m.Map("fdinstall_pids")
141 | err = t.m.DeleteElement(mapFdInstall, unsafe.Pointer(&pid))
142 | return err
143 | }
144 |
145 | func (t *Tracer) Stop() {
146 | close(t.stopChan)
147 | t.perfMapIPV4.PollStop()
148 | t.perfMapIPV6.PollStop()
149 | t.m.Close()
150 | }
151 |
152 | func initialize(module *bpflib.Module, eventMapName string, eventChan chan []byte, lostChan chan uint64) (*bpflib.PerfMap, error) {
153 | if err := guess(module); err != nil {
154 | return nil, fmt.Errorf("error guessing offsets: %v", err)
155 | }
156 |
157 | pm, err := bpflib.InitPerfMap(module, eventMapName, eventChan, lostChan)
158 | if err != nil {
159 | return nil, fmt.Errorf("error initializing perf map for %q: %v", eventMapName, err)
160 | }
161 |
162 | return pm, nil
163 |
164 | }
165 |
166 | func initializeIPv4(module *bpflib.Module, eventChan chan []byte, lostChan chan uint64) (*bpflib.PerfMap, error) {
167 | return initialize(module, "tcp_event_ipv4", eventChan, lostChan)
168 | }
169 |
170 | func initializeIPv6(module *bpflib.Module, eventChan chan []byte, lostChan chan uint64) (*bpflib.PerfMap, error) {
171 | return initialize(module, "tcp_event_ipv6", eventChan, lostChan)
172 | }
173 |
--------------------------------------------------------------------------------
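
The two goroutines above rely on a Go detail worth calling out: `select` chooses uniformly at random among ready cases, so after `Stop()` closes `stopChan` a receive on a data channel can still win the race; once gobpf closes that channel the receive returns immediately with `ok == false`. A standalone sketch of the same shutdown pattern (illustrative names, not the library's code):

```go
package main

import (
	"fmt"
	"sync"
)

func consume(stop chan struct{}, events chan string, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case <-stop:
			return
		case ev, ok := <-events:
			if !ok {
				// events was closed after stop; without this check we
				// would spin on zero values from the closed channel.
				return
			}
			fmt.Println("event:", ev)
		}
	}
}

func main() {
	stop := make(chan struct{})
	events := make(chan string)
	var wg sync.WaitGroup
	wg.Add(1)
	go consume(stop, events, &wg)

	events <- "connect 127.0.0.1:61111"
	close(stop)
	close(events)
	wg.Wait()
}
```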
/pkg/tracer/tracer_cb.go:
--------------------------------------------------------------------------------
1 | package tracer
2 |
3 | type Callback interface {
4 | TCPEventV4(TcpV4)
5 | TCPEventV6(TcpV6)
6 | LostV4(uint64)
7 | LostV6(uint64)
8 | }
9 |
--------------------------------------------------------------------------------
/pkg/tracer/tracer_unsupported.go:
--------------------------------------------------------------------------------
1 | // +build !linux
2 |
3 | package tracer
4 |
5 | import (
6 | "fmt"
7 | )
8 |
9 | type Tracer struct{}
10 |
11 | func TracerAsset() ([]byte, error) {
12 | return nil, fmt.Errorf("not supported on non-Linux systems")
13 | }
14 |
15 | func NewTracer(cb Callback) (*Tracer, error) {
16 | return nil, fmt.Errorf("not supported on non-Linux systems")
17 | }
18 | func (t *Tracer) Start() {
19 | }
20 | func (t *Tracer) AddFdInstallWatcher(pid uint32) (err error) {
21 | return fmt.Errorf("not supported on non-Linux systems")
22 | }
23 | func (t *Tracer) RemoveFdInstallWatcher(pid uint32) (err error) {
24 | return fmt.Errorf("not supported on non-Linux systems")
25 | }
26 | func (t *Tracer) Stop() {
27 | }
28 |
--------------------------------------------------------------------------------
/smoketest.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Script to smoke test tcptracer-bpf
4 | #
5 | # `semaphore.sh` runs the test (`tests/run`) in a rkt container with
6 | # custom-built stage1-kvm images to test under different kernels.
7 | # stage1-kvm allows us to run a container inside a KVM virtual machine
8 | # and thus test eBPF workloads, which need a modern Linux kernel and
9 | # root access.
10 | #
11 | # `tests/run` starts a tracer (see `tests/tracer.go`) and verifies the
12 | # tracer sees all events for a single TCP connection (client connect,
13 | # server accept, client close, server close) with the expected addr:port
14 | # combinations.
15 |
16 | set -eux
17 | set -o pipefail
18 |
19 | # Currently we test on Linux version
20 | # 4.4 - the longterm release used in Amazon Linux
21 | # 4.9 - the latest stable release
22 | readonly kernel_versions=("4.4.129" "4.9.96")
23 | readonly rkt_version="1.30.0"
24 |
25 | if [[ ! -f "./rkt/rkt" ]] \
26 | || [[ ! "$(./rkt/rkt version | awk '/rkt Version/{print $3}')" == "${rkt_version}" ]]; then
27 |
28 | curl -LsS "https://github.com/coreos/rkt/releases/download/v${rkt_version}/rkt-v${rkt_version}.tar.gz" \
29 | -o rkt.tgz
30 |
31 | mkdir -p rkt
32 | tar -xvf rkt.tgz -C rkt --strip-components=1
33 | fi
34 |
35 | # Pre-fetch stage1 dependency due to rkt#2241
36 | # https://github.com/coreos/rkt/issues/2241
37 | sudo ./rkt/rkt image fetch --insecure-options=image "coreos.com/rkt/stage1-kvm:${rkt_version}"
38 |
39 | sudo docker build -t "weaveworks/tcptracer-bpf-ci" -f "./tests/Dockerfile" .
40 | # shellcheck disable=SC2024
41 | sudo docker save "weaveworks/tcptracer-bpf-ci" >"tcptracer-bpf-ci.tar"
42 | docker2aci "./tcptracer-bpf-ci.tar"
43 | rm "./tcptracer-bpf-ci.tar"
44 | trap "rm -f ./weaveworks-tcptracer-bpf-ci-latest.aci" EXIT
45 |
46 | make
47 |
48 | for kernel_version in "${kernel_versions[@]}"; do
49 | kernel_header_dir="/lib/modules/${kernel_version}-kinvolk-v1/source/include"
50 | # stage1 image build with https://github.com/kinvolk/stage1-builder
51 | stage1_name="kinvolk.io/aci/rkt/stage1-kvm:${rkt_version},kernelversion=${kernel_version}"
52 |
53 | rm -f ./rkt-uuid
54 |
55 | sudo timeout --foreground --kill-after=10 5m \
56 | ./rkt/rkt \
57 | run --interactive \
58 | --uuid-file-save=./rkt-uuid \
59 | --insecure-options=image,all-run \
60 | --dns=8.8.8.8 \
61 | --stage1-name="${stage1_name}" \
62 | --volume=ttbpf,kind=host,source="$PWD" \
63 | ./weaveworks-tcptracer-bpf-ci-latest.aci \
64 | --mount=volume=ttbpf,target=/go/src/github.com/weaveworks/tcptracer-bpf \
65 | --environment=GOPATH=/go \
66 | --environment=C_INCLUDE_PATH="${kernel_header_dir}/arch/x86/include:${kernel_header_dir}/arch/x86/include/generated" \
67 | --exec=/bin/bash -- -o xtrace -c \
68 | 'cd /go/src/github.com/weaveworks/tcptracer-bpf/tests &&
69 | mount -t tmpfs tmpfs /tmp &&
70 | mount -t debugfs debugfs /sys/kernel/debug/ &&
71 | make &&
72 | ./run'
73 |
74 | # Determine exit code from pod status due to
75 | # https://github.com/coreos/rkt/issues/2777
76 | test_status=$(sudo ./rkt/rkt status "$(
--------------------------------------------------------------------------------
/tcptracer-bpf.h:
--------------------------------------------------------------------------------
1 | #ifndef __TCPTRACER_BPF_H
2 | #define __TCPTRACER_BPF_H
3 |
4 | #include <linux/types.h>
5 |
6 | #define TCP_EVENT_TYPE_CONNECT 1
7 | #define TCP_EVENT_TYPE_ACCEPT 2
8 | #define TCP_EVENT_TYPE_CLOSE 3
9 | #define TCP_EVENT_TYPE_FD_INSTALL 4
10 |
11 | #define GUESS_SADDR 0
12 | #define GUESS_DADDR 1
13 | #define GUESS_FAMILY 2
14 | #define GUESS_SPORT 3
15 | #define GUESS_DPORT 4
16 | #define GUESS_NETNS 5
17 | #define GUESS_DADDR_IPV6 6
18 |
19 | #ifndef TASK_COMM_LEN
20 | #define TASK_COMM_LEN 16
21 | #endif
22 |
23 | struct tcp_ipv4_event_t {
24 | __u64 timestamp;
25 | __u64 cpu;
26 | __u32 type;
27 | __u32 pid;
28 | char comm[TASK_COMM_LEN];
29 | __u32 saddr;
30 | __u32 daddr;
31 | __u16 sport;
32 | __u16 dport;
33 | __u32 netns;
34 | __u32 fd;
35 | __u32 dummy;
36 | };
37 |
38 | struct tcp_ipv6_event_t {
39 | __u64 timestamp;
40 | __u64 cpu;
41 | __u32 type;
42 | __u32 pid;
43 | char comm[TASK_COMM_LEN];
44 | /* Using the type unsigned __int128 generates an error in the ebpf verifier */
45 | __u64 saddr_h;
46 | __u64 saddr_l;
47 | __u64 daddr_h;
48 | __u64 daddr_l;
49 | __u16 sport;
50 | __u16 dport;
51 | __u32 netns;
52 | __u32 fd;
53 | __u32 dummy;
54 | };
55 |
56 | // tcp_set_state doesn't run in the context of the process that initiated the
57 | // connection so we need to store a map TUPLE -> PID to send the right PID on
58 | // the event
59 | struct ipv4_tuple_t {
60 | __u32 saddr;
61 | __u32 daddr;
62 | __u16 sport;
63 | __u16 dport;
64 | __u32 netns;
65 | };
66 |
67 | struct ipv6_tuple_t {
68 | /* Using the type unsigned __int128 generates an error in the ebpf verifier */
69 | __u64 saddr_h;
70 | __u64 saddr_l;
71 | __u64 daddr_h;
72 | __u64 daddr_l;
73 | __u16 sport;
74 | __u16 dport;
75 | __u32 netns;
76 | };
77 |
78 | struct pid_comm_t {
79 | __u64 pid;
80 | char comm[TASK_COMM_LEN];
81 | };
82 |
83 | #define TCPTRACER_STATE_UNINITIALIZED 0
84 | #define TCPTRACER_STATE_CHECKING 1
85 | #define TCPTRACER_STATE_CHECKED 2
86 | #define TCPTRACER_STATE_READY 3
87 | struct tcptracer_status_t {
88 | __u64 state;
89 |
90 | /* checking */
91 | __u64 pid_tgid;
92 | __u64 what;
93 | __u64 offset_saddr;
94 | __u64 offset_daddr;
95 | __u64 offset_sport;
96 | __u64 offset_dport;
97 | __u64 offset_netns;
98 | __u64 offset_ino;
99 | __u64 offset_family;
100 | __u64 offset_daddr_ipv6;
101 |
102 | __u64 err;
103 |
104 | __u32 daddr_ipv6[4];
105 | __u32 netns;
106 | __u32 saddr;
107 | __u32 daddr;
108 | __u16 sport;
109 | __u16 dport;
110 | __u16 family;
111 | __u16 padding;
112 | };
113 |
114 | #endif
115 |
--------------------------------------------------------------------------------
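
The status struct above is the shared state of the offset-guessing handshake described in the README: userspace sets `state` to `TCPTRACER_STATE_CHECKING` with a candidate offset, makes a reference connection with known parameters, and checks whether the eBPF side read back the expected value at that offset. The real driver is `pkg/tracer/offsetguess.go` (not included in this dump); the following self-contained simulation only illustrates the idea of scanning a `struct sock`-like buffer for a planted known value:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Simulated struct sock: the source address of a reference connection
	// to 127.0.0.1 sits at a kernel-dependent offset we pretend not to know.
	const expected uint32 = 0x0100007f // 127.0.0.1 in network byte order
	sock := make([]byte, 128)
	binary.LittleEndian.PutUint32(sock[44:], expected)

	// In the real library, one candidate offset is tried per reference
	// connection (CHECKING -> CHECKED); on a match the status becomes READY.
	for off := 0; off+4 <= len(sock); off++ {
		if binary.LittleEndian.Uint32(sock[off:]) == expected {
			fmt.Printf("guessed offset_saddr = %d\n", off)
			return
		}
	}
	fmt.Println("offset not found")
}
```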
/tests/.gitignore:
--------------------------------------------------------------------------------
1 | tracer
2 |
--------------------------------------------------------------------------------
/tests/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM fedora:25
2 |
3 | RUN dnf install -y iproute make nmap-ncat procps-ng golang busybox
4 |
5 | RUN mkdir -p /go
6 |
7 | ENV GOPATH /go
8 | CMD ["bash"]
9 |
--------------------------------------------------------------------------------
/tests/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all build
2 |
3 | all: build
4 |
5 | build:
6 | go build -o tracer tracer.go
7 |
8 | build-in-docker:
9 | sudo docker build -t "weaveworks/tcptracer-bpf-ci" .
10 | sudo docker run \
11 | -v $(GOPATH)/src/github.com/weaveworks/tcptracer-bpf:/go/src/github.com/weaveworks/tcptracer-bpf \
12 | --env GOPATH=/go \
13 | weaveworks/tcptracer-bpf-ci \
14 | sh -c 'cd /go/src/github.com/weaveworks/tcptracer-bpf/tests && make'
15 | sudo chown $(shell id -u):$(shell id -u) ./tracer
16 |
--------------------------------------------------------------------------------
/tests/multiple_connections_refused.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | readonly nonlistening_port=65530
4 |
5 | for _ in $(seq 1 "$1"); do
6 | wget -q http://127.0.0.1:"${nonlistening_port}" &>/dev/null
7 | done
8 |
9 | exit 0
10 |
--------------------------------------------------------------------------------
/tests/run:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | if [[ $EUID -ne 0 ]]; then
6 | echo "root required - aborting" >&2
7 | exit 1
8 | fi
9 |
10 | test_pid=-1
11 |
12 | function shutdown() {
13 | if [[ $test_pid -ne -1 ]]; then
14 | kill $test_pid 2>/dev/null || true
15 | fi
16 | }
17 |
18 | trap shutdown EXIT
19 |
20 | timeout 150 ./test.sh &
21 | test_pid=$!
22 | wait $test_pid
23 |
24 | exit $?
25 |
--------------------------------------------------------------------------------
/tests/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | if [[ $EUID -ne 0 ]]; then
6 | echo "root required - aborting" >&2
7 | exit 1
8 | fi
9 |
10 | readonly dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11 | readonly tracer="${dir}/tracer"
12 | readonly port=61111
13 | readonly port2=61112
14 | readonly netns=$(mktemp /tmp/tcptracer-bpf-test-netns-XXXXXXXX)
15 | readonly tracer_output=$(mktemp /tmp/tcptracer-bpf-test-stdout-XXXXXXXX)
16 | exec 3<> <(tail --pid "$$" -f "${tracer_output}")
17 | server_pid=-1
18 | server2_pid=-1
19 | tracer_pid=-1
20 |
21 | function shutdown() {
22 | if [[ $server_pid -ne -1 ]]; then
23 | kill $server_pid 2>/dev/null || true
24 | fi
25 | if [[ $server2_pid -ne -1 ]]; then
26 | kill $server2_pid 2>/dev/null || true
27 | fi
28 | if [[ $tracer_pid -ne -1 ]]; then
29 | kill $tracer_pid 2>/dev/null || true
30 | fi
31 | exec 3>&-
32 | rm "${tracer_output}"
33 | umount -f "${netns}"
34 | rm "${netns}"
35 | }
36 |
37 | trap shutdown EXIT
38 |
39 | uname -r
40 |
41 | unshare --net="${netns}" ip link set lo up
42 |
43 | # start a process in the accept syscall to test
44 | # https://github.com/weaveworks/tcptracer-bpf/issues/10
45 | nsenter --net="${netns}" busybox nc -l -p "${port2}" &
46 | server2_pid=$!
47 |
48 | # start the tracer in the test's network namespace; its output is
49 | # written to ${tracer_output} and followed through fd 3
50 | nsenter --net="${netns}" "${tracer}" >"${tracer_output}" &
51 | tracer_pid=$!
52 |
53 | sleep 1 # wait for tracer to load
54 |
55 | # stop and fail here when tracer encountered an error and didn't start
56 | ps -p "$tracer_pid" >/dev/null
57 |
58 | # generate some refused connections to test for
59 | # https://github.com/weaveworks/tcptracer-bpf/issues/21
60 | nsenter --net="${netns}" ./multiple_connections_refused.sh "1200"
61 |
61 |
62 | nsenter --net="${netns}" nc -l "${port}" &
63 | server_pid=$!
--------------------------------------------------------------------------------
/tests/tracer.go:
--------------------------------------------------------------------------------
39 | if t.lastTimestampV4 > e.Timestamp {
40 | fmt.Printf("ERROR: late event!\n")
41 | os.Exit(TCP_EVENT_LATE)
42 | }
43 |
44 | t.lastTimestampV4 = e.Timestamp
45 | }
46 |
47 | func (t *tcpEventTracer) TCPEventV6(e tracer.TcpV6) {
48 | fmt.Printf("%v cpu#%d %s %v %s %v:%v %v:%v %v\n",
49 | e.Timestamp, e.CPU, e.Type, e.Pid, e.Comm, e.SAddr, e.SPort, e.DAddr, e.DPort, e.NetNS)
50 |
51 | if t.lastTimestampV6 > e.Timestamp {
52 | fmt.Printf("ERROR: late event!\n")
53 | os.Exit(TCP_EVENT_LATE)
54 | }
55 |
56 | t.lastTimestampV6 = e.Timestamp
57 | }
58 |
59 | func (t *tcpEventTracer) LostV4(count uint64) {
60 | fmt.Printf("ERROR: lost %d events!\n", count)
61 | os.Exit(TCP_EVENTS_LOST)
62 | }
63 |
64 | func (t *tcpEventTracer) LostV6(count uint64) {
65 | fmt.Printf("ERROR: lost %d events!\n", count)
66 | os.Exit(TCP_EVENTS_LOST)
67 | }
68 |
69 | func init() {
70 | flag.StringVar(&watchFdInstallPids, "monitor-fdinstall-pids", "", "a comma-separated list of pids that need to be monitored for fdinstall events")
71 |
72 | flag.Parse()
73 | }
74 |
75 | func main() {
76 | if flag.NArg() > 1 {
77 | flag.Usage()
78 | os.Exit(BAD_ARGUMENTS)
79 | }
80 |
81 | t, err := tracer.NewTracer(&tcpEventTracer{})
82 | if err != nil {
83 | fmt.Fprintf(os.Stderr, "%v\n", err)
84 | os.Exit(TRACER_INSERT_FAILED)
85 | }
86 |
87 | t.Start()
88 |
89 | for _, p := range strings.Split(watchFdInstallPids, ",") {
90 | if p == "" {
91 | continue
92 | }
93 |
94 | pid, err := strconv.ParseUint(p, 10, 32)
95 | if err != nil {
96 | fmt.Fprintf(os.Stderr, "Invalid pid: %v\n", err)
97 | os.Exit(PROCESS_NOT_FOUND)
98 | }
99 | fmt.Printf("Monitor fdinstall events for pid %d\n", pid)
100 | t.AddFdInstallWatcher(uint32(pid))
101 | }
102 |
103 | sig := make(chan os.Signal, 1)
104 | signal.Notify(sig, os.Interrupt, os.Kill)
105 |
106 | <-sig
107 | t.Stop()
108 | }
109 |
--------------------------------------------------------------------------------
/tools/.gitignore:
--------------------------------------------------------------------------------
1 | cover/cover
2 | socks/proxy
3 | socks/image.tar
4 | runner/runner
5 | *.pyc
6 | *~
7 | terraform.tfstate
8 | terraform.tfstate.backup
9 | *.retry
10 | build/**/.uptodate
11 |
--------------------------------------------------------------------------------
/tools/README.md:
--------------------------------------------------------------------------------
1 | # Weaveworks Build Tools
2 |
3 | Included in this repo are tools shared by weave.git and scope.git. They include
4 |
5 | - ```build```: a set of docker base-images for building weave
6 | projects. These should be used instead of giving each project its
7 | own build image.
8 | - ```provisioning```: a set of Terraform scripts to provision virtual machines in GCP, AWS or Digital Ocean.
9 | - ```config_management```: a set of Ansible playbooks to configure virtual machines for development, testing, etc.
10 | - ```cover```: a tool which merges overlapping coverage reports generated by go
11 | test
12 | - ```files-with-type```: a tool to search directories for files of a given
13 | MIME type
14 | - ```lint```: a script to lint go, sh and hcl files; runs various tools like
15 | golint, go vet, errcheck, shellcheck etc
16 | - ```rebuild-image```: a script to rebuild docker images when their input files
17 | change; useful when you are using docker images to build your software, but you
18 | don't want to build the image every time.
19 | - ```shell-lint```: a script to lint multiple shell files with
20 | [shellcheck](http://www.shellcheck.net/)
21 | - ```socks```: a simple, dockerised SOCKS proxy for getting your laptop onto
22 | the Weave network
23 | - ```test```: a script to run all go unit tests in subdirectories, gather the
24 | coverage results, and merge them into a single report.
25 | - ```runner```: a tool for running tests in parallel; given that each test is
26 | suffixed with the number of hosts it requires, and that the available hosts
27 | are listed in the environment variable HOSTS, the tool will run tests in
28 | parallel, on different hosts.
29 | - ```scheduler```: an appengine application that can be used to distribute
30 | tests across different shards in CircleCI.
31 |
32 | ## Requirements
33 |
34 | - ```lint``` requires shfmt to lint sh files; get shfmt with
35 | ```go get -u gopkg.in/mvdan/sh.v1/cmd/shfmt```
36 |
37 | ## Using build-tools.git
38 |
39 | To allow you to tie your code to a specific version of build-tools.git, such
40 | that future changes don't break you, we recommend that you [`git subtree`]
41 | this repository into your own repository:
42 |
43 | [`git subtree`]: http://blogs.atlassian.com/2013/05/alternatives-to-git-submodule-git-subtree/
44 |
45 | ```
46 | git subtree add --prefix tools https://github.com/weaveworks/build-tools.git master --squash
47 | ````
48 |
49 | To update the code in build-tools.git, the process is therefore:
50 | - PR into build-tools.git, go through normal review process etc.
51 | - Do `git subtree pull --prefix tools https://github.com/weaveworks/build-tools.git master --squash`
52 | in your repo, and PR that.
53 |
--------------------------------------------------------------------------------
/tools/build/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all clean images
2 | .DEFAULT_GOAL := all
3 |
4 | # Boilerplate for building Docker containers.
5 | # All this must go at top of file I'm afraid.
6 | IMAGE_PREFIX := quay.io/weaveworks/build-
7 | IMAGE_TAG := $(shell ../image-tag)
8 | UPTODATE := .uptodate
9 |
10 | # Every directory with a Dockerfile in it builds an image called
11 | # $(IMAGE_PREFIX)<dirname>. Dependencies (i.e. things that go in the image)
12 | # still need to be explicitly declared.
13 | %/$(UPTODATE): %/Dockerfile %/*
14 | $(SUDO) docker build -t $(IMAGE_PREFIX)$(shell basename $(@D)) $(@D)/
15 | $(SUDO) docker tag $(IMAGE_PREFIX)$(shell basename $(@D)) $(IMAGE_PREFIX)$(shell basename $(@D)):$(IMAGE_TAG)
16 | touch $@
17 |
18 | # Get a list of directories containing Dockerfiles
19 | DOCKERFILES := $(shell find . -name tools -prune -o -name vendor -prune -o -type f -name 'Dockerfile' -print)
20 | UPTODATE_FILES := $(patsubst %/Dockerfile,%/$(UPTODATE),$(DOCKERFILES))
21 | DOCKER_IMAGE_DIRS := $(patsubst %/Dockerfile,%,$(DOCKERFILES))
22 | IMAGE_NAMES := $(foreach dir,$(DOCKER_IMAGE_DIRS),$(patsubst %,$(IMAGE_PREFIX)%,$(shell basename $(dir))))
23 | images:
24 | $(info $(IMAGE_NAMES))
25 | @echo > /dev/null
26 |
27 | # Define imagetag-golang, etc, for each image, which parses the dockerfile and
28 | # prints an image tag. For example:
29 | # FROM golang:1.8.1-stretch
30 | # in the "foo/Dockerfile" becomes:
31 | # $ make imagetag-foo
32 | # 1.8.1-stretch
33 | define imagetag_dep
34 | .PHONY: imagetag-$(1)
35 | $(patsubst $(IMAGE_PREFIX)%,imagetag-%,$(1)): $(patsubst $(IMAGE_PREFIX)%,%,$(1))/Dockerfile
36 | @cat $$< | grep "^FROM " | head -n1 | sed 's/FROM \(.*\):\(.*\)/\2/'
37 | endef
38 | $(foreach image, $(IMAGE_NAMES), $(eval $(call imagetag_dep, $(image))))
39 |
40 | all: $(UPTODATE_FILES)
41 |
42 | clean:
43 | $(SUDO) docker rmi $(IMAGE_NAMES) >/dev/null 2>&1 || true
44 | rm -rf $(UPTODATE_FILES)
45 |
46 |
47 |
--------------------------------------------------------------------------------
/tools/build/golang/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.8.0-stretch
2 | RUN apt-get update && \
3 | apt-get install -y \
4 | curl \
5 | file \
6 | git \
7 | jq \
8 | libprotobuf-dev \
9 | make \
10 | protobuf-compiler \
11 | python-pip \
12 | python-requests \
13 | python-yaml \
14 | unzip && \
15 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
16 | RUN pip install attrs pyhcl
17 | RUN curl -fsSLo shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \
18 | echo "b1925c2c405458811f0c227266402cf1868b4de529f114722c2e3a5af4ac7bb2 shfmt" | sha256sum -c && \
19 | chmod +x shfmt && \
20 | mv shfmt /usr/bin
21 | RUN go clean -i net && \
22 | go install -tags netgo std && \
23 | go install -race -tags netgo std
24 | RUN go get -tags netgo \
25 | github.com/FiloSottile/gvt \
26 | github.com/client9/misspell/cmd/misspell \
27 | github.com/fatih/hclfmt \
28 | github.com/fzipp/gocyclo \
29 | github.com/gogo/protobuf/gogoproto \
30 | github.com/gogo/protobuf/protoc-gen-gogoslick \
31 | github.com/golang/dep/... \
32 | github.com/golang/lint/golint \
33 | github.com/golang/protobuf/protoc-gen-go \
34 | github.com/kisielk/errcheck \
35 | github.com/mjibson/esc \
36 | github.com/prometheus/prometheus/cmd/promtool && \
37 | rm -rf /go/pkg /go/src
38 | RUN mkdir protoc && \
39 | cd protoc && \
40 | curl -O -L https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip && \
41 | unzip protoc-3.1.0-linux-x86_64.zip && \
42 | cp bin/protoc /usr/bin/ && \
43 | chmod o+x /usr/bin/protoc && \
44 | cd .. && \
45 | rm -rf protoc
46 | RUN mkdir -p /var/run/secrets/kubernetes.io/serviceaccount && \
47 | touch /var/run/secrets/kubernetes.io/serviceaccount/token
48 | COPY build.sh /
49 | ENTRYPOINT ["/build.sh"]
50 |
--------------------------------------------------------------------------------
/tools/build/golang/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -eu
4 |
5 | if [ -n "${SRC_NAME:-}" ]; then
6 | SRC_PATH=${SRC_PATH:-$GOPATH/src/$SRC_NAME}
7 | elif [ -z "${SRC_PATH:-}" ]; then
8 | echo "Must set either \$SRC_NAME or \$SRC_PATH."
9 | exit 1
10 | fi
11 |
12 | # If we run make directly, any files created on the bind mount
13 | # will have awkward ownership. So we switch to a user with the
14 | # same user and group IDs as the source directory. We have to set a
15 | # few things up so that sudo works without complaining later on.
16 | uid=$(stat --format="%u" "$SRC_PATH")
17 | gid=$(stat --format="%g" "$SRC_PATH")
18 | echo "weave:x:$uid:$gid::$SRC_PATH:/bin/sh" >>/etc/passwd
19 | echo "weave:*:::::::" >>/etc/shadow
20 | echo "weave ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers
21 |
22 | su weave -c "PATH=$PATH make -C $SRC_PATH BUILD_IN_CONTAINER=false $*"
23 |
--------------------------------------------------------------------------------
/tools/build/haskell/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM fpco/stack-build:lts-8.9
2 | COPY build.sh /
3 | COPY copy-libraries /usr/local/bin/
4 | ENTRYPOINT ["/build.sh"]
5 |
--------------------------------------------------------------------------------
/tools/build/haskell/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # Build a static Haskell binary using stack.
4 |
5 | set -eu
6 |
7 | if [ -z "${SRC_PATH:-}" ]; then
8 | echo "Must set \$SRC_PATH."
9 | exit 1
10 | fi
11 |
12 | make -C "$SRC_PATH" BUILD_IN_CONTAINER=false "$@"
13 |
--------------------------------------------------------------------------------
/tools/build/haskell/copy-libraries:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copy dynamically linked libraries for a binary, so we can assemble a Docker
4 | # image.
5 | #
6 | # Run with:
7 | # copy-libraries /path/to/binary /output/dir
8 | #
9 | # Dependencies:
10 | # - awk
11 | # - cp
12 | # - grep
13 | # - ldd
14 | # - mkdir
15 |
16 | set -o errexit
17 | set -o nounset
18 | set -o pipefail
19 |
20 | # Path to a Linux binary that we're going to run in the container.
21 | binary_path="${1}"
22 | # Path to directory to write the output to.
23 | output_dir="${2}"
24 |
25 | exe_name=$(basename "${binary_path}")
26 |
27 | # Identify linked libraries.
28 | libraries=($(ldd "${binary_path}" | awk '{print $(NF-1)}' | grep -v '=>'))
29 | # Add /bin/sh, which we need for Docker imports.
30 | libraries+=('/bin/sh')
31 |
32 | mkdir -p "${output_dir}"
33 |
34 | # Copy executable and all needed libraries into temporary directory.
35 | cp "${binary_path}" "${output_dir}/${exe_name}"
36 | for lib in "${libraries[@]}"; do
37 | mkdir -p "${output_dir}/$(dirname "$lib")"
38 | # Need -L to make sure we get actual libraries & binaries, not symlinks to
39 | # them.
40 | cp -L "${lib}" "${output_dir}/${lib}"
41 | done
42 |
--------------------------------------------------------------------------------
/tools/circle.yml:
--------------------------------------------------------------------------------
1 | machine:
2 | services:
3 | - docker
4 | environment:
5 | GOPATH: /home/ubuntu
6 | SRCDIR: /home/ubuntu/src/github.com/weaveworks/tools
7 | PATH: $PATH:$HOME/bin
8 |
9 | dependencies:
10 | post:
11 | - sudo chmod a+wr --recursive /usr/local/go/pkg
12 | - go clean -i net
13 | - go install -tags netgo std
14 | - mkdir -p $(dirname $SRCDIR)
15 | - cp -r $(pwd)/ $SRCDIR
16 | - |
17 | curl -fsSLo shfmt https://github.com/mvdan/sh/releases/download/v1.3.0/shfmt_v1.3.0_linux_amd64 && \
18 | echo "b1925c2c405458811f0c227266402cf1868b4de529f114722c2e3a5af4ac7bb2 shfmt" | sha256sum -c && \
19 | chmod +x shfmt && \
20 | sudo mv shfmt /usr/bin
21 | - |
22 | cd $SRCDIR;
23 | go get \
24 | github.com/fzipp/gocyclo \
25 | github.com/golang/lint/golint \
26 | github.com/kisielk/errcheck \
27 | github.com/fatih/hclfmt
28 |
29 | test:
30 | override:
31 | - cd $SRCDIR; ./lint .
32 | - cd $SRCDIR/cover; make
33 | - cd $SRCDIR/socks; make
34 | - cd $SRCDIR/runner; make
35 | - cd $SRCDIR/build; make
36 |
37 | deployment:
38 | snapshot:
39 | branch: master
40 | commands:
41 | - docker login -e "$DOCKER_REGISTRY_EMAIL" -u "$DOCKER_REGISTRY_USER" -p "$DOCKER_REGISTRY_PASS" "$DOCKER_REGISTRY_URL"
42 | - |
43 | cd $SRCDIR/build;
44 | for image in $(make images); do
45 | # Tag the built images with the revision of this repo.
46 | docker push "${image}:${GIT_TAG}"
47 |
48 | # Tag the built images with something derived from the base images in
49 | # their respective Dockerfiles. So "FROM golang:1.8.0-stretch" as a
50 | # base image would lead to a tag of "1.8.0-stretch"
51 | IMG_TAG=$(make "imagetag-${image#quay.io/weaveworks/build-}")
52 | docker tag "${image}:latest" "${image}:${IMG_TAG}"
53 | docker push "${image}:${IMG_TAG}"
54 | done
55 |
--------------------------------------------------------------------------------
/tools/config_management/README.md:
--------------------------------------------------------------------------------
1 | # Weaveworks configuration management
2 |
3 | ## Introduction
4 |
5 | This project allows you to configure a machine with:
6 |
7 | * Docker and Weave Net for development: `setup_weave-net_dev.yml`
8 | * Docker and Weave Net for testing: `setup_weave-net_test.yml`
9 | * Docker, Kubernetes and Weave Kube (CNI plugin): `setup_weave-kube.yml`
10 |
11 | You can then use these environments for development, testing and debugging.
12 |
13 | ## Set up
14 |
15 | You will need [Python](https://www.python.org/downloads/) and [Ansible 2.+](http://docs.ansible.com/ansible/intro_installation.html) installed on your machine and added to your `PATH` in order to be able to configure environments automatically.
16 |
17 | * On any platform, if you have Python installed: `pip install ansible`
18 | * On macOS: `brew install ansible`
19 | * On Linux (via APT): `sudo apt install ansible`
20 | * On Linux (via YUM): `sudo yum install ansible`
21 | * For other platforms or more details, see [here](http://docs.ansible.com/ansible/intro_installation.html)
22 |
23 | Frequent errors during installation are:
24 |
25 | * `fatal error: Python.h: No such file or directory`: install `python-dev`
26 | * `fatal error: ffi.h: No such file or directory`: install `libffi-dev`
27 | * `fatal error: openssl/opensslv.h: No such file or directory`: install `libssl-dev`
28 |
29 | Full steps for a blank Ubuntu/Debian Linux machine:
30 |
31 | sudo apt-get install -qq -y python-pip python-dev libffi-dev libssl-dev
32 | sudo pip install -U cffi
33 | sudo pip install ansible
34 |
35 | ## Tags
36 |
37 | These can be used to selectively run (`--tags "tag1,tag2"`) or skip (`--skip-tags "tag1,tag2"`) tasks.
38 |
39 | * `output`: print potentially useful output from hosts (e.g. output of `kubectl get pods --all-namespaces`)
40 |
41 | ## Usage
42 |
43 | ### Local machine
44 |
45 | ```
46 | ansible-playbook -u <username> -i "localhost", -c local setup_weave-kube.yml
47 | ```
48 |
49 | ### Vagrant
50 |
51 | Provision your local VM using Vagrant:
52 |
53 | ```
54 | cd $(mktemp -d -t XXX)
55 | vagrant init ubuntu/xenial64 # or, e.g. centos/7
56 | vagrant up
57 | ```
58 |
59 | then set the following environment variables by extracting the output of `vagrant ssh-config`:
60 |
61 | ```
62 | eval $(vagrant ssh-config | sed \
63 | -ne 's/\ *HostName /vagrant_ssh_host=/p' \
64 | -ne 's/\ *User /vagrant_ssh_user=/p' \
65 | -ne 's/\ *Port /vagrant_ssh_port=/p' \
66 | -ne 's/\ *IdentityFile /vagrant_ssh_id_file=/p')
67 | ```
68 |
69 | and finally run:
70 |
71 | ```
72 | ansible-playbook --private-key=$vagrant_ssh_id_file -u $vagrant_ssh_user \
73 | --ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \
74 | -i "$vagrant_ssh_host:$vagrant_ssh_port," setup_weave-kube.yml
75 | ```
76 |
77 | or, for specific versions of Kubernetes and Docker:
78 |
79 | ```
80 | ansible-playbook --private-key=$vagrant_ssh_id_file -u $vagrant_ssh_user \
81 | --ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \
82 | -i "$vagrant_ssh_host:$vagrant_ssh_port," setup_weave-kube.yml \
83 | --extra-vars "docker_version=1.12.3 kubernetes_version=1.4.4"
84 | ```
85 |
86 | NOTE: the Kubernetes APT repository only serves the latest version, so
87 | attempting to install an older version will currently fail.
88 |
89 | ### Terraform
90 |
91 | Provision your machine using the Terraform scripts from `../provisioning`, then run:
92 |
93 | ```
94 | terraform output ansible_inventory > /tmp/ansible_inventory
95 | ```
96 |
97 | and
98 |
99 | ```
100 | ansible-playbook \
101 | --private-key="$(terraform output private_key_path)" \
102 | -u "$(terraform output username)" \
103 | -i /tmp/ansible_inventory \
104 | --ssh-extra-args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \
105 | ../../config_management/setup_weave-kube.yml
106 |
107 | ```
108 |
109 | To specify versions of Kubernetes and Docker see Vagrant examples above.
110 |
111 | N.B.: `--ssh-extra-args` is used to provide:
112 |
113 | * `StrictHostKeyChecking=no`: as VMs come and go, the same IP can be used by a different machine, so checking the host's SSH key may fail. Note that this introduces a risk of a man-in-the-middle attack.
114 | * `UserKnownHostsFile=/dev/null`: if you previously connected to a VM with the same IP but a different public key, and that key was added to `~/.ssh/known_hosts`, SSH would refuse to connect; pointing it at `/dev/null` instead of `~/.ssh/known_hosts` avoids this.
115 |
116 | ## Resources
117 |
118 | * [https://www.vagrantup.com/docs/provisioning/ansible.html](https://www.vagrantup.com/docs/provisioning/ansible.html)
119 | * [http://docs.ansible.com/ansible/guide_vagrant.html](http://docs.ansible.com/ansible/guide_vagrant.html)
120 |
--------------------------------------------------------------------------------
/tools/config_management/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | go_version: 1.8.1
3 | terraform_version: 0.8.5
4 | docker_version: 1.11.2
5 | docker_install_role: 'docker-from-get.docker.com'
6 | kubernetes_version: 1.6.1
7 | kubernetes_cni_version: 0.5.1
8 | kubernetes_token: '123456.0123456789123456'
9 | etcd_container_version: 2.2.5
10 | kube_discovery_container_version: 1.0
11 | pause_container_version: 3.0
12 |
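These defaults can be overridden per run with `--extra-vars`, as in the README's Vagrant example (values here are illustrative):

```
ansible-playbook -i "localhost," -c local setup_weave-kube.yml \
  --extra-vars "docker_version=1.12.3 kubernetes_version=1.4.4"
```
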
--------------------------------------------------------------------------------
/tools/config_management/library/setup_ansible_dependencies.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################################################################################
3 | # Install Ansible's dependencies: python and lsb_release, required respectively
4 | # to run Ansible modules and gather Ansible facts.
5 | #
6 | # See also:
7 | # - http://docs.ansible.com/ansible/intro_installation.html#managed-node-requirements
8 | # - http://docs.ansible.com/ansible/setup_module.html
9 | ################################################################################
10 |
11 | - name: check if python is installed (as required by ansible modules)
12 | raw: test -e /usr/bin/python
13 | register: is_python_installed
14 | failed_when: is_python_installed.rc not in [0, 1]
15 | changed_when: false # never mutates state.
16 |
17 | - name: install python if missing (as required by ansible modules)
18 | when: is_python_installed|failed # skip otherwise
19 | raw: (test -e /usr/bin/apt-get && apt-get update && apt-get install -y python-minimal) || (test -e /usr/bin/yum && yum update -y && yum install -y python)
20 | changed_when: is_python_installed.rc == 1
21 |
22 | - name: check if lsb_release is installed (as required for ansible facts)
23 | raw: test -e /usr/bin/lsb_release
24 | register: is_lsb_release_installed
25 | failed_when: is_lsb_release_installed.rc not in [0, 1]
26 | changed_when: false # never mutates state.
27 |
28 | - name: install lsb_release if missing (as required for ansible facts)
29 | when: is_lsb_release_installed|failed # skip otherwise
30 | raw: (test -e /usr/bin/apt-get && apt-get install -y lsb-release) || (test -e /usr/bin/yum && yum install -y redhat-lsb-core)
31 | changed_when: is_lsb_release_installed.rc == 1
32 |
33 | - setup: # gather 'facts', i.e. compensates for 'gather_facts: false' in calling playbook.
34 |
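A rough shell equivalent of the check-then-install pattern used by each pair of `raw` tasks above (a sketch, not part of the playbook):

```
if ! test -e /usr/bin/python; then
  # Use whichever package manager exists on the host.
  (test -e /usr/bin/apt-get && apt-get update && apt-get install -y python-minimal) \
    || (test -e /usr/bin/yum && yum install -y python)
fi
```
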
--------------------------------------------------------------------------------
/tools/config_management/roles/dev-tools/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Set up Development Environment.
3 |
4 | - name: install development tools
5 | package:
6 | name: "{{ item }}"
7 | state: present
8 | with_items:
9 | # weave net dependencies
10 | - make
11 | - vagrant
12 | # ansible dependencies
13 | - python-pip
14 | - python-dev
15 | - libffi-dev
16 | - libssl-dev
17 | # terraform dependencies
18 | - unzip
19 | # other potentially useful tools:
20 | - aufs-tools
21 | - ethtool
22 | - iputils-arping
23 | - libpcap-dev
24 | - git
25 | - mercurial
26 | - bc
27 | - jq
28 |
29 | - name: install ansible
30 | pip:
31 | name: ansible
32 | state: present
33 |
34 | - name: install terraform
35 | unarchive:
36 | src: 'https://releases.hashicorp.com/terraform/{{ terraform_version }}/terraform_{{ terraform_version }}_linux_{{ {"x86_64": "amd64", "i386": "386"}[ansible_architecture] }}.zip'
37 | remote_src: yes
38 | dest: /usr/bin
39 | mode: 0555
40 | creates: /usr/bin/terraform
41 |
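With the defaults from `group_vars/all` (`terraform_version: 0.8.5`) on an x86_64 host, the `unarchive` task above resolves to a download like the following (shown here as an equivalent manual fetch):

```
curl -fsSL -O https://releases.hashicorp.com/terraform/0.8.5/terraform_0.8.5_linux_amd64.zip
```
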
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-configuration/files/docker.conf:
--------------------------------------------------------------------------------
1 | [Service]
2 | ExecStart=
3 | ExecStart=/usr/bin/docker daemon -H fd:// -H unix:///var/run/alt-docker.sock -H tcp://0.0.0.0:2375 -s overlay --insecure-registry "weave-ci-registry:5000"
4 |
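Once this drop-in is installed and the daemon restarted, Docker listens on the default socket, an alternate socket, and TCP port 2375; a quick way to check the TCP endpoint (assuming you query the machine itself):

```
docker -H tcp://127.0.0.1:2375 version
```
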
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-configuration/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Configure Docker
3 | # See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install
4 |
5 | - name: ensure docker group is present (or create it)
6 | group:
7 | name: docker
8 | state: present
9 |
10 | - name: add user to docker group (avoids sudo-ing)
11 | user:
12 | name: "{{ ansible_user }}"
13 | group: docker
14 | state: present
15 |
16 | - name: ensure docker's systemd directory exists
17 | file:
18 | path: /etc/systemd/system/docker.service.d
19 | state: directory
20 | recurse: yes
21 | when: ansible_os_family != "RedHat"
22 |
23 | - name: enable docker remote api over tcp
24 | copy:
25 | src: "{{ role_path }}/files/docker.conf"
26 | dest: /etc/systemd/system/docker.service.d/docker.conf
27 | register: docker_conf
28 | when: ansible_os_family != "RedHat"
29 |
30 | - name: restart docker service
31 | systemd:
32 | name: docker
33 | state: restarted
34 | daemon_reload: yes # ensure docker.conf is picked up.
35 | enabled: yes
36 | when: docker_conf.changed or ansible_os_family == "RedHat"
37 |
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-from-docker-ce-repo/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Docker installation from Docker's Community Edition (CE) repository for CentOS
2 | # See also: https://docs.docker.com/engine/installation/linux/centos/
3 |
4 | - name: remove all potentially pre-existing packages
5 | yum:
6 | name: '{{ item }}'
7 | state: absent
8 | with_items:
9 | - docker
10 | - docker-common
11 | - container-selinux
12 | - docker-selinux
13 | - docker-engine
14 |
15 | - name: install yum-utils
16 | yum:
17 | name: yum-utils
18 | state: present
19 |
20 | - name: add docker ce repo
21 | command: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
22 |
23 | # Note that Docker CE versions do not follow regular Docker versions, but look
24 | # like, for example: "17.03.0.el7"
25 | - name: install docker
26 | yum:
27 | name: 'docker-ce-{{ docker_version }}'
28 | update_cache: yes
29 | state: present
30 |
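Given the version-format note above, the equivalent manual install on CentOS would look like this (version shown is illustrative):

```
sudo yum install -y docker-ce-17.03.0.el7
```
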
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-from-docker-repo/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Debian / Ubuntu specific:
3 |
4 | - name: install dependencies for docker repository
5 | package:
6 | name: "{{ item }}"
7 | state: present
8 | with_items:
9 | - apt-transport-https
10 | - ca-certificates
11 |
12 | - name: add apt key for the docker repository
13 | apt_key:
14 | keyserver: hkp://ha.pool.sks-keyservers.net:80
15 | id: 58118E89F3A912897C070ADBF76221572C52609D
16 | state: present
17 | register: apt_key_docker_repo
18 |
19 | - name: add docker's apt repository ({{ ansible_distribution | lower }}-{{ ansible_distribution_release }})
20 | apt_repository:
21 | repo: deb https://apt.dockerproject.org/repo {{ ansible_distribution | lower }}-{{ ansible_distribution_release }} main
22 | state: present
23 | register: apt_docker_repo
24 |
25 | - name: update apt's cache
26 | apt:
27 | update_cache: yes
28 | when: apt_key_docker_repo.changed or apt_docker_repo.changed
29 |
30 | - name: install docker-engine
31 | package:
32 | name: "{{ item }}"
33 | state: present
34 | with_items:
35 | - docker-engine={{ docker_version }}*
36 |
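The `docker-engine={{ docker_version }}*` pin expands to an APT version glob; with the default `docker_version: 1.11.2` from `group_vars/all`, the equivalent manual command is:

```
sudo apt-get install -y docker-engine=1.11.2*
```
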
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-from-docker-repo/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Set up Docker
3 | # See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install
4 |
5 | # Distribution-specific tasks:
6 | - include: debian.yml
7 | when: ansible_os_family == "Debian"
8 |
9 | - include: redhat.yml
10 | when: ansible_os_family == "RedHat"
11 |
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-from-docker-repo/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # RedHat / CentOS specific:
3 |
4 | - name: add docker's yum repository (centos/{{ ansible_lsb.major_release }})
5 | yum_repository:
6 | name: docker
7 | description: Docker YUM repo
8 | file: external_repos
9 | baseurl: https://yum.dockerproject.org/repo/main/centos/{{ ansible_lsb.major_release }}
10 | enabled: yes
11 | gpgkey: https://yum.dockerproject.org/gpg
12 | gpgcheck: yes
13 | state: present
14 |
15 | - name: update yum's cache
16 | yum:
17 | name: "*"
18 | update_cache: yes
19 |
20 | - name: install docker-engine
21 | package:
22 | name: "{{ item }}"
23 | state: present
24 | with_items:
25 | - docker-engine-{{ docker_version }}
26 |
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-from-get.docker.com/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Debian / Ubuntu specific:
3 |
4 | - name: apt-import gpg key for the docker repository
5 | shell: curl -sSL https://get.docker.com/gpg | sudo apt-key add -
6 |
7 | - name: install docker
8 | shell: 'curl -sSL https://get.docker.com/ | sed -e s/docker-engine/docker-engine={{ docker_version }}*/ | sh'
9 |
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-from-get.docker.com/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Set up Docker
3 | # See also: legacy gce.sh script
4 |
5 | # Distribution-specific tasks:
6 | - include: debian.yml
7 | when: ansible_os_family == "Debian"
8 |
9 | - include: redhat.yml
10 | when: ansible_os_family == "RedHat"
11 |
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-from-get.docker.com/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # RedHat / CentOS specific:
3 |
4 | - name: rpm-import gpg key for the docker repository
5 | shell: curl -sSLo /tmp/docker.gpg https://get.docker.com/gpg && sudo rpm --import /tmp/docker.gpg
6 |
7 | - name: install docker
8 | shell: 'curl -sSL https://get.docker.com/ | sed -e s/docker-engine/docker-engine-{{ docker_version }}*/ | sh'
9 |
10 | - name: wait for docker installation to complete
11 | shell: yum install -y yum-utils && yum-complete-transaction
12 |
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-from-tarball/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Set up Docker
3 | # See also:
4 | # - https://docs.docker.com/engine/installation/linux/ubuntulinux/#install
5 | # - https://github.com/docker/docker/releases
6 |
7 | - include_role:
8 | name: docker-prerequisites
9 |
10 | - name: install daemon
11 | package:
12 | name: daemon
13 | state: present
14 |
15 | - name: 'create directory {{ docker_dir }}/{{ docker_version }}'
16 | file:
17 | path: '{{ docker_dir }}/{{ docker_version }}'
18 | state: directory
19 | mode: 0755
20 |
21 | - name: download and extract docker
22 | unarchive:
23 | src: 'https://get.docker.com/builds/Linux/x86_64/docker-{{ docker_version }}.tgz'
24 | remote_src: yes
25 | dest: '{{ docker_dir }}/{{ docker_version }}'
26 | extra_opts: '--strip-components=1'
27 | mode: 0555
28 | creates: '{{ docker_dir }}/{{ docker_version }}/docker'
29 |
30 | - name: create symlink to current version
31 | file:
32 | src: '{{ docker_dir }}/{{ docker_version }}'
33 | dest: '{{ docker_dir }}/current'
34 | state: link
35 | mode: 0555
36 |
37 | - name: list all files to symlink
38 | find:
39 | paths: '{{ docker_dir }}/current'
40 | file_type: file
41 | register: binaries
42 | changed_when: false
43 |
44 | - name: create symlinks to all binaries
45 | file:
46 | src: '{{ item }}'
47 | dest: /usr/bin/{{ item | basename }}
48 | state: link
49 | with_items: "{{ binaries.files | map(attribute='path') | list }}"
50 |
51 | - name: killall docker
52 | command: killall docker
53 | register: killall
54 | failed_when: false
55 | changed_when: killall.rc == 0
56 |
57 | - name: start dockerd
58 | command: daemon -- /usr/bin/dockerd
59 |
60 | - include_role:
61 | name: docker-configuration
62 |
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-from-tarball/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | docker_dir: '/opt/docker'
3 | docker_url: '{{ "rc" in {{ docker_version }} | ternary( >
4 | "https://test.docker.com/builds/Linux/x86_64/docker-{{ docker_version }}.tgz", >
5 | "https://get.docker.com/builds/Linux/x86_64/docker-{{ docker_version }}.tgz") }}'
6 |
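A shell sketch of how the ternary above resolves for two example versions (release candidates come from test.docker.com, regular releases from get.docker.com):

```
for v in 1.11.2 1.12.0-rc1; do
  case "$v" in
    *rc*) echo "https://test.docker.com/builds/Linux/x86_64/docker-$v.tgz" ;;
    *)    echo "https://get.docker.com/builds/Linux/x86_64/docker-$v.tgz" ;;
  esac
done
```
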
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-install/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Set up Docker
3 |
4 | - include_role:
5 | name: docker-prerequisites
6 |
7 | # Dynamically include docker installation role using 'when' as Ansible does not
8 | # allow for include_role's name to be set to a variable. Indeed:
9 | # - include_role:
10 | # name: '{{ docker_install_role }}'
11 | # fails with:
12 | # ERROR! 'docker_install_role' is undefined
13 | - include_role:
14 | name: docker-from-docker-repo
15 | when: docker_install_role == 'docker-from-docker-repo'
16 |
17 | - include_role:
18 | name: docker-from-docker-ce-repo
19 | when: docker_install_role == 'docker-from-docker-ce-repo'
20 |
21 | - include_role:
22 | name: docker-from-get.docker.com
23 | when: docker_install_role == 'docker-from-get.docker.com'
24 |
25 | - include_role:
26 | name: docker-from-tarball
27 | when: docker_install_role == 'docker-from-tarball'
28 |
29 | - include_role:
30 | name: docker-configuration
31 |
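To select one of the four installation methods at run time, override `docker_install_role` (its default is set in `group_vars/all`); for example:

```
ansible-playbook -i "localhost," -c local setup_weave-net_dev.yml \
  --extra-vars "docker_install_role=docker-from-tarball"
```
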
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-prerequisites/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Install Docker's dependencies
3 | # See also: https://docs.docker.com/engine/installation/linux/ubuntulinux/#install
4 |
5 | - name: install linux-image-extra-*/virtual
6 | package:
7 | name: "{{ item }}"
8 | state: present
9 | with_items:
10 | - linux-image-extra-{{ ansible_kernel }}
11 | - linux-image-extra-virtual
12 |
--------------------------------------------------------------------------------
/tools/config_management/roles/docker-prerequisites/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Distribution-specific tasks:
4 | - include: debian.yml
5 | when: ansible_os_family == "Debian"
6 |
--------------------------------------------------------------------------------
/tools/config_management/roles/golang-from-tarball/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Set up Go.
3 |
4 | - name: install go
5 | unarchive:
6 | src: 'https://storage.googleapis.com/golang/go{{ go_version }}.linux-{{ {"x86_64": "amd64", "i386": "386"}[ansible_architecture] }}.tar.gz'
7 | remote_src: yes
8 | dest: /usr/local
9 | mode: 0777
10 | creates: /usr/local/go/bin/go
11 |
12 | - name: set go env. vars. and add go to path
13 | blockinfile:
14 | dest: '$HOME/.bashrc'
15 | block: |
16 | export PATH=$PATH:/usr/local/go/bin
17 | export GOPATH=$HOME
18 | state: present
19 | create: yes
20 | mode: 0644
21 | become: '{{ item }}'
22 | with_items:
23 | - true # Run as root
24 | - false # Run as SSH user
25 |
26 | - name: source ~/.bashrc from ~/.bash_profile
27 | lineinfile:
28 | dest: '$HOME/.bash_profile'
29 | line: '[ -r $HOME/.bashrc ] && source $HOME/.bashrc'
30 | state: present
31 | create: yes
32 | mode: 0644
33 | become: '{{ item }}'
34 | with_items:
35 | - true # Run as root
36 | - false # Run as SSH user
37 |
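After this role runs, a fresh login shell picks up the Go toolchain via `~/.bashrc`; with the default `go_version: 1.8.1` you would expect roughly:

```
go version        # go version go1.8.1 linux/amd64
echo "$GOPATH"    # $HOME
```
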
--------------------------------------------------------------------------------
/tools/config_management/roles/kubelet-stop/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: check if kubelet service exists
4 | stat:
5 | path: /etc/init.d/kubelet
6 | register: kubelet
7 |
8 | # avoids having weave-net and weave-kube conflict in some test cases (e.g. 130_expose_test.sh)
9 | - name: stop kubelet service
10 | systemd:
11 | name: kubelet
12 | state: stopped
13 | enabled: no
14 | when: kubelet.stat.exists
15 |
--------------------------------------------------------------------------------
/tools/config_management/roles/kubernetes-docker-images/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: docker pull images used by k8s tests
4 | docker_image:
5 | name: '{{ item }}'
6 | state: present
7 | with_items:
8 | - gcr.io/google_containers/etcd-amd64:{{ etcd_container_version }}
9 | - gcr.io/google_containers/kube-apiserver-amd64:v{{ kubernetes_version }}
10 | - gcr.io/google_containers/kube-controller-manager-amd64:v{{ kubernetes_version }}
11 | - gcr.io/google_containers/kube-proxy-amd64:v{{ kubernetes_version }}
12 | - gcr.io/google_containers/kube-scheduler-amd64:v{{ kubernetes_version }}
13 | - gcr.io/google_containers/kube-discovery-amd64:{{ kube_discovery_container_version }}
14 | - gcr.io/google_containers/pause-amd64:{{ pause_container_version }}
15 |
--------------------------------------------------------------------------------
/tools/config_management/roles/kubernetes-install/tasks/debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Debian / Ubuntu specific:
3 |
4 | - name: add apt key for the kubernetes repository
5 | apt_key:
6 | url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
7 | state: present
8 | register: apt_key_k8s_repo
9 |
10 | - name: add kubernetes' apt repository (kubernetes-{{ ansible_distribution_release }})
11 | apt_repository:
12 | repo: deb http://apt.kubernetes.io/ kubernetes-{{ ansible_distribution_release }} main
13 | state: present
14 | register: apt_k8s_repo
15 | when: '"alpha" not in kubernetes_version and "beta" not in kubernetes_version'
16 |
17 | - name: add kubernetes' apt repository (kubernetes-{{ ansible_distribution_release }}-unstable)
18 | apt_repository:
19 | repo: deb http://apt.kubernetes.io/ kubernetes-{{ ansible_distribution_release }}-unstable main
20 | state: present
21 | register: apt_k8s_repo
22 | when: '"alpha" in kubernetes_version or "beta" in kubernetes_version'
23 |
24 | - name: update apt's cache
25 | apt:
26 | update_cache: yes
27 | when: apt_key_k8s_repo.changed or apt_k8s_repo.changed
28 |
29 | - name: install kubelet and kubectl
30 | package:
31 | name: "{{ item }}"
32 | state: present
33 | with_items:
34 | - kubelet={{ kubernetes_version }}*
35 | - kubectl={{ kubernetes_version }}*
36 | - kubeadm={{ kubernetes_version }}*
37 | - kubernetes-cni={{ kubernetes_cni_version }}*
38 |
--------------------------------------------------------------------------------
/tools/config_management/roles/kubernetes-install/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Install Kubernetes
3 |
4 | # Distribution-specific tasks:
5 | - include: debian.yml
6 | when: ansible_os_family == "Debian"
7 |
8 | - include: redhat.yml
9 | when: ansible_os_family == "RedHat"
10 |
11 | - name: install ebtables
12 | package:
13 | name: "{{ item }}"
14 | state: present
15 | with_items:
16 | - ebtables
17 |
--------------------------------------------------------------------------------
/tools/config_management/roles/kubernetes-install/tasks/redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # RedHat / CentOS specific:
3 |
4 | - name: add kubernetes' yum repository (kubernetes-el{{ ansible_lsb.major_release }}-x86-64)
5 | yum_repository:
6 | name: kubernetes
7 | description: Kubernetes YUM repo
8 | file: external_repos
9 | baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el{{ ansible_lsb.major_release }}-x86_64
10 | enabled: yes
11 | gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
12 | gpgcheck: yes
13 | state: present
14 | register: yum_k8s_repo
15 |
16 | - name: update yum's cache
17 | yum:
18 | name: "*"
19 | update_cache: yes
20 | when: yum_k8s_repo.changed
21 |
22 | - name: install kubelet and kubectl
23 | package:
24 | name: "{{ item }}"
25 | state: present
26 | with_items:
27 | - kubelet-{{ kubernetes_version }}*
28 | - kubectl-{{ kubernetes_version }}*
29 | - kubeadm-{{ kubernetes_version }}*
30 | - kubernetes-cni-{{ kubernetes_cni_version }}*
31 |
--------------------------------------------------------------------------------
/tools/config_management/roles/kubernetes-start/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Start Kubernetes
3 |
4 | - name: kubeadm reset
5 | command: kubeadm reset
6 |
7 | - name: restart kubelet service
8 | systemd:
9 | name: kubelet
10 | state: restarted
11 | enabled: yes
12 |
13 | - name: optionally set kubeconfig option
14 | set_fact:
15 | kubeconfig: '{{ (kubernetes_version >= "1.5.4") | ternary("--kubeconfig /etc/kubernetes/admin.conf", "") }}'
16 | kubernetes_version_option: '{{ (kubernetes_version >= "1.6") | ternary("kubernetes_version", "use-kubernetes-version") }}'
17 |
18 | - name: kubeadm init on the master
19 | command: 'kubeadm init --{{ kubernetes_version_option }}=v{{ kubernetes_version }} --token={{ kubernetes_token }}'
20 | when: play_hosts[0] == inventory_hostname
21 |
22 | - name: allow pods to be run on the master (if only node)
23 | command: 'kubectl {{ kubeconfig }} taint nodes --all {{ (kubernetes_version < "1.6") | ternary("dedicated-", "node-role.kubernetes.io/master:NoSchedule-") }}'
24 | when: play_hosts | length == 1
25 |
26 | - name: kubeadm join on workers
27 | command: 'kubeadm join --token={{ kubernetes_token }} {{ hostvars[play_hosts[0]].private_ip }}{{ (kubernetes_version > "1.6") | ternary(":6443", "") }}'
28 | when: play_hosts[0] != inventory_hostname
29 |
30 | - name: list kubernetes' pods
31 | command: kubectl {{ kubeconfig }} get pods --all-namespaces
32 | when: play_hosts[0] == inventory_hostname
33 | changed_when: false
34 | register: kubectl_get_pods
35 | tags:
36 | - output
37 |
38 | - name: print output of `kubectl get pods --all-namespaces`
39 | debug: msg="{{ kubectl_get_pods.stdout_lines }}"
40 | when: play_hosts[0] == inventory_hostname
41 | tags:
42 | - output
43 |
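With the defaults from `group_vars/all`, the templated commands above reduce to roughly the following on the first host (a sketch; the workers' join address depends on the master's `private_ip`):

```
kubeadm init --kubernetes-version=v1.6.1 --token=123456.0123456789123456
kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master:NoSchedule-
```
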
--------------------------------------------------------------------------------
/tools/config_management/roles/setup-ansible/pre_tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Set machine up to be able to run ansible playbooks.
3 |
4 | - name: check if python is installed (as required by ansible modules)
5 | raw: test -e /usr/bin/python
6 | register: is_python_installed
7 | failed_when: is_python_installed.rc not in [0, 1]
8 | changed_when: false # never mutates state.
9 |
10 | - name: install python if missing (as required by ansible modules)
11 | when: is_python_installed|failed # skip otherwise
12 | raw: (test -e /usr/bin/apt-get && apt-get install -y python-minimal) || (test -e /usr/bin/yum && yum install -y python)
13 | changed_when: is_python_installed.rc == 1
14 |
15 | - name: check if lsb_release is installed (as required for ansible facts)
16 | raw: test -e /usr/bin/lsb_release
17 | register: is_lsb_release_installed
18 | failed_when: is_lsb_release_installed.rc not in [0, 1]
19 | changed_when: false # never mutates state.
20 |
21 | - name: install lsb_release if missing (as required for ansible facts)
22 | when: is_lsb_release_installed|failed # skip otherwise
23 | raw: (test -e /usr/bin/apt-get && apt-get install -y lsb-release) || (test -e /usr/bin/yum && yum install -y redhat-lsb-core)
24 | changed_when: is_lsb_release_installed.rc == 1
25 |
26 | - setup: # gather 'facts', i.e. compensates for 'gather_facts: false' in the calling playbook.
27 |
--------------------------------------------------------------------------------
/tools/config_management/roles/sock-shop/tasks/tasks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Set up sock-shop on top of Kubernetes.
3 | # Dependencies on other roles:
4 | # - kubernetes
5 |
6 | - name: create sock-shop namespace in k8s
7 | command: kubectl --kubeconfig /etc/kubernetes/admin.conf create namespace sock-shop
8 |
9 | - name: create sock-shop in k8s
10 | command: kubectl --kubeconfig /etc/kubernetes/admin.conf apply -n sock-shop -f "https://github.com/microservices-demo/microservices-demo/blob/master/deploy/kubernetes/complete-demo.yaml?raw=true"
11 |
12 | - name: describe front-end service
13 | command: kubectl --kubeconfig /etc/kubernetes/admin.conf describe svc front-end -n sock-shop
14 | changed_when: false
15 | register: kubectl_describe_svc_frontend
16 | tags:
17 | - output
18 |
19 | - name: print output of `kubectl describe svc front-end -n sock-shop`
20 | debug: msg="{{ kubectl_describe_svc_frontend.stdout_lines }}"
21 | tags:
22 | - output
23 |
24 | - name: list sock-shop k8s' pods
25 | command: kubectl --kubeconfig /etc/kubernetes/admin.conf get pods -n sock-shop
26 | changed_when: false
27 | register: kubectl_get_pods
28 | tags:
29 | - output
30 |
31 | - name: print output of `kubectl get pods -n sock-shop`
32 | debug: msg="{{ kubectl_get_pods.stdout_lines }}"
33 | tags:
34 | - output
35 |
--------------------------------------------------------------------------------
/tools/config_management/roles/weave-kube/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Set up Weave Kube on top of Kubernetes.
3 |
4 | - name: set url for weave-kube daemonset
5 | set_fact:
6 | weave_kube_url: '{{ (kubernetes_version < "1.6") | ternary("https://git.io/weave-kube", "https://git.io/weave-kube-1.6") }}'
7 |
8 | - name: configure weave net's cni plugin
9 | command: 'kubectl {{ kubeconfig }} apply -f {{ weave_kube_url }}'
10 | when: play_hosts[0] == inventory_hostname
11 |
12 | - name: list kubernetes' pods
13 | command: 'kubectl {{ kubeconfig }} get pods --all-namespaces'
14 | when: play_hosts[0] == inventory_hostname
15 | changed_when: false
16 | register: kubectl_get_pods
17 | tags:
18 | - output
19 |
20 | - name: print output of `kubectl get pods --all-namespaces`
21 | debug: msg="{{ kubectl_get_pods.stdout_lines }}"
22 | when: play_hosts[0] == inventory_hostname
23 | tags:
24 | - output
25 |
--------------------------------------------------------------------------------
/tools/config_management/roles/weave-net-sources/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Set up Development Environment for Weave Net.
3 |
4 | - name: check if weave net has been checked out
5 | become: false # Run as SSH-user
6 | stat:
7 | path: $HOME/src/github.com/weaveworks/weave
8 | register: weave
9 | failed_when: false
10 | changed_when: false
11 |
12 | - name: git clone weave net
13 | become: false # Run as SSH-user
14 | git:
15 | repo: https://github.com/weaveworks/weave.git
16 | dest: $HOME/src/github.com/weaveworks/weave
17 | when: not weave.stat.exists
18 |
19 | - name: create a convenience symlink to $HOME/src/github.com/weaveworks/weave
20 | become: false # Run as SSH-user
21 | file:
22 | src: $HOME/src/github.com/weaveworks/weave
23 | dest: $HOME/weave
24 | state: link
25 |
--------------------------------------------------------------------------------
/tools/config_management/roles/weave-net-utilities/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install epel-release
4 | package:
5 | name: "{{ item }}"
6 | state: present
7 | with_items:
8 | - epel-release
9 | when: ansible_os_family == "RedHat"
10 |
11 | - name: install jq
12 | package:
13 | name: "{{ item }}"
14 | state: present
15 | with_items:
16 | - jq
17 |
18 | - name: install ethtool (used by the weave script)
19 | package:
20 | name: "{{ item }}"
21 | state: present
22 | with_items:
23 | - ethtool
24 |
25 | - name: install nsenter (used by the weave script)
26 | command: docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter
27 |
28 | - name: install pip (for docker-py)
29 | package:
30 | name: "{{ item }}"
31 | state: present
32 | with_items:
33 | - python-pip
34 |
35 | - name: install docker-py (for docker_image)
36 | pip:
37 | name: docker-py
38 | state: present
39 |
40 | - name: docker pull images used by tests
41 | docker_image:
42 | name: '{{ item }}'
43 | state: present
44 | with_items:
45 | - alpine
46 | - aanand/docker-dnsutils
47 | - weaveworks/hello-world
48 |
--------------------------------------------------------------------------------
/tools/config_management/roles/weave-net/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Set up Weave Net.
3 |
4 | - name: install weave net
5 | get_url:
6 | url: https://git.io/weave
7 | dest: /usr/local/bin/weave
8 | mode: 0555
9 |
10 | - name: stop weave net
11 | command: /usr/local/bin/weave stop
12 |
13 | - name: start weave net
14 | command: /usr/local/bin/weave launch
15 |
16 | - name: get weave net's status
17 | command: /usr/local/bin/weave status
18 | changed_when: false
19 | register: weave_status
20 | tags:
21 | - output
22 |
23 | - name: print output of `weave status`
24 | debug: msg="{{ weave_status.stdout_lines }}"
25 | tags:
26 | - output
27 |
--------------------------------------------------------------------------------
/tools/config_management/setup_weave-kube.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################################################################################
3 | # Install Docker and Kubernetes, and configure Kubernetes to
4 | # use Weave Net's CNI plugin (a.k.a. Weave Kube).
5 | #
6 | # See also:
7 | # - http://kubernetes.io/docs/getting-started-guides/kubeadm/
8 | # - https://github.com/weaveworks/weave-kube
9 | ################################################################################
10 |
11 | - name: install docker, kubernetes and weave-kube
12 | hosts: all
13 | gather_facts: false # required in case Python is not available on the host
14 | become: true
15 | become_user: root
16 |
17 | pre_tasks:
18 | - include: library/setup_ansible_dependencies.yml
19 |
20 | roles:
21 | - docker-install
22 | - weave-net-utilities
23 | - kubernetes-install
24 | - kubernetes-docker-images
25 | - kubelet-stop
26 | - kubernetes-start
27 | - weave-kube
28 |
--------------------------------------------------------------------------------
/tools/config_management/setup_weave-net_debug.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################################################################################
3 | # Install Docker from Docker's official repository and Weave Net.
4 | ################################################################################
5 |
6 | - name: install docker and weave net for debugging
7 | hosts: all
8 | gather_facts: false # required in case Python is not available on the host
9 | become: true
10 | become_user: root
11 |
12 | pre_tasks:
13 | - include: library/setup_ansible_dependencies.yml
14 |
15 | roles:
16 | - docker-install
17 | - weave-net-utilities
18 | - weave-net
19 |
--------------------------------------------------------------------------------
/tools/config_management/setup_weave-net_dev.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################################################################################
3 | # Install Docker from Docker's official repository and Weave Net.
4 | ################################################################################
5 |
6 | - name: install docker and weave net for development
7 | hosts: all
8 | gather_facts: false # required in case Python is not available on the host
9 | become: true
10 | become_user: root
11 |
12 | pre_tasks:
13 | - include: library/setup_ansible_dependencies.yml
14 |
15 | roles:
16 | - dev-tools
17 | - golang-from-tarball
18 | - docker-install
19 | # Do not run this role when building with Vagrant, as sources have been already checked out:
20 | - { role: weave-net-sources, when: "ansible_user != 'vagrant'" }
21 |
--------------------------------------------------------------------------------
/tools/config_management/setup_weave-net_test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################################################################################
3 | # Install Docker from Docker's official repository and Weave Net.
4 | ################################################################################
5 |
6 | - name: install docker and weave net for testing
7 | hosts: all
8 | gather_facts: false # required in case Python is not available on the host
9 | become: true
10 | become_user: root
11 |
12 | pre_tasks:
13 | - include: library/setup_ansible_dependencies.yml
14 |
15 | roles:
16 | - docker-install
17 | - weave-net-utilities
18 | - kubernetes-install
19 | - kubernetes-docker-images
20 | - kubelet-stop
21 |
--------------------------------------------------------------------------------
/tools/cover/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all clean
2 |
3 | all: cover
4 |
5 | cover: *.go
6 | go get -tags netgo ./$(@D)
7 | go build -ldflags "-extldflags \"-static\" -linkmode=external" -tags netgo -o $@ ./$(@D)
8 |
9 | clean:
10 | rm -rf cover
11 | go clean ./...
12 |
--------------------------------------------------------------------------------
/tools/cover/cover.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "sort"
7 |
8 | "golang.org/x/tools/cover"
9 | )
10 |
11 | func merge(p1, p2 *cover.Profile) *cover.Profile {
12 | output := cover.Profile{
13 | FileName: p1.FileName,
14 | Mode: p1.Mode,
15 | }
16 |
17 | i, j := 0, 0
18 | for i < len(p1.Blocks) && j < len(p2.Blocks) {
19 | bi, bj := p1.Blocks[i], p2.Blocks[j]
20 | if bi.StartLine == bj.StartLine && bi.StartCol == bj.StartCol {
21 |
22 | if bi.EndLine != bj.EndLine ||
23 | bi.EndCol != bj.EndCol ||
24 | bi.NumStmt != bj.NumStmt {
25 | panic("Not run on same source!")
26 | }
27 |
28 | output.Blocks = append(output.Blocks, cover.ProfileBlock{
29 | StartLine: bi.StartLine,
30 | StartCol: bi.StartCol,
31 | EndLine: bi.EndLine,
32 | EndCol: bi.EndCol,
33 | NumStmt: bi.NumStmt,
34 | Count: bi.Count + bj.Count,
35 | })
36 | i++
37 | j++
38 | } else if bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol {
39 | output.Blocks = append(output.Blocks, bi)
40 | i++
41 | } else {
42 | output.Blocks = append(output.Blocks, bj)
43 | j++
44 | }
45 | }
46 |
47 | for ; i < len(p1.Blocks); i++ {
48 | output.Blocks = append(output.Blocks, p1.Blocks[i])
49 | }
50 |
51 | for ; j < len(p2.Blocks); j++ {
52 | output.Blocks = append(output.Blocks, p2.Blocks[j])
53 | }
54 |
55 | return &output
56 | }
57 |
58 | func print(profiles []*cover.Profile) {
59 | fmt.Println("mode: atomic")
60 | for _, profile := range profiles {
61 | for _, block := range profile.Blocks {
62 | fmt.Printf("%s:%d.%d,%d.%d %d %d\n", profile.FileName, block.StartLine, block.StartCol,
63 | block.EndLine, block.EndCol, block.NumStmt, block.Count)
64 | }
65 | }
66 | }
67 |
68 | // Copied from https://github.com/golang/tools/blob/master/cover/profile.go
69 | type byFileName []*cover.Profile
70 |
71 | func (p byFileName) Len() int { return len(p) }
72 | func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }
73 | func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
74 |
75 | func main() {
76 | outputProfiles := map[string]*cover.Profile{}
77 | for _, input := range os.Args[1:] {
78 | inputProfiles, err := cover.ParseProfiles(input)
79 | if err != nil {
80 | panic(fmt.Sprintf("Error parsing %s: %v", input, err))
81 | }
82 | for _, ip := range inputProfiles {
83 | op := outputProfiles[ip.FileName]
84 | if op == nil {
85 | outputProfiles[ip.FileName] = ip
86 | } else {
87 | outputProfiles[ip.FileName] = merge(op, ip)
88 | }
89 | }
90 | }
91 | profiles := make([]*cover.Profile, 0, len(outputProfiles))
92 | for _, profile := range outputProfiles {
93 | profiles = append(profiles, profile)
94 | }
95 | sort.Sort(byFileName(profiles))
96 | print(profiles)
97 | }
98 |
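Typical use of the resulting `cover` binary: merge per-shard profiles into one report, mirroring `gather_coverage.sh` below (file names here are illustrative):

```
go test -covermode=atomic -coverprofile=shard1.cov ./...
go test -covermode=atomic -coverprofile=shard2.cov ./...
cover shard1.cov shard2.cov > profile.cov
go tool cover -html=profile.cov -o coverage.html
```
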
--------------------------------------------------------------------------------
/tools/cover/gather_coverage.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # This script copies all the coverage reports from various circle shards,
3 | # merges them and produces a complete report.
4 |
5 | set -ex
6 | DESTINATION=$1
7 | FROMDIR=$2
8 | mkdir -p "$DESTINATION"
9 |
10 | if [ -n "$CIRCLECI" ]; then
11 | for i in $(seq 1 $((CIRCLE_NODE_TOTAL - 1))); do
12 | scp "node$i:$FROMDIR"/* "$DESTINATION" || true
13 | done
14 | fi
15 |
16 | go get github.com/weaveworks/build-tools/cover
17 | cover "$DESTINATION"/* >profile.cov
18 | go tool cover -html=profile.cov -o coverage.html
19 | go tool cover -func=profile.cov -o coverage.txt
20 | tar czf coverage.tar.gz "$DESTINATION"
21 |
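A typical invocation on a CI shard (paths are illustrative; the first argument is the destination, the second the directory to copy reports from):

```
./gather_coverage.sh ./merged-coverage /home/ubuntu/coverage
```
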
--------------------------------------------------------------------------------
/tools/dependencies/cross_versions.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # Generate the cross product of latest versions of Weave Net's dependencies:
4 | # - Go
5 | # - Docker
6 | # - Kubernetes
7 | #
8 | # Dependencies:
9 | # - python
10 | # - git
11 | # - list_versions.py
12 | #
13 | # Testing:
14 | # $ python -m doctest -v cross_versions.py
15 |
16 | from os import linesep
17 | from sys import argv, exit, stdout, stderr
18 | from getopt import getopt, GetoptError
19 | from list_versions import DEPS, get_versions_from, filter_versions
20 | from itertools import product
21 |
22 | # See also: /usr/include/sysexits.h
23 | _ERROR_RUNTIME=1
24 | _ERROR_ILLEGAL_ARGS=64
25 |
26 | def _usage(error_message=None):
27 | if error_message:
28 | stderr.write('ERROR: ' + error_message + linesep)
29 | stdout.write(linesep.join([
30 | 'Usage:',
31 | ' cross_versions.py [OPTION]...',
32 | 'Examples:',
33 | ' cross_versions.py',
34 | ' cross_versions.py -r',
35 | ' cross_versions.py --rc',
36 | ' cross_versions.py -l',
37 | ' cross_versions.py --latest',
38 | 'Options:',
39 | '-l/--latest Include only the latest version of each major and minor version sub-tree.',
40 | '-r/--rc Include release candidate versions.',
41 | '-h/--help Prints this!',
42 | ''
43 | ]))
44 |
45 | def _validate_input(argv):
46 | try:
47 | config = {
48 | 'rc': False,
49 | 'latest': False
50 | }
51 | opts, args = getopt(argv, 'hlr', ['help', 'latest', 'rc'])
52 | for opt, value in opts:
53 | if opt in ('-h', '--help'):
54 | _usage()
55 | exit()
56 | if opt in ('-l', '--latest'):
57 | config['latest'] = True
58 | if opt in ('-r', '--rc'):
59 | config['rc'] = True
60 | if len(args) != 0:
61 | raise ValueError('Unsupported argument(s): %s.' % args)
62 | return config
63 | except GetoptError as e:
64 | _usage(str(e))
65 | exit(_ERROR_ILLEGAL_ARGS)
66 | except ValueError as e:
67 | _usage(str(e))
68 | exit(_ERROR_ILLEGAL_ARGS)
69 |
70 | def _versions(dependency, config):
71 | return map(str,
72 | filter_versions(
73 | get_versions_from(DEPS[dependency]['url'], DEPS[dependency]['re']),
74 | DEPS[dependency]['min'],
75 | **config
76 | )
77 | )
78 |
79 | def cross_versions(config):
80 | docker_versions = _versions('docker', config)
81 | k8s_versions = _versions('kubernetes', config)
82 | return product(docker_versions, k8s_versions)
83 |
84 | def main(argv):
85 | try:
86 | config = _validate_input(argv)
87 | print(linesep.join('\t'.join(pair) for pair in cross_versions(config)))
88 | except Exception as e:
89 | print(str(e))
90 | exit(_ERROR_RUNTIME)
91 |
92 | if __name__ == '__main__':
93 | main(argv[1:])
94 |
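Example runs (each output line is a tab-separated Docker/Kubernetes version pair):

```
python cross_versions.py --latest
python cross_versions.py --latest --rc
```
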
--------------------------------------------------------------------------------
/tools/dependencies/list_os_images.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function usage() {
4 | cat <<EOF
  | (usage text for the gcp/aws/do providers, file lines 5-36, not recovered in this dump)
  | EOF
  | }
  |
  | function find_aws_owner_id() {
  | (per-OS AWS owner-ID lookup, not recovered in this dump)
37 | echo >&2 "No AWS owner ID for $1."
38 | exit 1
39 | }
40 |
41 | if [ -z "$1" ]; then
42 | echo >&2 "No specified provider."
43 | usage
44 | exit 1
45 | fi
46 |
47 | if [ -z "$2" ]; then
48 | if [ "$1" == "help" ]; then
49 | usage
50 | exit 0
51 | else
52 | echo >&2 "No specified operating system."
53 | usage
54 | exit 1
55 | fi
56 | fi
57 |
58 | case "$1" in
59 | 'gcp')
60 | gcloud compute images list --standard-images --regexp=".*?$2.*" \
61 | --format="csv[no-heading][separator=/](selfLink.map().scope(projects).segment(0),family)" \
62 | | sort -d
63 | ;;
64 | 'aws')
65 | aws --region "${3:-us-east-1}" ec2 describe-images \
66 | --owners "$(find_aws_owner_id "$2")" \
67 | --filters "Name=name,Values=$2*" \
68 | --query 'Images[*].{name:Name,id:ImageId}'
69 | # Other examples:
70 | # - CentOS: aws --region us-east-1 ec2 describe-images --owners aws-marketplace --filters Name=product-code,Values=aw0evgkw8e5c1q413zgy5pjce
71 | # - Debian: aws --region us-east-1 ec2 describe-images --owners 379101102735 --filters "Name=architecture,Values=x86_64" "Name=name,Values=debian-jessie-*" "Name=root-device-type,Values=ebs" "Name=virtualization-type,Values=hvm"
72 | ;;
73 | 'do')
74 | curl -s -X GET \
75 | -H "Content-Type: application/json" \
76 | -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" \
77 | "https://api.digitalocean.com/v2/images?page=1&per_page=999999" \
78 | | jq --raw-output ".images | .[] | .slug" | grep "$2" | sort -d
79 | ;;
80 | *)
81 | echo >&2 "Unknown provider [$1]."
82 | usage
83 | exit 1
84 | ;;
85 | esac
86 |
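Example invocations for each provider branch above (the third argument is the AWS region, defaulting to us-east-1):

```
./list_os_images.sh gcp ubuntu
./list_os_images.sh aws ubuntu us-east-1
./list_os_images.sh do ubuntu   # requires $DIGITALOCEAN_TOKEN
```
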
--------------------------------------------------------------------------------
/tools/files-with-type:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Find all files with a given MIME type.
4 | #
5 | # e.g.
6 | # $ files-with-type text/x-shellscript k8s infra
7 |
8 | mime_type=$1
9 | shift
10 |
11 | git ls-files "$@" | grep -vE '^vendor/' | xargs file --mime-type | grep "${mime_type}" | sed -e 's/:.*$//'
12 |
--------------------------------------------------------------------------------
/tools/image-tag:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
7 | WORKING_SUFFIX=$(if ! git diff --exit-code --quiet HEAD >&2; then echo "-WIP"; else echo ""; fi)
8 | BRANCH_PREFIX=$(git rev-parse --abbrev-ref HEAD)
9 | echo "${BRANCH_PREFIX//\//-}-$(git rev-parse --short HEAD)$WORKING_SUFFIX"
10 |
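Sample output (SHA illustrative): on branch `fix/foo` with uncommitted changes, the slash is rewritten and a `-WIP` suffix is appended:

```
$ ./image-tag
fix-foo-8f3a2c1-WIP
```
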
--------------------------------------------------------------------------------
/tools/integration/assert.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # assert.sh 1.1 - bash unit testing framework
3 | # Copyright (C) 2009-2015 Robert Lehmann
4 | #
5 | # http://github.com/lehmannro/assert.sh
6 | #
7 | # This program is free software: you can redistribute it and/or modify
8 | # it under the terms of the GNU Lesser General Public License as published
9 | # by the Free Software Foundation, either version 3 of the License, or
10 | # (at your option) any later version.
11 | #
12 | # This program is distributed in the hope that it will be useful,
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | # GNU Lesser General Public License for more details.
16 | #
17 | # You should have received a copy of the GNU Lesser General Public License
18 | # along with this program. If not, see <http://www.gnu.org/licenses/>.
19 |
20 | export DISCOVERONLY=${DISCOVERONLY:-}
21 | export DEBUG=${DEBUG:-}
22 | export STOP=${STOP:-}
23 | export INVARIANT=${INVARIANT:-}
24 | export CONTINUE=${CONTINUE:-}
25 |
26 | args="$(getopt -n "$0" -l \
27 | verbose,help,stop,discover,invariant,continue vhxdic "$@")" \
28 | || exit -1
29 | for arg in $args; do
30 | case "$arg" in
31 | -h)
32 | echo "$0 [-vxidc]" \
33 | "[--verbose] [--stop] [--invariant] [--discover] [--continue]"
34 | echo "$(sed 's/./ /g' <<<"$0") [-h] [--help]"
35 | exit 0
36 | ;;
37 | --help)
38 | cat <<EOF
   | (full help text, option handling and assert_end, file lines 39-109, not recovered in this dump)
   |
   | assert() {
   | # assert <command> <expected stdout> [stdin]
111 | ((tests_ran++)) || :
112 | [[ -z "$DISCOVERONLY" ]] || return
113 | expected=$(echo -ne "${2:-}")
114 | result="$(eval "$1" 2>/dev/null <<<"${3:-}")" || true
115 | if [[ "$result" == "$expected" ]]; then
116 | [[ -z "$DEBUG" ]] || echo -n .
117 | return
118 | fi
119 | result="$(sed -e :a -e '$!N;s/\n/\\n/;ta' <<<"$result")"
120 | [[ -z "$result" ]] && result="nothing" || result="\"$result\""
121 | [[ -z "$2" ]] && expected="nothing" || expected="\"$2\""
122 | _assert_fail "expected $expected${_indent}got $result" "$1" "$3"
123 | }
124 |
125 | assert_raises() {
126 | # assert_raises <command> [expected code] [stdin]
127 | ((tests_ran++)) || :
128 | [[ -z "$DISCOVERONLY" ]] || return
129 | status=0
130 | (eval "$1" <<<"${3:-}") >/dev/null 2>&1 || status=$?
131 | expected=${2:-0}
132 | if [[ "$status" -eq "$expected" ]]; then
133 | [[ -z "$DEBUG" ]] || echo -n .
134 | return
135 | fi
136 | _assert_fail "program terminated with code $status instead of $expected" "$1" "$3"
137 | }
138 |
139 | _assert_fail() {
140 | # _assert_fail <failure> <command> <stdin>
141 | [[ -n "$DEBUG" ]] && echo -n X
142 | report="test #$tests_ran \"$2${3:+ <<< $3}\" failed:${_indent}$1"
143 | if [[ -n "$STOP" ]]; then
144 | [[ -n "$DEBUG" ]] && echo
145 | echo "$report"
146 | exit 1
147 | fi
148 | tests_errors[$tests_failed]="$report"
149 | ((tests_failed++)) || :
150 | }
151 |
152 | skip_if() {
153 | # skip_if <command>
154 | (eval "$@") >/dev/null 2>&1 && status=0 || status=$?
155 | [[ "$status" -eq 0 ]] || return
156 | skip
157 | }
158 |
159 | skip() {
160 | # skip (no arguments)
161 | shopt -q extdebug && tests_extdebug=0 || tests_extdebug=1
162 | shopt -q -o errexit && tests_errexit=0 || tests_errexit=1
163 | # enable extdebug so returning 1 in a DEBUG trap handler skips next command
164 | shopt -s extdebug
165 | # disable errexit (set -e) so we can safely return 1 without causing exit
166 | set +o errexit
167 | tests_trapped=0
168 | trap _skip DEBUG
169 | }
170 | _skip() {
171 | if [[ $tests_trapped -eq 0 ]]; then
172 | # DEBUG trap for command we want to skip. Do not remove the handler
173 | # yet because *after* the command we need to reset extdebug/errexit (in
174 | # another DEBUG trap.)
175 | tests_trapped=1
176 | [[ -z "$DEBUG" ]] || echo -n s
177 | return 1
178 | else
179 | trap - DEBUG
180 | [[ $tests_extdebug -eq 0 ]] || shopt -u extdebug
181 | [[ $tests_errexit -eq 1 ]] || set -o errexit
182 | return 0
183 | fi
184 | }
185 |
186 | _assert_reset
187 | : ${tests_suite_status:=0} # remember if any of the tests failed so far
188 | _assert_cleanup() {
189 | local status=$?
190 | # modify exit code if it's not already non-zero
191 | [[ $status -eq 0 && -z $CONTINUE ]] && exit $tests_suite_status
192 | }
193 | trap _assert_cleanup EXIT
194 |
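A minimal test script using this framework (a sketch based on the signatures above; `assert_end` is the summary function this repo's `config.sh` also calls):

```
. ./assert.sh
assert "echo hello" "hello"   # compare stdout against an expected string
assert_raises "true" 0        # compare exit status against an expected code
assert_raises "false" 1
assert_end                    # print a summary and set the exit code
```
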
--------------------------------------------------------------------------------
/tools/integration/config.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # NB only to be sourced
3 |
4 | set -e
5 |
6 | DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
7 |
8 | # Protect against being sourced multiple times to prevent
9 | # overwriting assert.sh global state
10 | if ! [ -z "$SOURCED_CONFIG_SH" ]; then
11 | return
12 | fi
13 | SOURCED_CONFIG_SH=true
14 |
15 | # these ought to match what is in Vagrantfile
16 | N_MACHINES=${N_MACHINES:-3}
17 | IP_PREFIX=${IP_PREFIX:-192.168.48}
18 | IP_SUFFIX_BASE=${IP_SUFFIX_BASE:-10}
19 |
20 | if [ -z "$HOSTS" ]; then
21 | for i in $(seq 1 "$N_MACHINES"); do
22 | IP="${IP_PREFIX}.$((IP_SUFFIX_BASE + i))"
23 | HOSTS="$HOSTS $IP"
24 | done
25 | fi
26 |
27 | # these are used by the tests
28 | # shellcheck disable=SC2034
29 | HOST1=$(echo "$HOSTS" | cut -f 1 -d ' ')
30 | # shellcheck disable=SC2034
31 | HOST2=$(echo "$HOSTS" | cut -f 2 -d ' ')
32 | # shellcheck disable=SC2034
33 | HOST3=$(echo "$HOSTS" | cut -f 3 -d ' ')
34 |
35 | # shellcheck disable=SC1090
36 | . "$DIR/assert.sh"
37 |
38 | SSH_DIR=${SSH_DIR:-$DIR}
39 | SSH=${SSH:-ssh -l vagrant -i \"$SSH_DIR/insecure_private_key\" -o \"UserKnownHostsFile=$SSH_DIR/.ssh_known_hosts\" -o CheckHostIP=no -o StrictHostKeyChecking=no}
40 |
41 | SMALL_IMAGE="alpine"
42 | # shellcheck disable=SC2034
43 | TEST_IMAGES="$SMALL_IMAGE"
44 |
45 | # shellcheck disable=SC2034
46 | PING="ping -nq -W 1 -c 1"
47 | DOCKER_PORT=2375
48 |
49 | remote() {
50 | rem=$1
51 | shift 1
52 | "$@" > >(while read -r line; do echo -e $'\e[0;34m'"$rem>"$'\e[0m'" $line"; done)
53 | }
54 |
55 | colourise() {
56 | ([ -t 0 ] && echo -ne $'\e['"$1"'m') || true
57 | shift
58 | # It's important that we don't do this in a subshell, as some
59 | # commands we execute need to modify global state
60 | "$@"
61 | ([ -t 0 ] && echo -ne $'\e[0m') || true
62 | }
63 |
64 | whitely() {
65 | colourise '1;37' "$@"
66 | }
67 |
68 | greyly() {
69 | colourise '0;37' "$@"
70 | }
71 |
72 | redly() {
73 | colourise '1;31' "$@"
74 | }
75 |
76 | greenly() {
77 | colourise '1;32' "$@"
78 | }
79 |
80 | run_on() {
81 | host=$1
82 | shift 1
83 | [ -z "$DEBUG" ] || greyly echo "Running on $host:" "$@" >&2
84 | # shellcheck disable=SC2086
85 | remote "$host" $SSH "$host" "$@"
86 | }
87 |
88 | docker_on() {
89 | host=$1
90 | shift 1
91 | [ -z "$DEBUG" ] || greyly echo "Docker on $host:$DOCKER_PORT:" "$@" >&2
92 | docker -H "tcp://$host:$DOCKER_PORT" "$@"
93 | }
94 |
95 | weave_on() {
96 | host=$1
97 | shift 1
98 | [ -z "$DEBUG" ] || greyly echo "Weave on $host:$DOCKER_PORT:" "$@" >&2
99 | DOCKER_HOST=tcp://$host:$DOCKER_PORT $WEAVE "$@"
100 | }
101 |
102 | exec_on() {
103 | host=$1
104 | container=$2
105 | shift 2
106 | docker -H "tcp://$host:$DOCKER_PORT" exec "$container" "$@"
107 | }
108 |
109 | rm_containers() {
110 | host=$1
111 | shift
112 | [ $# -eq 0 ] || docker_on "$host" rm -f "$@" >/dev/null
113 | }
114 |
115 | start_suite() {
116 | for host in $HOSTS; do
117 | [ -z "$DEBUG" ] || echo "Cleaning up on $host: removing all containers and resetting weave"
118 | PLUGIN_ID=$(docker_on "$host" ps -aq --filter=name=weaveplugin)
119 | PLUGIN_FILTER="cat"
120 | [ -n "$PLUGIN_ID" ] && PLUGIN_FILTER="grep -v $PLUGIN_ID"
121 | # shellcheck disable=SC2046
122 | rm_containers "$host" $(docker_on "$host" ps -aq 2>/dev/null | $PLUGIN_FILTER)
123 | run_on "$host" "docker network ls | grep -q ' weave ' && docker network rm weave" || true
124 | weave_on "$host" reset 2>/dev/null
125 | done
126 | whitely echo "$@"
127 | }
128 |
129 | end_suite() {
130 | whitely assert_end
131 | }
132 |
133 | WEAVE=$DIR/../../integration/weave
134 |
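Typical use of these helpers from a test (a sketch; hosts come from `$HOSTS`, and the container name `c1` is illustrative):

```
. ./config.sh
start_suite "Ping between containers"
run_on "$HOST1" uname -a                                      # run a command over SSH
docker_on "$HOST1" run -d --name=c1 "$SMALL_IMAGE" sleep 600  # talk to the remote Docker API
exec_on "$HOST1" c1 $PING "$HOST2"
end_suite
```
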
--------------------------------------------------------------------------------
/tools/integration/gce.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # This script has a bunch of GCE-related functions:
3 | # ./gce.sh setup - starts two VMs on GCE and configures them to run our integration tests
4 | # . ./gce.sh; ./run_all.sh - set a bunch of environment variables for the tests
5 | # ./gce.sh destroy - tear down the VMs
6 | # ./gce.sh make_template - make a fresh VM template; update TEMPLATE_NAME first!
7 |
8 | set -e
9 |
10 | : "${KEY_FILE:=/tmp/gce_private_key.json}"
11 | : "${SSH_KEY_FILE:=$HOME/.ssh/gce_ssh_key}"
12 | : "${IMAGE_FAMILY:=ubuntu-1404-lts}"
13 | : "${IMAGE_PROJECT:=ubuntu-os-cloud}"
14 | : "${USER_ACCOUNT:=ubuntu}"
15 | : "${ZONE:=us-central1-a}"
16 | : "${PROJECT:=}"
17 | : "${TEMPLATE_NAME:=}"
18 | : "${NUM_HOSTS:=}"
19 |
20 | if [ -z "${PROJECT}" ] || [ -z "${NUM_HOSTS}" ] || [ -z "${TEMPLATE_NAME}" ]; then
21 | echo "Must specify PROJECT, NUM_HOSTS and TEMPLATE_NAME"
22 | exit 1
23 | fi
24 |
25 | SUFFIX=""
26 | if [ -n "$CIRCLECI" ]; then
27 | SUFFIX="-${CIRCLE_PROJECT_USERNAME}-${CIRCLE_PROJECT_REPONAME}-${CIRCLE_BUILD_NUM}-$CIRCLE_NODE_INDEX"
28 | else
29 | SUFFIX="-${USER}"
30 | fi
31 |
32 | # Setup authentication
33 | gcloud auth activate-service-account --key-file "$KEY_FILE" 1>/dev/null
34 | gcloud config set project "$PROJECT"
35 |
36 | function vm_names() {
37 | local names=
38 | for i in $(seq 1 "$NUM_HOSTS"); do
39 | names=("host$i$SUFFIX" "${names[@]}")
40 | done
41 | echo "${names[@]}"
42 | }
43 |
44 | # Delete all vms in this account
45 | function destroy() {
46 | local names
47 | # shellcheck disable=SC2046
48 | if [ $(gcloud compute firewall-rules list "test-allow-docker$SUFFIX" 2>/dev/null | wc -l) -gt 0 ]; then
49 | gcloud compute firewall-rules delete "test-allow-docker$SUFFIX"
50 | fi
51 | names="$(vm_names)"
52 | # shellcheck disable=SC2086
53 | if [ "$(gcloud compute instances list --zones "$ZONE" -q $names | wc -l)" -le 1 ]; then
54 | return 0
55 | fi
56 | for i in {0..10}; do
57 | # gcloud instances delete can sometimes hang.
58 | case $(
59 | set +e
60 | timeout 60s /bin/bash -c "gcloud compute instances delete --zone $ZONE -q $names >/dev/null 2>&1"
61 | echo $?
62 | ) in
63 | 0)
64 | return 0
65 | ;;
66 | 124)
67 | # 124 means it timed out
68 | break
69 | ;;
70 | *)
71 | return 1
72 | ;;
73 | esac
74 | done
75 | }
76 |
77 | function internal_ip() {
78 | jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].networkIP" "$1"
79 | }
80 |
81 | function external_ip() {
82 | jq -r ".[] | select(.name == \"$2\") | .networkInterfaces[0].accessConfigs[0].natIP" "$1"
83 | }
84 |
85 | function try_connect() {
86 | for i in {0..10}; do
87 | ssh -t "$1" true && return
88 | sleep 2
89 | done
90 | }
91 |
92 | function install_docker_on() {
93 | name=$1
94 | echo "Installing Docker on $name for user ${USER_ACCOUNT}"
95 | # shellcheck disable=SC2087
96 | ssh -t "$name" sudo bash -x -s <<EOF
97 | set -x
98 | set -e
99 | apt-get update -qq;
100 | apt-get install -q -y --force-yes --no-install-recommends ethtool;
101 | curl -sSL https://get.docker.com/gpg | apt-key add -;
102 | curl -sSL https://get.docker.com/ | sh;
103 | usermod -a -G docker ${USER_ACCOUNT};
104 | echo 'DOCKER_OPTS="-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375"' >> /etc/default/docker;
105 | service docker restart
106 | EOF
107 | # It seems we need a short delay for docker to start up, so I put this in
108 | # a separate ssh connection. This installs nsenter.
109 | ssh -t "$name" sudo docker run --rm -v /usr/local/bin:/target jpetazzo/nsenter
110 | }
111 |
112 | function copy_hosts() {
113 | hostname=$1
114 | hosts=$2
115 | ssh -t "$hostname" "sudo -- sh -c \"cat >>/etc/hosts\"" <"$hosts"
116 | }
117 |
118 | # Create new set of VMs
119 | function setup() {
120 | destroy
121 |
122 | names=($(vm_names))
123 | gcloud compute instances create "${names[@]}" --image "$TEMPLATE_NAME" --zone "$ZONE" --tags "test$SUFFIX" --network=test
124 | my_ip="$(curl -s http://ipinfo.io/ip)"
125 | gcloud compute firewall-rules create "test-allow-docker$SUFFIX" --network=test --allow tcp:2375,tcp:12375,tcp:4040,tcp:80 --target-tags "test$SUFFIX" --source-ranges "$my_ip"
126 |
127 | gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE"
128 | sed -i '/UserKnownHostsFile=\/dev\/null/d' ~/.ssh/config
129 |
130 | # build an /etc/hosts file for these vms
131 | hosts=$(mktemp hosts.XXXXXXXXXX)
132 | json=$(mktemp json.XXXXXXXXXX)
133 | gcloud compute instances list --format=json >"$json"
134 | for name in "${names[@]}"; do
135 | echo "$(internal_ip "$json" "$name") $name.$ZONE.$PROJECT" >>"$hosts"
136 | done
137 |
138 | for name in "${names[@]}"; do
139 | hostname="$name.$ZONE.$PROJECT"
140 |
141 | # Add the remote ip to the local /etc/hosts
142 | sudo sed -i "/$hostname/d" /etc/hosts
143 | sudo sh -c "echo \"$(external_ip "$json" "$name") $hostname\" >>/etc/hosts"
144 | try_connect "$hostname"
145 |
146 | copy_hosts "$hostname" "$hosts" &
147 | done
148 |
149 | wait
150 |
151 | rm "$hosts" "$json"
152 | }
153 |
154 | function make_template() {
155 | gcloud compute instances create "$TEMPLATE_NAME" --image-family "$IMAGE_FAMILY" --image-project "$IMAGE_PROJECT" --zone "$ZONE"
156 | gcloud compute config-ssh --ssh-key-file "$SSH_KEY_FILE"
157 | name="$TEMPLATE_NAME.$ZONE.$PROJECT"
158 | try_connect "$name"
159 | install_docker_on "$name"
160 | gcloud -q compute instances delete "$TEMPLATE_NAME" --keep-disks boot --zone "$ZONE"
161 | gcloud compute images create "$TEMPLATE_NAME" --source-disk "$TEMPLATE_NAME" --source-disk-zone "$ZONE"
162 | }
163 |
164 | function hosts() {
165 | hosts=
166 | args=
167 | json=$(mktemp json.XXXXXXXXXX)
168 | gcloud compute instances list --format=json >"$json"
169 | for name in $(vm_names); do
170 | hostname="$name.$ZONE.$PROJECT"
171 | hosts=($hostname "${hosts[@]}")
172 | args=("--add-host=$hostname:$(internal_ip "$json" "$name")" "${args[@]}")
173 | done
174 | echo export SSH=\"ssh -l "${USER_ACCOUNT}"\"
175 | echo "export HOSTS=\"${hosts[*]}\""
176 | echo "export ADD_HOST_ARGS=\"${args[*]}\""
177 | rm "$json"
178 | }
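   | # Example output (hypothetical user, project and IPs; note the names are
   | # emitted in reverse order of creation):
   | #   export SSH="ssh -l ubuntu"
   | #   export HOSTS="host2-alice.us-central1-a.my-project host1-alice.us-central1-a.my-project"
   | #   export ADD_HOST_ARGS="--add-host=host2-alice.us-central1-a.my-project:10.128.0.3 --add-host=host1-alice.us-central1-a.my-project:10.128.0.2"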
179 |
180 | case "$1" in
181 | setup)
182 | setup
183 | ;;
184 |
185 | hosts)
186 | hosts
187 | ;;
188 |
189 | destroy)
190 | destroy
191 | ;;
192 |
193 | make_template)
194 | # see if template exists
195 | if ! gcloud compute images list | grep "$PROJECT" | grep "$TEMPLATE_NAME"; then
196 | make_template
197 | else
198 | echo "Reusing existing template:"
199 | gcloud compute images describe "$TEMPLATE_NAME" | grep "^creationTimestamp"
200 | fi
201 | ;;
202 | esac
203 |
--------------------------------------------------------------------------------
/tools/integration/run_all.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -ex
4 |
5 | DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
6 | # shellcheck disable=SC1090
7 | . "$DIR/config.sh"
8 |
9 | whitely echo Sanity checks
10 | if ! bash "$DIR/sanity_check.sh"; then
11 | whitely echo ...failed
12 | exit 1
13 | fi
14 | whitely echo ...ok
15 |
16 | # shellcheck disable=SC2068
17 | TESTS=(${@:-$(find . -name '*_test.sh')})
18 | RUNNER_ARGS=()
19 |
20 | # If running on circle, use the scheduler to work out what tests to run
21 | if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ]; then
22 | RUNNER_ARGS=("${RUNNER_ARGS[@]}" -scheduler)
23 | fi
24 |
25 | # If running on circle or PARALLEL is not empty, run tests in parallel
26 | if [ -n "$CIRCLECI" ] || [ -n "$PARALLEL" ]; then
27 | RUNNER_ARGS=("${RUNNER_ARGS[@]}" -parallel)
28 | fi
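   |
   | # For example (hypothetical test file), to run one test in parallel mode
   | # without the CircleCI scheduler:
   | #   PARALLEL=1 NO_SCHEDULER=1 ./run_all.sh ./200_example_3_test.sh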
29 |
30 | make -C "${DIR}/../runner"
31 | HOSTS="$HOSTS" "${DIR}/../runner/runner" "${RUNNER_ARGS[@]}" "${TESTS[@]}"
32 |
--------------------------------------------------------------------------------
/tools/integration/sanity_check.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | # shellcheck disable=SC1090,SC1091
3 | . "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/config.sh"
4 |
5 | set -e
6 |
7 | whitely echo Ping each host from the other
8 | for host in $HOSTS; do
9 | for other in $HOSTS; do
10 | [ "$host" = "$other" ] || run_on "$host" "$PING" "$other"
11 | done
12 | done
13 |
14 | whitely echo Check we can reach docker
15 |
16 | for host in $HOSTS; do
17 | echo
18 | echo "Host Version Info: $host"
19 | echo "====================================="
20 | echo "# docker version"
21 | docker_on "$host" version
22 | echo "# docker info"
23 | docker_on "$host" info
24 | echo "# weave version"
25 | weave_on "$host" version
26 | done
27 |
--------------------------------------------------------------------------------
/tools/lint:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # This script lints files for common errors.
3 | #
4 | # For go files, it runs gofmt and go vet, and optionally golint and
5 | # gocyclo, if they are installed.
6 | #
7 | # For shell files, it runs shfmt. If you don't have that installed, you can get
8 | # it with:
9 | # go get -u gopkg.in/mvdan/sh.v1/cmd/shfmt
10 | #
11 | # With no arguments, it lints the current files staged
12 | # for git commit. Or you can pass it explicit filenames
13 | # (or directories) and it will lint them.
14 | #
15 | # To use this script automatically, run:
16 | # ln -s ../../bin/lint .git/hooks/pre-commit
17 |
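   | # For example (hypothetical arguments), to lint a directory in parallel,
   | # ignoring one known spelling:
   | #   ./lint -p -ignorespelling 'cancelled' ../some/dir
   |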
18 | set -e
19 |
20 | LINT_IGNORE_FILE=${LINT_IGNORE_FILE:-".lintignore"}
21 |
22 | IGNORE_LINT_COMMENT=
23 | IGNORE_SPELLINGS=
24 | PARALLEL=
25 | while true; do
26 | case "$1" in
27 | -nocomment)
28 | IGNORE_LINT_COMMENT=1
29 | shift 1
30 | ;;
31 | -notestpackage)
32 | # NOOP, still accepted for backwards compatibility
33 | shift 1
34 | ;;
35 | -ignorespelling)
36 | IGNORE_SPELLINGS="$2,$IGNORE_SPELLINGS"
37 | shift 2
38 | ;;
39 | -p)
40 | PARALLEL=1
41 | shift 1
42 | ;;
43 | *)
44 | break
45 | ;;
46 | esac
47 | done
48 |
49 | spell_check() {
50 | local filename="$1"
51 | local lint_result=0
52 |
53 | # we don't want to spell check tarballs, binaries, Makefiles or JSON files
54 | if file "$filename" | grep executable >/dev/null 2>&1; then
55 | return $lint_result
56 | fi
57 | if [[ $filename == *".tar" || $filename == *".gz" || $filename == *".json" || $(basename "$filename") == "Makefile" ]]; then
58 | return $lint_result
59 | fi
60 |
61 | # misspell is completely optional. If you don't like it
62 | # don't have it installed.
63 | if ! type misspell >/dev/null 2>&1; then
64 | return $lint_result
65 | fi
66 |
67 | if ! misspell -error -i "$IGNORE_SPELLINGS" "${filename}"; then
68 | lint_result=1
69 | fi
70 |
71 | return $lint_result
72 | }
73 |
74 | lint_go() {
75 | local filename="$1"
76 | local lint_result=0
77 |
78 | if [ -n "$(gofmt -s -l "${filename}")" ]; then
79 | lint_result=1
80 | echo "${filename}: run gofmt -s -w ${filename}"
81 | fi
82 |
83 | go tool vet "${filename}" || lint_result=$?
84 |
85 | # golint is completely optional. If you don't like it
86 | # don't have it installed.
87 | if type golint >/dev/null 2>&1; then
88 | # golint doesn't set an exit code it seems
89 | if [ -z "$IGNORE_LINT_COMMENT" ]; then
90 | lintoutput=$(golint "${filename}")
91 | else
92 | lintoutput=$(golint "${filename}" | grep -vE 'comment|dot imports|ALL_CAPS')
93 | fi
94 | if [ -n "$lintoutput" ]; then
95 | lint_result=1
96 | echo "$lintoutput"
97 | fi
98 | fi
99 |
100 | # gocyclo is completely optional. If you don't like it
101 | # don't have it installed. Also never blocks a commit,
102 | # it just warns.
103 | if type gocyclo >/dev/null 2>&1; then
104 | gocyclo -over 25 "${filename}" | while read -r line; do
105 | echo "${filename}": higher than 25 cyclomatic complexity - "${line}"
106 | done
107 | fi
108 |
109 | return $lint_result
110 | }
111 |
112 | lint_sh() {
113 | local filename="$1"
114 | local lint_result=0
115 |
116 | if ! diff -u <(shfmt -i 4 "${filename}") "${filename}"; then
117 | lint_result=1
118 | echo "${filename}: run shfmt -i 4 -w ${filename}"
119 | fi
120 |
121 | # the shellcheck is completely optional. If you don't like it
122 | # don't have it installed.
123 | if type shellcheck >/dev/null 2>&1; then
124 | shellcheck "${filename}" || lint_result=1
125 | fi
126 |
127 | return $lint_result
128 | }
129 |
130 | lint_tf() {
131 | local filename="$1"
132 | local lint_result=0
133 |
134 | if ! diff -u <(hclfmt "${filename}") "${filename}"; then
135 | lint_result=1
136 | echo "${filename}: run hclfmt -w ${filename}"
137 | fi
138 |
139 | return $lint_result
140 | }
141 |
142 | lint_md() {
143 | local filename="$1"
144 | local lint_result=0
145 |
146 | for i in '=======' '>>>>>>>'; do
147 | if grep -q "${i}" "${filename}"; then
148 | lint_result=1
149 | echo "${filename}: bad merge/rebase!"
150 | fi
151 | done
152 |
153 | return $lint_result
154 | }
155 |
156 | lint() {
157 | filename="$1"
158 | ext="${filename##*\.}"
159 | local lint_result=0
160 |
161 | # Don't lint deleted files
162 | if [ ! -f "$filename" ]; then
163 | return
164 | fi
165 |
166 | # Don't lint specific files
167 | case "$(basename "${filename}")" in
168 | static.go) return ;;
169 | coverage.html) return ;;
170 | *.pb.go) return ;;
171 | esac
172 |
173 | if [[ "$(file --mime-type "${filename}" | awk '{print $2}')" == "text/x-shellscript" ]]; then
174 | ext="sh"
175 | fi
176 |
177 | case "$ext" in
178 | go) lint_go "${filename}" || lint_result=1 ;;
179 | sh) lint_sh "${filename}" || lint_result=1 ;;
180 | tf) lint_tf "${filename}" || lint_result=1 ;;
181 | md) lint_md "${filename}" || lint_result=1 ;;
182 | esac
183 |
184 | spell_check "${filename}" || lint_result=1
185 |
186 | return $lint_result
187 | }
188 |
189 | lint_files() {
190 | local lint_result=0
191 | while read -r filename; do
192 | lint "${filename}" || lint_result=1
193 | done
194 | exit $lint_result
195 | }
196 |
197 | matches_any() {
198 | local filename="$1"
199 | local patterns="$2"
200 | while read -r pattern; do
201 | # shellcheck disable=SC2053
202 | # Use the [[ operator without quotes on $pattern
203 | # in order to "glob" the provided filename:
204 | [[ "$filename" == $pattern ]] && return 0
205 | done <<<"$patterns"
206 | return 1
207 | }
208 |
209 | filter_out() {
210 | local patterns_file="$1"
211 | if [ -n "$patterns_file" ] && [ -r "$patterns_file" ]; then
212 | local patterns
213 | patterns=$(sed '/^#.*$/d ; /^\s*$/d' "$patterns_file") # Remove blank lines and comments before we start iterating.
214 | [ -n "$DEBUG" ] && echo >&2 "> Filters:" && echo >&2 "$patterns"
215 | local filtered_out=()
216 | while read -r filename; do
217 | matches_any "$filename" "$patterns" && filtered_out+=("$filename") || echo "$filename"
218 | done
219 | [ -n "$DEBUG" ] && echo >&2 "> Files filtered out (i.e. NOT linted):" && printf >&2 '%s\n' "${filtered_out[@]}"
220 | else
221 | cat # No patterns provided: simply propagate stdin to stdout.
222 | fi
223 | }
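   |
   | # A hypothetical .lintignore illustrating the glob patterns consumed
   | # above (one pattern per line; blank lines and '#' comments are skipped):
   | #   vendor/*
   | #   *.pb.go
   | #   docs/generated-*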
224 |
225 | list_files() {
226 | if [ $# -gt 0 ]; then
227 | find "$@" | grep -vE '(^|/)vendor/'
228 | else
229 | git ls-files --exclude-standard | grep -vE '(^|/)vendor/'
230 | fi
231 | }
232 |
233 | if [ $# = 1 ] && [ -f "$1" ]; then
234 | lint "$1"
235 | elif [ -n "$PARALLEL" ]; then
236 | list_files "$@" | filter_out "$LINT_IGNORE_FILE" | xargs -n1 -P16 "$0"
237 | else
238 | list_files "$@" | filter_out "$LINT_IGNORE_FILE" | lint_files
239 | fi
240 |
--------------------------------------------------------------------------------
/tools/provisioning/README.md:
--------------------------------------------------------------------------------
1 | # Weaveworks provisioning
2 |
3 | ## Introduction
4 |
5 | This project allows you to get hold of some machine either locally or on one of the below cloud providers:
6 |
7 | * Amazon Web Services
8 | * Digital Ocean
9 | * Google Cloud Platform
10 |
11 | You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc.
12 |
13 | ## Set up
14 |
15 | * You will need [Vagrant](https://www.vagrantup.com) installed on your machine and added to your `PATH` in order to be able to provision local (virtual) machines automatically.
16 |
17 | * On macOS: `brew install vagrant`
18 | * On Linux (via Aptitude): `sudo apt install vagrant`
19 | * For other platforms or more details, see [here](https://www.vagrantup.com/docs/installation/)
20 |
21 | * You will need [Terraform](https://www.terraform.io) installed on your machine and added to your `PATH` in order to be able to provision cloud-hosted machines automatically.
22 |
23 | * On macOS: `brew install terraform`
24 | * On Linux (via Aptitude): `sudo apt install terraform`
25 | * If you need a specific version:
26 |
27 | curl -fsS https://releases.hashicorp.com/terraform/x.y.z/terraform_x.y.z_linux_amd64.zip | gunzip > terraform && chmod +x terraform && sudo mv terraform /usr/bin
28 |
29 | * For other platforms or more details, see [here](https://www.terraform.io/intro/getting-started/install.html)
30 |
31 | * Depending on the cloud provider, you may have to create an account, manually onboard, create and register SSH keys, etc.
32 | Please refer to the `README.md` in each sub-folder for more details.
33 |
34 | ## Usage in scripts
35 |
36 | Source `setup.sh`, set the `SECRET_KEY` environment variable, and depending on the cloud provider you want to use, call either:
37 |
38 | * `gcp_on` / `gcp_off`
39 | * `do_on` / `do_off`
40 | * `aws_on` / `aws_off`
41 |
42 | ## Usage in shell
43 |
44 | Source `setup.sh`, set the `SECRET_KEY` environment variable, and depending on the cloud provider you want to use, call either:
45 |
46 | * `gcp_on` / `gcp_off`
47 | * `do_on` / `do_off`
48 | * `aws_on` / `aws_off`
49 |
50 | Indeed, the functions defined in `setup.sh` are also exported as aliases, so you can call them from your shell directly.
51 |
52 | Other aliases are also defined, in order to make your life easier:
53 |
54 | * `tf_ssh`: to ease SSH-ing into the virtual machines, reading the username and IP address to use from Terraform, as well as setting default SSH options.
55 | * `tf_ansi`: to ease applying an Ansible playbook to a set of virtual machines, dynamically creating the inventory, as well as setting default SSH options.
56 |
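   | For example, a short session using these aliases might look as follows (a sketch, assuming a GCP deployment; the exact `tf_ansi` arguments depend on the playbook being applied):
   |
   | ```
   | source ./setup.sh
   | gcp_on             # Load the GCP credentials into the environment.
   | terraform apply    # Provision the machines.
   | tf_ssh 1           # SSH into the first machine.
   | gcp_off            # Clear the credentials from the environment.
   | ```
   |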
--------------------------------------------------------------------------------
/tools/provisioning/aws/README.md:
--------------------------------------------------------------------------------
1 | # Amazon Web Services
2 |
3 | ## Introduction
4 |
5 | This project allows you to get hold of some machine on Amazon Web Services.
6 | You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc.
7 |
8 | ## Setup
9 |
10 | * Log in [weaveworks.signin.aws.amazon.com/console](https://weaveworks.signin.aws.amazon.com/console/) with your account.
11 |
12 | * Go to `Services` > `IAM` > `Users` > Click on your username > `Security credentials` > `Create access key`.
13 | Your access key and secret key will appear on the screen. Set these as environment variables:
14 |
15 | ```
16 | export AWS_ACCESS_KEY_ID=
17 | export AWS_SECRET_ACCESS_KEY=
18 | ```
19 |
20 | * Go to `Services` > `EC2` > Select the region you want to use (see top right corner, e.g. `us-east-1`) > `Import Key Pair`.
21 | Enter your SSH public key and the name for it, and click `Import`.
22 | Set the path to your private key as an environment variable:
23 |
24 | ```
25 | export TF_VAR_aws_public_key_name=
26 | export TF_VAR_aws_private_key_path="$HOME/.ssh/id_rsa"
27 | ```
28 |
29 | * Set your current IP address as an environment variable:
30 |
31 | ```
32 | export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)
33 | ```
34 |
35 | or pass it as a Terraform variable:
36 |
37 | ```
38 | $ terraform apply -var "client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)"
39 | ```
40 |
41 | ### Bash aliases
42 |
43 | You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with one single command by adding the below to your `~/.bashrc` file:
44 |
45 | ```
46 | function _aws_on() {
47 | export AWS_ACCESS_KEY_ID="" # Replace with appropriate value.
48 | export AWS_SECRET_ACCESS_KEY="" # Replace with appropriate value.
49 | export TF_VAR_aws_public_key_name="" # Replace with appropriate value.
50 | export TF_VAR_aws_private_key_path="$HOME/.ssh/id_rsa" # Replace with appropriate value.
51 | }
52 | alias _aws_on='_aws_on'
53 | function _aws_off() {
54 | unset AWS_ACCESS_KEY_ID
55 | unset AWS_SECRET_ACCESS_KEY
56 | unset TF_VAR_aws_public_key_name
57 | unset TF_VAR_aws_private_key_path
58 | }
59 | alias _aws_off='_aws_off'
60 | ```
61 |
62 | N.B.:
63 |
64 | * sourcing `../setup.sh` defines aliases called `aws_on` and `aws_off`, similarly to the above (however, notice no `_` in front of the name, as opposed to the ones above);
65 | * `../setup.sh`'s `aws_on` alias needs the `SECRET_KEY` environment variable to be set in order to decrypt sensitive information.
66 |
67 | ## Usage
68 |
69 | * Create the machine: `terraform apply`
70 | * Show the machine's status: `terraform show`
71 | * Stop and destroy the machine: `terraform destroy`
72 | * SSH into the newly-created machine:
73 |
74 | ```
75 | $ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no `terraform output username`@`terraform output public_ips`
76 | # N.B.: the default username will differ depending on the AMI/OS you installed, e.g. ubuntu for Ubuntu, ec2-user for Red Hat, etc.
77 | ```
78 |
79 | or
80 |
81 | ```
82 | source ../setup.sh
83 | tf_ssh 1 # Or the nth machine, if multiple VMs are provisioned.
84 | ```
85 |
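   | To provision several instances at once, override the `num_hosts` Terraform variable (defined in `variables.tf`, default `1`), e.g.:
   |
   | ```
   | terraform apply -var 'num_hosts=3'
   | ```
   |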
86 | ## Resources
87 |
88 | * [https://www.terraform.io/docs/providers/aws/](https://www.terraform.io/docs/providers/aws/)
89 | * [https://www.terraform.io/docs/providers/aws/r/instance.html](https://www.terraform.io/docs/providers/aws/r/instance.html)
90 | * [Terraform variables](https://www.terraform.io/intro/getting-started/variables.html)
91 |
--------------------------------------------------------------------------------
/tools/provisioning/aws/main.tf:
--------------------------------------------------------------------------------
1 | # Specify the provider and access details
2 | provider "aws" {
3 | # Access key, secret key and region are sourced from environment variables or input arguments -- see README.md
4 | region = "${var.aws_dc}"
5 | }
6 |
7 | resource "aws_security_group" "allow_ssh" {
8 | name = "${var.name}_allow_ssh"
9 | description = "AWS security group to allow SSH-ing onto AWS EC2 instances (created using Terraform)."
10 |
11 | # Open TCP port for SSH:
12 | ingress {
13 | from_port = 22
14 | to_port = 22
15 | protocol = "tcp"
16 | cidr_blocks = ["${var.client_ip}/32"]
17 | }
18 |
19 | tags {
20 | Name = "${var.name}_allow_ssh"
21 | App = "${var.app}"
22 | CreatedBy = "terraform"
23 | }
24 | }
25 |
26 | resource "aws_security_group" "allow_docker" {
27 | name = "${var.name}_allow_docker"
28 | description = "AWS security group to allow communication with Docker on AWS EC2 instances (created using Terraform)."
29 |
30 | # Open TCP port for Docker:
31 | ingress {
32 | from_port = 2375
33 | to_port = 2375
34 | protocol = "tcp"
35 | cidr_blocks = ["${var.client_ip}/32"]
36 | }
37 |
38 | tags {
39 | Name = "${var.name}_allow_docker"
40 | App = "${var.app}"
41 | CreatedBy = "terraform"
42 | }
43 | }
44 |
45 | resource "aws_security_group" "allow_weave" {
46 | name = "${var.name}_allow_weave"
47 | description = "AWS security group to allow communication with Weave on AWS EC2 instances (created using Terraform)."
48 |
49 | # Open TCP port for Weave:
50 | ingress {
51 | from_port = 12375
52 | to_port = 12375
53 | protocol = "tcp"
54 | cidr_blocks = ["${var.client_ip}/32"]
55 | }
56 |
57 | tags {
58 | Name = "${var.name}_allow_weave"
59 | App = "${var.app}"
60 | CreatedBy = "terraform"
61 | }
62 | }
63 |
64 | resource "aws_security_group" "allow_private_ingress" {
65 | name = "${var.name}_allow_private_ingress"
66 | description = "AWS security group to allow all private ingress traffic on AWS EC2 instances (created using Terraform)."
67 |
68 | # Full inbound local network access on both TCP and UDP
69 | ingress {
70 | from_port = 0
71 | to_port = 0
72 | protocol = "-1"
73 | cidr_blocks = ["${var.aws_vpc_cidr_block}"]
74 | }
75 |
76 | tags {
77 | Name = "${var.name}_allow_private_ingress"
78 | App = "${var.app}"
79 | CreatedBy = "terraform"
80 | }
81 | }
82 |
83 | resource "aws_security_group" "allow_all_egress" {
84 | name = "${var.name}_allow_all_egress"
85 | description = "AWS security group to allow all egress traffic on AWS EC2 instances (created using Terraform)."
86 |
87 | # Full outbound internet access on both TCP and UDP
88 | egress {
89 | from_port = 0
90 | to_port = 0
91 | protocol = "-1"
92 | cidr_blocks = ["0.0.0.0/0"]
93 | }
94 |
95 | tags {
96 | Name = "${var.name}_allow_all_egress"
97 | App = "${var.app}"
98 | CreatedBy = "terraform"
99 | }
100 | }
101 |
102 | resource "aws_instance" "tf_test_vm" {
103 | instance_type = "${var.aws_size}"
104 | count = "${var.num_hosts}"
105 |
106 | # Lookup the correct AMI based on the region we specified
107 | ami = "${lookup(var.aws_amis, var.aws_dc)}"
108 |
109 | key_name = "${var.aws_public_key_name}"
110 |
111 | security_groups = [
112 | "${aws_security_group.allow_ssh.name}",
113 | "${aws_security_group.allow_docker.name}",
114 | "${aws_security_group.allow_weave.name}",
115 | "${aws_security_group.allow_private_ingress.name}",
116 | "${aws_security_group.allow_all_egress.name}",
117 | ]
118 |
119 | # Wait for machine to be SSH-able:
120 | provisioner "remote-exec" {
121 | inline = ["exit"]
122 |
123 | connection {
124 | type = "ssh"
125 |
126 | # Lookup the correct username based on the AMI we specified
127 | user = "${lookup(var.aws_usernames, "${lookup(var.aws_amis, var.aws_dc)}")}"
128 | private_key = "${file("${var.aws_private_key_path}")}"
129 | }
130 | }
131 |
132 | tags {
133 | Name = "${var.name}-${count.index}"
134 | App = "${var.app}"
135 | CreatedBy = "terraform"
136 | }
137 | }
138 |
--------------------------------------------------------------------------------
/tools/provisioning/aws/outputs.tf:
--------------------------------------------------------------------------------
1 | output "username" {
2 | value = "${lookup(var.aws_usernames, "${lookup(var.aws_amis, var.aws_dc)}")}"
3 | }
4 |
5 | output "public_ips" {
6 | value = ["${aws_instance.tf_test_vm.*.public_ip}"]
7 | }
8 |
9 | output "hostnames" {
10 | value = "${join("\n",
11 | "${formatlist("%v.%v.%v",
12 | aws_instance.tf_test_vm.*.tags.Name,
13 | aws_instance.tf_test_vm.*.availability_zone,
14 | var.app
15 | )}"
16 | )}"
17 | }
18 |
19 | # /etc/hosts file for the EC2 instances:
20 | output "private_etc_hosts" {
21 | value = "${join("\n",
22 | "${formatlist("%v %v.%v.%v",
23 | aws_instance.tf_test_vm.*.private_ip,
24 | aws_instance.tf_test_vm.*.tags.Name,
25 | aws_instance.tf_test_vm.*.availability_zone,
26 | var.app
27 | )}"
28 | )}"
29 | }
30 |
31 | # /etc/hosts file for the client:
32 | output "public_etc_hosts" {
33 | value = "${join("\n",
34 | "${formatlist("%v %v.%v.%v",
35 | aws_instance.tf_test_vm.*.public_ip,
36 | aws_instance.tf_test_vm.*.tags.Name,
37 | aws_instance.tf_test_vm.*.availability_zone,
38 | var.app
39 | )}"
40 | )}"
41 | }
42 |
43 | output "ansible_inventory" {
44 | value = "${format("[all]\n%s", join("\n",
45 | "${formatlist("%v private_ip=%v",
46 | aws_instance.tf_test_vm.*.public_ip,
47 | aws_instance.tf_test_vm.*.private_ip,
48 | )}"
49 | ))}"
50 | }
51 |
52 | output "private_key_path" {
53 | value = "${var.aws_private_key_path}"
54 | }
55 |
--------------------------------------------------------------------------------
/tools/provisioning/aws/variables.tf:
--------------------------------------------------------------------------------
1 | variable "client_ip" {
2 | description = "IP address of the client machine"
3 | }
4 |
5 | variable "app" {
6 | description = "Name of the application using the created EC2 instance(s)."
7 | default = "default"
8 | }
9 |
10 | variable "name" {
11 | description = "Name of the EC2 instance(s)."
12 | default = "test"
13 | }
14 |
15 | variable "num_hosts" {
16 | description = "Number of EC2 instance(s)."
17 | default = 1
18 | }
19 |
20 | variable "aws_vpc_cidr_block" {
21 | description = "AWS VPC CIDR block to use to attribute private IP addresses."
22 | default = "172.31.0.0/16"
23 | }
24 |
25 | variable "aws_public_key_name" {
26 | description = "Name of the SSH keypair to use in AWS."
27 | }
28 |
29 | variable "aws_private_key_path" {
30 | description = "Path to file containing private key"
31 | default = "~/.ssh/id_rsa"
32 | }
33 |
34 | variable "aws_dc" {
35 | description = "The AWS region to create things in."
36 | default = "us-east-1"
37 | }
38 |
39 | variable "aws_amis" {
40 | default = {
41 | # Ubuntu Server 16.04 LTS (HVM), SSD Volume Type:
42 | "us-east-1" = "ami-40d28157"
43 | "eu-west-2" = "ami-23d0da47"
44 |
45 | # Red Hat Enterprise Linux 7.3 (HVM), SSD Volume Type:
46 |
47 | #"us-east-1" = "ami-b63769a1"
48 |
49 | # CentOS 7 (x86_64) - with Updates HVM
50 |
51 | #"us-east-1" = "ami-6d1c2007"
52 | }
53 | }
54 |
55 | variable "aws_usernames" {
56 | description = "User to SSH as into the AWS instance."
57 |
58 | default = {
59 | "ami-40d28157" = "ubuntu" # Ubuntu Server 16.04 LTS (HVM)
60 | "ami-b63769a1" = "ec2-user" # Red Hat Enterprise Linux 7.3 (HVM)
61 | "ami-6d1c2007" = "centos" # CentOS 7 (x86_64) - with Updates HVM
62 | }
63 | }
64 |
65 | variable "aws_size" {
66 | description = "AWS' selected machine size"
67 | default = "t2.medium" # Instance with 2 cores & 4 GB memory
68 | }
69 |
--------------------------------------------------------------------------------
/tools/provisioning/do/README.md:
--------------------------------------------------------------------------------
1 | # Digital Ocean
2 |
3 | ## Introduction
4 |
5 | This project allows you to get hold of some machine on Digital Ocean.
6 | You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc.
7 |
8 | ## Setup
9 |
10 | * Log in [cloud.digitalocean.com](https://cloud.digitalocean.com) with your account.
11 |
12 | * Go to `Settings` > `Security` > `SSH keys` > `Add SSH Key`.
13 | Enter your SSH public key and the name for it, and click `Add SSH Key`.
14 | Set the path to your private key as an environment variable:
15 |
16 | ```
17 | export DIGITALOCEAN_SSH_KEY_NAME=
18 | export TF_VAR_do_private_key_path="$HOME/.ssh/id_rsa"
19 | ```
20 |
21 | * Go to `API` > `Tokens` > `Personal access tokens` > `Generate New Token`.
22 | Enter your token name and click `Generate Token` to get your 64-character API token.
23 | Set these as environment variables:
24 |
25 | ```
26 | export DIGITALOCEAN_TOKEN_NAME=""
27 | export DIGITALOCEAN_TOKEN=
28 | ```
29 |
30 | * Run the following command to get the Digital Ocean ID for your SSH public key (e.g. `1234567`) and set it as an environment variable:
31 |
32 | ```
33 | $ export TF_VAR_do_public_key_id=$(curl -s -X GET -H "Content-Type: application/json" \
34 | -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/account/keys" \
35 | | jq -c --arg key_name "$DIGITALOCEAN_SSH_KEY_NAME" '.ssh_keys | .[] | select(.name==$key_name) | .id')
36 | ```
37 |
38 | or pass it as a Terraform variable:
39 |
40 | ```
41 | $ terraform apply \
42 | -var 'do_private_key_path=' \
43 | -var 'do_public_key_id='
44 | ```
45 |
46 | ### Bash aliases
47 |
48 | You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with one single command by adding the below to your `~/.bashrc` file:
49 |
50 | ```
51 | function _do_on() {
52 | export DIGITALOCEAN_TOKEN_NAME="" # Replace with appropriate value.
53 | export DIGITALOCEAN_TOKEN= # Replace with appropriate value.
54 | export DIGITALOCEAN_SSH_KEY_NAME="" # Replace with appropriate value.
55 | export TF_VAR_do_private_key_path="$HOME/.ssh/id_rsa" # Replace with appropriate value.
56 | export TF_VAR_do_public_key_path="$HOME/.ssh/id_rsa.pub" # Replace with appropriate value.
57 | export TF_VAR_do_public_key_id= # Replace with appropriate value.
58 | }
59 | alias _do_on='_do_on'
60 | function _do_off() {
61 | unset DIGITALOCEAN_TOKEN_NAME
62 | unset DIGITALOCEAN_TOKEN
63 | unset DIGITALOCEAN_SSH_KEY_NAME
64 | unset TF_VAR_do_private_key_path
65 | unset TF_VAR_do_public_key_path
66 | unset TF_VAR_do_public_key_id
67 | }
68 | alias _do_off='_do_off'
69 | ```
70 |
71 | N.B.:
72 |
73 | * sourcing `../setup.sh` defines aliases called `do_on` and `do_off`, similarly to the above (however, notice no `_` in front of the name, as opposed to the ones above);
74 | * `../setup.sh`'s `do_on` alias needs the `SECRET_KEY` environment variable to be set in order to decrypt sensitive information.
75 |
76 | ## Usage
77 |
78 | * Create the machine: `terraform apply`
79 | * Show the machine's status: `terraform show`
80 | * Stop and destroy the machine: `terraform destroy`
81 | * SSH into the newly-created machine:
82 |
83 | ```
84 | $ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no `terraform output username`@`terraform output public_ips`
85 | ```
86 |
87 | or
88 |
89 | ```
90 | source ../setup.sh
91 | tf_ssh 1 # Or the nth machine, if multiple VMs are provisioned.
92 | ```
93 |
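   | Several droplets can be provisioned at once by overriding the `num_hosts` Terraform variable (defined in `variables.tf`, default `1`), e.g.:
   |
   | ```
   | terraform apply -var 'num_hosts=2'
   | ```
   |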
94 | ## Resources
95 |
96 | * [https://www.terraform.io/docs/providers/do/](https://www.terraform.io/docs/providers/do/)
97 | * [https://www.terraform.io/docs/providers/do/r/droplet.html](https://www.terraform.io/docs/providers/do/r/droplet.html)
98 | * [Terraform variables](https://www.terraform.io/intro/getting-started/variables.html)
99 |
--------------------------------------------------------------------------------
/tools/provisioning/do/main.tf:
--------------------------------------------------------------------------------
1 | provider "digitalocean" {
2 | # See README.md for setup instructions.
3 | }
4 |
5 | # Tags to label and organize droplets:
6 | resource "digitalocean_tag" "name" {
7 | name = "${var.name}"
8 | }
9 |
10 | resource "digitalocean_tag" "app" {
11 | name = "${var.app}"
12 | }
13 |
14 | resource "digitalocean_tag" "terraform" {
15 | name = "terraform"
16 | }
17 |
18 | resource "digitalocean_droplet" "tf_test_vm" {
19 | ssh_keys = ["${var.do_public_key_id}"]
20 | image = "${var.do_os}"
21 | region = "${var.do_dc}"
22 | size = "${var.do_size}"
23 | name = "${var.name}-${count.index}"
24 | count = "${var.num_hosts}"
25 |
26 | tags = [
27 | "${var.app}",
28 | "${var.name}",
29 | "terraform",
30 | ]
31 |
32 | # Wait for machine to be SSH-able:
33 | provisioner "remote-exec" {
34 | inline = ["exit"]
35 |
36 | connection {
37 | type = "ssh"
38 | user = "${var.do_username}"
39 | private_key = "${file("${var.do_private_key_path}")}"
40 | }
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/tools/provisioning/do/outputs.tf:
--------------------------------------------------------------------------------
1 | output "username" {
2 | value = "${var.do_username}"
3 | }
4 |
5 | output "public_ips" {
6 | value = ["${digitalocean_droplet.tf_test_vm.*.ipv4_address}"]
7 | }
8 |
9 | output "hostnames" {
10 | value = "${join("\n",
11 | "${formatlist("%v.%v.%v",
12 | digitalocean_droplet.tf_test_vm.*.name,
13 | digitalocean_droplet.tf_test_vm.*.region,
14 | var.app
15 | )}"
16 | )}"
17 | }
18 |
19 | # /etc/hosts file for the Droplets:
20 | # N.B.: by default Digital Ocean droplets only have public IPs, but in order to
21 | # be consistent with other providers' recipes, we provide an output to generate
22 | # an /etc/hosts file on the Droplets, even though it is using public IPs only.
23 | output "private_etc_hosts" {
24 | value = "${join("\n",
25 | "${formatlist("%v %v.%v.%v",
26 | digitalocean_droplet.tf_test_vm.*.ipv4_address,
27 | digitalocean_droplet.tf_test_vm.*.name,
28 | digitalocean_droplet.tf_test_vm.*.region,
29 | var.app
30 | )}"
31 | )}"
32 | }
33 |
34 | # /etc/hosts file for the client:
35 | output "public_etc_hosts" {
36 | value = "${join("\n",
37 | "${formatlist("%v %v.%v.%v",
38 | digitalocean_droplet.tf_test_vm.*.ipv4_address,
39 | digitalocean_droplet.tf_test_vm.*.name,
40 | digitalocean_droplet.tf_test_vm.*.region,
41 | var.app
42 | )}"
43 | )}"
44 | }
45 |
46 | output "ansible_inventory" {
47 | value = "${format("[all]\n%s", join("\n",
48 | "${formatlist("%v private_ip=%v",
49 | digitalocean_droplet.tf_test_vm.*.ipv4_address,
50 | digitalocean_droplet.tf_test_vm.*.ipv4_address
51 | )}"
52 | ))}"
53 | }
54 |
55 | output "private_key_path" {
56 | value = "${var.do_private_key_path}"
57 | }
58 |
--------------------------------------------------------------------------------
/tools/provisioning/do/variables.tf:
--------------------------------------------------------------------------------
1 | variable "client_ip" {
2 | description = "IP address of the client machine"
3 | }
4 |
5 | variable "app" {
6 | description = "Name of the application using the created droplet(s)."
7 | default = "default"
8 | }
9 |
10 | variable "name" {
11 | description = "Name of the droplet(s)."
12 | default = "test"
13 | }
14 |
15 | variable "num_hosts" {
16 | description = "Number of droplet(s)."
17 | default = 1
18 | }
19 |
20 | variable "do_private_key_path" {
21 | description = "Digital Ocean SSH private key path"
22 | default = "~/.ssh/id_rsa"
23 | }
24 |
25 | variable "do_public_key_id" {
26 | description = "Digital Ocean ID for your SSH public key"
27 |
28 | # You can retrieve it and set it as an environment variable this way:
29 |
30 | # $ export TF_VAR_do_public_key_id=$(curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/account/keys" | jq -c --arg key_name "$DIGITALOCEAN_SSH_KEY_NAME" '.ssh_keys | .[] | select(.name==$key_name) | .id')
31 | }
32 |
33 | variable "do_username" {
34 | description = "Digital Ocean SSH username"
35 | default = "root"
36 | }
37 |
38 | variable "do_os" {
39 | description = "Digital Ocean OS"
40 | default = "ubuntu-16-04-x64"
41 | }
42 |
43 | # curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/images?page=1&per_page=999999" | jq ".images | .[] | .slug" | grep -P "ubuntu|coreos|centos" | grep -v alpha | grep -v beta
44 | # "ubuntu-16-04-x32"
45 | # "ubuntu-16-04-x64"
46 | # "ubuntu-16-10-x32"
47 | # "ubuntu-16-10-x64"
48 | # "ubuntu-14-04-x32"
49 | # "ubuntu-14-04-x64"
50 | # "ubuntu-12-04-x64"
51 | # "ubuntu-12-04-x32"
52 | # "coreos-stable"
53 | # "centos-6-5-x32"
54 | # "centos-6-5-x64"
55 | # "centos-7-0-x64"
56 | # "centos-7-x64"
57 | # "centos-6-x64"
58 | # "centos-6-x32"
59 | # "centos-5-x64"
60 | # "centos-5-x32"
61 |
62 | # Digital Ocean datacenters
63 | # See also:
64 | # $ curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/regions" | jq -c ".regions | .[] | .slug" | sort -u
65 |
66 | variable "do_dc_ams2" {
67 | description = "Digital Ocean Amsterdam Datacenter 2"
68 | default = "ams2"
69 | }
70 |
71 | variable "do_dc_ams3" {
72 | description = "Digital Ocean Amsterdam Datacenter 3"
73 | default = "ams3"
74 | }
75 |
76 | variable "do_dc_blr1" {
77 | description = "Digital Ocean Bangalore Datacenter 1"
78 | default = "blr1"
79 | }
80 |
81 | variable "do_dc_fra1" {
82 | description = "Digital Ocean Frankfurt Datacenter 1"
83 | default = "fra1"
84 | }
85 |
86 | variable "do_dc_lon1" {
87 | description = "Digital Ocean London Datacenter 1"
88 | default = "lon1"
89 | }
90 |
91 | variable "do_dc_nyc1" {
92 | description = "Digital Ocean New York Datacenter 1"
93 | default = "nyc1"
94 | }
95 |
96 | variable "do_dc_nyc2" {
97 | description = "Digital Ocean New York Datacenter 2"
98 | default = "nyc2"
99 | }
100 |
101 | variable "do_dc_nyc3" {
102 | description = "Digital Ocean New York Datacenter 3"
103 | default = "nyc3"
104 | }
105 |
106 | variable "do_dc_sfo1" {
107 | description = "Digital Ocean San Francisco Datacenter 1"
108 | default = "sfo1"
109 | }
110 |
111 | variable "do_dc_sfo2" {
112 | description = "Digital Ocean San Francisco Datacenter 2"
113 | default = "sfo2"
114 | }
115 |
116 | variable "do_dc_sgp1" {
117 | description = "Digital Ocean Singapore Datacenter 1"
118 | default = "sgp1"
119 | }
120 |
121 | variable "do_dc_tor1" {
122 | description = "Digital Ocean Toronto Datacenter 1"
123 | default = "tor1"
124 | }
125 |
126 | variable "do_dc" {
127 | description = "Digital Ocean's selected datacenter"
128 | default = "lon1"
129 | }
130 |
131 | variable "do_size" {
132 | description = "Digital Ocean's selected machine size"
133 | default = "4gb"
134 | }
135 |
136 | # Digital Ocean sizes
137 | # See also:
138 | # $ curl -s -X GET -H "Content-Type: application/json" -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/sizes" | jq -c ".sizes | .[] | .slug"
139 | # "512mb"
140 | # "1gb"
141 | # "2gb"
142 | # "4gb"
143 | # "8gb"
144 | # "16gb"
145 | # "m-16gb"
146 | # "32gb"
147 | # "m-32gb"
148 | # "48gb"
149 | # "m-64gb"
150 | # "64gb"
151 | # "m-128gb"
152 | # "m-224gb"
153 |
--------------------------------------------------------------------------------
/tools/provisioning/gcp/README.md:
--------------------------------------------------------------------------------
1 | # Google Cloud Platform
2 |
3 | ## Introduction
4 |
5 | This project allows you to get hold of some machine on Google Cloud Platform.
6 | You can then use these machines as is or run various Ansible playbooks from `../config_management` to set up Weave Net, Kubernetes, etc.
7 |
8 | ## Setup
9 |
10 | * Log in [console.cloud.google.com](https://console.cloud.google.com) with your Google account.
11 |
12 | * Go to `API Manager` > `Credentials` > `Create credentials` > `Service account key`,
13 | in `Service account`, select `Compute Engine default service account`,
14 | in `Key type`, select `JSON`, and then click `Create`.
15 |
16 | * This will download a JSON file to your machine. Place this file wherever you want and then create the following environment variables:
17 |
18 | ```
19 | $ export GOOGLE_CREDENTIALS_FILE="path/to/your.json"
20 | $ export GOOGLE_CREDENTIALS=$(cat "$GOOGLE_CREDENTIALS_FILE")
21 | ```
22 |
23 | * Go to `Compute Engine` > `Metadata` > `SSH keys` and add your username and SSH public key;
24 | or
25 | set it up using `gcloud compute project-info add-metadata --metadata-from-file sshKeys=~/.ssh/id_rsa.pub`.
26 | If you used your default SSH key (i.e. `~/.ssh/id_rsa.pub`), then you do not have anything to do.
27 | Otherwise, you will have to either define the below environment variables:
28 |
29 | ```
30 | $ export TF_VAR_gcp_public_key_path=
31 | $ export TF_VAR_gcp_private_key_path=
32 | ```
33 |
34 | or pass these as Terraform variables:
35 |
36 | ```
37 | $ terraform apply \
38 | -var 'gcp_public_key_path=' \
39 | -var 'gcp_private_key_path='
40 | ```
41 |
42 | * Set the username in your public key as an environment variable.
43 | This will be used as the username of the Linux account created on the machine, which you will need in order to SSH into it later on.
44 |
45 | N.B.:
46 | * GCP already has the username set from the SSH public key you uploaded in the previous step.
47 | * If your username is an email address, e.g. `name@domain.com`, then GCP uses `name` as the username.
48 |
49 | ```
50 | export TF_VAR_gcp_username=
51 | ```
52 |
53 | * Set your current IP address as an environment variable:
54 |
55 | ```
56 | export TF_VAR_client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)
57 | ```
58 |
59 | or pass it as a Terraform variable:
60 |
61 | ```
62 | $ terraform apply -var "client_ip=$(curl -s -X GET http://checkip.amazonaws.com/)"
63 | ```
64 |
65 | * Set your project as an environment variable:
66 |
67 | ```
68 | export TF_VAR_gcp_project=weave-net-tests
69 | ```
70 |
71 | or pass it as a Terraform variable:
72 |
73 | ```
74 | $ terraform apply -var 'gcp_project=weave-net-tests'
75 | ```
76 |
77 | ### Bash aliases
78 |
79 | You can set the above variables temporarily in your current shell, permanently in your `~/.bashrc` file, or define aliases to activate/deactivate them at will with one single command by adding the below to your `~/.bashrc` file:
80 |
81 | ```
82 | function _gcp_on() {
83 | export GOOGLE_CREDENTIALS_FILE="path/to/your/json/credentials/file" # Replace with appropriate value.
84 | export GOOGLE_CREDENTIALS=$(cat "$GOOGLE_CREDENTIALS_FILE")
85 | export TF_VAR_gcp_private_key_path="$HOME/.ssh/id_rsa" # Replace with appropriate value.
86 | export TF_VAR_gcp_public_key_path="$HOME/.ssh/id_rsa.pub" # Replace with appropriate value.
87 | export TF_VAR_gcp_username="" # Replace with appropriate value.
88 | }
89 | alias _gcp_on='_gcp_on'
90 | function _gcp_off() {
91 | unset GOOGLE_CREDENTIALS_FILE
92 | unset GOOGLE_CREDENTIALS
93 | unset TF_VAR_gcp_private_key_path
94 | unset TF_VAR_gcp_public_key_path
95 | unset TF_VAR_gcp_username
96 | }
97 | alias _gcp_off='_gcp_off'
98 | ```
99 |
100 | N.B.:
101 |
102 | * sourcing `../setup.sh` defines aliases called `gcp_on` and `gcp_off`, similarly to the above (however, notice no `_` in front of the name, as opposed to the ones above);
103 | * `../setup.sh`'s `gcp_on` alias needs the `SECRET_KEY` environment variable to be set in order to decrypt sensitive information.
104 |
105 | ## Usage
106 |
107 | * Create the machine: `terraform apply`
108 | * Show the machine's status: `terraform show`
109 | * Stop and destroy the machine: `terraform destroy`
110 | * SSH into the newly-created machine:
111 |
112 | ```
113 | $ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no `terraform output username`@`terraform output public_ips`
114 | ```
115 |
116 | or
117 |
118 | ```
119 | source ../setup.sh
120 | tf_ssh 1 # Or the nth machine, if multiple VMs are provisioned.
121 | ```
122 |
123 | ## Resources
124 |
125 | * [https://www.terraform.io/docs/providers/google/](https://www.terraform.io/docs/providers/google/)
126 | * [https://www.terraform.io/docs/providers/google/r/compute_instance.html](https://www.terraform.io/docs/providers/google/r/compute_instance.html)
127 | * [Terraform variables](https://www.terraform.io/intro/getting-started/variables.html)
128 |
--------------------------------------------------------------------------------
/tools/provisioning/gcp/main.tf:
--------------------------------------------------------------------------------
1 | provider "google" {
2 | # Credentials are sourced from the GOOGLE_CREDENTIALS environment
3 | # variable -- see README.md for setup instructions.
4 | #
5 | # See also:
6 | # - https://www.terraform.io/docs/providers/google/
7 | # - https://console.cloud.google.com/apis/credentials
8 | #
9 |
10 | region = "${var.gcp_region}"
11 |
12 | project = "${var.gcp_project}"
13 | }
14 |
15 | resource "google_compute_instance" "tf_test_vm" {
16 | name = "${var.name}-${count.index}"
17 | machine_type = "${var.gcp_size}"
18 | zone = "${var.gcp_zone}"
19 | count = "${var.num_hosts}"
20 |
21 | disk {
22 | image = "${var.gcp_image}"
23 | }
24 |
25 | tags = [
26 | "${var.app}",
27 | "${var.name}",
28 | "terraform",
29 | ]
30 |
31 | network_interface {
32 | network = "${var.gcp_network}"
33 |
34 | access_config {
35 | // Ephemeral IP
36 | }
37 | }
38 |
39 | metadata {
40 | ssh-keys = "${var.gcp_username}:${file("${var.gcp_public_key_path}")}"
41 | }
42 |
43 | # Wait for machine to be SSH-able:
44 | provisioner "remote-exec" {
45 | inline = ["exit"]
46 |
47 | connection {
48 | type = "ssh"
49 | user = "${var.gcp_username}"
50 | private_key = "${file("${var.gcp_private_key_path}")}"
51 | }
52 | }
53 | }
54 |
55 | resource "google_compute_firewall" "fw-allow-docker-and-weave" {
56 | name = "${var.name}-allow-docker-and-weave"
57 | network = "${var.gcp_network}"
58 | target_tags = ["${var.name}"]
59 |
60 | allow {
61 | protocol = "tcp"
62 | ports = ["2375", "12375"]
63 | }
64 |
65 | source_ranges = ["${var.client_ip}"]
66 | }
67 |
68 | # Required for FastDP crypto in Weave Net:
69 | resource "google_compute_firewall" "fw-allow-esp" {
70 | name = "${var.name}-allow-esp"
71 | network = "${var.gcp_network}"
72 | target_tags = ["${var.name}"]
73 |
74 | allow {
75 | protocol = "esp"
76 | }
77 |
78 | source_ranges = ["${var.gcp_network_global_cidr}"]
79 | }
80 |
--------------------------------------------------------------------------------
/tools/provisioning/gcp/outputs.tf:
--------------------------------------------------------------------------------
1 | output "username" {
2 | value = "${var.gcp_username}"
3 | }
4 |
5 | output "public_ips" {
6 | value = ["${google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip}"]
7 | }
8 |
9 | output "hostnames" {
10 | value = "${join("\n",
11 | "${formatlist("%v.%v.%v",
12 | google_compute_instance.tf_test_vm.*.name,
13 | google_compute_instance.tf_test_vm.*.zone,
14 | var.app
15 | )}"
16 | )}"
17 | }
18 |
19 | # /etc/hosts file for the Compute Engine instances:
20 | output "private_etc_hosts" {
21 | value = "${join("\n",
22 | "${formatlist("%v %v.%v.%v",
23 | google_compute_instance.tf_test_vm.*.network_interface.0.address,
24 | google_compute_instance.tf_test_vm.*.name,
25 | google_compute_instance.tf_test_vm.*.zone,
26 | var.app
27 | )}"
28 | )}"
29 | }
30 |
31 | # /etc/hosts file for the client:
32 | output "public_etc_hosts" {
33 | value = "${join("\n",
34 | "${formatlist("%v %v.%v.%v",
35 | google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip,
36 | google_compute_instance.tf_test_vm.*.name,
37 | google_compute_instance.tf_test_vm.*.zone,
38 | var.app
39 | )}"
40 | )}"
41 | }
42 |
43 | output "ansible_inventory" {
44 | value = "${format("[all]\n%s", join("\n",
45 | "${formatlist("%v private_ip=%v",
46 | google_compute_instance.tf_test_vm.*.network_interface.0.access_config.0.assigned_nat_ip,
47 | google_compute_instance.tf_test_vm.*.network_interface.0.address
48 | )}"
49 | ))}"
50 | }
51 |
52 | output "private_key_path" {
53 | value = "${var.gcp_private_key_path}"
54 | }
55 |
56 | output "instances_names" {
57 | value = ["${google_compute_instance.tf_test_vm.*.name}"]
58 | }
59 |
60 | output "image" {
61 | value = "${var.gcp_image}"
62 | }
63 |
64 | output "zone" {
65 | value = "${var.gcp_zone}"
66 | }
67 |
--------------------------------------------------------------------------------
/tools/provisioning/gcp/variables.tf:
--------------------------------------------------------------------------------
1 | variable "gcp_username" {
2 | description = "Google Cloud Platform SSH username"
3 | }
4 |
5 | variable "app" {
6 | description = "Name of the application using the created Compute Engine instance(s)."
7 | default = "default"
8 | }
9 |
10 | variable "name" {
11 | description = "Name of the Compute Engine instance(s)."
12 | default = "test"
13 | }
14 |
15 | variable "num_hosts" {
16 | description = "Number of Compute Engine instance(s)."
17 | default = 1
18 | }
19 |
20 | variable "client_ip" {
21 | description = "IP address of the client machine"
22 | }
23 |
24 | variable "gcp_public_key_path" {
25 | description = "Path to file containing public key"
26 | default = "~/.ssh/id_rsa.pub"
27 | }
28 |
29 | variable "gcp_private_key_path" {
30 | description = "Path to file containing private key"
31 | default = "~/.ssh/id_rsa"
32 | }
33 |
34 | variable "gcp_project" {
35 | description = "Google Cloud Platform project"
36 | default = "weave-net-tests"
37 | }
38 |
39 | variable "gcp_image" {
40 | # See also: https://cloud.google.com/compute/docs/images
41 | # For example:
42 | # - "ubuntu-os-cloud/ubuntu-1604-lts"
43 | # - "debian-cloud/debian-8"
44 | # - "centos-cloud/centos-7"
45 | # - "rhel-cloud/rhel7"
46 | description = "Google Cloud Platform OS"
47 |
48 | default = "ubuntu-os-cloud/ubuntu-1604-lts"
49 | }
50 |
51 | variable "gcp_size" {
52 | # See also:
53 | # $ gcloud compute machine-types list
54 | description = "Google Cloud Platform's selected machine size"
55 |
56 | default = "n1-standard-1"
57 | }
58 |
59 | variable "gcp_region" {
60 | description = "Google Cloud Platform's selected region"
61 | default = "us-central1"
62 | }
63 |
64 | variable "gcp_zone" {
65 | description = "Google Cloud Platform's selected zone"
66 | default = "us-central1-a"
67 | }
68 |
69 | variable "gcp_network" {
70 | description = "Google Cloud Platform's selected network"
71 | default = "test"
72 | }
73 |
74 | variable "gcp_network_global_cidr" {
75 | description = "CIDR covering all regions for the selected Google Cloud Platform network"
76 | default = "10.128.0.0/9"
77 | }
78 |
--------------------------------------------------------------------------------
/tools/publish-site:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | set -o pipefail
5 |
6 | : "${PRODUCT:=}"
7 |
8 | fatal() {
9 | echo "$@" >&2
10 | exit 1
11 | }
12 |
13 | if [ ! -d .git ]; then
14 | fatal "Current directory is not a git clone"
15 | fi
16 |
17 | if [ -z "${PRODUCT}" ]; then
18 | fatal "Must specify PRODUCT"
19 | fi
20 |
21 | if ! BRANCH=$(git symbolic-ref --short HEAD) || [ -z "$BRANCH" ]; then
22 | fatal "Could not determine branch"
23 | fi
24 |
25 | case "$BRANCH" in
26 | issues/*)
27 | VERSION="${BRANCH#issues/}"
28 | TAGS="$VERSION"
29 | ;;
30 | *)
31 | if echo "$BRANCH" | grep -qE '^[0-9]+\.[0-9]+'; then
32 | DESCRIBE=$(git describe --match 'v*')
33 | if ! VERSION=$(echo "$DESCRIBE" | grep -oP '(?<=^v)[0-9]+\.[0-9]+\.[0-9]+'); then
34 | fatal "Could not infer latest $BRANCH version from $DESCRIBE"
35 | fi
36 | TAGS="$VERSION latest"
37 | else
38 | VERSION="$BRANCH"
39 | TAGS="$VERSION"
40 | fi
41 | ;;
42 | esac
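   | # For example, on branch "1.9" with "git describe" returning
   | # "v1.9.3-5-gabc1234" (hypothetical), VERSION becomes "1.9.3" and
   | # TAGS becomes "1.9.3 latest".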
43 |
44 | for TAG in $TAGS; do
45 | echo ">>> Publishing $PRODUCT $VERSION to $1/docs/$PRODUCT/$TAG"
46 | wordepress \
47 | --url "$1" --user "$2" --password "$3" \
48 | --product "$PRODUCT" --version "$VERSION" --tag "$TAG" \
49 | publish site
50 | done
51 |
--------------------------------------------------------------------------------
/tools/push-images:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
7 | QUAY_PREFIX=quay.io/
8 | IMAGES=$(make images)
9 | IMAGE_TAG=$(./tools/image-tag)
10 |
11 | usage() {
12 | echo "$0 [-no-docker-hub]"
13 | }
14 |
15 | NO_DOCKER_HUB=
16 | while [ $# -gt 0 ]; do
17 | case "$1" in
18 | -no-docker-hub)
19 | NO_DOCKER_HUB=1
20 | shift 1
21 | ;;
22 | *)
23 | usage
24 | exit 2
25 | ;;
26 | esac
27 | done
28 |
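   | # For example (hypothetical image), quay.io/example/foo:abc123 is pushed
   | # to Quay and, unless -no-docker-hub is given, re-tagged and pushed as
   | # example/foo:abc123 on Docker Hub.
   |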
29 | push_image() {
30 | local image="$1"
31 | docker push "${image}:${IMAGE_TAG}"
32 | }
33 |
34 | for image in ${IMAGES}; do
35 | if [[ "$image" == *"build"* ]]; then
36 | continue
37 | fi
38 | echo "Will push ${image}:${IMAGE_TAG}"
39 | push_image "${image}" &
40 |
41 | if [ -z "$NO_DOCKER_HUB" ]; then
42 | # remove the quay prefix and push to Docker Hub
43 | docker_hub_image=${image#$QUAY_PREFIX}
44 | docker tag "${image}:${IMAGE_TAG}" "${docker_hub_image}:${IMAGE_TAG}"
45 | echo "Will push ${docker_hub_image}:${IMAGE_TAG}"
46 | docker push "${docker_hub_image}:${IMAGE_TAG}"
47 | fi
48 | done
49 |
50 | wait
51 |
--------------------------------------------------------------------------------
/tools/rebuild-image:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Rebuild a cached docker image if the input files have changed.
3 | # Usage: ./rebuild-image <image name> <image dir> <input files...>
4 |
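   | # e.g. (hypothetical arguments):
   | #   ./rebuild-image quay.io/example/build build build/Dockerfile build/build.sh
   |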
5 | set -eux
6 |
7 | IMAGENAME=$1
8 | # shellcheck disable=SC2001
9 | SAVEDNAME=$(echo "$IMAGENAME" | sed "s/[\/\-]/\./g")
10 | IMAGEDIR=$2
11 | shift 2
12 |
13 | INPUTFILES=("$@")
14 | CACHEDIR=$HOME/docker/
15 |
16 | # Rebuild the image
17 | rebuild() {
18 | mkdir -p "$CACHEDIR"
19 | rm "$CACHEDIR/$SAVEDNAME"* || true
20 | docker build -t "$IMAGENAME" "$IMAGEDIR"
21 | docker save "$IMAGENAME:latest" | gzip - >"$CACHEDIR/$SAVEDNAME-$CIRCLE_SHA1.gz"
22 | }
23 |
24 | # Get the revision the cached image was built at
25 | cached_image_rev() {
26 | find "$CACHEDIR" -name "$SAVEDNAME-*" -type f | sed -n 's/^[^\-]*\-\([a-z0-9]*\).gz$/\1/p'
27 | }
28 |
29 | # Have there been any revisions between $1 and $2?
30 | has_changes() {
31 | local rev1=$1
32 | local rev2=$2
33 | local changes
34 | changes=$(git diff --oneline "$rev1..$rev2" -- "${INPUTFILES[@]}" | wc -l)
35 | [ "$changes" -gt 0 ]
36 | }
37 |
38 | commit_timestamp() {
39 | local rev=$1
40 | git show -s --format=%ct "$rev"
41 | }
42 |
43 | # Is the SHA1 actually present in the repo?
44 | # It could be it isn't, e.g. after a force push
45 | is_valid_commit() {
46 | local rev=$1
47 | git rev-parse --quiet --verify "$rev^{commit}" >/dev/null
48 | }
49 |
50 | cached_revision=$(cached_image_rev)
51 | if [ -z "$cached_revision" ]; then
52 | echo ">>> No cached image found; rebuilding"
53 | rebuild
54 | exit 0
55 | fi
56 |
57 | if ! is_valid_commit "$cached_revision"; then
58 | echo ">>> Git commit of cached image not found in repo; rebuilding"
59 | rebuild
60 | exit 0
61 | fi
62 |
63 | echo ">>> Found cached image rev $cached_revision"
64 | if has_changes "$cached_revision" "$CIRCLE_SHA1"; then
65 | echo ">>> Found changes, rebuilding"
66 | rebuild
67 | exit 0
68 | fi
69 |
70 | IMAGE_TIMEOUT="$((3 * 24 * 60 * 60))"
71 | if [ "$(commit_timestamp "$cached_revision")" -lt "$(($(date +%s) - IMAGE_TIMEOUT))" ]; then
72 | echo ">>> Image is more than 3 days old; rebuilding"
73 | rebuild
74 | exit 0
75 | fi
76 |
77 | # we didn't rebuild; import cached version
78 | echo ">>> No changes found, importing cached image"
79 | zcat "$CACHEDIR/$SAVEDNAME-$cached_revision.gz" | docker load
80 |
--------------------------------------------------------------------------------
/tools/runner/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all clean
2 |
3 | all: runner
4 |
5 | runner: *.go
6 | go get -tags netgo ./$(@D)
7 | go build -ldflags "-extldflags \"-static\" -linkmode=external" -tags netgo -o $@ ./$(@D)
8 |
9 | clean:
10 | rm -rf runner
11 | go clean ./...
12 |
--------------------------------------------------------------------------------
/tools/runner/runner.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "net/http"
8 | "net/url"
9 | "os"
10 | "os/exec"
11 | "sort"
12 | "strconv"
13 | "strings"
14 | "sync"
15 | "time"
16 |
17 | "github.com/mgutz/ansi"
18 | "github.com/weaveworks/common/mflag"
19 | )
20 |
21 | const (
22 | defaultSchedulerHost = "positive-cocoa-90213.appspot.com"
23 | jsonContentType = "application/json"
24 | )
25 |
26 | var (
27 | start = ansi.ColorCode("black+ub")
28 | fail = ansi.ColorCode("red+b")
29 | succ = ansi.ColorCode("green+b")
30 | reset = ansi.ColorCode("reset")
31 |
32 | schedulerHost = defaultSchedulerHost
33 | useScheduler = false
34 | runParallel = false
35 | verbose = false
36 | timeout = 180 // In seconds. Three minutes ought to be enough for any test
37 |
38 | consoleLock = sync.Mutex{}
39 | )
40 |
41 | type test struct {
42 | name string
43 | hosts int
44 | }
45 |
46 | type schedule struct {
47 | Tests []string `json:"tests"`
48 | }
49 |
50 | type result struct {
51 | test
52 | errored bool
53 | hosts []string
54 | }
55 |
56 | type tests []test
57 |
58 | func (ts tests) Len() int { return len(ts) }
59 | func (ts tests) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] }
60 | func (ts tests) Less(i, j int) bool {
61 | if ts[i].hosts != ts[j].hosts {
62 | return ts[i].hosts < ts[j].hosts
63 | }
64 | return ts[i].name < ts[j].name
65 | }
66 |
67 | func (ts *tests) pick(available int) (test, bool) {
68 | // pick the first test that fits in the available hosts
69 | for i, test := range *ts {
70 | if test.hosts <= available {
71 | *ts = append((*ts)[:i], (*ts)[i+1:]...)
72 | return test, true
73 | }
74 | }
75 |
76 | return test{}, false
77 | }
78 |
79 | func (t test) run(hosts []string) bool {
80 | consoleLock.Lock()
81 | fmt.Printf("%s>>> Running %s on %s%s\n", start, t.name, hosts, reset)
82 | consoleLock.Unlock()
83 |
84 | var out bytes.Buffer
85 |
86 | cmd := exec.Command(t.name)
87 | cmd.Env = os.Environ()
88 | cmd.Stdout = &out
89 | cmd.Stderr = &out
90 |
91 | // replace HOSTS in env
92 | for i, env := range cmd.Env {
93 | if strings.HasPrefix(env, "HOSTS") {
94 | cmd.Env[i] = fmt.Sprintf("HOSTS=%s", strings.Join(hosts, " "))
95 | break
96 | }
97 | }
98 |
99 | start := time.Now()
100 | var err error
101 |
102 | c := make(chan error, 1)
103 | go func() { c <- cmd.Run() }()
104 | select {
105 | case err = <-c:
106 | case <-time.After(time.Duration(timeout) * time.Second):
107 | err = fmt.Errorf("timed out")
108 | }
109 |
110 | duration := time.Since(start).Seconds()
111 |
112 | consoleLock.Lock()
113 | if err != nil {
114 | fmt.Printf("%s>>> Test %s finished after %0.1f secs with error: %v%s\n", fail, t.name, duration, err, reset)
115 | } else {
116 | fmt.Printf("%s>>> Test %s finished with success after %0.1f secs%s\n", succ, t.name, duration, reset)
117 | }
118 | if err != nil || verbose {
119 | fmt.Print(out.String())
120 | fmt.Println()
121 | }
122 | consoleLock.Unlock()
123 |
124 | if err != nil && useScheduler {
125 | updateScheduler(t.name, duration)
126 | }
127 |
128 | return err != nil
129 | }
130 |
131 | func updateScheduler(test string, duration float64) {
132 | req := &http.Request{
133 | Method: "POST",
134 | Host: schedulerHost,
135 | URL: &url.URL{
136 | Opaque: fmt.Sprintf("/record/%s/%0.2f", url.QueryEscape(test), duration),
137 | Scheme: "http",
138 | Host: schedulerHost,
139 | },
140 | Close: true,
141 | }
142 | if resp, err := http.DefaultClient.Do(req); err != nil {
143 | fmt.Printf("Error updating scheduler: %v\n", err)
144 | } else {
145 | resp.Body.Close()
146 | }
147 | }
148 |
149 | func getSchedule(tests []string) ([]string, error) {
150 | var (
151 | userName = os.Getenv("CIRCLE_PROJECT_USERNAME")
152 | project = os.Getenv("CIRCLE_PROJECT_REPONAME")
153 | buildNum = os.Getenv("CIRCLE_BUILD_NUM")
154 | testRun = userName + "-" + project + "-integration-" + buildNum
155 | shardCount = os.Getenv("CIRCLE_NODE_TOTAL")
156 | shardID = os.Getenv("CIRCLE_NODE_INDEX")
157 | requestBody = &bytes.Buffer{}
158 | )
159 | if err := json.NewEncoder(requestBody).Encode(schedule{tests}); err != nil {
160 | return []string{}, err
161 | }
162 | url := fmt.Sprintf("http://%s/schedule/%s/%s/%s", schedulerHost, testRun, shardCount, shardID)
163 | resp, err := http.Post(url, jsonContentType, requestBody)
164 | if err != nil {
165 | return []string{}, err
166 | }
167 | var sched schedule
168 | if err := json.NewDecoder(resp.Body).Decode(&sched); err != nil {
169 | return []string{}, err
170 | }
171 | return sched.Tests, nil
172 | }
173 |
174 | func getTests(testNames []string) (tests, error) {
175 | var err error
176 | if useScheduler {
177 | testNames, err = getSchedule(testNames)
178 | if err != nil {
179 | return tests{}, err
180 | }
181 | }
182 | tests := tests{}
183 | for _, name := range testNames {
184 | parts := strings.Split(strings.TrimSuffix(name, "_test.sh"), "_")
185 | numHosts, err := strconv.Atoi(parts[len(parts)-1])
186 | if err != nil {
187 | numHosts = 1
188 | }
189 | tests = append(tests, test{name, numHosts})
190 | fmt.Printf("Test %s needs %d hosts\n", name, numHosts)
191 | }
192 | return tests, nil
193 | }
194 |
195 | func summary(tests, failed tests) {
196 | if len(failed) > 0 {
197 | fmt.Printf("%s>>> Ran %d tests, %d failed%s\n", fail, len(tests), len(failed), reset)
198 | for _, test := range failed {
199 | fmt.Printf("%s>>> Fail %s%s\n", fail, test.name, reset)
200 | }
201 | } else {
202 | fmt.Printf("%s>>> Ran %d tests, all succeeded%s\n", succ, len(tests), reset)
203 | }
204 | }
205 |
206 | func parallel(ts tests, hosts []string) bool {
207 | testsCopy := ts
208 | sort.Sort(sort.Reverse(ts))
209 | resultsChan := make(chan result)
210 | outstanding := 0
211 | failed := tests{}
212 | for len(ts) > 0 || outstanding > 0 {
213 | // While we have some free hosts, try and schedule
214 | // a test on them
215 | for len(hosts) > 0 {
216 | test, ok := ts.pick(len(hosts))
217 | if !ok {
218 | break
219 | }
220 | testHosts := hosts[:test.hosts]
221 | hosts = hosts[test.hosts:]
222 |
223 | go func() {
224 | errored := test.run(testHosts)
225 | resultsChan <- result{test, errored, testHosts}
226 | }()
227 | outstanding++
228 | }
229 |
230 | // Otherwise, wait for the test to finish and return
231 | // the hosts to the pool
232 | result := <-resultsChan
233 | hosts = append(hosts, result.hosts...)
234 | outstanding--
235 | if result.errored {
236 | failed = append(failed, result.test)
237 | }
238 | }
239 | summary(testsCopy, failed)
240 | return len(failed) > 0
241 | }
242 |
243 | func sequential(ts tests, hosts []string) bool {
244 | failed := tests{}
245 | for _, test := range ts {
246 | if test.run(hosts) {
247 | failed = append(failed, test)
248 | }
249 | }
250 | summary(ts, failed)
251 | return len(failed) > 0
252 | }
253 |
254 | func main() {
255 | mflag.BoolVar(&useScheduler, []string{"scheduler"}, false, "Use scheduler to distribute tests across shards")
256 | mflag.BoolVar(&runParallel, []string{"parallel"}, false, "Run tests in parallel on hosts where possible")
257 | mflag.BoolVar(&verbose, []string{"v"}, false, "Print output from all tests (Also enabled via DEBUG=1)")
258 | mflag.StringVar(&schedulerHost, []string{"scheduler-host"}, defaultSchedulerHost, "Hostname of scheduler.")
259 | mflag.IntVar(&timeout, []string{"timeout"}, 180, "Max time to run one test for, in seconds")
260 | mflag.Parse()
261 |
262 | if len(os.Getenv("DEBUG")) > 0 {
263 | verbose = true
264 | }
265 |
266 | testArgs := mflag.Args()
267 | tests, err := getTests(testArgs)
268 | if err != nil {
269 | fmt.Printf("Error parsing tests: %v (%v)\n", err, testArgs)
270 | os.Exit(1)
271 | }
272 |
273 | hosts := strings.Fields(os.Getenv("HOSTS"))
274 | maxHosts := len(hosts)
275 | if maxHosts == 0 {
276 | fmt.Print("No HOSTS specified.\n")
277 | os.Exit(1)
278 | }
279 |
280 | var errored bool
281 | if runParallel {
282 | errored = parallel(tests, hosts)
283 | } else {
284 | errored = sequential(tests, hosts)
285 | }
286 |
287 | if errored {
288 | os.Exit(1)
289 | }
290 | }
291 |
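A note on the test-name convention above: `getTests` derives each test's host requirement from a numeric suffix in its filename, so a name ending in `_3_test.sh` asks for three hosts, and anything without a parseable suffix defaults to one. A minimal, self-contained sketch of that parsing (the sample filenames here are made up):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// hostsNeeded mirrors the parsing in getTests above: the numeric
// component just before "_test.sh" is the host count, defaulting to 1.
func hostsNeeded(name string) int {
	parts := strings.Split(strings.TrimSuffix(name, "_test.sh"), "_")
	if n, err := strconv.Atoi(parts[len(parts)-1]); err == nil {
		return n
	}
	return 1
}

func main() {
	fmt.Println(hostsNeeded("100_dns_1_test.sh"))       // 1
	fmt.Println(hostsNeeded("200_multihost_3_test.sh")) // 3
	fmt.Println(hostsNeeded("smoke_test.sh"))           // 1 (no numeric suffix)
}
```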
--------------------------------------------------------------------------------
/tools/sched:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import sys, string, urllib
3 | import requests
4 | import optparse
5 |
6 | def test_time(target, test_name, runtime):
7 | r = requests.post(target + "/record/%s/%f" % (urllib.quote(test_name, safe=""), runtime))
8 | print r.text.encode('utf-8')
9 | assert r.status_code == 204
10 |
11 | def test_sched(target, test_run, shard_count, shard_id):
12 | tests = {'tests': string.split(sys.stdin.read())}
13 | r = requests.post(target + "/schedule/%s/%d/%d" % (test_run, shard_count, shard_id), json=tests)
14 | assert r.status_code == 200
15 | result = r.json()
16 | for test in sorted(result['tests']):
17 | print test.encode('utf-8')
18 |
19 | def usage():
20 | print "%s (--target=...) <time|sched> [args...]" % sys.argv[0]
21 | print " time <test name> <runtime in seconds>"
22 | print " sched <test run> <shard count> <shard id>"
23 |
24 | def main():
25 | parser = optparse.OptionParser()
26 | parser.add_option('--target', default="http://positive-cocoa-90213.appspot.com")
27 | options, args = parser.parse_args()
28 | if len(args) < 3:
29 | usage()
30 | sys.exit(1)
31 |
32 | if args[0] == "time":
33 | test_time(options.target, args[1], float(args[2]))
34 | elif args[0] == "sched":
35 | test_sched(options.target, args[1], int(args[2]), int(args[3]))
36 | else:
37 | usage()
38 |
39 | if __name__ == '__main__':
40 | main()
41 |
--------------------------------------------------------------------------------
/tools/scheduler/.gitignore:
--------------------------------------------------------------------------------
1 | lib
2 |
--------------------------------------------------------------------------------
/tools/scheduler/README.md:
--------------------------------------------------------------------------------
1 | To upload a newer version:
2 |
3 | ```
4 | pip install -r requirements.txt -t lib
5 | appcfg.py update .
6 | ```
7 |
--------------------------------------------------------------------------------
/tools/scheduler/app.yaml:
--------------------------------------------------------------------------------
1 | application: positive-cocoa-90213
2 | version: 1
3 | runtime: python27
4 | api_version: 1
5 | threadsafe: true
6 |
7 | handlers:
8 | - url: .*
9 | script: main.app
10 |
11 | libraries:
12 | - name: webapp2
13 | version: latest
14 | - name: ssl
15 | version: latest
16 |
--------------------------------------------------------------------------------
/tools/scheduler/appengine_config.py:
--------------------------------------------------------------------------------
1 | from google.appengine.ext import vendor
2 |
3 | vendor.add('lib')
4 |
--------------------------------------------------------------------------------
/tools/scheduler/cron.yaml:
--------------------------------------------------------------------------------
1 | cron:
2 | - description: periodic gc
3 | url: /tasks/gc
4 | schedule: every 5 minutes
5 |
--------------------------------------------------------------------------------
/tools/scheduler/main.py:
--------------------------------------------------------------------------------
1 | import collections
2 | import json
3 | import logging
4 | import operator
5 | import re
6 |
7 | import flask
8 | from oauth2client.client import GoogleCredentials
9 | from googleapiclient import discovery
10 |
11 | from google.appengine.api import urlfetch
12 | from google.appengine.ext import ndb
13 |
14 | app = flask.Flask('scheduler')
15 | app.debug = True
16 |
17 | # We use an exponential moving average to record
18 | # test run times. A higher alpha discounts historic
19 | # observations faster.
20 | alpha = 0.3
21 |
22 | class Test(ndb.Model):
23 | total_run_time = ndb.FloatProperty(default=0.) # Not a total, but an EWMA
24 | total_runs = ndb.IntegerProperty(default=0)
25 |
26 | def parallelism(self):
27 | name = self.key.string_id()
28 | m = re.search('(\d+)_test.sh$', name)
29 | if m is None:
30 | return 1
31 | else:
32 | return int(m.group(1))
33 |
34 | def cost(self):
35 | p = self.parallelism()
36 | logging.info("Test %s has parallelism %d and avg run time %s", self.key.string_id(), p, self.total_run_time)
37 | return self.parallelism() * self.total_run_time
38 |
39 | class Schedule(ndb.Model):
40 | shards = ndb.JsonProperty()
41 |
42 | @app.route('/record/<test_name>/<runtime>', methods=['POST'])
43 | @ndb.transactional
44 | def record(test_name, runtime):
45 | test = Test.get_by_id(test_name)
46 | if test is None:
47 | test = Test(id=test_name)
48 | test.total_run_time = (test.total_run_time * (1-alpha)) + (float(runtime) * alpha)
49 | test.total_runs += 1
50 | test.put()
51 | return ('', 204)
52 |
53 | @app.route('/schedule/<test_run>/<int:shard_count>/<int:shard>', methods=['POST'])
54 | def schedule(test_run, shard_count, shard):
55 | # read tests from body
56 | test_names = flask.request.get_json(force=True)['tests']
57 |
58 | # first see if we have a schedule already
59 | schedule_id = "%s-%d" % (test_run, shard_count)
60 | schedule = Schedule.get_by_id(schedule_id)
61 | if schedule is not None:
62 | return flask.json.jsonify(tests=schedule.shards[str(shard)])
63 |
64 | # if not, do simple greedy algorithm
65 | test_times = ndb.get_multi(ndb.Key(Test, test_name) for test_name in test_names)
66 | def avg(test):
67 | if test is not None:
68 | return test.cost()
69 | return 1
70 | test_times = [(test_name, avg(test)) for test_name, test in zip(test_names, test_times)]
71 | test_times_dict = dict(test_times)
72 | test_times.sort(key=operator.itemgetter(1))
73 |
74 | shards = {i: [] for i in xrange(shard_count)}
75 | while test_times:
76 | test_name, time = test_times.pop()
77 |
78 | # find the shortest shard and put the test on it
79 | s, _ = min(((i, sum(test_times_dict[t] for t in shards[i]))
80 | for i in xrange(shard_count)), key=operator.itemgetter(1))
81 |
82 | shards[s].append(test_name)
83 |
84 | # atomically insert or retrieve existing schedule
85 | schedule = Schedule.get_or_insert(schedule_id, shards=shards)
86 | return flask.json.jsonify(tests=schedule.shards[str(shard)])
87 |
88 | FIREWALL_REGEXES = [
89 | re.compile(r'^(?P<network>\w+)-allow-(?P<type>\w+)-(?P<build>\d+)-(?P<instance>\d+)$'),
90 | re.compile(r'^(?P<network>\w+)-(?P<build>\d+)-(?P<instance>\d+)-allow-(?P<type>[\w\-]+)$'),
91 | ]
92 | NAME_REGEXES = [
93 | re.compile(r'^host(?P<host>\d+)-(?P<build>\d+)-(?P<instance>\d+)$'),
94 | re.compile(r'^test-(?P<build>\d+)-(?P<instance>\d+)-(?P<host>\d+)$'),
95 | ]
96 |
97 | def _matches_any_regex(name, regexes):
98 | for regex in regexes:
99 | matches = regex.match(name)
100 | if matches:
101 | return matches
102 |
103 | PROJECTS = [
104 | ('weaveworks/weave', 'weave-net-tests', 'us-central1-a', True),
105 | ('weaveworks/weave', 'positive-cocoa-90213', 'us-central1-a', True),
106 | ('weaveworks/scope', 'scope-integration-tests', 'us-central1-a', False),
107 | ]
108 |
109 | @app.route('/tasks/gc')
110 | def gc():
111 | # Get list of running VMs, pick build id out of VM name
112 | credentials = GoogleCredentials.get_application_default()
113 | compute = discovery.build('compute', 'v1', credentials=credentials)
114 |
115 | for repo, project, zone, gc_fw in PROJECTS:
116 | gc_project(compute, repo, project, zone, gc_fw)
117 |
118 | return "Done"
119 |
120 | def gc_project(compute, repo, project, zone, gc_fw):
121 | logging.info("GCing %s, %s, %s", repo, project, zone)
122 | # Get list of builds, filter down to running builds:
123 | running = _get_running_builds(repo)
124 | # Stop VMs for builds that aren't running:
125 | _gc_compute_engine_instances(compute, project, zone, running)
126 | # Remove firewall rules for builds that aren't running:
127 | if gc_fw:
128 | _gc_firewall_rules(compute, project, running)
129 |
130 | def _get_running_builds(repo):
131 | result = urlfetch.fetch('https://circleci.com/api/v1/project/%s' % repo,
132 | headers={'Accept': 'application/json'})
133 | assert result.status_code == 200
134 | builds = json.loads(result.content)
135 | running = {build['build_num'] for build in builds if not build.get('stop_time')}
136 | logging.info("Runnings builds: %r", running)
137 | return running
138 |
139 | def _get_hosts_by_build(instances):
140 | host_by_build = collections.defaultdict(list)
141 | for instance in instances['items']:
142 | matches = _matches_any_regex(instance['name'], NAME_REGEXES)
143 | if not matches:
144 | continue
145 | host_by_build[int(matches.group('build'))].append(instance['name'])
146 | logging.info("Running VMs by build: %r", host_by_build)
147 | return host_by_build
148 |
149 | def _gc_compute_engine_instances(compute, project, zone, running):
150 | instances = compute.instances().list(project=project, zone=zone).execute()
151 | if 'items' not in instances:
152 | return
153 | host_by_build = _get_hosts_by_build(instances)
154 | stopped = []
155 | for build, names in host_by_build.iteritems():
156 | if build in running:
157 | continue
158 | for name in names:
159 | stopped.append(name)
160 | logging.info("Stopping VM %s", name)
161 | compute.instances().delete(project=project, zone=zone, instance=name).execute()
162 | return stopped
163 |
164 | def _gc_firewall_rules(compute, project, running):
165 | firewalls = compute.firewalls().list(project=project).execute()
166 | if 'items' not in firewalls:
167 | return
168 | for firewall in firewalls['items']:
169 | matches = _matches_any_regex(firewall['name'], FIREWALL_REGEXES)
170 | if not matches:
171 | continue
172 | if int(matches.group('build')) in running:
173 | continue
174 | logging.info("Deleting firewall rule %s", firewall['name'])
175 | compute.firewalls().delete(project=project, firewall=firewall['name']).execute()
176 |
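Two ideas carry this scheduler: `record` keeps an exponential moving average of each test's runtime (new = alpha*observation + (1-alpha)*old), and `schedule` greedily hands the most expensive remaining test to the currently cheapest shard. A rough Go sketch of the same logic, with invented costs; this is an illustration, not the App Engine code:

```go
package main

import (
	"fmt"
	"sort"
)

const alpha = 0.3 // same smoothing factor as the scheduler above

// ewma folds one observation into the running average:
// a higher alpha discounts history faster.
func ewma(old, observation float64) float64 {
	return old*(1-alpha) + observation*alpha
}

// assign distributes tests across shards greedily: most expensive
// test first, always onto the shard with the smallest total cost.
func assign(costs map[string]float64, shardCount int) [][]string {
	names := make([]string, 0, len(costs))
	for n := range costs {
		names = append(names, n)
	}
	sort.Slice(names, func(i, j int) bool { return costs[names[i]] > costs[names[j]] })

	shards := make([][]string, shardCount)
	totals := make([]float64, shardCount)
	for _, n := range names {
		cheapest := 0
		for i, t := range totals {
			if t < totals[cheapest] {
				cheapest = i
			}
		}
		shards[cheapest] = append(shards[cheapest], n)
		totals[cheapest] += costs[n]
	}
	return shards
}

func main() {
	fmt.Println(ewma(10, 20)) // 13: 10*0.7 + 20*0.3
	fmt.Println(assign(map[string]float64{"a": 5, "b": 3, "c": 2, "d": 1}, 2))
	// [[a d] [b c]] - shard totals 6 and 5
}
```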
--------------------------------------------------------------------------------
/tools/scheduler/requirements.txt:
--------------------------------------------------------------------------------
1 | flask
2 | google-api-python-client
3 |
--------------------------------------------------------------------------------
/tools/shell-lint:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Lint all shell files in given directories with `shellcheck`.
4 | #
5 | # e.g.
6 | # $ shell-lint infra k8s
7 | #
8 | # Depends on:
9 | # - shellcheck
10 | # - files-with-type
11 | # - file >= 5.22
12 |
13 | "$(dirname "${BASH_SOURCE[0]}")/files-with-type" text/x-shellscript "$@" | xargs --no-run-if-empty shellcheck
14 |
--------------------------------------------------------------------------------
/tools/socks/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gliderlabs/alpine
2 | MAINTAINER Weaveworks Inc
3 | WORKDIR /
4 | COPY proxy /
5 | EXPOSE 8000
6 | EXPOSE 8080
7 | ENTRYPOINT ["/proxy"]
8 |
--------------------------------------------------------------------------------
/tools/socks/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all clean
2 |
3 | IMAGE_TAR=image.tar
4 | IMAGE_NAME=weaveworks/socksproxy
5 | PROXY_EXE=proxy
6 | NETGO_CHECK=@strings $@ | grep cgo_stub\\\.go >/dev/null || { \
7 | rm $@; \
8 | echo "\nYour go standard library was built without the 'netgo' build tag."; \
9 | echo "To fix that, run"; \
10 | echo " sudo go clean -i net"; \
11 | echo " sudo go install -tags netgo std"; \
12 | false; \
13 | }
14 |
15 | all: $(IMAGE_TAR)
16 |
17 | $(IMAGE_TAR): Dockerfile $(PROXY_EXE)
18 | docker build -t $(IMAGE_NAME) .
19 | docker save $(IMAGE_NAME):latest > $@
20 |
21 | $(PROXY_EXE): *.go
22 | go get -tags netgo ./$(@D)
23 | go build -ldflags "-extldflags \"-static\" -linkmode=external" -tags netgo -o $@ ./$(@D)
24 | $(NETGO_CHECK)
25 |
26 | clean:
27 | -docker rmi $(IMAGE_NAME)
28 | rm -rf $(PROXY_EXE) $(IMAGE_TAR)
29 | go clean ./...
30 |
--------------------------------------------------------------------------------
/tools/socks/README.md:
--------------------------------------------------------------------------------
1 | # SOCKS Proxy
2 |
3 | The challenge: you’ve built and deployed your microservices-based
4 | application on a Weave network, running on a set of VMs on EC2. Many
5 | of the services’ public APIs are reachable from the internet via an
6 | Nginx-based reverse proxy, but some of the services also expose
7 | private monitoring and management endpoints via embedded HTTP servers.
8 | How do you securely get access to these from your laptop, without
9 | exposing them to the world?
10 |
11 | One method we’ve started using at Weaveworks is a 90’s technology - a
12 | SOCKS proxy combined with a PAC script. It’s relatively
13 | straightforward: you SSH into any of the VMs participating in the
14 | Weave network, start the SOCKS proxy in a container on the Weave
15 | network, and port-forward a few local ports to the proxy over SSH.
16 | All that’s left is to configure your browser to use the proxy, and
17 | voilà: you can now access your Docker containers, via the Weave
18 | network (and with all the magic of weavedns), from your laptop’s
19 | browser!
20 |
21 | It is perhaps worth noting there is nothing Weave-specific about this
22 | approach - this should work with any SDN or private network.
23 |
24 | A quick example:
25 |
26 | ```
27 | vm1$ weave launch
28 | vm1$ eval $(weave env)
29 | vm1$ docker run -d --name nginx nginx
30 | ```
31 |
32 | And on your laptop
33 |
34 | ```
35 | laptop$ git clone https://github.com/weaveworks/tools
36 | laptop$ cd tools/socks
37 | laptop$ ./connect.sh vm1
38 | Starting proxy container...
39 | Please configure your browser for proxy
40 | http://localhost:8080/proxy.pac
41 | ```
42 |
43 | To configure your Mac to use the proxy:
44 |
45 | 1. Open System Preferences
46 | 2. Select Network
47 | 3. Click the 'Advanced' button
48 | 4. Select the Proxies tab
49 | 5. Click the 'Automatic Proxy Configuration' check box
50 | 6. Enter 'http://localhost:8080/proxy.pac' in the URL box
51 | 7. Remove `*.local` from the 'Bypass proxy settings for these Hosts & Domains'
52 |
53 | Now point your browser at http://nginx.weave.local/
54 |
--------------------------------------------------------------------------------
/tools/socks/connect.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | if [ $# -ne 1 ]; then
6 | echo "Usage: $0 "
7 | exit 1
8 | fi
9 |
10 | HOST=$1
11 |
12 | echo "Starting proxy container..."
13 | PROXY_CONTAINER=$(ssh "$HOST" weave run -d weaveworks/socksproxy)
14 |
15 | function finish() {
16 | echo "Removing proxy container.."
17 | # shellcheck disable=SC2029
18 | ssh "$HOST" docker rm -f "$PROXY_CONTAINER"
19 | }
20 | trap finish EXIT
21 |
22 | # shellcheck disable=SC2029
23 | PROXY_IP=$(ssh "$HOST" -- "docker inspect --format='{{.NetworkSettings.IPAddress}}' $PROXY_CONTAINER")
24 | echo 'Please configure your browser for proxy http://localhost:8080/proxy.pac'
25 | # shellcheck disable=SC2029
26 | ssh "-L8000:$PROXY_IP:8000" "-L8080:$PROXY_IP:8080" "$HOST" docker attach "$PROXY_CONTAINER"
27 |
--------------------------------------------------------------------------------
/tools/socks/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "net"
6 | "net/http"
7 | "os"
8 | "strings"
9 | "text/template"
10 |
11 | socks5 "github.com/armon/go-socks5"
12 | "github.com/weaveworks/common/mflag"
13 | "github.com/weaveworks/common/mflagext"
14 | "golang.org/x/net/context"
15 | )
16 |
17 | type pacFileParameters struct {
18 | HostMatch string
19 | Aliases map[string]string
20 | }
21 |
22 | const (
23 | pacfile = `
24 | function FindProxyForURL(url, host) {
25 | if(shExpMatch(host, "{{.HostMatch}}")) {
26 | return "SOCKS5 localhost:8000";
27 | }
28 | {{range $key, $value := .Aliases}}
29 | if (host == "{{$key}}") {
30 | return "SOCKS5 localhost:8000";
31 | }
32 | {{end}}
33 | return "DIRECT";
34 | }
35 | `
36 | )
37 |
38 | func main() {
39 | var (
40 | as []string
41 | hostMatch string
42 | )
43 | mflagext.ListVar(&as, []string{"a", "-alias"}, []string{}, "Specify hostname aliases in the form alias:hostname. Can be repeated.")
44 | mflag.StringVar(&hostMatch, []string{"h", "-host-match"}, "*.weave.local", "Specify main host shExpMatch expression in pacfile")
45 | mflag.Parse()
46 |
47 | var aliases = map[string]string{}
48 | for _, a := range as {
49 | parts := strings.SplitN(a, ":", 2)
50 | if len(parts) != 2 {
51 | fmt.Printf("'%s' is not a valid alias.\n", a)
52 | mflag.Usage()
53 | os.Exit(1)
54 | }
55 | aliases[parts[0]] = parts[1]
56 | }
57 |
58 | go socksProxy(aliases)
59 |
60 | t := template.Must(template.New("pacfile").Parse(pacfile))
61 | http.HandleFunc("/proxy.pac", func(w http.ResponseWriter, r *http.Request) {
62 | w.Header().Set("Content-Type", "application/x-ns-proxy-autoconfig")
63 | t.Execute(w, pacFileParameters{hostMatch, aliases})
64 | })
65 |
66 | if err := http.ListenAndServe(":8080", nil); err != nil {
67 | panic(err)
68 | }
69 | }
70 |
71 | type aliasingResolver struct {
72 | aliases map[string]string
73 | socks5.NameResolver
74 | }
75 |
76 | func (r aliasingResolver) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) {
77 | if alias, ok := r.aliases[name]; ok {
78 | return r.NameResolver.Resolve(ctx, alias)
79 | }
80 | return r.NameResolver.Resolve(ctx, name)
81 | }
82 |
83 | func socksProxy(aliases map[string]string) {
84 | conf := &socks5.Config{
85 | Resolver: aliasingResolver{
86 | aliases: aliases,
87 | NameResolver: socks5.DNSResolver{},
88 | },
89 | }
90 | server, err := socks5.New(conf)
91 | if err != nil {
92 | panic(err)
93 | }
94 | if err := server.ListenAndServe("tcp", ":8000"); err != nil {
95 | panic(err)
96 | }
97 | }
98 |
--------------------------------------------------------------------------------
/tools/test:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
6 | SLOW=
7 | NO_GO_GET=true
8 | TAGS=
9 | PARALLEL=
10 | RACE="-race -covermode=atomic"
11 | TIMEOUT=1m
12 |
13 | usage() {
14 | echo "$0 [-slow] [-in-container foo] [-netgo] [-(no-)go-get] [-timeout 1m]"
15 | }
16 |
17 | while [ $# -gt 0 ]; do
18 | case "$1" in
19 | "-slow")
20 | SLOW=true
21 | shift 1
22 | ;;
23 | "-no-race")
24 | RACE=
25 | shift 1
26 | ;;
27 | "-no-go-get")
28 | NO_GO_GET=true
29 | shift 1
30 | ;;
31 | "-go-get")
32 | NO_GO_GET=
33 | shift 1
34 | ;;
35 | "-netgo")
36 | TAGS="-tags netgo"
37 | shift 1
38 | ;;
39 | "-p")
40 | PARALLEL=true
41 | shift 1
42 | ;;
43 | "-timeout")
44 | TIMEOUT=$2
45 | shift 2
46 | ;;
47 | *)
48 | usage
49 | exit 2
50 | ;;
51 | esac
52 | done
53 |
54 | GO_TEST_ARGS=($TAGS -cpu 4 -timeout $TIMEOUT)
55 |
56 | if [ -n "$SLOW" ] || [ -n "$CIRCLECI" ]; then
57 | SLOW=true
58 | fi
59 |
60 | if [ -n "$SLOW" ]; then
61 | GO_TEST_ARGS=("${GO_TEST_ARGS[@]}" ${RACE})
62 |
63 | # shellcheck disable=SC2153
64 | if [ -n "$COVERDIR" ]; then
65 | coverdir="$COVERDIR"
66 | else
67 | coverdir=$(mktemp -d coverage.XXXXXXXXXX)
68 | fi
69 |
70 | mkdir -p "$coverdir"
71 | fi
72 |
73 | fail=0
74 |
75 | if [ -z "$TESTDIRS" ]; then
76 | # NB: Relies on paths being prefixed with './'.
77 | TESTDIRS=($(git ls-files -- '*_test.go' | grep -vE '^(vendor|prog|experimental)/' | xargs -n1 dirname | sort -u | sed -e 's|^|./|'))
78 | else
79 | # TESTDIRS on the right-hand side is not really an array variable,
80 | # just a string with spaces, but it is written this way to placate
81 | # the shellcheck tool.
82 | TESTDIRS=($(for d in ${TESTDIRS[*]}; do echo "$d"; done))
83 | fi
84 |
85 | # If running on circle, use the scheduler to work out what tests to run on what shard
86 | if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then
87 | PREFIX=$(go list -e ./ | sed -e 's/\//-/g')
88 | TESTDIRS=($(echo "${TESTDIRS[@]}" | "$DIR/sched" sched "$PREFIX-$CIRCLE_PROJECT_USERNAME-$CIRCLE_PROJECT_REPONAME-$CIRCLE_BUILD_NUM" "$CIRCLE_NODE_TOTAL" "$CIRCLE_NODE_INDEX"))
89 | echo "${TESTDIRS[@]}"
90 | fi
91 |
92 | PACKAGE_BASE=$(go list -e ./)
93 |
94 | # Speed up the tests by compiling and installing their dependencies first.
95 | go test -i "${GO_TEST_ARGS[@]}" "${TESTDIRS[@]}"
96 |
97 | run_test() {
98 | local dir=$1
99 | if [ -z "$NO_GO_GET" ]; then
100 | go get -t "$TAGS" "$dir"
101 | fi
102 |
103 | local GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}")
104 | if [ -n "$SLOW" ]; then
105 | local COVERPKGS
106 | COVERPKGS=$( (
107 | go list "$dir"
108 | go list -f '{{join .Deps "\n"}}' "$dir" | grep -v "vendor" | grep "^$PACKAGE_BASE/"
109 | ) | paste -s -d, -)
110 | local output
111 | output=$(mktemp "$coverdir/unit.XXXXXXXXXX")
112 | local GO_TEST_ARGS_RUN=("${GO_TEST_ARGS[@]}" "-coverprofile=$output" "-coverpkg=$COVERPKGS")
113 | fi
114 |
115 | local START
116 | START=$(date +%s)
117 | if ! go test "${GO_TEST_ARGS_RUN[@]}" "$dir"; then
118 | fail=1
119 | fi
120 | local END
121 | END=$(date +%s)
122 | local RUNTIME=$((END - START))
123 |
124 | # Report test runtime when running on circle, to help scheduler
125 | if [ -n "$CIRCLECI" ] && [ -z "$NO_SCHEDULER" ] && [ -x "$DIR/sched" ]; then
126 | "$DIR/sched" time "$dir" "$RUNTIME"
127 | fi
128 | }
129 |
130 | for dir in "${TESTDIRS[@]}"; do
131 | if [ -n "$PARALLEL" ]; then
132 | run_test "$dir" &
133 | else
134 | run_test "$dir"
135 | fi
136 | done
137 |
138 | if [ -n "$PARALLEL" ]; then
139 | wait
140 | fi
141 |
142 | if [ -n "$SLOW" ] && [ -z "$COVERDIR" ]; then
143 | go get github.com/weaveworks/tools/cover
144 | cover "$coverdir"/* >profile.cov
145 | rm -rf "$coverdir"
146 | go tool cover -html=profile.cov -o=coverage.html
147 | go tool cover -func=profile.cov | tail -n1
148 | fi
149 |
150 | exit $fail
151 |
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/elf/COPYRIGHT.txt:
--------------------------------------------------------------------------------
1 | Copyright 2016 PLUMgrid
2 | Copyright 2016 Kinvolk
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 |
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/elf/elf_unsupported.go:
--------------------------------------------------------------------------------
1 | // +build !linux
2 |
3 | package elf
4 |
5 | import (
6 | "fmt"
7 | )
8 |
9 | // not supported; dummy struct
10 | type BPFKProbePerf struct{}
11 | type SectionParams struct{}
12 |
13 | func (b *Module) Load(parameters map[string]SectionParams) error {
14 | return fmt.Errorf("not supported")
15 | }
16 |
17 | func NewBpfPerfEvent(fileName string) *BPFKProbePerf {
18 | // not supported
19 | return nil
20 | }
21 |
22 | func (b *BPFKProbePerf) Load() error {
23 | return fmt.Errorf("not supported")
24 | }
25 |
26 | func (b *BPFKProbePerf) PollStart(mapName string, receiverChan chan []byte, lostChan chan uint64) {
27 | // not supported
28 | return
29 | }
30 |
31 | func (b *BPFKProbePerf) PollStop(mapName string) {
32 | // not supported
33 | return
34 | }
35 |
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/elf/include/bpf_map.h:
--------------------------------------------------------------------------------
1 | #define BUF_SIZE_MAP_NS 256
2 |
3 | typedef struct bpf_map_def {
4 | unsigned int type;
5 | unsigned int key_size;
6 | unsigned int value_size;
7 | unsigned int max_entries;
8 | unsigned int map_flags;
9 | unsigned int pinning;
10 | char namespace[BUF_SIZE_MAP_NS];
11 | } bpf_map_def;
12 |
13 | enum bpf_pin_type {
14 | PIN_NONE = 0,
15 | PIN_OBJECT_NS,
16 | PIN_GLOBAL_NS,
17 | PIN_CUSTOM_NS,
18 | };
19 |
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/elf/kernel_version.go:
--------------------------------------------------------------------------------
1 | // +build linux
2 |
3 | // Copyright 2016-2017 Kinvolk
4 | //
5 | // Licensed under the Apache License, Version 2.0 (the "License");
6 | // you may not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing, software
12 | // distributed under the License is distributed on an "AS IS" BASIS,
13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | // See the License for the specific language governing permissions and
15 | // limitations under the License.
16 |
17 | package elf
18 |
19 | import (
20 | "fmt"
21 | "io/ioutil"
22 | "regexp"
23 | "strconv"
24 | "strings"
25 | "syscall"
26 | )
27 |
28 | var versionRegex = regexp.MustCompile(`^(\d+)\.(\d+)\.(\d+).*$`)
29 |
30 | // KernelVersionFromReleaseString converts a release string with format
31 | // 4.4.2[-1] to a kernel version number in LINUX_VERSION_CODE format.
32 | // That is, for kernel "a.b.c", the version number will be (a<<16 + b<<8 + c)
33 | func KernelVersionFromReleaseString(releaseString string) (uint32, error) {
34 | versionParts := versionRegex.FindStringSubmatch(releaseString)
35 | if len(versionParts) != 4 {
36 | return 0, fmt.Errorf("got invalid release version %q (expected format '4.3.2-1')", releaseString)
37 | }
38 | major, err := strconv.Atoi(versionParts[1])
39 | if err != nil {
40 | return 0, err
41 | }
42 |
43 | minor, err := strconv.Atoi(versionParts[2])
44 | if err != nil {
45 | return 0, err
46 | }
47 |
48 | patch, err := strconv.Atoi(versionParts[3])
49 | if err != nil {
50 | return 0, err
51 | }
52 | out := major*256*256 + minor*256 + patch
53 | return uint32(out), nil
54 | }
55 |
56 | func currentVersionUname() (uint32, error) {
57 | var buf syscall.Utsname
58 | if err := syscall.Uname(&buf); err != nil {
59 | return 0, err
60 | }
61 | releaseString := strings.Trim(utsnameStr(buf.Release[:]), "\x00")
62 | return KernelVersionFromReleaseString(releaseString)
63 | }
64 |
65 | func currentVersionUbuntu() (uint32, error) {
66 | procVersion, err := ioutil.ReadFile("/proc/version_signature")
67 | if err != nil {
68 | return 0, err
69 | }
70 | var u1, u2, releaseString string
71 | _, err = fmt.Sscanf(string(procVersion), "%s %s %s", &u1, &u2, &releaseString)
72 | if err != nil {
73 | return 0, err
74 | }
75 | return KernelVersionFromReleaseString(releaseString)
76 | }
77 |
78 | var debianVersionRegex = regexp.MustCompile(`.* SMP Debian (\d+\.\d+\.\d+-\d+) .*`)
79 |
80 | func currentVersionDebian() (uint32, error) {
81 | procVersion, err := ioutil.ReadFile("/proc/version")
82 | if err != nil {
83 | return 0, err
84 | }
85 | match := debianVersionRegex.FindStringSubmatch(string(procVersion))
86 | if len(match) != 2 {
87 | return 0, fmt.Errorf("failed to get kernel version from /proc/version: %s", procVersion)
88 | }
89 | return KernelVersionFromReleaseString(match[1])
90 | }
91 |
92 | // CurrentKernelVersion returns the current kernel version in
93 | // LINUX_VERSION_CODE format (see KernelVersionFromReleaseString())
94 | func CurrentKernelVersion() (uint32, error) {
95 | // We need extra checks for Debian and Ubuntu as they modify
96 | // the kernel version patch number for compatibility with
97 | // out-of-tree modules. Linux perf tools do the same for Ubuntu
98 | // systems: https://github.com/torvalds/linux/commit/d18acd15c
99 | //
100 | // See also:
101 | // https://kernel-handbook.alioth.debian.org/ch-versions.html
102 | // https://wiki.ubuntu.com/Kernel/FAQ
103 | version, err := currentVersionUbuntu()
104 | if err == nil {
105 | return version, nil
106 | }
107 | version, err = currentVersionDebian()
108 | if err == nil {
109 | return version, nil
110 | }
111 | return currentVersionUname()
112 | }
113 |
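To make the arithmetic concrete: the release string "4.4.2-1" parses to major 4, minor 4, patch 2, giving 4<<16 + 4<<8 + 2 = 263170, the same number the kernel's KERNEL_VERSION(4, 4, 2) macro would produce. A tiny check:

```go
package main

import "fmt"

func main() {
	// LINUX_VERSION_CODE arithmetic for release "4.4.2-1".
	major, minor, patch := 4, 4, 2
	fmt.Println(major<<16 + minor<<8 + patch) // 263170
}
```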
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/elf/module_unsupported.go:
--------------------------------------------------------------------------------
1 | // +build !linux
2 |
3 | package elf
4 |
5 | import (
6 | "fmt"
7 | "io"
8 | )
9 |
10 | type Module struct{}
11 | type Kprobe struct{}
12 | type CgroupProgram struct{}
13 | type AttachType struct{}
14 |
15 | func NewModule(fileName string) *Module {
16 | return nil
17 | }
18 |
19 | func NewModuleFromReader(fileReader io.ReaderAt) *Module {
20 | return nil
21 | }
22 |
23 | func (b *Module) EnableKprobe(secName string, maxactive int) error {
24 | return fmt.Errorf("not supported")
25 | }
26 |
27 | func (b *Module) IterKprobes() <-chan *Kprobe {
28 | return nil
29 | }
30 |
31 | func (b *Module) EnableKprobes(maxactive int) error {
32 | return fmt.Errorf("not supported")
33 | }
34 |
35 | func (b *Module) IterCgroupProgram() <-chan *CgroupProgram {
36 | return nil
37 | }
38 |
39 | func (b *Module) CgroupProgram(name string) *CgroupProgram {
40 | return nil
41 | }
42 |
43 | func (b *Module) Kprobe(name string) *Kprobe {
44 | return nil
45 | }
46 |
47 | func (b *Module) AttachProgram(cgroupProg *CgroupProgram, cgroupPath string, attachType AttachType) error {
48 | return fmt.Errorf("not supported")
49 | }
50 |
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/elf/perf_unsupported.go:
--------------------------------------------------------------------------------
1 | // +build !linux
2 |
3 | package elf
4 |
5 | import "fmt"
6 |
7 | type PerfMap struct{}
8 |
9 | func InitPerfMap(b *Module, mapName string, receiverChan chan []byte) (*PerfMap, error) {
10 | return nil, fmt.Errorf("not supported")
11 | }
12 |
13 | func (pm *PerfMap) SetTimestampFunc(timestamp func(*[]byte) uint64) {}
14 |
15 | func (pm *PerfMap) PollStart() {}
16 |
17 | func (pm *PerfMap) PollStop() {}
18 |
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/elf/pinning.go:
--------------------------------------------------------------------------------
1 | // +build linux
2 |
3 | package elf
4 |
5 | import (
6 | "fmt"
7 | "os"
8 | "path/filepath"
9 | "strings"
10 | "unsafe"
11 |
12 | "github.com/iovisor/gobpf/pkg/bpffs"
13 | )
14 |
15 | /*
16 | #include <unistd.h>
17 | #include <linux/unistd.h>
18 | #include <stdlib.h>
19 | #include <linux/bpf.h>
20 |
21 | extern __u64 ptr_to_u64(void *);
22 |
23 | int bpf_pin_object(int fd, const char *pathname)
24 | {
25 | union bpf_attr attr = {};
26 |
27 | attr.pathname = ptr_to_u64((void *)pathname);
28 | attr.bpf_fd = fd;
29 |
30 | return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
31 | }
32 | */
33 | import "C"
34 |
35 | const (
36 | BPFDirGlobals = "globals" // as in iproute2's BPF_DIR_GLOBALS
37 | BPFFSPath = "/sys/fs/bpf/"
38 | )
39 |
40 | func validPinPath(PinPath string) bool {
41 | if !strings.HasPrefix(PinPath, BPFFSPath) {
42 | return false
43 | }
44 |
45 | return filepath.Clean(PinPath) == PinPath
46 | }
47 |
48 | func pinObject(fd int, pinPath string) error {
49 | mounted, err := bpffs.IsMounted()
50 | if err != nil {
51 | return fmt.Errorf("error checking if %q is mounted: %v", BPFFSPath, err)
52 | }
53 | if !mounted {
54 | return fmt.Errorf("bpf fs not mounted at %q", BPFFSPath)
55 | }
56 | err = os.MkdirAll(filepath.Dir(pinPath), 0755)
57 | if err != nil {
58 | return fmt.Errorf("error creating directory %q: %v", filepath.Dir(pinPath), err)
59 | }
60 | _, err = os.Stat(pinPath)
61 | if err == nil {
62 | return fmt.Errorf("aborting, found file at %q: %v", pinPath, err)
63 | }
64 | if err != nil && !os.IsNotExist(err) {
65 | return fmt.Errorf("failed to stat %q: %v", pinPath, err)
66 | }
67 | pinPathC := C.CString(pinPath)
68 | defer C.free(unsafe.Pointer(pinPathC))
69 | ret, err := C.bpf_pin_object(C.int(fd), pinPathC)
70 | if ret != 0 {
71 | return fmt.Errorf("error pinning object to %q: %v", pinPath, err)
72 | }
73 | return nil
74 | }
75 |
76 | // PinObjectGlobal pins an object to a name in a namespace,
77 | // e.g. `/sys/fs/bpf/my-namespace/globals/my-name`
78 | func PinObjectGlobal(fd int, namespace, name string) error {
79 | pinPath := filepath.Join(BPFFSPath, namespace, BPFDirGlobals, name)
80 | return pinObject(fd, pinPath)
81 | }
82 |
83 | // PinObject pins an object to a path
84 | func PinObject(fd int, pinPath string) error {
85 | if !validPinPath(pinPath) {
86 | return fmt.Errorf("not a valid pin path: %s", pinPath)
87 | }
88 | return pinObject(fd, pinPath)
89 | }
90 |
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/elf/table.go:
--------------------------------------------------------------------------------
1 | // +build linux
2 |
3 | // Copyright 2016 Cilium Project
4 | // Copyright 2016 Sylvain Afchain
5 | // Copyright 2016 Kinvolk
6 | //
7 | // Licensed under the Apache License, Version 2.0 (the "License");
8 | // you may not use this file except in compliance with the License.
9 | // You may obtain a copy of the License at
10 | //
11 | // http://www.apache.org/licenses/LICENSE-2.0
12 | //
13 | // Unless required by applicable law or agreed to in writing, software
14 | // distributed under the License is distributed on an "AS IS" BASIS,
15 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | // See the License for the specific language governing permissions and
17 | // limitations under the License.
18 |
19 | package elf
20 |
21 | import (
22 | "fmt"
23 | "syscall"
24 | "unsafe"
25 | )
26 |
27 | /*
28 | #include <linux/unistd.h>
29 | #include <linux/bpf.h>
30 |
31 | extern __u64 ptr_to_u64(void *);
32 |
33 | // from https://github.com/cilium/cilium/blob/master/pkg/bpf/bpf.go
34 | // Apache License, Version 2.0
35 |
36 | static void create_bpf_update_elem(int fd, void *key, void *value,
37 | unsigned long long flags, void *attr)
38 | {
39 | union bpf_attr* ptr_bpf_attr;
40 | ptr_bpf_attr = (union bpf_attr*)attr;
41 | ptr_bpf_attr->map_fd = fd;
42 | ptr_bpf_attr->key = ptr_to_u64(key);
43 | ptr_bpf_attr->value = ptr_to_u64(value);
44 | ptr_bpf_attr->flags = flags;
45 | }
46 |
47 | static void create_bpf_lookup_elem(int fd, void *key, void *value, void *attr)
48 | {
49 | union bpf_attr* ptr_bpf_attr;
50 | ptr_bpf_attr = (union bpf_attr*)attr;
51 | ptr_bpf_attr->map_fd = fd;
52 | ptr_bpf_attr->key = ptr_to_u64(key);
53 | ptr_bpf_attr->value = ptr_to_u64(value);
54 | }
55 |
56 | static void next_bpf_elem(int fd, void *key, void *next_key, void *attr)
57 | {
58 | union bpf_attr* ptr_bpf_attr;
59 | ptr_bpf_attr = (union bpf_attr*)attr;
60 | ptr_bpf_attr->map_fd = fd;
61 | ptr_bpf_attr->key = ptr_to_u64(key);
62 | ptr_bpf_attr->next_key = ptr_to_u64(next_key);
63 | }
64 | */
65 | import "C"
66 |
67 | // UpdateElement stores value in key in the map stored in mp.
68 | // The flags can have the following values (if you include "uapi/linux/bpf.h"):
69 | // C.BPF_ANY to create new element or update existing;
70 | // C.BPF_NOEXIST to create new element if it didn't exist;
71 | // C.BPF_EXIST to update existing element.
72 | func (b *Module) UpdateElement(mp *Map, key, value unsafe.Pointer, flags uint64) error {
73 | uba := C.union_bpf_attr{}
74 | C.create_bpf_update_elem(
75 | C.int(mp.m.fd),
76 | key,
77 | value,
78 | C.ulonglong(flags),
79 | unsafe.Pointer(&uba),
80 | )
81 | ret, _, err := syscall.Syscall(
82 | C.__NR_bpf,
83 | C.BPF_MAP_UPDATE_ELEM,
84 | uintptr(unsafe.Pointer(&uba)),
85 | unsafe.Sizeof(uba),
86 | )
87 |
88 | if ret != 0 || err != 0 {
89 | return fmt.Errorf("unable to update element: %s", err)
90 | }
91 |
92 | return nil
93 | }
94 |
95 | // LookupElement looks up the given key in the map stored in mp.
96 | // The value is stored in the value unsafe.Pointer.
97 | func (b *Module) LookupElement(mp *Map, key, value unsafe.Pointer) error {
98 | uba := C.union_bpf_attr{}
99 | C.create_bpf_lookup_elem(
100 | C.int(mp.m.fd),
101 | key,
102 | value,
103 | unsafe.Pointer(&uba),
104 | )
105 | ret, _, err := syscall.Syscall(
106 | C.__NR_bpf,
107 | C.BPF_MAP_LOOKUP_ELEM,
108 | uintptr(unsafe.Pointer(&uba)),
109 | unsafe.Sizeof(uba),
110 | )
111 |
112 | if ret != 0 || err != 0 {
113 | return fmt.Errorf("unable to lookup element: %s", err)
114 | }
115 |
116 | return nil
117 | }
118 |
119 | // DeleteElement deletes the given key in the map stored in mp.
120 | // The key is stored in the key unsafe.Pointer.
121 | func (b *Module) DeleteElement(mp *Map, key unsafe.Pointer) error {
122 | uba := C.union_bpf_attr{}
123 | value := unsafe.Pointer(nil)
124 | C.create_bpf_lookup_elem(
125 | C.int(mp.m.fd),
126 | key,
127 | value,
128 | unsafe.Pointer(&uba),
129 | )
130 | ret, _, err := syscall.Syscall(
131 | C.__NR_bpf,
132 | C.BPF_MAP_DELETE_ELEM,
133 | uintptr(unsafe.Pointer(&uba)),
134 | unsafe.Sizeof(uba),
135 | )
136 |
137 | if ret != 0 || err != 0 {
138 | return fmt.Errorf("unable to delete element: %s", err)
139 | }
140 |
141 | return nil
142 | }
143 |
144 | // LookupNextElement looks up the next element in mp using the given key.
145 | // The next key and the value are stored in the nextKey and value parameter.
146 | // Returns false at the end of the mp.
147 | func (b *Module) LookupNextElement(mp *Map, key, nextKey, value unsafe.Pointer) (bool, error) {
148 | uba := C.union_bpf_attr{}
149 | C.next_bpf_elem(
150 | C.int(mp.m.fd),
151 | key,
152 | nextKey,
153 | unsafe.Pointer(&uba),
154 | )
155 | ret, _, err := syscall.Syscall(
156 | C.__NR_bpf,
157 | C.BPF_MAP_GET_NEXT_KEY,
158 | uintptr(unsafe.Pointer(&uba)),
159 | unsafe.Sizeof(uba),
160 | )
161 | if err != 0 {
162 | return false, fmt.Errorf("unable to find next element: %s", err)
163 | }
164 | if ret != 0 {
165 | return false, nil
166 | }
167 |
168 | if err := b.LookupElement(mp, nextKey, value); err != nil {
169 | return false, err
170 | }
171 | return true, nil
172 | }
173 |
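Together, LookupNextElement and LookupElement support the usual eBPF map walk: start from a zero-valued key and follow BPF_MAP_GET_NEXT_KEY until it reports the end. A sketch of that idiom, written as if it lived in this package (it assumes 4-byte keys and values; the `Map` type comes from elf.go, which is not shown here):

```go
package elf

import (
	"fmt"
	"unsafe"
)

// walkMap is an illustrative sketch: print every key/value pair of a
// map with uint32 keys and values. Real callers size the key and value
// buffers to match the map definition.
func walkMap(b *Module, mp *Map) error {
	var key, nextKey, value uint32
	for {
		more, err := b.LookupNextElement(mp, unsafe.Pointer(&key), unsafe.Pointer(&nextKey), unsafe.Pointer(&value))
		if err != nil {
			return err
		}
		if !more {
			return nil // reached the end of the map
		}
		fmt.Printf("key=%d value=%d\n", nextKey, value)
		key = nextKey
	}
}
```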
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/elf/utsname_int8.go:
--------------------------------------------------------------------------------
1 | // +build linux,amd64 linux,arm64
2 |
3 | package elf
4 |
5 | func utsnameStr(in []int8) string {
6 | out := make([]byte, 0, len(in))
7 | for i := 0; i < len(in); i++ {
8 | if in[i] == 0 {
9 | break
10 | }
11 | out = append(out, byte(in[i]))
12 | }
13 | return string(out)
14 | }
15 |
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/elf/utsname_uint8.go:
--------------------------------------------------------------------------------
1 | // +build linux,arm linux,ppc64 linux,ppc64le linux,s390x
2 |
3 | package elf
4 |
5 | func utsnameStr(in []uint8) string {
6 | out := make([]byte, 0, len(in))
7 | for i := 0; i < len(in); i++ {
8 | if in[i] == 0 {
9 | break
10 | }
11 | out = append(out, byte(in[i]))
12 | }
13 | return string(out)
14 | }
15 |
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/pkg/bpffs/COPYRIGHT.txt:
--------------------------------------------------------------------------------
1 | Copyright 2016 PLUMgrid
2 | Copyright 2016 Kinvolk
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 |
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/pkg/bpffs/fs.go:
--------------------------------------------------------------------------------
1 | package bpffs
2 |
3 | import (
4 | "fmt"
5 | "syscall"
6 | "unsafe"
7 | )
8 |
9 | const BPFFSPath = "/sys/fs/bpf"
10 |
11 | var FsMagicBPFFS int32
12 |
13 | func init() {
14 | // https://github.com/coreutils/coreutils/blob/v8.27/src/stat.c#L275
15 | // https://github.com/torvalds/linux/blob/v4.8/include/uapi/linux/magic.h#L80
16 | magic := uint32(0xCAFE4A11)
17 | // 0xCAFE4A11 overflows an int32, which is what's expected by Statfs_t.Type in 32bit platforms.
18 | // To avoid conditional compilation for all 32bit/64bit platforms, we use an unsafe cast
19 | FsMagicBPFFS = *(*int32)(unsafe.Pointer(&magic))
20 | }
21 |
22 | // IsMountedAt checks if the BPF fs is mounted already in the custom location
23 | func IsMountedAt(mountpoint string) (bool, error) {
24 | var data syscall.Statfs_t
25 | if err := syscall.Statfs(mountpoint, &data); err != nil {
26 | return false, fmt.Errorf("cannot statfs %q: %v", mountpoint, err)
27 | }
28 | return int32(data.Type) == FsMagicBPFFS, nil
29 | }
30 |
31 | // IsMounted checks if the BPF fs is mounted already in the default location
32 | func IsMounted() (bool, error) {
33 | return IsMountedAt(BPFFSPath)
34 | }
35 |
36 | // MountAt mounts the BPF fs in the custom location (if not already mounted)
37 | func MountAt(mountpoint string) error {
38 | mounted, err := IsMountedAt(mountpoint)
39 | if err != nil {
40 | return err
41 | }
42 | if mounted {
43 | return nil
44 | }
45 | if err := syscall.Mount(mountpoint, mountpoint, "bpf", 0, ""); err != nil {
46 | return fmt.Errorf("error mounting %q: %v", mountpoint, err)
47 | }
48 | return nil
49 | }
50 |
51 | // Mount mounts the BPF fs in the default location (if not already mounted)
52 | func Mount() error {
53 | return MountAt(BPFFSPath)
54 | }
55 |
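Callers are expected to ensure the filesystem exists before pinning anything (pinObject in elf/pinning.go refuses to run otherwise). A minimal usage sketch of the exported helpers; mounting needs root:

```go
package main

import (
	"fmt"
	"log"

	"github.com/iovisor/gobpf/pkg/bpffs"
)

func main() {
	// Mount is a no-op if /sys/fs/bpf is already a BPF filesystem.
	if err := bpffs.Mount(); err != nil {
		log.Fatalf("mounting bpffs: %v", err)
	}
	mounted, err := bpffs.IsMounted()
	if err != nil {
		log.Fatalf("statfs: %v", err)
	}
	fmt.Println("bpffs mounted:", mounted) // true after a successful Mount
}
```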
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/pkg/cpuonline/COPYRIGHT.txt:
--------------------------------------------------------------------------------
1 | Copyright 2016 PLUMgrid
2 | Copyright 2016 Kinvolk
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 |
--------------------------------------------------------------------------------
/vendor/github.com/iovisor/gobpf/pkg/cpuonline/cpu_range.go:
--------------------------------------------------------------------------------
1 | package cpuonline
2 |
3 | import (
4 | "io/ioutil"
5 | "strconv"
6 | "strings"
7 | )
8 |
9 | const cpuOnline = "/sys/devices/system/cpu/online"
10 |
11 | // loosely based on https://github.com/iovisor/bcc/blob/v0.3.0/src/python/bcc/utils.py#L15
12 | func readCPURange(cpuRangeStr string) ([]uint, error) {
13 | var cpus []uint
14 | cpuRangeStr = strings.Trim(cpuRangeStr, "\n ")
15 | for _, cpuRange := range strings.Split(cpuRangeStr, ",") {
16 | rangeOp := strings.SplitN(cpuRange, "-", 2)
17 | first, err := strconv.ParseUint(rangeOp[0], 10, 32)
18 | if err != nil {
19 | return nil, err
20 | }
21 | if len(rangeOp) == 1 {
22 | cpus = append(cpus, uint(first))
23 | continue
24 | }
25 | last, err := strconv.ParseUint(rangeOp[1], 10, 32)
26 | if err != nil {
27 | return nil, err
28 | }
29 | for n := first; n <= last; n++ {
30 | cpus = append(cpus, uint(n))
31 | }
32 | }
33 | return cpus, nil
34 | }
35 |
36 | // Get returns a slice with the online CPUs, for example `[0, 2, 3]`
37 | func Get() ([]uint, error) {
38 | buf, err := ioutil.ReadFile(cpuOnline)
39 | if err != nil {
40 | return nil, err
41 | }
42 | return readCPURange(string(buf))
43 | }
44 |
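The online file's format mixes single CPUs and inclusive ranges, so a kernel reporting `0-2,4` yields `[0 1 2 4]`. A short sketch of the exported accessor in use (output depends on the machine it runs on):

```go
package main

import (
	"fmt"
	"log"

	"github.com/iovisor/gobpf/pkg/cpuonline"
)

func main() {
	// On a fully-online 4-core machine this prints [0 1 2 3];
	// an online file of "0-1,3" would yield [0 1 3].
	cpus, err := cpuonline.Get()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cpus)
}
```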
--------------------------------------------------------------------------------
/vendor/manifest:
--------------------------------------------------------------------------------
1 | {
2 | "version": 0,
3 | "dependencies": [
4 | {
5 | "importpath": "github.com/iovisor/gobpf/elf",
6 | "repository": "https://github.com/iovisor/gobpf",
7 | "vcs": "git",
8 | "revision": "4ece6c56f93637c86338885d37e98ff0fb97b4c9",
9 | "branch": "master",
10 | "path": "/elf",
11 | "notests": true
12 | },
13 | {
14 | "importpath": "github.com/iovisor/gobpf/pkg/bpffs",
15 | "repository": "https://github.com/iovisor/gobpf",
16 | "vcs": "git",
17 | "revision": "4ece6c56f93637c86338885d37e98ff0fb97b4c9",
18 | "branch": "master",
19 | "path": "/pkg/bpffs",
20 | "notests": true
21 | },
22 | {
23 | "importpath": "github.com/iovisor/gobpf/pkg/cpuonline",
24 | "repository": "https://github.com/iovisor/gobpf",
25 | "vcs": "git",
26 | "revision": "4ece6c56f93637c86338885d37e98ff0fb97b4c9",
27 | "branch": "master",
28 | "path": "/pkg/cpuonline",
29 | "notests": true
30 | }
31 | ]
32 | }
--------------------------------------------------------------------------------